* GPL HEADER END
*/
/* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2015, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <libcfs/libcfs.h>
-#ifdef __KERNEL__
-
/** destroy cpu-partition lock, see libcfs_private.h for more detail */
void
cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
 * reason we always allocate a cacheline-aligned memory block.
*/
struct cfs_percpt_lock *
-cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
+cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
+ struct lock_class_key *keys)
{
struct cfs_percpt_lock *pcl;
spinlock_t *lock;
return NULL;
}
- cfs_percpt_for_each(lock, i, pcl->pcl_locks)
+ if (keys == NULL) {
+ CWARN("Cannot set up class key for percpt lock, you may see "
+ "false-positive recursive locking warnings.\n");
+ }
+
+ cfs_percpt_for_each(lock, i, pcl->pcl_locks) {
spin_lock_init(lock);
+ if (keys != NULL)
+ lockdep_set_class(lock, &keys[i]);
+ }
return pcl;
}
-EXPORT_SYMBOL(cfs_percpt_lock_alloc);
+EXPORT_SYMBOL(cfs_percpt_lock_create);
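/*
 * Illustrative sketch, not part of this patch: callers are expected to pass
 * an array of static lockdep class keys, one slot per CPU partition, so
 * lockdep can tell the per-partition spinlocks apart.  The identifiers
 * example_keys and example_pcl are hypothetical, and NR_CPUS is only used
 * here as a safe upper bound on the partition count.
 *
 *	static struct lock_class_key example_keys[NR_CPUS];
 *	struct cfs_percpt_lock *example_pcl;
 *
 *	example_pcl = cfs_percpt_lock_create(cptab, example_keys);
 *	if (example_pcl == NULL)
 *		return -ENOMEM;
 *	...
 *	cfs_percpt_lock_free(example_pcl);
 */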
/**
* lock a CPU partition
*/
void
cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
+__acquires(pcl->pcl_locks)
{
int ncpt = cfs_cpt_number(pcl->pcl_cptab);
int i;
/** unlock a CPU partition */
void
cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
+__releases(pcl->pcl_locks)
{
int ncpt = cfs_cpt_number(pcl->pcl_cptab);
int i;
}
}
EXPORT_SYMBOL(cfs_percpt_unlock);
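/*
 * Illustrative sketch, not part of this patch: taking the lock of the
 * current CPU's partition, then taking all partitions at once.  This
 * assumes cfs_cpt_current() and the CFS_PERCPT_LOCK_EX index from the
 * libcfs headers; example_pcl is the hypothetical lock created above.
 *
 *	int cpt = cfs_cpt_current(example_pcl->pcl_cptab, 1);
 *
 *	cfs_percpt_lock(example_pcl, cpt);
 *	... update state private to this partition ...
 *	cfs_percpt_unlock(example_pcl, cpt);
 *
 *	cfs_percpt_lock(example_pcl, CFS_PERCPT_LOCK_EX);
 *	... update state shared across partitions ...
 *	cfs_percpt_unlock(example_pcl, CFS_PERCPT_LOCK_EX);
 */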
-
-#else /* !__KERNEL__ */
-# ifdef HAVE_LIBPTHREAD
-
-struct cfs_percpt_lock *
-cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
-{
- struct cfs_percpt_lock *pcl;
-
- CFS_ALLOC_PTR(pcl);
- if (pcl != NULL)
- pthread_mutex_init(&pcl->pcl_mutex, NULL);
-
- return pcl;
-}
-
-void
-cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
-{
- pthread_mutex_destroy(&pcl->pcl_mutex);
- CFS_FREE_PTR(pcl);
-}
-
-void
-cfs_percpt_lock(struct cfs_percpt_lock *pcl, int lock)
-{
- pthread_mutex_lock(&(pcl)->pcl_mutex);
-}
-
-void
-cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int lock)
-{
- pthread_mutex_unlock(&(pcl)->pcl_mutex);
-}
-
-# else /* !HAVE_LIBPTHREAD */
-
-struct cfs_percpt_lock *
-cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
-{
- return ((struct cfs_percpt_lock *) &CFS_PERCPT_LOCK_MAGIC);
-}
-
-void
-cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
-{
- LASSERT(pcl == (struct cfs_percpt_lock *) &CFS_PERCPT_LOCK_MAGIC);
-}
-
-void
-cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
-{
- LASSERT(pcl == (struct cfs_percpt_lock *) &CFS_PERCPT_LOCK_MAGIC);
-}
-
-void
-cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
-{
- LASSERT(pcl == (struct cfs_percpt_lock *) &CFS_PERCPT_LOCK_MAGIC);
-}
-
-# endif /* HAVE_LIBPTHREAD */
-#endif /* __KERNEL__ */
-
-/** free cpu-partition refcount */
-void
-cfs_percpt_atomic_free(cfs_atomic_t **refs)
-{
- cfs_percpt_free(refs);
-}
-EXPORT_SYMBOL(cfs_percpt_atomic_free);
-
-/** allocate cpu-partition refcount with initial value @init_val */
-cfs_atomic_t **
-cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val)
-{
- cfs_atomic_t **refs;
- cfs_atomic_t *ref;
- int i;
-
- refs = cfs_percpt_alloc(cptab, sizeof(*ref));
- if (refs == NULL)
- return NULL;
-
- cfs_percpt_for_each(ref, i, refs)
- cfs_atomic_set(ref, init_val);
- return refs;
-}
-EXPORT_SYMBOL(cfs_percpt_atomic_alloc);
-
-/** return sum of cpu-partition refs */
-int
-cfs_percpt_atomic_summary(cfs_atomic_t **refs)
-{
- cfs_atomic_t *ref;
- int i;
- int val = 0;
-
- cfs_percpt_for_each(ref, i, refs)
- val += cfs_atomic_read(ref);
-
- return val;
-}
-EXPORT_SYMBOL(cfs_percpt_atomic_summary);