From: James Simmons
Date: Wed, 12 Aug 2015 14:29:03 +0000 (-0400)
Subject: LU-6245 libcfs: remove atomic cpt allocations
X-Git-Tag: 2.7.59~59
X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=commitdiff_plain;h=2de6f02a3e1a4c5f17d4baf3044795453ac52009

LU-6245 libcfs: remove atomic cpt allocations

libcfs contains functions to perform atomic memory operations.
These functions have never been used so remove them.

Change-Id: I48dff483c4560700acbddff6268735837ed12b79
Signed-off-by: frank zago
Signed-off-by: James Simmons
Reviewed-on: http://review.whamcloud.com/15913
Tested-by: Jenkins
Tested-by: Maloo
Reviewed-by: John L. Hammond
Reviewed-by: Oleg Drokin
---

diff --git a/libcfs/include/libcfs/libcfs_cpu.h b/libcfs/include/libcfs/libcfs_cpu.h
index f137a64..98832fd 100644
--- a/libcfs/include/libcfs/libcfs_cpu.h
+++ b/libcfs/include/libcfs/libcfs_cpu.h
@@ -208,6 +208,68 @@ int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt);
  */
 int cfs_cpu_ht_nsiblings(int cpu);
 
+/*
+ * allocate per-cpu-partition data, returned value is an array of pointers,
+ * variable can be indexed by CPU ID.
+ *	cptab != NULL: size of array is number of CPU partitions
+ *	cptab == NULL: size of array is number of HW cores
+ */
+void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
+/*
+ * destory per-cpu-partition variable
+ */
+void cfs_percpt_free(void *vars);
+int cfs_percpt_number(void *vars);
+
+#define cfs_percpt_for_each(var, i, vars)		\
+	for (i = 0; i < cfs_percpt_number(vars) &&	\
+	     ((var) = (vars)[i]) != NULL; i++)
+
+/*
+ * percpu partition lock
+ *
+ * There are some use-cases like this in Lustre:
+ * . each CPU partition has it's own private data which is frequently changed,
+ *   and mostly by the local CPU partition.
+ * . all CPU partitions share some global data, these data are rarely changed.
+ *
+ * LNet is typical example.
+ * CPU partition lock is designed for this kind of use-cases:
+ * . each CPU partition has it's own private lock
+ * . change on private data just needs to take the private lock
+ * . read on shared data just needs to take _any_ of private locks
+ * . change on shared data needs to take _all_ private locks,
+ *   which is slow and should be really rare.
+ */
+enum {
+	CFS_PERCPT_LOCK_EX	= -1,	/* negative */
+};
+
+struct cfs_percpt_lock {
+	/* cpu-partition-table for this lock */
+	struct cfs_cpt_table	 *pcl_cptab;
+	/* exclusively locked */
+	unsigned int		  pcl_locked;
+	/* private lock table */
+	spinlock_t		**pcl_locks;
+};
+
+/* return number of private locks */
+#define cfs_percpt_lock_num(pcl)	cfs_cpt_number(pcl->pcl_cptab)
+
+/*
+ * create a cpu-partition lock based on CPU partition table \a cptab,
+ * each private lock has extra \a psize bytes padding data
+ */
+struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab);
+/* destroy a cpu-partition lock */
+void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
+
+/* lock private lock \a index of \a pcl */
+void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
+/* unlock private lock \a index of \a pcl */
+void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
+
 /**
  * allocate \a nr_bytes of physical memory from a contiguous region with the
  * properties of \a flags which are bound to the partition id \a cpt.
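
For readers unfamiliar with the per-CPT primitives consolidated into libcfs_cpu.h above, the following is a minimal usage sketch, not part of this patch. The demo_* names, the demo_data layout and the init/teardown flow are illustrative assumptions; only the cfs_percpt_*, cfs_cpt_current() and CFS_PERCPT_LOCK_EX identifiers come from the header shown in the hunk.

/*
 * Illustrative sketch only (not part of this patch): how a caller might
 * use the per-CPT lock declared above.  All "demo_*" names are
 * hypothetical; only the cfs_* calls come from the libcfs headers.
 */
#include <libcfs/libcfs.h>

struct demo_data {
	long		dd_count;	/* mostly touched by the local CPT */
};

static struct cfs_percpt_lock	 *demo_pcl;
static struct demo_data	**demo_vars;

static int demo_init(struct cfs_cpt_table *cptab)
{
	demo_pcl = cfs_percpt_lock_alloc(cptab);
	if (demo_pcl == NULL)
		return -ENOMEM;

	/* one struct demo_data per CPU partition */
	demo_vars = cfs_percpt_alloc(cptab, sizeof(**demo_vars));
	if (demo_vars == NULL) {
		cfs_percpt_lock_free(demo_pcl);
		return -ENOMEM;
	}
	return 0;
}

/* fast path: private data changes only take the local partition's lock */
static void demo_bump(struct cfs_cpt_table *cptab)
{
	int	cpt = cfs_cpt_current(cptab, 0);

	if (cpt < 0)		/* current CPU is not in the table */
		return;

	cfs_percpt_lock(demo_pcl, cpt);
	demo_vars[cpt]->dd_count++;
	cfs_percpt_unlock(demo_pcl, cpt);
}

/* slow path: changes to shared state take every private lock at once */
static void demo_reset_all(void)
{
	struct demo_data	*dd;
	int			 i;

	cfs_percpt_lock(demo_pcl, CFS_PERCPT_LOCK_EX);
	cfs_percpt_for_each(dd, i, demo_vars)
		dd->dd_count = 0;
	cfs_percpt_unlock(demo_pcl, CFS_PERCPT_LOCK_EX);
}

A thread that only touches its own partition's data contends only on its own spinlock, while the CFS_PERCPT_LOCK_EX path serializes against every partition, which matches the "slow and should be really rare" shared-write case described in the header comment.
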
diff --git a/libcfs/include/libcfs/libcfs_private.h b/libcfs/include/libcfs/libcfs_private.h
index 4f42773..09ae277f 100644
--- a/libcfs/include/libcfs/libcfs_private.h
+++ b/libcfs/include/libcfs/libcfs_private.h
@@ -311,27 +311,6 @@ int libcfs_debug_cleanup(void);
 /* !__KERNEL__ */
 #endif
 
-struct cfs_cpt_table;
-
-/*
- * allocate per-cpu-partition data, returned value is an array of pointers,
- * variable can be indexed by CPU ID.
- *	cptable != NULL: size of array is number of CPU partitions
- *	cptable == NULL: size of array is number of HW cores
- */
-void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
-/*
- * destory per-cpu-partition variable
- */
-void cfs_percpt_free(void *vars);
-int cfs_percpt_number(void *vars);
-void *cfs_percpt_current(void *vars);
-void *cfs_percpt_index(void *vars, int idx);
-
-#define cfs_percpt_for_each(var, i, vars)		\
-	for (i = 0; i < cfs_percpt_number(vars) &&	\
-	     ((var) = (vars)[i]) != NULL; i++)
-
 /*
  * allocate a variable array, returned value is an array of pointers.
  * Caller can specify length of array by count.
@@ -434,61 +413,6 @@ do { \
 #define CFS_ALLOC_PTR(ptr)	LIBCFS_ALLOC(ptr, sizeof (*(ptr)));
 #define CFS_FREE_PTR(ptr)	LIBCFS_FREE(ptr, sizeof (*(ptr)));
 
-/*
- * percpu partition lock
- *
- * There are some use-cases like this in Lustre:
- * . each CPU partition has it's own private data which is frequently changed,
- *   and mostly by the local CPU partition.
- * . all CPU partitions share some global data, these data are rarely changed.
- *
- * LNet is typical example.
- * CPU partition lock is designed for this kind of use-cases:
- * . each CPU partition has it's own private lock
- * . change on private data just needs to take the private lock
- * . read on shared data just needs to take _any_ of private locks
- * . change on shared data needs to take _all_ private locks,
- *   which is slow and should be really rare.
- */
-
-enum {
-	CFS_PERCPT_LOCK_EX	= -1,	/* negative */
-};
-
-#ifdef __KERNEL__
-
-struct cfs_percpt_lock {
-	/* cpu-partition-table for this lock */
-	struct cfs_cpt_table	 *pcl_cptab;
-	/* exclusively locked */
-	unsigned int		  pcl_locked;
-	/* private lock table */
-	spinlock_t		**pcl_locks;
-};
-
-/* return number of private locks */
-#define cfs_percpt_lock_num(pcl)	cfs_cpt_number(pcl->pcl_cptab)
-
-/*
- * create a cpu-partition lock based on CPU partition table \a cptab,
- * each private lock has extra \a psize bytes padding data
- */
-struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab);
-/* destroy a cpu-partition lock */
-void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
-
-/* lock private lock \a index of \a pcl */
-void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
-/* unlock private lock \a index of \a pcl */
-void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
-/* create percpt (atomic) refcount based on @cptab */
-atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val);
-/* destroy percpt refcount */
-void cfs_percpt_atomic_free(atomic_t **refs);
-/* return sum of all percpu refs */
-int cfs_percpt_atomic_summary(atomic_t **refs);
-#endif /* __KERNEL__ */
-
 /** Compile-time assertion.
 
  * Check an invariant described by a constant expression at compile time by
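
The declarations removed from libcfs_private.h above are the same ones now carried in libcfs_cpu.h, so callers are unaffected. For reference, a minimal sketch of the allocate/walk/free lifecycle those declarations describe, not part of this patch; demo_stats and its field are hypothetical names.

/*
 * Illustrative sketch only (not part of this patch): lifecycle of a
 * per-CPT array.  "demo_stats" is a hypothetical type; the cfs_percpt_*
 * calls are the ones declared in the headers shown in this commit.
 */
#include <libcfs/libcfs.h>

struct demo_stats {
	unsigned long	ds_events;
};

static unsigned long demo_stats_sum(struct cfs_cpt_table *cptab)
{
	struct demo_stats	**stats;
	struct demo_stats	 *st;
	unsigned long		  total = 0;
	int			  i;

	/* one struct demo_stats per CPU partition */
	stats = cfs_percpt_alloc(cptab, sizeof(*st));
	if (stats == NULL)
		return 0;

	/* cfs_percpt_number() reports how many entries were allocated */
	for (i = 0; i < cfs_percpt_number(stats); i++)
		stats[i]->ds_events = 0;

	/* cfs_percpt_for_each() is the idiomatic walk over the array */
	cfs_percpt_for_each(st, i, stats)
		total += st->ds_events;

	cfs_percpt_free(stats);
	return total;
}
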
diff --git a/libcfs/libcfs/libcfs_lock.c b/libcfs/libcfs/libcfs_lock.c
index 747337f..dae4287 100644
--- a/libcfs/libcfs/libcfs_lock.c
+++ b/libcfs/libcfs/libcfs_lock.c
@@ -146,44 +146,3 @@ __releases(pcl->pcl_locks)
 	}
 }
 EXPORT_SYMBOL(cfs_percpt_unlock);
-
-/** free cpu-partition refcount */
-void
-cfs_percpt_atomic_free(atomic_t **refs)
-{
-	cfs_percpt_free(refs);
-}
-EXPORT_SYMBOL(cfs_percpt_atomic_free);
-
-/** allocate cpu-partition refcount with initial value @init_val */
-atomic_t **
-cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val)
-{
-	atomic_t	**refs;
-	atomic_t	 *ref;
-	int		  i;
-
-	refs = cfs_percpt_alloc(cptab, sizeof(*ref));
-	if (refs == NULL)
-		return NULL;
-
-	cfs_percpt_for_each(ref, i, refs)
-		atomic_set(ref, init_val);
-	return refs;
-}
-EXPORT_SYMBOL(cfs_percpt_atomic_alloc);
-
-/** return sum of cpu-partition refs */
-int
-cfs_percpt_atomic_summary(atomic_t **refs)
-{
-	atomic_t	*ref;
-	int		 i;
-	int		 val = 0;
-
-	cfs_percpt_for_each(ref, i, refs)
-		val += atomic_read(ref);
-
-	return val;
-}
-EXPORT_SYMBOL(cfs_percpt_atomic_summary);
diff --git a/libcfs/libcfs/libcfs_mem.c b/libcfs/libcfs/libcfs_mem.c
index 0f1fb56..3e83f50 100644
--- a/libcfs/libcfs/libcfs_mem.c
+++ b/libcfs/libcfs/libcfs_mem.c
@@ -119,34 +119,6 @@ cfs_percpt_number(void *vars)
 EXPORT_SYMBOL(cfs_percpt_number);
 
 /*
- * return memory block shadowed from current CPU
- */
-void *
-cfs_percpt_current(void *vars)
-{
-	struct cfs_var_array	*arr;
-	int			 cpt;
-
-	arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
-	cpt = cfs_cpt_current(arr->va_cptab, 0);
-	if (cpt < 0)
-		return NULL;
-
-	return arr->va_ptrs[cpt];
-}
-
-void *
-cfs_percpt_index(void *vars, int idx)
-{
-	struct cfs_var_array *arr;
-
-	arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
-
-	LASSERT(idx >= 0 && idx < arr->va_count);
-	return arr->va_ptrs[idx];
-}
-
-/*
  * free variable array, see more detail in cfs_array_alloc
  */
 void
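
The deleted cfs_percpt_atomic_*() helpers and cfs_percpt_current()/cfs_percpt_index() were thin wrappers over the API that survives this patch, so any future caller can open-code the same behaviour. A hedged sketch follows, mirroring the deleted bodies above; the demo_* names are hypothetical.

/*
 * Illustrative sketch only (not part of this patch): equivalents of the
 * deleted helpers, built from the surviving API.  "demo_*" names are
 * hypothetical; the cfs_* and atomic_* calls are the ones used above.
 */
#include <libcfs/libcfs.h>

/* was cfs_percpt_atomic_alloc(cptab, init_val) */
static atomic_t **demo_refs_alloc(struct cfs_cpt_table *cptab, int init_val)
{
	atomic_t	**refs;
	atomic_t	 *ref;
	int		  i;

	refs = cfs_percpt_alloc(cptab, sizeof(*ref));
	if (refs == NULL)
		return NULL;

	cfs_percpt_for_each(ref, i, refs)
		atomic_set(ref, init_val);
	return refs;
}

/* was cfs_percpt_atomic_summary(refs) */
static int demo_refs_sum(atomic_t **refs)
{
	atomic_t	*ref;
	int		 i;
	int		 val = 0;

	cfs_percpt_for_each(ref, i, refs)
		val += atomic_read(ref);
	return val;
}

/* was cfs_percpt_atomic_free(refs) */
static void demo_refs_free(atomic_t **refs)
{
	cfs_percpt_free(refs);
}

/* was cfs_percpt_current(refs): index the array with the caller's CPT */
static atomic_t *demo_refs_current(struct cfs_cpt_table *cptab, atomic_t **refs)
{
	int	cpt = cfs_cpt_current(cptab, 0);

	return cpt < 0 ? NULL : refs[cpt];
}
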