From 5ce10d8850a3d104193a634ca6ee796299fd6270 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Mon, 11 Feb 2019 10:46:14 -0500
Subject: [PATCH] LU-9859 libcfs: use a workqueue for rehash work.

lustre has a work-item queuing scheme that provides the same
functionality as linux work_queues.  To make the code easier for
linux devs to follow, change to use work_queues.

Linux-commit: 0aa211e39857f17e24126c47f6e3fe3b971344b3

Change-Id: I1600ea1ef8769f1f6489b81fd578685ea58f9cb6
Signed-off-by: NeilBrown
Signed-off-by: Greg Kroah-Hartman
Reviewed-on: https://review.whamcloud.com/34169
Tested-by: Jenkins
Reviewed-by: Ben Evans
Tested-by: Maloo
Reviewed-by: Jian Yu
Reviewed-by: Oleg Drokin
---
 libcfs/include/libcfs/libcfs.h         |   3 +
 libcfs/include/libcfs/libcfs_hash.h    |   8 +-
 libcfs/include/libcfs/libcfs_private.h |   1 -
 libcfs/libcfs/hash.c                   | 137 ++++++++++++---------------------
 libcfs/libcfs/module.c                 |  17 ++--
 5 files changed, 64 insertions(+), 102 deletions(-)

diff --git a/libcfs/include/libcfs/libcfs.h b/libcfs/include/libcfs/libcfs.h
index b9ad772..99dc942 100644
--- a/libcfs/include/libcfs/libcfs.h
+++ b/libcfs/include/libcfs/libcfs.h
@@ -35,6 +35,7 @@
 
 #include
 #include
+#include
 #include
 #include
@@ -113,6 +114,8 @@ static inline void *__container_of(const void *ptr, unsigned long shift)
 #define container_of0(ptr, type, member) \
 	((type *)__container_of((ptr), offsetof(type, member)))
 
+extern struct workqueue_struct *cfs_rehash_wq;
+
 struct lnet_debugfs_symlink_def {
 	const char *name;
 	const char *target;
diff --git a/libcfs/include/libcfs/libcfs_hash.h b/libcfs/include/libcfs/libcfs_hash.h
index 0c385a3..bd3d7d0 100644
--- a/libcfs/include/libcfs/libcfs_hash.h
+++ b/libcfs/include/libcfs/libcfs_hash.h
@@ -39,6 +39,8 @@
 #define __LIBCFS_HASH_H__
 
 #include
+#include
+#include
 
 /*
  * Knuth recommends primes in approximately golden ratio to the maximum
@@ -241,7 +243,7 @@ struct cfs_hash {
 	/** # of iterators (caller of cfs_hash_for_each_*) */
 	__u32			hs_iterators;
 	/** rehash workitem */
-	struct cfs_workitem	hs_rehash_wi;
+	struct work_struct	hs_rehash_work;
 	/** refcount on this hash table */
 	atomic_t		hs_refcount;
 	/** rehash buckets-table */
@@ -258,7 +260,7 @@ struct cfs_hash {
 	/** bits when we found the max depth */
 	unsigned int		hs_dep_bits;
 	/** workitem to output max depth */
-	struct cfs_workitem	hs_dep_wi;
+	struct work_struct	hs_dep_work;
 #endif
 	/** name of htable */
 	char			hs_name[0];
@@ -729,7 +731,7 @@ __u64 cfs_hash_size_get(struct cfs_hash *hs);
  */
 void cfs_hash_rehash_cancel_locked(struct cfs_hash *hs);
 void cfs_hash_rehash_cancel(struct cfs_hash *hs);
-int cfs_hash_rehash(struct cfs_hash *hs, int do_rehash);
+void cfs_hash_rehash(struct cfs_hash *hs, int do_rehash);
 void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
			 void *new_key, struct hlist_node *hnode);
diff --git a/libcfs/include/libcfs/libcfs_private.h b/libcfs/include/libcfs/libcfs_private.h
index 4931e70..653ed67 100644
--- a/libcfs/include/libcfs/libcfs_private.h
+++ b/libcfs/include/libcfs/libcfs_private.h
@@ -414,6 +414,5 @@ static inline size_t cfs_round_strlen(char *fset)
 
 extern struct cfs_psdev_ops libcfs_psdev_ops;
 extern struct miscdevice libcfs_dev;
-extern struct cfs_wi_sched *cfs_sched_rehash;
 
 #endif
diff --git a/libcfs/libcfs/hash.c b/libcfs/libcfs/hash.c
index 228cf0b..3d2127f 100644
--- a/libcfs/libcfs/hash.c
+++ b/libcfs/libcfs/hash.c
@@ -114,7 +114,7 @@ module_param(warn_on_depth, uint, 0644);
 MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
 #endif
 
-struct cfs_wi_sched *cfs_sched_rehash;
+struct workqueue_struct *cfs_rehash_wq;
 
 static inline void
 cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
@@ -519,7 +519,7 @@ cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
 		hs->hs_dep_bits = hs->hs_cur_bits;
 	spin_unlock(&hs->hs_dep_lock);
 
-	cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
+	queue_work(cfs_rehash_wq, &hs->hs_dep_work);
 # endif
 }
 
@@ -940,12 +940,12 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
  * @flags  - CFS_HASH_REHASH enable synamic hash resizing
  *         - CFS_HASH_SORT enable chained hash sort
  */
-static int cfs_hash_rehash_worker(struct cfs_workitem *wi);
+static void cfs_hash_rehash_worker(struct work_struct *work);
 
 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
-static int cfs_hash_dep_print(struct cfs_workitem *wi)
+static void cfs_hash_dep_print(struct work_struct *work)
 {
-	struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
+	struct cfs_hash *hs = container_of(work, struct cfs_hash, hs_dep_work);
 	int dep;
 	int bkt;
 	int off;
@@ -969,21 +969,12 @@ static int cfs_hash_dep_print(struct cfs_workitem *wi)
 static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
 {
 	spin_lock_init(&hs->hs_dep_lock);
-	cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
+	INIT_WORK(&hs->hs_dep_work, cfs_hash_dep_print);
 }
 
 static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
 {
-	if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
-		return;
-
-	spin_lock(&hs->hs_dep_lock);
-	while (hs->hs_dep_bits != 0) {
-		spin_unlock(&hs->hs_dep_lock);
-		cond_resched();
-		spin_lock(&hs->hs_dep_lock);
-	}
-	spin_unlock(&hs->hs_dep_lock);
+	cancel_work_sync(&hs->hs_dep_work);
 }
 
 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
@@ -1050,7 +1041,7 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
 	hs->hs_ops = ops;
 	hs->hs_extra_bytes = extra_bytes;
 	hs->hs_rehash_bits = 0;
-	cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
+	INIT_WORK(&hs->hs_rehash_work, cfs_hash_rehash_worker);
 	cfs_hash_depth_wi_init(hs);
 
 	if (cfs_hash_with_rehash(hs))
@@ -1371,16 +1362,17 @@ cfs_hash_for_each_enter(struct cfs_hash *hs)
 	 */
 	hs->hs_iterating = 1;
 
-        cfs_hash_lock(hs, 1);
-        hs->hs_iterators++;
+	cfs_hash_lock(hs, 1);
+	hs->hs_iterators++;
+	cfs_hash_unlock(hs, 1);
 
-        /* NB: iteration is mostly called by service thread,
+	/* NB: iteration is mostly called by service thread,
 	 * we tend to cancel pending rehash-request, instead of
-	 * blocking service thread, we will relaunch rehash request
-	 * after iteration */
-	if (cfs_hash_is_rehashing(hs))
-		cfs_hash_rehash_cancel_locked(hs);
-	cfs_hash_unlock(hs, 1);
+	 * blocking service thread, we will relaunch rehash request
+	 * after iteration
+	 */
+	if (cfs_hash_is_rehashing(hs))
+		cfs_hash_rehash_cancel(hs);
 }
 
 static void
@@ -1790,42 +1782,13 @@ EXPORT_SYMBOL(cfs_hash_for_each_key);
  * theta thresholds for @hs are tunable via cfs_hash_set_theta().
  */
 void
-cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
-{
-	int i;
-
-	/* need hold cfs_hash_lock(hs, 1) */
-	LASSERT(cfs_hash_with_rehash(hs) &&
-		!cfs_hash_with_no_lock(hs));
-
-	if (!cfs_hash_is_rehashing(hs))
-		return;
-
-	if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
-		hs->hs_rehash_bits = 0;
-		return;
-	}
-
-	for (i = 2; cfs_hash_is_rehashing(hs); i++) {
-		cfs_hash_unlock(hs, 1);
-		/* raise console warning while waiting too long */
-		CDEBUG(is_power_of_2(i >> 3) ? D_WARNING : D_INFO,
-		       "hash %s is still rehashing, rescheded %d\n",
-		       hs->hs_name, i - 1);
-		cond_resched();
-		cfs_hash_lock(hs, 1);
-	}
-}
-
-void
 cfs_hash_rehash_cancel(struct cfs_hash *hs)
 {
-	cfs_hash_lock(hs, 1);
-	cfs_hash_rehash_cancel_locked(hs);
-	cfs_hash_unlock(hs, 1);
+	LASSERT(cfs_hash_with_rehash(hs));
+	cancel_work_sync(&hs->hs_rehash_work);
 }
 
-int
+void
 cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
 {
 	int rc;
@@ -1834,24 +1797,24 @@ cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
 
 	cfs_hash_lock(hs, 1);
 
-        rc = cfs_hash_rehash_bits(hs);
-        if (rc <= 0) {
-                cfs_hash_unlock(hs, 1);
-                return rc;
-        }
+	rc = cfs_hash_rehash_bits(hs);
+	if (rc <= 0) {
+		cfs_hash_unlock(hs, 1);
+		return;
+	}
 
-        hs->hs_rehash_bits = rc;
-        if (!do_rehash) {
-                /* launch and return */
-                cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
-                cfs_hash_unlock(hs, 1);
-                return 0;
-        }
+	hs->hs_rehash_bits = rc;
+	if (!do_rehash) {
+		/* launch and return */
+		queue_work(cfs_rehash_wq, &hs->hs_rehash_work);
+		cfs_hash_unlock(hs, 1);
+		return;
+	}
 
-        /* rehash right now */
-        cfs_hash_unlock(hs, 1);
+	/* rehash right now */
+	cfs_hash_unlock(hs, 1);
 
-        return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
+	cfs_hash_rehash_worker(&hs->hs_rehash_work);
 }
 
 static int
@@ -1884,11 +1847,11 @@ cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
 	return c;
 }
 
-static int
-cfs_hash_rehash_worker(struct cfs_workitem *wi)
+static void
+cfs_hash_rehash_worker(struct work_struct *work)
 {
-	struct cfs_hash *hs =
-		container_of(wi, struct cfs_hash, hs_rehash_wi);
+	struct cfs_hash *hs = container_of(work, struct cfs_hash,
+					   hs_rehash_work);
 	struct cfs_hash_bucket **bkts;
 	struct cfs_hash_bd bd;
 	unsigned int old_size;
@@ -1969,20 +1932,16 @@ cfs_hash_rehash_worker(struct cfs_workitem *wi)
 	hs->hs_buckets = hs->hs_rehash_buckets;
 	hs->hs_rehash_buckets = NULL;
 
-	hs->hs_cur_bits = hs->hs_rehash_bits;
- out:
-	hs->hs_rehash_bits = 0;
-	if (rc == -ESRCH) /* never be scheduled again */
-		cfs_wi_exit(cfs_sched_rehash, wi);
-	bsize = cfs_hash_bkt_size(hs);
-	cfs_hash_unlock(hs, 1);
-	/* can't refer to @hs anymore because it could be destroyed */
-	if (bkts != NULL)
-		cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
-	if (rc != 0)
+	hs->hs_cur_bits = hs->hs_rehash_bits;
+out:
+	hs->hs_rehash_bits = 0;
+	bsize = cfs_hash_bkt_size(hs);
+	cfs_hash_unlock(hs, 1);
+	/* can't refer to @hs anymore because it could be destroyed */
+	if (bkts != NULL)
+		cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
+	if (rc != 0)
 		CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
-	/* return 1 only if cfs_wi_exit is called */
-	return rc == -ESRCH;
 }
 
 /**
diff --git a/libcfs/libcfs/module.c b/libcfs/libcfs/module.c
index 370c905..99b6f79 100644
--- a/libcfs/libcfs/module.c
+++ b/libcfs/libcfs/module.c
@@ -600,12 +600,11 @@ static int __init libcfs_init(void)
 		goto cleanup_deregister;
 	}
 
-	/* max to 4 threads, should be enough for rehash */
-	rc = min(cfs_cpt_weight(cfs_cpt_table, CFS_CPT_ANY), 4);
-	rc = cfs_wi_sched_create("cfs_rh", cfs_cpt_table, CFS_CPT_ANY,
-				 rc, &cfs_sched_rehash);
-	if (rc != 0) {
-		CERROR("Startup workitem scheduler: error: %d\n", rc);
+	cfs_rehash_wq = alloc_workqueue("cfs_rh", WQ_SYSFS, 4);
+	if (!cfs_rehash_wq) {
+		rc = -ENOMEM;
+		CERROR("libcfs: failed to start rehash workqueue: rc = %d\n",
+		       rc);
 		goto cleanup_deregister;
 	}
 
@@ -639,9 +638,9 @@ static void __exit libcfs_exit(void)
 	CDEBUG(D_MALLOC, "before Portals cleanup: kmem %d\n",
 	       atomic_read(&libcfs_kmemory));
 
-	if (cfs_sched_rehash != NULL) {
-		cfs_wi_sched_destroy(cfs_sched_rehash);
-		cfs_sched_rehash = NULL;
+	if (cfs_rehash_wq) {
+		destroy_workqueue(cfs_rehash_wq);
+		cfs_rehash_wq = NULL;
 	}
 
 	cfs_crypto_unregister();
-- 
1.8.3.1
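
For reference, below is a minimal sketch of the workqueue pattern the patch
adopts, using hypothetical names (example_hash, example_wq,
example_rehash_worker); the patch itself applies the same calls to
hs_rehash_work, hs_dep_work and cfs_rehash_wq. A work_struct embedded in the
owning object replaces the cfs_workitem, INIT_WORK() replaces cfs_wi_init(),
queue_work() replaces cfs_wi_schedule(), and cancel_work_sync() replaces the
deschedule-and-wait logic, while alloc_workqueue()/destroy_workqueue() stand
in for cfs_wi_sched_create()/cfs_wi_sched_destroy().

/* Illustrative sketch only, not part of the patch; names are hypothetical. */
#include <linux/kernel.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

struct example_hash {
	struct work_struct	rehash_work;	/* was a cfs_workitem */
	/* ... hash-table state ... */
};

static void example_rehash_worker(struct work_struct *work)
{
	struct example_hash *eh = container_of(work, struct example_hash,
					       rehash_work);
	/* resize/rehash using eh->... */
}

static int example_init(struct example_hash *eh)
{
	/* max_active of 4 caps in-flight work items, as in libcfs_init() */
	example_wq = alloc_workqueue("example_wq", WQ_SYSFS, 4);
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&eh->rehash_work, example_rehash_worker);
	return 0;
}

static void example_resize(struct example_hash *eh)
{
	queue_work(example_wq, &eh->rehash_work);	/* was cfs_wi_schedule() */
}

static void example_fini(struct example_hash *eh)
{
	cancel_work_sync(&eh->rehash_work);	/* was deschedule + busy-wait */
	destroy_workqueue(example_wq);
	example_wq = NULL;
}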