MODULE_PARM_DESC(warn_on_depth, "warn when hash depth is high.");
#endif
-struct cfs_wi_sched *cfs_sched_rehash;
+struct workqueue_struct *cfs_rehash_wq;
static inline void
cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
write_unlock(&lock->rw);
}
+static inline void
+cfs_hash_rw_sem_lock(union cfs_hash_lock *lock, int exclusive)
+ __acquires(&lock->rw_sem)
+{
+ if (!exclusive)
+ down_read(&lock->rw_sem);
+ else
+ down_write(&lock->rw_sem);
+}
+
+static inline void
+cfs_hash_rw_sem_unlock(union cfs_hash_lock *lock, int exclusive)
+ __releases(&lock->rw_sem)
+{
+ if (!exclusive)
+ up_read(&lock->rw_sem);
+ else
+ up_write(&lock->rw_sem);
+}
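
Note: the two helpers above assume that union cfs_hash_lock grows a
struct rw_semaphore member named rw_sem alongside the existing spin and
rw members (the name is taken from the accessors). A minimal sketch of
the assumed union:

    union cfs_hash_lock {
            rwlock_t                rw;     /* rwlock */
            spinlock_t              spin;   /* spinlock */
            struct rw_semaphore     rw_sem; /* sleepable bucket lock (new) */
    };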
+
/** No lock hash */
static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
.hs_lock = cfs_hash_nl_lock,
.hs_bkt_unlock = cfs_hash_rw_unlock,
};
+/** rw_sem bucket lock, rehash is disabled */
+static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_sem_lops = {
+ .hs_lock = cfs_hash_nl_lock,
+ .hs_unlock = cfs_hash_nl_unlock,
+ .hs_bkt_lock = cfs_hash_rw_sem_lock,
+ .hs_bkt_unlock = cfs_hash_rw_sem_unlock,
+};
+
+/** rw_sem bucket lock, rehash is enabled */
+static struct cfs_hash_lock_ops cfs_hash_bkt_rw_sem_lops = {
+ .hs_lock = cfs_hash_rw_sem_lock,
+ .hs_unlock = cfs_hash_rw_sem_unlock,
+ .hs_bkt_lock = cfs_hash_rw_sem_lock,
+ .hs_bkt_unlock = cfs_hash_rw_sem_unlock,
+};
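For reference, both new tables fill in the same four callbacks of
struct cfs_hash_lock_ops, roughly:

    struct cfs_hash_lock_ops {
            /* lock/unlock the whole hash table */
            void (*hs_lock)(union cfs_hash_lock *lock, int exclusive);
            void (*hs_unlock)(union cfs_hash_lock *lock, int exclusive);
            /* lock/unlock a single hash bucket */
            void (*hs_bkt_lock)(union cfs_hash_lock *lock, int exclusive);
            void (*hs_bkt_unlock)(union cfs_hash_lock *lock, int exclusive);
    };
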
+
static void
cfs_hash_lock_setup(struct cfs_hash *hs)
{
spin_lock_init(&hs->hs_lock.spin);
} else if (cfs_hash_with_rehash(hs)) {
- rwlock_init(&hs->hs_lock.rw);
-
- if (cfs_hash_with_rw_bktlock(hs))
- hs->hs_lops = &cfs_hash_bkt_rw_lops;
- else if (cfs_hash_with_spin_bktlock(hs))
- hs->hs_lops = &cfs_hash_bkt_spin_lops;
- else
- LBUG();
+ if (cfs_hash_with_rw_sem_bktlock(hs)) {
+ init_rwsem(&hs->hs_lock.rw_sem);
+ hs->hs_lops = &cfs_hash_bkt_rw_sem_lops;
+ } else {
+ rwlock_init(&hs->hs_lock.rw);
+
+ if (cfs_hash_with_rw_bktlock(hs))
+ hs->hs_lops = &cfs_hash_bkt_rw_lops;
+ else if (cfs_hash_with_spin_bktlock(hs))
+ hs->hs_lops = &cfs_hash_bkt_spin_lops;
+ else
+ LBUG();
+ }
} else {
if (cfs_hash_with_rw_bktlock(hs))
hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
else if (cfs_hash_with_spin_bktlock(hs))
hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
+ else if (cfs_hash_with_rw_sem_bktlock(hs))
+ hs->hs_lops = &cfs_hash_nr_bkt_rw_sem_lops;
else
LBUG();
}
hs->hs_dep_bits = hs->hs_cur_bits;
spin_unlock(&hs->hs_dep_lock);
- cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
+ queue_work(cfs_rehash_wq, &hs->hs_dep_work);
# endif
}
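
Since cfs_rehash_wq replaces the cfs_wi scheduler, it has to be allocated
at module init and destroyed at module exit. A sketch of the expected
lifecycle; the "cfs_rh" name and the max_active value of 4 are
illustrative:

    /* module init */
    cfs_rehash_wq = alloc_workqueue("cfs_rh", WQ_SYSFS, 4);
    if (!cfs_rehash_wq) {
            CERROR("failed to start rehash workqueue\n");
            return -ENOMEM;
    }

    /* module exit */
    destroy_workqueue(cfs_rehash_wq);
    cfs_rehash_wq = NULL;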
rwlock_init(&new_bkts[i]->hsb_lock.rw);
else if (cfs_hash_with_spin_bktlock(hs))
spin_lock_init(&new_bkts[i]->hsb_lock.spin);
+ else if (cfs_hash_with_rw_sem_bktlock(hs))
+ init_rwsem(&new_bkts[i]->hsb_lock.rw_sem);
else
LBUG(); /* invalid use-case */
}
 * @flags - CFS_HASH_REHASH enable dynamic hash resizing
* - CFS_HASH_SORT enable chained hash sort
*/
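
Callers opt into the semaphore-based bucket lock through these creation
flags. A hypothetical example, assuming the new flag is named
CFS_HASH_RW_SEM_BKTLOCK to match the cfs_hash_with_rw_sem_bktlock()
test used above:

    hs = cfs_hash_create("example_hash",
                         5,   /* cur_bits */
                         15,  /* max_bits */
                         5,   /* bkt_bits */
                         0,   /* extra_bytes */
                         CFS_HASH_MIN_THETA,
                         CFS_HASH_MAX_THETA,
                         &example_hash_ops, /* hypothetical ops table */
                         CFS_HASH_REHASH | CFS_HASH_RW_SEM_BKTLOCK);
    if (hs == NULL)
            return -ENOMEM;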
-static int cfs_hash_rehash_worker(struct cfs_workitem *wi);
+static void cfs_hash_rehash_worker(struct work_struct *work);
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
-static int cfs_hash_dep_print(struct cfs_workitem *wi)
+static void cfs_hash_dep_print(struct work_struct *work)
{
- struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
+ struct cfs_hash *hs = container_of(work, struct cfs_hash, hs_dep_work);
int dep;
int bkt;
int off;
static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
{
spin_lock_init(&hs->hs_dep_lock);
- cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
+ INIT_WORK(&hs->hs_dep_work, cfs_hash_dep_print);
}
static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
{
- if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
- return;
-
- spin_lock(&hs->hs_dep_lock);
- while (hs->hs_dep_bits != 0) {
- spin_unlock(&hs->hs_dep_lock);
- cond_resched();
- spin_lock(&hs->hs_dep_lock);
- }
- spin_unlock(&hs->hs_dep_lock);
+ cancel_work_sync(&hs->hs_dep_work);
}
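
Note: cancel_work_sync() removes the work item if it is still pending
and otherwise waits for a running cfs_hash_dep_print() to return, so the
open-coded polling of hs_dep_bits above becomes unnecessary.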
#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
ENTRY;
- CLASSERT(CFS_HASH_THETA_BITS < 15);
+ BUILD_BUG_ON(CFS_HASH_THETA_BITS >= 15);
LASSERT(name != NULL);
LASSERT(ops != NULL);
hs->hs_ops = ops;
hs->hs_extra_bytes = extra_bytes;
hs->hs_rehash_bits = 0;
- cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
+ INIT_WORK(&hs->hs_rehash_work, cfs_hash_rehash_worker);
cfs_hash_depth_wi_init(hs);
if (cfs_hash_with_rehash(hs))
*/
hs->hs_iterating = 1;
- cfs_hash_lock(hs, 1);
- hs->hs_iterators++;
+ cfs_hash_lock(hs, 1);
+ hs->hs_iterators++;
+ cfs_hash_unlock(hs, 1);
- /* NB: iteration is mostly called by service thread,
+ /* NB: iteration is mostly called by service thread,
* we tend to cancel pending rehash-request, instead of
- * blocking service thread, we will relaunch rehash request
- * after iteration */
- if (cfs_hash_is_rehashing(hs))
- cfs_hash_rehash_cancel_locked(hs);
- cfs_hash_unlock(hs, 1);
+ * blocking service thread, we will relaunch rehash request
+ * after iteration
+ */
+ if (cfs_hash_is_rehashing(hs))
+ cfs_hash_rehash_cancel(hs);
}
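
Note the reordering here: the hash lock is now dropped before the
cancel. cfs_hash_rehash_cancel() calls cancel_work_sync(), which may
block until cfs_hash_rehash_worker() completes, and the worker itself
takes cfs_hash_lock(hs, 1); cancelling while still holding that lock
could therefore deadlock.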
static void
* theta thresholds for @hs are tunable via cfs_hash_set_theta().
*/
void
-cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
-{
- int i;
-
- /* need hold cfs_hash_lock(hs, 1) */
- LASSERT(cfs_hash_with_rehash(hs) &&
- !cfs_hash_with_no_lock(hs));
-
- if (!cfs_hash_is_rehashing(hs))
- return;
-
- if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
- hs->hs_rehash_bits = 0;
- return;
- }
-
- for (i = 2; cfs_hash_is_rehashing(hs); i++) {
- cfs_hash_unlock(hs, 1);
- /* raise console warning while waiting too long */
- CDEBUG(is_power_of_2(i >> 3) ? D_WARNING : D_INFO,
- "hash %s is still rehashing, rescheded %d\n",
- hs->hs_name, i - 1);
- cond_resched();
- cfs_hash_lock(hs, 1);
- }
-}
-
-void
cfs_hash_rehash_cancel(struct cfs_hash *hs)
{
- cfs_hash_lock(hs, 1);
- cfs_hash_rehash_cancel_locked(hs);
- cfs_hash_unlock(hs, 1);
+ LASSERT(cfs_hash_with_rehash(hs));
+ cancel_work_sync(&hs->hs_rehash_work);
}
-int
+void
cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
{
int rc;
cfs_hash_lock(hs, 1);
- rc = cfs_hash_rehash_bits(hs);
- if (rc <= 0) {
- cfs_hash_unlock(hs, 1);
- return rc;
- }
+ rc = cfs_hash_rehash_bits(hs);
+ if (rc <= 0) {
+ cfs_hash_unlock(hs, 1);
+ return;
+ }
- hs->hs_rehash_bits = rc;
- if (!do_rehash) {
- /* launch and return */
- cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
- cfs_hash_unlock(hs, 1);
- return 0;
- }
+ hs->hs_rehash_bits = rc;
+ if (!do_rehash) {
+ /* launch and return */
+ queue_work(cfs_rehash_wq, &hs->hs_rehash_work);
+ cfs_hash_unlock(hs, 1);
+ return;
+ }
- /* rehash right now */
- cfs_hash_unlock(hs, 1);
+ /* rehash right now */
+ cfs_hash_unlock(hs, 1);
- return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
+ cfs_hash_rehash_worker(&hs->hs_rehash_work);
}
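
With the return value gone, do_rehash only selects between deferred and
synchronous execution. For example:

    /* queue the rehash on cfs_rehash_wq and return immediately */
    cfs_hash_rehash(hs, 0);

    /* or perform the rehash in the calling context */
    cfs_hash_rehash(hs, 1);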
static int
return c;
}
-static int
-cfs_hash_rehash_worker(struct cfs_workitem *wi)
+static void
+cfs_hash_rehash_worker(struct work_struct *work)
{
- struct cfs_hash *hs =
- container_of(wi, struct cfs_hash, hs_rehash_wi);
+ struct cfs_hash *hs = container_of(work, struct cfs_hash,
+ hs_rehash_work);
struct cfs_hash_bucket **bkts;
struct cfs_hash_bd bd;
unsigned int old_size;
hs->hs_buckets = hs->hs_rehash_buckets;
hs->hs_rehash_buckets = NULL;
- hs->hs_cur_bits = hs->hs_rehash_bits;
- out:
- hs->hs_rehash_bits = 0;
- if (rc == -ESRCH) /* never be scheduled again */
- cfs_wi_exit(cfs_sched_rehash, wi);
- bsize = cfs_hash_bkt_size(hs);
- cfs_hash_unlock(hs, 1);
- /* can't refer to @hs anymore because it could be destroyed */
- if (bkts != NULL)
- cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
- if (rc != 0)
+ hs->hs_cur_bits = hs->hs_rehash_bits;
+out:
+ hs->hs_rehash_bits = 0;
+ bsize = cfs_hash_bkt_size(hs);
+ cfs_hash_unlock(hs, 1);
+ /* can't refer to @hs anymore because it could be destroyed */
+ if (bkts != NULL)
+ cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
+ if (rc != 0)
CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
- /* return 1 only if cfs_wi_exit is called */
- return rc == -ESRCH;
}
/**