-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <libcfs/libcfs.h>
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
-static unsigned int warn_on_depth = 0;
+static unsigned int warn_on_depth = 8;
CFS_MODULE_PARM(warn_on_depth, "i", uint, 0644,
"warning when hash depth is high.");
#endif
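+/* workitem scheduler shared by the hash depth-warning and rehash workitems below */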
+struct cfs_wi_sched *cfs_sched_rehash;
+
static inline void
cfs_hash_nl_lock(cfs_hash_lock_t *lock, int exclusive) {}
static inline void
cfs_hash_spin_lock(cfs_hash_lock_t *lock, int exclusive)
{
- cfs_spin_lock(&lock->spin);
+ spin_lock(&lock->spin);
}
static inline void
cfs_hash_spin_unlock(cfs_hash_lock_t *lock, int exclusive)
{
- cfs_spin_unlock(&lock->spin);
+ spin_unlock(&lock->spin);
}
static inline void
cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
{
- if (!exclusive)
- cfs_read_lock(&lock->rw);
- else
- cfs_write_lock(&lock->rw);
+ if (!exclusive)
+ read_lock(&lock->rw);
+ else
+ write_lock(&lock->rw);
}
static inline void
cfs_hash_rw_unlock(cfs_hash_lock_t *lock, int exclusive)
{
- if (!exclusive)
- cfs_read_unlock(&lock->rw);
- else
- cfs_write_unlock(&lock->rw);
+ if (!exclusive)
+ read_unlock(&lock->rw);
+ else
+ write_unlock(&lock->rw);
}
/** No lock hash */
static void
cfs_hash_lock_setup(cfs_hash_t *hs)
{
- if (cfs_hash_with_no_lock(hs)) {
- hs->hs_lops = &cfs_hash_nl_lops;
+ if (cfs_hash_with_no_lock(hs)) {
+ hs->hs_lops = &cfs_hash_nl_lops;
- } else if (cfs_hash_with_no_bktlock(hs)) {
- hs->hs_lops = &cfs_hash_nbl_lops;
- cfs_spin_lock_init(&hs->hs_lock.spin);
+ } else if (cfs_hash_with_no_bktlock(hs)) {
+ hs->hs_lops = &cfs_hash_nbl_lops;
+ spin_lock_init(&hs->hs_lock.spin);
- } else if (cfs_hash_with_rehash(hs)) {
- cfs_rwlock_init(&hs->hs_lock.rw);
+ } else if (cfs_hash_with_rehash(hs)) {
+ rwlock_init(&hs->hs_lock.rw);
if (cfs_hash_with_rw_bktlock(hs))
hs->hs_lops = &cfs_hash_bkt_rw_lops;
max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
return;
- cfs_spin_lock(&hs->hs_dep_lock);
- hs->hs_dep_max = dep_cur;
- hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
- hs->hs_dep_off = bd->bd_offset;
- hs->hs_dep_bits = hs->hs_cur_bits;
- cfs_spin_unlock(&hs->hs_dep_lock);
+ spin_lock(&hs->hs_dep_lock);
+ hs->hs_dep_max = dep_cur;
+ hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
+ hs->hs_dep_off = bd->bd_offset;
+ hs->hs_dep_bits = hs->hs_cur_bits;
+ spin_unlock(&hs->hs_dep_lock);
- cfs_wi_schedule(&hs->hs_dep_wi);
+ cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
# endif
}
CFS_EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
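+/* find-only lookup of @key within a locked bucket (PEEK intent); hnode is NULL, so nothing is added */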
cfs_hlist_node_t *
+cfs_hash_bd_peek_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
+{
+ return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
+ CFS_HS_LOOKUP_IT_PEEK);
+}
+CFS_EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
+
+cfs_hlist_node_t *
cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
const void *key, cfs_hlist_node_t *hnode,
int noref)
cfs_hash_with_no_bktlock(hs))
continue;
- if (cfs_hash_with_rw_bktlock(hs))
- cfs_rwlock_init(&new_bkts[i]->hsb_lock.rw);
- else if (cfs_hash_with_spin_bktlock(hs))
- cfs_spin_lock_init(&new_bkts[i]->hsb_lock.spin);
- else
- LBUG(); /* invalid use-case */
- }
- return new_bkts;
+ if (cfs_hash_with_rw_bktlock(hs))
+ rwlock_init(&new_bkts[i]->hsb_lock.rw);
+ else if (cfs_hash_with_spin_bktlock(hs))
+ spin_lock_init(&new_bkts[i]->hsb_lock.spin);
+ else
+ LBUG(); /* invalid use-case */
+ }
+ return new_bkts;
}
/**
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static int cfs_hash_dep_print(cfs_workitem_t *wi)
{
- cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
- int dep;
- int bkt;
- int off;
- int bits;
-
- cfs_spin_lock(&hs->hs_dep_lock);
- dep = hs->hs_dep_max;
- bkt = hs->hs_dep_bkt;
- off = hs->hs_dep_off;
- bits = hs->hs_dep_bits;
- cfs_spin_unlock(&hs->hs_dep_lock);
-
- LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
- hs->hs_name, bits, dep, bkt, off);
- cfs_spin_lock(&hs->hs_dep_lock);
- hs->hs_dep_bits = 0; /* mark as workitem done */
- cfs_spin_unlock(&hs->hs_dep_lock);
- return 0;
+ cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
+ int dep;
+ int bkt;
+ int off;
+ int bits;
+
+ spin_lock(&hs->hs_dep_lock);
+ dep = hs->hs_dep_max;
+ bkt = hs->hs_dep_bkt;
+ off = hs->hs_dep_off;
+ bits = hs->hs_dep_bits;
+ spin_unlock(&hs->hs_dep_lock);
+
+ LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
+ hs->hs_name, bits, dep, bkt, off);
+ spin_lock(&hs->hs_dep_lock);
+ hs->hs_dep_bits = 0; /* mark as workitem done */
+ spin_unlock(&hs->hs_dep_lock);
+ return 0;
}
static void cfs_hash_depth_wi_init(cfs_hash_t *hs)
{
- cfs_spin_lock_init(&hs->hs_dep_lock);
- cfs_wi_init(&hs->hs_dep_wi, hs,
- cfs_hash_dep_print, CFS_WI_SCHED_ANY);
+ spin_lock_init(&hs->hs_dep_lock);
+ cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
}
static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
{
- if (cfs_wi_cancel(&hs->hs_dep_wi))
- return;
+ if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
+ return;
- cfs_spin_lock(&hs->hs_dep_lock);
- while (hs->hs_dep_bits != 0) {
- cfs_spin_unlock(&hs->hs_dep_lock);
- cfs_cond_resched();
- cfs_spin_lock(&hs->hs_dep_lock);
- }
- cfs_spin_unlock(&hs->hs_dep_lock);
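+ /* a depth-print workitem is still running: wait until it clears hs_dep_bits */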
+ spin_lock(&hs->hs_dep_lock);
+ while (hs->hs_dep_bits != 0) {
+ spin_unlock(&hs->hs_dep_lock);
+ cfs_cond_resched();
+ spin_lock(&hs->hs_dep_lock);
+ }
+ spin_unlock(&hs->hs_dep_lock);
}
#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
hs->hs_ops = ops;
hs->hs_extra_bytes = extra_bytes;
hs->hs_rehash_bits = 0;
- cfs_wi_init(&hs->hs_rehash_wi, hs,
- cfs_hash_rehash_worker, CFS_WI_SCHED_ANY);
+ cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
cfs_hash_depth_wi_init(hs);
if (cfs_hash_with_rehash(hs))
cfs_hash_lock(hs, 0);
cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
- if (bds[1].bd_bucket == NULL && hnode != NULL)
- cfs_hash_bd_del_locked(hs, &bds[0], hnode);
- else
- hnode = cfs_hash_dual_bd_finddel_locked(hs, bds, key, hnode);
+ /* NB: do nothing if @hnode is not in hash table */
+ if (hnode == NULL || !cfs_hlist_unhashed(hnode)) {
+ if (bds[1].bd_bucket == NULL && hnode != NULL) {
+ cfs_hash_bd_del_locked(hs, &bds[0], hnode);
+ } else {
+ hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
+ key, hnode);
+ }
+ }
if (hnode != NULL) {
obj = cfs_hash_object(hs, hnode);
if (!cfs_hash_is_rehashing(hs))
return;
- if (cfs_wi_cancel(&hs->hs_rehash_wi)) {
+ if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
hs->hs_rehash_bits = 0;
return;
}
hs->hs_rehash_bits = rc;
if (!do_rehash) {
/* launch and return */
- cfs_wi_schedule(&hs->hs_rehash_wi);
+ cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
cfs_hash_unlock(hs, 1);
return 0;
}
hs->hs_cur_bits = hs->hs_rehash_bits;
out:
hs->hs_rehash_bits = 0;
- if (rc == -ESRCH)
- cfs_wi_exit(wi); /* never be scheduled again */
+ if (rc == -ESRCH) /* never be scheduled again */
+ cfs_wi_exit(cfs_sched_rehash, wi);
bsize = cfs_hash_bkt_size(hs);
cfs_hash_unlock(hs, 1);
/* can't refer to @hs anymore because it could be destroyed */
cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
if (rc != 0)
CDEBUG(D_INFO, "early quit of of rehashing: %d\n", rc);
- /* cfs_workitem require us to always return 0 */
- return 0;
+ /* return 1 only if cfs_wi_exit is called */
+ return rc == -ESRCH;
}
/**
if (maxdep < bd.bd_bucket->hsb_depmax) {
maxdep = bd.bd_bucket->hsb_depmax;
#ifdef __KERNEL__
- maxdepb = cfs_ffz(~maxdep);
+ maxdepb = ffz(~maxdep);
#endif
}
total += bd.bd_bucket->hsb_count;
- dist[min(__cfs_fls(bd.bd_bucket->hsb_count/max(theta,1)),7)]++;
+ dist[min(fls(bd.bd_bucket->hsb_count/max(theta,1)),7)]++;
cfs_hash_bd_unlock(hs, &bd, 0);
}