lock_vars[0].read_fptr = lprocfs_rd_uint;
lock_vars[0].write_fptr = lprocfs_wr_uint;
lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
+
+ snprintf(lock_name, MAX_STRING_SIZE, "%s/max_parallel_ast",
+ ldlm_ns_name(ns));
+ lock_vars[0].data = &ns->ns_max_parallel_ast;
+ lock_vars[0].read_fptr = lprocfs_rd_uint;
+ lock_vars[0].write_fptr = lprocfs_wr_uint;
+ lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
}
return 0;
}
#endif /* LPROCFS */
-static unsigned ldlm_res_hop_hash(cfs_hash_t *hs, void *key, unsigned mask)
+static unsigned ldlm_res_hop_hash(cfs_hash_t *hs,
+ const void *key, unsigned mask)
{
- struct ldlm_res_id *id = key;
+ const struct ldlm_res_id *id = key;
unsigned val = 0;
unsigned i;
return val & mask;
}
-static unsigned ldlm_res_hop_fid_hash(cfs_hash_t *hs, void *key, unsigned mask)
+static unsigned ldlm_res_hop_fid_hash(cfs_hash_t *hs,
+ const void *key, unsigned mask)
{
- struct ldlm_res_id *id = key;
+ const struct ldlm_res_id *id = key;
struct lu_fid fid;
- __u64 hash;
+ __u32 hash;
+ __u32 val;
fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_OID_OFF];
fid.f_ver = (__u32)id->name[LUSTRE_RES_ID_VER_OFF];
- hash = fid_flatten(&fid);
+ hash = fid_flatten32(&fid);
+ hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
+ if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
+ val = id->name[LUSTRE_RES_ID_HSH_OFF];
+ hash += (val >> 5) + (val << 11);
+ } else {
+ val = fid_oid(&fid);
+ }
hash = cfs_hash_long(hash, hs->hs_bkt_bits);
- /* ignore a few low bits */
- if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0)
- hash += id->name[LUSTRE_RES_ID_HSH_OFF] >> 5;
- else
- hash = hash >> 5;
+ /* mix in a factor derived from the hash-table pointer to perturb the hash */
+ hash -= cfs_hash_long((unsigned long)hs, val % 11 + 3);
+
hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);
return &res->lr_name;
}
-static int ldlm_res_eq(const struct ldlm_res_id *res0,
- const struct ldlm_res_id *res1)
-{
- return !memcmp(res0, res1, sizeof(*res0));
-}
-
-static int ldlm_res_hop_keycmp(void *key, cfs_hlist_node_t *hnode)
+static int ldlm_res_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
{
struct ldlm_resource *res;
{
.nsd_type = LDLM_NS_TYPE_MDC,
.nsd_bkt_bits = 11,
- .nsd_all_bits = 15,
+ .nsd_all_bits = 16,
.nsd_hops = &ldlm_ns_fid_hash_ops,
},
{
ns->ns_contention_time = NS_DEFAULT_CONTENTION_SECONDS;
ns->ns_contended_locks = NS_DEFAULT_CONTENDED_LOCKS;
+ ns->ns_max_parallel_ast = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
ns->ns_nr_unused = 0;
ns->ns_max_unused = LDLM_DEFAULT_LRU_SIZE;
ns->ns_max_age = LDLM_DEFAULT_MAX_ALIVE;
ns->ns_timeouts = 0;
ns->ns_orig_connect_flags = 0;
ns->ns_connect_flags = 0;
+ ns->ns_stopping = 0;
rc = ldlm_namespace_proc_register(ns);
if (rc != 0) {
CERROR("Can't initialize ns proc, rc %d\n", rc);
return;
}
+ cfs_spin_lock(&ns->ns_lock);
+ ns->ns_stopping = 1;
+ cfs_spin_unlock(&ns->ns_lock);
/*
* Can fail with -EINTR when force == 0 in which case try harder.
int ldlm_resource_putref(struct ldlm_resource *res)
{
struct ldlm_namespace *ns = ldlm_res_to_ns(res);
- int ref = cfs_atomic_read(&res->lr_refcount);
cfs_hash_bd_t bd;
- CDEBUG(D_INFO, "putref res: %p count: %d\n", res, ref - 1);
- LASSERTF(ref > 0 && ref < LI_POISON, "%d", ref);
+ LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
+ CDEBUG(D_INFO, "putref res: %p count: %d\n",
+ res, cfs_atomic_read(&res->lr_refcount) - 1);
+
cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
__ldlm_resource_putref_final(&bd, res);
int ldlm_resource_putref_locked(struct ldlm_resource *res)
{
struct ldlm_namespace *ns = ldlm_res_to_ns(res);
- int ref = cfs_atomic_read(&res->lr_refcount);
- CDEBUG(D_INFO, "putref res: %p count: %d\n", res, ref - 1);
- LASSERTF(ref > 0 && ref < LI_POISON, "%d", ref);
+ LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
+ CDEBUG(D_INFO, "putref res: %p count: %d\n",
+ res, cfs_atomic_read(&res->lr_refcount) - 1);
+
if (cfs_atomic_dec_and_test(&res->lr_refcount)) {
cfs_hash_bd_t bd;