#include <lustre_dlm.h>
#include <lustre_fid.h>
#include <obd_class.h>
+#include <libcfs/linux/linux-hash.h>
#include "ldlm_internal.h"
struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
const char __user *buffer, size_t count,
loff_t *off)
{
- __s64 value;
+ struct seq_file *m = file->private_data;
+ u64 value;
__u64 watermark;
- __u64 *data = ((struct seq_file *)file->private_data)->private;
+ __u64 *data = m->private;
bool wm_low = (data == &ldlm_reclaim_threshold_mb) ? true : false;
+ char kernbuf[22] = "";
int rc;
- rc = lprocfs_str_with_units_to_s64(buffer, count, &value, 'M');
- if (rc) {
+ if (count >= sizeof(kernbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kernbuf, buffer, count))
+ return -EFAULT;
+ kernbuf[count] = 0;
+
+ rc = sysfs_memparse(kernbuf, count, &value, "MiB");
+ if (rc < 0) {
CERROR("Failed to set %s, rc = %d.\n",
wm_low ? "lock_reclaim_threshold_mb" : "lock_limit_mb",
rc);
#endif /* HAVE_SERVER_SUPPORT */
-static struct lprocfs_vars ldlm_debugfs_list[] = {
+static struct ldebugfs_vars ldlm_debugfs_list[] = {
{ .name = "dump_namespaces",
.fops = &ldlm_dump_ns_fops,
.proc_mode = 0222 },
int ldlm_debugfs_setup(void)
{
- int rc;
-
ENTRY;
- ldlm_debugfs_dir = ldebugfs_register(OBD_LDLM_DEVICENAME,
- debugfs_lustre_root,
- NULL, NULL);
- if (IS_ERR_OR_NULL(ldlm_debugfs_dir)) {
- CERROR("LDebugFS failed in ldlm-init\n");
- rc = ldlm_debugfs_dir ? PTR_ERR(ldlm_debugfs_dir) : -ENOMEM;
- GOTO(err, rc);
- }
+ ldlm_debugfs_dir = debugfs_create_dir(OBD_LDLM_DEVICENAME,
+ debugfs_lustre_root);
+ ldlm_ns_debugfs_dir = debugfs_create_dir("namespaces",
+ ldlm_debugfs_dir);
+ ldlm_svc_debugfs_dir = debugfs_create_dir("services",
+ ldlm_debugfs_dir);
- ldlm_ns_debugfs_dir = ldebugfs_register("namespaces",
- ldlm_debugfs_dir,
- NULL, NULL);
- if (IS_ERR_OR_NULL(ldlm_ns_debugfs_dir)) {
- CERROR("LProcFS failed in ldlm-init\n");
- rc = ldlm_ns_debugfs_dir ? PTR_ERR(ldlm_ns_debugfs_dir)
- : -ENOMEM;
- GOTO(err_type, rc);
- }
-
- ldlm_svc_debugfs_dir = ldebugfs_register("services",
- ldlm_debugfs_dir,
- NULL, NULL);
- if (IS_ERR_OR_NULL(ldlm_svc_debugfs_dir)) {
- CERROR("LProcFS failed in ldlm-init\n");
- rc = ldlm_svc_debugfs_dir ? PTR_ERR(ldlm_svc_debugfs_dir)
- : -ENOMEM;
- GOTO(err_ns, rc);
- }
-
- rc = ldebugfs_add_vars(ldlm_debugfs_dir, ldlm_debugfs_list, NULL);
- if (rc != 0) {
- CERROR("LProcFS failed in ldlm-init\n");
- GOTO(err_svc, rc);
- }
+ ldebugfs_add_vars(ldlm_debugfs_dir, ldlm_debugfs_list, NULL);
RETURN(0);
-
-err_svc:
- ldebugfs_remove(&ldlm_svc_debugfs_dir);
-err_ns:
- ldebugfs_remove(&ldlm_ns_debugfs_dir);
-err_type:
- ldebugfs_remove(&ldlm_debugfs_dir);
-err:
- ldlm_svc_debugfs_dir = NULL;
- ldlm_ns_debugfs_dir = NULL;
- ldlm_debugfs_dir = NULL;
- RETURN(rc);
}
void ldlm_debugfs_cleanup(void)
{
- if (!IS_ERR_OR_NULL(ldlm_svc_debugfs_dir))
- ldebugfs_remove(&ldlm_svc_debugfs_dir);
-
- if (!IS_ERR_OR_NULL(ldlm_ns_debugfs_dir))
- ldebugfs_remove(&ldlm_ns_debugfs_dir);
-
- if (!IS_ERR_OR_NULL(ldlm_debugfs_dir))
- ldebugfs_remove(&ldlm_debugfs_dir);
+ debugfs_remove_recursive(ldlm_debugfs_dir);
ldlm_svc_debugfs_dir = NULL;
ldlm_ns_debugfs_dir = NULL;
struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
ns_kobj);
__u64 res = 0;
- struct cfs_hash_bd bd;
int i;
/* result is not strictly consistent */
- cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
- res += cfs_hash_bd_count_get(&bd);
+ for (i = 0; i < (1 << ns->ns_bucket_bits); i++)
+ res += atomic_read(&ns->ns_rs_buckets[i].nsb_count);
return sprintf(buf, "%lld\n", res);
}
LUSTRE_RO_ATTR(resource_count);
CDEBUG(D_DLMTRACE,
"dropping all unused locks from namespace %s\n",
ldlm_ns_name(ns));
- if (ns_connect_lru_resize(ns)) {
- /* Try to cancel all @ns_nr_unused locks. */
- ldlm_cancel_lru(ns, ns->ns_nr_unused, 0,
- LDLM_LRU_FLAG_PASSED |
- LDLM_LRU_FLAG_CLEANUP);
- } else {
- tmp = ns->ns_max_unused;
- ns->ns_max_unused = 0;
- ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED |
- LDLM_LRU_FLAG_CLEANUP);
- ns->ns_max_unused = tmp;
- }
+ /* Try to cancel all @ns_nr_unused locks. */
+ ldlm_cancel_lru(ns, INT_MAX, 0, LDLM_LRU_FLAG_CLEANUP);
return count;
}
"changing namespace %s unused locks from %u to %u\n",
ldlm_ns_name(ns), ns->ns_nr_unused,
(unsigned int)tmp);
- ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
if (!lru_resize) {
CDEBUG(D_DLMTRACE,
ldlm_ns_name(ns));
ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
}
+ ldlm_cancel_lru(ns, tmp, LCF_ASYNC, 0);
} else {
CDEBUG(D_DLMTRACE,
"changing namespace %s max_unused from %u to %u\n",
ldlm_ns_name(ns), ns->ns_max_unused,
(unsigned int)tmp);
- ns->ns_max_unused = (unsigned int)tmp;
- ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
/* Make sure that LRU resize was originally supported before
* turning it on here.
ldlm_ns_name(ns));
ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
}
+ ns->ns_max_unused = (unsigned int)tmp;
+ ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
}
return count;
}
LUSTRE_RW_ATTR(lru_size);
+static ssize_t lru_cancel_batch_show(struct kobject *kobj,
+				     struct attribute *attr, char *buf)
+{
+	struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
+						 ns_kobj);
+
+	/* 'buf' is a char pointer, so sizeof(buf) - 1 was the pointer size
+	 * minus one (e.g. 7 on 64-bit), silently truncating the output.
+	 * A sysfs show() buffer is a full page; use PAGE_SIZE like the
+	 * other *_show handlers in this file (e.g. contention_seconds).
+	 */
+	return scnprintf(buf, PAGE_SIZE, "%u\n", ns->ns_cancel_batch);
+}
+
+static ssize_t lru_cancel_batch_store(struct kobject *kobj,
+				      struct attribute *attr,
+				      const char *buffer, size_t count)
+{
+	struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
+						 ns_kobj);
+	unsigned int tmp;
+
+	/* kstrtouint rejects values that do not fit in unsigned int,
+	 * instead of kstrtoul + cast which silently truncated them;
+	 * this also matches contention_seconds_store() in this file.
+	 */
+	if (kstrtouint(buffer, 10, &tmp))
+		return -EINVAL;
+
+	ns->ns_cancel_batch = tmp;
+
+	return count;
+}
+LUSTRE_RW_ATTR(lru_cancel_batch);
+
+static ssize_t ns_recalc_pct_show(struct kobject *kobj,
+				  struct attribute *attr, char *buf)
+{
+	struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
+						 ns_kobj);
+
+	/* 'buf' is a char pointer, so sizeof(buf) - 1 was the pointer size
+	 * minus one, silently truncating the output.  A sysfs show()
+	 * buffer is a full page; use PAGE_SIZE like the other *_show
+	 * handlers in this file.
+	 */
+	return scnprintf(buf, PAGE_SIZE, "%u\n", ns->ns_recalc_pct);
+}
+
+static ssize_t ns_recalc_pct_store(struct kobject *kobj,
+				   struct attribute *attr,
+				   const char *buffer, size_t count)
+{
+	struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
+						 ns_kobj);
+	unsigned int tmp;
+
+	/* kstrtouint rejects values that do not fit in unsigned int,
+	 * instead of kstrtoul + cast which silently truncated them.
+	 */
+	if (kstrtouint(buffer, 10, &tmp))
+		return -EINVAL;
+
+	/* a percentage: anything above 100 is meaningless */
+	if (tmp > 100)
+		return -ERANGE;
+
+	ns->ns_recalc_pct = tmp;
+
+	return count;
+}
+LUSTRE_RW_ATTR(ns_recalc_pct);
+
static ssize_t lru_max_age_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
ns_kobj);
- return sprintf(buf, "%llu\n", ns->ns_dirty_age_limit);
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ ktime_divns(ns->ns_dirty_age_limit, NSEC_PER_SEC));
}
static ssize_t dirty_age_limit_store(struct kobject *kobj,
if (kstrtoull(buffer, 10, &tmp))
return -EINVAL;
- ns->ns_dirty_age_limit = tmp;
+ ns->ns_dirty_age_limit = ktime_set(tmp, 0);
return count;
}
struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
ns_kobj);
- return sprintf(buf, "%llu\n", ns->ns_ctime_age_limit);
+ return snprintf(buf, PAGE_SIZE, "%u\n", ns->ns_ctime_age_limit);
}
static ssize_t ctime_age_limit_store(struct kobject *kobj,
{
struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
ns_kobj);
- unsigned long long tmp;
+ unsigned long tmp;
- if (kstrtoull(buffer, 10, &tmp))
+ if (kstrtoul(buffer, 10, &tmp))
return -EINVAL;
ns->ns_ctime_age_limit = tmp;
struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
ns_kobj);
- return sprintf(buf, "%llu\n", ns->ns_contention_time);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ns->ns_contention_time);
}
static ssize_t contention_seconds_store(struct kobject *kobj,
{
struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
ns_kobj);
- unsigned long long tmp;
+ unsigned int tmp;
- if (kstrtoull(buffer, 10, &tmp))
+ if (kstrtouint(buffer, 10, &tmp))
return -EINVAL;
ns->ns_contention_time = tmp;
&lustre_attr_resource_count.attr,
&lustre_attr_lock_count.attr,
&lustre_attr_lock_unused_count.attr,
+ &lustre_attr_ns_recalc_pct.attr,
&lustre_attr_lru_size.attr,
+ &lustre_attr_lru_cancel_batch.attr,
&lustre_attr_lru_max_age.attr,
&lustre_attr_early_lock_cancel.attr,
&lustre_attr_dirty_age_limit.attr,
CERROR("dlm namespace %s has no procfs dir?\n",
ldlm_ns_name(ns));
else
- ldebugfs_remove(&ns->ns_debugfs_entry);
+ debugfs_remove_recursive(ns->ns_debugfs_entry);
if (ns->ns_stats != NULL)
lprocfs_free_stats(&ns->ns_stats);
return val & mask;
}
-static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
- const void *key, unsigned mask)
+static unsigned int ldlm_res_hop_fid_hash(const struct ldlm_res_id *id, unsigned int bits)
{
- const struct ldlm_res_id *id = key;
struct lu_fid fid;
__u32 hash;
__u32 val;
hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
val = id->name[LUSTRE_RES_ID_HSH_OFF];
- hash += (val >> 5) + (val << 11);
} else {
val = fid_oid(&fid);
}
- hash = hash_long(hash, hs->hs_bkt_bits);
- /* give me another random factor */
- hash -= hash_long((unsigned long)hs, val % 11 + 3);
-
- hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
- hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);
-
- return hash & mask;
+ hash += (val >> 5) + (val << 11);
+ return cfs_hash_32(hash, bits);
}
static void *ldlm_res_hop_key(struct hlist_node *hnode)
.hs_put = ldlm_res_hop_put
};
-static struct cfs_hash_ops ldlm_ns_fid_hash_ops = {
- .hs_hash = ldlm_res_hop_fid_hash,
- .hs_key = ldlm_res_hop_key,
- .hs_keycmp = ldlm_res_hop_keycmp,
- .hs_keycpy = NULL,
- .hs_object = ldlm_res_hop_object,
- .hs_get = ldlm_res_hop_get_locked,
- .hs_put = ldlm_res_hop_put
-};
-
-typedef struct ldlm_ns_hash_def {
- enum ldlm_ns_type nsd_type;
+static struct {
/** hash bucket bits */
unsigned nsd_bkt_bits;
/** hash bits */
unsigned nsd_all_bits;
- /** hash operations */
- struct cfs_hash_ops *nsd_hops;
-} ldlm_ns_hash_def_t;
-
-static struct ldlm_ns_hash_def ldlm_ns_hash_defs[] =
-{
- {
- .nsd_type = LDLM_NS_TYPE_MDC,
+} ldlm_ns_hash_defs[] = {
+ [LDLM_NS_TYPE_MDC] = {
.nsd_bkt_bits = 11,
.nsd_all_bits = 16,
- .nsd_hops = &ldlm_ns_fid_hash_ops,
},
- {
- .nsd_type = LDLM_NS_TYPE_MDT,
+ [LDLM_NS_TYPE_MDT] = {
.nsd_bkt_bits = 14,
.nsd_all_bits = 21,
- .nsd_hops = &ldlm_ns_fid_hash_ops,
},
- {
- .nsd_type = LDLM_NS_TYPE_OSC,
+ [LDLM_NS_TYPE_OSC] = {
.nsd_bkt_bits = 8,
.nsd_all_bits = 12,
- .nsd_hops = &ldlm_ns_hash_ops,
},
- {
- .nsd_type = LDLM_NS_TYPE_OST,
+ [LDLM_NS_TYPE_OST] = {
.nsd_bkt_bits = 11,
.nsd_all_bits = 17,
- .nsd_hops = &ldlm_ns_hash_ops,
},
- {
- .nsd_type = LDLM_NS_TYPE_MGC,
- .nsd_bkt_bits = 4,
+ [LDLM_NS_TYPE_MGC] = {
+ .nsd_bkt_bits = 3,
.nsd_all_bits = 4,
- .nsd_hops = &ldlm_ns_hash_ops,
},
- {
- .nsd_type = LDLM_NS_TYPE_MGT,
- .nsd_bkt_bits = 4,
+ [LDLM_NS_TYPE_MGT] = {
+ .nsd_bkt_bits = 3,
.nsd_all_bits = 4,
- .nsd_hops = &ldlm_ns_hash_ops,
- },
- {
- .nsd_type = LDLM_NS_TYPE_UNKNOWN,
},
};
enum ldlm_ns_type ns_type)
{
struct ldlm_namespace *ns = NULL;
- struct ldlm_ns_bucket *nsb;
- struct ldlm_ns_hash_def *nsd;
- struct cfs_hash_bd bd;
int idx;
int rc;
rc = ldlm_get_ref();
if (rc) {
- CERROR("ldlm_get_ref failed: %d\n", rc);
- RETURN(NULL);
+ CERROR("%s: ldlm_get_ref failed: rc = %d\n", name, rc);
+ RETURN(ERR_PTR(rc));
}
- for (idx = 0; ; idx++) {
- nsd = &ldlm_ns_hash_defs[idx];
- if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
- CERROR("Unknown type %d for ns %s\n", ns_type, name);
- GOTO(out_ref, NULL);
- }
-
- if (nsd->nsd_type == ns_type)
- break;
+ if (ns_type >= ARRAY_SIZE(ldlm_ns_hash_defs) ||
+ ldlm_ns_hash_defs[ns_type].nsd_bkt_bits == 0) {
+ rc = -EINVAL;
+ CERROR("%s: unknown namespace type %d: rc = %d\n",
+ name, ns_type, rc);
+ GOTO(out_ref, rc);
}
OBD_ALLOC_PTR(ns);
if (!ns)
- GOTO(out_ref, NULL);
+ GOTO(out_ref, rc = -ENOMEM);
ns->ns_rs_hash = cfs_hash_create(name,
- nsd->nsd_all_bits, nsd->nsd_all_bits,
- nsd->nsd_bkt_bits, sizeof(*nsb),
+ ldlm_ns_hash_defs[ns_type].nsd_all_bits,
+ ldlm_ns_hash_defs[ns_type].nsd_all_bits,
+ ldlm_ns_hash_defs[ns_type].nsd_bkt_bits,
+ 0,
CFS_HASH_MIN_THETA,
CFS_HASH_MAX_THETA,
- nsd->nsd_hops,
+ &ldlm_ns_hash_ops,
CFS_HASH_DEPTH |
CFS_HASH_BIGNAME |
CFS_HASH_SPIN_BKTLOCK |
CFS_HASH_NO_ITEMREF);
- if (ns->ns_rs_hash == NULL)
- GOTO(out_ns, NULL);
+ if (!ns->ns_rs_hash)
+ GOTO(out_ns, rc = -ENOMEM);
+
+ ns->ns_bucket_bits = ldlm_ns_hash_defs[ns_type].nsd_all_bits -
+ ldlm_ns_hash_defs[ns_type].nsd_bkt_bits;
+
+ OBD_ALLOC_PTR_ARRAY_LARGE(ns->ns_rs_buckets, 1 << ns->ns_bucket_bits);
+ if (!ns->ns_rs_buckets)
+ GOTO(out_hash, rc = -ENOMEM);
+
+ for (idx = 0; idx < (1 << ns->ns_bucket_bits); idx++) {
+ struct ldlm_ns_bucket *nsb = &ns->ns_rs_buckets[idx];
- cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
- nsb = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
nsb->nsb_namespace = ns;
nsb->nsb_reclaim_start = 0;
+ atomic_set(&nsb->nsb_count, 0);
}
ns->ns_obd = obd;
ns->ns_client = client;
ns->ns_name = kstrdup(name, GFP_KERNEL);
if (!ns->ns_name)
- goto out_hash;
+ GOTO(out_hash, rc = -ENOMEM);
INIT_LIST_HEAD(&ns->ns_list_chain);
INIT_LIST_HEAD(&ns->ns_unused_list);
ns->ns_max_parallel_ast = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
ns->ns_nr_unused = 0;
ns->ns_max_unused = LDLM_DEFAULT_LRU_SIZE;
+ ns->ns_cancel_batch = LDLM_DEFAULT_LRU_SHRINK_BATCH;
+ ns->ns_recalc_pct = LDLM_DEFAULT_SLV_RECALC_PCT;
ns->ns_max_age = ktime_set(LDLM_DEFAULT_MAX_ALIVE, 0);
ns->ns_ctime_age_limit = LDLM_CTIME_AGE_LIMIT;
- ns->ns_dirty_age_limit = LDLM_DIRTY_AGE_LIMIT;
+ ns->ns_dirty_age_limit = ktime_set(LDLM_DIRTY_AGE_LIMIT, 0);
ns->ns_timeouts = 0;
ns->ns_orig_connect_flags = 0;
ns->ns_connect_flags = 0;
ns->ns_stopping = 0;
ns->ns_reclaim_start = 0;
ns->ns_last_pos = &ns->ns_unused_list;
+ ns->ns_flags = 0;
rc = ldlm_namespace_sysfs_register(ns);
if (rc) {
- CERROR("Can't initialize ns sysfs, rc %d\n", rc);
+ CERROR("%s: cannot initialize ns sysfs: rc = %d\n", name, rc);
GOTO(out_hash, rc);
}
rc = ldlm_namespace_debugfs_register(ns);
if (rc) {
- CERROR("Can't initialize ns proc, rc %d\n", rc);
+ CERROR("%s: cannot initialize ns proc: rc = %d\n", name, rc);
GOTO(out_sysfs, rc);
}
idx = ldlm_namespace_nr_read(client);
rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
if (rc) {
- CERROR("Can't initialize lock pool, rc %d\n", rc);
+ CERROR("%s: cannot initialize lock pool, rc = %d\n", name, rc);
GOTO(out_proc, rc);
}
ldlm_namespace_sysfs_unregister(ns);
ldlm_namespace_cleanup(ns, 0);
out_hash:
+ OBD_FREE_PTR_ARRAY_LARGE(ns->ns_rs_buckets, 1 << ns->ns_bucket_bits);
kfree(ns->ns_name);
cfs_hash_putref(ns->ns_rs_hash);
out_ns:
OBD_FREE_PTR(ns);
out_ref:
ldlm_put_ref();
- RETURN(NULL);
+ RETURN(ERR_PTR(rc));
}
EXPORT_SYMBOL(ldlm_namespace_new);
*/
unlock_res(res);
LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
- if (lock->l_flags & LDLM_FL_FAIL_LOC) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(4));
- set_current_state(TASK_RUNNING);
- }
+ if (lock->l_flags & LDLM_FL_FAIL_LOC)
+ schedule_timeout_uninterruptible(
+ cfs_time_seconds(4));
+
if (lock->l_completion_ast)
lock->l_completion_ast(lock,
LDLM_FL_FAILED, NULL);
ldlm_namespace_debugfs_unregister(ns);
ldlm_namespace_sysfs_unregister(ns);
cfs_hash_putref(ns->ns_rs_hash);
+ OBD_FREE_PTR_ARRAY_LARGE(ns->ns_rs_buckets, 1 << ns->ns_bucket_bits);
kfree(ns->ns_name);
/* Namespace \a ns should be not on list at this time, otherwise
* this will cause issues related to using freed \a ns in poold
/* Initialize interval trees for each lock mode. */
for (idx = 0; idx < LCK_MODE_NUM; idx++) {
res->lr_itree[idx].lit_size = 0;
- res->lr_itree[idx].lit_mode = 1 << idx;
+ res->lr_itree[idx].lit_mode = BIT(idx);
res->lr_itree[idx].lit_root = NULL;
}
return true;
struct cfs_hash_bd bd;
__u64 version;
int ns_refcount = 0;
+ int hash;
LASSERT(ns != NULL);
LASSERT(parent == NULL);
if (res == NULL)
return ERR_PTR(-ENOMEM);
- res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
+ hash = ldlm_res_hop_fid_hash(name, ns->ns_bucket_bits);
+ res->lr_ns_bucket = &ns->ns_rs_buckets[hash];
res->lr_name = *name;
res->lr_type = type;
}
/* We won! Let's add the resource. */
cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
- if (cfs_hash_bd_count_get(&bd) == 1)
+ if (atomic_inc_return(&res->lr_ns_bucket->nsb_count) == 1)
ns_refcount = ldlm_namespace_get_return(ns);
cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
bd, &res->lr_hash);
lu_ref_fini(&res->lr_reference);
- if (cfs_hash_bd_count_get(bd) == 0)
+ if (atomic_dec_and_test(&nsb->nsb_count))
ldlm_namespace_put(nsb->nsb_namespace);
}
}
EXPORT_SYMBOL(ldlm_resource_putref);
-/**
- * Add a lock into a given resource into specified lock list.
- */
-void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
- struct ldlm_lock *lock)
+static void __ldlm_resource_add_lock(struct ldlm_resource *res,
+ struct list_head *head,
+ struct ldlm_lock *lock,
+ bool tail)
{
check_res_locked(res);
- LDLM_DEBUG(lock, "About to add this lock");
-
if (ldlm_is_destroyed(lock)) {
CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
return;
LASSERT(list_empty(&lock->l_res_link));
- list_add_tail(&lock->l_res_link, head);
+ if (tail)
+ list_add_tail(&lock->l_res_link, head);
+ else
+ list_add(&lock->l_res_link, head);
if (res->lr_type == LDLM_IBITS)
- ldlm_inodebits_add_lock(res, head, lock);
+ ldlm_inodebits_add_lock(res, head, lock, tail);
+
+ ldlm_resource_dump(D_INFO, res);
+}
+
+/**
+ * Add a lock into a given resource into specified lock list.
+ */
+void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
+ struct ldlm_lock *lock)
+{
+ LDLM_DEBUG(lock, "About to add this lock");
+
+ __ldlm_resource_add_lock(res, head, lock, true);
}
/**
* Insert a lock into resource after specified lock.
- *
- * Obtain resource description from the lock we are inserting after.
*/
void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
struct ldlm_lock *new)
{
- struct ldlm_resource *res = original->l_resource;
+ LASSERT(!list_empty(&original->l_res_link));
- check_res_locked(res);
-
- ldlm_resource_dump(D_INFO, res);
LDLM_DEBUG(new, "About to insert this lock after %p: ", original);
+ __ldlm_resource_add_lock(original->l_resource,
+ &original->l_res_link,
+ new, false);
+}
- if (ldlm_is_destroyed(new)) {
- CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
- goto out;
- }
-
- LASSERT(list_empty(&new->l_res_link));
+/**
+ * Insert a lock into resource before the specified lock.
+ *
+ * IBITS waiting locks are to be inserted to the ibit lists as well, and only
+ * the insert-after operation is supported for them, because the set of bits
+ * of the previous and the new locks must match. Therefore, get the previous
+ * lock and insert after.
+ */
+void ldlm_resource_insert_lock_before(struct ldlm_lock *original,
+ struct ldlm_lock *new)
+{
+ LASSERT(!list_empty(&original->l_res_link));
- list_add(&new->l_res_link, &original->l_res_link);
- out:;
+ LDLM_DEBUG(new, "About to insert this lock before %p: ", original);
+ __ldlm_resource_add_lock(original->l_resource,
+ original->l_res_link.prev, new, false);
}
void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
struct ldlm_lock *lock;
unsigned int granted = 0;
- CLASSERT(RES_NAME_SIZE == 4);
+ BUILD_BUG_ON(RES_NAME_SIZE != 4);
if (!((libcfs_debug | D_ERROR) & level))
return;