#include <lustre_dlm.h>
#include <lustre_fid.h>
#include <obd_class.h>
+#include <libcfs/linux/linux-hash.h>
#include "ldlm_internal.h"
struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
#endif /* HAVE_SERVER_SUPPORT */
-static struct lprocfs_vars ldlm_debugfs_list[] = {
+static struct ldebugfs_vars ldlm_debugfs_list[] = {
{ .name = "dump_namespaces",
.fops = &ldlm_dump_ns_fops,
.proc_mode = 0222 },
CDEBUG(D_DLMTRACE,
"dropping all unused locks from namespace %s\n",
ldlm_ns_name(ns));
- if (ns_connect_lru_resize(ns)) {
- /* Try to cancel all @ns_nr_unused locks. */
- ldlm_cancel_lru(ns, ns->ns_nr_unused, 0,
- LDLM_LRU_FLAG_PASSED |
- LDLM_LRU_FLAG_CLEANUP);
- } else {
- tmp = ns->ns_max_unused;
- ns->ns_max_unused = 0;
- ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED |
- LDLM_LRU_FLAG_CLEANUP);
- ns->ns_max_unused = tmp;
- }
+ /* Try to cancel all @ns_nr_unused locks. */
+ ldlm_cancel_lru(ns, INT_MAX, 0, LDLM_LRU_FLAG_CLEANUP);
return count;
}
"changing namespace %s unused locks from %u to %u\n",
ldlm_ns_name(ns), ns->ns_nr_unused,
(unsigned int)tmp);
- ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
if (!lru_resize) {
CDEBUG(D_DLMTRACE,
ldlm_ns_name(ns));
ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
}
+ ldlm_cancel_lru(ns, tmp, LCF_ASYNC, 0);
} else {
CDEBUG(D_DLMTRACE,
"changing namespace %s max_unused from %u to %u\n",
ldlm_ns_name(ns), ns->ns_max_unused,
(unsigned int)tmp);
- ns->ns_max_unused = (unsigned int)tmp;
- ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
/* Make sure that LRU resize was originally supported before
* turning it on here.
ldlm_ns_name(ns));
ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
}
+ ns->ns_max_unused = (unsigned int)tmp;
+ ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
}
return count;
}
LUSTRE_RW_ATTR(lru_size);
+/* Show the per-namespace LRU cancel batch size (sysfs read handler). */
+static ssize_t lru_cancel_batch_show(struct kobject *kobj,
+				     struct attribute *attr, char *buf)
+{
+	struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
+						 ns_kobj);
+
+	/* sysfs show buffers are PAGE_SIZE bytes; sizeof(buf) would be the
+	 * size of the pointer (8), silently truncating the output.
+	 */
+	return scnprintf(buf, PAGE_SIZE, "%u\n", ns->ns_cancel_batch);
+}
+
+/* Set the per-namespace LRU cancel batch size (sysfs write handler). */
+static ssize_t lru_cancel_batch_store(struct kobject *kobj,
+				      struct attribute *attr,
+				      const char *buffer, size_t count)
+{
+	struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
+						 ns_kobj);
+	unsigned long tmp;
+
+	if (kstrtoul(buffer, 10, &tmp))
+		return -EINVAL;
+
+	ns->ns_cancel_batch = (unsigned int)tmp;
+
+	return count;
+}
+LUSTRE_RW_ATTR(lru_cancel_batch);
+
+/* Show the SLV recalculation percentage for this namespace (sysfs read). */
+static ssize_t ns_recalc_pct_show(struct kobject *kobj,
+				  struct attribute *attr, char *buf)
+{
+	struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
+						 ns_kobj);
+
+	/* sysfs show buffers are PAGE_SIZE bytes; sizeof(buf) would be the
+	 * size of the pointer (8), silently truncating the output.
+	 */
+	return scnprintf(buf, PAGE_SIZE, "%u\n", ns->ns_recalc_pct);
+}
+
+/* Set the SLV recalculation percentage; valid range is 0-100. */
+static ssize_t ns_recalc_pct_store(struct kobject *kobj,
+				   struct attribute *attr,
+				   const char *buffer, size_t count)
+{
+	struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
+						 ns_kobj);
+	unsigned long tmp;
+
+	if (kstrtoul(buffer, 10, &tmp))
+		return -EINVAL;
+
+	/* A percentage cannot exceed 100. */
+	if (tmp > 100)
+		return -ERANGE;
+
+	ns->ns_recalc_pct = (unsigned int)tmp;
+
+	return count;
+}
+LUSTRE_RW_ATTR(ns_recalc_pct);
+
static ssize_t lru_max_age_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
ns_kobj);
- return sprintf(buf, "%llu\n", ns->ns_dirty_age_limit);
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ ktime_divns(ns->ns_dirty_age_limit, NSEC_PER_SEC));
}
static ssize_t dirty_age_limit_store(struct kobject *kobj,
if (kstrtoull(buffer, 10, &tmp))
return -EINVAL;
- ns->ns_dirty_age_limit = tmp;
+ ns->ns_dirty_age_limit = ktime_set(tmp, 0);
return count;
}
struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
ns_kobj);
- return sprintf(buf, "%llu\n", ns->ns_ctime_age_limit);
+ return snprintf(buf, PAGE_SIZE, "%u\n", ns->ns_ctime_age_limit);
}
static ssize_t ctime_age_limit_store(struct kobject *kobj,
{
struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
ns_kobj);
- unsigned long long tmp;
+ unsigned long tmp;
- if (kstrtoull(buffer, 10, &tmp))
+ if (kstrtoul(buffer, 10, &tmp))
return -EINVAL;
ns->ns_ctime_age_limit = tmp;
struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
ns_kobj);
- return sprintf(buf, "%llu\n", ns->ns_contention_time);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ns->ns_contention_time);
}
static ssize_t contention_seconds_store(struct kobject *kobj,
{
struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
ns_kobj);
- unsigned long long tmp;
+ unsigned int tmp;
- if (kstrtoull(buffer, 10, &tmp))
+ if (kstrtouint(buffer, 10, &tmp))
return -EINVAL;
ns->ns_contention_time = tmp;
&lustre_attr_resource_count.attr,
&lustre_attr_lock_count.attr,
&lustre_attr_lock_unused_count.attr,
+ &lustre_attr_ns_recalc_pct.attr,
&lustre_attr_lru_size.attr,
+ &lustre_attr_lru_cancel_batch.attr,
&lustre_attr_lru_max_age.attr,
&lustre_attr_early_lock_cancel.attr,
&lustre_attr_dirty_age_limit.attr,
rc = ldlm_get_ref();
if (rc) {
- CERROR("ldlm_get_ref failed: %d\n", rc);
- RETURN(NULL);
+ CERROR("%s: ldlm_get_ref failed: rc = %d\n", name, rc);
+ RETURN(ERR_PTR(rc));
}
if (ns_type >= ARRAY_SIZE(ldlm_ns_hash_defs) ||
ldlm_ns_hash_defs[ns_type].nsd_bkt_bits == 0) {
- CERROR("Unknown type %d for ns %s\n", ns_type, name);
- GOTO(out_ref, NULL);
+ rc = -EINVAL;
+ CERROR("%s: unknown namespace type %d: rc = %d\n",
+ name, ns_type, rc);
+ GOTO(out_ref, rc);
}
OBD_ALLOC_PTR(ns);
if (!ns)
- GOTO(out_ref, NULL);
+ GOTO(out_ref, rc = -ENOMEM);
ns->ns_rs_hash = cfs_hash_create(name,
ldlm_ns_hash_defs[ns_type].nsd_all_bits,
CFS_HASH_BIGNAME |
CFS_HASH_SPIN_BKTLOCK |
CFS_HASH_NO_ITEMREF);
- if (ns->ns_rs_hash == NULL)
- GOTO(out_ns, NULL);
+ if (!ns->ns_rs_hash)
+ GOTO(out_ns, rc = -ENOMEM);
ns->ns_bucket_bits = ldlm_ns_hash_defs[ns_type].nsd_all_bits -
ldlm_ns_hash_defs[ns_type].nsd_bkt_bits;
- OBD_ALLOC_LARGE(ns->ns_rs_buckets,
- BIT(ns->ns_bucket_bits) * sizeof(ns->ns_rs_buckets[0]));
+ OBD_ALLOC_PTR_ARRAY_LARGE(ns->ns_rs_buckets, 1 << ns->ns_bucket_bits);
if (!ns->ns_rs_buckets)
- goto out_hash;
+ GOTO(out_hash, rc = -ENOMEM);
for (idx = 0; idx < (1 << ns->ns_bucket_bits); idx++) {
struct ldlm_ns_bucket *nsb = &ns->ns_rs_buckets[idx];
ns->ns_client = client;
ns->ns_name = kstrdup(name, GFP_KERNEL);
if (!ns->ns_name)
- goto out_hash;
+ GOTO(out_hash, rc = -ENOMEM);
INIT_LIST_HEAD(&ns->ns_list_chain);
INIT_LIST_HEAD(&ns->ns_unused_list);
ns->ns_max_parallel_ast = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
ns->ns_nr_unused = 0;
ns->ns_max_unused = LDLM_DEFAULT_LRU_SIZE;
+ ns->ns_cancel_batch = LDLM_DEFAULT_LRU_SHRINK_BATCH;
+ ns->ns_recalc_pct = LDLM_DEFAULT_SLV_RECALC_PCT;
ns->ns_max_age = ktime_set(LDLM_DEFAULT_MAX_ALIVE, 0);
ns->ns_ctime_age_limit = LDLM_CTIME_AGE_LIMIT;
- ns->ns_dirty_age_limit = LDLM_DIRTY_AGE_LIMIT;
+ ns->ns_dirty_age_limit = ktime_set(LDLM_DIRTY_AGE_LIMIT, 0);
ns->ns_timeouts = 0;
ns->ns_orig_connect_flags = 0;
ns->ns_connect_flags = 0;
ns->ns_stopping = 0;
ns->ns_reclaim_start = 0;
ns->ns_last_pos = &ns->ns_unused_list;
+ ns->ns_flags = 0;
rc = ldlm_namespace_sysfs_register(ns);
if (rc) {
- CERROR("Can't initialize ns sysfs, rc %d\n", rc);
+ CERROR("%s: cannot initialize ns sysfs: rc = %d\n", name, rc);
GOTO(out_hash, rc);
}
rc = ldlm_namespace_debugfs_register(ns);
if (rc) {
- CERROR("Can't initialize ns proc, rc %d\n", rc);
+ CERROR("%s: cannot initialize ns proc: rc = %d\n", name, rc);
GOTO(out_sysfs, rc);
}
idx = ldlm_namespace_nr_read(client);
rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
if (rc) {
- CERROR("Can't initialize lock pool, rc %d\n", rc);
+ CERROR("%s: cannot initialize lock pool, rc = %d\n", name, rc);
GOTO(out_proc, rc);
}
ldlm_namespace_sysfs_unregister(ns);
ldlm_namespace_cleanup(ns, 0);
out_hash:
- OBD_FREE_LARGE(ns->ns_rs_buckets,
- BIT(ns->ns_bucket_bits) * sizeof(ns->ns_rs_buckets[0]));
+ OBD_FREE_PTR_ARRAY_LARGE(ns->ns_rs_buckets, 1 << ns->ns_bucket_bits);
kfree(ns->ns_name);
cfs_hash_putref(ns->ns_rs_hash);
out_ns:
OBD_FREE_PTR(ns);
out_ref:
ldlm_put_ref();
- RETURN(NULL);
+ RETURN(ERR_PTR(rc));
}
EXPORT_SYMBOL(ldlm_namespace_new);
ldlm_namespace_debugfs_unregister(ns);
ldlm_namespace_sysfs_unregister(ns);
cfs_hash_putref(ns->ns_rs_hash);
- OBD_FREE_LARGE(ns->ns_rs_buckets,
- BIT(ns->ns_bucket_bits) * sizeof(ns->ns_rs_buckets[0]));
+ OBD_FREE_PTR_ARRAY_LARGE(ns->ns_rs_buckets, 1 << ns->ns_bucket_bits);
kfree(ns->ns_name);
/* Namespace \a ns should be not on list at this time, otherwise
* this will cause issues related to using freed \a ns in poold
/* Initialize interval trees for each lock mode. */
for (idx = 0; idx < LCK_MODE_NUM; idx++) {
res->lr_itree[idx].lit_size = 0;
- res->lr_itree[idx].lit_mode = 1 << idx;
+ res->lr_itree[idx].lit_mode = BIT(idx);
res->lr_itree[idx].lit_root = NULL;
}
return true;
}
EXPORT_SYMBOL(ldlm_resource_putref);
-/**
- * Add a lock into a given resource into specified lock list.
- */
-void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
-			    struct ldlm_lock *lock)
+/*
+ * Common helper: link @lock into @head on @res. @tail selects list_add_tail()
+ * (append) vs. list_add() (insert right after @head). Caller must hold the
+ * resource lock.
+ */
+static void __ldlm_resource_add_lock(struct ldlm_resource *res,
+				     struct list_head *head,
+				     struct ldlm_lock *lock,
+				     bool tail)
 {
 	check_res_locked(res);
-	LDLM_DEBUG(lock, "About to add this lock");
-
+	/* A destroyed lock must not be (re)linked into any resource list. */
 	if (ldlm_is_destroyed(lock)) {
 		CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
 		return;
 	LASSERT(list_empty(&lock->l_res_link));
-	list_add_tail(&lock->l_res_link, head);
+	if (tail)
+		list_add_tail(&lock->l_res_link, head);
+	else
+		list_add(&lock->l_res_link, head);
+	/* IBITS locks are additionally tracked on per-bit lists. */
 	if (res->lr_type == LDLM_IBITS)
-		ldlm_inodebits_add_lock(res, head, lock);
+		ldlm_inodebits_add_lock(res, head, lock, tail);
+
+	ldlm_resource_dump(D_INFO, res);
+}
+
+/**
+ * Add a lock into a given resource into specified lock list.
+ */
+void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
+			    struct ldlm_lock *lock)
+{
+	LDLM_DEBUG(lock, "About to add this lock");
+
+	/* tail=true: append at the end of @head. */
+	__ldlm_resource_add_lock(res, head, lock, true);
 }
 /**
  * Insert a lock into resource after specified lock.
- *
- * Obtain resource description from the lock we are inserting after.
  */
 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
 				     struct ldlm_lock *new)
 {
-	struct ldlm_resource *res = original->l_resource;
-
-	check_res_locked(res);
+	/* @original must already be linked on its resource list. */
+	LASSERT(!list_empty(&original->l_res_link));
 
-	ldlm_resource_dump(D_INFO, res);
 	LDLM_DEBUG(new, "About to insert this lock after %p: ", original);
 
+	/* tail=false: list_add() places @new immediately after @original. */
+	__ldlm_resource_add_lock(original->l_resource,
+				 &original->l_res_link,
+				 new, false);
+}
-	if (ldlm_is_destroyed(new)) {
-		CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
-		goto out;
-	}
-
-	LASSERT(list_empty(&new->l_res_link));
+/**
+ * Insert a lock into resource before the specified lock.
+ *
+ * IBITS waiting locks are to be inserted to the ibit lists as well, and only
+ * the insert-after operation is supported for them, because the set of bits
+ * of the previous and the new locks must match. Therefore, get the previous
+ * lock and insert after.
+ */
+void ldlm_resource_insert_lock_before(struct ldlm_lock *original,
+				      struct ldlm_lock *new)
+{
+	/* @original must already be linked on its resource list. */
+	LASSERT(!list_empty(&original->l_res_link));
 
-	list_add(&new->l_res_link, &original->l_res_link);
- out:;
+	LDLM_DEBUG(new, "About to insert this lock before %p: ", original);
+	/* Inserting after @original's predecessor == inserting before it. */
+	__ldlm_resource_add_lock(original->l_resource,
+				 original->l_res_link.prev, new, false);
 }
void ldlm_resource_unlink_lock(struct ldlm_lock *lock)