struct obd_ops;
struct obd_device;
+extern struct kset *ldlm_ns_kset;
+extern struct kset *ldlm_svc_kset;
+
#define OBD_LDLM_DEVICENAME "ldlm"
#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
int pl_grant_plan;
/** Pool statistics. */
struct lprocfs_stats *pl_stats;
+
+ /* sysfs object */
+ struct kobject pl_kobj;
+ struct completion pl_kobj_unregister;
};
typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **,
* Which bucket should we start with the lock reclaim.
*/
int ns_reclaim_start;
+
+ struct kobject ns_kobj; /* sysfs object */
+ struct completion ns_kobj_unregister;
};
/**
LDLM_POLICY_SKIP_LOCK
};
-#define LDLM_POOL_PROC_READER_SEQ_SHOW(var, type) \
- static int lprocfs_##var##_seq_show(struct seq_file *m, void *v)\
- { \
- struct ldlm_pool *pl = m->private; \
- type tmp; \
- \
- spin_lock(&pl->pl_lock); \
- tmp = pl->pl_##var; \
- spin_unlock(&pl->pl_lock); \
- \
- return lprocfs_uint_seq_show(m, &tmp); \
- } \
+#define LDLM_POOL_SYSFS_PRINT_int(v) sprintf(buf, "%d\n", v)
+#define LDLM_POOL_SYSFS_SET_int(a, b) { a = b; }
+/* the _u64 variants format an unsigned 64-bit value: use %llu, not %lld */
+#define LDLM_POOL_SYSFS_PRINT_u64(v) sprintf(buf, "%llu\n", v)
+#define LDLM_POOL_SYSFS_SET_u64(a, b) { a = b; }
+#define LDLM_POOL_SYSFS_PRINT_atomic(v) sprintf(buf, "%d\n", atomic_read(&v))
+#define LDLM_POOL_SYSFS_SET_atomic(a, b) atomic_set(&a, b)
+
+/*
+ * Generates <var>_show(): snapshots pl_<var> under pl_lock and formats
+ * it via LDLM_POOL_SYSFS_PRINT_<type>() into the sysfs buffer.
+ */
+#define LDLM_POOL_SYSFS_READER_SHOW(var, type)			    \
+	static ssize_t var##_show(struct kobject *kobj,		    \
+				  struct attribute *attr,	    \
+				  char *buf)			    \
+	{							    \
+		struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,\
+						    pl_kobj);	    \
+		type tmp;					    \
+								    \
+		spin_lock(&pl->pl_lock);			    \
+		tmp = pl->pl_##var;				    \
+		spin_unlock(&pl->pl_lock);			    \
+								    \
+		return LDLM_POOL_SYSFS_PRINT_##type(tmp);	    \
+	}							    \
	struct __##var##__dummy_read {;} /* semicolon catcher */
-#define LDLM_POOL_PROC_WRITER(var, type) \
- static int lprocfs_wr_##var(struct file *file, \
- const char __user *buffer, \
- unsigned long count, void *data) \
- { \
- struct ldlm_pool *pl = data; \
- type tmp; \
- int rc; \
- \
- rc = lprocfs_wr_uint(file, buffer, count, &tmp); \
- if (rc < 0) { \
- CERROR("Can't parse user input, rc = %d\n", rc);\
- return rc; \
- } \
- \
- spin_lock(&pl->pl_lock); \
- pl->pl_##var = tmp; \
- spin_unlock(&pl->pl_lock); \
- \
- return rc; \
- } \
- struct __##var##__dummy_write {;} /* semicolon catcher */
+/*
+ * Generates <var>_store(): parses a decimal value from the sysfs buffer
+ * and assigns it to pl_<var> under pl_lock.  The sysfs ->store() method
+ * takes a size_t count, so declare it as such rather than unsigned long.
+ */
+#define LDLM_POOL_SYSFS_WRITER_STORE(var, type)			    \
+	static ssize_t var##_store(struct kobject *kobj,	    \
+				   struct attribute *attr,	    \
+				   const char *buffer,		    \
+				   size_t count)		    \
+	{							    \
+		struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,\
+						    pl_kobj);	    \
+		unsigned long tmp;				    \
+		int rc;						    \
+								    \
+		rc = kstrtoul(buffer, 10, &tmp);		    \
+		if (rc < 0)					    \
+			return rc;				    \
+								    \
+		spin_lock(&pl->pl_lock);			    \
+		LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp);	    \
+		spin_unlock(&pl->pl_lock);			    \
+								    \
+		return count;					    \
+	}							    \
+	struct __##var##__dummy_write {; } /* semicolon catcher */
+
+/*
+ * Lockless show: formats pl_<var> without taking pl_lock.  Used below
+ * for atomic counters and for u64 values read without the pool lock.
+ */
+#define LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(var, type)		    \
+	static ssize_t var##_show(struct kobject *kobj,		    \
+				  struct attribute *attr,	    \
+				  char *buf)			    \
+	{							    \
+		struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,\
+						    pl_kobj);	    \
+								    \
+		return LDLM_POOL_SYSFS_PRINT_##type(pl->pl_##var);  \
+	}							    \
+	struct __##var##__dummy_read {; } /* semicolon catcher */
+
+/*
+ * Lockless store: parses a decimal value and assigns it to pl_<var>
+ * without taking pl_lock (only used with atomic setters below).
+ * The sysfs ->store() method takes a size_t count.
+ */
+#define LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(var, type)		    \
+	static ssize_t var##_store(struct kobject *kobj,	    \
+				   struct attribute *attr,	    \
+				   const char *buffer,		    \
+				   size_t count)		    \
+	{							    \
+		struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,\
+						    pl_kobj);	    \
+		unsigned long tmp;				    \
+		int rc;						    \
+								    \
+		rc = kstrtoul(buffer, 10, &tmp);		    \
+		if (rc < 0)					    \
+			return rc;				    \
+								    \
+		LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp);	    \
+								    \
+		return count;					    \
+	}							    \
+	struct __##var##__dummy_write {; } /* semicolon catcher */
static inline void
ldlm_add_var(struct lprocfs_vars *vars, struct proc_dir_entry *proc_dir,
static struct mutex ldlm_ref_mutex;
static int ldlm_refcount;
+struct kobject *ldlm_kobj;
+struct kset *ldlm_ns_kset;
+struct kset *ldlm_svc_kset;
+
struct ldlm_cb_async_args {
struct ldlm_cb_set_arg *ca_set_arg;
struct ldlm_lock *ca_lock;
}
EXPORT_SYMBOL(ldlm_destroy_export);
+/* Global tunable: whether to cancel unused locks before replay (int flag). */
+static ssize_t cancel_unused_locks_before_replay_show(struct kobject *kobj,
+						      struct attribute *attr,
+						      char *buf)
+{
+	return sprintf(buf, "%d\n", ldlm_cancel_unused_locks_before_replay);
+}
+
+static ssize_t cancel_unused_locks_before_replay_store(struct kobject *kobj,
+						       struct attribute *attr,
+						       const char *buffer,
+						       size_t count)
+{
+	int rc;
+	unsigned long val;
+
+	rc = kstrtoul(buffer, 10, &val);
+	if (rc)
+		return rc;
+
+	/* NOTE(review): val is unsigned long but the target is an int —
+	 * large values silently truncate; confirm this is acceptable. */
+	ldlm_cancel_unused_locks_before_replay = val;
+
+	return count;
+}
+LUSTRE_RW_ATTR(cancel_unused_locks_before_replay);
+
+/* Attributes attached directly to the "ldlm" kobject created in ldlm_setup() */
+static struct attribute *ldlm_attrs[] = {
+	&lustre_attr_cancel_unused_locks_before_replay.attr,
+	NULL,
+};
+
+static struct attribute_group ldlm_attr_group = {
+	.attrs = ldlm_attrs,
+};
+
+
static int ldlm_setup(void)
{
static struct ptlrpc_service_conf conf;
if (ldlm_state == NULL)
RETURN(-ENOMEM);
+	ldlm_kobj = kobject_create_and_add("ldlm", lustre_kobj);
+	if (!ldlm_kobj)
+		/* GOTO must assign rc, otherwise the out path returns a
+		 * stale rc instead of -ENOMEM */
+		GOTO(out, rc = -ENOMEM);
+
+	rc = sysfs_create_group(ldlm_kobj, &ldlm_attr_group);
+	if (rc)
+		GOTO(out, rc);
+
+	ldlm_ns_kset = kset_create_and_add("namespaces", NULL, ldlm_kobj);
+	if (!ldlm_ns_kset)
+		GOTO(out, rc = -ENOMEM);
+
+	ldlm_svc_kset = kset_create_and_add("services", NULL, ldlm_kobj);
+	if (!ldlm_svc_kset)
+		GOTO(out, rc = -ENOMEM);
+
#ifdef CONFIG_PROC_FS
- rc = ldlm_proc_setup();
- if (rc != 0)
+ rc = ldlm_proc_setup();
+ if (rc != 0)
GOTO(out, rc);
#endif /* CONFIG_PROC_FS */
ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
#endif
+ if (ldlm_ns_kset)
+ kset_unregister(ldlm_ns_kset);
+ if (ldlm_svc_kset)
+ kset_unregister(ldlm_svc_kset);
+ if (ldlm_kobj)
+ kobject_put(ldlm_kobj);
+
ldlm_proc_cleanup();
#ifdef HAVE_SERVER_SUPPORT
* pl_server_lock_volume - Current server lock volume (calculated);
*
* As it may be seen from list above, we have few possible tunables which may
- * affect behavior much. They all may be modified via proc. However, they also
+ * affect behavior much. They all may be modified via sysfs. However, they also
* give a possibility for constructing few pre-defined behavior policies. If
* none of predefines is suitable for a working pattern being used, new one may
- * be "constructed" via proc tunables.
+ * be "constructed" via sysfs tunables.
*/
#define DEBUG_SUBSYSTEM S_LDLM
}
LPROC_SEQ_FOPS_RO(lprocfs_pool_state);
-static int lprocfs_grant_speed_seq_show(struct seq_file *m, void *unused)
+/*
+ * grant_speed = grant_rate - cancel_rate; both counters are sampled
+ * under pl_lock so the pair is consistent w.r.t. ldlm_pool_recalc().
+ */
+static ssize_t grant_speed_show(struct kobject *kobj, struct attribute *attr,
+				char *buf)
{
-	struct ldlm_pool *pl = m->private;
-	int grant_speed;
+	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
+					    pl_kobj);
+	int grant_speed;
	spin_lock(&pl->pl_lock);
	/* serialize with ldlm_pool_recalc */
	grant_speed = atomic_read(&pl->pl_grant_rate) -
		      atomic_read(&pl->pl_cancel_rate);
	spin_unlock(&pl->pl_lock);
-	return lprocfs_uint_seq_show(m, &grant_speed);
+	return sprintf(buf, "%d\n", grant_speed);
}
+LUSTRE_RO_ATTR(grant_speed);
-LDLM_POOL_PROC_READER_SEQ_SHOW(grant_plan, int);
-LPROC_SEQ_FOPS_RO(lprocfs_grant_plan);
+LDLM_POOL_SYSFS_READER_SHOW(grant_plan, int);
+LUSTRE_RO_ATTR(grant_plan);
-LDLM_POOL_PROC_READER_SEQ_SHOW(recalc_period, int);
-LDLM_POOL_PROC_WRITER(recalc_period, int);
-static ssize_t lprocfs_recalc_period_seq_write(struct file *file,
- const char __user *buf,
- size_t len, loff_t *off)
-{
- struct seq_file *seq = file->private_data;
+LDLM_POOL_SYSFS_READER_SHOW(recalc_period, int);
+LDLM_POOL_SYSFS_WRITER_STORE(recalc_period, int);
+LUSTRE_RW_ATTR(recalc_period);
+
+LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(server_lock_volume, u64);
+LUSTRE_RO_ATTR(server_lock_volume);
+
+LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(limit, atomic);
+LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(limit, atomic);
+LUSTRE_RW_ATTR(limit);
+
+LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(granted, atomic);
+LUSTRE_RO_ATTR(granted);
+
+LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(cancel_rate, atomic);
+LUSTRE_RO_ATTR(cancel_rate);
- return lprocfs_wr_recalc_period(file, buf, len, seq->private);
+LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(grant_rate, atomic);
+LUSTRE_RO_ATTR(grant_rate);
+
+LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(lock_volume_factor, atomic);
+LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(lock_volume_factor, atomic);
+LUSTRE_RW_ATTR(lock_volume_factor);
+
+/* These are for pools in /sys/fs/lustre/ldlm/namespaces/.../pool */
+static struct attribute *ldlm_pl_attrs[] = {
+ &lustre_attr_grant_speed.attr,
+ &lustre_attr_grant_plan.attr,
+ &lustre_attr_recalc_period.attr,
+ &lustre_attr_server_lock_volume.attr,
+ &lustre_attr_limit.attr,
+ &lustre_attr_granted.attr,
+ &lustre_attr_cancel_rate.attr,
+ &lustre_attr_grant_rate.attr,
+ &lustre_attr_lock_volume_factor.attr,
+ NULL,
+};
+
+/* kobject release callback: lets ldlm_pool_sysfs_fini() finish waiting */
+static void ldlm_pl_release(struct kobject *kobj)
+{
+	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
+					    pl_kobj);
+	complete(&pl->pl_kobj_unregister);
}
-LPROC_SEQ_FOPS(lprocfs_recalc_period);
-LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, u64);
-LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, atomic);
-LPROC_SEQ_FOPS_RW_TYPE(ldlm_pool_rw, atomic);
+static struct kobj_type ldlm_pl_ktype = {
+	.default_attrs	= ldlm_pl_attrs,
+	.sysfs_ops	= &lustre_sysfs_ops,
+	.release	= ldlm_pl_release,
+};
+
+/*
+ * Create the "pool" sysfs directory under the namespace kobject.
+ * NOTE(review): kernel kobject rules require kobject_put() on the
+ * kobject if kobject_init_and_add() fails — confirm the caller's
+ * error path (ldlm_pool_fini -> ldlm_pool_sysfs_fini) covers this.
+ */
+static int ldlm_pool_sysfs_init(struct ldlm_pool *pl)
+{
+	struct ldlm_namespace *ns = ldlm_pl2ns(pl);
+	int err;
+
+	init_completion(&pl->pl_kobj_unregister);
+	err = kobject_init_and_add(&pl->pl_kobj, &ldlm_pl_ktype, &ns->ns_kobj,
+				   "pool");
-LPROC_SEQ_FOPS_RO(lprocfs_grant_speed);
+	return err;
+}
static int ldlm_pool_proc_init(struct ldlm_pool *pl)
{
memset(pool_vars, 0, sizeof(pool_vars));
pool_vars[0].name = var_name;
- ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "server_lock_volume",
- &pl->pl_server_lock_volume, &ldlm_pool_u64_fops);
- ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "limit", &pl->pl_limit,
- &ldlm_pool_rw_atomic_fops);
- ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "granted",
- &pl->pl_granted, &ldlm_pool_atomic_fops);
- ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "grant_speed", pl,
- &lprocfs_grant_speed_fops);
- ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "cancel_rate",
- &pl->pl_cancel_rate, &ldlm_pool_atomic_fops);
- ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "grant_rate",
- &pl->pl_grant_rate, &ldlm_pool_atomic_fops);
- ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "grant_plan", pl,
- &lprocfs_grant_plan_fops);
- ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "recalc_period",
- pl, &lprocfs_recalc_period_fops);
- ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "lock_volume_factor",
- &pl->pl_lock_volume_factor, &ldlm_pool_rw_atomic_fops);
ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "state", pl,
&lprocfs_pool_state_fops);
return rc;
}
+/* Drop the pool kobject and wait for ldlm_pl_release() to run. */
+static void ldlm_pool_sysfs_fini(struct ldlm_pool *pl)
+{
+	kobject_put(&pl->pl_kobj);
+	wait_for_completion(&pl->pl_kobj_unregister);
+}
+
static void ldlm_pool_proc_fini(struct ldlm_pool *pl)
{
if (pl->pl_stats != NULL) {
if (rc)
RETURN(rc);
- CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);
+ rc = ldlm_pool_sysfs_init(pl);
+ if (rc)
+ RETURN(rc);
+
+ CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);
- RETURN(rc);
+ RETURN(rc);
}
void ldlm_pool_fini(struct ldlm_pool *pl)
{
- ENTRY;
- ldlm_pool_proc_fini(pl);
+ ENTRY;
+ ldlm_pool_sysfs_fini(pl);
+ ldlm_pool_proc_fini(pl);
/*
* Pool should not be used after this point. We can't free it here as
{ .name = "dump_granted_max",
.fops = &ldlm_rw_uint_fops,
.data = &ldlm_dump_granted_max },
- { .name = "cancel_unused_locks_before_replay",
- .fops = &ldlm_rw_uint_fops,
- .data = &ldlm_cancel_unused_locks_before_replay },
#ifdef HAVE_SERVER_SUPPORT
{ .name = "lock_reclaim_threshold_mb",
.fops = &ldlm_watermark_fops,
lprocfs_remove(&ldlm_type_proc_dir);
}
-static int lprocfs_ns_resources_seq_show(struct seq_file *m, void *v)
+/* Sum resource counts across all hash buckets of the namespace. */
+static ssize_t resource_count_show(struct kobject *kobj, struct attribute *attr,
+				   char *buf)
{
-	struct ldlm_namespace *ns = m->private;
+	struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
+						 ns_kobj);
	__u64 res = 0;
	struct cfs_hash_bd bd;
	int i;
	/* result is not strictly consistant */
	cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
		res += cfs_hash_bd_count_get(&bd);
-	return lprocfs_u64_seq_show(m, &res);
+	/* res is __u64: use the unsigned conversion specifier */
+	return sprintf(buf, "%llu\n", res);
}
-LPROC_SEQ_FOPS_RO(lprocfs_ns_resources);
+LUSTRE_RO_ATTR(resource_count);
-static int lprocfs_ns_locks_seq_show(struct seq_file *m, void *v)
+/* Total lock count for the namespace, summed from per-CPU stats. */
+static ssize_t lock_count_show(struct kobject *kobj, struct attribute *attr,
+			       char *buf)
{
-	struct ldlm_namespace *ns = m->private;
+	struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
+						 ns_kobj);
	__u64 locks;
	locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
					LPROCFS_FIELDS_FLAGS_SUM);
-	return lprocfs_u64_seq_show(m, &locks);
+	/* locks is __u64: use the unsigned conversion specifier */
+	return sprintf(buf, "%llu\n", locks);
}
-LPROC_SEQ_FOPS_RO(lprocfs_ns_locks);
+LUSTRE_RO_ATTR(lock_count);
-static int lprocfs_lru_size_seq_show(struct seq_file *m, void *v)
+/* Number of unused (LRU) locks currently held by this namespace. */
+static ssize_t lock_unused_count_show(struct kobject *kobj,
+				      struct attribute *attr,
+				      char *buf)
{
-	struct ldlm_namespace *ns = m->private;
+	struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
+						 ns_kobj);
+
+	return sprintf(buf, "%d\n", ns->ns_nr_unused);
+}
+LUSTRE_RO_ATTR(lock_unused_count);
+
+/* LRU size limit; with LRU resize active, report the current LRU count. */
+static ssize_t lru_size_show(struct kobject *kobj, struct attribute *attr,
+			     char *buf)
+{
+	struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
+						 ns_kobj);
	__u32 *nr = &ns->ns_max_unused;
	if (ns_connect_lru_resize(ns))
		nr = &ns->ns_nr_unused;
-	return lprocfs_uint_seq_show(m, nr);
+	/* sysfs output is conventionally newline-terminated */
+	return sprintf(buf, "%u\n", *nr);
}
-static ssize_t lprocfs_lru_size_seq_write(struct file *file,
-					  const char __user *buffer,
-					  size_t count, loff_t *off)
+/*
+ * lru_size store: writing "clear" cancels all unused locks in the
+ * namespace; writing a number sets ns_max_unused or, when LRU resize
+ * is active, cancels down toward that many unused locks.  Writing 0
+ * re-enables LRU resize if the server originally supported it.
+ */
+static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
+			      const char *buffer, size_t count)
{
-	struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
-	char dummy[MAX_STRING_SIZE + 1];
-	char *end;
-	unsigned long tmp;
-	int lru_resize;
-
-	if (count >= sizeof(dummy))
-		return -EINVAL;
+	struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
+						 ns_kobj);
+	unsigned long tmp;
+	int lru_resize;
+	int err;
-	if (count == 0)
-		return 0;
-
-	if (copy_from_user(dummy, buffer, count))
-		return -EFAULT;
-
-	dummy[count] = 0;
-
-	if (strncmp(dummy, "clear", 5) == 0) {
+	if (strncmp(buffer, "clear", 5) == 0) {
		CDEBUG(D_DLMTRACE,
		       "dropping all unused locks from namespace %s\n",
		       ldlm_ns_name(ns));
				    LDLM_LRU_FLAG_PASSED);
			if (canceled < unused) {
				CDEBUG(D_DLMTRACE,
-				       "not all requested locks are canceled, "
-				       "requested: %d, canceled: %d\n", unused,
+				       "not all requested locks are canceled, requested: %d, canceled: %d\n",
+				       unused,
				       canceled);
				return -EINVAL;
			}
		return count;
	}
-	tmp = simple_strtoul(dummy, &end, 0);
-	if (dummy == end) {
-		CERROR("invalid value written\n");
+	err = kstrtoul(buffer, 10, &tmp);
+	if (err != 0) {
+		CERROR("lru_size: invalid value written\n");
		return -EINVAL;
	}
-	lru_resize = (tmp == 0);
+	lru_resize = (tmp == 0);
	if (ns_connect_lru_resize(ns)) {
		if (!lru_resize)
-			ns->ns_max_unused = tmp;
+			ns->ns_max_unused = (unsigned int)tmp;
		if (tmp > ns->ns_nr_unused)
			tmp = ns->ns_nr_unused;
		ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
	/* Make sure that LRU resize was originally supported before
-	 * turning it on here. */
+	 * turning it on here.
+	 */
	if (lru_resize &&
	    (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
		CDEBUG(D_DLMTRACE,
	return count;
}
-LPROC_SEQ_FOPS(lprocfs_lru_size);
+LUSTRE_RW_ATTR(lru_size);
+
+/* Maximum age an unused lock may stay on the LRU before cancellation. */
+static ssize_t lru_max_age_show(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
+						 ns_kobj);
+
+	/* newline-terminate like every other sysfs attribute */
+	return sprintf(buf, "%u\n", ns->ns_max_age);
+}
-static int lprocfs_elc_seq_show(struct seq_file *m, void *v)
+/*
+ * Set the maximum LRU age for unused locks.
+ * NOTE(review): units are whatever ns_max_age uses elsewhere in the
+ * file (not visible here) — confirm before documenting further.
+ */
+static ssize_t lru_max_age_store(struct kobject *kobj, struct attribute *attr,
+				 const char *buffer, size_t count)
{
-	struct ldlm_namespace *ns = m->private;
-	unsigned int supp = ns_connect_cancelset(ns);
+	struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
+						 ns_kobj);
+	unsigned long tmp;
+	int err;
+
+	err = kstrtoul(buffer, 10, &tmp);
+	if (err != 0)
+		return -EINVAL;
-	return lprocfs_uint_seq_show(m, &supp);
+	ns->ns_max_age = tmp;
+
+	return count;
}
+LUSTRE_RW_ATTR(lru_max_age);
-static ssize_t lprocfs_elc_seq_write(struct file *file,
-				     const char __user *buffer,
-				     size_t count, loff_t *off)
+/* Report whether early lock cancel (ELC) is enabled for this namespace. */
+static ssize_t early_lock_cancel_show(struct kobject *kobj,
+				      struct attribute *attr,
+				      char *buf)
{
-	struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
-	unsigned int supp = -1;
+	struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
+						 ns_kobj);
+
+	return sprintf(buf, "%d\n", ns_connect_cancelset(ns));
+}
+
+/* Toggle early lock cancel via the OBD_CONNECT_CANCELSET connect flag. */
+static ssize_t early_lock_cancel_store(struct kobject *kobj,
+				       struct attribute *attr,
+				       const char *buffer,
+				       size_t count)
+{
+	struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
+						 ns_kobj);
+	unsigned long supp = -1;
	int rc;
-	rc = lprocfs_wr_uint(file, buffer, count, &supp);
+	rc = kstrtoul(buffer, 10, &supp);
	if (rc < 0)
		return rc;
	ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
	return count;
}
-LPROC_SEQ_FOPS(lprocfs_elc);
+LUSTRE_RW_ATTR(early_lock_cancel);
+
+/* These are for namespaces in /sys/fs/lustre/ldlm/namespaces/ */
+static struct attribute *ldlm_ns_attrs[] = {
+ &lustre_attr_resource_count.attr,
+ &lustre_attr_lock_count.attr,
+ &lustre_attr_lock_unused_count.attr,
+ &lustre_attr_lru_size.attr,
+ &lustre_attr_lru_max_age.attr,
+ &lustre_attr_early_lock_cancel.attr,
+ NULL,
+};
+
+/* Namespace kobject release: wakes ldlm_namespace_sysfs_unregister(). */
+static void ldlm_ns_release(struct kobject *kobj)
+{
+	struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
+						 ns_kobj);
+	complete(&ns->ns_kobj_unregister);
+}
+
+static struct kobj_type ldlm_ns_ktype = {
+	.default_attrs	= ldlm_ns_attrs,
+	.sysfs_ops	= &lustre_sysfs_ops,
+	.release	= ldlm_ns_release,
+};
static void ldlm_namespace_proc_unregister(struct ldlm_namespace *ns)
{
lprocfs_free_stats(&ns->ns_stats);
}
+/* Drop the namespace kobject and wait for ldlm_ns_release() to run. */
+void ldlm_namespace_sysfs_unregister(struct ldlm_namespace *ns)
+{
+	kobject_put(&ns->ns_kobj);
+	wait_for_completion(&ns->ns_kobj_unregister);
+}
+
+/*
+ * Register the namespace in /sys/fs/lustre/ldlm/namespaces and allocate
+ * its stats.  Returns 0 on success or a negative errno; on any failure
+ * the half-initialized kobject is put (required by kobject rules) so
+ * the caller only needs to skip sysfs_unregister on its error path.
+ */
+int ldlm_namespace_sysfs_register(struct ldlm_namespace *ns)
+{
+	int err;
+
+	ns->ns_kobj.kset = ldlm_ns_kset;
+	init_completion(&ns->ns_kobj_unregister);
+	err = kobject_init_and_add(&ns->ns_kobj, &ldlm_ns_ktype, NULL,
+				   "%s", ldlm_ns_name(ns));
+	if (err) {
+		/* kobject_init_and_add failure still requires a final put */
+		kobject_put(&ns->ns_kobj);
+		return err;
+	}
+
+	ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
+	if (!ns->ns_stats) {
+		kobject_put(&ns->ns_kobj);
+		return -ENOMEM;
+	}
+
+	lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
+			     LPROCFS_CNTR_AVGMINMAX, "locks", "locks");
+
+	return 0;
+}
+
+
static int ldlm_namespace_proc_register(struct ldlm_namespace *ns)
{
struct lprocfs_vars lock_vars[2];
ns->ns_proc_dir_entry = ns_pde;
}
- ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
- if (ns->ns_stats == NULL)
- return -ENOMEM;
-
- lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
- LPROCFS_CNTR_AVGMINMAX, "locks", "locks");
-
lock_name[MAX_STRING_SIZE] = '\0';
memset(lock_vars, 0, sizeof(lock_vars));
lock_vars[0].name = lock_name;
- ldlm_add_var(&lock_vars[0], ns_pde, "resource_count", ns,
- &lprocfs_ns_resources_fops);
- ldlm_add_var(&lock_vars[0], ns_pde, "lock_count", ns,
- &lprocfs_ns_locks_fops);
-
- if (ns_is_client(ns)) {
- ldlm_add_var(&lock_vars[0], ns_pde, "lock_unused_count",
- &ns->ns_nr_unused, &ldlm_uint_fops);
- ldlm_add_var(&lock_vars[0], ns_pde, "lru_size", ns,
- &lprocfs_lru_size_fops);
- ldlm_add_var(&lock_vars[0], ns_pde, "lru_max_age",
- &ns->ns_max_age, &ldlm_rw_uint_fops);
- ldlm_add_var(&lock_vars[0], ns_pde, "early_lock_cancel",
- ns, &lprocfs_elc_fops);
- } else {
+ if (!ns_is_client(ns)) {
ldlm_add_var(&lock_vars[0], ns_pde, "ctime_age_limit",
&ns->ns_ctime_age_limit, &ldlm_rw_uint_fops);
ldlm_add_var(&lock_vars[0], ns_pde, "lock_timeouts",
ns->ns_connect_flags = 0;
ns->ns_stopping = 0;
ns->ns_reclaim_start = 0;
- rc = ldlm_namespace_proc_register(ns);
- if (rc != 0) {
- CERROR("Can't initialize ns proc, rc %d\n", rc);
- GOTO(out_hash, rc);
- }
+
+ rc = ldlm_namespace_sysfs_register(ns);
+ if (rc) {
+ CERROR("Can't initialize ns sysfs, rc %d\n", rc);
+ GOTO(out_hash, rc);
+ }
+
+ rc = ldlm_namespace_proc_register(ns);
+ if (rc) {
+ CERROR("Can't initialize ns proc, rc %d\n", rc);
+ GOTO(out_sysfs, rc);
+ }
idx = ldlm_namespace_nr_read(client);
rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
ldlm_namespace_register(ns, client);
RETURN(ns);
out_proc:
- ldlm_namespace_proc_unregister(ns);
- ldlm_namespace_cleanup(ns, 0);
+ ldlm_namespace_proc_unregister(ns);
+out_sysfs:
+ ldlm_namespace_sysfs_unregister(ns);
+ ldlm_namespace_cleanup(ns, 0);
out_hash:
cfs_hash_putref(ns->ns_rs_hash);
out_ns:
ldlm_pool_fini(&ns->ns_pool);
ldlm_namespace_proc_unregister(ns);
+ ldlm_namespace_sysfs_unregister(ns);
cfs_hash_putref(ns->ns_rs_hash);
/* Namespace \a ns should be not on list at this time, otherwise
* this will cause issues related to using freed \a ns in poold