+
/* scrub iterator: zero-filled features, i.e. no key/record size constraints */
const struct dt_index_features dt_otable_features;
EXPORT_SYMBOL(dt_otable_features);
+
/* lfsck layout orphan: FID-sized key, lu_orphan_rec_v3 record */
const struct dt_index_features dt_lfsck_layout_orphan_features = {
	.dif_flags = 0,
	.dif_keysize_min = sizeof(struct lu_fid),
	.dif_keysize_max = sizeof(struct lu_fid),
	.dif_recsize_min = sizeof(struct lu_orphan_rec_v3),
	.dif_recsize_max = sizeof(struct lu_orphan_rec_v3),
	.dif_ptrsize = 4
};
EXPORT_SYMBOL(dt_lfsck_layout_orphan_features);
+
/* lfsck layout dangling: updatable index keyed by lfsck_layout_dangling_key */
const struct dt_index_features dt_lfsck_layout_dangling_features = {
	.dif_flags = DT_IND_UPDATE,
	.dif_keysize_min = sizeof(struct lfsck_layout_dangling_key),
	.dif_keysize_max = sizeof(struct lfsck_layout_dangling_key),
	.dif_recsize_min = sizeof(struct lu_fid),
	.dif_recsize_max = sizeof(struct lu_fid),
	.dif_ptrsize = 4
};
EXPORT_SYMBOL(dt_lfsck_layout_dangling_features);
+
/* lfsck namespace: FID-sized key mapping to a single-byte flags record */
const struct dt_index_features dt_lfsck_namespace_features = {
	.dif_flags = DT_IND_UPDATE,
	.dif_keysize_min = sizeof(struct lu_fid),
	.dif_keysize_max = sizeof(struct lu_fid),
	.dif_recsize_min = sizeof(__u8),
	.dif_recsize_max = sizeof(__u8),
	.dif_ptrsize = 4
};
EXPORT_SYMBOL(dt_lfsck_namespace_features);
+
/* accounting indexes: fixed-size key and record */
const struct dt_index_features dt_acct_features = {
	.dif_flags = DT_IND_UPDATE,
	.dif_keysize_min = sizeof(__u64), /* 64-bit uid/gid */
	.dif_keysize_max = sizeof(__u64), /* 64-bit uid/gid */
	.dif_recsize_min = sizeof(struct lquota_acct_rec), /* 16 bytes */
	.dif_recsize_max = sizeof(struct lquota_acct_rec), /* 16 bytes */
	.dif_ptrsize = 4
};
EXPORT_SYMBOL(dt_acct_features);
+
/* global quota files: fixed-size key and record */
const struct dt_index_features dt_quota_glb_features = {
	.dif_flags = DT_IND_UPDATE,
	/* a different key would have to be used for per-directory quota */
	.dif_keysize_min = sizeof(__u64), /* 64-bit uid/gid */
	.dif_keysize_max = sizeof(__u64), /* 64-bit uid/gid */
	.dif_recsize_min = sizeof(struct lquota_glb_rec), /* 32 bytes */
	.dif_recsize_max = sizeof(struct lquota_glb_rec), /* 32 bytes */
	.dif_ptrsize = 4
};
EXPORT_SYMBOL(dt_quota_glb_features);
+
/* slave quota files: same key as the global index, smaller record */
const struct dt_index_features dt_quota_slv_features = {
	.dif_flags = DT_IND_UPDATE,
	/* a different key would have to be used for per-directory quota */
	.dif_keysize_min = sizeof(__u64), /* 64-bit uid/gid */
	.dif_keysize_max = sizeof(__u64), /* 64-bit uid/gid */
	.dif_recsize_min = sizeof(struct lquota_slv_rec), /* 8 bytes */
	.dif_recsize_max = sizeof(struct lquota_slv_rec), /* 8 bytes */
	.dif_ptrsize = 4
};
EXPORT_SYMBOL(dt_quota_slv_features);
+
/* nodemap files, nodemap_rec size asserted in nodemap_storage.c */
const struct dt_index_features dt_nodemap_features = {
	.dif_flags = DT_IND_UPDATE,
	.dif_keysize_min = sizeof(__u64), /* 64-bit nodemap/record id */
	.dif_keysize_max = sizeof(__u64), /* 64-bit nodemap/record id */
	.dif_recsize_min = sizeof(union nodemap_rec), /* 32 bytes */
	.dif_recsize_max = sizeof(union nodemap_rec), /* 32 bytes */
	.dif_ptrsize = 4
};
EXPORT_SYMBOL(dt_nodemap_features);
+
+/*
+ * helper function returning what dt_index_features structure should be used
+ * based on the FID sequence. This is used by OBD_IDX_READ RPC
+ */
+static inline const struct dt_index_features *dt_index_feat_select(__u64 seq,
+ __u32 mode)
+{
+ if (seq == FID_SEQ_QUOTA_GLB) {
+ /* global quota index */
+ if (!S_ISREG(mode))
+ /* global quota index should be a regular file */
+ return ERR_PTR(-ENOENT);
+ return &dt_quota_glb_features;
+ } else if (seq == FID_SEQ_QUOTA) {
+ /* quota slave index */
+ if (!S_ISREG(mode))
+ /* slave index should be a regular file */
+ return ERR_PTR(-ENOENT);
+ return &dt_quota_slv_features;
+ } else if (seq == FID_SEQ_LAYOUT_RBTREE){
+ return &dt_lfsck_layout_orphan_features;
+ } else if (seq >= FID_SEQ_NORMAL) {
+ /* object is part of the namespace, verify that it is a
+ * directory */
+ if (!S_ISDIR(mode))
+ /* sorry, we can only deal with directory */
+ return ERR_PTR(-ENOTDIR);
+ return &dt_directory_features;
+ }
+
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+/*
+ * Fill a lu_idxpage with key/record pairs read for transfer via OBD_IDX_READ
+ * RPC
+ *
+ * \param env - is the environment passed by the caller
+ * \param lp - is a pointer to the lu_page to fill
+ * \param nob - is the maximum number of bytes that should be copied
+ * \param iops - is the index operation vector associated with the index object
+ * \param it - is a pointer to the current iterator
+ * \param attr - is the index attribute to pass to iops->rec()
+ * \param arg - is a pointer to the idx_info structure
+ */
+static int dt_index_page_build(const struct lu_env *env, union lu_page *lp,
+ size_t nob, const struct dt_it_ops *iops,
+ struct dt_it *it, __u32 attr, void *arg)
+{
+ struct idx_info *ii = (struct idx_info *)arg;
+ struct lu_idxpage *lip = &lp->lp_idx;
+ char *entry;
+ __u64 hash;
+ __u16 hashsize = 0;
+ __u16 keysize = 0;
+ __u16 recsize;
+ int rc;
+
+ ENTRY;
+
+ if (nob < LIP_HDR_SIZE)
+ return -EINVAL;
+
+ /* initialize the header of the new container */
+ memset(lip, 0, LIP_HDR_SIZE);
+ lip->lip_magic = LIP_MAGIC;
+ nob -= LIP_HDR_SIZE;
+
+ /* client wants to the 64-bit hash value associated with each record */
+ if (!(ii->ii_flags & II_FL_NOHASH))
+ hashsize = sizeof(hash);
+
+ entry = lip->lip_entries;
+ do {
+ /* fetch 64-bit hash value */
+ hash = iops->store(env, it);
+ ii->ii_hash_end = hash;
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_IDX_READ_BREAK)) {
+ if (lip->lip_nr != 0)
+ GOTO(out, rc = 0);
+ }
+
+ if (!(ii->ii_flags & II_FL_NOKEY)) {
+ keysize = iops->key_size(env, it);
+ if (!(ii->ii_flags & II_FL_VARKEY) &&
+ keysize != ii->ii_keysize) {
+ CERROR("keysize mismatch %hu != %hu.\n",
+ keysize, ii->ii_keysize);
+ GOTO(out, rc = -EINVAL);
+ }
+ }
+
+ /* and finally the record */
+ if (ii->ii_flags & II_FL_VARREC)
+ recsize = iops->rec_size(env, it, attr);
+ else
+ recsize = ii->ii_recsize;
+
+ if (nob < hashsize + keysize + recsize) {
+ if (lip->lip_nr == 0)
+ GOTO(out, rc = -E2BIG);
+ GOTO(out, rc = 0);
+ }
+
+ rc = iops->rec(env, it,
+ (struct dt_rec *)(entry + hashsize + keysize),
+ attr);
+ if (!rc) {
+ if (hashsize)
+ memcpy(entry, &hash, hashsize);
+ if (keysize) {
+ struct dt_key *key;
+
+ key = iops->key(env, it);
+ memcpy(entry + hashsize, key, keysize);
+ }
+ /* hash/key/record successfully copied! */
+ lip->lip_nr++;
+ if (unlikely(lip->lip_nr == 1 && ii->ii_count == 0))
+ ii->ii_hash_start = hash;
+ entry += hashsize + keysize + recsize;
+ nob -= hashsize + keysize + recsize;
+ } else if (rc != -ESTALE) {
+ GOTO(out, rc);
+ }
+
+ /* move on to the next record */
+ do {
+ rc = iops->next(env, it);
+ } while (rc == -ESTALE);
+ } while (rc == 0);
+
+ GOTO(out, rc);
+out:
+ if (rc >= 0 && lip->lip_nr > 0)
+ /* one more container */
+ ii->ii_count++;
+ if (rc > 0)
+ /* no more entries */
+ ii->ii_hash_end = II_END_OFF;
+ return rc;
+}
+
+
+/*
+ * Walk index and fill lu_page containers with key/record pairs
+ *
+ * \param env - is the environment passed by the caller
+ * \param obj - is the index object to parse
+ * \param rdpg - is the lu_rdpg descriptor associated with the transfer
+ * \param filler - is the callback function responsible for filling a lu_page
+ * with key/record pairs in the format wanted by the caller.
+ * If NULL, uses dt_index_page_build
+ * \param arg - is an opaq argument passed to the filler function
+ *
+ * \retval sum (in bytes) of all filled lu_pages
+ * \retval -ve errno on failure
+ */
+int dt_index_walk(const struct lu_env *env, struct dt_object *obj,
+ const struct lu_rdpg *rdpg, dt_index_page_build_t filler,
+ void *arg)
+{
+ struct dt_it *it;
+ const struct dt_it_ops *iops;
+ size_t pageidx, nob, nlupgs = 0;
+ int rc;
+ ENTRY;
+
+ LASSERT(rdpg->rp_pages != NULL);
+ LASSERT(obj->do_index_ops != NULL);
+
+ if (filler == NULL)
+ filler = dt_index_page_build;
+
+ nob = rdpg->rp_count;
+ if (nob == 0)
+ RETURN(-EFAULT);
+
+ /* Iterate through index and fill containers from @rdpg */
+ iops = &obj->do_index_ops->dio_it;
+ LASSERT(iops != NULL);
+ it = iops->init(env, obj, rdpg->rp_attrs);
+ if (IS_ERR(it))
+ RETURN(PTR_ERR(it));
+
+ rc = iops->load(env, it, rdpg->rp_hash);
+ if (rc == 0) {
+ /*
+ * Iterator didn't find record with exactly the key requested.
+ *
+ * It is currently either
+ *
+ * - positioned above record with key less than
+ * requested---skip it.
+ * - or not positioned at all (is in IAM_IT_SKEWED
+ * state)---position it on the next item.
+ */
+ rc = iops->next(env, it);
+ } else if (rc > 0) {
+ rc = 0;
+ } else {
+ if (rc == -ENODATA)
+ rc = 0;
+ GOTO(out, rc);
+ }
+
+ /*
+ * Fill containers one after the other. There might be multiple
+ * containers per physical page.
+ *
+ * At this point and across for-loop:
+ * rc == 0 -> ok, proceed.
+ * rc > 0 -> end of index.
+ * rc < 0 -> error.
+ */
+ for (pageidx = 0; rc == 0 && nob > 0; pageidx++) {
+ union lu_page *lp;
+ int i;
+
+ LASSERT(pageidx < rdpg->rp_npages);
+ lp = kmap(rdpg->rp_pages[pageidx]);
+
+ /* fill lu pages */
+ for (i = 0; i < LU_PAGE_COUNT; i++, lp++, nob -= LU_PAGE_SIZE) {
+ rc = filler(env, lp, min_t(size_t, nob, LU_PAGE_SIZE),
+ iops, it, rdpg->rp_attrs, arg);
+ if (rc < 0)
+ break;
+ /* one more lu_page */
+ nlupgs++;
+ if (rc > 0)
+ /* end of index */
+ break;
+ }
+ kunmap(rdpg->rp_pages[i]);
+ }
+
+out:
+ iops->put(env, it);
+ iops->fini(env, it);
+
+ if (rc >= 0)
+ rc = min_t(size_t, nlupgs * LU_PAGE_SIZE, rdpg->rp_count);
+
+ RETURN(rc);
+}
+EXPORT_SYMBOL(dt_index_walk);
+
+/**
+ * Walk key/record pairs of an index and copy them into 4KB containers to be
+ * transferred over the network. This is the common handler for OBD_IDX_READ
+ * RPC processing.
+ *
+ * \param env - is the environment passed by the caller
+ * \param dev - is the dt_device storing the index
+ * \param ii - is the idx_info structure packed by the client in the
+ * OBD_IDX_READ request
+ * \param rdpg - is the lu_rdpg descriptor
+ *
+ * \retval on success, return sum (in bytes) of all filled containers
+ * \retval appropriate error otherwise.
+ */
int dt_index_read(const struct lu_env *env, struct dt_device *dev,
		  struct idx_info *ii, const struct lu_rdpg *rdpg)
{
	const struct dt_index_features *feat;
	struct dt_object *obj;
	int rc;
	ENTRY;

	/*
	 * rp_count shouldn't be null and should be a multiple of the container
	 * size
	 */
	if (rdpg->rp_count == 0 || (rdpg->rp_count & (LU_PAGE_SIZE - 1)) != 0)
		RETURN(-EFAULT);

	/* only quota, layout-rbtree and normal-namespace FIDs are served */
	if (!fid_is_quota(&ii->ii_fid) && !fid_is_layout_rbtree(&ii->ii_fid) &&
	    !fid_is_norm(&ii->ii_fid))
		RETURN(-EOPNOTSUPP);

	/* lookup index object subject to the transfer */
	obj = dt_locate(env, dev, &ii->ii_fid);
	if (IS_ERR(obj))
		RETURN(PTR_ERR(obj));
	if (dt_object_exists(obj) == 0)
		GOTO(out, rc = -ENOENT);

	/* fetch index features associated with index object */
	feat = dt_index_feat_select(fid_seq(&ii->ii_fid),
				    lu_object_attr(&obj->do_lu));
	if (IS_ERR(feat))
		GOTO(out, rc = PTR_ERR(feat));

	/* load index feature if not done already */
	if (obj->do_index_ops == NULL) {
		rc = obj->do_ops->do_index_try(env, obj, feat);
		if (rc)
			GOTO(out, rc);
	}

	/* fill ii_flags with supported index features; anything the client
	 * requested beyond this set is silently dropped */
	ii->ii_flags &= (II_FL_NOHASH | II_FL_NOKEY | II_FL_VARKEY |
			 II_FL_VARREC);

	/* fixed-size indexes dictate the key/record sizes to the client */
	if (!(feat->dif_flags & DT_IND_VARKEY))
		ii->ii_keysize = feat->dif_keysize_max;

	if (!(feat->dif_flags & DT_IND_VARREC))
		ii->ii_recsize = feat->dif_recsize_max;

	if (feat->dif_flags & DT_IND_NONUNQ)
		/* key isn't necessarily unique */
		ii->ii_flags |= II_FL_NONUNQ;

	/* layout rbtree objects are walked unlocked; everything else is
	 * read-locked so ii_version matches the state that was walked */
	if (!fid_is_layout_rbtree(&ii->ii_fid)) {
		dt_read_lock(env, obj, 0);
		/* fetch object version before walking the index */
		ii->ii_version = dt_version_get(env, obj);
	}

	/* walk the index and fill lu_idxpages with key/record pairs */
	rc = dt_index_walk(env, obj, rdpg, dt_index_page_build, ii);
	if (!fid_is_layout_rbtree(&ii->ii_fid))
		dt_read_unlock(env, obj);

	if (rc == 0) {
		/* index is empty */
		LASSERT(ii->ii_count == 0);
		ii->ii_hash_end = II_END_OFF;
	}

	GOTO(out, rc);
out:
	dt_object_put(env, obj);
	return rc;
}
EXPORT_SYMBOL(dt_index_read);
+
+#ifdef CONFIG_PROC_FS
+int lprocfs_dt_blksize_seq_show(struct seq_file *m, void *v)
+{
+ struct dt_device *dt = m->private;
+ struct obd_statfs osfs;
+
+ int rc = dt_statfs(NULL, dt, &osfs);
+ if (rc == 0)
+ seq_printf(m, "%u\n", (unsigned) osfs.os_bsize);
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_dt_blksize_seq_show);
+
+int lprocfs_dt_kbytestotal_seq_show(struct seq_file *m, void *v)
+{
+ struct dt_device *dt = m->private;
+ struct obd_statfs osfs;
+
+ int rc = dt_statfs(NULL, dt, &osfs);
+ if (rc == 0) {
+ __u32 blk_size = osfs.os_bsize >> 10;
+ __u64 result = osfs.os_blocks;
+
+ while (blk_size >>= 1)
+ result <<= 1;
+
+ seq_printf(m, "%llu\n", result);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_dt_kbytestotal_seq_show);
+
+int lprocfs_dt_kbytesfree_seq_show(struct seq_file *m, void *v)
+{
+ struct dt_device *dt = m->private;
+ struct obd_statfs osfs;
+
+ int rc = dt_statfs(NULL, dt, &osfs);
+ if (rc == 0) {
+ __u32 blk_size = osfs.os_bsize >> 10;
+ __u64 result = osfs.os_bfree;
+
+ while (blk_size >>= 1)
+ result <<= 1;
+
+ seq_printf(m, "%llu\n", result);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_dt_kbytesfree_seq_show);
+
+int lprocfs_dt_kbytesavail_seq_show(struct seq_file *m, void *v)
+{
+ struct dt_device *dt = m->private;
+ struct obd_statfs osfs;
+
+ int rc = dt_statfs(NULL, dt, &osfs);
+ if (rc == 0) {
+ __u32 blk_size = osfs.os_bsize >> 10;
+ __u64 result = osfs.os_bavail;
+
+ while (blk_size >>= 1)
+ result <<= 1;
+
+ seq_printf(m, "%llu\n", result);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_dt_kbytesavail_seq_show);
+
+int lprocfs_dt_filestotal_seq_show(struct seq_file *m, void *v)
+{
+ struct dt_device *dt = m->private;
+ struct obd_statfs osfs;
+
+ int rc = dt_statfs(NULL, dt, &osfs);
+ if (rc == 0)
+ seq_printf(m, "%llu\n", osfs.os_files);
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_dt_filestotal_seq_show);
+
+int lprocfs_dt_filesfree_seq_show(struct seq_file *m, void *v)
+{
+ struct dt_device *dt = m->private;
+ struct obd_statfs osfs;
+
+ int rc = dt_statfs(NULL, dt, &osfs);
+ if (rc == 0)
+ seq_printf(m, "%llu\n", osfs.os_ffree);
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_dt_filesfree_seq_show);
+
+#endif /* CONFIG_PROC_FS */
+
+static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct dt_device *dt = container_of(kobj, struct dt_device,
+ dd_kobj);
+ struct lu_device *lu = dt2lu_dev(dt);
+
+ if (!lu->ld_obd)
+ return -ENODEV;
+
+ return sprintf(buf, "%s\n", lu->ld_obd->obd_uuid.uuid);
+}
+LUSTRE_RO_ATTR(uuid);
+
+static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct dt_device *dt = container_of(kobj, struct dt_device,
+ dd_kobj);
+ struct obd_statfs osfs;
+ int rc;
+
+ rc = dt_statfs(NULL, dt, &osfs);
+ if (rc)
+ return rc;
+
+ return sprintf(buf, "%u\n", (unsigned) osfs.os_bsize);
+}
+LUSTRE_RO_ATTR(blocksize);
+
+static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct dt_device *dt = container_of(kobj, struct dt_device,
+ dd_kobj);
+ struct obd_statfs osfs;
+ u32 blk_size;
+ u64 result;
+ int rc;
+
+ rc = dt_statfs(NULL, dt, &osfs);
+ if (rc)
+ return rc;
+
+ blk_size = osfs.os_bsize >> 10;
+ result = osfs.os_blocks;
+
+ while (blk_size >>= 1)
+ result <<= 1;
+
+ return sprintf(buf, "%llu\n", result);
+}
+LUSTRE_RO_ATTR(kbytestotal);
+
+static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct dt_device *dt = container_of(kobj, struct dt_device,
+ dd_kobj);
+ struct obd_statfs osfs;
+ u32 blk_size;
+ u64 result;
+ int rc;
+
+ rc = dt_statfs(NULL, dt, &osfs);
+ if (rc)
+ return rc;
+
+ blk_size = osfs.os_bsize >> 10;
+ result = osfs.os_bfree;
+
+ while (blk_size >>= 1)
+ result <<= 1;
+
+ return sprintf(buf, "%llu\n", result);
+}
+LUSTRE_RO_ATTR(kbytesfree);
+
+static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct dt_device *dt = container_of(kobj, struct dt_device,
+ dd_kobj);
+ struct obd_statfs osfs;
+ u32 blk_size;
+ u64 result;
+ int rc;
+
+ rc = dt_statfs(NULL, dt, &osfs);
+ if (rc)
+ return rc;
+
+ blk_size = osfs.os_bsize >> 10;
+ result = osfs.os_bavail;
+
+ while (blk_size >>= 1)
+ result <<= 1;
+
+ return sprintf(buf, "%llu\n", result);
+}
+LUSTRE_RO_ATTR(kbytesavail);
+
+static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct dt_device *dt = container_of(kobj, struct dt_device,
+ dd_kobj);
+ struct obd_statfs osfs;
+ int rc;
+
+ rc = dt_statfs(NULL, dt, &osfs);
+ if (rc)
+ return rc;
+
+ return sprintf(buf, "%llu\n", osfs.os_files);
+}
+LUSTRE_RO_ATTR(filestotal);
+
+static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct dt_device *dt = container_of(kobj, struct dt_device,
+ dd_kobj);
+ struct obd_statfs osfs;
+ int rc;
+
+ rc = dt_statfs(NULL, dt, &osfs);
+ if (rc)
+ return rc;
+
+ return sprintf(buf, "%llu\n", osfs.os_ffree);
+}
+LUSTRE_RO_ATTR(filesfree);
+
/* default sysfs attributes installed for every dt_device by
 * dt_tunables_init() and removed by dt_tunables_fini() */
static const struct attribute *dt_def_attrs[] = {
	&lustre_attr_uuid.attr,
	&lustre_attr_blocksize.attr,
	&lustre_attr_kbytestotal.attr,
	&lustre_attr_kbytesfree.attr,
	&lustre_attr_kbytesavail.attr,
	&lustre_attr_filestotal.attr,
	&lustre_attr_filesfree.attr,
	NULL,
};
+
+static void dt_sysfs_release(struct kobject *kobj)
+{
+ struct dt_device *dt = container_of(kobj, struct dt_device,
+ dd_kobj);
+
+ debugfs_remove_recursive(dt->dd_debugfs_entry);
+ dt->dd_debugfs_entry = NULL;
+
+ complete(&dt->dd_kobj_unregister);
+}
+
+int dt_tunables_fini(struct dt_device *dt)
+{
+ if (!dt)
+ return -EINVAL;
+
+ if (dt->dd_def_attrs)
+ sysfs_remove_files(&dt->dd_kobj, dt->dd_def_attrs);
+
+ kobject_put(&dt->dd_kobj);
+ wait_for_completion(&dt->dd_kobj_unregister);
+
+ return 0;
+}
+EXPORT_SYMBOL(dt_tunables_fini);
+
/*
 * Register sysfs (and optionally debugfs) tunables for a dt_device.
 *
 * \param dt   - dt_device to expose
 * \param type - obd_type providing the parent kobject/debugfs directory
 * \param name - name of the sysfs/debugfs entry
 * \param list - lprocfs variables for debugfs; may be NULL to skip debugfs
 *
 * \retval 0 on success, negative errno on failure (all partially created
 *	   state is torn down on the error paths)
 */
int dt_tunables_init(struct dt_device *dt, struct obd_type *type,
		     const char *name, struct lprocfs_vars *list)
{
	int rc;

	dt->dd_ktype.sysfs_ops = &lustre_sysfs_ops;
	dt->dd_ktype.release = dt_sysfs_release;

	init_completion(&dt->dd_kobj_unregister);
	rc = kobject_init_and_add(&dt->dd_kobj, &dt->dd_ktype, &type->typ_kobj,
				  "%s", name);
	if (rc)
		return rc;

	dt->dd_def_attrs = dt_def_attrs;

	rc = sysfs_create_files(&dt->dd_kobj, dt->dd_def_attrs);
	if (rc) {
		kobject_put(&dt->dd_kobj);
		return rc;
	}

	/*
	 * No need to register debugfs if no entries. This allows us to
	 * choose between using dt_device or obd_device for debugfs.
	 */
	if (!list)
		return rc;

	dt->dd_debugfs_entry = ldebugfs_register(name,
						 type->typ_debugfs_entry,
						 list, dt);
	if (IS_ERR_OR_NULL(dt->dd_debugfs_entry)) {
		rc = dt->dd_debugfs_entry ? PTR_ERR(dt->dd_debugfs_entry)
					  : -ENOMEM;
		CERROR("%s: error %d setting up debugfs\n",
		       name, rc);
		dt->dd_debugfs_entry = NULL;
		/* undo the sysfs registration done above */
		sysfs_remove_files(&dt->dd_kobj, dt->dd_def_attrs);
		kobject_put(&dt->dd_kobj);
		return rc;
	}

	return rc;
}
EXPORT_SYMBOL(dt_tunables_init);