4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * lustre/obdclass/dt_object.c
34 * Generic functions from dt_object.h
36 * Author: Nikita Danilov <nikita@clusterfs.com>
39 #define DEBUG_SUBSYSTEM S_CLASS
41 #include <linux/list.h>
42 #include <obd_class.h>
43 #include <dt_object.h>
45 #include <lustre_fid.h>
46 #include <lustre_nodemap.h>
47 #include <lustre_quota.h>
48 #include <lustre_lfsck.h>
50 /* context key constructor/destructor: dt_global_key_init, dt_global_key_fini */
51 LU_KEY_INIT(dt_global, struct dt_thread_info);
52 LU_KEY_FINI(dt_global, struct dt_thread_info);
54 struct lu_context_key dt_key = {
55 .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD | LCT_MG_THREAD | LCT_LOCAL,
56 .lct_init = dt_global_key_init,
57 .lct_fini = dt_global_key_fini
61 * no lock is necessary to protect the list, because call-backs
62 * are added during system startup. Please refer to "struct dt_device".
64 void dt_txn_callback_add(struct dt_device *dev, struct dt_txn_callback *cb)
66 list_add(&cb->dtc_linkage, &dev->dd_txn_callbacks);
68 EXPORT_SYMBOL(dt_txn_callback_add);
70 void dt_txn_callback_del(struct dt_device *dev, struct dt_txn_callback *cb)
72 list_del_init(&cb->dtc_linkage);
74 EXPORT_SYMBOL(dt_txn_callback_del);
76 int dt_txn_hook_start(const struct lu_env *env,
77 struct dt_device *dev, struct thandle *th)
80 struct dt_txn_callback *cb;
85 list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
86 struct thandle *dtc_th = th;
88 if (cb->dtc_txn_start == NULL ||
89 !(cb->dtc_tag & env->le_ctx.lc_tags))
93 * Usually dt_txn_hook_start is called from bottom device,
94 * and if the thandle has th_top, then we need use top
95 * thandle for the callback in the top thandle layer
97 if (th->th_top != NULL)
100 rc = cb->dtc_txn_start(env, dtc_th, cb->dtc_cookie);
106 EXPORT_SYMBOL(dt_txn_hook_start);
108 int dt_txn_hook_stop(const struct lu_env *env, struct thandle *th)
110 struct dt_device *dev = th->th_dev;
111 struct dt_txn_callback *cb;
117 if (OBD_FAIL_CHECK(OBD_FAIL_DT_TXN_STOP))
120 list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
121 struct thandle *dtc_th = th;
123 if (cb->dtc_txn_stop == NULL ||
124 !(cb->dtc_tag & env->le_ctx.lc_tags))
128 * Usually dt_txn_hook_stop is called from bottom device,
129 * and if the thandle has th_top, then we need use top
130 * thandle for the callback in the top thandle layer
132 if (th->th_top != NULL)
135 rc = cb->dtc_txn_stop(env, dtc_th, cb->dtc_cookie);
141 EXPORT_SYMBOL(dt_txn_hook_stop);
143 int dt_device_init(struct dt_device *dev, struct lu_device_type *t)
145 INIT_LIST_HEAD(&dev->dd_txn_callbacks);
146 return lu_device_init(&dev->dd_lu_dev, t);
148 EXPORT_SYMBOL(dt_device_init);
150 void dt_device_fini(struct dt_device *dev)
152 lu_device_fini(&dev->dd_lu_dev);
154 EXPORT_SYMBOL(dt_device_fini);
156 int dt_object_init(struct dt_object *obj,
157 struct lu_object_header *h, struct lu_device *d)
160 return lu_object_init(&obj->do_lu, h, d);
162 EXPORT_SYMBOL(dt_object_init);
164 void dt_object_fini(struct dt_object *obj)
166 lu_object_fini(&obj->do_lu);
168 EXPORT_SYMBOL(dt_object_fini);
170 int dt_try_as_dir(const struct lu_env *env, struct dt_object *obj)
172 if (obj->do_index_ops == NULL)
173 obj->do_ops->do_index_try(env, obj, &dt_directory_features);
174 return obj->do_index_ops != NULL;
176 EXPORT_SYMBOL(dt_try_as_dir);
178 enum dt_format_type dt_mode_to_dft(__u32 mode)
180 enum dt_format_type result;
182 switch (mode & S_IFMT) {
187 result = DFT_REGULAR;
199 LASSERTF(0, "invalid mode %o\n", mode);
200 result = 0; /* Just for satisfying compiler. */
205 EXPORT_SYMBOL(dt_mode_to_dft);
208 * lookup fid for object named \a name in directory \a dir.
211 int dt_lookup_dir(const struct lu_env *env, struct dt_object *dir,
212 const char *name, struct lu_fid *fid)
214 if (dt_try_as_dir(env, dir))
215 return dt_lookup(env, dir, (struct dt_rec *)fid,
216 (const struct dt_key *)name);
219 EXPORT_SYMBOL(dt_lookup_dir);
222 * this differs from dt_locate by top_dev as parameter
223 * but not one from lu_site
225 struct dt_object *dt_locate_at(const struct lu_env *env,
226 struct dt_device *dev,
227 const struct lu_fid *fid,
228 struct lu_device *top_dev,
229 const struct lu_object_conf *conf)
231 struct lu_object *lo;
234 lo = lu_object_find_at(env, top_dev, fid, conf);
240 list_for_each_entry(n, &lo->lo_header->loh_layers, lo_linkage) {
241 if (n->lo_dev == &dev->dd_lu_dev)
242 return container_of(n, struct dt_object, do_lu);
245 lu_object_put(env, lo);
246 return ERR_PTR(-ENOENT);
248 EXPORT_SYMBOL(dt_locate_at);
251 * find an object named \a entry in given \a dfh->dfh_o directory.
253 static int dt_find_entry(const struct lu_env *env, const char *entry,
256 struct dt_find_hint *dfh = data;
257 struct dt_device *dt = dfh->dfh_dt;
258 struct lu_fid *fid = dfh->dfh_fid;
259 struct dt_object *obj = dfh->dfh_o;
262 rc = dt_lookup_dir(env, obj, entry, fid);
263 dt_object_put(env, obj);
265 obj = dt_locate(env, dt, fid);
275 * Abstract function which parses path name. This function feeds
276 * path component to \a entry_func.
278 int dt_path_parser(const struct lu_env *env,
279 char *path, dt_entry_func_t entry_func,
286 e = strsep(&path, "/");
291 if (!path || path[0] == '\0')
295 rc = entry_func(env, e, data);
304 dt_store_resolve(const struct lu_env *env, struct dt_device *dt,
305 const char *path, struct lu_fid *fid)
307 struct dt_thread_info *info = dt_info(env);
308 struct dt_find_hint *dfh = &info->dti_dfh;
309 struct dt_object *obj;
316 strlcpy(info->dti_buf, path, sizeof(info->dti_buf));
318 result = dt->dd_ops->dt_root_get(env, dt, fid);
320 obj = dt_locate(env, dt, fid);
323 result = dt_path_parser(env, info->dti_buf,
326 obj = ERR_PTR(result);
331 obj = ERR_PTR(result);
/* Look up \a name in directory \a p and return the located (referenced)
 * object, or an ERR_PTR on lookup/locate failure. */
static struct dt_object *dt_reg_open(const struct lu_env *env,
				     struct dt_device *dt,
				     struct dt_object *p,
				     const char *name,
				     struct lu_fid *fid)
{
	struct dt_object *o;
	int result;

	result = dt_lookup_dir(env, p, name, fid);
	if (result == 0)
		o = dt_locate(env, dt, fid);
	else
		o = ERR_PTR(result);

	return o;
}
/**
 * Open dt object named \a filename from \a dirname directory.
 * \param dt dt device
 * \param fid on success, object fid is stored in *fid
 *
 * The intermediate directory reference is dropped before returning.
 */
struct dt_object *dt_store_open(const struct lu_env *env, struct dt_device *dt,
				const char *dirname, const char *filename,
				struct lu_fid *fid)
{
	struct dt_object *file;
	struct dt_object *dir;

	dir = dt_store_resolve(env, dt, dirname, fid);
	if (!IS_ERR(dir)) {
		file = dt_reg_open(env, dt, dir, filename, fid);
		dt_object_put(env, dir);
	} else {
		/* propagate the resolve error */
		file = dir;
	}

	return file;
}
377 struct dt_object *dt_find_or_create(const struct lu_env *env,
378 struct dt_device *dt,
379 const struct lu_fid *fid,
380 struct dt_object_format *dof,
383 struct dt_object *dto;
389 dto = dt_locate(env, dt, fid);
393 LASSERT(dto != NULL);
394 if (dt_object_exists(dto))
397 th = dt_trans_create(env, dt);
399 GOTO(out, rc = PTR_ERR(th));
401 rc = dt_declare_create(env, dto, at, NULL, dof, th);
403 GOTO(trans_stop, rc);
405 rc = dt_trans_start_local(env, dt, th);
407 GOTO(trans_stop, rc);
409 dt_write_lock(env, dto, 0);
410 if (dt_object_exists(dto))
411 GOTO(unlock, rc = 0);
413 CDEBUG(D_OTHER, "create new object "DFID"\n", PFID(fid));
415 rc = dt_create(env, dto, at, NULL, dof, th);
418 LASSERT(dt_object_exists(dto));
420 dt_write_unlock(env, dto);
422 dt_trans_stop(env, dt, th);
425 dt_object_put(env, dto);
431 EXPORT_SYMBOL(dt_find_or_create);
433 /* dt class init function. */
434 int dt_global_init(void)
438 LU_CONTEXT_KEY_INIT(&dt_key);
439 result = lu_context_key_register(&dt_key);
443 void dt_global_fini(void)
445 lu_context_key_degister(&dt_key);
449 * Generic read helper. May return an error for partial reads.
451 * \param env lustre environment
452 * \param dt object to be read
453 * \param buf lu_buf to be filled, with buffer pointer and length
454 * \param pos position to start reading, updated as data is read
456 * \retval real size of data read
457 * \retval -ve errno on failure
459 int dt_read(const struct lu_env *env, struct dt_object *dt,
460 struct lu_buf *buf, loff_t *pos)
462 LASSERTF(dt != NULL, "dt is NULL when we want to read record\n");
463 return dt->do_body_ops->dbo_read(env, dt, buf, pos);
465 EXPORT_SYMBOL(dt_read);
468 * Read structures of fixed size from storage. Unlike dt_read(), using
469 * dt_record_read() will return an error for partial reads.
471 * \param env lustre environment
472 * \param dt object to be read
473 * \param buf lu_buf to be filled, with buffer pointer and length
474 * \param pos position to start reading, updated as data is read
476 * \retval 0 on successfully reading full buffer
477 * \retval -EFAULT on short read
478 * \retval -ve errno on failure
480 int dt_record_read(const struct lu_env *env, struct dt_object *dt,
481 struct lu_buf *buf, loff_t *pos)
485 LASSERTF(dt != NULL, "dt is NULL when we want to read record\n");
487 size = dt->do_body_ops->dbo_read(env, dt, buf, pos);
490 return (size == (ssize_t)buf->lb_len) ? 0 : -EFAULT;
492 EXPORT_SYMBOL(dt_record_read);
494 int dt_record_write(const struct lu_env *env, struct dt_object *dt,
495 const struct lu_buf *buf, loff_t *pos, struct thandle *th)
499 LASSERTF(dt != NULL, "dt is NULL when we want to write record\n");
501 LASSERT(dt->do_body_ops);
502 LASSERT(dt->do_body_ops->dbo_write);
504 size = dt->do_body_ops->dbo_write(env, dt, buf, pos, th);
507 return (size == (ssize_t)buf->lb_len) ? 0 : -EFAULT;
509 EXPORT_SYMBOL(dt_record_write);
511 int dt_declare_version_set(const struct lu_env *env, struct dt_object *o,
515 char *xname = XATTR_NAME_VERSION;
519 vbuf.lb_len = sizeof(dt_obj_version_t);
520 return dt_declare_xattr_set(env, o, &vbuf, xname, 0, th);
523 EXPORT_SYMBOL(dt_declare_version_set);
525 void dt_version_set(const struct lu_env *env, struct dt_object *o,
526 dt_obj_version_t version, struct thandle *th)
529 char *xname = XATTR_NAME_VERSION;
533 vbuf.lb_buf = &version;
534 vbuf.lb_len = sizeof(version);
536 rc = dt_xattr_set(env, o, &vbuf, xname, 0, th);
538 CDEBUG(D_INODE, "Can't set version, rc %d\n", rc);
540 EXPORT_SYMBOL(dt_version_set);
542 dt_obj_version_t dt_version_get(const struct lu_env *env, struct dt_object *o)
545 char *xname = XATTR_NAME_VERSION;
546 dt_obj_version_t version;
550 vbuf.lb_buf = &version;
551 vbuf.lb_len = sizeof(version);
552 rc = dt_xattr_get(env, o, &vbuf, xname);
553 if (rc != sizeof(version)) {
554 CDEBUG(D_INODE, "Can't get version, rc %d\n", rc);
559 EXPORT_SYMBOL(dt_version_get);
/* list of all supported index types */

/* index features for directory objects; all fields are zero here
 * (static zero-initialization) — the backend interprets the defaults */
const struct dt_index_features dt_directory_features;
EXPORT_SYMBOL(dt_directory_features);

/* zero-initialized feature set — presumably for the object-table (otable)
 * iterator; TODO confirm against users of dt_otable_features */
const struct dt_index_features dt_otable_features;
EXPORT_SYMBOL(dt_otable_features);
571 /* lfsck layout orphan */
572 const struct dt_index_features dt_lfsck_layout_orphan_features = {
574 .dif_keysize_min = sizeof(struct lu_fid),
575 .dif_keysize_max = sizeof(struct lu_fid),
576 .dif_recsize_min = sizeof(struct lu_orphan_rec_v3),
577 .dif_recsize_max = sizeof(struct lu_orphan_rec_v3),
580 EXPORT_SYMBOL(dt_lfsck_layout_orphan_features);
582 /* lfsck layout dangling */
583 const struct dt_index_features dt_lfsck_layout_dangling_features = {
584 .dif_flags = DT_IND_UPDATE,
585 .dif_keysize_min = sizeof(struct lfsck_layout_dangling_key),
586 .dif_keysize_max = sizeof(struct lfsck_layout_dangling_key),
587 .dif_recsize_min = sizeof(struct lu_fid),
588 .dif_recsize_max = sizeof(struct lu_fid),
591 EXPORT_SYMBOL(dt_lfsck_layout_dangling_features);
593 /* lfsck namespace */
594 const struct dt_index_features dt_lfsck_namespace_features = {
595 .dif_flags = DT_IND_UPDATE,
596 .dif_keysize_min = sizeof(struct lu_fid),
597 .dif_keysize_max = sizeof(struct lu_fid),
598 .dif_recsize_min = sizeof(__u8),
599 .dif_recsize_max = sizeof(__u8),
602 EXPORT_SYMBOL(dt_lfsck_namespace_features);
604 /* accounting indexes */
605 const struct dt_index_features dt_acct_features = {
606 .dif_flags = DT_IND_UPDATE,
607 .dif_keysize_min = sizeof(__u64), /* 64-bit uid/gid */
608 .dif_keysize_max = sizeof(__u64), /* 64-bit uid/gid */
609 .dif_recsize_min = sizeof(struct lquota_acct_rec), /* 16 bytes */
610 .dif_recsize_max = sizeof(struct lquota_acct_rec), /* 16 bytes */
613 EXPORT_SYMBOL(dt_acct_features);
615 /* global quota files */
616 const struct dt_index_features dt_quota_glb_features = {
617 .dif_flags = DT_IND_UPDATE,
618 /* a different key would have to be used for per-directory quota */
619 .dif_keysize_min = sizeof(__u64), /* 64-bit uid/gid */
620 .dif_keysize_max = sizeof(__u64), /* 64-bit uid/gid */
621 .dif_recsize_min = sizeof(struct lquota_glb_rec), /* 32 bytes */
622 .dif_recsize_max = sizeof(struct lquota_glb_rec), /* 32 bytes */
625 EXPORT_SYMBOL(dt_quota_glb_features);
627 /* slave quota files */
628 const struct dt_index_features dt_quota_slv_features = {
629 .dif_flags = DT_IND_UPDATE,
630 /* a different key would have to be used for per-directory quota */
631 .dif_keysize_min = sizeof(__u64), /* 64-bit uid/gid */
632 .dif_keysize_max = sizeof(__u64), /* 64-bit uid/gid */
633 .dif_recsize_min = sizeof(struct lquota_slv_rec), /* 8 bytes */
634 .dif_recsize_max = sizeof(struct lquota_slv_rec), /* 8 bytes */
637 EXPORT_SYMBOL(dt_quota_slv_features);
639 /* nodemap files, nodemap_rec size asserted in nodemap_storage.c */
640 const struct dt_index_features dt_nodemap_features = {
641 .dif_flags = DT_IND_UPDATE,
642 .dif_keysize_min = sizeof(__u64), /* 64-bit nodemap/record id */
643 .dif_keysize_max = sizeof(__u64), /* 64-bit nodemap/record id */
644 .dif_recsize_min = sizeof(union nodemap_rec), /* 32 bytes */
645 .dif_recsize_max = sizeof(union nodemap_rec), /* 32 bytes */
648 EXPORT_SYMBOL(dt_nodemap_features);
651 * helper function returning what dt_index_features structure should be used
652 * based on the FID sequence. This is used by OBD_IDX_READ RPC
654 static inline const struct dt_index_features *dt_index_feat_select(__u64 seq,
657 if (seq == FID_SEQ_QUOTA_GLB) {
658 /* global quota index */
660 /* global quota index should be a regular file */
661 return ERR_PTR(-ENOENT);
662 return &dt_quota_glb_features;
663 } else if (seq == FID_SEQ_QUOTA) {
664 /* quota slave index */
666 /* slave index should be a regular file */
667 return ERR_PTR(-ENOENT);
668 return &dt_quota_slv_features;
669 } else if (seq == FID_SEQ_LAYOUT_RBTREE){
670 return &dt_lfsck_layout_orphan_features;
671 } else if (seq >= FID_SEQ_NORMAL) {
672 /* object is part of the namespace, verify that it is a
675 /* sorry, we can only deal with directory */
676 return ERR_PTR(-ENOTDIR);
677 return &dt_directory_features;
680 return ERR_PTR(-EOPNOTSUPP);
684 * Fill a lu_idxpage with key/record pairs read for transfer via OBD_IDX_READ
687 * \param env - is the environment passed by the caller
688 * \param obj - index object being traversed (mostly for debugging)
689 * \param lp - is a pointer to the lu_page to fill
690 * \param bytes - is the maximum number of bytes that should be copied
691 * \param iops - is the index operation vector associated with the index object
692 * \param it - is a pointer to the current iterator
693 * \param attr - is the index attribute to pass to iops->rec()
694 * \param arg - is a pointer to the idx_info structure
696 static int dt_index_page_build(const struct lu_env *env, struct dt_object *obj,
697 union lu_page *lp, size_t bytes,
698 const struct dt_it_ops *iops,
699 struct dt_it *it, __u32 attr, void *arg)
701 struct idx_info *ii = (struct idx_info *)arg;
702 struct lu_idxpage *lip = &lp->lp_idx;
712 if (bytes < LIP_HDR_SIZE)
715 /* initialize the header of the new container */
716 memset(lip, 0, LIP_HDR_SIZE);
717 lip->lip_magic = LIP_MAGIC;
718 bytes -= LIP_HDR_SIZE;
720 /* client wants to the 64-bit hash value associated with each record */
721 if (!(ii->ii_flags & II_FL_NOHASH))
722 hashsize = sizeof(hash);
724 entry = lip->lip_entries;
726 /* fetch 64-bit hash value */
727 hash = iops->store(env, it);
728 ii->ii_hash_end = hash;
730 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_IDX_READ_BREAK)) {
731 if (lip->lip_nr != 0)
735 if (!(ii->ii_flags & II_FL_NOKEY)) {
736 keysize = iops->key_size(env, it);
737 if (!(ii->ii_flags & II_FL_VARKEY) &&
738 keysize != ii->ii_keysize) {
740 CERROR("%s: keysize mismatch %hu != %hu on "
742 lu_dev_name(obj->do_lu.lo_dev),
743 keysize, ii->ii_keysize,
744 PFID(lu_object_fid(&obj->do_lu)), rc);
749 /* and finally the record */
750 if (ii->ii_flags & II_FL_VARREC)
751 recsize = iops->rec_size(env, it, attr);
753 recsize = ii->ii_recsize;
755 if (bytes < hashsize + keysize + recsize) {
756 if (lip->lip_nr == 0)
757 GOTO(out, rc = -E2BIG);
761 rc = iops->rec(env, it,
762 (struct dt_rec *)(entry + hashsize + keysize),
766 memcpy(entry, &hash, hashsize);
770 key = iops->key(env, it);
771 memcpy(entry + hashsize, key, keysize);
773 /* hash/key/record successfully copied! */
775 if (unlikely(lip->lip_nr == 1 && ii->ii_count == 0))
776 ii->ii_hash_start = hash;
777 entry += hashsize + keysize + recsize;
778 bytes -= hashsize + keysize + recsize;
779 } else if (rc != -ESTALE) {
783 /* move on to the next record */
785 rc = iops->next(env, it);
786 } while (rc == -ESTALE);
791 if (rc >= 0 && lip->lip_nr > 0)
792 /* one more container */
795 /* no more entries */
796 ii->ii_hash_end = II_END_OFF;
802 * Walk index and fill lu_page containers with key/record pairs
804 * \param env - is the environment passed by the caller
805 * \param obj - is the index object to parse
806 * \param rdpg - is the lu_rdpg descriptor associated with the transfer
807 * \param filler - is the callback function responsible for filling a lu_page
808 * with key/record pairs in the format wanted by the caller.
809 * If NULL, uses dt_index_page_build
810 * \param arg - is an opaq argument passed to the filler function
812 * \retval sum (in bytes) of all filled lu_pages
813 * \retval -ve errno on failure
815 int dt_index_walk(const struct lu_env *env, struct dt_object *obj,
816 const struct lu_rdpg *rdpg, dt_index_page_build_t filler,
820 const struct dt_it_ops *iops;
821 size_t pageidx, bytes, nlupgs = 0;
825 LASSERT(rdpg->rp_pages != NULL);
826 LASSERT(obj->do_index_ops != NULL);
829 filler = dt_index_page_build;
831 bytes = rdpg->rp_count;
835 /* Iterate through index and fill containers from @rdpg */
836 iops = &obj->do_index_ops->dio_it;
837 LASSERT(iops != NULL);
838 it = iops->init(env, obj, rdpg->rp_attrs);
842 rc = iops->load(env, it, rdpg->rp_hash);
845 * Iterator didn't find record with exactly the key requested.
847 * It is currently either
849 * - positioned above record with key less than
850 * requested---skip it.
851 * - or not positioned at all (is in IAM_IT_SKEWED
852 * state)---position it on the next item.
854 rc = iops->next(env, it);
864 * Fill containers one after the other. There might be multiple
865 * containers per physical page.
867 * At this point and across for-loop:
868 * rc == 0 -> ok, proceed.
869 * rc > 0 -> end of index.
872 for (pageidx = 0; rc == 0 && bytes > 0; pageidx++) {
876 LASSERT(pageidx < rdpg->rp_npages);
877 lp = kmap(rdpg->rp_pages[pageidx]);
880 for (i = 0; i < LU_PAGE_COUNT; i++, lp++, bytes-=LU_PAGE_SIZE) {
881 rc = filler(env, obj, lp,
882 min_t(size_t, bytes, LU_PAGE_SIZE),
883 iops, it, rdpg->rp_attrs, arg);
886 /* one more lu_page */
892 kunmap(rdpg->rp_pages[i]);
900 rc = min_t(size_t, nlupgs * LU_PAGE_SIZE, rdpg->rp_count);
904 EXPORT_SYMBOL(dt_index_walk);
907 * Walk key/record pairs of an index and copy them into 4KB containers to be
908 * transferred over the network. This is the common handler for OBD_IDX_READ
911 * \param env - is the environment passed by the caller
912 * \param dev - is the dt_device storing the index
913 * \param ii - is the idx_info structure packed by the client in the
914 * OBD_IDX_READ request
915 * \param rdpg - is the lu_rdpg descriptor
917 * \retval on success, return sum (in bytes) of all filled containers
918 * \retval appropriate error otherwise.
920 int dt_index_read(const struct lu_env *env, struct dt_device *dev,
921 struct idx_info *ii, const struct lu_rdpg *rdpg)
923 const struct dt_index_features *feat;
924 struct dt_object *obj;
929 * rp_count shouldn't be null and should be a multiple of the container
932 if (rdpg->rp_count == 0 || (rdpg->rp_count & (LU_PAGE_SIZE - 1)) != 0)
935 if (!fid_is_quota(&ii->ii_fid) && !fid_is_layout_rbtree(&ii->ii_fid) &&
936 !fid_is_norm(&ii->ii_fid))
939 /* lookup index object subject to the transfer */
940 obj = dt_locate(env, dev, &ii->ii_fid);
942 RETURN(PTR_ERR(obj));
943 if (dt_object_exists(obj) == 0)
944 GOTO(out, rc = -ENOENT);
946 /* fetch index features associated with index object */
947 feat = dt_index_feat_select(fid_seq(&ii->ii_fid),
948 lu_object_attr(&obj->do_lu));
950 GOTO(out, rc = PTR_ERR(feat));
952 /* load index feature if not done already */
953 if (obj->do_index_ops == NULL) {
954 rc = obj->do_ops->do_index_try(env, obj, feat);
959 /* fill ii_flags with supported index features */
960 ii->ii_flags &= (II_FL_NOHASH | II_FL_NOKEY | II_FL_VARKEY |
963 if (!(feat->dif_flags & DT_IND_VARKEY))
964 ii->ii_keysize = feat->dif_keysize_max;
966 if (!(feat->dif_flags & DT_IND_VARREC))
967 ii->ii_recsize = feat->dif_recsize_max;
969 if (feat->dif_flags & DT_IND_NONUNQ)
970 /* key isn't necessarily unique */
971 ii->ii_flags |= II_FL_NONUNQ;
973 if (!fid_is_layout_rbtree(&ii->ii_fid)) {
974 dt_read_lock(env, obj, 0);
975 /* fetch object version before walking the index */
976 ii->ii_version = dt_version_get(env, obj);
979 /* walk the index and fill lu_idxpages with key/record pairs */
980 rc = dt_index_walk(env, obj, rdpg, dt_index_page_build, ii);
981 if (!fid_is_layout_rbtree(&ii->ii_fid))
982 dt_read_unlock(env, obj);
986 LASSERT(ii->ii_count == 0);
987 ii->ii_hash_end = II_END_OFF;
992 dt_object_put(env, obj);
995 EXPORT_SYMBOL(dt_index_read);
997 #ifdef CONFIG_PROC_FS
998 int lprocfs_dt_blksize_seq_show(struct seq_file *m, void *v)
1000 struct dt_device *dt = m->private;
1001 struct obd_statfs osfs;
1003 int rc = dt_statfs(NULL, dt, &osfs);
1005 seq_printf(m, "%u\n", (unsigned) osfs.os_bsize);
1008 EXPORT_SYMBOL(lprocfs_dt_blksize_seq_show);
1010 int lprocfs_dt_kbytestotal_seq_show(struct seq_file *m, void *v)
1012 struct dt_device *dt = m->private;
1013 struct obd_statfs osfs;
1015 int rc = dt_statfs(NULL, dt, &osfs);
1017 __u32 blk_size = osfs.os_bsize >> 10;
1018 __u64 result = osfs.os_blocks;
1020 while (blk_size >>= 1)
1023 seq_printf(m, "%llu\n", result);
1027 EXPORT_SYMBOL(lprocfs_dt_kbytestotal_seq_show);
1029 int lprocfs_dt_kbytesfree_seq_show(struct seq_file *m, void *v)
1031 struct dt_device *dt = m->private;
1032 struct obd_statfs osfs;
1034 int rc = dt_statfs(NULL, dt, &osfs);
1036 __u32 blk_size = osfs.os_bsize >> 10;
1037 __u64 result = osfs.os_bfree;
1039 while (blk_size >>= 1)
1042 seq_printf(m, "%llu\n", result);
1046 EXPORT_SYMBOL(lprocfs_dt_kbytesfree_seq_show);
1048 int lprocfs_dt_kbytesavail_seq_show(struct seq_file *m, void *v)
1050 struct dt_device *dt = m->private;
1051 struct obd_statfs osfs;
1053 int rc = dt_statfs(NULL, dt, &osfs);
1055 __u32 blk_size = osfs.os_bsize >> 10;
1056 __u64 result = osfs.os_bavail;
1058 while (blk_size >>= 1)
1061 seq_printf(m, "%llu\n", result);
1065 EXPORT_SYMBOL(lprocfs_dt_kbytesavail_seq_show);
1067 int lprocfs_dt_filestotal_seq_show(struct seq_file *m, void *v)
1069 struct dt_device *dt = m->private;
1070 struct obd_statfs osfs;
1072 int rc = dt_statfs(NULL, dt, &osfs);
1074 seq_printf(m, "%llu\n", osfs.os_files);
1077 EXPORT_SYMBOL(lprocfs_dt_filestotal_seq_show);
1079 int lprocfs_dt_filesfree_seq_show(struct seq_file *m, void *v)
1081 struct dt_device *dt = m->private;
1082 struct obd_statfs osfs;
1084 int rc = dt_statfs(NULL, dt, &osfs);
1086 seq_printf(m, "%llu\n", osfs.os_ffree);
1089 EXPORT_SYMBOL(lprocfs_dt_filesfree_seq_show);
1091 #endif /* CONFIG_PROC_FS */
1093 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
1096 struct dt_device *dt = container_of(kobj, struct dt_device,
1098 struct lu_device *lu = dt2lu_dev(dt);
1103 return sprintf(buf, "%s\n", lu->ld_obd->obd_uuid.uuid);
1105 LUSTRE_RO_ATTR(uuid);
1107 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
1110 struct dt_device *dt = container_of(kobj, struct dt_device,
1112 struct obd_statfs osfs;
1115 rc = dt_statfs(NULL, dt, &osfs);
1119 return sprintf(buf, "%u\n", (unsigned) osfs.os_bsize);
1121 LUSTRE_RO_ATTR(blocksize);
1123 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
1126 struct dt_device *dt = container_of(kobj, struct dt_device,
1128 struct obd_statfs osfs;
1133 rc = dt_statfs(NULL, dt, &osfs);
1137 blk_size = osfs.os_bsize >> 10;
1138 result = osfs.os_blocks;
1140 while (blk_size >>= 1)
1143 return sprintf(buf, "%llu\n", result);
1145 LUSTRE_RO_ATTR(kbytestotal);
1147 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
1150 struct dt_device *dt = container_of(kobj, struct dt_device,
1152 struct obd_statfs osfs;
1157 rc = dt_statfs(NULL, dt, &osfs);
1161 blk_size = osfs.os_bsize >> 10;
1162 result = osfs.os_bfree;
1164 while (blk_size >>= 1)
1167 return sprintf(buf, "%llu\n", result);
1169 LUSTRE_RO_ATTR(kbytesfree);
1171 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
1174 struct dt_device *dt = container_of(kobj, struct dt_device,
1176 struct obd_statfs osfs;
1181 rc = dt_statfs(NULL, dt, &osfs);
1185 blk_size = osfs.os_bsize >> 10;
1186 result = osfs.os_bavail;
1188 while (blk_size >>= 1)
1191 return sprintf(buf, "%llu\n", result);
1193 LUSTRE_RO_ATTR(kbytesavail);
1195 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
1198 struct dt_device *dt = container_of(kobj, struct dt_device,
1200 struct obd_statfs osfs;
1203 rc = dt_statfs(NULL, dt, &osfs);
1207 return sprintf(buf, "%llu\n", osfs.os_files);
1209 LUSTRE_RO_ATTR(filestotal);
1211 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
1214 struct dt_device *dt = container_of(kobj, struct dt_device,
1216 struct obd_statfs osfs;
1219 rc = dt_statfs(NULL, dt, &osfs);
1223 return sprintf(buf, "%llu\n", osfs.os_ffree);
1225 LUSTRE_RO_ATTR(filesfree);
1227 static const struct attribute *dt_def_attrs[] = {
1228 &lustre_attr_uuid.attr,
1229 &lustre_attr_blocksize.attr,
1230 &lustre_attr_kbytestotal.attr,
1231 &lustre_attr_kbytesfree.attr,
1232 &lustre_attr_kbytesavail.attr,
1233 &lustre_attr_filestotal.attr,
1234 &lustre_attr_filesfree.attr,
1238 static void dt_sysfs_release(struct kobject *kobj)
1240 struct dt_device *dt = container_of(kobj, struct dt_device,
1243 debugfs_remove_recursive(dt->dd_debugfs_entry);
1244 dt->dd_debugfs_entry = NULL;
1246 complete(&dt->dd_kobj_unregister);
1249 int dt_tunables_fini(struct dt_device *dt)
1254 if (dt->dd_def_attrs) {
1255 sysfs_remove_files(&dt->dd_kobj, dt->dd_def_attrs);
1256 kobject_put(&dt->dd_kobj);
1257 wait_for_completion(&dt->dd_kobj_unregister);
1262 EXPORT_SYMBOL(dt_tunables_fini);
1264 int dt_tunables_init(struct dt_device *dt, struct obd_type *type,
1265 const char *name, struct ldebugfs_vars *list)
1269 dt->dd_ktype.sysfs_ops = &lustre_sysfs_ops;
1270 dt->dd_ktype.release = dt_sysfs_release;
1272 init_completion(&dt->dd_kobj_unregister);
1273 rc = kobject_init_and_add(&dt->dd_kobj, &dt->dd_ktype, &type->typ_kobj,
1278 dt->dd_def_attrs = dt_def_attrs;
1280 rc = sysfs_create_files(&dt->dd_kobj, dt->dd_def_attrs);
1282 kobject_put(&dt->dd_kobj);
1283 dt->dd_def_attrs = NULL;
1288 * No need to register debugfs if no enteries. This allows us to
1289 * choose between using dt_device or obd_device for debugfs.
1294 dt->dd_debugfs_entry = debugfs_create_dir(name,
1295 type->typ_debugfs_entry);
1296 ldebugfs_add_vars(dt->dd_debugfs_entry, list, dt);
1300 EXPORT_SYMBOL(dt_tunables_init);