4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/obdclass/dt_object.c
35 * Generic functions from dt_object.h
37 * Author: Nikita Danilov <nikita@clusterfs.com>
40 #define DEBUG_SUBSYSTEM S_CLASS
42 #include <linux/list.h>
43 #include <obd_class.h>
44 #include <dt_object.h>
46 #include <lustre_fid.h>
47 #include <lustre_nodemap.h>
48 #include <lustre_quota.h>
49 #include <lustre_lfsck.h>
51 /* context key constructor/destructor: dt_global_key_init, dt_global_key_fini */
52 LU_KEY_INIT(dt_global, struct dt_thread_info);
53 LU_KEY_FINI(dt_global, struct dt_thread_info);
55 struct lu_context_key dt_key = {
56 .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD | LCT_MG_THREAD | LCT_LOCAL,
57 .lct_init = dt_global_key_init,
58 .lct_fini = dt_global_key_fini
62 * no lock is necessary to protect the list, because call-backs
63 * are added during system startup. Please refer to "struct dt_device".
65 void dt_txn_callback_add(struct dt_device *dev, struct dt_txn_callback *cb)
67 list_add(&cb->dtc_linkage, &dev->dd_txn_callbacks);
69 EXPORT_SYMBOL(dt_txn_callback_add);
71 void dt_txn_callback_del(struct dt_device *dev, struct dt_txn_callback *cb)
73 list_del_init(&cb->dtc_linkage);
75 EXPORT_SYMBOL(dt_txn_callback_del);
77 int dt_txn_hook_start(const struct lu_env *env,
78 struct dt_device *dev, struct thandle *th)
81 struct dt_txn_callback *cb;
86 list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
87 struct thandle *dtc_th = th;
89 if (cb->dtc_txn_start == NULL ||
90 !(cb->dtc_tag & env->le_ctx.lc_tags))
94 * Usually dt_txn_hook_start is called from bottom device,
95 * and if the thandle has th_top, then we need use top
96 * thandle for the callback in the top thandle layer
98 if (th->th_top != NULL)
101 rc = cb->dtc_txn_start(env, dtc_th, cb->dtc_cookie);
107 EXPORT_SYMBOL(dt_txn_hook_start);
109 int dt_txn_hook_stop(const struct lu_env *env, struct thandle *th)
111 struct dt_device *dev = th->th_dev;
112 struct dt_txn_callback *cb;
118 if (OBD_FAIL_CHECK(OBD_FAIL_DT_TXN_STOP))
121 list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
122 struct thandle *dtc_th = th;
124 if (cb->dtc_txn_stop == NULL ||
125 !(cb->dtc_tag & env->le_ctx.lc_tags))
129 * Usually dt_txn_hook_stop is called from bottom device,
130 * and if the thandle has th_top, then we need use top
131 * thandle for the callback in the top thandle layer
133 if (th->th_top != NULL)
136 rc = cb->dtc_txn_stop(env, dtc_th, cb->dtc_cookie);
142 EXPORT_SYMBOL(dt_txn_hook_stop);
144 void dt_txn_hook_commit(struct thandle *th)
146 struct dt_txn_callback *cb;
151 list_for_each_entry(cb, &th->th_dev->dd_txn_callbacks,
154 * Right now, the bottom device (OSD) will use this hook
155 * commit to notify OSP, so we do not check and replace
156 * the thandle to top thandle now
158 if (cb->dtc_txn_commit)
159 cb->dtc_txn_commit(th, cb->dtc_cookie);
162 EXPORT_SYMBOL(dt_txn_hook_commit);
164 int dt_device_init(struct dt_device *dev, struct lu_device_type *t)
166 INIT_LIST_HEAD(&dev->dd_txn_callbacks);
167 return lu_device_init(&dev->dd_lu_dev, t);
169 EXPORT_SYMBOL(dt_device_init);
171 void dt_device_fini(struct dt_device *dev)
173 lu_device_fini(&dev->dd_lu_dev);
175 EXPORT_SYMBOL(dt_device_fini);
177 int dt_object_init(struct dt_object *obj,
178 struct lu_object_header *h, struct lu_device *d)
181 return lu_object_init(&obj->do_lu, h, d);
183 EXPORT_SYMBOL(dt_object_init);
185 void dt_object_fini(struct dt_object *obj)
187 lu_object_fini(&obj->do_lu);
189 EXPORT_SYMBOL(dt_object_fini);
191 int dt_try_as_dir(const struct lu_env *env, struct dt_object *obj)
193 if (obj->do_index_ops == NULL)
194 obj->do_ops->do_index_try(env, obj, &dt_directory_features);
195 return obj->do_index_ops != NULL;
197 EXPORT_SYMBOL(dt_try_as_dir);
199 enum dt_format_type dt_mode_to_dft(__u32 mode)
201 enum dt_format_type result;
203 switch (mode & S_IFMT) {
208 result = DFT_REGULAR;
220 LASSERTF(0, "invalid mode %o\n", mode);
221 result = 0; /* Just for satisfying compiler. */
226 EXPORT_SYMBOL(dt_mode_to_dft);
229 * lookup fid for object named \a name in directory \a dir.
232 int dt_lookup_dir(const struct lu_env *env, struct dt_object *dir,
233 const char *name, struct lu_fid *fid)
235 if (dt_try_as_dir(env, dir))
236 return dt_lookup(env, dir, (struct dt_rec *)fid,
237 (const struct dt_key *)name);
240 EXPORT_SYMBOL(dt_lookup_dir);
243 * this differs from dt_locate by top_dev as parameter
244 * but not one from lu_site
246 struct dt_object *dt_locate_at(const struct lu_env *env,
247 struct dt_device *dev,
248 const struct lu_fid *fid,
249 struct lu_device *top_dev,
250 const struct lu_object_conf *conf)
252 struct lu_object *lo;
255 lo = lu_object_find_at(env, top_dev, fid, conf);
257 return ERR_PTR(PTR_ERR(lo));
261 list_for_each_entry(n, &lo->lo_header->loh_layers, lo_linkage) {
262 if (n->lo_dev == &dev->dd_lu_dev)
263 return container_of0(n, struct dt_object, do_lu);
266 lu_object_put(env, lo);
267 return ERR_PTR(-ENOENT);
269 EXPORT_SYMBOL(dt_locate_at);
272 * find an object named \a entry in given \a dfh->dfh_o directory.
274 static int dt_find_entry(const struct lu_env *env, const char *entry,
277 struct dt_find_hint *dfh = data;
278 struct dt_device *dt = dfh->dfh_dt;
279 struct lu_fid *fid = dfh->dfh_fid;
280 struct dt_object *obj = dfh->dfh_o;
283 rc = dt_lookup_dir(env, obj, entry, fid);
284 dt_object_put(env, obj);
286 obj = dt_locate(env, dt, fid);
296 * Abstract function which parses path name. This function feeds
297 * path component to \a entry_func.
299 int dt_path_parser(const struct lu_env *env,
300 char *path, dt_entry_func_t entry_func,
307 e = strsep(&path, "/");
312 if (!path || path[0] == '\0')
316 rc = entry_func(env, e, data);
325 dt_store_resolve(const struct lu_env *env, struct dt_device *dt,
326 const char *path, struct lu_fid *fid)
328 struct dt_thread_info *info = dt_info(env);
329 struct dt_find_hint *dfh = &info->dti_dfh;
330 struct dt_object *obj;
337 strlcpy(info->dti_buf, path, sizeof(info->dti_buf));
339 result = dt->dd_ops->dt_root_get(env, dt, fid);
341 obj = dt_locate(env, dt, fid);
344 result = dt_path_parser(env, info->dti_buf,
347 obj = ERR_PTR(result);
352 obj = ERR_PTR(result);
/* Look up @name in directory @p and return the located object (or an
 * ERR_PTR on lookup/locate failure). *fid receives the child's fid. */
static struct dt_object *dt_reg_open(const struct lu_env *env,
				     struct dt_device *dt,
				     struct dt_object *p,
				     const char *name,
				     struct lu_fid *fid)
{
	struct dt_object *o;
	int result;

	result = dt_lookup_dir(env, p, name, fid);
	if (result == 0)
		o = dt_locate(env, dt, fid);
	else
		o = ERR_PTR(result);

	return o;
}
/**
 * Open dt object named \a filename from \a dirname directory.
 * \param  dt       dt device
 * \param  fid      on success, object fid is stored in *fid
 * \retval the file object (referenced) or an ERR_PTR.
 */
struct dt_object *dt_store_open(const struct lu_env *env, struct dt_device *dt,
				const char *dirname, const char *filename,
				struct lu_fid *fid)
{
	struct dt_object *file;
	struct dt_object *dir;

	dir = dt_store_resolve(env, dt, dirname, fid);
	if (!IS_ERR(dir)) {
		file = dt_reg_open(env, dt, dir, filename, fid);
		dt_object_put(env, dir);
	} else {
		file = dir;
	}

	return file;
}
398 struct dt_object *dt_find_or_create(const struct lu_env *env,
399 struct dt_device *dt,
400 const struct lu_fid *fid,
401 struct dt_object_format *dof,
404 struct dt_object *dto;
410 dto = dt_locate(env, dt, fid);
414 LASSERT(dto != NULL);
415 if (dt_object_exists(dto))
418 th = dt_trans_create(env, dt);
420 GOTO(out, rc = PTR_ERR(th));
422 rc = dt_declare_create(env, dto, at, NULL, dof, th);
424 GOTO(trans_stop, rc);
426 rc = dt_trans_start_local(env, dt, th);
428 GOTO(trans_stop, rc);
430 dt_write_lock(env, dto, 0);
431 if (dt_object_exists(dto))
432 GOTO(unlock, rc = 0);
434 CDEBUG(D_OTHER, "create new object "DFID"\n", PFID(fid));
436 rc = dt_create(env, dto, at, NULL, dof, th);
439 LASSERT(dt_object_exists(dto));
441 dt_write_unlock(env, dto);
443 dt_trans_stop(env, dt, th);
446 dt_object_put(env, dto);
452 EXPORT_SYMBOL(dt_find_or_create);
454 /* dt class init function. */
455 int dt_global_init(void)
459 LU_CONTEXT_KEY_INIT(&dt_key);
460 result = lu_context_key_register(&dt_key);
464 void dt_global_fini(void)
466 lu_context_key_degister(&dt_key);
470 * Generic read helper. May return an error for partial reads.
472 * \param env lustre environment
473 * \param dt object to be read
474 * \param buf lu_buf to be filled, with buffer pointer and length
475 * \param pos position to start reading, updated as data is read
477 * \retval real size of data read
478 * \retval -ve errno on failure
480 int dt_read(const struct lu_env *env, struct dt_object *dt,
481 struct lu_buf *buf, loff_t *pos)
483 LASSERTF(dt != NULL, "dt is NULL when we want to read record\n");
484 return dt->do_body_ops->dbo_read(env, dt, buf, pos);
486 EXPORT_SYMBOL(dt_read);
489 * Read structures of fixed size from storage. Unlike dt_read(), using
490 * dt_record_read() will return an error for partial reads.
492 * \param env lustre environment
493 * \param dt object to be read
494 * \param buf lu_buf to be filled, with buffer pointer and length
495 * \param pos position to start reading, updated as data is read
497 * \retval 0 on successfully reading full buffer
498 * \retval -EFAULT on short read
499 * \retval -ve errno on failure
501 int dt_record_read(const struct lu_env *env, struct dt_object *dt,
502 struct lu_buf *buf, loff_t *pos)
506 LASSERTF(dt != NULL, "dt is NULL when we want to read record\n");
508 size = dt->do_body_ops->dbo_read(env, dt, buf, pos);
511 return (size == (ssize_t)buf->lb_len) ? 0 : -EFAULT;
513 EXPORT_SYMBOL(dt_record_read);
515 int dt_record_write(const struct lu_env *env, struct dt_object *dt,
516 const struct lu_buf *buf, loff_t *pos, struct thandle *th)
520 LASSERTF(dt != NULL, "dt is NULL when we want to write record\n");
522 LASSERT(dt->do_body_ops);
523 LASSERT(dt->do_body_ops->dbo_write);
525 size = dt->do_body_ops->dbo_write(env, dt, buf, pos, th);
528 return (size == (ssize_t)buf->lb_len) ? 0 : -EFAULT;
530 EXPORT_SYMBOL(dt_record_write);
532 int dt_declare_version_set(const struct lu_env *env, struct dt_object *o,
536 char *xname = XATTR_NAME_VERSION;
540 vbuf.lb_len = sizeof(dt_obj_version_t);
541 return dt_declare_xattr_set(env, o, &vbuf, xname, 0, th);
544 EXPORT_SYMBOL(dt_declare_version_set);
546 void dt_version_set(const struct lu_env *env, struct dt_object *o,
547 dt_obj_version_t version, struct thandle *th)
550 char *xname = XATTR_NAME_VERSION;
554 vbuf.lb_buf = &version;
555 vbuf.lb_len = sizeof(version);
557 rc = dt_xattr_set(env, o, &vbuf, xname, 0, th);
559 CDEBUG(D_INODE, "Can't set version, rc %d\n", rc);
562 EXPORT_SYMBOL(dt_version_set);
564 dt_obj_version_t dt_version_get(const struct lu_env *env, struct dt_object *o)
567 char *xname = XATTR_NAME_VERSION;
568 dt_obj_version_t version;
572 vbuf.lb_buf = &version;
573 vbuf.lb_len = sizeof(version);
574 rc = dt_xattr_get(env, o, &vbuf, xname);
575 if (rc != sizeof(version)) {
576 CDEBUG(D_INODE, "Can't get version, rc %d\n", rc);
581 EXPORT_SYMBOL(dt_version_get);
583 /* list of all supported index types */
586 const struct dt_index_features dt_directory_features;
587 EXPORT_SYMBOL(dt_directory_features);
590 const struct dt_index_features dt_otable_features;
591 EXPORT_SYMBOL(dt_otable_features);
593 /* lfsck layout orphan */
594 const struct dt_index_features dt_lfsck_layout_orphan_features = {
596 .dif_keysize_min = sizeof(struct lu_fid),
597 .dif_keysize_max = sizeof(struct lu_fid),
598 .dif_recsize_min = sizeof(struct lu_orphan_rec_v3),
599 .dif_recsize_max = sizeof(struct lu_orphan_rec_v3),
602 EXPORT_SYMBOL(dt_lfsck_layout_orphan_features);
604 /* lfsck layout dangling */
605 const struct dt_index_features dt_lfsck_layout_dangling_features = {
606 .dif_flags = DT_IND_UPDATE,
607 .dif_keysize_min = sizeof(struct lfsck_layout_dangling_key),
608 .dif_keysize_max = sizeof(struct lfsck_layout_dangling_key),
609 .dif_recsize_min = sizeof(struct lu_fid),
610 .dif_recsize_max = sizeof(struct lu_fid),
613 EXPORT_SYMBOL(dt_lfsck_layout_dangling_features);
615 /* lfsck namespace */
616 const struct dt_index_features dt_lfsck_namespace_features = {
617 .dif_flags = DT_IND_UPDATE,
618 .dif_keysize_min = sizeof(struct lu_fid),
619 .dif_keysize_max = sizeof(struct lu_fid),
620 .dif_recsize_min = sizeof(__u8),
621 .dif_recsize_max = sizeof(__u8),
624 EXPORT_SYMBOL(dt_lfsck_namespace_features);
626 /* accounting indexes */
627 const struct dt_index_features dt_acct_features = {
628 .dif_flags = DT_IND_UPDATE,
629 .dif_keysize_min = sizeof(__u64), /* 64-bit uid/gid */
630 .dif_keysize_max = sizeof(__u64), /* 64-bit uid/gid */
631 .dif_recsize_min = sizeof(struct lquota_acct_rec), /* 16 bytes */
632 .dif_recsize_max = sizeof(struct lquota_acct_rec), /* 16 bytes */
635 EXPORT_SYMBOL(dt_acct_features);
637 /* global quota files */
638 const struct dt_index_features dt_quota_glb_features = {
639 .dif_flags = DT_IND_UPDATE,
640 /* a different key would have to be used for per-directory quota */
641 .dif_keysize_min = sizeof(__u64), /* 64-bit uid/gid */
642 .dif_keysize_max = sizeof(__u64), /* 64-bit uid/gid */
643 .dif_recsize_min = sizeof(struct lquota_glb_rec), /* 32 bytes */
644 .dif_recsize_max = sizeof(struct lquota_glb_rec), /* 32 bytes */
647 EXPORT_SYMBOL(dt_quota_glb_features);
649 /* slave quota files */
650 const struct dt_index_features dt_quota_slv_features = {
651 .dif_flags = DT_IND_UPDATE,
652 /* a different key would have to be used for per-directory quota */
653 .dif_keysize_min = sizeof(__u64), /* 64-bit uid/gid */
654 .dif_keysize_max = sizeof(__u64), /* 64-bit uid/gid */
655 .dif_recsize_min = sizeof(struct lquota_slv_rec), /* 8 bytes */
656 .dif_recsize_max = sizeof(struct lquota_slv_rec), /* 8 bytes */
659 EXPORT_SYMBOL(dt_quota_slv_features);
661 /* nodemap files, nodemap_rec size asserted in nodemap_storage.c */
662 const struct dt_index_features dt_nodemap_features = {
663 .dif_flags = DT_IND_UPDATE,
664 .dif_keysize_min = sizeof(__u64), /* 64-bit nodemap/record id */
665 .dif_keysize_max = sizeof(__u64), /* 64-bit nodemap/record id */
666 .dif_recsize_min = sizeof(union nodemap_rec), /* 32 bytes */
667 .dif_recsize_max = sizeof(union nodemap_rec), /* 32 bytes */
670 EXPORT_SYMBOL(dt_nodemap_features);
673 * helper function returning what dt_index_features structure should be used
674 * based on the FID sequence. This is used by OBD_IDX_READ RPC
676 static inline const struct dt_index_features *dt_index_feat_select(__u64 seq,
679 if (seq == FID_SEQ_QUOTA_GLB) {
680 /* global quota index */
682 /* global quota index should be a regular file */
683 return ERR_PTR(-ENOENT);
684 return &dt_quota_glb_features;
685 } else if (seq == FID_SEQ_QUOTA) {
686 /* quota slave index */
688 /* slave index should be a regular file */
689 return ERR_PTR(-ENOENT);
690 return &dt_quota_slv_features;
691 } else if (seq == FID_SEQ_LAYOUT_RBTREE){
692 return &dt_lfsck_layout_orphan_features;
693 } else if (seq >= FID_SEQ_NORMAL) {
694 /* object is part of the namespace, verify that it is a
697 /* sorry, we can only deal with directory */
698 return ERR_PTR(-ENOTDIR);
699 return &dt_directory_features;
702 return ERR_PTR(-EOPNOTSUPP);
706 * Fill a lu_idxpage with key/record pairs read for transfer via OBD_IDX_READ
709 * \param env - is the environment passed by the caller
710 * \param lp - is a pointer to the lu_page to fill
711 * \param nob - is the maximum number of bytes that should be copied
712 * \param iops - is the index operation vector associated with the index object
713 * \param it - is a pointer to the current iterator
714 * \param attr - is the index attribute to pass to iops->rec()
715 * \param arg - is a pointer to the idx_info structure
717 static int dt_index_page_build(const struct lu_env *env, union lu_page *lp,
718 size_t nob, const struct dt_it_ops *iops,
719 struct dt_it *it, __u32 attr, void *arg)
721 struct idx_info *ii = (struct idx_info *)arg;
722 struct lu_idxpage *lip = &lp->lp_idx;
728 if (nob < LIP_HDR_SIZE)
731 /* initialize the header of the new container */
732 memset(lip, 0, LIP_HDR_SIZE);
733 lip->lip_magic = LIP_MAGIC;
736 /* compute size needed to store a key/record pair */
737 size = ii->ii_recsize + ii->ii_keysize;
738 if ((ii->ii_flags & II_FL_NOHASH) == 0)
739 /* add hash if the client wants it */
740 size += sizeof(__u64);
742 entry = lip->lip_entries;
744 char *tmp_entry = entry;
750 /* fetch 64-bit hash value */
751 hash = iops->store(env, it);
752 ii->ii_hash_end = hash;
754 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_IDX_READ_BREAK)) {
755 if (lip->lip_nr != 0)
760 if (lip->lip_nr == 0)
761 GOTO(out, rc = -EINVAL);
765 if (!(ii->ii_flags & II_FL_NOHASH)) {
767 * client wants to the 64-bit hash value associated
770 memcpy(tmp_entry, &hash, sizeof(hash));
771 tmp_entry += sizeof(hash);
774 if (ii->ii_flags & II_FL_VARKEY)
775 keysize = iops->key_size(env, it);
777 keysize = ii->ii_keysize;
779 if (!(ii->ii_flags & II_FL_NOKEY)) {
780 /* then the key value */
781 key = iops->key(env, it);
782 memcpy(tmp_entry, key, keysize);
783 tmp_entry += keysize;
786 /* and finally the record */
787 rc = iops->rec(env, it, (struct dt_rec *)tmp_entry, attr);
792 /* hash/key/record successfully copied! */
794 if (unlikely(lip->lip_nr == 1 && ii->ii_count == 0))
795 ii->ii_hash_start = hash;
797 if (ii->ii_flags & II_FL_VARREC)
798 recsize = iops->rec_size(env, it, attr);
800 recsize = ii->ii_recsize;
802 entry = tmp_entry + recsize;
806 /* move on to the next record */
808 rc = iops->next(env, it);
809 } while (rc == -ESTALE);
815 if (rc >= 0 && lip->lip_nr > 0)
816 /* one more container */
819 /* no more entries */
820 ii->ii_hash_end = II_END_OFF;
826 * Walk index and fill lu_page containers with key/record pairs
828 * \param env - is the environment passed by the caller
829 * \param obj - is the index object to parse
830 * \param rdpg - is the lu_rdpg descriptor associated with the transfer
831 * \param filler - is the callback function responsible for filling a lu_page
832 * with key/record pairs in the format wanted by the caller.
833 * If NULL, uses dt_index_page_build
834 * \param arg - is an opaq argument passed to the filler function
836 * \retval sum (in bytes) of all filled lu_pages
837 * \retval -ve errno on failure
839 int dt_index_walk(const struct lu_env *env, struct dt_object *obj,
840 const struct lu_rdpg *rdpg, dt_index_page_build_t filler,
844 const struct dt_it_ops *iops;
845 size_t pageidx, nob, nlupgs = 0;
849 LASSERT(rdpg->rp_pages != NULL);
850 LASSERT(obj->do_index_ops != NULL);
853 filler = dt_index_page_build;
855 nob = rdpg->rp_count;
859 /* Iterate through index and fill containers from @rdpg */
860 iops = &obj->do_index_ops->dio_it;
861 LASSERT(iops != NULL);
862 it = iops->init(env, obj, rdpg->rp_attrs);
866 rc = iops->load(env, it, rdpg->rp_hash);
869 * Iterator didn't find record with exactly the key requested.
871 * It is currently either
873 * - positioned above record with key less than
874 * requested---skip it.
875 * - or not positioned at all (is in IAM_IT_SKEWED
876 * state)---position it on the next item.
878 rc = iops->next(env, it);
888 * Fill containers one after the other. There might be multiple
889 * containers per physical page.
891 * At this point and across for-loop:
892 * rc == 0 -> ok, proceed.
893 * rc > 0 -> end of index.
896 for (pageidx = 0; rc == 0 && nob > 0; pageidx++) {
900 LASSERT(pageidx < rdpg->rp_npages);
901 lp = kmap(rdpg->rp_pages[pageidx]);
904 for (i = 0; i < LU_PAGE_COUNT; i++, lp++, nob -= LU_PAGE_SIZE) {
905 rc = filler(env, lp, min_t(size_t, nob, LU_PAGE_SIZE),
906 iops, it, rdpg->rp_attrs, arg);
909 /* one more lu_page */
915 kunmap(rdpg->rp_pages[i]);
923 rc = min_t(size_t, nlupgs * LU_PAGE_SIZE, rdpg->rp_count);
927 EXPORT_SYMBOL(dt_index_walk);
930 * Walk key/record pairs of an index and copy them into 4KB containers to be
931 * transferred over the network. This is the common handler for OBD_IDX_READ
934 * \param env - is the environment passed by the caller
935 * \param dev - is the dt_device storing the index
936 * \param ii - is the idx_info structure packed by the client in the
937 * OBD_IDX_READ request
938 * \param rdpg - is the lu_rdpg descriptor
940 * \retval on success, return sum (in bytes) of all filled containers
941 * \retval appropriate error otherwise.
943 int dt_index_read(const struct lu_env *env, struct dt_device *dev,
944 struct idx_info *ii, const struct lu_rdpg *rdpg)
946 const struct dt_index_features *feat;
947 struct dt_object *obj;
952 * rp_count shouldn't be null and should be a multiple of the container
955 if (rdpg->rp_count == 0 || (rdpg->rp_count & (LU_PAGE_SIZE - 1)) != 0)
958 if (!fid_is_quota(&ii->ii_fid) && !fid_is_layout_rbtree(&ii->ii_fid) &&
959 !fid_is_norm(&ii->ii_fid))
962 /* lookup index object subject to the transfer */
963 obj = dt_locate(env, dev, &ii->ii_fid);
965 RETURN(PTR_ERR(obj));
966 if (dt_object_exists(obj) == 0)
967 GOTO(out, rc = -ENOENT);
969 /* fetch index features associated with index object */
970 feat = dt_index_feat_select(fid_seq(&ii->ii_fid),
971 lu_object_attr(&obj->do_lu));
973 GOTO(out, rc = PTR_ERR(feat));
975 /* load index feature if not done already */
976 if (obj->do_index_ops == NULL) {
977 rc = obj->do_ops->do_index_try(env, obj, feat);
982 /* fill ii_flags with supported index features */
983 ii->ii_flags &= (II_FL_NOHASH | II_FL_NOKEY | II_FL_VARKEY |
986 if (!(feat->dif_flags & DT_IND_VARKEY))
987 ii->ii_keysize = feat->dif_keysize_max;
989 if (!(feat->dif_flags & DT_IND_VARREC))
990 ii->ii_recsize = feat->dif_recsize_max;
992 if (feat->dif_flags & DT_IND_NONUNQ)
993 /* key isn't necessarily unique */
994 ii->ii_flags |= II_FL_NONUNQ;
996 if (!fid_is_layout_rbtree(&ii->ii_fid)) {
997 dt_read_lock(env, obj, 0);
998 /* fetch object version before walking the index */
999 ii->ii_version = dt_version_get(env, obj);
1002 /* walk the index and fill lu_idxpages with key/record pairs */
1003 rc = dt_index_walk(env, obj, rdpg, dt_index_page_build, ii);
1004 if (!fid_is_layout_rbtree(&ii->ii_fid))
1005 dt_read_unlock(env, obj);
1008 /* index is empty */
1009 LASSERT(ii->ii_count == 0);
1010 ii->ii_hash_end = II_END_OFF;
1015 dt_object_put(env, obj);
1018 EXPORT_SYMBOL(dt_index_read);
#ifdef CONFIG_PROC_FS
/* procfs helper: show the device block size from statfs. */
int lprocfs_dt_blksize_seq_show(struct seq_file *m, void *v)
{
	struct dt_device *dt = m->private;
	struct obd_statfs osfs;
	int rc = dt_statfs(NULL, dt, &osfs);

	if (rc == 0)
		seq_printf(m, "%u\n", (unsigned) osfs.os_bsize);
	return rc;
}
EXPORT_SYMBOL(lprocfs_dt_blksize_seq_show);

/* procfs helper: total space in kilobytes (blocks scaled by block size). */
int lprocfs_dt_kbytestotal_seq_show(struct seq_file *m, void *v)
{
	struct dt_device *dt = m->private;
	struct obd_statfs osfs;
	int rc = dt_statfs(NULL, dt, &osfs);

	if (rc == 0) {
		__u32 blk_size = osfs.os_bsize >> 10;
		__u64 result = osfs.os_blocks;

		/* multiply block count by (block size in KiB) via shifts */
		while (blk_size >>= 1)
			result <<= 1;

		seq_printf(m, "%llu\n", result);
	}
	return rc;
}
EXPORT_SYMBOL(lprocfs_dt_kbytestotal_seq_show);

/* procfs helper: free space in kilobytes. */
int lprocfs_dt_kbytesfree_seq_show(struct seq_file *m, void *v)
{
	struct dt_device *dt = m->private;
	struct obd_statfs osfs;
	int rc = dt_statfs(NULL, dt, &osfs);

	if (rc == 0) {
		__u32 blk_size = osfs.os_bsize >> 10;
		__u64 result = osfs.os_bfree;

		while (blk_size >>= 1)
			result <<= 1;

		seq_printf(m, "%llu\n", result);
	}
	return rc;
}
EXPORT_SYMBOL(lprocfs_dt_kbytesfree_seq_show);

/* procfs helper: space available to unprivileged users, in kilobytes. */
int lprocfs_dt_kbytesavail_seq_show(struct seq_file *m, void *v)
{
	struct dt_device *dt = m->private;
	struct obd_statfs osfs;
	int rc = dt_statfs(NULL, dt, &osfs);

	if (rc == 0) {
		__u32 blk_size = osfs.os_bsize >> 10;
		__u64 result = osfs.os_bavail;

		while (blk_size >>= 1)
			result <<= 1;

		seq_printf(m, "%llu\n", result);
	}
	return rc;
}
EXPORT_SYMBOL(lprocfs_dt_kbytesavail_seq_show);

/* procfs helper: total number of inodes. */
int lprocfs_dt_filestotal_seq_show(struct seq_file *m, void *v)
{
	struct dt_device *dt = m->private;
	struct obd_statfs osfs;
	int rc = dt_statfs(NULL, dt, &osfs);

	if (rc == 0)
		seq_printf(m, "%llu\n", osfs.os_files);
	return rc;
}
EXPORT_SYMBOL(lprocfs_dt_filestotal_seq_show);

/* procfs helper: number of free inodes. */
int lprocfs_dt_filesfree_seq_show(struct seq_file *m, void *v)
{
	struct dt_device *dt = m->private;
	struct obd_statfs osfs;
	int rc = dt_statfs(NULL, dt, &osfs);

	if (rc == 0)
		seq_printf(m, "%llu\n", osfs.os_ffree);
	return rc;
}
EXPORT_SYMBOL(lprocfs_dt_filesfree_seq_show);

#endif /* CONFIG_PROC_FS */
1116 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
1119 struct dt_device *dt = container_of(kobj, struct dt_device,
1121 struct lu_device *lu = dt2lu_dev(dt);
1126 return sprintf(buf, "%s\n", lu->ld_obd->obd_uuid.uuid);
1128 LUSTRE_RO_ATTR(uuid);
1130 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
1133 struct dt_device *dt = container_of(kobj, struct dt_device,
1135 struct obd_statfs osfs;
1138 rc = dt_statfs(NULL, dt, &osfs);
1142 return sprintf(buf, "%u\n", (unsigned) osfs.os_bsize);
1144 LUSTRE_RO_ATTR(blocksize);
1146 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
1149 struct dt_device *dt = container_of(kobj, struct dt_device,
1151 struct obd_statfs osfs;
1156 rc = dt_statfs(NULL, dt, &osfs);
1160 blk_size = osfs.os_bsize >> 10;
1161 result = osfs.os_blocks;
1163 while (blk_size >>= 1)
1166 return sprintf(buf, "%llu\n", result);
1168 LUSTRE_RO_ATTR(kbytestotal);
1170 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
1173 struct dt_device *dt = container_of(kobj, struct dt_device,
1175 struct obd_statfs osfs;
1180 rc = dt_statfs(NULL, dt, &osfs);
1184 blk_size = osfs.os_bsize >> 10;
1185 result = osfs.os_bfree;
1187 while (blk_size >>= 1)
1190 return sprintf(buf, "%llu\n", result);
1192 LUSTRE_RO_ATTR(kbytesfree);
1194 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
1197 struct dt_device *dt = container_of(kobj, struct dt_device,
1199 struct obd_statfs osfs;
1204 rc = dt_statfs(NULL, dt, &osfs);
1208 blk_size = osfs.os_bsize >> 10;
1209 result = osfs.os_bavail;
1211 while (blk_size >>= 1)
1214 return sprintf(buf, "%llu\n", result);
1216 LUSTRE_RO_ATTR(kbytesavail);
1218 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
1221 struct dt_device *dt = container_of(kobj, struct dt_device,
1223 struct obd_statfs osfs;
1226 rc = dt_statfs(NULL, dt, &osfs);
1230 return sprintf(buf, "%llu\n", osfs.os_files);
1232 LUSTRE_RO_ATTR(filestotal);
1234 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
1237 struct dt_device *dt = container_of(kobj, struct dt_device,
1239 struct obd_statfs osfs;
1242 rc = dt_statfs(NULL, dt, &osfs);
1246 return sprintf(buf, "%llu\n", osfs.os_ffree);
1248 LUSTRE_RO_ATTR(filesfree);
1250 static const struct attribute *dt_def_attrs[] = {
1251 &lustre_attr_uuid.attr,
1252 &lustre_attr_blocksize.attr,
1253 &lustre_attr_kbytestotal.attr,
1254 &lustre_attr_kbytesfree.attr,
1255 &lustre_attr_kbytesavail.attr,
1256 &lustre_attr_filestotal.attr,
1257 &lustre_attr_filesfree.attr,
1261 static void dt_sysfs_release(struct kobject *kobj)
1263 struct dt_device *dt = container_of(kobj, struct dt_device,
1266 complete(&dt->dd_kobj_unregister);
1269 int dt_tunables_fini(struct dt_device *dt)
1274 if (!IS_ERR_OR_NULL(dt->dd_debugfs_entry))
1275 ldebugfs_remove(&dt->dd_debugfs_entry);
1277 if (dt->dd_def_attrs)
1278 sysfs_remove_files(&dt->dd_kobj, dt->dd_def_attrs);
1280 kobject_put(&dt->dd_kobj);
1281 wait_for_completion(&dt->dd_kobj_unregister);
1285 EXPORT_SYMBOL(dt_tunables_fini);
1287 int dt_tunables_init(struct dt_device *dt, struct obd_type *type,
1288 const char *name, struct lprocfs_vars *list)
1292 dt->dd_ktype.sysfs_ops = &lustre_sysfs_ops;
1293 dt->dd_ktype.release = dt_sysfs_release;
1295 init_completion(&dt->dd_kobj_unregister);
1296 rc = kobject_init_and_add(&dt->dd_kobj, &dt->dd_ktype, type->typ_kobj,
1301 dt->dd_def_attrs = dt_def_attrs;
1303 rc = sysfs_create_files(&dt->dd_kobj, dt->dd_def_attrs);
1305 kobject_put(&dt->dd_kobj);
1310 * No need to register debugfs if no enteries. This allows us to
1311 * choose between using dt_device or obd_device for debugfs.
1316 dt->dd_debugfs_entry = ldebugfs_register(name,
1317 type->typ_debugfs_entry,
1319 if (IS_ERR_OR_NULL(dt->dd_debugfs_entry)) {
1320 rc = dt->dd_debugfs_entry ? PTR_ERR(dt->dd_debugfs_entry)
1322 CERROR("%s: error %d setting up debugfs\n",
1324 dt->dd_debugfs_entry = NULL;
1325 sysfs_remove_files(&dt->dd_kobj, dt->dd_def_attrs);
1326 kobject_put(&dt->dd_kobj);
1332 EXPORT_SYMBOL(dt_tunables_init);