4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * lustre/obdclass/dt_object.c
34 * Generic functions from dt_object.h
36 * Author: Nikita Danilov <nikita@clusterfs.com>
39 #define DEBUG_SUBSYSTEM S_CLASS
41 #include <linux/list.h>
42 #include <obd_class.h>
43 #include <dt_object.h>
45 #include <lustre_fid.h>
46 #include <lustre_nodemap.h>
47 #include <lustre_quota.h>
48 #include <lustre_lfsck.h>
49 #include <uapi/linux/lustre/lustre_disk.h>
51 /* context key constructor/destructor: dt_global_key_init, dt_global_key_fini */
52 LU_KEY_INIT(dt_global, struct dt_thread_info);
53 LU_KEY_FINI(dt_global, struct dt_thread_info);
/*
 * Per-thread context key providing a struct dt_thread_info scratch area for
 * MD/DT/MGS service threads and local contexts.
 * NOTE(review): the closing "};" of this initializer appears to have been
 * dropped by extraction — verify against upstream.
 */
55 struct lu_context_key dt_key = {
56 .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD | LCT_MG_THREAD | LCT_LOCAL,
57 .lct_init = dt_global_key_init,
58 .lct_fini = dt_global_key_fini
62 * no lock is necessary to protect the list, because call-backs
63 * are added during system startup. Please refer to "struct dt_device".
/* Register a transaction start/stop callback on @dev. */
65 void dt_txn_callback_add(struct dt_device *dev, struct dt_txn_callback *cb)
67 list_add(&cb->dtc_linkage, &dev->dd_txn_callbacks);
69 EXPORT_SYMBOL(dt_txn_callback_add);
/* Unregister a previously added transaction callback (safe to call twice,
 * since list_del_init() leaves the linkage self-referencing). */
71 void dt_txn_callback_del(struct dt_device *dev, struct dt_txn_callback *cb)
73 list_del_init(&cb->dtc_linkage);
75 EXPORT_SYMBOL(dt_txn_callback_del);
/*
 * Invoke all registered dtc_txn_start callbacks whose tag matches the
 * current environment context tags. Stops at the first callback error
 * (presumably — intervening lines were dropped by extraction; verify).
 */
77 int dt_txn_hook_start(const struct lu_env *env,
78 struct dt_device *dev, struct thandle *th)
81 struct dt_txn_callback *cb;
86 list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
87 struct thandle *dtc_th = th;
/* skip callbacks with no start hook or a non-matching context tag */
89 if (cb->dtc_txn_start == NULL ||
90 !(cb->dtc_tag & env->le_ctx.lc_tags))
94 * Usually dt_txn_hook_start is called from bottom device,
95 * and if the thandle has th_top, then we need use top
96 * thandle for the callback in the top thandle layer
98 if (th->th_top != NULL)
101 rc = cb->dtc_txn_start(env, dtc_th, cb->dtc_cookie);
107 EXPORT_SYMBOL(dt_txn_hook_start);
/*
 * Invoke all registered dtc_txn_stop callbacks for @th before the
 * transaction is committed; mirrors dt_txn_hook_start().
 */
109 int dt_txn_hook_stop(const struct lu_env *env, struct thandle *th)
111 struct dt_device *dev = th->th_dev;
112 struct dt_txn_callback *cb;
/* fault-injection point for testing transaction-stop error paths */
118 if (CFS_FAIL_CHECK(OBD_FAIL_DT_TXN_STOP))
121 list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
122 struct thandle *dtc_th = th;
/* skip callbacks with no stop hook or a non-matching context tag */
124 if (cb->dtc_txn_stop == NULL ||
125 !(cb->dtc_tag & env->le_ctx.lc_tags))
129 * Usually dt_txn_hook_stop is called from bottom device,
130 * and if the thandle has th_top, then we need use top
131 * thandle for the callback in the top thandle layer
133 if (th->th_top != NULL)
136 rc = cb->dtc_txn_stop(env, dtc_th, cb->dtc_cookie);
142 EXPORT_SYMBOL(dt_txn_hook_stop);
/* Initialize the dt_device layer: empty txn-callback list, then the
 * underlying lu_device. Returns lu_device_init() result. */
144 int dt_device_init(struct dt_device *dev, struct lu_device_type *t)
146 INIT_LIST_HEAD(&dev->dd_txn_callbacks);
147 return lu_device_init(&dev->dd_lu_dev, t);
149 EXPORT_SYMBOL(dt_device_init);
/* Tear down the embedded lu_device; inverse of dt_device_init(). */
151 void dt_device_fini(struct dt_device *dev)
153 lu_device_fini(&dev->dd_lu_dev);
155 EXPORT_SYMBOL(dt_device_fini);
/* Initialize the embedded lu_object of a dt_object. */
157 int dt_object_init(struct dt_object *obj,
158 struct lu_object_header *h, struct lu_device *d)
161 return lu_object_init(&obj->do_lu, h, d);
163 EXPORT_SYMBOL(dt_object_init);
/* Tear down the embedded lu_object; inverse of dt_object_init(). */
165 void dt_object_fini(struct dt_object *obj)
167 lu_object_fini(&obj->do_lu);
169 EXPORT_SYMBOL(dt_object_fini);
172 * Set directory .do_index_ops.
174 * Set directory index operations, if the caller knows directory exists,
175 * \a check should be set to ensure object is directory and exists, while for
176 * new directories, skip check and the index operations will be used to create
177 * ".." under directory.
179 * Normally this is called before dt_lookup() to ensure directory objects
180 * exists and .do_index_ops is correctly set.
182 * \param env lu_env object.
183 * \param obj dt object.
184 * \param check check \a obj existence and type, return if index ops is set.
185 * \retval 1 on success.
186 * \retval 0 on error.
188 int dt_try_as_dir(const struct lu_env *env, struct dt_object *obj, bool check)
/* with \a check set, a non-existent object cannot be treated as a dir */
191 if (unlikely(!dt_object_exists(obj)))
/* ... nor can a non-directory object */
194 if (unlikely(!S_ISDIR(lu_object_attr(&obj->do_lu))))
/* index ops already installed — nothing to do */
197 if (obj->do_index_ops)
201 obj->do_ops->do_index_try(env, obj, &dt_directory_features);
/* success iff the OSD accepted directory index features */
203 return obj->do_index_ops != NULL;
205 EXPORT_SYMBOL(dt_try_as_dir);
/*
 * Map a POSIX file mode (S_IFMT bits) to the dt object format type used
 * at creation time. NOTE(review): extraction dropped most case labels of
 * this switch (directory/index/symlink/node cases) — verify upstream.
 */
207 enum dt_format_type dt_mode_to_dft(__u32 mode)
209 enum dt_format_type result;
211 switch (mode & S_IFMT) {
216 result = DFT_REGULAR;
/* unknown file type is a caller bug */
228 LASSERTF(0, "invalid mode %o\n", mode);
229 result = 0; /* Just for satisfying compiler. */
234 EXPORT_SYMBOL(dt_mode_to_dft);
237 * lookup fid for object named \a name in directory \a dir.
/* On success *fid is filled with the entry's FID; returns dt_lookup()
 * result, or an error when \a dir cannot be used as a directory
 * (error value on that path was dropped by extraction — verify). */
240 int dt_lookup_dir(const struct lu_env *env, struct dt_object *dir,
241 const char *name, struct lu_fid *fid)
243 if (dt_try_as_dir(env, dir, true))
244 return dt_lookup(env, dir, (struct dt_rec *)fid,
245 (const struct dt_key *)name);
248 EXPORT_SYMBOL(dt_lookup_dir);
251 * this differs from dt_locate by top_dev as parameter
252 * but not one from lu_site
/* Find/instantiate the object for \a fid starting at \a top_dev, then walk
 * the compound object's layer list to return the slice belonging to \a dev.
 * Drops the reference and returns -ENOENT if \a dev has no slice. */
254 struct dt_object *dt_locate_at(const struct lu_env *env,
255 struct dt_device *dev,
256 const struct lu_fid *fid,
257 struct lu_device *top_dev,
258 const struct lu_object_conf *conf)
260 struct lu_object *lo;
263 lo = lu_object_find_at(env, top_dev, fid, conf);
/* search the stack of layers for the slice owned by @dev */
269 list_for_each_entry(n, &lo->lo_header->loh_layers, lo_linkage) {
270 if (n->lo_dev == &dev->dd_lu_dev)
271 return container_of(n, struct dt_object, do_lu);
/* no slice for this device: release the whole compound object */
274 lu_object_put(env, lo);
275 return ERR_PTR(-ENOENT);
277 EXPORT_SYMBOL(dt_locate_at);
280 * find an object named \a entry in given \a dfh->dfh_o directory.
/* dt_entry_func_t callback for dt_path_parser(): looks up one path
 * component in the current directory, releases the old directory object
 * and replaces dfh->dfh_o with the newly located one (presumably on the
 * dropped lines after 294 — verify against upstream). */
282 static int dt_find_entry(const struct lu_env *env, const char *entry,
285 struct dt_find_hint *dfh = data;
286 struct dt_device *dt = dfh->dfh_dt;
287 struct lu_fid *fid = dfh->dfh_fid;
288 struct dt_object *obj = dfh->dfh_o;
291 rc = dt_lookup_dir(env, obj, entry, fid);
/* the parent reference is no longer needed once the lookup is done */
292 dt_object_put(env, obj);
294 obj = dt_locate(env, dt, fid);
304 * Abstract function which parses path name. This function feeds
305 * path component to \a entry_func.
/* Splits \a path on '/' in place (strsep mutates the buffer) and invokes
 * \a entry_func for each component until the path is exhausted or the
 * callback fails. Loop structure was partially dropped by extraction. */
307 int dt_path_parser(const struct lu_env *env,
308 char *path, dt_entry_func_t entry_func,
315 e = strsep(&path, "/");
/* stop when the final component has been reached */
320 if (!path || path[0] == '\0')
324 rc = entry_func(env, e, data);
/*
 * Resolve a '/'-separated \a path relative to the root of \a dt and return
 * the located dt_object (or ERR_PTR). The path is copied into the
 * per-thread buffer because dt_path_parser() modifies it in place.
 */
333 dt_store_resolve(const struct lu_env *env, struct dt_device *dt,
334 const char *path, struct lu_fid *fid)
336 struct dt_thread_info *info = dt_info(env);
337 struct dt_find_hint *dfh = &info->dti_dfh;
338 struct dt_object *obj;
/* local, mutable copy — strsep() in the parser writes NULs into it */
345 strscpy(info->dti_buf, path, sizeof(info->dti_buf));
/* start the walk from the filesystem root object */
347 result = dt->dd_ops->dt_root_get(env, dt, fid);
349 obj = dt_locate(env, dt, fid);
352 result = dt_path_parser(env, info->dti_buf,
355 obj = ERR_PTR(result);
360 obj = ERR_PTR(result);
/* Look up \a name inside directory \a p and return the located object
 * (ERR_PTR on failure). Intervening error-path lines were dropped by
 * extraction — verify against upstream. */
365 static struct dt_object *dt_reg_open(const struct lu_env *env,
366 struct dt_device *dt,
374 result = dt_lookup_dir(env, p, name, fid);
376 o = dt_locate(env, dt, fid);
384 * Open dt object named \a filename from \a dirname directory.
385 * \param dt dt device
386 * \param fid on success, object fid is stored in *fid
388 struct dt_object *dt_store_open(const struct lu_env *env, struct dt_device *dt,
389 const char *dirname, const char *filename,
392 struct dt_object *file;
393 struct dt_object *dir;
395 dir = dt_store_resolve(env, dt, dirname, fid);
397 file = dt_reg_open(env, dt, dir, filename, fid);
/* directory reference no longer needed once the file is opened */
398 dt_object_put(env, dir);
/*
 * Locate the object with \a fid on \a dt, creating it (format \a dof,
 * attributes \a at) inside a local transaction if it does not exist yet.
 * The existence check is repeated under the object write lock to close the
 * race with a concurrent creator. Returns the object or ERR_PTR.
 */
406 struct dt_object *dt_find_or_create(const struct lu_env *env,
407 struct dt_device *dt,
408 const struct lu_fid *fid,
409 struct dt_object_format *dof,
412 struct dt_object *dto;
418 dto = dt_locate(env, dt, fid);
422 LASSERT(dto != NULL);
/* fast path: object already exists, no transaction needed */
423 if (dt_object_exists(dto))
426 th = dt_trans_create(env, dt);
428 GOTO(out, rc = PTR_ERR(th));
430 rc = dt_declare_create(env, dto, at, NULL, dof, th);
432 GOTO(trans_stop, rc);
434 rc = dt_trans_start_local(env, dt, th);
436 GOTO(trans_stop, rc);
438 dt_write_lock(env, dto, 0);
/* re-check under lock: someone may have created it meanwhile */
439 if (dt_object_exists(dto))
440 GOTO(unlock, rc = 0);
442 CDEBUG(D_OTHER, "create new object "DFID"\n", PFID(fid));
444 rc = dt_create(env, dto, at, NULL, dof, th);
447 LASSERT(dt_object_exists(dto));
449 dt_write_unlock(env, dto);
451 dt_trans_stop(env, dt, th);
/* error path: drop the reference taken by dt_locate() */
454 dt_object_put(env, dto);
460 EXPORT_SYMBOL(dt_find_or_create);
462 /* dt class init function. */
463 int dt_global_init(void)
467 LU_CONTEXT_KEY_INIT(&dt_key);
468 result = lu_context_key_register(&dt_key);
/* dt class cleanup: unregister the per-thread context key */
472 void dt_global_fini(void)
474 lu_context_key_degister(&dt_key);
478 * Generic read helper. May return an error for partial reads.
480 * \param env lustre environment
481 * \param dt object to be read
482 * \param buf lu_buf to be filled, with buffer pointer and length
483 * \param pos position to start reading, updated as data is read
485 * \retval real size of data read
486 * \retval -ve errno on failure
488 int dt_read(const struct lu_env *env, struct dt_object *dt,
489 struct lu_buf *buf, loff_t *pos)
491 LASSERTF(dt != NULL, "dt is NULL when we want to read record\n");
492 return dt->do_body_ops->dbo_read(env, dt, buf, pos);
494 EXPORT_SYMBOL(dt_read);
497 * Read structures of fixed size from storage. Unlike dt_read(), using
498 * dt_record_read() will return an error for partial reads.
500 * \param env lustre environment
501 * \param dt object to be read
502 * \param buf lu_buf to be filled, with buffer pointer and length
503 * \param pos position to start reading, updated as data is read
505 * \retval 0 on successfully reading full buffer
506 * \retval -EFAULT on short read
507 * \retval -ve errno on failure
509 int dt_record_read(const struct lu_env *env, struct dt_object *dt,
510 struct lu_buf *buf, loff_t *pos)
514 LASSERTF(dt != NULL, "dt is NULL when we want to read record\n");
516 size = dt->do_body_ops->dbo_read(env, dt, buf, pos);
/* anything short of a full buffer is treated as a hard error */
519 return (size == (ssize_t)buf->lb_len) ? 0 : -EFAULT;
521 EXPORT_SYMBOL(dt_record_read);
/* Write a fixed-size record under transaction \a th; like dt_record_read(),
 * a short write is converted to -EFAULT. */
523 int dt_record_write(const struct lu_env *env, struct dt_object *dt,
524 const struct lu_buf *buf, loff_t *pos, struct thandle *th)
528 LASSERTF(dt != NULL, "dt is NULL when we want to write record\n");
530 LASSERT(dt->do_body_ops);
531 LASSERTF(dt->do_body_ops->dbo_write, DFID"\n",
532 PFID(lu_object_fid(&dt->do_lu)));
534 size = dt->do_body_ops->dbo_write(env, dt, buf, pos, th);
537 return (size == (ssize_t)buf->lb_len) ? 0 : -EFAULT;
539 EXPORT_SYMBOL(dt_record_write);
/* Declare (reserve credits for) setting the inode-version xattr. */
541 int dt_declare_version_set(const struct lu_env *env, struct dt_object *o,
545 char *xname = XATTR_NAME_VERSION;
549 vbuf.lb_len = sizeof(dt_obj_version_t);
550 return dt_declare_xattr_set(env, o, &vbuf, xname, 0, th);
552 EXPORT_SYMBOL(dt_declare_version_set);
/* Store \a version in the object's version xattr; failures are only
 * logged, not propagated. */
554 void dt_version_set(const struct lu_env *env, struct dt_object *o,
555 dt_obj_version_t version, struct thandle *th)
558 char *xname = XATTR_NAME_VERSION;
562 vbuf.lb_buf = &version;
563 vbuf.lb_len = sizeof(version);
564 rc = dt_xattr_set(env, o, &vbuf, xname, 0, th);
566 CDEBUG(D_INODE, "Can't set version, rc %d\n", rc);
568 EXPORT_SYMBOL(dt_version_set);
/* Read the object's version xattr; the value returned when the xattr is
 * missing/short was dropped by extraction (presumably 0 — verify). */
570 dt_obj_version_t dt_version_get(const struct lu_env *env, struct dt_object *o)
573 char *xname = XATTR_NAME_VERSION;
574 dt_obj_version_t version;
578 vbuf.lb_buf = &version;
579 vbuf.lb_len = sizeof(version);
580 rc = dt_xattr_get(env, o, &vbuf, xname);
/* only an exact-size read yields a valid version */
581 if (rc != sizeof(version)) {
582 CDEBUG(D_INODE, "Can't get version, rc %d\n", rc);
588 EXPORT_SYMBOL(dt_version_get);
/* Declare (reserve credits for) setting the explicit data-version xattr. */
590 int dt_declare_data_version_set(const struct lu_env *env, struct dt_object *o,
596 vbuf.lb_len = sizeof(dt_obj_version_t);
598 return dt_declare_xattr_set(env, o, &vbuf, XATTR_NAME_DATAVER, 0, th);
600 EXPORT_SYMBOL(dt_declare_data_version_set);
/* Store an explicit data version for the object; errors are ignored by
 * design (best effort). */
602 void dt_data_version_set(const struct lu_env *env, struct dt_object *o,
603 dt_obj_version_t version, struct thandle *th)
607 CDEBUG(D_INODE, DFID": set new data version -> %llu\n",
608 PFID(lu_object_fid(&o->do_lu)), version);
610 /* version should never be set to zero */
612 vbuf.lb_buf = &version;
613 vbuf.lb_len = sizeof(version);
614 dt_xattr_set(env, o, &vbuf, XATTR_NAME_DATAVER, 0, th);
616 EXPORT_SYMBOL(dt_data_version_set);
/* Declare (reserve credits for) removing the data-version xattr. */
618 int dt_declare_data_version_del(const struct lu_env *env, struct dt_object *o,
621 return dt_declare_xattr_del(env, o, XATTR_NAME_DATAVER, th);
623 EXPORT_SYMBOL(dt_declare_data_version_del);
/* Remove the explicit data-version xattr; best effort, no error return. */
625 void dt_data_version_del(const struct lu_env *env, struct dt_object *o,
628 /* file doesn't need explicit data version anymore */
629 CDEBUG(D_INODE, DFID": remove explicit data version\n",
630 PFID(lu_object_fid(&o->do_lu)));
631 dt_xattr_del(env, o, XATTR_NAME_DATAVER, th);
633 EXPORT_SYMBOL(dt_data_version_del);
635 /* Initialize explicit data version, e.g. for DoM files.
636 * It uses inode version as initial value.
/* Runs a small local transaction: declare, start, set. Failures are
 * reported via CDEBUG but otherwise ignored (best effort). */
638 dt_obj_version_t dt_data_version_init(const struct lu_env *env,
641 struct dt_device *dt = lu2dt_dev(o->do_lu.lo_dev);
/* seed the data version from the inode version */
648 dv = dt_version_get(env, o);
652 th = dt_trans_create(env, dt);
654 GOTO(out, rc = PTR_ERR(th));
656 rc = dt_declare_data_version_set(env, o, th);
660 rc = dt_trans_start_local(env, dt, th);
664 dt_data_version_set(env, o, dv, th);
666 dt_trans_stop(env, dt, th);
668 /* Ignore failure but report the error */
670 CDEBUG(D_INODE, "can't init data version for "DFID": rc = %d\n",
671 PFID(lu_object_fid(&o->do_lu)), rc);
/* Fetch the explicit data version, lazily initializing it from the inode
 * version if the xattr has not been set on the object yet. */
676 dt_obj_version_t dt_data_version_get(const struct lu_env *env,
680 dt_obj_version_t version;
683 vbuf.lb_buf = &version;
684 vbuf.lb_len = sizeof(version);
685 rc = dt_xattr_get(env, o, &vbuf, XATTR_NAME_DATAVER);
687 CDEBUG(D_INODE, DFID": get data version %llu: rc = %d\n",
688 PFID(lu_object_fid(&o->do_lu)), version, rc);
690 if (rc == sizeof(version))
693 /* data version EA wasn't set yet on the object, initialize it now */
695 return dt_data_version_init(env, o);
697 CDEBUG(D_INODE, "Can't get data version: rc = %d\n", rc);
701 EXPORT_SYMBOL(dt_data_version_get);
703 /* list of all supported index types */
/* directory entries index; actual feature values defined elsewhere */
706 const struct dt_index_features dt_directory_features;
707 EXPORT_SYMBOL(dt_directory_features);
/* OI scrub / otable-based iteration index */
710 const struct dt_index_features dt_otable_features;
711 EXPORT_SYMBOL(dt_otable_features);
713 /* lfsck layout orphan */
714 const struct dt_index_features dt_lfsck_layout_orphan_features = {
716 .dif_keysize_min = sizeof(struct lu_fid),
717 .dif_keysize_max = sizeof(struct lu_fid),
718 .dif_recsize_min = sizeof(struct lu_orphan_rec_v3),
719 .dif_recsize_max = sizeof(struct lu_orphan_rec_v3),
722 EXPORT_SYMBOL(dt_lfsck_layout_orphan_features);
724 /* lfsck layout dangling */
725 const struct dt_index_features dt_lfsck_layout_dangling_features = {
726 .dif_flags = DT_IND_UPDATE,
727 .dif_keysize_min = sizeof(struct lfsck_layout_dangling_key),
728 .dif_keysize_max = sizeof(struct lfsck_layout_dangling_key),
729 .dif_recsize_min = sizeof(struct lu_fid),
730 .dif_recsize_max = sizeof(struct lu_fid),
733 EXPORT_SYMBOL(dt_lfsck_layout_dangling_features);
735 /* lfsck namespace */
736 const struct dt_index_features dt_lfsck_namespace_features = {
737 .dif_flags = DT_IND_UPDATE,
738 .dif_keysize_min = sizeof(struct lu_fid),
739 .dif_keysize_max = sizeof(struct lu_fid),
740 .dif_recsize_min = sizeof(__u8),
741 .dif_recsize_max = sizeof(__u8),
744 EXPORT_SYMBOL(dt_lfsck_namespace_features);
746 /* accounting indexes */
747 const struct dt_index_features dt_acct_features = {
748 .dif_flags = DT_IND_UPDATE,
749 .dif_keysize_min = sizeof(__u64), /* 64-bit uid/gid */
750 .dif_keysize_max = sizeof(__u64), /* 64-bit uid/gid */
751 .dif_recsize_min = sizeof(struct lquota_acct_rec), /* 16 bytes */
752 .dif_recsize_max = sizeof(struct lquota_acct_rec), /* 16 bytes */
755 EXPORT_SYMBOL(dt_acct_features);
757 /* global quota files */
758 const struct dt_index_features dt_quota_glb_features = {
759 .dif_flags = DT_IND_UPDATE,
760 /* a different key would have to be used for per-directory quota */
761 .dif_keysize_min = sizeof(__u64), /* 64-bit uid/gid */
762 .dif_keysize_max = sizeof(__u64), /* 64-bit uid/gid */
763 .dif_recsize_min = sizeof(struct lquota_glb_rec), /* 32 bytes */
764 .dif_recsize_max = sizeof(struct lquota_glb_rec), /* 32 bytes */
767 EXPORT_SYMBOL(dt_quota_glb_features);
769 /* slave quota files */
770 const struct dt_index_features dt_quota_slv_features = {
771 .dif_flags = DT_IND_UPDATE,
772 /* a different key would have to be used for per-directory quota */
773 .dif_keysize_min = sizeof(__u64), /* 64-bit uid/gid */
774 .dif_keysize_max = sizeof(__u64), /* 64-bit uid/gid */
775 .dif_recsize_min = sizeof(struct lquota_slv_rec), /* 8 bytes */
776 .dif_recsize_max = sizeof(struct lquota_slv_rec), /* 8 bytes */
779 EXPORT_SYMBOL(dt_quota_slv_features);
781 /* nodemap files, nodemap_rec size asserted in nodemap_storage.c */
782 const struct dt_index_features dt_nodemap_features = {
783 .dif_flags = DT_IND_UPDATE,
784 .dif_keysize_min = sizeof(__u64), /* 64-bit nodemap/record id */
785 .dif_keysize_max = sizeof(__u64), /* 64-bit nodemap/record id */
786 .dif_recsize_min = sizeof(union nodemap_rec), /* 32 bytes */
787 .dif_recsize_max = sizeof(union nodemap_rec), /* 32 bytes */
790 EXPORT_SYMBOL(dt_nodemap_features);
793 * helper function returning what dt_index_features structure should be used
794 * based on the FID sequence. This is used by OBD_IDX_READ RPC
/* The second (dropped) parameter is presumably the object mode, used to
 * sanity-check regular-file vs directory below — verify upstream. */
796 static inline const struct dt_index_features *dt_index_feat_select(__u64 seq,
799 if (seq == FID_SEQ_QUOTA_GLB) {
800 /* global quota index */
802 /* global quota index should be a regular file */
803 return ERR_PTR(-ENOENT);
804 return &dt_quota_glb_features;
805 } else if (seq == FID_SEQ_QUOTA) {
806 /* quota slave index */
808 /* slave index should be a regular file */
809 return ERR_PTR(-ENOENT);
810 return &dt_quota_slv_features;
811 } else if (seq == FID_SEQ_LAYOUT_RBTREE){
812 return &dt_lfsck_layout_orphan_features;
813 } else if (seq >= FID_SEQ_NORMAL) {
814 /* object is part of the namespace, verify that it is a
817 /* sorry, we can only deal with directory */
818 return ERR_PTR(-ENOTDIR);
819 return &dt_directory_features;
/* any other sequence is not supported over OBD_IDX_READ */
822 return ERR_PTR(-EOPNOTSUPP);
826 * Fill a lu_idxpage with key/record pairs read for transfer via OBD_IDX_READ
829 * \param env - is the environment passed by the caller
830 * \param obj - index object being traversed (mostly for debugging)
831 * \param lp - is a pointer to the lu_page to fill
832 * \param bytes - is the maximum number of bytes that should be copied
833 * \param iops - is the index operation vector associated with the index object
834 * \param it - is a pointer to the current iterator
835 * \param attr - is the index attribute to pass to iops->rec()
836 * \param arg - is a pointer to the idx_info structure
838 static int dt_index_page_build(const struct lu_env *env, struct dt_object *obj,
839 union lu_page *lp, size_t bytes,
840 const struct dt_it_ops *iops,
841 struct dt_it *it, __u32 attr, void *arg)
843 struct idx_info *ii = (struct idx_info *)arg;
844 struct lu_idxpage *lip = &lp->lp_idx;
/* container too small to even hold the lu_idxpage header */
854 if (bytes < LIP_HDR_SIZE)
857 /* initialize the header of the new container */
858 memset(lip, 0, LIP_HDR_SIZE);
859 lip->lip_magic = LIP_MAGIC;
860 bytes -= LIP_HDR_SIZE;
862 /* client wants the 64-bit hash value associated with each record */
863 if (!(ii->ii_flags & II_FL_NOHASH))
864 hashsize = sizeof(hash);
866 entry = lip->lip_entries;
868 /* fetch 64-bit hash value */
869 hash = iops->store(env, it);
870 ii->ii_hash_end = hash;
/* fault injection: force an early break once something was copied */
872 if (CFS_FAIL_CHECK(OBD_FAIL_OBD_IDX_READ_BREAK)) {
873 if (lip->lip_nr != 0)
877 if (!(ii->ii_flags & II_FL_NOKEY)) {
878 keysize = iops->key_size(env, it);
/* fixed-key indexes must match the size advertised in ii */
879 if (!(ii->ii_flags & II_FL_VARKEY) &&
880 keysize != ii->ii_keysize) {
882 CERROR("%s: keysize mismatch %hu != %hu on "
884 lu_dev_name(obj->do_lu.lo_dev),
885 keysize, ii->ii_keysize,
886 PFID(lu_object_fid(&obj->do_lu)), rc);
891 /* and finally the record */
892 if (ii->ii_flags & II_FL_VARREC)
893 recsize = iops->rec_size(env, it, attr);
895 recsize = ii->ii_recsize;
/* not enough room left in this container for another triple */
897 if (bytes < hashsize + keysize + recsize) {
898 if (lip->lip_nr == 0)
899 GOTO(out, rc = -E2BIG);
/* record is laid out after the hash and key in the entry */
903 rc = iops->rec(env, it,
904 (struct dt_rec *)(entry + hashsize + keysize),
908 memcpy(entry, &hash, hashsize);
912 key = iops->key(env, it);
913 memcpy(entry + hashsize, key, keysize);
915 /* hash/key/record successfully copied! */
/* remember the hash of the very first record of the transfer */
917 if (unlikely(lip->lip_nr == 1 && ii->ii_count == 0))
918 ii->ii_hash_start = hash;
919 entry += hashsize + keysize + recsize;
920 bytes -= hashsize + keysize + recsize;
921 } else if (rc != -ESTALE) {
925 /* move on to the next record */
927 rc = iops->next(env, it);
928 } while (rc == -ESTALE);
/* a positive rc from iops->next() means end of index */
933 if (rc >= 0 && lip->lip_nr > 0)
934 /* one more container */
937 /* no more entries */
938 ii->ii_hash_end = II_END_OFF;
944 * Walk index and fill lu_page containers with key/record pairs
946 * \param env - is the environment passed by the caller
947 * \param obj - is the index object to parse
948 * \param rdpg - is the lu_rdpg descriptor associated with the transfer
949 * \param filler - is the callback function responsible for filling a lu_page
950 * with key/record pairs in the format wanted by the caller.
951 * If NULL, uses dt_index_page_build
952 * \param arg - is an opaque argument passed to the filler function
954 * \retval sum (in bytes) of all filled lu_pages
955 * \retval -ve errno on failure
957 int dt_index_walk(const struct lu_env *env, struct dt_object *obj,
958 const struct lu_rdpg *rdpg, dt_index_page_build_t filler,
962 const struct dt_it_ops *iops;
963 size_t pageidx, bytes, nlupgs = 0;
967 LASSERT(rdpg->rp_pages != NULL);
968 LASSERT(obj->do_index_ops != NULL);
/* default filler builds standard lu_idxpage containers */
971 filler = dt_index_page_build;
973 bytes = rdpg->rp_count;
977 /* Iterate through index and fill containers from @rdpg */
978 iops = &obj->do_index_ops->dio_it;
979 LASSERT(iops != NULL);
980 it = iops->init(env, obj, rdpg->rp_attrs);
/* position the iterator at the requested start hash */
984 rc = iops->load(env, it, rdpg->rp_hash);
987 * Iterator didn't find record with exactly the key requested.
989 * It is currently either
991 * - positioned above record with key less than
992 * requested---skip it.
993 * - or not positioned at all (is in IAM_IT_SKEWED
994 * state)---position it on the next item.
996 rc = iops->next(env, it);
1006 * Fill containers one after the other. There might be multiple
1007 * containers per physical page.
1009 * At this point and across for-loop:
1010 * rc == 0 -> ok, proceed.
1011 * rc > 0 -> end of index.
1014 for (pageidx = 0; rc == 0 && bytes > 0; pageidx++) {
1018 LASSERT(pageidx < rdpg->rp_npages);
1019 lp = kmap(rdpg->rp_pages[pageidx]);
/* each physical page may hold several LU_PAGE_SIZE containers */
1022 for (i = 0; i < LU_PAGE_COUNT; i++, lp++, bytes-=LU_PAGE_SIZE) {
1023 rc = filler(env, obj, lp,
1024 min_t(size_t, bytes, LU_PAGE_SIZE),
1025 iops, it, rdpg->rp_attrs, arg);
1028 /* one more lu_page */
1034 kunmap(rdpg->rp_pages[pageidx]);
1039 iops->fini(env, it);
/* success: report total bytes filled, capped at the requested count */
1042 rc = min_t(size_t, nlupgs * LU_PAGE_SIZE, rdpg->rp_count);
1046 EXPORT_SYMBOL(dt_index_walk);
1049 * Walk key/record pairs of an index and copy them into 4KB containers to be
1050 * transferred over the network. This is the common handler for OBD_IDX_READ
1053 * \param env - is the environment passed by the caller
1054 * \param dev - is the dt_device storing the index
1055 * \param ii - is the idx_info structure packed by the client in the
1056 * OBD_IDX_READ request
1057 * \param rdpg - is the lu_rdpg descriptor
1059 * \retval on success, return sum (in bytes) of all filled containers
1060 * \retval appropriate error otherwise.
1062 int dt_index_read(const struct lu_env *env, struct dt_device *dev,
1063 struct idx_info *ii, const struct lu_rdpg *rdpg)
1065 const struct dt_index_features *feat;
1066 struct dt_object *obj;
1071 * rp_count shouldn't be null and should be a multiple of the container
1074 if (rdpg->rp_count == 0 || (rdpg->rp_count & (LU_PAGE_SIZE - 1)) != 0)
/* only quota, layout-rbtree and normal-namespace FIDs are readable */
1077 if (!fid_is_quota(&ii->ii_fid) && !fid_is_layout_rbtree(&ii->ii_fid) &&
1078 !fid_is_norm(&ii->ii_fid))
1079 RETURN(-EOPNOTSUPP);
1081 /* lookup index object subject to the transfer */
1082 obj = dt_locate(env, dev, &ii->ii_fid);
1084 RETURN(PTR_ERR(obj));
1085 if (dt_object_exists(obj) == 0)
1086 GOTO(out, rc = -ENOENT);
1088 /* fetch index features associated with index object */
1089 feat = dt_index_feat_select(fid_seq(&ii->ii_fid),
1090 lu_object_attr(&obj->do_lu));
1092 GOTO(out, rc = PTR_ERR(feat));
1094 /* load index feature if not done already */
1095 if (obj->do_index_ops == NULL) {
1096 rc = obj->do_ops->do_index_try(env, obj, feat);
1101 /* fill ii_flags with supported index features */
1102 ii->ii_flags &= (II_FL_NOHASH | II_FL_NOKEY | II_FL_VARKEY |
/* fixed-size keys/records: advertise the sizes to the client */
1105 if (!(feat->dif_flags & DT_IND_VARKEY))
1106 ii->ii_keysize = feat->dif_keysize_max;
1108 if (!(feat->dif_flags & DT_IND_VARREC))
1109 ii->ii_recsize = feat->dif_recsize_max;
1111 if (feat->dif_flags & DT_IND_NONUNQ)
1112 /* key isn't necessarily unique */
1113 ii->ii_flags |= II_FL_NONUNQ;
/* layout-rbtree indexes are walked without the object read lock */
1115 if (!fid_is_layout_rbtree(&ii->ii_fid)) {
1116 dt_read_lock(env, obj, 0);
1117 /* fetch object version before walking the index */
1118 ii->ii_version = dt_version_get(env, obj);
1121 /* walk the index and fill lu_idxpages with key/record pairs */
1122 rc = dt_index_walk(env, obj, rdpg, dt_index_page_build, ii);
1123 if (!fid_is_layout_rbtree(&ii->ii_fid))
1124 dt_read_unlock(env, obj);
1127 /* index is empty */
1128 LASSERT(ii->ii_count == 0);
1129 ii->ii_hash_end = II_END_OFF;
1133 * For partial lu_idxpage filling of the end system page,
1134 * init the header of the remain lu_idxpages.
1137 dt_index_page_adjust(rdpg->rp_pages, rdpg->rp_npages,
/* drop the reference taken by dt_locate() */
1142 dt_object_put(env, obj);
1145 EXPORT_SYMBOL(dt_index_read);
1147 #if PAGE_SIZE > LU_PAGE_SIZE
1149 * For partial lu_idxpage filling of the end system page, init the header of the
1150 * remain lu_idxpages. So that the clients handle partial filling correctly.
1151 * Current lu_idxpage read clients are osp_it_next_page(),
1152 * nodemap_process_idx_pages() and qsd_reint_entries().
1154 void dt_index_page_adjust(struct page **pages, const u32 npages,
1155 const size_t nlupgs)
1157 u32 nlupgs_mod = nlupgs % LU_PAGE_COUNT;
1160 struct lu_idxpage *lip;
/* locate the system page containing the first unfilled lu_page */
1165 pgidx = nlupgs / LU_PAGE_COUNT;
1166 LASSERT(pgidx < npages);
1167 lp = kmap(pages[pgidx]);
1168 remain_nlupgs = LU_PAGE_COUNT - nlupgs_mod;
1170 /* initialize the header for the remain lu_pages */
1171 for (i = 0, lp += nlupgs_mod; i < remain_nlupgs; i++, lp++) {
1173 memset(lip, 0, LIP_HDR_SIZE);
1174 lip->lip_magic = LIP_MAGIC;
1177 kunmap(pages[pgidx]);
/* when PAGE_SIZE == LU_PAGE_SIZE there can be no partial system page */
1181 void dt_index_page_adjust(struct page **pages, const u32 npages,
1182 const size_t nlupgs)
1186 EXPORT_SYMBOL(dt_index_page_adjust);
1188 #ifdef CONFIG_PROC_FS
/* /proc: show filesystem block size from dt_statfs() */
1189 int lprocfs_dt_blksize_seq_show(struct seq_file *m, void *v)
1191 struct dt_device *dt = m->private;
1192 struct obd_statfs osfs;
1194 int rc = dt_statfs(NULL, dt, &osfs);
1196 seq_printf(m, "%u\n", (unsigned) osfs.os_bsize);
1199 EXPORT_SYMBOL(lprocfs_dt_blksize_seq_show);
/* /proc: show total space in kilobytes (blocks scaled by block size) */
1201 int lprocfs_dt_kbytestotal_seq_show(struct seq_file *m, void *v)
1203 struct dt_device *dt = m->private;
1204 struct obd_statfs osfs;
1206 int rc = dt_statfs(NULL, dt, &osfs);
1208 __u32 blk_size = osfs.os_bsize >> 10;
1209 __u64 result = osfs.os_blocks;
/* multiply blocks by (block size / 1K) via repeated doubling */
1211 while (blk_size >>= 1)
1214 seq_printf(m, "%llu\n", result);
1218 EXPORT_SYMBOL(lprocfs_dt_kbytestotal_seq_show);
/* /proc: show free space in kilobytes */
1220 int lprocfs_dt_kbytesfree_seq_show(struct seq_file *m, void *v)
1222 struct dt_device *dt = m->private;
1223 struct obd_statfs osfs;
1225 int rc = dt_statfs(NULL, dt, &osfs);
1227 __u32 blk_size = osfs.os_bsize >> 10;
1228 __u64 result = osfs.os_bfree;
1230 while (blk_size >>= 1)
1233 seq_printf(m, "%llu\n", result);
1237 EXPORT_SYMBOL(lprocfs_dt_kbytesfree_seq_show);
/* /proc: show space available to unprivileged users, in kilobytes */
1239 int lprocfs_dt_kbytesavail_seq_show(struct seq_file *m, void *v)
1241 struct dt_device *dt = m->private;
1242 struct obd_statfs osfs;
1244 int rc = dt_statfs(NULL, dt, &osfs);
1246 __u32 blk_size = osfs.os_bsize >> 10;
1247 __u64 result = osfs.os_bavail;
1249 while (blk_size >>= 1)
1252 seq_printf(m, "%llu\n", result);
1256 EXPORT_SYMBOL(lprocfs_dt_kbytesavail_seq_show);
/* /proc: show total inode count */
1258 int lprocfs_dt_filestotal_seq_show(struct seq_file *m, void *v)
1260 struct dt_device *dt = m->private;
1261 struct obd_statfs osfs;
1263 int rc = dt_statfs(NULL, dt, &osfs);
1265 seq_printf(m, "%llu\n", osfs.os_files);
1268 EXPORT_SYMBOL(lprocfs_dt_filestotal_seq_show);
/* /proc: show free inode count */
1270 int lprocfs_dt_filesfree_seq_show(struct seq_file *m, void *v)
1272 struct dt_device *dt = m->private;
1273 struct obd_statfs osfs;
1275 int rc = dt_statfs(NULL, dt, &osfs);
1277 seq_printf(m, "%llu\n", osfs.os_ffree);
1280 EXPORT_SYMBOL(lprocfs_dt_filesfree_seq_show);
1282 #endif /* CONFIG_PROC_FS */
/* sysfs: show the OBD device UUID */
1284 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
1287 struct dt_device *dt = container_of(kobj, struct dt_device,
1289 struct lu_device *lu = dt2lu_dev(dt);
1294 return sprintf(buf, "%s\n", lu->ld_obd->obd_uuid.uuid);
1296 LUSTRE_RO_ATTR(uuid);
/* sysfs: show filesystem block size */
1298 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
1301 struct dt_device *dt = container_of(kobj, struct dt_device,
1303 struct obd_statfs osfs;
1306 rc = dt_statfs(NULL, dt, &osfs);
1310 return sprintf(buf, "%u\n", (unsigned) osfs.os_bsize);
1312 LUSTRE_RO_ATTR(blocksize);
/* sysfs: show total space in kilobytes */
1314 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
1317 struct dt_device *dt = container_of(kobj, struct dt_device,
1319 struct obd_statfs osfs;
1324 rc = dt_statfs(NULL, dt, &osfs);
/* scale block count to kilobytes by doubling per bit of (bsize>>10) */
1328 blk_size = osfs.os_bsize >> 10;
1329 result = osfs.os_blocks;
1331 while (blk_size >>= 1)
1334 return sprintf(buf, "%llu\n", result);
1336 LUSTRE_RO_ATTR(kbytestotal);
/* sysfs: show free space in kilobytes */
1338 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
1341 struct dt_device *dt = container_of(kobj, struct dt_device,
1343 struct obd_statfs osfs;
1348 rc = dt_statfs(NULL, dt, &osfs);
1352 blk_size = osfs.os_bsize >> 10;
1353 result = osfs.os_bfree;
1355 while (blk_size >>= 1)
1358 return sprintf(buf, "%llu\n", result);
1360 LUSTRE_RO_ATTR(kbytesfree);
/* sysfs: show space available to unprivileged users, in kilobytes */
1362 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
1365 struct dt_device *dt = container_of(kobj, struct dt_device,
1367 struct obd_statfs osfs;
1372 rc = dt_statfs(NULL, dt, &osfs);
1376 blk_size = osfs.os_bsize >> 10;
1377 result = osfs.os_bavail;
1379 while (blk_size >>= 1)
1382 return sprintf(buf, "%llu\n", result);
1384 LUSTRE_RO_ATTR(kbytesavail);
/* sysfs: show total inode count */
1386 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
1389 struct dt_device *dt = container_of(kobj, struct dt_device,
1391 struct obd_statfs osfs;
1394 rc = dt_statfs(NULL, dt, &osfs);
1398 return sprintf(buf, "%llu\n", osfs.os_files);
1400 LUSTRE_RO_ATTR(filestotal);
/* sysfs: show free inode count */
1402 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
1405 struct dt_device *dt = container_of(kobj, struct dt_device,
1407 struct obd_statfs osfs;
1410 rc = dt_statfs(NULL, dt, &osfs);
1414 return sprintf(buf, "%llu\n", osfs.os_ffree);
1416 LUSTRE_RO_ATTR(filesfree);
/* default sysfs attributes installed on every dt_device */
1418 static const struct attribute *dt_def_attrs[] = {
1419 &lustre_attr_uuid.attr,
1420 &lustre_attr_blocksize.attr,
1421 &lustre_attr_kbytestotal.attr,
1422 &lustre_attr_kbytesfree.attr,
1423 &lustre_attr_kbytesavail.attr,
1424 &lustre_attr_filestotal.attr,
1425 &lustre_attr_filesfree.attr,
/* kobject release callback: tear down debugfs and signal the waiter in
 * dt_tunables_fini() that the kobject is fully unregistered. */
1429 static void dt_sysfs_release(struct kobject *kobj)
1431 struct dt_device *dt = container_of(kobj, struct dt_device,
1434 debugfs_remove_recursive(dt->dd_debugfs_entry);
1435 dt->dd_debugfs_entry = NULL;
1437 complete(&dt->dd_kobj_unregister);
/* Remove the sysfs files and kobject created by dt_tunables_init(); waits
 * until the release callback has run before returning. */
1440 int dt_tunables_fini(struct dt_device *dt)
1445 if (dt->dd_def_attrs) {
1446 sysfs_remove_files(&dt->dd_kobj, dt->dd_def_attrs);
1447 kobject_put(&dt->dd_kobj);
1448 wait_for_completion(&dt->dd_kobj_unregister);
1453 EXPORT_SYMBOL(dt_tunables_fini);
/*
 * Create the per-device sysfs kobject (under the obd_type kobject), install
 * the default attribute files, and optionally register a debugfs directory
 * populated from \a list. Inverse is dt_tunables_fini().
 */
1455 int dt_tunables_init(struct dt_device *dt, struct obd_type *type,
1456 const char *name, struct ldebugfs_vars *list)
1460 dt->dd_ktype.sysfs_ops = &lustre_sysfs_ops;
1461 dt->dd_ktype.release = dt_sysfs_release;
1463 init_completion(&dt->dd_kobj_unregister);
1464 rc = kobject_init_and_add(&dt->dd_kobj, &dt->dd_ktype, &type->typ_kobj,
1469 dt->dd_def_attrs = dt_def_attrs;
1471 rc = sysfs_create_files(&dt->dd_kobj, dt->dd_def_attrs);
/* on failure drop the kobject reference; release callback cleans up */
1473 kobject_put(&dt->dd_kobj);
1474 dt->dd_def_attrs = NULL;
1479 * No need to register debugfs if no entries. This allows us to
1480 * choose between using dt_device or obd_device for debugfs.
1485 dt->dd_debugfs_entry = debugfs_create_dir(name,
1486 type->typ_debugfs_entry);
1487 ldebugfs_add_vars(dt->dd_debugfs_entry, list, dt);
1491 EXPORT_SYMBOL(dt_tunables_init);