4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2016, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/obdclass/dt_object.c
35 * Generic functions from dt_object.h
37 * Author: Nikita Danilov <nikita@clusterfs.com>
40 #define DEBUG_SUBSYSTEM S_CLASS
42 #include <linux/list.h>
44 #include <dt_object.h>
46 #include <lustre_fid.h>
47 #include <lustre_nodemap.h>
48 #include <lustre_quota.h>
50 /* context key constructor/destructor: dt_global_key_init, dt_global_key_fini */
51 LU_KEY_INIT(dt_global, struct dt_thread_info);
52 LU_KEY_FINI(dt_global, struct dt_thread_info);
54 struct lu_context_key dt_key = {
55 .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD | LCT_MG_THREAD | LCT_LOCAL,
56 .lct_init = dt_global_key_init,
57 .lct_fini = dt_global_key_fini
60 /* no lock is necessary to protect the list, because call-backs
61 * are added during system startup. Please refer to "struct dt_device".
63 void dt_txn_callback_add(struct dt_device *dev, struct dt_txn_callback *cb)
65 list_add(&cb->dtc_linkage, &dev->dd_txn_callbacks);
67 EXPORT_SYMBOL(dt_txn_callback_add);
69 void dt_txn_callback_del(struct dt_device *dev, struct dt_txn_callback *cb)
71 list_del_init(&cb->dtc_linkage);
73 EXPORT_SYMBOL(dt_txn_callback_del);
75 int dt_txn_hook_start(const struct lu_env *env,
76 struct dt_device *dev, struct thandle *th)
79 struct dt_txn_callback *cb;
84 list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
85 struct thandle *dtc_th = th;
87 if (cb->dtc_txn_start == NULL ||
88 !(cb->dtc_tag & env->le_ctx.lc_tags))
91 /* Usually dt_txn_hook_start is called from bottom device,
92 * and if the thandle has th_top, then we need use top
93 * thandle for the callback in the top thandle layer */
94 if (th->th_top != NULL)
97 rc = cb->dtc_txn_start(env, dtc_th, cb->dtc_cookie);
103 EXPORT_SYMBOL(dt_txn_hook_start);
105 int dt_txn_hook_stop(const struct lu_env *env, struct thandle *th)
107 struct dt_device *dev = th->th_dev;
108 struct dt_txn_callback *cb;
114 if (OBD_FAIL_CHECK(OBD_FAIL_DT_TXN_STOP))
117 list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
118 struct thandle *dtc_th = th;
120 if (cb->dtc_txn_stop == NULL ||
121 !(cb->dtc_tag & env->le_ctx.lc_tags))
124 /* Usually dt_txn_hook_stop is called from bottom device,
125 * and if the thandle has th_top, then we need use top
126 * thandle for the callback in the top thandle layer */
127 if (th->th_top != NULL)
130 rc = cb->dtc_txn_stop(env, dtc_th, cb->dtc_cookie);
136 EXPORT_SYMBOL(dt_txn_hook_stop);
138 void dt_txn_hook_commit(struct thandle *th)
140 struct dt_txn_callback *cb;
145 list_for_each_entry(cb, &th->th_dev->dd_txn_callbacks,
147 /* Right now, the bottom device (OSD) will use this hook
148 * commit to notify OSP, so we do not check and replace
149 * the thandle to top thandle now */
150 if (cb->dtc_txn_commit)
151 cb->dtc_txn_commit(th, cb->dtc_cookie);
154 EXPORT_SYMBOL(dt_txn_hook_commit);
156 int dt_device_init(struct dt_device *dev, struct lu_device_type *t)
158 INIT_LIST_HEAD(&dev->dd_txn_callbacks);
159 return lu_device_init(&dev->dd_lu_dev, t);
161 EXPORT_SYMBOL(dt_device_init);
163 void dt_device_fini(struct dt_device *dev)
165 lu_device_fini(&dev->dd_lu_dev);
167 EXPORT_SYMBOL(dt_device_fini);
169 int dt_object_init(struct dt_object *obj,
170 struct lu_object_header *h, struct lu_device *d)
173 return lu_object_init(&obj->do_lu, h, d);
175 EXPORT_SYMBOL(dt_object_init);
177 void dt_object_fini(struct dt_object *obj)
179 lu_object_fini(&obj->do_lu);
181 EXPORT_SYMBOL(dt_object_fini);
183 int dt_try_as_dir(const struct lu_env *env, struct dt_object *obj)
185 if (obj->do_index_ops == NULL)
186 obj->do_ops->do_index_try(env, obj, &dt_directory_features);
187 return obj->do_index_ops != NULL;
189 EXPORT_SYMBOL(dt_try_as_dir);
191 enum dt_format_type dt_mode_to_dft(__u32 mode)
193 enum dt_format_type result;
195 switch (mode & S_IFMT) {
200 result = DFT_REGULAR;
217 EXPORT_SYMBOL(dt_mode_to_dft);
220 * lookup fid for object named \a name in directory \a dir.
223 int dt_lookup_dir(const struct lu_env *env, struct dt_object *dir,
224 const char *name, struct lu_fid *fid)
226 if (dt_try_as_dir(env, dir))
227 return dt_lookup(env, dir, (struct dt_rec *)fid,
228 (const struct dt_key *)name);
231 EXPORT_SYMBOL(dt_lookup_dir);
233 /* this differs from dt_locate by top_dev as parameter
234 * but not one from lu_site */
235 struct dt_object *dt_locate_at(const struct lu_env *env,
236 struct dt_device *dev,
237 const struct lu_fid *fid,
238 struct lu_device *top_dev,
239 const struct lu_object_conf *conf)
241 struct lu_object *lo;
244 lo = lu_object_find_at(env, top_dev, fid, conf);
246 return ERR_PTR(PTR_ERR(lo));
250 list_for_each_entry(n, &lo->lo_header->loh_layers, lo_linkage) {
251 if (n->lo_dev == &dev->dd_lu_dev)
252 return container_of0(n, struct dt_object, do_lu);
255 return ERR_PTR(-ENOENT);
257 EXPORT_SYMBOL(dt_locate_at);
260 * find an object named \a entry in given \a dfh->dfh_o directory.
262 static int dt_find_entry(const struct lu_env *env, const char *entry, void *data)
264 struct dt_find_hint *dfh = data;
265 struct dt_device *dt = dfh->dfh_dt;
266 struct lu_fid *fid = dfh->dfh_fid;
267 struct dt_object *obj = dfh->dfh_o;
270 result = dt_lookup_dir(env, obj, entry, fid);
271 lu_object_put(env, &obj->do_lu);
273 obj = dt_locate(env, dt, fid);
275 result = PTR_ERR(obj);
282 * Abstract function which parses path name. This function feeds
283 * path component to \a entry_func.
285 int dt_path_parser(const struct lu_env *env,
286 char *path, dt_entry_func_t entry_func,
293 e = strsep(&path, "/");
298 if (!path || path[0] == '\0')
302 rc = entry_func(env, e, data);
311 dt_store_resolve(const struct lu_env *env, struct dt_device *dt,
312 const char *path, struct lu_fid *fid)
314 struct dt_thread_info *info = dt_info(env);
315 struct dt_find_hint *dfh = &info->dti_dfh;
316 struct dt_object *obj;
323 strlcpy(info->dti_buf, path, sizeof(info->dti_buf));
325 result = dt->dd_ops->dt_root_get(env, dt, fid);
327 obj = dt_locate(env, dt, fid);
330 result = dt_path_parser(env, info->dti_buf,
333 obj = ERR_PTR(result);
338 obj = ERR_PTR(result);
/* look up \a name in directory \a p and locate the resulting object;
 * NOTE(review): dropped parameters/body restored from context -- verify */
static struct dt_object *dt_reg_open(const struct lu_env *env,
				     struct dt_device *dt,
				     struct dt_object *p,
				     const char *name,
				     struct lu_fid *fid)
{
	struct dt_object *o;
	int result;

	result = dt_lookup_dir(env, p, name, fid);
	if (result == 0)
		o = dt_locate(env, dt, fid);
	else
		o = ERR_PTR(result);

	return o;
}
363 * Open dt object named \a filename from \a dirname directory.
364 * \param dt dt device
365 * \param fid on success, object fid is stored in *fid
367 struct dt_object *dt_store_open(const struct lu_env *env,
368 struct dt_device *dt,
370 const char *filename,
373 struct dt_object *file;
374 struct dt_object *dir;
376 dir = dt_store_resolve(env, dt, dirname, fid);
378 file = dt_reg_open(env, dt, dir,
380 lu_object_put(env, &dir->do_lu);
387 struct dt_object *dt_find_or_create(const struct lu_env *env,
388 struct dt_device *dt,
389 const struct lu_fid *fid,
390 struct dt_object_format *dof,
393 struct dt_object *dto;
399 dto = dt_locate(env, dt, fid);
403 LASSERT(dto != NULL);
404 if (dt_object_exists(dto))
407 th = dt_trans_create(env, dt);
409 GOTO(out, rc = PTR_ERR(th));
411 rc = dt_declare_create(env, dto, at, NULL, dof, th);
413 GOTO(trans_stop, rc);
415 rc = dt_trans_start_local(env, dt, th);
417 GOTO(trans_stop, rc);
419 dt_write_lock(env, dto, 0);
420 if (dt_object_exists(dto))
421 GOTO(unlock, rc = 0);
423 CDEBUG(D_OTHER, "create new object "DFID"\n", PFID(fid));
425 rc = dt_create(env, dto, at, NULL, dof, th);
428 LASSERT(dt_object_exists(dto));
430 dt_write_unlock(env, dto);
432 dt_trans_stop(env, dt, th);
435 lu_object_put(env, &dto->do_lu);
440 EXPORT_SYMBOL(dt_find_or_create);
442 /* dt class init function. */
443 int dt_global_init(void)
447 LU_CONTEXT_KEY_INIT(&dt_key);
448 result = lu_context_key_register(&dt_key);
452 void dt_global_fini(void)
454 lu_context_key_degister(&dt_key);
458 * Generic read helper. May return an error for partial reads.
460 * \param env lustre environment
461 * \param dt object to be read
462 * \param buf lu_buf to be filled, with buffer pointer and length
463 * \param pos position to start reading, updated as data is read
465 * \retval real size of data read
466 * \retval -ve errno on failure
468 int dt_read(const struct lu_env *env, struct dt_object *dt,
469 struct lu_buf *buf, loff_t *pos)
471 LASSERTF(dt != NULL, "dt is NULL when we want to read record\n");
472 return dt->do_body_ops->dbo_read(env, dt, buf, pos);
474 EXPORT_SYMBOL(dt_read);
477 * Read structures of fixed size from storage. Unlike dt_read(), using
478 * dt_record_read() will return an error for partial reads.
480 * \param env lustre environment
481 * \param dt object to be read
482 * \param buf lu_buf to be filled, with buffer pointer and length
483 * \param pos position to start reading, updated as data is read
485 * \retval 0 on successfully reading full buffer
486 * \retval -EFAULT on short read
487 * \retval -ve errno on failure
489 int dt_record_read(const struct lu_env *env, struct dt_object *dt,
490 struct lu_buf *buf, loff_t *pos)
494 LASSERTF(dt != NULL, "dt is NULL when we want to read record\n");
496 size = dt->do_body_ops->dbo_read(env, dt, buf, pos);
499 return (size == (ssize_t)buf->lb_len) ? 0 : -EFAULT;
501 EXPORT_SYMBOL(dt_record_read);
503 int dt_record_write(const struct lu_env *env, struct dt_object *dt,
504 const struct lu_buf *buf, loff_t *pos, struct thandle *th)
508 LASSERTF(dt != NULL, "dt is NULL when we want to write record\n");
510 LASSERT(dt->do_body_ops);
511 LASSERT(dt->do_body_ops->dbo_write);
513 size = dt->do_body_ops->dbo_write(env, dt, buf, pos, th, 1);
516 return (size == (ssize_t)buf->lb_len) ? 0 : -EFAULT;
518 EXPORT_SYMBOL(dt_record_write);
520 int dt_declare_version_set(const struct lu_env *env, struct dt_object *o,
524 char *xname = XATTR_NAME_VERSION;
528 vbuf.lb_len = sizeof(dt_obj_version_t);
529 return dt_declare_xattr_set(env, o, &vbuf, xname, 0, th);
532 EXPORT_SYMBOL(dt_declare_version_set);
534 void dt_version_set(const struct lu_env *env, struct dt_object *o,
535 dt_obj_version_t version, struct thandle *th)
538 char *xname = XATTR_NAME_VERSION;
542 vbuf.lb_buf = &version;
543 vbuf.lb_len = sizeof(version);
545 rc = dt_xattr_set(env, o, &vbuf, xname, 0, th);
547 CDEBUG(D_INODE, "Can't set version, rc %d\n", rc);
550 EXPORT_SYMBOL(dt_version_set);
552 dt_obj_version_t dt_version_get(const struct lu_env *env, struct dt_object *o)
555 char *xname = XATTR_NAME_VERSION;
556 dt_obj_version_t version;
560 vbuf.lb_buf = &version;
561 vbuf.lb_len = sizeof(version);
562 rc = dt_xattr_get(env, o, &vbuf, xname);
563 if (rc != sizeof(version)) {
564 CDEBUG(D_INODE, "Can't get version, rc %d\n", rc);
569 EXPORT_SYMBOL(dt_version_get);
571 /* list of all supported index types */
574 const struct dt_index_features dt_directory_features;
575 EXPORT_SYMBOL(dt_directory_features);
578 const struct dt_index_features dt_otable_features;
579 EXPORT_SYMBOL(dt_otable_features);
582 const struct dt_index_features dt_lfsck_orphan_features = {
584 .dif_keysize_min = sizeof(struct lu_fid),
585 .dif_keysize_max = sizeof(struct lu_fid),
586 .dif_recsize_min = sizeof(struct lu_orphan_rec),
587 .dif_recsize_max = sizeof(struct lu_orphan_rec),
590 EXPORT_SYMBOL(dt_lfsck_orphan_features);
593 const struct dt_index_features dt_lfsck_features = {
594 .dif_flags = DT_IND_UPDATE,
595 .dif_keysize_min = sizeof(struct lu_fid),
596 .dif_keysize_max = sizeof(struct lu_fid),
597 .dif_recsize_min = sizeof(__u8),
598 .dif_recsize_max = sizeof(__u8),
601 EXPORT_SYMBOL(dt_lfsck_features);
603 /* accounting indexes */
604 const struct dt_index_features dt_acct_features = {
605 .dif_flags = DT_IND_UPDATE,
606 .dif_keysize_min = sizeof(__u64), /* 64-bit uid/gid */
607 .dif_keysize_max = sizeof(__u64), /* 64-bit uid/gid */
608 .dif_recsize_min = sizeof(struct lquota_acct_rec), /* 16 bytes */
609 .dif_recsize_max = sizeof(struct lquota_acct_rec), /* 16 bytes */
612 EXPORT_SYMBOL(dt_acct_features);
614 /* global quota files */
615 const struct dt_index_features dt_quota_glb_features = {
616 .dif_flags = DT_IND_UPDATE,
617 /* a different key would have to be used for per-directory quota */
618 .dif_keysize_min = sizeof(__u64), /* 64-bit uid/gid */
619 .dif_keysize_max = sizeof(__u64), /* 64-bit uid/gid */
620 .dif_recsize_min = sizeof(struct lquota_glb_rec), /* 32 bytes */
621 .dif_recsize_max = sizeof(struct lquota_glb_rec), /* 32 bytes */
624 EXPORT_SYMBOL(dt_quota_glb_features);
626 /* slave quota files */
627 const struct dt_index_features dt_quota_slv_features = {
628 .dif_flags = DT_IND_UPDATE,
629 /* a different key would have to be used for per-directory quota */
630 .dif_keysize_min = sizeof(__u64), /* 64-bit uid/gid */
631 .dif_keysize_max = sizeof(__u64), /* 64-bit uid/gid */
632 .dif_recsize_min = sizeof(struct lquota_slv_rec), /* 8 bytes */
633 .dif_recsize_max = sizeof(struct lquota_slv_rec), /* 8 bytes */
636 EXPORT_SYMBOL(dt_quota_slv_features);
638 /* nodemap files, nodemap_rec size asserted in nodemap_storage.c */
639 const struct dt_index_features dt_nodemap_features = {
640 .dif_flags = DT_IND_UPDATE,
641 .dif_keysize_min = sizeof(__u64), /* 64-bit nodemap/record id */
642 .dif_keysize_max = sizeof(__u64), /* 64-bit nodemap/record id */
643 .dif_recsize_min = sizeof(union nodemap_rec), /* 32 bytes */
644 .dif_recsize_max = sizeof(union nodemap_rec), /* 32 bytes */
647 EXPORT_SYMBOL(dt_nodemap_features);
649 /* helper function returning what dt_index_features structure should be used
650 * based on the FID sequence. This is used by OBD_IDX_READ RPC */
651 static inline const struct dt_index_features *dt_index_feat_select(__u64 seq,
654 if (seq == FID_SEQ_QUOTA_GLB) {
655 /* global quota index */
657 /* global quota index should be a regular file */
658 return ERR_PTR(-ENOENT);
659 return &dt_quota_glb_features;
660 } else if (seq == FID_SEQ_QUOTA) {
661 /* quota slave index */
663 /* slave index should be a regular file */
664 return ERR_PTR(-ENOENT);
665 return &dt_quota_slv_features;
666 } else if (seq == FID_SEQ_LAYOUT_RBTREE){
667 return &dt_lfsck_orphan_features;
668 } else if (seq >= FID_SEQ_NORMAL) {
669 /* object is part of the namespace, verify that it is a
672 /* sorry, we can only deal with directory */
673 return ERR_PTR(-ENOTDIR);
674 return &dt_directory_features;
677 return ERR_PTR(-EOPNOTSUPP);
681 * Fill a lu_idxpage with key/record pairs read for transfer via OBD_IDX_READ
684 * \param env - is the environment passed by the caller
685 * \param lp - is a pointer to the lu_page to fill
686 * \param nob - is the maximum number of bytes that should be copied
687 * \param iops - is the index operation vector associated with the index object
688 * \param it - is a pointer to the current iterator
689 * \param attr - is the index attribute to pass to iops->rec()
690 * \param arg - is a pointer to the idx_info structure
692 static int dt_index_page_build(const struct lu_env *env, union lu_page *lp,
693 size_t nob, const struct dt_it_ops *iops,
694 struct dt_it *it, __u32 attr, void *arg)
696 struct idx_info *ii = (struct idx_info *)arg;
697 struct lu_idxpage *lip = &lp->lp_idx;
703 if (nob < LIP_HDR_SIZE)
706 /* initialize the header of the new container */
707 memset(lip, 0, LIP_HDR_SIZE);
708 lip->lip_magic = LIP_MAGIC;
711 /* compute size needed to store a key/record pair */
712 size = ii->ii_recsize + ii->ii_keysize;
713 if ((ii->ii_flags & II_FL_NOHASH) == 0)
714 /* add hash if the client wants it */
715 size += sizeof(__u64);
717 entry = lip->lip_entries;
719 char *tmp_entry = entry;
725 /* fetch 64-bit hash value */
726 hash = iops->store(env, it);
727 ii->ii_hash_end = hash;
729 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_IDX_READ_BREAK)) {
730 if (lip->lip_nr != 0)
735 if (lip->lip_nr == 0)
736 GOTO(out, rc = -EINVAL);
740 if (!(ii->ii_flags & II_FL_NOHASH)) {
741 /* client wants to the 64-bit hash value associated with
743 memcpy(tmp_entry, &hash, sizeof(hash));
744 tmp_entry += sizeof(hash);
747 if (ii->ii_flags & II_FL_VARKEY)
748 keysize = iops->key_size(env, it);
750 keysize = ii->ii_keysize;
752 if (!(ii->ii_flags & II_FL_NOKEY)) {
753 /* then the key value */
754 key = iops->key(env, it);
755 memcpy(tmp_entry, key, keysize);
756 tmp_entry += keysize;
759 /* and finally the record */
760 rc = iops->rec(env, it, (struct dt_rec *)tmp_entry, attr);
765 /* hash/key/record successfully copied! */
767 if (unlikely(lip->lip_nr == 1 && ii->ii_count == 0))
768 ii->ii_hash_start = hash;
770 if (ii->ii_flags & II_FL_VARREC)
771 recsize = iops->rec_size(env, it, attr);
773 recsize = ii->ii_recsize;
775 entry = tmp_entry + recsize;
779 /* move on to the next record */
781 rc = iops->next(env, it);
782 } while (rc == -ESTALE);
788 if (rc >= 0 && lip->lip_nr > 0)
789 /* one more container */
792 /* no more entries */
793 ii->ii_hash_end = II_END_OFF;
799 * Walk index and fill lu_page containers with key/record pairs
801 * \param env - is the environment passed by the caller
802 * \param obj - is the index object to parse
803 * \param rdpg - is the lu_rdpg descriptor associated with the transfer
804 * \param filler - is the callback function responsible for filling a lu_page
805 * with key/record pairs in the format wanted by the caller.
806 * If NULL, uses dt_index_page_build
807 * \param arg - is an opaq argument passed to the filler function
809 * \retval sum (in bytes) of all filled lu_pages
810 * \retval -ve errno on failure
812 int dt_index_walk(const struct lu_env *env, struct dt_object *obj,
813 const struct lu_rdpg *rdpg, dt_index_page_build_t filler,
817 const struct dt_it_ops *iops;
818 size_t pageidx, nob, nlupgs = 0;
822 LASSERT(rdpg->rp_pages != NULL);
823 LASSERT(obj->do_index_ops != NULL);
826 filler = dt_index_page_build;
828 nob = rdpg->rp_count;
832 /* Iterate through index and fill containers from @rdpg */
833 iops = &obj->do_index_ops->dio_it;
834 LASSERT(iops != NULL);
835 it = iops->init(env, obj, rdpg->rp_attrs);
839 rc = iops->load(env, it, rdpg->rp_hash);
842 * Iterator didn't find record with exactly the key requested.
844 * It is currently either
846 * - positioned above record with key less than
847 * requested---skip it.
848 * - or not positioned at all (is in IAM_IT_SKEWED
849 * state)---position it on the next item.
851 rc = iops->next(env, it);
860 /* Fill containers one after the other. There might be multiple
861 * containers per physical page.
863 * At this point and across for-loop:
864 * rc == 0 -> ok, proceed.
865 * rc > 0 -> end of index.
866 * rc < 0 -> error. */
867 for (pageidx = 0; rc == 0 && nob > 0; pageidx++) {
871 LASSERT(pageidx < rdpg->rp_npages);
872 lp = kmap(rdpg->rp_pages[pageidx]);
875 for (i = 0; i < LU_PAGE_COUNT; i++, lp++, nob -= LU_PAGE_SIZE) {
876 rc = filler(env, lp, min_t(size_t, nob, LU_PAGE_SIZE),
877 iops, it, rdpg->rp_attrs, arg);
880 /* one more lu_page */
886 kunmap(rdpg->rp_pages[i]);
894 rc = min_t(size_t, nlupgs * LU_PAGE_SIZE, rdpg->rp_count);
898 EXPORT_SYMBOL(dt_index_walk);
901 * Walk key/record pairs of an index and copy them into 4KB containers to be
902 * transferred over the network. This is the common handler for OBD_IDX_READ
905 * \param env - is the environment passed by the caller
906 * \param dev - is the dt_device storing the index
907 * \param ii - is the idx_info structure packed by the client in the
908 * OBD_IDX_READ request
909 * \param rdpg - is the lu_rdpg descriptor
911 * \retval on success, return sum (in bytes) of all filled containers
912 * \retval appropriate error otherwise.
914 int dt_index_read(const struct lu_env *env, struct dt_device *dev,
915 struct idx_info *ii, const struct lu_rdpg *rdpg)
917 const struct dt_index_features *feat;
918 struct dt_object *obj;
922 /* rp_count shouldn't be null and should be a multiple of the container
924 if (rdpg->rp_count == 0 || (rdpg->rp_count & (LU_PAGE_SIZE - 1)) != 0)
927 if (!fid_is_quota(&ii->ii_fid) && !fid_is_layout_rbtree(&ii->ii_fid) &&
928 !fid_is_norm(&ii->ii_fid))
931 /* lookup index object subject to the transfer */
932 obj = dt_locate(env, dev, &ii->ii_fid);
934 RETURN(PTR_ERR(obj));
935 if (dt_object_exists(obj) == 0)
936 GOTO(out, rc = -ENOENT);
938 /* fetch index features associated with index object */
939 feat = dt_index_feat_select(fid_seq(&ii->ii_fid),
940 lu_object_attr(&obj->do_lu));
942 GOTO(out, rc = PTR_ERR(feat));
944 /* load index feature if not done already */
945 if (obj->do_index_ops == NULL) {
946 rc = obj->do_ops->do_index_try(env, obj, feat);
951 /* fill ii_flags with supported index features */
952 ii->ii_flags &= (II_FL_NOHASH | II_FL_NOKEY | II_FL_VARKEY |
955 if (!(feat->dif_flags & DT_IND_VARKEY))
956 ii->ii_keysize = feat->dif_keysize_max;
958 if (!(feat->dif_flags & DT_IND_VARREC))
959 ii->ii_recsize = feat->dif_recsize_max;
961 if (feat->dif_flags & DT_IND_NONUNQ)
962 /* key isn't necessarily unique */
963 ii->ii_flags |= II_FL_NONUNQ;
965 if (!fid_is_layout_rbtree(&ii->ii_fid)) {
966 dt_read_lock(env, obj, 0);
967 /* fetch object version before walking the index */
968 ii->ii_version = dt_version_get(env, obj);
971 /* walk the index and fill lu_idxpages with key/record pairs */
972 rc = dt_index_walk(env, obj, rdpg, dt_index_page_build, ii);
973 if (!fid_is_layout_rbtree(&ii->ii_fid))
974 dt_read_unlock(env, obj);
978 LASSERT(ii->ii_count == 0);
979 ii->ii_hash_end = II_END_OFF;
984 lu_object_put(env, &obj->do_lu);
987 EXPORT_SYMBOL(dt_index_read);
989 #ifdef CONFIG_PROC_FS
990 int lprocfs_dt_blksize_seq_show(struct seq_file *m, void *v)
992 struct dt_device *dt = m->private;
993 struct obd_statfs osfs;
995 int rc = dt_statfs(NULL, dt, &osfs);
997 seq_printf(m, "%u\n", (unsigned) osfs.os_bsize);
1000 EXPORT_SYMBOL(lprocfs_dt_blksize_seq_show);
1002 int lprocfs_dt_kbytestotal_seq_show(struct seq_file *m, void *v)
1004 struct dt_device *dt = m->private;
1005 struct obd_statfs osfs;
1007 int rc = dt_statfs(NULL, dt, &osfs);
1009 __u32 blk_size = osfs.os_bsize >> 10;
1010 __u64 result = osfs.os_blocks;
1012 while (blk_size >>= 1)
1015 seq_printf(m, "%llu\n", result);
1019 EXPORT_SYMBOL(lprocfs_dt_kbytestotal_seq_show);
1021 int lprocfs_dt_kbytesfree_seq_show(struct seq_file *m, void *v)
1023 struct dt_device *dt = m->private;
1024 struct obd_statfs osfs;
1026 int rc = dt_statfs(NULL, dt, &osfs);
1028 __u32 blk_size = osfs.os_bsize >> 10;
1029 __u64 result = osfs.os_bfree;
1031 while (blk_size >>= 1)
1034 seq_printf(m, "%llu\n", result);
1038 EXPORT_SYMBOL(lprocfs_dt_kbytesfree_seq_show);
1040 int lprocfs_dt_kbytesavail_seq_show(struct seq_file *m, void *v)
1042 struct dt_device *dt = m->private;
1043 struct obd_statfs osfs;
1045 int rc = dt_statfs(NULL, dt, &osfs);
1047 __u32 blk_size = osfs.os_bsize >> 10;
1048 __u64 result = osfs.os_bavail;
1050 while (blk_size >>= 1)
1053 seq_printf(m, "%llu\n", result);
1057 EXPORT_SYMBOL(lprocfs_dt_kbytesavail_seq_show);
1059 int lprocfs_dt_filestotal_seq_show(struct seq_file *m, void *v)
1061 struct dt_device *dt = m->private;
1062 struct obd_statfs osfs;
1064 int rc = dt_statfs(NULL, dt, &osfs);
1066 seq_printf(m, "%llu\n", osfs.os_files);
1069 EXPORT_SYMBOL(lprocfs_dt_filestotal_seq_show);
1071 int lprocfs_dt_filesfree_seq_show(struct seq_file *m, void *v)
1073 struct dt_device *dt = m->private;
1074 struct obd_statfs osfs;
1076 int rc = dt_statfs(NULL, dt, &osfs);
1078 seq_printf(m, "%llu\n", osfs.os_ffree);
1081 EXPORT_SYMBOL(lprocfs_dt_filesfree_seq_show);
1083 #endif /* CONFIG_PROC_FS */