4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2016, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/osd-zfs/osd_index.c
34 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
35 * Author: Mike Pershin <tappro@whamcloud.com>
38 #define DEBUG_SUBSYSTEM S_OSD
40 #include <lustre_ver.h>
41 #include <libcfs/libcfs.h>
42 #include <obd_support.h>
43 #include <lustre_net.h>
45 #include <obd_class.h>
46 #include <lustre_disk.h>
47 #include <lustre_fid.h>
49 #include "osd_internal.h"
51 #include <sys/dnode.h>
55 #include <sys/spa_impl.h>
56 #include <sys/zfs_znode.h>
57 #include <sys/dmu_tx.h>
58 #include <sys/dmu_objset.h>
59 #include <sys/dsl_prop.h>
60 #include <sys/sa_impl.h>
63 static inline int osd_object_is_zap(dnode_t *dn)
65 return (dn->dn_type == DMU_OT_DIRECTORY_CONTENTS ||
66 dn->dn_type == DMU_OT_USERGROUP_USED);
69 /* We don't actually have direct access to the zap_hashbits() function
70 * so just pretend like we do for now. If this ever breaks we can look at
72 #define zap_hashbits(zc) 48
75 * | cd (16 bits) | hash (48 bits) |
76 * we need it in other form:
77 * |0| hash (48 bit) | cd (15 bit) |
78 * to be a full 64-bit ordered hash so that Lustre readdir can use it to merge
79 * the readdir hashes from multiple directory stripes uniformly on the client.
80 * Another point is sign bit, the hash range should be in [0, 2^63-1] because
81 * loff_t (for llseek) needs to be a positive value. This means the "cd" field
82 * should only be the low 15 bits.
84 uint64_t osd_zap_cursor_serialize(zap_cursor_t *zc)
86 uint64_t zfs_hash = zap_cursor_serialize(zc) & (~0ULL >> 1);
88 return (zfs_hash >> zap_hashbits(zc)) |
89 (zfs_hash << (63 - zap_hashbits(zc)));
92 void osd_zap_cursor_init_serialized(zap_cursor_t *zc, struct objset *os,
93 uint64_t id, uint64_t dirhash)
95 uint64_t zfs_hash = ((dirhash << zap_hashbits(zc)) & (~0ULL >> 1)) |
96 (dirhash >> (63 - zap_hashbits(zc)));
98 zap_cursor_init_serialized(zc, os, id, zfs_hash);
/*
 * Allocate a zap_cursor_t for object @id in @os and position it at @dirhash.
 *
 * NOTE(review): the cursor allocation, the -ENOMEM return, the assignment of
 * the result to *zc and the final return are missing from this copy of the
 * file - compare with upstream osd_index.c.
 */
int osd_zap_cursor_init(zap_cursor_t **zc, struct objset *os,
			uint64_t id, uint64_t dirhash)
	/* allocation of the temporary cursor (t) belongs here */
	if (unlikely(t == NULL))
	osd_zap_cursor_init_serialized(t, os, id, dirhash);
/* Release a cursor created by osd_zap_cursor_init().
 * NOTE(review): the function body is missing from this copy of the file. */
void osd_zap_cursor_fini(zap_cursor_t *zc)
122 static inline void osd_obj_cursor_init_serialized(zap_cursor_t *zc,
123 struct osd_object *o,
126 struct osd_device *d = osd_obj2dev(o);
127 osd_zap_cursor_init_serialized(zc, d->od_os,
128 o->oo_dn->dn_object, dirhash);
131 static inline int osd_obj_cursor_init(zap_cursor_t **zc, struct osd_object *o,
134 struct osd_device *d = osd_obj2dev(o);
135 return osd_zap_cursor_init(zc, d->od_os, o->oo_dn->dn_object, dirhash);
/*
 * Allocate and set up a generic index iterator for @dt.
 * Returns the iterator cast to (struct dt_it *) or ERR_PTR on failure.
 *
 * NOTE(review): several lines (attr parameter, braces, lu_object_get/
 * init of it fields, rc check) are missing from this copy of the file.
 */
static struct dt_it *osd_index_it_init(const struct lu_env *env,
				       struct dt_object *dt,
	struct osd_thread_info *info = osd_oti_get(env);
	struct osd_zap_it *it;
	struct osd_object *obj = osd_dt_obj(dt);
	struct lu_object *lo = &dt->do_lu;

	/* a concurrent destroy can race with iteration start */
	if (obj->oo_destroyed)
		RETURN(ERR_PTR(-ENOENT));

	LASSERT(lu_object_exists(lo));

	/* iterators come from a dedicated slab cache */
	OBD_SLAB_ALLOC_PTR_GFP(it, osd_zapit_cachep, GFP_NOFS);
		RETURN(ERR_PTR(-ENOMEM));

	rc = osd_obj_cursor_init(&it->ozi_zc, obj, 0);
		OBD_SLAB_FREE_PTR(it, osd_zapit_cachep);

	RETURN((struct dt_it *)it);
/* Tear down an iterator created by osd_index_it_init(): release the ZAP
 * cursor, drop the reference on the object and free the iterator.
 * NOTE(review): the assignment of @obj (from it->ozi_obj) is missing from
 * this copy of the file. */
static void osd_index_it_fini(const struct lu_env *env, struct dt_it *di)
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	struct osd_object *obj;

	LASSERT(it->ozi_obj);

	osd_zap_cursor_fini(it->ozi_zc);
	/* drop the reference taken at init time */
	lu_object_put(env, &obj->oo_dt.do_lu);
	OBD_SLAB_FREE_PTR(it, osd_zapit_cachep);
static void osd_index_it_put(const struct lu_env *env, struct dt_it *di)
	/* intentionally a no-op: references are taken at init/retrieve time
	 * and dropped at fini time */
/* Append the optional file-type attribute after the entry name in @ent
 * (aligned to sizeof(struct luda_type)) and convert lde_attrs to
 * little-endian wire order.
 * NOTE(review): the len/type parameters on the signature line are missing
 * from this copy of the file. */
static inline void osd_it_append_attrs(struct lu_dirent *ent, __u32 attr,
	const unsigned align = sizeof(struct luda_type) - 1;
	struct luda_type *lt;

	/* check if file type is required */
	if (attr & LUDA_TYPE) {
		/* round the name length up so luda_type is aligned */
		len = (len + align) & ~align;
		lt = (void *)ent->lde_name + len;
		lt->lt_type = cpu_to_le16(DTTOIF(type));
		ent->lde_attrs |= LUDA_TYPE;

	/* lde_attrs travels over the wire in little-endian */
	ent->lde_attrs = cpu_to_le32(ent->lde_attrs);
217 * Get the object's FID from its LMA EA.
219 * \param[in] env pointer to the thread context
220 * \param[in] osd pointer to the OSD device
221 * \param[in] oid the object's local identifier
222 * \param[out] fid the buffer to hold the object's FID
224 * \retval 0 for success
225 * \retval negative error number on failure
static int osd_get_fid_by_oid(const struct lu_env *env, struct osd_device *osd,
			      uint64_t oid, struct lu_fid *fid)
	struct objset		*os	  = osd->od_os;
	struct osd_thread_info	*oti	  = osd_oti_get(env);
	struct lustre_mdt_attrs	*lma	  =
		(struct lustre_mdt_attrs *)oti->oti_buf;
	nvlist_t		*sa_xattr = NULL;
	sa_handle_t		*sa_hdl	  = NULL;
	uchar_t			*nv_value = NULL;
	uint64_t		 xattr	  = ZFS_NO_OBJECT;

	/* fast path: LMA lives in the SA-packed xattr list */
	rc = __osd_xattr_load(osd, oid, &sa_xattr);

	rc = -nvlist_lookup_byte_array(sa_xattr, XATTR_NAME_LMA, &nv_value,

	/* LMA must fit the per-thread scratch buffer */
	if (unlikely(size > sizeof(oti->oti_buf)))
		GOTO(out, rc = -ERANGE);

	memcpy(lma, nv_value, size);

	/* slow path: LMA was too big for SA, look in the xattr directory */
	rc = -sa_handle_get(os, oid, NULL, SA_HDL_PRIVATE, &sa_hdl);

	rc = -sa_lookup(sa_hdl, SA_ZPL_XATTR(osd), &xattr, 8);
	sa_handle_destroy(sa_hdl);

	buf.lb_len = sizeof(oti->oti_buf);
	rc = __osd_xattr_get_large(env, osd, xattr, &buf,
				   XATTR_NAME_LMA, &size);

	/* a truncated LMA is corruption, not absence */
	if (size < sizeof(*lma))
		GOTO(out, rc = -EIO);

	/* LMA is stored little-endian on disk */
	lustre_lma_swab(lma);
	if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
		     CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
		CWARN("%s: unsupported incompat LMA feature(s) %#x for "
		      "oid = %#llx\n", osd->od_svname,
		      lma->lma_incompat & ~LMA_INCOMPAT_SUPP, oid);
		GOTO(out, rc = -EOPNOTSUPP);

	*fid = lma->lma_self_fid;

	if (sa_xattr != NULL)
		nvlist_free(sa_xattr);
305 * As we don't know FID, we can't use LU object, so this function
306 * partially duplicate __osd_xattr_get() which is built around
307 * LU-object and uses it to cache data like regular EA dnode, etc
/* Resolve the parent's FID by reading SA_ZPL_PARENT from the child's own
 * attributes and then translating that dnode number via its LMA.
 * NOTE(review): the o/fid parameters on the signature and the sa_hdl
 * declaration are missing from this copy of the file. */
static int osd_find_parent_by_dnode(const struct lu_env *env,
	struct osd_device *osd = osd_obj2dev(osd_dt_obj(o));
	uint64_t dnode = ZFS_NO_OBJECT;

	/* first of all, get parent dnode from own attributes */
	LASSERT(osd_dt_obj(o)->oo_dn);
	rc = -sa_handle_get(osd->od_os, osd_dt_obj(o)->oo_dn->dn_object,
			    NULL, SA_HDL_PRIVATE, &sa_hdl);

	rc = -sa_lookup(sa_hdl, SA_ZPL_PARENT(osd), &dnode, 8);
	sa_handle_destroy(sa_hdl);
	/* translate the parent dnode number into a FID via its LMA */
	rc = osd_get_fid_by_oid(env, osd, dnode, fid);
/* Find the parent FID of @o: primarily from the first LinkEA record,
 * falling back to the parent dnode's LMA when no LinkEA exists.
 * NOTE(review): the fid output parameter, rc declarations and several
 * braces are missing from this copy of the file. */
static int osd_find_parent_fid(const struct lu_env *env, struct dt_object *o,
	struct link_ea_header *leh;
	struct link_ea_entry *lee;

	/* try the per-thread buffer first; fall back to allocation below */
	buf.lb_buf = osd_oti_get(env)->oti_buf;
	buf.lb_len = sizeof(osd_oti_get(env)->oti_buf);

	rc = osd_xattr_get(env, o, &buf, XATTR_NAME_LINK);
		/* probe for the real size, then allocate a buffer that fits */
		rc = osd_xattr_get(env, o, &LU_BUF_NULL, XATTR_NAME_LINK);
		OBD_ALLOC(buf.lb_buf, rc);
		if (buf.lb_buf == NULL)
		rc = osd_xattr_get(env, o, &buf, XATTR_NAME_LINK);

	/* must hold at least a header and one entry */
	if (rc < sizeof(*leh) + sizeof(*lee))
		GOTO(out, rc = -EINVAL);

	/* LinkEA may have been written by the opposite endianness */
	if (leh->leh_magic == __swab32(LINK_EA_MAGIC)) {
		leh->leh_magic = LINK_EA_MAGIC;
		leh->leh_reccount = __swab32(leh->leh_reccount);
		leh->leh_len = __swab64(leh->leh_len);
	if (leh->leh_magic != LINK_EA_MAGIC)
		GOTO(out, rc = -EINVAL);
	if (leh->leh_reccount == 0)
		GOTO(out, rc = -ENODATA);

	/* the first entry carries the (big-endian) parent FID */
	lee = (struct link_ea_entry *)(leh + 1);
	fid_be_to_cpu(fid, (const struct lu_fid *)&lee->lee_parent_fid);

	if (buf.lb_buf != osd_oti_get(env)->oti_buf)
		OBD_FREE(buf.lb_buf, buf.lb_len);

	/* this block can be enabled for additional verification
	 * it's trying to match FID from LinkEA vs. FID from LMA */
	rc2 = osd_find_parent_by_dnode(env, o, &fid2);
	if (lu_fid_eq(fid, &fid2) == 0)
		CERROR("wrong parent: "DFID" != "DFID"\n",
		       PFID(fid), PFID(&fid2));

	/* no LinkEA is found, let's try to find the fid in parent's LMA */
	if (unlikely(rc != 0))
		rc = osd_find_parent_by_dnode(env, o, fid);
/* Directory lookup: map @key (a name) to a FID in @rec.  "." returns the
 * object's own FID, ".." is resolved via osd_find_parent_fid(), anything
 * else goes through the directory ZAP; insane stored FIDs fall back to the
 * target's LMA.  Returns 1 on success, negative errno on failure.
 * NOTE(review): the nested "if (name[1] == 0)" guard for "." and several
 * braces are missing from this copy of the file. */
static int osd_dir_lookup(const struct lu_env *env, struct dt_object *dt,
			  struct dt_rec *rec, const struct dt_key *key)
	struct osd_thread_info *oti = osd_oti_get(env);
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	char *name = (char *)key;

	if (name[0] == '.') {
		/* "." resolves to the directory's own FID */
		const struct lu_fid *f = lu_object_fid(&dt->do_lu);
		memcpy(rec, f, sizeof(*f));
		} else if (name[1] == '.' && name[2] == 0) {
			/* ".." is not stored; derive it from LinkEA/LMA */
			rc = osd_find_parent_fid(env, dt, (struct lu_fid *)rec);
			RETURN(rc == 0 ? 1 : rc);

	memset(&oti->oti_zde.lzd_fid, 0, sizeof(struct lu_fid));
	rc = -zap_lookup(osd->od_os, obj->oo_dn->dn_object,
			 (char *)key, 8, sizeof(oti->oti_zde) / 8,
			 (void *)&oti->oti_zde);

	if (likely(fid_is_sane(&oti->oti_zde.lzd_fid))) {
		memcpy(rec, &oti->oti_zde.lzd_fid, sizeof(struct lu_fid));

	/* stored FID is bogus (e.g. pre-FID entry): read it from the LMA */
	rc = osd_get_fid_by_oid(env, osd, oti->oti_zde.lzd_reg.zde_dnode,
				(struct lu_fid *)rec);

	RETURN(rc == 0 ? 1 : (rc == -ENOENT ? -ENODATA : rc));
/* Declare a directory-entry insertion: reserve ZAP space in the transaction
 * and warm the OI cache for the target FID.
 * NOTE(review): the thandle parameter, the fid assignment from rec1 and the
 * else keyword are missing from this copy of the file. */
static int osd_declare_dir_insert(const struct lu_env *env,
				  struct dt_object *dt,
				  const struct dt_rec *rec,
				  const struct dt_key *key,
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	const struct dt_insert_rec *rec1;
	const struct lu_fid *fid;
	struct osd_thandle *oh;

	rec1 = (struct dt_insert_rec *)rec;
	LASSERT(fid != NULL);
	LASSERT(rec1->rec_type != 0);

	oh = container_of0(th, struct osd_thandle, ot_super);

	/* This is for inserting dot/dotdot for new created dir. */
	if (obj->oo_dn == NULL)
		object = DMU_NEW_OBJECT;
		object = obj->oo_dn->dn_object;

	/* do not specify the key as then DMU is trying to look it up
	 * which is very expensive. usually the layers above lookup
	 * before insertion */
	dmu_tx_hold_zap(oh->ot_tx, object, TRUE, NULL);

	/* pre-resolve the FID so dt_insert() can't deadlock on OI lookup */
	osd_idc_find_or_init(env, osd, fid);
481 * Put the osd object once done with it.
483 * \param obj osd object that needs to be put
/* Drop a reference on @obj via the underlying lu_object. */
static inline void osd_object_put(const struct lu_env *env,
				  struct osd_object *obj)
	lu_object_put(env, &obj->oo_dt.do_lu);
/* Check whether FID sequence @seq is served by this node according to the
 * local FLDB: returns non-zero when range->lsr_index matches our node id.
 * NOTE(review): the seq parameter and the rc error-return path are missing
 * from this copy of the file. */
static int osd_seq_exists(const struct lu_env *env, struct osd_device *osd,
	struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
	struct seq_server_site *ss = osd_seq_site(osd);

	LASSERT(ss->ss_server_fld != NULL);

	rc = osd_fld_lookup(env, osd, seq, range);
		CERROR("%s: Can not lookup fld for %#llx\n",

	RETURN(ss->ss_node_id == range->lsr_index);
/* Decide whether @fid belongs to a remote target.  Sequences outside the
 * FLDB, and any FID before the FLD service is up, are treated as local.
 * NOTE(review): the RETURN statements of the early-out branches are missing
 * from this copy of the file. */
int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
		   const struct lu_fid *fid)
	struct seq_server_site *ss = osd_seq_site(osd);

	/* FID seqs not in FLDB, must be local seq */
	if (unlikely(!fid_seq_in_fldb(fid_seq(fid))))

	/* If FLD is not being initialized yet, it only happens during the
	 * initialization, likely during mgs initialization, and we assume
	 * this is local FID. */
	if (ss == NULL || ss->ss_server_fld == NULL)

	/* Only check the local FLDB here */
	if (osd_seq_exists(env, osd, fid_seq(fid)))
537 * Inserts (key, value) pair in \a directory object.
539 * \param dt osd index object
540 * \param key key for index
541 * \param rec record reference
542 * \param th transaction handler
543 * \param ignore_quota update should not affect quota
546 * \retval -ve failure
/* Insert directory entry @key -> @rec into @dt's ZAP.  "." is never stored;
 * ".." updates the SA parent attribute instead of adding a record; an
 * existing ".." entry is overwritten via zap_update().
 * NOTE(review): several lines (rc declaration, GOTO/out labels, some braces)
 * are missing from this copy of the file. */
static int osd_dir_insert(const struct lu_env *env, struct dt_object *dt,
			  const struct dt_rec *rec, const struct dt_key *key,
			  struct thandle *th, int ignore_quota)
	struct osd_thread_info *oti = osd_oti_get(env);
	struct osd_object *parent = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(parent);
	struct dt_insert_rec *rec1 = (struct dt_insert_rec *)rec;
	const struct lu_fid *fid = rec1->rec_fid;
	struct osd_thandle *oh;
	struct osd_idmap_cache *idc;
	char *name = (char *)key;

	LASSERT(parent->oo_dn);
	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(parent));

	oh = container_of0(th, struct osd_thandle, ot_super);

	idc = osd_idc_find(env, osd, fid);
	if (unlikely(idc == NULL)) {
		/* this dt_insert() wasn't declared properly, so
		 * FID is missing in OI cache. we better do not
		 * lookup FID in FLDB/OI and don't risk to deadlock,
		 * but in some special cases (lfsck testing, etc)
		 * it's much simpler than fixing a caller */
		CERROR("%s: "DFID" wasn't declared for insert\n",
		       osd_name(osd), PFID(fid));
		idc = osd_idc_find_or_init(env, osd, fid);
			RETURN(PTR_ERR(idc));

	if (idc->oic_remote) {
		/* Insert remote entry */
		memset(&oti->oti_zde.lzd_reg, 0, sizeof(oti->oti_zde.lzd_reg));
		oti->oti_zde.lzd_reg.zde_type = IFTODT(rec1->rec_type & S_IFMT);

		if (unlikely(idc->oic_dnode == 0)) {
			/* for a reason OI cache wasn't filled properly */
			CERROR("%s: OIC for "DFID" isn't filled\n",
			       osd_name(osd), PFID(fid));

		if (name[0] == '.') {
			/* do not store ".", instead generate it
			 * during iteration */
			} else if (name[1] == '.' && name[2] == 0) {
				uint64_t dnode = idc->oic_dnode;
				if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_PARENT))

				/* update parent dnode in the child.
				 * later it will be used to generate ".." */
				rc = osd_object_sa_update(parent,

	/* the entry record must be exactly 8-byte multiples for zap_add() */
	CLASSERT(sizeof(oti->oti_zde.lzd_reg) == 8);
	CLASSERT(sizeof(oti->oti_zde) % 8 == 0);
	oti->oti_zde.lzd_reg.zde_type = IFTODT(rec1->rec_type & S_IFMT);
	oti->oti_zde.lzd_reg.zde_dnode = idc->oic_dnode;

	oti->oti_zde.lzd_fid = *fid;
	/* Insert (key,oid) into ZAP */
	rc = -zap_add(osd->od_os, parent->oo_dn->dn_object,
		      (char *)key, 8, sizeof(oti->oti_zde) / 8,
		      (void *)&oti->oti_zde, oh->ot_tx);
	if (unlikely(rc == -EEXIST &&
		     name[0] == '.' && name[1] == '.' && name[2] == 0))
		/* Update (key,oid) in ZAP */
		rc = -zap_update(osd->od_os, parent->oo_dn->dn_object,
				 (char *)key, 8, sizeof(oti->oti_zde) / 8,
				 (void *)&oti->oti_zde, oh->ot_tx);
/* Declare a directory-entry removal: reserve ZAP space in the transaction.
 * NOTE(review): the thandle parameter, dnode declaration and else keyword
 * are missing from this copy of the file. */
static int osd_declare_dir_delete(const struct lu_env *env,
				  struct dt_object *dt,
				  const struct dt_key *key,
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_thandle *oh;

	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));

	oh = container_of0(th, struct osd_thandle, ot_super);

	if (dt_object_exists(dt)) {
		dnode = obj->oo_dn->dn_object;
		dnode = DMU_NEW_OBJECT;

	/* do not specify the key as then DMU is trying to look it up
	 * which is very expensive. usually the layers above lookup
	 * before deletion */
	dmu_tx_hold_zap(oh->ot_tx, dnode, FALSE, NULL);
/* Remove entry @key from @dt's directory ZAP.  "." and ".." are special:
 * modern layouts don't store them, but old (Orion-era) ones may - see the
 * comment in the body.
 * NOTE(review): rc declaration, the bodies of the dot/dotdot branches and
 * some braces are missing from this copy of the file. */
static int osd_dir_delete(const struct lu_env *env, struct dt_object *dt,
			  const struct dt_key *key, struct thandle *th)
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	dnode_t *zap_dn = obj->oo_dn;
	char *name = (char *)key;

	oh = container_of0(th, struct osd_thandle, ot_super);

	/*
	 * In Orion . and .. were stored in the directory (not generated upon
	 * request as now). we preserve them for backward compatibility
	 */
	if (name[0] == '.') {
	} else if (name[1] == '.' && name[2] == 0) {

	/* Remove key from the ZAP */
	rc = -zap_remove(osd->od_os, zap_dn->dn_object,
			 (char *) key, oh->ot_tx);
	/* missing entries are tolerated, anything else is logged */
	if (unlikely(rc && rc != -ENOENT))
		CERROR("%s: zap_remove failed: rc = %d\n", osd->od_svname, rc);
/* Directory-iterator constructor: a thin wrapper over osd_index_it_init().
 * NOTE(review): the attr/unused parameter line and the per-directory
 * initialization between these lines are missing from this copy of the
 * file. */
static struct dt_it *osd_dir_it_init(const struct lu_env *env,
				     struct dt_object *dt,
	struct osd_zap_it *it;

	it = (struct osd_zap_it *)osd_index_it_init(env, dt, unused);

	RETURN((struct dt_it *)it);
721 * Move Iterator to record specified by \a key
723 * \param di osd iterator
724 * \param key key for index
726 * \retval +ve di points to record with least key not larger than key
727 * \retval 0 di points to exact matched key
728 * \retval -ve failure
/* Position the directory iterator at @key.  Only the empty key (rewind) is
 * supported - see the LASSERT below; dot/dotdot are handled by adjusting
 * the synthetic position counter.
 * NOTE(review): branch bodies (ozi_pos updates) and the final RETURN are
 * missing from this copy of the file. */
static int osd_dir_it_get(const struct lu_env *env,
			  struct dt_it *di, const struct dt_key *key)
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	struct osd_object *obj = it->ozi_obj;
	char *name = (char *)key;

	/* reset the cursor */
	zap_cursor_fini(it->ozi_zc);
	osd_obj_cursor_init_serialized(it->ozi_zc, obj, 0);

	/* XXX: implementation of the API is broken at the moment */
	LASSERT(((const char *)key)[0] == 0);

	if (name[0] == '.') {
	} else if (name[1] == '.' && name[2] == 0) {

	/* neither . nor .. - some real record */
static void osd_dir_it_put(const struct lu_env *env, struct dt_it *di)
	/* intentionally a no-op: references are taken at init/retrieve time
	 * and dropped at fini time */
779 * in Orion . and .. were stored in the directory, while ZPL
780 * and current osd-zfs generate them up on request. so, we
781 * need to ignore previously stored . and ..
/* Retrieve the current ZAP entry into @za, transparently skipping any
 * on-disk "." and ".." records left over from the Orion layout (they are
 * generated on the fly now).  Returns the zap_cursor_retrieve() result.
 * NOTE(review): the za parameter line, isdot/do keywords and branch bodies
 * are missing from this copy of the file. */
static int osd_index_retrieve_skip_dots(struct osd_zap_it *it,
		rc = -zap_cursor_retrieve(it->ozi_zc, za);
		if (unlikely(rc == 0 && za->za_name[0] == '.')) {
			if (za->za_name[1] == 0) {
			} else if (za->za_name[1] == '.' &&
				   za->za_name[2] == 0) {
			/* a legacy dot entry: step over it */
			zap_cursor_advance(it->ozi_zc);
	} while (unlikely(rc == 0 && isdot));
808 * to load a directory entry at a time and stored it in
809 * iterator's in-memory data structure.
 * \param di, struct osd_zap_it, iterator's in-memory structure
813 * \retval +ve, iterator reached to end
814 * \retval 0, iterator not reached to end
815 * \retval -ve, on error
/* Advance the directory iterator: the first two positions are the
 * synthesized "." and ".." entries; after that the underlying ZAP cursor
 * moves.  Returns +ve at end of directory, 0 when a record exists, -ve on
 * error.
 * NOTE(review): ozi_pos bookkeeping and return lines are missing from this
 * copy of the file. */
static int osd_dir_it_next(const struct lu_env *env, struct dt_it *di)
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	zap_attribute_t *za = &osd_oti_get(env)->oti_za;

	/* temp. storage should be enough for any key supported by ZFS */
	CLASSERT(sizeof(za->za_name) <= sizeof(it->ozi_name));

	/*
	 * the first ->next() moves the cursor to .
	 * the second ->next() moves the cursor to ..
	 * then we get to the real records and have to verify any exist
	 */
	if (it->ozi_pos <= 2) {

	zap_cursor_advance(it->ozi_zc);

	/*
	 * According to current API we need to return error if it's the last
	 * entry.  zap_cursor_advance() does not return any value. So we need
	 * to call retrieve to check if there is any record. We should make
	 * changes to Iterator API to not return status for this API
	 */
	rc = osd_index_retrieve_skip_dots(it, za);

	if (rc == -ENOENT) /* end of dir */
/* Return the name at the current iterator position: literal "." / ".." for
 * the synthetic positions, otherwise the ZAP entry name copied into the
 * iterator's own buffer (za is per-thread scratch and may be reused).
 * NOTE(review): rc declaration, braces and the error-path RETURN are
 * missing from this copy of the file. */
static struct dt_key *osd_dir_it_key(const struct lu_env *env,
				     const struct dt_it *di)
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	zap_attribute_t *za = &osd_oti_get(env)->oti_za;

	if (it->ozi_pos <= 1) {
		RETURN((struct dt_key *)".");
	} else if (it->ozi_pos == 2) {
		RETURN((struct dt_key *)"..");

	if ((rc = -zap_cursor_retrieve(it->ozi_zc, za)))

	/* copy out of the shared scratch buffer before returning */
	strcpy(it->ozi_name, za->za_name);

	RETURN((struct dt_key *)it->ozi_name);
/* Return the length of the name at the current iterator position (1 for
 * ".", 2 for "..", strlen of the ZAP name otherwise).
 * NOTE(review): rc declaration, the dot/dotdot return values and the final
 * RETURN are missing from this copy of the file. */
static int osd_dir_it_key_size(const struct lu_env *env, const struct dt_it *di)
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	zap_attribute_t *za = &osd_oti_get(env)->oti_za;

	if (it->ozi_pos <= 1) {
	} else if (it->ozi_pos == 2) {

	if ((rc = -zap_cursor_retrieve(it->ozi_zc, za)) == 0)
		rc = strlen(za->za_name);
/* Fill @dtrec (a lu_dirent) for the current iterator position: synthesized
 * "." / ".." records at positions 1 and 2, real ZAP records afterwards.
 * The record is looked up again by name to fetch the full luz_direntry
 * (the retrieve scratch only carries the name).
 * NOTE(review): rc/namelen declarations, GOTO labels and some braces are
 * missing from this copy of the file. */
static int osd_dir_it_rec(const struct lu_env *env, const struct dt_it *di,
			  struct dt_rec *dtrec, __u32 attr)
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	struct lu_dirent *lde = (struct lu_dirent *)dtrec;
	struct luz_direntry *zde = &osd_oti_get(env)->oti_zde;
	zap_attribute_t *za = &osd_oti_get(env)->oti_za;

	if (it->ozi_pos <= 1) {
		/* synthetic "." entry at fixed hash 1 */
		lde->lde_hash = cpu_to_le64(1);
		strcpy(lde->lde_name, ".");
		lde->lde_namelen = cpu_to_le16(1);
		lde->lde_fid = *lu_object_fid(&it->ozi_obj->oo_dt.do_lu);
		lde->lde_attrs = LUDA_FID;
		/* append lustre attributes */
		osd_it_append_attrs(lde, attr, 1, IFTODT(S_IFDIR));
		lde->lde_reclen = cpu_to_le16(lu_dirent_calc_size(1, attr));
	} else if (it->ozi_pos == 2) {
		/* synthetic ".." entry at fixed hash 2 */
		lde->lde_hash = cpu_to_le64(2);
		strcpy(lde->lde_name, "..");
		lde->lde_namelen = cpu_to_le16(2);
		lde->lde_attrs = LUDA_FID;
		/* append lustre attributes */
		osd_it_append_attrs(lde, attr, 2, IFTODT(S_IFDIR));
		lde->lde_reclen = cpu_to_le16(lu_dirent_calc_size(2, attr));
		rc = osd_find_parent_fid(env, &it->ozi_obj->oo_dt, &lde->lde_fid);
		/* ENOENT happens at the root of filesystem so ignore it */

	rc = -zap_cursor_retrieve(it->ozi_zc, za);
	if (unlikely(rc != 0))

	lde->lde_hash = cpu_to_le64(osd_zap_cursor_serialize(it->ozi_zc));
	namelen = strlen(za->za_name);
	if (namelen > NAME_MAX)
		GOTO(out, rc = -EOVERFLOW);
	strcpy(lde->lde_name, za->za_name);
	lde->lde_namelen = cpu_to_le16(namelen);

	/* entries must be arrays of 8-byte integers, at least 3 long */
	if (za->za_integer_length != 8 || za->za_num_integers < 3) {
		CERROR("%s: unsupported direntry format: %d %d\n",
		       osd_obj2dev(it->ozi_obj)->od_svname,
		       za->za_integer_length, (int)za->za_num_integers);
		GOTO(out, rc = -EIO);

	/* re-lookup by name: retrieve gave us the name but not the record */
	rc = -zap_lookup(it->ozi_zc->zc_objset, it->ozi_zc->zc_zapobj,
			 za->za_name, za->za_integer_length, 3, zde);

	lde->lde_fid = zde->lzd_fid;
	lde->lde_attrs = LUDA_FID;

	/* append lustre attributes */
	osd_it_append_attrs(lde, attr, namelen, zde->lzd_reg.zde_type);

	lde->lde_reclen = cpu_to_le16(lu_dirent_calc_size(namelen, attr));
/* Return the size the lu_dirent for the current position would occupy,
 * without filling it.  Mirrors the dot/dotdot handling of osd_dir_it_rec().
 * NOTE(review): the attr parameter line, namelen assignments for the dot
 * branches and GOTO labels are missing from this copy of the file. */
static int osd_dir_it_rec_size(const struct lu_env *env, const struct dt_it *di,
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	zap_attribute_t *za = &osd_oti_get(env)->oti_za;

	if (it->ozi_pos <= 1)
	else if (it->ozi_pos == 2)

		rc = lu_dirent_calc_size(namelen, attr);

	rc = -zap_cursor_retrieve(it->ozi_zc, za);
	if (unlikely(rc != 0))

	/* entries must be arrays of 8-byte integers, at least 3 long */
	if (za->za_integer_length != 8 || za->za_num_integers < 3) {
		CERROR("%s: unsupported direntry format: %d %d\n",
		       osd_obj2dev(it->ozi_obj)->od_svname,
		       za->za_integer_length, (int)za->za_num_integers);

	namelen = strlen(za->za_name);
	if (namelen > NAME_MAX)

	rc = lu_dirent_calc_size(namelen, attr);
/* Serialize the iterator position into a 64-bit readdir cookie; positions
 * at/before ".." use the synthetic value (elided branch body), real records
 * use the converted ZAP cursor hash.
 * NOTE(review): pos declaration, the dot-branch body and the final return
 * are missing from this copy of the file. */
static __u64 osd_dir_it_store(const struct lu_env *env, const struct dt_it *di)
	struct osd_zap_it *it = (struct osd_zap_it *)di;

	if (it->ozi_pos <= 2)
	pos = osd_zap_cursor_serialize(it->ozi_zc);
1030 * rc == 0 -> end of directory.
1031 * rc > 0 -> ok, proceed.
1032 * rc < 0 -> error. ( EOVERFLOW can be masked.)
/* Restore the iterator to the position serialized in @hash.
 * Returns 0 at end of directory, >0 when a record is available, <0 on
 * error (per the contract documented above this function).
 * NOTE(review): the ozi_pos restoration and the hash<=2 branch are missing
 * from this copy of the file. */
static int osd_dir_it_load(const struct lu_env *env,
			   const struct dt_it *di, __u64 hash)
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	struct osd_object *obj = it->ozi_obj;
	zap_attribute_t *za = &osd_oti_get(env)->oti_za;

	/* reset the cursor */
	zap_cursor_fini(it->ozi_zc);
	osd_obj_cursor_init_serialized(it->ozi_zc, obj, hash);

	/* to return whether the end has been reached */
	rc = osd_index_retrieve_skip_dots(it, za);
	else if (rc == -ENOENT)
/* dt_index_operations vector for FID-keyed directory objects.
 * NOTE(review): the ".dio_it = {" sub-structure opener between dio_delete
 * and .init is missing from this copy of the file. */
struct dt_index_operations osd_dir_ops = {
	.dio_lookup         = osd_dir_lookup,
	.dio_declare_insert = osd_declare_dir_insert,
	.dio_insert         = osd_dir_insert,
	.dio_declare_delete = osd_declare_dir_delete,
	.dio_delete         = osd_dir_delete,
		.init     = osd_dir_it_init,
		.fini     = osd_index_it_fini,
		.get      = osd_dir_it_get,
		.put      = osd_dir_it_put,
		.next     = osd_dir_it_next,
		.key      = osd_dir_it_key,
		.key_size = osd_dir_it_key_size,
		.rec      = osd_dir_it_rec,
		.rec_size = osd_dir_it_rec_size,
		.store    = osd_dir_it_store,
		.load     = osd_dir_it_load
1085 * Primitives for index files using binary keys.
1088 /* key integer_size is 8 */
1089 static int osd_prepare_key_uint64(struct osd_object *o, __u64 *dst,
1090 const struct dt_key *src)
1097 /* align keysize to 64bit */
1098 size = (o->oo_keysize + sizeof(__u64) - 1) / sizeof(__u64);
1099 size *= sizeof(__u64);
1101 LASSERT(size <= MAXNAMELEN);
1103 if (unlikely(size > o->oo_keysize))
1104 memset(dst + o->oo_keysize, 0, size - o->oo_keysize);
1105 memcpy(dst, (const char *)src, o->oo_keysize);
1107 return (size/sizeof(__u64));
/* Binary-key index lookup: pad @key to 64-bit words and fetch the record
 * via zap_lookup_uint64().  Returns 1 on success, negative errno otherwise.
 * NOTE(review): rc declaration and the final (void *)rec argument line are
 * missing from this copy of the file. */
static int osd_index_lookup(const struct lu_env *env, struct dt_object *dt,
			    struct dt_rec *rec, const struct dt_key *key)
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	__u64 *k = osd_oti_get(env)->oti_key64;

	/* rc is the key length in 64-bit words after padding */
	rc = osd_prepare_key_uint64(obj, k, key);

	rc = -zap_lookup_uint64(osd->od_os, obj->oo_dn->dn_object,
				k, rc, obj->oo_recusize, obj->oo_recsize,

	RETURN(rc == 0 ? 1 : rc);
/* Declare a binary-key index insertion: reserve bonus and ZAP space in the
 * transaction.
 * NOTE(review): the thandle parameter line is missing from this copy of the
 * file. */
static int osd_declare_index_insert(const struct lu_env *env,
				    struct dt_object *dt,
				    const struct dt_rec *rec,
				    const struct dt_key *key,
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_thandle *oh;

	LASSERT(th != NULL);
	oh = container_of0(th, struct osd_thandle, ot_super);

	LASSERT(obj->oo_dn);

	dmu_tx_hold_bonus(oh->ot_tx, obj->oo_dn->dn_object);

	/* do not specify the key as then DMU is trying to look it up
	 * which is very expensive. usually the layers above lookup
	 * before insertion */
	dmu_tx_hold_zap(oh->ot_tx, obj->oo_dn->dn_object, TRUE, NULL);
/* Insert (@key, @rec) into a binary-key index via zap_add_uint64().
 * NOTE(review): rc declaration and the final RETURN are missing from this
 * copy of the file. */
static int osd_index_insert(const struct lu_env *env, struct dt_object *dt,
			    const struct dt_rec *rec, const struct dt_key *key,
			    struct thandle *th, int ignore_quota)
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	__u64 *k = osd_oti_get(env)->oti_key64;

	LASSERT(obj->oo_dn);
	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));
	LASSERT(th != NULL);

	oh = container_of0(th, struct osd_thandle, ot_super);

	/* rc is the key length in 64-bit words after padding */
	rc = osd_prepare_key_uint64(obj, k, key);

	/* Insert (key,oid) into ZAP */
	rc = -zap_add_uint64(osd->od_os, obj->oo_dn->dn_object,
			     k, rc, obj->oo_recusize, obj->oo_recsize,
			     (void *)rec, oh->ot_tx);
/* Declare a binary-key index removal: reserve ZAP space in the transaction.
 * NOTE(review): the thandle parameter line is missing from this copy of the
 * file. */
static int osd_declare_index_delete(const struct lu_env *env,
				    struct dt_object *dt,
				    const struct dt_key *key,
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_thandle *oh;

	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));
	LASSERT(th != NULL);
	LASSERT(obj->oo_dn);

	oh = container_of0(th, struct osd_thandle, ot_super);

	/* do not specify the key as then DMU is trying to look it up
	 * which is very expensive. usually the layers above lookup
	 * before deletion */
	dmu_tx_hold_zap(oh->ot_tx, obj->oo_dn->dn_object, FALSE, NULL);
/* Remove the binary @key from the index via zap_remove_uint64().
 * NOTE(review): rc declaration, the tx argument line of zap_remove_uint64
 * and the final RETURN are missing from this copy of the file. */
static int osd_index_delete(const struct lu_env *env, struct dt_object *dt,
			    const struct dt_key *key, struct thandle *th)
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	__u64 *k = osd_oti_get(env)->oti_key64;

	LASSERT(obj->oo_dn);
	LASSERT(th != NULL);
	oh = container_of0(th, struct osd_thandle, ot_super);

	/* rc is the key length in 64-bit words after padding */
	rc = osd_prepare_key_uint64(obj, k, key);

	/* Remove binary key from the ZAP */
	rc = -zap_remove_uint64(osd->od_os, obj->oo_dn->dn_object,
/* Position a binary-key iterator at @key.  Only rewinding to the start is
 * implemented - a non-zero key is reported and ignored because ZAP has no
 * binary zap_cursor_move_to_key().
 * NOTE(review): the CERROR argument continuation and the ozi_reset/RETURN
 * lines are missing from this copy of the file. */
static int osd_index_it_get(const struct lu_env *env, struct dt_it *di,
			    const struct dt_key *key)
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	struct osd_object *obj = it->ozi_obj;
	struct osd_device *osd = osd_obj2dev(obj);

	LASSERT(it->ozi_zc);

	/* XXX: we need a binary version of zap_cursor_move_to_key()
	 *	to implement this API */
	if (*((const __u64 *)key) != 0)
		CERROR("NOT IMPLEMETED YET (move to %#llx)\n",

	/* rewind to the beginning of the index */
	zap_cursor_fini(it->ozi_zc);
	zap_cursor_init(it->ozi_zc, osd->od_os, obj->oo_dn->dn_object);
/* Advance the binary-key iterator; the advance is skipped right after a
 * reset (ozi_reset) so the first record is not lost.
 * NOTE(review): rc declaration, ozi_reset clearing and the return mapping
 * are missing from this copy of the file. */
static int osd_index_it_next(const struct lu_env *env, struct dt_it *di)
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	zap_attribute_t *za = &osd_oti_get(env)->oti_za;

	/* don't skip the first record after a get()/load() reset */
	if (it->ozi_reset == 0)
		zap_cursor_advance(it->ozi_zc);

	/*
	 * According to current API we need to return error if it's last entry.
	 * zap_cursor_advance() does not return any value. So we need to call
	 * retrieve to check if there is any record. We should make
	 * changes to Iterator API to not return status for this API
	 */
	rc = -zap_cursor_retrieve(it->ozi_zc, za);
/* Return the binary key at the current cursor position; ZAP stores it as
 * the entry "name", so it is copied into the iterator's key buffer.
 * NOTE(review): rc declaration, ozi_reset clearing and the retrieve rc
 * check are missing from this copy of the file. */
static struct dt_key *osd_index_it_key(const struct lu_env *env,
				       const struct dt_it *di)
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	struct osd_object *obj = it->ozi_obj;
	zap_attribute_t *za = &osd_oti_get(env)->oti_za;

	rc = -zap_cursor_retrieve(it->ozi_zc, za);
		RETURN(ERR_PTR(rc));

	/* the binary key is stored in the name */
	memcpy(&it->ozi_key, za->za_name, obj->oo_keysize);

	RETURN((struct dt_key *)&it->ozi_key);
/* Binary keys have a fixed per-object size: just report oo_keysize. */
static int osd_index_it_key_size(const struct lu_env *env,
				 const struct dt_it *di)
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	struct osd_object *obj = it->ozi_obj;
	RETURN(obj->oo_keysize);
/* Fetch the record at the current cursor position: retrieve gives only the
 * (binary) key, so the record itself is re-read via zap_lookup_uint64().
 * NOTE(review): rc declaration, the (void *)rec argument line and the final
 * RETURN are missing from this copy of the file. */
static int osd_index_it_rec(const struct lu_env *env, const struct dt_it *di,
			    struct dt_rec *rec, __u32 attr)
	zap_attribute_t *za = &osd_oti_get(env)->oti_za;
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	struct osd_object *obj = it->ozi_obj;
	struct osd_device *osd = osd_obj2dev(obj);
	__u64 *k = osd_oti_get(env)->oti_key64;

	rc = -zap_cursor_retrieve(it->ozi_zc, za);

	/* rebuild the padded uint64 key from the entry name */
	rc = osd_prepare_key_uint64(obj, k, (const struct dt_key *)za->za_name);

	rc = -zap_lookup_uint64(osd->od_os, obj->oo_dn->dn_object,
				k, rc, obj->oo_recusize, obj->oo_recsize,
/* Serialize the binary-key cursor position; no dot/dotdot remapping is
 * needed here, the raw ZAP cookie is returned as-is. */
static __u64 osd_index_it_store(const struct lu_env *env,
				const struct dt_it *di)
	struct osd_zap_it *it = (struct osd_zap_it *)di;

	RETURN((__u64)zap_cursor_serialize(it->ozi_zc));
/* Restore the binary-key iterator to the raw ZAP position @hash and report
 * whether a record exists there (0 = end, >0 = record, <0 = error).
 * NOTE(review): the hash parameter line, ozi_reset setting and the return
 * mapping are missing from this copy of the file. */
static int osd_index_it_load(const struct lu_env *env, const struct dt_it *di,
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	struct osd_object *obj = it->ozi_obj;
	struct osd_device *osd = osd_obj2dev(obj);
	zap_attribute_t *za = &osd_oti_get(env)->oti_za;

	/* reset the cursor */
	zap_cursor_fini(it->ozi_zc);
	zap_cursor_init_serialized(it->ozi_zc, osd->od_os,
				   obj->oo_dn->dn_object, hash);

	rc = -zap_cursor_retrieve(it->ozi_zc, za);
	else if (rc == -ENOENT)
/* dt_index_operations vector for binary-key (uint64) index objects.
 * NOTE(review): the ".dio_it = {" sub-structure opener between dio_delete
 * and .init is missing from this copy of the file. */
static struct dt_index_operations osd_index_ops = {
	.dio_lookup		= osd_index_lookup,
	.dio_declare_insert	= osd_declare_index_insert,
	.dio_insert		= osd_index_insert,
	.dio_declare_delete	= osd_declare_index_delete,
	.dio_delete		= osd_index_delete,
		.init		= osd_index_it_init,
		.fini		= osd_index_it_fini,
		.get		= osd_index_it_get,
		.put		= osd_index_it_put,
		.next		= osd_index_it_next,
		.key		= osd_index_it_key,
		.key_size	= osd_index_it_key_size,
		.rec		= osd_index_it_rec,
		.store		= osd_index_it_store,
		.load		= osd_index_it_load
/* Iterator state for scanning the whole object table (meta-dnode walk).
 * NOTE(review): several members (position, prefetch counter, attr) are
 * missing from this copy of the file - fields below are the visible ones. */
struct osd_metadnode_it {
	struct osd_device	*mit_dev;	/* device being scanned */
	struct lu_fid		 mit_fid;	/* FID of the current dnode */
	__u64			 mit_prefetched_dnode;	/* next dnode to prefetch */
/* Create an object-table iterator; forces a txg sync first (unless
 * read-only) because dmu_object_next() cannot see dnodes from the current
 * uncommitted txg.
 * NOTE(review): the allocation of @it and the initialization of its fields
 * are missing from this copy of the file. */
static struct dt_it *osd_zfs_otable_it_init(const struct lu_env *env,
					    struct dt_object *dt, __u32 attr)
	struct osd_device *dev = osd_dev(dt->do_lu.lo_dev);
	struct osd_metadnode_it *it;

	if (unlikely(it == NULL))
		RETURN(ERR_PTR(-ENOMEM));

	/* XXX: dmu_object_next() does NOT find dnodes allocated
	 *	in the current non-committed txg, so we force txg
	 *	commit to find all existing dnodes ... */
	if (!dev->od_dt_dev.dd_rdonly)
		txg_wait_synced(dmu_objset_pool(dev->od_os), 0ULL);

	RETURN((struct dt_it *)it);
/*
 * ->fini() of the otable iterator: release the state allocated by
 * osd_zfs_otable_it_init().
 * NOTE(review): the freeing statement is missing from this view.
 */
1410 static void osd_zfs_otable_it_fini(const struct lu_env *env, struct dt_it *di)
1412 struct osd_metadnode_it *it = (struct osd_metadnode_it *)di;
/*
 * ->get() of the otable iterator.
 * NOTE(review): the body is missing from this view — presumably a
 * trivial success return, since positioning is done via ->load().
 */
1417 static int osd_zfs_otable_it_get(const struct lu_env *env,
1418 struct dt_it *di, const struct dt_key *key)
/* ->put() of the otable iterator: nothing to release per-position
 * (body missing from this view — presumably empty). */
1423 static void osd_zfs_otable_it_put(const struct lu_env *env, struct dt_it *di)
1427 #define OTABLE_PREFETCH 256
/*
 * Issue async read-ahead for dnodes in front of the iterator so that
 * osd_zfs_otable_it_next() rarely blocks on disk I/O.
 *
 * mit_prefetched counts dnodes already prefetched but not yet consumed;
 * refill only when fewer than half of OTABLE_PREFETCH remain, and stop
 * once the window is full or dmu_object_next() runs out of dnodes.
 */
1429 static void osd_zfs_otable_prefetch(const struct lu_env *env,
1430 struct osd_metadnode_it *it)
1432 struct osd_device *dev = it->mit_dev;
1435 /* can go negative on the very first access to the iterator
1436 * or if some non-Lustre objects were found */
1437 if (unlikely(it->mit_prefetched < 0))
1438 it->mit_prefetched = 0;
 /* window still at least half full - nothing to do yet */
1440 if (it->mit_prefetched >= (OTABLE_PREFETCH >> 1))
 /* first refill: start prefetching from the current position */
1443 if (it->mit_prefetched_dnode == 0)
1444 it->mit_prefetched_dnode = it->mit_pos;
1446 while (it->mit_prefetched < OTABLE_PREFETCH) {
1447 rc = -dmu_object_next(dev->od_os, &it->mit_prefetched_dnode,
1449 if (unlikely(rc != 0))
 /* offset 0, len 0: prefetch the dnode itself, asynchronously */
1452 osd_dmu_prefetch(dev->od_os, it->mit_prefetched_dnode,
1453 0, 0, 0, ZIO_PRIORITY_ASYNC_READ);
1455 it->mit_prefetched++;
/*
 * ->next() of the otable iterator: advance mit_pos to the next dnode
 * that carries an LMA xattr (i.e. is a Lustre object) and cache its
 * self FID in mit_fid.  Dnodes without xattrs or without LMA are
 * skipped (loop structure partially missing from this view).
 *
 * Per the in-code comment: returns 0 while more items remain, +1 at
 * the end of the object table.
 */
1459 static int osd_zfs_otable_it_next(const struct lu_env *env, struct dt_it *di)
1461 struct osd_metadnode_it *it = (struct osd_metadnode_it *)di;
1462 struct lustre_mdt_attrs *lma;
1463 struct osd_device *dev = it->mit_dev;
1464 nvlist_t *nvbuf = NULL;
 /* invalidate the cached FID before searching for the next object */
1469 memset(&it->mit_fid, 0, sizeof(it->mit_fid));
 /* remember the starting dnode# for the debug message below */
1471 dnode = it->mit_pos;
1473 rc = -dmu_object_next(dev->od_os, &it->mit_pos, B_FALSE, 0);
1474 if (unlikely(rc != 0))
 /* one prefetched dnode consumed (may go negative; see prefetch) */
1476 it->mit_prefetched--;
1478 /* LMA is required for this to be a Lustre object.
1479 * If there is no xattr skip it. */
1480 rc = __osd_xattr_load(dev, it->mit_pos, &nvbuf);
1481 if (unlikely(rc != 0))
1484 LASSERT(nvbuf != NULL);
1485 rc = -nvlist_lookup_byte_array(nvbuf, XATTR_NAME_LMA, &v, &s);
1486 if (likely(rc == 0)) {
 /* LMA is stored little-endian on disk; swab to host order */
1488 lma = (struct lustre_mdt_attrs *)v;
1489 lustre_lma_swab(lma);
1490 it->mit_fid = lma->lma_self_fid;
1494 /* not a Lustre object, try next one */
1501 /* we aren't prefetching in the above loop because the number of
1502 * non-Lustre objects is very small and we will be repeating very
1503 * rare. in case we want to use this to iterate over non-Lustre
1504 * objects (i.e. when we convert regular ZFS in Lustre) it makes
1505 * sense to initiate prefetching in the loop */
1507 /* 0 - there are more items, +1 - the end */
1508 if (likely(rc == 0))
1509 osd_zfs_otable_prefetch(env, it);
1511 CDEBUG(D_OTHER, "advance: %llu -> %llu "DFID": %d\n", dnode,
1512 it->mit_pos, PFID(&it->mit_fid), rc);
/*
 * ->key() of the otable iterator.
 * NOTE(review): the body is missing from this view — given key_size
 * below is sizeof(__u64), presumably it returns the current dnode
 * position as the key; confirm against the full source.
 */
1518 static struct dt_key *osd_zfs_otable_it_key(const struct lu_env *env,
1519 const struct dt_it *di)
/* ->key_size(): otable keys are fixed-size 64-bit values (dnode#). */
1524 static int osd_zfs_otable_it_key_size(const struct lu_env *env,
1525 const struct dt_it *di)
1527 return sizeof(__u64);
/*
 * ->rec() of the otable iterator: the record is the FID cached by
 * osd_zfs_otable_it_next() in mit_fid.
 * NOTE(review): the copy/return statements are missing from this view;
 * presumably *fid = it->mit_fid is performed — confirm against the
 * full source.
 */
1530 static int osd_zfs_otable_it_rec(const struct lu_env *env,
1531 const struct dt_it *di,
1532 struct dt_rec *rec, __u32 attr)
1534 struct osd_metadnode_it *it = (struct osd_metadnode_it *)di;
1535 struct lu_fid *fid = (struct lu_fid *)rec;
/*
 * ->store() of the otable iterator.
 * NOTE(review): the return statement is missing from this view;
 * presumably the current dnode position (mit_pos) is returned as the
 * resume cookie consumed by osd_zfs_otable_it_load().
 */
1544 static __u64 osd_zfs_otable_it_store(const struct lu_env *env,
1545 const struct dt_it *di)
1547 struct osd_metadnode_it *it = (struct osd_metadnode_it *)di;
/*
 * ->load() of the otable iterator: resume at the cookie "hash"
 * (a dnode position from ->store()).  Prefetch state is reset so
 * read-ahead restarts from the new position, then the iterator is
 * advanced to the first Lustre object at/after that position.
 * NOTE(review): the line assigning the position from "hash" is missing
 * from this view.
 */
1552 static int osd_zfs_otable_it_load(const struct lu_env *env,
1553 const struct dt_it *di, __u64 hash)
1555 struct osd_metadnode_it *it = (struct osd_metadnode_it *)di;
1558 it->mit_prefetched = 0;
1559 it->mit_prefetched_dnode = 0;
1561 return osd_zfs_otable_it_next(env, (struct dt_it *)di);
/*
 * ->key_rec() of the otable iterator.
 * NOTE(review): the body is missing from this view — presumably a
 * no-op/stub returning 0; confirm against the full source.
 */
1564 static int osd_zfs_otable_it_key_rec(const struct lu_env *env,
1565 const struct dt_it *di, void *key_rec)
/*
 * dt_index_operations used to iterate the whole object table (all
 * Lustre objects in the objset), selected by osd_index_try() when the
 * caller asks for dt_otable_features.  Iterator-only: no dio_* entries.
 * NOTE(review): the nested ".it = {" wrapper line appears to be missing
 * from this view — confirm against the full source.
 */
1570 const struct dt_index_operations osd_zfs_otable_ops = {
1572 .init = osd_zfs_otable_it_init,
1573 .fini = osd_zfs_otable_it_fini,
1574 .get = osd_zfs_otable_it_get,
1575 .put = osd_zfs_otable_it_put,
1576 .next = osd_zfs_otable_it_next,
1577 .key = osd_zfs_otable_it_key,
1578 .key_size = osd_zfs_otable_it_key_size,
1579 .rec = osd_zfs_otable_it_rec,
1580 .store = osd_zfs_otable_it_store,
1581 .load = osd_zfs_otable_it_load,
1582 .key_rec = osd_zfs_otable_it_key_rec,
/*
 * Select and install the dt_index_operations vector on a dt_object
 * according to the requested index features:
 *  - dt_otable_features  -> osd_zfs_otable_ops (whole-objset iterator)
 *  - dt_directory_features -> osd_dir_ops, only if the object is a ZAP
 *  - dt_acct_features    -> osd_acct_index_ops (quota accounting)
 *  - anything else       -> osd_index_ops, after validating that the
 *    key/record sizes are fixed and within ZAP limits
 * Returns 0 on success or a negative errno (-ERANGE, -ENOTDIR,
 * -EINVAL, -E2BIG).
 * NOTE(review): several lines are missing from this view (GOTO targets,
 * the final return); the "out" label is presumably just before the
 * up_read() at the end.
 */
1586 int osd_index_try(const struct lu_env *env, struct dt_object *dt,
1587 const struct dt_index_features *feat)
1589 struct osd_object *obj = osd_dt_obj(dt);
 /* protect oo_dn and do_index_ops against concurrent modification */
1593 down_read(&obj->oo_guard);
1596 * XXX: implement support for fixed-size keys sorted with natural
1597 * numerical way (not using internal hash value)
1599 if (feat->dif_flags & DT_IND_RANGE)
1600 GOTO(out, rc = -ERANGE);
1602 if (unlikely(feat == &dt_otable_features)) {
1603 dt->do_index_ops = &osd_zfs_otable_ops;
1607 LASSERT(!dt_object_exists(dt) || obj->oo_dn != NULL);
1608 if (likely(feat == &dt_directory_features)) {
 /* a directory must be backed by a ZAP dnode (or not yet exist) */
1609 if (!dt_object_exists(dt) || osd_object_is_zap(obj->oo_dn))
1610 dt->do_index_ops = &osd_dir_ops;
1612 GOTO(out, rc = -ENOTDIR);
1613 } else if (unlikely(feat == &dt_acct_features)) {
1614 LASSERT(fid_is_acct(lu_object_fid(&dt->do_lu)));
1615 dt->do_index_ops = &osd_acct_index_ops;
1616 } else if (dt->do_index_ops == NULL) {
1617 /* For index file, we don't support variable key & record sizes
1618 * and the key has to be unique */
1619 if ((feat->dif_flags & ~DT_IND_UPDATE) != 0)
1620 GOTO(out, rc = -EINVAL);
1622 if (feat->dif_keysize_max > ZAP_MAXNAMELEN)
1623 GOTO(out, rc = -E2BIG);
1624 if (feat->dif_keysize_max != feat->dif_keysize_min)
1625 GOTO(out, rc = -EINVAL);
1627 /* As for the record size, it should be a multiple of 8 bytes
1628 * and smaller than the maximum value length supported by ZAP.
1630 if (feat->dif_recsize_max > ZAP_MAXVALUELEN)
1631 GOTO(out, rc = -E2BIG);
1632 if (feat->dif_recsize_max != feat->dif_recsize_min)
1633 GOTO(out, rc = -EINVAL);
1635 obj->oo_keysize = feat->dif_keysize_max;
1636 obj->oo_recsize = feat->dif_recsize_max;
1637 obj->oo_recusize = 1;
1639 /* ZFS prefers to work with array of 64bits */
1640 if ((obj->oo_recsize & 7) == 0) {
1641 obj->oo_recsize >>= 3;
1642 obj->oo_recusize = 8;
1644 dt->do_index_ops = &osd_index_ops;
1648 up_read(&obj->oo_guard);