4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2016, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/osd-zfs/osd_index.c
34 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
35 * Author: Mike Pershin <tappro@whamcloud.com>
38 #define DEBUG_SUBSYSTEM S_OSD
40 #include <libcfs/libcfs.h>
41 #include <obd_support.h>
42 #include <lustre_net.h>
44 #include <obd_class.h>
45 #include <lustre_disk.h>
46 #include <lustre_fid.h>
48 #include "osd_internal.h"
50 #include <sys/dnode.h>
54 #include <sys/spa_impl.h>
55 #include <sys/zfs_znode.h>
56 #include <sys/dmu_tx.h>
57 #include <sys/dmu_objset.h>
58 #include <sys/dsl_prop.h>
59 #include <sys/sa_impl.h>
62 static inline int osd_object_is_zap(dnode_t *dn)
64 return (dn->dn_type == DMU_OT_DIRECTORY_CONTENTS ||
65 dn->dn_type == DMU_OT_USERGROUP_USED);
68 /* We don't actually have direct access to the zap_hashbits() function
69 * so just pretend like we do for now. If this ever breaks we can look at
/* NOTE(review): the original comment continues on a line missing from this
 * extract; the macro hard-codes the ZFS default of 48 hash bits. */
71 #define zap_hashbits(zc) 48
74 * | cd (16 bits) | hash (48 bits) |
75 * we need it in other form:
76 * |0| hash (48 bit) | cd (15 bit) |
77 * to be a full 64-bit ordered hash so that Lustre readdir can use it to merge
78 * the readdir hashes from multiple directory stripes uniformly on the client.
79 * Another point is sign bit, the hash range should be in [0, 2^63-1] because
80 * loff_t (for llseek) needs to be a positive value. This means the "cd" field
81 * should only be the low 15 bits.
83 uint64_t osd_zap_cursor_serialize(zap_cursor_t *zc)
85 uint64_t zfs_hash = zap_cursor_serialize(zc) & (~0ULL >> 1);
87 return (zfs_hash >> zap_hashbits(zc)) |
88 (zfs_hash << (63 - zap_hashbits(zc)));
91 void osd_zap_cursor_init_serialized(zap_cursor_t *zc, struct objset *os,
92 uint64_t id, uint64_t dirhash)
94 uint64_t zfs_hash = ((dirhash << zap_hashbits(zc)) & (~0ULL >> 1)) |
95 (dirhash >> (63 - zap_hashbits(zc)));
97 zap_cursor_init_serialized(zc, os, id, zfs_hash);
/* Allocate a zap cursor and position it at the given Lustre-format
 * directory hash via osd_zap_cursor_init_serialized().
 * NOTE(review): this extract is missing the lines that allocate "t",
 * return -ENOMEM on failure and hand the cursor back through *zc —
 * verify against the complete file before editing. */
100 int osd_zap_cursor_init(zap_cursor_t **zc, struct objset *os,
101 uint64_t id, uint64_t dirhash)
106 if (unlikely(t == NULL))
109 osd_zap_cursor_init_serialized(t, os, id, dirhash);
/* Release a cursor created by osd_zap_cursor_init().
 * NOTE(review): the function body is missing from this extract. */
115 void osd_zap_cursor_fini(zap_cursor_t *zc)
121 static inline void osd_obj_cursor_init_serialized(zap_cursor_t *zc,
122 struct osd_object *o,
125 struct osd_device *d = osd_obj2dev(o);
126 osd_zap_cursor_init_serialized(zc, d->od_os,
127 o->oo_dn->dn_object, dirhash);
130 static inline int osd_obj_cursor_init(zap_cursor_t **zc, struct osd_object *o,
133 struct osd_device *d = osd_obj2dev(o);
134 return osd_zap_cursor_init(zc, d->od_os, o->oo_dn->dn_object, dirhash);
/* Allocate and initialize a ZAP iterator for index object "dt".
 * Returns the iterator cast to dt_it, or ERR_PTR(-ENOENT) if the object
 * was already destroyed / ERR_PTR(-ENOMEM) if the slab allocation fails.
 * NOTE(review): this extract is missing several original lines (the NULL
 * check after allocation and the iterator field initialization among
 * them) — consult the complete file before editing. */
137 static struct dt_it *osd_index_it_init(const struct lu_env *env,
138 struct dt_object *dt,
141 struct osd_thread_info *info = osd_oti_get(env);
142 struct osd_zap_it *it;
143 struct osd_object *obj = osd_dt_obj(dt);
144 struct lu_object *lo = &dt->do_lu;
148 if (obj->oo_destroyed)
149 RETURN(ERR_PTR(-ENOENT));
151 LASSERT(lu_object_exists(lo));
155 OBD_SLAB_ALLOC_PTR_GFP(it, osd_zapit_cachep, GFP_NOFS);
157 RETURN(ERR_PTR(-ENOMEM));
/* cursor starts at hash 0, i.e. the beginning of the directory */
159 rc = osd_obj_cursor_init(&it->ozi_zc, obj, 0);
161 OBD_SLAB_FREE_PTR(it, osd_zapit_cachep);
169 RETURN((struct dt_it *)it);
/* Tear down an iterator created by osd_index_it_init(): release the ZAP
 * cursor, drop the object reference and free the iterator itself.
 * NOTE(review): the assignment of "obj" from it->ozi_obj is on a line
 * missing from this extract. */
172 static void osd_index_it_fini(const struct lu_env *env, struct dt_it *di)
174 struct osd_zap_it *it = (struct osd_zap_it *)di;
175 struct osd_object *obj;
179 LASSERT(it->ozi_obj);
183 osd_zap_cursor_fini(it->ozi_zc);
184 osd_object_put(env, obj);
185 OBD_SLAB_FREE_PTR(it, osd_zapit_cachep);
/* Intentionally empty: iterator references are taken in init and dropped
 * in fini, so put() has nothing to do. */
191 static void osd_index_it_put(const struct lu_env *env, struct dt_it *di)
193 /* PBS: do nothing : refs are incremented at retrieve and decremented
/* Append the optional Lustre attributes (currently only the file type,
 * when LUDA_TYPE is requested) after the name in a lu_dirent, then store
 * lde_attrs in little-endian on-wire order.
 * NOTE(review): the parameter list continues on a line missing from this
 * extract (presumably "int len, __u16 type" — confirm against the full
 * file); the luda_type record is placed at the aligned offset after the
 * name. */
197 static inline void osd_it_append_attrs(struct lu_dirent *ent, __u32 attr,
200 const unsigned align = sizeof(struct luda_type) - 1;
201 struct luda_type *lt;
203 /* check if file type is required */
204 if (attr & LUDA_TYPE) {
/* round the name length up so luda_type lands on its alignment */
205 len = (len + align) & ~align;
207 lt = (void *)ent->lde_name + len;
208 lt->lt_type = cpu_to_le16(DTTOIF(type));
209 ent->lde_attrs |= LUDA_TYPE;
212 ent->lde_attrs = cpu_to_le32(ent->lde_attrs);
/* Load the SA-based xattr nvlist of the object identified by "oid":
 * hold its bonus buffer, wrap it in a private SA handle, read the xattrs
 * via __osd_xattr_load() and destroy the handle again.
 * NOTE(review): the error-return lines between these statements are
 * missing from this extract. */
215 int __osd_xattr_load_by_oid(struct osd_device *osd, uint64_t oid, nvlist_t **sa)
221 rc = -dmu_bonus_hold(osd->od_os, oid, osd_obj_tag, &db);
223 CERROR("%s: can't get bonus, rc = %d\n", osd->od_svname, rc);
227 rc = -sa_handle_get_from_db(osd->od_os, db, NULL, SA_HDL_PRIVATE, &hdl);
/* handle acquisition failed: drop the bonus buffer ourselves */
229 dmu_buf_rele(db, osd_obj_tag);
233 rc = __osd_xattr_load(osd, hdl, sa);
235 sa_handle_destroy(hdl);
/*
240 * Get the object's FID from its LMA EA.
242 * \param[in] env pointer to the thread context
243 * \param[in] osd pointer to the OSD device
244 * \param[in] oid the object's local identifier
245 * \param[out] fid the buffer to hold the object's FID
247 * \retval 0 for success
248 * \retval negative error number on failure
 *
 * NOTE(review): reconstructed flow (several lines missing from this
 * extract): first try the SA xattr nvlist; if XATTR_NAME_LMA is not
 * there, fall back to the xattr directory via __osd_xattr_get_large().
 */
250 static int osd_get_fid_by_oid(const struct lu_env *env, struct osd_device *osd,
251 uint64_t oid, struct lu_fid *fid)
253 struct objset *os = osd->od_os;
254 struct osd_thread_info *oti = osd_oti_get(env);
255 struct lustre_mdt_attrs *lma =
256 (struct lustre_mdt_attrs *)oti->oti_buf;
258 nvlist_t *sa_xattr = NULL;
259 sa_handle_t *sa_hdl = NULL;
260 uchar_t *nv_value = NULL;
261 uint64_t xattr = ZFS_NO_OBJECT;
266 rc = __osd_xattr_load_by_oid(osd, oid, &sa_xattr);
273 rc = -nvlist_lookup_byte_array(sa_xattr, XATTR_NAME_LMA, &nv_value,
/* LMA must fit into the per-thread scratch buffer */
281 if (unlikely(size > sizeof(oti->oti_buf)))
282 GOTO(out, rc = -ERANGE);
284 memcpy(lma, nv_value, size);
/* fallback: LMA lives in the dedicated xattr object */
289 rc = -sa_handle_get(os, oid, NULL, SA_HDL_PRIVATE, &sa_hdl);
293 rc = -sa_lookup(sa_hdl, SA_ZPL_XATTR(osd), &xattr, 8);
294 sa_handle_destroy(sa_hdl);
299 buf.lb_len = sizeof(oti->oti_buf);
300 rc = __osd_xattr_get_large(env, osd, xattr, &buf,
301 XATTR_NAME_LMA, &size);
306 if (size < sizeof(*lma))
307 GOTO(out, rc = -EIO);
/* LMA is stored little-endian on disk */
309 lustre_lma_swab(lma);
310 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
311 CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
312 CWARN("%s: unsupported incompat LMA feature(s) %#x for "
313 "oid = %#llx\n", osd->od_svname,
314 lma->lma_incompat & ~LMA_INCOMPAT_SUPP, oid);
315 GOTO(out, rc = -EOPNOTSUPP);
317 *fid = lma->lma_self_fid;
322 if (sa_xattr != NULL)
323 nvlist_free(sa_xattr);
/*
328 * As we don't know FID, we can't use LU object, so this function
329 * partially duplicate osd_xattr_get_internal() which is built around
330 * LU-object and uses it to cache data like regular EA dnode, etc
 *
 * Reads SA_ZPL_PARENT from the object's own SA to get the parent dnode
 * number, then resolves that dnode's FID from its LMA.
 * NOTE(review): the parameter list and error-return lines are missing
 * from this extract.
 */
332 static int osd_find_parent_by_dnode(const struct lu_env *env,
336 struct osd_object *obj = osd_dt_obj(o);
337 struct osd_device *osd = osd_obj2dev(obj);
338 uint64_t dnode = ZFS_NO_OBJECT;
342 /* first of all, get parent dnode from own attributes */
343 rc = osd_sa_handle_get(obj);
346 rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_PARENT(osd), &dnode, 8);
348 rc = osd_get_fid_by_oid(env, osd, dnode, fid);
/* Resolve the parent FID of "o": primarily from the first LinkEA entry,
 * falling back to the parent's LMA (via SA_ZPL_PARENT) when no LinkEA
 * exists.  The LinkEA header is byte-swabbed in place when it was
 * written by a peer of the opposite endianness.
 * NOTE(review): several lines (buffer re-read on overflow, GOTO targets)
 * are missing from this extract — verify against the complete file. */
353 static int osd_find_parent_fid(const struct lu_env *env, struct dt_object *o,
356 struct link_ea_header *leh;
357 struct link_ea_entry *lee;
/* use the per-thread scratch buffer first, allocate only on overflow */
362 buf.lb_buf = osd_oti_get(env)->oti_buf;
363 buf.lb_len = sizeof(osd_oti_get(env)->oti_buf);
365 rc = osd_xattr_get(env, o, &buf, XATTR_NAME_LINK);
/* probe the real EA size with a NULL buffer */
367 rc = osd_xattr_get(env, o, &LU_BUF_NULL, XATTR_NAME_LINK);
371 OBD_ALLOC(buf.lb_buf, rc);
372 if (buf.lb_buf == NULL)
375 rc = osd_xattr_get(env, o, &buf, XATTR_NAME_LINK);
379 if (rc < sizeof(*leh) + sizeof(*lee))
380 GOTO(out, rc = -EINVAL);
/* swab the header if it was written with the opposite endianness */
383 if (leh->leh_magic == __swab32(LINK_EA_MAGIC)) {
384 leh->leh_magic = LINK_EA_MAGIC;
385 leh->leh_reccount = __swab32(leh->leh_reccount);
386 leh->leh_len = __swab64(leh->leh_len);
388 if (leh->leh_magic != LINK_EA_MAGIC)
389 GOTO(out, rc = -EINVAL);
390 if (leh->leh_reccount == 0)
391 GOTO(out, rc = -ENODATA);
/* the first link entry carries the (big-endian) parent FID */
393 lee = (struct link_ea_entry *)(leh + 1);
394 fid_be_to_cpu(fid, (const struct lu_fid *)&lee->lee_parent_fid);
398 if (buf.lb_buf != osd_oti_get(env)->oti_buf)
399 OBD_FREE(buf.lb_buf, buf.lb_len);
402 /* this block can be enabled for additional verification
403 * it's trying to match FID from LinkEA vs. FID from LMA */
407 rc2 = osd_find_parent_by_dnode(env, o, &fid2);
409 if (lu_fid_eq(fid, &fid2) == 0)
410 CERROR("wrong parent: "DFID" != "DFID"\n",
411 PFID(fid), PFID(&fid2));
415 /* no LinkEA is found, let's try to find the fid in parent's LMA */
416 if (unlikely(rc != 0))
417 rc = osd_find_parent_by_dnode(env, o, fid);
/* Look up "key" (a name) in directory "dt" and return the target FID in
 * "rec".  "." resolves to the directory's own FID, ".." via
 * osd_find_parent_fid(); everything else goes through the ZAP.  When the
 * stored FID is not sane (pre-FID-in-dirent entry) the FID is recovered
 * from the target's LMA.  Returns 1 on success, negative errno on error.
 * NOTE(review): the nested dot/dotdot branches lost their inner "if"
 * lines in this extract — the visible "else if (name[1] ...)" is part of
 * the name[0] == '.' branch in the original. */
422 static int osd_dir_lookup(const struct lu_env *env, struct dt_object *dt,
423 struct dt_rec *rec, const struct dt_key *key)
425 struct osd_thread_info *oti = osd_oti_get(env);
426 struct osd_object *obj = osd_dt_obj(dt);
427 struct osd_device *osd = osd_obj2dev(obj);
428 char *name = (char *)key;
432 if (name[0] == '.') {
434 const struct lu_fid *f = lu_object_fid(&dt->do_lu);
435 memcpy(rec, f, sizeof(*f));
437 } else if (name[1] == '.' && name[2] == 0) {
438 rc = osd_find_parent_fid(env, dt, (struct lu_fid *)rec);
439 RETURN(rc == 0 ? 1 : rc);
/* clear the FID so we can tell whether the entry carried one */
443 memset(&oti->oti_zde.lzd_fid, 0, sizeof(struct lu_fid));
444 rc = osd_zap_lookup(osd, obj->oo_dn->dn_object, obj->oo_dn,
445 (char *)key, 8, sizeof(oti->oti_zde) / 8,
446 (void *)&oti->oti_zde);
450 if (likely(fid_is_sane(&oti->oti_zde.lzd_fid))) {
451 memcpy(rec, &oti->oti_zde.lzd_fid, sizeof(struct lu_fid));
/* old-style entry without FID: resolve it from the target's LMA */
455 rc = osd_get_fid_by_oid(env, osd, oti->oti_zde.lzd_reg.zde_dnode,
456 (struct lu_fid *)rec);
458 RETURN(rc == 0 ? 1 : (rc == -ENOENT ? -ENODATA : rc));
/*
462 * In DNE environment, the object and its name entry may reside on different
463 * MDTs. Under such case, we will create an agent object on the MDT where the
464 * name entry resides. The agent object is empty, and indicates that the real
465 * object for the name entry resides on another MDT. If without agent object,
466 * related name entry will be skipped when perform MDT side file level backup
467 * and restore via ZPL by userspace tool, such as 'tar'.
 *
 * NOTE(review): several lines (rc declarations, GOTO targets, the
 * __osd_object_create() tail arguments) are missing from this extract.
 */
469 static int osd_create_agent_object(const struct lu_env *env,
470 struct osd_device *osd,
471 struct luz_direntry *zde,
472 uint64_t parent, dmu_tx_t *tx)
474 struct osd_thread_info *info = osd_oti_get(env);
475 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
476 struct lu_attr *la = &info->oti_la;
477 nvlist_t *nvbuf = NULL;
/* fault-injection point: pretend agent creation is disabled */
483 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_NO_AGENTOBJ))
486 rc = -nvlist_alloc(&nvbuf, NV_UNIQUE_NAME, KM_SLEEP);
/* stamp the agent with an LMAI_AGENT LMA carrying the real FID */
490 lustre_lma_init(lma, &zde->lzd_fid, 0, LMAI_AGENT);
491 lustre_lma_swab(lma);
492 rc = -nvlist_add_byte_array(nvbuf, XATTR_NAME_LMA, (uchar_t *)lma,
497 la->la_valid = LA_TYPE | LA_MODE;
498 la->la_mode = (DTTOIF(zde->lzd_reg.zde_type) & S_IFMT) |
499 S_IRUGO | S_IWUSR | S_IXUGO;
/* directories become ZAPs, everything else a plain object */
501 if (S_ISDIR(la->la_mode))
502 rc = __osd_zap_create(env, osd, &dn, tx, la,
503 osd_find_dnsize(osd, OSD_BASE_EA_IN_BONUS), 0);
505 rc = __osd_object_create(env, osd, NULL, &zde->lzd_fid,
510 zde->lzd_reg.zde_dnode = dn->dn_object;
511 rc = -sa_handle_get(osd->od_os, dn->dn_object, NULL,
512 SA_HDL_PRIVATE, &hdl);
514 rc = __osd_attr_init(env, osd, NULL, hdl, tx,
516 sa_handle_destroy(hdl);
/* attribute init failed: roll back the freshly created object */
524 dmu_object_free(osd->od_os, dn->dn_object, tx);
/* Declare a directory-entry insertion: resolve the target FID through
 * the OI cache (filling it for the actual insert), and reserve tx room
 * for the ZAP update — plus, for remote (DNE) targets other than "..",
 * for the agent object that osd_dir_insert() may create.
 * NOTE(review): the thandle parameter line and some tx-hold argument
 * lines are missing from this extract. */
534 static int osd_declare_dir_insert(const struct lu_env *env,
535 struct dt_object *dt,
536 const struct dt_rec *rec,
537 const struct dt_key *key,
540 struct osd_object *obj = osd_dt_obj(dt);
541 struct osd_device *osd = osd_obj2dev(obj);
542 const struct dt_insert_rec *rec1;
543 const struct lu_fid *fid;
544 struct osd_thandle *oh;
546 struct osd_idmap_cache *idc;
549 rec1 = (struct dt_insert_rec *)rec;
551 LASSERT(fid != NULL);
552 LASSERT(rec1->rec_type != 0);
555 oh = container_of0(th, struct osd_thandle, ot_super);
/* prime the OI cache so osd_dir_insert() need not look up the FID */
557 idc = osd_idc_find_or_init(env, osd, fid);
559 RETURN(PTR_ERR(idc));
561 if (idc->oic_remote) {
562 const char *name = (const char *)key;
564 if (name[0] != '.' || name[1] != '.' || name[2] != 0) {
565 /* Prepare agent object for remote entry that will
566 * be used for operations via ZPL, such as MDT side
567 * file-level backup and restore. */
568 dmu_tx_hold_sa_create(oh->ot_tx,
569 osd_find_dnsize(osd, OSD_BASE_EA_IN_BONUS));
570 if (S_ISDIR(rec1->rec_type))
571 dmu_tx_hold_zap(oh->ot_tx, DMU_NEW_OBJECT,
576 /* This is for inserting dot/dotdot for new created dir. */
577 if (obj->oo_dn == NULL)
578 object = DMU_NEW_OBJECT;
580 object = obj->oo_dn->dn_object;
582 /* do not specify the key as then DMU is trying to look it up
583 * which is very expensive. usually the layers above lookup
584 * before insertion */
585 osd_tx_hold_zap(oh->ot_tx, object, obj->oo_dn, TRUE, NULL);
/* Return whether FID sequence "seq" is served by this node, by looking
 * it up in the local FLD and comparing the resulting index against our
 * server node id.  Errors from the lookup are reported and treated as
 * "not local" by the caller.
 * NOTE(review): the "seq" parameter line is missing from this extract. */
590 static int osd_seq_exists(const struct lu_env *env, struct osd_device *osd,
593 struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
594 struct seq_server_site *ss = osd_seq_site(osd);
599 LASSERT(ss->ss_server_fld != NULL);
601 rc = osd_fld_lookup(env, osd, seq, range);
604 CERROR("%s: Can not lookup fld for %#llx\n",
609 RETURN(ss->ss_node_id == range->lsr_index);
/* Decide whether "fid" belongs to a remote MDT: FIDs whose sequence is
 * not FLDB-managed, or checked before the FLD service is up, are assumed
 * local; otherwise the local FLDB decides.
 * NOTE(review): the return-value lines (local vs. remote) are missing
 * from this extract. */
612 int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
613 const struct lu_fid *fid)
615 struct seq_server_site *ss = osd_seq_site(osd);
618 /* FID seqs not in FLDB, must be local seq */
619 if (unlikely(!fid_seq_in_fldb(fid_seq(fid))))
622 /* If FLD is not being initialized yet, it only happens during the
623 * initialization, likely during mgs initialization, and we assume
624 * this is local FID. */
625 if (ss == NULL || ss->ss_server_fld == NULL)
628 /* Only check the local FLDB here */
629 if (osd_seq_exists(env, osd, fid_seq(fid)))
/*
636 * Inserts (key, value) pair in \a directory object.
638 * \param dt osd index object
639 * \param key key for index
640 * \param rec record reference
641 * \param th transaction handler
642 * \param ignore_quota update should not affect quota
645 * \retval -ve failure
 *
 * NOTE(review): this extract is missing many original lines (dot/dotdot
 * branch bodies, the SA update arguments, several GOTO/RETURN lines) —
 * consult the complete file before editing.
 */
647 static int osd_dir_insert(const struct lu_env *env, struct dt_object *dt,
648 const struct dt_rec *rec, const struct dt_key *key,
649 struct thandle *th, int ignore_quota)
651 struct osd_thread_info *oti = osd_oti_get(env);
652 struct osd_object *parent = osd_dt_obj(dt);
653 struct osd_device *osd = osd_obj2dev(parent);
654 struct dt_insert_rec *rec1 = (struct dt_insert_rec *)rec;
655 const struct lu_fid *fid = rec1->rec_fid;
656 struct osd_thandle *oh;
657 struct osd_idmap_cache *idc;
658 const char *name = (const char *)key;
659 struct luz_direntry *zde = &oti->oti_zde;
660 int num = sizeof(*zde) / 8;
664 LASSERT(parent->oo_dn);
666 LASSERT(dt_object_exists(dt));
667 LASSERT(osd_invariant(parent));
670 oh = container_of0(th, struct osd_thandle, ot_super);
672 idc = osd_idc_find(env, osd, fid);
673 if (unlikely(idc == NULL)) {
674 /* this dt_insert() wasn't declared properly, so
675 * FID is missing in OI cache. we better do not
676 * lookup FID in FLDB/OI and don't risk to deadlock,
677 * but in some special cases (lfsck testing, etc)
678 * it's much simpler than fixing a caller */
679 CERROR("%s: "DFID" wasn't declared for insert\n",
680 osd_name(osd), PFID(fid));
681 idc = osd_idc_find_or_init(env, osd, fid);
683 RETURN(PTR_ERR(idc));
/* luz_direntry must pack into whole 64-bit ZAP integers */
686 CLASSERT(sizeof(zde->lzd_reg) == 8);
687 CLASSERT(sizeof(*zde) % 8 == 0);
689 memset(&zde->lzd_reg, 0, sizeof(zde->lzd_reg));
690 zde->lzd_reg.zde_type = IFTODT(rec1->rec_type & S_IFMT);
693 if (idc->oic_remote) {
694 if (name[0] != '.' || name[1] != '.' || name[2] != 0) {
695 /* Create agent inode for remote object that will
696 * be used for MDT file-level backup and restore. */
697 rc = osd_create_agent_object(env, osd, zde,
698 parent->oo_dn->dn_object, oh->ot_tx);
700 CWARN("%s: Fail to create agent object for "
702 osd_name(osd), PFID(fid), rc);
703 /* Ignore the failure since the system can go
704 * ahead if we do not care about the MDT side
705 * file-level backup and restore. */
710 if (unlikely(idc->oic_dnode == 0)) {
711 /* for a reason OI cache wasn't filled properly */
712 CERROR("%s: OIC for "DFID" isn't filled\n",
713 osd_name(osd), PFID(fid));
716 if (name[0] == '.') {
718 /* do not store ".", instead generate it
719 * during iteration */
721 } else if (name[1] == '.' && name[2] == 0) {
722 uint64_t dnode = idc->oic_dnode;
723 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_PARENT))
726 /* update parent dnode in the child.
727 * later it will be used to generate ".." */
728 rc = osd_object_sa_update(parent,
735 zde->lzd_reg.zde_dnode = idc->oic_dnode;
/* fault injection: corrupt the stored FID for LFSCK testing */
738 if (OBD_FAIL_CHECK(OBD_FAIL_FID_INDIR))
739 zde->lzd_fid.f_ver = ~0;
740 if (OBD_FAIL_CHECK(OBD_FAIL_FID_IGIF))
742 /* Insert (key,oid) into ZAP */
743 rc = osd_zap_add(osd, parent->oo_dn->dn_object, parent->oo_dn,
744 name, 8, num, (void *)zde, oh->ot_tx);
745 if (unlikely(rc == -EEXIST &&
746 name[0] == '.' && name[1] == '.' && name[2] == 0))
747 /* Update (key,oid) in ZAP */
748 rc = -zap_update(osd->od_os, parent->oo_dn->dn_object, name, 8,
749 sizeof(*zde) / 8, (void *)zde, oh->ot_tx);
/* Declare a directory-entry deletion: reserve tx room for the ZAP
 * removal and for freeing a possible agent object.  Dot/dotdot get
 * special handling for Orion-era compatibility (see comment below).
 * NOTE(review): the thandle parameter line and the bodies of the
 * dot/dotdot branches are missing from this extract. */
756 static int osd_declare_dir_delete(const struct lu_env *env,
757 struct dt_object *dt,
758 const struct dt_key *key,
761 struct osd_object *obj = osd_dt_obj(dt);
762 dnode_t *zap_dn = obj->oo_dn;
763 struct osd_thandle *oh;
764 const char *name = (const char *)key;
767 LASSERT(dt_object_exists(dt));
768 LASSERT(osd_invariant(obj));
769 LASSERT(zap_dn != NULL);
772 oh = container_of0(th, struct osd_thandle, ot_super);
775 * In Orion . and .. were stored in the directory (not generated upon
776 * request as now). We preserve them for backward compatibility.
778 if (name[0] == '.') {
781 else if (name[1] == '.' && name[2] == 0)
785 /* do not specify the key as then DMU is trying to look it up
786 * which is very expensive. usually the layers above lookup
788 osd_tx_hold_zap(oh->ot_tx, zap_dn->dn_object, zap_dn, FALSE, NULL);
790 /* For destroying agent object if have. */
791 dmu_tx_hold_bonus(oh->ot_tx, DMU_NEW_OBJECT);
/* Delete entry "key" from directory "dt": look the entry up first (to
 * detect and free an agent object for remote entries), then remove the
 * key from the ZAP.  Dot/dotdot are handled specially for Orion-era
 * compatibility.
 * NOTE(review): several lines (dot/dotdot branch bodies, GOTO targets,
 * dmu_object_free() tail arguments) are missing from this extract. */
796 static int osd_dir_delete(const struct lu_env *env, struct dt_object *dt,
797 const struct dt_key *key, struct thandle *th)
799 struct luz_direntry *zde = &osd_oti_get(env)->oti_zde;
800 struct osd_object *obj = osd_dt_obj(dt);
801 struct osd_device *osd = osd_obj2dev(obj);
802 struct osd_thandle *oh;
803 dnode_t *zap_dn = obj->oo_dn;
804 char *name = (char *)key;
811 oh = container_of0(th, struct osd_thandle, ot_super);
814 * In Orion . and .. were stored in the directory (not generated upon
815 * request as now). we preserve them for backward compatibility
817 if (name[0] == '.') {
820 } else if (name[1] == '.' && name[2] == 0) {
825 /* XXX: We have to say that lookup during delete_declare will affect
826 * performance, but we have to check whether the name entry (to
827 * be deleted) has agent object or not to avoid orphans.
829 * We will improve that in the future, some possible solutions,
831 * 1) Some hint from the caller via transaction handle to make
832 * the lookup conditionally.
833 * 2) Enhance the ZFS logic to recognize the OSD lookup result
834 * and delete the given entry directly without lookup again
835 * internally. LU-10295 */
836 memset(&zde->lzd_fid, 0, sizeof(zde->lzd_fid));
837 rc = osd_zap_lookup(osd, zap_dn->dn_object, zap_dn, name, 8, 3, zde);
840 CERROR("%s: failed to locate entry %s: rc = %d\n",
841 osd->od_svname, name, rc);
/* remote target: the local dnode is only an agent, free it */
845 if (unlikely(osd_remote_fid(env, osd, &zde->lzd_fid) > 0)) {
846 rc = -dmu_object_free(osd->od_os, zde->lzd_reg.zde_dnode,
849 CERROR("%s: failed to destroy agent object (%llu) "
850 "for the entry %s: rc = %d\n", osd->od_svname,
851 (__u64)zde->lzd_reg.zde_dnode, name, rc);
854 /* Remove key from the ZAP */
855 rc = osd_zap_remove(osd, zap_dn->dn_object, zap_dn,
856 (char *)key, oh->ot_tx);
858 CERROR("%s: zap_remove %s failed: rc = %d\n",
859 osd->od_svname, name, rc);
/* Directory-iterator init: thin wrapper over osd_index_it_init().
 * NOTE(review): the original also initializes directory-specific
 * iterator state on lines missing from this extract. */
864 static struct dt_it *osd_dir_it_init(const struct lu_env *env,
865 struct dt_object *dt,
868 struct osd_zap_it *it;
870 it = (struct osd_zap_it *)osd_index_it_init(env, dt, unused);
874 RETURN((struct dt_it *)it);
/*
878 * Move Iterator to record specified by \a key
880 * \param di osd iterator
881 * \param key key for index
883 * \retval +ve di points to record with least key not larger than key
884 * \retval 0 di points to exact matched key
885 * \retval -ve failure
 *
 * NOTE(review): only the empty-key reset path is implemented (see the
 * LASSERT below); the dot/dotdot branch bodies are on lines missing
 * from this extract.
 */
887 static int osd_dir_it_get(const struct lu_env *env,
888 struct dt_it *di, const struct dt_key *key)
890 struct osd_zap_it *it = (struct osd_zap_it *)di;
891 struct osd_object *obj = it->ozi_obj;
892 char *name = (char *)key;
899 /* reset the cursor */
900 zap_cursor_fini(it->ozi_zc);
901 osd_obj_cursor_init_serialized(it->ozi_zc, obj, 0);
903 /* XXX: implementation of the API is broken at the moment */
904 LASSERT(((const char *)key)[0] == 0);
911 if (name[0] == '.') {
915 } else if (name[1] == '.' && name[2] == 0) {
921 /* neither . nor .. - some real record */
/* Intentionally empty, same rationale as osd_index_it_put(). */
929 static void osd_dir_it_put(const struct lu_env *env, struct dt_it *di)
931 /* PBS: do nothing : refs are incremented at retrieve and decremented
/*
936 * in Orion . and .. were stored in the directory, while ZPL
937 * and current osd-zfs generate them up on request. so, we
938 * need to ignore previously stored . and ..
 *
 * Retrieve the current cursor entry, advancing past any literal "." or
 * ".." records left over from Orion-era directories.
 * NOTE(review): the "isdot" assignments inside the branches are on lines
 * missing from this extract.
 */
940 static int osd_index_retrieve_skip_dots(struct osd_zap_it *it,
946 rc = -zap_cursor_retrieve(it->ozi_zc, za);
949 if (unlikely(rc == 0 && za->za_name[0] == '.')) {
950 if (za->za_name[1] == 0) {
952 } else if (za->za_name[1] == '.' &&
953 za->za_name[2] == 0) {
957 zap_cursor_advance(it->ozi_zc);
959 } while (unlikely(rc == 0 && isdot));
/*
965 * to load a directory entry at a time and stored it in
966 * iterator's in-memory data structure.
968 * \param di, struct osd_it_ea, iterator's in memory structure
970 * \retval +ve, iterator reached to end
971 * \retval 0, iterator not reached to end
972 * \retval -ve, on error
 *
 * NOTE(review): the handling inside the "ozi_pos <= 2" branch (advancing
 * past the synthetic dot/dotdot positions) is on lines missing from this
 * extract.
 */
974 static int osd_dir_it_next(const struct lu_env *env, struct dt_it *di)
976 struct osd_zap_it *it = (struct osd_zap_it *)di;
977 zap_attribute_t *za = &osd_oti_get(env)->oti_za;
982 /* temp. storage should be enough for any key supported by ZFS */
983 CLASSERT(sizeof(za->za_name) <= sizeof(it->ozi_name));
986 * the first ->next() moves the cursor to .
987 * the second ->next() moves the cursor to ..
988 * then we get to the real records and have to verify any exist
990 if (it->ozi_pos <= 2) {
996 zap_cursor_advance(it->ozi_zc);
1000 * According to current API we need to return error if its last entry.
1001 * zap_cursor_advance() does not return any value. So we need to call
1002 * retrieve to check if there is any record. We should make
1003 * changes to Iterator API to not return status for this API
1005 rc = osd_index_retrieve_skip_dots(it, za);
1007 if (rc == -ENOENT) /* end of dir */
/* Return the name at the current iterator position: the synthetic "."
 * and ".." at positions <= 2, otherwise the ZAP entry name copied into
 * the per-iterator buffer (so the pointer stays valid after the cursor
 * moves). */
1013 static struct dt_key *osd_dir_it_key(const struct lu_env *env,
1014 const struct dt_it *di)
1016 struct osd_zap_it *it = (struct osd_zap_it *)di;
1017 zap_attribute_t *za = &osd_oti_get(env)->oti_za;
1021 if (it->ozi_pos <= 1) {
1023 RETURN((struct dt_key *)".");
1024 } else if (it->ozi_pos == 2) {
1025 RETURN((struct dt_key *)"..");
1028 if ((rc = -zap_cursor_retrieve(it->ozi_zc, za)))
1029 RETURN(ERR_PTR(rc));
1031 strcpy(it->ozi_name, za->za_name);
1033 RETURN((struct dt_key *)it->ozi_name);
/* Return the length of the name at the current iterator position
 * (1 for ".", 2 for ".."; the branch bodies are on lines missing from
 * this extract). */
1036 static int osd_dir_it_key_size(const struct lu_env *env, const struct dt_it *di)
1038 struct osd_zap_it *it = (struct osd_zap_it *)di;
1039 zap_attribute_t *za = &osd_oti_get(env)->oti_za;
1043 if (it->ozi_pos <= 1) {
1046 } else if (it->ozi_pos == 2) {
1050 if ((rc = -zap_cursor_retrieve(it->ozi_zc, za)) == 0)
1051 rc = strlen(za->za_name);
/* Rewrite the luz_direntry stored under "key" in ZAP "zap" inside a
 * fresh, self-contained transaction (used by the LFSCK repair path in
 * osd_dir_it_rec()).
 * NOTE(review): the return-type line, error handling and dmu_tx_commit/
 * abort lines are missing from this extract. */
1057 osd_dirent_update(const struct lu_env *env, struct osd_device *dev,
1058 uint64_t zap, const char *key, struct luz_direntry *zde)
1064 tx = dmu_tx_create(dev->od_os);
1068 dmu_tx_hold_zap(tx, zap, TRUE, NULL);
1069 rc = -dmu_tx_assign(tx, TXG_WAIT);
1071 rc = -zap_update(dev->od_os, zap, key, 8, sizeof(*zde) / 8,
1072 (const void *)zde, tx);
/* LFSCK repair helper: create an agent object for a remote entry and
 * rewrite the directory entry to reference it, inside a private
 * transaction.  A no-op under LUDA_VERIFY_DRYRUN.
 * NOTE(review): rc initialization, GOTO targets and the tx commit/abort
 * lines are missing from this extract. */
1081 static int osd_update_entry_for_agent(const struct lu_env *env,
1082 struct osd_device *osd,
1083 uint64_t zap, const char *name,
1084 struct luz_direntry *zde, __u32 attr)
1086 dmu_tx_t *tx = NULL;
1090 if (attr & LUDA_VERIFY_DRYRUN)
1093 tx = dmu_tx_create(osd->od_os);
1095 GOTO(out, rc = -ENOMEM);
1097 dmu_tx_hold_sa_create(tx, osd_find_dnsize(osd, OSD_BASE_EA_IN_BONUS));
1098 dmu_tx_hold_zap(tx, zap, FALSE, NULL);
1099 rc = -dmu_tx_assign(tx, TXG_WAIT);
1105 rc = osd_create_agent_object(env, osd, zde, zap, tx);
1107 rc = -zap_update(osd->od_os, zap, name, 8, sizeof(*zde) / 8,
1108 (const void *)zde, tx);
1114 CDEBUG(D_LFSCK, "%s: Updated (%s) remote entry for "DFID": rc = %d\n",
1115 osd_name(osd), (attr & LUDA_VERIFY_DRYRUN) ? "(ro)" : "(rw)",
1116 PFID(&zde->lzd_fid), rc);
/* Fill "dtrec" (a lu_dirent) for the current iterator position:
 * synthesize "." and ".." at positions <= 2, otherwise decode the ZAP
 * entry.  Entries without a sane FID are resolved via the target's LMA;
 * under LUDA_VERIFY the entry may additionally be repaired (agent object
 * creation / FID rewrite) and flagged LUDA_REPAIR or LUDA_UNKNOWN.
 * NOTE(review): many original lines (GOTO labels, some branch bodies and
 * RETURN statements) are missing from this extract — consult the
 * complete file before editing. */
1120 static int osd_dir_it_rec(const struct lu_env *env, const struct dt_it *di,
1121 struct dt_rec *dtrec, __u32 attr)
1123 struct osd_zap_it *it = (struct osd_zap_it *)di;
1124 struct lu_dirent *lde = (struct lu_dirent *)dtrec;
1125 struct osd_thread_info *info = osd_oti_get(env);
1126 struct luz_direntry *zde = &info->oti_zde;
1127 zap_attribute_t *za = &info->oti_za;
1128 struct lu_fid *fid = &info->oti_fid;
1129 struct osd_device *osd = osd_obj2dev(it->ozi_obj);
/* position <= 1: synthesize "." from the directory's own FID */
1134 if (it->ozi_pos <= 1) {
1135 lde->lde_hash = cpu_to_le64(1);
1136 strcpy(lde->lde_name, ".");
1137 lde->lde_namelen = cpu_to_le16(1);
1138 fid_cpu_to_le(&lde->lde_fid,
1139 lu_object_fid(&it->ozi_obj->oo_dt.do_lu));
1140 lde->lde_attrs = LUDA_FID;
1141 /* append lustre attributes */
1142 osd_it_append_attrs(lde, attr, 1, IFTODT(S_IFDIR));
1143 lde->lde_reclen = cpu_to_le16(lu_dirent_calc_size(1, attr));
/* position 2: synthesize ".." via the parent-FID lookup */
1146 } else if (it->ozi_pos == 2) {
1147 lde->lde_hash = cpu_to_le64(2);
1148 strcpy(lde->lde_name, "..");
1149 lde->lde_namelen = cpu_to_le16(2);
1150 rc = osd_find_parent_fid(env, &it->ozi_obj->oo_dt, fid);
1152 fid_cpu_to_le(&lde->lde_fid, fid);
1153 lde->lde_attrs = LUDA_FID;
1154 } else if (rc != -ENOENT) {
1155 /* ENOENT happens at the root of filesystem, ignore */
1159 /* append lustre attributes */
1160 osd_it_append_attrs(lde, attr, 2, IFTODT(S_IFDIR));
1161 lde->lde_reclen = cpu_to_le16(lu_dirent_calc_size(2, attr));
/* real ZAP record */
1167 rc = -zap_cursor_retrieve(it->ozi_zc, za);
1171 lde->lde_hash = cpu_to_le64(osd_zap_cursor_serialize(it->ozi_zc));
1172 namelen = strlen(za->za_name);
1173 if (namelen > NAME_MAX)
1175 strcpy(lde->lde_name, za->za_name);
1176 lde->lde_namelen = cpu_to_le16(namelen);
/* directory entries are stored as arrays of 64-bit integers */
1178 if (za->za_integer_length != 8) {
1179 CERROR("%s: unsupported direntry format: %d %d\n",
1181 za->za_integer_length, (int)za->za_num_integers);
1185 rc = osd_zap_lookup(osd, it->ozi_zc->zc_zapobj, it->ozi_obj->oo_dn,
1186 za->za_name, za->za_integer_length, 3, zde);
1190 if (za->za_num_integers >= 3 && fid_is_sane(&zde->lzd_fid)) {
1191 lde->lde_attrs = LUDA_FID;
1192 fid_cpu_to_le(&lde->lde_fid, &zde->lzd_fid);
1193 if (unlikely(zde->lzd_reg.zde_dnode == ZFS_NO_OBJECT &&
1194 osd_remote_fid(env, osd, &zde->lzd_fid) > 0 &&
1195 attr & LUDA_VERIFY)) {
1196 /* It is mainly used for handling the MDT
1197 * upgraded from old ZFS based backend. */
1198 rc = osd_update_entry_for_agent(env, osd,
1199 it->ozi_obj->oo_dn->dn_object,
1200 za->za_name, zde, attr);
1202 lde->lde_attrs |= LUDA_REPAIR;
1204 lde->lde_attrs |= LUDA_UNKNOWN;
1207 GOTO(pack_attr, rc = 0);
/* fault injection: skip LMA-based FID recovery */
1210 if (OBD_FAIL_CHECK(OBD_FAIL_FID_LOOKUP))
1213 rc = osd_get_fid_by_oid(env, osd, zde->lzd_reg.zde_dnode, fid);
1215 lde->lde_attrs = LUDA_UNKNOWN;
1216 GOTO(pack_attr, rc = 0);
1219 if (!(attr & LUDA_VERIFY)) {
1220 fid_cpu_to_le(&lde->lde_fid, fid);
1221 lde->lde_attrs = LUDA_FID;
1222 GOTO(pack_attr, rc = 0);
1225 if (attr & LUDA_VERIFY_DRYRUN) {
1226 fid_cpu_to_le(&lde->lde_fid, fid);
1227 lde->lde_attrs = LUDA_FID | LUDA_REPAIR;
1228 GOTO(pack_attr, rc = 0);
/* LUDA_VERIFY (read-write): persist the recovered FID in the entry */
1231 fid_cpu_to_le(&lde->lde_fid, fid);
1232 lde->lde_attrs = LUDA_FID;
1233 zde->lzd_fid = *fid;
1234 rc = osd_dirent_update(env, osd, it->ozi_zc->zc_zapobj,
1237 lde->lde_attrs |= LUDA_UNKNOWN;
1238 GOTO(pack_attr, rc = 0);
1241 lde->lde_attrs |= LUDA_REPAIR;
1243 GOTO(pack_attr, rc = 0);
1246 osd_it_append_attrs(lde, attr, namelen, zde->lzd_reg.zde_type);
1247 lde->lde_reclen = cpu_to_le16(lu_dirent_calc_size(namelen, attr));
/* Return the size of the lu_dirent the current position would produce:
 * fixed sizes for the synthetic "." / ".." (namelen set on lines missing
 * from this extract), otherwise computed from the ZAP entry name. */
1251 static int osd_dir_it_rec_size(const struct lu_env *env, const struct dt_it *di,
1254 struct osd_zap_it *it = (struct osd_zap_it *)di;
1255 zap_attribute_t *za = &osd_oti_get(env)->oti_za;
1260 if (it->ozi_pos <= 1)
1262 else if (it->ozi_pos == 2)
1266 rc = lu_dirent_calc_size(namelen, attr);
1270 rc = -zap_cursor_retrieve(it->ozi_zc, za);
1271 if (unlikely(rc != 0))
1274 if (za->za_integer_length != 8 || za->za_num_integers < 3) {
1275 CERROR("%s: unsupported direntry format: %d %d\n",
1276 osd_obj2dev(it->ozi_obj)->od_svname,
1277 za->za_integer_length, (int)za->za_num_integers);
1281 namelen = strlen(za->za_name);
1282 if (namelen > NAME_MAX)
1285 rc = lu_dirent_calc_size(namelen, attr);
/* Serialize the iterator position as a 64-bit hash for later ->load():
 * the synthetic dot/dotdot positions map to small fixed values (on a
 * line missing from this extract), real records to the Lustre-format
 * cursor hash. */
1290 static __u64 osd_dir_it_store(const struct lu_env *env, const struct dt_it *di)
1292 struct osd_zap_it *it = (struct osd_zap_it *)di;
1296 if (it->ozi_pos <= 2)
1299 pos = osd_zap_cursor_serialize(it->ozi_zc);
/*
1306 * rc == 0 -> end of directory.
1307 * rc > 0 -> ok, proceed.
1308 * rc < 0 -> error. ( EOVERFLOW can be masked.)
 *
 * Reposition the iterator at a hash previously produced by ->store().
 * NOTE(review): the handling of the small synthetic-position hashes is
 * on lines missing from this extract.
 */
1310 static int osd_dir_it_load(const struct lu_env *env,
1311 const struct dt_it *di, __u64 hash)
1313 struct osd_zap_it *it = (struct osd_zap_it *)di;
1314 struct osd_object *obj = it->ozi_obj;
1315 zap_attribute_t *za = &osd_oti_get(env)->oti_za;
1319 /* reset the cursor */
1320 zap_cursor_fini(it->ozi_zc);
1321 osd_obj_cursor_init_serialized(it->ozi_zc, obj, hash);
1328 /* to return whether the end has been reached */
1329 rc = osd_index_retrieve_skip_dots(it, za);
1332 else if (rc == -ENOENT)
/* Index-operations vector for directory (FID-in-dirent) indices.
 * NOTE(review): the ".dio_it = {" sub-structure opener is on a line
 * missing from this extract. */
1339 struct dt_index_operations osd_dir_ops = {
1340 .dio_lookup = osd_dir_lookup,
1341 .dio_declare_insert = osd_declare_dir_insert,
1342 .dio_insert = osd_dir_insert,
1343 .dio_declare_delete = osd_declare_dir_delete,
1344 .dio_delete = osd_dir_delete,
1346 .init = osd_dir_it_init,
1347 .fini = osd_index_it_fini,
1348 .get = osd_dir_it_get,
1349 .put = osd_dir_it_put,
1350 .next = osd_dir_it_next,
1351 .key = osd_dir_it_key,
1352 .key_size = osd_dir_it_key_size,
1353 .rec = osd_dir_it_rec,
1354 .rec_size = osd_dir_it_rec_size,
1355 .store = osd_dir_it_store,
1356 .load = osd_dir_it_load
1361 * Primitives for index files using binary keys.
1364 /* key integer_size is 8 */
1365 static int osd_prepare_key_uint64(struct osd_object *o, __u64 *dst,
1366 const struct dt_key *src)
1373 /* align keysize to 64bit */
1374 size = (o->oo_keysize + sizeof(__u64) - 1) / sizeof(__u64);
1375 size *= sizeof(__u64);
1377 LASSERT(size <= MAXNAMELEN);
1379 if (unlikely(size > o->oo_keysize))
1380 memset(dst + o->oo_keysize, 0, size - o->oo_keysize);
1381 memcpy(dst, (const char *)src, o->oo_keysize);
1383 return (size/sizeof(__u64));
/* Look up a binary key in a uint64-keyed ZAP index; the record is copied
 * into "rec".  Returns 1 on success, negative errno on failure.
 * NOTE(review): the trailing "(void *)rec)" argument line of the
 * zap_lookup_uint64() call is missing from this extract. */
1386 static int osd_index_lookup(const struct lu_env *env, struct dt_object *dt,
1387 struct dt_rec *rec, const struct dt_key *key)
1389 struct osd_object *obj = osd_dt_obj(dt);
1390 struct osd_device *osd = osd_obj2dev(obj);
1391 __u64 *k = osd_oti_get(env)->oti_key64;
/* pad the key into 64-bit words; rc is the word count */
1395 rc = osd_prepare_key_uint64(obj, k, key);
1397 rc = -zap_lookup_uint64(osd->od_os, obj->oo_dn->dn_object,
1398 k, rc, obj->oo_recusize, obj->oo_recsize,
1400 RETURN(rc == 0 ? 1 : rc);
/* Declare an insertion into a uint64-keyed ZAP index: reserve tx room
 * for the ZAP update.
 * NOTE(review): the thandle parameter line and the osd_tx_hold_zap()
 * tail arguments are missing from this extract. */
1403 static int osd_declare_index_insert(const struct lu_env *env,
1404 struct dt_object *dt,
1405 const struct dt_rec *rec,
1406 const struct dt_key *key,
1409 struct osd_object *obj = osd_dt_obj(dt);
1410 struct osd_thandle *oh;
1413 LASSERT(th != NULL);
1414 oh = container_of0(th, struct osd_thandle, ot_super);
1416 LASSERT(obj->oo_dn);
1418 /* do not specify the key as then DMU is trying to look it up
1419 * which is very expensive. usually the layers above lookup
1420 * before insertion */
1421 osd_tx_hold_zap(oh->ot_tx, obj->oo_dn->dn_object, obj->oo_dn,
/*
 * Insert a (binary key, record) pair into an index object.
 *
 * The key is padded to a 64bit multiple via osd_prepare_key_uint64()
 * and then added to the ZAP with zap_add_uint64() inside the
 * transaction carried by \a th.
 *
 * \retval 0 on success, negative errno on failure (e.g. -EEXIST if the
 *	   key is already present — presumably callers look up first,
 *	   see the declare path)
 */
static int osd_index_insert(const struct lu_env *env, struct dt_object *dt,
			    const struct dt_rec *rec, const struct dt_key *key,
			    struct thandle *th, int ignore_quota)
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	__u64 *k = osd_oti_get(env)->oti_key64;
	LASSERT(obj->oo_dn);
	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));
	LASSERT(th != NULL);
	oh = container_of0(th, struct osd_thandle, ot_super);
	/* rc is the key length in 64bit units, used as key_numints below */
	rc = osd_prepare_key_uint64(obj, k, key);
	/* Insert (key,oid) into ZAP */
	rc = -zap_add_uint64(osd->od_os, obj->oo_dn->dn_object,
			     k, rc, obj->oo_recusize, obj->oo_recsize,
			     (void *)rec, oh->ot_tx);
/*
 * Declare (reserve transaction credits for) an index delete.
 *
 * Mirror of osd_declare_index_insert(): only the ZAP hold is taken,
 * without naming the key, to avoid an expensive DMU lookup here.
 */
static int osd_declare_index_delete(const struct lu_env *env,
				    struct dt_object *dt,
				    const struct dt_key *key,
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_thandle *oh;
	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));
	LASSERT(th != NULL);
	LASSERT(obj->oo_dn);
	oh = container_of0(th, struct osd_thandle, ot_super);
	/* do not specify the key as then DMU is trying to look it up
	 * which is very expensive. usually the layers above lookup
	 * before deletion */
	osd_tx_hold_zap(oh->ot_tx, obj->oo_dn->dn_object, obj->oo_dn,
/*
 * Delete a binary key from an index object.
 *
 * The key is padded into the per-thread 64bit-aligned buffer and then
 * removed from the ZAP with zap_remove_uint64() inside the transaction
 * carried by \a th.
 */
static int osd_index_delete(const struct lu_env *env, struct dt_object *dt,
			    const struct dt_key *key, struct thandle *th)
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	__u64 *k = osd_oti_get(env)->oti_key64;
	LASSERT(obj->oo_dn);
	LASSERT(th != NULL);
	oh = container_of0(th, struct osd_thandle, ot_super);
	/* rc is the key length in 64bit units */
	rc = osd_prepare_key_uint64(obj, k, key);
	/* Remove binary key from the ZAP */
	rc = -zap_remove_uint64(osd->od_os, obj->oo_dn->dn_object,
/*
 * Position the binary-key index iterator at \a key.
 *
 * Positioning at an arbitrary binary key is not implemented — ZFS has
 * no binary version of zap_cursor_move_to_key() — so any non-zero key
 * is reported with CERROR and the cursor is simply reset to the start
 * of the ZAP.
 */
static int osd_index_it_get(const struct lu_env *env, struct dt_it *di,
			    const struct dt_key *key)
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	struct osd_object *obj = it->ozi_obj;
	struct osd_device *osd = osd_obj2dev(obj);
	LASSERT(it->ozi_zc);
	 * XXX: we need a binary version of zap_cursor_move_to_key()
	 * to implement this API */
	if (*((const __u64 *)key) != 0)
		CERROR("NOT IMPLEMETED YET (move to %#llx)\n",
	/* restart the cursor from the beginning of the ZAP */
	zap_cursor_fini(it->ozi_zc);
	zap_cursor_init(it->ozi_zc, osd->od_os, obj->oo_dn->dn_object);
/*
 * Advance the binary-key index iterator to the next entry.
 *
 * \retval 0 if there is a next entry, +1 presumably at end of index
 *	   (see the dt iterator convention), negative errno on error
 */
static int osd_index_it_next(const struct lu_env *env, struct dt_it *di)
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	zap_attribute_t *za = &osd_oti_get(env)->oti_za;
	/* skip the advance right after a reset/load so the first entry
	 * is not lost */
	if (it->ozi_reset == 0)
		zap_cursor_advance(it->ozi_zc);
	 * According to current API we need to return error if it's last entry.
	 * zap_cursor_advance() does not return any value. So we need to call
	 * retrieve to check if there is any record. We should make
	 * changes to Iterator API to not return status for this API
	rc = -zap_cursor_retrieve(it->ozi_zc, za);
/*
 * Return the key at the current iterator position.
 *
 * ZFS stores the binary key in the entry name, so oo_keysize bytes are
 * copied out of za_name into the iterator's key buffer.
 *
 * \retval pointer to the iterator-owned key copy, or ERR_PTR on error
 */
static struct dt_key *osd_index_it_key(const struct lu_env *env,
				       const struct dt_it *di)
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	struct osd_object *obj = it->ozi_obj;
	zap_attribute_t *za = &osd_oti_get(env)->oti_za;
	rc = -zap_cursor_retrieve(it->ozi_zc, za);
		RETURN(ERR_PTR(rc));
	/* the binary key is stored in the name */
	memcpy(&it->ozi_key, za->za_name, obj->oo_keysize);
	RETURN((struct dt_key *)&it->ozi_key);
/* Return the (fixed) key size of the index being iterated; binary-key
 * indices do not support variable-size keys (see osd_index_try()). */
static int osd_index_it_key_size(const struct lu_env *env,
				 const struct dt_it *di)
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	struct osd_object *obj = it->ozi_obj;
	RETURN(obj->oo_keysize);
/*
 * Return the record at the current iterator position.
 *
 * zap_cursor_retrieve() only yields the entry name (i.e. the key), so
 * the record has to be fetched with a separate zap_lookup_uint64()
 * using that key re-packed into the 64bit-aligned scratch buffer.
 */
static int osd_index_it_rec(const struct lu_env *env, const struct dt_it *di,
			    struct dt_rec *rec, __u32 attr)
	zap_attribute_t *za = &osd_oti_get(env)->oti_za;
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	struct osd_object *obj = it->ozi_obj;
	struct osd_device *osd = osd_obj2dev(obj);
	__u64 *k = osd_oti_get(env)->oti_key64;
	rc = -zap_cursor_retrieve(it->ozi_zc, za);
	/* the binary key lives in za_name; rc becomes the key length in
	 * 64bit units for the lookup below */
	rc = osd_prepare_key_uint64(obj, k, (const struct dt_key *)za->za_name);
	rc = -zap_lookup_uint64(osd->od_os, obj->oo_dn->dn_object,
				k, rc, obj->oo_recusize, obj->oo_recsize,
/* Serialize the cursor position into an opaque 64bit cookie that can
 * later be passed back to osd_index_it_load(). */
static __u64 osd_index_it_store(const struct lu_env *env,
				const struct dt_it *di)
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	RETURN((__u64)zap_cursor_serialize(it->ozi_zc));
/*
 * Restore the iterator to a position previously saved by
 * osd_index_it_store(): the cursor is re-initialized from the
 * serialized cookie and then probed with zap_cursor_retrieve().
 *
 * \retval +1 if an entry exists at the position, 0 at end of index
 *	   (the -ENOENT branch below — presumably remapped; the
 *	   remapping lines are not visible here), negative errno on error
 */
static int osd_index_it_load(const struct lu_env *env, const struct dt_it *di,
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	struct osd_object *obj = it->ozi_obj;
	struct osd_device *osd = osd_obj2dev(obj);
	zap_attribute_t *za = &osd_oti_get(env)->oti_za;
	/* reset the cursor */
	zap_cursor_fini(it->ozi_zc);
	zap_cursor_init_serialized(it->ozi_zc, osd->od_os,
				   obj->oo_dn->dn_object, hash);
	/* check whether an entry exists at the restored position */
	rc = -zap_cursor_retrieve(it->ozi_zc, za);
	else if (rc == -ENOENT)
/* Index operations vector for general-purpose index objects with
 * fixed-size binary keys/records stored in a uint64-keyed ZAP.
 * Selected by osd_index_try() for non-directory, non-accounting
 * index features. */
static struct dt_index_operations osd_index_ops = {
	.dio_lookup		= osd_index_lookup,
	.dio_declare_insert	= osd_declare_index_insert,
	.dio_insert		= osd_index_insert,
	.dio_declare_delete	= osd_declare_index_delete,
	.dio_delete		= osd_index_delete,
	/* iterator methods */
	.init		= osd_index_it_init,
	.fini		= osd_index_it_fini,
	.get		= osd_index_it_get,
	.put		= osd_index_it_put,
	.next		= osd_index_it_next,
	.key		= osd_index_it_key,
	.key_size	= osd_index_it_key_size,
	.rec		= osd_index_it_rec,
	.store		= osd_index_it_store,
	.load		= osd_index_it_load
/* State of the "otable" iterator that walks the ZFS metadnode to
 * enumerate all Lustre-visible objects (used e.g. by OI scrub). */
struct osd_metadnode_it {
	struct osd_device	*mit_dev;	/* device being scanned */
	/* FID of the entry at the current position (zeroed when the
	 * current dnode is not a Lustre object) */
	struct lu_fid		 mit_fid;
	/* dnode# up to which prefetch has been issued; 0 = not started */
	__u64			 mit_prefetched_dnode;
/*
 * Allocate and initialize an otable (metadnode) iterator.
 *
 * Forces a txg sync on read-write mounts so dmu_object_next() can see
 * dnodes created in the still-uncommitted txg.
 *
 * \retval iterator handle, or ERR_PTR(-ENOMEM) on allocation failure
 */
static struct dt_it *osd_zfs_otable_it_init(const struct lu_env *env,
					    struct dt_object *dt, __u32 attr)
	struct osd_device *dev = osd_dev(dt->do_lu.lo_dev);
	struct osd_metadnode_it *it;
	if (unlikely(it == NULL))
		RETURN(ERR_PTR(-ENOMEM));
	/* XXX: dmu_object_next() does NOT find dnodes allocated
	 * in the current non-committed txg, so we force txg
	 * commit to find all existing dnodes ... */
	if (!dev->od_dt_dev.dd_rdonly)
		txg_wait_synced(dmu_objset_pool(dev->od_os), 0ULL);
	RETURN((struct dt_it *)it);
/* Release the otable iterator allocated by osd_zfs_otable_it_init(). */
static void osd_zfs_otable_it_fini(const struct lu_env *env, struct dt_it *di)
	struct osd_metadnode_it *it = (struct osd_metadnode_it *)di;
/* Position the otable iterator; the metadnode scan does not support
 * seeking to an arbitrary key, so this is effectively a no-op. */
static int osd_zfs_otable_it_get(const struct lu_env *env,
				 struct dt_it *di, const struct dt_key *key)
1699 static void osd_zfs_otable_it_put(const struct lu_env *env, struct dt_it *di)
/* number of dnodes to prefetch ahead of the iterator position */
#define OTABLE_PREFETCH		256

/*
 * Keep a window of up to OTABLE_PREFETCH dnodes prefetched ahead of
 * the otable iterator, refilling once the window drops below half.
 * This hides dnode read latency during the metadnode scan.
 */
static void osd_zfs_otable_prefetch(const struct lu_env *env,
				    struct osd_metadnode_it *it)
	struct osd_device *dev = it->mit_dev;
	/* can go negative on the very first access to the iterator
	 * or if some non-Lustre objects were found */
	if (unlikely(it->mit_prefetched < 0))
		it->mit_prefetched = 0;
	/* still more than half the window in flight — nothing to do */
	if (it->mit_prefetched >= (OTABLE_PREFETCH >> 1))
	if (it->mit_prefetched_dnode == 0)
		it->mit_prefetched_dnode = it->mit_pos;
	while (it->mit_prefetched < OTABLE_PREFETCH) {
		rc = -dmu_object_next(dev->od_os, &it->mit_prefetched_dnode,
		if (unlikely(rc != 0))
		/* issue an async read for the dnode's metadata */
		osd_dmu_prefetch(dev->od_os, it->mit_prefetched_dnode,
				 0, 0, 0, ZIO_PRIORITY_ASYNC_READ);
		it->mit_prefetched++;
/*
 * Advance the otable iterator to the next Lustre-visible object.
 *
 * Walks the metadnode with dmu_object_next(), loading each candidate's
 * LMA xattr; dnodes without an LMA, or marked LMAC_NOT_IN_OI /
 * LMAI_AGENT, are skipped as not Lustre-visible.  On a hit the
 * object's self FID is saved in mit_fid.
 *
 * \retval 0 if positioned on an object (more items may follow),
 *	   +1 at the end of the device, negative errno on error
 */
static int osd_zfs_otable_it_next(const struct lu_env *env, struct dt_it *di)
	struct osd_metadnode_it *it = (struct osd_metadnode_it *)di;
	struct lustre_mdt_attrs *lma;
	struct osd_device *dev = it->mit_dev;
	nvlist_t *nvbuf = NULL;
	/* clear the FID so a skipped/failed entry never leaks the
	 * previous object's identity */
	memset(&it->mit_fid, 0, sizeof(it->mit_fid));
	dnode = it->mit_pos;
	rc = -dmu_object_next(dev->od_os, &it->mit_pos, B_FALSE, 0);
	if (unlikely(rc != 0))
	/* one prefetched dnode consumed */
	it->mit_prefetched--;
	/* LMA is required for this to be a Lustre object.
	 * If there is no xattr skip it. */
	rc = __osd_xattr_load_by_oid(dev, it->mit_pos, &nvbuf);
	if (unlikely(rc != 0))
	LASSERT(nvbuf != NULL);
	rc = -nvlist_lookup_byte_array(nvbuf, XATTR_NAME_LMA, &v, &s);
	if (likely(rc == 0)) {
		/* LMA is stored on disk in little-endian; swab in place */
		lma = (struct lustre_mdt_attrs *)v;
		lustre_lma_swab(lma);
		if (likely(!(lma->lma_compat & LMAC_NOT_IN_OI) &&
			   !(lma->lma_incompat & LMAI_AGENT))) {
			it->mit_fid = lma->lma_self_fid;
	/* not a Lustre visible object, try next one */
	/* we aren't prefetching in the above loop because the number of
	 * non-Lustre objects is very small and we will be repeating very
	 * rare. in case we want to use this to iterate over non-Lustre
	 * objects (i.e. when we convert regular ZFS in Lustre) it makes
	 * sense to initiate prefetching in the loop */
	/* 0 - there are more items, +1 - the end */
	if (likely(rc == 0))
		osd_zfs_otable_prefetch(env, it);
	CDEBUG(D_OTHER, "advance: %llu -> %llu "DFID": %d\n", dnode,
	       it->mit_pos, PFID(&it->mit_fid), rc);
/* Return the key at the current otable position — presumably the
 * current dnode number (mit_pos); body not visible here, confirm. */
static struct dt_key *osd_zfs_otable_it_key(const struct lu_env *env,
					    const struct dt_it *di)
/* The otable key is a dnode number, so its size is always 64 bits. */
static int osd_zfs_otable_it_key_size(const struct lu_env *env,
				      const struct dt_it *di)
	return sizeof(__u64);
/* Return the record at the current otable position: the object's FID
 * (saved in mit_fid by osd_zfs_otable_it_next()) copied into \a rec. */
static int osd_zfs_otable_it_rec(const struct lu_env *env,
				 const struct dt_it *di,
				 struct dt_rec *rec, __u32 attr)
	struct osd_metadnode_it *it = (struct osd_metadnode_it *)di;
	struct lu_fid *fid = (struct lu_fid *)rec;
/* Serialize the otable iterator position — presumably the current
 * dnode number (mit_pos); the return line is not visible here. */
static __u64 osd_zfs_otable_it_store(const struct lu_env *env,
				     const struct dt_it *di)
	struct osd_metadnode_it *it = (struct osd_metadnode_it *)di;
/*
 * Restore the otable iterator from a cookie previously returned by
 * osd_zfs_otable_it_store(): the prefetch window is reset and the
 * iterator advanced to the first Lustre object at/after the position.
 */
static int osd_zfs_otable_it_load(const struct lu_env *env,
				  const struct dt_it *di, __u64 hash)
	struct osd_metadnode_it *it = (struct osd_metadnode_it *)di;
	/* drop any stale prefetch state from the previous position */
	it->mit_prefetched = 0;
	it->mit_prefetched_dnode = 0;
	return osd_zfs_otable_it_next(env, (struct dt_it *)di);
/* Combined key+record accessor for the otable iterator; body not
 * visible here — presumably a stub, confirm against the full source. */
static int osd_zfs_otable_it_key_rec(const struct lu_env *env,
				     const struct dt_it *di, void *key_rec)
/* Iterator-only operations vector for the otable (metadnode scan)
 * feature; selected by osd_index_try() for dt_otable_features.
 * There are no dio_* methods: the otable is read-only enumeration. */
const struct dt_index_operations osd_zfs_otable_ops = {
	.init     = osd_zfs_otable_it_init,
	.fini     = osd_zfs_otable_it_fini,
	.get      = osd_zfs_otable_it_get,
	.put      = osd_zfs_otable_it_put,
	.next     = osd_zfs_otable_it_next,
	.key      = osd_zfs_otable_it_key,
	.key_size = osd_zfs_otable_it_key_size,
	.rec      = osd_zfs_otable_it_rec,
	.store    = osd_zfs_otable_it_store,
	.load     = osd_zfs_otable_it_load,
	.key_rec  = osd_zfs_otable_it_key_rec,
/*
 * Attach the proper dt_index_operations vector to \a dt based on the
 * requested index features:
 *   - dt_otable_features    -> osd_zfs_otable_ops (metadnode scan)
 *   - dt_directory_features -> osd_dir_ops (must be a ZAP object)
 *   - dt_acct_features      -> osd_acct_index_ops (quota accounting)
 *   - anything else         -> osd_index_ops, after validating that the
 *     features describe fixed-size keys/records ZAP can store
 *
 * \retval 0 on success; -ERANGE, -ENOTDIR, -EINVAL or -E2BIG when the
 *	   requested features cannot be supported
 */
int osd_index_try(const struct lu_env *env, struct dt_object *dt,
		  const struct dt_index_features *feat)
	struct osd_object *obj = osd_dt_obj(dt);
	down_read(&obj->oo_guard);
	 * XXX: implement support for fixed-size keys sorted with natural
	 * numerical way (not using internal hash value)
	if (feat->dif_flags & DT_IND_RANGE)
		GOTO(out, rc = -ERANGE);
	if (unlikely(feat == &dt_otable_features)) {
		dt->do_index_ops = &osd_zfs_otable_ops;
	LASSERT(!dt_object_exists(dt) || obj->oo_dn != NULL);
	if (likely(feat == &dt_directory_features)) {
		if (!dt_object_exists(dt) || osd_object_is_zap(obj->oo_dn))
			dt->do_index_ops = &osd_dir_ops;
			GOTO(out, rc = -ENOTDIR);
	} else if (unlikely(feat == &dt_acct_features)) {
		LASSERT(fid_is_acct(lu_object_fid(&dt->do_lu)));
		dt->do_index_ops = &osd_acct_index_ops;
	} else if (dt->do_index_ops == NULL) {
		/* For index file, we don't support variable key & record sizes
		 * and the key has to be unique */
		if ((feat->dif_flags & ~DT_IND_UPDATE) != 0)
			GOTO(out, rc = -EINVAL);
		if (feat->dif_keysize_max > ZAP_MAXNAMELEN)
			GOTO(out, rc = -E2BIG);
		if (feat->dif_keysize_max != feat->dif_keysize_min)
			GOTO(out, rc = -EINVAL);
		/* As for the record size, it should be a multiple of 8 bytes
		 * and smaller than the maximum value length supported by ZAP.
		if (feat->dif_recsize_max > ZAP_MAXVALUELEN)
			GOTO(out, rc = -E2BIG);
		if (feat->dif_recsize_max != feat->dif_recsize_min)
			GOTO(out, rc = -EINVAL);
		obj->oo_keysize = feat->dif_keysize_max;
		obj->oo_recsize = feat->dif_recsize_max;
		obj->oo_recusize = 1;
		/* ZFS prefers to work with array of 64bits */
		if ((obj->oo_recsize & 7) == 0) {
			/* store the record as oo_recsize 64bit integers
			 * instead of bytes */
			obj->oo_recsize >>= 3;
			obj->oo_recusize = 8;
		dt->do_index_ops = &osd_index_ops;
	up_read(&obj->oo_guard);