4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2016, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/osd-zfs/osd_oi.c
33 * OI functions to map fid to dnode
35 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
36 * Author: Mike Pershin <tappro@whamcloud.com>
37 * Author: Di Wang <di.wang@intel.com>
40 #define DEBUG_SUBSYSTEM S_OSD
42 #include <lustre_ver.h>
43 #include <libcfs/libcfs.h>
44 #include <obd_support.h>
45 #include <lustre_net.h>
47 #include <obd_class.h>
48 #include <lustre_disk.h>
49 #include <lustre_fid.h>
51 #include "osd_internal.h"
53 #include <sys/dnode.h>
58 #include <sys/spa_impl.h>
59 #include <sys/zfs_znode.h>
60 #include <sys/dmu_tx.h>
61 #include <sys/dmu_objset.h>
62 #include <sys/dsl_prop.h>
63 #include <sys/sa_impl.h>
/* Default number of OI (Object Index) containers: 1 << 7 == 128.
 * OSD_OI_FID_NR_MAX is the hard upper bound. Both values must be powers
 * of two; osd_options_init() below rounds osd_oi_count up if needed. */
66 #define OSD_OI_FID_NR (1UL << 7)
67 #define OSD_OI_FID_NR_MAX (1UL << OSD_OI_FID_OID_BITS_MAX)
/* Runtime-selected OI count (module tunable); defaults to OSD_OI_FID_NR. */
68 unsigned int osd_oi_count = OSD_OI_FID_NR;
72 * zfs osd maintains names for known fids in the name hierarchy
73 * so that one can mount filesystem with regular ZFS stack and
/* Table of well-known local objects: maps a fixed FID object id to the
 * on-disk name used for it in the ZFS namespace, so the filesystem stays
 * navigable when mounted with a plain ZFS stack (see comment above). */
81 static const struct named_oid oids[] = {
82 { .oid = LAST_RECV_OID, .name = LAST_RCVD },
83 { .oid = OFD_LAST_GROUP_OID, .name = "LAST_GROUP" },
84 { .oid = LLOG_CATALOGS_OID, .name = "CATALOGS" },
/* NOTE(review): the .name initializer for MGS_CONFIGS_OID appears elided
 * in this listing — presumably MOUNT_CONFIGS_DIR; confirm against the
 * full source. */
85 { .oid = MGS_CONFIGS_OID, /*MOUNT_CONFIGS_DIR*/ },
86 { .oid = FID_SEQ_SRV_OID, .name = "seq_srv" },
87 { .oid = FID_SEQ_CTL_OID, .name = "seq_ctl" },
88 { .oid = FLD_INDEX_OID, .name = "fld" },
89 { .oid = MDD_LOV_OBJ_OID, .name = LOV_OBJID },
90 { .oid = OFD_HEALTH_CHECK_OID, .name = HEALTH_CHECK },
91 { .oid = REPLY_DATA_OID, .name = REPLY_DATA },
/* Translate a well-known object id to its fixed name via the oids[] table
 * above. The loop/return scaffolding around the match test is elided in
 * this listing; presumably returns NULL (or the entry's name) — verify
 * against the full source. */
95 static char *oid2name(const unsigned long oid)
100 if (oids[i].oid == oid)
108 * Lookup an existing OI by the given name.
111 osd_oi_lookup(const struct lu_env *env, struct osd_device *o,
112 uint64_t parent, const char *name, struct osd_oi *oi)
/* Per-thread scratch direntry buffer from the env — avoids stack usage. */
114 struct zpl_direntry *zde = &osd_oti_get(env)->oti_zde.lzd_reg;
/* ZAP lookup of 'name' under 'parent'; 8-byte/1-element value, negated
 * to the kernel's -errno convention. */
117 rc = -zap_lookup(o->od_os, parent, name, 8, 1, (void *)zde);
/* Record the OI's name; strlcpy returns the source length, so a value
 * >= sizeof(oi->oi_name) means truncation (error path elided here). */
121 rc = strlcpy(oi->oi_name, name, sizeof(oi->oi_name));
122 if (rc >= sizeof(oi->oi_name))
/* The dnode number stored in the direntry is the OI's ZAP object id. */
125 oi->oi_zapid = zde->zde_dnode;
131 * Create a new OI with the given name.
134 osd_oi_create(const struct lu_env *env, struct osd_device *o,
135 uint64_t parent, const char *name, uint64_t *child)
136 {
/* Wait, line 136 is not in this listing — comment only; body below is
 * the elided original. */
137 struct zpl_direntry *zde = &osd_oti_get(env)->oti_zde.lzd_reg;
138 struct lu_attr *la = &osd_oti_get(env)->oti_la;
139 sa_handle_t *sa_hdl = NULL;
144 /* verify it doesn't already exist */
145 rc = -zap_lookup(o->od_os, parent, name, 8, 1, (void *)zde);
/* No creation allowed on a read-only device (error return elided). */
149 if (o->od_dt_dev.dd_rdonly)
152 /* create fid-to-dnode index */
153 tx = dmu_tx_create(o->od_os);
/* Declare everything the TX will touch: the new ZAP object, the parent's
 * bonus buffer, the name insertion into the parent ZAP, and the SA
 * (system attribute) creation for the new directory. */
157 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, 1, NULL);
158 dmu_tx_hold_bonus(tx, parent);
159 dmu_tx_hold_zap(tx, parent, TRUE, name);
160 dmu_tx_hold_sa_create(tx, ZFS_SA_BASE_ATTR_SIZE);
/* TXG_WAIT: block until the transaction can be assigned to a txg. */
162 rc = -dmu_tx_assign(tx, TXG_WAIT);
/* Create the OI as a 64-bit-hash ZAP directory object. */
168 oid = osd_zap_create_flags(o->od_os, 0, ZAP_FLAG_HASH64,
169 DMU_OT_DIRECTORY_CONTENTS,
170 14, /* == ZFS fzap_default_block_shift */
174 rc = -sa_handle_get(o->od_os, oid, NULL, SA_HDL_PRIVATE, &sa_hdl);
/* Initialize SA attributes: root-owned directory, mode rwxr-xr-x minus
 * group/other write (S_IRUGO | S_IWUSR | S_IXUGO). */
177 la->la_valid = LA_MODE | LA_UID | LA_GID;
178 la->la_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
179 la->la_uid = la->la_gid = 0;
180 rc = __osd_attr_init(env, o, sa_hdl, tx, la, parent, NULL);
181 sa_handle_destroy(sa_hdl);
/* Link the new object into the parent ZAP under 'name'. */
185 zde->zde_dnode = oid;
187 zde->zde_type = IFTODT(S_IFDIR);
189 rc = -zap_add(o->od_os, parent, name, 8, 1, (void *)zde, tx);
/* Error path: undo the object creation if the zap_add failed. */
193 dmu_object_free(o->od_os, oid, tx);
/* Lookup the named OI under 'parent'; create it on ENOENT. On success
 * *child is set to the OI's ZAP object id. */
203 osd_oi_find_or_create(const struct lu_env *env, struct osd_device *o,
204 uint64_t parent, const char *name, uint64_t *child)
209 rc = osd_oi_lookup(env, o, parent, name, &oi)
211 *child = oi.oi_zapid;
212 else if (rc == -ENOENT)
213 rc = osd_oi_create(env, o, parent, name, child);
219 * Lookup the target index/flags of the fid, so it will know where
220 * the object is located (tgt index) and it is MDT or OST object.
222 int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
223 u64 seq, struct lu_seq_range *range)
225 struct seq_server_site *ss = osd_seq_site(osd);
/* IDIF sequences encode the OST index directly — no FLDB lookup. */
227 if (fid_seq_is_idif(seq)) {
228 fld_range_set_ost(range);
229 range->lsr_index = idif_ost_idx(seq);
/* Sequences outside the FLDB are local MDT objects; use this node's id. */
233 if (!fid_seq_in_fldb(seq)) {
234 fld_range_set_mdt(range);
236 /* FIXME: If ss is NULL, it suppose not get lsr_index
238 range->lsr_index = ss->ss_node_id;
243 fld_range_set_any(range);
244 /* OSD will only do local fld lookup */
245 return fld_local_lookup(env, ss->ss_server_fld, seq, range);
/* Decide whether the fid belongs to an OST object. Returns 1/0 in the
 * elided return statements; <0 on lookup error. */
248 int fid_is_on_ost(const struct lu_env *env, struct osd_device *osd,
249 const struct lu_fid *fid)
251 struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
/* IDIF fids are OST objects by construction (return elided). */
255 if (fid_is_idif(fid))
/* Local files, llogs and quota objects are never OST data objects. */
258 if (unlikely(fid_is_local_file(fid) || fid_is_llog(fid)) ||
259 fid_is_name_llog(fid) || fid_is_quota(fid))
262 rc = osd_fld_lookup(env, osd, fid_seq(fid), range);
264 /* During upgrade, OST FLDB might not be loaded because
265 * OST FLDB is not created until 2.6, so if some DNE
266 * filesystem upgrade from 2.5 to 2.7/2.8, they will
267 * not be able to find the sequence from local FLDB
268 * cache see fld_index_init(). */
269 if (rc == -ENOENT && osd->od_is_ost)
273 CERROR("%s: "DFID" lookup failed: rc = %d\n",
274 osd_name(osd), PFID(fid), rc);
/* Final classification from the FLDB answer. */
278 if (fld_range_is_ost(range))
/* Linear scan of the per-device sequence list for 'seq'. Caller must
 * hold osl_seq_list_lock (read or write). Returns the entry or (in the
 * elided tail) NULL when absent. */
284 static struct osd_seq *osd_seq_find_locked(struct osd_seq_list *seq_list,
287 struct osd_seq *osd_seq;
289 list_for_each_entry(osd_seq, &seq_list->osl_seq_list, os_seq_list) {
290 if (osd_seq->os_seq == seq)
/* Locking wrapper over osd_seq_find_locked(): takes the list read lock
 * for the duration of the scan. */
296 static struct osd_seq *osd_seq_find(struct osd_seq_list *seq_list, u64 seq)
298 struct osd_seq *osd_seq;
300 read_lock(&seq_list->osl_seq_list_lock);
301 osd_seq = osd_seq_find_locked(seq_list, seq);
302 read_unlock(&seq_list->osl_seq_list_lock);
/* Find the osd_seq tracking 'seq', creating it (and its /O/<seq>/d0..dNN
 * on-disk subdirectories) on first use. Returns the entry or ERR_PTR(rc).
 * Creation is serialized by osl_seq_init_sem with a re-check after the
 * semaphore is taken (classic check/lock/re-check). */
307 static struct osd_seq *osd_find_or_add_seq(const struct lu_env *env,
308 struct osd_device *osd, u64 seq)
310 struct osd_seq_list *seq_list = &osd->od_seq_list;
311 struct osd_seq *osd_seq;
/* Scratch buffers from the per-thread env for names. */
312 char *key = osd_oti_get(env)->oti_buf;
313 char *seq_name = osd_oti_get(env)->oti_str;
/* Fast path: lockless-read lookup first. */
320 osd_seq = osd_seq_find(seq_list, seq);
324 down(&seq_list->osl_seq_init_sem);
325 /* Check again, in case some one else already add it
327 osd_seq = osd_seq_find(seq_list, seq);
331 OBD_ALLOC_PTR(osd_seq);
333 GOTO(out, rc = -ENOMEM);
335 INIT_LIST_HEAD(&osd_seq->os_seq_list);
336 osd_seq->os_seq = seq;
338 /* Init subdir count to be 32, but each seq can have
339 * different subdir count */
340 osd_seq->os_subdir_count = OSD_OST_MAP_SIZE;
341 OBD_ALLOC(osd_seq->os_compat_dirs,
342 sizeof(uint64_t) * osd_seq->os_subdir_count);
343 if (osd_seq->os_compat_dirs == NULL)
344 GOTO(out, rc = -ENOMEM);
/* Build the /O/<seq> directory name: decimal for reserved/mdt0
 * sequences, hex otherwise; IDIF collapses to "0". */
346 oi.oi_zapid = osd->od_O_id;
347 sprintf(seq_name, (fid_seq_is_rsvd(seq) ||
348 fid_seq_is_mdt0(seq)) ? "%llu" : "%llx",
349 fid_seq_is_idif(seq) ? 0 : seq);
351 rc = osd_oi_find_or_create(env, osd, oi.oi_zapid, seq_name, &odb);
353 CERROR("%s: Can not create %s : rc = %d\n",
354 osd_name(osd), seq_name, rc);
/* Create the d0..d31 hash buckets under /O/<seq>. */
358 for (i = 0; i < OSD_OST_MAP_SIZE; i++) {
359 sprintf(key, "d%d", i);
360 rc = osd_oi_find_or_create(env, osd, odb, key, &sdb);
363 osd_seq->os_compat_dirs[i] = sdb;
/* Publish the fully-built entry under the write lock. */
366 write_lock(&seq_list->osl_seq_list_lock);
367 list_add(&osd_seq->os_seq_list, &seq_list->osl_seq_list);
368 write_unlock(&seq_list->osl_seq_list_lock);
370 up(&seq_list->osl_seq_init_sem);
/* Error cleanup: free the partially-built entry and encode rc. */
372 if (osd_seq != NULL && osd_seq->os_compat_dirs != NULL)
373 OBD_FREE(osd_seq->os_compat_dirs,
374 sizeof(uint64_t) * osd_seq->os_subdir_count);
376 OBD_FREE_PTR(osd_seq);
377 osd_seq = ERR_PTR(rc);
383 * objects w/o a natural reference (unlike a file on a MDS)
384 * are put under a special hierarchy /O/<seq>/d0..dXX
385 * this function returns a directory specific fid belongs to
388 osd_get_idx_for_ost_obj(const struct lu_env *env, struct osd_device *osd,
389 const struct lu_fid *fid, char *buf, int bufsize)
391 struct osd_seq *osd_seq;
396 osd_seq = osd_find_or_add_seq(env, osd, fid_seq(fid));
397 if (IS_ERR(osd_seq)) {
398 CERROR("%s: Can not find seq group "DFID"\n", osd_name(osd),
400 return PTR_ERR(osd_seq);
/* LAST_ID objects are handled specially (branch body elided here). */
403 if (fid_is_last_id(fid)) {
/* Convert fid -> ostid to extract the object id used for naming and
 * bucket selection. */
406 rc = fid_to_ostid(fid, &osd_oti_get(env)->oti_ostid);
407 LASSERT(rc == 0); /* we should not get here with IGIF */
408 id = ostid_id(&osd_oti_get(env)->oti_ostid);
/* Bucket = id modulo the per-seq subdir count (d0..d31). */
411 b = id % OSD_OST_MAP_SIZE;
412 LASSERT(osd_seq->os_compat_dirs[b]);
/* The object's name within the bucket is its decimal id. */
415 snprintf(buf, bufsize, "%llu", id);
417 return osd_seq->os_compat_dirs[b];
420 /* XXX: f_ver is not counted, but may differ too */
/* Render the fid (seq:oid, no version, no brackets) into buf; caller
 * must supply a buffer large enough for DFID_NOBRACE output. */
421 static void osd_fid2str(char *buf, const struct lu_fid *fid)
423 sprintf(buf, DFID_NOBRACE, PFID(fid));
427 * Determine the zap object id which is being used as the OI for the
428 * given fid. The lowest N bits in the sequence ID are used as the
429 * index key. On failure 0 is returned which zfs treats internally
430 * as an invalid object id.
433 osd_get_idx_for_fid(struct osd_device *osd, const struct lu_fid *fid,
434 char *buf, dnode_t **zdn)
438 LASSERT(osd->od_oi_table != NULL);
/* od_oi_count is a power of two, so the mask selects the low bits of
 * the sequence as the OI table index. */
439 oi = osd->od_oi_table[fid_seq(fid) & (osd->od_oi_count - 1)];
441 osd_fid2str(buf, fid);
/* Resolve the ZAP object (and name within it, written to buf) under
 * which the given fid is indexed: the /O compat tree for OST objects
 * and FID_SEQ_ECHO, the device root for well-known local files, and an
 * OI otherwise. Returns the selected zap id. */
448 uint64_t osd_get_name_n_idx(const struct lu_env *env, struct osd_device *osd,
449 const struct lu_fid *fid, char *buf, int bufsize,
/* Accounting objects are handled elsewhere, never here. */
455 LASSERT(!fid_is_acct(fid));
460 if (fid_is_on_ost(env, osd, fid) == 1 || fid_seq(fid) == FID_SEQ_ECHO) {
461 zapid = osd_get_idx_for_ost_obj(env, osd, fid, buf, bufsize);
462 } else if (unlikely(fid_seq(fid) == FID_SEQ_LOCAL_FILE)) {
463 /* special objects with fixed known fids get their name */
464 char *name = oid2name(fid_oid(fid));
467 zapid = osd->od_root;
469 strncpy(buf, name, bufsize);
/* Local-file fid without a fixed name: fall back to the OI. */
471 zapid = osd_get_idx_for_fid(osd, fid, buf, NULL);
474 zapid = osd_get_idx_for_fid(osd, fid, buf, zdn);
/* True iff the fid is the special local-file fid reserved for the
 * filesystem root inode. */
480 static inline int fid_is_fs_root(const struct lu_fid *fid)
482 /* Map root inode to special local object FID */
483 return fid_seq(fid) == FID_SEQ_LOCAL_FILE &&
484 fid_oid(fid) == OSD_FS_ROOT_OID;
/* Map a fid to its dnode number (*oid) by looking up the fid's name in
 * the appropriate ZAP (OI / compat dir / root), then prefetch the dnode. */
487 int osd_fid_lookup(const struct lu_env *env, struct osd_device *dev,
488 const struct lu_fid *fid, uint64_t *oid)
490 struct osd_thread_info *info = osd_oti_get(env);
491 char *buf = info->oti_buf;
/* Fault injection hook: simulate a missing object (return elided). */
497 if (OBD_FAIL_CHECK(OBD_FAIL_SRV_ENOENT))
500 LASSERT(!fid_is_acct(fid));
/* Root fid short-circuits to the filesystem root (body elided). */
502 if (unlikely(fid_is_fs_root(fid))) {
505 zapid = osd_get_name_n_idx(env, dev, fid, buf,
506 sizeof(info->oti_buf), &zdn);
507 rc = osd_zap_lookup(dev, zapid, zdn, buf,
508 8, 1, &info->oti_zde);
511 *oid = info->oti_zde.lzd_reg.zde_dnode;
/* Kick off an async prefetch of the object's metadata so the following
 * dnode access is likely warm. */
515 osd_dmu_prefetch(dev->od_os, *oid, 0, 0, 0,
516 ZIO_PRIORITY_ASYNC_READ);
522 * Close an entry in a specific slot.
525 osd_oi_remove_table(const struct lu_env *env, struct osd_device *o, int key)
529 LASSERT(key < o->od_oi_count);
531 oi = o->od_oi_table[key];
/* Drop the cached dnode reference before clearing the slot. */
534 osd_dnode_rele(oi->oi_dn);
536 o->od_oi_table[key] = NULL;
541 * Allocate and open a new entry in the specified unused slot.
544 osd_oi_add_table(const struct lu_env *env, struct osd_device *o,
550 LASSERT(key < o->od_oi_count);
551 LASSERT(o->od_oi_table[key] == NULL);
/* Resolve the named OI under the device root, then cache its dnode. */
557 rc = osd_oi_lookup(env, o, o->od_root, name, oi);
563 o->od_oi_table[key] = oi;
564 __osd_obj2dnode(o->od_os, oi->oi_zapid, &oi->oi_dn);
570 * Depopulate the OI table.
573 osd_oi_close_table(const struct lu_env *env, struct osd_device *o)
/* Close every slot; osd_oi_remove_table() tolerates the bookkeeping. */
577 for (i = 0; i < o->od_oi_count; i++)
578 osd_oi_remove_table(env, o, i);
582 * Populate the OI table based.
585 osd_oi_open_table(const struct lu_env *env, struct osd_device *o, int count)
/* Open OIs named "<base>.0" .. "<base>.count-1"; on any failure, tear
 * down whatever was opened so far. */
591 for (i = 0; i < count; i++) {
592 sprintf(name, "%s.%d", DMU_OSD_OI_NAME_BASE, i);
593 rc = osd_oi_add_table(env, o, name, i);
595 osd_oi_close_table(env, o);
604 * Determine if the type and number of OIs used by this file system.
607 osd_oi_probe(const struct lu_env *env, struct osd_device *o, int *count)
609 uint64_t root_oid = o->od_root;
616 * Check for multiple OIs and determine the count. There is no
617 * gap handling, if an OI is missing the wrong size can be returned.
618 * The only safeguard is that we know the number of OIs must be a
619 * power of two and this is checked for basic sanity.
/* Probe "<base>.N" names in order until one is missing. */
621 for (*count = 0; *count < OSD_OI_FID_NR_MAX; (*count)++) {
622 sprintf(name, "%s.%d", DMU_OSD_OI_NAME_BASE, *count);
623 rc = osd_oi_lookup(env, o, root_oid, name, &oi);
/* Sanity: the discovered count must be a power of two. */
631 if ((*count & (*count - 1)) != 0)
641 * No OIs exist, this must be a new filesystem.
/* Tear down all cached osd_seq entries at device shutdown: unlink each
 * from the list and free its compat-dir array and the entry itself. */
648 static void osd_ost_seq_fini(const struct lu_env *env, struct osd_device *osd)
650 struct osd_seq_list *osl = &osd->od_seq_list;
651 struct osd_seq *osd_seq, *tmp;
/* _safe iteration because entries are deleted while walking. */
653 write_lock(&osl->osl_seq_list_lock);
654 list_for_each_entry_safe(osd_seq, tmp, &osl->osl_seq_list,
656 list_del(&osd_seq->os_seq_list);
657 OBD_FREE(osd_seq->os_compat_dirs,
658 sizeof(uint64_t) * osd_seq->os_subdir_count);
659 OBD_FREE(osd_seq, sizeof(*osd_seq));
661 write_unlock(&osl->osl_seq_list_lock);
667 * Create /O subdirectory to map legacy OST objects for compatibility.
670 osd_oi_init_compat(const struct lu_env *env, struct osd_device *o)
/* "O" lives directly under the device root; its id is recorded
 * elsewhere (assignment elided in this listing). */
676 rc = osd_oi_find_or_create(env, o, o->od_root, "O", &sdb);
684 * Initialize the OIs by either opening or creating them as needed.
686 int osd_oi_init(const struct lu_env *env, struct osd_device *o)
688 char *key = osd_oti_get(env)->oti_buf;
689 int i, rc, count = 0;
/* Discover how many OIs already exist on disk. */
692 rc = osd_oi_probe(env, o, &count);
/* New filesystem: fall back to the configured default. */
699 count = osd_oi_count;
/* Create (or re-open) each "<base>.i" OI. */
702 for (i = 0; i < count; i++) {
703 sprintf(key, "%s.%d", DMU_OSD_OI_NAME_BASE, i);
704 rc = osd_oi_find_or_create(env, o, odb, key, &sdb);
710 rc = osd_oi_init_compat(env, o);
/* Invariant: count is a power of two (mask-indexing depends on it). */
714 LASSERT((count & (count - 1)) == 0);
715 o->od_oi_count = count;
716 OBD_ALLOC(o->od_oi_table, sizeof(struct osd_oi *) * count);
717 if (o->od_oi_table == NULL)
720 rc = osd_oi_open_table(env, o, count);
/* Open failed: release the table and reset the pointer. */
722 OBD_FREE(o->od_oi_table, sizeof(struct osd_oi *) * count);
723 o->od_oi_table = NULL;
/* Device teardown: release the sequence cache, then close and free the
 * OI table if it was allocated. */
729 void osd_oi_fini(const struct lu_env *env, struct osd_device *o)
733 osd_ost_seq_fini(env, o);
735 if (o->od_oi_table != NULL) {
736 (void) osd_oi_close_table(env, o);
737 OBD_FREE(o->od_oi_table,
738 sizeof(struct osd_oi *) * o->od_oi_count);
739 o->od_oi_table = NULL;
/* Validate module options: clamp osd_oi_count into (0, MAX] and round
 * it up to a power of two (required by the mask-based OI indexing). */
746 int osd_options_init(void)
748 /* osd_oi_count - Default number of OIs, 128 works well for ZFS */
749 if (osd_oi_count == 0 || osd_oi_count > OSD_OI_FID_NR_MAX)
750 osd_oi_count = OSD_OI_FID_NR;
752 if ((osd_oi_count & (osd_oi_count - 1)) != 0) {
753 LCONSOLE_WARN("Round up osd_oi_count %d to power2 %d\n",
754 osd_oi_count, size_roundup_power2(osd_oi_count));
755 osd_oi_count = size_roundup_power2(osd_oi_count);
762 * the following set of functions are used to maintain per-thread
763 * cache of FID->ino mapping. this mechanism is used to avoid
764 * expensive LU/OI lookups.
/* Linear scan of the per-thread idmap cache for (fid, osd). Returns the
 * matching entry or (in the elided tail) NULL. */
766 struct osd_idmap_cache *osd_idc_find(const struct lu_env *env,
767 struct osd_device *osd,
768 const struct lu_fid *fid)
770 struct osd_thread_info *oti = osd_oti_get(env);
771 struct osd_idmap_cache *idc = oti->oti_ins_cache;
/* Entry matches only when both the fid and the device match. */
774 for (i = 0; i < oti->oti_ins_cache_used; i++) {
775 if (!lu_fid_eq(&idc[i].oic_fid, fid))
777 if (idc[i].oic_dev != osd)
/* Append a new entry for (fid, osd) to the per-thread idmap cache,
 * growing the backing array (doubling, or OSD_INS_CACHE_SIZE when
 * empty) when full. Returns the new entry or ERR_PTR(-ENOMEM). */
786 struct osd_idmap_cache *osd_idc_add(const struct lu_env *env,
787 struct osd_device *osd,
788 const struct lu_fid *fid)
790 struct osd_thread_info *oti = osd_oti_get(env);
791 struct osd_idmap_cache *idc;
794 if (unlikely(oti->oti_ins_cache_used >= oti->oti_ins_cache_size)) {
795 i = oti->oti_ins_cache_size * 2;
/* First allocation: start from the default cache size. */
798 i = OSD_INS_CACHE_SIZE;
799 OBD_ALLOC(idc, sizeof(*idc) * i);
801 return ERR_PTR(-ENOMEM);
/* Migrate existing entries into the enlarged array, then free the
 * old one. NOTE(review): the free is sized by oti_ins_cache_used,
 * not oti_ins_cache_size — verify against the full source that these
 * match the original allocation size. */
802 if (oti->oti_ins_cache != NULL) {
803 memcpy(idc, oti->oti_ins_cache,
804 oti->oti_ins_cache_used * sizeof(*idc));
805 OBD_FREE(oti->oti_ins_cache,
806 oti->oti_ins_cache_used * sizeof(*idc));
808 oti->oti_ins_cache = idc;
809 oti->oti_ins_cache_size = i;
/* Claim the next unused slot (initialization of the entry elided). */
812 idc = &oti->oti_ins_cache[oti->oti_ins_cache_used++];
822 * Lookup mapping for the given fid in the cache
824 * Initialize a new one if not found. the initialization checks whether
825 * the object is local or remote. for the local objects, OI is used to
826 * learn dnode#. the function is used when the caller has no information
827 * about the object, e.g. at dt_insert().
829 struct osd_idmap_cache *osd_idc_find_or_init(const struct lu_env *env,
830 struct osd_device *osd,
831 const struct lu_fid *fid)
833 struct osd_idmap_cache *idc;
836 LASSERT(!fid_is_acct(fid));
/* Fast path: already cached (return elided). */
838 idc = osd_idc_find(env, osd, fid);
842 /* new mapping is needed */
843 idc = osd_idc_add(env, osd, fid);
/* Classify the fid: rc > 0 presumably means remote — confirm against
 * osd_remote_fid() in the full source. */
848 rc = osd_remote_fid(env, osd, fid);
849 if (unlikely(rc < 0))
853 /* the object is local, lookup in OI */
856 rc = osd_fid_lookup(env, osd, fid, &dnode);
857 if (unlikely(rc < 0)) {
858 CERROR("%s: can't lookup: rc = %d\n",
/* Dnode numbers must fit in ZFS's object-number range. */
862 LASSERT(dnode < (1ULL << DN_MAX_OBJECT_SHIFT));
863 idc->oic_dnode = dnode;
865 /* the object is remote */
873 * lookup mapping for given FID and fill it from the given object.
874 * the object is local by definition.
876 int osd_idc_find_and_init(const struct lu_env *env, struct osd_device *osd,
877 struct osd_object *obj)
879 const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
880 struct osd_idmap_cache *idc;
882 idc = osd_idc_find(env, osd, fid);
884 if (obj->oo_dn == NULL)
886 idc->oic_dnode = obj->oo_dn->dn_object;
890 /* new mapping is needed */
891 idc = osd_idc_add(env, osd, fid);
896 idc->oic_dnode = obj->oo_dn->dn_object;