4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/osd/osd_handler.c
34 * Top-level entry points into osd module
36 * Author: Nikita Danilov <nikita@clusterfs.com>
37 * Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
40 #define DEBUG_SUBSYSTEM S_OSD
42 #include <linux/fs_struct.h>
43 #include <linux/kallsyms.h>
44 #include <linux/module.h>
45 #include <linux/user_namespace.h>
46 #include <linux/uidgid.h>
48 /* prerequisite for linux/xattr.h */
49 #include <linux/types.h>
50 /* prerequisite for linux/xattr.h */
52 /* XATTR_{REPLACE,CREATE} */
53 #include <linux/xattr.h>
55 #include <ldiskfs/ldiskfs.h>
56 #include <ldiskfs/xattr.h>
57 #include <ldiskfs/ldiskfs_extents.h>
60 * struct OBD_{ALLOC,FREE}*()
63 #include <obd_support.h>
64 /* struct ptlrpc_thread */
65 #include <lustre_net.h>
66 #include <lustre_fid.h>
68 #include <uapi/linux/lustre/lustre_param.h>
70 #include "osd_internal.h"
71 #include "osd_dynlocks.h"
73 /* llo_* api support */
74 #include <md_object.h>
75 #include <lustre_quota.h>
77 #include <lustre_linkea.h>
79 /* Maximum EA size is limited by LNET_MTU for remote objects */
80 #define OSD_MAX_EA_SIZE 1048364
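/*
 * A back-of-the-envelope check of the value above (a sketch, assuming
 * LNET_MTU is 1 MiB = 1048576 bytes): 1048576 - 1048364 = 212 bytes,
 * presumably left as headroom for message/reply overhead, so a
 * maximum-sized EA still fits into a single LNET message for remote
 * objects.
 */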
83 module_param(ldiskfs_pdo, int, 0644);
84 MODULE_PARM_DESC(ldiskfs_pdo, "ldiskfs with parallel directory operations");
86 int ldiskfs_track_declares_assert;
87 module_param(ldiskfs_track_declares_assert, int, 0644);
88 MODULE_PARM_DESC(ldiskfs_track_declares_assert, "LBUG during tracking of declares");
90 /* Slab to allocate dynlocks */
91 struct kmem_cache *dynlock_cachep;
93 /* Slab to allocate osd_it_ea */
94 struct kmem_cache *osd_itea_cachep;
96 static struct lu_kmem_descr ldiskfs_caches[] = {
98 .ckd_cache = &dynlock_cachep,
99 .ckd_name = "dynlock_cache",
100 .ckd_size = sizeof(struct dynlock_handle)
103 .ckd_cache = &osd_itea_cachep,
104 .ckd_name = "osd_itea_cache",
105 .ckd_size = sizeof(struct osd_it_ea)
112 static const char dot[] = ".";
113 static const char dotdot[] = "..";
115 static const struct lu_object_operations osd_lu_obj_ops;
116 static const struct dt_object_operations osd_obj_ops;
117 static const struct dt_object_operations osd_obj_otable_it_ops;
118 static const struct dt_index_operations osd_index_iam_ops;
119 static const struct dt_index_operations osd_index_ea_ops;
121 static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
122 const struct lu_fid *fid);
123 static int osd_process_scheduled_agent_removals(const struct lu_env *env,
124 struct osd_device *osd);
126 int osd_trans_declare_op2rb[] = {
127 [OSD_OT_ATTR_SET] = OSD_OT_ATTR_SET,
128 [OSD_OT_PUNCH] = OSD_OT_MAX,
129 [OSD_OT_XATTR_SET] = OSD_OT_XATTR_SET,
130 [OSD_OT_CREATE] = OSD_OT_DESTROY,
131 [OSD_OT_DESTROY] = OSD_OT_CREATE,
132 [OSD_OT_REF_ADD] = OSD_OT_REF_DEL,
133 [OSD_OT_REF_DEL] = OSD_OT_REF_ADD,
134 [OSD_OT_WRITE] = OSD_OT_WRITE,
135 [OSD_OT_INSERT] = OSD_OT_DELETE,
136 [OSD_OT_DELETE] = OSD_OT_INSERT,
137 [OSD_OT_QUOTA] = OSD_OT_MAX,
140 static int osd_has_index(const struct osd_object *obj)
142 return obj->oo_dt.do_index_ops != NULL;
145 static int osd_object_invariant(const struct lu_object *l)
147 return osd_invariant(osd_obj(l));
151 * Concurrency: doesn't matter
153 static int osd_is_write_locked(const struct lu_env *env, struct osd_object *o)
155 struct osd_thread_info *oti = osd_oti_get(env);
157 return oti->oti_w_locks > 0 && o->oo_owner == env;
161 * Concurrency: doesn't access mutable data
163 static int osd_root_get(const struct lu_env *env,
164 struct dt_device *dev, struct lu_fid *f)
166 lu_local_obj_fid(f, OSD_FS_ROOT_OID);
171 * the following set of functions is used to maintain a per-thread
172 * cache of FID->ino mappings. this mechanism is needed to resolve
173 * FID to inode at dt_insert(), which in turn stores the ino in the
174 * directory entries to keep ldiskfs compatible with ext[34].
175 * due to locking-originated restrictions we can't look up the ino
176 * using the LU cache (deadlock is possible). lookup using OI is quite
177 * expensive. so instead we maintain this cache and methods like
178 * dt_create() fill it. so in the majority of cases dt_insert() is
179 * able to find the needed mapping in a lockless manner.
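 *
 * A minimal usage sketch (illustrative only, not a call sequence taken
 * verbatim from this file; the real callers are the dt_create()/dt_insert()
 * paths further below):
 *
 *   idc = osd_idc_find_or_init(env, osd, fid);   // fills the cache
 *   ...
 *   idc = osd_idc_find(env, osd, fid);           // lockless hit at insert time
 *   if (idc != NULL)
 *           ino = idc->oic_lid.oii_ino;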
181 static struct osd_idmap_cache *
182 osd_idc_find(const struct lu_env *env, struct osd_device *osd,
183 const struct lu_fid *fid)
185 struct osd_thread_info *oti = osd_oti_get(env);
186 struct osd_idmap_cache *idc = oti->oti_ins_cache;
189 for (i = 0; i < oti->oti_ins_cache_used; i++) {
190 if (!lu_fid_eq(&idc[i].oic_fid, fid))
192 if (idc[i].oic_dev != osd)
201 static struct osd_idmap_cache *
202 osd_idc_add(const struct lu_env *env, struct osd_device *osd,
203 const struct lu_fid *fid)
205 struct osd_thread_info *oti = osd_oti_get(env);
206 struct osd_idmap_cache *idc;
209 if (unlikely(oti->oti_ins_cache_used >= oti->oti_ins_cache_size)) {
210 i = oti->oti_ins_cache_size * 2;
212 i = OSD_INS_CACHE_SIZE;
213 OBD_ALLOC_PTR_ARRAY(idc, i);
215 return ERR_PTR(-ENOMEM);
216 if (oti->oti_ins_cache != NULL) {
217 memcpy(idc, oti->oti_ins_cache,
218 oti->oti_ins_cache_used * sizeof(*idc));
219 OBD_FREE_PTR_ARRAY(oti->oti_ins_cache,
220 oti->oti_ins_cache_used);
222 oti->oti_ins_cache = idc;
223 oti->oti_ins_cache_size = i;
226 idc = oti->oti_ins_cache + oti->oti_ins_cache_used++;
229 idc->oic_lid.oii_ino = 0;
230 idc->oic_lid.oii_gen = 0;
237 * look up the mapping for the given fid in the cache, initialize a
238 * new one if not found. the initialization checks whether the
239 * object is local or remote. for local objects, OI is used to
240 * learn the ino/generation. the function is used when the caller
241 * has no information about the object, e.g. at dt_insert().
243 static struct osd_idmap_cache *
244 osd_idc_find_or_init(const struct lu_env *env, struct osd_device *osd,
245 const struct lu_fid *fid)
247 struct osd_idmap_cache *idc;
250 idc = osd_idc_find(env, osd, fid);
251 LASSERT(!IS_ERR(idc));
255 CDEBUG(D_INODE, "%s: FID "DFID" not in the id map cache\n",
256 osd->od_svname, PFID(fid));
258 /* new mapping is needed */
259 idc = osd_idc_add(env, osd, fid);
261 CERROR("%s: FID "DFID" add id map cache failed: %ld\n",
262 osd->od_svname, PFID(fid), PTR_ERR(idc));
267 rc = osd_remote_fid(env, osd, fid);
268 if (unlikely(rc < 0))
272 /* the object is local, lookup in OI */
273 /* XXX: probably cheaper to lookup in LU first? */
274 rc = osd_oi_lookup(osd_oti_get(env), osd, fid,
276 if (unlikely(rc < 0)) {
277 CERROR("can't lookup: rc = %d\n", rc);
281 /* the object is remote */
289 * look up the mapping for the given FID and fill it from the given object.
290 * the object is local by definition.
292 static int osd_idc_find_and_init(const struct lu_env *env,
293 struct osd_device *osd,
294 struct osd_object *obj)
296 const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
297 struct osd_idmap_cache *idc;
299 idc = osd_idc_find(env, osd, fid);
300 LASSERT(!IS_ERR(idc));
302 if (obj->oo_inode == NULL)
304 if (idc->oic_lid.oii_ino != obj->oo_inode->i_ino) {
305 LASSERT(idc->oic_lid.oii_ino == 0);
306 idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
307 idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
312 CDEBUG(D_INODE, "%s: FID "DFID" not in the id map cache\n",
313 osd->od_svname, PFID(fid));
315 /* new mapping is needed */
316 idc = osd_idc_add(env, osd, fid);
318 CERROR("%s: FID "DFID" add id map cache failed: %ld\n",
319 osd->od_svname, PFID(fid), PTR_ERR(idc));
323 if (obj->oo_inode != NULL) {
324 idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
325 idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
331 * OSD object methods.
335 * Concurrency: no concurrent access is possible that early in object
338 static struct lu_object *osd_object_alloc(const struct lu_env *env,
339 const struct lu_object_header *hdr,
342 struct osd_object *mo;
347 struct lu_object_header *h;
348 struct osd_device *o = osd_dev(d);
350 l = &mo->oo_dt.do_lu;
351 if (unlikely(o->od_in_init)) {
358 lu_object_header_init(h);
359 lu_object_init(l, h, d);
360 lu_object_add_top(h, l);
363 dt_object_init(&mo->oo_dt, NULL, d);
364 mo->oo_header = NULL;
367 mo->oo_dt.do_ops = &osd_obj_ops;
368 l->lo_ops = &osd_lu_obj_ops;
369 init_rwsem(&mo->oo_sem);
370 init_rwsem(&mo->oo_ext_idx_sem);
371 spin_lock_init(&mo->oo_guard);
372 INIT_LIST_HEAD(&mo->oo_xattr_list);
378 int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
379 struct dentry *dentry, struct lustre_ost_attrs *loa)
383 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
384 (void *)loa, sizeof(*loa));
386 struct lustre_mdt_attrs *lma = &loa->loa_lma;
388 if (rc < sizeof(*lma))
392 lustre_loa_swab(loa, true);
393 /* Check LMA compatibility */
394 if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
395 CWARN("%s: unsupported incompat LMA feature(s) %#x "
396 "for fid = "DFID", ino = %lu\n",
398 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
399 PFID(&lma->lma_self_fid), inode->i_ino);
402 } else if (rc == 0) {
410 * retrieve object from backend ext fs.
412 struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
413 struct osd_inode_id *id)
416 struct inode *inode = NULL;
419 * if we look for an inode within a running
420 * transaction, then we risk a deadlock;
421 * osd_dirent_check_repair() breaks this
423 /* LASSERT(current->journal_info == NULL); */
425 inode = osd_ldiskfs_iget(osd_sb(dev), id->oii_ino);
427 CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
428 id->oii_ino, PTR_ERR(inode));
429 } else if (id->oii_gen != OSD_OII_NOGEN &&
430 inode->i_generation != id->oii_gen) {
431 CDEBUG(D_INODE, "unmatched inode: ino = %u, oii_gen = %u, "
432 "i_generation = %u\n",
433 id->oii_ino, id->oii_gen, inode->i_generation);
435 inode = ERR_PTR(-ESTALE);
436 } else if (inode->i_nlink == 0) {
438 * due to parallel readdir and unlink,
439 * we can have a dead inode here.
441 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
443 inode = ERR_PTR(-ESTALE);
444 } else if (is_bad_inode(inode)) {
445 CWARN("%s: bad inode: ino = %u\n",
446 osd_dev2name(dev), id->oii_ino);
448 inode = ERR_PTR(-ENOENT);
449 } else if ((rc = osd_attach_jinode(inode))) {
453 ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
454 if (id->oii_gen == OSD_OII_NOGEN)
455 osd_id_gen(id, inode->i_ino, inode->i_generation);
458 * Do not update file c/mtime in ldiskfs.
459 * NB: we don't have any lock to protect this because we don't
460 * have a reference on the osd_object now, but contention with
461 * another lookup + attr_set can't happen in the tiny window
462 * between the if (...) check and setting S_NOCMTIME.
464 if (!(inode->i_flags & S_NOCMTIME))
465 inode->i_flags |= S_NOCMTIME;
470 int osd_ldiskfs_add_entry(struct osd_thread_info *info, struct osd_device *osd,
471 handle_t *handle, struct dentry *child,
472 struct inode *inode, struct htree_lock *hlock)
476 rc = __ldiskfs_add_entry(handle, child, inode, hlock);
477 if (rc == -ENOBUFS || rc == -ENOSPC) {
478 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
479 struct inode *parent = child->d_parent->d_inode;
480 struct lu_fid *fid = NULL;
481 char fidstr[FID_LEN + 1] = "unknown";
483 rc2 = osd_get_lma(info, parent, child->d_parent, loa);
485 fid = &loa->loa_lma.lma_self_fid;
486 } else if (rc2 == -ENODATA) {
487 if (unlikely(parent == inode->i_sb->s_root->d_inode)) {
488 fid = &info->oti_fid3;
489 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
490 } else if (!osd->od_is_ost && osd->od_index == 0) {
491 fid = &info->oti_fid3;
492 lu_igif_build(fid, parent->i_ino,
493 parent->i_generation);
498 snprintf(fidstr, sizeof(fidstr), DFID, PFID(fid));
500 /* below message is checked in sanity.sh test_129 */
502 CWARN("%s: directory (inode: %lu, FID: %s) has reached max size limit\n",
503 osd_name(osd), parent->i_ino, fidstr);
505 rc = 0; /* ignore such error now */
506 CWARN("%s: directory (inode: %lu, FID: %s) is approaching max size limit\n",
507 osd_name(osd), parent->i_ino, fidstr);
517 osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
518 struct osd_inode_id *id, struct lu_fid *fid)
520 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
524 inode = osd_iget(info, dev, id);
528 rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
530 *fid = loa->loa_lma.lma_self_fid;
531 } else if (rc == -ENODATA) {
532 if (unlikely(inode == osd_sb(dev)->s_root->d_inode))
533 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
535 lu_igif_build(fid, inode->i_ino, inode->i_generation);
543 static struct inode *osd_iget_check(struct osd_thread_info *info,
544 struct osd_device *dev,
545 const struct lu_fid *fid,
546 struct osd_inode_id *id,
555 * The cached OI mapping is trustable. If we cannot locate the inode
556 * via the cached OI mapping, then return the failure to the caller
557 * directly without further OI checking.
561 inode = osd_ldiskfs_iget(osd_sb(dev), id->oii_ino);
564 if (!trusted && (rc == -ENOENT || rc == -ESTALE))
567 CDEBUG(D_INODE, "no inode for FID: "DFID", ino = %u, rc = %d\n",
568 PFID(fid), id->oii_ino, rc);
572 if (is_bad_inode(inode)) {
577 CDEBUG(D_INODE, "bad inode for FID: "DFID", ino = %u\n",
578 PFID(fid), id->oii_ino);
582 if (id->oii_gen != OSD_OII_NOGEN &&
583 inode->i_generation != id->oii_gen) {
588 CDEBUG(D_INODE, "unmatched inode for FID: "DFID", ino = %u, "
589 "oii_gen = %u, i_generation = %u\n", PFID(fid),
590 id->oii_ino, id->oii_gen, inode->i_generation);
594 if (inode->i_nlink == 0) {
599 CDEBUG(D_INODE, "stale inode for FID: "DFID", ino = %u\n",
600 PFID(fid), id->oii_ino);
604 ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
608 __u32 saved_ino = id->oii_ino;
609 __u32 saved_gen = id->oii_gen;
612 LASSERTF(rc == -ESTALE || rc == -ENOENT, "rc = %d\n", rc);
614 rc = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
616 * XXX: There are four possible cases:
618 * Backup/restore caused the OI mapping to become invalid.
620 * Someone unlinked the object but did NOT remove
621 * the OI mapping, e.g. mounted the target device
622 * as ldiskfs and modified something directly.
624 * Someone just removed the object between the
625 * former oi_lookup and the iget. It is normal.
626 * 4. Other failure cases.
628 * Generally, when the device is mounted, it will
629 * automatically check whether the system was restored from
630 * a file-level backup or not. We trust such detection
631 * to distinguish the 1st case from the 2nd case:
632 * if the OI files are consistent but may contain
633 * stale OI mappings because of case 2, and iget()
634 * returns -ENOENT or -ESTALE, then it should be
639 * If the OI mapping was in the OI file before the
640 * osd_iget_check(), but now it has disappeared,
641 * then it must have been removed by race. That is a
647 * The OI scrub updated the OI mapping by race.
648 * The new OI mapping must be valid.
650 if (saved_ino != id->oii_ino ||
651 (saved_gen != id->oii_gen && saved_gen != OSD_OII_NOGEN)) {
660 if (dev->od_scrub.os_scrub.os_file.sf_flags &
663 * It can still be case 2, but we cannot
664 * distinguish it from case 1. So return
665 * -EREMCHG to block the current operation until
666 * the OI scrub has rebuilt the OI mappings.
675 if (inode->i_generation == id->oii_gen)
680 if (id->oii_gen == OSD_OII_NOGEN)
681 osd_id_gen(id, inode->i_ino, inode->i_generation);
684 * Do not update file c/mtime in ldiskfs.
685 * NB: we don't have any lock to protect this because we don't
686 * have a reference on the osd_object now, but contention with
687 * another lookup + attr_set can't happen in the tiny window
688 * between the if (...) check and setting S_NOCMTIME.
690 if (!(inode->i_flags & S_NOCMTIME))
691 inode->i_flags |= S_NOCMTIME;
708 * \retval +v: new filter_fid does not contain self-fid
709 * \retval 0: filter_fid_18_23, contains self-fid
710 * \retval -v: other failure cases
712 int osd_get_idif(struct osd_thread_info *info, struct inode *inode,
713 struct dentry *dentry, struct lu_fid *fid)
715 struct filter_fid *ff = &info->oti_ff;
716 struct ost_id *ostid = &info->oti_ostid;
719 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_FID, ff, sizeof(*ff));
720 if (rc == sizeof(struct filter_fid_18_23)) {
721 struct filter_fid_18_23 *ff_old = (void *)ff;
723 ostid_set_seq(ostid, le64_to_cpu(ff_old->ff_seq));
724 rc = ostid_set_id(ostid, le64_to_cpu(ff_old->ff_objid));
726 * XXX: use 0 as the index for compatibility, the caller will
727 * handle index related issues when necessary.
730 ostid_to_fid(fid, ostid, 0);
731 } else if (rc >= (int)sizeof(struct filter_fid_24_29)) {
733 } else if (rc >= 0) {
740 static int osd_lma_self_repair(struct osd_thread_info *info,
741 struct osd_device *osd, struct inode *inode,
742 const struct lu_fid *fid, __u32 compat)
747 LASSERT(current->journal_info == NULL);
749 jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
750 osd_dto_credits_noquota[DTO_XATTR_SET]);
753 CWARN("%s: cannot start journal for lma_self_repair: rc = %d\n",
758 rc = osd_ea_fid_set(info, inode, fid, compat, 0);
760 CWARN("%s: cannot self repair the LMA: rc = %d\n",
762 ldiskfs_journal_stop(jh);
766 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
768 struct osd_thread_info *info = osd_oti_get(env);
769 struct osd_device *osd = osd_obj2dev(obj);
770 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
771 struct lustre_mdt_attrs *lma = &loa->loa_lma;
772 struct inode *inode = obj->oo_inode;
773 struct dentry *dentry = &info->oti_obj_dentry;
774 struct lu_fid *fid = NULL;
775 const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
780 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
781 (void *)loa, sizeof(*loa));
782 if (rc == -ENODATA && !fid_is_igif(rfid) && osd->od_check_ff) {
783 fid = &lma->lma_self_fid;
784 rc = osd_get_idif(info, inode, dentry, fid);
785 if (rc > 0 || (rc == -ENODATA && osd->od_index_in_idif)) {
787 * For the given OST-object, if it has neither LMA nor
788 * FID in XATTR_NAME_FID, then the given FID (which is
789 * contained in the @obj, from client RPC for locating
790 * the OST-object) is trusted. We use it to generate
793 osd_lma_self_repair(info, osd, inode, rfid,
804 lustre_lma_swab(lma);
805 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
806 (CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT) &&
807 S_ISREG(inode->i_mode)))) {
808 CWARN("%s: unsupported incompat LMA feature(s) %#x for "
809 "fid = "DFID", ino = %lu\n", osd_name(osd),
810 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
811 PFID(rfid), inode->i_ino);
814 fid = &lma->lma_self_fid;
815 if (lma->lma_compat & LMAC_STRIPE_INFO &&
817 obj->oo_pfid_in_lma = 1;
818 if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
820 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
824 if (fid != NULL && unlikely(!lu_fid_eq(rfid, fid))) {
825 if (fid_is_idif(rfid) && fid_is_idif(fid)) {
826 struct ost_id *oi = &info->oti_ostid;
827 struct lu_fid *fid1 = &info->oti_fid3;
828 __u32 idx = fid_idif_ost_idx(rfid);
831 * For old IDIF, the OST index is not part of the IDIF,
832 * which means that different OSTs may have the same IDIFs.
833 * In such a case, we need to make a compatibility
834 * check to make sure the OI scrub is triggered properly.
836 if (idx != 0 && fid_idif_ost_idx(fid) == 0) {
837 /* Given @rfid is new, LMA is old. */
838 fid_to_ostid(fid, oi);
839 ostid_to_fid(fid1, oi, idx);
840 if (lu_fid_eq(fid1, rfid)) {
841 if (osd->od_index_in_idif)
842 osd_lma_self_repair(info, osd,
856 struct osd_check_lmv_buf {
857 /* please keep it as first member */
858 struct dir_context ctx;
859 struct osd_thread_info *oclb_info;
860 struct osd_device *oclb_dev;
861 struct osd_idmap_cache *oclb_oic;
867 * It is called internally by ->iterate*() to filter out the
868 * local slave object's FID of the striped directory.
870 * \retval 1 found the local slave's FID
871 * \retval 0 continue to check next item
872 * \retval -ve for failure
874 #ifdef HAVE_FILLDIR_USE_CTX
875 static int osd_stripe_dir_filldir(struct dir_context *buf,
877 static int osd_stripe_dir_filldir(void *buf,
879 const char *name, int namelen,
880 loff_t offset, __u64 ino, unsigned int d_type)
882 struct osd_check_lmv_buf *oclb = (struct osd_check_lmv_buf *)buf;
883 struct osd_thread_info *oti = oclb->oclb_info;
884 struct lu_fid *fid = &oti->oti_fid3;
885 struct osd_inode_id *id = &oti->oti_id3;
886 struct osd_device *dev = oclb->oclb_dev;
887 struct osd_idmap_cache *oic = oclb->oclb_oic;
896 sscanf(name + 1, SFID, RFID(fid));
897 if (!fid_is_sane(fid))
900 if (osd_remote_fid(oti->oti_env, dev, fid))
903 osd_id_gen(id, ino, OSD_OII_NOGEN);
904 inode = osd_iget(oti, dev, id);
906 return PTR_ERR(inode);
909 osd_add_oi_cache(oti, dev, id, fid);
913 osd_oii_insert(dev, oic, true);
914 oclb->oclb_found = true;
920 * When looking up an item under a striped directory, we need to locate the
921 * master MDT-object of the striped directory first; then the client will send
922 * a lookup (getattr_by_name) RPC to the MDT with some slave MDT-object's FID
923 * and the item's name. If the system is restored from an MDT file-level backup,
924 * then before the OI scrub has completely rebuilt the OI files, the OI mappings
925 * of the master MDT-object and slave MDT-object may be invalid. Usually, it is
926 * not a problem for the master MDT-object, because when locating the master
927 * MDT-object we do a name-based lookup (for the striped directory itself)
928 * first, and during that process we can set up the correct OI mapping for the
929 * master MDT-object. But it is trouble for the slave MDT-object, because
930 * the client will not trigger a name-based lookup on the MDT to locate the slave
931 * MDT-object before locating an item under the striped directory, so when
932 * osd_fid_lookup() runs, it will find that the OI mapping for the slave MDT-object
933 * is invalid and will not know what the right OI mapping is. The MDT then has
934 * to return -EINPROGRESS to the client to signal that the OI scrub is rebuilding
935 * the OI file and the related OI mapping is not known yet, please try again later.
936 * The client will then retry the RPC again and again until the related OI mapping
937 * has been updated. That is quite inefficient.
939 * To resolve the above trouble, we handle the following two cases:
941 * 1) The slave MDT-object and the master MDT-object are on different MDTs.
942 * This is relatively easy. Being one of the remote MDT-objects, the slave MDT-object
943 * is linked under /REMOTE_PARENT_DIR with its FID string as the name.
944 * We can locate the slave MDT-object via a lookup in /REMOTE_PARENT_DIR
945 * directly. Please check osd_fid_lookup().
947 * 2) The slave MDT-object and the master MDT-object reside on the same MDT.
948 * In such a case, while looking up the master MDT-object, we will look up the
949 * slave MDT-object via readdir against the master MDT-object, because the
950 * slave MDT-objects' information is stored as sub-directories with the name
951 * "${FID}:${index}". Then, when the local slave MDT-object is found, its OI
952 * mapping will be recorded, so a subsequent osd_fid_lookup() will know
953 * the correct OI mapping for the slave MDT-object.
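 *
 * For example (illustrative FID), a local slave stripe shows up in the
 * master directory as a sub-directory named "[0x240000403:0x5:0x0]:1",
 * i.e. the slave's FID string followed by its stripe index, which is what
 * osd_stripe_dir_filldir() parses below.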
955 static int osd_check_lmv(struct osd_thread_info *oti, struct osd_device *dev,
956 struct inode *inode, struct osd_idmap_cache *oic)
958 struct lu_buf *buf = &oti->oti_big_buf;
959 struct dentry *dentry = &oti->oti_obj_dentry;
960 struct file *filp = &oti->oti_file;
961 const struct file_operations *fops;
962 struct lmv_mds_md_v1 *lmv1;
963 struct osd_check_lmv_buf oclb = {
964 .ctx.actor = osd_stripe_dir_filldir,
975 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMV, buf->lb_buf,
978 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMV, NULL, 0);
980 lu_buf_realloc(buf, rc);
981 if (buf->lb_buf == NULL)
982 GOTO(out, rc = -ENOMEM);
988 if (unlikely(rc == 0 || rc == -ENODATA))
994 if (unlikely(buf->lb_buf == NULL)) {
995 lu_buf_realloc(buf, rc);
996 if (buf->lb_buf == NULL)
997 GOTO(out, rc = -ENOMEM);
1003 if (le32_to_cpu(lmv1->lmv_magic) != LMV_MAGIC_V1)
1006 fops = inode->i_fop;
1007 dentry->d_inode = inode;
1008 dentry->d_sb = inode->i_sb;
1010 filp->f_path.dentry = dentry;
1011 filp->f_flags |= O_NOATIME;
1012 filp->f_mode = FMODE_64BITHASH | FMODE_NONOTIFY;
1013 filp->f_mapping = inode->i_mapping;
1015 filp->private_data = NULL;
1016 filp->f_cred = current_cred();
1017 filp->f_inode = inode;
1018 rc = osd_security_file_alloc(filp);
1023 oclb.oclb_items = 0;
1024 rc = iterate_dir(filp, &oclb.ctx);
1025 } while (rc >= 0 && oclb.oclb_items > 0 && !oclb.oclb_found &&
1026 filp->f_pos != LDISKFS_HTREE_EOF_64BIT);
1027 fops->release(inode, filp);
1031 CDEBUG(D_LFSCK, "%s: fail to check LMV EA, inode = %lu/%u,"
1032 DFID": rc = %d\n", osd_ino2name(inode),
1033 inode->i_ino, inode->i_generation,
1034 PFID(&oic->oic_fid), rc);
1041 static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
1042 const struct lu_fid *fid,
1043 const struct lu_object_conf *conf)
1045 struct osd_thread_info *info;
1046 struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
1047 struct osd_device *dev;
1048 struct osd_idmap_cache *oic;
1049 struct osd_inode_id *id;
1050 struct inode *inode = NULL;
1051 struct lustre_scrub *scrub;
1052 struct scrub_file *sf;
1053 __u32 flags = SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT | SS_AUTO_FULL;
1058 bool remote = false;
1059 bool trusted = true;
1060 bool updated = false;
1061 bool checked = false;
1065 LINVRNT(osd_invariant(obj));
1066 LASSERT(obj->oo_inode == NULL);
1067 LASSERTF(fid_is_sane(fid) || fid_is_idif(fid), DFID"\n", PFID(fid));
1069 dev = osd_dev(ldev);
1070 scrub = &dev->od_scrub.os_scrub;
1071 sf = &scrub->os_file;
1072 info = osd_oti_get(env);
1074 oic = &info->oti_cache;
1076 if (OBD_FAIL_CHECK(OBD_FAIL_SRV_ENOENT))
1080 * For an object created as a locking anchor, or for an object to
1081 * be created on disk, there is no need to osd_oi_lookup() here because a
1082 * FID should never be re-used; if it really is a duplicate FID for some
1083 * unexpected reason, we should be able to detect it later by calling
1084 * do_create->osd_oi_insert().
1086 if (conf && conf->loc_flags & LOC_F_NEW)
1087 GOTO(out, result = 0);
1089 /* Search order: 1. per-thread cache. */
1090 if (lu_fid_eq(fid, &oic->oic_fid) && likely(oic->oic_dev == dev)) {
1096 if (!list_empty(&scrub->os_inconsistent_items)) {
1097 /* Search order: 2. OI scrub pending list. */
1098 result = osd_oii_lookup(dev, fid, id);
1104 * The OI mapping in the OI file can be updated by the OI scrub
1105 * when we locate the inode via FID. So it may not be trustable.
1109 /* Search order: 3. OI files. */
1110 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1111 if (result == -ENOENT) {
1112 if (!(fid_is_norm(fid) || fid_is_igif(fid)) ||
1113 fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) ||
1114 !ldiskfs_test_bit(osd_oi_fid2idx(dev, fid),
1116 GOTO(out, result = 0);
1121 /* -ESTALE is returned if inode of OST object doesn't exist */
1122 if (result == -ESTALE &&
1123 fid_is_on_ost(info, dev, fid, OI_CHECK_FLD)) {
1124 GOTO(out, result = 0);
1131 obj->oo_inode = NULL;
1132 /* for later passes through checks, not true on first pass */
1133 if (!IS_ERR_OR_NULL(inode))
1136 inode = osd_iget_check(info, dev, fid, id, trusted);
1137 if (!IS_ERR(inode)) {
1138 obj->oo_inode = inode;
1146 result = PTR_ERR(inode);
1147 if (result == -ENOENT || result == -ESTALE)
1148 GOTO(out, result = 0);
1150 if (result != -EREMCHG)
1155 * We still have a chance to get the valid inode: for an
1156 * object which is referenced by a remote name entry, the
1157 * object on the local MDT will be linked under the dir
1158 * of "/REMOTE_PARENT_DIR" with its FID string as the name.
1160 * We do not know whether the object for the given FID
1161 * is referenced by some remote name entry or not, and
1162 * especially for DNE II, a multiple-linked object may
1163 * have many name entries residing on many MDTs.
1165 * To simplify the operation, OSD will not distinguish
1166 * further, just look up "/REMOTE_PARENT_DIR". Usually, this
1167 * only happens for RPCs from other MDTs during the
1168 * OI scrub, or for client side RPCs with FID only,
1169 * such as FID to path, or from an old connected client.
1172 rc1 = osd_lookup_in_remote_parent(info, dev, fid, id);
1176 flags |= SS_AUTO_PARTIAL;
1177 flags &= ~SS_AUTO_FULL;
1182 if (thread_is_running(&scrub->os_thread)) {
1183 if (scrub->os_partial_scan && !scrub->os_in_join)
1186 if (IS_ERR_OR_NULL(inode) || result)
1187 GOTO(out, result = -EINPROGRESS);
1190 LASSERT(obj->oo_inode == inode);
1192 osd_add_oi_cache(info, dev, id, fid);
1193 osd_oii_insert(dev, oic, true);
1197 if (dev->od_auto_scrub_interval == AS_NEVER) {
1199 GOTO(out, result = -EREMCHG);
1202 LASSERT(obj->oo_inode == inode);
1204 osd_add_oi_cache(info, dev, id, fid);
1209 rc1 = osd_scrub_start(env, dev, flags);
1210 LCONSOLE_WARN("%s: trigger OI scrub by RPC for the " DFID" with flags "
1211 "0x%x, rc = %d\n", osd_name(dev), PFID(fid), flags, rc1);
1212 if (rc1 && rc1 != -EALREADY)
1213 GOTO(out, result = -EREMCHG);
1215 if (IS_ERR_OR_NULL(inode) || result)
1216 GOTO(out, result = -EINPROGRESS);
1219 LASSERT(obj->oo_inode == inode);
1221 osd_add_oi_cache(info, dev, id, fid);
1222 osd_oii_insert(dev, oic, true);
1227 if (unlikely(obj->oo_header))
1230 result = osd_check_lma(env, obj);
1234 LASSERTF(id->oii_ino == inode->i_ino &&
1235 id->oii_gen == inode->i_generation,
1236 "locate wrong inode for FID: "DFID", %u/%u => %ld/%u\n",
1237 PFID(fid), id->oii_ino, id->oii_gen,
1238 inode->i_ino, inode->i_generation);
1240 saved_ino = inode->i_ino;
1241 saved_gen = inode->i_generation;
1243 if (unlikely(result == -ENODATA)) {
1245 * If the OI scrub updated the OI mapping by race, it
1246 * must be valid. Trust the inode that has no LMA EA.
1251 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1254 * The OI mapping is still there, the inode is still
1255 * valid. It is just because the inode has no LMA EA.
1257 if (saved_ino == id->oii_ino &&
1258 saved_gen == id->oii_gen)
1262 * The OI scrub updated the OI mapping by race.
1263 * The new OI mapping must be valid.
1271 * "result == -ENOENT" means that the OI mappinghas been
1272 * removed by race, so the inode belongs to other object.
1274 * Others error can be returned directly.
1276 if (result == -ENOENT) {
1279 obj->oo_inode = NULL;
1284 if (result != -EREMCHG)
1290 * if two OST objects map to the same inode, and the inode mode is
1291 * (S_IFREG | S_ISUID | S_ISGID | S_ISVTX | 0666), which means it is
1292 * reserved by precreate and not written yet, then don't
1293 * set the inode for the object whose FID mismatches, so that it can create
1294 * an inode and not block precreate.
1296 if (fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) &&
1297 inode->i_mode == (S_IFREG | S_ISUID | S_ISGID | S_ISVTX | 0666)) {
1298 obj->oo_inode = NULL;
1299 GOTO(out, result = 0);
1302 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1304 * "result == -ENOENT" means the cached OI mapping has been removed
1305 * from the OI file by race; the above inode belongs to another object.
1307 if (result == -ENOENT) {
1310 obj->oo_inode = NULL;
1311 GOTO(out, result = 0);
1317 if (saved_ino == id->oii_ino && saved_gen == id->oii_gen) {
1323 * The OI scrub updated the OI mapping by race.
1324 * The new OI mapping must be valid.
1332 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
1333 struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
1335 result = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
1337 if (lma->lma_compat & LMAC_STRIPE_INFO &&
1339 obj->oo_pfid_in_lma = 1;
1340 if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
1342 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
1343 } else if (result != -ENODATA) {
1348 obj->oo_compat_dot_created = 1;
1349 obj->oo_compat_dotdot_created = 1;
1351 if (S_ISDIR(inode->i_mode) &&
1352 (flags & SS_AUTO_PARTIAL || sf->sf_status == SS_SCANNING))
1353 osd_check_lmv(info, dev, inode, oic);
1355 result = osd_attach_jinode(inode);
1360 GOTO(out, result = 0);
1362 LASSERT(!obj->oo_hl_head);
1363 obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
1365 GOTO(out, result = (!obj->oo_hl_head ? -ENOMEM : 0));
1368 if (result || !obj->oo_inode) {
1369 if (!IS_ERR_OR_NULL(inode))
1372 obj->oo_inode = NULL;
1374 fid_zero(&oic->oic_fid);
1377 LINVRNT(osd_invariant(obj));
1382 * Concurrency: shouldn't matter.
1384 static void osd_object_init0(struct osd_object *obj)
1386 LASSERT(obj->oo_inode != NULL);
1387 obj->oo_dt.do_body_ops = &osd_body_ops;
1388 obj->oo_dt.do_lu.lo_header->loh_attr |=
1389 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
1393 * Concurrency: no concurrent access is possible that early in object
1396 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
1397 const struct lu_object_conf *conf)
1399 struct osd_object *obj = osd_obj(l);
1402 LINVRNT(osd_invariant(obj));
1404 if (OBD_FAIL_PRECHECK(OBD_FAIL_MDS_LLOG_UMOUNT_RACE) &&
1405 cfs_fail_val == 2) {
1406 struct osd_thread_info *info = osd_oti_get(env);
1407 struct osd_idmap_cache *oic = &info->oti_cache;
1408 /* invalidate thread cache */
1409 memset(&oic->oic_fid, 0, sizeof(oic->oic_fid));
1411 if (fid_is_otable_it(&l->lo_header->loh_fid)) {
1412 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
1413 l->lo_header->loh_attr |= LOHA_EXISTS;
1417 result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
1418 obj->oo_dt.do_body_ops = &osd_body_ops_new;
1419 if (result == 0 && obj->oo_inode != NULL) {
1420 struct osd_thread_info *oti = osd_oti_get(env);
1421 struct lustre_ost_attrs *loa = &oti->oti_ost_attrs;
1423 osd_object_init0(obj);
1424 if (unlikely(obj->oo_header))
1427 result = osd_get_lma(oti, obj->oo_inode,
1428 &oti->oti_obj_dentry, loa);
1431 * Convert LMAI flags to Lustre LMA flags
1432 * and cache them in oo_lma_flags
1435 lma_to_lustre_flags(loa->loa_lma.lma_incompat);
1436 } else if (result == -ENODATA) {
1440 obj->oo_dirent_count = LU_DIRENT_COUNT_UNSET;
1442 LINVRNT(osd_invariant(obj));
1447 * The first part of oxe_buf is the xattr name, and it is '\0' terminated.
1448 * The remaining part is the value, in binary mode.
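 *
 * Layout sketch (illustrative name/value):
 *   oxe_buf:     "trusted.lov\0<binary value bytes>"
 *   oxe_namelen: strlen("trusted.lov")
 *   oxe_len:     sizeof(struct osd_xattr_entry) + oxe_namelen + 1 + value size
 *                (matching the length math in osd_oxc_get()/osd_oxc_add() below)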
1450 struct osd_xattr_entry {
1451 struct list_head oxe_list;
1455 struct rcu_head oxe_rcu;
1459 static int osd_oxc_get(struct osd_object *obj, const char *name,
1462 struct osd_xattr_entry *tmp;
1463 struct osd_xattr_entry *oxe = NULL;
1464 size_t namelen = strlen(name);
1468 list_for_each_entry_rcu(tmp, &obj->oo_xattr_list, oxe_list) {
1469 if (namelen == tmp->oxe_namelen &&
1470 strncmp(name, tmp->oxe_buf, namelen) == 0) {
1477 GOTO(out, rc = -ENOENT);
1479 if (!oxe->oxe_exist)
1480 GOTO(out, rc = -ENODATA);
1483 rc = oxe->oxe_len - sizeof(*oxe) - oxe->oxe_namelen - 1;
1486 if (buf->lb_buf == NULL)
1489 if (buf->lb_len < rc)
1490 GOTO(out, rc = -ERANGE);
1492 memcpy(buf->lb_buf, &oxe->oxe_buf[namelen + 1], rc);
1499 static void osd_oxc_free(struct rcu_head *head)
1501 struct osd_xattr_entry *oxe;
1503 oxe = container_of(head, struct osd_xattr_entry, oxe_rcu);
1504 OBD_FREE(oxe, oxe->oxe_len);
1507 static void osd_oxc_add(struct osd_object *obj, const char *name,
1508 const char *buf, int buflen)
1510 struct osd_xattr_entry *oxe;
1511 struct osd_xattr_entry *old = NULL;
1512 struct osd_xattr_entry *tmp;
1513 size_t namelen = strlen(name);
1514 size_t len = sizeof(*oxe) + namelen + 1 + buflen;
1516 OBD_ALLOC(oxe, len);
1520 INIT_LIST_HEAD(&oxe->oxe_list);
1522 oxe->oxe_namelen = namelen;
1523 memcpy(oxe->oxe_buf, name, namelen);
1525 LASSERT(buf != NULL);
1526 memcpy(oxe->oxe_buf + namelen + 1, buf, buflen);
1527 oxe->oxe_exist = true;
1529 oxe->oxe_exist = false;
1532 /* this should be rarely called, just remove old and add new */
1533 spin_lock(&obj->oo_guard);
1534 list_for_each_entry(tmp, &obj->oo_xattr_list, oxe_list) {
1535 if (namelen == tmp->oxe_namelen &&
1536 strncmp(name, tmp->oxe_buf, namelen) == 0) {
1542 list_replace_rcu(&old->oxe_list, &oxe->oxe_list);
1543 call_rcu(&old->oxe_rcu, osd_oxc_free);
1545 list_add_tail_rcu(&oxe->oxe_list, &obj->oo_xattr_list);
1547 spin_unlock(&obj->oo_guard);
1550 static void osd_oxc_del(struct osd_object *obj, const char *name)
1552 struct osd_xattr_entry *oxe;
1553 size_t namelen = strlen(name);
1555 spin_lock(&obj->oo_guard);
1556 list_for_each_entry(oxe, &obj->oo_xattr_list, oxe_list) {
1557 if (namelen == oxe->oxe_namelen &&
1558 strncmp(name, oxe->oxe_buf, namelen) == 0) {
1559 list_del_rcu(&oxe->oxe_list);
1560 call_rcu(&oxe->oxe_rcu, osd_oxc_free);
1564 spin_unlock(&obj->oo_guard);
1567 static void osd_oxc_fini(struct osd_object *obj)
1569 struct osd_xattr_entry *oxe, *next;
1571 list_for_each_entry_safe(oxe, next, &obj->oo_xattr_list, oxe_list) {
1572 list_del(&oxe->oxe_list);
1573 OBD_FREE(oxe, oxe->oxe_len);
1578 * Concurrency: no concurrent access is possible that late in object
1581 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
1583 struct osd_object *obj = osd_obj(l);
1584 struct lu_object_header *h = obj->oo_header;
1586 LINVRNT(osd_invariant(obj));
1589 dt_object_fini(&obj->oo_dt);
1590 if (obj->oo_hl_head != NULL)
1591 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
1592 /* obj doesn't contain an lu_object_header, so we don't need call_rcu */
1595 lu_object_header_fini(h);
1596 OBD_FREE_PRE(h, sizeof(*h), "kfreed");
1597 kfree_rcu(h, loh_rcu);
1602 * Concurrency: no concurrent access is possible that late in object
1605 static void osd_index_fini(struct osd_object *o)
1607 struct iam_container *bag;
1609 if (o->oo_dir != NULL) {
1610 bag = &o->oo_dir->od_container;
1611 if (o->oo_inode != NULL) {
1612 if (bag->ic_object == o->oo_inode)
1613 iam_container_fini(bag);
1615 OBD_FREE_PTR(o->oo_dir);
1621 * Concurrency: no concurrent access is possible that late in object
1622 * life-cycle (for all existing callers, that is; new callers have to provide
1623 * their own locking).
1625 static int osd_inode_unlinked(const struct inode *inode)
1627 return inode->i_nlink == 0;
1631 OSD_TXN_OI_DELETE_CREDITS = 20,
1632 OSD_TXN_INODE_DELETE_CREDITS = 20
1639 #if OSD_THANDLE_STATS
1641 * Set time when the handle is allocated
1643 static void osd_th_alloced(struct osd_thandle *oth)
1645 oth->oth_alloced = ktime_get();
1649 * Set time when the handle started
1651 static void osd_th_started(struct osd_thandle *oth)
1653 oth->oth_started = ktime_get();
1657 * Check whether we have been dealing with this handle for too long.
1659 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
1660 ktime_t alloced, ktime_t started,
1663 ktime_t now = ktime_get();
1665 LASSERT(dev != NULL);
1667 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
1668 ktime_us_delta(started, alloced));
1669 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
1670 ktime_us_delta(closed, started));
1671 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
1672 ktime_us_delta(now, closed));
1674 if (ktime_before(ktime_add_ns(alloced, 30 * NSEC_PER_SEC), now)) {
1675 CWARN("transaction handle %p was open for too long: now %lld, alloced %lld, started %lld, closed %lld\n",
1676 oth, now, alloced, started, closed);
1677 libcfs_debug_dumpstack(NULL);
1681 #define OSD_CHECK_SLOW_TH(oth, dev, expr) \
1683 ktime_t __closed = ktime_get(); \
1684 ktime_t __alloced = oth->oth_alloced; \
1685 ktime_t __started = oth->oth_started; \
1688 __osd_th_check_slow(oth, dev, __alloced, __started, __closed); \
1691 #else /* OSD_THANDLE_STATS */
1693 #define osd_th_alloced(h) do {} while(0)
1694 #define osd_th_started(h) do {} while(0)
1695 #define OSD_CHECK_SLOW_TH(oth, dev, expr) expr
1697 #endif /* OSD_THANDLE_STATS */
1700 * Concurrency: doesn't access mutable data.
1702 static int osd_param_is_not_sane(const struct osd_device *dev,
1703 const struct thandle *th)
1705 struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
1707 return oh->ot_credits > osd_transaction_size(dev);
1711 * Concurrency: shouldn't matter.
1713 static void osd_trans_commit_cb(struct super_block *sb,
1714 struct ldiskfs_journal_cb_entry *jcb, int error)
1716 struct osd_thandle *oh = container_of(jcb, struct osd_thandle, ot_jcb);
1717 struct thandle *th = &oh->ot_super;
1718 struct lu_device *lud = &th->th_dev->dd_lu_dev;
1719 struct dt_txn_commit_cb *dcb, *tmp;
1721 LASSERT(oh->ot_handle == NULL);
1724 CERROR("transaction @0x%p commit error: %d\n", th, error);
1726 OBD_FAIL_TIMEOUT(OBD_FAIL_OST_DELAY_TRANS, 40);
1727 /* call per-transaction callbacks if any */
1728 list_for_each_entry_safe(dcb, tmp, &oh->ot_commit_dcb_list,
1730 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
1731 "commit callback entry: magic=%x name='%s'\n",
1732 dcb->dcb_magic, dcb->dcb_name);
1733 list_del_init(&dcb->dcb_linkage);
1734 dcb->dcb_func(NULL, th, dcb, error);
1737 lu_ref_del_at(&lud->ld_reference, &oh->ot_dev_link, "osd-tx", th);
1744 static struct thandle *osd_trans_create(const struct lu_env *env,
1745 struct dt_device *d)
1747 struct osd_thread_info *oti = osd_oti_get(env);
1748 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1749 struct osd_thandle *oh;
1755 CERROR("%s: someone try to start transaction under "
1756 "readonly mode, should be disabled.\n",
1757 osd_name(osd_dt_dev(d)));
1759 RETURN(ERR_PTR(-EROFS));
1762 /* no pending IO in this thread should be left from a previous request */
1763 LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
1765 sb_start_write(osd_sb(osd_dt_dev(d)));
1767 OBD_ALLOC_GFP(oh, sizeof(*oh), GFP_NOFS);
1769 sb_end_write(osd_sb(osd_dt_dev(d)));
1770 RETURN(ERR_PTR(-ENOMEM));
1773 oh->ot_quota_trans = &oti->oti_quota_trans;
1774 memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
1779 INIT_LIST_HEAD(&oh->ot_commit_dcb_list);
1780 INIT_LIST_HEAD(&oh->ot_stop_dcb_list);
1781 INIT_LIST_HEAD(&oh->ot_trunc_locks);
1784 memset(oti->oti_declare_ops, 0,
1785 sizeof(oti->oti_declare_ops));
1786 memset(oti->oti_declare_ops_cred, 0,
1787 sizeof(oti->oti_declare_ops_cred));
1788 memset(oti->oti_declare_ops_used, 0,
1789 sizeof(oti->oti_declare_ops_used));
1791 oti->oti_ins_cache_depth++;
1796 void osd_trans_dump_creds(const struct lu_env *env, struct thandle *th)
1798 struct osd_thread_info *oti = osd_oti_get(env);
1799 struct osd_thandle *oh;
1801 oh = container_of(th, struct osd_thandle, ot_super);
1802 LASSERT(oh != NULL);
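/*
 * Each op below is dumped as a triple:
 * <number of declarations> / <credits declared> / <credits used>
 * (oti_declare_ops / oti_declare_ops_cred / oti_declare_ops_used).
 */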
1804 CWARN(" create: %u/%u/%u, destroy: %u/%u/%u\n",
1805 oti->oti_declare_ops[OSD_OT_CREATE],
1806 oti->oti_declare_ops_cred[OSD_OT_CREATE],
1807 oti->oti_declare_ops_used[OSD_OT_CREATE],
1808 oti->oti_declare_ops[OSD_OT_DESTROY],
1809 oti->oti_declare_ops_cred[OSD_OT_DESTROY],
1810 oti->oti_declare_ops_used[OSD_OT_DESTROY]);
1811 CWARN(" attr_set: %u/%u/%u, xattr_set: %u/%u/%u\n",
1812 oti->oti_declare_ops[OSD_OT_ATTR_SET],
1813 oti->oti_declare_ops_cred[OSD_OT_ATTR_SET],
1814 oti->oti_declare_ops_used[OSD_OT_ATTR_SET],
1815 oti->oti_declare_ops[OSD_OT_XATTR_SET],
1816 oti->oti_declare_ops_cred[OSD_OT_XATTR_SET],
1817 oti->oti_declare_ops_used[OSD_OT_XATTR_SET]);
1818 CWARN(" write: %u/%u/%u, punch: %u/%u/%u, quota %u/%u/%u\n",
1819 oti->oti_declare_ops[OSD_OT_WRITE],
1820 oti->oti_declare_ops_cred[OSD_OT_WRITE],
1821 oti->oti_declare_ops_used[OSD_OT_WRITE],
1822 oti->oti_declare_ops[OSD_OT_PUNCH],
1823 oti->oti_declare_ops_cred[OSD_OT_PUNCH],
1824 oti->oti_declare_ops_used[OSD_OT_PUNCH],
1825 oti->oti_declare_ops[OSD_OT_QUOTA],
1826 oti->oti_declare_ops_cred[OSD_OT_QUOTA],
1827 oti->oti_declare_ops_used[OSD_OT_QUOTA]);
1828 CWARN(" insert: %u/%u/%u, delete: %u/%u/%u\n",
1829 oti->oti_declare_ops[OSD_OT_INSERT],
1830 oti->oti_declare_ops_cred[OSD_OT_INSERT],
1831 oti->oti_declare_ops_used[OSD_OT_INSERT],
1832 oti->oti_declare_ops[OSD_OT_DELETE],
1833 oti->oti_declare_ops_cred[OSD_OT_DELETE],
1834 oti->oti_declare_ops_used[OSD_OT_DELETE]);
1835 CWARN(" ref_add: %u/%u/%u, ref_del: %u/%u/%u\n",
1836 oti->oti_declare_ops[OSD_OT_REF_ADD],
1837 oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
1838 oti->oti_declare_ops_used[OSD_OT_REF_ADD],
1839 oti->oti_declare_ops[OSD_OT_REF_DEL],
1840 oti->oti_declare_ops_cred[OSD_OT_REF_DEL],
1841 oti->oti_declare_ops_used[OSD_OT_REF_DEL]);
1845 * Concurrency: shouldn't matter.
1847 static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
1850 struct osd_thread_info *oti = osd_oti_get(env);
1851 struct osd_device *dev = osd_dt_dev(d);
1853 struct osd_thandle *oh;
1858 LASSERT(current->journal_info == NULL);
1860 oh = container_of(th, struct osd_thandle, ot_super);
1861 LASSERT(oh != NULL);
1862 LASSERT(oh->ot_handle == NULL);
1864 rc = dt_txn_hook_start(env, d, th);
1868 if (unlikely(osd_param_is_not_sane(dev, th))) {
1869 static unsigned long last_printed;
1870 static int last_credits;
1873 * don't make noise on tiny testing systems;
1874 * actual credits misuse will be caught anyway
1876 if (last_credits != oh->ot_credits &&
1877 time_after(jiffies, last_printed +
1878 cfs_time_seconds(60)) &&
1879 osd_transaction_size(dev) > 512) {
1880 CWARN("%s: credits %u > trans_max %u\n", osd_name(dev),
1881 oh->ot_credits, osd_transaction_size(dev));
1882 osd_trans_dump_creds(env, th);
1883 libcfs_debug_dumpstack(NULL);
1884 last_credits = oh->ot_credits;
1885 last_printed = jiffies;
1888 * XXX Limit the credits to 'max_transaction_buffers', and
1889 * let the underlying filesystem catch the error if
1890 * we really need so many credits.
1892 * This should be removed when we can calculate the
1893 * credits precisely.
1895 oh->ot_credits = osd_transaction_size(dev);
1896 } else if (ldiskfs_track_declares_assert != 0) {
1898 * reserve a few credits to prevent an assertion in JBD;
1899 * our debugging mechanism will be able to detect
1900 * overuse. this can help to debug single-update
1903 oh->ot_credits += 10;
1904 if (unlikely(osd_param_is_not_sane(dev, th)))
1905 oh->ot_credits = osd_transaction_size(dev);
1908 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_TXN_START))
1909 GOTO(out, rc = -EIO);
1912 * XXX temporary stuff. Some abstraction layer should
1915 jh = osd_journal_start_sb(osd_sb(dev), LDISKFS_HT_MISC, oh->ot_credits);
1919 LASSERT(oti->oti_txns == 0);
1921 lu_device_get(&d->dd_lu_dev);
1922 lu_ref_add_at(&d->dd_lu_dev.ld_reference, &oh->ot_dev_link,
1933 static int osd_seq_exists(const struct lu_env *env,
1934 struct osd_device *osd, u64 seq)
1936 struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
1937 struct seq_server_site *ss = osd_seq_site(osd);
1942 LASSERT(ss != NULL);
1943 LASSERT(ss->ss_server_fld != NULL);
1945 rc = osd_fld_lookup(env, osd, seq, range);
1948 CERROR("%s: can't lookup FLD sequence %#llx: rc = %d\n",
1949 osd_name(osd), seq, rc);
1953 RETURN(ss->ss_node_id == range->lsr_index);
1956 static void osd_trans_stop_cb(struct osd_thandle *oth, int result)
1958 struct dt_txn_commit_cb *dcb;
1959 struct dt_txn_commit_cb *tmp;
1961 /* call per-transaction stop callbacks if any */
1962 list_for_each_entry_safe(dcb, tmp, &oth->ot_stop_dcb_list,
1964 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
1965 "commit callback entry: magic=%x name='%s'\n",
1966 dcb->dcb_magic, dcb->dcb_name);
1967 list_del_init(&dcb->dcb_linkage);
1968 dcb->dcb_func(NULL, &oth->ot_super, dcb, result);
1973 * Concurrency: shouldn't matter.
1975 static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
1978 struct osd_thread_info *oti = osd_oti_get(env);
1979 struct osd_thandle *oh;
1980 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1981 struct osd_device *osd = osd_dt_dev(th->th_dev);
1982 struct qsd_instance *qsd = osd_def_qsd(osd);
1983 struct lquota_trans *qtrans;
1984 LIST_HEAD(truncates);
1985 int rc = 0, remove_agents = 0;
1989 oh = container_of(th, struct osd_thandle, ot_super);
1991 remove_agents = oh->ot_remove_agents;
1993 qtrans = oh->ot_quota_trans;
1994 oh->ot_quota_trans = NULL;
1996 /* move locks to local list, stop tx, execute truncates */
1997 list_splice(&oh->ot_trunc_locks, &truncates);
1999 if (oh->ot_handle != NULL) {
2002 handle_t *hdl = oh->ot_handle;
2005 * add commit callback
2006 * notice we don't do this in osd_trans_start()
2007 * as the underlying transaction can change during truncate
2009 ldiskfs_journal_callback_add(hdl, osd_trans_commit_cb,
2012 LASSERT(oti->oti_txns == 1);
2015 rc = dt_txn_hook_stop(env, th);
2017 CERROR("%s: failed in transaction hook: rc = %d\n",
2020 osd_trans_stop_cb(oh, rc);
2021 /* hook functions might modify th_sync */
2022 hdl->h_sync = th->th_sync;
2024 oh->ot_handle = NULL;
2025 OSD_CHECK_SLOW_TH(oh, osd, rc2 = ldiskfs_journal_stop(hdl));
2027 CERROR("%s: failed to stop transaction: rc = %d\n",
2028 osd_name(osd), rc2);
2032 osd_process_truncates(&truncates);
2034 osd_trans_stop_cb(oh, th->th_result);
2038 osd_trunc_unlock_all(env, &truncates);
2040 /* inform the quota slave device that the transaction is stopping */
2041 qsd_op_end(env, qsd, qtrans);
2044 * as we want IO to the journal and data IO to be concurrent, we don't block
2045 * awaiting data IO completion in osd_do_bio(); instead we wait here
2046 * once the transaction is submitted to the journal. all regular requests
2047 * don't do direct IO (except read/write), thus this wait_event becomes
2050 * IMPORTANT: we have to wait until any IO submitted by the thread is
2051 * completed, otherwise the iobuf may be corrupted by a different request
2053 wait_event(iobuf->dr_wait,
2054 atomic_read(&iobuf->dr_numreqs) == 0);
2055 osd_fini_iobuf(osd, iobuf);
2057 rc = iobuf->dr_error;
2059 if (unlikely(remove_agents != 0))
2060 osd_process_scheduled_agent_removals(env, osd);
2062 oti->oti_ins_cache_depth--;
2063 /* reset OI cache for safety */
2064 if (oti->oti_ins_cache_depth == 0)
2065 oti->oti_ins_cache_used = 0;
2067 sb_end_write(osd_sb(osd));
2072 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
2074 struct osd_thandle *oh = container_of(th, struct osd_thandle,
2077 LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
2078 LASSERT(&dcb->dcb_func != NULL);
2079 if (dcb->dcb_flags & DCB_TRANS_STOP)
2080 list_add(&dcb->dcb_linkage, &oh->ot_stop_dcb_list);
2082 list_add(&dcb->dcb_linkage, &oh->ot_commit_dcb_list);
2088 * Called just before object is freed. Releases all resources except for
2089 * object itself (that is released by osd_object_free()).
2091 * Concurrency: no concurrent access is possible that late in object
2094 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
2096 struct osd_object *obj = osd_obj(l);
2097 struct qsd_instance *qsd = osd_def_qsd(osd_obj2dev(obj));
2098 struct inode *inode = obj->oo_inode;
2103 LINVRNT(osd_invariant(obj));
2106 * If the object is unlinked, remove the fid->ino mapping from the object index.
2109 osd_index_fini(obj);
2114 if (osd_has_index(obj) && obj->oo_dt.do_index_ops == &osd_index_iam_ops)
2115 ldiskfs_set_inode_flag(inode, LDISKFS_INODE_JOURNAL_DATA);
2117 uid = i_uid_read(inode);
2118 gid = i_gid_read(inode);
2119 projid = i_projid_read(inode);
2121 obj->oo_inode = NULL;
2124 /* do not rebalance quota if the caller needs to release memory,
2125 * otherwise qsd_refresh_usage() may go into a new ldiskfs
2126 * transaction and risk a deadlock - LU-12178 */
2127 if (current->flags & (PF_MEMALLOC | PF_KSWAPD))
2130 if (!obj->oo_header && qsd) {
2131 struct osd_thread_info *info = osd_oti_get(env);
2132 struct lquota_id_info *qi = &info->oti_qi;
2134 /* Release granted quota to master if necessary */
2135 qi->lqi_id.qid_uid = uid;
2136 qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
2138 qi->lqi_id.qid_uid = gid;
2139 qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
2141 qi->lqi_id.qid_uid = projid;
2142 qsd_op_adjust(env, qsd, &qi->lqi_id, PRJQUOTA);
2147 * Concurrency: ->loo_object_release() is called under site spin-lock.
2149 static void osd_object_release(const struct lu_env *env,
2150 struct lu_object *l)
2152 struct osd_object *o = osd_obj(l);
2155 * nobody should be releasing a non-destroyed object with nlink=0;
2156 * the API allows this, but ldiskfs doesn't like it and then reports
2157 * this inode as deleted
2159 LASSERT(!(o->oo_destroyed == 0 && o->oo_inode &&
2160 o->oo_inode->i_nlink == 0));
2164 * Concurrency: shouldn't matter.
2166 static int osd_object_print(const struct lu_env *env, void *cookie,
2167 lu_printer_t p, const struct lu_object *l)
2169 struct osd_object *o = osd_obj(l);
2170 struct iam_descr *d;
2172 if (o->oo_dir != NULL)
2173 d = o->oo_dir->od_container.ic_descr;
2176 return (*p)(env, cookie,
2177 LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
2179 o->oo_inode ? o->oo_inode->i_ino : 0UL,
2180 o->oo_inode ? o->oo_inode->i_generation : 0,
2181 d ? d->id_ops->id_name : "plain");
2185 * Concurrency: shouldn't matter.
2187 int osd_statfs(const struct lu_env *env, struct dt_device *d,
2188 struct obd_statfs *sfs, struct obd_statfs_info *info)
2190 struct osd_device *osd = osd_dt_dev(d);
2191 struct super_block *sb = osd_sb(osd);
2192 struct kstatfs *ksfs;
2196 if (unlikely(osd->od_mnt == NULL))
2197 return -EINPROGRESS;
2199 /* osd_lproc.c calls this without an env, allocate ksfs for that case */
2200 if (unlikely(env == NULL)) {
2201 OBD_ALLOC_PTR(ksfs);
2205 ksfs = &osd_oti_get(env)->oti_ksfs;
2208 result = sb->s_op->statfs(sb->s_root, ksfs);
2212 statfs_pack(sfs, ksfs);
2213 if (unlikely(sb->s_flags & SB_RDONLY))
2214 sfs->os_state |= OS_STATFS_READONLY;
2216 sfs->os_state |= osd->od_nonrotational ? OS_STATFS_NONROT : 0;
2218 if (ldiskfs_has_feature_extents(sb))
2219 sfs->os_maxbytes = sb->s_maxbytes;
2221 sfs->os_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
2224 * Reserve some space so as to avoid fragmenting the filesystem too much.
2225 * Fragmentation not only impacts performance, but can also increase
2226 * metadata overhead significantly, causing grant calculation to be
2229 * Reserve 0.78% of total space, at least 8MB for small filesystems.
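 *
 * Worked example (a sketch, assuming OSD_STATFS_RESERVED is 8MB and
 * OSD_STATFS_RESERVED_SHIFT is 7, i.e. 1/128 ~= 0.78%): with 4KB blocks
 * the minimum reservation is 8MB >> 12 = 2048 blocks; once the filesystem
 * holds more than 2048 << 7 = 262144 blocks (1GB), the 1/128 fraction takes
 * over, so a 1TB filesystem reserves about 8GB.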
2231 BUILD_BUG_ON(OSD_STATFS_RESERVED <= LDISKFS_MAX_BLOCK_SIZE);
2232 reserved = OSD_STATFS_RESERVED >> sb->s_blocksize_bits;
2233 if (likely(sfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
2234 reserved = sfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
2236 sfs->os_blocks -= reserved;
2237 sfs->os_bfree -= min(reserved, sfs->os_bfree);
2238 sfs->os_bavail -= min(reserved, sfs->os_bavail);
2241 if (unlikely(env == NULL))
2247 * Estimate the space needed for file creations. We assume the largest
2248 * filename, which is 2^64 - 1, hence a filename of 20 chars.
2249 * This is 28 bytes per object, which is 28MB for 1M objects ... not so bad.
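 *
 * Worked arithmetic (assuming the usual ldiskfs dirent layout of an 8-byte
 * header plus the name, rounded up to a multiple of 4):
 * DIR_REC_LEN(20) = roundup(8 + 20, 4) = 28 bytes per object.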
2251 #ifdef __LDISKFS_DIR_REC_LEN
2252 #define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
2254 #define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
2258 * Concurrency: doesn't access mutable data.
2260 static void osd_conf_get(const struct lu_env *env,
2261 const struct dt_device *dev,
2262 struct dt_device_param *param)
2264 struct osd_device *d = osd_dt_dev(dev);
2265 struct super_block *sb = osd_sb(d);
2266 struct blk_integrity *bi = bdev_get_integrity(sb->s_bdev);
2271 * XXX should be taken from not-yet-existing fs abstraction layer.
2273 param->ddp_max_name_len = LDISKFS_NAME_LEN;
2274 param->ddp_max_nlink = LDISKFS_LINK_MAX;
2275 param->ddp_symlink_max = sb->s_blocksize;
2276 param->ddp_mount_type = LDD_MT_LDISKFS;
2277 if (ldiskfs_has_feature_extents(sb))
2278 param->ddp_maxbytes = sb->s_maxbytes;
2280 param->ddp_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
2282 * inodes are statically allocated, so per-inode space consumption
2283 * is the space consumed by the directory entry
2285 param->ddp_inodespace = PER_OBJ_USAGE;
2287 * EXT_INIT_MAX_LEN is the theoretical maximum extent size (32k blocks
2288 * is 128MB) which is unlikely to be hit in real life. Report a smaller
2289 * maximum length to not under-count the actual number of extents
2290 * needed for writing a file if there are sub-optimal block allocations.
2292 param->ddp_max_extent_blks = EXT_INIT_MAX_LEN >> 1;
2293 /* worst-case extent insertion metadata overhead */
2294 param->ddp_extent_tax = 6 * LDISKFS_BLOCK_SIZE(sb);
2295 param->ddp_mntopts = 0;
2296 if (test_opt(sb, XATTR_USER))
2297 param->ddp_mntopts |= MNTOPT_USERXATTR;
2298 if (test_opt(sb, POSIX_ACL))
2299 param->ddp_mntopts |= MNTOPT_ACL;
2302 * LOD might calculate the max stripe count based on max_ea_size,
2303 * so we need to take the overhead into account as well:
2304 * xattr_header + magic + xattr_entry_head
2306 ea_overhead = sizeof(struct ldiskfs_xattr_header) + sizeof(__u32) +
2307 LDISKFS_XATTR_LEN(XATTR_NAME_MAX_LEN);
2309 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
2310 if (ldiskfs_has_feature_ea_inode(sb))
2311 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE -
2315 param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
2317 if (param->ddp_max_ea_size > OBD_MAX_EA_SIZE)
2318 param->ddp_max_ea_size = OBD_MAX_EA_SIZE;
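/*
 * For example (a sketch): without the ea_inode feature on a 4KB-block
 * filesystem, ddp_max_ea_size ends up just under 4096 bytes (one block minus
 * ea_overhead); with ea_inode it is bounded only by
 * LDISKFS_XATTR_MAX_LARGE_EA_SIZE and the OBD_MAX_EA_SIZE cap just above.
 */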
2321 * Preferred RPC size for efficient disk IO. 4MB shows good
2322 * all-around performance for ldiskfs, but use bigalloc chunk size
2323 * by default if larger.
2325 #if defined(LDISKFS_CLUSTER_SIZE)
2326 if (LDISKFS_CLUSTER_SIZE(sb) > DT_DEF_BRW_SIZE)
2327 param->ddp_brw_size = LDISKFS_CLUSTER_SIZE(sb);
2330 param->ddp_brw_size = DT_DEF_BRW_SIZE;
2332 param->ddp_t10_cksum_type = 0;
2334 unsigned short interval = blk_integrity_interval(bi);
2335 name = blk_integrity_name(bi);
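/*
 * The integrity profile names parsed below are expected to look like
 * "T10-DIF-TYPE1-CRC", "T10-DIF-TYPE1-IP", "T10-DIF-TYPE3-CRC" or
 * "T10-DIF-TYPE3-IP" (a sketch of the assumed naming convention), with the
 * sector size taken from the integrity interval (512 or 4096 bytes).
 */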
2343 if (strncmp(name, "T10-DIF-TYPE",
2344 sizeof("T10-DIF-TYPE") - 1) == 0) {
2345 /* also skip "1/3-" at end */
2346 const int type_off = sizeof("T10-DIF-TYPE.");
2347 char type_number = name[type_off - 2];
2349 if (interval != 512 && interval != 4096) {
2350 CERROR("%s: unsupported T10PI sector size %u\n",
2351 d->od_svname, interval);
2352 } else if (type_number != '1' && type_number != '3') {
2353 CERROR("%s: unsupported T10PI type %s\n",
2354 d->od_svname, name);
2355 } else if (strcmp(name + type_off, "CRC") == 0) {
2356 d->od_t10_type = type_number == '1' ?
2357 OSD_T10_TYPE1_CRC : OSD_T10_TYPE3_CRC;
2358 param->ddp_t10_cksum_type = interval == 512 ?
2359 OBD_CKSUM_T10CRC512 :
2361 } else if (strcmp(name + type_off, "IP") == 0) {
2362 d->od_t10_type = type_number == '1' ?
2363 OSD_T10_TYPE1_IP : OSD_T10_TYPE3_IP;
2364 param->ddp_t10_cksum_type = interval == 512 ?
2365 OBD_CKSUM_T10IP512 :
2368 CERROR("%s: unsupported checksum type of "
2370 d->od_svname, name);
2374 CERROR("%s: unsupported T10PI type '%s'\n",
2375 d->od_svname, name);
2380 static struct super_block *osd_mnt_sb_get(const struct dt_device *d)
2382 return osd_sb(osd_dt_dev(d));
2386 * Concurrency: shouldn't matter.
2388 static int osd_sync(const struct lu_env *env, struct dt_device *d)
2391 struct super_block *s = osd_sb(osd_dt_dev(d));
2394 down_read(&s->s_umount);
2395 rc = s->s_op->sync_fs(s, 1);
2396 up_read(&s->s_umount);
2398 CDEBUG(D_CACHE, "%s: synced OSD: rc = %d\n", osd_dt_dev(d)->od_svname,
2405 * Start commit for OSD device.
2407 * An implementation of dt_commit_async method for OSD device.
2408 * Asynchronously starts the underlying fs sync and thereby a transaction commit.
2411 * \param env environment
2412 * \param d dt device
2414 * \see dt_device_operations
2416 static int osd_commit_async(const struct lu_env *env,
2417 struct dt_device *d)
2419 struct super_block *s = osd_sb(osd_dt_dev(d));
2424 CDEBUG(D_HA, "%s: async commit OSD\n", osd_dt_dev(d)->od_svname);
2425 down_read(&s->s_umount);
2426 rc = s->s_op->sync_fs(s, 0);
2427 up_read(&s->s_umount);
2432 static int (*priv_security_file_alloc)(struct file *file);
2434 int osd_security_file_alloc(struct file *file)
2436 if (priv_security_file_alloc)
2437 return priv_security_file_alloc(file);
2442 * Concurrency: shouldn't matter.
2444 static int osd_ro(const struct lu_env *env, struct dt_device *d)
2446 struct super_block *sb = osd_sb(osd_dt_dev(d));
2447 struct block_device *dev = sb->s_bdev;
2448 int rc = -EOPNOTSUPP;
2452 CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
2453 osd_dt_dev(d)->od_svname, (long)dev, rc);
2459 * Note: we do not count QUOTA credits here.
2460 * If we mount with --data_journal we may need more.
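 *
 * As a rough example of how the table below is consumed (a sketch, not an
 * exact accounting): osd_declare_create() further down reserves
 * DTO_OBJECT_CREATE (4) credits for the new inode plus DTO_INDEX_INSERT (16)
 * + 1 for inserting the OI mapping.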
2462 const int osd_dto_credits_noquota[DTO_NR] = {
2465 * INDEX_EXTRA_TRANS_BLOCKS(8) +
2466 * SINGLEDATA_TRANS_BLOCKS(8)
2467 * XXX Note: maybe IAM needs more, since IAM has more levels than the EXT3 htree.
2470 [DTO_INDEX_INSERT] = 16,
2473 * just modify a single entry, probably merging a few within a block
2475 [DTO_INDEX_DELETE] = 1,
2479 [DTO_INDEX_UPDATE] = 16,
2481 * 4(inode, inode bits, groups, GDT)
2482 * notice: OI updates are counted separately with DTO_INDEX_INSERT
2484 [DTO_OBJECT_CREATE] = 4,
2486 * 4(inode, inode bits, groups, GDT)
2487 * notice: OI updates are counted separately with DTO_INDEX_DELETE
2489 [DTO_OBJECT_DELETE] = 4,
2491 * Attr set credits (inode)
2493 [DTO_ATTR_SET_BASE] = 1,
2495 * Xattr set. The same as xattr of EXT3.
2496 * DATA_TRANS_BLOCKS(14)
2497 * XXX Note: in the original MDS implementation INDEX_EXTRA_TRANS_BLOCKS
2498 * are also counted in; it is not clear why.
2500 [DTO_XATTR_SET] = 14,
2502 * credits for inode change during write.
2504 [DTO_WRITE_BASE] = 3,
2506 * credits for single block write.
2508 [DTO_WRITE_BLOCK] = 14,
2510 * Attr set credits for chown.
2511 * These are extra credits for setattr, and they are zero without quota
2513 [DTO_ATTR_SET_CHOWN] = 0
2516 static const struct dt_device_operations osd_dt_ops = {
2517 .dt_root_get = osd_root_get,
2518 .dt_statfs = osd_statfs,
2519 .dt_trans_create = osd_trans_create,
2520 .dt_trans_start = osd_trans_start,
2521 .dt_trans_stop = osd_trans_stop,
2522 .dt_trans_cb_add = osd_trans_cb_add,
2523 .dt_conf_get = osd_conf_get,
2524 .dt_mnt_sb_get = osd_mnt_sb_get,
2525 .dt_sync = osd_sync,
2527 .dt_commit_async = osd_commit_async,
2530 static void osd_read_lock(const struct lu_env *env, struct dt_object *dt,
2533 struct osd_object *obj = osd_dt_obj(dt);
2534 struct osd_thread_info *oti = osd_oti_get(env);
2536 LINVRNT(osd_invariant(obj));
2538 LASSERT(obj->oo_owner != env);
2539 down_read_nested(&obj->oo_sem, role);
2541 LASSERT(obj->oo_owner == NULL);
2545 static void osd_write_lock(const struct lu_env *env, struct dt_object *dt,
2548 struct osd_object *obj = osd_dt_obj(dt);
2549 struct osd_thread_info *oti = osd_oti_get(env);
2551 LINVRNT(osd_invariant(obj));
2553 LASSERT(obj->oo_owner != env);
2554 down_write_nested(&obj->oo_sem, role);
2556 LASSERT(obj->oo_owner == NULL);
2557 obj->oo_owner = env;
2561 static void osd_read_unlock(const struct lu_env *env, struct dt_object *dt)
2563 struct osd_object *obj = osd_dt_obj(dt);
2564 struct osd_thread_info *oti = osd_oti_get(env);
2566 LINVRNT(osd_invariant(obj));
2568 LASSERT(oti->oti_r_locks > 0);
2570 up_read(&obj->oo_sem);
2573 static void osd_write_unlock(const struct lu_env *env, struct dt_object *dt)
2575 struct osd_object *obj = osd_dt_obj(dt);
2576 struct osd_thread_info *oti = osd_oti_get(env);
2578 LINVRNT(osd_invariant(obj));
2580 LASSERT(obj->oo_owner == env);
2581 LASSERT(oti->oti_w_locks > 0);
2583 obj->oo_owner = NULL;
2584 up_write(&obj->oo_sem);
2587 static int osd_write_locked(const struct lu_env *env, struct dt_object *dt)
2589 struct osd_object *obj = osd_dt_obj(dt);
2591 LINVRNT(osd_invariant(obj));
2593 return obj->oo_owner == env;
2596 static void osd_inode_getattr(const struct lu_env *env,
2597 struct inode *inode, struct lu_attr *attr)
2599 attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
2600 LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
2601 LA_PROJID | LA_FLAGS | LA_NLINK | LA_RDEV |
2602 LA_BLKSIZE | LA_TYPE | LA_BTIME;
2604 attr->la_atime = inode->i_atime.tv_sec;
2605 attr->la_mtime = inode->i_mtime.tv_sec;
2606 attr->la_ctime = inode->i_ctime.tv_sec;
2607 attr->la_btime = LDISKFS_I(inode)->i_crtime.tv_sec;
2608 attr->la_mode = inode->i_mode;
2609 attr->la_size = i_size_read(inode);
2610 attr->la_blocks = inode->i_blocks;
2611 attr->la_uid = i_uid_read(inode);
2612 attr->la_gid = i_gid_read(inode);
2613 attr->la_projid = i_projid_read(inode);
2614 attr->la_flags = ll_inode_to_ext_flags(inode->i_flags);
2615 attr->la_nlink = inode->i_nlink;
2616 attr->la_rdev = inode->i_rdev;
2617 attr->la_blksize = 1 << inode->i_blkbits;
2618 attr->la_blkbits = inode->i_blkbits;
2620 * Ext4 does not transfer the inherit flags from the raw inode
2621 * to the in-memory inode flags, and internally tests the raw inode
2622 * @i_flags directly. Instead of patching ext4, we do it here.
2624 if (LDISKFS_I(inode)->i_flags & LUSTRE_PROJINHERIT_FL)
2625 attr->la_flags |= LUSTRE_PROJINHERIT_FL;
2628 static int osd_dirent_count(const struct lu_env *env, struct dt_object *dt,
2631 struct osd_object *obj = osd_dt_obj(dt);
2632 const struct dt_it_ops *iops;
2638 LASSERT(S_ISDIR(obj->oo_inode->i_mode));
2639 LASSERT(fid_is_namespace_visible(lu_object_fid(&obj->oo_dt.do_lu)));
2641 if (obj->oo_dirent_count != LU_DIRENT_COUNT_UNSET) {
2642 *count = obj->oo_dirent_count;
2646 /* directory not initialized yet */
2647 if (!dt->do_index_ops) {
2652 iops = &dt->do_index_ops->dio_it;
2653 it = iops->init(env, dt, LUDA_64BITHASH);
2655 RETURN(PTR_ERR(it));
2657 rc = iops->load(env, it, 0);
2659 if (rc == -ENODATA) {
2666 rc = iops->next(env, it);
2668 for (*count = 0; rc == 0 || rc == -ESTALE; rc = iops->next(env, it)) {
2672 if (iops->key_size(env, it) == 0)
2678 obj->oo_dirent_count = *count;
2683 iops->fini(env, it);
2688 static int osd_attr_get(const struct lu_env *env, struct dt_object *dt,
2689 struct lu_attr *attr)
2691 struct osd_object *obj = osd_dt_obj(dt);
2694 if (unlikely(!dt_object_exists(dt)))
2696 if (unlikely(obj->oo_destroyed))
2699 LASSERT(!dt_object_remote(dt));
2700 LINVRNT(osd_invariant(obj));
2702 spin_lock(&obj->oo_guard);
2703 osd_inode_getattr(env, obj->oo_inode, attr);
2704 if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL) {
2705 attr->la_valid |= LA_FLAGS;
2706 attr->la_flags |= LUSTRE_ORPHAN_FL;
2708 if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL) {
2709 attr->la_valid |= LA_FLAGS;
2710 attr->la_flags |= LUSTRE_ENCRYPT_FL;
2712 spin_unlock(&obj->oo_guard);
2714 if (S_ISDIR(obj->oo_inode->i_mode) &&
2715 fid_is_namespace_visible(lu_object_fid(&dt->do_lu)))
2716 rc = osd_dirent_count(env, dt, &attr->la_dirent_count);
2721 static int osd_declare_attr_qid(const struct lu_env *env,
2722 struct osd_object *obj,
2723 struct osd_thandle *oh, long long bspace,
2724 qid_t old_id, qid_t new_id, bool enforce,
2725 unsigned int type, bool ignore_edquot)
2728 struct osd_thread_info *info = osd_oti_get(env);
2729 struct lquota_id_info *qi = &info->oti_qi;
2731 qi->lqi_type = type;
2732 /* inode accounting */
2733 qi->lqi_is_blk = false;
2735 /* one more inode for the new id ... */
2736 qi->lqi_id.qid_uid = new_id;
2738 /* Reserve credits for the new id */
2739 rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
2740 if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2745 /* and one less inode for the current id */
2746 qi->lqi_id.qid_uid = old_id;
2748 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2749 if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2754 /* block accounting */
2755 qi->lqi_is_blk = true;
2757 /* more blocks for the new id ... */
2758 qi->lqi_id.qid_uid = new_id;
2759 qi->lqi_space = bspace;
2761 * Credits for the new uid have been reserved; re-use "obj"
2762 * to save credit reservation.
2764 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2765 if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2770 /* and finally fewer blocks for the current uid */
2771 qi->lqi_id.qid_uid = old_id;
2772 qi->lqi_space = -bspace;
2773 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2774 if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2780 static int osd_declare_attr_set(const struct lu_env *env,
2781 struct dt_object *dt,
2782 const struct lu_attr *attr,
2783 struct thandle *handle)
2785 struct osd_thandle *oh;
2786 struct osd_object *obj;
2795 LASSERT(dt != NULL);
2796 LASSERT(handle != NULL);
2798 obj = osd_dt_obj(dt);
2799 LASSERT(osd_invariant(obj));
2801 oh = container_of(handle, struct osd_thandle, ot_super);
2802 LASSERT(oh->ot_handle == NULL);
2804 osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
2805 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2807 osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
2808 osd_dto_credits_noquota[DTO_XATTR_SET]);
2810 if (attr == NULL || obj->oo_inode == NULL)
2813 bspace = obj->oo_inode->i_blocks << 9;
2814 bspace = toqb(bspace);
2817 * Changing ownership is always performed by the super user; it should not
2818 * fail with EDQUOT unless explicitly required.
2820 * We still need to call the osd_declare_qid() to calculate the journal
2821 * credits for updating quota accounting files and to trigger quota
2822 * space adjustment once the operation is completed.
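 *
 * For example (a sketch): a chown that changes both UID and GID declares,
 * via the osd_declare_attr_qid() calls below, an inode and block transfer to
 * the new UID and GID plus the corresponding release from the old ones.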
2824 if (attr->la_valid & LA_UID || attr->la_valid & LA_GID) {
2825 bool ignore_edquot = !(attr->la_flags & LUSTRE_SET_SYNC_FL);
2828 CDEBUG(D_QUOTA, "%s: enforce quota on UID %u, GID %u"
2829 "(the quota space is %lld)\n",
2830 obj->oo_inode->i_sb->s_id, attr->la_uid,
2831 attr->la_gid, bspace);
2834 uid = i_uid_read(obj->oo_inode);
2835 enforce = (attr->la_valid & LA_UID) && (attr->la_uid != uid);
2836 rc = osd_declare_attr_qid(env, obj, oh, bspace, uid,
2837 attr->la_uid, enforce, USRQUOTA,
2842 gid = i_gid_read(obj->oo_inode);
2843 CDEBUG(D_QUOTA, "declare uid %d -> %d gid %d -> %d\n", uid,
2844 attr->la_uid, gid, attr->la_gid);
2845 enforce = (attr->la_valid & LA_GID) && (attr->la_gid != gid);
2846 rc = osd_declare_attr_qid(env, obj, oh, bspace, gid,
2847 attr->la_gid, enforce, GRPQUOTA,
2853 #ifdef HAVE_PROJECT_QUOTA
2854 if (attr->la_valid & LA_PROJID) {
2855 __u32 projid = i_projid_read(obj->oo_inode);
2857 enforce = (attr->la_valid & LA_PROJID) &&
2858 (attr->la_projid != projid);
2859 rc = osd_declare_attr_qid(env, obj, oh, bspace,
2860 (qid_t)projid, (qid_t)attr->la_projid,
2861 enforce, PRJQUOTA, true);
2869 static int osd_inode_setattr(const struct lu_env *env,
2870 struct inode *inode, const struct lu_attr *attr)
2872 __u64 bits = attr->la_valid;
2874 /* Only allow setting the size for regular files */
2875 if (!S_ISREG(inode->i_mode))
2876 bits &= ~(LA_SIZE | LA_BLOCKS);
2881 if (bits & LA_ATIME)
2882 inode->i_atime = osd_inode_time(inode, attr->la_atime);
2883 if (bits & LA_CTIME)
2884 inode->i_ctime = osd_inode_time(inode, attr->la_ctime);
2885 if (bits & LA_MTIME)
2886 inode->i_mtime = osd_inode_time(inode, attr->la_mtime);
2887 if (bits & LA_SIZE) {
2888 spin_lock(&inode->i_lock);
2889 LDISKFS_I(inode)->i_disksize = attr->la_size;
2890 i_size_write(inode, attr->la_size);
2891 spin_unlock(&inode->i_lock);
2895 * OSD should not change "i_blocks" which is used by quota.
2896 * "i_blocks" should be changed by ldiskfs only.
2899 inode->i_mode = (inode->i_mode & S_IFMT) |
2900 (attr->la_mode & ~S_IFMT);
2902 i_uid_write(inode, attr->la_uid);
2904 i_gid_write(inode, attr->la_gid);
2905 if (bits & LA_PROJID)
2906 i_projid_write(inode, attr->la_projid);
2907 if (bits & LA_NLINK)
2908 set_nlink(inode, attr->la_nlink);
2910 inode->i_rdev = attr->la_rdev;
2912 if (bits & LA_FLAGS) {
2913 /* always keep S_NOCMTIME */
2914 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
2917 * Ext4 does not transfer the inherit flags from
2918 * @inode->i_flags to the raw inode i_flags when writing
2919 * flags, so we do it explicitly here.
2921 if (attr->la_flags & LUSTRE_PROJINHERIT_FL)
2922 LDISKFS_I(inode)->i_flags |= LUSTRE_PROJINHERIT_FL;
2924 LDISKFS_I(inode)->i_flags &= ~LUSTRE_PROJINHERIT_FL;
2929 #ifdef HAVE_PROJECT_QUOTA
2930 static int osd_transfer_project(struct inode *inode, __u32 projid,
2931 struct thandle *handle)
2933 struct super_block *sb = inode->i_sb;
2934 struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
2937 struct ldiskfs_iloc iloc;
2938 struct ldiskfs_inode *raw_inode;
2939 struct dquot *transfer_to[LDISKFS_MAXQUOTAS] = { };
2941 if (!ldiskfs_has_feature_project(sb)) {
2942 LASSERT(__kprojid_val(LDISKFS_I(inode)->i_projid)
2943 == LDISKFS_DEF_PROJID);
2944 if (projid != LDISKFS_DEF_PROJID)
2950 if (LDISKFS_INODE_SIZE(sb) <= LDISKFS_GOOD_OLD_INODE_SIZE)
2953 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
2954 if (projid_eq(kprojid, LDISKFS_I(inode)->i_projid))
2957 err = ldiskfs_get_inode_loc(inode, &iloc);
2961 raw_inode = ldiskfs_raw_inode(&iloc);
2962 if (!LDISKFS_FITS_IN_INODE(raw_inode, ei, i_projid)) {
2963 struct osd_thandle *oh =
2964 container_of0(handle, struct osd_thandle,
2967 * try to expand inode size automatically.
2969 ldiskfs_mark_inode_dirty(oh->ot_handle, inode);
2970 if (!LDISKFS_FITS_IN_INODE(raw_inode, ei, i_projid)) {
2978 dquot_initialize(inode);
2979 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2980 if (transfer_to[PRJQUOTA]) {
2981 err = __dquot_transfer(inode, transfer_to);
2982 dqput(transfer_to[PRJQUOTA]);
2991 static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr,
2992 struct thandle *handle)
2996 if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
2997 (attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
3001 "executing dquot_transfer inode %ld uid %d -> %d gid %d -> %d\n",
3002 inode->i_ino, i_uid_read(inode), attr->la_uid,
3003 i_gid_read(inode), attr->la_gid);
3005 dquot_initialize(inode);
3007 if (attr->la_valid & LA_UID)
3008 iattr.ia_valid |= ATTR_UID;
3009 if (attr->la_valid & LA_GID)
3010 iattr.ia_valid |= ATTR_GID;
3011 iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
3012 iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);
3014 rc = dquot_transfer(inode, &iattr);
3016 CERROR("%s: quota transfer failed: rc = %d. Is quota "
3017 "enforcement enabled on the ldiskfs "
3018 "filesystem?\n", inode->i_sb->s_id, rc);
3023 /* Handle project id transfer here properly */
3024 if (attr->la_valid & LA_PROJID &&
3025 attr->la_projid != i_projid_read(inode)) {
3026 #ifdef HAVE_PROJECT_QUOTA
3027 rc = osd_transfer_project(inode, attr->la_projid, handle);
3032 CERROR("%s: quota transfer failed: rc = %d. Is project "
3033 "enforcement enabled on the ldiskfs "
3034 "filesystem?\n", inode->i_sb->s_id, rc);
3041 static int osd_attr_set(const struct lu_env *env,
3042 struct dt_object *dt,
3043 const struct lu_attr *attr,
3044 struct thandle *handle)
3046 struct osd_object *obj = osd_dt_obj(dt);
3047 struct inode *inode;
3050 if (!dt_object_exists(dt))
3053 LASSERT(handle != NULL);
3054 LASSERT(!dt_object_remote(dt));
3055 LASSERT(osd_invariant(obj));
3057 osd_trans_exec_op(env, handle, OSD_OT_ATTR_SET);
3059 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING) &&
3060 !osd_obj2dev(obj)->od_is_ost) {
3061 struct osd_thread_info *oti = osd_oti_get(env);
3062 const struct lu_fid *fid0 = lu_object_fid(&dt->do_lu);
3063 struct lu_fid *fid1 = &oti->oti_fid;
3064 struct osd_inode_id *id = &oti->oti_id;
3065 struct iam_path_descr *ipd;
3066 struct iam_container *bag;
3067 struct osd_thandle *oh;
3070 fid_cpu_to_be(fid1, fid0);
3071 memset(id, 1, sizeof(*id));
3072 bag = &osd_fid2oi(osd_dev(dt->do_lu.lo_dev),
3073 fid0)->oi_dir.od_container;
3074 ipd = osd_idx_ipd_get(env, bag);
3075 if (unlikely(ipd == NULL))
3078 oh = container_of(handle, struct osd_thandle, ot_super);
3079 rc = iam_update(oh->ot_handle, bag,
3080 (const struct iam_key *)fid1,
3081 (const struct iam_rec *)id, ipd);
3082 osd_ipd_put(env, bag, ipd);
3083 return(rc > 0 ? 0 : rc);
3086 inode = obj->oo_inode;
3088 rc = osd_quota_transfer(inode, attr, handle);
3092 spin_lock(&obj->oo_guard);
3093 rc = osd_inode_setattr(env, inode, attr);
3094 spin_unlock(&obj->oo_guard);
3098 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
3100 osd_trans_exec_check(env, handle, OSD_OT_ATTR_SET);
3102 if (!(attr->la_valid & LA_FLAGS))
3105 /* Let's check if there are extra flags that need to be set into LMA */
3106 if (attr->la_flags & LUSTRE_LMA_FL_MASKS) {
3107 struct osd_thread_info *info = osd_oti_get(env);
3108 struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
3110 LASSERT(!obj->oo_pfid_in_lma);
3112 rc = osd_get_lma(info, inode, &info->oti_obj_dentry,
3113 &info->oti_ost_attrs);
3117 lma->lma_incompat |=
3118 lustre_to_lma_flags(attr->la_flags);
3119 lustre_lma_swab(lma);
3121 osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
3123 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA,
3124 lma, sizeof(*lma), XATTR_REPLACE);
3126 struct osd_device *osd = osd_obj2dev(obj);
3128 CWARN("%s: set "DFID" lma flags %u failed: rc = %d\n",
3129 osd_name(osd), PFID(lu_object_fid(&dt->do_lu)),
3130 lma->lma_incompat, rc);
3133 attr->la_flags & LUSTRE_LMA_FL_MASKS;
3135 osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
3142 static struct dentry *osd_child_dentry_get(const struct lu_env *env,
3143 struct osd_object *obj,
3144 const char *name, const int namelen)
3146 return osd_child_dentry_by_inode(env, obj->oo_inode, name, namelen);
3149 static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
3150 umode_t mode, struct dt_allocation_hint *hint,
3151 struct thandle *th, struct lu_attr *attr)
3154 struct osd_device *osd = osd_obj2dev(obj);
3155 struct osd_thandle *oth;
3156 struct dt_object *parent = NULL;
3157 struct inode *inode;
3158 uid_t owner[2] = {0, 0};
3160 if (attr->la_valid & LA_UID)
3161 owner[0] = attr->la_uid;
3162 if (attr->la_valid & LA_GID)
3163 owner[1] = attr->la_gid;
3165 LINVRNT(osd_invariant(obj));
3166 LASSERT(obj->oo_inode == NULL);
3167 LASSERT(obj->oo_hl_head == NULL);
3169 if (S_ISDIR(mode) && ldiskfs_pdo) {
3171 ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
3172 if (obj->oo_hl_head == NULL)
3176 oth = container_of(th, struct osd_thandle, ot_super);
3177 LASSERT(oth->ot_handle->h_transaction != NULL);
3179 if (hint != NULL && hint->dah_parent != NULL &&
3180 !dt_object_remote(hint->dah_parent))
3181 parent = hint->dah_parent;
3183 inode = ldiskfs_create_inode(oth->ot_handle,
3184 parent ? osd_dt_obj(parent)->oo_inode :
3185 osd_sb(osd)->s_root->d_inode,
3187 if (!IS_ERR(inode)) {
3188 /* Do not update file c/mtime in ldiskfs. */
3189 inode->i_flags |= S_NOCMTIME;
3192 * For a newly created object, it must be consistent,
3193 * and it is unnecessary to scrub it.
3195 ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_NOSCRUB);
3197 obj->oo_inode = inode;
3200 if (obj->oo_hl_head != NULL) {
3201 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
3202 obj->oo_hl_head = NULL;
3204 result = PTR_ERR(inode);
3206 LINVRNT(osd_invariant(obj));
3214 static int osd_mkdir(struct osd_thread_info *info, struct osd_object *obj,
3215 struct lu_attr *attr,
3216 struct dt_allocation_hint *hint,
3217 struct dt_object_format *dof,
3221 struct osd_thandle *oth;
3222 __u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX | S_ISGID));
3224 LASSERT(S_ISDIR(attr->la_mode));
3226 oth = container_of(th, struct osd_thandle, ot_super);
3227 LASSERT(oth->ot_handle->h_transaction != NULL);
3228 if (fid_is_namespace_visible(lu_object_fid(&obj->oo_dt.do_lu)))
3229 obj->oo_dirent_count = 0;
3230 result = osd_mkfile(info, obj, mode, hint, th, attr);
3235 static int osd_mk_index(struct osd_thread_info *info, struct osd_object *obj,
3236 struct lu_attr *attr,
3237 struct dt_allocation_hint *hint,
3238 struct dt_object_format *dof,
3242 struct osd_thandle *oth;
3243 const struct dt_index_features *feat = dof->u.dof_idx.di_feat;
3245 __u32 mode = (attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX));
3247 LASSERT(S_ISREG(attr->la_mode));
3249 oth = container_of(th, struct osd_thandle, ot_super);
3250 LASSERT(oth->ot_handle->h_transaction != NULL);
3252 result = osd_mkfile(info, obj, mode, hint, th, attr);
3254 LASSERT(obj->oo_inode != NULL);
3255 if (feat->dif_flags & DT_IND_VARKEY)
3256 result = iam_lvar_create(obj->oo_inode,
3257 feat->dif_keysize_max,
3259 feat->dif_recsize_max,
3262 result = iam_lfix_create(obj->oo_inode,
3263 feat->dif_keysize_max,
3265 feat->dif_recsize_max,
3271 static int osd_mkreg(struct osd_thread_info *info, struct osd_object *obj,
3272 struct lu_attr *attr,
3273 struct dt_allocation_hint *hint,
3274 struct dt_object_format *dof,
3277 LASSERT(S_ISREG(attr->la_mode));
3278 return osd_mkfile(info, obj, (attr->la_mode &
3279 (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th,
3283 static int osd_mksym(struct osd_thread_info *info, struct osd_object *obj,
3284 struct lu_attr *attr,
3285 struct dt_allocation_hint *hint,
3286 struct dt_object_format *dof,
3289 LASSERT(S_ISLNK(attr->la_mode));
3290 return osd_mkfile(info, obj, (attr->la_mode &
3291 (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th,
3295 static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
3296 struct lu_attr *attr,
3297 struct dt_allocation_hint *hint,
3298 struct dt_object_format *dof,
3301 umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
3304 LINVRNT(osd_invariant(obj));
3305 LASSERT(obj->oo_inode == NULL);
3306 LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
3307 S_ISFIFO(mode) || S_ISSOCK(mode));
3309 result = osd_mkfile(info, obj, mode, hint, th, attr);
3311 LASSERT(obj->oo_inode != NULL);
3313 * This inode should be marked dirty for i_rdev. Currently
3314 * that is done in the osd_attr_init().
3316 init_special_inode(obj->oo_inode, obj->oo_inode->i_mode,
3319 LINVRNT(osd_invariant(obj));
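/*
 * The osd_mk* helpers above are selected by dt_object_format type in
 * osd_create_type_f() below; the assumed mapping (a sketch) is
 * DFT_DIR -> osd_mkdir, DFT_INDEX -> osd_mk_index, DFT_REGULAR -> osd_mkreg,
 * DFT_SYM -> osd_mksym and DFT_NODE -> osd_mknod.
 */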
3323 typedef int (*osd_obj_type_f)(struct osd_thread_info *, struct osd_object *,
3325 struct dt_allocation_hint *hint,
3326 struct dt_object_format *dof,
3329 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
3331 osd_obj_type_f result;
3347 result = osd_mk_index;
3358 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
3359 struct dt_object *parent, struct dt_object *child,
3364 ah->dah_parent = parent;
3365 ah->dah_mode = child_mode;
3367 if (parent != NULL && !dt_object_remote(parent)) {
3368 /* will help to find FID->ino at dt_insert("..") */
3369 struct osd_object *pobj = osd_dt_obj(parent);
3371 osd_idc_find_and_init(env, osd_obj2dev(pobj), pobj);
3375 static void osd_attr_init(struct osd_thread_info *info, struct osd_object *obj,
3376 struct lu_attr *attr, struct dt_object_format *dof,
3377 struct thandle *handle)
3379 struct inode *inode = obj->oo_inode;
3380 __u64 valid = attr->la_valid;
3383 attr->la_valid &= ~(LA_TYPE | LA_MODE);
3385 if (dof->dof_type != DFT_NODE)
3386 attr->la_valid &= ~LA_RDEV;
3387 if ((valid & LA_ATIME) && (attr->la_atime == inode->i_atime.tv_sec))
3388 attr->la_valid &= ~LA_ATIME;
3389 if ((valid & LA_CTIME) && (attr->la_ctime == inode->i_ctime.tv_sec))
3390 attr->la_valid &= ~LA_CTIME;
3391 if ((valid & LA_MTIME) && (attr->la_mtime == inode->i_mtime.tv_sec))
3392 attr->la_valid &= ~LA_MTIME;
3394 result = osd_quota_transfer(inode, attr, handle);
3398 if (attr->la_valid != 0) {
3399 result = osd_inode_setattr(info->oti_env, inode, attr);
3401 * The osd_inode_setattr() should always succeed here. The
3402 * only error that could be returned is EDQUOT when we are
3403 * trying to change the UID or GID of the inode. However, this
3404 * should not happen since quota enforcement is no longer
3405 * enabled on ldiskfs (lquota takes care of it).
3407 LASSERTF(result == 0, "%d\n", result);
3408 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
3411 attr->la_valid = valid;
3415 * Helper function for osd_create()
3417 * \retval 0, on success
3419 static int __osd_create(struct osd_thread_info *info, struct osd_object *obj,
3420 struct lu_attr *attr, struct dt_allocation_hint *hint,
3421 struct dt_object_format *dof, struct thandle *th)
3426 osd_trans_exec_op(info->oti_env, th, OSD_OT_CREATE);
3428 /* we drop umask so that permissions we pass are not affected */
3429 umask = current->fs->umask;
3430 current->fs->umask = 0;
3432 result = osd_create_type_f(dof->dof_type)(info, obj, attr, hint, dof,
3434 if (likely(obj->oo_inode != NULL)) {
3435 LASSERT(obj->oo_inode->i_state & I_NEW);
3438 * Unlock the inode before attr initialization to avoid
3439 * unnecessary dqget operations. LU-6378
3441 unlock_new_inode(obj->oo_inode);
3444 if (likely(result == 0)) {
3445 osd_attr_init(info, obj, attr, dof, th);
3446 osd_object_init0(obj);
3449 /* restore previous umask value */
3450 current->fs->umask = umask;
3452 osd_trans_exec_check(info->oti_env, th, OSD_OT_CREATE);
3458 * Helper function for osd_create()
3460 * \retval 0, on success
3462 static int __osd_oi_insert(const struct lu_env *env, struct osd_object *obj,
3463 const struct lu_fid *fid, struct thandle *th)
3465 struct osd_thread_info *info = osd_oti_get(env);
3466 struct osd_inode_id *id = &info->oti_id;
3467 struct osd_device *osd = osd_obj2dev(obj);
3468 struct osd_thandle *oh;
3471 LASSERT(obj->oo_inode != NULL);
3473 oh = container_of(th, struct osd_thandle, ot_super);
3474 LASSERT(oh->ot_handle);
3475 osd_trans_exec_op(env, th, OSD_OT_INSERT);
3477 osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
3478 rc = osd_oi_insert(info, osd, fid, id, oh->ot_handle,
3479 OI_CHECK_FLD, NULL);
3480 if (CFS_FAIL_CHECK(OBD_FAIL_OSD_DUPLICATE_MAP) && osd->od_is_ost) {
3481 struct lu_fid next_fid = *fid;
3483 /* insert next object in advance, and map to the same inode */
3485 if (next_fid.f_oid != 0) {
3486 osd_trans_exec_op(env, th, OSD_OT_INSERT);
3487 osd_oi_insert(info, osd, &next_fid, id, oh->ot_handle,
3488 OI_CHECK_FLD, NULL);
3489 osd_trans_exec_check(env, th, OSD_OT_INSERT);
3493 osd_trans_exec_check(env, th, OSD_OT_INSERT);
3498 int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
3499 u64 seq, struct lu_seq_range *range)
3501 struct seq_server_site *ss = osd_seq_site(osd);
3503 if (fid_seq_is_idif(seq)) {
3504 fld_range_set_ost(range);
3505 range->lsr_index = idif_ost_idx(seq);
3509 if (!fid_seq_in_fldb(seq)) {
3510 fld_range_set_mdt(range);
3513 * FIXME: if ss is NULL, we are not supposed to get lsr_index at all
3516 range->lsr_index = ss->ss_node_id;
3520 LASSERT(ss != NULL);
3521 fld_range_set_any(range);
3522 /* OSD will only do local fld lookup */
3523 return fld_local_lookup(env, ss->ss_server_fld, seq, range);
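/*
 * In short (a sketch): an IDIF sequence maps directly to an OST index, a
 * sequence outside the FLDB range is served with this node's MDT index, and
 * anything else requires the local FLDB lookup above.
 */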
3526 static int osd_declare_create(const struct lu_env *env, struct dt_object *dt,
3527 struct lu_attr *attr,
3528 struct dt_allocation_hint *hint,
3529 struct dt_object_format *dof,
3530 struct thandle *handle)
3532 struct osd_thandle *oh;
3537 LASSERT(handle != NULL);
3539 oh = container_of(handle, struct osd_thandle, ot_super);
3540 LASSERT(oh->ot_handle == NULL);
3543 * EA object consumes more credits than regular object: osd_mk_index
3544 * vs. osd_mkreg: osd_mk_index will create 2 blocks for root_node and
3545 * leaf_node, which could involve the block, block bitmap, group and GDT
3546 * changes for each block, so add 4 * 2 credits in that case.
3548 osd_trans_declare_op(env, oh, OSD_OT_CREATE,
3549 osd_dto_credits_noquota[DTO_OBJECT_CREATE] +
3550 ((dof->dof_type == DFT_INDEX) ? 4 * 2 : 0));
3552 * Reusing an idle OI block may cause one additional OI block to be changed
3555 osd_trans_declare_op(env, oh, OSD_OT_INSERT,
3556 osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
3557 if (CFS_FAIL_CHECK(OBD_FAIL_OSD_DUPLICATE_MAP))
3558 osd_trans_declare_op(env, oh, OSD_OT_INSERT,
3559 osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
3561 /* will help to find FID->ino mapping at dt_insert() */
3562 rc = osd_idc_find_and_init(env, osd_obj2dev(osd_dt_obj(dt)),
3570 rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid,
3571 attr->la_projid, 1, oh, osd_dt_obj(dt),
3572 NULL, OSD_QID_INODE);
3580 * Called to destroy on-disk representation of the object
3582 * Concurrency: must be locked
3584 static int osd_declare_destroy(const struct lu_env *env, struct dt_object *dt,
3587 struct osd_object *obj = osd_dt_obj(dt);
3588 struct inode *inode = obj->oo_inode;
3589 struct osd_thandle *oh;
3597 oh = container_of(th, struct osd_thandle, ot_super);
3598 LASSERT(oh->ot_handle == NULL);
3600 osd_trans_declare_op(env, oh, OSD_OT_DESTROY,
3601 osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
3603 /* For removing agent entry */
3604 if (lu_object_has_agent_entry(&obj->oo_dt.do_lu))
3605 oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
3608 * Recycling an idle OI leaf may cause three additional OI blocks to be changed
3611 if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LOST_MDTOBJ2))
3612 osd_trans_declare_op(env, oh, OSD_OT_DELETE,
3613 osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
3614 /* one less inode */
3615 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
3616 i_projid_read(inode), -1, oh, obj, NULL,
3620 /* data to be truncated */
3621 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
3622 i_projid_read(inode), 0, oh, obj, NULL,
3628 * will help to find the FID->ino mapping while this object is being destroyed
3631 rc = osd_idc_find_and_init(env, osd_obj2dev(obj), obj);
3636 static int osd_destroy(const struct lu_env *env, struct dt_object *dt,
3639 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
3640 struct osd_object *obj = osd_dt_obj(dt);
3641 struct inode *inode = obj->oo_inode;
3642 struct osd_device *osd = osd_obj2dev(obj);
3643 struct osd_thandle *oh;
3648 oh = container_of(th, struct osd_thandle, ot_super);
3649 LASSERT(oh->ot_handle);
3651 LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
3653 if (unlikely(fid_is_acct(fid)))
3656 if (lu_object_has_agent_entry(&obj->oo_dt.do_lu)) {
3657 result = osd_delete_from_remote_parent(env, osd, obj, oh, true);
3659 CERROR("%s: remove agent entry "DFID": rc = %d\n",
3660 osd_name(osd), PFID(fid), result);
3663 if (S_ISDIR(inode->i_mode)) {
3664 LASSERT(osd_inode_unlinked(inode) || inode->i_nlink == 1 ||
3665 inode->i_nlink == 2);
3667 spin_lock(&obj->oo_guard);
3669 spin_unlock(&obj->oo_guard);
3670 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
3673 osd_trans_exec_op(env, th, OSD_OT_DESTROY);
3675 ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
3677 if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LOST_MDTOBJ2))
3678 result = osd_oi_delete(osd_oti_get(env), osd, fid,
3679 oh->ot_handle, OI_CHECK_FLD);
3681 osd_trans_exec_check(env, th, OSD_OT_DESTROY);
3682 /* XXX: add to ext3 orphan list */
3683 /* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
3685 /* not needed in the cache anymore */
3686 set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
3687 obj->oo_destroyed = 1;
3693 * Put the fid into lustre_mdt_attrs, and then place the structure in the
3694 * inode's EA. This fid should not be altered during the lifetime of the object.
3697 * \retval +ve, on success
3698 * \retval -ve, on error
3700 * FIXME: It is good to have/use ldiskfs_xattr_set_handle() here
3702 int osd_ea_fid_set(struct osd_thread_info *info, struct inode *inode,
3703 const struct lu_fid *fid, __u32 compat, __u32 incompat)
3705 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
3706 struct lustre_mdt_attrs *lma = &loa->loa_lma;
3711 if (OBD_FAIL_CHECK(OBD_FAIL_FID_INLMA))
3714 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_OST_EA_FID_SET))
3717 lustre_loa_init(loa, fid, compat, incompat);
3718 lustre_loa_swab(loa, false);
3721 * For the OST device with 256 bytes inode size by default,
3722 * the PFID EA will be stored together with LMA EA to avoid
3723 * performance trouble. Otherwise the PFID EA can be stored
3724 * independently. LU-8998
3726 if ((compat & LMAC_FID_ON_OST) &&
3727 LDISKFS_INODE_SIZE(inode->i_sb) <= 256)
3728 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, loa,
3729 sizeof(*loa), XATTR_CREATE);
3731 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
3732 sizeof(*lma), XATTR_CREATE);
3734 * LMA may already exist, but we need to check that all the
3735 * desired compat/incompat flags have been added.
3737 if (unlikely(rc == -EEXIST)) {
3738 rc = __osd_xattr_get(inode, &info->oti_obj_dentry,
3739 XATTR_NAME_LMA, (void *)loa, sizeof(*loa));
3743 if (rc < sizeof(*lma))
3746 lustre_loa_swab(loa, true);
3747 if (lu_fid_eq(fid, &lma->lma_self_fid) &&
3748 ((compat == 0 && incompat == 0) ||
3749 (!(~lma->lma_compat & compat) &&
3750 !(~lma->lma_incompat & incompat))))
3753 lma->lma_self_fid = *fid;
3754 lma->lma_compat |= compat;
3755 lma->lma_incompat |= incompat;
3756 if (rc == sizeof(*lma)) {
3757 lustre_lma_swab(lma);
3758 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
3759 sizeof(*lma), XATTR_REPLACE);
3761 lustre_loa_swab(loa, false);
3762 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, loa,
3763 sizeof(*loa), XATTR_REPLACE);
3771 * ldiskfs supports a fid in the dirent; it is passed in dentry->d_fsdata.
3772 * lustre 1.8 also uses d_fsdata for passing other info to ldiskfs.
3773 * To be compatible with the 1.8 ldiskfs driver we need to have a
3774 * magic number at the start of the fid data.
3775 * \ldiskfs_dentry_param is used only to pass the fid from osd to ldiskfs.
3778 static void osd_get_ldiskfs_dirent_param(struct ldiskfs_dentry_param *param,
3779 const struct lu_fid *fid)
3781 if (!fid_is_namespace_visible(fid) ||
3782 OBD_FAIL_CHECK(OBD_FAIL_FID_IGIF)) {
3783 param->edp_magic = 0;
3787 param->edp_magic = LDISKFS_LUFID_MAGIC;
3788 param->edp_len = sizeof(struct lu_fid) + 1;
3789 fid_cpu_to_be((struct lu_fid *)param->edp_data, (struct lu_fid *)fid);
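/*
 * So for a namespace-visible FID the dentry parameter carries (a sketch,
 * assuming a 16-byte struct lu_fid): edp_magic = LDISKFS_LUFID_MAGIC,
 * edp_len = 17 (the FID plus one length byte) and the FID itself in
 * big-endian form, which ldiskfs then stores in the directory entry.
 */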
3793 * Try to read the fid from inode ea into dt_rec.
3795 * \param fid object fid.
3797 * \retval 0 on success
3799 static int osd_ea_fid_get(const struct lu_env *env, struct osd_object *obj,
3800 __u32 ino, struct lu_fid *fid,
3801 struct osd_inode_id *id)
3803 struct osd_thread_info *info = osd_oti_get(env);
3804 struct inode *inode;
3808 osd_id_gen(id, ino, OSD_OII_NOGEN);
3809 inode = osd_iget_fid(info, osd_obj2dev(obj), id, fid);
3811 RETURN(PTR_ERR(inode));
3817 static int osd_add_dot_dotdot_internal(struct osd_thread_info *info,
3819 struct inode *parent_dir,
3820 const struct lu_fid *dot_fid,
3821 const struct lu_fid *dot_dot_fid,
3822 struct osd_thandle *oth)
3824 struct ldiskfs_dentry_param *dot_ldp;
3825 struct ldiskfs_dentry_param *dot_dot_ldp;
3826 __u32 saved_nlink = dir->i_nlink;
3829 dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
3830 osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
3832 dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
3833 dot_ldp->edp_magic = 0;
3835 rc = ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
3836 dir, dot_ldp, dot_dot_ldp);
3838 * The ldiskfs_add_dot_dotdot() may set dir->i_nlink to 2, then
3839 * the subsequent ref_add() will increase dir->i_nlink
3840 * to 3. That is incorrect for a newly created directory.
3842 * It looks like a hack, because we want to make the OSD API
3843 * order-independent for a newly created directory object
3844 * between the dt_insert(..) and ref_add() operations.
3846 * Here, we only restore the in-RAM dir-inode's nlink attr,
3847 * because if the nlink attr is not 2, then there will be a
3848 * ref_add() called following the dt_insert(..), and such a call
3849 * will make both the in-RAM and on-disk dir-inode's nlink
3850 * attr be set to 2. LU-7447
3852 set_nlink(dir, saved_nlink);
3857 * Create a local agent inode for a remote entry
3859 static struct inode *osd_create_local_agent_inode(const struct lu_env *env,
3860 struct osd_device *osd,
3861 struct osd_object *pobj,
3862 const struct lu_fid *fid,
3866 struct osd_thread_info *info = osd_oti_get(env);
3867 struct inode *local;
3868 struct osd_thandle *oh;
3874 oh = container_of(th, struct osd_thandle, ot_super);
3875 LASSERT(oh->ot_handle->h_transaction != NULL);
3877 local = ldiskfs_create_inode(oh->ot_handle, pobj->oo_inode, type,
3879 if (IS_ERR(local)) {
3880 CERROR("%s: create local error %d\n", osd_name(osd),
3881 (int)PTR_ERR(local));
3886 * restore i_gid in case S_ISGID is set; we will inherit S_ISGID and set
3887 * the correct gid on the remote file, not on the agent here
3889 local->i_gid = current_fsgid();
3890 ldiskfs_set_inode_state(local, LDISKFS_STATE_LUSTRE_NOSCRUB);
3892 /* e2fsck doesn't like empty symlinks. Store remote FID as symlink.
3893 * That gives e2fsck something to look at and be happy, and allows
3894 * debugging if we need to determine where this symlink came from.
3896 if (S_ISLNK(type)) {
3897 BUILD_BUG_ON(LDISKFS_N_BLOCKS * 4 < FID_LEN + 1);
3898 rc = snprintf((char *)LDISKFS_I(local)->i_data,
3899 LDISKFS_N_BLOCKS * 4, DFID, PFID(fid));
3901 i_size_write(local, rc);
3902 LDISKFS_I(local)->i_disksize = rc;
3904 unlock_new_inode(local);
3906 /* Agent inode should not have project ID */
3907 #ifdef HAVE_PROJECT_QUOTA
3908 if (LDISKFS_I(pobj->oo_inode)->i_flags & LUSTRE_PROJINHERIT_FL &&
3909 i_projid_read(pobj->oo_inode) != 0) {
3910 rc = osd_transfer_project(local, 0, th);
3912 CERROR("%s: quota transfer failed: rc = %d. Is project "
3913 "quota enforcement enabled on the ldiskfs "
3914 "filesystem?\n", local->i_sb->s_id, rc);
3915 RETURN(ERR_PTR(rc));
3919 /* Set special LMA flag for local agent inode */
3920 rc = osd_ea_fid_set(info, local, fid, 0, LMAI_AGENT);
3922 CERROR("%s: set LMA for "DFID" remote inode failed: rc = %d\n",
3923 osd_name(osd), PFID(fid), rc);
3924 RETURN(ERR_PTR(rc));
3930 rc = osd_add_dot_dotdot_internal(info, local, pobj->oo_inode,
3931 lu_object_fid(&pobj->oo_dt.do_lu),
3934 CERROR("%s: "DFID" add dot dotdot error: rc = %d\n",
3935 osd_name(osd), PFID(fid), rc);
3936 RETURN(ERR_PTR(rc));
3943 * When a direntry is deleted, we have to take care of a possible agent inode
3944 * referenced by it. Unfortunately we can't do this at that point:
3945 * iget() within a running transaction leads to deadlock, and we'd better
3946 * not call it on every delete declaration for performance reasons. So we put
3947 * the potential agent inode on a list and process it once the transaction
3948 * is over. Note it's not any worse in terms of real orphans, as regular
3949 * object destroy doesn't put inodes on the on-disk orphan list either. This
3950 * should be addressed separately.
3952 static int osd_schedule_agent_inode_removal(const struct lu_env *env,
3953 struct osd_thandle *oh,
3956 struct osd_device *osd = osd_dt_dev(oh->ot_super.th_dev);
3957 struct osd_obj_orphan *oor;
3964 oor->oor_env = (struct lu_env *)env;
3965 spin_lock(&osd->od_osfs_lock);
3966 list_add(&oor->oor_list, &osd->od_orphan_list);
3967 spin_unlock(&osd->od_osfs_lock);
3969 oh->ot_remove_agents = 1;
3975 static int osd_process_scheduled_agent_removals(const struct lu_env *env,
3976 struct osd_device *osd)
3978 struct osd_thread_info *info = osd_oti_get(env);
3979 struct osd_obj_orphan *oor, *tmp;
3980 struct osd_inode_id id;
3982 struct inode *inode;
3987 spin_lock(&osd->od_osfs_lock);
3988 list_for_each_entry_safe(oor, tmp, &osd->od_orphan_list, oor_list) {
3989 if (oor->oor_env == env)
3990 list_move(&oor->oor_list, &list);
3992 spin_unlock(&osd->od_osfs_lock);
3994 list_for_each_entry_safe(oor, tmp, &list, oor_list) {
3998 list_del(&oor->oor_list);
4001 osd_id_gen(&id, ino, OSD_OII_NOGEN);
4002 inode = osd_iget_fid(info, osd, &id, &fid);
4006 if (!osd_remote_fid(env, osd, &fid)) {
4011 jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC, 1);
4013 mark_inode_dirty(inode);
4014 ldiskfs_journal_stop(jh);
4022 * OSD layer object create function for OST objects (b=11826).
4024 * The FID is inserted into inode xattr here.
4026 * \retval 0, on success
4027 * \retval -ve, on error
4029 static int osd_create(const struct lu_env *env, struct dt_object *dt,
4030 struct lu_attr *attr, struct dt_allocation_hint *hint,
4031 struct dt_object_format *dof, struct thandle *th)
4033 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
4034 struct osd_object *obj = osd_dt_obj(dt);
4035 struct osd_thread_info *info = osd_oti_get(env);
4036 int result, on_ost = 0;
4040 if (dt_object_exists(dt))
4043 LINVRNT(osd_invariant(obj));
4044 LASSERT(!dt_object_remote(dt));
4045 LASSERT(osd_is_write_locked(env, obj));
4046 LASSERT(th != NULL);
4048 if (unlikely(fid_is_acct(fid)))
4050 * Quota files can't be created from the kernel any more,
4051 * 'tune2fs -O quota' will take care of creating them
4055 result = __osd_create(info, obj, attr, hint, dof, th);
4057 if (fid_is_idif(fid) &&
4058 !osd_dev(dt->do_lu.lo_dev)->od_index_in_idif) {
4059 struct lu_fid *tfid = &info->oti_fid;
4060 struct ost_id *oi = &info->oti_ostid;
4062 fid_to_ostid(fid, oi);
4063 ostid_to_fid(tfid, oi, 0);
4065 result = osd_ea_fid_set(info, obj->oo_inode, tfid,
4066 LMAC_FID_ON_OST, 0);
4068 on_ost = fid_is_on_ost(info, osd_obj2dev(obj),
4070 result = osd_ea_fid_set(info, obj->oo_inode, fid,
4071 on_ost ? LMAC_FID_ON_OST : 0,
4074 if (obj->oo_dt.do_body_ops == &osd_body_ops_new)
4075 obj->oo_dt.do_body_ops = &osd_body_ops;
4078 if (!result && !CFS_FAIL_CHECK(OBD_FAIL_OSD_NO_OI_ENTRY))
4079 result = __osd_oi_insert(env, obj, fid, th);
4082 * a small optimization - dt_insert() isn't usually applied
4083 * to OST objects, so we don't need to cache the OI mapping for OST objects
4086 if (result == 0 && on_ost == 0) {
4087 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
4089 result = osd_idc_find_and_init(env, osd, obj);
4090 LASSERT(result == 0);
4093 LASSERT(ergo(result == 0,
4094 dt_object_exists(dt) && !dt_object_remote(dt)));
4095 LINVRNT(osd_invariant(obj));
4099 static int osd_declare_ref_add(const struct lu_env *env, struct dt_object *dt,
4100 struct thandle *handle)
4102 struct osd_thandle *oh;
4104 /* it's possible that the object doesn't exist yet */
4105 LASSERT(handle != NULL);
4107 oh = container_of(handle, struct osd_thandle, ot_super);
4108 LASSERT(oh->ot_handle == NULL);
4110 osd_trans_declare_op(env, oh, OSD_OT_REF_ADD,
4111 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
4113 osd_idc_find_and_init(env, osd_dev(dt->do_lu.lo_dev), osd_dt_obj(dt));
4119 * Concurrency: @dt is write locked.
4121 static int osd_ref_add(const struct lu_env *env, struct dt_object *dt,
4124 struct osd_object *obj = osd_dt_obj(dt);
4125 struct inode *inode = obj->oo_inode;
4126 struct osd_thandle *oh;
4129 if (!dt_object_exists(dt) || obj->oo_destroyed)
4132 LINVRNT(osd_invariant(obj));
4133 LASSERT(!dt_object_remote(dt));
4134 LASSERT(osd_is_write_locked(env, obj));
4135 LASSERT(th != NULL);
4137 oh = container_of(th, struct osd_thandle, ot_super);
4138 LASSERT(oh->ot_handle != NULL);
4140 osd_trans_exec_op(env, th, OSD_OT_REF_ADD);
4142 CDEBUG(D_INODE, DFID" increase nlink %d\n",
4143 PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
4145 * The DIR_NLINK feature allows directories to exceed LDISKFS_LINK_MAX
4146 * (65000) subdirectories by storing "1" in i_nlink if the link count
4147 * would otherwise overflow. Directory traversal tools understand
4148 * that (st_nlink == 1) indicates that the filesystem does not track
4149 * the hard link count on the directory, and will not abort subdirectory
4150 * scanning early once (st_nlink - 2) subdirs have been found.
4152 * This also has to properly handle the case of inodes with nlink == 0
4153 * in case they are being linked into the PENDING directory
4155 spin_lock(&obj->oo_guard);
4156 if (unlikely(inode->i_nlink == 0))
4157 /* inc_nlink from 0 may cause WARN_ON */
4158 set_nlink(inode, 1);
4160 ldiskfs_inc_count(oh->ot_handle, inode);
4161 if (!S_ISDIR(inode->i_mode))
4162 LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
4164 spin_unlock(&obj->oo_guard);
4166 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
4167 LINVRNT(osd_invariant(obj));
4169 osd_trans_exec_check(env, th, OSD_OT_REF_ADD);
4174 static int osd_declare_ref_del(const struct lu_env *env, struct dt_object *dt,
4175 struct thandle *handle)
4177 struct osd_thandle *oh;
4179 if (!dt_object_exists(dt))
4182 LASSERT(!dt_object_remote(dt));
4183 LASSERT(handle != NULL);
4185 oh = container_of(handle, struct osd_thandle, ot_super);
4186 LASSERT(oh->ot_handle == NULL);
4188 osd_trans_declare_op(env, oh, OSD_OT_REF_DEL,
4189 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
4195 * Concurrency: @dt is write locked.
4197 static int osd_ref_del(const struct lu_env *env, struct dt_object *dt,
4200 struct osd_object *obj = osd_dt_obj(dt);
4201 struct inode *inode = obj->oo_inode;
4202 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
4203 struct osd_thandle *oh;
4205 if (!dt_object_exists(dt))
4208 LINVRNT(osd_invariant(obj));
4209 LASSERT(!dt_object_remote(dt));
4210 LASSERT(osd_is_write_locked(env, obj));
4211 LASSERT(th != NULL);
4213 oh = container_of(th, struct osd_thandle, ot_super);
4214 LASSERT(oh->ot_handle != NULL);
4216 osd_trans_exec_op(env, th, OSD_OT_REF_DEL);
4218 spin_lock(&obj->oo_guard);
4220 * That can be the result of an upgrade from an old Lustre version and
4221 * applies only to local files. Just skip this ref_del call.
4222 * ext4_unlink() only treats this as a warning, so don't LASSERT here.
4224 if (inode->i_nlink == 0) {
4225 CDEBUG_LIMIT(fid_is_norm(lu_object_fid(&dt->do_lu)) ?
4226 D_ERROR : D_INODE, "%s: nlink == 0 on "DFID
4227 ", maybe an upgraded file? (LU-3915)\n",
4228 osd_name(osd), PFID(lu_object_fid(&dt->do_lu)));
4229 spin_unlock(&obj->oo_guard);
4233 CDEBUG(D_INODE, DFID" decrease nlink %d\n",
4234 PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
4236 ldiskfs_dec_count(oh->ot_handle, inode);
4237 spin_unlock(&obj->oo_guard);
4239 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
4240 LINVRNT(osd_invariant(obj));
4242 osd_trans_exec_check(env, th, OSD_OT_REF_DEL);
4248 * Concurrency: @dt is read locked.
4250 static int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
4251 struct lu_buf *buf, const char *name)
4253 struct osd_object *obj = osd_dt_obj(dt);
4254 struct inode *inode = obj->oo_inode;
4255 struct osd_thread_info *info = osd_oti_get(env);
4256 struct dentry *dentry = &info->oti_obj_dentry;
4257 bool cache_xattr = false;
4262 /* version get is not real XATTR but uses xattr API */
4263 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
4264 dt_obj_version_t *ver = buf->lb_buf;
4267 * for version we are just using the xattr API but change an inode field instead
4270 if (buf->lb_len == 0)
4271 return sizeof(dt_obj_version_t);
4273 if (buf->lb_len < sizeof(dt_obj_version_t))
4276 CDEBUG(D_INODE, "Get version %#llx for inode %lu\n",
4277 LDISKFS_I(inode)->i_fs_version, inode->i_ino);
4279 *ver = LDISKFS_I(inode)->i_fs_version;
4281 return sizeof(dt_obj_version_t);
4284 if (!dt_object_exists(dt))
4287 LASSERT(!dt_object_remote(dt));
4288 LASSERT(inode->i_op != NULL);
4289 #ifdef HAVE_IOP_XATTR
4290 LASSERT(inode->i_op->getxattr != NULL);
4293 if (strcmp(name, XATTR_NAME_LOV) == 0 ||
4294 strcmp(name, XATTR_NAME_DEFAULT_LMV) == 0)
4298 rc = osd_oxc_get(obj, name, buf);
4303 if (strcmp(name, XATTR_NAME_FID) == 0 && obj->oo_pfid_in_lma) {
4304 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
4305 struct lustre_mdt_attrs *lma = &loa->loa_lma;
4306 struct filter_fid *ff;
4307 struct ost_layout *ol;
4309 rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
4313 LASSERT(lma->lma_compat & LMAC_STRIPE_INFO);
4316 if (buf->lb_len == 0 || !buf->lb_buf)
4319 if (buf->lb_len < rc)
4323 ol = &ff->ff_layout;
4324 ol->ol_stripe_count = cpu_to_le32(loa->loa_parent_fid.f_ver >>
4325 PFID_STRIPE_IDX_BITS);
4326 ol->ol_stripe_size = cpu_to_le32(loa->loa_stripe_size);
4327 loa->loa_parent_fid.f_ver &= PFID_STRIPE_COUNT_MASK;
4328 fid_cpu_to_le(&ff->ff_parent, &loa->loa_parent_fid);
4329 if (lma->lma_compat & LMAC_COMP_INFO) {
4330 ol->ol_comp_start = cpu_to_le64(loa->loa_comp_start);
4331 ol->ol_comp_end = cpu_to_le64(loa->loa_comp_end);
4332 ol->ol_comp_id = cpu_to_le32(loa->loa_comp_id);
4334 ol->ol_comp_start = 0;
4335 ol->ol_comp_end = 0;
4339 rc = __osd_xattr_get(inode, dentry, name,
4340 buf->lb_buf, buf->lb_len);
4344 if (rc == -ENOENT || rc == -ENODATA)
4345 osd_oxc_add(obj, name, NULL, 0);
4346 else if (rc > 0 && buf->lb_buf != NULL)
4347 osd_oxc_add(obj, name, buf->lb_buf, rc);
4353 static int osd_declare_xattr_set(const struct lu_env *env,
4354 struct dt_object *dt,
4355 const struct lu_buf *buf, const char *name,
4356 int fl, struct thandle *handle)
4358 struct osd_thandle *oh;
4360 struct super_block *sb = osd_sb(osd_dev(dt->do_lu.lo_dev));
4362 LASSERT(handle != NULL);
4364 oh = container_of(handle, struct osd_thandle, ot_super);
4365 LASSERT(oh->ot_handle == NULL);
4367 if (strcmp(name, XATTR_NAME_LMA) == 0) {
4369 * For the non-upgrade case, the LMA is set first and
4370 * usually fits in the inode. But for the upgrade case, the LMA
4371 * may be in a separate EA block.
4373 if (dt_object_exists(dt)) {
4374 if (fl == LU_XATTR_REPLACE)
4379 } else if (strcmp(name, XATTR_NAME_VERSION) == 0) {
4381 } else if (strcmp(name, XATTR_NAME_FID) == 0) {
4382 /* We may need to delete the old PFID EA. */
4383 credits = LDISKFS_MAXQUOTAS_DEL_BLOCKS(sb);
4384 if (fl == LU_XATTR_REPLACE)
4390 * If some name entry resides on a remote MDT, then an agent entry will be
4391 * created under the remote parent. On the other hand, if the
4392 * remote entry is removed, then the related agent entry may
4393 * need to be removed from the remote parent. There may be all
4394 * kinds of cases, so let's declare enough credits. The credits
4395 * for creating the agent entry are enough for the remove case.
4397 if (strcmp(name, XATTR_NAME_LINK) == 0) {
4398 credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
4399 if (dt_object_exists(dt))
4400 credits += 1; /* For updating LMA */
4404 credits += osd_dto_credits_noquota[DTO_XATTR_SET];
4409 if (buf->lb_buf == NULL && dt_object_exists(dt)) {
4411 * learn the xattr size from osd_xattr_get() if the
4412 * attribute has not been read yet
4414 buflen = __osd_xattr_get(
4415 osd_dt_obj(dt)->oo_inode,
4416 &osd_oti_get(env)->oti_obj_dentry,
4421 buflen = buf->lb_len;
4424 if (buflen > sb->s_blocksize) {
4425 credits += osd_calc_bkmap_credits(
4427 (buflen + sb->s_blocksize - 1) >>
4428 sb->s_blocksize_bits);
4432 * xattr set may involve inode quota change, reserve credits for
4433 * dquot_initialize()
4435 credits += LDISKFS_MAXQUOTAS_INIT_BLOCKS(sb);
4438 osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET, credits);
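/*
 * Rough example (a sketch): setting a 10KB user xattr on a 4KB-block
 * filesystem starts from DTO_XATTR_SET (14) credits, adds
 * osd_calc_bkmap_credits() for the 3 value blocks since the value exceeds
 * one block, and finally adds the LDISKFS_MAXQUOTAS_INIT_BLOCKS() credits
 * accounted above.
 */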
4443 static int osd_xattr_set_pfid(const struct lu_env *env, struct osd_object *obj,
4444 const struct lu_buf *buf, int fl,
4445 struct thandle *handle)
4447 struct osd_thread_info *info = osd_oti_get(env);
4448 struct dentry *dentry = &info->oti_obj_dentry;
4449 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
4450 struct lustre_mdt_attrs *lma = &loa->loa_lma;
4451 struct inode *inode = obj->oo_inode;
4452 struct filter_fid *ff = buf->lb_buf;
4453 struct ost_layout *ol = &ff->ff_layout;
4454 int flags = XATTR_REPLACE;
4459 if (buf->lb_len != sizeof(*ff) && buf->lb_len != sizeof(struct lu_fid))
4462 rc = osd_get_lma(info, inode, dentry, loa);
4463 if (rc == -ENODATA) {
4464 /* Usually for upgrading from an old device */
4465 lustre_loa_init(loa, lu_object_fid(&obj->oo_dt.do_lu),
4466 LMAC_FID_ON_OST, 0);
4467 flags = XATTR_CREATE;
4472 if (!rc && lma->lma_compat & LMAC_STRIPE_INFO) {
4473 if ((fl & LU_XATTR_CREATE) && !(fl & LU_XATTR_REPLACE))
4476 if (LDISKFS_INODE_SIZE(inode->i_sb) > 256) {
4477 /* Separate PFID EA from LMA */
4478 lma->lma_compat &= ~(LMAC_STRIPE_INFO | LMAC_COMP_INFO);
4479 lustre_lma_swab(lma);
4480 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
4481 sizeof(*lma), XATTR_REPLACE);
4483 obj->oo_pfid_in_lma = 0;
4484 rc = LU_XATTR_CREATE;
4490 if (LDISKFS_INODE_SIZE(inode->i_sb) > 256)
4494 * An old client does not send stripe information;
4495 * in that case store the PFID EA on disk separately.
4497 if (unlikely(buf->lb_len == sizeof(struct lu_fid) ||
4498 ol->ol_stripe_size == 0))
4501 /* Remove the old PFID EA entry first. */
4502 dquot_initialize(inode);
4503 rc = ll_vfs_removexattr(dentry, inode, XATTR_NAME_FID);
4504 if (rc == -ENODATA) {
4505 if ((fl & LU_XATTR_REPLACE) && !(fl & LU_XATTR_CREATE))
4512 fid_le_to_cpu(&loa->loa_parent_fid, &ff->ff_parent);
4513 if (likely(ol->ol_stripe_size != 0)) {
4514 loa->loa_parent_fid.f_ver |= le32_to_cpu(ol->ol_stripe_count) <<
4515 PFID_STRIPE_IDX_BITS;
4516 loa->loa_stripe_size = le32_to_cpu(ol->ol_stripe_size);
4517 lma->lma_compat |= LMAC_STRIPE_INFO;
4518 if (ol->ol_comp_id != 0) {
4519 loa->loa_comp_id = le32_to_cpu(ol->ol_comp_id);
4520 loa->loa_comp_start = le64_to_cpu(ol->ol_comp_start);
4521 loa->loa_comp_end = le64_to_cpu(ol->ol_comp_end);
4522 lma->lma_compat |= LMAC_COMP_INFO;
4526 lustre_loa_swab(loa, false);
4528 /* Store the PFID EA inside LMA. */
4529 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, loa, sizeof(*loa),
4532 obj->oo_pfid_in_lma = 1;
4538 * In a DNE environment, the object (whether a regular file or a directory)
4539 * and its name entry may reside on different MDTs. In that case, we will
4540 * create an agent entry on the MDT where the object resides. The agent entry
4541 * references the object locally, which makes the object visible to
4542 * userspace when mounted as 'ldiskfs' directly. Then userspace tools,
4543 * such as 'tar', can handle the object properly.
4545 * We handle the agent entry while setting the linkEA, which is the common
4546 * interface for both regular files and directories and covers all kinds of
4547 * cases, such as create/link/unlink/rename, and so on.
4549 * NOTE: we can NOT do that in ea_{insert,delete}, which is only used for directories.
4551 * XXX: There are two known issues:
4552 * 1. For one object, we will create at most one agent entry even if there
4553 * may be more than one cross-MDT hard link on the object. So the local
4554 * e2fsck may claim that the object's nlink is larger than the name entries
4555 * that reference such an inode. Further, e2fsck will fix the nlink
4556 * attribute to match the local references. That causes the object's
4557 * nlink attribute to be inconsistent with the global references. It is bad
4558 * but not fatal. The ref_del() can handle the zero-referenced case. On the
4559 * other hand, the global namespace LFSCK can repair the object's attribute
4560 * according to the linkEA.
4561 * 2. There may be so many hard links on the object that its linkEA overflows,
4562 * and then the linkEA entry for a cross-MDT reference may be discarded. If
4563 * that happens, then at this point we do not know whether there are any
4564 * cross-MDT references. But there are local references, which guarantees the
4565 * object is visible to userspace when mounted as 'ldiskfs'. That is enough.
4567 static int osd_xattr_handle_linkea(const struct lu_env *env,
4568 struct osd_device *osd,
4569 struct osd_object *obj,
4570 const struct lu_buf *buf,
4571 struct thandle *handle)
4573 const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
4574 struct lu_fid *tfid = &osd_oti_get(env)->oti_fid3;
4575 struct linkea_data ldata = { .ld_buf = (struct lu_buf *)buf };
4576 struct lu_name tmpname;
4577 struct osd_thandle *oh;
4579 bool remote = false;
4583 oh = container_of(handle, struct osd_thandle, ot_super);
4584 LASSERT(oh->ot_handle != NULL);
4586 rc = linkea_init_with_rec(&ldata);
4588 linkea_first_entry(&ldata);
4589 while (ldata.ld_lee != NULL && !remote) {
4590 linkea_entry_unpack(ldata.ld_lee, &ldata.ld_reclen,
4592 if (osd_remote_fid(env, osd, tfid) > 0)
4595 linkea_next_entry(&ldata);
4597 } else if (rc == -ENODATA) {
4603 if (lu_object_has_agent_entry(&obj->oo_dt.do_lu) && !remote) {
4604 rc = osd_delete_from_remote_parent(env, osd, obj, oh, false);
4606 CERROR("%s: failed to remove agent entry for "DFID
4607 ": rc = %d\n", osd_name(osd), PFID(fid), rc);
4608 } else if (!lu_object_has_agent_entry(&obj->oo_dt.do_lu) && remote) {
4609 rc = osd_add_to_remote_parent(env, osd, obj, oh);
4611 CERROR("%s: failed to create agent entry for "DFID
4612 ": rc = %d\n", osd_name(osd), PFID(fid), rc);
4619 * Concurrency: @dt is write locked.
4621 static int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
4622 const struct lu_buf *buf, const char *name, int fl,
4623 struct thandle *handle)
4625 struct osd_object *obj = osd_dt_obj(dt);
4626 struct osd_device *osd = osd_obj2dev(obj);
4627 struct inode *inode = obj->oo_inode;
4628 struct osd_thread_info *info = osd_oti_get(env);
4638 /* version set is not real XATTR */
4639 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
4640 dt_obj_version_t *version = buf->lb_buf;
4643 * for version we are just using xattr API but change inode
4646 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
4648 CDEBUG(D_INODE, "Set version %#llx (old %#llx) for inode %lu\n",
4649 *version, LDISKFS_I(inode)->i_fs_version, inode->i_ino);
4651 LDISKFS_I(inode)->i_fs_version = *version;
4653 * Version is set after all inode operations are finished,
4654 * so we should mark it dirty here
4656 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
4661 CDEBUG(D_INODE, DFID" set xattr '%s' with size %zu\n",
4662 PFID(lu_object_fid(&dt->do_lu)), name, buf->lb_len);
4665 osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
4668 * For an OST device with the default 256-byte inode size,
4669 * the PFID EA will be stored together with the LMA EA to avoid
4670 * performance trouble. Otherwise the PFID EA can be stored
4671 * independently. LU-8998
4673 if (strcmp(name, XATTR_NAME_FID) == 0 && osd->od_is_ost &&
4674 (LDISKFS_INODE_SIZE(inode->i_sb) <= 256 || obj->oo_pfid_in_lma)) {
4675 LASSERT(buf->lb_buf);
4677 fl = osd_xattr_set_pfid(env, obj, buf, fl, handle);
4680 } else if (strcmp(name, XATTR_NAME_LMV) == 0) {
4681 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
4682 struct lustre_mdt_attrs *lma = &loa->loa_lma;
4684 rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
4688 lma->lma_incompat |= LMAI_STRIPED;
4689 lustre_lma_swab(lma);
4690 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
4691 sizeof(*lma), XATTR_REPLACE);
4694 } else if (strcmp(name, XATTR_NAME_LINK) == 0) {
4695 LASSERT(!osd->od_is_ost);
4697 rc = osd_xattr_handle_linkea(env, osd, obj, buf, handle);
4702 if (fl & LU_XATTR_REPLACE)
4703 fs_flags |= XATTR_REPLACE;
4705 if (fl & LU_XATTR_CREATE)
4706 fs_flags |= XATTR_CREATE;
4708 rc = __osd_xattr_set(info, inode, name, buf->lb_buf, len, fs_flags);
4709 osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
4712 (strcmp(name, XATTR_NAME_LOV) == 0 ||
4713 strcmp(name, XATTR_NAME_DEFAULT_LMV) == 0))
4714 osd_oxc_add(obj, name, buf->lb_buf, buf->lb_len);
4720 * Concurrency: @dt is read locked.
4722 static int osd_xattr_list(const struct lu_env *env, struct dt_object *dt,
4723 const struct lu_buf *buf)
4725 struct osd_object *obj = osd_dt_obj(dt);
4726 struct inode *inode = obj->oo_inode;
4727 struct osd_thread_info *info = osd_oti_get(env);
4728 struct dentry *dentry = &info->oti_obj_dentry;
4730 if (!dt_object_exists(dt))
4733 LASSERT(!dt_object_remote(dt));
4734 LASSERT(inode->i_op != NULL);
4735 LASSERT(inode->i_op->listxattr != NULL);
4737 dentry->d_inode = inode;
4738 dentry->d_sb = inode->i_sb;
4739 return inode->i_op->listxattr(dentry, buf->lb_buf, buf->lb_len);
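/*
 * Declare the transaction credits needed to remove an extended attribute:
 * one DTO_XATTR_SET update plus the quota-init blocks reserved below.
 */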
4742 static int osd_declare_xattr_del(const struct lu_env *env,
4743 struct dt_object *dt, const char *name,
4744 struct thandle *handle)
4746 struct osd_thandle *oh;
4747 struct super_block *sb = osd_sb(osd_dev(dt->do_lu.lo_dev));
4749 LASSERT(!dt_object_remote(dt));
4750 LASSERT(handle != NULL);
4752 oh = container_of(handle, struct osd_thandle, ot_super);
4753 LASSERT(oh->ot_handle == NULL);
4755 osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
4756 osd_dto_credits_noquota[DTO_XATTR_SET]);
4758 * xattr del may involve inode quota change, reserve credits for
4759 * dquot_initialize()
4761 oh->ot_credits += LDISKFS_MAXQUOTAS_INIT_BLOCKS(sb);
4767 * Concurrency: @dt is write locked.
4769 static int osd_xattr_del(const struct lu_env *env, struct dt_object *dt,
4770 const char *name, struct thandle *handle)
4772 struct osd_object *obj = osd_dt_obj(dt);
4773 struct inode *inode = obj->oo_inode;
4774 struct osd_thread_info *info = osd_oti_get(env);
4775 struct dentry *dentry = &info->oti_obj_dentry;
4778 if (!dt_object_exists(dt))
4781 LASSERT(!dt_object_remote(dt));
4782 LASSERT(inode->i_op != NULL);
4783 LASSERT(handle != NULL);
4784 #ifdef HAVE_IOP_XATTR
4785 LASSERT(inode->i_op->removexattr != NULL);
4788 osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
4790 if (strcmp(name, XATTR_NAME_FID) == 0 && obj->oo_pfid_in_lma) {
4791 struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
4793 rc = osd_get_lma(info, inode, &info->oti_obj_dentry,
4794 &info->oti_ost_attrs);
4796 LASSERT(lma->lma_compat & LMAC_STRIPE_INFO);
4798 lma->lma_compat &= ~(LMAC_STRIPE_INFO | LMAC_COMP_INFO);
4799 lustre_lma_swab(lma);
4800 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
4801 sizeof(*lma), XATTR_REPLACE);
4803 obj->oo_pfid_in_lma = 0;
4806 dquot_initialize(inode);
4807 dentry->d_inode = inode;
4808 dentry->d_sb = inode->i_sb;
4809 rc = ll_vfs_removexattr(dentry, inode, name);
4812 osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
4815 (strcmp(name, XATTR_NAME_LOV) == 0 ||
4816 strcmp(name, XATTR_NAME_DEFAULT_LMV) == 0))
4817 osd_oxc_del(obj, name);
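/*
 * Sync a byte range of the object to disk: build a temporary file/dentry
 * from the per-thread info and call vfs_fsync_range() on it.
 */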
4822 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
4823 __u64 start, __u64 end)
4825 struct osd_object *obj = osd_dt_obj(dt);
4826 struct inode *inode = obj->oo_inode;
4827 struct osd_thread_info *info = osd_oti_get(env);
4828 struct dentry *dentry = &info->oti_obj_dentry;
4829 struct file *file = &info->oti_file;
4834 dentry->d_inode = inode;
4835 dentry->d_sb = inode->i_sb;
4836 file->f_path.dentry = dentry;
4837 file->f_mapping = inode->i_mapping;
4838 file->f_op = inode->i_fop;
4839 file->f_inode = inode;
4841 rc = vfs_fsync_range(file, start, end, 0);
4846 static int osd_invalidate(const struct lu_env *env, struct dt_object *dt)
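/*
 * Check whether the object's IAM container is compatible with the requested
 * index features (key/record sizes and flags).
 */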
4855 static int osd_iam_index_probe(const struct lu_env *env, struct osd_object *o,
4856 const struct dt_index_features *feat)
4858 struct iam_descr *descr;
4860 if (osd_object_is_root(o))
4861 return feat == &dt_directory_features;
4863 LASSERT(o->oo_dir != NULL);
4865 descr = o->oo_dir->od_container.ic_descr;
4866 if (feat == &dt_directory_features) {
4867 if (descr->id_rec_size == sizeof(struct osd_fid_pack))
4872 return feat->dif_keysize_min <= descr->id_key_size &&
4873 descr->id_key_size <= feat->dif_keysize_max &&
4874 feat->dif_recsize_min <= descr->id_rec_size &&
4875 descr->id_rec_size <= feat->dif_recsize_max &&
4876 !(feat->dif_flags & (DT_IND_VARKEY |
4877 DT_IND_VARREC | DT_IND_NONUNQ)) &&
4878 ergo(feat->dif_flags & DT_IND_UPDATE,
4879 1 /* XXX check that object (and fs) is writable */);
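/*
 * Initialize and set up the IAM container for the directory object; on
 * success install the IAM index operations, otherwise tear the container down.
 */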
4883 static int osd_iam_container_init(const struct lu_env *env,
4884 struct osd_object *obj,
4885 struct osd_directory *dir)
4887 struct iam_container *bag = &dir->od_container;
4890 result = iam_container_init(bag, &dir->od_descr, obj->oo_inode);
4894 result = iam_container_setup(bag);
4896 obj->oo_dt.do_index_ops = &osd_index_iam_ops;
4898 iam_container_fini(bag);
4905 * Concurrency: no external locking is necessary.
4907 static int osd_index_try(const struct lu_env *env, struct dt_object *dt,
4908 const struct dt_index_features *feat)
4912 struct osd_object *obj = osd_dt_obj(dt);
4914 LINVRNT(osd_invariant(obj));
4916 if (osd_object_is_root(obj)) {
4917 dt->do_index_ops = &osd_index_ea_ops;
4919 } else if (feat == &dt_directory_features) {
4920 dt->do_index_ops = &osd_index_ea_ops;
4921 if (obj->oo_inode == NULL || S_ISDIR(obj->oo_inode->i_mode))
4926 } else if (unlikely(feat == &dt_otable_features)) {
4927 dt->do_index_ops = &osd_otable_ops;
4929 } else if (unlikely(feat == &dt_acct_features)) {
4930 dt->do_index_ops = &osd_acct_index_ops;
4933 } else if (!osd_has_index(obj)) {
4934 struct osd_directory *dir;
4935 struct osd_device *osd = osd_obj2dev(obj);
4936 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
4941 spin_lock(&obj->oo_guard);
4942 if (obj->oo_dir == NULL)
4946 * Concurrent thread allocated container data.
4949 spin_unlock(&obj->oo_guard);
4951 * Now, that we have container data, serialize its
4954 down_write(&obj->oo_ext_idx_sem);
4956 * recheck under lock.
4959 if (osd_has_index(obj)) {
4964 result = osd_iam_container_init(env, obj, obj->oo_dir);
4965 if (result || feat == &dt_lfsck_namespace_features ||
4966 feat == &dt_lfsck_layout_orphan_features ||
4967 feat == &dt_lfsck_layout_dangling_features)
4970 result = osd_index_register(osd, fid,
4971 feat->dif_keysize_max,
4972 feat->dif_recsize_max);
4974 CWARN("%s: failed to register index "
4976 osd_name(osd), PFID(fid), result);
4977 else if (result > 0)
4980 CDEBUG(D_LFSCK, "%s: index object "DFID
4981 " (%d/%d) registered\n",
4982 osd_name(osd), PFID(fid),
4983 (int)feat->dif_keysize_max,
4984 (int)feat->dif_recsize_max);
4987 up_write(&obj->oo_ext_idx_sem);
4995 if (result == 0 && skip_iam == 0) {
4996 if (!osd_iam_index_probe(env, obj, feat))
4999 LINVRNT(osd_invariant(obj));
5004 static int osd_otable_it_attr_get(const struct lu_env *env,
5005 struct dt_object *dt,
5006 struct lu_attr *attr)
5012 static const struct dt_object_operations osd_obj_ops = {
5013 .do_read_lock = osd_read_lock,
5014 .do_write_lock = osd_write_lock,
5015 .do_read_unlock = osd_read_unlock,
5016 .do_write_unlock = osd_write_unlock,
5017 .do_write_locked = osd_write_locked,
5018 .do_attr_get = osd_attr_get,
5019 .do_declare_attr_set = osd_declare_attr_set,
5020 .do_attr_set = osd_attr_set,
5021 .do_ah_init = osd_ah_init,
5022 .do_declare_create = osd_declare_create,
5023 .do_create = osd_create,
5024 .do_declare_destroy = osd_declare_destroy,
5025 .do_destroy = osd_destroy,
5026 .do_index_try = osd_index_try,
5027 .do_declare_ref_add = osd_declare_ref_add,
5028 .do_ref_add = osd_ref_add,
5029 .do_declare_ref_del = osd_declare_ref_del,
5030 .do_ref_del = osd_ref_del,
5031 .do_xattr_get = osd_xattr_get,
5032 .do_declare_xattr_set = osd_declare_xattr_set,
5033 .do_xattr_set = osd_xattr_set,
5034 .do_declare_xattr_del = osd_declare_xattr_del,
5035 .do_xattr_del = osd_xattr_del,
5036 .do_xattr_list = osd_xattr_list,
5037 .do_object_sync = osd_object_sync,
5038 .do_invalidate = osd_invalidate,
5041 static const struct dt_object_operations osd_obj_otable_it_ops = {
5042 .do_attr_get = osd_otable_it_attr_get,
5043 .do_index_try = osd_index_try,
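/*
 * Declare the transaction credits needed to delete a (key, value) pair
 * from an IAM index.
 */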
5046 static int osd_index_declare_iam_delete(const struct lu_env *env,
5047 struct dt_object *dt,
5048 const struct dt_key *key,
5049 struct thandle *handle)
5051 struct osd_thandle *oh;
5053 oh = container_of(handle, struct osd_thandle, ot_super);
5054 LASSERT(oh->ot_handle == NULL);
5056 /* Recycling may cause an additional three blocks to be changed. */
5057 osd_trans_declare_op(env, oh, OSD_OT_DELETE,
5058 osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
5064 * delete a (key, value) pair from index \a dt specified by \a key
5066 * \param dt osd index object
5067 * \param key key for index
5068 * \param rec record reference
5069 * \param handle transaction handle
5072 * \retval -ve failure
5074 static int osd_index_iam_delete(const struct lu_env *env, struct dt_object *dt,
5075 const struct dt_key *key,
5076 struct thandle *handle)
5078 struct osd_thread_info *oti = osd_oti_get(env);
5079 struct osd_object *obj = osd_dt_obj(dt);
5080 struct osd_thandle *oh;
5081 struct iam_path_descr *ipd;
5082 struct iam_container *bag = &obj->oo_dir->od_container;
5087 if (!dt_object_exists(dt))
5090 LINVRNT(osd_invariant(obj));
5091 LASSERT(!dt_object_remote(dt));
5092 LASSERT(bag->ic_object == obj->oo_inode);
5093 LASSERT(handle != NULL);
5095 osd_trans_exec_op(env, handle, OSD_OT_DELETE);
5097 ipd = osd_idx_ipd_get(env, bag);
5098 if (unlikely(ipd == NULL))
5101 oh = container_of(handle, struct osd_thandle, ot_super);
5102 LASSERT(oh->ot_handle != NULL);
5103 LASSERT(oh->ot_handle->h_transaction != NULL);
5105 if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
5106 /* swab quota uid/gid provided by caller */
5107 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
5108 key = (const struct dt_key *)&oti->oti_quota_id;
5111 rc = iam_delete(oh->ot_handle, bag, (const struct iam_key *)key, ipd);
5112 osd_ipd_put(env, bag, ipd);
5113 LINVRNT(osd_invariant(obj));
5114 osd_trans_exec_check(env, handle, OSD_OT_DELETE);
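/*
 * Declare the credits needed to delete a directory entry in EA
 * (ldiskfs-native) mode, including the inode quota reservation.
 */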
5118 static int osd_index_declare_ea_delete(const struct lu_env *env,
5119 struct dt_object *dt,
5120 const struct dt_key *key,
5121 struct thandle *handle)
5123 struct osd_thandle *oh;
5124 struct inode *inode;
5129 LASSERT(!dt_object_remote(dt));
5130 LASSERT(handle != NULL);
5132 oh = container_of(handle, struct osd_thandle, ot_super);
5133 LASSERT(oh->ot_handle == NULL);
5135 credits = osd_dto_credits_noquota[DTO_INDEX_DELETE];
5136 osd_trans_declare_op(env, oh, OSD_OT_DELETE, credits);
5138 inode = osd_dt_obj(dt)->oo_inode;
5142 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
5143 i_projid_read(inode), 0, oh, osd_dt_obj(dt),
5148 static inline int osd_get_fid_from_dentry(struct ldiskfs_dir_entry_2 *de,
5151 struct osd_fid_pack *rec;
5154 if (de->file_type & LDISKFS_DIRENT_LUFID) {
5155 rec = (struct osd_fid_pack *)(de->name + de->name_len + 1);
5156 rc = osd_fid_unpack((struct lu_fid *)fid, rec);
5157 if (rc == 0 && unlikely(!fid_is_sane((struct lu_fid *)fid)))
5163 static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
5164 const struct lu_fid *fid)
5166 struct seq_server_site *ss = osd_seq_site(osd);
5170 /* FID seqs not in FLDB, must be local seq */
5171 if (unlikely(!fid_seq_in_fldb(fid_seq(fid))))
5175 * If FLD has not been initialized yet, which only happens during
5176 * initialization (likely during MGS initialization), we assume
5177 * this is a local FID.
5179 if (ss == NULL || ss->ss_server_fld == NULL)
5182 /* Only check the local FLDB here */
5183 if (osd_seq_exists(env, osd, fid_seq(fid)))
5189 static void osd_take_care_of_agent(const struct lu_env *env,
5190 struct osd_device *osd,
5191 struct osd_thandle *oh,
5192 struct ldiskfs_dir_entry_2 *de)
5194 struct lu_fid *fid = &osd_oti_get(env)->oti_fid;
5195 struct osd_idmap_cache *idc;
5196 int rc, schedule = 0;
5198 LASSERT(de != NULL);
5200 rc = osd_get_fid_from_dentry(de, (struct dt_rec *)fid);
5201 if (likely(rc == 0)) {
5202 idc = osd_idc_find_or_init(env, osd, fid);
5203 if (IS_ERR(idc) || idc->oic_remote)
5205 } else if (rc == -ENODATA) {
5207 * can't get FID, postpone to the end of the
5208 * transaction when iget() is safe
5212 CERROR("%s: can't get FID: rc = %d\n", osd_name(osd), rc);
5215 osd_schedule_agent_inode_removal(env, oh,
5216 le32_to_cpu(de->inode));
5220 * Index delete function for interoperability mode (b11826).
5221 * It will remove the directory entry added by osd_index_ea_insert().
5222 * This entry is needed to maintain name->fid mapping.
5224 * \param key key, i.e. the file entry to be deleted
5226 * \retval 0, on success
5227 * \retval -ve, on error
5229 static int osd_index_ea_delete(const struct lu_env *env, struct dt_object *dt,
5230 const struct dt_key *key, struct thandle *handle)
5232 struct osd_object *obj = osd_dt_obj(dt);
5233 struct inode *dir = obj->oo_inode;
5234 struct dentry *dentry;
5235 struct osd_thandle *oh;
5236 struct ldiskfs_dir_entry_2 *de = NULL;
5237 struct buffer_head *bh;
5238 struct htree_lock *hlock = NULL;
5239 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
5244 if (!dt_object_exists(dt))
5247 LINVRNT(osd_invariant(obj));
5248 LASSERT(!dt_object_remote(dt));
5249 LASSERT(handle != NULL);
5251 osd_trans_exec_op(env, handle, OSD_OT_DELETE);
5253 oh = container_of(handle, struct osd_thandle, ot_super);
5254 LASSERT(oh->ot_handle != NULL);
5255 LASSERT(oh->ot_handle->h_transaction != NULL);
5257 dquot_initialize(dir);
5258 dentry = osd_child_dentry_get(env, obj,
5259 (char *)key, strlen((char *)key));
5261 if (obj->oo_hl_head != NULL) {
5262 hlock = osd_oti_get(env)->oti_hlock;
5263 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
5264 dir, LDISKFS_HLOCK_DEL);
5266 down_write(&obj->oo_ext_idx_sem);
5269 bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
5272 * If this is not the ".." entry, it might be a remote DNE
5273 * entry and we need to check if the FID is for a remote
5274 * MDT. If the FID is not in the directory entry (e.g.
5275 * upgraded 1.8 filesystem without dirdata enabled) then
5276 * we need to get the FID from the LMA. For a remote directory
5277 * there HAS to be an LMA, it cannot be an IGIF inode in this
5280 * Delete the entry before the agent inode in order to
5281 * simplify error handling. At worst an error after deleting
5282 * the entry first might leak the agent inode afterward. The
5283 * reverse would need filesystem abort in case of error deleting
5284 * the entry after the agent had been removed, or leave a
5285 * dangling entry pointing at a random inode.
5287 if (strcmp((char *)key, dotdot) != 0)
5288 osd_take_care_of_agent(env, osd, oh, de);
5289 rc = ldiskfs_delete_entry(oh->ot_handle, dir, de, bh);
5295 if (!rc && fid_is_namespace_visible(lu_object_fid(&dt->do_lu)) &&
5296 obj->oo_dirent_count != LU_DIRENT_COUNT_UNSET) {
5297 /* NB, dirent count may not be accurate, because it's counted
5300 if (obj->oo_dirent_count)
5301 obj->oo_dirent_count--;
5303 obj->oo_dirent_count = LU_DIRENT_COUNT_UNSET;
5306 ldiskfs_htree_unlock(hlock);
5308 up_write(&obj->oo_ext_idx_sem);
5311 LASSERT(osd_invariant(obj));
5312 osd_trans_exec_check(env, handle, OSD_OT_DELETE);
5317 * Lookup index for \a key and copy record to \a rec.
5319 * \param dt osd index object
5320 * \param key key for index
5321 * \param rec record reference
5323 * \retval +ve success : exact match
5324 * \retval 0 return record with key not greater than \a key
5325 * \retval -ve failure
5327 static int osd_index_iam_lookup(const struct lu_env *env, struct dt_object *dt,
5328 struct dt_rec *rec, const struct dt_key *key)
5330 struct osd_object *obj = osd_dt_obj(dt);
5331 struct iam_path_descr *ipd;
5332 struct iam_container *bag = &obj->oo_dir->od_container;
5333 struct osd_thread_info *oti = osd_oti_get(env);
5334 struct iam_iterator *it = &oti->oti_idx_it;
5335 struct iam_rec *iam_rec;
5340 if (!dt_object_exists(dt))
5343 LASSERT(osd_invariant(obj));
5344 LASSERT(!dt_object_remote(dt));
5345 LASSERT(bag->ic_object == obj->oo_inode);
5347 ipd = osd_idx_ipd_get(env, bag);
5351 /* got ipd now we can start iterator. */
5352 iam_it_init(it, bag, 0, ipd);
5354 if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
5355 /* swab quota uid/gid provided by caller */
5356 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
5357 key = (const struct dt_key *)&oti->oti_quota_id;
5360 rc = iam_it_get(it, (struct iam_key *)key);
5362 if (S_ISDIR(obj->oo_inode->i_mode))
5363 iam_rec = (struct iam_rec *)oti->oti_ldp;
5365 iam_rec = (struct iam_rec *)rec;
5367 iam_reccpy(&it->ii_path.ip_leaf, (struct iam_rec *)iam_rec);
5369 if (S_ISDIR(obj->oo_inode->i_mode))
5370 osd_fid_unpack((struct lu_fid *)rec,
5371 (struct osd_fid_pack *)iam_rec);
5372 else if (fid_is_quota(lu_object_fid(&dt->do_lu)))
5373 osd_quota_unpack(obj, rec);
5378 osd_ipd_put(env, bag, ipd);
5380 LINVRNT(osd_invariant(obj));
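/*
 * Declare the transaction credits needed to insert a (key, value) pair
 * into an IAM index.
 */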
5385 static int osd_index_declare_iam_insert(const struct lu_env *env,
5386 struct dt_object *dt,
5387 const struct dt_rec *rec,
5388 const struct dt_key *key,
5389 struct thandle *handle)
5391 struct osd_thandle *oh;
5393 LASSERT(handle != NULL);
5395 oh = container_of(handle, struct osd_thandle, ot_super);
5396 LASSERT(oh->ot_handle == NULL);
5398 osd_trans_declare_op(env, oh, OSD_OT_INSERT,
5399 osd_dto_credits_noquota[DTO_INDEX_INSERT]);
5405 * Inserts (key, value) pair in \a dt index object.
5407 * \param dt osd index object
5408 * \param key key for index
5409 * \param rec record reference
5410 * \param th transaction handle
5413 * \retval -ve failure
5415 static int osd_index_iam_insert(const struct lu_env *env, struct dt_object *dt,
5416 const struct dt_rec *rec,
5417 const struct dt_key *key, struct thandle *th)
5419 struct osd_object *obj = osd_dt_obj(dt);
5420 struct iam_path_descr *ipd;
5421 struct osd_thandle *oh;
5422 struct iam_container *bag;
5423 struct osd_thread_info *oti = osd_oti_get(env);
5424 struct iam_rec *iam_rec;
5429 if (!dt_object_exists(dt))
5432 LINVRNT(osd_invariant(obj));
5433 LASSERT(!dt_object_remote(dt));
5435 bag = &obj->oo_dir->od_container;
5436 LASSERT(bag->ic_object == obj->oo_inode);
5437 LASSERT(th != NULL);
5439 osd_trans_exec_op(env, th, OSD_OT_INSERT);
5441 ipd = osd_idx_ipd_get(env, bag);
5442 if (unlikely(ipd == NULL))
5445 oh = container_of(th, struct osd_thandle, ot_super);
5446 LASSERT(oh->ot_handle != NULL);
5447 LASSERT(oh->ot_handle->h_transaction != NULL);
5448 if (S_ISDIR(obj->oo_inode->i_mode)) {
5449 iam_rec = (struct iam_rec *)oti->oti_ldp;
5450 osd_fid_pack((struct osd_fid_pack *)iam_rec, rec,
5452 } else if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
5453 /* pack quota uid/gid */
5454 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
5455 key = (const struct dt_key *)&oti->oti_quota_id;
5456 /* pack quota record */
5457 rec = osd_quota_pack(obj, rec, &oti->oti_quota_rec);
5458 iam_rec = (struct iam_rec *)rec;
5460 iam_rec = (struct iam_rec *)rec;
5463 rc = iam_insert(oh->ot_handle, bag, (const struct iam_key *)key,
5465 osd_ipd_put(env, bag, ipd);
5466 LINVRNT(osd_invariant(obj));
5467 osd_trans_exec_check(env, th, OSD_OT_INSERT);
5472 * Calls ldiskfs_add_entry() to add directory entry
5473 * into the directory. This is required for
5474 * interoperability mode (b11826)
5476 * \retval 0, on success
5477 * \retval -ve, on error
5479 static int __osd_ea_add_rec(struct osd_thread_info *info,
5480 struct osd_object *pobj, struct inode *cinode,
5481 const char *name, const struct lu_fid *fid,
5482 struct htree_lock *hlock, struct thandle *th)
5484 struct ldiskfs_dentry_param *ldp;
5485 struct dentry *child;
5486 struct osd_thandle *oth;
5489 oth = container_of(th, struct osd_thandle, ot_super);
5490 LASSERT(oth->ot_handle != NULL);
5491 LASSERT(oth->ot_handle->h_transaction != NULL);
5492 LASSERT(pobj->oo_inode);
5494 ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
5495 if (unlikely(pobj->oo_inode ==
5496 osd_sb(osd_obj2dev(pobj))->s_root->d_inode))
5499 osd_get_ldiskfs_dirent_param(ldp, fid);
5500 child = osd_child_dentry_get(info->oti_env, pobj, name, strlen(name));
5501 child->d_fsdata = (void *)ldp;
5502 dquot_initialize(pobj->oo_inode);
5503 rc = osd_ldiskfs_add_entry(info, osd_obj2dev(pobj), oth->ot_handle,
5504 child, cinode, hlock);
5505 if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_TYPE)) {
5506 struct ldiskfs_dir_entry_2 *de;
5507 struct buffer_head *bh;
5510 bh = osd_ldiskfs_find_entry(pobj->oo_inode, &child->d_name, &de,
5513 rc1 = ldiskfs_journal_get_write_access(oth->ot_handle,
5516 if (S_ISDIR(cinode->i_mode))
5517 de->file_type = LDISKFS_DIRENT_LUFID |
5518 LDISKFS_FT_REG_FILE;
5520 de->file_type = LDISKFS_DIRENT_LUFID |
5522 ldiskfs_handle_dirty_metadata(oth->ot_handle,
5533 * Calls ldiskfs_add_dot_dotdot() to add dot and dotdot entries
5534 * into the directory. Also sets flags in the osd object to
5535 * indicate dot and dotdot are created. This is required for
5536 * interoperability mode (b11826)
5538 * \param dir directory for dot and dotdot fixup.
5539 * \param obj child object for linking
5541 * \retval 0, on success
5542 * \retval -ve, on error
5544 static int osd_add_dot_dotdot(struct osd_thread_info *info,
5545 struct osd_object *dir,
5546 struct inode *parent_dir, const char *name,
5547 const struct lu_fid *dot_fid,
5548 const struct lu_fid *dot_dot_fid,
5551 struct inode *inode = dir->oo_inode;
5552 struct osd_thandle *oth;
5555 oth = container_of(th, struct osd_thandle, ot_super);
5556 LASSERT(oth->ot_handle->h_transaction != NULL);
5557 LASSERT(S_ISDIR(dir->oo_inode->i_mode));
5559 if (strcmp(name, dot) == 0) {
5560 if (dir->oo_compat_dot_created) {
5563 LASSERT(inode->i_ino == parent_dir->i_ino);
5564 dir->oo_compat_dot_created = 1;
5567 } else if (strcmp(name, dotdot) == 0) {
5568 if (!dir->oo_compat_dot_created)
5570 /* in case of rename, dotdot is already created */
5571 if (dir->oo_compat_dotdot_created) {
5572 return __osd_ea_add_rec(info, dir, parent_dir, name,
5573 dot_dot_fid, NULL, th);
5576 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_PARENT)) {
5577 struct lu_fid tfid = *dot_dot_fid;
5580 result = osd_add_dot_dotdot_internal(info,
5581 dir->oo_inode, parent_dir, dot_fid,
5584 result = osd_add_dot_dotdot_internal(info,
5585 dir->oo_inode, parent_dir, dot_fid,
5590 dir->oo_compat_dotdot_created = 1;
5598 * It will call the appropriate osd_add* function and return the
5599 * value returned by the respective function.
5601 static int osd_ea_add_rec(const struct lu_env *env, struct osd_object *pobj,
5602 struct inode *cinode, const char *name,
5603 const struct lu_fid *fid, struct thandle *th)
5605 struct osd_thread_info *info = osd_oti_get(env);
5606 struct htree_lock *hlock;
5609 hlock = pobj->oo_hl_head != NULL ? info->oti_hlock : NULL;
5611 if (name[0] == '.' && (name[1] == '\0' ||
5612 (name[1] == '.' && name[2] == '\0'))) {
5613 if (hlock != NULL) {
5614 ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
5617 down_write(&pobj->oo_ext_idx_sem);
5620 rc = osd_add_dot_dotdot(info, pobj, cinode, name,
5621 lu_object_fid(&pobj->oo_dt.do_lu),
5624 if (hlock != NULL) {
5625 ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
5626 pobj->oo_inode, LDISKFS_HLOCK_ADD);
5628 down_write(&pobj->oo_ext_idx_sem);
5631 if (OBD_FAIL_CHECK(OBD_FAIL_FID_INDIR)) {
5632 struct lu_fid *tfid = &info->oti_fid;
5636 rc = __osd_ea_add_rec(info, pobj, cinode, name,
5639 rc = __osd_ea_add_rec(info, pobj, cinode, name, fid,
5643 if (!rc && fid_is_namespace_visible(lu_object_fid(&pobj->oo_dt.do_lu))
5644 && pobj->oo_dirent_count != LU_DIRENT_COUNT_UNSET)
5645 pobj->oo_dirent_count++;
5648 ldiskfs_htree_unlock(hlock);
5650 up_write(&pobj->oo_ext_idx_sem);
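/*
 * Verify that the OI mapping for the FID found in a directory entry matches
 * the inode it points at; if the mapping is missing or stale, queue the
 * entry for OI scrub or trigger a partial auto scrub.
 */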
5656 osd_consistency_check(struct osd_thread_info *oti, struct osd_device *dev,
5657 struct osd_idmap_cache *oic)
5659 struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
5660 struct lu_fid *fid = &oic->oic_fid;
5661 struct osd_inode_id *id = &oic->oic_lid;
5662 struct inode *inode = NULL;
5669 if (!fid_is_norm(fid) && !fid_is_igif(fid))
5672 if (thread_is_running(&scrub->os_thread) &&
5673 scrub->os_pos_current > id->oii_ino)
5676 if (dev->od_auto_scrub_interval == AS_NEVER ||
5677 ktime_get_real_seconds() <
5678 scrub->os_file.sf_time_last_complete + dev->od_auto_scrub_interval)
5682 rc = osd_oi_lookup(oti, dev, fid, &oti->oti_id, 0);
5683 if (rc == -ENOENT) {
5684 __u32 gen = id->oii_gen;
5690 inode = osd_iget(oti, dev, id);
5691 /* The inode has been removed (by race maybe). */
5692 if (IS_ERR(inode)) {
5693 rc = PTR_ERR(inode);
5695 RETURN(rc == -ESTALE ? -ENOENT : rc);
5698 /* The OI mapping is lost. */
5699 if (gen != OSD_OII_NOGEN)
5703 * The inode may have been reused by others; we do not know, so
5704 * leave it to be handled by a subsequent osd_fid_lookup().
5707 } else if (rc || osd_id_eq(id, &oti->oti_id)) {
5714 if (thread_is_running(&scrub->os_thread)) {
5715 if (inode == NULL) {
5716 inode = osd_iget(oti, dev, id);
5717 /* The inode has been removed (by race maybe). */
5718 if (IS_ERR(inode)) {
5719 rc = PTR_ERR(inode);
5721 RETURN(rc == -ESTALE ? -ENOENT : rc);
5725 rc = osd_oii_insert(dev, oic, insert);
5727 * There is a race condition between osd_oi_lookup() and OI scrub:
5728 * the OI scrub may finish just after the osd_oi_lookup() failure.
5729 * In that case, it is unnecessary to trigger OI scrub again;
5730 * just try to call osd_oi_lookup() again.
5732 if (unlikely(rc == -EAGAIN))
5735 if (!S_ISDIR(inode->i_mode))
5738 rc = osd_check_lmv(oti, dev, inode, oic);
5743 if (dev->od_auto_scrub_interval != AS_NEVER && ++once == 1) {
5744 rc = osd_scrub_start(oti->oti_env, dev, SS_AUTO_PARTIAL |
5745 SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT);
5746 CDEBUG(D_LFSCK | D_CONSOLE | D_WARNING,
5747 "%s: trigger partial OI scrub for RPC inconsistency "
5748 "checking FID "DFID": rc = %d\n",
5749 osd_dev2name(dev), PFID(fid), rc);
5750 if (rc == 0 || rc == -EALREADY)
5763 static int osd_fail_fid_lookup(struct osd_thread_info *oti,
5764 struct osd_device *dev,
5765 struct osd_idmap_cache *oic,
5766 struct lu_fid *fid, __u32 ino)
5768 struct lustre_ost_attrs *loa = &oti->oti_ost_attrs;
5769 struct inode *inode;
5772 osd_id_gen(&oic->oic_lid, ino, OSD_OII_NOGEN);
5773 inode = osd_iget(oti, dev, &oic->oic_lid);
5774 if (IS_ERR(inode)) {
5775 fid_zero(&oic->oic_fid);
5776 return PTR_ERR(inode);
5779 rc = osd_get_lma(oti, inode, &oti->oti_obj_dentry, loa);
5782 fid_zero(&oic->oic_fid);
5784 *fid = oic->oic_fid = loa->loa_lma.lma_self_fid;
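/* Cache the FID <-> inode-id mapping in the per-thread OI cache. */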
5788 void osd_add_oi_cache(struct osd_thread_info *info, struct osd_device *osd,
5789 struct osd_inode_id *id, const struct lu_fid *fid)
5791 CDEBUG(D_INODE, "add "DFID" %u:%u to info %p\n", PFID(fid),
5792 id->oii_ino, id->oii_gen, info);
5793 info->oti_cache.oic_lid = *id;
5794 info->oti_cache.oic_fid = *fid;
5795 info->oti_cache.oic_dev = osd;
5799 * Get parent FID from the linkEA.
5801 * For a directory whose parent resides on a remote MDT, to satisfy the
5802 * local e2fsck, we insert it into /REMOTE_PARENT_DIR locally. On
5803 * the other hand, so that lookup(..) on the directory can return
5804 * the real parent FID, we append the real parent FID after its ".."
5805 * name entry in /REMOTE_PARENT_DIR.
5807 * Unfortunately, such a PFID-in-dirent cannot be preserved via file-level
5808 * backup. So after the restore, we cannot get the right parent FID from
5809 * its ".." name entry in /REMOTE_PARENT_DIR. In that case, since
5810 * we have stored the real parent FID in the directory object's linkEA,
5811 * we can parse the linkEA for the real parent FID.
5813 * \param[in] env pointer to the thread context
5814 * \param[in] obj pointer to the object to be handled
5815 * \param[out] fid pointer to the buffer to hold the parent FID
5817 * \retval 0 for getting the real parent FID successfully
5818 * \retval negative error number on failure
5820 static int osd_get_pfid_from_linkea(const struct lu_env *env,
5821 struct osd_object *obj,
5824 struct osd_thread_info *oti = osd_oti_get(env);
5825 struct lu_buf *buf = &oti->oti_big_buf;
5826 struct dentry *dentry = &oti->oti_obj_dentry;
5827 struct inode *inode = obj->oo_inode;
5828 struct linkea_data ldata = { NULL };
5834 if (!S_ISDIR(inode->i_mode))
5838 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LINK,
5839 buf->lb_buf, buf->lb_len);
5840 if (rc == -ERANGE) {
5841 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LINK,
5844 lu_buf_realloc(buf, rc);
5845 if (buf->lb_buf == NULL)
5852 if (unlikely(rc == 0))
5858 if (unlikely(buf->lb_buf == NULL)) {
5859 lu_buf_realloc(buf, rc);
5860 if (buf->lb_buf == NULL)
5867 rc = linkea_init_with_rec(&ldata);
5869 linkea_first_entry(&ldata);
5870 linkea_entry_unpack(ldata.ld_lee, &ldata.ld_reclen, NULL, fid);
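/*
 * Verify that the given (name, parent FID) pair is recorded in the inode's
 * linkEA, i.e. that the name entry still references this inode.
 */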
5876 static int osd_verify_ent_by_linkea(const struct lu_env *env,
5877 struct inode *inode,
5878 const struct lu_fid *pfid,
5879 const char *name, const int namelen)
5881 struct osd_thread_info *oti = osd_oti_get(env);
5882 struct lu_buf *buf = &oti->oti_big_buf;
5883 struct dentry *dentry = &oti->oti_obj_dentry;
5884 struct linkea_data ldata = { NULL };
5885 struct lu_name cname = { .ln_name = name,
5886 .ln_namelen = namelen };
5892 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LINK,
5893 buf->lb_buf, buf->lb_len);
5895 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LINK, NULL, 0);
5900 if (unlikely(rc == 0))
5903 if (buf->lb_len < rc) {
5904 lu_buf_realloc(buf, rc);
5905 if (buf->lb_buf == NULL)
5912 rc = linkea_init_with_rec(&ldata);
5914 rc = linkea_links_find(&ldata, &cname, pfid);
5920 * Calls ->lookup() to find the dentry. From the dentry get the inode and
5921 * read the inode's EA to get the FID. This is required for interoperability
5924 * \retval 0, on success
5925 * \retval -ve, on error
5927 static int osd_ea_lookup_rec(const struct lu_env *env, struct osd_object *obj,
5928 struct dt_rec *rec, const struct dt_key *key)
5930 struct inode *dir = obj->oo_inode;
5931 struct dentry *dentry;
5932 struct ldiskfs_dir_entry_2 *de;
5933 struct buffer_head *bh;
5934 struct lu_fid *fid = (struct lu_fid *)rec;
5935 struct htree_lock *hlock = NULL;
5941 LASSERT(dir->i_op != NULL);
5942 LASSERT(dir->i_op->lookup != NULL);
5944 dentry = osd_child_dentry_get(env, obj,
5945 (char *)key, strlen((char *)key));
5947 if (obj->oo_hl_head != NULL) {
5948 hlock = osd_oti_get(env)->oti_hlock;
5949 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
5950 dir, LDISKFS_HLOCK_LOOKUP);
5952 down_read(&obj->oo_ext_idx_sem);
5955 bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
5957 struct osd_thread_info *oti = osd_oti_get(env);
5958 struct osd_inode_id *id = &oti->oti_id;
5959 struct osd_idmap_cache *oic = &oti->oti_cache;
5960 struct osd_device *dev = osd_obj2dev(obj);
5962 ino = le32_to_cpu(de->inode);
5963 if (OBD_FAIL_CHECK(OBD_FAIL_FID_LOOKUP)) {
5965 rc = osd_fail_fid_lookup(oti, dev, oic, fid, ino);
5969 rc = osd_get_fid_from_dentry(de, rec);
5971 /* done with de, release bh */
5974 if (unlikely(is_remote_parent_ino(dev, ino))) {
5975 const char *name = (const char *)key;
5978 * If the parent is on a remote MDT, and there
5979 * is no FID-in-dirent, then we have to get
5980 * the parent FID from the linkEA.
5982 if (likely(strlen(name) == 2 &&
5983 name[0] == '.' && name[1] == '.'))
5984 rc = osd_get_pfid_from_linkea(env, obj,
5987 rc = osd_ea_fid_get(env, obj, ino, fid, id);
5990 osd_id_gen(id, ino, OSD_OII_NOGEN);
5993 if (rc != 0 || osd_remote_fid(env, dev, fid)) {
5994 fid_zero(&oic->oic_fid);
5999 osd_add_oi_cache(osd_oti_get(env), osd_obj2dev(obj), id, fid);
6000 rc = osd_consistency_check(oti, dev, oic);
6002 fid_zero(&oic->oic_fid);
6004 /* Other error should not affect lookup result. */
6014 ldiskfs_htree_unlock(hlock);
6016 up_read(&obj->oo_ext_idx_sem);
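/*
 * Declare the credits needed to insert a directory entry in EA mode;
 * a remote FID needs extra credits to create the local agent inode.
 */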
6020 static int osd_index_declare_ea_insert(const struct lu_env *env,
6021 struct dt_object *dt,
6022 const struct dt_rec *rec,
6023 const struct dt_key *key,
6024 struct thandle *handle)
6026 struct osd_thandle *oh;
6027 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
6028 struct dt_insert_rec *rec1 = (struct dt_insert_rec *)rec;
6029 const struct lu_fid *fid = rec1->rec_fid;
6030 int credits, rc = 0;
6031 struct osd_idmap_cache *idc;
6035 LASSERT(!dt_object_remote(dt));
6036 LASSERT(handle != NULL);
6037 LASSERT(fid != NULL);
6038 LASSERT(rec1->rec_type != 0);
6040 oh = container_of(handle, struct osd_thandle, ot_super);
6041 LASSERT(oh->ot_handle == NULL);
6043 credits = osd_dto_credits_noquota[DTO_INDEX_INSERT];
6046 * we can't call iget() while a transaction is running
6047 * (this can lead to a deadlock), but we need to know the
6048 * inum and object type. So we find this information at
6049 * declaration time and cache it in the per-thread info
6051 idc = osd_idc_find_or_init(env, osd, fid);
6053 RETURN(PTR_ERR(idc));
6054 if (idc->oic_remote) {
6056 * a reference to remote inode is represented by an
6057 * agent inode which we have to create
6059 credits += osd_dto_credits_noquota[DTO_OBJECT_CREATE];
6060 credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
6063 osd_trans_declare_op(env, oh, OSD_OT_INSERT, credits);
6065 if (osd_dt_obj(dt)->oo_inode != NULL) {
6066 struct inode *inode = osd_dt_obj(dt)->oo_inode;
6069 * We ignore block quota on the meta pool (MDTs), so we needn't
6070 * calculate how many blocks will be consumed by this index
6073 rc = osd_declare_inode_qid(env, i_uid_read(inode),
6075 i_projid_read(inode), 0,
6076 oh, osd_dt_obj(dt), NULL,
6081 #ifdef HAVE_PROJECT_QUOTA
6083 * Reserve credits for the local agent inode, whose project ID is
6084 * transferred to 0; quota enforcement is ignored in this case.
6086 if (idc->oic_remote &&
6087 LDISKFS_I(inode)->i_flags & LUSTRE_PROJINHERIT_FL &&
6088 i_projid_read(inode) != 0)
6089 rc = osd_declare_attr_qid(env, osd_dt_obj(dt), oh,
6090 0, i_projid_read(inode),
6091 0, false, PRJQUOTA, true);
6099 * Index add function for interoperability mode (b11826).
6100 * It will add the directory entry. This entry is needed to
6101 * maintain name->fid mapping.
6103 * \param key the key, i.e. the file entry to be inserted
6104 * \param rec the value of the given key, i.e. the fid
6106 * \retval 0, on success
6107 * \retval -ve, on error
6109 static int osd_index_ea_insert(const struct lu_env *env, struct dt_object *dt,
6110 const struct dt_rec *rec,
6111 const struct dt_key *key, struct thandle *th)
6113 struct osd_object *obj = osd_dt_obj(dt);
6114 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
6115 struct dt_insert_rec *rec1 = (struct dt_insert_rec *)rec;
6116 const struct lu_fid *fid = rec1->rec_fid;
6117 const char *name = (const char *)key;
6118 struct osd_thread_info *oti = osd_oti_get(env);
6119 struct inode *child_inode = NULL;
6120 struct osd_idmap_cache *idc;
6125 if (!dt_object_exists(dt))
6128 LASSERT(osd_invariant(obj));
6129 LASSERT(!dt_object_remote(dt));
6130 LASSERT(th != NULL);
6132 osd_trans_exec_op(env, th, OSD_OT_INSERT);
6134 LASSERTF(fid_is_sane(fid), "fid"DFID" is insane!\n", PFID(fid));
6136 idc = osd_idc_find(env, osd, fid);
6137 if (unlikely(idc == NULL)) {
6138 idc = osd_idc_find_or_init(env, osd, fid);
6141 * this dt_insert() wasn't declared properly, so the
6142 * FID is missing from the OI cache. We had better not
6143 * look up the FID in FLDB/OI and risk a deadlock,
6144 * but in some special cases (lfsck testing, etc.)
6145 * it's much simpler than fixing a caller.
6147 * Normally this error should be placed after the first
6148 * find, but migrate may attach source stripes to the
6149 * target, which doesn't create stripes.
6151 CERROR("%s: "DFID" wasn't declared for insert\n",
6152 osd_name(osd), PFID(fid));
6154 RETURN(PTR_ERR(idc));
6158 if (idc->oic_remote) {
6159 /* Insert remote entry */
6160 if (strcmp(name, dotdot) == 0 && strlen(name) == 2) {
6162 igrab(osd->od_mdt_map->omm_remote_parent->d_inode);
6164 child_inode = osd_create_local_agent_inode(env, osd,
6165 obj, fid, rec1->rec_type & S_IFMT, th);
6166 if (IS_ERR(child_inode))
6167 RETURN(PTR_ERR(child_inode));
6170 /* Insert local entry */
6171 if (unlikely(idc->oic_lid.oii_ino == 0)) {
6172 /* for a reason OI cache wasn't filled properly */
6173 CERROR("%s: OIC for "DFID" isn't filled\n",
6174 osd_name(osd), PFID(fid));
6177 child_inode = oti->oti_inode;
6178 if (unlikely(child_inode == NULL)) {
6179 struct ldiskfs_inode_info *lii;
6184 child_inode = oti->oti_inode = &lii->vfs_inode;
6186 child_inode->i_sb = osd_sb(osd);
6187 child_inode->i_ino = idc->oic_lid.oii_ino;
6188 child_inode->i_mode = rec1->rec_type & S_IFMT;
6191 rc = osd_ea_add_rec(env, obj, child_inode, name, fid, th);
6193 CDEBUG(D_INODE, "parent %lu insert %s:%lu rc = %d\n",
6194 obj->oo_inode->i_ino, name, child_inode->i_ino, rc);
6196 if (child_inode && child_inode != oti->oti_inode)
6198 LASSERT(osd_invariant(obj));
6199 osd_trans_exec_check(env, th, OSD_OT_INSERT);
6205 * Initialize osd Iterator for given osd index object.
6207 * \param dt osd index object
6210 static struct dt_it *osd_it_iam_init(const struct lu_env *env,
6211 struct dt_object *dt,
6214 struct osd_it_iam *it;
6215 struct osd_object *obj = osd_dt_obj(dt);
6216 struct lu_object *lo = &dt->do_lu;
6217 struct iam_path_descr *ipd;
6218 struct iam_container *bag = &obj->oo_dir->od_container;
6220 if (!dt_object_exists(dt))
6221 return ERR_PTR(-ENOENT);
6225 return ERR_PTR(-ENOMEM);
6227 ipd = osd_it_ipd_get(env, bag);
6228 if (likely(ipd != NULL)) {
6232 iam_it_init(&it->oi_it, bag, IAM_IT_MOVE, ipd);
6233 return (struct dt_it *)it;
6236 return ERR_PTR(-ENOMEM);
6241 * Free the given iterator.
6243 static void osd_it_iam_fini(const struct lu_env *env, struct dt_it *di)
6245 struct osd_it_iam *it = (struct osd_it_iam *)di;
6246 struct osd_object *obj = it->oi_obj;
6248 iam_it_fini(&it->oi_it);
6249 osd_ipd_put(env, &obj->oo_dir->od_container, it->oi_ipd);
6250 osd_object_put(env, obj);
6255 * Move Iterator to record specified by \a key
6257 * \param di osd iterator
6258 * \param key key for index
6260 * \retval +ve di points to record with least key not larger than key
6261 * \retval 0 di points to exact matched key
6262 * \retval -ve failure
6265 static int osd_it_iam_get(const struct lu_env *env,
6266 struct dt_it *di, const struct dt_key *key)
6268 struct osd_thread_info *oti = osd_oti_get(env);
6269 struct osd_it_iam *it = (struct osd_it_iam *)di;
6271 if (fid_is_quota(lu_object_fid(&it->oi_obj->oo_dt.do_lu))) {
6272 /* swab quota uid/gid */
6273 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
6274 key = (struct dt_key *)&oti->oti_quota_id;
6277 return iam_it_get(&it->oi_it, (const struct iam_key *)key);
6283 * \param di osd iterator
6285 static void osd_it_iam_put(const struct lu_env *env, struct dt_it *di)
6287 struct osd_it_iam *it = (struct osd_it_iam *)di;
6289 iam_it_put(&it->oi_it);
6293 * Move iterator by one record
6295 * \param di osd iterator
6297 * \retval +1 end of container reached
6299 * \retval -ve failure
6302 static int osd_it_iam_next(const struct lu_env *env, struct dt_it *di)
6304 struct osd_it_iam *it = (struct osd_it_iam *)di;
6306 return iam_it_next(&it->oi_it);
6310 * Return pointer to the key under iterator.
6313 static struct dt_key *osd_it_iam_key(const struct lu_env *env,
6314 const struct dt_it *di)
6316 struct osd_thread_info *oti = osd_oti_get(env);
6317 struct osd_it_iam *it = (struct osd_it_iam *)di;
6318 struct osd_object *obj = it->oi_obj;
6321 key = (struct dt_key *)iam_it_key_get(&it->oi_it);
6323 if (!IS_ERR(key) && fid_is_quota(lu_object_fid(&obj->oo_dt.do_lu))) {
6324 /* swab quota uid/gid */
6325 oti->oti_quota_id = le64_to_cpu(*((__u64 *)key));
6326 key = (struct dt_key *)&oti->oti_quota_id;
6333 * Return size of key under iterator (in bytes)
6336 static int osd_it_iam_key_size(const struct lu_env *env, const struct dt_it *di)
6338 struct osd_it_iam *it = (struct osd_it_iam *)di;
6340 return iam_it_key_size(&it->oi_it);
6344 osd_it_append_attrs(struct lu_dirent *ent, int len, __u16 type)
6346 /* check if file type is required */
6347 if (ent->lde_attrs & LUDA_TYPE) {
6348 struct luda_type *lt;
6349 int align = sizeof(*lt) - 1;
6351 len = (len + align) & ~align;
6352 lt = (struct luda_type *)(ent->lde_name + len);
6353 lt->lt_type = cpu_to_le16(DTTOIF(type));
6356 ent->lde_attrs = cpu_to_le32(ent->lde_attrs);
6360 * Build a lu_dirent from the backend fs dirent.
6364 osd_it_pack_dirent(struct lu_dirent *ent, struct lu_fid *fid, __u64 offset,
6365 char *name, __u16 namelen, __u16 type, __u32 attr)
6367 ent->lde_attrs = attr | LUDA_FID;
6368 fid_cpu_to_le(&ent->lde_fid, fid);
6370 ent->lde_hash = cpu_to_le64(offset);
6371 ent->lde_reclen = cpu_to_le16(lu_dirent_calc_size(namelen, attr));
6373 strncpy(ent->lde_name, name, namelen);
6374 ent->lde_name[namelen] = '\0';
6375 ent->lde_namelen = cpu_to_le16(namelen);
6377 /* append lustre attributes */
6378 osd_it_append_attrs(ent, namelen, type);
6382 * Return pointer to the record under iterator.
6384 static int osd_it_iam_rec(const struct lu_env *env,
6385 const struct dt_it *di,
6386 struct dt_rec *dtrec, __u32 attr)
6388 struct osd_it_iam *it = (struct osd_it_iam *)di;
6389 struct osd_thread_info *info = osd_oti_get(env);
6393 if (S_ISDIR(it->oi_obj->oo_inode->i_mode)) {
6394 const struct osd_fid_pack *rec;
6395 struct lu_fid *fid = &info->oti_fid;
6396 struct lu_dirent *lde = (struct lu_dirent *)dtrec;
6402 name = (char *)iam_it_key_get(&it->oi_it);
6404 RETURN(PTR_ERR(name));
6406 namelen = iam_it_key_size(&it->oi_it);
6408 rec = (const struct osd_fid_pack *)iam_it_rec_get(&it->oi_it);
6410 RETURN(PTR_ERR(rec));
6412 rc = osd_fid_unpack(fid, rec);
6416 hash = iam_it_store(&it->oi_it);
6418 /* IAM does not store object type in IAM index (dir) */
6419 osd_it_pack_dirent(lde, fid, hash, name, namelen,
6421 } else if (fid_is_quota(lu_object_fid(&it->oi_obj->oo_dt.do_lu))) {
6422 iam_reccpy(&it->oi_it.ii_path.ip_leaf,
6423 (struct iam_rec *)dtrec);
6424 osd_quota_unpack(it->oi_obj, dtrec);
6426 iam_reccpy(&it->oi_it.ii_path.ip_leaf,
6427 (struct iam_rec *)dtrec);
6434 * Returns cookie for current Iterator position.
6436 static __u64 osd_it_iam_store(const struct lu_env *env, const struct dt_it *di)
6438 struct osd_it_iam *it = (struct osd_it_iam *)di;
6440 return iam_it_store(&it->oi_it);
6444 * Restore iterator from cookie.
6446 * \param di osd iterator
6447 * \param hash Iterator location cookie
6449 * \retval +ve di points to record with least key not larger than key.
6450 * \retval 0 di points to exact matched key
6451 * \retval -ve failure
6454 static int osd_it_iam_load(const struct lu_env *env,
6455 const struct dt_it *di, __u64 hash)
6457 struct osd_it_iam *it = (struct osd_it_iam *)di;
6459 return iam_it_load(&it->oi_it, hash);
6462 static const struct dt_index_operations osd_index_iam_ops = {
6463 .dio_lookup = osd_index_iam_lookup,
6464 .dio_declare_insert = osd_index_declare_iam_insert,
6465 .dio_insert = osd_index_iam_insert,
6466 .dio_declare_delete = osd_index_declare_iam_delete,
6467 .dio_delete = osd_index_iam_delete,
6469 .init = osd_it_iam_init,
6470 .fini = osd_it_iam_fini,
6471 .get = osd_it_iam_get,
6472 .put = osd_it_iam_put,
6473 .next = osd_it_iam_next,
6474 .key = osd_it_iam_key,
6475 .key_size = osd_it_iam_key_size,
6476 .rec = osd_it_iam_rec,
6477 .store = osd_it_iam_store,
6478 .load = osd_it_iam_load
6484 * Creates or initializes iterator context.
6486 * \retval struct osd_it_ea, iterator structure on success
6489 static struct dt_it *osd_it_ea_init(const struct lu_env *env,
6490 struct dt_object *dt,
6493 struct osd_object *obj = osd_dt_obj(dt);
6494 struct osd_thread_info *info = osd_oti_get(env);
6495 struct osd_it_ea *oie;
6497 struct lu_object *lo = &dt->do_lu;
6498 struct dentry *obj_dentry;
6502 if (!dt_object_exists(dt) || obj->oo_destroyed)
6503 RETURN(ERR_PTR(-ENOENT));
6505 OBD_SLAB_ALLOC_PTR_GFP(oie, osd_itea_cachep, GFP_NOFS);
6507 RETURN(ERR_PTR(-ENOMEM));
6508 obj_dentry = &oie->oie_dentry;
6510 obj_dentry->d_inode = obj->oo_inode;
6511 obj_dentry->d_sb = osd_sb(osd_obj2dev(obj));
6512 obj_dentry->d_name.hash = 0;
6514 oie->oie_rd_dirent = 0;
6515 oie->oie_it_dirent = 0;
6516 oie->oie_dirent = NULL;
6517 if (unlikely(!info->oti_it_ea_buf_used)) {
6518 oie->oie_buf = info->oti_it_ea_buf;
6519 info->oti_it_ea_buf_used = 1;
6521 OBD_ALLOC(oie->oie_buf, OSD_IT_EA_BUFSIZE);
6522 if (oie->oie_buf == NULL)
6523 RETURN(ERR_PTR(-ENOMEM));
6527 file = &oie->oie_file;
6529 /* Only FMODE_64BITHASH or FMODE_32BITHASH should be set, NOT both. */
6530 if (attr & LUDA_64BITHASH)
6531 file->f_mode = FMODE_64BITHASH;
6533 file->f_mode = FMODE_32BITHASH;
6534 file->f_path.dentry = obj_dentry;
6535 file->f_mapping = obj->oo_inode->i_mapping;
6536 file->f_op = obj->oo_inode->i_fop;
6537 file->f_inode = obj->oo_inode;
6540 RETURN((struct dt_it *)oie);
6544 * Destroys or finishes the iterator context.
6546 * \param di iterator structure to be destroyed
6548 static void osd_it_ea_fini(const struct lu_env *env, struct dt_it *di)
6550 struct osd_thread_info *info = osd_oti_get(env);
6551 struct osd_it_ea *oie = (struct osd_it_ea *)di;
6552 struct osd_object *obj = oie->oie_obj;
6553 struct inode *inode = obj->oo_inode;
6556 oie->oie_file.f_op->release(inode, &oie->oie_file);
6557 osd_object_put(env, obj);
6558 if (unlikely(oie->oie_buf != info->oti_it_ea_buf))
6559 OBD_FREE(oie->oie_buf, OSD_IT_EA_BUFSIZE);
6561 info->oti_it_ea_buf_used = 0;
6562 OBD_SLAB_FREE_PTR(oie, osd_itea_cachep);
6567 * It positions the iterator at the given key, so that the next lookup continues
6568 * from that key. It is similar to dio_it->load(), but based on a key
6569 * rather than a file position.
6571 * As a special convention, osd_it_ea_get(env, di, "") has to rewind the iterator
6574 * TODO: Presently returns +1, since it is only used by mdd_dir_is_empty().
6576 static int osd_it_ea_get(const struct lu_env *env,
6577 struct dt_it *di, const struct dt_key *key)
6579 struct osd_it_ea *it = (struct osd_it_ea *)di;
6582 LASSERT(((const char *)key)[0] == '\0');
6583 it->oie_file.f_pos = 0;
6584 it->oie_rd_dirent = 0;
6585 it->oie_it_dirent = 0;
6586 it->oie_dirent = NULL;
6594 static void osd_it_ea_put(const struct lu_env *env, struct dt_it *di)
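/*
 * Callback context passed to ->iterate*(): binds the generic dir_context
 * actor to the osd directory iterator being filled.
 */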
6598 struct osd_filldir_cbs {
6599 struct dir_context ctx;
6600 struct osd_it_ea *it;
6603 * It is called internally by ->iterate*(). It fills the
6604 * iterator's in-memory data structure with required
6605 * information, i.e. name, namelen, rec_size, etc.
6607 * \param buf in which information to be filled in.
6608 * \param name name of the file in given dir
6610 * \retval 0 on success
6611 * \retval 1 on buffer full
6613 #ifdef HAVE_FILLDIR_USE_CTX
6614 static int osd_ldiskfs_filldir(struct dir_context *buf,
6616 static int osd_ldiskfs_filldir(void *buf,
6618 const char *name, int namelen,
6619 loff_t offset, __u64 ino, unsigned int d_type)
6621 struct osd_it_ea *it = ((struct osd_filldir_cbs *)buf)->it;
6622 struct osd_object *obj = it->oie_obj;
6623 struct osd_it_ea_dirent *ent = it->oie_dirent;
6624 struct lu_fid *fid = &ent->oied_fid;
6625 struct osd_fid_pack *rec;
6629 /* this should never happen */
6630 if (unlikely(namelen == 0 || namelen > LDISKFS_NAME_LEN)) {
6631 CERROR("ldiskfs returned invalid namelen %d\n", namelen);
6635 if ((void *)ent - it->oie_buf + sizeof(*ent) + namelen >
6639 /* "." is just the object itself. */
6640 if (namelen == 1 && name[0] == '.') {
6641 *fid = obj->oo_dt.do_lu.lo_header->loh_fid;
6642 } else if (d_type & LDISKFS_DIRENT_LUFID) {
6643 rec = (struct osd_fid_pack *)(name + namelen + 1);
6644 if (osd_fid_unpack(fid, rec) != 0)
6649 d_type &= ~LDISKFS_DIRENT_LUFID;
6651 /* Do NOT export the local root. */
6652 if (unlikely(osd_sb(osd_obj2dev(obj))->s_root->d_inode->i_ino == ino)) {
6653 ino = obj->oo_inode->i_ino;
6654 *fid = obj->oo_dt.do_lu.lo_header->loh_fid;
6657 ent->oied_ino = ino;
6658 ent->oied_off = offset;
6659 ent->oied_namelen = namelen;
6660 ent->oied_type = d_type;
6662 memcpy(ent->oied_name, name, namelen);
6664 it->oie_rd_dirent++;
6665 it->oie_dirent = (void *)ent + cfs_size_round(sizeof(*ent) + namelen);
6670 * Calls ->iterate*() to load a directory entry at a time
6671 * and store it in the iterator's in-memory data structure.
6673 * \param di iterator's in memory structure
6675 * \retval 0 on success
6676 * \retval -ve on error
6677 * \retval +1 reach the end of entry
6679 static int osd_ldiskfs_it_fill(const struct lu_env *env,
6680 const struct dt_it *di)
6682 struct osd_it_ea *it = (struct osd_it_ea *)di;
6683 struct osd_object *obj = it->oie_obj;
6684 struct inode *inode = obj->oo_inode;
6685 struct htree_lock *hlock = NULL;
6686 struct file *filp = &it->oie_file;
6688 struct osd_filldir_cbs buf = {
6689 .ctx.actor = osd_ldiskfs_filldir,
6694 it->oie_dirent = it->oie_buf;
6695 it->oie_rd_dirent = 0;
6697 if (obj->oo_hl_head != NULL) {
6698 hlock = osd_oti_get(env)->oti_hlock;
6699 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
6700 inode, LDISKFS_HLOCK_READDIR);
6702 down_read(&obj->oo_ext_idx_sem);
6705 filp->f_cred = current_cred();
6706 rc = osd_security_file_alloc(filp);
6710 filp->f_flags |= O_NOATIME;
6711 filp->f_mode |= FMODE_NONOTIFY;
6712 rc = iterate_dir(filp, &buf.ctx);
6717 ldiskfs_htree_unlock(hlock);
6719 up_read(&obj->oo_ext_idx_sem);
6721 if (it->oie_rd_dirent == 0) {
6723 * If it does not get any dirent, it means it has reached
6724 * the end of the dir
6726 it->oie_file.f_pos = ldiskfs_get_htree_eof(&it->oie_file);
6730 it->oie_dirent = it->oie_buf;
6731 it->oie_it_dirent = 1;
6738 * It calls osd_ldiskfs_it_fill() which will use ->iterate*()
6739 * to load a directory entry at a time and store it in the
6740 * iterator's in-memory data structure.
6742 * \param di iterator's in memory structure
6744 * \retval +ve iterator reached the end
6745 * \retval 0 iterator has not reached the end
6746 * \retval -ve on error
6748 static int osd_it_ea_next(const struct lu_env *env, struct dt_it *di)
6750 struct osd_it_ea *it = (struct osd_it_ea *)di;
6755 if (it->oie_it_dirent < it->oie_rd_dirent) {
6757 (void *)it->oie_dirent +
6758 cfs_size_round(sizeof(struct osd_it_ea_dirent) +
6759 it->oie_dirent->oied_namelen);
6760 it->oie_it_dirent++;
6763 if (it->oie_file.f_pos == ldiskfs_get_htree_eof(&it->oie_file))
6766 rc = osd_ldiskfs_it_fill(env, di);
6773 * Returns the key at current position from iterator's in memory structure.
6775 * \param di iterator's in memory structure
6777 * \retval key i.e. struct dt_key on success
6779 static struct dt_key *osd_it_ea_key(const struct lu_env *env,
6780 const struct dt_it *di)
6782 struct osd_it_ea *it = (struct osd_it_ea *)di;
6784 return (struct dt_key *)it->oie_dirent->oied_name;
6788 * Returns key's size at current position from iterator's in memory structure.
6790 * \param di iterator's in memory structure
6792 * \retval key_size size of the key on success
6794 static int osd_it_ea_key_size(const struct lu_env *env, const struct dt_it *di)
6796 struct osd_it_ea *it = (struct osd_it_ea *)di;
6798 return it->oie_dirent->oied_namelen;
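/*
 * If ldiskfs provides LDISKFS_DIR_ENTRY_LEN_(), redefine LDISKFS_DIR_REC_LEN()
 * in terms of it so the dirent space checks below work with either variant.
 */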
6801 #if defined LDISKFS_DIR_ENTRY_LEN && defined LDISKFS_DIR_ENTRY_LEN_
6802 #undef LDISKFS_DIR_REC_LEN
6803 #define LDISKFS_DIR_REC_LEN(de) LDISKFS_DIR_ENTRY_LEN_((de))
6806 static inline bool osd_dotdot_has_space(struct ldiskfs_dir_entry_2 *de)
6808 if (LDISKFS_DIR_REC_LEN(de) >=
6809 __LDISKFS_DIR_REC_LEN(2 + 1 + sizeof(struct osd_fid_pack)))
6816 osd_dirent_has_space(struct ldiskfs_dir_entry_2 *de, __u16 namelen,
6817 unsigned int blocksize, bool dotdot)
6820 return osd_dotdot_has_space(de);
6822 if (ldiskfs_rec_len_from_disk(de->rec_len, blocksize) >=
6823 __LDISKFS_DIR_REC_LEN(namelen + 1 + sizeof(struct osd_fid_pack)))
6830 osd_dirent_reinsert(const struct lu_env *env, struct osd_device *dev,
6831 handle_t *jh, struct dentry *dentry,
6832 const struct lu_fid *fid, struct buffer_head *bh,
6833 struct ldiskfs_dir_entry_2 *de, struct htree_lock *hlock,
6836 struct inode *dir = dentry->d_parent->d_inode;
6837 struct inode *inode = dentry->d_inode;
6838 struct osd_fid_pack *rec;
6839 struct ldiskfs_dentry_param *ldp;
6840 int namelen = dentry->d_name.len;
6842 struct osd_thread_info *info = osd_oti_get(env);
6846 if (!ldiskfs_has_feature_dirdata(inode->i_sb))
6849 /* There is enough space to hold the FID-in-dirent. */
6850 if (osd_dirent_has_space(de, namelen, dir->i_sb->s_blocksize, dotdot)) {
6851 rc = ldiskfs_journal_get_write_access(jh, bh);
6855 de->name[namelen] = 0;
6856 rec = (struct osd_fid_pack *)(de->name + namelen + 1);
6857 rec->fp_len = sizeof(struct lu_fid) + 1;
6858 fid_cpu_to_be((struct lu_fid *)rec->fp_area, fid);
6859 de->file_type |= LDISKFS_DIRENT_LUFID;
6860 rc = ldiskfs_handle_dirty_metadata(jh, NULL, bh);
6867 rc = ldiskfs_delete_entry(jh, dir, de, bh);
6871 ldp = (struct ldiskfs_dentry_param *)osd_oti_get(env)->oti_ldp;
6872 osd_get_ldiskfs_dirent_param(ldp, fid);
6873 dentry->d_fsdata = (void *)ldp;
6874 dquot_initialize(dir);
6875 rc = osd_ldiskfs_add_entry(info, dev, jh, dentry, inode, hlock);
6877 * It is too bad that we cannot reinsert the name entry.
6878 * That means we lose it!
6881 CDEBUG(D_LFSCK, "%s: fail to reinsert the dirent, "
6882 "dir = %lu/%u, name = %.*s, "DFID": rc = %d\n",
6883 osd_ino2name(inode),
6884 dir->i_ino, dir->i_generation, namelen,
6885 dentry->d_name.name, PFID(fid), rc);
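/*
 * Check whether the directory entry being iterated carries a valid
 * FID-in-dirent and matching LMA; if not, and repair is allowed, delete
 * the entry and re-insert it with the FID packed after the name.
 */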
6891 osd_dirent_check_repair(const struct lu_env *env, struct osd_object *obj,
6892 struct osd_it_ea *it, struct lu_fid *fid,
6893 struct osd_inode_id *id, __u32 *attr)
6895 struct osd_thread_info *info = osd_oti_get(env);
6896 struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
6897 struct osd_device *dev = osd_obj2dev(obj);
6898 struct super_block *sb = osd_sb(dev);
6899 const char *devname = osd_name(dev);
6900 struct osd_it_ea_dirent *ent = it->oie_dirent;
6901 struct inode *dir = obj->oo_inode;
6902 struct htree_lock *hlock = NULL;
6903 struct buffer_head *bh = NULL;
6904 handle_t *jh = NULL;
6905 struct ldiskfs_dir_entry_2 *de;
6906 struct dentry *dentry;
6907 struct inode *inode;
6908 const struct lu_fid *pfid = lu_object_fid(&obj->oo_dt.do_lu);
6911 bool dotdot = false;
6916 if (ent->oied_name[0] == '.') {
6917 if (ent->oied_namelen == 1)
6920 if (ent->oied_namelen == 2 && ent->oied_name[1] == '.')
6924 osd_id_gen(id, ent->oied_ino, OSD_OII_NOGEN);
6925 inode = osd_iget(info, dev, id);
6926 if (IS_ERR(inode)) {
6927 rc = PTR_ERR(inode);
6928 if (rc == -ENOENT || rc == -ESTALE) {
6930 * Maybe dangling name entry, or
6931 * corrupted directory entry.
6933 *attr |= LUDA_UNKNOWN;
6936 CDEBUG(D_LFSCK, "%s: fail to iget() for dirent "
6937 "check_repair, dir = %lu/%u, name = %.*s, "
6938 "ino = %llu, rc = %d\n",
6939 devname, dir->i_ino, dir->i_generation,
6940 ent->oied_namelen, ent->oied_name,
6947 dentry = osd_child_dentry_by_inode(env, dir, ent->oied_name,
6949 rc = osd_get_lma(info, inode, dentry, &info->oti_ost_attrs);
6950 if (rc == -ENODATA || !fid_is_sane(&lma->lma_self_fid))
6956 * We need to ensure that the name entry is still valid, because it may
6957 * already have been removed or renamed by somebody else.
6959 * The unlink or rename operation starts its journal handle before taking
6960 * the PDO lock, so to avoid deadlock we also need to start the journal
6961 * handle before taking the related PDO lock here. But since we do not
6962 * know whether anything will need repair before taking the PDO lock, we
6963 * just start the journal unconditionally.
6965 * We may need to remove the name entry first, then insert it back.
6966 * One credit is for user quota file update.
6967 * One credit is for group quota file update.
6968 * Two credits are for dirty inode.
6970 credits = osd_dto_credits_noquota[DTO_INDEX_DELETE] +
6971 osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1 + 1 + 2;
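	/*
	 * Rough flow from here, matching the code below: start the journal
	 * handle first, then take the htree/ext_idx lock (exclusive when we
	 * may modify the directory, shared otherwise), look up the entry,
	 * verify it against LMA/linkEA, optionally reinsert the
	 * FID-in-dirent, then unlock and stop the journal.
	 */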
6973 if (dev->od_dirent_journal != 0) {
6976 jh = osd_journal_start_sb(sb, LDISKFS_HT_MISC, credits);
6979 CDEBUG(D_LFSCK, "%s: fail to start trans for dirent "
6980 "check_repair, dir = %lu/%u, credits = %d, "
6981 "name = %.*s, ino = %llu: rc = %d\n",
6982 devname, dir->i_ino, dir->i_generation, credits,
6983 ent->oied_namelen, ent->oied_name,
6986 GOTO(out_inode, rc);
6989 if (obj->oo_hl_head != NULL) {
6990 hlock = osd_oti_get(env)->oti_hlock;
6992 * "0" means exclusive lock for the whole directory.
6993 * We need to prevent others from accessing this name entry
6994 * during the delete + insert. Neither HLOCK_ADD nor
6995 * HLOCK_DEL can guarantee the atomicity.
6997 ldiskfs_htree_lock(hlock, obj->oo_hl_head, dir, 0);
6999 down_write(&obj->oo_ext_idx_sem);
7002 if (obj->oo_hl_head != NULL) {
7003 hlock = osd_oti_get(env)->oti_hlock;
7004 ldiskfs_htree_lock(hlock, obj->oo_hl_head, dir,
7005 LDISKFS_HLOCK_LOOKUP);
7007 down_read(&obj->oo_ext_idx_sem);
7011 bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
7012 if (IS_ERR(bh) || le32_to_cpu(de->inode) != inode->i_ino) {
7013 *attr |= LUDA_IGNORE;
7019 * For the dotdot entry, if there is not enough space to hold the
7020 * FID-in-dirent, just keep it as is. This only happens when the
7021 * device was upgraded from 1.8 or restored from an MDT file-level backup.
7022 * For the whole directory, only the dotdot entry then has no FID-in-dirent
7023 * and needs to get its FID from the LMA during readdir, which barely affects performance.
7026 if (dotdot && !osd_dotdot_has_space(de)) {
7027 *attr |= LUDA_UNKNOWN;
7033 if (lu_fid_eq(fid, &lma->lma_self_fid))
7036 if (unlikely(lma->lma_compat & LMAC_NOT_IN_OI)) {
7037 struct lu_fid *tfid = &lma->lma_self_fid;
7039 if (likely(dotdot &&
7040 fid_seq(tfid) == FID_SEQ_LOCAL_FILE &&
7041 fid_oid(tfid) == REMOTE_PARENT_DIR_OID)) {
7043 * It must be REMOTE_PARENT_DIR and as the
7044 * 'dotdot' entry of remote directory
7046 *attr |= LUDA_IGNORE;
7048 CDEBUG(D_LFSCK, "%s: expect remote agent "
7049 "parent directory, but got %.*s under "
7050 "dir = %lu/%u with the FID "DFID"\n",
7051 devname, ent->oied_namelen,
7052 ent->oied_name, dir->i_ino,
7053 dir->i_generation, PFID(tfid));
7055 *attr |= LUDA_UNKNOWN;
7062 if (!fid_is_zero(fid)) {
7063 rc = osd_verify_ent_by_linkea(env, inode, pfid, ent->oied_name,
7065 if (rc == -ENOENT ||
7067 !(dev->od_scrub.os_scrub.os_file.sf_flags & SF_UPGRADE))) {
7069 * The linkEA does not recognize the dirent entry;
7070 * that may be because the dirent entry is corrupted
7071 * and points to some other inode.
7073 CDEBUG(D_LFSCK, "%s: the target inode does not "
7074 "recognize the dirent, dir = %lu/%u, "
7075 " name = %.*s, ino = %llu, "
7076 DFID": rc = %d\n", devname, dir->i_ino,
7077 dir->i_generation, ent->oied_namelen,
7078 ent->oied_name, ent->oied_ino, PFID(fid), rc);
7079 *attr |= LUDA_UNKNOWN;
7084 if (rc && rc != -ENODATA) {
7085 CDEBUG(D_LFSCK, "%s: fail to verify FID in the dirent, "
7086 "dir = %lu/%u, name = %.*s, ino = %llu, "
7087 DFID": rc = %d\n", devname, dir->i_ino,
7088 dir->i_generation, ent->oied_namelen,
7089 ent->oied_name, ent->oied_ino, PFID(fid), rc);
7090 *attr |= LUDA_UNKNOWN;
7098 * The linkEA recognizes the dirent entry, so the FID-in-LMA is
7099 * valid and trusted, regardless of whether fid_is_sane(fid) holds.
7101 if (*attr & LUDA_VERIFY_DRYRUN) {
7102 *fid = lma->lma_self_fid;
7103 *attr |= LUDA_REPAIR;
7110 dev->od_dirent_journal = 1;
7111 if (hlock != NULL) {
7112 ldiskfs_htree_unlock(hlock);
7115 up_read(&obj->oo_ext_idx_sem);
7121 *fid = lma->lma_self_fid;
7123 /* Update or append the FID-in-dirent. */
7124 rc = osd_dirent_reinsert(env, dev, jh, dentry, fid,
7125 bh, de, hlock, dotdot);
7127 *attr |= LUDA_REPAIR;
7129 CDEBUG(D_LFSCK, "%s: fail to re-insert FID after "
7130 "the dirent, dir = %lu/%u, name = %.*s, "
7131 "ino = %llu, "DFID": rc = %d\n",
7132 devname, dir->i_ino, dir->i_generation,
7133 ent->oied_namelen, ent->oied_name,
7134 ent->oied_ino, PFID(fid), rc);
7136 /* lma is NULL, trust the FID-in-dirent if it is valid. */
7137 if (*attr & LUDA_VERIFY_DRYRUN) {
7138 if (fid_is_sane(fid)) {
7139 *attr |= LUDA_REPAIR;
7140 } else if (dev->od_index == 0) {
7141 lu_igif_build(fid, inode->i_ino,
7142 inode->i_generation);
7143 *attr |= LUDA_UPGRADE;
7151 dev->od_dirent_journal = 1;
7152 if (hlock != NULL) {
7153 ldiskfs_htree_unlock(hlock);
7156 up_read(&obj->oo_ext_idx_sem);
7163 if (unlikely(fid_is_sane(fid))) {
7165 * FID-in-dirent exists, but FID-in-LMA is lost.
7166 * Trust the FID-in-dirent, and add FID-in-LMA.
7168 rc = osd_ea_fid_set(info, inode, fid, 0, 0);
7170 *attr |= LUDA_REPAIR;
7172 CDEBUG(D_LFSCK, "%s: fail to set LMA for "
7173 "update dirent, dir = %lu/%u, "
7174 "name = %.*s, ino = %llu, "
7176 devname, dir->i_ino, dir->i_generation,
7177 ent->oied_namelen, ent->oied_name,
7178 ent->oied_ino, PFID(fid), rc);
7179 } else if (dev->od_index == 0) {
7180 lu_igif_build(fid, inode->i_ino, inode->i_generation);
7182 * It is probably an IGIF object. Only append the
7183 * FID-in-dirent; OI scrub will process the FID-in-LMA.
7185 rc = osd_dirent_reinsert(env, dev, jh, dentry, fid,
7186 bh, de, hlock, dotdot);
7188 *attr |= LUDA_UPGRADE;
7190 CDEBUG(D_LFSCK, "%s: fail to append IGIF "
7191 "after the dirent, dir = %lu/%u, "
7192 "name = %.*s, ino = %llu, "
7194 devname, dir->i_ino, dir->i_generation,
7195 ent->oied_namelen, ent->oied_name,
7196 ent->oied_ino, PFID(fid), rc);
7205 if (hlock != NULL) {
7206 ldiskfs_htree_unlock(hlock);
7208 if (dev->od_dirent_journal != 0)
7209 up_write(&obj->oo_ext_idx_sem);
7211 up_read(&obj->oo_ext_idx_sem);
7215 ldiskfs_journal_stop(jh);
7219 if (rc >= 0 && !dirty)
7220 dev->od_dirent_journal = 0;
7226 * Returns the value at current position from iterator's in memory structure.
7228 * \param di struct osd_it_ea, iterator's in memory structure
7229 * \param attr attr requested for dirent.
7230 * \param lde lustre dirent
7232 * \retval 0 no error and \a lde has the correct lustre dirent.
7233 * \retval -ve on error
7235 static inline int osd_it_ea_rec(const struct lu_env *env,
7236 const struct dt_it *di,
7237 struct dt_rec *dtrec, __u32 attr)
7239 struct osd_it_ea *it = (struct osd_it_ea *)di;
7240 struct osd_object *obj = it->oie_obj;
7241 struct osd_device *dev = osd_obj2dev(obj);
7242 struct osd_thread_info *oti = osd_oti_get(env);
7243 struct osd_inode_id *id = &oti->oti_id;
7244 struct lu_fid *fid = &it->oie_dirent->oied_fid;
7245 struct lu_dirent *lde = (struct lu_dirent *)dtrec;
7246 __u32 ino = it->oie_dirent->oied_ino;
7251 LASSERT(!is_remote_parent_ino(dev, obj->oo_inode->i_ino));
7253 if (attr & LUDA_VERIFY) {
7254 if (unlikely(is_remote_parent_ino(dev, ino))) {
7255 attr |= LUDA_IGNORE;
7257 * If the parent is on remote MDT, and there
7258 * is no FID-in-dirent, then we have to get
7259 * the parent FID from the linkEA.
7261 if (!fid_is_sane(fid) &&
7262 it->oie_dirent->oied_namelen == 2 &&
7263 it->oie_dirent->oied_name[0] == '.' &&
7264 it->oie_dirent->oied_name[1] == '.')
7265 osd_get_pfid_from_linkea(env, obj, fid);
7267 rc = osd_dirent_check_repair(env, obj, it, fid, id,
7271 if (!fid_is_sane(fid))
7272 attr |= LUDA_UNKNOWN;
7274 attr &= ~LU_DIRENT_ATTRS_MASK;
7275 if (!fid_is_sane(fid)) {
7276 bool is_dotdot = false;
7278 if (it->oie_dirent->oied_namelen == 2 &&
7279 it->oie_dirent->oied_name[0] == '.' &&
7280 it->oie_dirent->oied_name[1] == '.')
7283 * If the parent is on remote MDT, and there
7284 * is no FID-in-dirent, then we have to get
7285 * the parent FID from the linkEA.
7287 if (is_remote_parent_ino(dev, ino) && is_dotdot) {
7288 rc = osd_get_pfid_from_linkea(env, obj, fid);
7290 if (is_dotdot == false &&
7291 OBD_FAIL_CHECK(OBD_FAIL_FID_LOOKUP))
7294 rc = osd_ea_fid_get(env, obj, ino, fid, id);
7297 osd_id_gen(id, ino, OSD_OII_NOGEN);
7301 /* Pack the entry anyway, at least the offset is right. */
7302 osd_it_pack_dirent(lde, fid, it->oie_dirent->oied_off,
7303 it->oie_dirent->oied_name,
7304 it->oie_dirent->oied_namelen,
7305 it->oie_dirent->oied_type, attr);
7310 if (osd_remote_fid(env, dev, fid))
7313 if (likely(!(attr & (LUDA_IGNORE | LUDA_UNKNOWN)) && rc == 0))
7314 osd_add_oi_cache(oti, dev, id, fid);
7316 RETURN(rc > 0 ? 0 : rc);
7320 * Returns the record size at the current position.
7322 * This function will return the record (lu_dirent) size in bytes.
7324 * \param[in] env execution environment
7325 * \param[in] di iterator's in memory structure
7326 * \param[in] attr attribute of the entry, only requires LUDA_TYPE to
7327 * calculate the lu_dirent size.
7329 * \retval record size(in bytes & in memory) of the current lu_dirent
7332 static int osd_it_ea_rec_size(const struct lu_env *env, const struct dt_it *di,
7335 struct osd_it_ea *it = (struct osd_it_ea *)di;
7337 return lu_dirent_calc_size(it->oie_dirent->oied_namelen, attr);
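/*
 * A rough description of the size returned above (an approximation; see
 * lu_dirent_calc_size() for the authoritative math): sizeof(struct lu_dirent)
 * plus the name length, plus room for struct luda_type when LUDA_TYPE is
 * requested, rounded up for alignment.
 */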
7341 * Returns a cookie for the current position of the iterator head, so that
7342 * the user can use this cookie to load/start the iterator next time.
7344 * \param di iterator's in memory structure
7346 * \retval cookie for current position, on success
7348 static __u64 osd_it_ea_store(const struct lu_env *env, const struct dt_it *di)
7350 struct osd_it_ea *it = (struct osd_it_ea *)di;
7352 return it->oie_dirent->oied_off;
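/*
 * The cookie returned above is the hash-based readdir offset recorded for
 * the current entry; osd_it_ea_load() below feeds it back through
 * it->oie_file.f_pos before refilling the in-memory dirent buffer.
 */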
7356 * It calls osd_ldiskfs_it_fill() which will use ->iterate*()
7357 * to load one directory entry at a time and store it
7358 * in the iterator's in-memory data structure.
7360 * \param di struct osd_it_ea, iterator's in memory structure
7362 * \retval +ve on success
7363 * \retval -ve on error
7365 static int osd_it_ea_load(const struct lu_env *env,
7366 const struct dt_it *di, __u64 hash)
7368 struct osd_it_ea *it = (struct osd_it_ea *)di;
7372 it->oie_file.f_pos = hash;
7374 rc = osd_ldiskfs_it_fill(env, di);
7385 * Index lookup function for interoperability mode (b11826).
7387 * \param key file name to be searched
7389 * \retval +ve, on success
7390 * \retval -ve, on error
7392 static int osd_index_ea_lookup(const struct lu_env *env, struct dt_object *dt,
7393 struct dt_rec *rec, const struct dt_key *key)
7395 struct osd_object *obj = osd_dt_obj(dt);
7400 LASSERT(S_ISDIR(obj->oo_inode->i_mode));
7401 LINVRNT(osd_invariant(obj));
7403 rc = osd_ea_lookup_rec(env, obj, rec, key);
7410 * Index and Iterator operations for interoperability
7411 * mode (i.e. to run 2.0 mds on 1.8 disk) (b11826)
7413 static const struct dt_index_operations osd_index_ea_ops = {
7414 .dio_lookup = osd_index_ea_lookup,
7415 .dio_declare_insert = osd_index_declare_ea_insert,
7416 .dio_insert = osd_index_ea_insert,
7417 .dio_declare_delete = osd_index_declare_ea_delete,
7418 .dio_delete = osd_index_ea_delete,
7420 .init = osd_it_ea_init,
7421 .fini = osd_it_ea_fini,
7422 .get = osd_it_ea_get,
7423 .put = osd_it_ea_put,
7424 .next = osd_it_ea_next,
7425 .key = osd_it_ea_key,
7426 .key_size = osd_it_ea_key_size,
7427 .rec = osd_it_ea_rec,
7428 .rec_size = osd_it_ea_rec_size,
7429 .store = osd_it_ea_store,
7430 .load = osd_it_ea_load
7434 static void *osd_key_init(const struct lu_context *ctx,
7435 struct lu_context_key *key)
7437 struct osd_thread_info *info;
7439 OBD_ALLOC_PTR(info);
7441 return ERR_PTR(-ENOMEM);
7443 OBD_ALLOC(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
7444 if (info->oti_it_ea_buf == NULL)
7447 info->oti_env = container_of(ctx, struct lu_env, le_ctx);
7449 info->oti_hlock = ldiskfs_htree_lock_alloc();
7450 if (info->oti_hlock == NULL)
7456 OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
7459 return ERR_PTR(-ENOMEM);
7462 static void osd_key_fini(const struct lu_context *ctx,
7463 struct lu_context_key *key, void *data)
7465 struct osd_thread_info *info = data;
7466 struct ldiskfs_inode_info *lli = LDISKFS_I(info->oti_inode);
7467 struct osd_idmap_cache *idc = info->oti_ins_cache;
7469 if (info->oti_dio_pages) {
7471 for (i = 0; i < PTLRPC_MAX_BRW_PAGES; i++) {
7472 struct page *page = info->oti_dio_pages[i];
7474 LASSERT(PagePrivate2(page));
7475 LASSERT(PageLocked(page));
7476 ClearPagePrivate2(page);
7481 OBD_FREE_PTR_ARRAY_LARGE(info->oti_dio_pages,
7482 PTLRPC_MAX_BRW_PAGES);
7485 if (info->oti_inode != NULL)
7487 if (info->oti_hlock != NULL)
7488 ldiskfs_htree_lock_free(info->oti_hlock);
7489 OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
7490 lu_buf_free(&info->oti_iobuf.dr_pg_buf);
7491 lu_buf_free(&info->oti_iobuf.dr_bl_buf);
7492 lu_buf_free(&info->oti_iobuf.dr_lnb_buf);
7493 lu_buf_free(&info->oti_big_buf);
7495 LASSERT(info->oti_ins_cache_size > 0);
7496 OBD_FREE_PTR_ARRAY(idc, info->oti_ins_cache_size);
7497 info->oti_ins_cache = NULL;
7498 info->oti_ins_cache_size = 0;
7503 static void osd_key_exit(const struct lu_context *ctx,
7504 struct lu_context_key *key, void *data)
7506 struct osd_thread_info *info = data;
7508 LASSERT(info->oti_r_locks == 0);
7509 LASSERT(info->oti_w_locks == 0);
7510 LASSERT(info->oti_txns == 0);
7513 /* type constructor/destructor: osd_type_init, osd_type_fini */
7514 LU_TYPE_INIT_FINI(osd, &osd_key);
7516 struct lu_context_key osd_key = {
7517 .lct_tags = LCT_DT_THREAD | LCT_MD_THREAD | LCT_MG_THREAD | LCT_LOCAL,
7518 .lct_init = osd_key_init,
7519 .lct_fini = osd_key_fini,
7520 .lct_exit = osd_key_exit
7524 static int osd_device_init(const struct lu_env *env, struct lu_device *d,
7525 const char *name, struct lu_device *next)
7527 struct osd_device *osd = osd_dev(d);
7529 if (strlcpy(osd->od_svname, name, sizeof(osd->od_svname)) >=
7530 sizeof(osd->od_svname))
7532 return osd_procfs_init(osd, name);
7535 static int osd_fid_init(const struct lu_env *env, struct osd_device *osd)
7537 struct seq_server_site *ss = osd_seq_site(osd);
7542 if (osd->od_is_ost || osd->od_cl_seq != NULL)
7545 if (unlikely(ss == NULL))
7548 OBD_ALLOC_PTR(osd->od_cl_seq);
7549 if (osd->od_cl_seq == NULL)
7552 seq_client_init(osd->od_cl_seq, NULL, LUSTRE_SEQ_METADATA,
7553 osd->od_svname, ss->ss_server_seq);
7555 if (ss->ss_node_id == 0) {
7557 * If the OSD is on the sequence controller (MDT0), then allocate
7558 * the sequence here; otherwise allocate the sequence after connecting
7559 * to MDT0 (see mdt_register_lwp_callback()).
7561 rc = seq_server_alloc_meta(osd->od_cl_seq->lcs_srv,
7562 &osd->od_cl_seq->lcs_space, env);
7568 static void osd_fid_fini(const struct lu_env *env, struct osd_device *osd)
7570 if (osd->od_cl_seq == NULL)
7573 seq_client_fini(osd->od_cl_seq);
7574 OBD_FREE_PTR(osd->od_cl_seq);
7575 osd->od_cl_seq = NULL;
7578 static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
7582 /* shutdown quota slave instance associated with the device */
7583 if (o->od_quota_slave_md != NULL) {
7584 struct qsd_instance *qsd = o->od_quota_slave_md;
7586 o->od_quota_slave_md = NULL;
7590 if (o->od_quota_slave_dt != NULL) {
7591 struct qsd_instance *qsd = o->od_quota_slave_dt;
7593 o->od_quota_slave_dt = NULL;
7597 osd_fid_fini(env, o);
7598 osd_scrub_cleanup(env, o);
7603 static void osd_umount(const struct lu_env *env, struct osd_device *o)
7607 if (o->od_mnt != NULL) {
7608 shrink_dcache_sb(osd_sb(o));
7609 osd_sync(env, &o->od_dt_dev);
7618 static int osd_mount(const struct lu_env *env,
7619 struct osd_device *o, struct lustre_cfg *cfg)
7621 const char *name = lustre_cfg_string(cfg, 0);
7622 const char *dev = lustre_cfg_string(cfg, 1);
7624 unsigned long page, s_flags, lmd_flags = 0;
7625 struct page *__page;
7626 struct file_system_type *type;
7627 char *options = NULL;
7629 struct osd_thread_info *info = osd_oti_get(env);
7630 struct lu_fid *fid = &info->oti_fid;
7631 struct inode *inode;
7632 int rc = 0, force_over_1024tb = 0;
7636 if (o->od_mnt != NULL)
7639 if (strlen(dev) >= sizeof(o->od_mntdev))
7641 strcpy(o->od_mntdev, dev);
7643 str = lustre_cfg_string(cfg, 2);
7644 s_flags = simple_strtoul(str, NULL, 0);
7645 str = strstr(str, ":");
7647 lmd_flags = simple_strtoul(str + 1, NULL, 0);
7648 opts = lustre_cfg_string(cfg, 3);
7650 if (opts == NULL || strstr(opts, "bigendian_extents") == NULL) {
7651 CERROR("%s: device %s extents feature is not guaranteed to "
7652 "work on big-endian systems. Use \"bigendian_extents\" "
7653 "mount option to override.\n", name, dev);
7657 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 0, 53, 0)
7658 if (opts != NULL && strstr(opts, "force_over_128tb") != NULL) {
7659 CWARN("force_over_128tb option is deprecated. Filesystems smaller than 1024TB can be created without any force option. Use force_over_1024tb option for filesystems larger than 1024TB.\n");
7662 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 1, 53, 0)
7663 if (opts != NULL && strstr(opts, "force_over_256tb") != NULL) {
7664 CWARN("force_over_256tb option is deprecated. Filesystems smaller than 1024TB can be created without any force options. Use force_over_1024tb option for filesystems larger than 1024TB.\n");
7667 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 53, 0)
7668 if (opts != NULL && strstr(opts, "force_over_512tb") != NULL) {
7669 CWARN("force_over_512tb option is deprecated. Filesystems smaller than 1024TB can be created without any force options. Use force_over_1024tb option for filesystems larger than 1024TB.\n");
7673 if (opts != NULL && strstr(opts, "force_over_1024tb") != NULL)
7674 force_over_1024tb = 1;
7676 __page = alloc_page(GFP_KERNEL);
7678 GOTO(out, rc = -ENOMEM);
7679 page = (unsigned long)page_address(__page);
7680 options = (char *)page;
7683 /* strip out the options for backward compatibility */
7684 static const char * const sout[] = {
7691 /* strip out option we processed in osd */
7692 "bigendian_extents",
7696 "force_over_1024tb",
7699 strncat(options, opts, PAGE_SIZE);
7700 for (rc = 0, str = options; sout[rc]; ) {
7701 char *op = strstr(str, sout[rc]);
7708 if (op == options || *(op - 1) == ',') {
7709 str = op + strlen(sout[rc]);
7710 if (*str == ',' || *str == '\0') {
7711 if (*str == ',') str++;
7712 memmove(op, str, strlen(str) + 1);
7715 for (str = op; *str != ',' && *str != '\0'; str++)
7719 strncat(options, "user_xattr,acl", PAGE_SIZE);
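	/*
	 * At this point 'options' holds either the caller-supplied opts with
	 * the Lustre-private options above stripped out, or the default
	 * "user_xattr,acl" when no opts were given.
	 */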
7722 /* Glom up mount options */
7723 if (*options != '\0')
7724 strncat(options, ",", PAGE_SIZE);
7725 strncat(options, "no_mbcache,nodelalloc", PAGE_SIZE);
7727 type = get_fs_type("ldiskfs");
7729 CERROR("%s: cannot find ldiskfs module\n", name);
7730 GOTO(out, rc = -ENODEV);
7733 o->od_mnt = vfs_kern_mount(type, s_flags, dev, options);
7734 module_put(type->owner);
7736 if (IS_ERR(o->od_mnt)) {
7737 rc = PTR_ERR(o->od_mnt);
7739 CERROR("%s: can't mount %s: %d\n", name, dev, rc);
7743 if (ldiskfs_blocks_count(LDISKFS_SB(osd_sb(o))->s_es) <<
7744 osd_sb(o)->s_blocksize_bits > 1024ULL << 40 &&
7745 force_over_1024tb == 0) {
7746 CERROR("%s: device %s LDISKFS has not been tested on filesystems larger than 1024TB and may cause data corruption. Use 'force_over_1024tb' mount option to override.\n",
7748 GOTO(out_mnt, rc = -EINVAL);
7751 if (lmd_flags & LMD_FLG_DEV_RDONLY) {
7752 LCONSOLE_WARN("%s: dev_rdonly is not supported on this device",
7755 GOTO(out_mnt, rc = -EOPNOTSUPP);
7758 if (!ldiskfs_has_feature_journal(o->od_mnt->mnt_sb)) {
7759 CERROR("%s: device %s is mounted w/o journal\n", name, dev);
7760 GOTO(out_mnt, rc = -EINVAL);
7763 #ifdef LDISKFS_MOUNT_DIRDATA
7764 if (ldiskfs_has_feature_dirdata(o->od_mnt->mnt_sb))
7765 LDISKFS_SB(osd_sb(o))->s_mount_opt |= LDISKFS_MOUNT_DIRDATA;
7766 else if (strstr(name, "MDT")) /* don't complain for MGT or OSTs */
7767 CWARN("%s: device %s was upgraded from Lustre-1.x without "
7768 "enabling the dirdata feature. If you do not want to "
7769 "downgrade to Lustre-1.x again, you can enable it via "
7770 "'tune2fs -O dirdata device'\n", name, dev);
7772 inode = osd_sb(o)->s_root->d_inode;
7773 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
7774 rc = osd_ea_fid_set(info, inode, fid, LMAC_NOT_IN_OI, 0);
7776 CERROR("%s: failed to set lma on %s root inode\n", name, dev);
7780 if (lmd_flags & LMD_FLG_NOSCRUB)
7781 o->od_auto_scrub_interval = AS_NEVER;
7783 if (blk_queue_nonrot(bdev_get_queue(osd_sb(o)->s_bdev))) {
7784 /* do not use pagecache with flash-backed storage */
7785 o->od_writethrough_cache = 0;
7786 o->od_read_cache = 0;
7797 __free_page(__page);
7802 static struct lu_device *osd_device_fini(const struct lu_env *env,
7803 struct lu_device *d)
7805 struct osd_device *o = osd_dev(d);
7809 osd_index_backup(env, o, false);
7810 osd_shutdown(env, o);
7812 if (o->od_oi_table != NULL)
7813 osd_oi_fini(osd_oti_get(env), o);
7814 osd_obj_map_fini(o);
7820 static int osd_device_init0(const struct lu_env *env,
7821 struct osd_device *o,
7822 struct lustre_cfg *cfg)
7824 struct lu_device *l = osd2lu_dev(o);
7825 struct osd_thread_info *info;
7829 /* if the module was re-loaded, env can lose its keys */
7830 rc = lu_env_refill((struct lu_env *)env);
7833 info = osd_oti_get(env);
7836 l->ld_ops = &osd_lu_ops;
7837 o->od_dt_dev.dd_ops = &osd_dt_ops;
7839 spin_lock_init(&o->od_osfs_lock);
7840 mutex_init(&o->od_otable_mutex);
7841 INIT_LIST_HEAD(&o->od_orphan_list);
7842 INIT_LIST_HEAD(&o->od_index_backup_list);
7843 INIT_LIST_HEAD(&o->od_index_restore_list);
7844 spin_lock_init(&o->od_lock);
7845 o->od_index_backup_policy = LIBP_NONE;
7848 o->od_read_cache = 1;
7849 o->od_writethrough_cache = 1;
7850 o->od_readcache_max_filesize = OSD_MAX_CACHE_SIZE;
7851 o->od_readcache_max_iosize = OSD_READCACHE_MAX_IO_MB << 20;
7852 o->od_writethrough_max_iosize = OSD_WRITECACHE_MAX_IO_MB << 20;
7853 o->od_auto_scrub_interval = AS_DEFAULT;
7855 cplen = strlcpy(o->od_svname, lustre_cfg_string(cfg, 4),
7856 sizeof(o->od_svname));
7857 if (cplen >= sizeof(o->od_svname)) {
7862 o->od_index_backup_stop = 0;
7863 o->od_index = -1; /* -1 means index is invalid */
7864 rc = server_name2index(o->od_svname, &o->od_index, NULL);
7865 if (rc == LDD_F_SV_TYPE_OST)
7868 o->od_full_scrub_ratio = OFSR_DEFAULT;
7869 o->od_full_scrub_threshold_rate = FULL_SCRUB_THRESHOLD_RATE_DEFAULT;
7870 rc = osd_mount(env, o, cfg);
7874 /* Can only check block device after mount */
7875 o->od_nonrotational =
7876 blk_queue_nonrot(bdev_get_queue(osd_sb(o)->s_bdev));
7878 rc = osd_obj_map_init(env, o);
7882 rc = lu_site_init(&o->od_site, l);
7884 GOTO(out_compat, rc);
7885 o->od_site.ls_bottom_dev = l;
7887 rc = lu_site_init_finish(&o->od_site);
7891 INIT_LIST_HEAD(&o->od_ios_list);
7892 /* setup scrub, including OI files initialization */
7894 rc = osd_scrub_setup(env, o);
7899 rc = osd_procfs_init(o, o->od_svname);
7901 CERROR("%s: can't initialize procfs: rc = %d\n",
7903 GOTO(out_scrub, rc);
7906 LASSERT(l->ld_site->ls_linkage.next != NULL);
7907 LASSERT(l->ld_site->ls_linkage.prev != NULL);
7909 /* initialize quota slave instance */
7911 /* currently there is no need to prepare qsd_instance_md for OST */
7911 if (!o->od_is_ost) {
7912 o->od_quota_slave_md = qsd_init(env, o->od_svname,
7914 o->od_proc_entry, true);
7915 if (IS_ERR(o->od_quota_slave_md)) {
7916 rc = PTR_ERR(o->od_quota_slave_md);
7917 o->od_quota_slave_md = NULL;
7918 GOTO(out_procfs, rc);
7922 o->od_quota_slave_dt = qsd_init(env, o->od_svname, &o->od_dt_dev,
7923 o->od_proc_entry, false);
7925 if (IS_ERR(o->od_quota_slave_dt)) {
7926 if (o->od_quota_slave_md != NULL) {
7927 qsd_fini(env, o->od_quota_slave_md);
7928 o->od_quota_slave_md = NULL;
7931 rc = PTR_ERR(o->od_quota_slave_dt);
7932 o->od_quota_slave_dt = NULL;
7933 GOTO(out_procfs, rc);
7941 osd_scrub_cleanup(env, o);
7943 lu_site_fini(&o->od_site);
7945 osd_obj_map_fini(o);
7952 static struct lu_device *osd_device_alloc(const struct lu_env *env,
7953 struct lu_device_type *t,
7954 struct lustre_cfg *cfg)
7956 struct osd_device *o;
7961 return ERR_PTR(-ENOMEM);
7963 rc = dt_device_init(&o->od_dt_dev, t);
7966 * Because the ctx might be revived in dt_device_init,
7967 * refill the env here
7969 lu_env_refill((struct lu_env *)env);
7970 rc = osd_device_init0(env, o, cfg);
7972 dt_device_fini(&o->od_dt_dev);
7975 if (unlikely(rc != 0))
7978 return rc == 0 ? osd2lu_dev(o) : ERR_PTR(rc);
7981 static struct lu_device *osd_device_free(const struct lu_env *env,
7982 struct lu_device *d)
7984 struct osd_device *o = osd_dev(d);
7988 /* XXX: make osd top device in order to release reference */
7989 d->ld_site->ls_top_dev = d;
7990 lu_site_purge(env, d->ld_site, -1);
7991 lu_site_print(env, d->ld_site, &d->ld_site->ls_obj_hash.nelems,
7992 D_ERROR, lu_cdebug_printer);
7993 lu_site_fini(&o->od_site);
7994 dt_device_fini(&o->od_dt_dev);
7999 static int osd_process_config(const struct lu_env *env,
8000 struct lu_device *d, struct lustre_cfg *cfg)
8002 struct osd_device *o = osd_dev(d);
8008 switch (cfg->lcfg_command) {
8010 rc = osd_mount(env, o, cfg);
8014 * Handle the case where LCFG_PRE_CLEANUP was not called in advance,
8015 * which may happen if a failure was hit during the mount process.
8017 osd_index_backup(env, o, false);
8018 lu_dev_del_linkage(d->ld_site, d);
8019 rc = osd_shutdown(env, o);
8022 LASSERT(&o->od_dt_dev);
8023 count = class_modify_config(cfg, PARAM_OSD,
8024 &o->od_dt_dev.dd_kobj);
8026 count = class_modify_config(cfg, PARAM_OST,
8027 &o->od_dt_dev.dd_kobj);
8028 rc = count > 0 ? 0 : count;
8030 case LCFG_PRE_CLEANUP:
8032 osd_index_backup(env, o,
8033 o->od_index_backup_policy != LIBP_NONE);
8043 static int osd_recovery_complete(const struct lu_env *env,
8044 struct lu_device *d)
8046 struct osd_device *osd = osd_dev(d);
8051 if (osd->od_quota_slave_md == NULL && osd->od_quota_slave_dt == NULL)
8055 * start qsd instance on recovery completion; this notifies the quota
8056 * slave code that we are about to process new requests now
8058 rc = qsd_start(env, osd->od_quota_slave_dt);
8059 if (rc == 0 && osd->od_quota_slave_md != NULL)
8060 rc = qsd_start(env, osd->od_quota_slave_md);
8066 * we use exports to track all osd users
8068 static int osd_obd_connect(const struct lu_env *env, struct obd_export **exp,
8069 struct obd_device *obd, struct obd_uuid *cluuid,
8070 struct obd_connect_data *data, void *localdata)
8072 struct osd_device *osd = osd_dev(obd->obd_lu_dev);
8073 struct lustre_handle conn;
8078 CDEBUG(D_CONFIG, "connect #%d\n", osd->od_connects);
8080 rc = class_connect(&conn, obd, cluuid);
8084 *exp = class_conn2export(&conn);
8086 spin_lock(&osd->od_osfs_lock);
8088 spin_unlock(&osd->od_osfs_lock);
8094 * once the last export (we don't count the self-export) has disappeared,
8095 * the osd can be released
8097 static int osd_obd_disconnect(struct obd_export *exp)
8099 struct obd_device *obd = exp->exp_obd;
8100 struct osd_device *osd = osd_dev(obd->obd_lu_dev);
8101 int rc, release = 0;
8105 /* Only disconnect the underlying layers on the final disconnect. */
8106 spin_lock(&osd->od_osfs_lock);
8108 if (osd->od_connects == 0)
8110 spin_unlock(&osd->od_osfs_lock);
8112 rc = class_disconnect(exp); /* bz 9811 */
8114 if (rc == 0 && release)
8115 class_manual_cleanup(obd);
8119 static int osd_prepare(const struct lu_env *env, struct lu_device *pdev,
8120 struct lu_device *dev)
8122 struct osd_device *osd = osd_dev(dev);
8123 struct lr_server_data *lsd =
8124 &osd->od_dt_dev.dd_lu_dev.ld_site->ls_tgt->lut_lsd;
8129 if (osd->od_quota_slave_md != NULL) {
8130 /* set up quota slave objects for inode */
8131 result = qsd_prepare(env, osd->od_quota_slave_md);
8136 if (osd->od_quota_slave_dt != NULL) {
8137 /* set up quota slave objects for block */
8138 result = qsd_prepare(env, osd->od_quota_slave_dt);
8144 if (lsd->lsd_feature_incompat & OBD_COMPAT_OST) {
8145 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 0, 52, 0)
8146 if (lsd->lsd_feature_rocompat & OBD_ROCOMPAT_IDX_IN_IDIF) {
8147 osd->od_index_in_idif = 1;
8149 osd->od_index_in_idif = 0;
8150 result = osd_register_proc_index_in_idif(osd);
8155 osd->od_index_in_idif = 1;
8159 result = osd_fid_init(env, osd);
8165 * Implementation of lu_device_operations::ldo_fid_alloc() for OSD
8169 * see include/lu_object.h for the details.
8171 static int osd_fid_alloc(const struct lu_env *env, struct lu_device *d,
8172 struct lu_fid *fid, struct lu_object *parent,
8173 const struct lu_name *name)
8175 struct osd_device *osd = osd_dev(d);
8177 return seq_client_alloc_fid(env, osd->od_cl_seq, fid);
8180 static const struct lu_object_operations osd_lu_obj_ops = {
8181 .loo_object_init = osd_object_init,
8182 .loo_object_delete = osd_object_delete,
8183 .loo_object_release = osd_object_release,
8184 .loo_object_free = osd_object_free,
8185 .loo_object_print = osd_object_print,
8186 .loo_object_invariant = osd_object_invariant
8189 const struct lu_device_operations osd_lu_ops = {
8190 .ldo_object_alloc = osd_object_alloc,
8191 .ldo_process_config = osd_process_config,
8192 .ldo_recovery_complete = osd_recovery_complete,
8193 .ldo_prepare = osd_prepare,
8194 .ldo_fid_alloc = osd_fid_alloc,
8197 static const struct lu_device_type_operations osd_device_type_ops = {
8198 .ldto_init = osd_type_init,
8199 .ldto_fini = osd_type_fini,
8201 .ldto_start = osd_type_start,
8202 .ldto_stop = osd_type_stop,
8204 .ldto_device_alloc = osd_device_alloc,
8205 .ldto_device_free = osd_device_free,
8207 .ldto_device_init = osd_device_init,
8208 .ldto_device_fini = osd_device_fini
8211 static struct lu_device_type osd_device_type = {
8212 .ldt_tags = LU_DEVICE_DT,
8213 .ldt_name = LUSTRE_OSD_LDISKFS_NAME,
8214 .ldt_ops = &osd_device_type_ops,
8215 .ldt_ctx_tags = LCT_LOCAL,
8218 static int osd_health_check(const struct lu_env *env, struct obd_device *obd)
8220 struct osd_device *osd = osd_dev(obd->obd_lu_dev);
8221 struct super_block *sb = osd_sb(osd);
8223 return (osd->od_mnt == NULL || sb->s_flags & SB_RDONLY);
8227 * lprocfs legacy support.
8229 static const struct obd_ops osd_obd_device_ops = {
8230 .o_owner = THIS_MODULE,
8231 .o_connect = osd_obd_connect,
8232 .o_disconnect = osd_obd_disconnect,
8233 .o_health_check = osd_health_check,
8236 static ssize_t track_declares_assert_show(struct kobject *kobj,
8237 struct attribute *attr,
8240 return sprintf(buf, "%d\n", ldiskfs_track_declares_assert);
8243 static ssize_t track_declares_assert_store(struct kobject *kobj,
8244 struct attribute *attr,
8245 const char *buffer, size_t count)
8247 bool track_declares_assert;
8250 rc = kstrtobool(buffer, &track_declares_assert);
8254 ldiskfs_track_declares_assert = track_declares_assert;
8258 LUSTRE_RW_ATTR(track_declares_assert);
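/*
 * LUSTRE_RW_ATTR() is expected to generate lustre_attr_track_declares_assert
 * from the show/store handlers above; osd_init()/osd_exit() below attach and
 * remove that attribute under the osd-ldiskfs kobject in sysfs.
 */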
8260 static int __init osd_init(void)
8262 struct kobject *kobj;
8265 BUILD_BUG_ON(BH_DXLock >=
8266 sizeof(((struct buffer_head *)0)->b_state) * 8);
8267 #if !defined(CONFIG_DEBUG_MUTEXES) && !defined(CONFIG_DEBUG_SPINLOCK)
8268 /* please, try to keep osd_thread_info smaller than a page */
8269 BUILD_BUG_ON(sizeof(struct osd_thread_info) > PAGE_SIZE);
8274 rc = lu_kmem_init(ldiskfs_caches);
8278 #ifdef CONFIG_KALLSYMS
8279 priv_security_file_alloc =
8280 (void *)kallsyms_lookup_name("security_file_alloc");
8283 rc = class_register_type(&osd_obd_device_ops, NULL, true, NULL,
8284 LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);
8286 lu_kmem_fini(ldiskfs_caches);
8290 kobj = kset_find_obj(lustre_kset, LUSTRE_OSD_LDISKFS_NAME);
8292 rc = sysfs_create_file(kobj,
8293 &lustre_attr_track_declares_assert.attr);
8296 CWARN("osd-ldiskfs: track_declares_assert failed to register with sysfs\n");
8303 static void __exit osd_exit(void)
8305 struct kobject *kobj;
8307 kobj = kset_find_obj(lustre_kset, LUSTRE_OSD_LDISKFS_NAME);
8309 sysfs_remove_file(kobj,
8310 &lustre_attr_track_declares_assert.attr);
8313 class_unregister_type(LUSTRE_OSD_LDISKFS_NAME);
8314 lu_kmem_fini(ldiskfs_caches);
8317 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
8318 MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_LDISKFS_NAME")");
8319 MODULE_VERSION(LUSTRE_VERSION_STRING);
8320 MODULE_LICENSE("GPL");
8322 module_init(osd_init);
8323 module_exit(osd_exit);