4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/osd/osd_handler.c
34 * Top-level entry points into osd module
36 * Author: Nikita Danilov <nikita@clusterfs.com>
37 * Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
40 #define DEBUG_SUBSYSTEM S_OSD
42 #include <linux/kallsyms.h>
43 #include <linux/module.h>
44 #include <linux/user_namespace.h>
45 #ifdef HAVE_UIDGID_HEADER
46 # include <linux/uidgid.h>
49 /* prerequisite for linux/xattr.h */
50 #include <linux/types.h>
51 /* prerequisite for linux/xattr.h */
53 /* XATTR_{REPLACE,CREATE} */
54 #include <linux/xattr.h>
56 #include <ldiskfs/ldiskfs.h>
57 #include <ldiskfs/xattr.h>
58 #include <ldiskfs/ldiskfs_extents.h>
61 * struct OBD_{ALLOC,FREE}*()
64 #include <obd_support.h>
65 /* struct ptlrpc_thread */
66 #include <lustre_net.h>
67 #include <lustre_fid.h>
69 #include <uapi/linux/lustre/lustre_param.h>
71 #include "osd_internal.h"
72 #include "osd_dynlocks.h"
74 /* llo_* api support */
75 #include <md_object.h>
76 #include <lustre_quota.h>
78 #include <lustre_linkea.h>
80 /* Maximum EA size is limited by LNET_MTU for remote objects */
81 #define OSD_MAX_EA_SIZE 1048364
84 module_param(ldiskfs_pdo, int, 0644);
85 MODULE_PARM_DESC(ldiskfs_pdo, "ldiskfs with parallel directory operations");
87 int ldiskfs_track_declares_assert;
88 module_param(ldiskfs_track_declares_assert, int, 0644);
89 MODULE_PARM_DESC(ldiskfs_track_declares_assert, "LBUG during tracking of declares");
91 /* Slab to allocate dynlocks */
92 struct kmem_cache *dynlock_cachep;
94 /* Slab to allocate osd_it_ea */
95 struct kmem_cache *osd_itea_cachep;
97 static struct lu_kmem_descr ldiskfs_caches[] = {
99 .ckd_cache = &dynlock_cachep,
100 .ckd_name = "dynlock_cache",
101 .ckd_size = sizeof(struct dynlock_handle)
104 .ckd_cache = &osd_itea_cachep,
105 .ckd_name = "osd_itea_cache",
106 .ckd_size = sizeof(struct osd_it_ea)
113 static const char dot[] = ".";
114 static const char dotdot[] = "..";
116 static const struct lu_object_operations osd_lu_obj_ops;
117 static const struct dt_object_operations osd_obj_ops;
118 static const struct dt_object_operations osd_obj_otable_it_ops;
119 static const struct dt_index_operations osd_index_iam_ops;
120 static const struct dt_index_operations osd_index_ea_ops;
122 static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
123 const struct lu_fid *fid);
124 static int osd_process_scheduled_agent_removals(const struct lu_env *env,
125 struct osd_device *osd);
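/*
 * Map each OSD operation type to the operation used to roll it back:
 * a CREATE is undone by a DESTROY, an INSERT by a DELETE, and so on.
 * Entries set to OSD_OT_MAX have no rollback counterpart.
 */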
127 int osd_trans_declare_op2rb[] = {
128 [OSD_OT_ATTR_SET] = OSD_OT_ATTR_SET,
129 [OSD_OT_PUNCH] = OSD_OT_MAX,
130 [OSD_OT_XATTR_SET] = OSD_OT_XATTR_SET,
131 [OSD_OT_CREATE] = OSD_OT_DESTROY,
132 [OSD_OT_DESTROY] = OSD_OT_CREATE,
133 [OSD_OT_REF_ADD] = OSD_OT_REF_DEL,
134 [OSD_OT_REF_DEL] = OSD_OT_REF_ADD,
135 [OSD_OT_WRITE] = OSD_OT_WRITE,
136 [OSD_OT_INSERT] = OSD_OT_DELETE,
137 [OSD_OT_DELETE] = OSD_OT_INSERT,
138 [OSD_OT_QUOTA] = OSD_OT_MAX,
141 static int osd_has_index(const struct osd_object *obj)
143 return obj->oo_dt.do_index_ops != NULL;
146 static int osd_object_invariant(const struct lu_object *l)
148 return osd_invariant(osd_obj(l));
152 * Concurrency: doesn't matter
154 static int osd_is_write_locked(const struct lu_env *env, struct osd_object *o)
156 struct osd_thread_info *oti = osd_oti_get(env);
158 return oti->oti_w_locks > 0 && o->oo_owner == env;
162 * Concurrency: doesn't access mutable data
164 static int osd_root_get(const struct lu_env *env,
165 struct dt_device *dev, struct lu_fid *f)
167 lu_local_obj_fid(f, OSD_FS_ROOT_OID);
 * The following set of functions is used to maintain a per-thread
 * cache of FID->ino mappings. This mechanism is needed to resolve a
 * FID to an inode at dt_insert(), which in turn stores the ino in the
 * directory entries to keep ldiskfs compatible with ext[34].
 * Due to locking-originated restrictions we can't look up the ino
 * using the LU cache (a deadlock is possible), and lookup using the OI
 * is quite expensive. So instead we maintain this cache and methods
 * like dt_create() fill it. Thus in the majority of cases dt_insert()
 * is able to find the needed mapping in a lockless manner.
182 static struct osd_idmap_cache *
183 osd_idc_find(const struct lu_env *env, struct osd_device *osd,
184 const struct lu_fid *fid)
186 struct osd_thread_info *oti = osd_oti_get(env);
187 struct osd_idmap_cache *idc = oti->oti_ins_cache;
190 for (i = 0; i < oti->oti_ins_cache_used; i++) {
191 if (!lu_fid_eq(&idc[i].oic_fid, fid))
193 if (idc[i].oic_dev != osd)
202 static struct osd_idmap_cache *
203 osd_idc_add(const struct lu_env *env, struct osd_device *osd,
204 const struct lu_fid *fid)
206 struct osd_thread_info *oti = osd_oti_get(env);
207 struct osd_idmap_cache *idc;
210 if (unlikely(oti->oti_ins_cache_used >= oti->oti_ins_cache_size)) {
211 i = oti->oti_ins_cache_size * 2;
213 i = OSD_INS_CACHE_SIZE;
214 OBD_ALLOC(idc, sizeof(*idc) * i);
216 return ERR_PTR(-ENOMEM);
217 if (oti->oti_ins_cache != NULL) {
218 memcpy(idc, oti->oti_ins_cache,
219 oti->oti_ins_cache_used * sizeof(*idc));
220 OBD_FREE(oti->oti_ins_cache,
221 oti->oti_ins_cache_used * sizeof(*idc));
223 oti->oti_ins_cache = idc;
224 oti->oti_ins_cache_size = i;
227 idc = oti->oti_ins_cache + oti->oti_ins_cache_used++;
230 idc->oic_lid.oii_ino = 0;
231 idc->oic_lid.oii_gen = 0;
238 * lookup mapping for the given fid in the cache, initialize a
239 * new one if not found. the initialization checks whether the
240 * object is local or remote. for local objects, OI is used to
241 * learn ino/generation. the function is used when the caller
242 * has no information about the object, e.g. at dt_insert().
244 static struct osd_idmap_cache *
245 osd_idc_find_or_init(const struct lu_env *env, struct osd_device *osd,
246 const struct lu_fid *fid)
248 struct osd_idmap_cache *idc;
251 idc = osd_idc_find(env, osd, fid);
252 LASSERT(!IS_ERR(idc));
256 CDEBUG(D_INODE, "%s: FID "DFID" not in the id map cache\n",
257 osd->od_svname, PFID(fid));
259 /* new mapping is needed */
260 idc = osd_idc_add(env, osd, fid);
262 CERROR("%s: FID "DFID" add id map cache failed: %ld\n",
263 osd->od_svname, PFID(fid), PTR_ERR(idc));
268 rc = osd_remote_fid(env, osd, fid);
269 if (unlikely(rc < 0))
273 /* the object is local, lookup in OI */
274 /* XXX: probably cheaper to lookup in LU first? */
275 rc = osd_oi_lookup(osd_oti_get(env), osd, fid,
277 if (unlikely(rc < 0)) {
278 CERROR("can't lookup: rc = %d\n", rc);
282 /* the object is remote */
 * Look up the mapping for the given FID and fill it from the given object.
 * The object is local by definition.
293 static int osd_idc_find_and_init(const struct lu_env *env,
294 struct osd_device *osd,
295 struct osd_object *obj)
297 const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
298 struct osd_idmap_cache *idc;
300 idc = osd_idc_find(env, osd, fid);
301 LASSERT(!IS_ERR(idc));
303 if (obj->oo_inode == NULL)
305 if (idc->oic_lid.oii_ino != obj->oo_inode->i_ino) {
306 LASSERT(idc->oic_lid.oii_ino == 0);
307 idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
308 idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
313 CDEBUG(D_INODE, "%s: FID "DFID" not in the id map cache\n",
314 osd->od_svname, PFID(fid));
316 /* new mapping is needed */
317 idc = osd_idc_add(env, osd, fid);
319 CERROR("%s: FID "DFID" add id map cache failed: %ld\n",
320 osd->od_svname, PFID(fid), PTR_ERR(idc));
324 if (obj->oo_inode != NULL) {
325 idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
326 idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
332 * OSD object methods.
 * Concurrency: no concurrent access is possible that early in the
 * object life-cycle.
339 static struct lu_object *osd_object_alloc(const struct lu_env *env,
340 const struct lu_object_header *hdr,
343 struct osd_object *mo;
348 struct lu_object_header *h;
349 struct osd_device *o = osd_dev(d);
351 l = &mo->oo_dt.do_lu;
352 if (unlikely(o->od_in_init)) {
359 lu_object_header_init(h);
360 lu_object_init(l, h, d);
361 lu_object_add_top(h, l);
364 dt_object_init(&mo->oo_dt, NULL, d);
365 mo->oo_header = NULL;
368 mo->oo_dt.do_ops = &osd_obj_ops;
369 l->lo_ops = &osd_lu_obj_ops;
370 init_rwsem(&mo->oo_sem);
371 init_rwsem(&mo->oo_ext_idx_sem);
372 spin_lock_init(&mo->oo_guard);
373 INIT_LIST_HEAD(&mo->oo_xattr_list);
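/*
 * Read the inode's LMA (Lustre Metadata Attributes) xattr into @loa,
 * byte-swap it to CPU order and validate its compatibility flags.
 * Returns 0 on success or a negative errno, e.g. -ENODATA when the
 * inode carries no LMA xattr.
 */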
379 int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
380 struct dentry *dentry, struct lustre_ost_attrs *loa)
384 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
385 (void *)loa, sizeof(*loa));
387 struct lustre_mdt_attrs *lma = &loa->loa_lma;
389 if (rc < sizeof(*lma))
393 lustre_loa_swab(loa, true);
394 /* Check LMA compatibility */
395 if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
396 CWARN("%s: unsupported incompat LMA feature(s) %#x "
397 "for fid = "DFID", ino = %lu\n",
399 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
400 PFID(&lma->lma_self_fid), inode->i_ino);
403 } else if (rc == 0) {
 * Retrieve an object from the backend ext filesystem.
413 struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
414 struct osd_inode_id *id)
417 struct inode *inode = NULL;
 * if we look for an inode within a running
 * transaction, then we risk a deadlock;
 * osd_dirent_check_repair() breaks this
424 /* LASSERT(current->journal_info == NULL); */
426 inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
428 CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
429 id->oii_ino, PTR_ERR(inode));
430 } else if (id->oii_gen != OSD_OII_NOGEN &&
431 inode->i_generation != id->oii_gen) {
432 CDEBUG(D_INODE, "unmatched inode: ino = %u, oii_gen = %u, "
433 "i_generation = %u\n",
434 id->oii_ino, id->oii_gen, inode->i_generation);
436 inode = ERR_PTR(-ESTALE);
437 } else if (inode->i_nlink == 0) {
 * due to parallel readdir and unlink,
 * we can have a dead inode here.
442 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
444 inode = ERR_PTR(-ESTALE);
445 } else if (is_bad_inode(inode)) {
446 CWARN("%s: bad inode: ino = %u\n",
447 osd_dev2name(dev), id->oii_ino);
449 inode = ERR_PTR(-ENOENT);
450 } else if ((rc = osd_attach_jinode(inode))) {
454 ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
455 if (id->oii_gen == OSD_OII_NOGEN)
456 osd_id_gen(id, inode->i_ino, inode->i_generation);
 * Do not update file c/mtime in ldiskfs.
 * NB: we don't have any lock to protect this because we don't
 * have a reference on the osd_object yet, but contention with
 * another lookup + attr_set can't happen in the tiny window
 * between the if (...) check and setting S_NOCMTIME.
465 if (!(inode->i_flags & S_NOCMTIME))
466 inode->i_flags |= S_NOCMTIME;
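/*
 * Thin wrapper around __ldiskfs_add_entry(). On -ENOBUFS or -ENOSPC it
 * additionally warns that the parent directory is approaching (or has
 * reached) its maximum entry limit, resolving the parent's FID from its
 * LMA when possible.
 */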
471 int osd_ldiskfs_add_entry(struct osd_thread_info *info, struct osd_device *osd,
472 handle_t *handle, struct dentry *child,
473 struct inode *inode, struct htree_lock *hlock)
477 rc = __ldiskfs_add_entry(handle, child, inode, hlock);
478 if (rc == -ENOBUFS || rc == -ENOSPC) {
479 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
480 struct inode *parent = child->d_parent->d_inode;
481 struct lu_fid *fid = NULL;
483 rc2 = osd_get_lma(info, parent, child->d_parent, loa);
485 fid = &loa->loa_lma.lma_self_fid;
486 } else if (rc2 == -ENODATA) {
487 if (unlikely(parent == inode->i_sb->s_root->d_inode)) {
488 fid = &info->oti_fid3;
489 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
490 } else if (!osd->od_is_ost && osd->od_index == 0) {
491 fid = &info->oti_fid3;
492 lu_igif_build(fid, parent->i_ino,
493 parent->i_generation);
498 CWARN("%s: directory (inode: %lu, FID: "DFID") %s "
499 "maximum entry limit\n",
500 osd_name(osd), parent->i_ino, PFID(fid),
501 rc == -ENOSPC ? "has reached" : "is approaching");
503 CWARN("%s: directory (inode: %lu, FID: unknown) %s "
504 "maximum entry limit\n",
505 osd_name(osd), parent->i_ino,
506 rc == -ENOSPC ? "has reached" : "is approaching");
/* ignore such errors for now */
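/*
 * Fetch the inode for @id and also return the object's FID: taken from
 * the LMA xattr when present, otherwise the root FID for the root inode
 * or an IGIF built from ino/generation.
 */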
518 osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
519 struct osd_inode_id *id, struct lu_fid *fid)
521 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
525 inode = osd_iget(info, dev, id);
529 rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
531 *fid = loa->loa_lma.lma_self_fid;
532 } else if (rc == -ENODATA) {
533 if (unlikely(inode == osd_sb(dev)->s_root->d_inode))
534 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
536 lu_igif_build(fid, inode->i_ino, inode->i_generation);
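/*
 * Variant of osd_iget() that cross-checks the located inode against the
 * expected OI mapping for @fid. If the inode is missing, stale or does
 * not match, and the mapping was not trusted, the OI file is consulted
 * again; a persistent mismatch is reported as -EREMCHG so the caller can
 * trigger an OI scrub.
 */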
544 static struct inode *osd_iget_check(struct osd_thread_info *info,
545 struct osd_device *dev,
546 const struct lu_fid *fid,
547 struct osd_inode_id *id,
 * The cached OI mapping is trustworthy. If we cannot locate the inode
 * via the cached OI mapping, then return the failure to the caller
 * directly without further OI checking.
562 inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
565 if (!trusted && (rc == -ENOENT || rc == -ESTALE))
568 CDEBUG(D_INODE, "no inode for FID: "DFID", ino = %u, rc = %d\n",
569 PFID(fid), id->oii_ino, rc);
573 if (is_bad_inode(inode)) {
578 CDEBUG(D_INODE, "bad inode for FID: "DFID", ino = %u\n",
579 PFID(fid), id->oii_ino);
583 if (id->oii_gen != OSD_OII_NOGEN &&
584 inode->i_generation != id->oii_gen) {
589 CDEBUG(D_INODE, "unmatched inode for FID: "DFID", ino = %u, "
590 "oii_gen = %u, i_generation = %u\n", PFID(fid),
591 id->oii_ino, id->oii_gen, inode->i_generation);
595 if (inode->i_nlink == 0) {
600 CDEBUG(D_INODE, "stale inode for FID: "DFID", ino = %u\n",
601 PFID(fid), id->oii_ino);
605 ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
609 __u32 saved_ino = id->oii_ino;
610 __u32 saved_gen = id->oii_gen;
613 LASSERTF(rc == -ESTALE || rc == -ENOENT, "rc = %d\n", rc);
615 rc = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
 * XXX: There are four possible cases:
 * 1. Backup/restore caused the OI to become invalid.
 * 2. Someone unlinked the object but did NOT remove
 *    the OI mapping, e.g. by mounting the target device
 *    as ldiskfs and modifying something directly.
 * 3. Someone just removed the object between the
 *    former oi_lookup and the iget. That is normal.
 * 4. Other failure cases.
 *
 * Generally, when the device is mounted, it will
 * auto-check whether the system was restored from
 * a file-level backup or not. We trust such detection
 * to distinguish the 1st case from the 2nd case:
 * if the OI files are consistent but may contain
 * stale OI mappings because of case 2, and iget()
 * returns -ENOENT or -ESTALE, then it should be case 2.
 * If the OI mapping was in the OI file before
 * osd_iget_check(), but now it has disappeared,
 * then it must have been removed by a race; that is a
 * normal case.
 * The OI scrub updated the OI mapping by race;
 * the new OI mapping must be valid.
651 if (saved_ino != id->oii_ino ||
652 (saved_gen != id->oii_gen && saved_gen != OSD_OII_NOGEN)) {
661 if (dev->od_scrub.os_scrub.os_file.sf_flags &
 * It can still be case 2, but we cannot
 * distinguish it from case 1. So return
 * -EREMCHG to block the current operation until
 * the OI scrub has rebuilt the OI mappings.
676 if (inode->i_generation == id->oii_gen)
681 if (id->oii_gen == OSD_OII_NOGEN)
682 osd_id_gen(id, inode->i_ino, inode->i_generation);
 * Do not update file c/mtime in ldiskfs.
 * NB: we don't have any lock to protect this because we don't
 * have a reference on the osd_object yet, but contention with
 * another lookup + attr_set can't happen in the tiny window
 * between the if (...) check and setting S_NOCMTIME.
691 if (!(inode->i_flags & S_NOCMTIME))
692 inode->i_flags |= S_NOCMTIME;
709 * \retval +v: new filter_fid does not contain self-fid
710 * \retval 0: filter_fid_18_23, contains self-fid
711 * \retval -v: other failure cases
713 int osd_get_idif(struct osd_thread_info *info, struct inode *inode,
714 struct dentry *dentry, struct lu_fid *fid)
716 struct filter_fid *ff = &info->oti_ff;
717 struct ost_id *ostid = &info->oti_ostid;
720 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_FID, ff, sizeof(*ff));
721 if (rc == sizeof(struct filter_fid_18_23)) {
722 struct filter_fid_18_23 *ff_old = (void *)ff;
724 ostid_set_seq(ostid, le64_to_cpu(ff_old->ff_seq));
725 rc = ostid_set_id(ostid, le64_to_cpu(ff_old->ff_objid));
727 * XXX: use 0 as the index for compatibility, the caller will
728 * handle index related issues when necessary.
731 ostid_to_fid(fid, ostid, 0);
732 } else if (rc >= (int)sizeof(struct filter_fid_24_29)) {
734 } else if (rc >= 0) {
741 static int osd_lma_self_repair(struct osd_thread_info *info,
742 struct osd_device *osd, struct inode *inode,
743 const struct lu_fid *fid, __u32 compat)
748 LASSERT(current->journal_info == NULL);
750 jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
751 osd_dto_credits_noquota[DTO_XATTR_SET]);
754 CWARN("%s: cannot start journal for lma_self_repair: rc = %d\n",
759 rc = osd_ea_fid_set(info, inode, fid, compat, 0);
761 CWARN("%s: cannot self repair the LMA: rc = %d\n",
763 ldiskfs_journal_stop(jh);
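/*
 * Check that the FID recorded in the object matches the inode's LMA
 * self-FID (falling back to the legacy filter_fid for OST objects and
 * self-repairing the LMA when appropriate), and cache stripe and remote
 * parent state from the LMA on the osd_object.
 */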
767 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
769 struct osd_thread_info *info = osd_oti_get(env);
770 struct osd_device *osd = osd_obj2dev(obj);
771 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
772 struct lustre_mdt_attrs *lma = &loa->loa_lma;
773 struct inode *inode = obj->oo_inode;
774 struct dentry *dentry = &info->oti_obj_dentry;
775 struct lu_fid *fid = NULL;
776 const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
781 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
782 (void *)loa, sizeof(*loa));
783 if (rc == -ENODATA && !fid_is_igif(rfid) && osd->od_check_ff) {
784 fid = &lma->lma_self_fid;
785 rc = osd_get_idif(info, inode, dentry, fid);
786 if (rc > 0 || (rc == -ENODATA && osd->od_index_in_idif)) {
 * For the given OST-object, if it has neither LMA nor
 * FID in XATTR_NAME_FID, then the given FID (which is
 * contained in the @obj, from the client RPC for locating
 * the OST-object) is trusted. We use it to generate
 * the LMA.
794 osd_lma_self_repair(info, osd, inode, rfid,
805 lustre_lma_swab(lma);
806 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
807 (CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT) &&
808 S_ISREG(inode->i_mode)))) {
809 CWARN("%s: unsupported incompat LMA feature(s) %#x for "
810 "fid = "DFID", ino = %lu\n", osd_name(osd),
811 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
812 PFID(rfid), inode->i_ino);
815 fid = &lma->lma_self_fid;
816 if (lma->lma_compat & LMAC_STRIPE_INFO &&
818 obj->oo_pfid_in_lma = 1;
819 if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
821 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
825 if (fid != NULL && unlikely(!lu_fid_eq(rfid, fid))) {
826 if (fid_is_idif(rfid) && fid_is_idif(fid)) {
827 struct ost_id *oi = &info->oti_ostid;
828 struct lu_fid *fid1 = &info->oti_fid3;
829 __u32 idx = fid_idif_ost_idx(rfid);
 * For old IDIF, the OST index is not part of the IDIF,
 * meaning that different OSTs may have the same IDIFs.
 * In such a case, we need to perform a compatibility
 * check to make sure the OI scrub is triggered properly.
837 if (idx != 0 && fid_idif_ost_idx(fid) == 0) {
838 /* Given @rfid is new, LMA is old. */
839 fid_to_ostid(fid, oi);
840 ostid_to_fid(fid1, oi, idx);
841 if (lu_fid_eq(fid1, rfid)) {
842 if (osd->od_index_in_idif)
843 osd_lma_self_repair(info, osd,
857 struct osd_check_lmv_buf {
858 #ifdef HAVE_DIR_CONTEXT
859 /* please keep it as first member */
860 struct dir_context ctx;
862 struct osd_thread_info *oclb_info;
863 struct osd_device *oclb_dev;
864 struct osd_idmap_cache *oclb_oic;
 * It is called internally by ->readdir() to pick out the
 * local slave object's FID of the striped directory.
873 * \retval 1 found the local slave's FID
874 * \retval 0 continue to check next item
875 * \retval -ve for failure
877 #ifdef HAVE_FILLDIR_USE_CTX
878 static int osd_stripe_dir_filldir(struct dir_context *buf,
880 static int osd_stripe_dir_filldir(void *buf,
882 const char *name, int namelen,
883 loff_t offset, __u64 ino, unsigned int d_type)
885 struct osd_check_lmv_buf *oclb = (struct osd_check_lmv_buf *)buf;
886 struct osd_thread_info *oti = oclb->oclb_info;
887 struct lu_fid *fid = &oti->oti_fid3;
888 struct osd_inode_id *id = &oti->oti_id3;
889 struct osd_device *dev = oclb->oclb_dev;
890 struct osd_idmap_cache *oic = oclb->oclb_oic;
899 sscanf(name + 1, SFID, RFID(fid));
900 if (!fid_is_sane(fid))
903 if (osd_remote_fid(oti->oti_env, dev, fid))
906 osd_id_gen(id, ino, OSD_OII_NOGEN);
907 inode = osd_iget(oti, dev, id);
909 return PTR_ERR(inode);
912 osd_add_oi_cache(oti, dev, id, fid);
916 osd_oii_insert(dev, oic, true);
917 oclb->oclb_found = true;
 * When looking up an item under a striped directory, we first need to locate
 * the master MDT-object of the striped directory; then the client sends a
 * lookup (getattr_by_name) RPC to the MDT with some slave MDT-object's FID
 * and the item's name. If the system is restored from an MDT file-level
 * backup, then before the OI scrub has completely rebuilt the OI files, the
 * OI mappings of the master MDT-object and slave MDT-object may be invalid.
 * Usually, that is not a problem for the master MDT-object: when locating the
 * master MDT-object, we first do a name-based lookup (for the striped
 * directory itself), and during that process we can set up the correct OI
 * mapping for the master MDT-object. But it is trouble for the slave
 * MDT-object, because the client will not trigger a name-based lookup on the
 * MDT to locate the slave MDT-object before looking up items under the
 * striped directory. Then, in osd_fid_lookup(), the MDT finds that the OI
 * mapping for the slave MDT-object is invalid, does not know what the right
 * OI mapping is, and has to return -EINPROGRESS to the client to notify it
 * that the OI scrub is rebuilding the OI file and the related OI mapping is
 * not known yet, please try again later. The client will then re-try the RPC
 * again and again until the related OI mapping has been updated. That is
 * quite inefficient.
 *
 * To resolve the above trouble, we handle it as the following two cases:
 *
 * 1) The slave MDT-object and the master MDT-object are on different MDTs.
 * That is relatively easy: being one of the remote MDT-objects, the slave
 * MDT-object is linked under /REMOTE_PARENT_DIR with its FID string as the
 * name. We can locate the slave MDT-object by looking up /REMOTE_PARENT_DIR
 * directly. Please check osd_fid_lookup().
 *
 * 2) The slave MDT-object and the master MDT-object reside on the same MDT.
 * In that case, while looking up the master MDT-object, we also look up the
 * slave MDT-object via readdir against the master MDT-object, because the
 * slave MDT-objects' information is stored as sub-directories with the name
 * "${FID}:${index}". When the local slave MDT-object is found, its OI mapping
 * is recorded, so subsequent osd_fid_lookup() calls will know the correct OI
 * mapping for the slave MDT-object.
958 static int osd_check_lmv(struct osd_thread_info *oti, struct osd_device *dev,
959 struct inode *inode, struct osd_idmap_cache *oic)
961 struct lu_buf *buf = &oti->oti_big_buf;
962 struct dentry *dentry = &oti->oti_obj_dentry;
963 struct file *filp = &oti->oti_file;
964 const struct file_operations *fops;
965 struct lmv_mds_md_v1 *lmv1;
966 struct osd_check_lmv_buf oclb = {
967 #ifdef HAVE_DIR_CONTEXT
968 .ctx.actor = osd_stripe_dir_filldir,
980 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMV, buf->lb_buf,
983 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMV, NULL, 0);
985 lu_buf_realloc(buf, rc);
986 if (buf->lb_buf == NULL)
987 GOTO(out, rc = -ENOMEM);
993 if (unlikely(rc == 0 || rc == -ENODATA))
999 if (unlikely(buf->lb_buf == NULL)) {
1000 lu_buf_realloc(buf, rc);
1001 if (buf->lb_buf == NULL)
1002 GOTO(out, rc = -ENOMEM);
1008 if (le32_to_cpu(lmv1->lmv_magic) != LMV_MAGIC_V1)
1011 fops = inode->i_fop;
1012 dentry->d_inode = inode;
1013 dentry->d_sb = inode->i_sb;
1015 filp->f_path.dentry = dentry;
1016 filp->f_mode = FMODE_64BITHASH;
1017 filp->f_mapping = inode->i_mapping;
1019 filp->private_data = NULL;
1020 set_file_inode(filp, inode);
1023 oclb.oclb_items = 0;
1024 #ifdef HAVE_DIR_CONTEXT
1025 oclb.ctx.pos = filp->f_pos;
1026 #ifdef HAVE_ITERATE_SHARED
1027 rc = fops->iterate_shared(filp, &oclb.ctx);
1029 rc = fops->iterate(filp, &oclb.ctx);
1031 filp->f_pos = oclb.ctx.pos;
1033 rc = fops->readdir(filp, &oclb, osd_stripe_dir_filldir);
1035 } while (rc >= 0 && oclb.oclb_items > 0 && !oclb.oclb_found &&
1036 filp->f_pos != LDISKFS_HTREE_EOF_64BIT);
1037 fops->release(inode, filp);
1041 CDEBUG(D_LFSCK, "%s: fail to check LMV EA, inode = %lu/%u,"
1042 DFID": rc = %d\n", osd_ino2name(inode),
1043 inode->i_ino, inode->i_generation,
1044 PFID(&oic->oic_fid), rc);
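/*
 * Resolve @fid to an inode for @obj. The mapping is searched in the
 * per-thread cache, the OI scrub pending list and the OI files, in that
 * order, and the resulting inode is verified via osd_check_lma().
 * Inconsistent mappings fall back to a /REMOTE_PARENT_DIR lookup and may
 * trigger an OI scrub, returning -EINPROGRESS or -EREMCHG while the
 * mappings are being rebuilt.
 */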
1051 static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
1052 const struct lu_fid *fid,
1053 const struct lu_object_conf *conf)
1055 struct osd_thread_info *info;
1056 struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
1057 struct osd_device *dev;
1058 struct osd_idmap_cache *oic;
1059 struct osd_inode_id *id;
1060 struct inode *inode = NULL;
1061 struct lustre_scrub *scrub;
1062 struct scrub_file *sf;
1063 __u32 flags = SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT | SS_AUTO_FULL;
1068 bool remote = false;
1069 bool trusted = true;
1070 bool updated = false;
1071 bool checked = false;
1075 LINVRNT(osd_invariant(obj));
1076 LASSERT(obj->oo_inode == NULL);
1077 LASSERTF(fid_is_sane(fid) || fid_is_idif(fid), DFID"\n", PFID(fid));
1079 dev = osd_dev(ldev);
1080 scrub = &dev->od_scrub.os_scrub;
1081 sf = &scrub->os_file;
1082 info = osd_oti_get(env);
1084 oic = &info->oti_cache;
1086 if (OBD_FAIL_CHECK(OBD_FAIL_SRV_ENOENT))
 * The object is either created as a locking anchor or is about to be
 * created on disk. There is no need for osd_oi_lookup() here because a
 * FID should never be re-used; if it really is a duplicate FID for some
 * unexpected reason, we should be able to detect it later when
 * do_create->osd_oi_insert() is called.
1096 if (conf && conf->loc_flags & LOC_F_NEW)
1097 GOTO(out, result = 0);
1099 /* Search order: 1. per-thread cache. */
1100 if (lu_fid_eq(fid, &oic->oic_fid) && likely(oic->oic_dev == dev)) {
1106 if (!list_empty(&scrub->os_inconsistent_items)) {
1107 /* Search order: 2. OI scrub pending list. */
1108 result = osd_oii_lookup(dev, fid, id);
 * The OI mapping in the OI file can be updated by the OI scrub
 * when we locate the inode via FID. So it may not be trustworthy.
1119 /* Search order: 3. OI files. */
1120 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1121 if (result == -ENOENT) {
1122 if (!(fid_is_norm(fid) || fid_is_igif(fid)) ||
1123 fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) ||
1124 !ldiskfs_test_bit(osd_oi_fid2idx(dev, fid),
1126 GOTO(out, result = 0);
1135 obj->oo_inode = NULL;
/* on later passes through these checks; not true on the first pass */
1137 if (!IS_ERR_OR_NULL(inode))
1140 inode = osd_iget_check(info, dev, fid, id, trusted);
1141 if (!IS_ERR(inode)) {
1142 obj->oo_inode = inode;
1150 result = PTR_ERR(inode);
1151 if (result == -ENOENT || result == -ESTALE)
1152 GOTO(out, result = 0);
1154 if (result != -EREMCHG)
 * We still have a chance to get a valid inode: for an
 * object which is referenced by a remote name entry, the
 * object on the local MDT will be linked under the dir
 * "/REMOTE_PARENT_DIR" with its FID string as the name.
 *
 * We do not know whether the object for the given FID
 * is referenced by some remote name entry or not, and
 * especially for DNE II, a multiply-linked object may
 * have many name entries residing on many MDTs.
 *
 * To simplify the operation, OSD does not distinguish
 * further and just looks up "/REMOTE_PARENT_DIR". Usually,
 * this only happens for RPCs from other MDTs during the
 * OI scrub, or for client-side RPCs with FID only, such
 * as FID-to-path, or from an old connected client.
1176 rc1 = osd_lookup_in_remote_parent(info, dev, fid, id);
1180 flags |= SS_AUTO_PARTIAL;
1181 flags &= ~SS_AUTO_FULL;
1186 if (thread_is_running(&scrub->os_thread)) {
1187 if (scrub->os_partial_scan && !scrub->os_in_join)
1190 if (IS_ERR_OR_NULL(inode) || result)
1191 GOTO(out, result = -EINPROGRESS);
1194 LASSERT(obj->oo_inode == inode);
1196 osd_add_oi_cache(info, dev, id, fid);
1197 osd_oii_insert(dev, oic, true);
1201 if (dev->od_auto_scrub_interval == AS_NEVER) {
1203 GOTO(out, result = -EREMCHG);
1206 LASSERT(obj->oo_inode == inode);
1208 osd_add_oi_cache(info, dev, id, fid);
1213 rc1 = osd_scrub_start(env, dev, flags);
1214 LCONSOLE_WARN("%s: trigger OI scrub by RPC for the " DFID" with flags "
1215 "0x%x, rc = %d\n", osd_name(dev), PFID(fid), flags, rc1);
1216 if (rc1 && rc1 != -EALREADY)
1217 GOTO(out, result = -EREMCHG);
1219 if (IS_ERR_OR_NULL(inode) || result)
1220 GOTO(out, result = -EINPROGRESS);
1223 LASSERT(obj->oo_inode == inode);
1225 osd_add_oi_cache(info, dev, id, fid);
1226 osd_oii_insert(dev, oic, true);
1231 if (unlikely(obj->oo_header))
1234 result = osd_check_lma(env, obj);
1238 LASSERTF(id->oii_ino == inode->i_ino &&
1239 id->oii_gen == inode->i_generation,
1240 "locate wrong inode for FID: "DFID", %u/%u => %ld/%u\n",
1241 PFID(fid), id->oii_ino, id->oii_gen,
1242 inode->i_ino, inode->i_generation);
1244 saved_ino = inode->i_ino;
1245 saved_gen = inode->i_generation;
1247 if (unlikely(result == -ENODATA)) {
1249 * If the OI scrub updated the OI mapping by race, it
1250 * must be valid. Trust the inode that has no LMA EA.
1255 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
 * The OI mapping is still there, the inode is still
 * valid. It is just because the inode has no LMA EA.
1261 if (saved_ino == id->oii_ino &&
1262 saved_gen == id->oii_gen)
 * The OI scrub updated the OI mapping by race;
 * the new OI mapping must be valid.
1275 * "result == -ENOENT" means that the OI mappinghas been
1276 * removed by race, so the inode belongs to other object.
1278 * Others error can be returned directly.
1280 if (result == -ENOENT) {
1283 obj->oo_inode = NULL;
1288 if (result != -EREMCHG)
1293 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1295 * "result == -ENOENT" means the cached OI mapping has been removed
1296 * from the OI file by race, above inode belongs to other object.
1298 if (result == -ENOENT) {
1301 obj->oo_inode = NULL;
1302 GOTO(out, result = 0);
1308 if (saved_ino == id->oii_ino && saved_gen == id->oii_gen) {
 * The OI scrub updated the OI mapping by race;
 * the new OI mapping must be valid.
1323 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
1324 struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
1326 result = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
1328 if (lma->lma_compat & LMAC_STRIPE_INFO &&
1330 obj->oo_pfid_in_lma = 1;
1331 if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
1333 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
1334 } else if (result != -ENODATA) {
1339 obj->oo_compat_dot_created = 1;
1340 obj->oo_compat_dotdot_created = 1;
1342 if (S_ISDIR(inode->i_mode) &&
1343 (flags & SS_AUTO_PARTIAL || sf->sf_status == SS_SCANNING))
1344 osd_check_lmv(info, dev, inode, oic);
1346 result = osd_attach_jinode(inode);
1351 GOTO(out, result = 0);
1353 LASSERT(!obj->oo_hl_head);
1354 obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
1356 GOTO(out, result = (!obj->oo_hl_head ? -ENOMEM : 0));
1359 if (result || !obj->oo_inode) {
1360 if (!IS_ERR_OR_NULL(inode))
1363 obj->oo_inode = NULL;
1365 fid_zero(&oic->oic_fid);
1368 LINVRNT(osd_invariant(obj));
1373 * Concurrency: shouldn't matter.
1375 static void osd_object_init0(struct osd_object *obj)
1377 LASSERT(obj->oo_inode != NULL);
1378 obj->oo_dt.do_body_ops = &osd_body_ops;
1379 obj->oo_dt.do_lu.lo_header->loh_attr |=
1380 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
 * Concurrency: no concurrent access is possible that early in the
 * object life-cycle.
1387 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
1388 const struct lu_object_conf *conf)
1390 struct osd_object *obj = osd_obj(l);
1393 LINVRNT(osd_invariant(obj));
1395 if (fid_is_otable_it(&l->lo_header->loh_fid)) {
1396 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
1397 l->lo_header->loh_attr |= LOHA_EXISTS;
1401 result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
1402 obj->oo_dt.do_body_ops = &osd_body_ops_new;
1403 if (result == 0 && obj->oo_inode != NULL) {
1404 struct osd_thread_info *oti = osd_oti_get(env);
1405 struct lustre_ost_attrs *loa = &oti->oti_ost_attrs;
1407 osd_object_init0(obj);
1408 if (unlikely(obj->oo_header))
1411 result = osd_get_lma(oti, obj->oo_inode,
1412 &oti->oti_obj_dentry, loa);
 * Convert LMAI flags to lustre LMA flags
 * and cache them in oo_lma_flags
1419 lma_to_lustre_flags(loa->loa_lma.lma_incompat);
1420 } else if (result == -ENODATA) {
1425 LINVRNT(osd_invariant(obj));
 * The first part of oxe_buf is the xattr name, '\0'-terminated.
 * The remaining part holds the value, in binary form.
1433 struct osd_xattr_entry {
1434 struct list_head oxe_list;
1438 struct rcu_head oxe_rcu;
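/*
 * Look up @name in the object's RCU-protected extended attribute cache.
 * Returns the value length on a hit, -ENOENT if the attribute is not
 * cached, -ENODATA if it is cached as non-existent, and -ERANGE if the
 * supplied buffer is too small.
 */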
1442 static int osd_oxc_get(struct osd_object *obj, const char *name,
1445 struct osd_xattr_entry *tmp;
1446 struct osd_xattr_entry *oxe = NULL;
1447 size_t namelen = strlen(name);
1453 list_for_each_entry_rcu(tmp, &obj->oo_xattr_list, oxe_list) {
1454 if (namelen == tmp->oxe_namelen &&
1455 strncmp(name, tmp->oxe_buf, namelen) == 0) {
1462 GOTO(out, rc = -ENOENT);
1464 if (!oxe->oxe_exist)
1465 GOTO(out, rc = -ENODATA);
1468 rc = oxe->oxe_len - sizeof(*oxe) - oxe->oxe_namelen - 1;
1471 if (buf->lb_buf == NULL)
1474 if (buf->lb_len < rc)
1475 GOTO(out, rc = -ERANGE);
1477 memcpy(buf->lb_buf, &oxe->oxe_buf[namelen + 1], rc);
1485 static void osd_oxc_free(struct rcu_head *head)
1487 struct osd_xattr_entry *oxe;
1489 oxe = container_of(head, struct osd_xattr_entry, oxe_rcu);
1490 OBD_FREE(oxe, oxe->oxe_len);
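/*
 * Add or replace the cached entry for @name on the object. The name and
 * value are copied into a freshly allocated osd_xattr_entry; an existing
 * entry with the same name is replaced under oo_guard and freed via RCU.
 */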
1493 static void osd_oxc_add(struct osd_object *obj, const char *name,
1494 const char *buf, int buflen)
1496 struct osd_xattr_entry *oxe;
1497 struct osd_xattr_entry *old = NULL;
1498 struct osd_xattr_entry *tmp;
1499 size_t namelen = strlen(name);
1500 size_t len = sizeof(*oxe) + namelen + 1 + buflen;
1502 OBD_ALLOC(oxe, len);
1506 INIT_LIST_HEAD(&oxe->oxe_list);
1508 oxe->oxe_namelen = namelen;
1509 memcpy(oxe->oxe_buf, name, namelen);
1511 LASSERT(buf != NULL);
1512 memcpy(oxe->oxe_buf + namelen + 1, buf, buflen);
1513 oxe->oxe_exist = true;
1515 oxe->oxe_exist = false;
/* this should rarely be called; just remove the old entry and add the new one */
1519 spin_lock(&obj->oo_guard);
1520 list_for_each_entry(tmp, &obj->oo_xattr_list, oxe_list) {
1521 if (namelen == tmp->oxe_namelen &&
1522 strncmp(name, tmp->oxe_buf, namelen) == 0) {
1528 list_replace_rcu(&old->oxe_list, &oxe->oxe_list);
1529 call_rcu(&old->oxe_rcu, osd_oxc_free);
1531 list_add_tail_rcu(&oxe->oxe_list, &obj->oo_xattr_list);
1533 spin_unlock(&obj->oo_guard);
1536 static void osd_oxc_del(struct osd_object *obj, const char *name)
1538 struct osd_xattr_entry *oxe;
1539 size_t namelen = strlen(name);
1541 spin_lock(&obj->oo_guard);
1542 list_for_each_entry(oxe, &obj->oo_xattr_list, oxe_list) {
1543 if (namelen == oxe->oxe_namelen &&
1544 strncmp(name, oxe->oxe_buf, namelen) == 0) {
1545 list_del_rcu(&oxe->oxe_list);
1546 call_rcu(&oxe->oxe_rcu, osd_oxc_free);
1550 spin_unlock(&obj->oo_guard);
1553 static void osd_oxc_fini(struct osd_object *obj)
1555 struct osd_xattr_entry *oxe, *next;
1557 list_for_each_entry_safe(oxe, next, &obj->oo_xattr_list, oxe_list) {
1558 list_del(&oxe->oxe_list);
1559 OBD_FREE(oxe, oxe->oxe_len);
 * Concurrency: no concurrent access is possible that late in the
 * object life-cycle.
1567 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
1569 struct osd_object *obj = osd_obj(l);
1570 struct lu_object_header *h = obj->oo_header;
1572 LINVRNT(osd_invariant(obj));
1575 dt_object_fini(&obj->oo_dt);
1576 if (obj->oo_hl_head != NULL)
1577 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
1580 lu_object_header_fini(h);
 * Concurrency: no concurrent access is possible that late in the
 * object life-cycle.
1589 static void osd_index_fini(struct osd_object *o)
1591 struct iam_container *bag;
1593 if (o->oo_dir != NULL) {
1594 bag = &o->oo_dir->od_container;
1595 if (o->oo_inode != NULL) {
1596 if (bag->ic_object == o->oo_inode)
1597 iam_container_fini(bag);
1599 OBD_FREE_PTR(o->oo_dir);
1605 * Concurrency: no concurrent access is possible that late in object
1606 * life-cycle (for all existing callers, that is. New callers have to provide
1607 * their own locking.)
1609 static int osd_inode_unlinked(const struct inode *inode)
1611 return inode->i_nlink == 0;
1615 OSD_TXN_OI_DELETE_CREDITS = 20,
1616 OSD_TXN_INODE_DELETE_CREDITS = 20
1623 #if OSD_THANDLE_STATS
1625 * Set time when the handle is allocated
1627 static void osd_th_alloced(struct osd_thandle *oth)
1629 oth->oth_alloced = ktime_get();
1633 * Set time when the handle started
1635 static void osd_th_started(struct osd_thandle *oth)
1637 oth->oth_started = ktime_get();
 * Check whether we have been dealing with this handle for too long.
1643 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
1644 ktime_t alloced, ktime_t started,
1647 ktime_t now = ktime_get();
1649 LASSERT(dev != NULL);
1651 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
1652 ktime_us_delta(started, alloced));
1653 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
1654 ktime_us_delta(closed, started));
1655 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
1656 ktime_us_delta(now, closed));
1658 if (ktime_before(ktime_add_ns(alloced, 30 * NSEC_PER_SEC), now)) {
1659 CWARN("transaction handle %p was open for too long: now %lld, alloced %lld, started %lld, closed %lld\n",
1660 oth, now, alloced, started, closed);
1661 libcfs_debug_dumpstack(NULL);
1665 #define OSD_CHECK_SLOW_TH(oth, dev, expr) \
1667 ktime_t __closed = ktime_get(); \
1668 ktime_t __alloced = oth->oth_alloced; \
1669 ktime_t __started = oth->oth_started; \
1672 __osd_th_check_slow(oth, dev, __alloced, __started, __closed); \
1675 #else /* OSD_THANDLE_STATS */
1677 #define osd_th_alloced(h) do {} while(0)
1678 #define osd_th_started(h) do {} while(0)
1679 #define OSD_CHECK_SLOW_TH(oth, dev, expr) expr
1681 #endif /* OSD_THANDLE_STATS */
1684 * Concurrency: doesn't access mutable data.
1686 static int osd_param_is_not_sane(const struct osd_device *dev,
1687 const struct thandle *th)
1689 struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
1691 return oh->ot_credits > osd_transaction_size(dev);
1695 * Concurrency: shouldn't matter.
1697 static void osd_trans_commit_cb(struct super_block *sb,
1698 struct ldiskfs_journal_cb_entry *jcb, int error)
1700 struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
1701 struct thandle *th = &oh->ot_super;
1702 struct lu_device *lud = &th->th_dev->dd_lu_dev;
1703 struct dt_txn_commit_cb *dcb, *tmp;
1705 LASSERT(oh->ot_handle == NULL);
1708 CERROR("transaction @0x%p commit error: %d\n", th, error);
1710 /* call per-transaction callbacks if any */
1711 list_for_each_entry_safe(dcb, tmp, &oh->ot_commit_dcb_list,
1713 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
1714 "commit callback entry: magic=%x name='%s'\n",
1715 dcb->dcb_magic, dcb->dcb_name);
1716 list_del_init(&dcb->dcb_linkage);
1717 dcb->dcb_func(NULL, th, dcb, error);
1720 lu_ref_del_at(&lud->ld_reference, &oh->ot_dev_link, "osd-tx", th);
1727 #ifndef HAVE_SB_START_WRITE
1728 # define sb_start_write(sb) do {} while (0)
1729 # define sb_end_write(sb) do {} while (0)
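/*
 * Allocate and set up an osd_thandle for a new transaction: take
 * sb_start_write() to block filesystem freezing, reset the per-thread
 * quota transaction and declare counters, and initialize the callback
 * and truncate-lock lists. The journal handle itself is started later,
 * in osd_trans_start().
 */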
1732 static struct thandle *osd_trans_create(const struct lu_env *env,
1733 struct dt_device *d)
1735 struct osd_thread_info *oti = osd_oti_get(env);
1736 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1737 struct osd_thandle *oh;
1743 CERROR("%s: someone try to start transaction under "
1744 "readonly mode, should be disabled.\n",
1745 osd_name(osd_dt_dev(d)));
1747 RETURN(ERR_PTR(-EROFS));
/* no pending IO in this thread should be left over from a previous request */
1751 LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
1753 sb_start_write(osd_sb(osd_dt_dev(d)));
1755 OBD_ALLOC_GFP(oh, sizeof(*oh), GFP_NOFS);
1757 sb_end_write(osd_sb(osd_dt_dev(d)));
1758 RETURN(ERR_PTR(-ENOMEM));
1761 oh->ot_quota_trans = &oti->oti_quota_trans;
1762 memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
1767 INIT_LIST_HEAD(&oh->ot_commit_dcb_list);
1768 INIT_LIST_HEAD(&oh->ot_stop_dcb_list);
1769 INIT_LIST_HEAD(&oh->ot_trunc_locks);
1772 memset(oti->oti_declare_ops, 0,
1773 sizeof(oti->oti_declare_ops));
1774 memset(oti->oti_declare_ops_cred, 0,
1775 sizeof(oti->oti_declare_ops_cred));
1776 memset(oti->oti_declare_ops_used, 0,
1777 sizeof(oti->oti_declare_ops_used));
1779 oti->oti_ins_cache_depth++;
1784 void osd_trans_dump_creds(const struct lu_env *env, struct thandle *th)
1786 struct osd_thread_info *oti = osd_oti_get(env);
1787 struct osd_thandle *oh;
1789 oh = container_of0(th, struct osd_thandle, ot_super);
1790 LASSERT(oh != NULL);
1792 CWARN(" create: %u/%u/%u, destroy: %u/%u/%u\n",
1793 oti->oti_declare_ops[OSD_OT_CREATE],
1794 oti->oti_declare_ops_cred[OSD_OT_CREATE],
1795 oti->oti_declare_ops_used[OSD_OT_CREATE],
1796 oti->oti_declare_ops[OSD_OT_DESTROY],
1797 oti->oti_declare_ops_cred[OSD_OT_DESTROY],
1798 oti->oti_declare_ops_used[OSD_OT_DESTROY]);
1799 CWARN(" attr_set: %u/%u/%u, xattr_set: %u/%u/%u\n",
1800 oti->oti_declare_ops[OSD_OT_ATTR_SET],
1801 oti->oti_declare_ops_cred[OSD_OT_ATTR_SET],
1802 oti->oti_declare_ops_used[OSD_OT_ATTR_SET],
1803 oti->oti_declare_ops[OSD_OT_XATTR_SET],
1804 oti->oti_declare_ops_cred[OSD_OT_XATTR_SET],
1805 oti->oti_declare_ops_used[OSD_OT_XATTR_SET]);
1806 CWARN(" write: %u/%u/%u, punch: %u/%u/%u, quota %u/%u/%u\n",
1807 oti->oti_declare_ops[OSD_OT_WRITE],
1808 oti->oti_declare_ops_cred[OSD_OT_WRITE],
1809 oti->oti_declare_ops_used[OSD_OT_WRITE],
1810 oti->oti_declare_ops[OSD_OT_PUNCH],
1811 oti->oti_declare_ops_cred[OSD_OT_PUNCH],
1812 oti->oti_declare_ops_used[OSD_OT_PUNCH],
1813 oti->oti_declare_ops[OSD_OT_QUOTA],
1814 oti->oti_declare_ops_cred[OSD_OT_QUOTA],
1815 oti->oti_declare_ops_used[OSD_OT_QUOTA]);
1816 CWARN(" insert: %u/%u/%u, delete: %u/%u/%u\n",
1817 oti->oti_declare_ops[OSD_OT_INSERT],
1818 oti->oti_declare_ops_cred[OSD_OT_INSERT],
1819 oti->oti_declare_ops_used[OSD_OT_INSERT],
1820 oti->oti_declare_ops[OSD_OT_DELETE],
1821 oti->oti_declare_ops_cred[OSD_OT_DELETE],
1822 oti->oti_declare_ops_used[OSD_OT_DELETE]);
1823 CWARN(" ref_add: %u/%u/%u, ref_del: %u/%u/%u\n",
1824 oti->oti_declare_ops[OSD_OT_REF_ADD],
1825 oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
1826 oti->oti_declare_ops_used[OSD_OT_REF_ADD],
1827 oti->oti_declare_ops[OSD_OT_REF_DEL],
1828 oti->oti_declare_ops_cred[OSD_OT_REF_DEL],
1829 oti->oti_declare_ops_used[OSD_OT_REF_DEL]);
1833 * Concurrency: shouldn't matter.
1835 static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
1838 struct osd_thread_info *oti = osd_oti_get(env);
1839 struct osd_device *dev = osd_dt_dev(d);
1841 struct osd_thandle *oh;
1846 LASSERT(current->journal_info == NULL);
1848 oh = container_of0(th, struct osd_thandle, ot_super);
1849 LASSERT(oh != NULL);
1850 LASSERT(oh->ot_handle == NULL);
1852 rc = dt_txn_hook_start(env, d, th);
1856 if (unlikely(osd_param_is_not_sane(dev, th))) {
1857 static unsigned long last_printed;
1858 static int last_credits;
 * don't make noise on tiny testing systems;
 * actual credit misuse will be caught anyway
1864 if (last_credits != oh->ot_credits &&
1865 time_after(jiffies, last_printed +
1866 msecs_to_jiffies(60 * MSEC_PER_SEC)) &&
1867 osd_transaction_size(dev) > 512) {
1868 CWARN("%s: credits %u > trans_max %u\n", osd_name(dev),
1869 oh->ot_credits, osd_transaction_size(dev));
1870 osd_trans_dump_creds(env, th);
1871 libcfs_debug_dumpstack(NULL);
1872 last_credits = oh->ot_credits;
1873 last_printed = jiffies;
1876 * XXX Limit the credits to 'max_transaction_buffers', and
 * let the underlying filesystem catch the error if
1878 * we really need so many credits.
1880 * This should be removed when we can calculate the
1881 * credits precisely.
1883 oh->ot_credits = osd_transaction_size(dev);
1884 } else if (ldiskfs_track_declares_assert != 0) {
 * reserve a few credits to prevent an assertion in JBD;
 * our debugging mechanism will be able to detect
 * overuse. This can help to debug single-update
 * transactions.
1891 oh->ot_credits += 10;
1892 if (unlikely(osd_param_is_not_sane(dev, th)))
1893 oh->ot_credits = osd_transaction_size(dev);
1896 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_TXN_START))
1897 GOTO(out, rc = -EIO);
 * XXX temporary stuff. Some abstraction layer should
 * be used.
1903 jh = osd_journal_start_sb(osd_sb(dev), LDISKFS_HT_MISC, oh->ot_credits);
1907 LASSERT(oti->oti_txns == 0);
1909 lu_device_get(&d->dd_lu_dev);
1910 lu_ref_add_at(&d->dd_lu_dev.ld_reference, &oh->ot_dev_link,
1921 static int osd_seq_exists(const struct lu_env *env,
1922 struct osd_device *osd, u64 seq)
1924 struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
1925 struct seq_server_site *ss = osd_seq_site(osd);
1930 LASSERT(ss != NULL);
1931 LASSERT(ss->ss_server_fld != NULL);
1933 rc = osd_fld_lookup(env, osd, seq, range);
1936 CERROR("%s: can't lookup FLD sequence %#llx: rc = %d\n",
1937 osd_name(osd), seq, rc);
1941 RETURN(ss->ss_node_id == range->lsr_index);
1944 static void osd_trans_stop_cb(struct osd_thandle *oth, int result)
1946 struct dt_txn_commit_cb *dcb;
1947 struct dt_txn_commit_cb *tmp;
1949 /* call per-transaction stop callbacks if any */
1950 list_for_each_entry_safe(dcb, tmp, &oth->ot_stop_dcb_list,
1952 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
1953 "commit callback entry: magic=%x name='%s'\n",
1954 dcb->dcb_magic, dcb->dcb_name);
1955 list_del_init(&dcb->dcb_linkage);
1956 dcb->dcb_func(NULL, &oth->ot_super, dcb, result);
1961 * Concurrency: shouldn't matter.
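/*
 * Stop the transaction: register the commit callback with the journal,
 * run the transaction hooks and stop callbacks, stop the journal handle,
 * process deferred truncates, end the quota operation, and wait for any
 * data IO submitted by this thread before the iobuf is finalized.
 */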
1963 static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
1966 struct osd_thread_info *oti = osd_oti_get(env);
1967 struct osd_thandle *oh;
1968 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1969 struct osd_device *osd = osd_dt_dev(th->th_dev);
1970 struct qsd_instance *qsd = osd_def_qsd(osd);
1971 struct lquota_trans *qtrans;
1972 struct list_head truncates = LIST_HEAD_INIT(truncates);
1973 int rc = 0, remove_agents = 0;
1977 oh = container_of0(th, struct osd_thandle, ot_super);
1979 remove_agents = oh->ot_remove_agents;
1981 qtrans = oh->ot_quota_trans;
1982 oh->ot_quota_trans = NULL;
1984 /* move locks to local list, stop tx, execute truncates */
1985 list_splice(&oh->ot_trunc_locks, &truncates);
1987 if (oh->ot_handle != NULL) {
1990 handle_t *hdl = oh->ot_handle;
 * add the commit callback;
 * notice we don't do this in osd_trans_start()
 * as the underlying transaction can change during truncate
1997 ldiskfs_journal_callback_add(hdl, osd_trans_commit_cb,
2000 LASSERT(oti->oti_txns == 1);
2003 rc = dt_txn_hook_stop(env, th);
2005 CERROR("%s: failed in transaction hook: rc = %d\n",
2008 osd_trans_stop_cb(oh, rc);
2009 /* hook functions might modify th_sync */
2010 hdl->h_sync = th->th_sync;
2012 oh->ot_handle = NULL;
2013 OSD_CHECK_SLOW_TH(oh, osd, rc2 = ldiskfs_journal_stop(hdl));
2015 CERROR("%s: failed to stop transaction: rc = %d\n",
2016 osd_name(osd), rc2);
2020 osd_process_truncates(&truncates);
2022 osd_trans_stop_cb(oh, th->th_result);
2026 osd_trunc_unlock_all(&truncates);
2028 /* inform the quota slave device that the transaction is stopping */
2029 qsd_op_end(env, qsd, qtrans);
 * as we want IO to the journal and data IO to be concurrent, we don't
 * block awaiting data IO completion in osd_do_bio(); instead we wait
 * here once the transaction is submitted to the journal. All regular
 * requests don't do direct IO (except read/write), thus this wait_event
 * is a no-op in the majority of cases.
 *
 * IMPORTANT: we have to wait until any IO submitted by the thread is
 * completed, otherwise iobuf may be corrupted by a different request
2041 wait_event(iobuf->dr_wait,
2042 atomic_read(&iobuf->dr_numreqs) == 0);
2043 osd_fini_iobuf(osd, iobuf);
2045 rc = iobuf->dr_error;
2047 if (unlikely(remove_agents != 0))
2048 osd_process_scheduled_agent_removals(env, osd);
2050 oti->oti_ins_cache_depth--;
2051 /* reset OI cache for safety */
2052 if (oti->oti_ins_cache_depth == 0)
2053 oti->oti_ins_cache_used = 0;
2055 sb_end_write(osd_sb(osd));
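/*
 * Attach a callback to the transaction: DCB_TRANS_STOP callbacks run
 * when the transaction is stopped, all others when it commits.
 */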
2060 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
2062 struct osd_thandle *oh = container_of0(th, struct osd_thandle,
2065 LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
2066 LASSERT(&dcb->dcb_func != NULL);
2067 if (dcb->dcb_flags & DCB_TRANS_STOP)
2068 list_add(&dcb->dcb_linkage, &oh->ot_stop_dcb_list);
2070 list_add(&dcb->dcb_linkage, &oh->ot_commit_dcb_list);
2076 * Called just before object is freed. Releases all resources except for
2077 * object itself (that is released by osd_object_free()).
 * Concurrency: no concurrent access is possible that late in the
 * object life-cycle.
2082 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
2084 struct osd_object *obj = osd_obj(l);
2085 struct qsd_instance *qsd = osd_def_qsd(osd_obj2dev(obj));
2086 struct inode *inode = obj->oo_inode;
2091 LINVRNT(osd_invariant(obj));
 * If the object is unlinked, remove the fid->ino mapping from the object index.
2097 osd_index_fini(obj);
2102 uid = i_uid_read(inode);
2103 gid = i_gid_read(inode);
2104 projid = i_projid_read(inode);
2106 obj->oo_inode = NULL;
/* do not rebalance quota if the caller needs to release memory,
 * otherwise qsd_refresh_usage() may enter a new ldiskfs
 * transaction and risk a deadlock - LU-12178 */
2112 if (current->flags & (PF_MEMALLOC | PF_KSWAPD))
2115 if (!obj->oo_header && qsd) {
2116 struct osd_thread_info *info = osd_oti_get(env);
2117 struct lquota_id_info *qi = &info->oti_qi;
2119 /* Release granted quota to master if necessary */
2120 qi->lqi_id.qid_uid = uid;
2121 qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
2123 qi->lqi_id.qid_uid = gid;
2124 qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
2126 qi->lqi_id.qid_uid = projid;
2127 qsd_op_adjust(env, qsd, &qi->lqi_id, PRJQUOTA);
2132 * Concurrency: ->loo_object_release() is called under site spin-lock.
2134 static void osd_object_release(const struct lu_env *env,
2135 struct lu_object *l)
2137 struct osd_object *o = osd_obj(l);
 * nobody should be releasing a non-destroyed object with nlink=0;
 * the API allows this, but ldiskfs doesn't like it and then reports
 * the inode as deleted
2144 LASSERT(!(o->oo_destroyed == 0 && o->oo_inode &&
2145 o->oo_inode->i_nlink == 0));
2149 * Concurrency: shouldn't matter.
2151 static int osd_object_print(const struct lu_env *env, void *cookie,
2152 lu_printer_t p, const struct lu_object *l)
2154 struct osd_object *o = osd_obj(l);
2155 struct iam_descr *d;
2157 if (o->oo_dir != NULL)
2158 d = o->oo_dir->od_container.ic_descr;
2161 return (*p)(env, cookie,
2162 LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
2164 o->oo_inode ? o->oo_inode->i_ino : 0UL,
2165 o->oo_inode ? o->oo_inode->i_generation : 0,
2166 d ? d->id_ops->id_name : "plain");
2170 * Concurrency: shouldn't matter.
2172 int osd_statfs(const struct lu_env *env, struct dt_device *d,
2173 struct obd_statfs *sfs)
2175 struct osd_device *osd = osd_dt_dev(d);
2176 struct super_block *sb = osd_sb(osd);
2177 struct kstatfs *ksfs;
2181 if (unlikely(osd->od_mnt == NULL))
2182 return -EINPROGRESS;
/* osd_lproc.c calls this without an env; allocate ksfs for that case */
2185 if (unlikely(env == NULL)) {
2186 OBD_ALLOC_PTR(ksfs);
2190 ksfs = &osd_oti_get(env)->oti_ksfs;
2193 result = sb->s_op->statfs(sb->s_root, ksfs);
2197 statfs_pack(sfs, ksfs);
2198 if (unlikely(sb->s_flags & MS_RDONLY))
2199 sfs->os_state |= OS_STATE_READONLY;
2200 if (ldiskfs_has_feature_extents(sb))
2201 sfs->os_maxbytes = sb->s_maxbytes;
2203 sfs->os_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
 * Reserve some space to avoid fragmenting the filesystem too much.
 * Fragmentation not only impacts performance, but can also increase
 * metadata overhead significantly, causing the grant calculation to
 * be wrong.
 *
 * Reserve 0.78% of total space, at least 8MB for small filesystems.
2213 CLASSERT(OSD_STATFS_RESERVED > LDISKFS_MAX_BLOCK_SIZE);
2214 reserved = OSD_STATFS_RESERVED >> sb->s_blocksize_bits;
2215 if (likely(sfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
2216 reserved = sfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
2218 sfs->os_blocks -= reserved;
2219 sfs->os_bfree -= min(reserved, sfs->os_bfree);
2220 sfs->os_bavail -= min(reserved, sfs->os_bavail);
2223 if (unlikely(env == NULL))
 * Estimate space needed for file creations. We assume the largest filename
 * is the decimal representation of 2^64 - 1, hence a filename of 20 chars.
 * This is 28 bytes per object, which is 28MB for 1M objects ... not so bad.
2233 #ifdef __LDISKFS_DIR_REC_LEN
2234 #define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
2236 #define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
2240 * Concurrency: doesn't access mutable data.
2242 static void osd_conf_get(const struct lu_env *env,
2243 const struct dt_device *dev,
2244 struct dt_device_param *param)
2246 struct osd_device *d = osd_dt_dev(dev);
2247 struct super_block *sb = osd_sb(d);
2248 struct blk_integrity *bi = bdev_get_integrity(sb->s_bdev);
2253 * XXX should be taken from not-yet-existing fs abstraction layer.
2255 param->ddp_max_name_len = LDISKFS_NAME_LEN;
2256 param->ddp_max_nlink = LDISKFS_LINK_MAX;
2257 param->ddp_symlink_max = sb->s_blocksize;
2258 param->ddp_mount_type = LDD_MT_LDISKFS;
2259 if (ldiskfs_has_feature_extents(sb))
2260 param->ddp_maxbytes = sb->s_maxbytes;
2262 param->ddp_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
 * inodes are statically allocated, so per-inode space consumption
 * is the space consumed by the directory entry
2267 param->ddp_inodespace = PER_OBJ_USAGE;
2269 * EXT_INIT_MAX_LEN is the theoretical maximum extent size (32k blocks
2270 * = 128MB) which is unlikely to be hit in real life. Report a smaller
 * maximum length to avoid undercounting the actual number of extents
2272 * needed for writing a file.
2274 param->ddp_max_extent_blks = EXT_INIT_MAX_LEN >> 2;
2275 /* worst-case extent insertion metadata overhead */
2276 param->ddp_extent_tax = 6 * LDISKFS_BLOCK_SIZE(sb);
2277 param->ddp_mntopts = 0;
2278 if (test_opt(sb, XATTR_USER))
2279 param->ddp_mntopts |= MNTOPT_USERXATTR;
2280 if (test_opt(sb, POSIX_ACL))
2281 param->ddp_mntopts |= MNTOPT_ACL;
2284 * LOD might calculate the max stripe count based on max_ea_size,
 * so we need to take the overhead into account as well:
2286 * xattr_header + magic + xattr_entry_head
2288 ea_overhead = sizeof(struct ldiskfs_xattr_header) + sizeof(__u32) +
2289 LDISKFS_XATTR_LEN(XATTR_NAME_MAX_LEN);
2291 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
2292 if (ldiskfs_has_feature_ea_inode(sb))
2293 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE -
2297 param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
2299 if (param->ddp_max_ea_size > OSD_MAX_EA_SIZE)
2300 param->ddp_max_ea_size = OSD_MAX_EA_SIZE;
2303 * Preferred RPC size for efficient disk IO. 4MB shows good
2304 * all-around performance for ldiskfs, but use bigalloc chunk size
2305 * by default if larger.
2307 #if defined(LDISKFS_CLUSTER_SIZE)
2308 if (LDISKFS_CLUSTER_SIZE(sb) > DT_DEF_BRW_SIZE)
2309 param->ddp_brw_size = LDISKFS_CLUSTER_SIZE(sb);
2312 param->ddp_brw_size = DT_DEF_BRW_SIZE;
2314 param->ddp_t10_cksum_type = 0;
2316 unsigned short interval = blk_integrity_interval(bi);
2317 name = blk_integrity_name(bi);
2325 if (strncmp(name, "T10-DIF-TYPE",
2326 sizeof("T10-DIF-TYPE") - 1) == 0) {
2327 /* also skip "1/3-" at end */
2328 const int type_off = sizeof("T10-DIF-TYPE.");
2329 char type_number = name[type_off - 2];
2331 if (interval != 512 && interval != 4096) {
2332 CERROR("%s: unsupported T10PI sector size %u\n",
2333 d->od_svname, interval);
2334 } else if (type_number != '1' && type_number != '3') {
2335 CERROR("%s: unsupported T10PI type %s\n",
2336 d->od_svname, name);
2337 } else if (strcmp(name + type_off, "CRC") == 0) {
2338 d->od_t10_type = type_number == '1' ?
2339 OSD_T10_TYPE1_CRC : OSD_T10_TYPE3_CRC;
2340 param->ddp_t10_cksum_type = interval == 512 ?
2341 OBD_CKSUM_T10CRC512 :
2343 } else if (strcmp(name + type_off, "IP") == 0) {
2344 d->od_t10_type = type_number == '1' ?
2345 OSD_T10_TYPE1_IP : OSD_T10_TYPE3_IP;
2346 param->ddp_t10_cksum_type = interval == 512 ?
2347 OBD_CKSUM_T10IP512 :
2350 CERROR("%s: unsupported checksum type of "
2352 d->od_svname, name);
2356 CERROR("%s: unsupported T10PI type '%s'",
2357 d->od_svname, name);
2363 * Concurrency: shouldn't matter.
2365 static int osd_sync(const struct lu_env *env, struct dt_device *d)
2368 struct super_block *s = osd_sb(osd_dt_dev(d));
2371 down_read(&s->s_umount);
2372 rc = s->s_op->sync_fs(s, 1);
2373 up_read(&s->s_umount);
2375 CDEBUG(D_CACHE, "%s: synced OSD: rc = %d\n", osd_dt_dev(d)->od_svname,
2382 * Start commit for OSD device.
2384 * An implementation of dt_commit_async method for OSD device.
2385 * Asynchronously starts the underlying fs sync and thereby a transaction
2388 * \param env environment
2389 * \param d dt device
2391 * \see dt_device_operations
2393 static int osd_commit_async(const struct lu_env *env,
2394 struct dt_device *d)
2396 struct super_block *s = osd_sb(osd_dt_dev(d));
2401 CDEBUG(D_HA, "%s: async commit OSD\n", osd_dt_dev(d)->od_svname);
2402 down_read(&s->s_umount);
2403 rc = s->s_op->sync_fs(s, 0);
2404 up_read(&s->s_umount);
2409 /* Our own copy of the set readonly functions if present, or NULL if not. */
2410 static int (*priv_dev_set_rdonly)(struct block_device *bdev);
2411 static int (*priv_dev_check_rdonly)(struct block_device *bdev);
2412 /* static int (*priv_dev_clear_rdonly)(struct block_device *bdev); */
2415 * Concurrency: shouldn't matter.
2417 static int osd_ro(const struct lu_env *env, struct dt_device *d)
2419 struct super_block *sb = osd_sb(osd_dt_dev(d));
2420 struct block_device *dev = sb->s_bdev;
2421 int rc = -EOPNOTSUPP;
2425 if (priv_dev_set_rdonly) {
2426 struct block_device *jdev = LDISKFS_SB(sb)->journal_bdev;
2429 CERROR("*** setting %s read-only ***\n",
2430 osd_dt_dev(d)->od_svname);
2432 if (sb->s_op->freeze_fs) {
2433 rc = sb->s_op->freeze_fs(sb);
2438 if (jdev && (jdev != dev)) {
2439 CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
2441 priv_dev_set_rdonly(jdev);
2443 CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
2444 priv_dev_set_rdonly(dev);
2446 if (sb->s_op->unfreeze_fs)
2447 sb->s_op->unfreeze_fs(sb);
2452 CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
2453 osd_dt_dev(d)->od_svname, (long)dev, rc);
2459 * Note: quota credits are not counted here.
2460 * If we mount with --data_journal we may need more.
2462 const int osd_dto_credits_noquota[DTO_NR] = {
2465 * INDEX_EXTRA_TRANS_BLOCKS(8) +
2466 * SINGLEDATA_TRANS_BLOCKS(8)
2467 * XXX Note: maybe iam needs more, since iam has more levels than
2470 [DTO_INDEX_INSERT] = 16,
2473 * just modify a single entry, probably merging a few within a block
2475 [DTO_INDEX_DELETE] = 1,
2479 [DTO_INDEX_UPDATE] = 16,
2481 * 4(inode, inode bits, groups, GDT)
2482 * notice: OI updates are counted separately with DTO_INDEX_INSERT
2484 [DTO_OBJECT_CREATE] = 4,
2486 * 4(inode, inode bits, groups, GDT)
2487 * notice: OI updates are counted separately with DTO_INDEX_DELETE
2489 [DTO_OBJECT_DELETE] = 4,
2491 * Attr set credits (inode)
2493 [DTO_ATTR_SET_BASE] = 1,
2495 * Xattr set. The same as xattr of EXT3.
2496 * DATA_TRANS_BLOCKS(14)
2497 * XXX Note: in the original MDS implementation INDEX_EXTRA_TRANS_BLOCKS
2498 * are also counted in. It is not clear why.
2500 [DTO_XATTR_SET] = 14,
2502 * credits for inode change during write.
2504 [DTO_WRITE_BASE] = 3,
2506 * credits for single block write.
2508 [DTO_WRITE_BLOCK] = 14,
2510 * Attr set credits for chown.
2511 * These are extra credits for setattr, and they are zero without quota
2513 [DTO_ATTR_SET_CHOWN] = 0
2516 static const struct dt_device_operations osd_dt_ops = {
2517 .dt_root_get = osd_root_get,
2518 .dt_statfs = osd_statfs,
2519 .dt_trans_create = osd_trans_create,
2520 .dt_trans_start = osd_trans_start,
2521 .dt_trans_stop = osd_trans_stop,
2522 .dt_trans_cb_add = osd_trans_cb_add,
2523 .dt_conf_get = osd_conf_get,
2524 .dt_sync = osd_sync,
2526 .dt_commit_async = osd_commit_async,
2529 static void osd_read_lock(const struct lu_env *env, struct dt_object *dt,
2532 struct osd_object *obj = osd_dt_obj(dt);
2533 struct osd_thread_info *oti = osd_oti_get(env);
2535 LINVRNT(osd_invariant(obj));
2537 LASSERT(obj->oo_owner != env);
2538 down_read_nested(&obj->oo_sem, role);
2540 LASSERT(obj->oo_owner == NULL);
2544 static void osd_write_lock(const struct lu_env *env, struct dt_object *dt,
2547 struct osd_object *obj = osd_dt_obj(dt);
2548 struct osd_thread_info *oti = osd_oti_get(env);
2550 LINVRNT(osd_invariant(obj));
2552 LASSERT(obj->oo_owner != env);
2553 down_write_nested(&obj->oo_sem, role);
2555 LASSERT(obj->oo_owner == NULL);
2556 obj->oo_owner = env;
2560 static void osd_read_unlock(const struct lu_env *env, struct dt_object *dt)
2562 struct osd_object *obj = osd_dt_obj(dt);
2563 struct osd_thread_info *oti = osd_oti_get(env);
2565 LINVRNT(osd_invariant(obj));
2567 LASSERT(oti->oti_r_locks > 0);
2569 up_read(&obj->oo_sem);
2572 static void osd_write_unlock(const struct lu_env *env, struct dt_object *dt)
2574 struct osd_object *obj = osd_dt_obj(dt);
2575 struct osd_thread_info *oti = osd_oti_get(env);
2577 LINVRNT(osd_invariant(obj));
2579 LASSERT(obj->oo_owner == env);
2580 LASSERT(oti->oti_w_locks > 0);
2582 obj->oo_owner = NULL;
2583 up_write(&obj->oo_sem);
2586 static int osd_write_locked(const struct lu_env *env, struct dt_object *dt)
2588 struct osd_object *obj = osd_dt_obj(dt);
2590 LINVRNT(osd_invariant(obj));
2592 return obj->oo_owner == env;
2595 static struct timespec *osd_inode_time(const struct lu_env *env,
2596 struct inode *inode, __u64 seconds)
2598 struct osd_thread_info *oti = osd_oti_get(env);
2599 struct timespec *t = &oti->oti_time;
2601 t->tv_sec = seconds;
2603 *t = timespec_trunc(*t, inode->i_sb->s_time_gran);
2607 static void osd_inode_getattr(const struct lu_env *env,
2608 struct inode *inode, struct lu_attr *attr)
2610 attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
2611 LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
2612 LA_PROJID | LA_FLAGS | LA_NLINK | LA_RDEV |
2613 LA_BLKSIZE | LA_TYPE;
2615 attr->la_atime = LTIME_S(inode->i_atime);
2616 attr->la_mtime = LTIME_S(inode->i_mtime);
2617 attr->la_ctime = LTIME_S(inode->i_ctime);
2618 attr->la_mode = inode->i_mode;
2619 attr->la_size = i_size_read(inode);
2620 attr->la_blocks = inode->i_blocks;
2621 attr->la_uid = i_uid_read(inode);
2622 attr->la_gid = i_gid_read(inode);
2623 attr->la_projid = i_projid_read(inode);
2624 attr->la_flags = ll_inode_to_ext_flags(inode->i_flags);
2625 attr->la_nlink = inode->i_nlink;
2626 attr->la_rdev = inode->i_rdev;
2627 attr->la_blksize = 1 << inode->i_blkbits;
2628 attr->la_blkbits = inode->i_blkbits;
2630 * Ext4 does not transfer inherit flags from the raw inode
2631 * to inode flags, and ext4 internally tests raw inode
2632 * @i_flags directly. Instead of patching ext4, we do it here.
2634 if (LDISKFS_I(inode)->i_flags & LUSTRE_PROJINHERIT_FL)
2635 attr->la_flags |= LUSTRE_PROJINHERIT_FL;
2638 static int osd_attr_get(const struct lu_env *env, struct dt_object *dt,
2639 struct lu_attr *attr)
2641 struct osd_object *obj = osd_dt_obj(dt);
2643 if (unlikely(!dt_object_exists(dt)))
2645 if (unlikely(obj->oo_destroyed))
2648 LASSERT(!dt_object_remote(dt));
2649 LINVRNT(osd_invariant(obj));
2651 spin_lock(&obj->oo_guard);
2652 osd_inode_getattr(env, obj->oo_inode, attr);
2653 if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL) {
2654 attr->la_valid |= LA_FLAGS;
2655 attr->la_flags |= LUSTRE_ORPHAN_FL;
2657 spin_unlock(&obj->oo_guard);
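/*
 * Declare quota credits for changing the owner of one quota type on the
 * inode: charge one inode and @bspace blocks to @new_id and release the
 * same from @old_id. When @ignore_edquot is set, EDQUOT and EINPROGRESS
 * from the declaration are ignored.
 */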
2662 static int osd_declare_attr_qid(const struct lu_env *env,
2663 struct osd_object *obj,
2664 struct osd_thandle *oh, long long bspace,
2665 qid_t old_id, qid_t new_id, bool enforce,
2666 unsigned int type, bool ignore_edquot)
2669 struct osd_thread_info *info = osd_oti_get(env);
2670 struct lquota_id_info *qi = &info->oti_qi;
2672 qi->lqi_type = type;
2673 /* inode accounting */
2674 qi->lqi_is_blk = false;
2676 /* one more inode for the new id ... */
2677 qi->lqi_id.qid_uid = new_id;
2679 /* Reserve credits for the new id */
2680 rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
2681 if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2686 /* and one less inode for the current id */
2687 qi->lqi_id.qid_uid = old_id;
2689 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2690 if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2695 /* block accounting */
2696 qi->lqi_is_blk = true;
2698 /* more blocks for the new id ... */
2699 qi->lqi_id.qid_uid = new_id;
2700 qi->lqi_space = bspace;
2702 * Credits for the new uid have been reserved; reuse "obj"
2703 * to save credit reservation.
2705 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2706 if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2711 /* and finally less blocks for the current uid */
2712 qi->lqi_id.qid_uid = old_id;
2713 qi->lqi_space = -bspace;
2714 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2715 if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2721 static int osd_declare_attr_set(const struct lu_env *env,
2722 struct dt_object *dt,
2723 const struct lu_attr *attr,
2724 struct thandle *handle)
2726 struct osd_thandle *oh;
2727 struct osd_object *obj;
2736 LASSERT(dt != NULL);
2737 LASSERT(handle != NULL);
2739 obj = osd_dt_obj(dt);
2740 LASSERT(osd_invariant(obj));
2742 oh = container_of0(handle, struct osd_thandle, ot_super);
2743 LASSERT(oh->ot_handle == NULL);
2745 osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
2746 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2748 osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
2749 osd_dto_credits_noquota[DTO_XATTR_SET]);
2751 if (attr == NULL || obj->oo_inode == NULL)
2754 bspace = obj->oo_inode->i_blocks << 9;
2755 bspace = toqb(bspace);
2758 * Changing ownership is always performed by the super user; it should not
2759 * fail with EDQUOT unless explicitly required.
2761 * We still need to call the osd_declare_qid() to calculate the journal
2762 * credits for updating quota accounting files and to trigger quota
2763 * space adjustment once the operation is completed.
2765 if (attr->la_valid & LA_UID || attr->la_valid & LA_GID) {
2766 bool ignore_edquot = !(attr->la_flags & LUSTRE_SET_SYNC_FL);
2769 CDEBUG(D_QUOTA, "%s: enforce quota on UID %u, GID %u"
2770 "(the quota space is %lld)\n",
2771 obj->oo_inode->i_sb->s_id, attr->la_uid,
2772 attr->la_gid, bspace);
2775 uid = i_uid_read(obj->oo_inode);
2776 enforce = (attr->la_valid & LA_UID) && (attr->la_uid != uid);
2777 rc = osd_declare_attr_qid(env, obj, oh, bspace, uid,
2778 attr->la_uid, enforce, USRQUOTA,
2783 gid = i_gid_read(obj->oo_inode);
2784 enforce = (attr->la_valid & LA_GID) && (attr->la_gid != gid);
2785 rc = osd_declare_attr_qid(env, obj, oh, bspace,
2786 i_gid_read(obj->oo_inode),
2787 attr->la_gid, enforce, GRPQUOTA,
2793 #ifdef HAVE_PROJECT_QUOTA
2794 if (attr->la_valid & LA_PROJID) {
2795 __u32 projid = i_projid_read(obj->oo_inode);
2797 enforce = (attr->la_valid & LA_PROJID) &&
2798 (attr->la_projid != projid);
2799 rc = osd_declare_attr_qid(env, obj, oh, bspace,
2800 (qid_t)projid, (qid_t)attr->la_projid,
2801 enforce, PRJQUOTA, true);
2809 static int osd_inode_setattr(const struct lu_env *env,
2810 struct inode *inode, const struct lu_attr *attr)
2812 __u64 bits = attr->la_valid;
2814 /* Only allow set size for regular file */
2815 if (!S_ISREG(inode->i_mode))
2816 bits &= ~(LA_SIZE | LA_BLOCKS);
2821 if (bits & LA_ATIME)
2822 inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
2823 if (bits & LA_CTIME)
2824 inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
2825 if (bits & LA_MTIME)
2826 inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
2827 if (bits & LA_SIZE) {
2828 spin_lock(&inode->i_lock);
2829 LDISKFS_I(inode)->i_disksize = attr->la_size;
2830 i_size_write(inode, attr->la_size);
2831 spin_unlock(&inode->i_lock);
2835 * OSD should not change "i_blocks" which is used by quota.
2836 * "i_blocks" should be changed by ldiskfs only.
2839 inode->i_mode = (inode->i_mode & S_IFMT) |
2840 (attr->la_mode & ~S_IFMT);
2842 i_uid_write(inode, attr->la_uid);
2844 i_gid_write(inode, attr->la_gid);
2845 if (bits & LA_PROJID)
2846 i_projid_write(inode, attr->la_projid);
2847 if (bits & LA_NLINK)
2848 set_nlink(inode, attr->la_nlink);
2850 inode->i_rdev = attr->la_rdev;
2852 if (bits & LA_FLAGS) {
2853 /* always keep S_NOCMTIME */
2854 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
2857 * Ext4 does not transfer inherit flags from
2858 * @inode->i_flags to raw inode i_flags when writing
2859 * flags, so we do it explicitly here.
2861 if (attr->la_flags & LUSTRE_PROJINHERIT_FL)
2862 LDISKFS_I(inode)->i_flags |= LUSTRE_PROJINHERIT_FL;
2864 LDISKFS_I(inode)->i_flags &= ~LUSTRE_PROJINHERIT_FL;
2869 #ifdef HAVE_PROJECT_QUOTA
2870 static int osd_transfer_project(struct inode *inode, __u32 projid)
2872 struct super_block *sb = inode->i_sb;
2873 struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
2876 struct ldiskfs_iloc iloc;
2877 struct ldiskfs_inode *raw_inode;
2878 struct dquot *transfer_to[LDISKFS_MAXQUOTAS] = { };
2880 if (!ldiskfs_has_feature_project(sb)) {
2881 LASSERT(__kprojid_val(LDISKFS_I(inode)->i_projid)
2882 == LDISKFS_DEF_PROJID);
2883 if (projid != LDISKFS_DEF_PROJID)
2889 if (LDISKFS_INODE_SIZE(sb) <= LDISKFS_GOOD_OLD_INODE_SIZE)
2892 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
2893 if (projid_eq(kprojid, LDISKFS_I(inode)->i_projid))
2896 err = ldiskfs_get_inode_loc(inode, &iloc);
2900 raw_inode = ldiskfs_raw_inode(&iloc);
2901 if (!LDISKFS_FITS_IN_INODE(raw_inode, ei, i_projid)) {
2908 dquot_initialize(inode);
2909 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2910 if (transfer_to[PRJQUOTA]) {
2911 err = __dquot_transfer(inode, transfer_to);
2912 dqput(transfer_to[PRJQUOTA]);
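/*
 * Transfer the inode's quota usage to the owners requested in @attr:
 * UID/GID via the VFS dquot transfer, and the project ID (when supported)
 * via osd_transfer_project().
 */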
2921 static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
2925 if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
2926 (attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
2929 ll_vfs_dq_init(inode);
2931 if (attr->la_valid & LA_UID)
2932 iattr.ia_valid |= ATTR_UID;
2933 if (attr->la_valid & LA_GID)
2934 iattr.ia_valid |= ATTR_GID;
2935 iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
2936 iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);
2938 rc = ll_vfs_dq_transfer(inode, &iattr);
2940 CERROR("%s: quota transfer failed: rc = %d. Is quota "
2941 "enforcement enabled on the ldiskfs "
2942 "filesystem?\n", inode->i_sb->s_id, rc);
2947 /* Handle project id transfer here properly */
2948 if (attr->la_valid & LA_PROJID &&
2949 attr->la_projid != i_projid_read(inode)) {
2950 #ifdef HAVE_PROJECT_QUOTA
2951 rc = osd_transfer_project(inode, attr->la_projid);
2956 CERROR("%s: quota transfer failed: rc = %d. Is project "
2957 "enforcement enabled on the ldiskfs "
2958 "filesystem?\n", inode->i_sb->s_id, rc);
2965 static int osd_attr_set(const struct lu_env *env,
2966 struct dt_object *dt,
2967 const struct lu_attr *attr,
2968 struct thandle *handle)
2970 struct osd_object *obj = osd_dt_obj(dt);
2971 struct inode *inode;
2974 if (!dt_object_exists(dt))
2977 LASSERT(handle != NULL);
2978 LASSERT(!dt_object_remote(dt));
2979 LASSERT(osd_invariant(obj));
2981 osd_trans_exec_op(env, handle, OSD_OT_ATTR_SET);
2983 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING) &&
2984 !osd_obj2dev(obj)->od_is_ost) {
2985 struct osd_thread_info *oti = osd_oti_get(env);
2986 const struct lu_fid *fid0 = lu_object_fid(&dt->do_lu);
2987 struct lu_fid *fid1 = &oti->oti_fid;
2988 struct osd_inode_id *id = &oti->oti_id;
2989 struct iam_path_descr *ipd;
2990 struct iam_container *bag;
2991 struct osd_thandle *oh;
2994 fid_cpu_to_be(fid1, fid0);
2995 memset(id, 1, sizeof(*id));
2996 bag = &osd_fid2oi(osd_dev(dt->do_lu.lo_dev),
2997 fid0)->oi_dir.od_container;
2998 ipd = osd_idx_ipd_get(env, bag);
2999 if (unlikely(ipd == NULL))
3002 oh = container_of0(handle, struct osd_thandle, ot_super);
3003 rc = iam_update(oh->ot_handle, bag,
3004 (const struct iam_key *)fid1,
3005 (const struct iam_rec *)id, ipd);
3006 osd_ipd_put(env, bag, ipd);
3007 return(rc > 0 ? 0 : rc);
3010 inode = obj->oo_inode;
3012 rc = osd_quota_transfer(inode, attr);
3016 spin_lock(&obj->oo_guard);
3017 rc = osd_inode_setattr(env, inode, attr);
3018 spin_unlock(&obj->oo_guard);
3022 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
3024 if (!(attr->la_valid & LA_FLAGS))
3027 /* Check whether there are extra flags that need to be set in the LMA */
3028 if (attr->la_flags & LUSTRE_LMA_FL_MASKS) {
3029 struct osd_thread_info *info = osd_oti_get(env);
3030 struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
3032 LASSERT(!obj->oo_pfid_in_lma);
3034 rc = osd_get_lma(info, inode, &info->oti_obj_dentry,
3035 &info->oti_ost_attrs);
3039 lma->lma_incompat |=
3040 lustre_to_lma_flags(attr->la_flags);
3041 lustre_lma_swab(lma);
3042 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA,
3043 lma, sizeof(*lma), XATTR_REPLACE);
3045 struct osd_device *osd = osd_obj2dev(obj);
3047 CWARN("%s: set "DFID" lma flags %u failed: rc = %d\n",
3048 osd_name(osd), PFID(lu_object_fid(&dt->do_lu)),
3049 lma->lma_incompat, rc);
3052 attr->la_flags & LUSTRE_LMA_FL_MASKS;
3054 osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
3057 osd_trans_exec_check(env, handle, OSD_OT_ATTR_SET);
3062 static struct dentry *osd_child_dentry_get(const struct lu_env *env,
3063 struct osd_object *obj,
3064 const char *name, const int namelen)
3066 return osd_child_dentry_by_inode(env, obj->oo_inode, name, namelen);
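/*
 * Allocate a new inode of the given @mode under the parent suggested by
 * @hint (or under the filesystem root if no usable local hint is given),
 * mark it S_NOCMTIME and LDISKFS_STATE_LUSTRE_NOSCRUB, and attach it
 * to @obj.
 */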
3069 static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
3070 umode_t mode, struct dt_allocation_hint *hint,
3071 struct thandle *th, struct lu_attr *attr)
3074 struct osd_device *osd = osd_obj2dev(obj);
3075 struct osd_thandle *oth;
3076 struct dt_object *parent = NULL;
3077 struct inode *inode;
3078 uid_t owner[2] = {0, 0};
3080 if (attr->la_valid & LA_UID)
3081 owner[0] = attr->la_uid;
3082 if (attr->la_valid & LA_GID)
3083 owner[1] = attr->la_gid;
3085 LINVRNT(osd_invariant(obj));
3086 LASSERT(obj->oo_inode == NULL);
3087 LASSERT(obj->oo_hl_head == NULL);
3089 if (S_ISDIR(mode) && ldiskfs_pdo) {
3091 ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
3092 if (obj->oo_hl_head == NULL)
3096 oth = container_of(th, struct osd_thandle, ot_super);
3097 LASSERT(oth->ot_handle->h_transaction != NULL);
3099 if (hint != NULL && hint->dah_parent != NULL &&
3100 !dt_object_remote(hint->dah_parent))
3101 parent = hint->dah_parent;
3103 inode = ldiskfs_create_inode(oth->ot_handle,
3104 parent ? osd_dt_obj(parent)->oo_inode :
3105 osd_sb(osd)->s_root->d_inode,
3107 if (!IS_ERR(inode)) {
3108 /* Do not update file c/mtime in ldiskfs. */
3109 inode->i_flags |= S_NOCMTIME;
3112 * For a newly created object, it must be consistent,
3113 * and it is unnecessary to scrub it.
3115 ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_NOSCRUB);
3117 obj->oo_inode = inode;
3120 if (obj->oo_hl_head != NULL) {
3121 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
3122 obj->oo_hl_head = NULL;
3124 result = PTR_ERR(inode);
3126 LINVRNT(osd_invariant(obj));
3134 static int osd_mkdir(struct osd_thread_info *info, struct osd_object *obj,
3135 struct lu_attr *attr,
3136 struct dt_allocation_hint *hint,
3137 struct dt_object_format *dof,
3141 struct osd_thandle *oth;
3142 __u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX | S_ISGID));
3144 LASSERT(S_ISDIR(attr->la_mode));
3146 oth = container_of(th, struct osd_thandle, ot_super);
3147 LASSERT(oth->ot_handle->h_transaction != NULL);
3148 result = osd_mkfile(info, obj, mode, hint, th, attr);
3153 static int osd_mk_index(struct osd_thread_info *info, struct osd_object *obj,
3154 struct lu_attr *attr,
3155 struct dt_allocation_hint *hint,
3156 struct dt_object_format *dof,
3160 struct osd_thandle *oth;
3161 const struct dt_index_features *feat = dof->u.dof_idx.di_feat;
3163 __u32 mode = (attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX));
3165 LASSERT(S_ISREG(attr->la_mode));
3167 oth = container_of(th, struct osd_thandle, ot_super);
3168 LASSERT(oth->ot_handle->h_transaction != NULL);
3170 result = osd_mkfile(info, obj, mode, hint, th, attr);
3172 LASSERT(obj->oo_inode != NULL);
3173 if (feat->dif_flags & DT_IND_VARKEY)
3174 result = iam_lvar_create(obj->oo_inode,
3175 feat->dif_keysize_max,
3177 feat->dif_recsize_max,
3180 result = iam_lfix_create(obj->oo_inode,
3181 feat->dif_keysize_max,
3183 feat->dif_recsize_max,
3189 static int osd_mkreg(struct osd_thread_info *info, struct osd_object *obj,
3190 struct lu_attr *attr,
3191 struct dt_allocation_hint *hint,
3192 struct dt_object_format *dof,
3195 LASSERT(S_ISREG(attr->la_mode));
3196 return osd_mkfile(info, obj, (attr->la_mode &
3197 (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th,
3201 static int osd_mksym(struct osd_thread_info *info, struct osd_object *obj,
3202 struct lu_attr *attr,
3203 struct dt_allocation_hint *hint,
3204 struct dt_object_format *dof,
3207 LASSERT(S_ISLNK(attr->la_mode));
3208 return osd_mkfile(info, obj, (attr->la_mode &
3209 (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th,
3213 static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
3214 struct lu_attr *attr,
3215 struct dt_allocation_hint *hint,
3216 struct dt_object_format *dof,
3219 umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
3222 LINVRNT(osd_invariant(obj));
3223 LASSERT(obj->oo_inode == NULL);
3224 LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
3225 S_ISFIFO(mode) || S_ISSOCK(mode));
3227 result = osd_mkfile(info, obj, mode, hint, th, attr);
3229 LASSERT(obj->oo_inode != NULL);
3231 * This inode should be marked dirty for i_rdev. Currently
3232 * that is done in the osd_attr_init().
3234 init_special_inode(obj->oo_inode, obj->oo_inode->i_mode,
3237 LINVRNT(osd_invariant(obj));
3241 typedef int (*osd_obj_type_f)(struct osd_thread_info *, struct osd_object *,
3243 struct dt_allocation_hint *hint,
3244 struct dt_object_format *dof,
3247 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
3249 osd_obj_type_f result;
3265 result = osd_mk_index;
3276 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
3277 struct dt_object *parent, struct dt_object *child,
3282 ah->dah_parent = parent;
3283 ah->dah_mode = child_mode;
3285 if (parent != NULL && !dt_object_remote(parent)) {
3286 /* will help to find FID->ino at dt_insert("..") */
3287 struct osd_object *pobj = osd_dt_obj(parent);
3289 osd_idc_find_and_init(env, osd_obj2dev(pobj), pobj);
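/*
 * Apply the remaining attributes from @attr to a freshly created inode:
 * type/mode (and rdev for non-device objects) are dropped, timestamps that
 * already match the inode are skipped, quota is transferred to the new
 * owners, and the rest is written via osd_inode_setattr().
 */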
3293 static void osd_attr_init(struct osd_thread_info *info, struct osd_object *obj,
3294 struct lu_attr *attr, struct dt_object_format *dof)
3296 struct inode *inode = obj->oo_inode;
3297 __u64 valid = attr->la_valid;
3300 attr->la_valid &= ~(LA_TYPE | LA_MODE);
3302 if (dof->dof_type != DFT_NODE)
3303 attr->la_valid &= ~LA_RDEV;
3304 if ((valid & LA_ATIME) && (attr->la_atime == LTIME_S(inode->i_atime)))
3305 attr->la_valid &= ~LA_ATIME;
3306 if ((valid & LA_CTIME) && (attr->la_ctime == LTIME_S(inode->i_ctime)))
3307 attr->la_valid &= ~LA_CTIME;
3308 if ((valid & LA_MTIME) && (attr->la_mtime == LTIME_S(inode->i_mtime)))
3309 attr->la_valid &= ~LA_MTIME;
3311 result = osd_quota_transfer(inode, attr);
3315 if (attr->la_valid != 0) {
3316 result = osd_inode_setattr(info->oti_env, inode, attr);
3318 * The osd_inode_setattr() should always succeed here. The
3319 * only error that could be returned is EDQUOT when we are
3320 * trying to change the UID or GID of the inode. However, this
3321 * should not happen since quota enforcement is no longer
3322 * enabled on ldiskfs (lquota takes care of it).
3324 LASSERTF(result == 0, "%d\n", result);
3325 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
3328 attr->la_valid = valid;
3332 * Helper function for osd_create()
3334 * \retval 0, on success
3336 static int __osd_create(struct osd_thread_info *info, struct osd_object *obj,
3337 struct lu_attr *attr, struct dt_allocation_hint *hint,
3338 struct dt_object_format *dof, struct thandle *th)
3343 osd_trans_exec_op(info->oti_env, th, OSD_OT_CREATE);
3345 /* we drop umask so that permissions we pass are not affected */
3346 umask = current->fs->umask;
3347 current->fs->umask = 0;
3349 result = osd_create_type_f(dof->dof_type)(info, obj, attr, hint, dof,
3351 if (likely(obj->oo_inode != NULL)) {
3352 LASSERT(obj->oo_inode->i_state & I_NEW);
3355 * Unlock the inode before attr initialization to avoid
3356 * unnecessary dqget operations. LU-6378
3358 unlock_new_inode(obj->oo_inode);
3361 if (likely(result == 0)) {
3362 osd_attr_init(info, obj, attr, dof);
3363 osd_object_init0(obj);
3366 /* restore previous umask value */
3367 current->fs->umask = umask;
3369 osd_trans_exec_check(info->oti_env, th, OSD_OT_CREATE);
3375 * Helper function for osd_create()
3377 * \retval 0, on success
3379 static int __osd_oi_insert(const struct lu_env *env, struct osd_object *obj,
3380 const struct lu_fid *fid, struct thandle *th)
3382 struct osd_thread_info *info = osd_oti_get(env);
3383 struct osd_inode_id *id = &info->oti_id;
3384 struct osd_device *osd = osd_obj2dev(obj);
3385 struct osd_thandle *oh;
3388 LASSERT(obj->oo_inode != NULL);
3390 oh = container_of0(th, struct osd_thandle, ot_super);
3391 LASSERT(oh->ot_handle);
3392 osd_trans_exec_op(env, th, OSD_OT_INSERT);
3394 osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
3395 rc = osd_oi_insert(info, osd, fid, id, oh->ot_handle,
3396 OI_CHECK_FLD, NULL);
3397 osd_trans_exec_check(env, th, OSD_OT_INSERT);
3402 int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
3403 u64 seq, struct lu_seq_range *range)
3405 struct seq_server_site *ss = osd_seq_site(osd);
3407 if (fid_seq_is_idif(seq)) {
3408 fld_range_set_ost(range);
3409 range->lsr_index = idif_ost_idx(seq);
3413 if (!fid_seq_in_fldb(seq)) {
3414 fld_range_set_mdt(range);
3417 * FIXME: if ss is NULL, we presumably should not get lsr_index
3420 range->lsr_index = ss->ss_node_id;
3424 LASSERT(ss != NULL);
3425 fld_range_set_any(range);
3426 /* OSD will only do local fld lookup */
3427 return fld_local_lookup(env, ss->ss_server_fld, seq, range);
3430 static int osd_declare_create(const struct lu_env *env, struct dt_object *dt,
3431 struct lu_attr *attr,
3432 struct dt_allocation_hint *hint,
3433 struct dt_object_format *dof,
3434 struct thandle *handle)
3436 struct osd_thandle *oh;
3441 LASSERT(handle != NULL);
3443 oh = container_of0(handle, struct osd_thandle, ot_super);
3444 LASSERT(oh->ot_handle == NULL);
3447 * EA object consumes more credits than regular object: osd_mk_index
3448 * vs. osd_mkreg: osd_mk_index will create 2 blocks for root_node and
3449 * leaf_node, which could involve the block, block bitmap, groups, and GDT
3450 * changes for each block, so add 4 * 2 credits in that case.
3452 osd_trans_declare_op(env, oh, OSD_OT_CREATE,
3453 osd_dto_credits_noquota[DTO_OBJECT_CREATE] +
3454 ((dof->dof_type == DFT_INDEX) ? 4 * 2 : 0));
3456 * Reusing an idle OI block may cause one additional OI block
3459 osd_trans_declare_op(env, oh, OSD_OT_INSERT,
3460 osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
3462 /* will help to find FID->ino mapping at dt_insert() */
3463 rc = osd_idc_find_and_init(env, osd_obj2dev(osd_dt_obj(dt)),
3471 rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid,
3472 attr->la_projid, 1, oh, osd_dt_obj(dt),
3473 NULL, OSD_QID_INODE);
3481 * Called to destroy on-disk representation of the object
3483 * Concurrency: must be locked
3485 static int osd_declare_destroy(const struct lu_env *env, struct dt_object *dt,
3488 struct osd_object *obj = osd_dt_obj(dt);
3489 struct inode *inode = obj->oo_inode;
3490 struct osd_thandle *oh;
3498 oh = container_of0(th, struct osd_thandle, ot_super);
3499 LASSERT(oh->ot_handle == NULL);
3501 osd_trans_declare_op(env, oh, OSD_OT_DESTROY,
3502 osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
3504 /* For removing agent entry */
3505 if (lu_object_has_agent_entry(&obj->oo_dt.do_lu))
3506 oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
3509 * Recycling an idle OI leaf may cause three additional OI blocks
3512 if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LOST_MDTOBJ2))
3513 osd_trans_declare_op(env, oh, OSD_OT_DELETE,
3514 osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
3515 /* one less inode */
3516 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
3517 i_projid_read(inode), -1, oh, obj, NULL,
3521 /* data to be truncated */
3522 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
3523 i_projid_read(inode), 0, oh, obj, NULL,
3529 * will help to find FID->ino when this object is being
3532 rc = osd_idc_find_and_init(env, osd_obj2dev(obj), obj);
3537 static int osd_destroy(const struct lu_env *env, struct dt_object *dt,
3540 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
3541 struct osd_object *obj = osd_dt_obj(dt);
3542 struct inode *inode = obj->oo_inode;
3543 struct osd_device *osd = osd_obj2dev(obj);
3544 struct osd_thandle *oh;
3549 oh = container_of0(th, struct osd_thandle, ot_super);
3550 LASSERT(oh->ot_handle);
3552 LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
3554 if (unlikely(fid_is_acct(fid)))
3557 if (lu_object_has_agent_entry(&obj->oo_dt.do_lu)) {
3558 result = osd_delete_from_remote_parent(env, osd, obj, oh, true);
3560 CERROR("%s: remove agent entry "DFID": rc = %d\n",
3561 osd_name(osd), PFID(fid), result);
3564 if (S_ISDIR(inode->i_mode)) {
3565 LASSERT(osd_inode_unlinked(inode) || inode->i_nlink == 1 ||
3566 inode->i_nlink == 2);
3568 spin_lock(&obj->oo_guard);
3570 spin_unlock(&obj->oo_guard);
3571 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
3574 osd_trans_exec_op(env, th, OSD_OT_DESTROY);
3576 ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
3578 if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LOST_MDTOBJ2))
3579 result = osd_oi_delete(osd_oti_get(env), osd, fid,
3580 oh->ot_handle, OI_CHECK_FLD);
3582 osd_trans_exec_check(env, th, OSD_OT_DESTROY);
3583 /* XXX: add to ext3 orphan list */
3584 /* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
3586 /* not needed in the cache anymore */
3587 set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
3588 obj->oo_destroyed = 1;
3594 * Put the fid into lustre_mdt_attrs, and then place the structure in the
3595 * inode's EA. This fid should not be altered during the lifetime
3598 * \retval +ve, on success
3599 * \retval -ve, on error
3601 * FIXME: It is good to have/use ldiskfs_xattr_set_handle() here
3603 int osd_ea_fid_set(struct osd_thread_info *info, struct inode *inode,
3604 const struct lu_fid *fid, __u32 compat, __u32 incompat)
3606 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
3607 struct lustre_mdt_attrs *lma = &loa->loa_lma;
3612 if (OBD_FAIL_CHECK(OBD_FAIL_FID_INLMA))
3615 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_OST_EA_FID_SET))
3618 lustre_loa_init(loa, fid, compat, incompat);
3619 lustre_loa_swab(loa, false);
3622 * For the OST device with 256 bytes inode size by default,
3623 * the PFID EA will be stored together with LMA EA to avoid
3624 * performance trouble. Otherwise the PFID EA can be stored
3625 * independently. LU-8998
3627 if ((compat & LMAC_FID_ON_OST) &&
3628 LDISKFS_INODE_SIZE(inode->i_sb) <= 256)
3629 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, loa,
3630 sizeof(*loa), XATTR_CREATE);
3632 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
3633 sizeof(*lma), XATTR_CREATE);
3635 * LMA may already exist, but we need to check that all the
3636 * desired compat/incompat flags have been added.
3638 if (unlikely(rc == -EEXIST)) {
3639 rc = __osd_xattr_get(inode, &info->oti_obj_dentry,
3640 XATTR_NAME_LMA, (void *)loa, sizeof(*loa));
3644 if (rc < sizeof(*lma))
3647 lustre_loa_swab(loa, true);
3648 if (lu_fid_eq(fid, &lma->lma_self_fid) &&
3649 ((compat == 0 && incompat == 0) ||
3650 (!(~lma->lma_compat & compat) &&
3651 !(~lma->lma_incompat & incompat))))
3654 lma->lma_self_fid = *fid;
3655 lma->lma_compat |= compat;
3656 lma->lma_incompat |= incompat;
3657 if (rc == sizeof(*lma)) {
3658 lustre_lma_swab(lma);
3659 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
3660 sizeof(*lma), XATTR_REPLACE);
3662 lustre_loa_swab(loa, false);
3663 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, loa,
3664 sizeof(*loa), XATTR_REPLACE);
3672 * ldiskfs supports fid in dirent, it is passed in dentry->d_fsdata.
3673 * lustre 1.8 also uses d_fsdata for passing other info to ldiskfs.
3674 * To have compatibility with the 1.8 ldiskfs driver we need to have
3675 * magic number at start of fid data.
3676 * \ldiskfs_dentry_param is used only to pass fid from osd to ldiskfs.
3679 static void osd_get_ldiskfs_dirent_param(struct ldiskfs_dentry_param *param,
3680 const struct lu_fid *fid)
3682 if (!fid_is_namespace_visible(fid) ||
3683 OBD_FAIL_CHECK(OBD_FAIL_FID_IGIF)) {
3684 param->edp_magic = 0;
3688 param->edp_magic = LDISKFS_LUFID_MAGIC;
3689 param->edp_len = sizeof(struct lu_fid) + 1;
3690 fid_cpu_to_be((struct lu_fid *)param->edp_data, (struct lu_fid *)fid);
3694 * Try to read the fid from inode ea into dt_rec.
3696 * \param fid object fid.
3698 * \retval 0 on success
3700 static int osd_ea_fid_get(const struct lu_env *env, struct osd_object *obj,
3701 __u32 ino, struct lu_fid *fid,
3702 struct osd_inode_id *id)
3704 struct osd_thread_info *info = osd_oti_get(env);
3705 struct inode *inode;
3709 osd_id_gen(id, ino, OSD_OII_NOGEN);
3710 inode = osd_iget_fid(info, osd_obj2dev(obj), id, fid);
3712 RETURN(PTR_ERR(inode));
3718 static int osd_add_dot_dotdot_internal(struct osd_thread_info *info,
3720 struct inode *parent_dir,
3721 const struct lu_fid *dot_fid,
3722 const struct lu_fid *dot_dot_fid,
3723 struct osd_thandle *oth)
3725 struct ldiskfs_dentry_param *dot_ldp;
3726 struct ldiskfs_dentry_param *dot_dot_ldp;
3727 __u32 saved_nlink = dir->i_nlink;
3730 dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
3731 osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
3733 dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
3734 dot_ldp->edp_magic = 0;
3736 rc = ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
3737 dir, dot_ldp, dot_dot_ldp);
3739 * The ldiskfs_add_dot_dotdot() may set dir->i_nlink to 2, then
3740 * the subsequent ref_add() will increase dir->i_nlink
3741 * to 3. That is incorrect for a newly created directory.
3743 * It looks like a hack, because we want to make the OSD API
3744 * to be order-independent for new created directory object
3745 * between dt_insert(..) and ref_add() operations.
3747 * Here, we only restore the in-RAM dir-inode's nlink attr,
3748 * because if the nlink attr is not 2, then there will be
3749 * ref_add() called following the dt_insert(..), such call
3750 * will make both the in-RAM and on-disk dir-inode's nlink
3751 * attr to be set as 2. LU-7447
3753 set_nlink(dir, saved_nlink);
3758 * Create a local agent inode for a remote entry
3760 static struct inode *osd_create_local_agent_inode(const struct lu_env *env,
3761 struct osd_device *osd,
3762 struct osd_object *pobj,
3763 const struct lu_fid *fid,
3767 struct osd_thread_info *info = osd_oti_get(env);
3768 struct inode *local;
3769 struct osd_thandle *oh;
3775 oh = container_of(th, struct osd_thandle, ot_super);
3776 LASSERT(oh->ot_handle->h_transaction != NULL);
3778 local = ldiskfs_create_inode(oh->ot_handle, pobj->oo_inode, type,
3780 if (IS_ERR(local)) {
3781 CERROR("%s: create local error %d\n", osd_name(osd),
3782 (int)PTR_ERR(local));
3787 * restore i_gid in case S_ISGID is set; we will inherit S_ISGID and set
3788 * the correct gid on the remote file, not on the agent here
3790 local->i_gid = current_fsgid();
3791 ldiskfs_set_inode_state(local, LDISKFS_STATE_LUSTRE_NOSCRUB);
3793 /* e2fsck doesn't like empty symlinks. Store remote FID as symlink.
3794 * That gives e2fsck something to look at and be happy, and allows
3795 * debugging if we need to determine where this symlink came from.
3797 if (S_ISLNK(type)) {
3798 CLASSERT(LDISKFS_N_BLOCKS * 4 >= FID_LEN + 1);
3799 rc = snprintf((char *)LDISKFS_I(local)->i_data,
3800 LDISKFS_N_BLOCKS * 4, DFID, PFID(fid));
3802 i_size_write(local, rc);
3803 LDISKFS_I(local)->i_disksize = rc;
3805 unlock_new_inode(local);
3807 /* Agent inode should not have project ID */
3808 #ifdef HAVE_PROJECT_QUOTA
3809 if (LDISKFS_I(pobj->oo_inode)->i_flags & LUSTRE_PROJINHERIT_FL &&
3810 i_projid_read(pobj->oo_inode) != 0) {
3811 rc = osd_transfer_project(local, 0);
3813 CERROR("%s: quota transfer failed: rc = %d. Is project "
3814 "quota enforcement enabled on the ldiskfs "
3815 "filesystem?\n", local->i_sb->s_id, rc);
3816 RETURN(ERR_PTR(rc));
3820 /* Set special LMA flag for local agent inode */
3821 rc = osd_ea_fid_set(info, local, fid, 0, LMAI_AGENT);
3823 CERROR("%s: set LMA for "DFID" remote inode failed: rc = %d\n",
3824 osd_name(osd), PFID(fid), rc);
3825 RETURN(ERR_PTR(rc));
3831 rc = osd_add_dot_dotdot_internal(info, local, pobj->oo_inode,
3832 lu_object_fid(&pobj->oo_dt.do_lu),
3835 CERROR("%s: "DFID" add dot dotdot error: rc = %d\n",
3836 osd_name(osd), PFID(fid), rc);
3837 RETURN(ERR_PTR(rc));
3844 * When a direntry is deleted, we have to take care of a possible agent inode
3845 * referenced by it. Unfortunately we can't do this at that point:
3846 * iget() within a running transaction leads to deadlock, and we'd better not
3847 * call it on every delete declaration for performance reasons. So we put
3848 * a potential agent inode on a list and process it once the transaction
3849 * is over. Notice it's not any worse in terms of real orphans, as regular
3850 * object destroy doesn't put inodes on the on-disk orphan list. This should
3851 * be addressed separately.
3853 static int osd_schedule_agent_inode_removal(const struct lu_env *env,
3854 struct osd_thandle *oh,
3857 struct osd_device *osd = osd_dt_dev(oh->ot_super.th_dev);
3858 struct osd_obj_orphan *oor;
3865 oor->oor_env = (struct lu_env *)env;
3866 spin_lock(&osd->od_osfs_lock);
3867 list_add(&oor->oor_list, &osd->od_orphan_list);
3868 spin_unlock(&osd->od_osfs_lock);
3870 oh->ot_remove_agents = 1;
3876 static int osd_process_scheduled_agent_removals(const struct lu_env *env,
3877 struct osd_device *osd)
3879 struct osd_thread_info *info = osd_oti_get(env);
3880 struct osd_obj_orphan *oor, *tmp;
3881 struct osd_inode_id id;
3882 struct list_head list;
3883 struct inode *inode;
3888 INIT_LIST_HEAD(&list);
3890 spin_lock(&osd->od_osfs_lock);
3891 list_for_each_entry_safe(oor, tmp, &osd->od_orphan_list, oor_list) {
3892 if (oor->oor_env == env) {
3893 list_del(&oor->oor_list);
3894 list_add(&oor->oor_list, &list);
3897 spin_unlock(&osd->od_osfs_lock);
3899 list_for_each_entry_safe(oor, tmp, &list, oor_list) {
3903 list_del(&oor->oor_list);
3906 osd_id_gen(&id, ino, OSD_OII_NOGEN);
3907 inode = osd_iget_fid(info, osd, &id, &fid);
3911 if (!osd_remote_fid(env, osd, &fid)) {
3916 jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC, 1);
3918 mark_inode_dirty(inode);
3919 ldiskfs_journal_stop(jh);
3927 * OSD layer object create function for OST objects (b=11826).
3929 * The FID is inserted into inode xattr here.
3931 * \retval 0, on success
3932 * \retval -ve, on error
3934 static int osd_create(const struct lu_env *env, struct dt_object *dt,
3935 struct lu_attr *attr, struct dt_allocation_hint *hint,
3936 struct dt_object_format *dof, struct thandle *th)
3938 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
3939 struct osd_object *obj = osd_dt_obj(dt);
3940 struct osd_thread_info *info = osd_oti_get(env);
3941 int result, on_ost = 0;
3945 if (dt_object_exists(dt))
3948 LINVRNT(osd_invariant(obj));
3949 LASSERT(!dt_object_remote(dt));
3950 LASSERT(osd_is_write_locked(env, obj));
3951 LASSERT(th != NULL);
3953 if (unlikely(fid_is_acct(fid)))
3955 * Quota files can't be created from the kernel any more,
3956 * 'tune2fs -O quota' will take care of creating them
3960 result = __osd_create(info, obj, attr, hint, dof, th);
3962 if (fid_is_idif(fid) &&
3963 !osd_dev(dt->do_lu.lo_dev)->od_index_in_idif) {
3964 struct lu_fid *tfid = &info->oti_fid;
3965 struct ost_id *oi = &info->oti_ostid;
3967 fid_to_ostid(fid, oi);
3968 ostid_to_fid(tfid, oi, 0);
3970 result = osd_ea_fid_set(info, obj->oo_inode, tfid,
3971 LMAC_FID_ON_OST, 0);
3973 on_ost = fid_is_on_ost(info, osd_obj2dev(obj),
3975 result = osd_ea_fid_set(info, obj->oo_inode, fid,
3976 on_ost ? LMAC_FID_ON_OST : 0,
3979 if (obj->oo_dt.do_body_ops == &osd_body_ops_new)
3980 obj->oo_dt.do_body_ops = &osd_body_ops;
3983 if (!result && !CFS_FAIL_CHECK(OBD_FAIL_OSD_NO_OI_ENTRY))
3984 result = __osd_oi_insert(env, obj, fid, th);
3987 * a small optimization - dt_insert() isn't usually applied
3988 * to OST objects, so we don't need to cache OI mapping for
3991 if (result == 0 && on_ost == 0) {
3992 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
3994 result = osd_idc_find_and_init(env, osd, obj);
3995 LASSERT(result == 0);
3998 LASSERT(ergo(result == 0,
3999 dt_object_exists(dt) && !dt_object_remote(dt)));
4000 LINVRNT(osd_invariant(obj));
4004 static int osd_declare_ref_add(const struct lu_env *env, struct dt_object *dt,
4005 struct thandle *handle)
4007 struct osd_thandle *oh;
4009 /* it's possible that object doesn't exist yet */
4010 LASSERT(handle != NULL);
4012 oh = container_of0(handle, struct osd_thandle, ot_super);
4013 LASSERT(oh->ot_handle == NULL);
4015 osd_trans_declare_op(env, oh, OSD_OT_REF_ADD,
4016 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
4018 osd_idc_find_and_init(env, osd_dev(dt->do_lu.lo_dev), osd_dt_obj(dt));
4024 * Concurrency: @dt is write locked.
4026 static int osd_ref_add(const struct lu_env *env, struct dt_object *dt,
4029 struct osd_object *obj = osd_dt_obj(dt);
4030 struct inode *inode = obj->oo_inode;
4031 struct osd_thandle *oh;
4034 if (!dt_object_exists(dt) || obj->oo_destroyed)
4037 LINVRNT(osd_invariant(obj));
4038 LASSERT(!dt_object_remote(dt));
4039 LASSERT(osd_is_write_locked(env, obj));
4040 LASSERT(th != NULL);
4042 oh = container_of0(th, struct osd_thandle, ot_super);
4043 LASSERT(oh->ot_handle != NULL);
4045 osd_trans_exec_op(env, th, OSD_OT_REF_ADD);
4047 CDEBUG(D_INODE, DFID" increase nlink %d\n",
4048 PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
4050 * The DIR_NLINK feature allows directories to exceed LDISKFS_LINK_MAX
4051 * (65000) subdirectories by storing "1" in i_nlink if the link count
4052 * would otherwise overflow. Directory traversal tools understand
4053 * that (st_nlink == 1) indicates that the filesystem does not track
4054 * the hard link count on the directory, and will not abort subdirectory
4055 * scanning early once (st_nlink - 2) subdirs have been found.
4057 * This also has to properly handle the case of inodes with nlink == 0
4058 * in case they are being linked into the PENDING directory
4060 spin_lock(&obj->oo_guard);
4061 if (unlikely(inode->i_nlink == 0))
4062 /* inc_nlink from 0 may cause WARN_ON */
4063 set_nlink(inode, 1);
4065 ldiskfs_inc_count(oh->ot_handle, inode);
4066 if (!S_ISDIR(inode->i_mode))
4067 LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
4069 spin_unlock(&obj->oo_guard);
4071 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
4072 LINVRNT(osd_invariant(obj));
4074 osd_trans_exec_check(env, th, OSD_OT_REF_ADD);
4079 static int osd_declare_ref_del(const struct lu_env *env, struct dt_object *dt,
4080 struct thandle *handle)
4082 struct osd_thandle *oh;
4084 if (!dt_object_exists(dt))
4087 LASSERT(!dt_object_remote(dt));
4088 LASSERT(handle != NULL);
4090 oh = container_of0(handle, struct osd_thandle, ot_super);
4091 LASSERT(oh->ot_handle == NULL);
4093 osd_trans_declare_op(env, oh, OSD_OT_REF_DEL,
4094 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
4100 * Concurrency: @dt is write locked.
4102 static int osd_ref_del(const struct lu_env *env, struct dt_object *dt,
4105 struct osd_object *obj = osd_dt_obj(dt);
4106 struct inode *inode = obj->oo_inode;
4107 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
4108 struct osd_thandle *oh;
4110 if (!dt_object_exists(dt))
4113 LINVRNT(osd_invariant(obj));
4114 LASSERT(!dt_object_remote(dt));
4115 LASSERT(osd_is_write_locked(env, obj));
4116 LASSERT(th != NULL);
4118 oh = container_of0(th, struct osd_thandle, ot_super);
4119 LASSERT(oh->ot_handle != NULL);
4121 osd_trans_exec_op(env, th, OSD_OT_REF_DEL);
4123 spin_lock(&obj->oo_guard);
4125 * That can be the result of an upgrade from an old Lustre version and
4126 * applies only to local files. Just skip this ref_del call.
4127 * ext4_unlink() only treats this as a warning, don't LASSERT here.
4129 if (inode->i_nlink == 0) {
4130 CDEBUG_LIMIT(fid_is_norm(lu_object_fid(&dt->do_lu)) ?
4131 D_ERROR : D_INODE, "%s: nlink == 0 on "DFID
4132 ", maybe an upgraded file? (LU-3915)\n",
4133 osd_name(osd), PFID(lu_object_fid(&dt->do_lu)));
4134 spin_unlock(&obj->oo_guard);
4138 CDEBUG(D_INODE, DFID" decrease nlink %d\n",
4139 PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
4141 ldiskfs_dec_count(oh->ot_handle, inode);
4142 spin_unlock(&obj->oo_guard);
4144 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
4145 LINVRNT(osd_invariant(obj));
4147 osd_trans_exec_check(env, th, OSD_OT_REF_DEL);
4153 * Concurrency: @dt is read locked.
4155 static int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
4156 struct lu_buf *buf, const char *name)
4158 struct osd_object *obj = osd_dt_obj(dt);
4159 struct inode *inode = obj->oo_inode;
4160 struct osd_thread_info *info = osd_oti_get(env);
4161 struct dentry *dentry = &info->oti_obj_dentry;
4162 bool cache_xattr = false;
4167 /* version get is not real XATTR but uses xattr API */
4168 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
4169 dt_obj_version_t *ver = buf->lb_buf;
4172 * for version we are just using xattr API but change inode
4175 if (buf->lb_len == 0)
4176 return sizeof(dt_obj_version_t);
4178 if (buf->lb_len < sizeof(dt_obj_version_t))
4181 CDEBUG(D_INODE, "Get version %#llx for inode %lu\n",
4182 LDISKFS_I(inode)->i_fs_version, inode->i_ino);
4184 *ver = LDISKFS_I(inode)->i_fs_version;
4186 return sizeof(dt_obj_version_t);
4189 if (!dt_object_exists(dt))
4192 LASSERT(!dt_object_remote(dt));
4193 LASSERT(inode->i_op != NULL);
4194 #ifdef HAVE_IOP_XATTR
4195 LASSERT(inode->i_op->getxattr != NULL);
4198 if (strcmp(name, XATTR_NAME_LOV) == 0 ||
4199 strcmp(name, XATTR_NAME_DEFAULT_LMV) == 0)
4203 rc = osd_oxc_get(obj, name, buf);
4208 if (strcmp(name, XATTR_NAME_FID) == 0 && obj->oo_pfid_in_lma) {
4209 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
4210 struct lustre_mdt_attrs *lma = &loa->loa_lma;
4211 struct filter_fid *ff;
4212 struct ost_layout *ol;
4214 rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
4218 LASSERT(lma->lma_compat & LMAC_STRIPE_INFO);
4221 if (buf->lb_len == 0 || !buf->lb_buf)
4224 if (buf->lb_len < rc)
4228 ol = &ff->ff_layout;
4229 ol->ol_stripe_count = cpu_to_le32(loa->loa_parent_fid.f_ver >>
4230 PFID_STRIPE_IDX_BITS);
4231 ol->ol_stripe_size = cpu_to_le32(loa->loa_stripe_size);
4232 loa->loa_parent_fid.f_ver &= PFID_STRIPE_COUNT_MASK;
4233 fid_cpu_to_le(&ff->ff_parent, &loa->loa_parent_fid);
4234 if (lma->lma_compat & LMAC_COMP_INFO) {
4235 ol->ol_comp_start = cpu_to_le64(loa->loa_comp_start);
4236 ol->ol_comp_end = cpu_to_le64(loa->loa_comp_end);
4237 ol->ol_comp_id = cpu_to_le32(loa->loa_comp_id);
4239 ol->ol_comp_start = 0;
4240 ol->ol_comp_end = 0;
4244 rc = __osd_xattr_get(inode, dentry, name,
4245 buf->lb_buf, buf->lb_len);
4249 if (rc == -ENOENT || rc == -ENODATA)
4250 osd_oxc_add(obj, name, NULL, 0);
4251 else if (rc > 0 && buf->lb_buf != NULL)
4252 osd_oxc_add(obj, name, buf->lb_buf, rc);
4258 static int osd_declare_xattr_set(const struct lu_env *env,
4259 struct dt_object *dt,
4260 const struct lu_buf *buf, const char *name,
4261 int fl, struct thandle *handle)
4263 struct osd_thandle *oh;
4265 struct super_block *sb = osd_sb(osd_dev(dt->do_lu.lo_dev));
4267 LASSERT(handle != NULL);
4269 oh = container_of0(handle, struct osd_thandle, ot_super);
4270 LASSERT(oh->ot_handle == NULL);
4272 if (strcmp(name, XATTR_NAME_LMA) == 0) {
4274 * For the non-upgrading case, the LMA is set first and
4275 * usually fits in the inode. But for the upgrade case, the LMA
4276 * may be in a separate EA block.
4278 if (dt_object_exists(dt)) {
4279 if (fl == LU_XATTR_REPLACE)
4284 } else if (strcmp(name, XATTR_NAME_VERSION) == 0) {
4286 } else if (strcmp(name, XATTR_NAME_FID) == 0) {
4287 /* We may need to delete the old PFID EA. */
4288 credits = LDISKFS_MAXQUOTAS_DEL_BLOCKS(sb);
4289 if (fl == LU_XATTR_REPLACE)
4295 * If some name entry resides on a remote MDT, then we will create
4296 * an agent entry under the remote parent. On the other hand, if the
4297 * remote entry is to be removed, then the related agent entry may
4298 * need to be removed from the remote parent. There are several
4299 * possible cases, so let's declare enough credits. The credits
4300 * for creating the agent entry are enough for the remove case.
4302 if (strcmp(name, XATTR_NAME_LINK) == 0) {
4303 credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
4304 if (dt_object_exists(dt))
4305 credits += 1; /* For updating LMA */
4309 credits += osd_dto_credits_noquota[DTO_XATTR_SET];
4314 if (buf->lb_buf == NULL && dt_object_exists(dt)) {
4316 * learn xattr size from osd_xattr_get if
4317 * attribute has not been read yet
4319 buflen = __osd_xattr_get(
4320 osd_dt_obj(dt)->oo_inode,
4321 &osd_oti_get(env)->oti_obj_dentry,
4326 buflen = buf->lb_len;
4329 if (buflen > sb->s_blocksize) {
4330 credits += osd_calc_bkmap_credits(
4332 (buflen + sb->s_blocksize - 1) >>
4333 sb->s_blocksize_bits);
4337 * xattr set may involve inode quota change, reserve credits for
4338 * dquot_initialize()
4340 credits += LDISKFS_MAXQUOTAS_INIT_BLOCKS(sb);
4343 osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET, credits);
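/*
 * Store the parent FID (and the stripe/component layout when the client
 * provides it) for an OST object. Depending on the inode size and on what
 * is already on disk, the PFID EA is either kept inside the LMA xattr or
 * split out into a separate XATTR_NAME_FID EA.
 */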
4348 static int osd_xattr_set_pfid(const struct lu_env *env, struct osd_object *obj,
4349 const struct lu_buf *buf, int fl,
4350 struct thandle *handle)
4352 struct osd_thread_info *info = osd_oti_get(env);
4353 struct dentry *dentry = &info->oti_obj_dentry;
4354 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
4355 struct lustre_mdt_attrs *lma = &loa->loa_lma;
4356 struct inode *inode = obj->oo_inode;
4357 struct filter_fid *ff = buf->lb_buf;
4358 struct ost_layout *ol = &ff->ff_layout;
4359 int flags = XATTR_REPLACE;
4364 if (buf->lb_len != sizeof(*ff) && buf->lb_len != sizeof(struct lu_fid))
4367 rc = osd_get_lma(info, inode, dentry, loa);
4368 if (rc == -ENODATA) {
4369 /* Usually for upgrading from an old device */
4370 lustre_loa_init(loa, lu_object_fid(&obj->oo_dt.do_lu),
4371 LMAC_FID_ON_OST, 0);
4372 flags = XATTR_CREATE;
4377 if (!rc && lma->lma_compat & LMAC_STRIPE_INFO) {
4378 if ((fl & LU_XATTR_CREATE) && !(fl & LU_XATTR_REPLACE))
4381 if (LDISKFS_INODE_SIZE(inode->i_sb) > 256) {
4382 /* Separate PFID EA from LMA */
4383 lma->lma_compat &= ~(LMAC_STRIPE_INFO | LMAC_COMP_INFO);
4384 lustre_lma_swab(lma);
4385 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
4386 sizeof(*lma), XATTR_REPLACE);
4388 obj->oo_pfid_in_lma = 0;
4389 rc = LU_XATTR_CREATE;
4395 if (LDISKFS_INODE_SIZE(inode->i_sb) > 256)
4399 * An old client does not send stripe information,
4400 * so store the PFID EA on disk separately.
4402 if (unlikely(buf->lb_len == sizeof(struct lu_fid) ||
4403 ol->ol_stripe_size == 0))
4406 /* Remove the old PFID EA entry first. */
4407 ll_vfs_dq_init(inode);
4408 rc = osd_removexattr(dentry, inode, XATTR_NAME_FID);
4409 if (rc == -ENODATA) {
4410 if ((fl & LU_XATTR_REPLACE) && !(fl & LU_XATTR_CREATE))
4417 fid_le_to_cpu(&loa->loa_parent_fid, &ff->ff_parent);
4418 if (likely(ol->ol_stripe_size != 0)) {
4419 loa->loa_parent_fid.f_ver |= le32_to_cpu(ol->ol_stripe_count) <<
4420 PFID_STRIPE_IDX_BITS;
4421 loa->loa_stripe_size = le32_to_cpu(ol->ol_stripe_size);
4422 lma->lma_compat |= LMAC_STRIPE_INFO;
4423 if (ol->ol_comp_id != 0) {
4424 loa->loa_comp_id = le32_to_cpu(ol->ol_comp_id);
4425 loa->loa_comp_start = le64_to_cpu(ol->ol_comp_start);
4426 loa->loa_comp_end = le64_to_cpu(ol->ol_comp_end);
4427 lma->lma_compat |= LMAC_COMP_INFO;
4431 lustre_loa_swab(loa, false);
4433 /* Store the PFID EA inside LMA. */
4434 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, loa, sizeof(*loa),
4437 obj->oo_pfid_in_lma = 1;
4443 * In a DNE environment, the object (whether a regular file or a directory)
4444 * and its name entry may reside on different MDTs. In such a case, we will
4445 * create an agent entry on the MDT where the object resides. The agent entry
4446 * references the object locally, which makes the object visible to
4447 * userspace when mounted as 'ldiskfs' directly. Then userspace tools,
4448 * such as 'tar', can handle the object properly.
4450 * We handle the agent entry during linkEA set, which is the common interface
4451 * for both regular files and directories and can handle all kinds of cases,
4452 * such as create/link/unlink/rename, and so on.
4454 * NOTE: we can NOT do that in ea_{insert,delete}, which is only for directories.
4456 * XXX: There are two known issues:
4457 * 1. For one object, we will create at most one agent entry even if there
4458 * may be more than one cross-MDT hard link on the object. So the local
4459 * e2fsck may claim that the object's nlink is larger than the name entries
4460 * that reference such an inode. Further, e2fsck will fix the nlink
4461 * attribute to match the local references. Then it will cause the object's
4462 * nlink attribute to be inconsistent with the global references. That is bad
4463 * but not fatal. The ref_del() can handle the zero-referenced case. On the
4464 * other hand, the global namespace LFSCK can repair the object's attribute
4465 * according to the linkEA.
4466 * 2. There may be so many hard links on the object that its linkEA overflows,
4467 * and then the linkEA entry for a cross-MDT reference may be discarded. If such
4468 * a case happens, then at this point we do not know whether there are any
4469 * cross-MDT references. But there are local references, which guarantees that the
4470 * object is visible to userspace when mounted as 'ldiskfs'. That is enough.
4472 static int osd_xattr_handle_linkea(const struct lu_env *env,
4473 struct osd_device *osd,
4474 struct osd_object *obj,
4475 const struct lu_buf *buf,
4476 struct thandle *handle)
4478 const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
4479 struct lu_fid *tfid = &osd_oti_get(env)->oti_fid3;
4480 struct linkea_data ldata = { .ld_buf = (struct lu_buf *)buf };
4481 struct lu_name tmpname;
4482 struct osd_thandle *oh;
4484 bool remote = false;
4488 oh = container_of0(handle, struct osd_thandle, ot_super);
4489 LASSERT(oh->ot_handle != NULL);
4491 rc = linkea_init_with_rec(&ldata);
4493 linkea_first_entry(&ldata);
4494 while (ldata.ld_lee != NULL && !remote) {
4495 linkea_entry_unpack(ldata.ld_lee, &ldata.ld_reclen,
4497 if (osd_remote_fid(env, osd, tfid) > 0)
4500 linkea_next_entry(&ldata);
4502 } else if (rc == -ENODATA) {
4508 if (lu_object_has_agent_entry(&obj->oo_dt.do_lu) && !remote) {
4509 rc = osd_delete_from_remote_parent(env, osd, obj, oh, false);
4511 CERROR("%s: failed to remove agent entry for "DFID
4512 ": rc = %d\n", osd_name(osd), PFID(fid), rc);
4513 } else if (!lu_object_has_agent_entry(&obj->oo_dt.do_lu) && remote) {
4514 rc = osd_add_to_remote_parent(env, osd, obj, oh);
4516 CERROR("%s: failed to create agent entry for "DFID
4517 ": rc = %d\n", osd_name(osd), PFID(fid), rc);
4524 * Concurrency: @dt is write locked.
4526 static int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
4527 const struct lu_buf *buf, const char *name, int fl,
4528 struct thandle *handle)
4530 struct osd_object *obj = osd_dt_obj(dt);
4531 struct osd_device *osd = osd_obj2dev(obj);
4532 struct inode *inode = obj->oo_inode;
4533 struct osd_thread_info *info = osd_oti_get(env);
4543 /* version set is not real XATTR */
4544 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
4545 dt_obj_version_t *version = buf->lb_buf;
4548 * for version we are just using xattr API but change inode
4551 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
4553 CDEBUG(D_INODE, "Set version %#llx (old %#llx) for inode %lu\n",
4554 *version, LDISKFS_I(inode)->i_fs_version, inode->i_ino);
4556 LDISKFS_I(inode)->i_fs_version = *version;
4558 * Version is set after all inode operations are finished,
4559 * so we should mark it dirty here
4561 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
4566 CDEBUG(D_INODE, DFID" set xattr '%s' with size %zu\n",
4567 PFID(lu_object_fid(&dt->do_lu)), name, buf->lb_len);
4570 osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
4573 * For an OST device with the default 256-byte inode size,
4574 * the PFID EA is stored together with the LMA EA to avoid
4575 * performance trouble. Otherwise the PFID EA can be stored
4576 * independently. LU-8998
4578 if (strcmp(name, XATTR_NAME_FID) == 0 && osd->od_is_ost &&
4579 (LDISKFS_INODE_SIZE(inode->i_sb) <= 256 || obj->oo_pfid_in_lma)) {
4580 LASSERT(buf->lb_buf);
4582 fl = osd_xattr_set_pfid(env, obj, buf, fl, handle);
4585 } else if (strcmp(name, XATTR_NAME_LMV) == 0) {
4586 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
4587 struct lustre_mdt_attrs *lma = &loa->loa_lma;
4589 rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
4593 lma->lma_incompat |= LMAI_STRIPED;
4594 lustre_lma_swab(lma);
4595 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
4596 sizeof(*lma), XATTR_REPLACE);
4599 } else if (strcmp(name, XATTR_NAME_LINK) == 0) {
4600 LASSERT(!osd->od_is_ost);
4602 rc = osd_xattr_handle_linkea(env, osd, obj, buf, handle);
4607 if (fl & LU_XATTR_REPLACE)
4608 fs_flags |= XATTR_REPLACE;
4610 if (fl & LU_XATTR_CREATE)
4611 fs_flags |= XATTR_CREATE;
4613 rc = __osd_xattr_set(info, inode, name, buf->lb_buf, len, fs_flags);
4614 osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
4617 (strcmp(name, XATTR_NAME_LOV) == 0 ||
4618 strcmp(name, XATTR_NAME_DEFAULT_LMV) == 0))
4619 osd_oxc_add(obj, name, buf->lb_buf, buf->lb_len);
4625 * Concurrency: @dt is read locked.
4627 static int osd_xattr_list(const struct lu_env *env, struct dt_object *dt,
4628 const struct lu_buf *buf)
4630 struct osd_object *obj = osd_dt_obj(dt);
4631 struct inode *inode = obj->oo_inode;
4632 struct osd_thread_info *info = osd_oti_get(env);
4633 struct dentry *dentry = &info->oti_obj_dentry;
4635 if (!dt_object_exists(dt))
4638 LASSERT(!dt_object_remote(dt));
4639 LASSERT(inode->i_op != NULL);
4640 LASSERT(inode->i_op->listxattr != NULL);
4642 dentry->d_inode = inode;
4643 dentry->d_sb = inode->i_sb;
4644 return inode->i_op->listxattr(dentry, buf->lb_buf, buf->lb_len);
4647 static int osd_declare_xattr_del(const struct lu_env *env,
4648 struct dt_object *dt, const char *name,
4649 struct thandle *handle)
4651 struct osd_thandle *oh;
4652 struct super_block *sb = osd_sb(osd_dev(dt->do_lu.lo_dev));
4654 LASSERT(!dt_object_remote(dt));
4655 LASSERT(handle != NULL);
4657 oh = container_of0(handle, struct osd_thandle, ot_super);
4658 LASSERT(oh->ot_handle == NULL);
4660 osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
4661 osd_dto_credits_noquota[DTO_XATTR_SET]);
4663 * xattr del may involve inode quota change, reserve credits for
4664 * dquot_initialize()
4666 oh->ot_credits += LDISKFS_MAXQUOTAS_INIT_BLOCKS(sb);
4672 * Concurrency: @dt is write locked.
4674 static int osd_xattr_del(const struct lu_env *env, struct dt_object *dt,
4675 const char *name, struct thandle *handle)
4677 struct osd_object *obj = osd_dt_obj(dt);
4678 struct inode *inode = obj->oo_inode;
4679 struct osd_thread_info *info = osd_oti_get(env);
4680 struct dentry *dentry = &info->oti_obj_dentry;
4683 if (!dt_object_exists(dt))
4686 LASSERT(!dt_object_remote(dt));
4687 LASSERT(inode->i_op != NULL);
4688 LASSERT(handle != NULL);
4689 #ifdef HAVE_IOP_XATTR
4690 LASSERT(inode->i_op->removexattr != NULL);
4693 osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
4695 if (strcmp(name, XATTR_NAME_FID) == 0 && obj->oo_pfid_in_lma) {
4696 struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
4698 rc = osd_get_lma(info, inode, &info->oti_obj_dentry,
4699 &info->oti_ost_attrs);
4701 LASSERT(lma->lma_compat & LMAC_STRIPE_INFO);
4703 lma->lma_compat &= ~(LMAC_STRIPE_INFO | LMAC_COMP_INFO);
4704 lustre_lma_swab(lma);
4705 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
4706 sizeof(*lma), XATTR_REPLACE);
4708 obj->oo_pfid_in_lma = 0;
4711 ll_vfs_dq_init(inode);
4712 dentry->d_inode = inode;
4713 dentry->d_sb = inode->i_sb;
4714 rc = osd_removexattr(dentry, inode, name);
4717 osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
4720 (strcmp(name, XATTR_NAME_LOV) == 0 ||
4721 strcmp(name, XATTR_NAME_DEFAULT_LMV) == 0))
4722 osd_oxc_del(obj, name);
4727 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
4728 __u64 start, __u64 end)
4730 struct osd_object *obj = osd_dt_obj(dt);
4731 struct inode *inode = obj->oo_inode;
4732 struct osd_thread_info *info = osd_oti_get(env);
4733 struct dentry *dentry = &info->oti_obj_dentry;
4734 struct file *file = &info->oti_file;
4739 dentry->d_inode = inode;
4740 dentry->d_sb = inode->i_sb;
4741 file->f_path.dentry = dentry;
4742 file->f_mapping = inode->i_mapping;
4743 file->f_op = inode->i_fop;
4744 set_file_inode(file, inode);
4746 rc = ll_vfs_fsync_range(file, start, end, 0);
4751 static int osd_invalidate(const struct lu_env *env, struct dt_object *dt)
4760 static int osd_iam_index_probe(const struct lu_env *env, struct osd_object *o,
4761 const struct dt_index_features *feat)
4763 struct iam_descr *descr;
4765 if (osd_object_is_root(o))
4766 return feat == &dt_directory_features;
4768 LASSERT(o->oo_dir != NULL);
4770 descr = o->oo_dir->od_container.ic_descr;
4771 if (feat == &dt_directory_features) {
4772 if (descr->id_rec_size == sizeof(struct osd_fid_pack))
4777 return feat->dif_keysize_min <= descr->id_key_size &&
4778 descr->id_key_size <= feat->dif_keysize_max &&
4779 feat->dif_recsize_min <= descr->id_rec_size &&
4780 descr->id_rec_size <= feat->dif_recsize_max &&
4781 !(feat->dif_flags & (DT_IND_VARKEY |
4782 DT_IND_VARREC | DT_IND_NONUNQ)) &&
4783 ergo(feat->dif_flags & DT_IND_UPDATE,
4784 1 /* XXX check that object (and fs) is writable */);
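/*
 * Illustrative only: a dt_index_features instance (with made-up sizes) that
 * the probe above would accept for an IAM container using 16-byte fixed-size
 * keys and 32-byte fixed-size records, since it requests neither
 * variable-size keys/records nor non-unique keys. This example is not part
 * of the original code and is never referenced.
 */
static const struct dt_index_features osd_example_fixed_feat = {
	.dif_flags	 = 0,
	.dif_keysize_min = 16,
	.dif_keysize_max = 16,
	.dif_recsize_min = 32,
	.dif_recsize_max = 32,
};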
4788 static int osd_iam_container_init(const struct lu_env *env,
4789 struct osd_object *obj,
4790 struct osd_directory *dir)
4792 struct iam_container *bag = &dir->od_container;
4795 result = iam_container_init(bag, &dir->od_descr, obj->oo_inode);
4799 result = iam_container_setup(bag);
4801 obj->oo_dt.do_index_ops = &osd_index_iam_ops;
4803 iam_container_fini(bag);
4810 * Concurrency: no external locking is necessary.
4812 static int osd_index_try(const struct lu_env *env, struct dt_object *dt,
4813 const struct dt_index_features *feat)
4817 struct osd_object *obj = osd_dt_obj(dt);
4819 LINVRNT(osd_invariant(obj));
4821 if (osd_object_is_root(obj)) {
4822 dt->do_index_ops = &osd_index_ea_ops;
4824 } else if (feat == &dt_directory_features) {
4825 dt->do_index_ops = &osd_index_ea_ops;
4826 if (obj->oo_inode == NULL || S_ISDIR(obj->oo_inode->i_mode))
4831 } else if (unlikely(feat == &dt_otable_features)) {
4832 dt->do_index_ops = &osd_otable_ops;
4834 } else if (unlikely(feat == &dt_acct_features)) {
4835 dt->do_index_ops = &osd_acct_index_ops;
4838 } else if (!osd_has_index(obj)) {
4839 struct osd_directory *dir;
4840 struct osd_device *osd = osd_obj2dev(obj);
4841 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
4846 spin_lock(&obj->oo_guard);
4847 if (obj->oo_dir == NULL)
4851 * Concurrent thread allocated container data.
4854 spin_unlock(&obj->oo_guard);
4856 * Now that we have the container data, serialize its initialization.
4859 down_write(&obj->oo_ext_idx_sem);
4861 * recheck under lock.
4864 if (osd_has_index(obj)) {
4869 result = osd_iam_container_init(env, obj, obj->oo_dir);
4870 if (result || feat == &dt_lfsck_namespace_features ||
4871 feat == &dt_lfsck_layout_orphan_features ||
4872 feat == &dt_lfsck_layout_dangling_features)
4875 result = osd_index_register(osd, fid,
4876 feat->dif_keysize_max,
4877 feat->dif_recsize_max);
4879 CWARN("%s: failed to register index "
4881 osd_name(osd), PFID(fid), result);
4882 else if (result > 0)
4885 CDEBUG(D_LFSCK, "%s: index object "DFID
4886 " (%d/%d) registered\n",
4887 osd_name(osd), PFID(fid),
4888 (int)feat->dif_keysize_max,
4889 (int)feat->dif_recsize_max);
4892 up_write(&obj->oo_ext_idx_sem);
4900 if (result == 0 && skip_iam == 0) {
4901 if (!osd_iam_index_probe(env, obj, feat))
4904 LINVRNT(osd_invariant(obj));
4909 static int osd_otable_it_attr_get(const struct lu_env *env,
4910 struct dt_object *dt,
4911 struct lu_attr *attr)
4917 static const struct dt_object_operations osd_obj_ops = {
4918 .do_read_lock = osd_read_lock,
4919 .do_write_lock = osd_write_lock,
4920 .do_read_unlock = osd_read_unlock,
4921 .do_write_unlock = osd_write_unlock,
4922 .do_write_locked = osd_write_locked,
4923 .do_attr_get = osd_attr_get,
4924 .do_declare_attr_set = osd_declare_attr_set,
4925 .do_attr_set = osd_attr_set,
4926 .do_ah_init = osd_ah_init,
4927 .do_declare_create = osd_declare_create,
4928 .do_create = osd_create,
4929 .do_declare_destroy = osd_declare_destroy,
4930 .do_destroy = osd_destroy,
4931 .do_index_try = osd_index_try,
4932 .do_declare_ref_add = osd_declare_ref_add,
4933 .do_ref_add = osd_ref_add,
4934 .do_declare_ref_del = osd_declare_ref_del,
4935 .do_ref_del = osd_ref_del,
4936 .do_xattr_get = osd_xattr_get,
4937 .do_declare_xattr_set = osd_declare_xattr_set,
4938 .do_xattr_set = osd_xattr_set,
4939 .do_declare_xattr_del = osd_declare_xattr_del,
4940 .do_xattr_del = osd_xattr_del,
4941 .do_xattr_list = osd_xattr_list,
4942 .do_object_sync = osd_object_sync,
4943 .do_invalidate = osd_invalidate,
4946 static const struct dt_object_operations osd_obj_otable_it_ops = {
4947 .do_attr_get = osd_otable_it_attr_get,
4948 .do_index_try = osd_index_try,
4951 static int osd_index_declare_iam_delete(const struct lu_env *env,
4952 struct dt_object *dt,
4953 const struct dt_key *key,
4954 struct thandle *handle)
4956 struct osd_thandle *oh;
4958 oh = container_of0(handle, struct osd_thandle, ot_super);
4959 LASSERT(oh->ot_handle == NULL);
4961 /* Recycling may cause three additional blocks to be changed. */
4962 osd_trans_declare_op(env, oh, OSD_OT_DELETE,
4963 osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
4969 * delete a (key, value) pair from index \a dt specified by \a key
4971 * \param dt osd index object
4972 * \param key key for index
4974 * \param handle transaction handle
4977 * \retval -ve failure
4979 static int osd_index_iam_delete(const struct lu_env *env, struct dt_object *dt,
4980 const struct dt_key *key,
4981 struct thandle *handle)
4983 struct osd_thread_info *oti = osd_oti_get(env);
4984 struct osd_object *obj = osd_dt_obj(dt);
4985 struct osd_thandle *oh;
4986 struct iam_path_descr *ipd;
4987 struct iam_container *bag = &obj->oo_dir->od_container;
4992 if (!dt_object_exists(dt))
4995 LINVRNT(osd_invariant(obj));
4996 LASSERT(!dt_object_remote(dt));
4997 LASSERT(bag->ic_object == obj->oo_inode);
4998 LASSERT(handle != NULL);
5000 osd_trans_exec_op(env, handle, OSD_OT_DELETE);
5002 ipd = osd_idx_ipd_get(env, bag);
5003 if (unlikely(ipd == NULL))
5006 oh = container_of0(handle, struct osd_thandle, ot_super);
5007 LASSERT(oh->ot_handle != NULL);
5008 LASSERT(oh->ot_handle->h_transaction != NULL);
5010 if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
5011 /* swab quota uid/gid provided by caller */
5012 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
5013 key = (const struct dt_key *)&oti->oti_quota_id;
5016 rc = iam_delete(oh->ot_handle, bag, (const struct iam_key *)key, ipd);
5017 osd_ipd_put(env, bag, ipd);
5018 LINVRNT(osd_invariant(obj));
5019 osd_trans_exec_check(env, handle, OSD_OT_DELETE);
5023 static int osd_index_declare_ea_delete(const struct lu_env *env,
5024 struct dt_object *dt,
5025 const struct dt_key *key,
5026 struct thandle *handle)
5028 struct osd_thandle *oh;
5029 struct inode *inode;
5034 LASSERT(!dt_object_remote(dt));
5035 LASSERT(handle != NULL);
5037 oh = container_of0(handle, struct osd_thandle, ot_super);
5038 LASSERT(oh->ot_handle == NULL);
5040 credits = osd_dto_credits_noquota[DTO_INDEX_DELETE];
5041 osd_trans_declare_op(env, oh, OSD_OT_DELETE, credits);
5043 inode = osd_dt_obj(dt)->oo_inode;
5047 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
5048 i_projid_read(inode), 0, oh, osd_dt_obj(dt),
5053 static inline int osd_get_fid_from_dentry(struct ldiskfs_dir_entry_2 *de,
5056 struct osd_fid_pack *rec;
5059 if (de->file_type & LDISKFS_DIRENT_LUFID) {
5060 rec = (struct osd_fid_pack *)(de->name + de->name_len + 1);
5061 rc = osd_fid_unpack((struct lu_fid *)fid, rec);
5062 if (rc == 0 && unlikely(!fid_is_sane((struct lu_fid *)fid)))
5068 static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
5069 const struct lu_fid *fid)
5071 struct seq_server_site *ss = osd_seq_site(osd);
5075 /* FID seqs not in FLDB, must be local seq */
5076 if (unlikely(!fid_seq_in_fldb(fid_seq(fid))))
5080 * If the FLD has not been initialized yet, that can only happen
5081 * during setup, likely during MGS initialization, so we assume
5082 * this is a local FID.
5084 if (ss == NULL || ss->ss_server_fld == NULL)
5087 /* Only check the local FLDB here */
5088 if (osd_seq_exists(env, osd, fid_seq(fid)))
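/*
 * Minimal sketch (illustrative only) of how callers in this file interpret
 * osd_remote_fid(): a positive return means the FID belongs to another MDT,
 * zero means it is local, and a negative value is an error. The helper name
 * below is assumed for the example and is not used elsewhere.
 */
static inline bool osd_fid_is_remote_sketch(const struct lu_env *env,
					    struct osd_device *osd,
					    const struct lu_fid *fid)
{
	/* treat errors as "not remote"; real callers check rc < 0 separately */
	return osd_remote_fid(env, osd, fid) > 0;
}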
5094 static void osd_take_care_of_agent(const struct lu_env *env,
5095 struct osd_device *osd,
5096 struct osd_thandle *oh,
5097 struct ldiskfs_dir_entry_2 *de)
5099 struct lu_fid *fid = &osd_oti_get(env)->oti_fid;
5100 struct osd_idmap_cache *idc;
5101 int rc, schedule = 0;
5103 LASSERT(de != NULL);
5105 rc = osd_get_fid_from_dentry(de, (struct dt_rec *)fid);
5106 if (likely(rc == 0)) {
5107 idc = osd_idc_find_or_init(env, osd, fid);
5108 if (IS_ERR(idc) || idc->oic_remote)
5110 } else if (rc == -ENODATA) {
5112 * can't get FID, postpone to the end of the
5113 * transaction when iget() is safe
5117 CERROR("%s: can't get FID: rc = %d\n", osd_name(osd), rc);
5120 osd_schedule_agent_inode_removal(env, oh,
5121 le32_to_cpu(de->inode));
5125 * Index delete function for interoperability mode (b11826).
5126 * It will remove the directory entry added by osd_index_ea_insert().
5127 * This entry is needed to maintain name->fid mapping.
5129 * \param key name of the directory entry to be deleted
5131 * \retval 0, on success
5132 * \retval -ve, on error
5134 static int osd_index_ea_delete(const struct lu_env *env, struct dt_object *dt,
5135 const struct dt_key *key, struct thandle *handle)
5137 struct osd_object *obj = osd_dt_obj(dt);
5138 struct inode *dir = obj->oo_inode;
5139 struct dentry *dentry;
5140 struct osd_thandle *oh;
5141 struct ldiskfs_dir_entry_2 *de = NULL;
5142 struct buffer_head *bh;
5143 struct htree_lock *hlock = NULL;
5144 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
5149 if (!dt_object_exists(dt))
5152 LINVRNT(osd_invariant(obj));
5153 LASSERT(!dt_object_remote(dt));
5154 LASSERT(handle != NULL);
5156 osd_trans_exec_op(env, handle, OSD_OT_DELETE);
5158 oh = container_of(handle, struct osd_thandle, ot_super);
5159 LASSERT(oh->ot_handle != NULL);
5160 LASSERT(oh->ot_handle->h_transaction != NULL);
5162 ll_vfs_dq_init(dir);
5163 dentry = osd_child_dentry_get(env, obj,
5164 (char *)key, strlen((char *)key));
5166 if (obj->oo_hl_head != NULL) {
5167 hlock = osd_oti_get(env)->oti_hlock;
5168 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
5169 dir, LDISKFS_HLOCK_DEL);
5171 down_write(&obj->oo_ext_idx_sem);
5174 bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
5177 * If this is not the ".." entry, it might be a remote DNE
5178 * entry and we need to check if the FID is for a remote
5179 * MDT. If the FID is not in the directory entry (e.g.
5180 * upgraded 1.8 filesystem without dirdata enabled) then
5181 * we need to get the FID from the LMA. For a remote directory
5182 * there HAS to be an LMA, it cannot be an IGIF inode in this case.
5185 * Delete the entry before the agent inode in order to
5186 * simplify error handling. At worst an error after deleting
5187 * the entry first might leak the agent inode afterward. The
5188 * reverse would need filesystem abort in case of error deleting
5189 * the entry after the agent had been removed, or leave a
5190 * dangling entry pointing at a random inode.
5192 if (strcmp((char *)key, dotdot) != 0)
5193 osd_take_care_of_agent(env, osd, oh, de);
5194 rc = ldiskfs_delete_entry(oh->ot_handle, dir, de, bh);
5200 ldiskfs_htree_unlock(hlock);
5202 up_write(&obj->oo_ext_idx_sem);
5206 LASSERT(osd_invariant(obj));
5207 osd_trans_exec_check(env, handle, OSD_OT_DELETE);
5212 * Lookup index for \a key and copy record to \a rec.
5214 * \param dt osd index object
5215 * \param key key for index
5216 * \param rec record reference
5218 * \retval +ve success: exact match
5219 * \retval 0 return record with key not greater than \a key
5220 * \retval -ve failure
5222 static int osd_index_iam_lookup(const struct lu_env *env, struct dt_object *dt,
5223 struct dt_rec *rec, const struct dt_key *key)
5225 struct osd_object *obj = osd_dt_obj(dt);
5226 struct iam_path_descr *ipd;
5227 struct iam_container *bag = &obj->oo_dir->od_container;
5228 struct osd_thread_info *oti = osd_oti_get(env);
5229 struct iam_iterator *it = &oti->oti_idx_it;
5230 struct iam_rec *iam_rec;
5235 if (!dt_object_exists(dt))
5238 LASSERT(osd_invariant(obj));
5239 LASSERT(!dt_object_remote(dt));
5240 LASSERT(bag->ic_object == obj->oo_inode);
5242 ipd = osd_idx_ipd_get(env, bag);
5246 /* got ipd now we can start iterator. */
5247 iam_it_init(it, bag, 0, ipd);
5249 if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
5250 /* swab quota uid/gid provided by caller */
5251 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
5252 key = (const struct dt_key *)&oti->oti_quota_id;
5255 rc = iam_it_get(it, (struct iam_key *)key);
5257 if (S_ISDIR(obj->oo_inode->i_mode))
5258 iam_rec = (struct iam_rec *)oti->oti_ldp;
5260 iam_rec = (struct iam_rec *)rec;
5262 iam_reccpy(&it->ii_path.ip_leaf, (struct iam_rec *)iam_rec);
5264 if (S_ISDIR(obj->oo_inode->i_mode))
5265 osd_fid_unpack((struct lu_fid *)rec,
5266 (struct osd_fid_pack *)iam_rec);
5267 else if (fid_is_quota(lu_object_fid(&dt->do_lu)))
5268 osd_quota_unpack(obj, rec);
5273 osd_ipd_put(env, bag, ipd);
5275 LINVRNT(osd_invariant(obj));
5280 static int osd_index_declare_iam_insert(const struct lu_env *env,
5281 struct dt_object *dt,
5282 const struct dt_rec *rec,
5283 const struct dt_key *key,
5284 struct thandle *handle)
5286 struct osd_thandle *oh;
5288 LASSERT(handle != NULL);
5290 oh = container_of0(handle, struct osd_thandle, ot_super);
5291 LASSERT(oh->ot_handle == NULL);
5293 osd_trans_declare_op(env, oh, OSD_OT_INSERT,
5294 osd_dto_credits_noquota[DTO_INDEX_INSERT]);
5300 * Inserts (key, value) pair in \a dt index object.
5302 * \param dt osd index object
5303 * \param key key for index
5304 * \param rec record reference
5305 * \param th transaction handle
5308 * \retval -ve failure
5310 static int osd_index_iam_insert(const struct lu_env *env, struct dt_object *dt,
5311 const struct dt_rec *rec,
5312 const struct dt_key *key, struct thandle *th)
5314 struct osd_object *obj = osd_dt_obj(dt);
5315 struct iam_path_descr *ipd;
5316 struct osd_thandle *oh;
5317 struct iam_container *bag;
5318 struct osd_thread_info *oti = osd_oti_get(env);
5319 struct iam_rec *iam_rec;
5324 if (!dt_object_exists(dt))
5327 LINVRNT(osd_invariant(obj));
5328 LASSERT(!dt_object_remote(dt));
5330 bag = &obj->oo_dir->od_container;
5331 LASSERT(bag->ic_object == obj->oo_inode);
5332 LASSERT(th != NULL);
5334 osd_trans_exec_op(env, th, OSD_OT_INSERT);
5336 ipd = osd_idx_ipd_get(env, bag);
5337 if (unlikely(ipd == NULL))
5340 oh = container_of0(th, struct osd_thandle, ot_super);
5341 LASSERT(oh->ot_handle != NULL);
5342 LASSERT(oh->ot_handle->h_transaction != NULL);
5343 if (S_ISDIR(obj->oo_inode->i_mode)) {
5344 iam_rec = (struct iam_rec *)oti->oti_ldp;
5345 osd_fid_pack((struct osd_fid_pack *)iam_rec, rec,
5347 } else if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
5348 /* pack quota uid/gid */
5349 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
5350 key = (const struct dt_key *)&oti->oti_quota_id;
5351 /* pack quota record */
5352 rec = osd_quota_pack(obj, rec, &oti->oti_quota_rec);
5353 iam_rec = (struct iam_rec *)rec;
5355 iam_rec = (struct iam_rec *)rec;
5358 rc = iam_insert(oh->ot_handle, bag, (const struct iam_key *)key,
5360 osd_ipd_put(env, bag, ipd);
5361 LINVRNT(osd_invariant(obj));
5362 osd_trans_exec_check(env, th, OSD_OT_INSERT);
5367 * Calls ldiskfs_add_entry() to add directory entry
5368 * into the directory. This is required for
5369 * interoperability mode (b11826)
5371 * \retval 0, on success
5372 * \retval -ve, on error
5374 static int __osd_ea_add_rec(struct osd_thread_info *info,
5375 struct osd_object *pobj, struct inode *cinode,
5376 const char *name, const struct lu_fid *fid,
5377 struct htree_lock *hlock, struct thandle *th)
5379 struct ldiskfs_dentry_param *ldp;
5380 struct dentry *child;
5381 struct osd_thandle *oth;
5384 oth = container_of(th, struct osd_thandle, ot_super);
5385 LASSERT(oth->ot_handle != NULL);
5386 LASSERT(oth->ot_handle->h_transaction != NULL);
5387 LASSERT(pobj->oo_inode);
5389 ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
5390 if (unlikely(pobj->oo_inode ==
5391 osd_sb(osd_obj2dev(pobj))->s_root->d_inode))
5394 osd_get_ldiskfs_dirent_param(ldp, fid);
5395 child = osd_child_dentry_get(info->oti_env, pobj, name, strlen(name));
5396 child->d_fsdata = (void *)ldp;
5397 ll_vfs_dq_init(pobj->oo_inode);
5398 rc = osd_ldiskfs_add_entry(info, osd_obj2dev(pobj), oth->ot_handle,
5399 child, cinode, hlock);
5400 if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_TYPE)) {
5401 struct ldiskfs_dir_entry_2 *de;
5402 struct buffer_head *bh;
5405 bh = osd_ldiskfs_find_entry(pobj->oo_inode, &child->d_name, &de,
5408 rc1 = ldiskfs_journal_get_write_access(oth->ot_handle,
5411 if (S_ISDIR(cinode->i_mode))
5412 de->file_type = LDISKFS_DIRENT_LUFID |
5413 LDISKFS_FT_REG_FILE;
5415 de->file_type = LDISKFS_DIRENT_LUFID |
5417 ldiskfs_handle_dirty_metadata(oth->ot_handle,
5428 * Calls ldiskfs_add_dot_dotdot() to add dot and dotdot entries
5429 * into the directory. Also sets flags in the osd object to
5430 * indicate that dot and dotdot have been created. This is required for
5431 * interoperability mode (b11826)
5433 * \param dir directory for dot and dotdot fixup.
5434 * \param obj child object for linking
5436 * \retval 0, on success
5437 * \retval -ve, on error
5439 static int osd_add_dot_dotdot(struct osd_thread_info *info,
5440 struct osd_object *dir,
5441 struct inode *parent_dir, const char *name,
5442 const struct lu_fid *dot_fid,
5443 const struct lu_fid *dot_dot_fid,
5446 struct inode *inode = dir->oo_inode;
5447 struct osd_thandle *oth;
5450 oth = container_of(th, struct osd_thandle, ot_super);
5451 LASSERT(oth->ot_handle->h_transaction != NULL);
5452 LASSERT(S_ISDIR(dir->oo_inode->i_mode));
5454 if (strcmp(name, dot) == 0) {
5455 if (dir->oo_compat_dot_created) {
5458 LASSERT(inode->i_ino == parent_dir->i_ino);
5459 dir->oo_compat_dot_created = 1;
5462 } else if (strcmp(name, dotdot) == 0) {
5463 if (!dir->oo_compat_dot_created)
5465 /* in case of rename, dotdot is already created */
5466 if (dir->oo_compat_dotdot_created) {
5467 return __osd_ea_add_rec(info, dir, parent_dir, name,
5468 dot_dot_fid, NULL, th);
5471 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_PARENT)) {
5472 struct lu_fid tfid = *dot_dot_fid;
5475 result = osd_add_dot_dotdot_internal(info,
5476 dir->oo_inode, parent_dir, dot_fid,
5479 result = osd_add_dot_dotdot_internal(info,
5480 dir->oo_inode, parent_dir, dot_fid,
5485 dir->oo_compat_dotdot_created = 1;
5493 * It calls the appropriate osd_add* function and returns the
5494 * value returned by that function.
5496 static int osd_ea_add_rec(const struct lu_env *env, struct osd_object *pobj,
5497 struct inode *cinode, const char *name,
5498 const struct lu_fid *fid, struct thandle *th)
5500 struct osd_thread_info *info = osd_oti_get(env);
5501 struct htree_lock *hlock;
5504 hlock = pobj->oo_hl_head != NULL ? info->oti_hlock : NULL;
5506 if (name[0] == '.' && (name[1] == '\0' ||
5507 (name[1] == '.' && name[2] == '\0'))) {
5508 if (hlock != NULL) {
5509 ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
5512 down_write(&pobj->oo_ext_idx_sem);
5515 rc = osd_add_dot_dotdot(info, pobj, cinode, name,
5516 lu_object_fid(&pobj->oo_dt.do_lu),
5519 if (hlock != NULL) {
5520 ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
5521 pobj->oo_inode, LDISKFS_HLOCK_ADD);
5523 down_write(&pobj->oo_ext_idx_sem);
5526 if (OBD_FAIL_CHECK(OBD_FAIL_FID_INDIR)) {
5527 struct lu_fid *tfid = &info->oti_fid;
5531 rc = __osd_ea_add_rec(info, pobj, cinode, name,
5534 rc = __osd_ea_add_rec(info, pobj, cinode, name, fid,
5539 ldiskfs_htree_unlock(hlock);
5541 up_write(&pobj->oo_ext_idx_sem);
5547 osd_consistency_check(struct osd_thread_info *oti, struct osd_device *dev,
5548 struct osd_idmap_cache *oic)
5550 struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
5551 struct lu_fid *fid = &oic->oic_fid;
5552 struct osd_inode_id *id = &oic->oic_lid;
5553 struct inode *inode = NULL;
5560 if (!fid_is_norm(fid) && !fid_is_igif(fid))
5563 if (thread_is_running(&scrub->os_thread) &&
5564 scrub->os_pos_current > id->oii_ino)
5567 if (dev->od_auto_scrub_interval == AS_NEVER ||
5568 ktime_get_real_seconds() <
5569 scrub->os_file.sf_time_last_complete + dev->od_auto_scrub_interval)
5573 rc = osd_oi_lookup(oti, dev, fid, &oti->oti_id, 0);
5574 if (rc == -ENOENT) {
5575 __u32 gen = id->oii_gen;
5581 inode = osd_iget(oti, dev, id);
5582 /* The inode has been removed (by race maybe). */
5583 if (IS_ERR(inode)) {
5584 rc = PTR_ERR(inode);
5586 RETURN(rc == -ESTALE ? -ENOENT : rc);
5589 /* The OI mapping is lost. */
5590 if (gen != OSD_OII_NOGEN)
5594 * The inode may have been reused by another object, we cannot tell,
5595 * so leave it to be handled by a subsequent osd_fid_lookup().
5598 } else if (rc || osd_id_eq(id, &oti->oti_id)) {
5605 if (thread_is_running(&scrub->os_thread)) {
5606 if (inode == NULL) {
5607 inode = osd_iget(oti, dev, id);
5608 /* The inode has been removed (by race maybe). */
5609 if (IS_ERR(inode)) {
5610 rc = PTR_ERR(inode);
5612 RETURN(rc == -ESTALE ? -ENOENT : rc);
5616 rc = osd_oii_insert(dev, oic, insert);
5618 * There is a race condition between osd_oi_lookup() and the OI scrub:
5619 * the OI scrub may finish just after an osd_oi_lookup() failure.
5620 * In such a case it is unnecessary to trigger the OI scrub again;
5621 * instead, just call osd_oi_lookup() again.
5623 if (unlikely(rc == -EAGAIN))
5626 if (!S_ISDIR(inode->i_mode))
5629 rc = osd_check_lmv(oti, dev, inode, oic);
5634 if (dev->od_auto_scrub_interval != AS_NEVER && ++once == 1) {
5635 rc = osd_scrub_start(oti->oti_env, dev, SS_AUTO_PARTIAL |
5636 SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT);
5637 CDEBUG(D_LFSCK | D_CONSOLE | D_WARNING,
5638 "%s: trigger partial OI scrub for RPC inconsistency "
5639 "checking FID "DFID": rc = %d\n",
5640 osd_dev2name(dev), PFID(fid), rc);
5641 if (rc == 0 || rc == -EALREADY)
5654 static int osd_fail_fid_lookup(struct osd_thread_info *oti,
5655 struct osd_device *dev,
5656 struct osd_idmap_cache *oic,
5657 struct lu_fid *fid, __u32 ino)
5659 struct lustre_ost_attrs *loa = &oti->oti_ost_attrs;
5660 struct inode *inode;
5663 osd_id_gen(&oic->oic_lid, ino, OSD_OII_NOGEN);
5664 inode = osd_iget(oti, dev, &oic->oic_lid);
5665 if (IS_ERR(inode)) {
5666 fid_zero(&oic->oic_fid);
5667 return PTR_ERR(inode);
5670 rc = osd_get_lma(oti, inode, &oti->oti_obj_dentry, loa);
5673 fid_zero(&oic->oic_fid);
5675 *fid = oic->oic_fid = loa->loa_lma.lma_self_fid;
5679 void osd_add_oi_cache(struct osd_thread_info *info, struct osd_device *osd,
5680 struct osd_inode_id *id, const struct lu_fid *fid)
5682 CDEBUG(D_INODE, "add "DFID" %u:%u to info %p\n", PFID(fid),
5683 id->oii_ino, id->oii_gen, info);
5684 info->oti_cache.oic_lid = *id;
5685 info->oti_cache.oic_fid = *fid;
5686 info->oti_cache.oic_dev = osd;
5690 * Get parent FID from the linkEA.
5692 * For a directory whose parent resides on a remote MDT, we insert it
5693 * into the /REMOTE_PARENT_DIR locally to satisfy the local e2fsck. On
5694 * the other hand, so that lookup(..) on the directory can return
5695 * the real parent FID, we append the real parent FID after its ".."
5696 * name entry in the /REMOTE_PARENT_DIR.
5698 * Unfortunately, such a PFID-in-dirent cannot be preserved via file-level
5699 * backup. So after a restore, we cannot get the right parent FID from
5700 * the ".." name entry in the /REMOTE_PARENT_DIR. In such a case, since
5701 * we have stored the real parent FID in the directory object's linkEA,
5702 * we can parse the linkEA for the real parent FID.
5704 * \param[in] env pointer to the thread context
5705 * \param[in] obj pointer to the object to be handled
5706 * \param[out] fid pointer to the buffer to hold the parent FID
5708 * \retval 0 for getting the real parent FID successfully
5709 * \retval negative error number on failure
5711 static int osd_get_pfid_from_linkea(const struct lu_env *env,
5712 struct osd_object *obj,
5715 struct osd_thread_info *oti = osd_oti_get(env);
5716 struct lu_buf *buf = &oti->oti_big_buf;
5717 struct dentry *dentry = &oti->oti_obj_dentry;
5718 struct inode *inode = obj->oo_inode;
5719 struct linkea_data ldata = { NULL };
5725 if (!S_ISDIR(inode->i_mode))
5729 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LINK,
5730 buf->lb_buf, buf->lb_len);
5731 if (rc == -ERANGE) {
5732 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LINK,
5735 lu_buf_realloc(buf, rc);
5736 if (buf->lb_buf == NULL)
5743 if (unlikely(rc == 0))
5749 if (unlikely(buf->lb_buf == NULL)) {
5750 lu_buf_realloc(buf, rc);
5751 if (buf->lb_buf == NULL)
5758 rc = linkea_init_with_rec(&ldata);
5760 linkea_first_entry(&ldata);
5761 linkea_entry_unpack(ldata.ld_lee, &ldata.ld_reclen, NULL, fid);
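/*
 * Sketch (illustrative only) of the fallback order used later by
 * osd_ea_lookup_rec() for the ".." entry of a directory whose parent is on
 * a remote MDT: prefer the FID packed in the dirent, and fall back to the
 * parent FID stored in the directory's linkEA when the dirent carries no
 * FID (e.g. after a file-level restore). The helper name is assumed.
 */
static inline int osd_parent_fid_sketch(const struct lu_env *env,
					struct osd_object *obj,
					struct ldiskfs_dir_entry_2 *de,
					struct lu_fid *fid)
{
	int rc;

	rc = osd_get_fid_from_dentry(de, (struct dt_rec *)fid);
	if (rc == -ENODATA)
		/* no FID-in-dirent, parse the directory's linkEA instead */
		rc = osd_get_pfid_from_linkea(env, obj, fid);

	return rc;
}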
5767 static int osd_verify_ent_by_linkea(const struct lu_env *env,
5768 struct inode *inode,
5769 const struct lu_fid *pfid,
5770 const char *name, const int namelen)
5772 struct osd_thread_info *oti = osd_oti_get(env);
5773 struct lu_buf *buf = &oti->oti_big_buf;
5774 struct dentry *dentry = &oti->oti_obj_dentry;
5775 struct linkea_data ldata = { NULL };
5776 struct lu_name cname = { .ln_name = name,
5777 .ln_namelen = namelen };
5783 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LINK,
5784 buf->lb_buf, buf->lb_len);
5786 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LINK, NULL, 0);
5791 if (unlikely(rc == 0))
5794 if (buf->lb_len < rc) {
5795 lu_buf_realloc(buf, rc);
5796 if (buf->lb_buf == NULL)
5803 rc = linkea_init_with_rec(&ldata);
5805 rc = linkea_links_find(&ldata, &cname, pfid);
5811 * Calls ->lookup() to find the dentry, gets the inode from the dentry,
5812 * and reads the inode's EA to get the FID (required for interoperability mode, b11826).
5815 * \retval 0, on success
5816 * \retval -ve, on error
5818 static int osd_ea_lookup_rec(const struct lu_env *env, struct osd_object *obj,
5819 struct dt_rec *rec, const struct dt_key *key)
5821 struct inode *dir = obj->oo_inode;
5822 struct dentry *dentry;
5823 struct ldiskfs_dir_entry_2 *de;
5824 struct buffer_head *bh;
5825 struct lu_fid *fid = (struct lu_fid *)rec;
5826 struct htree_lock *hlock = NULL;
5832 LASSERT(dir->i_op != NULL);
5833 LASSERT(dir->i_op->lookup != NULL);
5835 dentry = osd_child_dentry_get(env, obj,
5836 (char *)key, strlen((char *)key));
5838 if (obj->oo_hl_head != NULL) {
5839 hlock = osd_oti_get(env)->oti_hlock;
5840 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
5841 dir, LDISKFS_HLOCK_LOOKUP);
5843 down_read(&obj->oo_ext_idx_sem);
5846 bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
5848 struct osd_thread_info *oti = osd_oti_get(env);
5849 struct osd_inode_id *id = &oti->oti_id;
5850 struct osd_idmap_cache *oic = &oti->oti_cache;
5851 struct osd_device *dev = osd_obj2dev(obj);
5853 ino = le32_to_cpu(de->inode);
5854 if (OBD_FAIL_CHECK(OBD_FAIL_FID_LOOKUP)) {
5856 rc = osd_fail_fid_lookup(oti, dev, oic, fid, ino);
5860 rc = osd_get_fid_from_dentry(de, rec);
5862 /* done with de, release bh */
5865 if (unlikely(is_remote_parent_ino(dev, ino))) {
5866 const char *name = (const char *)key;
5869 * If the parent is on remote MDT, and there
5870 * is no FID-in-dirent, then we have to get
5871 * the parent FID from the linkEA.
5873 if (likely(strlen(name) == 2 &&
5874 name[0] == '.' && name[1] == '.'))
5875 rc = osd_get_pfid_from_linkea(env, obj,
5878 rc = osd_ea_fid_get(env, obj, ino, fid, id);
5881 osd_id_gen(id, ino, OSD_OII_NOGEN);
5884 if (rc != 0 || osd_remote_fid(env, dev, fid)) {
5885 fid_zero(&oic->oic_fid);
5890 osd_add_oi_cache(osd_oti_get(env), osd_obj2dev(obj), id, fid);
5891 rc = osd_consistency_check(oti, dev, oic);
5893 fid_zero(&oic->oic_fid);
5895 /* Other error should not affect lookup result. */
5905 ldiskfs_htree_unlock(hlock);
5907 up_read(&obj->oo_ext_idx_sem);
5911 static int osd_index_declare_ea_insert(const struct lu_env *env,
5912 struct dt_object *dt,
5913 const struct dt_rec *rec,
5914 const struct dt_key *key,
5915 struct thandle *handle)
5917 struct osd_thandle *oh;
5918 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
5919 struct dt_insert_rec *rec1 = (struct dt_insert_rec *)rec;
5920 const struct lu_fid *fid = rec1->rec_fid;
5921 int credits, rc = 0;
5922 struct osd_idmap_cache *idc;
5926 LASSERT(!dt_object_remote(dt));
5927 LASSERT(handle != NULL);
5928 LASSERT(fid != NULL);
5929 LASSERT(rec1->rec_type != 0);
5931 oh = container_of0(handle, struct osd_thandle, ot_super);
5932 LASSERT(oh->ot_handle == NULL);
5934 credits = osd_dto_credits_noquota[DTO_INDEX_INSERT];
5937 * we can't call iget() while a transaction is running
5938 * (this can lead to a deadlock), but we need to know the
5939 * inode number and object type, so we find this information at
5940 * declare time and cache it in the per-thread info
5942 idc = osd_idc_find_or_init(env, osd, fid);
5944 RETURN(PTR_ERR(idc));
5945 if (idc->oic_remote) {
5947 * a reference to remote inode is represented by an
5948 * agent inode which we have to create
5950 credits += osd_dto_credits_noquota[DTO_OBJECT_CREATE];
5951 credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
5954 osd_trans_declare_op(env, oh, OSD_OT_INSERT, credits);
5956 if (osd_dt_obj(dt)->oo_inode != NULL) {
5957 struct inode *inode = osd_dt_obj(dt)->oo_inode;
5960 * We ignore block quota on meta pool (MDTs), so needn't
5961 * calculate how many blocks will be consumed by this index
5964 rc = osd_declare_inode_qid(env, i_uid_read(inode),
5966 i_projid_read(inode), 0,
5967 oh, osd_dt_obj(dt), NULL,
5972 #ifdef HAVE_PROJECT_QUOTA
5974 * Reserve credits for transferring the local agent inode's project ID
5975 * to 0; quota enforcement is ignored in this case.
5977 if (idc->oic_remote &&
5978 LDISKFS_I(inode)->i_flags & LUSTRE_PROJINHERIT_FL &&
5979 i_projid_read(inode) != 0)
5980 rc = osd_declare_attr_qid(env, osd_dt_obj(dt), oh,
5981 0, i_projid_read(inode),
5982 0, false, PRJQUOTA, true);
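/*
 * Sketch (illustrative only) of the declare/execute split used above and in
 * osd_index_ea_insert() below: the FID->inode mapping is resolved at declare
 * time, when no journal handle is held and iget() is safe, and is only
 * looked up from the per-thread cache while the transaction runs. The
 * helper names are assumed for the example.
 */
static inline int osd_idc_declare_sketch(const struct lu_env *env,
					 struct osd_device *osd,
					 const struct lu_fid *fid)
{
	/* declare phase: may consult OI/FLDB and cache the result */
	struct osd_idmap_cache *idc = osd_idc_find_or_init(env, osd, fid);

	return IS_ERR(idc) ? PTR_ERR(idc) : 0;
}

static inline bool osd_idc_exec_is_remote_sketch(const struct lu_env *env,
						 struct osd_device *osd,
						 const struct lu_fid *fid)
{
	/* execute phase: cache lookup only, no iget() inside the handle */
	struct osd_idmap_cache *idc = osd_idc_find(env, osd, fid);

	return idc != NULL && idc->oic_remote;
}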
5990 * Index add function for interoperability mode (b11826).
5991 * It will add the directory entry. This entry is needed to
5992 * maintain the name->fid mapping.
5994 * \param key name of the directory entry to be inserted
5995 * \param rec value for the given key, i.e. the fid
5997 * \retval 0, on success
5998 * \retval -ve, on error
6000 static int osd_index_ea_insert(const struct lu_env *env, struct dt_object *dt,
6001 const struct dt_rec *rec,
6002 const struct dt_key *key, struct thandle *th)
6004 struct osd_object *obj = osd_dt_obj(dt);
6005 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
6006 struct dt_insert_rec *rec1 = (struct dt_insert_rec *)rec;
6007 const struct lu_fid *fid = rec1->rec_fid;
6008 const char *name = (const char *)key;
6009 struct osd_thread_info *oti = osd_oti_get(env);
6010 struct inode *child_inode = NULL;
6011 struct osd_idmap_cache *idc;
6016 if (!dt_object_exists(dt))
6019 LASSERT(osd_invariant(obj));
6020 LASSERT(!dt_object_remote(dt));
6021 LASSERT(th != NULL);
6023 osd_trans_exec_op(env, th, OSD_OT_INSERT);
6025 LASSERTF(fid_is_sane(fid), "fid"DFID" is insane!\n", PFID(fid));
6027 idc = osd_idc_find(env, osd, fid);
6028 if (unlikely(idc == NULL)) {
6029 idc = osd_idc_find_or_init(env, osd, fid);
6032 * this dt_insert() wasn't declared properly, so the
6033 * FID is missing from the OI cache. We would rather not
6034 * look the FID up in the FLDB/OI and risk a deadlock,
6035 * but in some special cases (lfsck testing, etc.)
6036 * doing so is much simpler than fixing the caller.
6038 * Normally this error would be reported after the first
6039 * find, but migrate may attach source stripes to the
6040 * target, which doesn't create stripes.
6042 CERROR("%s: "DFID" wasn't declared for insert\n",
6043 osd_name(osd), PFID(fid));
6045 RETURN(PTR_ERR(idc));
6049 if (idc->oic_remote) {
6050 /* Insert remote entry */
6051 if (strcmp(name, dotdot) == 0 && strlen(name) == 2) {
6053 igrab(osd->od_mdt_map->omm_remote_parent->d_inode);
6055 child_inode = osd_create_local_agent_inode(env, osd,
6056 obj, fid, rec1->rec_type & S_IFMT, th);
6057 if (IS_ERR(child_inode))
6058 RETURN(PTR_ERR(child_inode));
6061 /* Insert local entry */
6062 if (unlikely(idc->oic_lid.oii_ino == 0)) {
6063 /* for some reason the OI cache wasn't filled properly */
6064 CERROR("%s: OIC for "DFID" isn't filled\n",
6065 osd_name(osd), PFID(fid));
6068 child_inode = oti->oti_inode;
6069 if (unlikely(child_inode == NULL)) {
6070 struct ldiskfs_inode_info *lii;
6075 child_inode = oti->oti_inode = &lii->vfs_inode;
6077 child_inode->i_sb = osd_sb(osd);
6078 child_inode->i_ino = idc->oic_lid.oii_ino;
6079 child_inode->i_mode = rec1->rec_type & S_IFMT;
6082 rc = osd_ea_add_rec(env, obj, child_inode, name, fid, th);
6084 CDEBUG(D_INODE, "parent %lu insert %s:%lu rc = %d\n",
6085 obj->oo_inode->i_ino, name, child_inode->i_ino, rc);
6087 if (child_inode && child_inode != oti->oti_inode)
6089 LASSERT(osd_invariant(obj));
6090 osd_trans_exec_check(env, th, OSD_OT_INSERT);
6095 * Initialize osd Iterator for given osd index object.
6097 * \param dt osd index object
6100 static struct dt_it *osd_it_iam_init(const struct lu_env *env,
6101 struct dt_object *dt,
6104 struct osd_it_iam *it;
6105 struct osd_object *obj = osd_dt_obj(dt);
6106 struct lu_object *lo = &dt->do_lu;
6107 struct iam_path_descr *ipd;
6108 struct iam_container *bag = &obj->oo_dir->od_container;
6110 if (!dt_object_exists(dt))
6111 return ERR_PTR(-ENOENT);
6115 return ERR_PTR(-ENOMEM);
6117 ipd = osd_it_ipd_get(env, bag);
6118 if (likely(ipd != NULL)) {
6122 iam_it_init(&it->oi_it, bag, IAM_IT_MOVE, ipd);
6123 return (struct dt_it *)it;
6126 return ERR_PTR(-ENOMEM);
6131 * free given Iterator.
6133 static void osd_it_iam_fini(const struct lu_env *env, struct dt_it *di)
6135 struct osd_it_iam *it = (struct osd_it_iam *)di;
6136 struct osd_object *obj = it->oi_obj;
6138 iam_it_fini(&it->oi_it);
6139 osd_ipd_put(env, &obj->oo_dir->od_container, it->oi_ipd);
6140 osd_object_put(env, obj);
6145 * Move Iterator to record specified by \a key
6147 * \param di osd iterator
6148 * \param key key for index
6150 * \retval +ve di points to record with least key not larger than key
6151 * \retval 0 di points to exact matched key
6152 * \retval -ve failure
6155 static int osd_it_iam_get(const struct lu_env *env,
6156 struct dt_it *di, const struct dt_key *key)
6158 struct osd_thread_info *oti = osd_oti_get(env);
6159 struct osd_it_iam *it = (struct osd_it_iam *)di;
6161 if (fid_is_quota(lu_object_fid(&it->oi_obj->oo_dt.do_lu))) {
6162 /* swab quota uid/gid */
6163 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
6164 key = (struct dt_key *)&oti->oti_quota_id;
6167 return iam_it_get(&it->oi_it, (const struct iam_key *)key);
6173 * \param di osd iterator
6175 static void osd_it_iam_put(const struct lu_env *env, struct dt_it *di)
6177 struct osd_it_iam *it = (struct osd_it_iam *)di;
6179 iam_it_put(&it->oi_it);
6183 * Move iterator by one record
6185 * \param di osd iterator
6187 * \retval +1 end of container reached
6189 * \retval -ve failure
6192 static int osd_it_iam_next(const struct lu_env *env, struct dt_it *di)
6194 struct osd_it_iam *it = (struct osd_it_iam *)di;
6196 return iam_it_next(&it->oi_it);
6200 * Return pointer to the key under iterator.
6203 static struct dt_key *osd_it_iam_key(const struct lu_env *env,
6204 const struct dt_it *di)
6206 struct osd_thread_info *oti = osd_oti_get(env);
6207 struct osd_it_iam *it = (struct osd_it_iam *)di;
6208 struct osd_object *obj = it->oi_obj;
6211 key = (struct dt_key *)iam_it_key_get(&it->oi_it);
6213 if (!IS_ERR(key) && fid_is_quota(lu_object_fid(&obj->oo_dt.do_lu))) {
6214 /* swab quota uid/gid */
6215 oti->oti_quota_id = le64_to_cpu(*((__u64 *)key));
6216 key = (struct dt_key *)&oti->oti_quota_id;
6223 * Return size of key under iterator (in bytes)
6226 static int osd_it_iam_key_size(const struct lu_env *env, const struct dt_it *di)
6228 struct osd_it_iam *it = (struct osd_it_iam *)di;
6230 return iam_it_key_size(&it->oi_it);
6234 osd_it_append_attrs(struct lu_dirent *ent, int len, __u16 type)
6236 /* check if file type is required */
6237 if (ent->lde_attrs & LUDA_TYPE) {
6238 struct luda_type *lt;
6239 int align = sizeof(*lt) - 1;
6241 len = (len + align) & ~align;
6242 lt = (struct luda_type *)(ent->lde_name + len);
6243 lt->lt_type = cpu_to_le16(DTTOIF(type));
6246 ent->lde_attrs = cpu_to_le32(ent->lde_attrs);
6250 * build lu_dirent from a backend fs dirent.
6254 osd_it_pack_dirent(struct lu_dirent *ent, struct lu_fid *fid, __u64 offset,
6255 char *name, __u16 namelen, __u16 type, __u32 attr)
6257 ent->lde_attrs = attr | LUDA_FID;
6258 fid_cpu_to_le(&ent->lde_fid, fid);
6260 ent->lde_hash = cpu_to_le64(offset);
6261 ent->lde_reclen = cpu_to_le16(lu_dirent_calc_size(namelen, attr));
6263 strncpy(ent->lde_name, name, namelen);
6264 ent->lde_name[namelen] = '\0';
6265 ent->lde_namelen = cpu_to_le16(namelen);
6267 /* append lustre attributes */
6268 osd_it_append_attrs(ent, namelen, type);
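/*
 * Sketch (illustrative only) of how a consumer would walk a buffer of
 * lu_dirent records packed by osd_it_pack_dirent() above: every record
 * stores its own size in lde_reclen (little-endian), so the next entry
 * starts lde_reclen bytes further on. The helper name is assumed.
 */
static inline struct lu_dirent *osd_lu_dirent_next_sketch(struct lu_dirent *ent)
{
	return (struct lu_dirent *)((char *)ent +
				    le16_to_cpu(ent->lde_reclen));
}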
6272 * Return pointer to the record under iterator.
6274 static int osd_it_iam_rec(const struct lu_env *env,
6275 const struct dt_it *di,
6276 struct dt_rec *dtrec, __u32 attr)
6278 struct osd_it_iam *it = (struct osd_it_iam *)di;
6279 struct osd_thread_info *info = osd_oti_get(env);
6283 if (S_ISDIR(it->oi_obj->oo_inode->i_mode)) {
6284 const struct osd_fid_pack *rec;
6285 struct lu_fid *fid = &info->oti_fid;
6286 struct lu_dirent *lde = (struct lu_dirent *)dtrec;
6292 name = (char *)iam_it_key_get(&it->oi_it);
6294 RETURN(PTR_ERR(name));
6296 namelen = iam_it_key_size(&it->oi_it);
6298 rec = (const struct osd_fid_pack *)iam_it_rec_get(&it->oi_it);
6300 RETURN(PTR_ERR(rec));
6302 rc = osd_fid_unpack(fid, rec);
6306 hash = iam_it_store(&it->oi_it);
6308 /* IAM does not store object type in IAM index (dir) */
6309 osd_it_pack_dirent(lde, fid, hash, name, namelen,
6311 } else if (fid_is_quota(lu_object_fid(&it->oi_obj->oo_dt.do_lu))) {
6312 iam_reccpy(&it->oi_it.ii_path.ip_leaf,
6313 (struct iam_rec *)dtrec);
6314 osd_quota_unpack(it->oi_obj, dtrec);
6316 iam_reccpy(&it->oi_it.ii_path.ip_leaf,
6317 (struct iam_rec *)dtrec);
6324 * Returns cookie for current Iterator position.
6326 static __u64 osd_it_iam_store(const struct lu_env *env, const struct dt_it *di)
6328 struct osd_it_iam *it = (struct osd_it_iam *)di;
6330 return iam_it_store(&it->oi_it);
6334 * Restore iterator from cookie.
6336 * \param di osd iterator
6337 * \param hash Iterator location cookie
6339 * \retval +ve di points to record with least key not larger than key.
6340 * \retval 0 di points to exact matched key
6341 * \retval -ve failure
6344 static int osd_it_iam_load(const struct lu_env *env,
6345 const struct dt_it *di, __u64 hash)
6347 struct osd_it_iam *it = (struct osd_it_iam *)di;
6349 return iam_it_load(&it->oi_it, hash);
6352 static const struct dt_index_operations osd_index_iam_ops = {
6353 .dio_lookup = osd_index_iam_lookup,
6354 .dio_declare_insert = osd_index_declare_iam_insert,
6355 .dio_insert = osd_index_iam_insert,
6356 .dio_declare_delete = osd_index_declare_iam_delete,
6357 .dio_delete = osd_index_iam_delete,
6359 .init = osd_it_iam_init,
6360 .fini = osd_it_iam_fini,
6361 .get = osd_it_iam_get,
6362 .put = osd_it_iam_put,
6363 .next = osd_it_iam_next,
6364 .key = osd_it_iam_key,
6365 .key_size = osd_it_iam_key_size,
6366 .rec = osd_it_iam_rec,
6367 .store = osd_it_iam_store,
6368 .load = osd_it_iam_load
6374 * Creates or initializes iterator context.
6376 * \retval struct osd_it_ea, iterator structure on success
6379 static struct dt_it *osd_it_ea_init(const struct lu_env *env,
6380 struct dt_object *dt,
6383 struct osd_object *obj = osd_dt_obj(dt);
6384 struct osd_thread_info *info = osd_oti_get(env);
6385 struct osd_it_ea *oie;
6387 struct lu_object *lo = &dt->do_lu;
6388 struct dentry *obj_dentry;
6392 if (!dt_object_exists(dt) || obj->oo_destroyed)
6393 RETURN(ERR_PTR(-ENOENT));
6395 OBD_SLAB_ALLOC_PTR_GFP(oie, osd_itea_cachep, GFP_NOFS);
6397 RETURN(ERR_PTR(-ENOMEM));
6398 obj_dentry = &oie->oie_dentry;
6400 obj_dentry->d_inode = obj->oo_inode;
6401 obj_dentry->d_sb = osd_sb(osd_obj2dev(obj));
6402 obj_dentry->d_name.hash = 0;
6404 oie->oie_rd_dirent = 0;
6405 oie->oie_it_dirent = 0;
6406 oie->oie_dirent = NULL;
6407 if (unlikely(!info->oti_it_ea_buf_used)) {
6408 oie->oie_buf = info->oti_it_ea_buf;
6409 info->oti_it_ea_buf_used = 1;
6411 OBD_ALLOC(oie->oie_buf, OSD_IT_EA_BUFSIZE);
6412 if (oie->oie_buf == NULL)
6413 RETURN(ERR_PTR(-ENOMEM));
6417 file = &oie->oie_file;
6419 /* Only FMODE_64BITHASH or FMODE_32BITHASH should be set, NOT both. */
6420 if (attr & LUDA_64BITHASH)
6421 file->f_mode = FMODE_64BITHASH;
6423 file->f_mode = FMODE_32BITHASH;
6424 file->f_path.dentry = obj_dentry;
6425 file->f_mapping = obj->oo_inode->i_mapping;
6426 file->f_op = obj->oo_inode->i_fop;
6427 set_file_inode(file, obj->oo_inode);
6430 RETURN((struct dt_it *)oie);
6434 * Destroys or finishes the iterator context.
6436 * \param di iterator structure to be destroyed
6438 static void osd_it_ea_fini(const struct lu_env *env, struct dt_it *di)
6440 struct osd_thread_info *info = osd_oti_get(env);
6441 struct osd_it_ea *oie = (struct osd_it_ea *)di;
6442 struct osd_object *obj = oie->oie_obj;
6443 struct inode *inode = obj->oo_inode;
6446 oie->oie_file.f_op->release(inode, &oie->oie_file);
6447 osd_object_put(env, obj);
6448 if (unlikely(oie->oie_buf != info->oti_it_ea_buf))
6449 OBD_FREE(oie->oie_buf, OSD_IT_EA_BUFSIZE);
6451 info->oti_it_ea_buf_used = 0;
6452 OBD_SLAB_FREE_PTR(oie, osd_itea_cachep);
6457 * It positions the iterator at the given key, so that the next lookup
6458 * continues from that key. In other words, it is similar to dio_it->load()
6459 * but based on a key rather than a file position.
6461 * As a special convention, osd_it_ea_get(env, di, "") has to rewind the iterator to the beginning.
6464 * TODO: Presently return +1 considering it is only used by mdd_dir_is_empty().
6466 static int osd_it_ea_get(const struct lu_env *env,
6467 struct dt_it *di, const struct dt_key *key)
6469 struct osd_it_ea *it = (struct osd_it_ea *)di;
6472 LASSERT(((const char *)key)[0] == '\0');
6473 it->oie_file.f_pos = 0;
6474 it->oie_rd_dirent = 0;
6475 it->oie_it_dirent = 0;
6476 it->oie_dirent = NULL;
6484 static void osd_it_ea_put(const struct lu_env *env, struct dt_it *di)
6488 struct osd_filldir_cbs {
6489 #ifdef HAVE_DIR_CONTEXT
6490 struct dir_context ctx;
6492 struct osd_it_ea *it;
6495 * It is called internally by ->readdir(). It fills the
6496 * iterator's in-memory data structure with the required
6497 * information, i.e. name, namelen, rec_size, etc.
6499 * \param buf buffer in which the information is to be filled
6500 * \param name name of the file in the given directory
6502 * \retval 0 on success
6503 * \retval 1 on buffer full
6505 #ifdef HAVE_FILLDIR_USE_CTX
6506 static int osd_ldiskfs_filldir(struct dir_context *buf,
6508 static int osd_ldiskfs_filldir(void *buf,
6510 const char *name, int namelen,
6511 loff_t offset, __u64 ino, unsigned int d_type)
6513 struct osd_it_ea *it = ((struct osd_filldir_cbs *)buf)->it;
6514 struct osd_object *obj = it->oie_obj;
6515 struct osd_it_ea_dirent *ent = it->oie_dirent;
6516 struct lu_fid *fid = &ent->oied_fid;
6517 struct osd_fid_pack *rec;
6521 /* this should never happen */
6522 if (unlikely(namelen == 0 || namelen > LDISKFS_NAME_LEN)) {
6523 CERROR("ldiskfs return invalid namelen %d\n", namelen);
6527 if ((void *)ent - it->oie_buf + sizeof(*ent) + namelen >
6531 /* "." is just the object itself. */
6532 if (namelen == 1 && name[0] == '.') {
6533 *fid = obj->oo_dt.do_lu.lo_header->loh_fid;
6534 } else if (d_type & LDISKFS_DIRENT_LUFID) {
6535 rec = (struct osd_fid_pack *)(name + namelen + 1);
6536 if (osd_fid_unpack(fid, rec) != 0)
6541 d_type &= ~LDISKFS_DIRENT_LUFID;
6543 /* Do NOT export the local root. */
6544 if (unlikely(osd_sb(osd_obj2dev(obj))->s_root->d_inode->i_ino == ino)) {
6545 ino = obj->oo_inode->i_ino;
6546 *fid = obj->oo_dt.do_lu.lo_header->loh_fid;
6549 ent->oied_ino = ino;
6550 ent->oied_off = offset;
6551 ent->oied_namelen = namelen;
6552 ent->oied_type = d_type;
6554 memcpy(ent->oied_name, name, namelen);
6556 it->oie_rd_dirent++;
6557 it->oie_dirent = (void *)ent + cfs_size_round(sizeof(*ent) + namelen);
6562 * Calls ->readdir() to load directory entries one at a time
6563 * and store them in the iterator's in-memory data structure.
6565 * \param di iterator's in memory structure
6567 * \retval 0 on success
6568 * \retval -ve on error
6569 * \retval +1 reached the end of the directory
6571 static int osd_ldiskfs_it_fill(const struct lu_env *env,
6572 const struct dt_it *di)
6574 struct osd_it_ea *it = (struct osd_it_ea *)di;
6575 struct osd_object *obj = it->oie_obj;
6576 struct inode *inode = obj->oo_inode;
6577 struct htree_lock *hlock = NULL;
6578 struct file *filp = &it->oie_file;
6580 struct osd_filldir_cbs buf = {
6581 #ifdef HAVE_DIR_CONTEXT
6582 .ctx.actor = osd_ldiskfs_filldir,
6588 it->oie_dirent = it->oie_buf;
6589 it->oie_rd_dirent = 0;
6591 if (obj->oo_hl_head != NULL) {
6592 hlock = osd_oti_get(env)->oti_hlock;
6593 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
6594 inode, LDISKFS_HLOCK_READDIR);
6596 down_read(&obj->oo_ext_idx_sem);
6599 #ifdef HAVE_DIR_CONTEXT
6600 buf.ctx.pos = filp->f_pos;
6601 #ifdef HAVE_ITERATE_SHARED
6602 rc = inode->i_fop->iterate_shared(filp, &buf.ctx);
6604 rc = inode->i_fop->iterate(filp, &buf.ctx);
6606 filp->f_pos = buf.ctx.pos;
6608 rc = inode->i_fop->readdir(filp, &buf, osd_ldiskfs_filldir);
6612 ldiskfs_htree_unlock(hlock);
6614 up_read(&obj->oo_ext_idx_sem);
6616 if (it->oie_rd_dirent == 0) {
6618 * If we did not read any dirents, it means we have reached
6619 * the end of the directory
6621 it->oie_file.f_pos = ldiskfs_get_htree_eof(&it->oie_file);
6625 it->oie_dirent = it->oie_buf;
6626 it->oie_it_dirent = 1;
6633 * It calls osd_ldiskfs_it_fill() which uses ->readdir()
6634 * to load directory entries one at a time and store them in
6635 * the iterator's in-memory data structure.
6637 * \param di iterator's in memory structure
6639 * \retval +ve iterator reached the end
6640 * \retval 0 iterator has not reached the end
6641 * \retval -ve on error
6643 static int osd_it_ea_next(const struct lu_env *env, struct dt_it *di)
6645 struct osd_it_ea *it = (struct osd_it_ea *)di;
6650 if (it->oie_it_dirent < it->oie_rd_dirent) {
6652 (void *)it->oie_dirent +
6653 cfs_size_round(sizeof(struct osd_it_ea_dirent) +
6654 it->oie_dirent->oied_namelen);
6655 it->oie_it_dirent++;
6658 if (it->oie_file.f_pos == ldiskfs_get_htree_eof(&it->oie_file))
6661 rc = osd_ldiskfs_it_fill(env, di);
6668 * Returns the key at current position from iterator's in memory structure.
6670 * \param di iterator's in memory structure
6672 * \retval key i.e. struct dt_key on success
6674 static struct dt_key *osd_it_ea_key(const struct lu_env *env,
6675 const struct dt_it *di)
6677 struct osd_it_ea *it = (struct osd_it_ea *)di;
6679 return (struct dt_key *)it->oie_dirent->oied_name;
6683 * Returns key's size at current position from iterator's in memory structure.
6685 * \param di iterator's in memory structure
6687 * \retval key_size i.e. struct dt_key on success
6689 static int osd_it_ea_key_size(const struct lu_env *env, const struct dt_it *di)
6691 struct osd_it_ea *it = (struct osd_it_ea *)di;
6693 return it->oie_dirent->oied_namelen;
6696 static inline bool osd_dotdot_has_space(struct ldiskfs_dir_entry_2 *de)
6698 if (LDISKFS_DIR_REC_LEN(de) >=
6699 __LDISKFS_DIR_REC_LEN(2 + 1 + sizeof(struct osd_fid_pack)))
6706 osd_dirent_has_space(struct ldiskfs_dir_entry_2 *de, __u16 namelen,
6707 unsigned int blocksize, bool dotdot)
6710 return osd_dotdot_has_space(de);
6712 if (ldiskfs_rec_len_from_disk(de->rec_len, blocksize) >=
6713 __LDISKFS_DIR_REC_LEN(namelen + 1 + sizeof(struct osd_fid_pack)))
6720 osd_dirent_reinsert(const struct lu_env *env, struct osd_device *dev,
6721 handle_t *jh, struct dentry *dentry,
6722 const struct lu_fid *fid, struct buffer_head *bh,
6723 struct ldiskfs_dir_entry_2 *de, struct htree_lock *hlock,
6726 struct inode *dir = dentry->d_parent->d_inode;
6727 struct inode *inode = dentry->d_inode;
6728 struct osd_fid_pack *rec;
6729 struct ldiskfs_dentry_param *ldp;
6730 int namelen = dentry->d_name.len;
6732 struct osd_thread_info *info = osd_oti_get(env);
6736 if (!ldiskfs_has_feature_dirdata(inode->i_sb))
6739 /* There is enough space to hold the FID-in-dirent. */
6740 if (osd_dirent_has_space(de, namelen, dir->i_sb->s_blocksize, dotdot)) {
6741 rc = ldiskfs_journal_get_write_access(jh, bh);
6745 de->name[namelen] = 0;
6746 rec = (struct osd_fid_pack *)(de->name + namelen + 1);
6747 rec->fp_len = sizeof(struct lu_fid) + 1;
6748 fid_cpu_to_be((struct lu_fid *)rec->fp_area, fid);
6749 de->file_type |= LDISKFS_DIRENT_LUFID;
6750 rc = ldiskfs_handle_dirty_metadata(jh, NULL, bh);
6757 rc = ldiskfs_delete_entry(jh, dir, de, bh);
6761 ldp = (struct ldiskfs_dentry_param *)osd_oti_get(env)->oti_ldp;
6762 osd_get_ldiskfs_dirent_param(ldp, fid);
6763 dentry->d_fsdata = (void *)ldp;
6764 ll_vfs_dq_init(dir);
6765 rc = osd_ldiskfs_add_entry(info, dev, jh, dentry, inode, hlock);
6767 * It is too bad, we cannot reinsert the name entry back.
6768 * That means we lose it!
6771 CDEBUG(D_LFSCK, "%s: fail to reinsert the dirent, "
6772 "dir = %lu/%u, name = %.*s, "DFID": rc = %d\n",
6773 osd_ino2name(inode),
6774 dir->i_ino, dir->i_generation, namelen,
6775 dentry->d_name.name, PFID(fid), rc);
6781 osd_dirent_check_repair(const struct lu_env *env, struct osd_object *obj,
6782 struct osd_it_ea *it, struct lu_fid *fid,
6783 struct osd_inode_id *id, __u32 *attr)
6785 struct osd_thread_info *info = osd_oti_get(env);
6786 struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
6787 struct osd_device *dev = osd_obj2dev(obj);
6788 struct super_block *sb = osd_sb(dev);
6789 const char *devname = osd_name(dev);
6790 struct osd_it_ea_dirent *ent = it->oie_dirent;
6791 struct inode *dir = obj->oo_inode;
6792 struct htree_lock *hlock = NULL;
6793 struct buffer_head *bh = NULL;
6794 handle_t *jh = NULL;
6795 struct ldiskfs_dir_entry_2 *de;
6796 struct dentry *dentry;
6797 struct inode *inode;
6798 const struct lu_fid *pfid = lu_object_fid(&obj->oo_dt.do_lu);
6801 bool dotdot = false;
6806 if (ent->oied_name[0] == '.') {
6807 if (ent->oied_namelen == 1)
6810 if (ent->oied_namelen == 2 && ent->oied_name[1] == '.')
6814 osd_id_gen(id, ent->oied_ino, OSD_OII_NOGEN);
6815 inode = osd_iget(info, dev, id);
6816 if (IS_ERR(inode)) {
6817 rc = PTR_ERR(inode);
6818 if (rc == -ENOENT || rc == -ESTALE) {
6820 * Maybe dangling name entry, or
6821 * corrupted directory entry.
6823 *attr |= LUDA_UNKNOWN;
6826 CDEBUG(D_LFSCK, "%s: fail to iget() for dirent "
6827 "check_repair, dir = %lu/%u, name = %.*s, "
6828 "ino = %llu, rc = %d\n",
6829 devname, dir->i_ino, dir->i_generation,
6830 ent->oied_namelen, ent->oied_name,
6837 dentry = osd_child_dentry_by_inode(env, dir, ent->oied_name,
6839 rc = osd_get_lma(info, inode, dentry, &info->oti_ost_attrs);
6840 if (rc == -ENODATA || !fid_is_sane(&lma->lma_self_fid))
6846 * We need to ensure that the name entry is still valid, because it
6847 * may already have been removed or renamed by someone else.
6849 * The unlink or rename operation starts the journal before taking the
6850 * PDO lock, so to avoid deadlock we also need to start the journal
6851 * handle before taking the related PDO lock here. But because we do
6852 * not know whether there will be something to repair before taking the
6853 * PDO lock, we just start the journal unconditionally.
6855 * We may need to remove the name entry first, then insert it back.
6856 * One credit is for the user quota file update.
6857 * One credit is for the group quota file update.
6858 * Two credits are for the dirty inode.
6860 credits = osd_dto_credits_noquota[DTO_INDEX_DELETE] +
6861 osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1 + 1 + 2;
6863 if (dev->od_dirent_journal != 0) {
6866 jh = osd_journal_start_sb(sb, LDISKFS_HT_MISC, credits);
6869 CDEBUG(D_LFSCK, "%s: fail to start trans for dirent "
6870 "check_repair, dir = %lu/%u, credits = %d, "
6871 "name = %.*s, ino = %llu: rc = %d\n",
6872 devname, dir->i_ino, dir->i_generation, credits,
6873 ent->oied_namelen, ent->oied_name,
6876 GOTO(out_inode, rc);
6879 if (obj->oo_hl_head != NULL) {
6880 hlock = osd_oti_get(env)->oti_hlock;
6882 * "0" means exclusive lock for the whole directory.
6883 * We need to prevent others access such name entry
6884 * during the delete + insert. Neither HLOCK_ADD nor
6885 * HLOCK_DEL cannot guarantee the atomicity.
6887 ldiskfs_htree_lock(hlock, obj->oo_hl_head, dir, 0);
6889 down_write(&obj->oo_ext_idx_sem);
6892 if (obj->oo_hl_head != NULL) {
6893 hlock = osd_oti_get(env)->oti_hlock;
6894 ldiskfs_htree_lock(hlock, obj->oo_hl_head, dir,
6895 LDISKFS_HLOCK_LOOKUP);
6897 down_read(&obj->oo_ext_idx_sem);
6901 bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
6902 if (IS_ERR(bh) || le32_to_cpu(de->inode) != inode->i_ino) {
6903 *attr |= LUDA_IGNORE;
6909 * For the dotdot entry, if there is not enough space to hold the
6910 * FID-in-dirent, just leave it there. This only happens when the
6911 * device was upgraded from 1.8 or restored from MDT file-level backup.
6912 * For the whole directory, only the dotdot entry has no FID-in-dirent
6913 * and needs to get its FID from the LMA during readdir; it will not affect the
6916 if (dotdot && !osd_dotdot_has_space(de)) {
6917 *attr |= LUDA_UNKNOWN;
6923 if (lu_fid_eq(fid, &lma->lma_self_fid))
6926 if (unlikely(lma->lma_compat & LMAC_NOT_IN_OI)) {
6927 struct lu_fid *tfid = &lma->lma_self_fid;
6929 if (likely(dotdot &&
6930 fid_seq(tfid) == FID_SEQ_LOCAL_FILE &&
6931 fid_oid(tfid) == REMOTE_PARENT_DIR_OID)) {
6933 * It must be REMOTE_PARENT_DIR, acting as the
6934 * 'dotdot' entry of a remote directory.
6936 *attr |= LUDA_IGNORE;
6938 CDEBUG(D_LFSCK, "%s: expect remote agent "
6939 "parent directory, but got %.*s under "
6940 "dir = %lu/%u with the FID "DFID"\n",
6941 devname, ent->oied_namelen,
6942 ent->oied_name, dir->i_ino,
6943 dir->i_generation, PFID(tfid));
6945 *attr |= LUDA_UNKNOWN;
6952 if (!fid_is_zero(fid)) {
6953 rc = osd_verify_ent_by_linkea(env, inode, pfid, ent->oied_name,
6955 if (rc == -ENOENT ||
6957 !(dev->od_scrub.os_scrub.os_file.sf_flags & SF_UPGRADE))) {
6959 * The linkEA does not recognize the dirent entry;
6960 * that may be because the dirent entry is corrupted
6961 * and points to another inode.
6963 CDEBUG(D_LFSCK, "%s: the target inode does not "
6964 "recognize the dirent, dir = %lu/%u, "
6965 " name = %.*s, ino = %llu, "
6966 DFID": rc = %d\n", devname, dir->i_ino,
6967 dir->i_generation, ent->oied_namelen,
6968 ent->oied_name, ent->oied_ino, PFID(fid), rc);
6969 *attr |= LUDA_UNKNOWN;
6974 if (rc && rc != -ENODATA) {
6975 CDEBUG(D_LFSCK, "%s: fail to verify FID in the dirent, "
6976 "dir = %lu/%u, name = %.*s, ino = %llu, "
6977 DFID": rc = %d\n", devname, dir->i_ino,
6978 dir->i_generation, ent->oied_namelen,
6979 ent->oied_name, ent->oied_ino, PFID(fid), rc);
6980 *attr |= LUDA_UNKNOWN;
6988 * The linkEA recognizes the dirent entry, so the FID-in-LMA is
6989 * valid and trusted, regardless of whether fid_is_sane(fid) holds.
6991 if (*attr & LUDA_VERIFY_DRYRUN) {
6992 *fid = lma->lma_self_fid;
6993 *attr |= LUDA_REPAIR;
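/*
 * A repair is needed but no transaction was started on this pass
 * (od_dirent_journal was still 0): note that a journal handle is
 * required, drop the locks, and redo the check with the journal enabled.
 */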
7000 dev->od_dirent_journal = 1;
7001 if (hlock != NULL) {
7002 ldiskfs_htree_unlock(hlock);
7005 up_read(&obj->oo_ext_idx_sem);
7011 *fid = lma->lma_self_fid;
7013 /* Update or append the FID-in-dirent. */
7014 rc = osd_dirent_reinsert(env, dev, jh, dentry, fid,
7015 bh, de, hlock, dotdot);
7017 *attr |= LUDA_REPAIR;
7019 CDEBUG(D_LFSCK, "%s: fail to re-insert FID after "
7020 "the dirent, dir = %lu/%u, name = %.*s, "
7021 "ino = %llu, "DFID": rc = %d\n",
7022 devname, dir->i_ino, dir->i_generation,
7023 ent->oied_namelen, ent->oied_name,
7024 ent->oied_ino, PFID(fid), rc);
7026 /* lma is NULL, trust the FID-in-dirent if it is valid. */
7027 if (*attr & LUDA_VERIFY_DRYRUN) {
7028 if (fid_is_sane(fid)) {
7029 *attr |= LUDA_REPAIR;
7030 } else if (dev->od_index == 0) {
7031 lu_igif_build(fid, inode->i_ino,
7032 inode->i_generation);
7033 *attr |= LUDA_UPGRADE;
7041 dev->od_dirent_journal = 1;
7042 if (hlock != NULL) {
7043 ldiskfs_htree_unlock(hlock);
7046 up_read(&obj->oo_ext_idx_sem);
7053 if (unlikely(fid_is_sane(fid))) {
7055 * FID-in-dirent exists, but FID-in-LMA is lost.
7056 * Trust the FID-in-dirent, and add FID-in-LMA.
7058 rc = osd_ea_fid_set(info, inode, fid, 0, 0);
7060 *attr |= LUDA_REPAIR;
7062 CDEBUG(D_LFSCK, "%s: fail to set LMA for "
7063 "update dirent, dir = %lu/%u, "
7064 "name = %.*s, ino = %llu, "
7066 devname, dir->i_ino, dir->i_generation,
7067 ent->oied_namelen, ent->oied_name,
7068 ent->oied_ino, PFID(fid), rc);
7069 } else if (dev->od_index == 0) {
7070 lu_igif_build(fid, inode->i_ino, inode->i_generation);
7072 * It is probably an IGIF object. Only append the
7073 * FID-in-dirent; the OI scrub will process the FID-in-LMA.
7075 rc = osd_dirent_reinsert(env, dev, jh, dentry, fid,
7076 bh, de, hlock, dotdot);
7078 *attr |= LUDA_UPGRADE;
7080 CDEBUG(D_LFSCK, "%s: fail to append IGIF "
7081 "after the dirent, dir = %lu/%u, "
7082 "name = %.*s, ino = %llu, "
7084 devname, dir->i_ino, dir->i_generation,
7085 ent->oied_namelen, ent->oied_name,
7086 ent->oied_ino, PFID(fid), rc);
7095 if (hlock != NULL) {
7096 ldiskfs_htree_unlock(hlock);
7098 if (dev->od_dirent_journal != 0)
7099 up_write(&obj->oo_ext_idx_sem);
7101 up_read(&obj->oo_ext_idx_sem);
7105 ldiskfs_journal_stop(jh);
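/*
 * If this pass did not modify anything, drop back to journal-less mode
 * so that subsequent entries do not pay the transaction cost.
 */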
7109 if (rc >= 0 && !dirty)
7110 dev->od_dirent_journal = 0;
7116 * Returns the value at the current position from the iterator's in-memory structure.
7118 * \param di struct osd_it_ea, iterator's in-memory structure
7119 * \param attr attributes requested for the dirent
7120 * \param lde lustre dirent
7122 * \retval 0 no error and \param lde has correct lustre dirent.
7123 * \retval -ve on error
7125 static inline int osd_it_ea_rec(const struct lu_env *env,
7126 const struct dt_it *di,
7127 struct dt_rec *dtrec, __u32 attr)
7129 struct osd_it_ea *it = (struct osd_it_ea *)di;
7130 struct osd_object *obj = it->oie_obj;
7131 struct osd_device *dev = osd_obj2dev(obj);
7132 struct osd_thread_info *oti = osd_oti_get(env);
7133 struct osd_inode_id *id = &oti->oti_id;
7134 struct lu_fid *fid = &it->oie_dirent->oied_fid;
7135 struct lu_dirent *lde = (struct lu_dirent *)dtrec;
7136 __u32 ino = it->oie_dirent->oied_ino;
7141 LASSERT(!is_remote_parent_ino(dev, obj->oo_inode->i_ino));
7143 if (attr & LUDA_VERIFY) {
7144 if (unlikely(is_remote_parent_ino(dev, ino))) {
7145 attr |= LUDA_IGNORE;
7147 * If the parent is on remote MDT, and there
7148 * is no FID-in-dirent, then we have to get
7149 * the parent FID from the linkEA.
7151 if (!fid_is_sane(fid) &&
7152 it->oie_dirent->oied_namelen == 2 &&
7153 it->oie_dirent->oied_name[0] == '.' &&
7154 it->oie_dirent->oied_name[1] == '.')
7155 osd_get_pfid_from_linkea(env, obj, fid);
7157 rc = osd_dirent_check_repair(env, obj, it, fid, id,
7161 if (!fid_is_sane(fid))
7162 attr |= LUDA_UNKNOWN;
7164 attr &= ~LU_DIRENT_ATTRS_MASK;
7165 if (!fid_is_sane(fid)) {
7166 bool is_dotdot = false;
7168 if (it->oie_dirent->oied_namelen == 2 &&
7169 it->oie_dirent->oied_name[0] == '.' &&
7170 it->oie_dirent->oied_name[1] == '.')
7173 * If the parent is on remote MDT, and there
7174 * is no FID-in-dirent, then we have to get
7175 * the parent FID from the linkEA.
7177 if (is_remote_parent_ino(dev, ino) && is_dotdot) {
7178 rc = osd_get_pfid_from_linkea(env, obj, fid);
7180 if (is_dotdot == false &&
7181 OBD_FAIL_CHECK(OBD_FAIL_FID_LOOKUP))
7184 rc = osd_ea_fid_get(env, obj, ino, fid, id);
7187 osd_id_gen(id, ino, OSD_OII_NOGEN);
7191 /* Pack the entry anyway, at least the offset is right. */
7192 osd_it_pack_dirent(lde, fid, it->oie_dirent->oied_off,
7193 it->oie_dirent->oied_name,
7194 it->oie_dirent->oied_namelen,
7195 it->oie_dirent->oied_type, attr);
7200 if (osd_remote_fid(env, dev, fid))
7203 if (likely(!(attr & (LUDA_IGNORE | LUDA_UNKNOWN)) && rc == 0))
7204 osd_add_oi_cache(oti, dev, id, fid);
7206 RETURN(rc > 0 ? 0 : rc);
7210 * Returns the record size at the current position.
7212 * This function will return the record (lu_dirent) size in bytes.
7214 * \param[in] env execution environment
7215 * \param[in] di iterator's in memory structure
7216 * \param[in] attr attribute of the entry, only requires LUDA_TYPE to
7217 * calculate the lu_dirent size.
7219 * \retval record size (in bytes and in memory) of the current lu_dirent
7222 static int osd_it_ea_rec_size(const struct lu_env *env, const struct dt_it *di,
7225 struct osd_it_ea *it = (struct osd_it_ea *)di;
7227 return lu_dirent_calc_size(it->oie_dirent->oied_namelen, attr);
7231 * Returns a cookie for the current position of the iterator head, so that
7232 * the user can use this cookie to load/start the iterator next time.
7234 * \param di iterator's in memory structure
7236 * \retval cookie for current position, on success
7238 static __u64 osd_it_ea_store(const struct lu_env *env, const struct dt_it *di)
7240 struct osd_it_ea *it = (struct osd_it_ea *)di;
7242 return it->oie_dirent->oied_off;
7246 * It calls osd_ldiskfs_it_fill(), which will use ->readdir()
7247 * to load one directory entry at a time and store it
7248 * in the iterator's in-memory data structure.
7250 * \param di struct osd_it_ea, iterator's in memory structure
7252 * \retval +ve on success
7253 * \retval -ve on error
7255 static int osd_it_ea_load(const struct lu_env *env,
7256 const struct dt_it *di, __u64 hash)
7258 struct osd_it_ea *it = (struct osd_it_ea *)di;
7262 it->oie_file.f_pos = hash;
7264 rc = osd_ldiskfs_it_fill(env, di);
7275 * Index lookup function for interoperability mode (b11826).
7277 * \param key key, i.e. the file name to be searched
7279 * \retval +ve, on success
7280 * \retval -ve, on error
7282 static int osd_index_ea_lookup(const struct lu_env *env, struct dt_object *dt,
7283 struct dt_rec *rec, const struct dt_key *key)
7285 struct osd_object *obj = osd_dt_obj(dt);
7290 LASSERT(S_ISDIR(obj->oo_inode->i_mode));
7291 LINVRNT(osd_invariant(obj));
7293 rc = osd_ea_lookup_rec(env, obj, rec, key);
7300 * Index and Iterator operations for interoperability
7301 * mode (i.e. to run a 2.0 MDS on a 1.8 disk) (b11826)
7303 static const struct dt_index_operations osd_index_ea_ops = {
7304 .dio_lookup = osd_index_ea_lookup,
7305 .dio_declare_insert = osd_index_declare_ea_insert,
7306 .dio_insert = osd_index_ea_insert,
7307 .dio_declare_delete = osd_index_declare_ea_delete,
7308 .dio_delete = osd_index_ea_delete,
7310 .init = osd_it_ea_init,
7311 .fini = osd_it_ea_fini,
7312 .get = osd_it_ea_get,
7313 .put = osd_it_ea_put,
7314 .next = osd_it_ea_next,
7315 .key = osd_it_ea_key,
7316 .key_size = osd_it_ea_key_size,
7317 .rec = osd_it_ea_rec,
7318 .rec_size = osd_it_ea_rec_size,
7319 .store = osd_it_ea_store,
7320 .load = osd_it_ea_load
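/*
 * A typical consumer drives the iterator callbacks above roughly as
 * follows (a sketch, not code from this file): ->init() the iterator,
 * ->get()/->load() to position it, then loop over ->rec()/->rec_size()
 * and ->next() until the directory is exhausted, and finally ->put()
 * and ->fini().
 */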
7324 static void *osd_key_init(const struct lu_context *ctx,
7325 struct lu_context_key *key)
7327 struct osd_thread_info *info;
7329 OBD_ALLOC_PTR(info);
7331 return ERR_PTR(-ENOMEM);
7333 OBD_ALLOC(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
7334 if (info->oti_it_ea_buf == NULL)
7337 info->oti_env = container_of(ctx, struct lu_env, le_ctx);
7339 info->oti_hlock = ldiskfs_htree_lock_alloc();
7340 if (info->oti_hlock == NULL)
7346 OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
7349 return ERR_PTR(-ENOMEM);
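/*
 * Release everything attached to the per-thread info: oti_inode, the
 * htree lock, the readdir buffer, any direct-I/O pages, the I/O
 * descriptor buffers and the OI insert cache.
 */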
7352 static void osd_key_fini(const struct lu_context *ctx,
7353 struct lu_context_key *key, void *data)
7355 struct osd_thread_info *info = data;
7356 struct ldiskfs_inode_info *lli = LDISKFS_I(info->oti_inode);
7357 struct osd_idmap_cache *idc = info->oti_ins_cache;
7359 if (info->oti_dio_pages) {
7361 for (i = 0; i < PTLRPC_MAX_BRW_PAGES; i++) {
7362 if (info->oti_dio_pages[i])
7363 __free_page(info->oti_dio_pages[i]);
7365 OBD_FREE(info->oti_dio_pages,
7366 sizeof(struct page *) * PTLRPC_MAX_BRW_PAGES);
7369 if (info->oti_inode != NULL)
7371 if (info->oti_hlock != NULL)
7372 ldiskfs_htree_lock_free(info->oti_hlock);
7373 OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
7374 lu_buf_free(&info->oti_iobuf.dr_pg_buf);
7375 lu_buf_free(&info->oti_iobuf.dr_bl_buf);
7376 lu_buf_free(&info->oti_iobuf.dr_lnb_buf);
7377 lu_buf_free(&info->oti_big_buf);
7379 LASSERT(info->oti_ins_cache_size > 0);
7380 OBD_FREE(idc, sizeof(*idc) * info->oti_ins_cache_size);
7381 info->oti_ins_cache = NULL;
7382 info->oti_ins_cache_size = 0;
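/* Per-context exit hook: assert that the thread leaked no locks or transactions. */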
7387 static void osd_key_exit(const struct lu_context *ctx,
7388 struct lu_context_key *key, void *data)
7390 struct osd_thread_info *info = data;
7392 LASSERT(info->oti_r_locks == 0);
7393 LASSERT(info->oti_w_locks == 0);
7394 LASSERT(info->oti_txns == 0);
7397 /* type constructor/destructor: osd_type_init, osd_type_fini */
7398 LU_TYPE_INIT_FINI(osd, &osd_key);
7400 struct lu_context_key osd_key = {
7401 .lct_tags = LCT_DT_THREAD | LCT_MD_THREAD | LCT_MG_THREAD | LCT_LOCAL,
7402 .lct_init = osd_key_init,
7403 .lct_fini = osd_key_fini,
7404 .lct_exit = osd_key_exit
7408 static int osd_device_init(const struct lu_env *env, struct lu_device *d,
7409 const char *name, struct lu_device *next)
7411 struct osd_device *osd = osd_dev(d);
7413 if (strlcpy(osd->od_svname, name, sizeof(osd->od_svname)) >=
7414 sizeof(osd->od_svname))
7416 return osd_procfs_init(osd, name);
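/*
 * Set up the sequence client used to allocate FIDs on this device.
 * Nothing to do for an OST or if it is already initialized. On the
 * sequence controller (MDT0, ss_node_id == 0) the metadata sequence is
 * allocated locally; other nodes obtain it after connecting to MDT0.
 */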
7419 static int osd_fid_init(const struct lu_env *env, struct osd_device *osd)
7421 struct seq_server_site *ss = osd_seq_site(osd);
7426 if (osd->od_is_ost || osd->od_cl_seq != NULL)
7429 if (unlikely(ss == NULL))
7432 OBD_ALLOC_PTR(osd->od_cl_seq);
7433 if (osd->od_cl_seq == NULL)
7436 rc = seq_client_init(osd->od_cl_seq, NULL, LUSTRE_SEQ_METADATA,
7437 osd->od_svname, ss->ss_server_seq);
7439 OBD_FREE_PTR(osd->od_cl_seq);
7440 osd->od_cl_seq = NULL;
7444 if (ss->ss_node_id == 0) {
7446 * If the OSD is on the sequence controller (MDT0), then allocate
7447 * the sequence here; otherwise allocate the sequence after connecting
7448 * to MDT0 (see mdt_register_lwp_callback()).
7450 rc = seq_server_alloc_meta(osd->od_cl_seq->lcs_srv,
7451 &osd->od_cl_seq->lcs_space, env);
7457 static void osd_fid_fini(const struct lu_env *env, struct osd_device *osd)
7459 if (osd->od_cl_seq == NULL)
7462 seq_client_fini(osd->od_cl_seq);
7463 OBD_FREE_PTR(osd->od_cl_seq);
7464 osd->od_cl_seq = NULL;
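/*
 * Stop the per-device services: the metadata and data quota slave
 * instances, then the FID client and the scrub/OI machinery.
 */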
7467 static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
7471 /* shutdown quota slave instance associated with the device */
7472 if (o->od_quota_slave_md != NULL) {
7473 struct qsd_instance *qsd = o->od_quota_slave_md;
7475 o->od_quota_slave_md = NULL;
7479 if (o->od_quota_slave_dt != NULL) {
7480 struct qsd_instance *qsd = o->od_quota_slave_dt;
7482 o->od_quota_slave_dt = NULL;
7486 osd_fid_fini(env, o);
7487 osd_scrub_cleanup(env, o);
7492 static void osd_umount(const struct lu_env *env, struct osd_device *o)
7496 if (o->od_mnt != NULL) {
7497 shrink_dcache_sb(osd_sb(o));
7498 osd_sync(env, &o->od_dt_dev);
7507 static int osd_mount(const struct lu_env *env,
7508 struct osd_device *o, struct lustre_cfg *cfg)
7510 const char *name = lustre_cfg_string(cfg, 0);
7511 const char *dev = lustre_cfg_string(cfg, 1);
7513 unsigned long page, s_flags, lmd_flags = 0;
7514 struct page *__page;
7515 struct file_system_type *type;
7516 char *options = NULL;
7518 struct osd_thread_info *info = osd_oti_get(env);
7519 struct lu_fid *fid = &info->oti_fid;
7520 struct inode *inode;
7521 int rc = 0, force_over_512tb = 0;
7525 if (o->od_mnt != NULL)
7528 if (strlen(dev) >= sizeof(o->od_mntdev))
7530 strcpy(o->od_mntdev, dev);
7532 str = lustre_cfg_string(cfg, 2);
7533 s_flags = simple_strtoul(str, NULL, 0);
7534 str = strstr(str, ":");
7536 lmd_flags = simple_strtoul(str + 1, NULL, 0);
7537 opts = lustre_cfg_string(cfg, 3);
7539 if (opts == NULL || strstr(opts, "bigendian_extents") == NULL) {
7540 CERROR("%s: device %s extents feature is not guaranteed to "
7541 "work on big-endian systems. Use \"bigendian_extents\" "
7542 "mount option to override.\n", name, dev);
7546 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 0, 53, 0)
7547 if (opts != NULL && strstr(opts, "force_over_128tb") != NULL) {
7548 CWARN("force_over_128tb option is deprecated. "
7549 "Filesystems less than 512TB can be created without any "
7550 "force options. Use force_over_512tb option for "
7551 "filesystems greater than 512TB.\n");
7554 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 1, 53, 0)
7555 if (opts != NULL && strstr(opts, "force_over_256tb") != NULL) {
7556 CWARN("force_over_256tb option is deprecated. "
7557 "Filesystems less than 512TB can be created without any "
7558 "force options. Use force_over_512tb option for "
7559 "filesystems greater than 512TB.\n");
7563 if (opts != NULL && strstr(opts, "force_over_512tb") != NULL)
7564 force_over_512tb = 1;
7566 __page = alloc_page(GFP_KERNEL);
7568 GOTO(out, rc = -ENOMEM);
7569 page = (unsigned long)page_address(__page);
7570 options = (char *)page;
7573 /* strip out the options for backward compatibility */
7574 static char *sout[] = {
7581 /* strip out the options we already processed in osd */
7582 "bigendian_extents",
7588 strcat(options, opts);
7589 for (rc = 0, str = options; sout[rc]; ) {
7590 char *op = strstr(str, sout[rc]);
7597 if (op == options || *(op - 1) == ',') {
7598 str = op + strlen(sout[rc]);
7599 if (*str == ',' || *str == '\0') {
7600 *str == ',' ? str++ : str;
7601 memmove(op, str, strlen(str) + 1);
7604 for (str = op; *str != ',' && *str != '\0'; str++)
7608 strncat(options, "user_xattr,acl", 14);
7611 /* Glom up mount options */
7612 if (*options != '\0')
7613 strcat(options, ",");
7614 strlcat(options, "no_mbcache,nodelalloc", PAGE_SIZE);
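/* mbcache and delayed allocation are always disabled for the ldiskfs mount */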
7616 type = get_fs_type("ldiskfs");
7618 CERROR("%s: cannot find ldiskfs module\n", name);
7619 GOTO(out, rc = -ENODEV);
7622 o->od_mnt = vfs_kern_mount(type, s_flags, dev, options);
7623 module_put(type->owner);
7625 if (IS_ERR(o->od_mnt)) {
7626 rc = PTR_ERR(o->od_mnt);
7628 CERROR("%s: can't mount %s: %d\n", name, dev, rc);
7632 if (ldiskfs_blocks_count(LDISKFS_SB(osd_sb(o))->s_es) <<
7633 osd_sb(o)->s_blocksize_bits > 512ULL << 40 &&
7634 force_over_512tb == 0) {
7635 CERROR("%s: device %s LDISKFS does not support filesystems "
7636 "greater than 512TB and can cause data corruption. "
7637 "Use \"force_over_512tb\" mount option to override.\n",
7639 GOTO(out_mnt, rc = -EINVAL);
7642 if (lmd_flags & LMD_FLG_DEV_RDONLY) {
7643 if (priv_dev_set_rdonly) {
7644 priv_dev_set_rdonly(osd_sb(o)->s_bdev);
7645 o->od_dt_dev.dd_rdonly = 1;
7646 LCONSOLE_WARN("%s: set dev_rdonly on this device\n",
7649 LCONSOLE_WARN("%s: dev_rdonly is not supported on this device",
7652 GOTO(out_mnt, rc = -EOPNOTSUPP);
7654 } else if (priv_dev_check_rdonly &&
7655 priv_dev_check_rdonly(osd_sb(o)->s_bdev)) {
7656 CERROR("%s: underlying device %s is marked as "
7657 "read-only. Setup failed\n", name, dev);
7659 GOTO(out_mnt, rc = -EROFS);
7662 if (!ldiskfs_has_feature_journal(o->od_mnt->mnt_sb)) {
7663 CERROR("%s: device %s is mounted w/o journal\n", name, dev);
7664 GOTO(out_mnt, rc = -EINVAL);
7667 #ifdef LDISKFS_MOUNT_DIRDATA
7668 if (ldiskfs_has_feature_dirdata(o->od_mnt->mnt_sb))
7669 LDISKFS_SB(osd_sb(o))->s_mount_opt |= LDISKFS_MOUNT_DIRDATA;
7670 else if (strstr(name, "MDT")) /* don't complain for MGT or OSTs */
7671 CWARN("%s: device %s was upgraded from Lustre-1.x without "
7672 "enabling the dirdata feature. If you do not want to "
7673 "downgrade to Lustre-1.x again, you can enable it via "
7674 "'tune2fs -O dirdata device'\n", name, dev);
7676 inode = osd_sb(o)->s_root->d_inode;
7677 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
7678 rc = osd_ea_fid_set(info, inode, fid, LMAC_NOT_IN_OI, 0);
7680 CERROR("%s: failed to set lma on %s root inode\n", name, dev);
7684 if (lmd_flags & LMD_FLG_NOSCRUB)
7685 o->od_auto_scrub_interval = AS_NEVER;
7687 if (blk_queue_nonrot(bdev_get_queue(osd_sb(o)->s_bdev))) {
7688 /* do not use pagecache with flash-backed storage */
7689 o->od_writethrough_cache = 0;
7690 o->od_read_cache = 0;
7701 __free_page(__page);
7706 static struct lu_device *osd_device_fini(const struct lu_env *env,
7707 struct lu_device *d)
7709 struct osd_device *o = osd_dev(d);
7713 osd_index_backup(env, o, false);
7714 osd_shutdown(env, o);
7716 osd_obj_map_fini(o);
7722 static int osd_device_init0(const struct lu_env *env,
7723 struct osd_device *o,
7724 struct lustre_cfg *cfg)
7726 struct lu_device *l = osd2lu_dev(o);
7727 struct osd_thread_info *info;
7731 /* if the module was re-loaded, env can lose its keys */
7732 rc = lu_env_refill((struct lu_env *)env);
7735 info = osd_oti_get(env);
7738 l->ld_ops = &osd_lu_ops;
7739 o->od_dt_dev.dd_ops = &osd_dt_ops;
7741 spin_lock_init(&o->od_osfs_lock);
7742 mutex_init(&o->od_otable_mutex);
7743 INIT_LIST_HEAD(&o->od_orphan_list);
7744 INIT_LIST_HEAD(&o->od_index_backup_list);
7745 INIT_LIST_HEAD(&o->od_index_restore_list);
7746 spin_lock_init(&o->od_lock);
7747 o->od_index_backup_policy = LIBP_NONE;
7750 o->od_read_cache = 1;
7751 o->od_writethrough_cache = 1;
7752 o->od_readcache_max_filesize = OSD_MAX_CACHE_SIZE;
7753 o->od_auto_scrub_interval = AS_DEFAULT;
7755 cplen = strlcpy(o->od_svname, lustre_cfg_string(cfg, 4),
7756 sizeof(o->od_svname));
7757 if (cplen >= sizeof(o->od_svname)) {
7762 o->od_index_backup_stop = 0;
7763 o->od_index = -1; /* -1 means index is invalid */
7764 rc = server_name2index(o->od_svname, &o->od_index, NULL);
7765 if (rc == LDD_F_SV_TYPE_OST)
7768 o->od_full_scrub_ratio = OFSR_DEFAULT;
7769 o->od_full_scrub_threshold_rate = FULL_SCRUB_THRESHOLD_RATE_DEFAULT;
7770 rc = osd_mount(env, o, cfg);
7774 rc = osd_obj_map_init(env, o);
7778 rc = lu_site_init(&o->od_site, l);
7780 GOTO(out_compat, rc);
7781 o->od_site.ls_bottom_dev = l;
7783 rc = lu_site_init_finish(&o->od_site);
7787 INIT_LIST_HEAD(&o->od_ios_list);
7788 /* setup scrub, including OI files initialization */
7790 rc = osd_scrub_setup(env, o);
7795 rc = osd_procfs_init(o, o->od_svname);
7797 CERROR("%s: can't initialize procfs: rc = %d\n",
7799 GOTO(out_scrub, rc);
7802 LASSERT(l->ld_site->ls_linkage.next != NULL);
7803 LASSERT(l->ld_site->ls_linkage.prev != NULL);
7805 /* initialize quota slave instance */
7806 /* currently there is no need to prepare qsd_instance_md for OST */
7807 if (!o->od_is_ost) {
7808 o->od_quota_slave_md = qsd_init(env, o->od_svname,
7810 o->od_proc_entry, true);
7811 if (IS_ERR(o->od_quota_slave_md)) {
7812 rc = PTR_ERR(o->od_quota_slave_md);
7813 o->od_quota_slave_md = NULL;
7814 GOTO(out_procfs, rc);
7818 o->od_quota_slave_dt = qsd_init(env, o->od_svname, &o->od_dt_dev,
7819 o->od_proc_entry, false);
7821 if (IS_ERR(o->od_quota_slave_dt)) {
7822 if (o->od_quota_slave_md != NULL) {
7823 qsd_fini(env, o->od_quota_slave_md);
7824 o->od_quota_slave_md = NULL;
7827 rc = PTR_ERR(o->od_quota_slave_dt);
7828 o->od_quota_slave_dt = NULL;
7829 GOTO(out_procfs, rc);
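/*
 * Error unwinding: the pieces initialized above are torn down in
 * reverse order (scrub/OI files, lu_site, compat object map, ...).
 */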
7837 osd_scrub_cleanup(env, o);
7839 lu_site_fini(&o->od_site);
7841 osd_obj_map_fini(o);
7848 static struct lu_device *osd_device_alloc(const struct lu_env *env,
7849 struct lu_device_type *t,
7850 struct lustre_cfg *cfg)
7852 struct osd_device *o;
7857 return ERR_PTR(-ENOMEM);
7859 rc = dt_device_init(&o->od_dt_dev, t);
7862 * Because the ctx might be revived in dt_device_init,
7863 * refill the env here
7865 lu_env_refill((struct lu_env *)env);
7866 rc = osd_device_init0(env, o, cfg);
7868 dt_device_fini(&o->od_dt_dev);
7871 if (unlikely(rc != 0))
7874 return rc == 0 ? osd2lu_dev(o) : ERR_PTR(rc);
7877 static struct lu_device *osd_device_free(const struct lu_env *env,
7878 struct lu_device *d)
7880 struct osd_device *o = osd_dev(d);
7884 /* XXX: make osd top device in order to release reference */
7885 d->ld_site->ls_top_dev = d;
7886 lu_site_purge(env, d->ld_site, -1);
7887 if (!cfs_hash_is_empty(d->ld_site->ls_obj_hash)) {
7888 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
7889 lu_site_print(env, d->ld_site, &msgdata, lu_cdebug_printer);
7891 lu_site_fini(&o->od_site);
7892 dt_device_fini(&o->od_dt_dev);
7897 static int osd_process_config(const struct lu_env *env,
7898 struct lu_device *d, struct lustre_cfg *cfg)
7900 struct osd_device *o = osd_dev(d);
7905 switch (cfg->lcfg_command) {
7907 rc = osd_mount(env, o, cfg);
7911 * Handle the case where LCFG_PRE_CLEANUP was not called in advance,
7912 * which may happen if a failure was hit during the mount process.
7914 osd_index_backup(env, o, false);
7915 lu_dev_del_linkage(d->ld_site, d);
7916 rc = osd_shutdown(env, o);
7919 LASSERT(&o->od_dt_dev);
7920 rc = class_process_proc_param(PARAM_OSD, lprocfs_osd_obd_vars,
7921 cfg, &o->od_dt_dev);
7922 if (rc > 0 || rc == -ENOSYS) {
7923 rc = class_process_proc_param(PARAM_OST,
7924 lprocfs_osd_obd_vars,
7925 cfg, &o->od_dt_dev);
7930 case LCFG_PRE_CLEANUP:
7932 osd_index_backup(env, o,
7933 o->od_index_backup_policy != LIBP_NONE);
7943 static int osd_recovery_complete(const struct lu_env *env,
7944 struct lu_device *d)
7946 struct osd_device *osd = osd_dev(d);
7951 if (osd->od_quota_slave_md == NULL && osd->od_quota_slave_dt == NULL)
7955 * start the qsd instance on recovery completion; this notifies the quota
7956 * slave code that we are about to process new requests now
7958 rc = qsd_start(env, osd->od_quota_slave_dt);
7959 if (rc == 0 && osd->od_quota_slave_md != NULL)
7960 rc = qsd_start(env, osd->od_quota_slave_md);
7966 * we use exports to track all osd users
7968 static int osd_obd_connect(const struct lu_env *env, struct obd_export **exp,
7969 struct obd_device *obd, struct obd_uuid *cluuid,
7970 struct obd_connect_data *data, void *localdata)
7972 struct osd_device *osd = osd_dev(obd->obd_lu_dev);
7973 struct lustre_handle conn;
7978 CDEBUG(D_CONFIG, "connect #%d\n", osd->od_connects);
7980 rc = class_connect(&conn, obd, cluuid);
7984 *exp = class_conn2export(&conn);
7986 spin_lock(&osd->od_osfs_lock);
7988 spin_unlock(&osd->od_osfs_lock);
7994 * once the last export (we don't count the self-export) has disappeared,
7995 * the osd can be released
7997 static int osd_obd_disconnect(struct obd_export *exp)
7999 struct obd_device *obd = exp->exp_obd;
8000 struct osd_device *osd = osd_dev(obd->obd_lu_dev);
8001 int rc, release = 0;
8005 /* Only disconnect the underlying layers on the final disconnect. */
8006 spin_lock(&osd->od_osfs_lock);
8008 if (osd->od_connects == 0)
8010 spin_unlock(&osd->od_osfs_lock);
8012 rc = class_disconnect(exp); /* bz 9811 */
8014 if (rc == 0 && release)
8015 class_manual_cleanup(obd);
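/*
 * ->ldo_prepare() hook, called once the device stack is configured:
 * prepare the quota slave objects, decide whether OST object FIDs use
 * the index-in-IDIF mapping, and initialize the FID client.
 */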
8019 static int osd_prepare(const struct lu_env *env, struct lu_device *pdev,
8020 struct lu_device *dev)
8022 struct osd_device *osd = osd_dev(dev);
8023 struct lr_server_data *lsd =
8024 &osd->od_dt_dev.dd_lu_dev.ld_site->ls_tgt->lut_lsd;
8029 if (osd->od_quota_slave_md != NULL) {
8030 /* set up quota slave objects for inode */
8031 result = qsd_prepare(env, osd->od_quota_slave_md);
8036 if (osd->od_quota_slave_dt != NULL) {
8037 /* set up quota slave objects for block */
8038 result = qsd_prepare(env, osd->od_quota_slave_dt);
8044 if (lsd->lsd_feature_incompat & OBD_COMPAT_OST) {
8045 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 0, 52, 0)
8046 if (lsd->lsd_feature_rocompat & OBD_ROCOMPAT_IDX_IN_IDIF) {
8047 osd->od_index_in_idif = 1;
8049 osd->od_index_in_idif = 0;
8050 result = osd_register_proc_index_in_idif(osd);
8055 osd->od_index_in_idif = 1;
8059 result = osd_fid_init(env, osd);
8064 static int osd_fid_alloc(const struct lu_env *env, struct obd_export *exp,
8065 struct lu_fid *fid, struct md_op_data *op_data)
8067 struct osd_device *osd = osd_dev(exp->exp_obd->obd_lu_dev);
8069 return seq_client_alloc_fid(env, osd->od_cl_seq, fid);
8072 static const struct lu_object_operations osd_lu_obj_ops = {
8073 .loo_object_init = osd_object_init,
8074 .loo_object_delete = osd_object_delete,
8075 .loo_object_release = osd_object_release,
8076 .loo_object_free = osd_object_free,
8077 .loo_object_print = osd_object_print,
8078 .loo_object_invariant = osd_object_invariant
8081 const struct lu_device_operations osd_lu_ops = {
8082 .ldo_object_alloc = osd_object_alloc,
8083 .ldo_process_config = osd_process_config,
8084 .ldo_recovery_complete = osd_recovery_complete,
8085 .ldo_prepare = osd_prepare,
8088 static const struct lu_device_type_operations osd_device_type_ops = {
8089 .ldto_init = osd_type_init,
8090 .ldto_fini = osd_type_fini,
8092 .ldto_start = osd_type_start,
8093 .ldto_stop = osd_type_stop,
8095 .ldto_device_alloc = osd_device_alloc,
8096 .ldto_device_free = osd_device_free,
8098 .ldto_device_init = osd_device_init,
8099 .ldto_device_fini = osd_device_fini
8102 static struct lu_device_type osd_device_type = {
8103 .ldt_tags = LU_DEVICE_DT,
8104 .ldt_name = LUSTRE_OSD_LDISKFS_NAME,
8105 .ldt_ops = &osd_device_type_ops,
8106 .ldt_ctx_tags = LCT_LOCAL,
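/* Report unhealthy if the device is not mounted or the filesystem went read-only. */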
8109 static int osd_health_check(const struct lu_env *env, struct obd_device *obd)
8111 struct osd_device *osd = osd_dev(obd->obd_lu_dev);
8112 struct super_block *sb = osd_sb(osd);
8114 return (osd->od_mnt == NULL || sb->s_flags & MS_RDONLY);
8118 * lprocfs legacy support.
8120 static struct obd_ops osd_obd_device_ops = {
8121 .o_owner = THIS_MODULE,
8122 .o_connect = osd_obd_connect,
8123 .o_disconnect = osd_obd_disconnect,
8124 .o_fid_alloc = osd_fid_alloc,
8125 .o_health_check = osd_health_check,
8128 static int __init osd_init(void)
8132 CLASSERT(BH_DXLock < sizeof(((struct buffer_head *)0)->b_state) * 8);
8133 #if !defined(CONFIG_DEBUG_MUTEXES) && !defined(CONFIG_DEBUG_SPINLOCK)
8134 /* please, try to keep osd_thread_info smaller than a page */
8135 CLASSERT(sizeof(struct osd_thread_info) <= PAGE_SIZE);
8140 rc = lu_kmem_init(ldiskfs_caches);
8144 #ifdef CONFIG_KALLSYMS
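/*
 * dev_set_rdonly()/dev_check_rdonly() are optional: on kernels that do
 * not provide them the lookups yield NULL and the pointers are checked
 * before every use.
 */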
8145 priv_dev_set_rdonly = (void *)kallsyms_lookup_name("dev_set_rdonly");
8146 priv_dev_check_rdonly =
8147 (void *)kallsyms_lookup_name("dev_check_rdonly");
8150 rc = class_register_type(&osd_obd_device_ops, NULL, true,
8151 lprocfs_osd_module_vars,
8152 LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);
8154 lu_kmem_fini(ldiskfs_caches);
8158 static void __exit osd_exit(void)
8160 class_unregister_type(LUSTRE_OSD_LDISKFS_NAME);
8161 lu_kmem_fini(ldiskfs_caches);
8164 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
8165 MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_LDISKFS_NAME")");
8166 MODULE_VERSION(LUSTRE_VERSION_STRING);
8167 MODULE_LICENSE("GPL");
8169 module_init(osd_init);
8170 module_exit(osd_exit);