4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2015, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/osd/osd_handler.c
38 * Top-level entry points into osd module
40 * Author: Nikita Danilov <nikita@clusterfs.com>
41 * Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
44 #define DEBUG_SUBSYSTEM S_OSD
46 #include <linux/module.h>
47 #include <linux/user_namespace.h>
48 #ifdef HAVE_UIDGID_HEADER
49 # include <linux/uidgid.h>
52 /* LUSTRE_VERSION_CODE */
53 #include <lustre_ver.h>
54 /* prerequisite for linux/xattr.h */
55 #include <linux/types.h>
56 /* prerequisite for linux/xattr.h */
58 /* XATTR_{REPLACE,CREATE} */
59 #include <linux/xattr.h>
61 #include <ldiskfs/ldiskfs.h>
62 #include <ldiskfs/xattr.h>
65 * struct OBD_{ALLOC,FREE}*()
68 #include <obd_support.h>
69 /* struct ptlrpc_thread */
70 #include <lustre_net.h>
71 #include <lustre_fid.h>
73 #include <lustre_param.h>
75 #include "osd_internal.h"
76 #include "osd_dynlocks.h"
78 /* llo_* api support */
79 #include <md_object.h>
80 #include <lustre_quota.h>
82 #include <lustre_linkea.h>
85 CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
86 "ldiskfs with parallel directory operations");
88 int ldiskfs_track_declares_assert;
89 CFS_MODULE_PARM(ldiskfs_track_declares_assert, "i", int, 0644,
90 "LBUG during tracking of declares");
92 /* Slab to allocate dynlocks */
93 struct kmem_cache *dynlock_cachep;
95 /* Slab to allocate osd_it_ea */
96 struct kmem_cache *osd_itea_cachep;
98 static struct lu_kmem_descr ldiskfs_caches[] = {
100 .ckd_cache = &dynlock_cachep,
101 .ckd_name = "dynlock_cache",
102 .ckd_size = sizeof(struct dynlock_handle)
105 .ckd_cache = &osd_itea_cachep,
106 .ckd_name = "osd_itea_cache",
107 .ckd_size = sizeof(struct osd_it_ea)
114 static const char dot[] = ".";
115 static const char dotdot[] = "..";
116 static const char remote_obj_dir[] = "REM_OBJ_DIR";
118 static const struct lu_object_operations osd_lu_obj_ops;
119 static const struct dt_object_operations osd_obj_ops;
120 static const struct dt_object_operations osd_obj_ea_ops;
121 static const struct dt_object_operations osd_obj_otable_it_ops;
122 static const struct dt_index_operations osd_index_iam_ops;
123 static const struct dt_index_operations osd_index_ea_ops;
125 static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
126 const struct lu_fid *fid);
127 static int osd_process_scheduled_agent_removals(const struct lu_env *env,
128 struct osd_device *osd);
130 int osd_trans_declare_op2rb[] = {
131 [OSD_OT_ATTR_SET] = OSD_OT_ATTR_SET,
132 [OSD_OT_PUNCH] = OSD_OT_MAX,
133 [OSD_OT_XATTR_SET] = OSD_OT_XATTR_SET,
134 [OSD_OT_CREATE] = OSD_OT_DESTROY,
135 [OSD_OT_DESTROY] = OSD_OT_CREATE,
136 [OSD_OT_REF_ADD] = OSD_OT_REF_DEL,
137 [OSD_OT_REF_DEL] = OSD_OT_REF_ADD,
138 [OSD_OT_WRITE] = OSD_OT_WRITE,
139 [OSD_OT_INSERT] = OSD_OT_DELETE,
140 [OSD_OT_DELETE] = OSD_OT_INSERT,
141 [OSD_OT_QUOTA] = OSD_OT_MAX,
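/*
 * Added note (not from the original source): this table maps a declared
 * operation to the operation used to roll it back, e.g. a declared
 * OSD_OT_CREATE is undone via OSD_OT_DESTROY and an OSD_OT_INSERT via
 * OSD_OT_DELETE; entries mapped to OSD_OT_MAX (punch, quota) presumably
 * have no dedicated rollback operation.
 */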
144 static int osd_has_index(const struct osd_object *obj)
146 return obj->oo_dt.do_index_ops != NULL;
149 static int osd_object_invariant(const struct lu_object *l)
151 return osd_invariant(osd_obj(l));
155 * Concurrency: doesn't matter
159 * Concurrency: doesn't matter
161 static int osd_write_locked(const struct lu_env *env, struct osd_object *o)
163 struct osd_thread_info *oti = osd_oti_get(env);
164 return oti->oti_w_locks > 0 && o->oo_owner == env;
168 * Concurrency: doesn't access mutable data
170 static int osd_root_get(const struct lu_env *env,
171 struct dt_device *dev, struct lu_fid *f)
173 lu_local_obj_fid(f, OSD_FS_ROOT_OID);
178 * OSD object methods.
182 * Concurrency: no concurrent access is possible that early in object
185 static struct lu_object *osd_object_alloc(const struct lu_env *env,
186 const struct lu_object_header *hdr,
189 struct osd_object *mo;
195 l = &mo->oo_dt.do_lu;
196 dt_object_init(&mo->oo_dt, NULL, d);
197 mo->oo_dt.do_ops = &osd_obj_ea_ops;
198 l->lo_ops = &osd_lu_obj_ops;
199 init_rwsem(&mo->oo_sem);
200 init_rwsem(&mo->oo_ext_idx_sem);
201 spin_lock_init(&mo->oo_guard);
208 int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
209 struct dentry *dentry, struct lustre_mdt_attrs *lma)
213 CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
214 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
215 info->oti_mdt_attrs_old, LMA_OLD_SIZE);
217 if ((void *)lma != (void *)info->oti_mdt_attrs_old)
218 memcpy(lma, info->oti_mdt_attrs_old, sizeof(*lma));
220 lustre_lma_swab(lma);
221 /* Check LMA compatibility */
222 if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
223 CWARN("%.16s: unsupported incompat LMA feature(s) %#x "
224 "for fid = "DFID", ino = %lu\n",
225 LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
226 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
227 PFID(&lma->lma_self_fid), inode->i_ino);
230 } else if (rc == 0) {
238 * retrieve object from backend ext fs.
240 struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
241 struct osd_inode_id *id)
243 struct inode *inode = NULL;
/* if we look for an inode within a running
 * transaction, then we risk deadlocking */
247 /* osd_dirent_check_repair() breaks this */
248 /*LASSERT(current->journal_info == NULL);*/
250 inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
252 CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
253 id->oii_ino, PTR_ERR(inode));
254 } else if (id->oii_gen != OSD_OII_NOGEN &&
255 inode->i_generation != id->oii_gen) {
256 CDEBUG(D_INODE, "unmatched inode: ino = %u, oii_gen = %u, "
257 "i_generation = %u\n",
258 id->oii_ino, id->oii_gen, inode->i_generation);
260 inode = ERR_PTR(-ESTALE);
261 } else if (inode->i_nlink == 0) {
262 /* due to parallel readdir and unlink,
263 * we can have dead inode here. */
264 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
266 inode = ERR_PTR(-ESTALE);
267 } else if (is_bad_inode(inode)) {
268 CWARN("%.16s: bad inode: ino = %u\n",
269 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, id->oii_ino);
271 inode = ERR_PTR(-ENOENT);
273 ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
274 if (id->oii_gen == OSD_OII_NOGEN)
275 osd_id_gen(id, inode->i_ino, inode->i_generation);
277 /* Do not update file c/mtime in ldiskfs.
278 * NB: we don't have any lock to protect this because we don't
279 * have reference on osd_object now, but contention with
280 * another lookup + attr_set can't happen in the tiny window
281 * between if (...) and set S_NOCMTIME. */
282 if (!(inode->i_flags & S_NOCMTIME))
283 inode->i_flags |= S_NOCMTIME;
288 static struct inode *
289 osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
290 struct osd_inode_id *id, struct lu_fid *fid)
292 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
296 inode = osd_iget(info, dev, id);
300 rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
302 *fid = lma->lma_self_fid;
303 } else if (rc == -ENODATA) {
304 if (unlikely(inode == osd_sb(dev)->s_root->d_inode))
305 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
307 lu_igif_build(fid, inode->i_ino, inode->i_generation);
315 static struct inode *osd_iget_check(struct osd_thread_info *info,
316 struct osd_device *dev,
317 const struct lu_fid *fid,
318 struct osd_inode_id *id,
/* The cached OI mapping is trusted. If we cannot locate the inode
 * via the cached OI mapping, then return the failure to the caller
 * directly without further OI checking. */
329 inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
332 if (cached || (rc != -ENOENT && rc != -ESTALE)) {
333 CDEBUG(D_INODE, "no inode: ino = %u, rc = %d\n",
342 if (is_bad_inode(inode)) {
345 CDEBUG(D_INODE, "bad inode: ino = %u\n", id->oii_ino);
353 if (id->oii_gen != OSD_OII_NOGEN &&
354 inode->i_generation != id->oii_gen) {
357 CDEBUG(D_INODE, "unmatched inode: ino = %u, "
358 "oii_gen = %u, i_generation = %u\n",
359 id->oii_ino, id->oii_gen, inode->i_generation);
367 if (inode->i_nlink == 0) {
370 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
378 ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
382 LASSERTF(rc == -ESTALE || rc == -ENOENT, "rc = %d\n", rc);
384 rc = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
/* XXX: There are four possible cases:
 *	1. Backup/restore caused the OI to become invalid.
 *	2. Someone unlinked the object but did NOT remove
 *	   the OI mapping, e.g. by mounting the target device
 *	   as ldiskfs and modifying something directly.
 *	3. Someone just removed the object between the
 *	   former oi_lookup and the iget. That is normal.
 *	4. Other failure cases.
 *
 *	Generally, when the device is mounted, it will
 *	automatically check whether the system was restored
 *	from a file-level backup or not. We trust that detection
 *	to distinguish the 1st case from the 2nd case. */
402 if (!IS_ERR(inode) && inode->i_generation != 0 &&
403 inode->i_generation == id->oii_gen)
404 /* "id->oii_gen != OSD_OII_NOGEN" is for
405 * "@cached == false" case. */
/* If the OI mapping was in the OI file before
 * osd_iget_check() but has now disappeared, then
 * it must have been removed by a race. That is a
 * normal race case. */
416 if (id->oii_gen == OSD_OII_NOGEN)
417 osd_id_gen(id, inode->i_ino, inode->i_generation);
419 /* Do not update file c/mtime in ldiskfs.
420 * NB: we don't have any lock to protect this because we don't
421 * have reference on osd_object now, but contention with
422 * another lookup + attr_set can't happen in the tiny window
423 * between if (...) and set S_NOCMTIME. */
424 if (!(inode->i_flags & S_NOCMTIME))
425 inode->i_flags |= S_NOCMTIME;
442 * \retval +v: new filter_fid, does not contain self-fid
443 * \retval 0: filter_fid_old, contains self-fid
444 * \retval -v: other failure cases
446 int osd_get_idif(struct osd_thread_info *info, struct inode *inode,
447 struct dentry *dentry, struct lu_fid *fid)
449 struct filter_fid_old *ff = &info->oti_ff;
450 struct ost_id *ostid = &info->oti_ostid;
453 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_FID, ff, sizeof(*ff));
454 if (rc == sizeof(*ff)) {
456 ostid_set_seq(ostid, le64_to_cpu(ff->ff_seq));
457 ostid_set_id(ostid, le64_to_cpu(ff->ff_objid));
/* XXX: use 0 as the index for compatibility; the caller will
 * handle index-related issues when necessary. */
460 ostid_to_fid(fid, ostid, 0);
461 } else if (rc == sizeof(struct filter_fid)) {
463 } else if (rc >= 0) {
470 static int osd_lma_self_repair(struct osd_thread_info *info,
471 struct osd_device *osd, struct inode *inode,
472 const struct lu_fid *fid, __u32 compat)
477 LASSERT(current->journal_info == NULL);
479 jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
480 osd_dto_credits_noquota[DTO_XATTR_SET]);
483 CWARN("%s: cannot start journal for lma_self_repair: rc = %d\n",
488 rc = osd_ea_fid_set(info, inode, fid, compat, 0);
490 CWARN("%s: cannot self repair the LMA: rc = %d\n",
492 ldiskfs_journal_stop(jh);
496 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
498 struct osd_thread_info *info = osd_oti_get(env);
499 struct osd_device *osd = osd_obj2dev(obj);
500 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
501 struct inode *inode = obj->oo_inode;
502 struct dentry *dentry = &info->oti_obj_dentry;
503 struct lu_fid *fid = NULL;
504 const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
508 CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
509 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
510 info->oti_mdt_attrs_old, LMA_OLD_SIZE);
511 if (rc == -ENODATA && !fid_is_igif(rfid) && osd->od_check_ff) {
512 fid = &lma->lma_self_fid;
513 rc = osd_get_idif(info, inode, dentry, fid);
514 if ((rc > 0) || (rc == -ENODATA && osd->od_index_in_idif)) {
515 /* For the given OST-object, if it has neither LMA nor
516 * FID in XATTR_NAME_FID, then the given FID (which is
517 * contained in the @obj, from client RPC for locating
518 * the OST-object) is trusted. We use it to generate
520 osd_lma_self_repair(info, osd, inode, rfid,
531 lustre_lma_swab(lma);
532 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
533 CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
534 CWARN("%s: unsupported incompat LMA feature(s) %#x for "
535 "fid = "DFID", ino = %lu\n", osd_name(osd),
536 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
537 PFID(rfid), inode->i_ino);
540 fid = &lma->lma_self_fid;
544 if (fid != NULL && unlikely(!lu_fid_eq(rfid, fid))) {
545 if (fid_is_idif(rfid) && fid_is_idif(fid)) {
546 struct ost_id *oi = &info->oti_ostid;
547 struct lu_fid *fid1 = &info->oti_fid3;
548 __u32 idx = fid_idif_ost_idx(rfid);
/* For old IDIF, the OST index is not part of the IDIF,
 * which means that different OSTs may have the same IDIFs.
 * In such a case, we need an extra compatibility check to
 * make sure the OI scrub is triggered properly. */
554 if (idx != 0 && fid_idif_ost_idx(fid) == 0) {
555 /* Given @rfid is new, LMA is old. */
556 fid_to_ostid(fid, oi);
557 ostid_to_fid(fid1, oi, idx);
558 if (lu_fid_eq(fid1, rfid)) {
559 if (osd->od_index_in_idif)
560 osd_lma_self_repair(info, osd,
574 static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
575 const struct lu_fid *fid,
576 const struct lu_object_conf *conf)
578 struct osd_thread_info *info;
579 struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
580 struct osd_device *dev;
581 struct osd_idmap_cache *oic;
582 struct osd_inode_id *id;
584 struct osd_scrub *scrub;
585 struct scrub_file *sf;
589 bool triggered = false;
592 LINVRNT(osd_invariant(obj));
593 LASSERT(obj->oo_inode == NULL);
594 LASSERTF(fid_is_sane(fid) || fid_is_idif(fid), DFID"\n", PFID(fid));
597 scrub = &dev->od_scrub;
598 sf = &scrub->os_file;
599 info = osd_oti_get(env);
601 oic = &info->oti_cache;
603 if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
/* The object is either created as a locking anchor or is about to be
 * created on disk. There is no need to call osd_oi_lookup() here because
 * a FID should never be re-used; if it really is a duplicate FID for some
 * unexpected reason, we should be able to detect it later by calling
 * do_create->osd_oi_insert(). */
611 if (conf != NULL && conf->loc_flags & LOC_F_NEW)
612 GOTO(out, result = 0);
614 /* Search order: 1. per-thread cache. */
615 if (lu_fid_eq(fid, &oic->oic_fid) &&
616 likely(oic->oic_dev == dev)) {
622 if (!list_empty(&scrub->os_inconsistent_items)) {
623 /* Search order: 2. OI scrub pending list. */
624 result = osd_oii_lookup(dev, fid, id);
630 /* Search order: 3. OI files. */
631 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
632 if (result == -ENOENT) {
633 if (!(fid_is_norm(fid) || fid_is_igif(fid)) ||
634 fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) ||
635 !ldiskfs_test_bit(osd_oi_fid2idx(dev,fid),
637 GOTO(out, result = 0);
646 inode = osd_iget_check(info, dev, fid, id, cached);
648 result = PTR_ERR(inode);
649 if (result == -ENOENT || result == -ESTALE)
650 GOTO(out, result = -ENOENT);
652 if (result == -EREMCHG) {
655 if (unlikely(triggered))
656 GOTO(out, result = saved);
659 if (thread_is_running(&scrub->os_thread)) {
660 result = -EINPROGRESS;
661 } else if (!dev->od_noscrub) {
662 result = osd_scrub_start(dev, SS_AUTO_FULL |
663 SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT);
664 LCONSOLE_WARN("%.16s: trigger OI scrub by RPC "
665 "for "DFID", rc = %d [1]\n",
666 osd_name(dev), PFID(fid), result);
667 if (result == 0 || result == -EALREADY)
668 result = -EINPROGRESS;
673 if (fid_is_on_ost(info, dev, fid, OI_CHECK_FLD))
/* We still have a chance to get a valid inode: for an
 * object which is referenced by a remote name entry, the
 * object on the local MDT will be linked under the dir
 * "/REMOTE_PARENT_DIR" with its FID string as the name.
 *
 * We do not know whether the object for the given FID
 * is referenced by some remote name entry or not, and
 * especially for DNE II, a multiply-linked object may
 * have many name entries residing on many MDTs.
 *
 * To simplify the operation, OSD does not distinguish
 * further and just looks up "/REMOTE_PARENT_DIR". Usually
 * this only happens for an RPC from another MDT during
 * OI scrub, or for a client-side RPC carrying only a FID,
 * such as FID to path, or from an old connected client. */
692 result = osd_lookup_in_remote_parent(info, dev,
705 obj->oo_inode = inode;
706 LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
708 result = osd_check_lma(env, obj);
710 if (result == -ENODATA) {
712 result = osd_oi_lookup(info, dev, fid, id,
/* result == -ENOENT means that the OI
 * mapping has been removed by a race and
 * the target inode belongs to another
 * object. Other errors can also be
 * returned directly. */
723 obj->oo_inode = NULL;
/* result == 0 means the cached OI
 * mapping is still in the OI file,
 * so the target inode is valid. */
/* The current OI mapping is from the OI file;
 * since the inode has been found via
 * osd_iget_check(), there is no need to recheck the OI. */
740 obj->oo_inode = NULL;
741 if (result != -EREMCHG)
745 result = osd_oi_lookup(info, dev, fid, id,
/* result == -ENOENT means the cached OI mapping
 * has been removed from the OI file by a race and
 * the above target inode belongs to another object.
 *
 * Other errors can also be returned directly. */
755 /* result == 0, goto trigger */
/* The current OI mapping is from the OI file;
 * since the inode has been found via
 * osd_iget_check(), there is no need to recheck the OI. */
766 obj->oo_compat_dot_created = 1;
767 obj->oo_compat_dotdot_created = 1;
769 if (!S_ISDIR(inode->i_mode) || !ldiskfs_pdo) /* done */
770 GOTO(out, result = 0);
772 LASSERT(obj->oo_hl_head == NULL);
773 obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
774 if (obj->oo_hl_head == NULL) {
775 obj->oo_inode = NULL;
777 GOTO(out, result = -ENOMEM);
779 GOTO(out, result = 0);
782 if (result != 0 && cached)
783 fid_zero(&oic->oic_fid);
785 LINVRNT(osd_invariant(obj));
790 * Concurrency: shouldn't matter.
792 static void osd_object_init0(struct osd_object *obj)
794 LASSERT(obj->oo_inode != NULL);
795 obj->oo_dt.do_body_ops = &osd_body_ops;
796 obj->oo_dt.do_lu.lo_header->loh_attr |=
797 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
801 * Concurrency: no concurrent access is possible that early in object
804 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
805 const struct lu_object_conf *conf)
807 struct osd_object *obj = osd_obj(l);
810 LINVRNT(osd_invariant(obj));
812 if (fid_is_otable_it(&l->lo_header->loh_fid)) {
813 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
814 l->lo_header->loh_attr |= LOHA_EXISTS;
818 result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
819 obj->oo_dt.do_body_ops = &osd_body_ops_new;
820 if (result == 0 && obj->oo_inode != NULL)
821 osd_object_init0(obj);
823 LINVRNT(osd_invariant(obj));
828 * Concurrency: no concurrent access is possible that late in object
831 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
833 struct osd_object *obj = osd_obj(l);
835 LINVRNT(osd_invariant(obj));
837 dt_object_fini(&obj->oo_dt);
838 if (obj->oo_hl_head != NULL)
839 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
844 * Concurrency: no concurrent access is possible that late in object
847 static void osd_index_fini(struct osd_object *o)
849 struct iam_container *bag;
851 if (o->oo_dir != NULL) {
852 bag = &o->oo_dir->od_container;
853 if (o->oo_inode != NULL) {
854 if (bag->ic_object == o->oo_inode)
855 iam_container_fini(bag);
857 OBD_FREE_PTR(o->oo_dir);
863 * Concurrency: no concurrent access is possible that late in object
864 * life-cycle (for all existing callers, that is. New callers have to provide
865 * their own locking.)
867 static int osd_inode_unlinked(const struct inode *inode)
869 return inode->i_nlink == 0;
873 OSD_TXN_OI_DELETE_CREDITS = 20,
874 OSD_TXN_INODE_DELETE_CREDITS = 20
881 #if OSD_THANDLE_STATS
883 * Set time when the handle is allocated
885 static void osd_th_alloced(struct osd_thandle *oth)
887 oth->oth_alloced = cfs_time_current();
891 * Set time when the handle started
893 static void osd_th_started(struct osd_thandle *oth)
895 oth->oth_started = cfs_time_current();
899 * Helper function to convert time interval to microseconds packed in
902 static long interval_to_usec(cfs_time_t start, cfs_time_t end)
906 cfs_duration_usec(cfs_time_sub(end, start), &val);
907 return val.tv_sec * 1000000 + val.tv_usec;
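/*
 * Added note: e.g. an interval of 2 seconds and 500 microseconds packs to
 * 2 * 1000000 + 500 = 2000500.
 */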
 * Check whether we have been dealing with this handle for too long.
913 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
914 cfs_time_t alloced, cfs_time_t started,
917 cfs_time_t now = cfs_time_current();
919 LASSERT(dev != NULL);
921 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
922 interval_to_usec(alloced, started));
923 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
924 interval_to_usec(started, closed));
925 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
926 interval_to_usec(closed, now));
928 if (cfs_time_before(cfs_time_add(alloced, cfs_time_seconds(30)), now)) {
929 CWARN("transaction handle %p was open for too long: "
931 "alloced "CFS_TIME_T" ,"
932 "started "CFS_TIME_T" ,"
933 "closed "CFS_TIME_T"\n",
934 oth, now, alloced, started, closed);
935 libcfs_debug_dumpstack(NULL);
939 #define OSD_CHECK_SLOW_TH(oth, dev, expr) \
941 cfs_time_t __closed = cfs_time_current(); \
942 cfs_time_t __alloced = oth->oth_alloced; \
943 cfs_time_t __started = oth->oth_started; \
946 __osd_th_check_slow(oth, dev, __alloced, __started, __closed); \
949 #else /* OSD_THANDLE_STATS */
951 #define osd_th_alloced(h) do {} while(0)
952 #define osd_th_started(h) do {} while(0)
953 #define OSD_CHECK_SLOW_TH(oth, dev, expr) expr
955 #endif /* OSD_THANDLE_STATS */
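/*
 * Usage note (added, not in the original): OSD_CHECK_SLOW_TH() wraps the
 * expression that closes a journal handle so that, when OSD_THANDLE_STATS
 * is enabled, the alloc/start/stop intervals are accounted and handles held
 * open for more than 30 seconds are reported, e.g. as done in
 * osd_trans_stop() below:
 *
 *	OSD_CHECK_SLOW_TH(oh, osd, rc = ldiskfs_journal_stop(hdl));
 *
 * With the stats disabled the macro degenerates to the bare expression.
 */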
958 * Concurrency: doesn't access mutable data.
960 static int osd_param_is_not_sane(const struct osd_device *dev,
961 const struct thandle *th)
963 struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
965 return oh->ot_credits > osd_transaction_size(dev);
969 * Concurrency: shouldn't matter.
971 static void osd_trans_commit_cb(struct super_block *sb,
972 struct ldiskfs_journal_cb_entry *jcb, int error)
974 struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
975 struct thandle *th = &oh->ot_super;
976 struct lu_device *lud = &th->th_dev->dd_lu_dev;
977 struct dt_txn_commit_cb *dcb, *tmp;
979 LASSERT(oh->ot_handle == NULL);
982 CERROR("transaction @0x%p commit error: %d\n", th, error);
984 dt_txn_hook_commit(th);
986 /* call per-transaction callbacks if any */
987 list_for_each_entry_safe(dcb, tmp, &oh->ot_commit_dcb_list,
989 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
990 "commit callback entry: magic=%x name='%s'\n",
991 dcb->dcb_magic, dcb->dcb_name);
992 list_del_init(&dcb->dcb_linkage);
993 dcb->dcb_func(NULL, th, dcb, error);
996 lu_ref_del_at(&lud->ld_reference, &oh->ot_dev_link, "osd-tx", th);
1000 lu_context_exit(&th->th_ctx);
1001 lu_context_fini(&th->th_ctx);
1005 static struct thandle *osd_trans_create(const struct lu_env *env,
1006 struct dt_device *d)
1008 struct osd_thread_info *oti = osd_oti_get(env);
1009 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1010 struct osd_thandle *oh;
/* no pending IO in this thread should be left over from a previous request */
1015 LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
1017 th = ERR_PTR(-ENOMEM);
1018 OBD_ALLOC_GFP(oh, sizeof *oh, GFP_NOFS);
1020 oh->ot_quota_trans = &oti->oti_quota_trans;
1021 memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
1025 th->th_tags = LCT_TX_HANDLE;
1027 INIT_LIST_HEAD(&oh->ot_commit_dcb_list);
1028 INIT_LIST_HEAD(&oh->ot_stop_dcb_list);
1031 memset(oti->oti_declare_ops, 0,
1032 sizeof(oti->oti_declare_ops));
1033 memset(oti->oti_declare_ops_cred, 0,
1034 sizeof(oti->oti_declare_ops_cred));
1035 memset(oti->oti_declare_ops_used, 0,
1036 sizeof(oti->oti_declare_ops_used));
1041 void osd_trans_dump_creds(const struct lu_env *env, struct thandle *th)
1043 struct osd_thread_info *oti = osd_oti_get(env);
1044 struct osd_thandle *oh;
1046 oh = container_of0(th, struct osd_thandle, ot_super);
1047 LASSERT(oh != NULL);
1049 CWARN(" create: %u/%u/%u, destroy: %u/%u/%u\n",
1050 oti->oti_declare_ops[OSD_OT_CREATE],
1051 oti->oti_declare_ops_cred[OSD_OT_CREATE],
1052 oti->oti_declare_ops_used[OSD_OT_CREATE],
1053 oti->oti_declare_ops[OSD_OT_DESTROY],
1054 oti->oti_declare_ops_cred[OSD_OT_DESTROY],
1055 oti->oti_declare_ops_used[OSD_OT_DESTROY]);
1056 CWARN(" attr_set: %u/%u/%u, xattr_set: %u/%u/%u\n",
1057 oti->oti_declare_ops[OSD_OT_ATTR_SET],
1058 oti->oti_declare_ops_cred[OSD_OT_ATTR_SET],
1059 oti->oti_declare_ops_used[OSD_OT_ATTR_SET],
1060 oti->oti_declare_ops[OSD_OT_XATTR_SET],
1061 oti->oti_declare_ops_cred[OSD_OT_XATTR_SET],
1062 oti->oti_declare_ops_used[OSD_OT_XATTR_SET]);
1063 CWARN(" write: %u/%u/%u, punch: %u/%u/%u, quota %u/%u/%u\n",
1064 oti->oti_declare_ops[OSD_OT_WRITE],
1065 oti->oti_declare_ops_cred[OSD_OT_WRITE],
1066 oti->oti_declare_ops_used[OSD_OT_WRITE],
1067 oti->oti_declare_ops[OSD_OT_PUNCH],
1068 oti->oti_declare_ops_cred[OSD_OT_PUNCH],
1069 oti->oti_declare_ops_used[OSD_OT_PUNCH],
1070 oti->oti_declare_ops[OSD_OT_QUOTA],
1071 oti->oti_declare_ops_cred[OSD_OT_QUOTA],
1072 oti->oti_declare_ops_used[OSD_OT_QUOTA]);
1073 CWARN(" insert: %u/%u/%u, delete: %u/%u/%u\n",
1074 oti->oti_declare_ops[OSD_OT_INSERT],
1075 oti->oti_declare_ops_cred[OSD_OT_INSERT],
1076 oti->oti_declare_ops_used[OSD_OT_INSERT],
1077 oti->oti_declare_ops[OSD_OT_DELETE],
1078 oti->oti_declare_ops_cred[OSD_OT_DELETE],
1079 oti->oti_declare_ops_used[OSD_OT_DELETE]);
1080 CWARN(" ref_add: %u/%u/%u, ref_del: %u/%u/%u\n",
1081 oti->oti_declare_ops[OSD_OT_REF_ADD],
1082 oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
1083 oti->oti_declare_ops_used[OSD_OT_REF_ADD],
1084 oti->oti_declare_ops[OSD_OT_REF_DEL],
1085 oti->oti_declare_ops_cred[OSD_OT_REF_DEL],
1086 oti->oti_declare_ops_used[OSD_OT_REF_DEL]);
1090 * Concurrency: shouldn't matter.
1092 static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
1095 struct osd_thread_info *oti = osd_oti_get(env);
1096 struct osd_device *dev = osd_dt_dev(d);
1098 struct osd_thandle *oh;
1103 LASSERT(current->journal_info == NULL);
1105 oh = container_of0(th, struct osd_thandle, ot_super);
1106 LASSERT(oh != NULL);
1107 LASSERT(oh->ot_handle == NULL);
1109 rc = dt_txn_hook_start(env, d, th);
1113 if (unlikely(osd_param_is_not_sane(dev, th))) {
1114 static unsigned long last_printed;
1115 static int last_credits;
1117 CWARN("%.16s: too many transaction credits (%d > %d)\n",
1118 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
1120 osd_journal(dev)->j_max_transaction_buffers);
1122 osd_trans_dump_creds(env, th);
1124 if (last_credits != oh->ot_credits &&
1125 time_after(jiffies, last_printed +
1126 msecs_to_jiffies(60 * MSEC_PER_SEC))) {
1127 libcfs_debug_dumpstack(NULL);
1128 last_credits = oh->ot_credits;
1129 last_printed = jiffies;
1131 /* XXX Limit the credits to 'max_transaction_buffers', and
1132 * let the underlying filesystem to catch the error if
1133 * we really need so many credits.
1135 * This should be removed when we can calculate the
1136 * credits precisely. */
1137 oh->ot_credits = osd_transaction_size(dev);
1141 * XXX temporary stuff. Some abstraction layer should
1144 jh = osd_journal_start_sb(osd_sb(dev), LDISKFS_HT_MISC, oh->ot_credits);
1148 LASSERT(oti->oti_txns == 0);
1149 lu_context_init(&th->th_ctx, th->th_tags);
1150 lu_context_enter(&th->th_ctx);
1152 lu_device_get(&d->dd_lu_dev);
1153 lu_ref_add_at(&d->dd_lu_dev.ld_reference, &oh->ot_dev_link,
1164 static int osd_seq_exists(const struct lu_env *env,
1165 struct osd_device *osd, u64 seq)
1167 struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
1168 struct seq_server_site *ss = osd_seq_site(osd);
1172 LASSERT(ss != NULL);
1173 LASSERT(ss->ss_server_fld != NULL);
1175 rc = osd_fld_lookup(env, osd, seq, range);
1178 CERROR("%s: can't lookup FLD sequence "LPX64
1179 ": rc = %d\n", osd_name(osd), seq, rc);
1183 RETURN(ss->ss_node_id == range->lsr_index);
1186 static void osd_trans_stop_cb(struct osd_thandle *oth, int result)
1188 struct dt_txn_commit_cb *dcb;
1189 struct dt_txn_commit_cb *tmp;
1191 /* call per-transaction stop callbacks if any */
1192 list_for_each_entry_safe(dcb, tmp, &oth->ot_stop_dcb_list,
1194 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
1195 "commit callback entry: magic=%x name='%s'\n",
1196 dcb->dcb_magic, dcb->dcb_name);
1197 list_del_init(&dcb->dcb_linkage);
1198 dcb->dcb_func(NULL, &oth->ot_super, dcb, result);
1203 * Concurrency: shouldn't matter.
1205 static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
1208 int rc = 0, remove_agents = 0;
1209 struct osd_thandle *oh;
1210 struct osd_thread_info *oti = osd_oti_get(env);
1211 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1212 struct osd_device *osd = osd_dt_dev(th->th_dev);
1213 struct qsd_instance *qsd = osd->od_quota_slave;
1214 struct lquota_trans *qtrans;
1217 oh = container_of0(th, struct osd_thandle, ot_super);
1219 remove_agents = oh->ot_remove_agents;
1221 qtrans = oh->ot_quota_trans;
1222 oh->ot_quota_trans = NULL;
1224 if (oh->ot_handle != NULL) {
1225 handle_t *hdl = oh->ot_handle;
1228 * add commit callback
1229 * notice we don't do this in osd_trans_start()
1230 * as underlying transaction can change during truncate
1232 ldiskfs_journal_callback_add(hdl, osd_trans_commit_cb,
1235 LASSERT(oti->oti_txns == 1);
1238 rc = dt_txn_hook_stop(env, th);
1240 CERROR("%s: failed in transaction hook: rc = %d\n",
1243 osd_trans_stop_cb(oh, rc);
1244 /* hook functions might modify th_sync */
1245 hdl->h_sync = th->th_sync;
1247 oh->ot_handle = NULL;
1248 OSD_CHECK_SLOW_TH(oh, osd, rc = ldiskfs_journal_stop(hdl));
1250 CERROR("%s: failed to stop transaction: rc = %d\n",
1253 osd_trans_stop_cb(oh, th->th_result);
1257 /* inform the quota slave device that the transaction is stopping */
1258 qsd_op_end(env, qsd, qtrans);
/* as we want journal IO and data IO to be concurrent, we don't block
 * awaiting data IO completion in osd_do_bio(); instead we wait here
 * once the transaction is submitted to the journal. All regular requests
 * don't do direct IO (except read/write), thus this wait_event is
 * effectively a no-op for them.
 *
 * IMPORTANT: we have to wait until any IO submitted by the thread is
 * completed, otherwise iobuf may be corrupted by a different request */
1269 wait_event(iobuf->dr_wait,
1270 atomic_read(&iobuf->dr_numreqs) == 0);
1271 osd_fini_iobuf(osd, iobuf);
1273 rc = iobuf->dr_error;
1275 if (unlikely(remove_agents != 0))
1276 osd_process_scheduled_agent_removals(env, osd);
1281 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
1283 struct osd_thandle *oh = container_of0(th, struct osd_thandle,
1286 LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
1287 LASSERT(&dcb->dcb_func != NULL);
1288 if (dcb->dcb_flags & DCB_TRANS_STOP)
1289 list_add(&dcb->dcb_linkage, &oh->ot_stop_dcb_list);
1291 list_add(&dcb->dcb_linkage, &oh->ot_commit_dcb_list);
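/*
 * Illustrative sketch (not part of the original file): a caller registering
 * a commit-time callback would fill a dt_txn_commit_cb roughly as below and
 * hand it to osd_trans_cb_add(); the callback is later invoked from
 * osd_trans_commit_cb() (or from osd_trans_stop_cb() if DCB_TRANS_STOP is
 * set) with the transaction result. The "example_*" names are hypothetical.
 *
 *	static void example_commit_cb(struct lu_env *env, struct thandle *th,
 *				      struct dt_txn_commit_cb *dcb, int err)
 *	{
 *		// runs once the journal transaction commits (or stops)
 *	}
 *
 *	dcb->dcb_magic = TRANS_COMMIT_CB_MAGIC;
 *	dcb->dcb_func  = example_commit_cb;
 *	dcb->dcb_flags = 0;	// or DCB_TRANS_STOP for a stop-time callback
 *	rc = osd_trans_cb_add(th, dcb);
 */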
1297 * Called just before object is freed. Releases all resources except for
1298 * object itself (that is released by osd_object_free()).
1300 * Concurrency: no concurrent access is possible that late in object
1303 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
1305 struct osd_object *obj = osd_obj(l);
1306 struct inode *inode = obj->oo_inode;
1308 LINVRNT(osd_invariant(obj));
1311 * If object is unlinked remove fid->ino mapping from object index.
1314 osd_index_fini(obj);
1315 if (inode != NULL) {
1316 struct qsd_instance *qsd = osd_obj2dev(obj)->od_quota_slave;
1317 qid_t uid = i_uid_read(inode);
1318 qid_t gid = i_gid_read(inode);
1321 obj->oo_inode = NULL;
1324 struct osd_thread_info *info = osd_oti_get(env);
1325 struct lquota_id_info *qi = &info->oti_qi;
1327 /* Release granted quota to master if necessary */
1328 qi->lqi_id.qid_uid = uid;
1329 qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
1331 qi->lqi_id.qid_uid = gid;
1332 qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
1338 * Concurrency: ->loo_object_release() is called under site spin-lock.
1340 static void osd_object_release(const struct lu_env *env,
1341 struct lu_object *l)
1346 * Concurrency: shouldn't matter.
1348 static int osd_object_print(const struct lu_env *env, void *cookie,
1349 lu_printer_t p, const struct lu_object *l)
1351 struct osd_object *o = osd_obj(l);
1352 struct iam_descr *d;
1354 if (o->oo_dir != NULL)
1355 d = o->oo_dir->od_container.ic_descr;
1358 return (*p)(env, cookie,
1359 LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
1361 o->oo_inode ? o->oo_inode->i_ino : 0UL,
1362 o->oo_inode ? o->oo_inode->i_generation : 0,
1363 d ? d->id_ops->id_name : "plain");
1366 #define GRANT_FOR_LOCAL_OIDS 32 /* 128kB for last_rcvd, quota files, ... */
1369 * Concurrency: shouldn't matter.
1371 int osd_statfs(const struct lu_env *env, struct dt_device *d,
1372 struct obd_statfs *sfs)
1374 struct osd_device *osd = osd_dt_dev(d);
1375 struct super_block *sb = osd_sb(osd);
1376 struct kstatfs *ksfs;
1379 if (unlikely(osd->od_mnt == NULL))
1380 return -EINPROGRESS;
/* osd_lproc.c calls this without an env; allocate ksfs for that case */
1383 if (unlikely(env == NULL)) {
1384 OBD_ALLOC_PTR(ksfs);
1388 ksfs = &osd_oti_get(env)->oti_ksfs;
1391 spin_lock(&osd->od_osfs_lock);
1392 result = sb->s_op->statfs(sb->s_root, ksfs);
1393 if (likely(result == 0)) { /* N.B. statfs can't really fail */
1394 statfs_pack(sfs, ksfs);
1395 if (unlikely(sb->s_flags & MS_RDONLY))
1396 sfs->os_state = OS_STATE_READONLY;
1397 if (LDISKFS_HAS_INCOMPAT_FEATURE(sb,
1398 LDISKFS_FEATURE_INCOMPAT_EXTENTS))
1399 sfs->os_maxbytes = sb->s_maxbytes;
1401 sfs->os_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
1403 spin_unlock(&osd->od_osfs_lock);
1405 if (unlikely(env == NULL))
1408 /* Reserve a small amount of space for local objects like last_rcvd,
1409 * llog, quota files, ... */
1410 if (sfs->os_bavail <= GRANT_FOR_LOCAL_OIDS) {
1413 sfs->os_bavail -= GRANT_FOR_LOCAL_OIDS;
1414 /** Take out metadata overhead for indirect blocks */
1415 sfs->os_bavail -= sfs->os_bavail >> (sb->s_blocksize_bits - 3);
 * Estimate space needed for file creations. We assume the largest filename
 * is the largest object ID, 2^64 - 1, hence a filename of 20 characters.
 * That is 28 bytes per object, i.e. 28MB for 1M objects ... not so bad.
1426 #ifdef __LDISKFS_DIR_REC_LEN
1427 #define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
1429 #define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
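/*
 * Worked example (added note): 2^64 - 1 is a 20-digit decimal number, and a
 * 20-character name in an ldiskfs directory entry takes
 * LDISKFS_DIR_REC_LEN(20) = 28 bytes, assuming the usual 8 bytes of entry
 * header plus the name length rounded up to a 4-byte boundary; that is the
 * 28 bytes / 28MB-per-million-objects figure quoted above.
 */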
1433 * Concurrency: doesn't access mutable data.
1435 static void osd_conf_get(const struct lu_env *env,
1436 const struct dt_device *dev,
1437 struct dt_device_param *param)
1439 struct super_block *sb = osd_sb(osd_dt_dev(dev));
1443 * XXX should be taken from not-yet-existing fs abstraction layer.
1445 param->ddp_max_name_len = LDISKFS_NAME_LEN;
1446 param->ddp_max_nlink = LDISKFS_LINK_MAX;
1447 param->ddp_block_shift = sb->s_blocksize_bits;
1448 param->ddp_mount_type = LDD_MT_LDISKFS;
1449 if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EXTENTS))
1450 param->ddp_maxbytes = sb->s_maxbytes;
1452 param->ddp_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
1453 /* Overhead estimate should be fairly accurate, so we really take a tiny
1454 * error margin which also avoids fragmenting the filesystem too much */
1455 param->ddp_grant_reserved = 2; /* end up to be 1.9% after conversion */
/* inodes are statically allocated, so per-inode space consumption
 * is the space consumed by the directory entry */
1458 param->ddp_inodespace = PER_OBJ_USAGE;
1459 /* per-fragment overhead to be used by the client code */
1460 param->ddp_grant_frag = 6 * LDISKFS_BLOCK_SIZE(sb);
1461 param->ddp_mntopts = 0;
1462 if (test_opt(sb, XATTR_USER))
1463 param->ddp_mntopts |= MNTOPT_USERXATTR;
1464 if (test_opt(sb, POSIX_ACL))
1465 param->ddp_mntopts |= MNTOPT_ACL;
/* LOD might calculate the max stripe count based on max_ea_size,
 * so we need to take the overhead into account as well:
 * xattr_header + magic + xattr_entry_head */
1470 ea_overhead = sizeof(struct ldiskfs_xattr_header) + sizeof(__u32) +
1471 LDISKFS_XATTR_LEN(XATTR_NAME_MAX_LEN);
1473 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
1474 if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
1475 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE -
1479 param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
1483 * Concurrency: shouldn't matter.
1485 static int osd_sync(const struct lu_env *env, struct dt_device *d)
1489 CDEBUG(D_CACHE, "syncing OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
1491 rc = ldiskfs_force_commit(osd_sb(osd_dt_dev(d)));
1493 CDEBUG(D_CACHE, "synced OSD %s: rc = %d\n",
1494 LUSTRE_OSD_LDISKFS_NAME, rc);
 * Start commit for OSD device.
 *
 * An implementation of the dt_commit_async method for the OSD device.
 * Asynchronously starts the underlying fs sync and thereby a transaction
1506 * \param env environment
1507 * \param d dt device
1509 * \see dt_device_operations
1511 static int osd_commit_async(const struct lu_env *env,
1512 struct dt_device *d)
1514 struct super_block *s = osd_sb(osd_dt_dev(d));
1517 CDEBUG(D_HA, "async commit OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
1518 RETURN(s->s_op->sync_fs(s, 0));
1522 * Concurrency: shouldn't matter.
1525 static int osd_ro(const struct lu_env *env, struct dt_device *d)
1527 struct super_block *sb = osd_sb(osd_dt_dev(d));
1528 struct block_device *dev = sb->s_bdev;
1529 #ifdef HAVE_DEV_SET_RDONLY
1530 struct block_device *jdev = LDISKFS_SB(sb)->journal_bdev;
1533 int rc = -EOPNOTSUPP;
1537 #ifdef HAVE_DEV_SET_RDONLY
1538 CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
1540 if (jdev && (jdev != dev)) {
1541 CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
1543 dev_set_rdonly(jdev);
1545 CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
1546 dev_set_rdonly(dev);
1548 CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
1549 osd_dt_dev(d)->od_svname, (long)dev, rc);
 * Note: we do not count quota credits here.
 * If we mount with --data_journal we may need more.
1558 const int osd_dto_credits_noquota[DTO_NR] = {
1561 * INDEX_EXTRA_TRANS_BLOCKS(8) +
1562 * SINGLEDATA_TRANS_BLOCKS(8)
 * XXX Note: maybe iam needs more, since iam has more levels than
1566 [DTO_INDEX_INSERT] = 16,
 * just modify a single entry, probably merging a few within a block
1571 [DTO_INDEX_DELETE] = 1,
1575 [DTO_INDEX_UPDATE] = 16,
1577 * 4(inode, inode bits, groups, GDT)
1578 * notice: OI updates are counted separately with DTO_INDEX_INSERT
1580 [DTO_OBJECT_CREATE] = 4,
1582 * 4(inode, inode bits, groups, GDT)
1583 * notice: OI updates are counted separately with DTO_INDEX_DELETE
1585 [DTO_OBJECT_DELETE] = 4,
1587 * Attr set credits (inode)
1589 [DTO_ATTR_SET_BASE] = 1,
1591 * Xattr set. The same as xattr of EXT3.
1592 * DATA_TRANS_BLOCKS(14)
 * XXX Note: in the original MDS implementation INDEX_EXTRA_TRANS_BLOCKS
 * are also counted in; it is not clear why.
1596 [DTO_XATTR_SET] = 14,
1598 * credits for inode change during write.
1600 [DTO_WRITE_BASE] = 3,
1602 * credits for single block write.
1604 [DTO_WRITE_BLOCK] = 14,
 * Attr set credits for chown.
 * These are extra credits for setattr, and they are zero without quota
1609 [DTO_ATTR_SET_CHOWN] = 0
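/*
 * Added note (illustrative): the credits declared per operation via
 * osd_trans_declare_op() are presumably accumulated into oh->ot_credits
 * and later checked against the journal's maximum transaction size in
 * osd_param_is_not_sane() / osd_trans_start(); see
 * osd_declare_object_create() below for a typical combination of these
 * entries.
 */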
1612 static const struct dt_device_operations osd_dt_ops = {
1613 .dt_root_get = osd_root_get,
1614 .dt_statfs = osd_statfs,
1615 .dt_trans_create = osd_trans_create,
1616 .dt_trans_start = osd_trans_start,
1617 .dt_trans_stop = osd_trans_stop,
1618 .dt_trans_cb_add = osd_trans_cb_add,
1619 .dt_conf_get = osd_conf_get,
1620 .dt_sync = osd_sync,
1622 .dt_commit_async = osd_commit_async,
1625 static void osd_object_read_lock(const struct lu_env *env,
1626 struct dt_object *dt, unsigned role)
1628 struct osd_object *obj = osd_dt_obj(dt);
1629 struct osd_thread_info *oti = osd_oti_get(env);
1631 LINVRNT(osd_invariant(obj));
1633 LASSERT(obj->oo_owner != env);
1634 down_read_nested(&obj->oo_sem, role);
1636 LASSERT(obj->oo_owner == NULL);
1640 static void osd_object_write_lock(const struct lu_env *env,
1641 struct dt_object *dt, unsigned role)
1643 struct osd_object *obj = osd_dt_obj(dt);
1644 struct osd_thread_info *oti = osd_oti_get(env);
1646 LINVRNT(osd_invariant(obj));
1648 LASSERT(obj->oo_owner != env);
1649 down_write_nested(&obj->oo_sem, role);
1651 LASSERT(obj->oo_owner == NULL);
1652 obj->oo_owner = env;
1656 static void osd_object_read_unlock(const struct lu_env *env,
1657 struct dt_object *dt)
1659 struct osd_object *obj = osd_dt_obj(dt);
1660 struct osd_thread_info *oti = osd_oti_get(env);
1662 LINVRNT(osd_invariant(obj));
1664 LASSERT(oti->oti_r_locks > 0);
1666 up_read(&obj->oo_sem);
1669 static void osd_object_write_unlock(const struct lu_env *env,
1670 struct dt_object *dt)
1672 struct osd_object *obj = osd_dt_obj(dt);
1673 struct osd_thread_info *oti = osd_oti_get(env);
1675 LINVRNT(osd_invariant(obj));
1677 LASSERT(obj->oo_owner == env);
1678 LASSERT(oti->oti_w_locks > 0);
1680 obj->oo_owner = NULL;
1681 up_write(&obj->oo_sem);
1684 static int osd_object_write_locked(const struct lu_env *env,
1685 struct dt_object *dt)
1687 struct osd_object *obj = osd_dt_obj(dt);
1689 LINVRNT(osd_invariant(obj));
1691 return obj->oo_owner == env;
1694 static struct timespec *osd_inode_time(const struct lu_env *env,
1695 struct inode *inode, __u64 seconds)
1697 struct osd_thread_info *oti = osd_oti_get(env);
1698 struct timespec *t = &oti->oti_time;
1700 t->tv_sec = seconds;
1702 *t = timespec_trunc(*t, inode->i_sb->s_time_gran);
1707 static void osd_inode_getattr(const struct lu_env *env,
1708 struct inode *inode, struct lu_attr *attr)
1710 attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
1711 LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
1712 LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE |
1715 attr->la_atime = LTIME_S(inode->i_atime);
1716 attr->la_mtime = LTIME_S(inode->i_mtime);
1717 attr->la_ctime = LTIME_S(inode->i_ctime);
1718 attr->la_mode = inode->i_mode;
1719 attr->la_size = i_size_read(inode);
1720 attr->la_blocks = inode->i_blocks;
1721 attr->la_uid = i_uid_read(inode);
1722 attr->la_gid = i_gid_read(inode);
1723 attr->la_flags = LDISKFS_I(inode)->i_flags;
1724 attr->la_nlink = inode->i_nlink;
1725 attr->la_rdev = inode->i_rdev;
1726 attr->la_blksize = 1 << inode->i_blkbits;
1727 attr->la_blkbits = inode->i_blkbits;
1730 static int osd_attr_get(const struct lu_env *env,
1731 struct dt_object *dt,
1732 struct lu_attr *attr)
1734 struct osd_object *obj = osd_dt_obj(dt);
1736 if (!dt_object_exists(dt))
1739 LASSERT(!dt_object_remote(dt));
1740 LINVRNT(osd_invariant(obj));
1742 spin_lock(&obj->oo_guard);
1743 osd_inode_getattr(env, obj->oo_inode, attr);
1744 spin_unlock(&obj->oo_guard);
1748 static int osd_declare_attr_set(const struct lu_env *env,
1749 struct dt_object *dt,
1750 const struct lu_attr *attr,
1751 struct thandle *handle)
1753 struct osd_thandle *oh;
1754 struct osd_object *obj;
1755 struct osd_thread_info *info = osd_oti_get(env);
1756 struct lquota_id_info *qi = &info->oti_qi;
1764 LASSERT(dt != NULL);
1765 LASSERT(handle != NULL);
1767 obj = osd_dt_obj(dt);
1768 LASSERT(osd_invariant(obj));
1770 oh = container_of0(handle, struct osd_thandle, ot_super);
1771 LASSERT(oh->ot_handle == NULL);
1773 osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
1774 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
1776 if (attr == NULL || obj->oo_inode == NULL)
1779 bspace = obj->oo_inode->i_blocks;
1780 bspace <<= obj->oo_inode->i_sb->s_blocksize_bits;
1781 bspace = toqb(bspace);
/* Changing ownership is always performed by the superuser, so it should
 * not fail because of quota limits.
 *
 * We still need to call osd_declare_qid() to calculate the journal
 * credits for updating the quota accounting files and to trigger quota
 * space adjustment once the operation is completed. */
1789 if (attr->la_valid & LA_UID || attr->la_valid & LA_GID) {
1791 uid = i_uid_read(obj->oo_inode);
1792 qi->lqi_type = USRQUOTA;
1793 enforce = (attr->la_valid & LA_UID) && (attr->la_uid != uid);
1794 /* inode accounting */
1795 qi->lqi_is_blk = false;
1797 /* one more inode for the new uid ... */
1798 qi->lqi_id.qid_uid = attr->la_uid;
1800 /* Reserve credits for the new uid */
1801 rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
1802 if (rc == -EDQUOT || rc == -EINPROGRESS)
1807 /* and one less inode for the current uid */
1808 qi->lqi_id.qid_uid = uid;
1810 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1811 if (rc == -EDQUOT || rc == -EINPROGRESS)
1816 /* block accounting */
1817 qi->lqi_is_blk = true;
1819 /* more blocks for the new uid ... */
1820 qi->lqi_id.qid_uid = attr->la_uid;
1821 qi->lqi_space = bspace;
 * Credits for the new uid have been reserved; re-use "obj"
 * to save the credit reservation.
1826 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1827 if (rc == -EDQUOT || rc == -EINPROGRESS)
1832 /* and finally less blocks for the current uid */
1833 qi->lqi_id.qid_uid = uid;
1834 qi->lqi_space = -bspace;
1835 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1836 if (rc == -EDQUOT || rc == -EINPROGRESS)
1842 gid = i_gid_read(obj->oo_inode);
1843 qi->lqi_type = GRPQUOTA;
1844 enforce = (attr->la_valid & LA_GID) && (attr->la_gid != gid);
1846 /* inode accounting */
1847 qi->lqi_is_blk = false;
1849 /* one more inode for the new gid ... */
1850 qi->lqi_id.qid_gid = attr->la_gid;
1852 rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
1853 if (rc == -EDQUOT || rc == -EINPROGRESS)
1858 /* and one less inode for the current gid */
1859 qi->lqi_id.qid_gid = gid;
1861 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1862 if (rc == -EDQUOT || rc == -EINPROGRESS)
1867 /* block accounting */
1868 qi->lqi_is_blk = true;
1870 /* more blocks for the new gid ... */
1871 qi->lqi_id.qid_gid = attr->la_gid;
1872 qi->lqi_space = bspace;
1873 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1874 if (rc == -EDQUOT || rc == -EINPROGRESS)
1879 /* and finally less blocks for the current gid */
1880 qi->lqi_id.qid_gid = gid;
1881 qi->lqi_space = -bspace;
1882 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1883 if (rc == -EDQUOT || rc == -EINPROGRESS)
1892 static int osd_inode_setattr(const struct lu_env *env,
1893 struct inode *inode, const struct lu_attr *attr)
1895 __u64 bits = attr->la_valid;
/* Only allow setting the size for regular files */
1898 if (!S_ISREG(inode->i_mode))
1899 bits &= ~(LA_SIZE | LA_BLOCKS);
1904 if (bits & LA_ATIME)
1905 inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
1906 if (bits & LA_CTIME)
1907 inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
1908 if (bits & LA_MTIME)
1909 inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
1910 if (bits & LA_SIZE) {
1911 LDISKFS_I(inode)->i_disksize = attr->la_size;
1912 i_size_write(inode, attr->la_size);
1916 /* OSD should not change "i_blocks" which is used by quota.
1917 * "i_blocks" should be changed by ldiskfs only. */
1918 if (bits & LA_BLOCKS)
1919 inode->i_blocks = attr->la_blocks;
1922 inode->i_mode = (inode->i_mode & S_IFMT) |
1923 (attr->la_mode & ~S_IFMT);
1925 i_uid_write(inode, attr->la_uid);
1927 i_gid_write(inode, attr->la_gid);
1928 if (bits & LA_NLINK)
1929 set_nlink(inode, attr->la_nlink);
1931 inode->i_rdev = attr->la_rdev;
1933 if (bits & LA_FLAGS) {
1934 /* always keep S_NOCMTIME */
1935 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
1941 static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
1943 if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
1944 (attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
1948 ll_vfs_dq_init(inode);
1950 if (attr->la_valid & LA_UID)
1951 iattr.ia_valid |= ATTR_UID;
1952 if (attr->la_valid & LA_GID)
1953 iattr.ia_valid |= ATTR_GID;
1954 iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
1955 iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);
1957 rc = ll_vfs_dq_transfer(inode, &iattr);
1959 CERROR("%s: quota transfer failed: rc = %d. Is quota "
1960 "enforcement enabled on the ldiskfs "
1961 "filesystem?\n", inode->i_sb->s_id, rc);
1968 static int osd_attr_set(const struct lu_env *env,
1969 struct dt_object *dt,
1970 const struct lu_attr *attr,
1971 struct thandle *handle)
1973 struct osd_object *obj = osd_dt_obj(dt);
1974 struct inode *inode;
1977 if (!dt_object_exists(dt))
1980 LASSERT(handle != NULL);
1981 LASSERT(!dt_object_remote(dt));
1982 LASSERT(osd_invariant(obj));
1984 osd_trans_exec_op(env, handle, OSD_OT_ATTR_SET);
1986 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING)) {
1987 struct osd_thread_info *oti = osd_oti_get(env);
1988 const struct lu_fid *fid0 = lu_object_fid(&dt->do_lu);
1989 struct lu_fid *fid1 = &oti->oti_fid;
1990 struct osd_inode_id *id = &oti->oti_id;
1991 struct iam_path_descr *ipd;
1992 struct iam_container *bag;
1993 struct osd_thandle *oh;
1996 fid_cpu_to_be(fid1, fid0);
1997 memset(id, 1, sizeof(*id));
1998 bag = &osd_fid2oi(osd_dev(dt->do_lu.lo_dev),
1999 fid0)->oi_dir.od_container;
2000 ipd = osd_idx_ipd_get(env, bag);
2001 if (unlikely(ipd == NULL))
2004 oh = container_of0(handle, struct osd_thandle, ot_super);
2005 rc = iam_update(oh->ot_handle, bag, (const struct iam_key *)fid1,
2006 (const struct iam_rec *)id, ipd);
2007 osd_ipd_put(env, bag, ipd);
2008 return(rc > 0 ? 0 : rc);
2011 inode = obj->oo_inode;
2013 rc = osd_quota_transfer(inode, attr);
2017 spin_lock(&obj->oo_guard);
2018 rc = osd_inode_setattr(env, inode, attr);
2019 spin_unlock(&obj->oo_guard);
2022 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2024 osd_trans_exec_check(env, handle, OSD_OT_ATTR_SET);
2029 static struct dentry *osd_child_dentry_get(const struct lu_env *env,
2030 struct osd_object *obj,
2031 const char *name, const int namelen)
2033 return osd_child_dentry_by_inode(env, obj->oo_inode, name, namelen);
2036 static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
2037 umode_t mode, struct dt_allocation_hint *hint,
2041 struct osd_device *osd = osd_obj2dev(obj);
2042 struct osd_thandle *oth;
2043 struct dt_object *parent = NULL;
2044 struct inode *inode;
2046 LINVRNT(osd_invariant(obj));
2047 LASSERT(obj->oo_inode == NULL);
2048 LASSERT(obj->oo_hl_head == NULL);
2050 if (S_ISDIR(mode) && ldiskfs_pdo) {
2051 obj->oo_hl_head =ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
2052 if (obj->oo_hl_head == NULL)
2056 oth = container_of(th, struct osd_thandle, ot_super);
2057 LASSERT(oth->ot_handle->h_transaction != NULL);
2059 if (hint != NULL && hint->dah_parent != NULL &&
2060 !dt_object_remote(hint->dah_parent))
2061 parent = hint->dah_parent;
2063 inode = ldiskfs_create_inode(oth->ot_handle,
2064 parent ? osd_dt_obj(parent)->oo_inode :
2065 osd_sb(osd)->s_root->d_inode,
2067 if (!IS_ERR(inode)) {
2068 /* Do not update file c/mtime in ldiskfs. */
2069 inode->i_flags |= S_NOCMTIME;
/* A newly created object must be consistent,
 * so it is unnecessary to scrub it. */
2073 ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_NOSCRUB);
2075 obj->oo_inode = inode;
2078 if (obj->oo_hl_head != NULL) {
2079 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
2080 obj->oo_hl_head = NULL;
2082 result = PTR_ERR(inode);
2084 LINVRNT(osd_invariant(obj));
2092 static int osd_mkdir(struct osd_thread_info *info, struct osd_object *obj,
2093 struct lu_attr *attr,
2094 struct dt_allocation_hint *hint,
2095 struct dt_object_format *dof,
2099 struct osd_thandle *oth;
2100 __u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX));
2102 LASSERT(S_ISDIR(attr->la_mode));
2104 oth = container_of(th, struct osd_thandle, ot_super);
2105 LASSERT(oth->ot_handle->h_transaction != NULL);
2106 result = osd_mkfile(info, obj, mode, hint, th);
2111 static int osd_mk_index(struct osd_thread_info *info, struct osd_object *obj,
2112 struct lu_attr *attr,
2113 struct dt_allocation_hint *hint,
2114 struct dt_object_format *dof,
2118 struct osd_thandle *oth;
2119 const struct dt_index_features *feat = dof->u.dof_idx.di_feat;
2121 __u32 mode = (attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX));
2123 LASSERT(S_ISREG(attr->la_mode));
2125 oth = container_of(th, struct osd_thandle, ot_super);
2126 LASSERT(oth->ot_handle->h_transaction != NULL);
2128 result = osd_mkfile(info, obj, mode, hint, th);
2130 LASSERT(obj->oo_inode != NULL);
2131 if (feat->dif_flags & DT_IND_VARKEY)
2132 result = iam_lvar_create(obj->oo_inode,
2133 feat->dif_keysize_max,
2135 feat->dif_recsize_max,
2138 result = iam_lfix_create(obj->oo_inode,
2139 feat->dif_keysize_max,
2141 feat->dif_recsize_max,
2148 static int osd_mkreg(struct osd_thread_info *info, struct osd_object *obj,
2149 struct lu_attr *attr,
2150 struct dt_allocation_hint *hint,
2151 struct dt_object_format *dof,
2154 LASSERT(S_ISREG(attr->la_mode));
2155 return osd_mkfile(info, obj, (attr->la_mode &
2156 (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
2159 static int osd_mksym(struct osd_thread_info *info, struct osd_object *obj,
2160 struct lu_attr *attr,
2161 struct dt_allocation_hint *hint,
2162 struct dt_object_format *dof,
2165 LASSERT(S_ISLNK(attr->la_mode));
2166 return osd_mkfile(info, obj, (attr->la_mode &
2167 (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
2170 static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
2171 struct lu_attr *attr,
2172 struct dt_allocation_hint *hint,
2173 struct dt_object_format *dof,
2176 umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
2179 LINVRNT(osd_invariant(obj));
2180 LASSERT(obj->oo_inode == NULL);
2181 LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
2182 S_ISFIFO(mode) || S_ISSOCK(mode));
2184 result = osd_mkfile(info, obj, mode, hint, th);
2186 LASSERT(obj->oo_inode != NULL);
2188 * This inode should be marked dirty for i_rdev. Currently
2189 * that is done in the osd_attr_init().
2191 init_special_inode(obj->oo_inode, obj->oo_inode->i_mode,
2194 LINVRNT(osd_invariant(obj));
2198 typedef int (*osd_obj_type_f)(struct osd_thread_info *, struct osd_object *,
2200 struct dt_allocation_hint *hint,
2201 struct dt_object_format *dof,
2204 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
2206 osd_obj_type_f result;
2222 result = osd_mk_index;
2233 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
2234 struct dt_object *parent, struct dt_object *child,
2239 ah->dah_parent = parent;
2240 ah->dah_mode = child_mode;
2243 static void osd_attr_init(struct osd_thread_info *info, struct osd_object *obj,
2244 struct lu_attr *attr, struct dt_object_format *dof)
2246 struct inode *inode = obj->oo_inode;
2247 __u64 valid = attr->la_valid;
2250 attr->la_valid &= ~(LA_TYPE | LA_MODE);
2252 if (dof->dof_type != DFT_NODE)
2253 attr->la_valid &= ~LA_RDEV;
2254 if ((valid & LA_ATIME) && (attr->la_atime == LTIME_S(inode->i_atime)))
2255 attr->la_valid &= ~LA_ATIME;
2256 if ((valid & LA_CTIME) && (attr->la_ctime == LTIME_S(inode->i_ctime)))
2257 attr->la_valid &= ~LA_CTIME;
2258 if ((valid & LA_MTIME) && (attr->la_mtime == LTIME_S(inode->i_mtime)))
2259 attr->la_valid &= ~LA_MTIME;
2261 result = osd_quota_transfer(inode, attr);
2265 if (attr->la_valid != 0) {
2266 result = osd_inode_setattr(info->oti_env, inode, attr);
2268 * The osd_inode_setattr() should always succeed here. The
2269 * only error that could be returned is EDQUOT when we are
2270 * trying to change the UID or GID of the inode. However, this
2271 * should not happen since quota enforcement is no longer
2272 * enabled on ldiskfs (lquota takes care of it).
2274 LASSERTF(result == 0, "%d\n", result);
2275 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2278 attr->la_valid = valid;
2282 * Helper function for osd_object_create()
2284 * \retval 0, on success
2286 static int __osd_object_create(struct osd_thread_info *info,
2287 struct osd_object *obj, struct lu_attr *attr,
2288 struct dt_allocation_hint *hint,
2289 struct dt_object_format *dof,
2295 osd_trans_exec_op(info->oti_env, th, OSD_OT_CREATE);
2297 /* we drop umask so that permissions we pass are not affected */
2298 umask = current->fs->umask;
2299 current->fs->umask = 0;
2301 result = osd_create_type_f(dof->dof_type)(info, obj, attr, hint, dof,
2303 if (likely(obj->oo_inode != NULL)) {
2304 LASSERT(obj->oo_inode->i_state & I_NEW);
2306 /* Unlock the inode before attr initialization to avoid
2307 * unnecessary dqget operations. LU-6378 */
2308 unlock_new_inode(obj->oo_inode);
2311 if (likely(result == 0)) {
2312 osd_attr_init(info, obj, attr, dof);
2313 osd_object_init0(obj);
2316 /* restore previous umask value */
2317 current->fs->umask = umask;
2319 osd_trans_exec_check(info->oti_env, th, OSD_OT_CREATE);
2325 * Helper function for osd_object_create()
2327 * \retval 0, on success
2329 static int __osd_oi_insert(const struct lu_env *env, struct osd_object *obj,
2330 const struct lu_fid *fid, struct thandle *th)
2332 struct osd_thread_info *info = osd_oti_get(env);
2333 struct osd_inode_id *id = &info->oti_id;
2334 struct osd_device *osd = osd_obj2dev(obj);
2335 struct osd_thandle *oh;
2338 LASSERT(obj->oo_inode != NULL);
2340 oh = container_of0(th, struct osd_thandle, ot_super);
2341 LASSERT(oh->ot_handle);
2342 osd_trans_exec_op(env, th, OSD_OT_INSERT);
2344 osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
2345 rc = osd_oi_insert(info, osd, fid, id, oh->ot_handle, OI_CHECK_FLD);
2346 osd_trans_exec_check(env, th, OSD_OT_INSERT);
2351 int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
2352 u64 seq, struct lu_seq_range *range)
2354 struct seq_server_site *ss = osd_seq_site(osd);
2356 if (fid_seq_is_idif(seq)) {
2357 fld_range_set_ost(range);
2358 range->lsr_index = idif_ost_idx(seq);
2362 if (!fid_seq_in_fldb(seq)) {
2363 fld_range_set_mdt(range);
2365 /* FIXME: if ss is NULL, it should not need to get lsr_index
2367 range->lsr_index = ss->ss_node_id;
2371 LASSERT(ss != NULL);
2372 fld_range_set_any(range);
2373 /* OSD will only do local fld lookup */
2374 return fld_local_lookup(env, ss->ss_server_fld, seq, range);
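/*
 * A minimal usage sketch (illustrative only; the fld_range_is_ost() and
 * fld_range_is_mdt() helpers are assumed from lustre_fld.h): classify a
 * sequence with osd_fld_lookup() and report where it lives.
 *
 *	struct lu_seq_range range = { 0 };
 *	int rc = osd_fld_lookup(env, osd, seq, &range);
 *
 *	if (rc == 0 && fld_range_is_ost(&range))
 *		CDEBUG(D_INFO, "seq %#llx -> OST index %u\n",
 *		       seq, range.lsr_index);
 *	else if (rc == 0 && fld_range_is_mdt(&range))
 *		CDEBUG(D_INFO, "seq %#llx -> MDT index %u\n",
 *		       seq, range.lsr_index);
 */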
2378 * Concurrency: no external locking is necessary.
2380 static int osd_declare_object_create(const struct lu_env *env,
2381 struct dt_object *dt,
2382 struct lu_attr *attr,
2383 struct dt_allocation_hint *hint,
2384 struct dt_object_format *dof,
2385 struct thandle *handle)
2387 struct osd_thandle *oh;
2391 LASSERT(handle != NULL);
2393 oh = container_of0(handle, struct osd_thandle, ot_super);
2394 LASSERT(oh->ot_handle == NULL);
2396 /* An EA (index) object consumes more credits than a regular object:
2397 * osd_mk_index vs. osd_mkreg: osd_mk_index will create 2 blocks for the
2398 * root_node and leaf_node, and each block could involve a block, block
2399 * bitmap, group descriptor and GDT change, so add 4 * 2 credits here. */
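/* Keep the conditional below fully parenthesized: '+' binds tighter than
 * '?:' in C, so without the extra parentheses the whole credit sum would
 * become the condition and the declared credits would be wrong. */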
2400 osd_trans_declare_op(env, oh, OSD_OT_CREATE,
2401 osd_dto_credits_noquota[DTO_OBJECT_CREATE] +
2402 ((dof->dof_type == DFT_INDEX) ? 4 * 2 : 0));
2403 /* Reusing an idle OI block may cause one additional OI block
2405 osd_trans_declare_op(env, oh, OSD_OT_INSERT,
2406 osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
2411 rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid, 1, oh,
2412 osd_dt_obj(dt), false, NULL, false);
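/*
 * A minimal sketch of the declare/execute pairing used throughout this
 * file (illustrative only; "create_credits" stands for the credit count
 * computed above): credits are reserved before the journal handle starts,
 * and the matching exec/check calls bracket the actual change.
 *
 *	// declare phase -- no journal handle yet
 *	osd_trans_declare_op(env, oh, OSD_OT_CREATE, create_credits);
 *
 *	// execute phase -- inside the running transaction
 *	osd_trans_exec_op(env, th, OSD_OT_CREATE);
 *	// ... perform the creation ...
 *	osd_trans_exec_check(env, th, OSD_OT_CREATE);
 */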
2419 static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
2420 struct lu_attr *attr,
2421 struct dt_allocation_hint *hint,
2422 struct dt_object_format *dof, struct thandle *th)
2424 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2425 struct osd_object *obj = osd_dt_obj(dt);
2426 struct osd_thread_info *info = osd_oti_get(env);
2430 if (dt_object_exists(dt))
2433 LINVRNT(osd_invariant(obj));
2434 LASSERT(!dt_object_remote(dt));
2435 LASSERT(osd_write_locked(env, obj));
2436 LASSERT(th != NULL);
2438 if (unlikely(fid_is_acct(fid)))
2439 /* Quota files can't be created from the kernel any more,
2440 * 'tune2fs -O quota' will take care of creating them */
2443 result = __osd_object_create(info, obj, attr, hint, dof, th);
2445 result = __osd_oi_insert(env, obj, fid, th);
2446 if (obj->oo_dt.do_body_ops == &osd_body_ops_new)
2447 obj->oo_dt.do_body_ops = &osd_body_ops;
2449 LASSERT(ergo(result == 0,
2450 dt_object_exists(dt) && !dt_object_remote(dt)));
2452 LASSERT(osd_invariant(obj));
2457 * Called to destroy on-disk representation of the object
2459 * Concurrency: must be locked
2461 static int osd_declare_object_destroy(const struct lu_env *env,
2462 struct dt_object *dt,
2465 struct osd_object *obj = osd_dt_obj(dt);
2466 struct inode *inode = obj->oo_inode;
2467 struct osd_thandle *oh;
2471 oh = container_of0(th, struct osd_thandle, ot_super);
2472 LASSERT(oh->ot_handle == NULL);
2475 osd_trans_declare_op(env, oh, OSD_OT_DESTROY,
2476 osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
2477 /* Recycling an idle OI leaf may cause three additional OI blocks
2479 osd_trans_declare_op(env, oh, OSD_OT_DELETE,
2480 osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
2481 /* one less inode */
2482 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2483 -1, oh, obj, false, NULL, false);
2486 /* data to be truncated */
2487 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2488 0, oh, obj, true, NULL, false);
2492 static int osd_object_destroy(const struct lu_env *env,
2493 struct dt_object *dt,
2496 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2497 struct osd_object *obj = osd_dt_obj(dt);
2498 struct inode *inode = obj->oo_inode;
2499 struct osd_device *osd = osd_obj2dev(obj);
2500 struct osd_thandle *oh;
2504 oh = container_of0(th, struct osd_thandle, ot_super);
2505 LASSERT(oh->ot_handle);
2507 LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
2509 if (unlikely(fid_is_acct(fid)))
2512 if (S_ISDIR(inode->i_mode)) {
2513 LASSERT(osd_inode_unlinked(inode) || inode->i_nlink == 1 ||
2514 inode->i_nlink == 2);
2515 /* it will check/delete the inode from the remote parent;
2516 * how to optimize it? XXX: unlink performance impact */
2517 result = osd_delete_from_remote_parent(env, osd, obj, oh);
2518 if (result != 0 && result != -ENOENT) {
2519 CERROR("%s: delete inode "DFID": rc = %d\n",
2520 osd_name(osd), PFID(fid), result);
2522 spin_lock(&obj->oo_guard);
2524 spin_unlock(&obj->oo_guard);
2525 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2528 osd_trans_exec_op(env, th, OSD_OT_DESTROY);
2530 ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
2531 result = osd_oi_delete(osd_oti_get(env), osd, fid, oh->ot_handle,
2534 osd_trans_exec_check(env, th, OSD_OT_DESTROY);
2535 /* XXX: add to ext3 orphan list */
2536 /* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
2538 /* not needed in the cache anymore */
2539 set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
2540 obj->oo_destroyed = 1;
2546 * Put the fid into lustre_mdt_attrs, and then place the structure in the
2547 * inode's EA. This fid should not be altered during the lifetime
2550 * \retval +ve, on success
2551 * \retval -ve, on error
2553 * FIXME: it would be good to have/use ldiskfs_xattr_set_handle() here
2555 int osd_ea_fid_set(struct osd_thread_info *info, struct inode *inode,
2556 const struct lu_fid *fid, __u32 compat, __u32 incompat)
2558 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
2562 if (OBD_FAIL_CHECK(OBD_FAIL_FID_INLMA))
2565 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_OST_EA_FID_SET))
2568 lustre_lma_init(lma, fid, compat, incompat);
2569 lustre_lma_swab(lma);
2571 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma, sizeof(*lma),
2573 /* LMA may already exist, but we need to check that all the
2574 * desired compat/incompat flags have been added. */
2575 if (unlikely(rc == -EEXIST)) {
2576 if (compat == 0 && incompat == 0)
2579 rc = __osd_xattr_get(inode, &info->oti_obj_dentry,
2580 XATTR_NAME_LMA, info->oti_mdt_attrs_old,
2585 lustre_lma_swab(lma);
2586 if (!(~lma->lma_compat & compat) &&
2587 !(~lma->lma_incompat & incompat))
2590 lma->lma_compat |= compat;
2591 lma->lma_incompat |= incompat;
2592 lustre_lma_swab(lma);
2593 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
2594 sizeof(*lma), XATTR_REPLACE);
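/*
 * A short note on the flag-merge test above (illustrative only): the
 * "already present" check relies on (~have & want) == 0 holding exactly
 * when every bit of 'want' is already set in 'have', e.g.:
 *
 *	__u32 have = lma->lma_compat;
 *	__u32 want = compat;
 *
 *	if ((~have & want) == 0 && (~lma->lma_incompat & incompat) == 0)
 *		return 0;	// nothing new to add, keep the existing LMA
 */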
2601 * ldiskfs supports a fid in the dirent; it is passed in dentry->d_fsdata.
2602 * lustre 1.8 also uses d_fsdata for passing other info to ldiskfs.
2603 * To keep compatibility with the 1.8 ldiskfs driver we need a
2604 * magic number at the start of the fid data.
2605 * \ldiskfs_dentry_param is used only to pass the fid from osd to ldiskfs.
2608 static void osd_get_ldiskfs_dirent_param(struct ldiskfs_dentry_param *param,
2609 const struct lu_fid *fid)
2611 if (!fid_is_namespace_visible(fid) ||
2612 OBD_FAIL_CHECK(OBD_FAIL_FID_IGIF)) {
2613 param->edp_magic = 0;
2617 param->edp_magic = LDISKFS_LUFID_MAGIC;
2618 param->edp_len = sizeof(struct lu_fid) + 1;
2619 fid_cpu_to_be((struct lu_fid *)param->edp_data, (struct lu_fid *)fid);
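/*
 * A minimal usage sketch (illustrative only), mirroring what
 * __osd_ea_add_rec() does further below: the parameter is attached to the
 * child dentry via d_fsdata so ldiskfs can store the fid in the dirent.
 *
 *	struct ldiskfs_dentry_param *ldp =
 *		(struct ldiskfs_dentry_param *)info->oti_ldp;
 *
 *	osd_get_ldiskfs_dirent_param(ldp, fid);
 *	child->d_fsdata = (void *)ldp;
 *	rc = osd_ldiskfs_add_entry(oth->ot_handle, child, cinode, hlock);
 */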
2623 * Try to read the fid from inode ea into dt_rec.
2625 * \param fid object fid.
2627 * \retval 0 on success
2629 static int osd_ea_fid_get(const struct lu_env *env, struct osd_object *obj,
2630 __u32 ino, struct lu_fid *fid,
2631 struct osd_inode_id *id)
2633 struct osd_thread_info *info = osd_oti_get(env);
2634 struct inode *inode;
2637 osd_id_gen(id, ino, OSD_OII_NOGEN);
2638 inode = osd_iget_fid(info, osd_obj2dev(obj), id, fid);
2640 RETURN(PTR_ERR(inode));
2646 static int osd_add_dot_dotdot_internal(struct osd_thread_info *info,
2648 struct inode *parent_dir,
2649 const struct lu_fid *dot_fid,
2650 const struct lu_fid *dot_dot_fid,
2651 struct osd_thandle *oth)
2653 struct ldiskfs_dentry_param *dot_ldp;
2654 struct ldiskfs_dentry_param *dot_dot_ldp;
2656 dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
2657 osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
2659 dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
2660 dot_ldp->edp_magic = 0;
2662 return ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
2663 dir, dot_ldp, dot_dot_ldp);
2667 * Create a local agent inode for a remote entry
2669 static struct inode *osd_create_local_agent_inode(const struct lu_env *env,
2670 struct osd_device *osd,
2671 struct osd_object *pobj,
2672 const struct lu_fid *fid,
2676 struct osd_thread_info *info = osd_oti_get(env);
2677 struct inode *local;
2678 struct osd_thandle *oh;
2683 oh = container_of(th, struct osd_thandle, ot_super);
2684 LASSERT(oh->ot_handle->h_transaction != NULL);
2686 local = ldiskfs_create_inode(oh->ot_handle, pobj->oo_inode, type);
2687 if (IS_ERR(local)) {
2688 CERROR("%s: create local error %d\n", osd_name(osd),
2689 (int)PTR_ERR(local));
2693 ldiskfs_set_inode_state(local, LDISKFS_STATE_LUSTRE_NOSCRUB);
2694 unlock_new_inode(local);
2696 /* Set special LMA flag for local agent inode */
2697 rc = osd_ea_fid_set(info, local, fid, 0, LMAI_AGENT);
2699 CERROR("%s: set LMA for "DFID" remote inode failed: rc = %d\n",
2700 osd_name(osd), PFID(fid), rc);
2701 RETURN(ERR_PTR(rc));
2707 rc = osd_add_dot_dotdot_internal(info, local, pobj->oo_inode,
2708 lu_object_fid(&pobj->oo_dt.do_lu),
2711 CERROR("%s: "DFID" add dot dotdot error: rc = %d\n",
2712 osd_name(osd), PFID(fid), rc);
2713 RETURN(ERR_PTR(rc));
2720 * When a directory entry is deleted, we have to take care of a possible
2721 * agent inode referenced by it. Unfortunately we can't do that at this
2722 * point: iget() inside a running transaction leads to deadlock, and we'd
2723 * better not call it on every delete declaration for performance reasons.
2724 * So we put a potential agent inode on a list and process it once the
2725 * transaction is over. Notice this is no worse in terms of real orphans,
2726 * as regular object destroy doesn't put inodes on the on-disk orphan
2727 * list. This should be addressed separately.
2729 static int osd_schedule_agent_inode_removal(const struct lu_env *env,
2730 struct osd_thandle *oh,
2733 struct osd_device *osd = osd_dt_dev(oh->ot_super.th_dev);
2734 struct osd_obj_orphan *oor;
2741 oor->oor_env = (struct lu_env *)env;
2742 spin_lock(&osd->od_osfs_lock);
2743 list_add(&oor->oor_list, &osd->od_orphan_list);
2744 spin_unlock(&osd->od_osfs_lock);
2746 oh->ot_remove_agents = 1;
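/*
 * A minimal sketch of how the deferred list is drained (illustrative only;
 * it assumes the transaction stop path checks ot_remove_agents, which is
 * what the flag set above is for):
 *
 *	// after the journal handle has been stopped
 *	if (oh->ot_remove_agents != 0)
 *		osd_process_scheduled_agent_removals(env, osd);
 */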
2752 static int osd_process_scheduled_agent_removals(const struct lu_env *env,
2753 struct osd_device *osd)
2755 struct osd_thread_info *info = osd_oti_get(env);
2756 struct osd_obj_orphan *oor, *tmp;
2757 struct osd_inode_id id;
2758 struct list_head list;
2759 struct inode *inode;
2764 INIT_LIST_HEAD(&list);
2766 spin_lock(&osd->od_osfs_lock);
2767 list_for_each_entry_safe(oor, tmp, &osd->od_orphan_list, oor_list) {
2768 if (oor->oor_env == env) {
2769 list_del(&oor->oor_list);
2770 list_add(&oor->oor_list, &list);
2773 spin_unlock(&osd->od_osfs_lock);
2775 list_for_each_entry_safe(oor, tmp, &list, oor_list) {
2779 list_del(&oor->oor_list);
2782 osd_id_gen(&id, ino, OSD_OII_NOGEN);
2783 inode = osd_iget_fid(info, osd, &id, &fid);
2787 if (!osd_remote_fid(env, osd, &fid)) {
2792 jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC, 1);
2794 mark_inode_dirty(inode);
2795 ldiskfs_journal_stop(jh);
2803 * OSD layer object create function for interoperability mode (b11826).
2804 * This is mostly similar to osd_object_create(); the only difference is
2805 * that the fid is inserted into the inode EA here.
2807 * \retval 0, on success
2808 * \retval -ve, on error
2810 static int osd_object_ea_create(const struct lu_env *env, struct dt_object *dt,
2811 struct lu_attr *attr,
2812 struct dt_allocation_hint *hint,
2813 struct dt_object_format *dof,
2816 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2817 struct osd_object *obj = osd_dt_obj(dt);
2818 struct osd_thread_info *info = osd_oti_get(env);
2823 if (dt_object_exists(dt))
2826 LASSERT(osd_invariant(obj));
2827 LASSERT(!dt_object_remote(dt));
2828 LASSERT(osd_write_locked(env, obj));
2829 LASSERT(th != NULL);
2831 if (unlikely(fid_is_acct(fid)))
2832 /* Quota files can't be created from the kernel any more,
2833 * 'tune2fs -O quota' will take care of creating them */
2836 result = __osd_object_create(info, obj, attr, hint, dof, th);
2838 if (fid_is_idif(fid) &&
2839 !osd_dev(dt->do_lu.lo_dev)->od_index_in_idif) {
2840 struct lu_fid *tfid = &info->oti_fid;
2841 struct ost_id *oi = &info->oti_ostid;
2843 fid_to_ostid(fid, oi);
2844 ostid_to_fid(tfid, oi, 0);
2845 result = osd_ea_fid_set(info, obj->oo_inode, tfid,
2846 LMAC_FID_ON_OST, 0);
2848 result = osd_ea_fid_set(info, obj->oo_inode, fid,
2849 fid_is_on_ost(info, osd_obj2dev(obj),
2850 fid, OI_CHECK_FLD) ?
2851 LMAC_FID_ON_OST : 0, 0);
2853 if (obj->oo_dt.do_body_ops == &osd_body_ops_new)
2854 obj->oo_dt.do_body_ops = &osd_body_ops;
2858 result = __osd_oi_insert(env, obj, fid, th);
2860 LASSERT(ergo(result == 0,
2861 dt_object_exists(dt) && !dt_object_remote(dt)));
2862 LINVRNT(osd_invariant(obj));
2866 static int osd_declare_object_ref_add(const struct lu_env *env,
2867 struct dt_object *dt,
2868 struct thandle *handle)
2870 struct osd_thandle *oh;
2872 /* it's possible that object doesn't exist yet */
2873 LASSERT(handle != NULL);
2875 oh = container_of0(handle, struct osd_thandle, ot_super);
2876 LASSERT(oh->ot_handle == NULL);
2878 osd_trans_declare_op(env, oh, OSD_OT_REF_ADD,
2879 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2885 * Concurrency: @dt is write locked.
2887 static int osd_object_ref_add(const struct lu_env *env,
2888 struct dt_object *dt, struct thandle *th)
2890 struct osd_object *obj = osd_dt_obj(dt);
2891 struct inode *inode = obj->oo_inode;
2892 struct osd_thandle *oh;
2895 if (!dt_object_exists(dt))
2898 LINVRNT(osd_invariant(obj));
2899 LASSERT(!dt_object_remote(dt));
2900 LASSERT(osd_write_locked(env, obj));
2901 LASSERT(th != NULL);
2903 oh = container_of0(th, struct osd_thandle, ot_super);
2904 LASSERT(oh->ot_handle != NULL);
2906 osd_trans_exec_op(env, th, OSD_OT_REF_ADD);
2908 CDEBUG(D_INODE, DFID" increase nlink %d\n",
2909 PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
2911 * The DIR_NLINK feature allows directories to exceed LDISKFS_LINK_MAX
2912 * (65000) subdirectories by storing "1" in i_nlink if the link count
2913 * would otherwise overflow. Directory traversal tools understand
2914 * that (st_nlink == 1) indicates that the filesystem does not track
2915 * the hard link count on the directory, and will not abort subdirectory
2916 * scanning early once (st_nlink - 2) subdirs have been found.
2918 * This also has to properly handle the case of inodes with nlink == 0,
2919 * in case they are being linked into the PENDING directory.
2921 spin_lock(&obj->oo_guard);
2922 if (unlikely(inode->i_nlink == 0))
2923 /* inc_nlink from 0 may cause WARN_ON */
2924 set_nlink(inode, 1);
2926 ldiskfs_inc_count(oh->ot_handle, inode);
2927 if (!S_ISDIR(inode->i_mode))
2928 LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
2930 spin_unlock(&obj->oo_guard);
2932 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2933 LINVRNT(osd_invariant(obj));
2935 osd_trans_exec_check(env, th, OSD_OT_REF_ADD);
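/*
 * A minimal userspace sketch of the DIR_NLINK convention described above
 * (illustrative only; scan_all_entries() and scan_n_subdirs() are
 * hypothetical helpers):
 *
 *	struct stat st;
 *
 *	if (stat(path, &st) == 0 && S_ISDIR(st.st_mode)) {
 *		if (st.st_nlink == 1)
 *			scan_all_entries(path);            // nlink not tracked
 *		else
 *			scan_n_subdirs(path, st.st_nlink - 2);
 *	}
 */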
2940 static int osd_declare_object_ref_del(const struct lu_env *env,
2941 struct dt_object *dt,
2942 struct thandle *handle)
2944 struct osd_thandle *oh;
2946 LASSERT(!dt_object_remote(dt));
2947 LASSERT(handle != NULL);
2949 oh = container_of0(handle, struct osd_thandle, ot_super);
2950 LASSERT(oh->ot_handle == NULL);
2952 osd_trans_declare_op(env, oh, OSD_OT_REF_DEL,
2953 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2959 * Concurrency: @dt is write locked.
2961 static int osd_object_ref_del(const struct lu_env *env, struct dt_object *dt,
2964 struct osd_object *obj = osd_dt_obj(dt);
2965 struct inode *inode = obj->oo_inode;
2966 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
2967 struct osd_thandle *oh;
2969 if (!dt_object_exists(dt))
2972 LINVRNT(osd_invariant(obj));
2973 LASSERT(!dt_object_remote(dt));
2974 LASSERT(osd_write_locked(env, obj));
2975 LASSERT(th != NULL);
2977 oh = container_of0(th, struct osd_thandle, ot_super);
2978 LASSERT(oh->ot_handle != NULL);
2980 osd_trans_exec_op(env, th, OSD_OT_REF_DEL);
2982 spin_lock(&obj->oo_guard);
2983 /* That can be the result of an upgrade from an old Lustre version and
2984 * applies only to local files. Just skip this ref_del call.
2985 * ext4_unlink() only treats this as a warning; don't LASSERT here. */
2986 if (inode->i_nlink == 0) {
2987 CDEBUG_LIMIT(fid_is_norm(lu_object_fid(&dt->do_lu)) ?
2988 D_ERROR : D_INODE, "%s: nlink == 0 on "DFID
2989 ", maybe an upgraded file? (LU-3915)\n",
2990 osd_name(osd), PFID(lu_object_fid(&dt->do_lu)));
2991 spin_unlock(&obj->oo_guard);
2995 CDEBUG(D_INODE, DFID" decrease nlink %d\n",
2996 PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
2998 ldiskfs_dec_count(oh->ot_handle, inode);
2999 spin_unlock(&obj->oo_guard);
3001 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
3002 LINVRNT(osd_invariant(obj));
3004 osd_trans_exec_check(env, th, OSD_OT_REF_DEL);
3010 * Get the 64-bit version for an inode.
3012 static int osd_object_version_get(const struct lu_env *env,
3013 struct dt_object *dt, dt_obj_version_t *ver)
3015 struct inode *inode = osd_dt_obj(dt)->oo_inode;
3017 CDEBUG(D_INODE, "Get version "LPX64" for inode %lu\n",
3018 LDISKFS_I(inode)->i_fs_version, inode->i_ino);
3019 *ver = LDISKFS_I(inode)->i_fs_version;
3024 * Concurrency: @dt is read locked.
3026 static int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
3027 struct lu_buf *buf, const char *name)
3029 struct osd_object *obj = osd_dt_obj(dt);
3030 struct inode *inode = obj->oo_inode;
3031 struct osd_thread_info *info = osd_oti_get(env);
3032 struct dentry *dentry = &info->oti_obj_dentry;
3034 /* version get is not a real XATTR but uses the xattr API */
3035 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
3036 /* for version we are just using xattr API but change inode
3038 if (buf->lb_len == 0)
3039 return sizeof(dt_obj_version_t);
3041 if (buf->lb_len < sizeof(dt_obj_version_t))
3044 osd_object_version_get(env, dt, buf->lb_buf);
3046 return sizeof(dt_obj_version_t);
3049 if (!dt_object_exists(dt))
3052 LASSERT(!dt_object_remote(dt));
3053 LASSERT(inode->i_op != NULL);
3054 LASSERT(inode->i_op->getxattr != NULL);
3056 return __osd_xattr_get(inode, dentry, name, buf->lb_buf, buf->lb_len);
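/*
 * A minimal caller sketch for the XATTR_NAME_VERSION pseudo-attribute
 * handled above (illustrative only): the version lives in the inode but
 * is read through the ordinary xattr path.
 *
 *	dt_obj_version_t ver;
 *	struct lu_buf vbuf = { .lb_buf = &ver, .lb_len = sizeof(ver) };
 *
 *	rc = osd_xattr_get(env, dt, &vbuf, XATTR_NAME_VERSION);
 *	// rc == sizeof(dt_obj_version_t) on success; 'ver' holds i_fs_version
 */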
3060 static int osd_declare_xattr_set(const struct lu_env *env,
3061 struct dt_object *dt,
3062 const struct lu_buf *buf, const char *name,
3063 int fl, struct thandle *handle)
3065 struct osd_thandle *oh;
3067 struct super_block *sb = osd_sb(osd_dev(dt->do_lu.lo_dev));
3069 LASSERT(handle != NULL);
3071 oh = container_of0(handle, struct osd_thandle, ot_super);
3072 LASSERT(oh->ot_handle == NULL);
3074 if (strcmp(name, XATTR_NAME_LMA) == 0) {
3075 /* In the non-upgrade case, the LMA is set first and
3076 * usually fits in the inode. But in the upgrade case, the LMA
3077 * may be in a separate EA block. */
3078 if (!dt_object_exists(dt))
3080 else if (fl == LU_XATTR_REPLACE)
3084 } else if (strcmp(name, XATTR_NAME_VERSION) == 0) {
3088 credits = osd_dto_credits_noquota[DTO_XATTR_SET];
3093 if (buf->lb_buf == NULL && dt_object_exists(dt)) {
3094 /* learn xattr size from osd_xattr_get if
3095 attribute has not been read yet */
3096 buflen = __osd_xattr_get(
3097 osd_dt_obj(dt)->oo_inode,
3098 &osd_oti_get(env)->oti_obj_dentry,
3103 buflen = buf->lb_len;
3106 if (buflen > sb->s_blocksize) {
3107 credits += osd_calc_bkmap_credits(
3109 (buflen + sb->s_blocksize - 1) >>
3110 sb->s_blocksize_bits);
3114 * xattr set may involve inode quota change, reserve credits for
3115 * dquot_initialize()
3117 credits += LDISKFS_MAXQUOTAS_INIT_BLOCKS(sb);
3120 osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET, credits);
3126 * Set the 64-bit version for object
3128 static void osd_object_version_set(const struct lu_env *env,
3129 struct dt_object *dt,
3130 dt_obj_version_t *new_version)
3132 struct inode *inode = osd_dt_obj(dt)->oo_inode;
3134 CDEBUG(D_INODE, "Set version "LPX64" (old "LPX64") for inode %lu\n",
3135 *new_version, LDISKFS_I(inode)->i_fs_version, inode->i_ino);
3137 LDISKFS_I(inode)->i_fs_version = *new_version;
3138 /** Version is set after all inode operations are finished,
3139 * so we should mark it dirty here */
3140 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
3144 * Concurrency: @dt is write locked.
3146 static int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
3147 const struct lu_buf *buf, const char *name, int fl,
3148 struct thandle *handle)
3150 struct osd_object *obj = osd_dt_obj(dt);
3151 struct inode *inode = obj->oo_inode;
3152 struct osd_thread_info *info = osd_oti_get(env);
3157 LASSERT(handle != NULL);
3159 /* version set is not real XATTR */
3160 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
3161 /* for version we are just using xattr API but change inode
3163 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
3164 osd_object_version_set(env, dt, buf->lb_buf);
3165 return sizeof(dt_obj_version_t);
3168 CDEBUG(D_INODE, DFID" set xattr '%s' with size %zu\n",
3169 PFID(lu_object_fid(&dt->do_lu)), name, buf->lb_len);
3171 osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
3172 if (fl & LU_XATTR_REPLACE)
3173 fs_flags |= XATTR_REPLACE;
3175 if (fl & LU_XATTR_CREATE)
3176 fs_flags |= XATTR_CREATE;
3178 if (strcmp(name, XATTR_NAME_LMV) == 0) {
3179 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
3182 rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
3186 lma->lma_incompat |= LMAI_STRIPED;
3187 lustre_lma_swab(lma);
3188 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
3189 sizeof(*lma), XATTR_REPLACE);
3194 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LINKEA_OVERFLOW) &&
3195 strcmp(name, XATTR_NAME_LINK) == 0)
3198 rc = __osd_xattr_set(info, inode, name, buf->lb_buf, buf->lb_len,
3200 osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
3206 * Concurrency: @dt is read locked.
3208 static int osd_xattr_list(const struct lu_env *env, struct dt_object *dt,
3209 const struct lu_buf *buf)
3211 struct osd_object *obj = osd_dt_obj(dt);
3212 struct inode *inode = obj->oo_inode;
3213 struct osd_thread_info *info = osd_oti_get(env);
3214 struct dentry *dentry = &info->oti_obj_dentry;
3216 if (!dt_object_exists(dt))
3219 LASSERT(!dt_object_remote(dt));
3220 LASSERT(inode->i_op != NULL);
3221 LASSERT(inode->i_op->listxattr != NULL);
3223 dentry->d_inode = inode;
3224 dentry->d_sb = inode->i_sb;
3225 return inode->i_op->listxattr(dentry, buf->lb_buf, buf->lb_len);
3228 static int osd_declare_xattr_del(const struct lu_env *env,
3229 struct dt_object *dt, const char *name,
3230 struct thandle *handle)
3232 struct osd_thandle *oh;
3233 struct super_block *sb = osd_sb(osd_dev(dt->do_lu.lo_dev));
3235 LASSERT(!dt_object_remote(dt));
3236 LASSERT(handle != NULL);
3238 oh = container_of0(handle, struct osd_thandle, ot_super);
3239 LASSERT(oh->ot_handle == NULL);
3241 osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
3242 osd_dto_credits_noquota[DTO_XATTR_SET]);
3244 * xattr del may involve inode quota change, reserve credits for
3245 * dquot_initialize()
3247 oh->ot_credits += LDISKFS_MAXQUOTAS_INIT_BLOCKS(sb);
3253 * Concurrency: @dt is write locked.
3255 static int osd_xattr_del(const struct lu_env *env, struct dt_object *dt,
3256 const char *name, struct thandle *handle)
3258 struct osd_object *obj = osd_dt_obj(dt);
3259 struct inode *inode = obj->oo_inode;
3260 struct osd_thread_info *info = osd_oti_get(env);
3261 struct dentry *dentry = &info->oti_obj_dentry;
3264 if (!dt_object_exists(dt))
3267 LASSERT(!dt_object_remote(dt));
3268 LASSERT(inode->i_op != NULL);
3269 LASSERT(inode->i_op->removexattr != NULL);
3270 LASSERT(handle != NULL);
3272 osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
3274 ll_vfs_dq_init(inode);
3275 dentry->d_inode = inode;
3276 dentry->d_sb = inode->i_sb;
3277 rc = inode->i_op->removexattr(dentry, name);
3278 osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
3282 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
3283 __u64 start, __u64 end)
3285 struct osd_object *obj = osd_dt_obj(dt);
3286 struct inode *inode = obj->oo_inode;
3287 struct osd_thread_info *info = osd_oti_get(env);
3288 struct dentry *dentry = &info->oti_obj_dentry;
3289 struct file *file = &info->oti_file;
3294 dentry->d_inode = inode;
3295 dentry->d_sb = inode->i_sb;
3296 file->f_path.dentry = dentry;
3297 file->f_mapping = inode->i_mapping;
3298 file->f_op = inode->i_fop;
3299 set_file_inode(file, inode);
3301 rc = ll_vfs_fsync_range(file, start, end, 0);
3310 static int osd_iam_index_probe(const struct lu_env *env, struct osd_object *o,
3311 const struct dt_index_features *feat)
3313 struct iam_descr *descr;
3315 if (osd_object_is_root(o))
3316 return feat == &dt_directory_features;
3318 LASSERT(o->oo_dir != NULL);
3320 descr = o->oo_dir->od_container.ic_descr;
3321 if (feat == &dt_directory_features) {
3322 if (descr->id_rec_size == sizeof(struct osd_fid_pack))
3328 feat->dif_keysize_min <= descr->id_key_size &&
3329 descr->id_key_size <= feat->dif_keysize_max &&
3330 feat->dif_recsize_min <= descr->id_rec_size &&
3331 descr->id_rec_size <= feat->dif_recsize_max &&
3332 !(feat->dif_flags & (DT_IND_VARKEY |
3333 DT_IND_VARREC | DT_IND_NONUNQ)) &&
3334 ergo(feat->dif_flags & DT_IND_UPDATE,
3335 1 /* XXX check that object (and file system) is
3340 static int osd_iam_container_init(const struct lu_env *env,
3341 struct osd_object *obj,
3342 struct osd_directory *dir)
3344 struct iam_container *bag = &dir->od_container;
3347 result = iam_container_init(bag, &dir->od_descr, obj->oo_inode);
3351 result = iam_container_setup(bag);
3353 obj->oo_dt.do_index_ops = &osd_index_iam_ops;
3355 iam_container_fini(bag);
3362 * Concurrency: no external locking is necessary.
3364 static int osd_index_try(const struct lu_env *env, struct dt_object *dt,
3365 const struct dt_index_features *feat)
3369 struct osd_object *obj = osd_dt_obj(dt);
3371 LINVRNT(osd_invariant(obj));
3373 if (osd_object_is_root(obj)) {
3374 dt->do_index_ops = &osd_index_ea_ops;
3376 } else if (feat == &dt_directory_features) {
3377 dt->do_index_ops = &osd_index_ea_ops;
3378 if (obj->oo_inode != NULL && S_ISDIR(obj->oo_inode->i_mode))
3383 } else if (unlikely(feat == &dt_otable_features)) {
3384 dt->do_index_ops = &osd_otable_ops;
3386 } else if (unlikely(feat == &dt_acct_features)) {
3387 dt->do_index_ops = &osd_acct_index_ops;
3390 } else if (!osd_has_index(obj)) {
3391 struct osd_directory *dir;
3396 spin_lock(&obj->oo_guard);
3397 if (obj->oo_dir == NULL)
3401 * Concurrent thread allocated container data.
3404 spin_unlock(&obj->oo_guard);
3406 * Now, that we have container data, serialize its
3409 down_write(&obj->oo_ext_idx_sem);
3411 * recheck under lock.
3413 if (!osd_has_index(obj))
3414 result = osd_iam_container_init(env, obj,
3418 up_write(&obj->oo_ext_idx_sem);
3426 if (result == 0 && skip_iam == 0) {
3427 if (!osd_iam_index_probe(env, obj, feat))
3430 LINVRNT(osd_invariant(obj));
3432 if (result == 0 && feat == &dt_quota_glb_features &&
3433 fid_seq(lu_object_fid(&dt->do_lu)) == FID_SEQ_QUOTA_GLB)
3434 result = osd_quota_migration(env, dt);
3439 static int osd_otable_it_attr_get(const struct lu_env *env,
3440 struct dt_object *dt,
3441 struct lu_attr *attr)
3447 static const struct dt_object_operations osd_obj_ops = {
3448 .do_read_lock = osd_object_read_lock,
3449 .do_write_lock = osd_object_write_lock,
3450 .do_read_unlock = osd_object_read_unlock,
3451 .do_write_unlock = osd_object_write_unlock,
3452 .do_write_locked = osd_object_write_locked,
3453 .do_attr_get = osd_attr_get,
3454 .do_declare_attr_set = osd_declare_attr_set,
3455 .do_attr_set = osd_attr_set,
3456 .do_ah_init = osd_ah_init,
3457 .do_declare_create = osd_declare_object_create,
3458 .do_create = osd_object_create,
3459 .do_declare_destroy = osd_declare_object_destroy,
3460 .do_destroy = osd_object_destroy,
3461 .do_index_try = osd_index_try,
3462 .do_declare_ref_add = osd_declare_object_ref_add,
3463 .do_ref_add = osd_object_ref_add,
3464 .do_declare_ref_del = osd_declare_object_ref_del,
3465 .do_ref_del = osd_object_ref_del,
3466 .do_xattr_get = osd_xattr_get,
3467 .do_declare_xattr_set = osd_declare_xattr_set,
3468 .do_xattr_set = osd_xattr_set,
3469 .do_declare_xattr_del = osd_declare_xattr_del,
3470 .do_xattr_del = osd_xattr_del,
3471 .do_xattr_list = osd_xattr_list,
3472 .do_object_sync = osd_object_sync,
3476 * dt_object_operations for interoperability mode
3477 * (i.e. to run 2.0 mds on 1.8 disk) (b11826)
3479 static const struct dt_object_operations osd_obj_ea_ops = {
3480 .do_read_lock = osd_object_read_lock,
3481 .do_write_lock = osd_object_write_lock,
3482 .do_read_unlock = osd_object_read_unlock,
3483 .do_write_unlock = osd_object_write_unlock,
3484 .do_write_locked = osd_object_write_locked,
3485 .do_attr_get = osd_attr_get,
3486 .do_declare_attr_set = osd_declare_attr_set,
3487 .do_attr_set = osd_attr_set,
3488 .do_ah_init = osd_ah_init,
3489 .do_declare_create = osd_declare_object_create,
3490 .do_create = osd_object_ea_create,
3491 .do_declare_destroy = osd_declare_object_destroy,
3492 .do_destroy = osd_object_destroy,
3493 .do_index_try = osd_index_try,
3494 .do_declare_ref_add = osd_declare_object_ref_add,
3495 .do_ref_add = osd_object_ref_add,
3496 .do_declare_ref_del = osd_declare_object_ref_del,
3497 .do_ref_del = osd_object_ref_del,
3498 .do_xattr_get = osd_xattr_get,
3499 .do_declare_xattr_set = osd_declare_xattr_set,
3500 .do_xattr_set = osd_xattr_set,
3501 .do_declare_xattr_del = osd_declare_xattr_del,
3502 .do_xattr_del = osd_xattr_del,
3503 .do_xattr_list = osd_xattr_list,
3504 .do_object_sync = osd_object_sync,
3507 static const struct dt_object_operations osd_obj_otable_it_ops = {
3508 .do_attr_get = osd_otable_it_attr_get,
3509 .do_index_try = osd_index_try,
3512 static int osd_index_declare_iam_delete(const struct lu_env *env,
3513 struct dt_object *dt,
3514 const struct dt_key *key,
3515 struct thandle *handle)
3517 struct osd_thandle *oh;
3519 oh = container_of0(handle, struct osd_thandle, ot_super);
3520 LASSERT(oh->ot_handle == NULL);
3522 /* Recycling may cause three additional blocks to be changed. */
3523 osd_trans_declare_op(env, oh, OSD_OT_DELETE,
3524 osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
3530 * delete a (key, value) pair from index \a dt specified by \a key
3532 * \param dt osd index object
3533 * \param key key for index
3534 * \param rec record reference
3535 * \param handle transaction handle
3538 * \retval -ve failure
3540 static int osd_index_iam_delete(const struct lu_env *env, struct dt_object *dt,
3541 const struct dt_key *key,
3542 struct thandle *handle)
3544 struct osd_thread_info *oti = osd_oti_get(env);
3545 struct osd_object *obj = osd_dt_obj(dt);
3546 struct osd_thandle *oh;
3547 struct iam_path_descr *ipd;
3548 struct iam_container *bag = &obj->oo_dir->od_container;
3552 if (!dt_object_exists(dt))
3555 LINVRNT(osd_invariant(obj));
3556 LASSERT(!dt_object_remote(dt));
3557 LASSERT(bag->ic_object == obj->oo_inode);
3558 LASSERT(handle != NULL);
3560 osd_trans_exec_op(env, handle, OSD_OT_DELETE);
3562 ipd = osd_idx_ipd_get(env, bag);
3563 if (unlikely(ipd == NULL))
3566 oh = container_of0(handle, struct osd_thandle, ot_super);
3567 LASSERT(oh->ot_handle != NULL);
3568 LASSERT(oh->ot_handle->h_transaction != NULL);
3570 if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
3571 /* swab quota uid/gid provided by caller */
3572 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
3573 key = (const struct dt_key *)&oti->oti_quota_id;
3576 rc = iam_delete(oh->ot_handle, bag, (const struct iam_key *)key, ipd);
3577 osd_ipd_put(env, bag, ipd);
3578 LINVRNT(osd_invariant(obj));
3579 osd_trans_exec_check(env, handle, OSD_OT_DELETE);
3583 static int osd_index_declare_ea_delete(const struct lu_env *env,
3584 struct dt_object *dt,
3585 const struct dt_key *key,
3586 struct thandle *handle)
3588 struct osd_thandle *oh;
3589 struct inode *inode;
3593 LASSERT(!dt_object_remote(dt));
3594 LASSERT(handle != NULL);
3596 oh = container_of0(handle, struct osd_thandle, ot_super);
3597 LASSERT(oh->ot_handle == NULL);
3599 /* due to DNE we may need to remove an agent inode */
3600 osd_trans_declare_op(env, oh, OSD_OT_DELETE,
3601 osd_dto_credits_noquota[DTO_INDEX_DELETE] +
3602 osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
3604 inode = osd_dt_obj(dt)->oo_inode;
3607 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
3608 0, oh, osd_dt_obj(dt), true, NULL, false);
3612 static inline int osd_get_fid_from_dentry(struct ldiskfs_dir_entry_2 *de,
3615 struct osd_fid_pack *rec;
3618 if (de->file_type & LDISKFS_DIRENT_LUFID) {
3619 rec = (struct osd_fid_pack *) (de->name + de->name_len + 1);
3620 rc = osd_fid_unpack((struct lu_fid *)fid, rec);
3621 if (rc == 0 && unlikely(!fid_is_sane((struct lu_fid *)fid)))
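/*
 * A short layout note for the lookup above (illustrative only): when
 * LDISKFS_DIRENT_LUFID is set in de->file_type, a packed fid
 * (struct osd_fid_pack) is stored inside the dirent just past the name,
 * which is what the pointer arithmetic relies on:
 *
 *	rec = (struct osd_fid_pack *)(de->name + de->name_len + 1);
 *	rc  = osd_fid_unpack((struct lu_fid *)fid, rec);
 */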
3627 static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
3628 const struct lu_fid *fid)
3630 struct seq_server_site *ss = osd_seq_site(osd);
3633 /* FID seqs not in FLDB, must be local seq */
3634 if (unlikely(!fid_seq_in_fldb(fid_seq(fid))))
3637 /* If FLD is not initialized yet, which only happens during
3638 * initialization, likely during MGS initialization, we assume
3639 * this is a local FID. */
3640 if (ss == NULL || ss->ss_server_fld == NULL)
3643 /* Only check the local FLDB here */
3644 if (osd_seq_exists(env, osd, fid_seq(fid)))
3651 * Index delete function for interoperability mode (b11826).
3652 * It will remove the directory entry added by osd_index_ea_insert().
3653 * This entry is needed to maintain name->fid mapping.
3655 * \param key the file entry (name) to be deleted
3657 * \retval 0, on success
3658 * \retval -ve, on error
3660 static int osd_index_ea_delete(const struct lu_env *env, struct dt_object *dt,
3661 const struct dt_key *key, struct thandle *handle)
3663 struct osd_object *obj = osd_dt_obj(dt);
3664 struct inode *dir = obj->oo_inode;
3665 struct dentry *dentry;
3666 struct osd_thandle *oh;
3667 struct ldiskfs_dir_entry_2 *de = NULL;
3668 struct buffer_head *bh;
3669 struct htree_lock *hlock = NULL;
3670 struct lu_fid *fid = &osd_oti_get(env)->oti_fid;
3671 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
3675 if (!dt_object_exists(dt))
3678 LINVRNT(osd_invariant(obj));
3679 LASSERT(!dt_object_remote(dt));
3680 LASSERT(handle != NULL);
3682 osd_trans_exec_op(env, handle, OSD_OT_DELETE);
3684 oh = container_of(handle, struct osd_thandle, ot_super);
3685 LASSERT(oh->ot_handle != NULL);
3686 LASSERT(oh->ot_handle->h_transaction != NULL);
3688 ll_vfs_dq_init(dir);
3689 dentry = osd_child_dentry_get(env, obj,
3690 (char *)key, strlen((char *)key));
3692 if (obj->oo_hl_head != NULL) {
3693 hlock = osd_oti_get(env)->oti_hlock;
3694 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
3695 dir, LDISKFS_HLOCK_DEL);
3697 down_write(&obj->oo_ext_idx_sem);
3700 bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
3702 /* If this is not the ".." entry, it might be a remote DNE
3703 * entry and we need to check if the FID is for a remote
3704 * MDT. If the FID is not in the directory entry (e.g.
3705 * upgraded 1.8 filesystem without dirdata enabled) then
3706 * we need to get the FID from the LMA. For a remote directory
3707 * there HAS to be an LMA, it cannot be an IGIF inode in this
3710 * Delete the entry before the agent inode in order to
3711 * simplify error handling. At worst an error after deleting
3712 * the entry first might leak the agent inode afterward. The
3713 * reverse would need filesystem abort in case of error deleting
3714 * the entry after the agent had been removed, or leave a
3715 * dangling entry pointing at a random inode. */
3716 if (strcmp((char *)key, dotdot) != 0) {
3717 LASSERT(de != NULL);
3718 rc = osd_get_fid_from_dentry(de, (struct dt_rec *)fid);
3719 if (rc == -ENODATA) {
3720 /* can't get FID, postpone to the end of the
3721 * transaction when iget() is safe */
3722 osd_schedule_agent_inode_removal(env, oh,
3723 le32_to_cpu(de->inode));
3724 } else if (rc == 0 &&
3725 unlikely(osd_remote_fid(env, osd, fid))) {
3726 osd_schedule_agent_inode_removal(env, oh,
3727 le32_to_cpu(de->inode));
3730 rc = ldiskfs_delete_entry(oh->ot_handle, dir, de, bh);
3736 ldiskfs_htree_unlock(hlock);
3738 up_write(&obj->oo_ext_idx_sem);
3743 /* For an inode on a remote MDT, ".." will point to the
3744 * /Agent directory; check whether the entry needs to be
3745 * deleted from the agent directory */
3746 if (unlikely(strcmp((char *)key, dotdot) == 0)) {
3747 rc = osd_delete_from_remote_parent(env, osd_obj2dev(obj), obj,
3749 if (rc != 0 && rc != -ENOENT) {
3750 CERROR("%s: delete agent inode "DFID": rc = %d\n",
3751 osd_name(osd), PFID(fid), rc);
3761 LASSERT(osd_invariant(obj));
3762 osd_trans_exec_check(env, handle, OSD_OT_DELETE);
3767 * Lookup index for \a key and copy record to \a rec.
3769 * \param dt osd index object
3770 * \param key key for index
3771 * \param rec record reference
3773 * \retval +ve success: exact match
3774 * \retval 0 return record with key not greater than \a key
3775 * \retval -ve failure
3777 static int osd_index_iam_lookup(const struct lu_env *env, struct dt_object *dt,
3778 struct dt_rec *rec, const struct dt_key *key)
3780 struct osd_object *obj = osd_dt_obj(dt);
3781 struct iam_path_descr *ipd;
3782 struct iam_container *bag = &obj->oo_dir->od_container;
3783 struct osd_thread_info *oti = osd_oti_get(env);
3784 struct iam_iterator *it = &oti->oti_idx_it;
3785 struct iam_rec *iam_rec;
3789 if (!dt_object_exists(dt))
3792 LASSERT(osd_invariant(obj));
3793 LASSERT(!dt_object_remote(dt));
3794 LASSERT(bag->ic_object == obj->oo_inode);
3796 ipd = osd_idx_ipd_get(env, bag);
3800 /* got ipd now we can start iterator. */
3801 iam_it_init(it, bag, 0, ipd);
3803 if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
3804 /* swab quota uid/gid provided by caller */
3805 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
3806 key = (const struct dt_key *)&oti->oti_quota_id;
3809 rc = iam_it_get(it, (struct iam_key *)key);
3811 if (S_ISDIR(obj->oo_inode->i_mode))
3812 iam_rec = (struct iam_rec *)oti->oti_ldp;
3814 iam_rec = (struct iam_rec *) rec;
3816 iam_reccpy(&it->ii_path.ip_leaf, (struct iam_rec *)iam_rec);
3818 if (S_ISDIR(obj->oo_inode->i_mode))
3819 osd_fid_unpack((struct lu_fid *) rec,
3820 (struct osd_fid_pack *)iam_rec);
3821 else if (fid_is_quota(lu_object_fid(&dt->do_lu)))
3822 osd_quota_unpack(obj, rec);
3827 osd_ipd_put(env, bag, ipd);
3829 LINVRNT(osd_invariant(obj));
3834 static int osd_index_declare_iam_insert(const struct lu_env *env,
3835 struct dt_object *dt,
3836 const struct dt_rec *rec,
3837 const struct dt_key *key,
3838 struct thandle *handle)
3840 struct osd_thandle *oh;
3842 LASSERT(handle != NULL);
3844 oh = container_of0(handle, struct osd_thandle, ot_super);
3845 LASSERT(oh->ot_handle == NULL);
3847 osd_trans_declare_op(env, oh, OSD_OT_INSERT,
3848 osd_dto_credits_noquota[DTO_INDEX_INSERT]);
3854 * Inserts (key, value) pair in \a dt index object.
3856 * \param dt osd index object
3857 * \param key key for index
3858 * \param rec record reference
3859 * \param th transaction handle
3862 * \retval -ve failure
3864 static int osd_index_iam_insert(const struct lu_env *env, struct dt_object *dt,
3865 const struct dt_rec *rec,
3866 const struct dt_key *key, struct thandle *th,
3869 struct osd_object *obj = osd_dt_obj(dt);
3870 struct iam_path_descr *ipd;
3871 struct osd_thandle *oh;
3872 struct iam_container *bag;
3873 struct osd_thread_info *oti = osd_oti_get(env);
3874 struct iam_rec *iam_rec;
3878 if (!dt_object_exists(dt))
3881 LINVRNT(osd_invariant(obj));
3882 LASSERT(!dt_object_remote(dt));
3884 bag = &obj->oo_dir->od_container;
3885 LASSERT(bag->ic_object == obj->oo_inode);
3886 LASSERT(th != NULL);
3888 osd_trans_exec_op(env, th, OSD_OT_INSERT);
3890 ipd = osd_idx_ipd_get(env, bag);
3891 if (unlikely(ipd == NULL))
3894 oh = container_of0(th, struct osd_thandle, ot_super);
3895 LASSERT(oh->ot_handle != NULL);
3896 LASSERT(oh->ot_handle->h_transaction != NULL);
3897 if (S_ISDIR(obj->oo_inode->i_mode)) {
3898 iam_rec = (struct iam_rec *)oti->oti_ldp;
3899 osd_fid_pack((struct osd_fid_pack *)iam_rec, rec, &oti->oti_fid);
3900 } else if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
3901 /* pack quota uid/gid */
3902 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
3903 key = (const struct dt_key *)&oti->oti_quota_id;
3904 /* pack quota record */
3905 rec = osd_quota_pack(obj, rec, &oti->oti_quota_rec);
3906 iam_rec = (struct iam_rec *)rec;
3908 iam_rec = (struct iam_rec *)rec;
3911 rc = iam_insert(oh->ot_handle, bag, (const struct iam_key *)key,
3913 osd_ipd_put(env, bag, ipd);
3914 LINVRNT(osd_invariant(obj));
3915 osd_trans_exec_check(env, th, OSD_OT_INSERT);
3920 * Calls ldiskfs_add_entry() to add a directory entry
3921 * into the directory. This is required for
3922 * interoperability mode (b11826)
3924 * \retval 0, on success
3925 * \retval -ve, on error
3927 static int __osd_ea_add_rec(struct osd_thread_info *info,
3928 struct osd_object *pobj, struct inode *cinode,
3929 const char *name, const struct lu_fid *fid,
3930 struct htree_lock *hlock, struct thandle *th)
3932 struct ldiskfs_dentry_param *ldp;
3933 struct dentry *child;
3934 struct osd_thandle *oth;
3937 oth = container_of(th, struct osd_thandle, ot_super);
3938 LASSERT(oth->ot_handle != NULL);
3939 LASSERT(oth->ot_handle->h_transaction != NULL);
3940 LASSERT(pobj->oo_inode);
3942 ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
3943 if (unlikely(pobj->oo_inode ==
3944 osd_sb(osd_obj2dev(pobj))->s_root->d_inode))
3947 osd_get_ldiskfs_dirent_param(ldp, fid);
3948 child = osd_child_dentry_get(info->oti_env, pobj, name, strlen(name));
3949 child->d_fsdata = (void *)ldp;
3950 ll_vfs_dq_init(pobj->oo_inode);
3951 rc = osd_ldiskfs_add_entry(oth->ot_handle, child, cinode, hlock);
3952 if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_TYPE)) {
3953 struct ldiskfs_dir_entry_2 *de;
3954 struct buffer_head *bh;
3957 bh = osd_ldiskfs_find_entry(pobj->oo_inode, &child->d_name, &de,
3960 rc1 = ldiskfs_journal_get_write_access(oth->ot_handle,
3963 if (S_ISDIR(cinode->i_mode))
3964 de->file_type = LDISKFS_DIRENT_LUFID |
3965 LDISKFS_FT_REG_FILE;
3967 de->file_type = LDISKFS_DIRENT_LUFID |
3969 ldiskfs_handle_dirty_metadata(oth->ot_handle,
3980 * Calls ldiskfs_add_dot_dotdot() to add dot and dotdot entries
3981 * into the directory.Also sets flags into osd object to
3982 * indicate dot and dotdot are created. This is required for
3983 * interoperability mode (b11826)
3985 * \param dir directory for dot and dotdot fixup.
3986 * \param obj child object for linking
3988 * \retval 0, on success
3989 * \retval -ve, on error
3991 static int osd_add_dot_dotdot(struct osd_thread_info *info,
3992 struct osd_object *dir,
3993 struct inode *parent_dir, const char *name,
3994 const struct lu_fid *dot_fid,
3995 const struct lu_fid *dot_dot_fid,
3998 struct inode *inode = dir->oo_inode;
3999 struct osd_thandle *oth;
4002 oth = container_of(th, struct osd_thandle, ot_super);
4003 LASSERT(oth->ot_handle->h_transaction != NULL);
4004 LASSERT(S_ISDIR(dir->oo_inode->i_mode));
4006 if (strcmp(name, dot) == 0) {
4007 if (dir->oo_compat_dot_created) {
4010 LASSERT(inode == parent_dir);
4011 dir->oo_compat_dot_created = 1;
4014 } else if (strcmp(name, dotdot) == 0) {
4015 if (!dir->oo_compat_dot_created)
4017 /* in case of rename, dotdot is already created */
4018 if (dir->oo_compat_dotdot_created) {
4019 return __osd_ea_add_rec(info, dir, parent_dir, name,
4020 dot_dot_fid, NULL, th);
4023 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_PARENT)) {
4024 struct lu_fid tfid = *dot_dot_fid;
4027 result = osd_add_dot_dotdot_internal(info,
4028 dir->oo_inode, parent_dir, dot_fid,
4031 result = osd_add_dot_dotdot_internal(info,
4032 dir->oo_inode, parent_dir, dot_fid,
4037 dir->oo_compat_dotdot_created = 1;
4045 * It will call the appropriate osd_add* function and return the
4046 * value returned by that function.
4048 static int osd_ea_add_rec(const struct lu_env *env, struct osd_object *pobj,
4049 struct inode *cinode, const char *name,
4050 const struct lu_fid *fid, struct thandle *th)
4052 struct osd_thread_info *info = osd_oti_get(env);
4053 struct htree_lock *hlock;
4056 hlock = pobj->oo_hl_head != NULL ? info->oti_hlock : NULL;
4058 if (name[0] == '.' && (name[1] == '\0' || (name[1] == '.' &&
4060 if (hlock != NULL) {
4061 ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
4064 down_write(&pobj->oo_ext_idx_sem);
4067 rc = osd_add_dot_dotdot(info, pobj, cinode, name,
4068 lu_object_fid(&pobj->oo_dt.do_lu),
4071 if (hlock != NULL) {
4072 ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
4073 pobj->oo_inode, LDISKFS_HLOCK_ADD);
4075 down_write(&pobj->oo_ext_idx_sem);
4078 if (OBD_FAIL_CHECK(OBD_FAIL_FID_INDIR)) {
4079 struct lu_fid *tfid = &info->oti_fid;
4083 rc = __osd_ea_add_rec(info, pobj, cinode, name,
4086 rc = __osd_ea_add_rec(info, pobj, cinode, name, fid,
4091 ldiskfs_htree_unlock(hlock);
4093 up_write(&pobj->oo_ext_idx_sem);
4099 osd_consistency_check(struct osd_thread_info *oti, struct osd_device *dev,
4100 struct osd_idmap_cache *oic)
4102 struct osd_scrub *scrub = &dev->od_scrub;
4103 struct lu_fid *fid = &oic->oic_fid;
4104 struct osd_inode_id *id = &oti->oti_id;
4109 if (!fid_is_norm(fid) && !fid_is_igif(fid))
4112 if (scrub->os_pos_current > id->oii_ino)
4116 rc = osd_oi_lookup(oti, dev, fid, id, 0);
4117 if (rc == -ENOENT) {
4118 struct inode *inode;
4121 inode = osd_iget(oti, dev, &oic->oic_lid);
4123 /* The inode has been removed (by race maybe). */
4124 if (IS_ERR(inode)) {
4125 rc = PTR_ERR(inode);
4127 RETURN(rc == -ESTALE ? -ENOENT : rc);
4131 /* The OI mapping is lost. */
4132 if (id->oii_gen != OSD_OII_NOGEN)
4135 /* The inode may have been reused by others; we do not know.
4136 * Leave it to be handled by a subsequent osd_fid_lookup(). */
4138 } else if (rc != 0 || osd_id_eq(id, &oic->oic_lid)) {
4143 if (thread_is_running(&scrub->os_thread)) {
4144 rc = osd_oii_insert(dev, oic, rc == -ENOENT);
4145 /* There is a race condition between osd_oi_lookup() and OI scrub:
4146 * the OI scrub may finish just after the osd_oi_lookup() failure.
4147 * In that case it is unnecessary to trigger OI scrub again;
4148 * just try osd_oi_lookup() again. */
4149 if (unlikely(rc == -EAGAIN))
4155 if (!dev->od_noscrub && ++once == 1) {
4156 rc = osd_scrub_start(dev, SS_AUTO_PARTIAL | SS_CLEAR_DRYRUN |
4158 CDEBUG(D_LFSCK | D_CONSOLE, "%.16s: trigger OI scrub by RPC "
4159 "for "DFID", rc = %d [2]\n",
4160 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
4162 if (rc == 0 || rc == -EALREADY)
4169 static int osd_fail_fid_lookup(struct osd_thread_info *oti,
4170 struct osd_device *dev,
4171 struct osd_idmap_cache *oic,
4172 struct lu_fid *fid, __u32 ino)
4174 struct lustre_mdt_attrs *lma = &oti->oti_mdt_attrs;
4175 struct inode *inode;
4178 osd_id_gen(&oic->oic_lid, ino, OSD_OII_NOGEN);
4179 inode = osd_iget(oti, dev, &oic->oic_lid);
4180 if (IS_ERR(inode)) {
4181 fid_zero(&oic->oic_fid);
4182 return PTR_ERR(inode);
4185 rc = osd_get_lma(oti, inode, &oti->oti_obj_dentry, lma);
4188 fid_zero(&oic->oic_fid);
4190 *fid = oic->oic_fid = lma->lma_self_fid;
4194 void osd_add_oi_cache(struct osd_thread_info *info, struct osd_device *osd,
4195 struct osd_inode_id *id, const struct lu_fid *fid)
4197 CDEBUG(D_INODE, "add "DFID" %u:%u to info %p\n", PFID(fid),
4198 id->oii_ino, id->oii_gen, info);
4199 info->oti_cache.oic_lid = *id;
4200 info->oti_cache.oic_fid = *fid;
4201 info->oti_cache.oic_dev = osd;
4205 * Get parent FID from the linkEA.
4207 * For a directory whose parent resides on a remote MDT, to satisfy the
4208 * local e2fsck, we insert it into the /REMOTE_PARENT_DIR locally. On
4209 * the other hand, to make lookup(..) on the directory return
4210 * the real parent FID, we append the real parent FID after its ".."
4211 * name entry in the /REMOTE_PARENT_DIR.
4213 * Unfortunately, such PFID-in-dirent cannot be preserved via file-level
4214 * backup. So after the restore, we cannot get the right parent FID from
4215 * its ".." name entry in the /REMOTE_PARENT_DIR. In that case, since
4216 * we have stored the real parent FID in the directory object's linkEA,
4217 * we can parse the linkEA for the real parent FID.
4219 * \param[in] env pointer to the thread context
4220 * \param[in] obj pointer to the object to be handled
4221 * \param[out] fid pointer to the buffer to hold the parent FID
4223 * \retval 0 for getting the real parent FID successfully
4224 * \retval negative error number on failure
4226 static int osd_get_pfid_from_linkea(const struct lu_env *env,
4227 struct osd_object *obj,
4230 struct osd_thread_info *oti = osd_oti_get(env);
4231 struct lu_buf *buf = &oti->oti_big_buf;
4232 struct dentry *dentry = &oti->oti_obj_dentry;
4233 struct inode *inode = obj->oo_inode;
4234 struct linkea_data ldata = { NULL };
4239 if (!S_ISDIR(inode->i_mode))
4243 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LINK,
4244 buf->lb_buf, buf->lb_len);
4245 if (rc == -ERANGE) {
4246 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LINK,
4249 lu_buf_realloc(buf, rc);
4250 if (buf->lb_buf == NULL)
4257 if (unlikely(rc == 0))
4263 if (unlikely(buf->lb_buf == NULL)) {
4264 lu_buf_realloc(buf, rc);
4265 if (buf->lb_buf == NULL)
4272 rc = linkea_init(&ldata);
4274 linkea_first_entry(&ldata);
4275 linkea_entry_unpack(ldata.ld_lee, &ldata.ld_reclen, NULL, fid);
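/*
 * A minimal sketch of the linkEA walk used above (illustrative only),
 * recovering the parent fid after a file-level backup/restore with the
 * same linkea_* helpers:
 *
 *	struct linkea_data ldata = { .ld_buf = buf };
 *
 *	rc = linkea_init(&ldata);
 *	if (rc == 0) {
 *		linkea_first_entry(&ldata);
 *		linkea_entry_unpack(ldata.ld_lee, &ldata.ld_reclen,
 *				    NULL, fid);	// name is not needed here
 *	}
 */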
4282 * Calls ->lookup() to find the dentry. From the dentry, get the inode and
4283 * read the inode's EA to get the fid. This is required for interoperability
4286 * \retval 0, on success
4287 * \retval -ve, on error
4289 static int osd_ea_lookup_rec(const struct lu_env *env, struct osd_object *obj,
4290 struct dt_rec *rec, const struct dt_key *key)
4292 struct inode *dir = obj->oo_inode;
4293 struct dentry *dentry;
4294 struct ldiskfs_dir_entry_2 *de;
4295 struct buffer_head *bh;
4296 struct lu_fid *fid = (struct lu_fid *) rec;
4297 struct htree_lock *hlock = NULL;
4302 LASSERT(dir->i_op != NULL);
4303 LASSERT(dir->i_op->lookup != NULL);
4305 dentry = osd_child_dentry_get(env, obj,
4306 (char *)key, strlen((char *)key));
4308 if (obj->oo_hl_head != NULL) {
4309 hlock = osd_oti_get(env)->oti_hlock;
4310 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
4311 dir, LDISKFS_HLOCK_LOOKUP);
4313 down_read(&obj->oo_ext_idx_sem);
4316 bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
4318 struct osd_thread_info *oti = osd_oti_get(env);
4319 struct osd_inode_id *id = &oti->oti_id;
4320 struct osd_idmap_cache *oic = &oti->oti_cache;
4321 struct osd_device *dev = osd_obj2dev(obj);
4323 ino = le32_to_cpu(de->inode);
4324 if (OBD_FAIL_CHECK(OBD_FAIL_FID_LOOKUP)) {
4326 rc = osd_fail_fid_lookup(oti, dev, oic, fid, ino);
4330 rc = osd_get_fid_from_dentry(de, rec);
4332 /* done with de, release bh */
4335 if (unlikely(ino == osd_remote_parent_ino(dev))) {
4336 const char *name = (const char *)key;
4338 /* If the parent is on remote MDT, and there
4339 * is no FID-in-dirent, then we have to get
4340 * the parent FID from the linkEA. */
4341 if (likely(strlen(name) == 2 &&
4342 name[0] == '.' && name[1] == '.'))
4343 rc = osd_get_pfid_from_linkea(env, obj,
4346 rc = osd_ea_fid_get(env, obj, ino, fid, id);
4349 osd_id_gen(id, ino, OSD_OII_NOGEN);
4352 if (rc != 0 || osd_remote_fid(env, dev, fid)) {
4353 fid_zero(&oic->oic_fid);
4358 osd_add_oi_cache(osd_oti_get(env), osd_obj2dev(obj), id, fid);
4359 rc = osd_consistency_check(oti, dev, oic);
4361 fid_zero(&oic->oic_fid);
4370 ldiskfs_htree_unlock(hlock);
4372 up_read(&obj->oo_ext_idx_sem);
4377 * Find the osd object for given fid.
4379 * \param fid fid of the osd object to find
4381 * \retval osd_object on success
4382 * \retval -ve on error
4384 static struct osd_object *osd_object_find(const struct lu_env *env,
4385 struct dt_object *dt,
4386 const struct lu_fid *fid)
4388 struct lu_device *ludev = dt->do_lu.lo_dev;
4389 struct osd_object *child = NULL;
4390 struct lu_object *luch;
4391 struct lu_object *lo;
4394 * At this point topdev might not exist yet
4395 * (i.e. MGS is preparing profiles), so we can
4396 * not rely on topdev and instead look up with
4397 * our device passed as topdev. This can't work
4398 * if the object isn't cached yet (as osd doesn't
4399 * allocate lu_header). IOW, the object must be
4400 * in the cache, otherwise lu_object_alloc() crashes
4403 luch = lu_object_find_at(env, ludev->ld_site->ls_top_dev == NULL ?
4404 ludev : ludev->ld_site->ls_top_dev,
4406 if (!IS_ERR(luch)) {
4407 if (lu_object_exists(luch)) {
4408 lo = lu_object_locate(luch->lo_header, ludev->ld_type);
4410 child = osd_obj(lo);
4412 LU_OBJECT_DEBUG(D_ERROR, env, luch,
4413 "lu_object can't be located"
4414 DFID"\n", PFID(fid));
4416 if (child == NULL) {
4417 lu_object_put(env, luch);
4418 CERROR("Unable to get osd_object\n");
4419 child = ERR_PTR(-ENOENT);
4422 LU_OBJECT_DEBUG(D_ERROR, env, luch,
4423 "lu_object does not exists "DFID"\n",
4425 lu_object_put(env, luch);
4426 child = ERR_PTR(-ENOENT);
4429 child = ERR_CAST(luch);
4436 * Put the osd object once done with it.
4438 * \param obj osd object that needs to be put
4440 static inline void osd_object_put(const struct lu_env *env,
4441 struct osd_object *obj)
4443 lu_object_put(env, &obj->oo_dt.do_lu);
4446 static int osd_index_declare_ea_insert(const struct lu_env *env,
4447 struct dt_object *dt,
4448 const struct dt_rec *rec,
4449 const struct dt_key *key,
4450 struct thandle *handle)
4452 struct osd_thandle *oh;
4453 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
4454 struct lu_fid *fid = (struct lu_fid *)rec;
4455 int credits, rc = 0;
4458 LASSERT(!dt_object_remote(dt));
4459 LASSERT(handle != NULL);
4461 oh = container_of0(handle, struct osd_thandle, ot_super);
4462 LASSERT(oh->ot_handle == NULL);
4464 credits = osd_dto_credits_noquota[DTO_INDEX_INSERT];
4466 rc = osd_remote_fid(env, osd, fid);
4467 if (unlikely(rc < 0))
4470 /* a reference to a remote inode is represented by an
4471 * agent inode which we have to create */
4472 credits += osd_dto_credits_noquota[DTO_OBJECT_CREATE];
4473 credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
4478 osd_trans_declare_op(env, oh, OSD_OT_INSERT, credits);
4480 if (osd_dt_obj(dt)->oo_inode != NULL) {
4481 struct inode *inode = osd_dt_obj(dt)->oo_inode;
4483 /* We ignore block quota on the meta pool (MDTs), so we needn't
4484 * calculate how many blocks will be consumed by this index
4486 rc = osd_declare_inode_qid(env, i_uid_read(inode),
4487 i_gid_read(inode), 0, oh,
4488 osd_dt_obj(dt), true, NULL, false);
4495 * Index add function for interoperability mode (b11826).
4496 * It will add the directory entry. This entry is needed to
4497 * maintain the name->fid mapping.
4499 * \param key the file entry (name) to be inserted
4500 * \param rec the value for the given key, i.e. the fid
4502 * \retval 0, on success
4503 * \retval -ve, on error
4505 static int osd_index_ea_insert(const struct lu_env *env, struct dt_object *dt,
4506 const struct dt_rec *rec,
4507 const struct dt_key *key, struct thandle *th,
4510 struct osd_object *obj = osd_dt_obj(dt);
4511 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
4512 struct dt_insert_rec *rec1 = (struct dt_insert_rec *)rec;
4513 const struct lu_fid *fid = rec1->rec_fid;
4514 const char *name = (const char *)key;
4515 struct osd_thread_info *oti = osd_oti_get(env);
4516 struct osd_inode_id *id = &oti->oti_id;
4517 struct inode *child_inode = NULL;
4518 struct osd_object *child = NULL;
4522 if (!dt_object_exists(dt))
4525 LASSERT(osd_invariant(obj));
4526 LASSERT(!dt_object_remote(dt));
4527 LASSERT(th != NULL);
4529 osd_trans_exec_op(env, th, OSD_OT_INSERT);
4531 LASSERTF(fid_is_sane(fid), "fid"DFID" is insane!\n", PFID(fid));
4533 rc = osd_remote_fid(env, osd, fid);
4535 CERROR("%s: Can not find object "DFID" rc %d\n",
4536 osd_name(osd), PFID(fid), rc);
4541 /* Insert remote entry */
4542 if (strcmp(name, dotdot) == 0 && strlen(name) == 2) {
4543 struct osd_mdobj_map *omm = osd->od_mdt_map;
4544 struct osd_thandle *oh;
4546 /* If the parent is on a remote MDT, we need to put this object
4548 oh = container_of(th, typeof(*oh), ot_super);
4549 rc = osd_add_to_remote_parent(env, osd, obj, oh);
4551 CERROR("%s: add "DFID" error: rc = %d\n",
4553 PFID(lu_object_fid(&dt->do_lu)), rc);
4557 child_inode = igrab(omm->omm_remote_parent->d_inode);
4559 child_inode = osd_create_local_agent_inode(env, osd,
4560 obj, fid, rec1->rec_type & S_IFMT, th);
4561 if (IS_ERR(child_inode))
4562 RETURN(PTR_ERR(child_inode));
4565 /* Insert local entry */
4566 child = osd_object_find(env, dt, fid);
4567 if (IS_ERR(child)) {
4568 CERROR("%s: Can not find object "DFID"%u:%u: rc = %d\n",
4569 osd_name(osd), PFID(fid),
4570 id->oii_ino, id->oii_gen,
4571 (int)PTR_ERR(child));
4572 RETURN(PTR_ERR(child));
4574 child_inode = igrab(child->oo_inode);
4577 rc = osd_ea_add_rec(env, obj, child_inode, name, fid, th);
4579 CDEBUG(D_INODE, "parent %lu insert %s:%lu rc = %d\n",
4580 obj->oo_inode->i_ino, name, child_inode->i_ino, rc);
4584 osd_object_put(env, child);
4585 LASSERT(osd_invariant(obj));
4586 osd_trans_exec_check(env, th, OSD_OT_INSERT);
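/*
 * Calling-order sketch (illustrative, following the usual dt transaction
 * convention visible in this file): the declare hook runs while the
 * transaction is being assembled (oh->ot_handle is still NULL), while the
 * insert hook runs once the handle has been started:
 *
 *	th = dt_trans_create(env, dt_dev);
 *	rc = dt->do_index_ops->dio_declare_insert(env, dt, rec, key, th);
 *	rc = dt_trans_start(env, dt_dev, th);
 *	rc = dt->do_index_ops->dio_insert(env, dt, rec, key, th, ...);
 *	dt_trans_stop(env, dt_dev, th);
 */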
4591 * Initialize osd Iterator for given osd index object.
4593 * \param dt osd index object
4596 static struct dt_it *osd_it_iam_init(const struct lu_env *env,
4597 struct dt_object *dt,
4600 struct osd_it_iam *it;
4601 struct osd_object *obj = osd_dt_obj(dt);
4602 struct lu_object *lo = &dt->do_lu;
4603 struct iam_path_descr *ipd;
4604 struct iam_container *bag = &obj->oo_dir->od_container;
4606 if (!dt_object_exists(dt))
4607 return ERR_PTR(-ENOENT);
4611 return ERR_PTR(-ENOMEM);
4613 ipd = osd_it_ipd_get(env, bag);
4614 if (likely(ipd != NULL)) {
4618 iam_it_init(&it->oi_it, bag, IAM_IT_MOVE, ipd);
4619 return (struct dt_it *)it;
4622 return ERR_PTR(-ENOMEM);
4627 * free given Iterator.
4630 static void osd_it_iam_fini(const struct lu_env *env, struct dt_it *di)
4632 struct osd_it_iam *it = (struct osd_it_iam *)di;
4633 struct osd_object *obj = it->oi_obj;
4635 iam_it_fini(&it->oi_it);
4636 osd_ipd_put(env, &obj->oo_dir->od_container, it->oi_ipd);
4637 lu_object_put(env, &obj->oo_dt.do_lu);
4642 * Move Iterator to record specified by \a key
4644 * \param di osd iterator
4645 * \param key key for index
4647 * \retval +ve di points to record with least key not larger than key
4648 * \retval 0 di points to exact matched key
4649 * \retval -ve failure
4652 static int osd_it_iam_get(const struct lu_env *env,
4653 struct dt_it *di, const struct dt_key *key)
4655 struct osd_thread_info *oti = osd_oti_get(env);
4656 struct osd_it_iam *it = (struct osd_it_iam *)di;
4658 if (fid_is_quota(lu_object_fid(&it->oi_obj->oo_dt.do_lu))) {
4659 /* swab quota uid/gid */
4660 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
4661 key = (struct dt_key *)&oti->oti_quota_id;
4664 return iam_it_get(&it->oi_it, (const struct iam_key *)key);
4670 * \param di osd iterator
4672 static void osd_it_iam_put(const struct lu_env *env, struct dt_it *di)
4674 struct osd_it_iam *it = (struct osd_it_iam *)di;
4676 iam_it_put(&it->oi_it);
4680 * Move iterator by one record
4682 * \param di osd iterator
4684 * \retval +1 end of container reached
4686 * \retval -ve failure
4689 static int osd_it_iam_next(const struct lu_env *env, struct dt_it *di)
4691 struct osd_it_iam *it = (struct osd_it_iam *)di;
4693 return iam_it_next(&it->oi_it);
4697 * Return pointer to the key under iterator.
4700 static struct dt_key *osd_it_iam_key(const struct lu_env *env,
4701 const struct dt_it *di)
4703 struct osd_thread_info *oti = osd_oti_get(env);
4704 struct osd_it_iam *it = (struct osd_it_iam *)di;
4705 struct osd_object *obj = it->oi_obj;
4708 key = (struct dt_key *)iam_it_key_get(&it->oi_it);
4710 if (!IS_ERR(key) && fid_is_quota(lu_object_fid(&obj->oo_dt.do_lu))) {
4711 /* swab quota uid/gid */
4712 oti->oti_quota_id = le64_to_cpu(*((__u64 *)key));
4713 key = (struct dt_key *)&oti->oti_quota_id;
4720 * Return size of key under iterator (in bytes)
4723 static int osd_it_iam_key_size(const struct lu_env *env, const struct dt_it *di)
4725 struct osd_it_iam *it = (struct osd_it_iam *)di;
4727 return iam_it_key_size(&it->oi_it);
4731 osd_it_append_attrs(struct lu_dirent *ent, int len, __u16 type)
4733 /* check if file type is required */
4734 if (ent->lde_attrs & LUDA_TYPE) {
4735 struct luda_type *lt;
4736 int align = sizeof(*lt) - 1;
4738 len = (len + align) & ~align;
4739 lt = (struct luda_type *)(ent->lde_name + len);
4740 lt->lt_type = cpu_to_le16(DTTOIF(type));
4743 ent->lde_attrs = cpu_to_le32(ent->lde_attrs);
4747 * build lu_dirent from backend fs dirent.
4751 osd_it_pack_dirent(struct lu_dirent *ent, struct lu_fid *fid, __u64 offset,
4752 char *name, __u16 namelen, __u16 type, __u32 attr)
4754 ent->lde_attrs = attr | LUDA_FID;
4755 fid_cpu_to_le(&ent->lde_fid, fid);
4757 ent->lde_hash = cpu_to_le64(offset);
4758 ent->lde_reclen = cpu_to_le16(lu_dirent_calc_size(namelen, attr));
4760 strncpy(ent->lde_name, name, namelen);
4761 ent->lde_name[namelen] = '\0';
4762 ent->lde_namelen = cpu_to_le16(namelen);
4764 /* append lustre attributes */
4765 osd_it_append_attrs(ent, namelen, type);
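/*
 * Resulting entry layout sketch (illustrative) as produced by the two
 * helpers above when LUDA_TYPE is requested; multi-byte fields are stored
 * little-endian on the wire:
 *
 *	struct lu_dirent:
 *		lde_fid, lde_hash (= offset), lde_attrs, lde_reclen,
 *		lde_namelen, lde_name[namelen] + '\0'
 *	<name length rounded up to a sizeof(struct luda_type) boundary>
 *	struct luda_type:
 *		lt_type = DTTOIF(type)
 */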
4769 * Return pointer to the record under iterator.
4771 static int osd_it_iam_rec(const struct lu_env *env,
4772 const struct dt_it *di,
4773 struct dt_rec *dtrec, __u32 attr)
4775 struct osd_it_iam *it = (struct osd_it_iam *)di;
4776 struct osd_thread_info *info = osd_oti_get(env);
4779 if (S_ISDIR(it->oi_obj->oo_inode->i_mode)) {
4780 const struct osd_fid_pack *rec;
4781 struct lu_fid *fid = &info->oti_fid;
4782 struct lu_dirent *lde = (struct lu_dirent *)dtrec;
4788 name = (char *)iam_it_key_get(&it->oi_it);
4790 RETURN(PTR_ERR(name));
4792 namelen = iam_it_key_size(&it->oi_it);
4794 rec = (const struct osd_fid_pack *)iam_it_rec_get(&it->oi_it);
4796 RETURN(PTR_ERR(rec));
4798 rc = osd_fid_unpack(fid, rec);
4802 hash = iam_it_store(&it->oi_it);
4804 /* IAM does not store object type in IAM index (dir) */
4805 osd_it_pack_dirent(lde, fid, hash, name, namelen,
4807 } else if (fid_is_quota(lu_object_fid(&it->oi_obj->oo_dt.do_lu))) {
4808 iam_reccpy(&it->oi_it.ii_path.ip_leaf,
4809 (struct iam_rec *)dtrec);
4810 osd_quota_unpack(it->oi_obj, dtrec);
4812 iam_reccpy(&it->oi_it.ii_path.ip_leaf,
4813 (struct iam_rec *)dtrec);
4820 * Returns cookie for current Iterator position.
4822 static __u64 osd_it_iam_store(const struct lu_env *env, const struct dt_it *di)
4824 struct osd_it_iam *it = (struct osd_it_iam *)di;
4826 return iam_it_store(&it->oi_it);
4830 * Restore iterator from cookie.
4832 * \param di osd iterator
4833 * \param hash Iterator location cookie
4835 * \retval +ve di points to record with least key not larger than key.
4836 * \retval 0 di points to exact matched key
4837 * \retval -ve failure
4840 static int osd_it_iam_load(const struct lu_env *env,
4841 const struct dt_it *di, __u64 hash)
4843 struct osd_it_iam *it = (struct osd_it_iam *)di;
4845 return iam_it_load(&it->oi_it, hash);
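/*
 * Iteration sketch (illustrative only): a dt-layer caller would drive the
 * operations collected in osd_index_iam_ops below roughly as follows, where
 * "iops" denotes the nested iterator operations (dio_it) and "start_key" is
 * any caller-supplied key:
 *
 *	it = iops->init(env, dt, attr);
 *	rc = iops->get(env, it, start_key);
 *	while (rc >= 0) {
 *		iops->rec(env, it, (struct dt_rec *)lde, attr);
 *		rc = iops->next(env, it);   (rc > 0 means end of container)
 *		if (rc > 0)
 *			break;
 *	}
 *	iops->put(env, it);
 *	iops->fini(env, it);
 */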
4848 static const struct dt_index_operations osd_index_iam_ops = {
4849 .dio_lookup = osd_index_iam_lookup,
4850 .dio_declare_insert = osd_index_declare_iam_insert,
4851 .dio_insert = osd_index_iam_insert,
4852 .dio_declare_delete = osd_index_declare_iam_delete,
4853 .dio_delete = osd_index_iam_delete,
4855 .init = osd_it_iam_init,
4856 .fini = osd_it_iam_fini,
4857 .get = osd_it_iam_get,
4858 .put = osd_it_iam_put,
4859 .next = osd_it_iam_next,
4860 .key = osd_it_iam_key,
4861 .key_size = osd_it_iam_key_size,
4862 .rec = osd_it_iam_rec,
4863 .store = osd_it_iam_store,
4864 .load = osd_it_iam_load
4870 * Creates or initializes iterator context.
4872 * \retval struct osd_it_ea, iterator structure on success
4875 static struct dt_it *osd_it_ea_init(const struct lu_env *env,
4876 struct dt_object *dt,
4879 struct osd_object *obj = osd_dt_obj(dt);
4880 struct osd_thread_info *info = osd_oti_get(env);
4881 struct osd_it_ea *oie;
4883 struct lu_object *lo = &dt->do_lu;
4884 struct dentry *obj_dentry;
4887 if (!dt_object_exists(dt) || obj->oo_destroyed)
4888 RETURN(ERR_PTR(-ENOENT));
4890 OBD_SLAB_ALLOC_PTR_GFP(oie, osd_itea_cachep, GFP_NOFS);
4892 RETURN(ERR_PTR(-ENOMEM));
4893 obj_dentry = &oie->oie_dentry;
4895 obj_dentry->d_inode = obj->oo_inode;
4896 obj_dentry->d_sb = osd_sb(osd_obj2dev(obj));
4897 obj_dentry->d_name.hash = 0;
4899 oie->oie_rd_dirent = 0;
4900 oie->oie_it_dirent = 0;
4901 oie->oie_dirent = NULL;
4902 if (unlikely(!info->oti_it_ea_buf_used)) {
4903 oie->oie_buf = info->oti_it_ea_buf;
4904 info->oti_it_ea_buf_used = 1;
4906 OBD_ALLOC(oie->oie_buf, OSD_IT_EA_BUFSIZE);
4907 if (oie->oie_buf == NULL)
4908 RETURN(ERR_PTR(-ENOMEM));
4912 file = &oie->oie_file;
4914 /* Only FMODE_64BITHASH or FMODE_32BITHASH should be set, NOT both. */
4915 if (attr & LUDA_64BITHASH)
4916 file->f_mode = FMODE_64BITHASH;
4918 file->f_mode = FMODE_32BITHASH;
4919 file->f_path.dentry = obj_dentry;
4920 file->f_mapping = obj->oo_inode->i_mapping;
4921 file->f_op = obj->oo_inode->i_fop;
4922 set_file_inode(file, obj->oo_inode);
4925 RETURN((struct dt_it *) oie);
4929 * Destroys or finishes the iterator context.
4931 * \param di iterator structure to be destroyed
4933 static void osd_it_ea_fini(const struct lu_env *env, struct dt_it *di)
4935 struct osd_thread_info *info = osd_oti_get(env);
4936 struct osd_it_ea *oie = (struct osd_it_ea *)di;
4937 struct osd_object *obj = oie->oie_obj;
4938 struct inode *inode = obj->oo_inode;
4941 oie->oie_file.f_op->release(inode, &oie->oie_file);
4942 lu_object_put(env, &obj->oo_dt.do_lu);
4943 if (unlikely(oie->oie_buf != info->oti_it_ea_buf))
4944 OBD_FREE(oie->oie_buf, OSD_IT_EA_BUFSIZE);
4946 info->oti_it_ea_buf_used = 0;
4947 OBD_SLAB_FREE_PTR(oie, osd_itea_cachep);
4952 * It positions the iterator at the given key, so that the next lookup
4953 * continues from that key. It is similar to dio_it->load(), but based on
4954 * a key rather than a file position.
4956 * As a special convention, osd_it_ea_get(env, di, "") has to rewind iterator
4959 * TODO: Presently returns +1, since it is only used by mdd_dir_is_empty().
4961 static int osd_it_ea_get(const struct lu_env *env,
4962 struct dt_it *di, const struct dt_key *key)
4964 struct osd_it_ea *it = (struct osd_it_ea *)di;
4967 LASSERT(((const char *)key)[0] == '\0');
4968 it->oie_file.f_pos = 0;
4969 it->oie_rd_dirent = 0;
4970 it->oie_it_dirent = 0;
4971 it->oie_dirent = NULL;
4979 static void osd_it_ea_put(const struct lu_env *env, struct dt_it *di)
4983 struct osd_filldir_cbs {
4984 #ifdef HAVE_DIR_CONTEXT
4985 struct dir_context ctx;
4987 struct osd_it_ea *it;
4990 * It is called internally by ->readdir(). It fills the
4991 * iterator's in-memory data structure with required
4992 * information i.e. name, namelen, rec_size etc.
4994 * \param buf buffer in which the information is to be filled in
4995 * \param name name of the file in given dir
4997 * \retval 0 on success
4998 * \retval 1 on buffer full
5000 static int osd_ldiskfs_filldir(void *buf, const char *name, int namelen,
5001 loff_t offset, __u64 ino,
5004 struct osd_it_ea *it = ((struct osd_filldir_cbs *)buf)->it;
5005 struct osd_object *obj = it->oie_obj;
5006 struct osd_it_ea_dirent *ent = it->oie_dirent;
5007 struct lu_fid *fid = &ent->oied_fid;
5008 struct osd_fid_pack *rec;
5011 /* this should never happen */
5012 if (unlikely(namelen == 0 || namelen > LDISKFS_NAME_LEN)) {
5013 CERROR("ldiskfs return invalid namelen %d\n", namelen);
5017 if ((void *) ent - it->oie_buf + sizeof(*ent) + namelen >
5021 /* "." is just the object itself. */
5022 if (namelen == 1 && name[0] == '.') {
5023 *fid = obj->oo_dt.do_lu.lo_header->loh_fid;
5024 } else if (d_type & LDISKFS_DIRENT_LUFID) {
5025 rec = (struct osd_fid_pack*) (name + namelen + 1);
5026 if (osd_fid_unpack(fid, rec) != 0)
5031 d_type &= ~LDISKFS_DIRENT_LUFID;
5033 /* Do NOT export the local root. */
5034 if (unlikely(osd_sb(osd_obj2dev(obj))->s_root->d_inode->i_ino == ino)) {
5035 ino = obj->oo_inode->i_ino;
5036 *fid = obj->oo_dt.do_lu.lo_header->loh_fid;
5039 ent->oied_ino = ino;
5040 ent->oied_off = offset;
5041 ent->oied_namelen = namelen;
5042 ent->oied_type = d_type;
5044 memcpy(ent->oied_name, name, namelen);
5046 it->oie_rd_dirent++;
5047 it->oie_dirent = (void *) ent + cfs_size_round(sizeof(*ent) + namelen);
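/*
 * Buffer layout sketch (illustrative): osd_ldiskfs_filldir() packs entries
 * back to back into oie_buf (OSD_IT_EA_BUFSIZE bytes), each record rounded
 * up with cfs_size_round():
 *
 *	[struct osd_it_ea_dirent][oied_name] <pad>
 *	[struct osd_it_ea_dirent][oied_name] <pad>
 *	...
 *
 * oie_rd_dirent counts the packed entries; the fill pass stops early by
 * returning 1 once the next entry would overflow the buffer.
 */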
5052 * Calls ->readdir() to load a directory entry at a time
5053 * and stores it in the iterator's in-memory data structure.
5055 * \param di iterator's in memory structure
5057 * \retval 0 on success
5058 * \retval -ve on error
5059 * \retval +1 reached the end of the entries
5061 static int osd_ldiskfs_it_fill(const struct lu_env *env,
5062 const struct dt_it *di)
5064 struct osd_it_ea *it = (struct osd_it_ea *)di;
5065 struct osd_object *obj = it->oie_obj;
5066 struct inode *inode = obj->oo_inode;
5067 struct htree_lock *hlock = NULL;
5068 struct file *filp = &it->oie_file;
5070 struct osd_filldir_cbs buf = {
5071 #ifdef HAVE_DIR_CONTEXT
5072 .ctx.actor = osd_ldiskfs_filldir,
5078 it->oie_dirent = it->oie_buf;
5079 it->oie_rd_dirent = 0;
5081 if (obj->oo_hl_head != NULL) {
5082 hlock = osd_oti_get(env)->oti_hlock;
5083 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
5084 inode, LDISKFS_HLOCK_READDIR);
5086 down_read(&obj->oo_ext_idx_sem);
5089 #ifdef HAVE_DIR_CONTEXT
5090 buf.ctx.pos = filp->f_pos;
5091 rc = inode->i_fop->iterate(filp, &buf.ctx);
5092 filp->f_pos = buf.ctx.pos;
5094 rc = inode->i_fop->readdir(filp, &buf, osd_ldiskfs_filldir);
5098 ldiskfs_htree_unlock(hlock);
5100 up_read(&obj->oo_ext_idx_sem);
5102 if (it->oie_rd_dirent == 0) {
5103 /* If no dirent was read, it means the end of the
5104 * directory has been reached */
5105 it->oie_file.f_pos = ldiskfs_get_htree_eof(&it->oie_file);
5109 it->oie_dirent = it->oie_buf;
5110 it->oie_it_dirent = 1;
5117 * It calls osd_ldiskfs_it_fill() which will use ->readdir()
5118 * to load a directory entry at a time and store it in
5119 * the iterator's in-memory data structure.
5121 * \param di iterator's in memory structure
5123 * \retval +ve iterator has reached the end
5124 * \retval 0 iterator has not reached the end
5125 * \retval -ve on error
5127 static int osd_it_ea_next(const struct lu_env *env, struct dt_it *di)
5129 struct osd_it_ea *it = (struct osd_it_ea *)di;
5134 if (it->oie_it_dirent < it->oie_rd_dirent) {
5136 (void *) it->oie_dirent +
5137 cfs_size_round(sizeof(struct osd_it_ea_dirent) +
5138 it->oie_dirent->oied_namelen);
5139 it->oie_it_dirent++;
5142 if (it->oie_file.f_pos == ldiskfs_get_htree_eof(&it->oie_file))
5145 rc = osd_ldiskfs_it_fill(env, di);
5152 * Returns the key at current position from iterator's in memory structure.
5154 * \param di iterator's in memory structure
5156 * \retval key i.e. struct dt_key on success
5158 static struct dt_key *osd_it_ea_key(const struct lu_env *env,
5159 const struct dt_it *di)
5161 struct osd_it_ea *it = (struct osd_it_ea *)di;
5163 return (struct dt_key *)it->oie_dirent->oied_name;
5167 * Returns the key's size at current position from iterator's in memory structure.
5169 * \param di iterator's in memory structure
5171 * \retval key size in bytes, on success
5173 static int osd_it_ea_key_size(const struct lu_env *env, const struct dt_it *di)
5175 struct osd_it_ea *it = (struct osd_it_ea *)di;
5177 return it->oie_dirent->oied_namelen;
5181 osd_dot_dotdot_has_space(struct ldiskfs_dir_entry_2 *de, int dot_dotdot)
5183 LASSERTF(dot_dotdot == 1 || dot_dotdot == 2,
5184 "dot_dotdot = %d\n", dot_dotdot);
5186 if (LDISKFS_DIR_REC_LEN(de) >=
5187 __LDISKFS_DIR_REC_LEN(dot_dotdot + 1 + sizeof(struct osd_fid_pack)))
5194 osd_dirent_has_space(struct ldiskfs_dir_entry_2 *de, __u16 namelen,
5195 unsigned blocksize, int dot_dotdot)
5198 return osd_dot_dotdot_has_space(de, dot_dotdot);
5200 if (ldiskfs_rec_len_from_disk(de->rec_len, blocksize) >=
5201 __LDISKFS_DIR_REC_LEN(namelen + 1 + sizeof(struct osd_fid_pack)))
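/*
 * On-disk layout sketch (illustrative) of a dirent carrying a FID via the
 * ldiskfs "dirdata" feature, as checked above and (re)written by
 * osd_dirent_reinsert() below:
 *
 *	struct ldiskfs_dir_entry_2:
 *		inode, rec_len, name_len, file_type | LDISKFS_DIRENT_LUFID
 *		name[name_len], '\0'
 *		struct osd_fid_pack:
 *			fp_len  = sizeof(struct lu_fid) + 1
 *			fp_area = the FID in big-endian byte order
 */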
5208 osd_dirent_reinsert(const struct lu_env *env, handle_t *jh,
5209 struct dentry *dentry, const struct lu_fid *fid,
5210 struct buffer_head *bh, struct ldiskfs_dir_entry_2 *de,
5211 struct htree_lock *hlock, int dot_dotdot)
5213 struct inode *dir = dentry->d_parent->d_inode;
5214 struct inode *inode = dentry->d_inode;
5215 struct osd_fid_pack *rec;
5216 struct ldiskfs_dentry_param *ldp;
5217 int namelen = dentry->d_name.len;
5221 if (!LDISKFS_HAS_INCOMPAT_FEATURE(inode->i_sb,
5222 LDISKFS_FEATURE_INCOMPAT_DIRDATA))
5225 /* There is enough space to hold the FID-in-dirent. */
5226 if (osd_dirent_has_space(de, namelen, dir->i_sb->s_blocksize,
5228 rc = ldiskfs_journal_get_write_access(jh, bh);
5232 de->name[namelen] = 0;
5233 rec = (struct osd_fid_pack *)(de->name + namelen + 1);
5234 rec->fp_len = sizeof(struct lu_fid) + 1;
5235 fid_cpu_to_be((struct lu_fid *)rec->fp_area, fid);
5236 de->file_type |= LDISKFS_DIRENT_LUFID;
5237 rc = ldiskfs_handle_dirty_metadata(jh, NULL, bh);
5242 LASSERTF(dot_dotdot == 0, "dot_dotdot = %d\n", dot_dotdot);
5244 rc = ldiskfs_delete_entry(jh, dir, de, bh);
5248 ldp = (struct ldiskfs_dentry_param *)osd_oti_get(env)->oti_ldp;
5249 osd_get_ldiskfs_dirent_param(ldp, fid);
5250 dentry->d_fsdata = (void *)ldp;
5251 ll_vfs_dq_init(dir);
5252 rc = osd_ldiskfs_add_entry(jh, dentry, inode, hlock);
5253 /* Too bad, we cannot reinsert the name entry.
5254 * That means we lose it! */
5256 CDEBUG(D_LFSCK, "%.16s: fail to reinsert the dirent, "
5257 "dir = %lu/%u, name = %.*s, "DFID": rc = %d\n",
5258 LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
5259 dir->i_ino, dir->i_generation, namelen,
5260 dentry->d_name.name, PFID(fid), rc);
5266 osd_dirent_check_repair(const struct lu_env *env, struct osd_object *obj,
5267 struct osd_it_ea *it, struct lu_fid *fid,
5268 struct osd_inode_id *id, __u32 *attr)
5270 struct osd_thread_info *info = osd_oti_get(env);
5271 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
5272 struct osd_device *dev = osd_obj2dev(obj);
5273 struct super_block *sb = osd_sb(dev);
5274 const char *devname =
5275 LDISKFS_SB(sb)->s_es->s_volume_name;
5276 struct osd_it_ea_dirent *ent = it->oie_dirent;
5277 struct inode *dir = obj->oo_inode;
5278 struct htree_lock *hlock = NULL;
5279 struct buffer_head *bh = NULL;
5280 handle_t *jh = NULL;
5281 struct ldiskfs_dir_entry_2 *de;
5282 struct dentry *dentry;
5283 struct inode *inode;
5290 osd_id_gen(id, ent->oied_ino, OSD_OII_NOGEN);
5291 inode = osd_iget(info, dev, id);
5292 if (IS_ERR(inode)) {
5293 rc = PTR_ERR(inode);
5294 if (rc == -ENOENT || rc == -ESTALE) {
5295 *attr |= LUDA_UNKNOWN;
5298 CDEBUG(D_LFSCK, "%.16s: fail to iget for dirent "
5299 "check_repair, dir = %lu/%u, name = %.*s: "
5301 devname, dir->i_ino, dir->i_generation,
5302 ent->oied_namelen, ent->oied_name, rc);
5308 dentry = osd_child_dentry_by_inode(env, dir, ent->oied_name,
5310 rc = osd_get_lma(info, inode, dentry, lma);
5316 if (ent->oied_name[0] == '.') {
5317 if (ent->oied_namelen == 1)
5319 else if (ent->oied_namelen == 2 && ent->oied_name[1] == '.')
5323 /* We need to ensure that the name entry is still valid.
5324 * It may already have been removed or renamed by others.
5326 * The unlink or rename operation will start journal before PDO lock,
5327 * so to avoid deadlock, here we need to start journal handle before
5328 * related PDO lock also. But because we do not know whether there
5329 * will be something to be repaired before PDO lock, we just start
5330 * journal without conditions.
5332 * We may need to remove the name entry firstly, then insert back.
5333 * One credit is for user quota file update.
5334 * One credit is for group quota file update.
5335 * Two credits are for dirty inode. */
5336 credits = osd_dto_credits_noquota[DTO_INDEX_DELETE] +
5337 osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1 + 1 + 2;
5339 if (dev->od_dirent_journal != 0) {
5342 jh = osd_journal_start_sb(sb, LDISKFS_HT_MISC, credits);
5345 CDEBUG(D_LFSCK, "%.16s: fail to start trans for dirent "
5346 "check_repair, dir = %lu/%u, credits = %d, "
5347 "name = %.*s: rc = %d\n",
5348 devname, dir->i_ino, dir->i_generation, credits,
5349 ent->oied_namelen, ent->oied_name, rc);
5351 GOTO(out_inode, rc);
5354 if (obj->oo_hl_head != NULL) {
5355 hlock = osd_oti_get(env)->oti_hlock;
5356 /* "0" means exclusive lock for the whole directory.
5357 * We need to prevent others from accessing this name entry
5358 * during the delete + insert. Neither HLOCK_ADD nor
5359 * HLOCK_DEL can guarantee the atomicity. */
5360 ldiskfs_htree_lock(hlock, obj->oo_hl_head, dir, 0);
5362 down_write(&obj->oo_ext_idx_sem);
5365 if (obj->oo_hl_head != NULL) {
5366 hlock = osd_oti_get(env)->oti_hlock;
5367 ldiskfs_htree_lock(hlock, obj->oo_hl_head, dir,
5368 LDISKFS_HLOCK_LOOKUP);
5370 down_read(&obj->oo_ext_idx_sem);
5374 bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
5375 /* For a dot/dotdot entry, if there is not enough space to hold the
5376 * FID-in-dirent, just keep it as it is. This only happens when the
5377 * device was upgraded from 1.8 or restored from an MDT file-level backup.
5378 * In that case only the dot/dotdot entries of the directory lack a
5379 * FID-in-dirent and need to get the FID from the LMA during readdir,
5380 * which does not affect performance much. */
5381 if ((bh == NULL) || (le32_to_cpu(de->inode) != inode->i_ino) ||
5382 (dot_dotdot != 0 && !osd_dot_dotdot_has_space(de, dot_dotdot))) {
5383 *attr |= LUDA_IGNORE;
5389 if (unlikely(lma->lma_compat & LMAC_NOT_IN_OI)) {
5390 struct lu_fid *tfid = &lma->lma_self_fid;
5392 *attr |= LUDA_IGNORE;
5393 /* It must be REMOTE_PARENT_DIR, serving as the
5394 * dotdot entry of a remote directory */
5395 if (unlikely(dot_dotdot != 2 ||
5396 fid_seq(tfid) != FID_SEQ_LOCAL_FILE ||
5397 fid_oid(tfid) != REMOTE_PARENT_DIR_OID)) {
5398 CDEBUG(D_LFSCK, "%.16s: expect remote agent "
5399 "parent directory, but got %.*s under "
5400 "dir = %lu/%u with the FID "DFID"\n",
5401 devname, ent->oied_namelen,
5402 ent->oied_name, dir->i_ino,
5403 dir->i_generation, PFID(tfid));
5405 GOTO(out, rc = -EIO);
5411 if (fid_is_sane(fid)) {
5412 /* FID-in-dirent is valid. */
5413 if (lu_fid_eq(fid, &lma->lma_self_fid))
5416 /* Do not repair under dryrun mode. */
5417 if (*attr & LUDA_VERIFY_DRYRUN) {
5418 *attr |= LUDA_REPAIR;
5426 ldiskfs_htree_unlock(hlock);
5428 up_read(&obj->oo_ext_idx_sem);
5429 dev->od_dirent_journal = 1;
5434 *fid = lma->lma_self_fid;
5436 /* Update the FID-in-dirent. */
5437 rc = osd_dirent_reinsert(env, jh, dentry, fid, bh, de,
5440 *attr |= LUDA_REPAIR;
5442 CDEBUG(D_LFSCK, "%.16s: fail to update FID "
5443 "in the dirent, dir = %lu/%u, "
5444 "name = %.*s, "DFID": rc = %d\n",
5445 devname, dir->i_ino, dir->i_generation,
5446 ent->oied_namelen, ent->oied_name,
5449 /* Do not repair under dryrun mode. */
5450 if (*attr & LUDA_VERIFY_DRYRUN) {
5451 *fid = lma->lma_self_fid;
5452 *attr |= LUDA_REPAIR;
5460 ldiskfs_htree_unlock(hlock);
5462 up_read(&obj->oo_ext_idx_sem);
5463 dev->od_dirent_journal = 1;
5468 *fid = lma->lma_self_fid;
5470 /* Append the FID-in-dirent. */
5471 rc = osd_dirent_reinsert(env, jh, dentry, fid, bh, de,
5474 *attr |= LUDA_REPAIR;
5476 CDEBUG(D_LFSCK, "%.16s: fail to append FID "
5477 "after the dirent, dir = %lu/%u, "
5478 "name = %.*s, "DFID": rc = %d\n",
5479 devname, dir->i_ino, dir->i_generation,
5480 ent->oied_namelen, ent->oied_name,
5484 /* Do not repair under dryrun mode. */
5485 if (*attr & LUDA_VERIFY_DRYRUN) {
5486 if (fid_is_sane(fid)) {
5487 *attr |= LUDA_REPAIR;
5489 lu_igif_build(fid, inode->i_ino,
5490 inode->i_generation);
5491 *attr |= LUDA_UPGRADE;
5500 ldiskfs_htree_unlock(hlock);
5502 up_read(&obj->oo_ext_idx_sem);
5503 dev->od_dirent_journal = 1;
5509 if (unlikely(fid_is_sane(fid))) {
5510 /* FID-in-dirent exists, but FID-in-LMA is lost.
5511 * Trust the FID-in-dirent, and add FID-in-LMA. */
5512 rc = osd_ea_fid_set(info, inode, fid, 0, 0);
5514 *attr |= LUDA_REPAIR;
5516 CDEBUG(D_LFSCK, "%.16s: fail to set LMA for "
5517 "update dirent, dir = %lu/%u, "
5518 "name = %.*s, "DFID": rc = %d\n",
5519 devname, dir->i_ino, dir->i_generation,
5520 ent->oied_namelen, ent->oied_name,
5523 lu_igif_build(fid, inode->i_ino, inode->i_generation);
5524 /* It is probably an IGIF object. Only append the
5525 * FID-in-dirent. OI scrub will process FID-in-LMA. */
5526 rc = osd_dirent_reinsert(env, jh, dentry, fid, bh, de,
5529 *attr |= LUDA_UPGRADE;
5531 CDEBUG(D_LFSCK, "%.16s: fail to append IGIF "
5532 "after the dirent, dir = %lu/%u, "
5533 "name = %.*s, "DFID": rc = %d\n",
5534 devname, dir->i_ino, dir->i_generation,
5535 ent->oied_namelen, ent->oied_name,
5544 if (hlock != NULL) {
5545 ldiskfs_htree_unlock(hlock);
5547 if (dev->od_dirent_journal != 0)
5548 up_write(&obj->oo_ext_idx_sem);
5550 up_read(&obj->oo_ext_idx_sem);
5554 ldiskfs_journal_stop(jh);
5558 if (rc >= 0 && !dirty)
5559 dev->od_dirent_journal = 0;
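/*
 * Retry-pattern sketch (simplified, illustrative) for
 * osd_dirent_check_repair() above: the first pass runs without a journal
 * handle under the read lock; if a repair turns out to be necessary,
 * od_dirent_journal is set, the locks are dropped and the lookup is redone
 * with a started journal handle and the write lock, so that the entry can
 * be deleted and reinserted under exclusive access:
 *
 *	if (dev->od_dirent_journal)
 *		start journal handle, take write lock
 *	else
 *		take read lock only
 *	compare FID-in-dirent with FID-in-LMA
 *	if a repair is needed and no journal handle is held yet:
 *		unlock, dev->od_dirent_journal = 1, restart the lookup
 *	if nothing was dirtied in the end:
 *		dev->od_dirent_journal = 0
 */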
5565 * Returns the value at current position from iterator's in memory structure.
5567 * \param di struct osd_it_ea, iterator's in memory structure
5568 * \param attr attr requested for dirent.
5569 * \param lde lustre dirent
5571 * \retval 0 no error and \param lde has correct lustre dirent.
5572 * \retval -ve on error
5574 static inline int osd_it_ea_rec(const struct lu_env *env,
5575 const struct dt_it *di,
5576 struct dt_rec *dtrec, __u32 attr)
5578 struct osd_it_ea *it = (struct osd_it_ea *)di;
5579 struct osd_object *obj = it->oie_obj;
5580 struct osd_device *dev = osd_obj2dev(obj);
5581 struct osd_thread_info *oti = osd_oti_get(env);
5582 struct osd_inode_id *id = &oti->oti_id;
5583 struct lu_fid *fid = &it->oie_dirent->oied_fid;
5584 struct lu_dirent *lde = (struct lu_dirent *)dtrec;
5585 __u32 ino = it->oie_dirent->oied_ino;
5589 LASSERT(obj->oo_inode != dev->od_mdt_map->omm_remote_parent->d_inode);
5591 if (attr & LUDA_VERIFY) {
5592 if (unlikely(ino == osd_remote_parent_ino(dev))) {
5593 attr |= LUDA_IGNORE;
5594 /* If the parent is on remote MDT, and there
5595 * is no FID-in-dirent, then we have to get
5596 * the parent FID from the linkEA. */
5597 if (!fid_is_sane(fid) &&
5598 it->oie_dirent->oied_namelen == 2 &&
5599 it->oie_dirent->oied_name[0] == '.' &&
5600 it->oie_dirent->oied_name[1] == '.')
5601 osd_get_pfid_from_linkea(env, obj, fid);
5603 rc = osd_dirent_check_repair(env, obj, it, fid, id,
5607 if (!fid_is_sane(fid)) {
5608 attr &= ~LUDA_IGNORE;
5609 attr |= LUDA_UNKNOWN;
5612 attr &= ~LU_DIRENT_ATTRS_MASK;
5613 if (!fid_is_sane(fid)) {
5614 bool is_dotdot = false;
5615 if (it->oie_dirent->oied_namelen == 2 &&
5616 it->oie_dirent->oied_name[0] == '.' &&
5617 it->oie_dirent->oied_name[1] == '.')
5619 /* If the parent is on remote MDT, and there
5620 * is no FID-in-dirent, then we have to get
5621 * the parent FID from the linkEA. */
5622 if (ino == osd_remote_parent_ino(dev) && is_dotdot) {
5623 rc = osd_get_pfid_from_linkea(env, obj, fid);
5625 if (is_dotdot == false &&
5626 OBD_FAIL_CHECK(OBD_FAIL_FID_LOOKUP))
5629 rc = osd_ea_fid_get(env, obj, ino, fid, id);
5632 osd_id_gen(id, ino, OSD_OII_NOGEN);
5636 /* Pack the entry anyway, at least the offset is right. */
5637 osd_it_pack_dirent(lde, fid, it->oie_dirent->oied_off,
5638 it->oie_dirent->oied_name,
5639 it->oie_dirent->oied_namelen,
5640 it->oie_dirent->oied_type, attr);
5645 if (osd_remote_fid(env, dev, fid))
5648 if (likely(!(attr & (LUDA_IGNORE | LUDA_UNKNOWN)) && rc == 0))
5649 osd_add_oi_cache(oti, dev, id, fid);
5651 RETURN(rc > 0 ? 0 : rc);
5655 * Returns the record size at the current position.
5657 * This function will return the record (lu_dirent) size in bytes.
5659 * \param[in] env execution environment
5660 * \param[in] di iterator's in memory structure
5661 * \param[in] attr attribute of the entry, only requires LUDA_TYPE to
5662 * calculate the lu_dirent size.
5664 * \retval record size(in bytes & in memory) of the current lu_dirent
5667 static int osd_it_ea_rec_size(const struct lu_env *env, const struct dt_it *di,
5670 struct osd_it_ea *it = (struct osd_it_ea *)di;
5672 return lu_dirent_calc_size(it->oie_dirent->oied_namelen, attr);
5676 * Returns a cookie for current position of the iterator head, so that
5677 * user can use this cookie to load/start the iterator next time.
5679 * \param di iterator's in memory structure
5681 * \retval cookie for current position, on success
5683 static __u64 osd_it_ea_store(const struct lu_env *env, const struct dt_it *di)
5685 struct osd_it_ea *it = (struct osd_it_ea *)di;
5687 return it->oie_dirent->oied_off;
5691 * It calls osd_ldiskfs_it_fill() which will use ->readdir()
5692 * to load a directory entry at a time and store it
5693 * in the iterator's in-memory data structure.
5695 * \param di struct osd_it_ea, iterator's in memory structure
5697 * \retval +ve on success
5698 * \retval -ve on error
5700 static int osd_it_ea_load(const struct lu_env *env,
5701 const struct dt_it *di, __u64 hash)
5703 struct osd_it_ea *it = (struct osd_it_ea *)di;
5707 it->oie_file.f_pos = hash;
5709 rc = osd_ldiskfs_it_fill(env, di);
5720 * Index lookup function for interoperability mode (b11826).
5722 * \param key, key i.e. file name to be searched
5724 * \retval +ve, on success
5725 * \retval -ve, on error
5727 static int osd_index_ea_lookup(const struct lu_env *env, struct dt_object *dt,
5728 struct dt_rec *rec, const struct dt_key *key)
5730 struct osd_object *obj = osd_dt_obj(dt);
5735 LASSERT(S_ISDIR(obj->oo_inode->i_mode));
5736 LINVRNT(osd_invariant(obj));
5738 rc = osd_ea_lookup_rec(env, obj, rec, key);
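/*
 * Lookup usage sketch (illustrative): resolving a name to a FID through the
 * operations table below; "dir" stands for any local OSD directory object
 * and the caller supplies the fid buffer:
 *
 *	struct lu_fid fid;
 *
 *	rc = dir->do_index_ops->dio_lookup(env, dir, (struct dt_rec *)&fid,
 *					   (const struct dt_key *)name);
 *	if (rc > 0)
 *		... "name" exists and fid holds its FID ...
 */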
5745 * Index and Iterator operations for interoperability
5746 * mode (i.e. to run 2.0 mds on 1.8 disk) (b11826)
5748 static const struct dt_index_operations osd_index_ea_ops = {
5749 .dio_lookup = osd_index_ea_lookup,
5750 .dio_declare_insert = osd_index_declare_ea_insert,
5751 .dio_insert = osd_index_ea_insert,
5752 .dio_declare_delete = osd_index_declare_ea_delete,
5753 .dio_delete = osd_index_ea_delete,
5755 .init = osd_it_ea_init,
5756 .fini = osd_it_ea_fini,
5757 .get = osd_it_ea_get,
5758 .put = osd_it_ea_put,
5759 .next = osd_it_ea_next,
5760 .key = osd_it_ea_key,
5761 .key_size = osd_it_ea_key_size,
5762 .rec = osd_it_ea_rec,
5763 .rec_size = osd_it_ea_rec_size,
5764 .store = osd_it_ea_store,
5765 .load = osd_it_ea_load
5769 static void *osd_key_init(const struct lu_context *ctx,
5770 struct lu_context_key *key)
5772 struct osd_thread_info *info;
5774 OBD_ALLOC_PTR(info);
5776 return ERR_PTR(-ENOMEM);
5778 OBD_ALLOC(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
5779 if (info->oti_it_ea_buf == NULL)
5782 info->oti_env = container_of(ctx, struct lu_env, le_ctx);
5784 info->oti_hlock = ldiskfs_htree_lock_alloc();
5785 if (info->oti_hlock == NULL)
5791 OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
5794 return ERR_PTR(-ENOMEM);
5797 static void osd_key_fini(const struct lu_context *ctx,
5798 struct lu_context_key *key, void* data)
5800 struct osd_thread_info *info = data;
5802 if (info->oti_inode != NULL)
5803 OBD_FREE_PTR(info->oti_inode);
5804 if (info->oti_hlock != NULL)
5805 ldiskfs_htree_lock_free(info->oti_hlock);
5806 OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
5807 lu_buf_free(&info->oti_iobuf.dr_pg_buf);
5808 lu_buf_free(&info->oti_iobuf.dr_bl_buf);
5809 lu_buf_free(&info->oti_big_buf);
5813 static void osd_key_exit(const struct lu_context *ctx,
5814 struct lu_context_key *key, void *data)
5816 struct osd_thread_info *info = data;
5818 LASSERT(info->oti_r_locks == 0);
5819 LASSERT(info->oti_w_locks == 0);
5820 LASSERT(info->oti_txns == 0);
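/*
 * Per-thread context sketch (illustrative): the osd_thread_info allocated in
 * osd_key_init() is attached to the lu_context of each environment and is
 * typically retrieved on the hot paths of this file via osd_oti_get(), i.e.
 * something along the lines of:
 *
 *	struct osd_thread_info *info =
 *		lu_context_key_get(&env->le_ctx, &osd_key);
 */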
5823 /* type constructor/destructor: osd_type_init, osd_type_fini */
5824 LU_TYPE_INIT_FINI(osd, &osd_key);
5826 struct lu_context_key osd_key = {
5827 .lct_tags = LCT_DT_THREAD | LCT_MD_THREAD | LCT_MG_THREAD | LCT_LOCAL,
5828 .lct_init = osd_key_init,
5829 .lct_fini = osd_key_fini,
5830 .lct_exit = osd_key_exit
5834 static int osd_device_init(const struct lu_env *env, struct lu_device *d,
5835 const char *name, struct lu_device *next)
5837 struct osd_device *osd = osd_dev(d);
5839 if (strlcpy(osd->od_svname, name, sizeof(osd->od_svname))
5840 >= sizeof(osd->od_svname))
5842 return osd_procfs_init(osd, name);
5845 static int osd_fid_init(const struct lu_env *env, struct osd_device *osd)
5847 struct seq_server_site *ss = osd_seq_site(osd);
5851 if (osd->od_is_ost || osd->od_cl_seq != NULL)
5854 if (unlikely(ss == NULL))
5857 OBD_ALLOC_PTR(osd->od_cl_seq);
5858 if (osd->od_cl_seq == NULL)
5861 rc = seq_client_init(osd->od_cl_seq, NULL, LUSTRE_SEQ_METADATA,
5862 osd->od_svname, ss->ss_server_seq);
5864 OBD_FREE_PTR(osd->od_cl_seq);
5865 osd->od_cl_seq = NULL;
5869 if (ss->ss_node_id == 0) {
5870 /* If the OSD is on the sequence controller (MDT0), then allocate
5871 * the sequence here; otherwise allocate the sequence after connecting
5872 * to MDT0 (see mdt_register_lwp_callback()). */
5873 rc = seq_server_alloc_meta(osd->od_cl_seq->lcs_srv,
5874 &osd->od_cl_seq->lcs_space, env);
5880 static void osd_fid_fini(const struct lu_env *env, struct osd_device *osd)
5882 if (osd->od_cl_seq == NULL)
5885 seq_client_fini(osd->od_cl_seq);
5886 OBD_FREE_PTR(osd->od_cl_seq);
5887 osd->od_cl_seq = NULL;
5890 static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
5894 /* shutdown quota slave instance associated with the device */
5895 if (o->od_quota_slave != NULL) {
5896 qsd_fini(env, o->od_quota_slave);
5897 o->od_quota_slave = NULL;
5900 osd_fid_fini(env, o);
5905 static void osd_umount(const struct lu_env *env, struct osd_device *o)
5909 if (o->od_mnt != NULL) {
5910 shrink_dcache_sb(osd_sb(o));
5911 osd_sync(env, &o->od_dt_dev);
5920 static int osd_mount(const struct lu_env *env,
5921 struct osd_device *o, struct lustre_cfg *cfg)
5923 const char *name = lustre_cfg_string(cfg, 0);
5924 const char *dev = lustre_cfg_string(cfg, 1);
5926 unsigned long page, s_flags, lmd_flags = 0;
5927 struct page *__page;
5928 struct file_system_type *type;
5929 char *options = NULL;
5931 struct osd_thread_info *info = osd_oti_get(env);
5932 struct lu_fid *fid = &info->oti_fid;
5933 struct inode *inode;
5934 int rc = 0, force_over_128tb = 0;
5937 if (o->od_mnt != NULL)
5940 if (strlen(dev) >= sizeof(o->od_mntdev))
5942 strcpy(o->od_mntdev, dev);
5944 str = lustre_cfg_string(cfg, 2);
5945 s_flags = simple_strtoul(str, NULL, 0);
5946 str = strstr(str, ":");
5948 lmd_flags = simple_strtoul(str + 1, NULL, 0);
5949 opts = lustre_cfg_string(cfg, 3);
5951 if (opts == NULL || strstr(opts, "bigendian_extents") == NULL) {
5952 CERROR("%s: device %s extents feature is not guaranteed to "
5953 "work on big-endian systems. Use \"bigendian_extents\" "
5954 "mount option to override.\n", name, dev);
5958 if (opts != NULL && strstr(opts, "force_over_128tb") != NULL)
5959 force_over_128tb = 1;
5961 __page = alloc_page(GFP_IOFS);
5963 GOTO(out, rc = -ENOMEM);
5964 page = (unsigned long)page_address(__page);
5965 options = (char *)page;
5968 /* strip out the options for backward compatibility */
5969 static char *sout[] = {
5976 /* strip out option we processed in osd */
5977 "bigendian_extents",
5981 strcat(options, opts);
5982 for (rc = 0, str = options; sout[rc]; ) {
5983 char *op = strstr(str, sout[rc]);
5989 if (op == options || *(op - 1) == ',') {
5990 str = op + strlen(sout[rc]);
5991 if (*str == ',' || *str == '\0') {
5992 *str == ',' ? str++ : str;
5993 memmove(op, str, strlen(str) + 1);
5996 for (str = op; *str != ',' && *str != '\0'; str++)
6000 strncat(options, "user_xattr,acl", 14);
6003 /* Glom up mount options */
6004 if (*options != '\0')
6005 strcat(options, ",");
6006 strlcat(options, "no_mbcache", PAGE_CACHE_SIZE);
6008 type = get_fs_type("ldiskfs");
6010 CERROR("%s: cannot find ldiskfs module\n", name);
6011 GOTO(out, rc = -ENODEV);
6014 o->od_mnt = vfs_kern_mount(type, s_flags, dev, options);
6015 module_put(type->owner);
6017 if (IS_ERR(o->od_mnt)) {
6018 rc = PTR_ERR(o->od_mnt);
6020 CERROR("%s: can't mount %s: %d\n", name, dev, rc);
6024 if (ldiskfs_blocks_count(LDISKFS_SB(osd_sb(o))->s_es) > (8ULL << 32) &&
6025 force_over_128tb == 0) {
6026 CERROR("%s: device %s LDISKFS does not support filesystems "
6027 "greater than 128TB and can cause data corruption. "
6028 "Use \"force_over_128tb\" mount option to override.\n",
6030 GOTO(out, rc = -EINVAL);
6033 #ifdef HAVE_DEV_SET_RDONLY
6034 if (dev_check_rdonly(o->od_mnt->mnt_sb->s_bdev)) {
6035 CERROR("%s: underlying device %s is marked as read-only. "
6036 "Setup failed\n", name, dev);
6037 GOTO(out_mnt, rc = -EROFS);
6041 if (!LDISKFS_HAS_COMPAT_FEATURE(o->od_mnt->mnt_sb,
6042 LDISKFS_FEATURE_COMPAT_HAS_JOURNAL)) {
6043 CERROR("%s: device %s is mounted w/o journal\n", name, dev);
6044 GOTO(out_mnt, rc = -EINVAL);
6047 #ifdef LDISKFS_MOUNT_DIRDATA
6048 if (LDISKFS_HAS_INCOMPAT_FEATURE(o->od_mnt->mnt_sb,
6049 LDISKFS_FEATURE_INCOMPAT_DIRDATA))
6050 LDISKFS_SB(osd_sb(o))->s_mount_opt |= LDISKFS_MOUNT_DIRDATA;
6051 else if (!o->od_is_ost)
6052 CWARN("%s: device %s was upgraded from Lustre-1.x without "
6053 "enabling the dirdata feature. If you do not want to "
6054 "downgrade to Lustre-1.x again, you can enable it via "
6055 "'tune2fs -O dirdata device'\n", name, dev);
6057 inode = osd_sb(o)->s_root->d_inode;
6058 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
6059 rc = osd_ea_fid_set(info, inode, fid, LMAC_NOT_IN_OI, 0);
6061 CERROR("%s: failed to set lma on %s root inode\n", name, dev);
6065 if (lmd_flags & LMD_FLG_NOSCRUB)
6076 __free_page(__page);
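/*
 * Configuration layout sketch (illustrative) for the lustre_cfg buffers
 * consumed by osd_mount() above; the indexes match the lustre_cfg_string()
 * calls in the function, and the sample values are made up:
 *
 *	lustre_cfg_string(cfg, 0)  obd/service name, e.g. "lustre-MDT0000"
 *	lustre_cfg_string(cfg, 1)  backing block device, e.g. "/dev/sdb"
 *	lustre_cfg_string(cfg, 2)  "<s_flags>:<lmd_flags>" numeric string
 *	lustre_cfg_string(cfg, 3)  extra mount options, e.g. "errors=remount-ro"
 */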
6081 static struct lu_device *osd_device_fini(const struct lu_env *env,
6082 struct lu_device *d)
6084 struct osd_device *o = osd_dev(d);
6087 osd_shutdown(env, o);
6089 osd_scrub_cleanup(env, o);
6090 osd_obj_map_fini(o);
6096 static int osd_device_init0(const struct lu_env *env,
6097 struct osd_device *o,
6098 struct lustre_cfg *cfg)
6100 struct lu_device *l = osd2lu_dev(o);
6101 struct osd_thread_info *info;
6105 /* if the module was re-loaded, env can lose its keys */
6106 rc = lu_env_refill((struct lu_env *) env);
6109 info = osd_oti_get(env);
6112 l->ld_ops = &osd_lu_ops;
6113 o->od_dt_dev.dd_ops = &osd_dt_ops;
6115 spin_lock_init(&o->od_osfs_lock);
6116 mutex_init(&o->od_otable_mutex);
6117 INIT_LIST_HEAD(&o->od_orphan_list);
6119 o->od_read_cache = 1;
6120 o->od_writethrough_cache = 1;
6121 o->od_readcache_max_filesize = OSD_MAX_CACHE_SIZE;
6123 cplen = strlcpy(o->od_svname, lustre_cfg_string(cfg, 4),
6124 sizeof(o->od_svname));
6125 if (cplen >= sizeof(o->od_svname)) {
6130 o->od_index = -1; /* -1 means index is invalid */
6131 rc = server_name2index(o->od_svname, &o->od_index, NULL);
6132 if (rc == LDD_F_SV_TYPE_OST)
6135 o->od_full_scrub_ratio = OFSR_DEFAULT;
6136 o->od_full_scrub_threshold_rate = FULL_SCRUB_THRESHOLD_RATE_DEFAULT;
6137 rc = osd_mount(env, o, cfg);
6141 rc = osd_obj_map_init(env, o);
6145 rc = lu_site_init(&o->od_site, l);
6147 GOTO(out_compat, rc);
6148 o->od_site.ls_bottom_dev = l;
6150 rc = lu_site_init_finish(&o->od_site);
6154 INIT_LIST_HEAD(&o->od_ios_list);
6155 /* setup scrub, including OI files initialization */
6156 rc = osd_scrub_setup(env, o);
6160 rc = osd_procfs_init(o, o->od_svname);
6162 CERROR("%s: can't initialize procfs: rc = %d\n",
6164 GOTO(out_scrub, rc);
6167 LASSERT(l->ld_site->ls_linkage.next != NULL);
6168 LASSERT(l->ld_site->ls_linkage.prev != NULL);
6170 /* initialize quota slave instance */
6171 o->od_quota_slave = qsd_init(env, o->od_svname, &o->od_dt_dev,
6173 if (IS_ERR(o->od_quota_slave)) {
6174 rc = PTR_ERR(o->od_quota_slave);
6175 o->od_quota_slave = NULL;
6176 GOTO(out_procfs, rc);
6184 osd_scrub_cleanup(env, o);
6186 lu_site_fini(&o->od_site);
6188 osd_obj_map_fini(o);
6195 static struct lu_device *osd_device_alloc(const struct lu_env *env,
6196 struct lu_device_type *t,
6197 struct lustre_cfg *cfg)
6199 struct osd_device *o;
6204 return ERR_PTR(-ENOMEM);
6206 rc = dt_device_init(&o->od_dt_dev, t);
6208 /* Because the ctx might be revived in dt_device_init,
6209 * refill the env here */
6210 lu_env_refill((struct lu_env *)env);
6211 rc = osd_device_init0(env, o, cfg);
6213 dt_device_fini(&o->od_dt_dev);
6216 if (unlikely(rc != 0))
6219 return rc == 0 ? osd2lu_dev(o) : ERR_PTR(rc);
6222 static struct lu_device *osd_device_free(const struct lu_env *env,
6223 struct lu_device *d)
6225 struct osd_device *o = osd_dev(d);
6228 /* XXX: make osd top device in order to release reference */
6229 d->ld_site->ls_top_dev = d;
6230 lu_site_purge(env, d->ld_site, -1);
6231 if (!cfs_hash_is_empty(d->ld_site->ls_obj_hash)) {
6232 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
6233 lu_site_print(env, d->ld_site, &msgdata, lu_cdebug_printer);
6235 lu_site_fini(&o->od_site);
6236 dt_device_fini(&o->od_dt_dev);
6241 static int osd_process_config(const struct lu_env *env,
6242 struct lu_device *d, struct lustre_cfg *cfg)
6244 struct osd_device *o = osd_dev(d);
6248 switch (cfg->lcfg_command) {
6250 rc = osd_mount(env, o, cfg);
6253 lu_dev_del_linkage(d->ld_site, d);
6254 rc = osd_shutdown(env, o);
6257 LASSERT(&o->od_dt_dev);
6258 rc = class_process_proc_param(PARAM_OSD, lprocfs_osd_obd_vars,
6259 cfg, &o->od_dt_dev);
6260 if (rc > 0 || rc == -ENOSYS)
6261 rc = class_process_proc_param(PARAM_OST,
6262 lprocfs_osd_obd_vars,
6263 cfg, &o->od_dt_dev);
6272 static int osd_recovery_complete(const struct lu_env *env,
6273 struct lu_device *d)
6275 struct osd_device *osd = osd_dev(d);
6279 if (osd->od_quota_slave == NULL)
6282 /* start qsd instance on recovery completion; this notifies the quota
6283 * slave code that we are about to process new requests now */
6284 rc = qsd_start(env, osd->od_quota_slave);
6289 * we use exports to track all osd users
6291 static int osd_obd_connect(const struct lu_env *env, struct obd_export **exp,
6292 struct obd_device *obd, struct obd_uuid *cluuid,
6293 struct obd_connect_data *data, void *localdata)
6295 struct osd_device *osd = osd_dev(obd->obd_lu_dev);
6296 struct lustre_handle conn;
6300 CDEBUG(D_CONFIG, "connect #%d\n", osd->od_connects);
6302 rc = class_connect(&conn, obd, cluuid);
6306 *exp = class_conn2export(&conn);
6308 spin_lock(&osd->od_osfs_lock);
6310 spin_unlock(&osd->od_osfs_lock);
6316 * once the last export (we don't count the self-export) has disappeared
6317 * the osd can be released
6319 static int osd_obd_disconnect(struct obd_export *exp)
6321 struct obd_device *obd = exp->exp_obd;
6322 struct osd_device *osd = osd_dev(obd->obd_lu_dev);
6323 int rc, release = 0;
6326 /* Only disconnect the underlying layers on the final disconnect. */
6327 spin_lock(&osd->od_osfs_lock);
6329 if (osd->od_connects == 0)
6331 spin_unlock(&osd->od_osfs_lock);
6333 rc = class_disconnect(exp); /* bz 9811 */
6335 if (rc == 0 && release)
6336 class_manual_cleanup(obd);
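/*
 * Reference-counting sketch (illustrative) for the connect/disconnect pair
 * above: od_connects is incremented under od_osfs_lock on every connect and
 * decremented on disconnect; only when it drops to zero is the device torn
 * down:
 *
 *	connect:	od_connects++
 *	disconnect:	if (--od_connects == 0)
 *				class_manual_cleanup(obd);
 */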
6340 static int osd_prepare(const struct lu_env *env, struct lu_device *pdev,
6341 struct lu_device *dev)
6343 struct osd_device *osd = osd_dev(dev);
6344 struct lr_server_data *lsd =
6345 &osd->od_dt_dev.dd_lu_dev.ld_site->ls_tgt->lut_lsd;
6349 if (osd->od_quota_slave != NULL) {
6350 /* set up quota slave objects */
6351 result = qsd_prepare(env, osd->od_quota_slave);
6356 if (lsd->lsd_feature_incompat & OBD_COMPAT_OST) {
6357 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 0, 52, 0)
6358 if (lsd->lsd_feature_rocompat & OBD_ROCOMPAT_IDX_IN_IDIF) {
6359 osd->od_index_in_idif = 1;
6361 osd->od_index_in_idif = 0;
6362 result = osd_register_proc_index_in_idif(osd);
6367 osd->od_index_in_idif = 1;
6371 result = osd_fid_init(env, osd);
6376 static int osd_fid_alloc(const struct lu_env *env, struct obd_export *exp,
6377 struct lu_fid *fid, struct md_op_data *op_data)
6379 struct osd_device *osd = osd_dev(exp->exp_obd->obd_lu_dev);
6381 return seq_client_alloc_fid(env, osd->od_cl_seq, fid);
6384 static const struct lu_object_operations osd_lu_obj_ops = {
6385 .loo_object_init = osd_object_init,
6386 .loo_object_delete = osd_object_delete,
6387 .loo_object_release = osd_object_release,
6388 .loo_object_free = osd_object_free,
6389 .loo_object_print = osd_object_print,
6390 .loo_object_invariant = osd_object_invariant
6393 const struct lu_device_operations osd_lu_ops = {
6394 .ldo_object_alloc = osd_object_alloc,
6395 .ldo_process_config = osd_process_config,
6396 .ldo_recovery_complete = osd_recovery_complete,
6397 .ldo_prepare = osd_prepare,
6400 static const struct lu_device_type_operations osd_device_type_ops = {
6401 .ldto_init = osd_type_init,
6402 .ldto_fini = osd_type_fini,
6404 .ldto_start = osd_type_start,
6405 .ldto_stop = osd_type_stop,
6407 .ldto_device_alloc = osd_device_alloc,
6408 .ldto_device_free = osd_device_free,
6410 .ldto_device_init = osd_device_init,
6411 .ldto_device_fini = osd_device_fini
6414 static struct lu_device_type osd_device_type = {
6415 .ldt_tags = LU_DEVICE_DT,
6416 .ldt_name = LUSTRE_OSD_LDISKFS_NAME,
6417 .ldt_ops = &osd_device_type_ops,
6418 .ldt_ctx_tags = LCT_LOCAL,
6421 static int osd_health_check(const struct lu_env *env, struct obd_device *obd)
6423 struct osd_device *osd = osd_dev(obd->obd_lu_dev);
6424 struct super_block *sb = osd_sb(osd);
6426 return (osd->od_mnt == NULL || sb->s_flags & MS_RDONLY);
6430 * lprocfs legacy support.
6432 static struct obd_ops osd_obd_device_ops = {
6433 .o_owner = THIS_MODULE,
6434 .o_connect = osd_obd_connect,
6435 .o_disconnect = osd_obd_disconnect,
6436 .o_fid_alloc = osd_fid_alloc,
6437 .o_health_check = osd_health_check,
6440 static int __init osd_mod_init(void)
6444 LASSERT(BH_DXLock < sizeof(((struct buffer_head *)0)->b_state) * 8);
6445 #if !defined(CONFIG_DEBUG_MUTEXES) && !defined(CONFIG_DEBUG_SPINLOCK)
6446 /* please, try to keep osd_thread_info smaller than a page */
6447 CLASSERT(sizeof(struct osd_thread_info) <= PAGE_SIZE);
6452 rc = lu_kmem_init(ldiskfs_caches);
6456 rc = class_register_type(&osd_obd_device_ops, NULL, true,
6457 lprocfs_osd_module_vars,
6458 LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);
6460 lu_kmem_fini(ldiskfs_caches);
6464 static void __exit osd_mod_exit(void)
6466 class_unregister_type(LUSTRE_OSD_LDISKFS_NAME);
6467 lu_kmem_fini(ldiskfs_caches);
6470 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
6471 MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_LDISKFS_NAME")");
6472 MODULE_VERSION(LUSTRE_VERSION_STRING);
6473 MODULE_LICENSE("GPL");
6475 module_init(osd_mod_init);
6476 module_exit(osd_mod_exit);