4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2014, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/osd/osd_handler.c
38 * Top-level entry points into osd module
40 * Author: Nikita Danilov <nikita@clusterfs.com>
41 * Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
44 #define DEBUG_SUBSYSTEM S_OSD
46 #include <linux/module.h>
47 #include <linux/user_namespace.h>
48 #ifdef HAVE_UIDGID_HEADER
49 # include <linux/uidgid.h>
52 /* LUSTRE_VERSION_CODE */
53 #include <lustre_ver.h>
54 /* prerequisite for linux/xattr.h */
55 #include <linux/types.h>
56 /* prerequisite for linux/xattr.h */
58 /* XATTR_{REPLACE,CREATE} */
59 #include <linux/xattr.h>
61 #include <ldiskfs/ldiskfs.h>
62 #include <ldiskfs/xattr.h>
65 * struct OBD_{ALLOC,FREE}*()
68 #include <obd_support.h>
69 /* struct ptlrpc_thread */
70 #include <lustre_net.h>
71 #include <lustre_fid.h>
73 #include <lustre_param.h>
75 #include "osd_internal.h"
76 #include "osd_dynlocks.h"
78 /* llo_* api support */
79 #include <md_object.h>
80 #include <lustre_quota.h>
82 #include <lustre_linkea.h>
85 CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
86 "ldiskfs with parallel directory operations");
88 int ldiskfs_track_declares_assert;
89 CFS_MODULE_PARM(ldiskfs_track_declares_assert, "i", int, 0644,
90 "LBUG during tracking of declares");
92 /* Slab to allocate dynlocks */
93 struct kmem_cache *dynlock_cachep;
95 /* Slab to allocate osd_it_ea */
96 struct kmem_cache *osd_itea_cachep;
98 static struct lu_kmem_descr ldiskfs_caches[] = {
100 .ckd_cache = &dynlock_cachep,
101 .ckd_name = "dynlock_cache",
102 .ckd_size = sizeof(struct dynlock_handle)
105 .ckd_cache = &osd_itea_cachep,
106 .ckd_name = "osd_itea_cache",
107 .ckd_size = sizeof(struct osd_it_ea)
114 static const char dot[] = ".";
115 static const char dotdot[] = "..";
116 static const char remote_obj_dir[] = "REM_OBJ_DIR";
118 static const struct lu_object_operations osd_lu_obj_ops;
119 static const struct dt_object_operations osd_obj_ops;
120 static const struct dt_object_operations osd_obj_ea_ops;
121 static const struct dt_object_operations osd_obj_otable_it_ops;
122 static const struct dt_index_operations osd_index_iam_ops;
123 static const struct dt_index_operations osd_index_ea_ops;
125 static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
126 const struct lu_fid *fid);
127 static int osd_process_scheduled_agent_removals(const struct lu_env *env,
128 struct osd_device *osd);
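/* Rollback table (a descriptive note added here): maps each declared OSD
 * operation type to the operation that would undo it (e.g. create <-> destroy,
 * insert <-> delete); OSD_OT_MAX marks operations that cannot be rolled back. */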
130 int osd_trans_declare_op2rb[] = {
131 [OSD_OT_ATTR_SET] = OSD_OT_ATTR_SET,
132 [OSD_OT_PUNCH] = OSD_OT_MAX,
133 [OSD_OT_XATTR_SET] = OSD_OT_XATTR_SET,
134 [OSD_OT_CREATE] = OSD_OT_DESTROY,
135 [OSD_OT_DESTROY] = OSD_OT_CREATE,
136 [OSD_OT_REF_ADD] = OSD_OT_REF_DEL,
137 [OSD_OT_REF_DEL] = OSD_OT_REF_ADD,
138 [OSD_OT_WRITE] = OSD_OT_WRITE,
139 [OSD_OT_INSERT] = OSD_OT_DELETE,
140 [OSD_OT_DELETE] = OSD_OT_INSERT,
141 [OSD_OT_QUOTA] = OSD_OT_MAX,
144 static int osd_has_index(const struct osd_object *obj)
146 return obj->oo_dt.do_index_ops != NULL;
149 static int osd_object_invariant(const struct lu_object *l)
151 return osd_invariant(osd_obj(l));
155 * Concurrency: doesn't matter
159 * Concurrency: doesn't matter
161 static int osd_write_locked(const struct lu_env *env, struct osd_object *o)
163 struct osd_thread_info *oti = osd_oti_get(env);
164 return oti->oti_w_locks > 0 && o->oo_owner == env;
168 * Concurrency: doesn't access mutable data
170 static int osd_root_get(const struct lu_env *env,
171 struct dt_device *dev, struct lu_fid *f)
173 lu_local_obj_fid(f, OSD_FS_ROOT_OID);
178 * OSD object methods.
182 * Concurrency: no concurrent access is possible that early in object
185 static struct lu_object *osd_object_alloc(const struct lu_env *env,
186 const struct lu_object_header *hdr,
189 struct osd_object *mo;
195 l = &mo->oo_dt.do_lu;
196 dt_object_init(&mo->oo_dt, NULL, d);
197 mo->oo_dt.do_ops = &osd_obj_ea_ops;
198 l->lo_ops = &osd_lu_obj_ops;
199 init_rwsem(&mo->oo_sem);
200 init_rwsem(&mo->oo_ext_idx_sem);
201 spin_lock_init(&mo->oo_guard);
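/* Read the LMA (Lustre Metadata Attributes) xattr of @inode into @lma,
 * converting it to CPU byte order and checking for incompat flags that
 * this OSD does not support. */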
208 int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
209 struct dentry *dentry, struct lustre_mdt_attrs *lma)
213 CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
214 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
215 info->oti_mdt_attrs_old, LMA_OLD_SIZE);
217 if ((void *)lma != (void *)info->oti_mdt_attrs_old)
218 memcpy(lma, info->oti_mdt_attrs_old, sizeof(*lma));
220 lustre_lma_swab(lma);
221 /* Check LMA compatibility */
222 if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
223 CWARN("%.16s: unsupported incompat LMA feature(s) %#x "
224 "for fid = "DFID", ino = %lu\n",
225 LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
226 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
227 PFID(&lma->lma_self_fid), inode->i_ino);
230 } else if (rc == 0) {
238 * retrieve object from backend ext fs.
240 struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
241 struct osd_inode_id *id)
243 struct inode *inode = NULL;
245 /* if we look for an inode within a running
246 * transaction, then we risk a deadlock */
247 /* osd_dirent_check_repair() breaks this */
248 /*LASSERT(current->journal_info == NULL);*/
250 inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
252 CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
253 id->oii_ino, PTR_ERR(inode));
254 } else if (id->oii_gen != OSD_OII_NOGEN &&
255 inode->i_generation != id->oii_gen) {
256 CDEBUG(D_INODE, "unmatched inode: ino = %u, oii_gen = %u, "
257 "i_generation = %u\n",
258 id->oii_ino, id->oii_gen, inode->i_generation);
260 inode = ERR_PTR(-ESTALE);
261 } else if (inode->i_nlink == 0) {
262 /* due to parallel readdir and unlink,
263 * we can have a dead inode here. */
264 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
266 inode = ERR_PTR(-ESTALE);
267 } else if (is_bad_inode(inode)) {
268 CWARN("%.16s: bad inode: ino = %u\n",
269 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, id->oii_ino);
271 inode = ERR_PTR(-ENOENT);
273 if (id->oii_gen == OSD_OII_NOGEN)
274 osd_id_gen(id, inode->i_ino, inode->i_generation);
276 /* Do not update file c/mtime in ldiskfs.
277 * NB: we don't have any lock to protect this because we don't
278 * have a reference on the osd_object yet, but contention with
279 * another lookup + attr_set can't happen in the tiny window
280 * between if (...) and setting S_NOCMTIME. */
281 if (!(inode->i_flags & S_NOCMTIME))
282 inode->i_flags |= S_NOCMTIME;
287 static struct inode *
288 osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
289 struct osd_inode_id *id, struct lu_fid *fid)
291 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
295 inode = osd_iget(info, dev, id);
299 rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
301 *fid = lma->lma_self_fid;
302 } else if (rc == -ENODATA) {
303 if (unlikely(inode == osd_sb(dev)->s_root->d_inode))
304 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
306 lu_igif_build(fid, inode->i_ino, inode->i_generation);
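/* Look up the inode for @fid via the OI mapping in @id, verifying the
 * generation and link count; if the cached mapping turns out to be stale,
 * re-check the OI file before deciding how to report the failure. */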
314 static struct inode *osd_iget_check(struct osd_thread_info *info,
315 struct osd_device *dev,
316 const struct lu_fid *fid,
317 struct osd_inode_id *id,
324 /* The cached OI mapping is trustable. If we cannot locate the inode
325 * via the cached OI mapping, then return the failure to the caller
326 * directly without further OI checking. */
328 inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
331 if (cached || (rc != -ENOENT && rc != -ESTALE)) {
332 CDEBUG(D_INODE, "no inode: ino = %u, rc = %d\n",
341 if (is_bad_inode(inode)) {
344 CDEBUG(D_INODE, "bad inode: ino = %u\n", id->oii_ino);
352 if (id->oii_gen != OSD_OII_NOGEN &&
353 inode->i_generation != id->oii_gen) {
356 CDEBUG(D_INODE, "unmatched inode: ino = %u, "
357 "oii_gen = %u, i_generation = %u\n",
358 id->oii_ino, id->oii_gen, inode->i_generation);
366 if (inode->i_nlink == 0) {
369 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
379 LASSERTF(rc == -ESTALE || rc == -ENOENT, "rc = %d\n", rc);
381 rc = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
382 /* XXX: There are four possible cases:
384 * 1. Backup/restore caused the OI mapping to be invalid.
386 * 2. Someone unlinked the object but did NOT remove
387 * the OI mapping, e.g. by mounting the target device
388 * as ldiskfs and modifying something directly.
390 * 3. Someone just removed the object between the
391 * former oi_lookup and the iget. That is normal.
392 * 4. Other failure cases.
394 * Generally, when the device is mounted, it will
395 * automatically check whether the system was restored from
396 * a file-level backup or not. We trust that detection
397 * to distinguish the 1st case from the 2nd case. */
399 if (!IS_ERR(inode) && inode->i_generation != 0 &&
400 inode->i_generation == id->oii_gen)
401 /* "id->oii_gen != OSD_OII_NOGEN" is for
402 * "@cached == false" case. */
407 /* If the OI mapping was in the OI file before the
408 * osd_iget_check(), but now it has disappeared,
409 * then it must have been removed by a race. That is
410 * a normal race case. */
413 if (id->oii_gen == OSD_OII_NOGEN)
414 osd_id_gen(id, inode->i_ino, inode->i_generation);
416 /* Do not update file c/mtime in ldiskfs.
417 * NB: we don't have any lock to protect this because we don't
418 * have a reference on the osd_object yet, but contention with
419 * another lookup + attr_set can't happen in the tiny window
420 * between if (...) and setting S_NOCMTIME. */
421 if (!(inode->i_flags & S_NOCMTIME))
422 inode->i_flags |= S_NOCMTIME;
439 * \retval +v: new filter_fid, does not contain self-fid
440 * \retval 0: filter_fid_old, contains self-fid
441 * \retval -v: other failure cases
443 int osd_get_idif(struct osd_thread_info *info, struct inode *inode,
444 struct dentry *dentry, struct lu_fid *fid)
446 struct filter_fid_old *ff = &info->oti_ff;
447 struct ost_id *ostid = &info->oti_ostid;
450 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_FID, ff, sizeof(*ff));
451 if (rc == sizeof(*ff)) {
453 ostid_set_seq(ostid, le64_to_cpu(ff->ff_seq));
454 ostid_set_id(ostid, le64_to_cpu(ff->ff_objid));
455 /* XXX: should use real OST index in the future. LU-3569 */
456 ostid_to_fid(fid, ostid, 0);
457 } else if (rc == sizeof(struct filter_fid)) {
459 } else if (rc >= 0) {
466 static int osd_lma_self_repair(struct osd_thread_info *info,
467 struct osd_device *osd, struct inode *inode,
468 const struct lu_fid *fid, __u32 compat)
473 LASSERT(current->journal_info == NULL);
475 jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
476 osd_dto_credits_noquota[DTO_XATTR_SET]);
479 CWARN("%s: cannot start journal for lma_self_repair: rc = %d\n",
484 rc = osd_ea_fid_set(info, inode, fid, compat, 0);
486 CWARN("%s: cannot self repair the LMA: rc = %d\n",
488 ldiskfs_journal_stop(jh);
492 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
494 struct osd_thread_info *info = osd_oti_get(env);
495 struct osd_device *osd = osd_obj2dev(obj);
496 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
497 struct inode *inode = obj->oo_inode;
498 struct dentry *dentry = &info->oti_obj_dentry;
499 struct lu_fid *fid = NULL;
500 const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
504 CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
505 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
506 info->oti_mdt_attrs_old, LMA_OLD_SIZE);
507 if (rc == -ENODATA && !fid_is_igif(rfid) && osd->od_check_ff) {
508 fid = &lma->lma_self_fid;
509 rc = osd_get_idif(info, inode, dentry, fid);
510 if ((rc > 0) || (rc == -ENODATA && osd->od_index_in_idif)) {
511 /* For the given OST-object, if it has neither LMA nor
512 * FID in XATTR_NAME_FID, then the given FID (which is
513 * contained in the @obj, from client RPC for locating
514 * the OST-object) is trusted. We use it to generate
516 osd_lma_self_repair(info, osd, inode, rfid,
527 lustre_lma_swab(lma);
528 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
529 CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
530 CWARN("%s: unsupported incompat LMA feature(s) %#x for "
531 "fid = "DFID", ino = %lu\n", osd_name(osd),
532 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
533 PFID(rfid), inode->i_ino);
536 fid = &lma->lma_self_fid;
540 if (fid != NULL && unlikely(!lu_fid_eq(rfid, fid))) {
541 if (fid_is_idif(rfid) && fid_is_idif(fid)) {
542 struct ost_id *oi = &info->oti_ostid;
543 struct lu_fid *fid1 = &info->oti_fid3;
544 __u32 idx = fid_idif_ost_idx(rfid);
546 /* For an old IDIF, the OST index is not part of the IDIF,
547 * which means that different OSTs may have the same IDIFs.
548 * In such a case, we need to make some compatibility
549 * checks to make sure the OI scrub is triggered properly. */
550 if (idx != 0 && fid_idif_ost_idx(fid) == 0) {
551 /* Given @rfid is new, LMA is old. */
552 fid_to_ostid(fid, oi);
553 ostid_to_fid(fid1, oi, idx);
554 if (lu_fid_eq(fid1, rfid)) {
555 if (osd->od_index_in_idif)
556 osd_lma_self_repair(info, osd,
570 static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
571 const struct lu_fid *fid,
572 const struct lu_object_conf *conf)
574 struct osd_thread_info *info;
575 struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
576 struct osd_device *dev;
577 struct osd_idmap_cache *oic;
578 struct osd_inode_id *id;
580 struct osd_scrub *scrub;
581 struct scrub_file *sf;
585 bool triggered = false;
588 LINVRNT(osd_invariant(obj));
589 LASSERT(obj->oo_inode == NULL);
590 LASSERTF(fid_is_sane(fid) || fid_is_idif(fid), DFID"\n", PFID(fid));
593 scrub = &dev->od_scrub;
594 sf = &scrub->os_file;
595 info = osd_oti_get(env);
597 oic = &info->oti_cache;
599 if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
602 /* For an object created as a locking anchor, or for an object to
603 * be created on disk: no need to call osd_oi_lookup() here because the
604 * FID should never be re-used. If it really is a duplicate FID for some
605 * unexpected reason, we should be able to detect it later by calling
606 * do_create->osd_oi_insert(). */
607 if (conf != NULL && conf->loc_flags & LOC_F_NEW)
608 GOTO(out, result = 0);
610 /* Search order: 1. per-thread cache. */
611 if (lu_fid_eq(fid, &oic->oic_fid) &&
612 likely(oic->oic_dev == dev)) {
618 if (!list_empty(&scrub->os_inconsistent_items)) {
619 /* Search order: 2. OI scrub pending list. */
620 result = osd_oii_lookup(dev, fid, id);
626 /* Search order: 3. OI files. */
627 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
628 if (result == -ENOENT) {
629 if (!(fid_is_norm(fid) || fid_is_igif(fid)) ||
630 fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) ||
631 !ldiskfs_test_bit(osd_oi_fid2idx(dev,fid),
633 GOTO(out, result = 0);
642 inode = osd_iget_check(info, dev, fid, id, cached);
644 result = PTR_ERR(inode);
645 if (result == -ENOENT || result == -ESTALE)
646 GOTO(out, result = -ENOENT);
648 if (result == -EREMCHG) {
651 if (unlikely(triggered))
652 GOTO(out, result = saved);
655 if (thread_is_running(&scrub->os_thread)) {
656 result = -EINPROGRESS;
657 } else if (!dev->od_noscrub) {
658 result = osd_scrub_start(dev, SS_AUTO_FULL |
659 SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT);
660 LCONSOLE_WARN("%.16s: trigger OI scrub by RPC "
661 "for "DFID", rc = %d [1]\n",
662 osd_name(dev), PFID(fid), result);
663 if (result == 0 || result == -EALREADY)
664 result = -EINPROGRESS;
669 if (fid_is_on_ost(info, dev, fid, OI_CHECK_FLD))
672 /* We still have a chance to get the valid inode: for an
673 * object which is referenced by a remote name entry, the
674 * object on the local MDT will be linked under the dir
675 * "/REMOTE_PARENT_DIR" with its FID string as the name.
677 * We do not know whether the object for the given FID
678 * is referenced by some remote name entry or not, and
679 * especially for DNE II, a multiple-linked object may
680 * have many name entries residing on many MDTs.
682 * To simplify the operation, OSD will not distinguish
683 * further, just look up "/REMOTE_PARENT_DIR". Usually this
684 * only happens for RPCs from other MDTs during the
685 * OI scrub, or for client side RPCs with FID only,
686 * such as FID to path, or from an old connected client. */
688 result = osd_lookup_in_remote_parent(info, dev,
701 obj->oo_inode = inode;
702 LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
704 result = osd_check_lma(env, obj);
706 if (result == -ENODATA) {
708 result = osd_oi_lookup(info, dev, fid, id,
711 /* result == -ENOENT means that the OI
712 * mapping has been removed by a race,
713 * the target inode belongs to another
716 * Other errors can also be returned
719 obj->oo_inode = NULL;
722 /* result == 0 means the cached OI
723 * mapping is still in the OI file,
724 * the target inode is valid. */
727 /* The current OI mapping is from the OI file,
728 * since the inode has been found via
729 * osd_iget_check(), there is no need to recheck the OI. */
736 obj->oo_inode = NULL;
737 if (result != -EREMCHG)
741 result = osd_oi_lookup(info, dev, fid, id,
743 /* result == -ENOENT means the cached OI mapping
744 * has been removed from the OI file by a race, and the
745 * above target inode belongs to another object.
747 * Other errors can also be returned directly. */
751 /* result == 0, goto trigger */
753 /* The current OI mapping is from the OI file,
754 * since the inode has been found via
755 * osd_iget_check(), there is no need to recheck the OI. */
762 obj->oo_compat_dot_created = 1;
763 obj->oo_compat_dotdot_created = 1;
765 if (!S_ISDIR(inode->i_mode) || !ldiskfs_pdo) /* done */
766 GOTO(out, result = 0);
768 LASSERT(obj->oo_hl_head == NULL);
769 obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
770 if (obj->oo_hl_head == NULL) {
771 obj->oo_inode = NULL;
773 GOTO(out, result = -ENOMEM);
775 GOTO(out, result = 0);
778 if (result != 0 && cached)
779 fid_zero(&oic->oic_fid);
781 LINVRNT(osd_invariant(obj));
786 * Concurrency: shouldn't matter.
788 static void osd_object_init0(struct osd_object *obj)
790 LASSERT(obj->oo_inode != NULL);
791 obj->oo_dt.do_body_ops = &osd_body_ops;
792 obj->oo_dt.do_lu.lo_header->loh_attr |=
793 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
797 * Concurrency: no concurrent access is possible that early in object
800 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
801 const struct lu_object_conf *conf)
803 struct osd_object *obj = osd_obj(l);
806 LINVRNT(osd_invariant(obj));
808 if (fid_is_otable_it(&l->lo_header->loh_fid)) {
809 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
810 l->lo_header->loh_attr |= LOHA_EXISTS;
814 result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
815 obj->oo_dt.do_body_ops = &osd_body_ops_new;
816 if (result == 0 && obj->oo_inode != NULL)
817 osd_object_init0(obj);
819 LINVRNT(osd_invariant(obj));
824 * Concurrency: no concurrent access is possible that late in object
827 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
829 struct osd_object *obj = osd_obj(l);
831 LINVRNT(osd_invariant(obj));
833 dt_object_fini(&obj->oo_dt);
834 if (obj->oo_hl_head != NULL)
835 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
840 * Concurrency: no concurrent access is possible that late in object
843 static void osd_index_fini(struct osd_object *o)
845 struct iam_container *bag;
847 if (o->oo_dir != NULL) {
848 bag = &o->oo_dir->od_container;
849 if (o->oo_inode != NULL) {
850 if (bag->ic_object == o->oo_inode)
851 iam_container_fini(bag);
853 OBD_FREE_PTR(o->oo_dir);
859 * Concurrency: no concurrent access is possible that late in object
860 * life-cycle (for all existing callers, that is. New callers have to provide
861 * their own locking.)
863 static int osd_inode_unlinked(const struct inode *inode)
865 return inode->i_nlink == 0;
869 OSD_TXN_OI_DELETE_CREDITS = 20,
870 OSD_TXN_INODE_DELETE_CREDITS = 20
877 #if OSD_THANDLE_STATS
879 * Set time when the handle is allocated
881 static void osd_th_alloced(struct osd_thandle *oth)
883 oth->oth_alloced = cfs_time_current();
887 * Set time when the handle started
889 static void osd_th_started(struct osd_thandle *oth)
891 oth->oth_started = cfs_time_current();
895 * Helper function to convert time interval to microseconds packed in
898 static long interval_to_usec(cfs_time_t start, cfs_time_t end)
902 cfs_duration_usec(cfs_time_sub(end, start), &val);
903 return val.tv_sec * 1000000 + val.tv_usec;
907 * Check whether we have been dealing with this handle for too long.
909 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
910 cfs_time_t alloced, cfs_time_t started,
913 cfs_time_t now = cfs_time_current();
915 LASSERT(dev != NULL);
917 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
918 interval_to_usec(alloced, started));
919 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
920 interval_to_usec(started, closed));
921 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
922 interval_to_usec(closed, now));
924 if (cfs_time_before(cfs_time_add(alloced, cfs_time_seconds(30)), now)) {
925 CWARN("transaction handle %p was open for too long: "
927 "alloced "CFS_TIME_T" ,"
928 "started "CFS_TIME_T" ,"
929 "closed "CFS_TIME_T"\n",
930 oth, now, alloced, started, closed);
931 libcfs_debug_dumpstack(NULL);
935 #define OSD_CHECK_SLOW_TH(oth, dev, expr) \
937 cfs_time_t __closed = cfs_time_current(); \
938 cfs_time_t __alloced = oth->oth_alloced; \
939 cfs_time_t __started = oth->oth_started; \
942 __osd_th_check_slow(oth, dev, __alloced, __started, __closed); \
945 #else /* OSD_THANDLE_STATS */
947 #define osd_th_alloced(h) do {} while(0)
948 #define osd_th_started(h) do {} while(0)
949 #define OSD_CHECK_SLOW_TH(oth, dev, expr) expr
951 #endif /* OSD_THANDLE_STATS */
954 * Concurrency: doesn't access mutable data.
956 static int osd_param_is_not_sane(const struct osd_device *dev,
957 const struct thandle *th)
959 struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
961 return oh->ot_credits > osd_transaction_size(dev);
965 * Concurrency: shouldn't matter.
967 static void osd_trans_commit_cb(struct super_block *sb,
968 struct ldiskfs_journal_cb_entry *jcb, int error)
970 struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
971 struct thandle *th = &oh->ot_super;
972 struct lu_device *lud = &th->th_dev->dd_lu_dev;
973 struct dt_txn_commit_cb *dcb, *tmp;
975 LASSERT(oh->ot_handle == NULL);
978 CERROR("transaction @0x%p commit error: %d\n", th, error);
980 dt_txn_hook_commit(th);
982 /* call per-transaction callbacks if any */
983 list_for_each_entry_safe(dcb, tmp, &oh->ot_commit_dcb_list,
985 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
986 "commit callback entry: magic=%x name='%s'\n",
987 dcb->dcb_magic, dcb->dcb_name);
988 list_del_init(&dcb->dcb_linkage);
989 dcb->dcb_func(NULL, th, dcb, error);
992 lu_ref_del_at(&lud->ld_reference, &oh->ot_dev_link, "osd-tx", th);
996 lu_context_exit(&th->th_ctx);
997 lu_context_fini(&th->th_ctx);
1001 static struct thandle *osd_trans_create(const struct lu_env *env,
1002 struct dt_device *d)
1004 struct osd_thread_info *oti = osd_oti_get(env);
1005 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1006 struct osd_thandle *oh;
1010 /* no pending IO in this thread should be left over from a previous request */
1011 LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
1013 th = ERR_PTR(-ENOMEM);
1014 OBD_ALLOC_GFP(oh, sizeof *oh, GFP_NOFS);
1016 oh->ot_quota_trans = &oti->oti_quota_trans;
1017 memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
1021 th->th_tags = LCT_TX_HANDLE;
1023 INIT_LIST_HEAD(&oh->ot_commit_dcb_list);
1024 INIT_LIST_HEAD(&oh->ot_stop_dcb_list);
1027 memset(oti->oti_declare_ops, 0,
1028 sizeof(oti->oti_declare_ops));
1029 memset(oti->oti_declare_ops_cred, 0,
1030 sizeof(oti->oti_declare_ops_cred));
1031 memset(oti->oti_declare_ops_used, 0,
1032 sizeof(oti->oti_declare_ops_used));
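/* Dump the per-operation declaration statistics of a transaction: for each
 * operation type print the number of declarations, the credits declared,
 * and the credits actually used. */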
1037 void osd_trans_dump_creds(const struct lu_env *env, struct thandle *th)
1039 struct osd_thread_info *oti = osd_oti_get(env);
1040 struct osd_thandle *oh;
1042 oh = container_of0(th, struct osd_thandle, ot_super);
1043 LASSERT(oh != NULL);
1045 CWARN(" create: %u/%u/%u, destroy: %u/%u/%u\n",
1046 oti->oti_declare_ops[OSD_OT_CREATE],
1047 oti->oti_declare_ops_cred[OSD_OT_CREATE],
1048 oti->oti_declare_ops_used[OSD_OT_CREATE],
1049 oti->oti_declare_ops[OSD_OT_DESTROY],
1050 oti->oti_declare_ops_cred[OSD_OT_DESTROY],
1051 oti->oti_declare_ops_used[OSD_OT_DESTROY]);
1052 CWARN(" attr_set: %u/%u/%u, xattr_set: %u/%u/%u\n",
1053 oti->oti_declare_ops[OSD_OT_ATTR_SET],
1054 oti->oti_declare_ops_cred[OSD_OT_ATTR_SET],
1055 oti->oti_declare_ops_used[OSD_OT_ATTR_SET],
1056 oti->oti_declare_ops[OSD_OT_XATTR_SET],
1057 oti->oti_declare_ops_cred[OSD_OT_XATTR_SET],
1058 oti->oti_declare_ops_used[OSD_OT_XATTR_SET]);
1059 CWARN(" write: %u/%u/%u, punch: %u/%u/%u, quota %u/%u/%u\n",
1060 oti->oti_declare_ops[OSD_OT_WRITE],
1061 oti->oti_declare_ops_cred[OSD_OT_WRITE],
1062 oti->oti_declare_ops_used[OSD_OT_WRITE],
1063 oti->oti_declare_ops[OSD_OT_PUNCH],
1064 oti->oti_declare_ops_cred[OSD_OT_PUNCH],
1065 oti->oti_declare_ops_used[OSD_OT_PUNCH],
1066 oti->oti_declare_ops[OSD_OT_QUOTA],
1067 oti->oti_declare_ops_cred[OSD_OT_QUOTA],
1068 oti->oti_declare_ops_used[OSD_OT_QUOTA]);
1069 CWARN(" insert: %u/%u/%u, delete: %u/%u/%u\n",
1070 oti->oti_declare_ops[OSD_OT_INSERT],
1071 oti->oti_declare_ops_cred[OSD_OT_INSERT],
1072 oti->oti_declare_ops_used[OSD_OT_INSERT],
1073 oti->oti_declare_ops[OSD_OT_DELETE],
1074 oti->oti_declare_ops_cred[OSD_OT_DELETE],
1075 oti->oti_declare_ops_used[OSD_OT_DELETE]);
1076 CWARN(" ref_add: %u/%u/%u, ref_del: %u/%u/%u\n",
1077 oti->oti_declare_ops[OSD_OT_REF_ADD],
1078 oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
1079 oti->oti_declare_ops_used[OSD_OT_REF_ADD],
1080 oti->oti_declare_ops[OSD_OT_REF_DEL],
1081 oti->oti_declare_ops_cred[OSD_OT_REF_DEL],
1082 oti->oti_declare_ops_used[OSD_OT_REF_DEL]);
1086 * Concurrency: shouldn't matter.
1088 static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
1091 struct osd_thread_info *oti = osd_oti_get(env);
1092 struct osd_device *dev = osd_dt_dev(d);
1094 struct osd_thandle *oh;
1099 LASSERT(current->journal_info == NULL);
1101 oh = container_of0(th, struct osd_thandle, ot_super);
1102 LASSERT(oh != NULL);
1103 LASSERT(oh->ot_handle == NULL);
1105 rc = dt_txn_hook_start(env, d, th);
1109 if (unlikely(osd_param_is_not_sane(dev, th))) {
1110 static unsigned long last_printed;
1111 static int last_credits;
1113 CWARN("%.16s: too many transaction credits (%d > %d)\n",
1114 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
1116 osd_journal(dev)->j_max_transaction_buffers);
1118 osd_trans_dump_creds(env, th);
1120 if (last_credits != oh->ot_credits &&
1121 time_after(jiffies, last_printed +
1122 msecs_to_jiffies(60 * MSEC_PER_SEC))) {
1123 libcfs_debug_dumpstack(NULL);
1124 last_credits = oh->ot_credits;
1125 last_printed = jiffies;
1127 /* XXX Limit the credits to 'max_transaction_buffers', and
1128 * let the underlying filesystem catch the error if
1129 * we really need so many credits.
1131 * This should be removed when we can calculate the
1132 * credits precisely. */
1133 oh->ot_credits = osd_transaction_size(dev);
1137 * XXX temporary stuff. Some abstraction layer should
1140 jh = osd_journal_start_sb(osd_sb(dev), LDISKFS_HT_MISC, oh->ot_credits);
1144 LASSERT(oti->oti_txns == 0);
1145 lu_context_init(&th->th_ctx, th->th_tags);
1146 lu_context_enter(&th->th_ctx);
1148 lu_device_get(&d->dd_lu_dev);
1149 lu_ref_add_at(&d->dd_lu_dev.ld_reference, &oh->ot_dev_link,
1160 static int osd_seq_exists(const struct lu_env *env,
1161 struct osd_device *osd, u64 seq)
1163 struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
1164 struct seq_server_site *ss = osd_seq_site(osd);
1168 LASSERT(ss != NULL);
1169 LASSERT(ss->ss_server_fld != NULL);
1171 rc = osd_fld_lookup(env, osd, seq, range);
1174 CERROR("%s: can't lookup FLD sequence "LPX64
1175 ": rc = %d\n", osd_name(osd), seq, rc);
1179 RETURN(ss->ss_node_id == range->lsr_index);
1182 static void osd_trans_stop_cb(struct osd_thandle *oth, int result)
1184 struct dt_txn_commit_cb *dcb;
1185 struct dt_txn_commit_cb *tmp;
1187 /* call per-transaction stop callbacks if any */
1188 list_for_each_entry_safe(dcb, tmp, &oth->ot_stop_dcb_list,
1190 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
1191 "commit callback entry: magic=%x name='%s'\n",
1192 dcb->dcb_magic, dcb->dcb_name);
1193 list_del_init(&dcb->dcb_linkage);
1194 dcb->dcb_func(NULL, &oth->ot_super, dcb, result);
1199 * Concurrency: shouldn't matter.
1201 static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
1204 int rc = 0, remove_agents = 0;
1205 struct osd_thandle *oh;
1206 struct osd_thread_info *oti = osd_oti_get(env);
1207 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1208 struct osd_device *osd = osd_dt_dev(th->th_dev);
1209 struct qsd_instance *qsd = osd->od_quota_slave;
1210 struct lquota_trans *qtrans;
1213 oh = container_of0(th, struct osd_thandle, ot_super);
1215 remove_agents = oh->ot_remove_agents;
1217 qtrans = oh->ot_quota_trans;
1218 oh->ot_quota_trans = NULL;
1220 if (oh->ot_handle != NULL) {
1221 handle_t *hdl = oh->ot_handle;
1224 * add commit callback
1225 * notice we don't do this in osd_trans_start()
1226 * as underlying transaction can change during truncate
1228 ldiskfs_journal_callback_add(hdl, osd_trans_commit_cb,
1231 LASSERT(oti->oti_txns == 1);
1234 rc = dt_txn_hook_stop(env, th);
1236 CERROR("%s: failed in transaction hook: rc = %d\n",
1239 osd_trans_stop_cb(oh, rc);
1240 /* hook functions might modify th_sync */
1241 hdl->h_sync = th->th_sync;
1243 oh->ot_handle = NULL;
1244 OSD_CHECK_SLOW_TH(oh, osd, rc = ldiskfs_journal_stop(hdl));
1246 CERROR("%s: failed to stop transaction: rc = %d\n",
1249 osd_trans_stop_cb(oh, th->th_result);
1253 /* inform the quota slave device that the transaction is stopping */
1254 qsd_op_end(env, qsd, qtrans);
1256 /* as we want journal IO and data IO to be concurrent, we don't block
1257 * awaiting data IO completion in osd_do_bio(); instead we wait here
1258 * once the transaction is submitted to the journal. all regular requests
1259 * don't do direct IO (except read/write), thus this wait_event becomes
1262 * IMPORTANT: we have to wait until any IO submitted by the thread is
1263 * completed, otherwise the iobuf may be corrupted by a different request
1265 wait_event(iobuf->dr_wait,
1266 atomic_read(&iobuf->dr_numreqs) == 0);
1267 osd_fini_iobuf(osd, iobuf);
1269 rc = iobuf->dr_error;
1271 if (unlikely(remove_agents != 0))
1272 osd_process_scheduled_agent_removals(env, osd);
1277 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
1279 struct osd_thandle *oh = container_of0(th, struct osd_thandle,
1282 LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
1283 LASSERT(dcb->dcb_func != NULL);
1284 if (dcb->dcb_flags & DCB_TRANS_STOP)
1285 list_add(&dcb->dcb_linkage, &oh->ot_stop_dcb_list);
1287 list_add(&dcb->dcb_linkage, &oh->ot_commit_dcb_list);
1293 * Called just before object is freed. Releases all resources except for
1294 * object itself (that is released by osd_object_free()).
1296 * Concurrency: no concurrent access is possible that late in object
1299 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
1301 struct osd_object *obj = osd_obj(l);
1302 struct inode *inode = obj->oo_inode;
1304 LINVRNT(osd_invariant(obj));
1307 * If object is unlinked remove fid->ino mapping from object index.
1310 osd_index_fini(obj);
1311 if (inode != NULL) {
1312 struct qsd_instance *qsd = osd_obj2dev(obj)->od_quota_slave;
1313 qid_t uid = i_uid_read(inode);
1314 qid_t gid = i_gid_read(inode);
1317 obj->oo_inode = NULL;
1320 struct osd_thread_info *info = osd_oti_get(env);
1321 struct lquota_id_info *qi = &info->oti_qi;
1323 /* Release granted quota to master if necessary */
1324 qi->lqi_id.qid_uid = uid;
1325 qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
1327 qi->lqi_id.qid_uid = gid;
1328 qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
1334 * Concurrency: ->loo_object_release() is called under site spin-lock.
1336 static void osd_object_release(const struct lu_env *env,
1337 struct lu_object *l)
1342 * Concurrency: shouldn't matter.
1344 static int osd_object_print(const struct lu_env *env, void *cookie,
1345 lu_printer_t p, const struct lu_object *l)
1347 struct osd_object *o = osd_obj(l);
1348 struct iam_descr *d;
1350 if (o->oo_dir != NULL)
1351 d = o->oo_dir->od_container.ic_descr;
1354 return (*p)(env, cookie,
1355 LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
1357 o->oo_inode ? o->oo_inode->i_ino : 0UL,
1358 o->oo_inode ? o->oo_inode->i_generation : 0,
1359 d ? d->id_ops->id_name : "plain");
1362 #define GRANT_FOR_LOCAL_OIDS 32 /* 128kB for last_rcvd, quota files, ... */
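/* 32 blocks at the default 4kB ldiskfs block size gives the 128kB noted above. */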
1365 * Concurrency: shouldn't matter.
1367 int osd_statfs(const struct lu_env *env, struct dt_device *d,
1368 struct obd_statfs *sfs)
1370 struct osd_device *osd = osd_dt_dev(d);
1371 struct super_block *sb = osd_sb(osd);
1372 struct kstatfs *ksfs;
1375 if (unlikely(osd->od_mnt == NULL))
1376 return -EINPROGRESS;
1378 /* osd_lproc.c calls this without env, allocate ksfs for that case */
1379 if (unlikely(env == NULL)) {
1380 OBD_ALLOC_PTR(ksfs);
1384 ksfs = &osd_oti_get(env)->oti_ksfs;
1387 spin_lock(&osd->od_osfs_lock);
1388 result = sb->s_op->statfs(sb->s_root, ksfs);
1389 if (likely(result == 0)) { /* N.B. statfs can't really fail */
1390 statfs_pack(sfs, ksfs);
1391 if (unlikely(sb->s_flags & MS_RDONLY))
1392 sfs->os_state = OS_STATE_READONLY;
1393 if (LDISKFS_HAS_INCOMPAT_FEATURE(sb,
1394 LDISKFS_FEATURE_INCOMPAT_EXTENTS))
1395 sfs->os_maxbytes = sb->s_maxbytes;
1397 sfs->os_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
1399 spin_unlock(&osd->od_osfs_lock);
1401 if (unlikely(env == NULL))
1404 /* Reserve a small amount of space for local objects like last_rcvd,
1405 * llog, quota files, ... */
1406 if (sfs->os_bavail <= GRANT_FOR_LOCAL_OIDS) {
1409 sfs->os_bavail -= GRANT_FOR_LOCAL_OIDS;
1410 /** Take out metadata overhead for indirect blocks */
1411 sfs->os_bavail -= sfs->os_bavail >> (sb->s_blocksize_bits - 3);
1418 * Estimate space needed for file creations. We assume the largest filename
1419 * is the decimal representation of 2^64 - 1, hence a filename of 20 chars.
1420 * This is 28 bytes per object, which is 28MB for 1M objects ... not so bad.
1422 #ifdef __LDISKFS_DIR_REC_LEN
1423 #define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
1425 #define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
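/* A ldiskfs directory entry is an 8-byte header plus the name padded to a
 * 4-byte boundary, so a 20-char name costs (8 + 20 + 3) & ~3 = 28 bytes,
 * matching the 28 bytes per object quoted above. */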
1429 * Concurrency: doesn't access mutable data.
1431 static void osd_conf_get(const struct lu_env *env,
1432 const struct dt_device *dev,
1433 struct dt_device_param *param)
1435 struct super_block *sb = osd_sb(osd_dt_dev(dev));
1439 * XXX should be taken from not-yet-existing fs abstraction layer.
1441 param->ddp_max_name_len = LDISKFS_NAME_LEN;
1442 param->ddp_max_nlink = LDISKFS_LINK_MAX;
1443 param->ddp_block_shift = sb->s_blocksize_bits;
1444 param->ddp_mount_type = LDD_MT_LDISKFS;
1445 if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EXTENTS))
1446 param->ddp_maxbytes = sb->s_maxbytes;
1448 param->ddp_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
1449 /* Overhead estimate should be fairly accurate, so we really take a tiny
1450 * error margin which also avoids fragmenting the filesystem too much */
1451 param->ddp_grant_reserved = 2; /* end up to be 1.9% after conversion */
1452 /* inodes are statically allocated, so the per-inode space consumption
1453 * is the space consumed by the directory entry */
1454 param->ddp_inodespace = PER_OBJ_USAGE;
1455 /* per-fragment overhead to be used by the client code */
1456 param->ddp_grant_frag = 6 * LDISKFS_BLOCK_SIZE(sb);
1457 param->ddp_mntopts = 0;
1458 if (test_opt(sb, XATTR_USER))
1459 param->ddp_mntopts |= MNTOPT_USERXATTR;
1460 if (test_opt(sb, POSIX_ACL))
1461 param->ddp_mntopts |= MNTOPT_ACL;
1463 /* LOD might calculate the max stripe count based on max_ea_size,
1464 * so we need to take the overhead into account as well:
1465 * xattr_header + magic + xattr_entry_head */
1466 ea_overhead = sizeof(struct ldiskfs_xattr_header) + sizeof(__u32) +
1467 LDISKFS_XATTR_LEN(XATTR_NAME_MAX_LEN);
1469 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
1470 if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
1471 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE -
1475 param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
1479 * Concurrency: shouldn't matter.
1481 static int osd_sync(const struct lu_env *env, struct dt_device *d)
1485 CDEBUG(D_CACHE, "syncing OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
1487 rc = ldiskfs_force_commit(osd_sb(osd_dt_dev(d)));
1489 CDEBUG(D_CACHE, "synced OSD %s: rc = %d\n",
1490 LUSTRE_OSD_LDISKFS_NAME, rc);
1496 * Start commit for OSD device.
1498 * An implementation of dt_commit_async method for OSD device.
1499 * Asynchronously starts the underlying fs sync and thereby a transaction
1502 * \param env environment
1503 * \param d dt device
1505 * \see dt_device_operations
1507 static int osd_commit_async(const struct lu_env *env,
1508 struct dt_device *d)
1510 struct super_block *s = osd_sb(osd_dt_dev(d));
1513 CDEBUG(D_HA, "async commit OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
1514 RETURN(s->s_op->sync_fs(s, 0));
1518 * Concurrency: shouldn't matter.
1521 static int osd_ro(const struct lu_env *env, struct dt_device *d)
1523 struct super_block *sb = osd_sb(osd_dt_dev(d));
1524 struct block_device *dev = sb->s_bdev;
1525 #ifdef HAVE_DEV_SET_RDONLY
1526 struct block_device *jdev = LDISKFS_SB(sb)->journal_bdev;
1529 int rc = -EOPNOTSUPP;
1533 #ifdef HAVE_DEV_SET_RDONLY
1534 CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
1536 if (jdev && (jdev != dev)) {
1537 CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
1539 dev_set_rdonly(jdev);
1541 CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
1542 dev_set_rdonly(dev);
1544 CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
1545 osd_dt_dev(d)->od_svname, (long)dev, rc);
1551 * Note: quota credits are not counted here.
1552 * If we mount with --data_journal we may need more.
1554 const int osd_dto_credits_noquota[DTO_NR] = {
1557 * INDEX_EXTRA_TRANS_BLOCKS(8) +
1558 * SINGLEDATA_TRANS_BLOCKS(8)
1559 * XXX Note: maybe iam needs more, since iam has more levels than
1562 [DTO_INDEX_INSERT] = 16,
1565 * just modify a single entry, probably merge few within a block
1567 [DTO_INDEX_DELETE] = 1,
1571 [DTO_INDEX_UPDATE] = 16,
1573 * 4(inode, inode bits, groups, GDT)
1574 * notice: OI updates are counted separately with DTO_INDEX_INSERT
1576 [DTO_OBJECT_CREATE] = 4,
1578 * 4(inode, inode bits, groups, GDT)
1579 * notice: OI updates are counted separately with DTO_INDEX_DELETE
1581 [DTO_OBJECT_DELETE] = 4,
1583 * Attr set credits (inode)
1585 [DTO_ATTR_SET_BASE] = 1,
1587 * Xattr set. The same as xattr of EXT3.
1588 * DATA_TRANS_BLOCKS(14)
1589 * XXX Note: in the original MDS implementation INDEX_EXTRA_TRANS_BLOCKS
1590 * is also counted in. It is not clear why.
1592 [DTO_XATTR_SET] = 14,
1594 * credits for inode change during write.
1596 [DTO_WRITE_BASE] = 3,
1598 * credits for single block write.
1600 [DTO_WRITE_BLOCK] = 14,
1602 * Attr set credits for chown.
1603 * These are extra credits for setattr; the value is zero without quota
1605 [DTO_ATTR_SET_CHOWN] = 0
1608 static const struct dt_device_operations osd_dt_ops = {
1609 .dt_root_get = osd_root_get,
1610 .dt_statfs = osd_statfs,
1611 .dt_trans_create = osd_trans_create,
1612 .dt_trans_start = osd_trans_start,
1613 .dt_trans_stop = osd_trans_stop,
1614 .dt_trans_cb_add = osd_trans_cb_add,
1615 .dt_conf_get = osd_conf_get,
1616 .dt_sync = osd_sync,
1618 .dt_commit_async = osd_commit_async,
1621 static void osd_object_read_lock(const struct lu_env *env,
1622 struct dt_object *dt, unsigned role)
1624 struct osd_object *obj = osd_dt_obj(dt);
1625 struct osd_thread_info *oti = osd_oti_get(env);
1627 LINVRNT(osd_invariant(obj));
1629 LASSERT(obj->oo_owner != env);
1630 down_read_nested(&obj->oo_sem, role);
1632 LASSERT(obj->oo_owner == NULL);
1636 static void osd_object_write_lock(const struct lu_env *env,
1637 struct dt_object *dt, unsigned role)
1639 struct osd_object *obj = osd_dt_obj(dt);
1640 struct osd_thread_info *oti = osd_oti_get(env);
1642 LINVRNT(osd_invariant(obj));
1644 LASSERT(obj->oo_owner != env);
1645 down_write_nested(&obj->oo_sem, role);
1647 LASSERT(obj->oo_owner == NULL);
1648 obj->oo_owner = env;
1652 static void osd_object_read_unlock(const struct lu_env *env,
1653 struct dt_object *dt)
1655 struct osd_object *obj = osd_dt_obj(dt);
1656 struct osd_thread_info *oti = osd_oti_get(env);
1658 LINVRNT(osd_invariant(obj));
1660 LASSERT(oti->oti_r_locks > 0);
1662 up_read(&obj->oo_sem);
1665 static void osd_object_write_unlock(const struct lu_env *env,
1666 struct dt_object *dt)
1668 struct osd_object *obj = osd_dt_obj(dt);
1669 struct osd_thread_info *oti = osd_oti_get(env);
1671 LINVRNT(osd_invariant(obj));
1673 LASSERT(obj->oo_owner == env);
1674 LASSERT(oti->oti_w_locks > 0);
1676 obj->oo_owner = NULL;
1677 up_write(&obj->oo_sem);
1680 static int osd_object_write_locked(const struct lu_env *env,
1681 struct dt_object *dt)
1683 struct osd_object *obj = osd_dt_obj(dt);
1685 LINVRNT(osd_invariant(obj));
1687 return obj->oo_owner == env;
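/* Convert a 64-bit seconds value into a timespec truncated to the
 * time granularity supported by the inode's filesystem. */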
1690 static struct timespec *osd_inode_time(const struct lu_env *env,
1691 struct inode *inode, __u64 seconds)
1693 struct osd_thread_info *oti = osd_oti_get(env);
1694 struct timespec *t = &oti->oti_time;
1696 t->tv_sec = seconds;
1698 *t = timespec_trunc(*t, inode->i_sb->s_time_gran);
1703 static void osd_inode_getattr(const struct lu_env *env,
1704 struct inode *inode, struct lu_attr *attr)
1706 attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
1707 LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
1708 LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE |
1711 attr->la_atime = LTIME_S(inode->i_atime);
1712 attr->la_mtime = LTIME_S(inode->i_mtime);
1713 attr->la_ctime = LTIME_S(inode->i_ctime);
1714 attr->la_mode = inode->i_mode;
1715 attr->la_size = i_size_read(inode);
1716 attr->la_blocks = inode->i_blocks;
1717 attr->la_uid = i_uid_read(inode);
1718 attr->la_gid = i_gid_read(inode);
1719 attr->la_flags = LDISKFS_I(inode)->i_flags;
1720 attr->la_nlink = inode->i_nlink;
1721 attr->la_rdev = inode->i_rdev;
1722 attr->la_blksize = 1 << inode->i_blkbits;
1723 attr->la_blkbits = inode->i_blkbits;
1726 static int osd_attr_get(const struct lu_env *env,
1727 struct dt_object *dt,
1728 struct lu_attr *attr)
1730 struct osd_object *obj = osd_dt_obj(dt);
1732 if (!dt_object_exists(dt))
1735 LASSERT(!dt_object_remote(dt));
1736 LINVRNT(osd_invariant(obj));
1738 spin_lock(&obj->oo_guard);
1739 osd_inode_getattr(env, obj->oo_inode, attr);
1740 spin_unlock(&obj->oo_guard);
1744 static int osd_declare_attr_set(const struct lu_env *env,
1745 struct dt_object *dt,
1746 const struct lu_attr *attr,
1747 struct thandle *handle)
1749 struct osd_thandle *oh;
1750 struct osd_object *obj;
1751 struct osd_thread_info *info = osd_oti_get(env);
1752 struct lquota_id_info *qi = &info->oti_qi;
1760 LASSERT(dt != NULL);
1761 LASSERT(handle != NULL);
1763 obj = osd_dt_obj(dt);
1764 LASSERT(osd_invariant(obj));
1766 oh = container_of0(handle, struct osd_thandle, ot_super);
1767 LASSERT(oh->ot_handle == NULL);
1769 osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
1770 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
1772 if (attr == NULL || obj->oo_inode == NULL)
1775 bspace = obj->oo_inode->i_blocks;
1776 bspace <<= obj->oo_inode->i_sb->s_blocksize_bits;
1777 bspace = toqb(bspace);
1779 /* Changing ownership is always performed by the super user, it should not
1782 * We still need to call osd_declare_qid() to calculate the journal
1783 * credits for updating quota accounting files and to trigger quota
1784 * space adjustment once the operation is completed. */
1785 if (attr->la_valid & LA_UID || attr->la_valid & LA_GID) {
1787 uid = i_uid_read(obj->oo_inode);
1788 qi->lqi_type = USRQUOTA;
1789 enforce = (attr->la_valid & LA_UID) && (attr->la_uid != uid);
1790 /* inode accounting */
1791 qi->lqi_is_blk = false;
1793 /* one more inode for the new uid ... */
1794 qi->lqi_id.qid_uid = attr->la_uid;
1796 /* Reserve credits for the new uid */
1797 rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
1798 if (rc == -EDQUOT || rc == -EINPROGRESS)
1803 /* and one less inode for the current uid */
1804 qi->lqi_id.qid_uid = uid;
1806 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1807 if (rc == -EDQUOT || rc == -EINPROGRESS)
1812 /* block accounting */
1813 qi->lqi_is_blk = true;
1815 /* more blocks for the new uid ... */
1816 qi->lqi_id.qid_uid = attr->la_uid;
1817 qi->lqi_space = bspace;
1819 * Credits for the new uid has been reserved, re-use "obj"
1820 * to save credit reservation.
1822 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1823 if (rc == -EDQUOT || rc == -EINPROGRESS)
1828 /* and finally less blocks for the current uid */
1829 qi->lqi_id.qid_uid = uid;
1830 qi->lqi_space = -bspace;
1831 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1832 if (rc == -EDQUOT || rc == -EINPROGRESS)
1838 gid = i_gid_read(obj->oo_inode);
1839 qi->lqi_type = GRPQUOTA;
1840 enforce = (attr->la_valid & LA_GID) && (attr->la_gid != gid);
1842 /* inode accounting */
1843 qi->lqi_is_blk = false;
1845 /* one more inode for the new gid ... */
1846 qi->lqi_id.qid_gid = attr->la_gid;
1848 rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
1849 if (rc == -EDQUOT || rc == -EINPROGRESS)
1854 /* and one less inode for the current gid */
1855 qi->lqi_id.qid_gid = gid;
1857 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1858 if (rc == -EDQUOT || rc == -EINPROGRESS)
1863 /* block accounting */
1864 qi->lqi_is_blk = true;
1866 /* more blocks for the new gid ... */
1867 qi->lqi_id.qid_gid = attr->la_gid;
1868 qi->lqi_space = bspace;
1869 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1870 if (rc == -EDQUOT || rc == -EINPROGRESS)
1875 /* and finally less blocks for the current gid */
1876 qi->lqi_id.qid_gid = gid;
1877 qi->lqi_space = -bspace;
1878 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1879 if (rc == -EDQUOT || rc == -EINPROGRESS)
1888 static int osd_inode_setattr(const struct lu_env *env,
1889 struct inode *inode, const struct lu_attr *attr)
1891 __u64 bits = attr->la_valid;
1893 /* Only allow set size for regular file */
1894 if (!S_ISREG(inode->i_mode))
1895 bits &= ~(LA_SIZE | LA_BLOCKS);
1900 if (bits & LA_ATIME)
1901 inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
1902 if (bits & LA_CTIME)
1903 inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
1904 if (bits & LA_MTIME)
1905 inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
1906 if (bits & LA_SIZE) {
1907 LDISKFS_I(inode)->i_disksize = attr->la_size;
1908 i_size_write(inode, attr->la_size);
1912 /* OSD should not change "i_blocks" which is used by quota.
1913 * "i_blocks" should be changed by ldiskfs only. */
1914 if (bits & LA_BLOCKS)
1915 inode->i_blocks = attr->la_blocks;
1918 inode->i_mode = (inode->i_mode & S_IFMT) |
1919 (attr->la_mode & ~S_IFMT);
1921 i_uid_write(inode, attr->la_uid);
1923 i_gid_write(inode, attr->la_gid);
1924 if (bits & LA_NLINK)
1925 set_nlink(inode, attr->la_nlink);
1927 inode->i_rdev = attr->la_rdev;
1929 if (bits & LA_FLAGS) {
1930 /* always keep S_NOCMTIME */
1931 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
1937 static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
1939 if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
1940 (attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
1944 ll_vfs_dq_init(inode);
1946 if (attr->la_valid & LA_UID)
1947 iattr.ia_valid |= ATTR_UID;
1948 if (attr->la_valid & LA_GID)
1949 iattr.ia_valid |= ATTR_GID;
1950 iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
1951 iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);
1953 rc = ll_vfs_dq_transfer(inode, &iattr);
1955 CERROR("%s: quota transfer failed: rc = %d. Is quota "
1956 "enforcement enabled on the ldiskfs "
1957 "filesystem?\n", inode->i_sb->s_id, rc);
1964 static int osd_attr_set(const struct lu_env *env,
1965 struct dt_object *dt,
1966 const struct lu_attr *attr,
1967 struct thandle *handle)
1969 struct osd_object *obj = osd_dt_obj(dt);
1970 struct inode *inode;
1973 if (!dt_object_exists(dt))
1976 LASSERT(handle != NULL);
1977 LASSERT(!dt_object_remote(dt));
1978 LASSERT(osd_invariant(obj));
1980 osd_trans_exec_op(env, handle, OSD_OT_ATTR_SET);
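/* Fault injection for tests: overwrite the OI mapping of this FID with a
 * bogus inode id so that the OI inconsistency handling paths can be
 * exercised. */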
1982 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING)) {
1983 struct osd_thread_info *oti = osd_oti_get(env);
1984 const struct lu_fid *fid0 = lu_object_fid(&dt->do_lu);
1985 struct lu_fid *fid1 = &oti->oti_fid;
1986 struct osd_inode_id *id = &oti->oti_id;
1987 struct iam_path_descr *ipd;
1988 struct iam_container *bag;
1989 struct osd_thandle *oh;
1992 fid_cpu_to_be(fid1, fid0);
1993 memset(id, 1, sizeof(*id));
1994 bag = &osd_fid2oi(osd_dev(dt->do_lu.lo_dev),
1995 fid0)->oi_dir.od_container;
1996 ipd = osd_idx_ipd_get(env, bag);
1997 if (unlikely(ipd == NULL))
2000 oh = container_of0(handle, struct osd_thandle, ot_super);
2001 rc = iam_update(oh->ot_handle, bag, (const struct iam_key *)fid1,
2002 (const struct iam_rec *)id, ipd);
2003 osd_ipd_put(env, bag, ipd);
2004 return(rc > 0 ? 0 : rc);
2007 inode = obj->oo_inode;
2009 rc = osd_quota_transfer(inode, attr);
2013 spin_lock(&obj->oo_guard);
2014 rc = osd_inode_setattr(env, inode, attr);
2015 spin_unlock(&obj->oo_guard);
2018 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2020 osd_trans_exec_check(env, handle, OSD_OT_ATTR_SET);
2025 static struct dentry *osd_child_dentry_get(const struct lu_env *env,
2026 struct osd_object *obj,
2027 const char *name, const int namelen)
2029 return osd_child_dentry_by_inode(env, obj->oo_inode, name, namelen);
2032 static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
2033 umode_t mode, struct dt_allocation_hint *hint,
2037 struct osd_device *osd = osd_obj2dev(obj);
2038 struct osd_thandle *oth;
2039 struct dt_object *parent = NULL;
2040 struct inode *inode;
2042 LINVRNT(osd_invariant(obj));
2043 LASSERT(obj->oo_inode == NULL);
2044 LASSERT(obj->oo_hl_head == NULL);
2046 if (S_ISDIR(mode) && ldiskfs_pdo) {
2047 obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
2048 if (obj->oo_hl_head == NULL)
2052 oth = container_of(th, struct osd_thandle, ot_super);
2053 LASSERT(oth->ot_handle->h_transaction != NULL);
2055 if (hint != NULL && hint->dah_parent != NULL &&
2056 !dt_object_remote(hint->dah_parent))
2057 parent = hint->dah_parent;
2059 inode = ldiskfs_create_inode(oth->ot_handle,
2060 parent ? osd_dt_obj(parent)->oo_inode :
2061 osd_sb(osd)->s_root->d_inode,
2063 if (!IS_ERR(inode)) {
2064 /* Do not update file c/mtime in ldiskfs. */
2065 inode->i_flags |= S_NOCMTIME;
2067 /* For a newly created object, it must be consistent,
2068 * and it is unnecessary to scrub it. */
2069 ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_NOSCRUB);
2071 obj->oo_inode = inode;
2074 if (obj->oo_hl_head != NULL) {
2075 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
2076 obj->oo_hl_head = NULL;
2078 result = PTR_ERR(inode);
2080 LINVRNT(osd_invariant(obj));
2088 static int osd_mkdir(struct osd_thread_info *info, struct osd_object *obj,
2089 struct lu_attr *attr,
2090 struct dt_allocation_hint *hint,
2091 struct dt_object_format *dof,
2095 struct osd_thandle *oth;
2096 __u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX));
2098 LASSERT(S_ISDIR(attr->la_mode));
2100 oth = container_of(th, struct osd_thandle, ot_super);
2101 LASSERT(oth->ot_handle->h_transaction != NULL);
2102 result = osd_mkfile(info, obj, mode, hint, th);
2107 static int osd_mk_index(struct osd_thread_info *info, struct osd_object *obj,
2108 struct lu_attr *attr,
2109 struct dt_allocation_hint *hint,
2110 struct dt_object_format *dof,
2114 struct osd_thandle *oth;
2115 const struct dt_index_features *feat = dof->u.dof_idx.di_feat;
2117 __u32 mode = (attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX));
2119 LASSERT(S_ISREG(attr->la_mode));
2121 oth = container_of(th, struct osd_thandle, ot_super);
2122 LASSERT(oth->ot_handle->h_transaction != NULL);
2124 result = osd_mkfile(info, obj, mode, hint, th);
2126 LASSERT(obj->oo_inode != NULL);
2127 if (feat->dif_flags & DT_IND_VARKEY)
2128 result = iam_lvar_create(obj->oo_inode,
2129 feat->dif_keysize_max,
2131 feat->dif_recsize_max,
2134 result = iam_lfix_create(obj->oo_inode,
2135 feat->dif_keysize_max,
2137 feat->dif_recsize_max,
2144 static int osd_mkreg(struct osd_thread_info *info, struct osd_object *obj,
2145 struct lu_attr *attr,
2146 struct dt_allocation_hint *hint,
2147 struct dt_object_format *dof,
2150 LASSERT(S_ISREG(attr->la_mode));
2151 return osd_mkfile(info, obj, (attr->la_mode &
2152 (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
2155 static int osd_mksym(struct osd_thread_info *info, struct osd_object *obj,
2156 struct lu_attr *attr,
2157 struct dt_allocation_hint *hint,
2158 struct dt_object_format *dof,
2161 LASSERT(S_ISLNK(attr->la_mode));
2162 return osd_mkfile(info, obj, (attr->la_mode &
2163 (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
2166 static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
2167 struct lu_attr *attr,
2168 struct dt_allocation_hint *hint,
2169 struct dt_object_format *dof,
2172 umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
2175 LINVRNT(osd_invariant(obj));
2176 LASSERT(obj->oo_inode == NULL);
2177 LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
2178 S_ISFIFO(mode) || S_ISSOCK(mode));
2180 result = osd_mkfile(info, obj, mode, hint, th);
2182 LASSERT(obj->oo_inode != NULL);
2184 * This inode should be marked dirty for i_rdev. Currently
2185 * that is done in the osd_attr_init().
2187 init_special_inode(obj->oo_inode, obj->oo_inode->i_mode,
2190 LINVRNT(osd_invariant(obj));
2194 typedef int (*osd_obj_type_f)(struct osd_thread_info *, struct osd_object *,
2196 struct dt_allocation_hint *hint,
2197 struct dt_object_format *dof,
2200 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
2202 osd_obj_type_f result;
2218 result = osd_mk_index;
2229 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
2230 struct dt_object *parent, struct dt_object *child,
2235 ah->dah_parent = parent;
2236 ah->dah_mode = child_mode;
2239 static void osd_attr_init(struct osd_thread_info *info, struct osd_object *obj,
2240 struct lu_attr *attr, struct dt_object_format *dof)
2242 struct inode *inode = obj->oo_inode;
2243 __u64 valid = attr->la_valid;
2246 attr->la_valid &= ~(LA_TYPE | LA_MODE);
2248 if (dof->dof_type != DFT_NODE)
2249 attr->la_valid &= ~LA_RDEV;
2250 if ((valid & LA_ATIME) && (attr->la_atime == LTIME_S(inode->i_atime)))
2251 attr->la_valid &= ~LA_ATIME;
2252 if ((valid & LA_CTIME) && (attr->la_ctime == LTIME_S(inode->i_ctime)))
2253 attr->la_valid &= ~LA_CTIME;
2254 if ((valid & LA_MTIME) && (attr->la_mtime == LTIME_S(inode->i_mtime)))
2255 attr->la_valid &= ~LA_MTIME;
2257 result = osd_quota_transfer(inode, attr);
2261 if (attr->la_valid != 0) {
2262 result = osd_inode_setattr(info->oti_env, inode, attr);
2264 * The osd_inode_setattr() should always succeed here. The
2265 * only error that could be returned is EDQUOT when we are
2266 * trying to change the UID or GID of the inode. However, this
2267 * should not happen since quota enforcement is no longer
2268 * enabled on ldiskfs (lquota takes care of it).
2270 LASSERTF(result == 0, "%d\n", result);
2271 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2274 attr->la_valid = valid;
2278 * Helper function for osd_object_create()
2280 * \retval 0, on success
2282 static int __osd_object_create(struct osd_thread_info *info,
2283 struct osd_object *obj, struct lu_attr *attr,
2284 struct dt_allocation_hint *hint,
2285 struct dt_object_format *dof,
2291 osd_trans_exec_op(info->oti_env, th, OSD_OT_CREATE);
2293 /* we drop umask so that permissions we pass are not affected */
2294 umask = current->fs->umask;
2295 current->fs->umask = 0;
2297 result = osd_create_type_f(dof->dof_type)(info, obj, attr, hint, dof,
2299 if (likely(obj->oo_inode != NULL)) {
2300 LASSERT(obj->oo_inode->i_state & I_NEW);
2302 /* Unlock the inode before attr initialization to avoid
2303 * unnecessary dqget operations. LU-6378 */
2304 unlock_new_inode(obj->oo_inode);
2307 if (likely(result == 0)) {
2308 osd_attr_init(info, obj, attr, dof);
2309 osd_object_init0(obj);
2312 /* restore previous umask value */
2313 current->fs->umask = umask;
2315 osd_trans_exec_check(info->oti_env, th, OSD_OT_CREATE);
2321 * Helper function for osd_object_create()
2323 * \retval 0, on success
2325 static int __osd_oi_insert(const struct lu_env *env, struct osd_object *obj,
2326 const struct lu_fid *fid, struct thandle *th)
2328 struct osd_thread_info *info = osd_oti_get(env);
2329 struct osd_inode_id *id = &info->oti_id;
2330 struct osd_device *osd = osd_obj2dev(obj);
2331 struct osd_thandle *oh;
2334 LASSERT(obj->oo_inode != NULL);
2336 oh = container_of0(th, struct osd_thandle, ot_super);
2337 LASSERT(oh->ot_handle);
2338 osd_trans_exec_op(env, th, OSD_OT_INSERT);
2340 osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
2341 rc = osd_oi_insert(info, osd, fid, id, oh->ot_handle, OI_CHECK_FLD);
2342 osd_trans_exec_check(env, th, OSD_OT_INSERT);
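/*
 * osd_fld_lookup() resolves which server owns a sequence: IDIF sequences map
 * directly to an OST index, sequences outside the FLDB are local to this MDT
 * (ss_node_id), and anything else is looked up in the local FLD cache.
 */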
2347 int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
2348 u64 seq, struct lu_seq_range *range)
2350 struct seq_server_site *ss = osd_seq_site(osd);
2352 if (fid_seq_is_idif(seq)) {
2353 fld_range_set_ost(range);
2354 range->lsr_index = idif_ost_idx(seq);
2358 if (!fid_seq_in_fldb(seq)) {
2359 fld_range_set_mdt(range);
2361 /* FIXME: if ss is NULL, lsr_index is not supposed to be used at all */
2363 range->lsr_index = ss->ss_node_id;
2367 LASSERT(ss != NULL);
2368 fld_range_set_any(range);
2369 /* OSD will only do local fld lookup */
2370 return fld_local_lookup(env, ss->ss_server_fld, seq, range);
2374 * Concurrency: no external locking is necessary.
2376 static int osd_declare_object_create(const struct lu_env *env,
2377 struct dt_object *dt,
2378 struct lu_attr *attr,
2379 struct dt_allocation_hint *hint,
2380 struct dt_object_format *dof,
2381 struct thandle *handle)
2383 struct osd_thandle *oh;
2387 LASSERT(handle != NULL);
2389 oh = container_of0(handle, struct osd_thandle, ot_super);
2390 LASSERT(oh->ot_handle == NULL);
2392 /* An EA (index) object consumes more credits than a regular object:
2393 * osd_mk_index vs. osd_mkreg: osd_mk_index creates 2 blocks (root_node
2394 * and leaf_node), and each block may change the block itself, the block
2395 * bitmap, the group and the GDT, so add 4 * 2 credits in that case. */
2396 osd_trans_declare_op(env, oh, OSD_OT_CREATE,
2397 osd_dto_credits_noquota[DTO_OBJECT_CREATE] +
2398 ((dof->dof_type == DFT_INDEX) ? 4 * 2 : 0));
2399 /* Reusing an idle OI block may cause one additional OI block
2401 osd_trans_declare_op(env, oh, OSD_OT_INSERT,
2402 osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
2407 rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid, 1, oh,
2408 osd_dt_obj(dt), false, NULL, false);
2415 static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
2416 struct lu_attr *attr,
2417 struct dt_allocation_hint *hint,
2418 struct dt_object_format *dof, struct thandle *th)
2420 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2421 struct osd_object *obj = osd_dt_obj(dt);
2422 struct osd_thread_info *info = osd_oti_get(env);
2426 if (dt_object_exists(dt))
2429 LINVRNT(osd_invariant(obj));
2430 LASSERT(!dt_object_remote(dt));
2431 LASSERT(osd_write_locked(env, obj));
2432 LASSERT(th != NULL);
2434 if (unlikely(fid_is_acct(fid)))
2435 /* Quota files can't be created from the kernel any more,
2436 * 'tune2fs -O quota' will take care of creating them */
2439 result = __osd_object_create(info, obj, attr, hint, dof, th);
2441 result = __osd_oi_insert(env, obj, fid, th);
2442 if (obj->oo_dt.do_body_ops == &osd_body_ops_new)
2443 obj->oo_dt.do_body_ops = &osd_body_ops;
2445 LASSERT(ergo(result == 0,
2446 dt_object_exists(dt) && !dt_object_remote(dt)));
2448 LASSERT(osd_invariant(obj));
2453 * Called to destroy on-disk representation of the object
2455 * Concurrency: must be locked
2457 static int osd_declare_object_destroy(const struct lu_env *env,
2458 struct dt_object *dt,
2461 struct osd_object *obj = osd_dt_obj(dt);
2462 struct inode *inode = obj->oo_inode;
2463 struct osd_thandle *oh;
2467 oh = container_of0(th, struct osd_thandle, ot_super);
2468 LASSERT(oh->ot_handle == NULL);
2471 osd_trans_declare_op(env, oh, OSD_OT_DESTROY,
2472 osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
2473 /* Recycling an idle OI leaf may cause three additional OI blocks
2475 osd_trans_declare_op(env, oh, OSD_OT_DELETE,
2476 osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
2477 /* one less inode */
2478 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2479 -1, oh, obj, false, NULL, false);
2482 /* data to be truncated */
2483 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2484 0, oh, obj, true, NULL, false);
2488 static int osd_object_destroy(const struct lu_env *env,
2489 struct dt_object *dt,
2492 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2493 struct osd_object *obj = osd_dt_obj(dt);
2494 struct inode *inode = obj->oo_inode;
2495 struct osd_device *osd = osd_obj2dev(obj);
2496 struct osd_thandle *oh;
2500 oh = container_of0(th, struct osd_thandle, ot_super);
2501 LASSERT(oh->ot_handle);
2503 LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
2505 if (unlikely(fid_is_acct(fid)))
2508 if (S_ISDIR(inode->i_mode)) {
2509 LASSERT(osd_inode_unlinked(inode) || inode->i_nlink == 1 ||
2510 inode->i_nlink == 2);
2511 /* this will check/delete the inode from the remote parent;
2512 * how to optimize it? unlink performance impact XXX */
2513 result = osd_delete_from_remote_parent(env, osd, obj, oh);
2514 if (result != 0 && result != -ENOENT) {
2515 CERROR("%s: delete inode "DFID": rc = %d\n",
2516 osd_name(osd), PFID(fid), result);
2518 spin_lock(&obj->oo_guard);
2520 spin_unlock(&obj->oo_guard);
2521 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2524 osd_trans_exec_op(env, th, OSD_OT_DESTROY);
2526 result = osd_oi_delete(osd_oti_get(env), osd, fid, oh->ot_handle,
2529 osd_trans_exec_check(env, th, OSD_OT_DESTROY);
2530 /* XXX: add to ext3 orphan list */
2531 /* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
2533 /* not needed in the cache anymore */
2534 set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
2535 obj->oo_destroyed = 1;
2541 * Put the fid into lustre_mdt_attrs, and then place the structure in the
2542 * inode's EA. This fid should not be altered during the lifetime of the object.
2545 * \retval +ve, on success
2546 * \retval -ve, on error
2548 * FIXME: It is good to have/use ldiskfs_xattr_set_handle() here
2550 int osd_ea_fid_set(struct osd_thread_info *info, struct inode *inode,
2551 const struct lu_fid *fid, __u32 compat, __u32 incompat)
2553 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
2557 if (OBD_FAIL_CHECK(OBD_FAIL_FID_INLMA))
2560 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_OST_EA_FID_SET))
2563 lustre_lma_init(lma, fid, compat, incompat);
2564 lustre_lma_swab(lma);
2566 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma, sizeof(*lma),
2568 /* LMA may already exist, but we need to check that all the
2569 * desired compat/incompat flags have been added. */
2570 if (unlikely(rc == -EEXIST)) {
2571 if (compat == 0 && incompat == 0)
2574 rc = __osd_xattr_get(inode, &info->oti_obj_dentry,
2575 XATTR_NAME_LMA, info->oti_mdt_attrs_old,
2580 lustre_lma_swab(lma);
2581 if (!(~lma->lma_compat & compat) &&
2582 !(~lma->lma_incompat & incompat))
2585 lma->lma_compat |= compat;
2586 lma->lma_incompat |= incompat;
2587 lustre_lma_swab(lma);
2588 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
2589 sizeof(*lma), XATTR_REPLACE);
2596 * ldiskfs supports storing the fid in the dirent; it is passed in
2597 * dentry->d_fsdata. Lustre 1.8 also uses d_fsdata for passing other
2598 * info to ldiskfs, so for compatibility with the 1.8 ldiskfs driver we
2599 * need a magic number at the start of the fid data.
2600 * \ldiskfs_dentry_param is used only to pass fid from osd to ldiskfs.
2603 static void osd_get_ldiskfs_dirent_param(struct ldiskfs_dentry_param *param,
2604 const struct lu_fid *fid)
2606 if (!fid_is_namespace_visible(fid) ||
2607 OBD_FAIL_CHECK(OBD_FAIL_FID_IGIF)) {
2608 param->edp_magic = 0;
2612 param->edp_magic = LDISKFS_LUFID_MAGIC;
2613 param->edp_len = sizeof(struct lu_fid) + 1;
2614 fid_cpu_to_be((struct lu_fid *)param->edp_data, (struct lu_fid *)fid);
2618 * Try to read the fid from inode ea into dt_rec.
2620 * \param fid object fid.
2622 * \retval 0 on success
2624 static int osd_ea_fid_get(const struct lu_env *env, struct osd_object *obj,
2625 __u32 ino, struct lu_fid *fid,
2626 struct osd_inode_id *id)
2628 struct osd_thread_info *info = osd_oti_get(env);
2629 struct inode *inode;
2632 osd_id_gen(id, ino, OSD_OII_NOGEN);
2633 inode = osd_iget_fid(info, osd_obj2dev(obj), id, fid);
2635 RETURN(PTR_ERR(inode));
2641 static int osd_add_dot_dotdot_internal(struct osd_thread_info *info,
2643 struct inode *parent_dir,
2644 const struct lu_fid *dot_fid,
2645 const struct lu_fid *dot_dot_fid,
2646 struct osd_thandle *oth)
2648 struct ldiskfs_dentry_param *dot_ldp;
2649 struct ldiskfs_dentry_param *dot_dot_ldp;
2651 dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
2652 osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
2654 dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
2655 dot_ldp->edp_magic = 0;
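/* "." carries no fid in its dirent (its magic is cleared above); only ".."
 * is tagged with the parent's fid via dot_dot_ldp. */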
2657 return ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
2658 dir, dot_ldp, dot_dot_ldp);
2662 * Create a local agent inode for a remote entry
2664 static struct inode *osd_create_local_agent_inode(const struct lu_env *env,
2665 struct osd_device *osd,
2666 struct osd_object *pobj,
2667 const struct lu_fid *fid,
2671 struct osd_thread_info *info = osd_oti_get(env);
2672 struct inode *local;
2673 struct osd_thandle *oh;
2678 oh = container_of(th, struct osd_thandle, ot_super);
2679 LASSERT(oh->ot_handle->h_transaction != NULL);
2681 local = ldiskfs_create_inode(oh->ot_handle, pobj->oo_inode, type);
2682 if (IS_ERR(local)) {
2683 CERROR("%s: create local error %d\n", osd_name(osd),
2684 (int)PTR_ERR(local));
2688 ldiskfs_set_inode_state(local, LDISKFS_STATE_LUSTRE_NOSCRUB);
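/* The NOSCRUB state set above tells OI scrub to leave this locally-created
 * agent inode alone. */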
2689 unlock_new_inode(local);
2691 /* Set special LMA flag for local agent inode */
2692 rc = osd_ea_fid_set(info, local, fid, 0, LMAI_AGENT);
2694 CERROR("%s: set LMA for "DFID" remote inode failed: rc = %d\n",
2695 osd_name(osd), PFID(fid), rc);
2696 RETURN(ERR_PTR(rc));
2702 rc = osd_add_dot_dotdot_internal(info, local, pobj->oo_inode,
2703 lu_object_fid(&pobj->oo_dt.do_lu),
2706 CERROR("%s: "DFID" add dot dotdot error: rc = %d\n",
2707 osd_name(osd), PFID(fid), rc);
2708 RETURN(ERR_PTR(rc));
2715 * When a directory entry is deleted, we have to take care of a possible
2716 * agent inode referenced by it. Unfortunately we can't do this at that
2717 * point: iget() within a running transaction leads to deadlock, and to save
2718 * performance we'd better not call it on every delete declaration. So we put
2719 * the potential agent inode on a list and process it once the transaction
2720 * is over. Note this is no worse in terms of real orphans, as regular
2721 * object destroy doesn't put inodes on the on-disk orphan list either; this
2722 * should be addressed separately.
2724 static int osd_schedule_agent_inode_removal(const struct lu_env *env,
2725 struct osd_thandle *oh,
2728 struct osd_device *osd = osd_dt_dev(oh->ot_super.th_dev);
2729 struct osd_obj_orphan *oor;
2736 oor->oor_env = (struct lu_env *)env;
2737 spin_lock(&osd->od_osfs_lock);
2738 list_add(&oor->oor_list, &osd->od_orphan_list);
2739 spin_unlock(&osd->od_osfs_lock);
2741 oh->ot_remove_agents = 1;
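/* ot_remove_agents flags the handle so that, once the transaction stops,
 * osd_process_scheduled_agent_removals() will pick the queued inodes off
 * od_orphan_list and drop them outside the transaction. */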
2747 static int osd_process_scheduled_agent_removals(const struct lu_env *env,
2748 struct osd_device *osd)
2750 struct osd_thread_info *info = osd_oti_get(env);
2751 struct osd_obj_orphan *oor, *tmp;
2752 struct osd_inode_id id;
2753 struct list_head list;
2754 struct inode *inode;
2759 INIT_LIST_HEAD(&list);
2761 spin_lock(&osd->od_osfs_lock);
2762 list_for_each_entry_safe(oor, tmp, &osd->od_orphan_list, oor_list) {
2763 if (oor->oor_env == env) {
2764 list_del(&oor->oor_list);
2765 list_add(&oor->oor_list, &list);
2768 spin_unlock(&osd->od_osfs_lock);
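/* Orphans queued by this env have been moved to the private list above, so
 * they can now be processed without holding od_osfs_lock. */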
2770 list_for_each_entry_safe(oor, tmp, &list, oor_list) {
2774 list_del(&oor->oor_list);
2777 osd_id_gen(&id, ino, OSD_OII_NOGEN);
2778 inode = osd_iget_fid(info, osd, &id, &fid);
2782 if (!osd_remote_fid(env, osd, &fid)) {
2787 jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC, 1);
2789 mark_inode_dirty(inode);
2790 ldiskfs_journal_stop(jh);
2798 * OSD layer object create function for interoperability mode (b11826).
2799 * This is mostly similar to osd_object_create(); the only difference is
2800 * that the fid is inserted into the inode EA here.
2802 * \retval 0, on success
2803 * \retval -ve, on error
2805 static int osd_object_ea_create(const struct lu_env *env, struct dt_object *dt,
2806 struct lu_attr *attr,
2807 struct dt_allocation_hint *hint,
2808 struct dt_object_format *dof,
2811 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2812 struct osd_object *obj = osd_dt_obj(dt);
2813 struct osd_thread_info *info = osd_oti_get(env);
2818 if (dt_object_exists(dt))
2821 LASSERT(osd_invariant(obj));
2822 LASSERT(!dt_object_remote(dt));
2823 LASSERT(osd_write_locked(env, obj));
2824 LASSERT(th != NULL);
2826 if (unlikely(fid_is_acct(fid)))
2827 /* Quota files can't be created from the kernel any more,
2828 * 'tune2fs -O quota' will take care of creating them */
2831 result = __osd_object_create(info, obj, attr, hint, dof, th);
2833 if (fid_is_idif(fid) &&
2834 !osd_dev(dt->do_lu.lo_dev)->od_index_in_idif) {
2835 struct lu_fid *tfid = &info->oti_fid;
2836 struct ost_id *oi = &info->oti_ostid;
2838 fid_to_ostid(fid, oi);
2839 ostid_to_fid(tfid, oi, 0);
2840 result = osd_ea_fid_set(info, obj->oo_inode, tfid,
2841 LMAC_FID_ON_OST, 0);
2843 result = osd_ea_fid_set(info, obj->oo_inode, fid,
2844 fid_is_on_ost(info, osd_obj2dev(obj),
2845 fid, OI_CHECK_FLD) ?
2846 LMAC_FID_ON_OST : 0, 0);
2848 if (obj->oo_dt.do_body_ops == &osd_body_ops_new)
2849 obj->oo_dt.do_body_ops = &osd_body_ops;
2853 result = __osd_oi_insert(env, obj, fid, th);
2855 LASSERT(ergo(result == 0,
2856 dt_object_exists(dt) && !dt_object_remote(dt)));
2857 LINVRNT(osd_invariant(obj));
2861 static int osd_declare_object_ref_add(const struct lu_env *env,
2862 struct dt_object *dt,
2863 struct thandle *handle)
2865 struct osd_thandle *oh;
2867 /* it's possible that object doesn't exist yet */
2868 LASSERT(handle != NULL);
2870 oh = container_of0(handle, struct osd_thandle, ot_super);
2871 LASSERT(oh->ot_handle == NULL);
2873 osd_trans_declare_op(env, oh, OSD_OT_REF_ADD,
2874 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2880 * Concurrency: @dt is write locked.
2882 static int osd_object_ref_add(const struct lu_env *env,
2883 struct dt_object *dt, struct thandle *th)
2885 struct osd_object *obj = osd_dt_obj(dt);
2886 struct inode *inode = obj->oo_inode;
2887 struct osd_thandle *oh;
2890 if (!dt_object_exists(dt))
2893 LINVRNT(osd_invariant(obj));
2894 LASSERT(!dt_object_remote(dt));
2895 LASSERT(osd_write_locked(env, obj));
2896 LASSERT(th != NULL);
2898 oh = container_of0(th, struct osd_thandle, ot_super);
2899 LASSERT(oh->ot_handle != NULL);
2901 osd_trans_exec_op(env, th, OSD_OT_REF_ADD);
2903 CDEBUG(D_INODE, DFID" increase nlink %d\n",
2904 PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
2906 * The DIR_NLINK feature allows directories to exceed LDISKFS_LINK_MAX
2907 * (65000) subdirectories by storing "1" in i_nlink if the link count
2908 * would otherwise overflow. Directory traversal tools understand
2909 * that (st_nlink == 1) indicates that the filesystem does not track
2910 * the hard link count on the directory, and will not abort subdirectory
2911 * scanning early once (st_nlink - 2) subdirs have been found.
2913 * This also has to properly handle the case of inodes with nlink == 0
2914 * in case they are being linked into the PENDING directory
2916 spin_lock(&obj->oo_guard);
2917 if (unlikely(inode->i_nlink == 0))
2918 /* inc_nlink from 0 may cause WARN_ON */
2919 set_nlink(inode, 1);
2921 ldiskfs_inc_count(oh->ot_handle, inode);
2922 if (!S_ISDIR(inode->i_mode))
2923 LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
2925 spin_unlock(&obj->oo_guard);
2927 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2928 LINVRNT(osd_invariant(obj));
2930 osd_trans_exec_check(env, th, OSD_OT_REF_ADD);
2935 static int osd_declare_object_ref_del(const struct lu_env *env,
2936 struct dt_object *dt,
2937 struct thandle *handle)
2939 struct osd_thandle *oh;
2941 LASSERT(!dt_object_remote(dt));
2942 LASSERT(handle != NULL);
2944 oh = container_of0(handle, struct osd_thandle, ot_super);
2945 LASSERT(oh->ot_handle == NULL);
2947 osd_trans_declare_op(env, oh, OSD_OT_REF_DEL,
2948 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2954 * Concurrency: @dt is write locked.
2956 static int osd_object_ref_del(const struct lu_env *env, struct dt_object *dt,
2959 struct osd_object *obj = osd_dt_obj(dt);
2960 struct inode *inode = obj->oo_inode;
2961 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
2962 struct osd_thandle *oh;
2964 if (!dt_object_exists(dt))
2967 LINVRNT(osd_invariant(obj));
2968 LASSERT(!dt_object_remote(dt));
2969 LASSERT(osd_write_locked(env, obj));
2970 LASSERT(th != NULL);
2972 oh = container_of0(th, struct osd_thandle, ot_super);
2973 LASSERT(oh->ot_handle != NULL);
2975 osd_trans_exec_op(env, th, OSD_OT_REF_DEL);
2977 spin_lock(&obj->oo_guard);
2978 /* This can be the result of an upgrade from an old Lustre version,
2979 * and applies only to local files. Just skip this ref_del call;
2980 * ext4_unlink() only treats this as a warning, so don't LASSERT here. */
2981 if (inode->i_nlink == 0) {
2982 CDEBUG_LIMIT(fid_is_norm(lu_object_fid(&dt->do_lu)) ?
2983 D_ERROR : D_INODE, "%s: nlink == 0 on "DFID
2984 ", maybe an upgraded file? (LU-3915)\n",
2985 osd_name(osd), PFID(lu_object_fid(&dt->do_lu)));
2986 spin_unlock(&obj->oo_guard);
2990 CDEBUG(D_INODE, DFID" decrease nlink %d\n",
2991 PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
2993 ldiskfs_dec_count(oh->ot_handle, inode);
2994 spin_unlock(&obj->oo_guard);
2996 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2997 LINVRNT(osd_invariant(obj));
2999 osd_trans_exec_check(env, th, OSD_OT_REF_DEL);
3005 * Get the 64-bit version for an inode.
3007 static int osd_object_version_get(const struct lu_env *env,
3008 struct dt_object *dt, dt_obj_version_t *ver)
3010 struct inode *inode = osd_dt_obj(dt)->oo_inode;
3012 CDEBUG(D_INODE, "Get version "LPX64" for inode %lu\n",
3013 LDISKFS_I(inode)->i_fs_version, inode->i_ino);
3014 *ver = LDISKFS_I(inode)->i_fs_version;
3019 * Concurrency: @dt is read locked.
3021 static int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
3022 struct lu_buf *buf, const char *name)
3024 struct osd_object *obj = osd_dt_obj(dt);
3025 struct inode *inode = obj->oo_inode;
3026 struct osd_thread_info *info = osd_oti_get(env);
3027 struct dentry *dentry = &info->oti_obj_dentry;
3029 /* version get is not real XATTR but uses xattr API */
3030 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
3031 /* for version we are just using the xattr API but change an inode field instead */
3033 if (buf->lb_len == 0)
3034 return sizeof(dt_obj_version_t);
3036 if (buf->lb_len < sizeof(dt_obj_version_t))
3039 osd_object_version_get(env, dt, buf->lb_buf);
3041 return sizeof(dt_obj_version_t);
3044 if (!dt_object_exists(dt))
3047 LASSERT(!dt_object_remote(dt));
3048 LASSERT(inode->i_op != NULL);
3049 LASSERT(inode->i_op->getxattr != NULL);
3051 return __osd_xattr_get(inode, dentry, name, buf->lb_buf, buf->lb_len);
3055 static int osd_declare_xattr_set(const struct lu_env *env,
3056 struct dt_object *dt,
3057 const struct lu_buf *buf, const char *name,
3058 int fl, struct thandle *handle)
3060 struct osd_thandle *oh;
3062 struct super_block *sb = osd_sb(osd_dev(dt->do_lu.lo_dev));
3064 LASSERT(handle != NULL);
3066 oh = container_of0(handle, struct osd_thandle, ot_super);
3067 LASSERT(oh->ot_handle == NULL);
3069 if (strcmp(name, XATTR_NAME_LMA) == 0) {
3070 /* In the non-upgrade case, the LMA is set first and
3071 * usually fits in the inode. But in the upgrade case,
3072 * the LMA may live in a separate EA block. */
3073 if (!dt_object_exists(dt))
3075 else if (fl == LU_XATTR_REPLACE)
3079 } else if (strcmp(name, XATTR_NAME_VERSION) == 0) {
3083 credits = osd_dto_credits_noquota[DTO_XATTR_SET];
3084 if (buf && buf->lb_len > sb->s_blocksize) {
3085 credits *= (buf->lb_len + sb->s_blocksize - 1) >>
3086 sb->s_blocksize_bits;
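/* A value larger than one filesystem block spills into external xattr
 * blocks, so the credits are scaled by the number of blocks needed to
 * hold it (rounded up above). */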
3089 * xattr set may involve inode quota change, reserve credits for
3090 * dquot_initialize()
3092 credits += LDISKFS_MAXQUOTAS_INIT_BLOCKS(sb);
3095 osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET, credits);
3101 * Set the 64-bit version for object
3103 static void osd_object_version_set(const struct lu_env *env,
3104 struct dt_object *dt,
3105 dt_obj_version_t *new_version)
3107 struct inode *inode = osd_dt_obj(dt)->oo_inode;
3109 CDEBUG(D_INODE, "Set version "LPX64" (old "LPX64") for inode %lu\n",
3110 *new_version, LDISKFS_I(inode)->i_fs_version, inode->i_ino);
3112 LDISKFS_I(inode)->i_fs_version = *new_version;
3113 /** Version is set after all inode operations are finished,
3114 * so we should mark it dirty here */
3115 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
3119 * Concurrency: @dt is write locked.
3121 static int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
3122 const struct lu_buf *buf, const char *name, int fl,
3123 struct thandle *handle)
3125 struct osd_object *obj = osd_dt_obj(dt);
3126 struct inode *inode = obj->oo_inode;
3127 struct osd_thread_info *info = osd_oti_get(env);
3132 LASSERT(handle != NULL);
3134 /* version set is not real XATTR */
3135 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
3136 /* for version we are just using the xattr API but change an inode field instead */
3138 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
3139 osd_object_version_set(env, dt, buf->lb_buf);
3140 return sizeof(dt_obj_version_t);
3143 CDEBUG(D_INODE, DFID" set xattr '%s' with size %zu\n",
3144 PFID(lu_object_fid(&dt->do_lu)), name, buf->lb_len);
3146 osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
3147 if (fl & LU_XATTR_REPLACE)
3148 fs_flags |= XATTR_REPLACE;
3150 if (fl & LU_XATTR_CREATE)
3151 fs_flags |= XATTR_CREATE;
3153 if (strcmp(name, XATTR_NAME_LMV) == 0) {
3154 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
3157 rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);