4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2014, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/osd/osd_handler.c
38 * Top-level entry points into osd module
40 * Author: Nikita Danilov <nikita@clusterfs.com>
41 * Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
44 #define DEBUG_SUBSYSTEM S_OSD
46 #include <linux/module.h>
48 /* LUSTRE_VERSION_CODE */
49 #include <lustre_ver.h>
50 /* prerequisite for linux/xattr.h */
51 #include <linux/types.h>
52 /* prerequisite for linux/xattr.h */
54 /* XATTR_{REPLACE,CREATE} */
55 #include <linux/xattr.h>
58 * struct OBD_{ALLOC,FREE}*()
61 #include <obd_support.h>
62 /* struct ptlrpc_thread */
63 #include <lustre_net.h>
64 #include <lustre_fid.h>
66 #include <lustre_param.h>
68 #include "osd_internal.h"
69 #include "osd_dynlocks.h"
71 /* llo_* api support */
72 #include <md_object.h>
73 #include <lustre_quota.h>
75 #include <ldiskfs/xattr.h>
76 #include <lustre_linkea.h>
79 CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
80 "ldiskfs with parallel directory operations");
82 int ldiskfs_track_declares_assert;
83 CFS_MODULE_PARM(ldiskfs_track_declares_assert, "i", int, 0644,
84 "LBUG during tracking of declares");
86 /* Slab to allocate dynlocks */
87 struct kmem_cache *dynlock_cachep;
89 /* Slab to allocate osd_it_ea */
90 struct kmem_cache *osd_itea_cachep;
92 static struct lu_kmem_descr ldiskfs_caches[] = {
94 .ckd_cache = &dynlock_cachep,
95 .ckd_name = "dynlock_cache",
96 .ckd_size = sizeof(struct dynlock_handle)
99 .ckd_cache = &osd_itea_cachep,
100 .ckd_name = "osd_itea_cache",
101 .ckd_size = sizeof(struct osd_it_ea)
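/*
 * Illustrative sketch (added, not part of the original code): a lu_kmem_descr
 * table like the one above is consumed by the generic obdclass cache helpers.
 * The hook names below are assumptions for illustration; only ldiskfs_caches
 * comes from this file.
 *
 *	static int __init osd_mod_init_example(void)
 *	{
 *		// creates dynlock_cachep and osd_itea_cachep in one pass
 *		return lu_kmem_init(ldiskfs_caches);
 *	}
 *
 *	static void __exit osd_mod_exit_example(void)
 *	{
 *		// destroys the caches again
 *		lu_kmem_fini(ldiskfs_caches);
 *	}
 */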
108 static const char dot[] = ".";
109 static const char dotdot[] = "..";
110 static const char remote_obj_dir[] = "REM_OBJ_DIR";
112 static const struct lu_object_operations osd_lu_obj_ops;
113 static const struct dt_object_operations osd_obj_ops;
114 static const struct dt_object_operations osd_obj_ea_ops;
115 static const struct dt_object_operations osd_obj_otable_it_ops;
116 static const struct dt_index_operations osd_index_iam_ops;
117 static const struct dt_index_operations osd_index_ea_ops;
119 int osd_trans_declare_op2rb[] = {
120 [OSD_OT_ATTR_SET] = OSD_OT_ATTR_SET,
121 [OSD_OT_PUNCH] = OSD_OT_MAX,
122 [OSD_OT_XATTR_SET] = OSD_OT_XATTR_SET,
123 [OSD_OT_CREATE] = OSD_OT_DESTROY,
124 [OSD_OT_DESTROY] = OSD_OT_CREATE,
125 [OSD_OT_REF_ADD] = OSD_OT_REF_DEL,
126 [OSD_OT_REF_DEL] = OSD_OT_REF_ADD,
127 [OSD_OT_WRITE] = OSD_OT_WRITE,
128 [OSD_OT_INSERT] = OSD_OT_DELETE,
129 [OSD_OT_DELETE] = OSD_OT_INSERT,
130 [OSD_OT_UPDATE] = OSD_OT_MAX,
131 [OSD_OT_QUOTA] = OSD_OT_MAX,
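/*
 * Illustrative note (added): osd_trans_declare_op2rb[] maps each declared
 * operation to the operation whose credits would be needed to roll it back,
 * e.g. a declared OSD_OT_CREATE is undone by OSD_OT_DESTROY and vice versa,
 * symmetric ops (OSD_OT_WRITE, OSD_OT_ATTR_SET) roll back as themselves,
 * and OSD_OT_MAX marks ops with no usable rollback:
 *
 *	int rb = osd_trans_declare_op2rb[OSD_OT_INSERT];
 *	// rb == OSD_OT_DELETE: deleting the entry undoes the insert
 */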
134 static int osd_has_index(const struct osd_object *obj)
136 return obj->oo_dt.do_index_ops != NULL;
139 static int osd_object_invariant(const struct lu_object *l)
141 return osd_invariant(osd_obj(l));
145 * Concurrency: doesn't matter
149 * Concurrency: doesn't matter
151 static int osd_write_locked(const struct lu_env *env, struct osd_object *o)
153 struct osd_thread_info *oti = osd_oti_get(env);
154 return oti->oti_w_locks > 0 && o->oo_owner == env;
158 * Concurrency: doesn't access mutable data
160 static int osd_root_get(const struct lu_env *env,
161 struct dt_device *dev, struct lu_fid *f)
163 lu_local_obj_fid(f, OSD_FS_ROOT_OID);
168 * OSD object methods.
172 * Concurrency: no concurrent access is possible that early in object
175 static struct lu_object *osd_object_alloc(const struct lu_env *env,
176 const struct lu_object_header *hdr,
179 struct osd_object *mo;
185 l = &mo->oo_dt.do_lu;
186 dt_object_init(&mo->oo_dt, NULL, d);
187 mo->oo_dt.do_ops = &osd_obj_ea_ops;
188 l->lo_ops = &osd_lu_obj_ops;
189 init_rwsem(&mo->oo_sem);
190 init_rwsem(&mo->oo_ext_idx_sem);
191 spin_lock_init(&mo->oo_guard);
198 int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
199 struct dentry *dentry, struct lustre_mdt_attrs *lma)
203 CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
204 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
205 info->oti_mdt_attrs_old, LMA_OLD_SIZE);
207 if ((void *)lma != (void *)info->oti_mdt_attrs_old)
208 memcpy(lma, info->oti_mdt_attrs_old, sizeof(*lma));
210 lustre_lma_swab(lma);
211 /* Check LMA compatibility */
212 if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
213 CWARN("%.16s: unsupported incompat LMA feature(s) %#x "
214 "for fid = "DFID", ino = %lu\n",
215 LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
216 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
217 PFID(&lma->lma_self_fid), inode->i_ino);
220 } else if (rc == 0) {
228 * retrieve object from backend ext fs.
230 struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
231 struct osd_inode_id *id)
233 struct inode *inode = NULL;
235 inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
237 CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
238 id->oii_ino, PTR_ERR(inode));
239 } else if (id->oii_gen != OSD_OII_NOGEN &&
240 inode->i_generation != id->oii_gen) {
241 CDEBUG(D_INODE, "unmatched inode: ino = %u, oii_gen = %u, "
242 "i_generation = %u\n",
243 id->oii_ino, id->oii_gen, inode->i_generation);
245 inode = ERR_PTR(-ESTALE);
246 } else if (inode->i_nlink == 0) {
247 /* due to parallel readdir and unlink,
248 * we can have dead inode here. */
249 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
251 inode = ERR_PTR(-ESTALE);
252 } else if (is_bad_inode(inode)) {
253 CWARN("%.16s: bad inode: ino = %u\n",
254 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, id->oii_ino);
256 inode = ERR_PTR(-ENOENT);
258 if (id->oii_gen == OSD_OII_NOGEN)
259 osd_id_gen(id, inode->i_ino, inode->i_generation);
261 /* Do not update file c/mtime in ldiskfs.
262 * NB: we don't have any lock to protect this because we don't
263 * have reference on osd_object now, but contention with
264 * another lookup + attr_set can't happen in the tiny window
265 * between if (...) and set S_NOCMTIME. */
266 if (!(inode->i_flags & S_NOCMTIME))
267 inode->i_flags |= S_NOCMTIME;
272 static struct inode *
273 osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
274 struct osd_inode_id *id, struct lu_fid *fid)
276 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
280 inode = osd_iget(info, dev, id);
284 rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
286 *fid = lma->lma_self_fid;
287 } else if (rc == -ENODATA) {
288 if (unlikely(inode == osd_sb(dev)->s_root->d_inode))
289 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
291 lu_igif_build(fid, inode->i_ino, inode->i_generation);
299 static struct inode *osd_iget_check(struct osd_thread_info *info,
300 struct osd_device *dev,
301 const struct lu_fid *fid,
302 struct osd_inode_id *id,
309 inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
312 if (!in_oi || (rc != -ENOENT && rc != -ESTALE)) {
313 CDEBUG(D_INODE, "no inode: ino = %u, rc = %d\n",
322 if (is_bad_inode(inode)) {
325 CDEBUG(D_INODE, "bad inode: ino = %u\n", id->oii_ino);
333 if (id->oii_gen != OSD_OII_NOGEN &&
334 inode->i_generation != id->oii_gen) {
337 CDEBUG(D_INODE, "unmatched inode: ino = %u, "
338 "oii_gen = %u, i_generation = %u\n",
339 id->oii_ino, id->oii_gen, inode->i_generation);
347 if (inode->i_nlink == 0) {
350 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
360 struct osd_inode_id saved_id = *id;
362 LASSERTF(rc == -ESTALE || rc == -ENOENT, "rc = %d\n", rc);
364 rc = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
365 /* XXX: There are some possible cases:
367 * 1. Backup/restore caused the OI to become invalid.
369 * 2. Someone unlinked the object but did NOT remove
370 * the OI mapping, e.g. by mounting the target device
371 * as ldiskfs and modifying something directly.
373 * 3. Someone just removed the object between the
374 * former oi_lookup and the iget. That is normal.
375 * 4. Other failure cases.
377 * Generally, when the device is mounted, it will
378 * automatically check whether the system was restored from
379 * a file-level backup or not. We trust such detection
380 * to distinguish the 1st case from the 2nd one. */
382 if (!IS_ERR(inode) && inode->i_generation != 0 &&
383 inode->i_generation == id->oii_gen) {
386 __u32 level = D_LFSCK;
389 if (!thread_is_running(&dev->od_scrub.os_thread))
392 CDEBUG(level, "%s: the OI mapping for the FID "
393 DFID" become inconsistent, the given ID "
394 "%u/%u, the ID in OI mapping %u/%u\n",
395 osd_name(dev), PFID(fid),
396 saved_id.oii_ino, saved_id.oii_gen,
397 id->oii_ino, id->oii_gen);
401 if (id->oii_gen == OSD_OII_NOGEN)
402 osd_id_gen(id, inode->i_ino, inode->i_generation);
404 /* Do not update file c/mtime in ldiskfs.
405 * NB: we don't have any lock to protect this because we don't
406 * have reference on osd_object now, but contention with
407 * another lookup + attr_set can't happen in the tiny window
408 * between if (...) and set S_NOCMTIME. */
409 if (!(inode->i_flags & S_NOCMTIME))
410 inode->i_flags |= S_NOCMTIME;
427 * \retval +v: new filter_fid, does not contain self-fid
428 * \retval 0: filter_fid_old, contains self-fid
429 * \retval -v: other failure cases
431 int osd_get_idif(struct osd_thread_info *info, struct inode *inode,
432 struct dentry *dentry, struct lu_fid *fid)
434 struct filter_fid_old *ff = &info->oti_ff;
435 struct ost_id *ostid = &info->oti_ostid;
438 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_FID, ff, sizeof(*ff));
439 if (rc == sizeof(*ff)) {
441 ostid_set_seq(ostid, le64_to_cpu(ff->ff_seq));
442 ostid_set_id(ostid, le64_to_cpu(ff->ff_objid));
443 /* XXX: should use real OST index in the future. LU-3569 */
444 ostid_to_fid(fid, ostid, 0);
445 } else if (rc == sizeof(struct filter_fid)) {
447 } else if (rc >= 0) {
454 static int osd_lma_self_repair(struct osd_thread_info *info,
455 struct osd_device *osd, struct inode *inode,
456 const struct lu_fid *fid, __u32 compat)
461 LASSERT(current->journal_info == NULL);
463 jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
464 osd_dto_credits_noquota[DTO_XATTR_SET]);
467 CWARN("%s: cannot start journal for lma_self_repair: rc = %d\n",
472 rc = osd_ea_fid_set(info, inode, fid, compat, 0);
474 CWARN("%s: cannot self repair the LMA: rc = %d\n",
476 ldiskfs_journal_stop(jh);
480 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
482 struct osd_thread_info *info = osd_oti_get(env);
483 struct osd_device *osd = osd_obj2dev(obj);
484 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
485 struct inode *inode = obj->oo_inode;
486 struct dentry *dentry = &info->oti_obj_dentry;
487 struct lu_fid *fid = NULL;
488 const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
492 CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
493 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
494 info->oti_mdt_attrs_old, LMA_OLD_SIZE);
495 if (rc == -ENODATA && !fid_is_igif(rfid) && osd->od_check_ff) {
496 fid = &lma->lma_self_fid;
497 rc = osd_get_idif(info, inode, dentry, fid);
498 if ((rc > 0) || (rc == -ENODATA && osd->od_index_in_idif)) {
499 /* For the given OST-object, if it has neither LMA nor
500 * FID in XATTR_NAME_FID, then the given FID (which is
501 * contained in the @obj, from client RPC for locating
502 * the OST-object) is trusted. We use it to generate
504 osd_lma_self_repair(info, osd, inode, rfid,
510 if (unlikely(rc == -ENODATA))
518 lustre_lma_swab(lma);
519 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
520 CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
521 CWARN("%s: unsupported incompat LMA feature(s) %#x for "
522 "fid = "DFID", ino = %lu\n", osd_name(osd),
523 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
524 PFID(rfid), inode->i_ino);
526 } else if (!(lma->lma_compat & LMAC_NOT_IN_OI)) {
527 fid = &lma->lma_self_fid;
531 if (fid != NULL && unlikely(!lu_fid_eq(rfid, fid))) {
532 __u32 level = D_LFSCK;
534 if (fid_is_idif(rfid) && fid_is_idif(fid)) {
535 struct ost_id *oi = &info->oti_ostid;
536 struct lu_fid *fid1 = &info->oti_fid3;
537 __u32 idx = fid_idif_ost_idx(rfid);
539 /* For old IDIF, the OST index is not part of the IDIF,
540 * meaning that different OSTs may have the same IDIFs.
541 * In such a case, we need some compatibility checks to
542 * make sure the OI scrub is triggered properly. */
543 if (idx != 0 && fid_idif_ost_idx(fid) == 0) {
544 /* Given @rfid is new, LMA is old. */
545 fid_to_ostid(fid, oi);
546 ostid_to_fid(fid1, oi, idx);
547 if (lu_fid_eq(fid1, rfid)) {
548 if (osd->od_index_in_idif)
549 osd_lma_self_repair(info, osd,
559 if (!thread_is_running(&osd->od_scrub.os_thread))
562 CDEBUG(level, "%s: FID "DFID" != self_fid "DFID"\n",
563 osd_name(osd), PFID(rfid), PFID(fid));
569 static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
570 const struct lu_fid *fid,
571 const struct lu_object_conf *conf)
573 struct osd_thread_info *info;
574 struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
575 struct osd_device *dev;
576 struct osd_idmap_cache *oic;
577 struct osd_inode_id *id;
579 struct osd_scrub *scrub;
580 struct scrub_file *sf;
584 bool triggered = false;
587 LINVRNT(osd_invariant(obj));
588 LASSERT(obj->oo_inode == NULL);
589 LASSERTF(fid_is_sane(fid) || fid_is_idif(fid), DFID"\n", PFID(fid));
592 scrub = &dev->od_scrub;
593 sf = &scrub->os_file;
594 info = osd_oti_get(env);
596 oic = &info->oti_cache;
598 if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
601 /* The object is created as a locking anchor, or the object is to
602 * be created on disk. No need to call osd_oi_lookup() here because a FID
603 * should never be re-used; if it really is a duplicate FID for some
604 * unexpected reason, we should be able to detect it later by calling
605 * do_create->osd_oi_insert(). */
606 if (conf != NULL && conf->loc_flags & LOC_F_NEW)
607 GOTO(out, result = 0);
609 /* Search order: 1. per-thread cache. */
610 if (lu_fid_eq(fid, &oic->oic_fid) &&
611 likely(oic->oic_dev == dev)) {
617 if (!list_empty(&scrub->os_inconsistent_items)) {
618 /* Search order: 2. OI scrub pending list. */
619 result = osd_oii_lookup(dev, fid, id);
624 /* Search order: 3. OI files. */
625 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
626 if (result == -ENOENT) {
627 if (!fid_is_norm(fid) ||
628 fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) ||
629 !ldiskfs_test_bit(osd_oi_fid2idx(dev,fid),
631 GOTO(out, result = 0);
642 inode = osd_iget_check(info, dev, fid, id, in_oi);
644 result = PTR_ERR(inode);
645 if (result == -ENOENT || result == -ESTALE) {
647 fid_zero(&oic->oic_fid);
649 GOTO(out, result = -ENOENT);
650 } else if (result == -EREMCHG) {
654 fid_zero(&oic->oic_fid);
656 if (unlikely(triggered))
657 GOTO(out, result = saved);
660 if (thread_is_running(&scrub->os_thread)) {
661 result = -EINPROGRESS;
662 } else if (!dev->od_noscrub) {
663 /* Since we do not know the right OI mapping,
664 * we have to trigger OI scrub to scan the
666 result = osd_scrub_start(dev, SS_AUTO_FULL |
667 SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT);
668 CDEBUG(D_LFSCK | D_CONSOLE, "%.16s: trigger OI "
669 "scrub by RPC for "DFID", rc = %d [1]\n",
670 osd_name(dev), PFID(fid), result);
671 if (result == 0 || result == -EALREADY)
672 result = -EINPROGRESS;
677 /* We still have a chance to get the valid inode: for an
678 * object which is referenced by a remote name entry, the
679 * object on the local MDT will be linked under the dir
680 * of "/REMOTE_PARENT_DIR" with its FID string as name.
682 * We do not know whether the object for the given FID
683 * is referenced by some remote name entry or not, and
684 * especially for DNE II, a multiple-linked object may
685 * have many name entries reside on many MDTs.
687 * To simplify the operation, OSD will not distinguish
688 * more, just lookup "/REMOTE_PARENT_DIR". Usually, this
689 * only happens for RPCs from other MDTs during the
690 * OI scrub, or for client side RPCs with FID only,
691 * such as FID to path, or from an old connected client. */
693 result = osd_lookup_in_remote_parent(info, dev,
706 obj->oo_inode = inode;
707 LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
709 result = osd_check_lma(env, obj);
712 obj->oo_inode = NULL;
713 if (result == -EREMCHG) {
715 result = osd_oi_lookup(info, dev, fid, id,
718 fid_zero(&oic->oic_fid);
729 obj->oo_compat_dot_created = 1;
730 obj->oo_compat_dotdot_created = 1;
732 if (!S_ISDIR(inode->i_mode) || !ldiskfs_pdo) /* done */
733 GOTO(out, result = 0);
735 LASSERT(obj->oo_hl_head == NULL);
736 obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
737 if (obj->oo_hl_head == NULL) {
738 obj->oo_inode = NULL;
740 GOTO(out, result = -ENOMEM);
742 GOTO(out, result = 0);
745 LINVRNT(osd_invariant(obj));
750 * Concurrency: shouldn't matter.
752 static void osd_object_init0(struct osd_object *obj)
754 LASSERT(obj->oo_inode != NULL);
755 obj->oo_dt.do_body_ops = &osd_body_ops;
756 obj->oo_dt.do_lu.lo_header->loh_attr |=
757 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
761 * Concurrency: no concurrent access is possible that early in object
764 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
765 const struct lu_object_conf *conf)
767 struct osd_object *obj = osd_obj(l);
770 LINVRNT(osd_invariant(obj));
772 if (fid_is_otable_it(&l->lo_header->loh_fid)) {
773 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
774 l->lo_header->loh_attr |= LOHA_EXISTS;
778 result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
779 obj->oo_dt.do_body_ops = &osd_body_ops_new;
780 if (result == 0 && obj->oo_inode != NULL)
781 osd_object_init0(obj);
783 LINVRNT(osd_invariant(obj));
788 * Concurrency: no concurrent access is possible that late in object
791 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
793 struct osd_object *obj = osd_obj(l);
795 LINVRNT(osd_invariant(obj));
797 dt_object_fini(&obj->oo_dt);
798 if (obj->oo_hl_head != NULL)
799 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
804 * Concurrency: no concurrent access is possible that late in object
807 static void osd_index_fini(struct osd_object *o)
809 struct iam_container *bag;
811 if (o->oo_dir != NULL) {
812 bag = &o->oo_dir->od_container;
813 if (o->oo_inode != NULL) {
814 if (bag->ic_object == o->oo_inode)
815 iam_container_fini(bag);
817 OBD_FREE_PTR(o->oo_dir);
823 * Concurrency: no concurrent access is possible that late in object
824 * life-cycle (for all existing callers, that is. New callers have to provide
825 * their own locking.)
827 static int osd_inode_unlinked(const struct inode *inode)
829 return inode->i_nlink == 0;
833 OSD_TXN_OI_DELETE_CREDITS = 20,
834 OSD_TXN_INODE_DELETE_CREDITS = 20
841 #if OSD_THANDLE_STATS
843 * Set time when the handle is allocated
845 static void osd_th_alloced(struct osd_thandle *oth)
847 oth->oth_alloced = cfs_time_current();
851 * Set time when the handle started
853 static void osd_th_started(struct osd_thandle *oth)
855 oth->oth_started = cfs_time_current();
859 * Helper function to convert time interval to microseconds packed in
862 static long interval_to_usec(cfs_time_t start, cfs_time_t end)
866 cfs_duration_usec(cfs_time_sub(end, start), &val);
867 return val.tv_sec * 1000000 + val.tv_usec;
871 * Check whether we have dealt with this handle for too long.
873 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
874 cfs_time_t alloced, cfs_time_t started,
877 cfs_time_t now = cfs_time_current();
879 LASSERT(dev != NULL);
881 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
882 interval_to_usec(alloced, started));
883 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
884 interval_to_usec(started, closed));
885 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
886 interval_to_usec(closed, now));
888 if (cfs_time_before(cfs_time_add(alloced, cfs_time_seconds(30)), now)) {
889 CWARN("transaction handle %p was open for too long: "
891 "alloced "CFS_TIME_T" ,"
892 "started "CFS_TIME_T" ,"
893 "closed "CFS_TIME_T"\n",
894 oth, now, alloced, started, closed);
895 libcfs_debug_dumpstack(NULL);
899 #define OSD_CHECK_SLOW_TH(oth, dev, expr) \
901 cfs_time_t __closed = cfs_time_current(); \
902 cfs_time_t __alloced = oth->oth_alloced; \
903 cfs_time_t __started = oth->oth_started; \
906 __osd_th_check_slow(oth, dev, __alloced, __started, __closed); \
909 #else /* OSD_THANDLE_STATS */
911 #define osd_th_alloced(h) do {} while(0)
912 #define osd_th_started(h) do {} while(0)
913 #define OSD_CHECK_SLOW_TH(oth, dev, expr) expr
915 #endif /* OSD_THANDLE_STATS */
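/*
 * Illustrative usage (added): when OSD_THANDLE_STATS is enabled the macro
 * wraps an expression and records how long the handle spent in each state;
 * otherwise it degenerates to the bare expression.  osd_trans_stop() below
 * uses it exactly like this:
 *
 *	OSD_CHECK_SLOW_TH(oh, oti->oti_dev,
 *			  rc = ldiskfs_journal_stop(hdl));
 */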
918 * Concurrency: doesn't access mutable data.
920 static int osd_param_is_not_sane(const struct osd_device *dev,
921 const struct thandle *th)
923 struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
925 return oh->ot_credits > osd_transaction_size(dev);
929 * Concurrency: shouldn't matter.
931 static void osd_trans_commit_cb(struct super_block *sb,
932 struct ldiskfs_journal_cb_entry *jcb, int error)
934 struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
935 struct thandle *th = &oh->ot_super;
936 struct lu_device *lud = &th->th_dev->dd_lu_dev;
937 struct dt_txn_commit_cb *dcb, *tmp;
939 LASSERT(oh->ot_handle == NULL);
942 CERROR("transaction @0x%p commit error: %d\n", th, error);
944 dt_txn_hook_commit(th);
946 /* call per-transaction callbacks if any */
947 list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
948 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
949 "commit callback entry: magic=%x name='%s'\n",
950 dcb->dcb_magic, dcb->dcb_name);
951 list_del_init(&dcb->dcb_linkage);
952 dcb->dcb_func(NULL, th, dcb, error);
955 lu_ref_del_at(&lud->ld_reference, &oh->ot_dev_link, "osd-tx", th);
959 lu_context_exit(&th->th_ctx);
960 lu_context_fini(&th->th_ctx);
964 static struct thandle *osd_trans_create(const struct lu_env *env,
967 struct osd_thread_info *oti = osd_oti_get(env);
968 struct osd_iobuf *iobuf = &oti->oti_iobuf;
969 struct osd_thandle *oh;
973 /* no pending IO in this thread should be left from the previous request */
974 LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
976 th = ERR_PTR(-ENOMEM);
977 OBD_ALLOC_GFP(oh, sizeof *oh, GFP_NOFS);
979 oh->ot_quota_trans = &oti->oti_quota_trans;
980 memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
984 th->th_tags = LCT_TX_HANDLE;
986 atomic_set(&th->th_refc, 1);
987 th->th_alloc_size = sizeof(*oh);
988 oti->oti_dev = osd_dt_dev(d);
989 INIT_LIST_HEAD(&oh->ot_dcb_list);
992 memset(oti->oti_declare_ops, 0,
993 sizeof(oti->oti_declare_ops));
994 memset(oti->oti_declare_ops_rb, 0,
995 sizeof(oti->oti_declare_ops_rb));
996 memset(oti->oti_declare_ops_cred, 0,
997 sizeof(oti->oti_declare_ops_cred));
998 oti->oti_rollback = false;
1004 * Concurrency: shouldn't matter.
1006 static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
1009 struct osd_thread_info *oti = osd_oti_get(env);
1010 struct osd_device *dev = osd_dt_dev(d);
1012 struct osd_thandle *oh;
1017 LASSERT(current->journal_info == NULL);
1019 oh = container_of0(th, struct osd_thandle, ot_super);
1020 LASSERT(oh != NULL);
1021 LASSERT(oh->ot_handle == NULL);
1023 rc = dt_txn_hook_start(env, d, th);
1027 if (unlikely(osd_param_is_not_sane(dev, th))) {
1028 static unsigned long last_printed;
1029 static int last_credits;
1031 CWARN("%.16s: too many transaction credits (%d > %d)\n",
1032 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
1034 osd_journal(dev)->j_max_transaction_buffers);
1035 CWARN(" create: %u/%u, destroy: %u/%u\n",
1036 oti->oti_declare_ops[OSD_OT_CREATE],
1037 oti->oti_declare_ops_cred[OSD_OT_CREATE],
1038 oti->oti_declare_ops[OSD_OT_DESTROY],
1039 oti->oti_declare_ops_cred[OSD_OT_DESTROY]);
1040 CWARN(" attr_set: %u/%u, xattr_set: %u/%u\n",
1041 oti->oti_declare_ops[OSD_OT_ATTR_SET],
1042 oti->oti_declare_ops_cred[OSD_OT_ATTR_SET],
1043 oti->oti_declare_ops[OSD_OT_XATTR_SET],
1044 oti->oti_declare_ops_cred[OSD_OT_XATTR_SET]);
1045 CWARN(" write: %u/%u, punch: %u/%u, quota %u/%u\n",
1046 oti->oti_declare_ops[OSD_OT_WRITE],
1047 oti->oti_declare_ops_cred[OSD_OT_WRITE],
1048 oti->oti_declare_ops[OSD_OT_PUNCH],
1049 oti->oti_declare_ops_cred[OSD_OT_PUNCH],
1050 oti->oti_declare_ops[OSD_OT_QUOTA],
1051 oti->oti_declare_ops_cred[OSD_OT_QUOTA]);
1052 CWARN(" insert: %u/%u, delete: %u/%u\n",
1053 oti->oti_declare_ops[OSD_OT_INSERT],
1054 oti->oti_declare_ops_cred[OSD_OT_INSERT],
1055 oti->oti_declare_ops[OSD_OT_DELETE],
1056 oti->oti_declare_ops_cred[OSD_OT_DELETE]);
1057 CWARN(" ref_add: %u/%u, ref_del: %u/%u\n",
1058 oti->oti_declare_ops[OSD_OT_REF_ADD],
1059 oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
1060 oti->oti_declare_ops[OSD_OT_REF_DEL],
1061 oti->oti_declare_ops_cred[OSD_OT_REF_DEL]);
1063 if (last_credits != oh->ot_credits &&
1064 time_after(jiffies, last_printed +
1065 msecs_to_jiffies(60 * MSEC_PER_SEC))) {
1066 libcfs_debug_dumpstack(NULL);
1067 last_credits = oh->ot_credits;
1068 last_printed = jiffies;
1070 /* XXX Limit the credits to 'max_transaction_buffers', and
1071 * let the underlying filesystem catch the error if
1072 * we really need so many credits.
1074 * This should be removed when we can calculate the
1075 * credits precisely. */
1076 oh->ot_credits = osd_transaction_size(dev);
1080 * XXX temporary stuff. Some abstraction layer should
1083 jh = osd_journal_start_sb(osd_sb(dev), LDISKFS_HT_MISC, oh->ot_credits);
1087 LASSERT(oti->oti_txns == 0);
1088 lu_context_init(&th->th_ctx, th->th_tags);
1089 lu_context_enter(&th->th_ctx);
1091 lu_device_get(&d->dd_lu_dev);
1092 lu_ref_add_at(&d->dd_lu_dev.ld_reference, &oh->ot_dev_link,
1103 static int osd_seq_exists(const struct lu_env *env,
1104 struct osd_device *osd, u64 seq)
1106 struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
1107 struct seq_server_site *ss = osd_seq_site(osd);
1111 LASSERT(ss != NULL);
1112 LASSERT(ss->ss_server_fld != NULL);
1114 rc = osd_fld_lookup(env, osd, seq, range);
1117 CERROR("%s: can't lookup FLD sequence "LPX64
1118 ": rc = %d\n", osd_name(osd), seq, rc);
1122 RETURN(ss->ss_node_id == range->lsr_index);
1126 * Concurrency: shouldn't matter.
1128 static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
1132 struct osd_thandle *oh;
1133 struct osd_thread_info *oti = osd_oti_get(env);
1134 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1135 struct qsd_instance *qsd = oti->oti_dev->od_quota_slave;
1136 struct lquota_trans *qtrans;
1139 oh = container_of0(th, struct osd_thandle, ot_super);
1141 qtrans = oh->ot_quota_trans;
1142 oh->ot_quota_trans = NULL;
1144 if (oh->ot_handle != NULL) {
1145 handle_t *hdl = oh->ot_handle;
1148 * add commit callback
1149 * notice we don't do this in osd_trans_start()
1150 * as underlying transaction can change during truncate
1152 ldiskfs_journal_callback_add(hdl, osd_trans_commit_cb,
1155 LASSERT(oti->oti_txns == 1);
1157 rc = dt_txn_hook_stop(env, th);
1159 CERROR("Failure in transaction hook: %d\n", rc);
1161 /* hook functions might modify th_sync */
1162 hdl->h_sync = th->th_sync;
1164 oh->ot_handle = NULL;
1165 OSD_CHECK_SLOW_TH(oh, oti->oti_dev,
1166 rc = ldiskfs_journal_stop(hdl));
1168 CERROR("Failure to stop transaction: %d\n", rc);
1170 thandle_put(&oh->ot_super);
1173 /* inform the quota slave device that the transaction is stopping */
1174 qsd_op_end(env, qsd, qtrans);
1176 /* as we want journal IO and data IO to be concurrent, we don't block
1177 * awaiting data IO completion in osd_do_bio(); instead we wait here
1178 * once the transaction is submitted to the journal. All regular requests
1179 * don't do direct IO (except read/write), thus this wait_event becomes
1182 * IMPORTANT: we have to wait till any IO submitted by the thread is
1183 * completed, otherwise iobuf may be corrupted by a different request
1185 wait_event(iobuf->dr_wait,
1186 atomic_read(&iobuf->dr_numreqs) == 0);
1187 osd_fini_iobuf(oti->oti_dev, iobuf);
1189 rc = iobuf->dr_error;
1194 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
1196 struct osd_thandle *oh = container_of0(th, struct osd_thandle,
1199 LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
1200 LASSERT(dcb->dcb_func != NULL);
1201 list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
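/*
 * Illustrative sketch (added, not part of the original code): a caller
 * registers a commit callback by filling a struct dt_txn_commit_cb and
 * passing it through the dt_trans_cb_add method; once the journal commits,
 * osd_trans_commit_cb() above invokes dcb_func.  Field names follow the
 * checks in this file; the callback body is hypothetical.
 *
 *	static void my_commit_cb(struct lu_env *env, struct thandle *th,
 *				 struct dt_txn_commit_cb *dcb, int err)
 *	{
 *		// runs in journal commit context, err != 0 on commit error
 *	}
 *
 *	dcb->dcb_magic = TRANS_COMMIT_CB_MAGIC;
 *	dcb->dcb_func  = my_commit_cb;
 *	rc = dt_trans_cb_add(th, dcb);
 */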
1207 * Called just before object is freed. Releases all resources except for
1208 * object itself (that is released by osd_object_free()).
1210 * Concurrency: no concurrent access is possible that late in object
1213 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
1215 struct osd_object *obj = osd_obj(l);
1216 struct inode *inode = obj->oo_inode;
1218 LINVRNT(osd_invariant(obj));
1221 * If object is unlinked remove fid->ino mapping from object index.
1224 osd_index_fini(obj);
1225 if (inode != NULL) {
1226 struct qsd_instance *qsd = osd_obj2dev(obj)->od_quota_slave;
1227 qid_t uid = i_uid_read(inode);
1228 qid_t gid = i_gid_read(inode);
1231 obj->oo_inode = NULL;
1234 struct osd_thread_info *info = osd_oti_get(env);
1235 struct lquota_id_info *qi = &info->oti_qi;
1237 /* Release granted quota to master if necessary */
1238 qi->lqi_id.qid_uid = uid;
1239 qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
1241 qi->lqi_id.qid_uid = gid;
1242 qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
1248 * Concurrency: ->loo_object_release() is called under site spin-lock.
1250 static void osd_object_release(const struct lu_env *env,
1251 struct lu_object *l)
1256 * Concurrency: shouldn't matter.
1258 static int osd_object_print(const struct lu_env *env, void *cookie,
1259 lu_printer_t p, const struct lu_object *l)
1261 struct osd_object *o = osd_obj(l);
1262 struct iam_descr *d;
1264 if (o->oo_dir != NULL)
1265 d = o->oo_dir->od_container.ic_descr;
1268 return (*p)(env, cookie,
1269 LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
1271 o->oo_inode ? o->oo_inode->i_ino : 0UL,
1272 o->oo_inode ? o->oo_inode->i_generation : 0,
1273 d ? d->id_ops->id_name : "plain");
1276 #define GRANT_FOR_LOCAL_OIDS 32 /* 128kB for last_rcvd, quota files, ... */
1279 * Concurrency: shouldn't matter.
1281 int osd_statfs(const struct lu_env *env, struct dt_device *d,
1282 struct obd_statfs *sfs)
1284 struct osd_device *osd = osd_dt_dev(d);
1285 struct super_block *sb = osd_sb(osd);
1286 struct kstatfs *ksfs;
1289 if (unlikely(osd->od_mnt == NULL))
1290 return -EINPROGRESS;
1292 /* osd_lproc.c calls this without env, allocate ksfs for that case */
1293 if (unlikely(env == NULL)) {
1294 OBD_ALLOC_PTR(ksfs);
1298 ksfs = &osd_oti_get(env)->oti_ksfs;
1301 spin_lock(&osd->od_osfs_lock);
1302 result = sb->s_op->statfs(sb->s_root, ksfs);
1303 if (likely(result == 0)) { /* N.B. statfs can't really fail */
1304 statfs_pack(sfs, ksfs);
1305 if (sb->s_flags & MS_RDONLY)
1306 sfs->os_state = OS_STATE_READONLY;
1309 spin_unlock(&osd->od_osfs_lock);
1311 if (unlikely(env == NULL))
1314 /* Reserve a small amount of space for local objects like last_rcvd,
1315 * llog, quota files, ... */
1316 if (sfs->os_bavail <= GRANT_FOR_LOCAL_OIDS) {
1319 sfs->os_bavail -= GRANT_FOR_LOCAL_OIDS;
1320 /** Take out metadata overhead for indirect blocks */
1321 sfs->os_bavail -= sfs->os_bavail >> (sb->s_blocksize_bits - 3);
1328 * Estimate space needed for file creations. We assume the largest filename
1329 * corresponds to an object ID of 2^64 - 1, hence a filename of 20 chars.
1330 * This is 28 bytes per object, which is 28MB for 1M objects ... not so bad.
1332 #ifdef __LDISKFS_DIR_REC_LEN
1333 #define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
1335 #define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
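/*
 * Worked example (added): with the classic ldiskfs dirent layout (8-byte
 * record header, 4-byte record alignment), a 20-character name costs
 * (20 + 8 + 3) & ~3 = 28 bytes, matching the "28 bytes per object"
 * estimate above.
 */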
1339 * Concurrency: doesn't access mutable data.
1341 static void osd_conf_get(const struct lu_env *env,
1342 const struct dt_device *dev,
1343 struct dt_device_param *param)
1345 struct super_block *sb = osd_sb(osd_dt_dev(dev));
1349 * XXX should be taken from not-yet-existing fs abstraction layer.
1351 param->ddp_max_name_len = LDISKFS_NAME_LEN;
1352 param->ddp_max_nlink = LDISKFS_LINK_MAX;
1353 param->ddp_block_shift = sb->s_blocksize_bits;
1354 param->ddp_mount_type = LDD_MT_LDISKFS;
1355 param->ddp_maxbytes = sb->s_maxbytes;
1356 /* Overhead estimate should be fairly accurate, so we really take a tiny
1357 * error margin which also avoids fragmenting the filesystem too much */
1358 param->ddp_grant_reserved = 2; /* end up to be 1.9% after conversion */
1359 /* inodes are statically allocated, so per-inode space consumption
1360 * is the space consumed by the directory entry */
1361 param->ddp_inodespace = PER_OBJ_USAGE;
1362 /* per-fragment overhead to be used by the client code */
1363 param->ddp_grant_frag = 6 * LDISKFS_BLOCK_SIZE(sb);
1364 param->ddp_mntopts = 0;
1365 if (test_opt(sb, XATTR_USER))
1366 param->ddp_mntopts |= MNTOPT_USERXATTR;
1367 if (test_opt(sb, POSIX_ACL))
1368 param->ddp_mntopts |= MNTOPT_ACL;
1370 /* LOD might calculate the max stripe count based on max_ea_size,
1371 * so we need to take it into account in the overhead as well:
1372 * xattr_header + magic + xattr_entry_head */
1373 ea_overhead = sizeof(struct ldiskfs_xattr_header) + sizeof(__u32) +
1374 LDISKFS_XATTR_LEN(XATTR_NAME_MAX_LEN);
1376 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
1377 if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
1378 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE -
1382 param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
1386 * Concurrency: shouldn't matter.
1388 static int osd_sync(const struct lu_env *env, struct dt_device *d)
1392 CDEBUG(D_CACHE, "syncing OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
1394 rc = ldiskfs_force_commit(osd_sb(osd_dt_dev(d)));
1396 CDEBUG(D_CACHE, "synced OSD %s: rc = %d\n",
1397 LUSTRE_OSD_LDISKFS_NAME, rc);
1403 * Start commit for OSD device.
1405 * An implementation of dt_commit_async method for OSD device.
1406 * Asynchronously starts the underlying fs sync and thereby a transaction
1409 * \param env environment
1410 * \param d dt device
1412 * \see dt_device_operations
1414 static int osd_commit_async(const struct lu_env *env,
1415 struct dt_device *d)
1417 struct super_block *s = osd_sb(osd_dt_dev(d));
1420 CDEBUG(D_HA, "async commit OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
1421 RETURN(s->s_op->sync_fs(s, 0));
1425 * Concurrency: shouldn't matter.
1428 static int osd_ro(const struct lu_env *env, struct dt_device *d)
1430 struct super_block *sb = osd_sb(osd_dt_dev(d));
1431 struct block_device *dev = sb->s_bdev;
1432 #ifdef HAVE_DEV_SET_RDONLY
1433 struct block_device *jdev = LDISKFS_SB(sb)->journal_bdev;
1436 int rc = -EOPNOTSUPP;
1440 #ifdef HAVE_DEV_SET_RDONLY
1441 CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
1443 if (jdev && (jdev != dev)) {
1444 CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
1446 dev_set_rdonly(jdev);
1448 CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
1449 dev_set_rdonly(dev);
1451 CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
1452 osd_dt_dev(d)->od_svname, (long)dev, rc);
1458 * Concurrency: serialization provided by callers.
1460 static int osd_init_capa_ctxt(const struct lu_env *env, struct dt_device *d,
1461 int mode, unsigned long timeout, __u32 alg,
1462 struct lustre_capa_key *keys)
1464 struct osd_device *dev = osd_dt_dev(d);
1467 dev->od_fl_capa = mode;
1468 dev->od_capa_timeout = timeout;
1469 dev->od_capa_alg = alg;
1470 dev->od_capa_keys = keys;
1475 * Note: we do not count into QUOTA here.
1476 * If we mount with --data_journal we may need more.
1478 const int osd_dto_credits_noquota[DTO_NR] = {
1481 * INDEX_EXTRA_TRANS_BLOCKS(8) +
1482 * SINGLEDATA_TRANS_BLOCKS(8)
1483 * XXX Note: maybe iam needs more, since iam has more levels than
1486 [DTO_INDEX_INSERT] = 16,
1489 * just modify a single entry, probably merge few within a block
1491 [DTO_INDEX_DELETE] = 1,
1495 [DTO_INDEX_UPDATE] = 16,
1497 * 4(inode, inode bits, groups, GDT)
1498 * notice: OI updates are counted separately with DTO_INDEX_INSERT
1500 [DTO_OBJECT_CREATE] = 4,
1502 * 4(inode, inode bits, groups, GDT)
1503 * notice: OI updates are counted separately with DTO_INDEX_DELETE
1505 [DTO_OBJECT_DELETE] = 4,
1507 * Attr set credits (inode)
1509 [DTO_ATTR_SET_BASE] = 1,
1511 * Xattr set. The same as xattr of EXT3.
1512 * DATA_TRANS_BLOCKS(14)
1513 * XXX Note: in the original MDS implementation INDEX_EXTRA_TRANS_BLOCKS
1514 * were also counted in; it is not clear why.
1516 [DTO_XATTR_SET] = 14,
1518 * credits for inode change during write.
1520 [DTO_WRITE_BASE] = 3,
1522 * credits for single block write.
1524 [DTO_WRITE_BLOCK] = 14,
1526 * Attr set credits for chown.
1527 * This is extra credits for setattr, and it is null without quota
1529 [DTO_ATTR_SET_CHOWN] = 0
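/*
 * Illustrative usage (added): the declare paths below convert these
 * per-operation estimates into journal credits on the transaction handle,
 * e.g. in osd_declare_object_create():
 *
 *	osd_trans_declare_op(env, oh, OSD_OT_CREATE,
 *			     osd_dto_credits_noquota[DTO_OBJECT_CREATE]);
 *	osd_trans_declare_op(env, oh, OSD_OT_INSERT,
 *			     osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
 */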
1532 static const struct dt_device_operations osd_dt_ops = {
1533 .dt_root_get = osd_root_get,
1534 .dt_statfs = osd_statfs,
1535 .dt_trans_create = osd_trans_create,
1536 .dt_trans_start = osd_trans_start,
1537 .dt_trans_stop = osd_trans_stop,
1538 .dt_trans_cb_add = osd_trans_cb_add,
1539 .dt_conf_get = osd_conf_get,
1540 .dt_sync = osd_sync,
1542 .dt_commit_async = osd_commit_async,
1543 .dt_init_capa_ctxt = osd_init_capa_ctxt,
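/*
 * Illustrative usage (added): upper layers drive these methods through the
 * generic dt_* wrappers rather than calling them directly; a typical
 * transaction life cycle (wrapper names assumed from dt_object.h) is:
 *
 *	th = dt_trans_create(env, dt_dev);
 *	// ... dt_declare_* calls accumulate credits on th ...
 *	rc = dt_trans_start(env, dt_dev, th);
 *	// ... execute the declared updates ...
 *	rc = dt_trans_stop(env, dt_dev, th);
 */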
1546 static void osd_object_read_lock(const struct lu_env *env,
1547 struct dt_object *dt, unsigned role)
1549 struct osd_object *obj = osd_dt_obj(dt);
1550 struct osd_thread_info *oti = osd_oti_get(env);
1552 LINVRNT(osd_invariant(obj));
1554 LASSERT(obj->oo_owner != env);
1555 down_read_nested(&obj->oo_sem, role);
1557 LASSERT(obj->oo_owner == NULL);
1561 static void osd_object_write_lock(const struct lu_env *env,
1562 struct dt_object *dt, unsigned role)
1564 struct osd_object *obj = osd_dt_obj(dt);
1565 struct osd_thread_info *oti = osd_oti_get(env);
1567 LINVRNT(osd_invariant(obj));
1569 LASSERT(obj->oo_owner != env);
1570 down_write_nested(&obj->oo_sem, role);
1572 LASSERT(obj->oo_owner == NULL);
1573 obj->oo_owner = env;
1577 static void osd_object_read_unlock(const struct lu_env *env,
1578 struct dt_object *dt)
1580 struct osd_object *obj = osd_dt_obj(dt);
1581 struct osd_thread_info *oti = osd_oti_get(env);
1583 LINVRNT(osd_invariant(obj));
1585 LASSERT(oti->oti_r_locks > 0);
1587 up_read(&obj->oo_sem);
1590 static void osd_object_write_unlock(const struct lu_env *env,
1591 struct dt_object *dt)
1593 struct osd_object *obj = osd_dt_obj(dt);
1594 struct osd_thread_info *oti = osd_oti_get(env);
1596 LINVRNT(osd_invariant(obj));
1598 LASSERT(obj->oo_owner == env);
1599 LASSERT(oti->oti_w_locks > 0);
1601 obj->oo_owner = NULL;
1602 up_write(&obj->oo_sem);
1605 static int osd_object_write_locked(const struct lu_env *env,
1606 struct dt_object *dt)
1608 struct osd_object *obj = osd_dt_obj(dt);
1610 LINVRNT(osd_invariant(obj));
1612 return obj->oo_owner == env;
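/*
 * Illustrative note (added): write-lock ownership is tracked by stashing
 * the lu_env pointer in oo_owner when the rwsem is taken, so a later
 * ownership check from the same thread reduces to the pointer comparison
 * above.  A typical caller-side sequence (via the dt_object wrappers,
 * names assumed) would be:
 *
 *	dt_write_lock(env, dt, 0);
 *	LASSERT(dt_write_locked(env, dt));
 *	// ... modify the object ...
 *	dt_write_unlock(env, dt);
 */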
1615 static int capa_is_sane(const struct lu_env *env,
1616 struct osd_device *dev,
1617 struct lustre_capa *capa,
1618 struct lustre_capa_key *keys)
1620 struct osd_thread_info *oti = osd_oti_get(env);
1621 struct lustre_capa *tcapa = &oti->oti_capa;
1622 struct obd_capa *oc;
1626 oc = capa_lookup(dev->od_capa_hash, capa, 0);
1628 if (capa_is_expired(oc)) {
1629 DEBUG_CAPA(D_ERROR, capa, "expired");
1636 if (capa_is_expired_sec(capa)) {
1637 DEBUG_CAPA(D_ERROR, capa, "expired");
1641 spin_lock(&capa_lock);
1642 for (i = 0; i < 2; i++) {
1643 if (keys[i].lk_keyid == capa->lc_keyid) {
1644 oti->oti_capa_key = keys[i];
1648 spin_unlock(&capa_lock);
1651 DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
1655 rc = capa_hmac(tcapa->lc_hmac, capa, oti->oti_capa_key.lk_key);
1659 if (memcmp(tcapa->lc_hmac, capa->lc_hmac, sizeof(capa->lc_hmac))) {
1660 DEBUG_CAPA(D_ERROR, capa, "HMAC mismatch");
1664 oc = capa_add(dev->od_capa_hash, capa);
1670 int osd_object_auth(const struct lu_env *env, struct dt_object *dt,
1671 struct lustre_capa *capa, __u64 opc)
1673 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1674 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
1675 struct lu_capainfo *lci;
1678 if (!osd->od_fl_capa)
1681 if (capa == BYPASS_CAPA)
1684 lci = lu_capainfo_get(env);
1685 if (unlikely(lci == NULL))
1688 if (lci->lci_auth == LC_ID_NONE)
1692 CERROR("%s: no capability provided for FID "DFID": rc = %d\n",
1693 osd_name(osd), PFID(fid), -EACCES);
1697 if (!lu_fid_eq(fid, &capa->lc_fid)) {
1698 DEBUG_CAPA(D_ERROR, capa, "fid "DFID" mismatch with",
1703 if (!capa_opc_supported(capa, opc)) {
1704 DEBUG_CAPA(D_ERROR, capa, "opc "LPX64" not supported by", opc);
1708 rc = capa_is_sane(env, osd, capa, osd->od_capa_keys);
1710 DEBUG_CAPA(D_ERROR, capa, "insane: rc = %d", rc);
1717 static struct timespec *osd_inode_time(const struct lu_env *env,
1718 struct inode *inode, __u64 seconds)
1720 struct osd_thread_info *oti = osd_oti_get(env);
1721 struct timespec *t = &oti->oti_time;
1723 t->tv_sec = seconds;
1725 *t = timespec_trunc(*t, inode->i_sb->s_time_gran);
1730 static void osd_inode_getattr(const struct lu_env *env,
1731 struct inode *inode, struct lu_attr *attr)
1733 attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
1734 LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
1735 LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE |
1738 attr->la_atime = LTIME_S(inode->i_atime);
1739 attr->la_mtime = LTIME_S(inode->i_mtime);
1740 attr->la_ctime = LTIME_S(inode->i_ctime);
1741 attr->la_mode = inode->i_mode;
1742 attr->la_size = i_size_read(inode);
1743 attr->la_blocks = inode->i_blocks;
1744 attr->la_uid = i_uid_read(inode);
1745 attr->la_gid = i_gid_read(inode);
1746 attr->la_flags = LDISKFS_I(inode)->i_flags;
1747 attr->la_nlink = inode->i_nlink;
1748 attr->la_rdev = inode->i_rdev;
1749 attr->la_blksize = 1 << inode->i_blkbits;
1750 attr->la_blkbits = inode->i_blkbits;
1753 static int osd_attr_get(const struct lu_env *env,
1754 struct dt_object *dt,
1755 struct lu_attr *attr,
1756 struct lustre_capa *capa)
1758 struct osd_object *obj = osd_dt_obj(dt);
1760 if (!dt_object_exists(dt))
1763 LASSERT(!dt_object_remote(dt));
1764 LINVRNT(osd_invariant(obj));
1766 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
1769 spin_lock(&obj->oo_guard);
1770 osd_inode_getattr(env, obj->oo_inode, attr);
1771 spin_unlock(&obj->oo_guard);
1775 static int osd_declare_attr_set(const struct lu_env *env,
1776 struct dt_object *dt,
1777 const struct lu_attr *attr,
1778 struct thandle *handle)
1780 struct osd_thandle *oh;
1781 struct osd_object *obj;
1782 struct osd_thread_info *info = osd_oti_get(env);
1783 struct lquota_id_info *qi = &info->oti_qi;
1791 LASSERT(dt != NULL);
1792 LASSERT(handle != NULL);
1794 obj = osd_dt_obj(dt);
1795 LASSERT(osd_invariant(obj));
1797 oh = container_of0(handle, struct osd_thandle, ot_super);
1798 LASSERT(oh->ot_handle == NULL);
1800 osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
1801 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
1803 if (attr == NULL || obj->oo_inode == NULL)
1806 bspace = obj->oo_inode->i_blocks;
1807 bspace <<= obj->oo_inode->i_sb->s_blocksize_bits;
1808 bspace = toqb(bspace);
1810 /* Changing ownership is always performed by the super user, so it should not fail in normal cases.
1813 * We still need to call osd_declare_qid() to calculate the journal
1814 * credits for updating quota accounting files and to trigger quota
1815 * space adjustment once the operation is completed. */
1816 if (attr->la_valid & LA_UID || attr->la_valid & LA_GID) {
1818 uid = i_uid_read(obj->oo_inode);
1819 qi->lqi_type = USRQUOTA;
1820 enforce = (attr->la_valid & LA_UID) && (attr->la_uid != uid);
1821 /* inode accounting */
1822 qi->lqi_is_blk = false;
1824 /* one more inode for the new uid ... */
1825 qi->lqi_id.qid_uid = attr->la_uid;
1827 /* Reserve credits for the new uid */
1828 rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
1829 if (rc == -EDQUOT || rc == -EINPROGRESS)
1834 /* and one less inode for the current uid */
1835 qi->lqi_id.qid_uid = uid;
1837 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1838 if (rc == -EDQUOT || rc == -EINPROGRESS)
1843 /* block accounting */
1844 qi->lqi_is_blk = true;
1846 /* more blocks for the new uid ... */
1847 qi->lqi_id.qid_uid = attr->la_uid;
1848 qi->lqi_space = bspace;
1850 * Credits for the new uid has been reserved, re-use "obj"
1851 * to save credit reservation.
1853 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1854 if (rc == -EDQUOT || rc == -EINPROGRESS)
1859 /* and finally less blocks for the current uid */
1860 qi->lqi_id.qid_uid = uid;
1861 qi->lqi_space = -bspace;
1862 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1863 if (rc == -EDQUOT || rc == -EINPROGRESS)
1869 gid = i_gid_read(obj->oo_inode);
1870 qi->lqi_type = GRPQUOTA;
1871 enforce = (attr->la_valid & LA_GID) && (attr->la_gid != gid);
1873 /* inode accounting */
1874 qi->lqi_is_blk = false;
1876 /* one more inode for the new gid ... */
1877 qi->lqi_id.qid_gid = attr->la_gid;
1879 rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
1880 if (rc == -EDQUOT || rc == -EINPROGRESS)
1885 /* and one less inode for the current gid */
1886 qi->lqi_id.qid_gid = gid;
1888 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1889 if (rc == -EDQUOT || rc == -EINPROGRESS)
1894 /* block accounting */
1895 qi->lqi_is_blk = true;
1897 /* more blocks for the new gid ... */
1898 qi->lqi_id.qid_gid = attr->la_gid;
1899 qi->lqi_space = bspace;
1900 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1901 if (rc == -EDQUOT || rc == -EINPROGRESS)
1906 /* and finally less blocks for the current gid */
1907 qi->lqi_id.qid_gid = gid;
1908 qi->lqi_space = -bspace;
1909 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1910 if (rc == -EDQUOT || rc == -EINPROGRESS)
1919 static int osd_inode_setattr(const struct lu_env *env,
1920 struct inode *inode, const struct lu_attr *attr)
1922 __u64 bits = attr->la_valid;
1924 /* Only allow set size for regular file */
1925 if (!S_ISREG(inode->i_mode))
1926 bits &= ~(LA_SIZE | LA_BLOCKS);
1931 if (bits & LA_ATIME)
1932 inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
1933 if (bits & LA_CTIME)
1934 inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
1935 if (bits & LA_MTIME)
1936 inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
1937 if (bits & LA_SIZE) {
1938 LDISKFS_I(inode)->i_disksize = attr->la_size;
1939 i_size_write(inode, attr->la_size);
1943 /* OSD should not change "i_blocks" which is used by quota.
1944 * "i_blocks" should be changed by ldiskfs only. */
1945 if (bits & LA_BLOCKS)
1946 inode->i_blocks = attr->la_blocks;
1949 inode->i_mode = (inode->i_mode & S_IFMT) |
1950 (attr->la_mode & ~S_IFMT);
1952 i_uid_write(inode, attr->la_uid);
1954 i_gid_write(inode, attr->la_gid);
1955 if (bits & LA_NLINK)
1956 set_nlink(inode, attr->la_nlink);
1958 inode->i_rdev = attr->la_rdev;
1960 if (bits & LA_FLAGS) {
1961 /* always keep S_NOCMTIME */
1962 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
1968 static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
1970 if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
1971 (attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
1975 ll_vfs_dq_init(inode);
1977 if (attr->la_valid & LA_UID)
1978 iattr.ia_valid |= ATTR_UID;
1979 if (attr->la_valid & LA_GID)
1980 iattr.ia_valid |= ATTR_GID;
1981 iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
1982 iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);
1984 rc = ll_vfs_dq_transfer(inode, &iattr);
1986 CERROR("%s: quota transfer failed: rc = %d. Is quota "
1987 "enforcement enabled on the ldiskfs "
1988 "filesystem?\n", inode->i_sb->s_id, rc);
1995 static int osd_attr_set(const struct lu_env *env,
1996 struct dt_object *dt,
1997 const struct lu_attr *attr,
1998 struct thandle *handle,
1999 struct lustre_capa *capa)
2001 struct osd_object *obj = osd_dt_obj(dt);
2002 struct inode *inode;
2005 if (!dt_object_exists(dt))
2008 LASSERT(handle != NULL);
2009 LASSERT(!dt_object_remote(dt));
2010 LASSERT(osd_invariant(obj));
2012 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
2015 osd_trans_exec_op(env, handle, OSD_OT_ATTR_SET);
2017 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING)) {
2018 struct osd_thread_info *oti = osd_oti_get(env);
2019 const struct lu_fid *fid0 = lu_object_fid(&dt->do_lu);
2020 struct lu_fid *fid1 = &oti->oti_fid;
2021 struct osd_inode_id *id = &oti->oti_id;
2022 struct iam_path_descr *ipd;
2023 struct iam_container *bag;
2024 struct osd_thandle *oh;
2027 fid_cpu_to_be(fid1, fid0);
2028 memset(id, 1, sizeof(*id));
2029 bag = &osd_fid2oi(osd_dev(dt->do_lu.lo_dev),
2030 fid0)->oi_dir.od_container;
2031 ipd = osd_idx_ipd_get(env, bag);
2032 if (unlikely(ipd == NULL))
2035 oh = container_of0(handle, struct osd_thandle, ot_super);
2036 rc = iam_update(oh->ot_handle, bag, (const struct iam_key *)fid1,
2037 (const struct iam_rec *)id, ipd);
2038 osd_ipd_put(env, bag, ipd);
2039 return(rc > 0 ? 0 : rc);
2042 inode = obj->oo_inode;
2044 rc = osd_quota_transfer(inode, attr);
2048 spin_lock(&obj->oo_guard);
2049 rc = osd_inode_setattr(env, inode, attr);
2050 spin_unlock(&obj->oo_guard);
2053 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2057 static struct dentry *osd_child_dentry_get(const struct lu_env *env,
2058 struct osd_object *obj,
2059 const char *name, const int namelen)
2061 return osd_child_dentry_by_inode(env, obj->oo_inode, name, namelen);
2064 static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
2065 umode_t mode, struct dt_allocation_hint *hint,
2069 struct osd_device *osd = osd_obj2dev(obj);
2070 struct osd_thandle *oth;
2071 struct dt_object *parent = NULL;
2072 struct inode *inode;
2074 LINVRNT(osd_invariant(obj));
2075 LASSERT(obj->oo_inode == NULL);
2076 LASSERT(obj->oo_hl_head == NULL);
2078 if (S_ISDIR(mode) && ldiskfs_pdo) {
2079 obj->oo_hl_head =ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
2080 if (obj->oo_hl_head == NULL)
2084 oth = container_of(th, struct osd_thandle, ot_super);
2085 LASSERT(oth->ot_handle->h_transaction != NULL);
2087 if (hint && hint->dah_parent)
2088 parent = hint->dah_parent;
2090 inode = ldiskfs_create_inode(oth->ot_handle,
2091 parent ? osd_dt_obj(parent)->oo_inode :
2092 osd_sb(osd)->s_root->d_inode,
2094 if (!IS_ERR(inode)) {
2095 /* Do not update file c/mtime in ldiskfs.
2096 * NB: don't need any lock because no contention at this
2098 inode->i_flags |= S_NOCMTIME;
2100 /* For new created object, it must be consistent,
2101 * and it is unnecessary to scrub against it. */
2102 ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_NOSCRUB);
2103 obj->oo_inode = inode;
2106 if (obj->oo_hl_head != NULL) {
2107 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
2108 obj->oo_hl_head = NULL;
2110 result = PTR_ERR(inode);
2112 LINVRNT(osd_invariant(obj));
2120 static int osd_mkdir(struct osd_thread_info *info, struct osd_object *obj,
2121 struct lu_attr *attr,
2122 struct dt_allocation_hint *hint,
2123 struct dt_object_format *dof,
2127 struct osd_thandle *oth;
2128 __u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX));
2130 LASSERT(S_ISDIR(attr->la_mode));
2132 oth = container_of(th, struct osd_thandle, ot_super);
2133 LASSERT(oth->ot_handle->h_transaction != NULL);
2134 result = osd_mkfile(info, obj, mode, hint, th);
2139 static int osd_mk_index(struct osd_thread_info *info, struct osd_object *obj,
2140 struct lu_attr *attr,
2141 struct dt_allocation_hint *hint,
2142 struct dt_object_format *dof,
2146 struct osd_thandle *oth;
2147 const struct dt_index_features *feat = dof->u.dof_idx.di_feat;
2149 __u32 mode = (attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX));
2151 LASSERT(S_ISREG(attr->la_mode));
2153 oth = container_of(th, struct osd_thandle, ot_super);
2154 LASSERT(oth->ot_handle->h_transaction != NULL);
2156 result = osd_mkfile(info, obj, mode, hint, th);
2158 LASSERT(obj->oo_inode != NULL);
2159 if (feat->dif_flags & DT_IND_VARKEY)
2160 result = iam_lvar_create(obj->oo_inode,
2161 feat->dif_keysize_max,
2163 feat->dif_recsize_max,
2166 result = iam_lfix_create(obj->oo_inode,
2167 feat->dif_keysize_max,
2169 feat->dif_recsize_max,
2176 static int osd_mkreg(struct osd_thread_info *info, struct osd_object *obj,
2177 struct lu_attr *attr,
2178 struct dt_allocation_hint *hint,
2179 struct dt_object_format *dof,
2182 LASSERT(S_ISREG(attr->la_mode));
2183 return osd_mkfile(info, obj, (attr->la_mode &
2184 (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
2187 static int osd_mksym(struct osd_thread_info *info, struct osd_object *obj,
2188 struct lu_attr *attr,
2189 struct dt_allocation_hint *hint,
2190 struct dt_object_format *dof,
2193 LASSERT(S_ISLNK(attr->la_mode));
2194 return osd_mkfile(info, obj, (attr->la_mode &
2195 (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
2198 static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
2199 struct lu_attr *attr,
2200 struct dt_allocation_hint *hint,
2201 struct dt_object_format *dof,
2204 umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
2207 LINVRNT(osd_invariant(obj));
2208 LASSERT(obj->oo_inode == NULL);
2209 LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
2210 S_ISFIFO(mode) || S_ISSOCK(mode));
2212 result = osd_mkfile(info, obj, mode, hint, th);
2214 LASSERT(obj->oo_inode != NULL);
2216 * This inode should be marked dirty for i_rdev. Currently
2217 * that is done in the osd_attr_init().
2219 init_special_inode(obj->oo_inode, obj->oo_inode->i_mode,
2222 LINVRNT(osd_invariant(obj));
2226 typedef int (*osd_obj_type_f)(struct osd_thread_info *, struct osd_object *,
2228 struct dt_allocation_hint *hint,
2229 struct dt_object_format *dof,
2232 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
2234 osd_obj_type_f result;
2250 result = osd_mk_index;
2261 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
2262 struct dt_object *parent, struct dt_object *child,
2267 ah->dah_parent = parent;
2268 ah->dah_mode = child_mode;
2271 static void osd_attr_init(struct osd_thread_info *info, struct osd_object *obj,
2272 struct lu_attr *attr, struct dt_object_format *dof)
2274 struct inode *inode = obj->oo_inode;
2275 __u64 valid = attr->la_valid;
2278 attr->la_valid &= ~(LA_TYPE | LA_MODE);
2280 if (dof->dof_type != DFT_NODE)
2281 attr->la_valid &= ~LA_RDEV;
2282 if ((valid & LA_ATIME) && (attr->la_atime == LTIME_S(inode->i_atime)))
2283 attr->la_valid &= ~LA_ATIME;
2284 if ((valid & LA_CTIME) && (attr->la_ctime == LTIME_S(inode->i_ctime)))
2285 attr->la_valid &= ~LA_CTIME;
2286 if ((valid & LA_MTIME) && (attr->la_mtime == LTIME_S(inode->i_mtime)))
2287 attr->la_valid &= ~LA_MTIME;
2289 result = osd_quota_transfer(inode, attr);
2293 if (attr->la_valid != 0) {
2294 result = osd_inode_setattr(info->oti_env, inode, attr);
2296 * The osd_inode_setattr() should always succeed here. The
2297 * only error that could be returned is EDQUOT when we are
2298 * trying to change the UID or GID of the inode. However, this
2299 * should not happen since quota enforcement is no longer
2300 * enabled on ldiskfs (lquota takes care of it).
2302 LASSERTF(result == 0, "%d\n", result);
2303 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2306 attr->la_valid = valid;
2310 * Helper function for osd_object_create()
2312 * \retval 0, on success
2314 static int __osd_object_create(struct osd_thread_info *info,
2315 struct osd_object *obj, struct lu_attr *attr,
2316 struct dt_allocation_hint *hint,
2317 struct dt_object_format *dof,
2323 /* we drop umask so that permissions we pass are not affected */
2324 umask = current->fs->umask;
2325 current->fs->umask = 0;
2327 result = osd_create_type_f(dof->dof_type)(info, obj, attr, hint, dof,
2330 osd_attr_init(info, obj, attr, dof);
2331 osd_object_init0(obj);
2333 if (obj->oo_inode && (obj->oo_inode->i_state & I_NEW))
2334 unlock_new_inode(obj->oo_inode);
2337 /* restore previous umask value */
2338 current->fs->umask = umask;
2344 * Helper function for osd_object_create()
2346 * \retval 0, on success
2348 static int __osd_oi_insert(const struct lu_env *env, struct osd_object *obj,
2349 const struct lu_fid *fid, struct thandle *th)
2351 struct osd_thread_info *info = osd_oti_get(env);
2352 struct osd_inode_id *id = &info->oti_id;
2353 struct osd_device *osd = osd_obj2dev(obj);
2354 struct osd_thandle *oh;
2356 LASSERT(obj->oo_inode != NULL);
2358 oh = container_of0(th, struct osd_thandle, ot_super);
2359 LASSERT(oh->ot_handle);
2361 osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
2362 return osd_oi_insert(info, osd, fid, id, oh->ot_handle, OI_CHECK_FLD);
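/*
 * Illustrative note (added): the OI index maps a FID to an osd_inode_id,
 * i.e. the backing inode number plus its generation, so a stale mapping
 * (same ino, different generation) can be detected on lookup, as done in
 * osd_iget()/osd_iget_check() above.  Conceptually:
 *
 *	osd_id_gen(id, inode->i_ino, inode->i_generation);
 *	// a later osd_oi_lookup() for the same FID returns this ino/gen pair
 */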
2365 int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
2366 u64 seq, struct lu_seq_range *range)
2368 struct seq_server_site *ss = osd_seq_site(osd);
2370 if (fid_seq_is_idif(seq)) {
2371 fld_range_set_ost(range);
2372 range->lsr_index = idif_ost_idx(seq);
2376 if (!fid_seq_in_fldb(seq)) {
2377 fld_range_set_mdt(range);
2379 /* FIXME: if ss is NULL we cannot derive lsr_index from the seq; an FLD lookup would be needed */
2381 range->lsr_index = ss->ss_node_id;
2385 LASSERT(ss != NULL);
2386 fld_range_set_any(range);
2387 /* OSD will only do local fld lookup */
2388 return fld_local_lookup(env, ss->ss_server_fld, seq, range);
2392 * Concurrency: no external locking is necessary.
2394 static int osd_declare_object_create(const struct lu_env *env,
2395 struct dt_object *dt,
2396 struct lu_attr *attr,
2397 struct dt_allocation_hint *hint,
2398 struct dt_object_format *dof,
2399 struct thandle *handle)
2401 struct osd_thandle *oh;
2405 LASSERT(handle != NULL);
2407 oh = container_of0(handle, struct osd_thandle, ot_super);
2408 LASSERT(oh->ot_handle == NULL);
2410 osd_trans_declare_op(env, oh, OSD_OT_CREATE,
2411 osd_dto_credits_noquota[DTO_OBJECT_CREATE]);
2412 /* Reusing an idle OI block may cause one additional OI block to be modified */
2414 osd_trans_declare_op(env, oh, OSD_OT_INSERT,
2415 osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
2417 /* If this is a directory, then we expect . and .. to be inserted as
2418 * well. The one directory block always needs to be created for the
2419 * directory, so we could use DTO_WRITE_BASE here (GDT, block bitmap,
2420 * block); there is no danger of needing a tree for the first block.
2422 if (attr && S_ISDIR(attr->la_mode)) {
2423 osd_trans_declare_op(env, oh, OSD_OT_INSERT,
2424 osd_dto_credits_noquota[DTO_WRITE_BASE]);
2425 osd_trans_declare_op(env, oh, OSD_OT_INSERT, 0);
2431 rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid, 1, oh,
2432 osd_dt_obj(dt), false, NULL, false);
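/*
 * Create the on-disk object: allocate and initialize the inode through
 * __osd_object_create(), then insert the fid->inode mapping into the OI via
 * __osd_oi_insert(). Accounting (quota) objects are never created here.
 */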
2439 static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
2440 struct lu_attr *attr,
2441 struct dt_allocation_hint *hint,
2442 struct dt_object_format *dof, struct thandle *th)
2444 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2445 struct osd_object *obj = osd_dt_obj(dt);
2446 struct osd_thread_info *info = osd_oti_get(env);
2450 if (dt_object_exists(dt))
2453 LINVRNT(osd_invariant(obj));
2454 LASSERT(!dt_object_remote(dt));
2455 LASSERT(osd_write_locked(env, obj));
2456 LASSERT(th != NULL);
2458 if (unlikely(fid_is_acct(fid)))
2459 /* Quota files can't be created from the kernel any more,
2460 * 'tune2fs -O quota' will take care of creating them */
2463 osd_trans_exec_op(env, th, OSD_OT_CREATE);
2464 osd_trans_declare_rb(env, th, OSD_OT_REF_ADD);
2466 result = __osd_object_create(info, obj, attr, hint, dof, th);
2468 result = __osd_oi_insert(env, obj, fid, th);
2470 LASSERT(ergo(result == 0,
2471 dt_object_exists(dt) && !dt_object_remote(dt)));
2473 LASSERT(osd_invariant(obj));
2478 * Called to destroy the on-disk representation of the object
2480 * Concurrency: must be locked
2482 static int osd_declare_object_destroy(const struct lu_env *env,
2483 struct dt_object *dt,
2486 struct osd_object *obj = osd_dt_obj(dt);
2487 struct inode *inode = obj->oo_inode;
2488 struct osd_thandle *oh;
2492 oh = container_of0(th, struct osd_thandle, ot_super);
2493 LASSERT(oh->ot_handle == NULL);
2496 osd_trans_declare_op(env, oh, OSD_OT_DESTROY,
2497 osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
2498 /* Recycling an idle OI leaf may cause three additional OI blocks to be modified */
2500 osd_trans_declare_op(env, oh, OSD_OT_DELETE,
2501 osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
2502 /* one less inode */
2503 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2504 -1, oh, obj, false, NULL, false);
2507 /* data to be truncated */
2508 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2509 0, oh, obj, true, NULL, false);
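/*
 * Destroy the on-disk object: for directories the entry in the remote parent
 * (if any) is removed first, then the fid->inode mapping is deleted from the
 * OI and the object is flagged so it is no longer kept in the cache.
 */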
2513 static int osd_object_destroy(const struct lu_env *env,
2514 struct dt_object *dt,
2517 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2518 struct osd_object *obj = osd_dt_obj(dt);
2519 struct inode *inode = obj->oo_inode;
2520 struct osd_device *osd = osd_obj2dev(obj);
2521 struct osd_thandle *oh;
2525 oh = container_of0(th, struct osd_thandle, ot_super);
2526 LASSERT(oh->ot_handle);
2528 LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
2530 if (unlikely(fid_is_acct(fid)))
2533 if (S_ISDIR(inode->i_mode)) {
2534 LASSERT(osd_inode_unlinked(inode) || inode->i_nlink == 1 ||
2535 inode->i_nlink == 2);
2536 /* this will check for and delete the inode in the remote parent;
2537 * how can this be optimized? XXX: unlink performance impact */
2538 result = osd_delete_from_remote_parent(env, osd, obj, oh);
2539 if (result != 0 && result != -ENOENT) {
2540 CERROR("%s: delete inode "DFID": rc = %d\n",
2541 osd_name(osd), PFID(fid), result);
2543 spin_lock(&obj->oo_guard);
2545 spin_unlock(&obj->oo_guard);
2546 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2549 osd_trans_exec_op(env, th, OSD_OT_DESTROY);
2551 result = osd_oi_delete(osd_oti_get(env), osd, fid, oh->ot_handle,
2554 /* XXX: add to ext3 orphan list */
2555 /* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
2557 /* not needed in the cache anymore */
2558 set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
2564 * Put the fid into lustre_mdt_attrs, and then store the structure in the
2565 * inode's EA. This fid should not be altered during the inode's lifetime.
2568 * \retval +ve, on success
2569 * \retval -ve, on error
2571 * FIXME: it would be better to use ldiskfs_xattr_set_handle() here
2573 int osd_ea_fid_set(struct osd_thread_info *info, struct inode *inode,
2574 const struct lu_fid *fid, __u32 compat, __u32 incompat)
2576 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
2580 if (OBD_FAIL_CHECK(OBD_FAIL_FID_INLMA))
2583 lustre_lma_init(lma, fid, compat, incompat);
2584 lustre_lma_swab(lma);
2586 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma, sizeof(*lma),
2588 /* LMA may already exist, but we need to check that all the
2589 * desired compat/incompat flags have been added. */
2590 if (unlikely(rc == -EEXIST)) {
2591 if (compat == 0 && incompat == 0)
2594 rc = __osd_xattr_get(inode, &info->oti_obj_dentry,
2595 XATTR_NAME_LMA, info->oti_mdt_attrs_old,
2600 lustre_lma_swab(lma);
2601 if (!(~lma->lma_compat & compat) &&
2602 !(~lma->lma_incompat & incompat))
2605 lma->lma_compat |= compat;
2606 lma->lma_incompat |= incompat;
2607 lustre_lma_swab(lma);
2608 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
2609 sizeof(*lma), XATTR_REPLACE);
2616 * ldiskfs supports storing the fid in the dirent; it is passed in dentry->d_fsdata.
2617 * Lustre 1.8 also uses d_fsdata for passing other info to ldiskfs.
2618 * For compatibility with the 1.8 ldiskfs driver we need a magic number
2619 * at the start of the fid data.
2620 * ldiskfs_dentry_param is used only to pass the fid from osd to ldiskfs.
2623 static void osd_get_ldiskfs_dirent_param(struct ldiskfs_dentry_param *param,
2624 const struct lu_fid *fid)
2626 if (!fid_is_namespace_visible(fid) ||
2627 OBD_FAIL_CHECK(OBD_FAIL_FID_IGIF)) {
2628 param->edp_magic = 0;
2632 param->edp_magic = LDISKFS_LUFID_MAGIC;
2633 param->edp_len = sizeof(struct lu_fid) + 1;
2634 fid_cpu_to_be((struct lu_fid *)param->edp_data, (struct lu_fid *)fid);
2638 * Try to read the fid from inode ea into dt_rec.
2640 * \param fid object fid.
2642 * \retval 0 on success
2644 static int osd_ea_fid_get(const struct lu_env *env, struct osd_object *obj,
2645 __u32 ino, struct lu_fid *fid,
2646 struct osd_inode_id *id)
2648 struct osd_thread_info *info = osd_oti_get(env);
2649 struct inode *inode;
2652 osd_id_gen(id, ino, OSD_OII_NOGEN);
2653 inode = osd_iget_fid(info, osd_obj2dev(obj), id, fid);
2655 RETURN(PTR_ERR(inode));
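/*
 * Insert the "." and ".." entries for a new directory. The ".." dentry
 * parameter carries the parent fid so ldiskfs can record it in the dirent,
 * while the "." entry carries no fid (its magic is cleared).
 */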
2661 static int osd_add_dot_dotdot_internal(struct osd_thread_info *info,
2663 struct inode *parent_dir,
2664 const struct lu_fid *dot_fid,
2665 const struct lu_fid *dot_dot_fid,
2666 struct osd_thandle *oth)
2668 struct ldiskfs_dentry_param *dot_ldp;
2669 struct ldiskfs_dentry_param *dot_dot_ldp;
2671 dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
2672 osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
2674 dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
2675 dot_ldp->edp_magic = 0;
2676 return ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
2677 dir, dot_ldp, dot_dot_ldp);
2681 * Create a local agent inode for a remote entry
2683 static struct inode *osd_create_local_agent_inode(const struct lu_env *env,
2684 struct osd_device *osd,
2685 struct osd_object *pobj,
2686 const struct lu_fid *fid,
2690 struct osd_thread_info *info = osd_oti_get(env);
2691 struct inode *local;
2692 struct osd_thandle *oh;
2697 oh = container_of(th, struct osd_thandle, ot_super);
2698 LASSERT(oh->ot_handle->h_transaction != NULL);
2700 local = ldiskfs_create_inode(oh->ot_handle, pobj->oo_inode, type);
2701 if (IS_ERR(local)) {
2702 CERROR("%s: create local error %d\n", osd_name(osd),
2703 (int)PTR_ERR(local));
2707 /* Set special LMA flag for local agent inode */
2708 rc = osd_ea_fid_set(info, local, fid, 0, LMAI_AGENT);
2710 CERROR("%s: set LMA for "DFID" remote inode failed: rc = %d\n",
2711 osd_name(osd), PFID(fid), rc);
2712 RETURN(ERR_PTR(rc));
2718 rc = osd_add_dot_dotdot_internal(info, local, pobj->oo_inode,
2719 lu_object_fid(&pobj->oo_dt.do_lu),
2722 CERROR("%s: "DFID" add dot dotdot error: rc = %d\n",
2723 osd_name(osd), PFID(fid), rc);
2724 RETURN(ERR_PTR(rc));
2731 * Delete the local agent inode for a remote entry
2733 static int osd_delete_local_agent_inode(const struct lu_env *env,
2734 struct osd_device *osd,
2735 const struct lu_fid *fid,
2736 __u32 ino, struct osd_thandle *oh)
2738 struct osd_thread_info *oti = osd_oti_get(env);
2739 struct osd_inode_id *id = &oti->oti_id;
2740 struct inode *inode;
2743 id->oii_ino = le32_to_cpu(ino);
2744 id->oii_gen = OSD_OII_NOGEN;
2745 inode = osd_iget(oti, osd, id);
2746 if (IS_ERR(inode)) {
2747 CERROR("%s: iget error "DFID" id %u:%u\n", osd_name(osd),
2748 PFID(fid), id->oii_ino, id->oii_gen);
2749 RETURN(PTR_ERR(inode));
2753 mark_inode_dirty(inode);
2754 CDEBUG(D_INODE, "%s: delete remote inode "DFID" %lu\n",
2755 osd_name(osd), PFID(fid), inode->i_ino);
2761 * OSD layer object create function for interoperability mode (b11826).
2762 * This is mostly similar to osd_object_create(); the only difference is that
2763 * the fid is inserted into the inode EA here.
2765 * \retval 0, on success
2766 * \retval -ve, on error
2768 static int osd_object_ea_create(const struct lu_env *env, struct dt_object *dt,
2769 struct lu_attr *attr,
2770 struct dt_allocation_hint *hint,
2771 struct dt_object_format *dof,
2774 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2775 struct osd_object *obj = osd_dt_obj(dt);
2776 struct osd_thread_info *info = osd_oti_get(env);
2781 if (dt_object_exists(dt))
2784 LASSERT(osd_invariant(obj));
2785 LASSERT(!dt_object_remote(dt));
2786 LASSERT(osd_write_locked(env, obj));
2787 LASSERT(th != NULL);
2789 if (unlikely(fid_is_acct(fid)))
2790 /* Quota files can't be created from the kernel any more,
2791 * 'tune2fs -O quota' will take care of creating them */
2794 osd_trans_exec_op(env, th, OSD_OT_CREATE);
2795 osd_trans_declare_rb(env, th, OSD_OT_REF_ADD);
2797 result = __osd_object_create(info, obj, attr, hint, dof, th);
2799 if (fid_is_idif(fid) &&
2800 !osd_dev(dt->do_lu.lo_dev)->od_index_in_idif) {
2801 struct lu_fid *tfid = &info->oti_fid;
2802 struct ost_id *oi = &info->oti_ostid;
2804 fid_to_ostid(fid, oi);
2805 ostid_to_fid(tfid, oi, 0);
2806 result = osd_ea_fid_set(info, obj->oo_inode, tfid,
2807 LMAC_FID_ON_OST, 0);
2809 result = osd_ea_fid_set(info, obj->oo_inode, fid,
2810 fid_is_on_ost(info, osd_obj2dev(obj),
2811 fid, OI_CHECK_FLD) ?
2812 LMAC_FID_ON_OST : 0, 0);
2817 result = __osd_oi_insert(env, obj, fid, th);
2819 LASSERT(ergo(result == 0,
2820 dt_object_exists(dt) && !dt_object_remote(dt)));
2821 LINVRNT(osd_invariant(obj));
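/*
 * Declare the credits needed to increase an object's link count; only a base
 * attribute update on the inode is required.
 */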
2825 static int osd_declare_object_ref_add(const struct lu_env *env,
2826 struct dt_object *dt,
2827 struct thandle *handle)
2829 struct osd_thandle *oh;
2831 /* it's possible that object doesn't exist yet */
2832 LASSERT(handle != NULL);
2834 oh = container_of0(handle, struct osd_thandle, ot_super);
2835 LASSERT(oh->ot_handle == NULL);
2837 osd_trans_declare_op(env, oh, OSD_OT_REF_ADD,
2838 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2844 * Concurrency: @dt is write locked.
2846 static int osd_object_ref_add(const struct lu_env *env,
2847 struct dt_object *dt, struct thandle *th)
2849 struct osd_object *obj = osd_dt_obj(dt);
2850 struct inode *inode = obj->oo_inode;
2851 struct osd_thandle *oh;
2854 if (!dt_object_exists(dt))
2857 LINVRNT(osd_invariant(obj));
2858 LASSERT(!dt_object_remote(dt));
2859 LASSERT(osd_write_locked(env, obj));
2860 LASSERT(th != NULL);
2862 oh = container_of0(th, struct osd_thandle, ot_super);
2863 LASSERT(oh->ot_handle != NULL);
2865 osd_trans_exec_op(env, th, OSD_OT_REF_ADD);
2867 CDEBUG(D_INODE, DFID" increase nlink %d\n",
2868 PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
2870 * The DIR_NLINK feature allows directories to exceed LDISKFS_LINK_MAX
2871 * (65000) subdirectories by storing "1" in i_nlink if the link count
2872 * would otherwise overflow. Directory traversal tools understand
2873 * that (st_nlink == 1) indicates that the filesystem does not track
2874 * the hard link count on the directory, and will not abort subdirectory
2875 * scanning early once (st_nlink - 2) subdirs have been found.
2877 * This also has to properly handle the case of inodes with nlink == 0
2878 * in case they are being linked into the PENDING directory
2880 spin_lock(&obj->oo_guard);
2881 if (unlikely(inode->i_nlink == 0))
2882 /* inc_nlink from 0 may cause WARN_ON */
2883 set_nlink(inode, 1);
2885 ldiskfs_inc_count(oh->ot_handle, inode);
2886 if (!S_ISDIR(inode->i_mode))
2887 LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
2889 spin_unlock(&obj->oo_guard);
2891 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2892 LINVRNT(osd_invariant(obj));
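/*
 * Declare the credits needed to decrease an object's link count; as with
 * ref_add this is charged as a base attribute update.
 */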
2897 static int osd_declare_object_ref_del(const struct lu_env *env,
2898 struct dt_object *dt,
2899 struct thandle *handle)
2901 struct osd_thandle *oh;
2903 LASSERT(!dt_object_remote(dt));
2904 LASSERT(handle != NULL);
2906 oh = container_of0(handle, struct osd_thandle, ot_super);
2907 LASSERT(oh->ot_handle == NULL);
2909 osd_trans_declare_op(env, oh, OSD_OT_REF_DEL,
2910 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2916 * Concurrency: @dt is write locked.
2918 static int osd_object_ref_del(const struct lu_env *env, struct dt_object *dt,
2921 struct osd_object *obj = osd_dt_obj(dt);
2922 struct inode *inode = obj->oo_inode;
2923 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
2924 struct osd_thandle *oh;
2926 if (!dt_object_exists(dt))
2929 LINVRNT(osd_invariant(obj));
2930 LASSERT(!dt_object_remote(dt));
2931 LASSERT(osd_write_locked(env, obj));
2932 LASSERT(th != NULL);
2934 oh = container_of0(th, struct osd_thandle, ot_super);
2935 LASSERT(oh->ot_handle != NULL);
2937 osd_trans_exec_op(env, th, OSD_OT_REF_DEL);
2939 spin_lock(&obj->oo_guard);
2940 /* This can be the result of an upgrade from an old Lustre version and
2941 * applies only to local files. Just skip this ref_del call.
2942 * ext4_unlink() only treats this as a warning, so don't LASSERT here. */
2943 if (inode->i_nlink == 0) {
2944 CDEBUG_LIMIT(fid_is_norm(lu_object_fid(&dt->do_lu)) ?
2945 D_ERROR : D_INODE, "%s: nlink == 0 on "DFID
2946 ", maybe an upgraded file? (LU-3915)\n",
2947 osd_name(osd), PFID(lu_object_fid(&dt->do_lu)));
2948 spin_unlock(&obj->oo_guard);
2952 CDEBUG(D_INODE, DFID" decrease nlink %d\n",
2953 PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
2955 ldiskfs_dec_count(oh->ot_handle, inode);
2956 spin_unlock(&obj->oo_guard);
2958 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2959 LINVRNT(osd_invariant(obj));
2965 * Get the 64-bit version for an inode.
2967 static int osd_object_version_get(const struct lu_env *env,
2968 struct dt_object *dt, dt_obj_version_t *ver)
2970 struct inode *inode = osd_dt_obj(dt)->oo_inode;
2972 CDEBUG(D_INODE, "Get version "LPX64" for inode %lu\n",
2973 LDISKFS_I(inode)->i_fs_version, inode->i_ino);
2974 *ver = LDISKFS_I(inode)->i_fs_version;
2979 * Concurrency: @dt is read locked.
2981 static int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
2982 struct lu_buf *buf, const char *name,
2983 struct lustre_capa *capa)
2985 struct osd_object *obj = osd_dt_obj(dt);
2986 struct inode *inode = obj->oo_inode;
2987 struct osd_thread_info *info = osd_oti_get(env);
2988 struct dentry *dentry = &info->oti_obj_dentry;
2990 /* version get is not a real XATTR but uses the xattr API */
2991 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2992 /* for version we are just using the xattr API but read the inode field instead */
2994 if (buf->lb_len == 0)
2995 return sizeof(dt_obj_version_t);
2997 if (buf->lb_len < sizeof(dt_obj_version_t))
3000 osd_object_version_get(env, dt, buf->lb_buf);
3002 return sizeof(dt_obj_version_t);
3005 if (!dt_object_exists(dt))
3008 LASSERT(!dt_object_remote(dt));
3009 LASSERT(inode->i_op != NULL);
3010 LASSERT(inode->i_op->getxattr != NULL);
3012 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
3015 return __osd_xattr_get(inode, dentry, name, buf->lb_buf, buf->lb_len);
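/*
 * Declare the credits needed to set an extended attribute. LMA and version
 * updates are handled specially (see the branches below); other xattrs are
 * charged DTO_XATTR_SET credits, scaled up when the value spans more than one
 * filesystem block, plus the credits dquot_initialize() may need.
 */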
3019 static int osd_declare_xattr_set(const struct lu_env *env,
3020 struct dt_object *dt,
3021 const struct lu_buf *buf, const char *name,
3022 int fl, struct thandle *handle)
3024 struct osd_thandle *oh;
3026 struct super_block *sb = osd_sb(osd_dev(dt->do_lu.lo_dev));
3028 LASSERT(handle != NULL);
3030 oh = container_of0(handle, struct osd_thandle, ot_super);
3031 LASSERT(oh->ot_handle == NULL);
3033 /* optimistic optimization: LMA is set first and usually fits in the inode */
3034 if (strcmp(name, XATTR_NAME_LMA) == 0) {
3035 if (dt_object_exists(dt))
3039 } else if (strcmp(name, XATTR_NAME_VERSION) == 0) {
3042 credits = osd_dto_credits_noquota[DTO_XATTR_SET];
3043 if (buf && buf->lb_len > sb->s_blocksize) {
3044 credits *= (buf->lb_len + sb->s_blocksize - 1) >>
3045 sb->s_blocksize_bits;
3048 * xattr set may involve inode quota change, reserve credits for
3049 * dquot_initialize()
3051 oh->ot_credits += LDISKFS_MAXQUOTAS_INIT_BLOCKS(sb);
3054 osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET, credits);
3060 * Set the 64-bit version for an object
3062 static void osd_object_version_set(const struct lu_env *env,
3063 struct dt_object *dt,
3064 dt_obj_version_t *new_version)
3066 struct inode *inode = osd_dt_obj(dt)->oo_inode;
3068 CDEBUG(D_INODE, "Set version "LPX64" (old "LPX64") for inode %lu\n",
3069 *new_version, LDISKFS_I(inode)->i_fs_version, inode->i_ino);
3071 LDISKFS_I(inode)->i_fs_version = *new_version;
3072 /** Version is set after all inode operations are finished,
3073 * so we should mark it dirty here */
3074 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
3078 * Concurrency: @dt is write locked.
3080 static int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
3081 const struct lu_buf *buf, const char *name, int fl,
3082 struct thandle *handle, struct lustre_capa *capa)
3084 struct osd_object *obj = osd_dt_obj(dt);
3085 struct inode *inode = obj->oo_inode;
3086 struct osd_thread_info *info = osd_oti_get(env);
3090 LASSERT(handle != NULL);
3092 /* version set is not a real XATTR */
3093 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
3094 /* for version we are just using the xattr API but update the inode field instead */
3096 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
3097 osd_object_version_set(env, dt, buf->lb_buf);
3098 return sizeof(dt_obj_version_t);
3101 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
3104 CDEBUG(D_INODE, DFID" set xattr '%s' with size %zu\n",
3105 PFID(lu_object_fid(&dt->do_lu)), name, buf->lb_len);
3107 osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
3108 if (fl & LU_XATTR_REPLACE)
3109 fs_flags |= XATTR_REPLACE;
3111 if (fl & LU_XATTR_CREATE)
3112 fs_flags |= XATTR_CREATE;
3114 if (strcmp(name, XATTR_NAME_LMV) == 0) {
3115 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
3118 rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
3122 lma->lma_incompat |= LMAI_STRIPED;
3123 lustre_lma_swab(lma);
3124 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
3125 sizeof(*lma), XATTR_REPLACE);
3130 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LINKEA_OVERFLOW) &&
3131 strcmp(name, XATTR_NAME_LINK) == 0)
3134 return __osd_xattr_set(info, inode, name, buf->lb_buf, buf->lb_len,
3139 * Concurrency: @dt is read locked.
3141 static int osd_xattr_list(const struct lu_env *env, struct dt_object *dt,
3142 const struct lu_buf *buf, struct lustre_capa *capa)
3144 struct osd_object *obj = osd_dt_obj(dt);
3145 struct inode *inode = obj->oo_inode;
3146 struct osd_thread_info *info = osd_oti_get(env);
3147 struct dentry *dentry = &info->oti_obj_dentry;
3149 if (!dt_object_exists(dt))
3152 LASSERT(!dt_object_remote(dt));
3153 LASSERT(inode->i_op != NULL);
3154 LASSERT(inode->i_op->listxattr != NULL);
3156 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
3159 dentry->d_inode = inode;
3160 dentry->d_sb = inode->i_sb;
3161 return inode->i_op->listxattr(dentry, buf->lb_buf, buf->lb_len);
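/*
 * Declare the transaction credits needed to delete the named extended
 * attribute from the object.
 */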
3164 static int osd_declare_xattr_del(const struct lu_env *env,
3165 struct dt_object *dt, const char *name,
3166 struct thandle *handle)