4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2013, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/osd/osd_handler.c
38 * Top-level entry points into osd module
40 * Author: Nikita Danilov <nikita@clusterfs.com>
41 * Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
44 #define DEBUG_SUBSYSTEM S_OSD
46 #include <linux/module.h>
48 /* LUSTRE_VERSION_CODE */
49 #include <lustre_ver.h>
50 /* prerequisite for linux/xattr.h */
51 #include <linux/types.h>
52 /* prerequisite for linux/xattr.h */
54 /* XATTR_{REPLACE,CREATE} */
55 #include <linux/xattr.h>
58 * struct OBD_{ALLOC,FREE}*()
61 #include <obd_support.h>
62 /* struct ptlrpc_thread */
63 #include <lustre_net.h>
64 #include <lustre_fid.h>
66 #include <lustre_param.h>
68 #include "osd_internal.h"
69 #include "osd_dynlocks.h"
71 /* llo_* api support */
72 #include <md_object.h>
73 #include <lustre_quota.h>
75 #include <ldiskfs/xattr.h>
76 #include <lustre_linkea.h>
79 CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
80 "ldiskfs with parallel directory operations");
82 int ldiskfs_track_declares_assert;
83 CFS_MODULE_PARM(ldiskfs_track_declares_assert, "i", int, 0644,
84 "LBUG during tracking of declares");
86 /* Slab to allocate dynlocks */
87 struct kmem_cache *dynlock_cachep;
89 static struct lu_kmem_descr ldiskfs_caches[] = {
91 .ckd_cache = &dynlock_cachep,
92 .ckd_name = "dynlock_cache",
93 .ckd_size = sizeof(struct dynlock_handle)
100 static const char dot[] = ".";
101 static const char dotdot[] = "..";
102 static const char remote_obj_dir[] = "REM_OBJ_DIR";
104 static const struct lu_object_operations osd_lu_obj_ops;
105 static const struct dt_object_operations osd_obj_ops;
106 static const struct dt_object_operations osd_obj_ea_ops;
107 static const struct dt_object_operations osd_obj_otable_it_ops;
108 static const struct dt_index_operations osd_index_iam_ops;
109 static const struct dt_index_operations osd_index_ea_ops;
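/* Maps each declared operation type to the operation used to roll it back
 * if the transaction has to be undone; OSD_OT_MAX apparently marks
 * operations that cannot be rolled back. */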
111 int osd_trans_declare_op2rb[] = {
112 [OSD_OT_ATTR_SET] = OSD_OT_ATTR_SET,
113 [OSD_OT_PUNCH] = OSD_OT_MAX,
114 [OSD_OT_XATTR_SET] = OSD_OT_XATTR_SET,
115 [OSD_OT_CREATE] = OSD_OT_DESTROY,
116 [OSD_OT_DESTROY] = OSD_OT_CREATE,
117 [OSD_OT_REF_ADD] = OSD_OT_REF_DEL,
118 [OSD_OT_REF_DEL] = OSD_OT_REF_ADD,
119 [OSD_OT_WRITE] = OSD_OT_WRITE,
120 [OSD_OT_INSERT] = OSD_OT_DELETE,
121 [OSD_OT_DELETE] = OSD_OT_INSERT,
122 [OSD_OT_UPDATE] = OSD_OT_MAX,
123 [OSD_OT_QUOTA] = OSD_OT_MAX,
126 static int osd_has_index(const struct osd_object *obj)
128 return obj->oo_dt.do_index_ops != NULL;
131 static int osd_object_invariant(const struct lu_object *l)
133 return osd_invariant(osd_obj(l));
137 * Concurrency: doesn't matter
141 * Concurrency: doesn't matter
143 static int osd_write_locked(const struct lu_env *env, struct osd_object *o)
145 struct osd_thread_info *oti = osd_oti_get(env);
146 return oti->oti_w_locks > 0 && o->oo_owner == env;
150 * Concurrency: doesn't access mutable data
152 static int osd_root_get(const struct lu_env *env,
153 struct dt_device *dev, struct lu_fid *f)
155 lu_local_obj_fid(f, OSD_FS_ROOT_OID);
160 * OSD object methods.
164 * Concurrency: no concurrent access is possible that early in object
167 static struct lu_object *osd_object_alloc(const struct lu_env *env,
168 const struct lu_object_header *hdr,
171 struct osd_object *mo;
177 l = &mo->oo_dt.do_lu;
178 dt_object_init(&mo->oo_dt, NULL, d);
179 mo->oo_dt.do_ops = &osd_obj_ea_ops;
180 l->lo_ops = &osd_lu_obj_ops;
181 init_rwsem(&mo->oo_sem);
182 init_rwsem(&mo->oo_ext_idx_sem);
183 spin_lock_init(&mo->oo_guard);
190 int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
191 struct dentry *dentry, struct lustre_mdt_attrs *lma)
195 CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
196 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
197 info->oti_mdt_attrs_old, LMA_OLD_SIZE);
199 if ((void *)lma != (void *)info->oti_mdt_attrs_old)
200 memcpy(lma, info->oti_mdt_attrs_old, sizeof(*lma));
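/* The LMA is stored little-endian on disk; convert it to CPU byte order
 * before the compatibility flags are checked below. */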
202 lustre_lma_swab(lma);
203 /* Check LMA compatibility */
204 if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
205 CWARN("%.16s: unsupported incompat LMA feature(s) %#x "
206 "for fid = "DFID", ino = %lu\n",
207 LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
208 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
209 PFID(&lma->lma_self_fid), inode->i_ino);
212 } else if (rc == 0) {
220 * Retrieve an object from the backend ldiskfs filesystem.
222 struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
223 struct osd_inode_id *id)
225 struct inode *inode = NULL;
227 inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
229 CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
230 id->oii_ino, PTR_ERR(inode));
231 } else if (id->oii_gen != OSD_OII_NOGEN &&
232 inode->i_generation != id->oii_gen) {
233 CDEBUG(D_INODE, "unmatched inode: ino = %u, oii_gen = %u, "
234 "i_generation = %u\n",
235 id->oii_ino, id->oii_gen, inode->i_generation);
237 inode = ERR_PTR(-ESTALE);
238 } else if (inode->i_nlink == 0) {
239 /* Due to a parallel readdir and unlink,
240 * we can find a dead inode here. */
241 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
243 inode = ERR_PTR(-ESTALE);
244 } else if (is_bad_inode(inode)) {
245 CWARN("%.16s: bad inode: ino = %u\n",
246 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, id->oii_ino);
248 inode = ERR_PTR(-ENOENT);
250 if (id->oii_gen == OSD_OII_NOGEN)
251 osd_id_gen(id, inode->i_ino, inode->i_generation);
253 /* Do not update file c/mtime in ldiskfs.
254 * NB: we don't have any lock to protect this because we don't
255 * hold a reference on the osd_object yet, but contention with
256 * another lookup + attr_set can't happen in the tiny window
257 * between the check and setting S_NOCMTIME. */
258 if (!(inode->i_flags & S_NOCMTIME))
259 inode->i_flags |= S_NOCMTIME;
264 static struct inode *
265 osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
266 struct osd_inode_id *id, struct lu_fid *fid)
268 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
272 inode = osd_iget(info, dev, id);
276 rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
278 *fid = lma->lma_self_fid;
279 } else if (rc == -ENODATA) {
280 if (unlikely(inode == osd_sb(dev)->s_root->d_inode))
281 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
283 lu_igif_build(fid, inode->i_ino, inode->i_generation);
291 static struct inode *osd_iget_check(struct osd_thread_info *info,
292 struct osd_device *dev,
293 const struct lu_fid *fid,
294 struct osd_inode_id *id,
301 inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
304 if (!in_oi || (rc != -ENOENT && rc != -ESTALE)) {
305 CDEBUG(D_INODE, "no inode: ino = %u, rc = %d\n",
314 if (is_bad_inode(inode)) {
317 CDEBUG(D_INODE, "bad inode: ino = %u\n", id->oii_ino);
325 if (id->oii_gen != OSD_OII_NOGEN &&
326 inode->i_generation != id->oii_gen) {
329 CDEBUG(D_INODE, "unmatched inode: ino = %u, "
330 "oii_gen = %u, i_generation = %u\n",
331 id->oii_ino, id->oii_gen, inode->i_generation);
339 if (inode->i_nlink == 0) {
342 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
352 LASSERTF(rc == -ESTALE || rc == -ENOENT, "rc = %d\n", rc);
354 rc = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
355 /* XXX: There are some possible cases:
357 * 1. Backup/restore made the OI mapping invalid.
359 * 2. Someone unlinked the object but did NOT remove
360 * the OI mapping, e.g. the target device was mounted
361 * as ldiskfs and something was modified directly.
363 * 3. Someone just removed the object between the
364 * former oi_lookup and the iget. This is normal.
365 * 4. Other failure cases.
367 * Generally, when the device is mounted, it will
368 * automatically check whether the system was restored from
369 * a file-level backup or not. We rely on that detection
370 * to distinguish the 1st case from the 2nd case. */
372 if (!IS_ERR(inode) && inode->i_generation != 0 &&
373 inode->i_generation == id->oii_gen)
379 if (id->oii_gen == OSD_OII_NOGEN)
380 osd_id_gen(id, inode->i_ino, inode->i_generation);
382 /* Do not update file c/mtime in ldiskfs.
383 * NB: we don't have any lock to protect this because we don't
384 * hold a reference on the osd_object yet, but contention with
385 * another lookup + attr_set can't happen in the tiny window
386 * between the check and setting S_NOCMTIME. */
387 if (!(inode->i_flags & S_NOCMTIME))
388 inode->i_flags |= S_NOCMTIME;
405 * \retval +v: new filter_fid, does not contain self-fid
406 * \retval 0: filter_fid_old, contains self-fid
407 * \retval -v: other failure cases
409 int osd_get_idif(struct osd_thread_info *info, struct inode *inode,
410 struct dentry *dentry, struct lu_fid *fid)
412 struct filter_fid_old *ff = &info->oti_ff;
413 struct ost_id *ostid = &info->oti_ostid;
416 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_FID, ff, sizeof(*ff));
417 if (rc == sizeof(*ff)) {
419 ostid_set_seq(ostid, le64_to_cpu(ff->ff_seq));
420 ostid_set_id(ostid, le64_to_cpu(ff->ff_objid));
421 /* XXX: should use real OST index in the future. LU-3569 */
422 ostid_to_fid(fid, ostid, 0);
423 } else if (rc == sizeof(struct filter_fid)) {
425 } else if (rc >= 0) {
432 static int osd_lma_self_repair(struct osd_thread_info *info,
433 struct osd_device *osd, struct inode *inode,
434 const struct lu_fid *fid, __u32 compat)
439 LASSERT(current->journal_info == NULL);
441 jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
442 osd_dto_credits_noquota[DTO_XATTR_SET]);
445 CWARN("%s: cannot start journal for lma_self_repair: rc = %d\n",
450 rc = osd_ea_fid_set(info, inode, fid, compat, 0);
452 CWARN("%s: cannot self repair the LMA: rc = %d\n",
454 ldiskfs_journal_stop(jh);
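/* Note: the self-repair runs in its own short journal handle; the LASSERT
 * above guarantees the caller holds no transaction at this point. */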
458 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
460 struct osd_thread_info *info = osd_oti_get(env);
461 struct osd_device *osd = osd_obj2dev(obj);
462 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
463 struct inode *inode = obj->oo_inode;
464 struct dentry *dentry = &info->oti_obj_dentry;
465 struct lu_fid *fid = NULL;
466 const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
470 CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
471 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
472 info->oti_mdt_attrs_old, LMA_OLD_SIZE);
473 if (rc == -ENODATA && !fid_is_igif(rfid) && osd->od_check_ff) {
474 fid = &lma->lma_self_fid;
475 rc = osd_get_idif(info, inode, dentry, fid);
476 if ((rc > 0) || (rc == -ENODATA && osd->od_lma_self_repair)) {
477 /* For the given OST-object, if it has neither an LMA nor
478 * a FID in XATTR_NAME_FID, then the given FID (which is
479 * contained in the @obj, from the client RPC that located
480 * the OST-object) is trusted and used to generate the LMA. */
482 osd_lma_self_repair(info, osd, inode, rfid,
483 fid_is_on_ost(info, osd, fid, OI_CHECK_FLD) ?
484 LMAC_FID_ON_OST : 0);
489 if (unlikely(rc == -ENODATA))
497 lustre_lma_swab(lma);
498 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
499 CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
500 CWARN("%s: unsupported incompat LMA feature(s) %#x for "
501 "fid = "DFID", ino = %lu\n", osd_name(osd),
502 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
503 PFID(rfid), inode->i_ino);
505 } else if (!(lma->lma_compat & LMAC_NOT_IN_OI)) {
506 fid = &lma->lma_self_fid;
510 if (fid != NULL && unlikely(!lu_fid_eq(rfid, fid))) {
511 if (fid_is_idif(rfid) && fid_is_idif(fid)) {
512 struct ost_id *oi = &info->oti_ostid;
513 struct lu_fid *fid1 = &info->oti_fid3;
514 __u32 idx = fid_idif_ost_idx(rfid);
516 /* For an old IDIF, the OST index is not part of the IDIF,
517 * which means that different OSTs may have the same IDIFs.
518 * In such a case, we need a compatibility check to make
519 * sure the OI scrub is triggered properly. */
520 if (idx != 0 && fid_idif_ost_idx(fid) == 0) {
521 /* Given @rfid is new, LMA is old. */
522 fid_to_ostid(fid, oi);
523 ostid_to_fid(fid1, oi, idx);
524 if (lu_fid_eq(fid1, rfid)) {
525 if (osd->od_lma_self_repair)
526 osd_lma_self_repair(info, osd,
534 CDEBUG(D_INODE, "%s: FID "DFID" != self_fid "DFID"\n",
535 osd_name(osd), PFID(rfid), PFID(fid));
542 static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
543 const struct lu_fid *fid,
544 const struct lu_object_conf *conf)
546 struct osd_thread_info *info;
547 struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
548 struct osd_device *dev;
549 struct osd_idmap_cache *oic;
550 struct osd_inode_id *id;
552 struct osd_scrub *scrub;
553 struct scrub_file *sf;
557 bool triggered = false;
560 LINVRNT(osd_invariant(obj));
561 LASSERT(obj->oo_inode == NULL);
562 LASSERTF(fid_is_sane(fid) || fid_is_idif(fid), DFID"\n", PFID(fid));
565 scrub = &dev->od_scrub;
566 sf = &scrub->os_file;
567 info = osd_oti_get(env);
569 oic = &info->oti_cache;
571 if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
574 /* The object is either created as a locking anchor or is about to
575 * be created on disk. No need to call osd_oi_lookup() here because a
576 * FID should never be re-used; if it really is a duplicate FID for
577 * some unexpected reason, we will be able to detect it later via
578 * do_create->osd_oi_insert(). */
579 if (conf != NULL && conf->loc_flags & LOC_F_NEW)
580 GOTO(out, result = 0);
582 /* Search order: 1. per-thread cache. */
583 if (lu_fid_eq(fid, &oic->oic_fid) &&
584 likely(oic->oic_dev == dev)) {
590 if (!list_empty(&scrub->os_inconsistent_items)) {
591 /* Search order: 2. OI scrub pending list. */
592 result = osd_oii_lookup(dev, fid, id);
597 /* Search order: 3. OI files. */
598 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
599 if (result == -ENOENT) {
600 if (!fid_is_norm(fid) ||
601 fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) ||
602 !ldiskfs_test_bit(osd_oi_fid2idx(dev,fid),
604 GOTO(out, result = 0);
615 inode = osd_iget_check(info, dev, fid, id, in_oi);
617 result = PTR_ERR(inode);
618 if (result == -ENOENT || result == -ESTALE) {
620 fid_zero(&oic->oic_fid);
622 GOTO(out, result = -ENOENT);
623 } else if (result == -EREMCHG) {
627 fid_zero(&oic->oic_fid);
629 if (unlikely(triggered))
630 GOTO(out, result = saved);
633 if (thread_is_running(&scrub->os_thread)) {
634 result = -EINPROGRESS;
635 } else if (!dev->od_noscrub) {
636 /* Since we do not know the right OI mapping, we have
637 * to trigger OI scrub to scan the whole device. */
639 result = osd_scrub_start(dev, SS_AUTO_FULL |
640 SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT);
641 LCONSOLE_WARN("%.16s: trigger OI scrub by RPC "
642 "for "DFID", rc = %d [1]\n",
643 osd_name(dev), PFID(fid), result);
644 if (result == 0 || result == -EALREADY)
645 result = -EINPROGRESS;
650 /* We still have a chance to get a valid inode: for an
651 * object which is referenced by a remote name entry, the
652 * object on the local MDT will be linked under the dir
653 * "/REMOTE_PARENT_DIR" with its FID string as the name.
655 * We do not know whether the object for the given FID
656 * is referenced by some remote name entry or not, and
657 * especially for DNE II, a multiply-linked object may
658 * have many name entries residing on many MDTs.
660 * To simplify the operation, OSD does not distinguish
661 * further and just looks up "/REMOTE_PARENT_DIR". Usually this
662 * only happens for RPCs from other MDTs during the
663 * OI scrub, or for client-side RPCs carrying only a FID,
664 * such as FID to path, or from an old connected client. */
666 result = osd_lookup_in_remote_parent(info, dev,
679 obj->oo_inode = inode;
680 LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
682 result = osd_check_lma(env, obj);
685 obj->oo_inode = NULL;
686 if (result == -EREMCHG) {
688 result = osd_oi_lookup(info, dev, fid, id,
691 fid_zero(&oic->oic_fid);
702 obj->oo_compat_dot_created = 1;
703 obj->oo_compat_dotdot_created = 1;
705 if (!S_ISDIR(inode->i_mode) || !ldiskfs_pdo) /* done */
706 GOTO(out, result = 0);
708 LASSERT(obj->oo_hl_head == NULL);
709 obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
710 if (obj->oo_hl_head == NULL) {
711 obj->oo_inode = NULL;
713 GOTO(out, result = -ENOMEM);
715 GOTO(out, result = 0);
718 LINVRNT(osd_invariant(obj));
723 * Concurrency: shouldn't matter.
725 static void osd_object_init0(struct osd_object *obj)
727 LASSERT(obj->oo_inode != NULL);
728 obj->oo_dt.do_body_ops = &osd_body_ops;
729 obj->oo_dt.do_lu.lo_header->loh_attr |=
730 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
734 * Concurrency: no concurrent access is possible that early in object
737 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
738 const struct lu_object_conf *conf)
740 struct osd_object *obj = osd_obj(l);
743 LINVRNT(osd_invariant(obj));
745 if (fid_is_otable_it(&l->lo_header->loh_fid)) {
746 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
747 l->lo_header->loh_attr |= LOHA_EXISTS;
751 result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
752 obj->oo_dt.do_body_ops = &osd_body_ops_new;
753 if (result == 0 && obj->oo_inode != NULL)
754 osd_object_init0(obj);
756 LINVRNT(osd_invariant(obj));
761 * Concurrency: no concurrent access is possible that late in object
764 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
766 struct osd_object *obj = osd_obj(l);
768 LINVRNT(osd_invariant(obj));
770 dt_object_fini(&obj->oo_dt);
771 if (obj->oo_hl_head != NULL)
772 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
777 * Concurrency: no concurrent access is possible that late in object
780 static void osd_index_fini(struct osd_object *o)
782 struct iam_container *bag;
784 if (o->oo_dir != NULL) {
785 bag = &o->oo_dir->od_container;
786 if (o->oo_inode != NULL) {
787 if (bag->ic_object == o->oo_inode)
788 iam_container_fini(bag);
790 OBD_FREE_PTR(o->oo_dir);
796 * Concurrency: no concurrent access is possible that late in object
797 * life-cycle (for all existing callers, that is. New callers have to provide
798 * their own locking.)
800 static int osd_inode_unlinked(const struct inode *inode)
802 return inode->i_nlink == 0;
806 OSD_TXN_OI_DELETE_CREDITS = 20,
807 OSD_TXN_INODE_DELETE_CREDITS = 20
814 #if OSD_THANDLE_STATS
816 * Set time when the handle is allocated
818 static void osd_th_alloced(struct osd_thandle *oth)
820 oth->oth_alloced = cfs_time_current();
824 * Set time when the handle started
826 static void osd_th_started(struct osd_thandle *oth)
828 oth->oth_started = cfs_time_current();
832 * Helper function to convert a time interval to microseconds.
835 static long interval_to_usec(cfs_time_t start, cfs_time_t end)
839 cfs_duration_usec(cfs_time_sub(end, start), &val);
840 return val.tv_sec * 1000000 + val.tv_usec;
844 * Check whether we have been dealing with this handle for too long.
846 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
847 cfs_time_t alloced, cfs_time_t started,
850 cfs_time_t now = cfs_time_current();
852 LASSERT(dev != NULL);
854 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
855 interval_to_usec(alloced, started));
856 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
857 interval_to_usec(started, closed));
858 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
859 interval_to_usec(closed, now));
861 if (cfs_time_before(cfs_time_add(alloced, cfs_time_seconds(30)), now)) {
862 CWARN("transaction handle %p was open for too long: "
864 "alloced "CFS_TIME_T" ,"
865 "started "CFS_TIME_T" ,"
866 "closed "CFS_TIME_T"\n",
867 oth, now, alloced, started, closed);
868 libcfs_debug_dumpstack(NULL);
872 #define OSD_CHECK_SLOW_TH(oth, dev, expr) \
874 cfs_time_t __closed = cfs_time_current(); \
875 cfs_time_t __alloced = oth->oth_alloced; \
876 cfs_time_t __started = oth->oth_started; \
879 __osd_th_check_slow(oth, dev, __alloced, __started, __closed); \
882 #else /* OSD_THANDLE_STATS */
884 #define osd_th_alloced(h) do {} while(0)
885 #define osd_th_started(h) do {} while(0)
886 #define OSD_CHECK_SLOW_TH(oth, dev, expr) expr
888 #endif /* OSD_THANDLE_STATS */
891 * Concurrency: doesn't access mutable data.
893 static int osd_param_is_not_sane(const struct osd_device *dev,
894 const struct thandle *th)
896 struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
898 return oh->ot_credits > osd_transaction_size(dev);
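/* A transaction that declares more credits than the journal can provide is
 * reported and clamped to the journal size by osd_trans_start() below. */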
902 * Concurrency: shouldn't matter.
904 static void osd_trans_commit_cb(struct super_block *sb,
905 struct ldiskfs_journal_cb_entry *jcb, int error)
907 struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
908 struct thandle *th = &oh->ot_super;
909 struct lu_device *lud = &th->th_dev->dd_lu_dev;
910 struct dt_txn_commit_cb *dcb, *tmp;
912 LASSERT(oh->ot_handle == NULL);
915 CERROR("transaction @0x%p commit error: %d\n", th, error);
917 dt_txn_hook_commit(th);
919 /* call per-transaction callbacks if any */
920 list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
921 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
922 "commit callback entry: magic=%x name='%s'\n",
923 dcb->dcb_magic, dcb->dcb_name);
924 list_del_init(&dcb->dcb_linkage);
925 dcb->dcb_func(NULL, th, dcb, error);
928 lu_ref_del_at(&lud->ld_reference, &oh->ot_dev_link, "osd-tx", th);
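/* Tear down the per-transaction lu_context that osd_trans_start() set up. */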
932 lu_context_exit(&th->th_ctx);
933 lu_context_fini(&th->th_ctx);
937 static struct thandle *osd_trans_create(const struct lu_env *env,
940 struct osd_thread_info *oti = osd_oti_get(env);
941 struct osd_iobuf *iobuf = &oti->oti_iobuf;
942 struct osd_thandle *oh;
946 /* no pending IO from a previous request should be left in this thread */
947 LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
949 th = ERR_PTR(-ENOMEM);
950 OBD_ALLOC_GFP(oh, sizeof *oh, GFP_NOFS);
952 oh->ot_quota_trans = &oti->oti_quota_trans;
953 memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
957 th->th_tags = LCT_TX_HANDLE;
959 atomic_set(&th->th_refc, 1);
960 th->th_alloc_size = sizeof(*oh);
961 oti->oti_dev = osd_dt_dev(d);
962 INIT_LIST_HEAD(&oh->ot_dcb_list);
965 memset(oti->oti_declare_ops, 0,
966 sizeof(oti->oti_declare_ops));
967 memset(oti->oti_declare_ops_rb, 0,
968 sizeof(oti->oti_declare_ops_rb));
969 memset(oti->oti_declare_ops_cred, 0,
970 sizeof(oti->oti_declare_ops_cred));
971 oti->oti_rollback = false;
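/* The per-operation declare counters cleared above feed the diagnostics
 * printed by osd_trans_start() when too many credits are requested. */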
977 * Concurrency: shouldn't matter.
979 static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
982 struct osd_thread_info *oti = osd_oti_get(env);
983 struct osd_device *dev = osd_dt_dev(d);
985 struct osd_thandle *oh;
990 LASSERT(current->journal_info == NULL);
992 oh = container_of0(th, struct osd_thandle, ot_super);
994 LASSERT(oh->ot_handle == NULL);
996 rc = dt_txn_hook_start(env, d, th);
1000 if (unlikely(osd_param_is_not_sane(dev, th))) {
1001 static unsigned long last_printed;
1002 static int last_credits;
1004 CWARN("%.16s: too many transaction credits (%d > %d)\n",
1005 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
1007 osd_journal(dev)->j_max_transaction_buffers);
1008 CWARN(" create: %u/%u, destroy: %u/%u\n",
1009 oti->oti_declare_ops[OSD_OT_CREATE],
1010 oti->oti_declare_ops_cred[OSD_OT_CREATE],
1011 oti->oti_declare_ops[OSD_OT_DESTROY],
1012 oti->oti_declare_ops_cred[OSD_OT_DESTROY]);
1013 CWARN(" attr_set: %u/%u, xattr_set: %u/%u\n",
1014 oti->oti_declare_ops[OSD_OT_ATTR_SET],
1015 oti->oti_declare_ops_cred[OSD_OT_ATTR_SET],
1016 oti->oti_declare_ops[OSD_OT_XATTR_SET],
1017 oti->oti_declare_ops_cred[OSD_OT_XATTR_SET]);
1018 CWARN(" write: %u/%u, punch: %u/%u, quota %u/%u\n",
1019 oti->oti_declare_ops[OSD_OT_WRITE],
1020 oti->oti_declare_ops_cred[OSD_OT_WRITE],
1021 oti->oti_declare_ops[OSD_OT_PUNCH],
1022 oti->oti_declare_ops_cred[OSD_OT_PUNCH],
1023 oti->oti_declare_ops[OSD_OT_QUOTA],
1024 oti->oti_declare_ops_cred[OSD_OT_QUOTA]);
1025 CWARN(" insert: %u/%u, delete: %u/%u\n",
1026 oti->oti_declare_ops[OSD_OT_INSERT],
1027 oti->oti_declare_ops_cred[OSD_OT_INSERT],
1028 oti->oti_declare_ops[OSD_OT_DELETE],
1029 oti->oti_declare_ops_cred[OSD_OT_DELETE]);
1030 CWARN(" ref_add: %u/%u, ref_del: %u/%u\n",
1031 oti->oti_declare_ops[OSD_OT_REF_ADD],
1032 oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
1033 oti->oti_declare_ops[OSD_OT_REF_DEL],
1034 oti->oti_declare_ops_cred[OSD_OT_REF_DEL]);
1036 if (last_credits != oh->ot_credits &&
1037 time_after(jiffies, last_printed +
1038 msecs_to_jiffies(60 * MSEC_PER_SEC))) {
1039 libcfs_debug_dumpstack(NULL);
1040 last_credits = oh->ot_credits;
1041 last_printed = jiffies;
1043 /* XXX Limit the credits to 'max_transaction_buffers', and
1044 * let the underlying filesystem catch the error if
1045 * we really need that many credits.
1047 * This should be removed once we can calculate the
1048 * credits precisely. */
1049 oh->ot_credits = osd_transaction_size(dev);
1053 * XXX temporary stuff. Some abstraction layer should be used here.
1056 jh = osd_journal_start_sb(osd_sb(dev), LDISKFS_HT_MISC, oh->ot_credits);
1060 LASSERT(oti->oti_txns == 0);
1061 lu_context_init(&th->th_ctx, th->th_tags);
1062 lu_context_enter(&th->th_ctx);
1064 lu_device_get(&d->dd_lu_dev);
1065 lu_ref_add_at(&d->dd_lu_dev.ld_reference, &oh->ot_dev_link,
1076 static int osd_seq_exists(const struct lu_env *env,
1077 struct osd_device *osd, obd_seq seq)
1079 struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
1080 struct seq_server_site *ss = osd_seq_site(osd);
1084 LASSERT(ss != NULL);
1085 LASSERT(ss->ss_server_fld != NULL);
1087 rc = osd_fld_lookup(env, osd, seq, range);
1090 CERROR("%s: can't lookup FLD sequence "LPX64
1091 ": rc = %d\n", osd_name(osd), seq, rc);
1095 RETURN(ss->ss_node_id == range->lsr_index);
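/* The sequence is considered to exist locally only when the FLD maps it to
 * this node's index. */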
1099 * Concurrency: shouldn't matter.
1101 static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
1105 struct osd_thandle *oh;
1106 struct osd_thread_info *oti = osd_oti_get(env);
1107 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1108 struct qsd_instance *qsd = oti->oti_dev->od_quota_slave;
1109 struct lquota_trans *qtrans;
1112 oh = container_of0(th, struct osd_thandle, ot_super);
1114 qtrans = oh->ot_quota_trans;
1115 oh->ot_quota_trans = NULL;
1117 if (oh->ot_handle != NULL) {
1118 handle_t *hdl = oh->ot_handle;
1121 * add commit callback
1122 * notice we don't do this in osd_trans_start()
1123 * as underlying transaction can change during truncate
1125 ldiskfs_journal_callback_add(hdl, osd_trans_commit_cb,
1128 LASSERT(oti->oti_txns == 1);
1130 rc = dt_txn_hook_stop(env, th);
1132 CERROR("Failure in transaction hook: %d\n", rc);
1134 /* hook functions might modify th_sync */
1135 hdl->h_sync = th->th_sync;
1137 oh->ot_handle = NULL;
1138 OSD_CHECK_SLOW_TH(oh, oti->oti_dev,
1139 rc = ldiskfs_journal_stop(hdl));
1141 CERROR("Failure to stop transaction: %d\n", rc);
1143 thandle_put(&oh->ot_super);
1146 /* inform the quota slave device that the transaction is stopping */
1147 qsd_op_end(env, qsd, qtrans);
1149 /* As we want journal IO and data IO to be concurrent, we don't block
1150 * awaiting data IO completion in osd_do_bio(); instead we wait here
1151 * once the transaction is submitted to the journal. All regular requests
1152 * don't do direct IO (except read/write), so this wait_event is a no-op for them.
1155 * IMPORTANT: we have to wait till any IO submitted by the thread is
1156 * completed, otherwise the iobuf may be corrupted by a different request */
1158 wait_event(iobuf->dr_wait,
1159 atomic_read(&iobuf->dr_numreqs) == 0);
1160 osd_fini_iobuf(oti->oti_dev, iobuf);
1162 rc = iobuf->dr_error;
1167 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
1169 struct osd_thandle *oh = container_of0(th, struct osd_thandle,
1172 LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
1173 LASSERT(&dcb->dcb_func != NULL);
1174 list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
1180 * Called just before object is freed. Releases all resources except for
1181 * object itself (that is released by osd_object_free()).
1183 * Concurrency: no concurrent access is possible that late in object
1186 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
1188 struct osd_object *obj = osd_obj(l);
1189 struct inode *inode = obj->oo_inode;
1191 LINVRNT(osd_invariant(obj));
1194 * If object is unlinked remove fid->ino mapping from object index.
1197 osd_index_fini(obj);
1198 if (inode != NULL) {
1199 struct qsd_instance *qsd = osd_obj2dev(obj)->od_quota_slave;
1200 qid_t uid = i_uid_read(inode);
1201 qid_t gid = i_gid_read(inode);
1204 obj->oo_inode = NULL;
1207 struct osd_thread_info *info = osd_oti_get(env);
1208 struct lquota_id_info *qi = &info->oti_qi;
1210 /* Release granted quota to master if necessary */
1211 qi->lqi_id.qid_uid = uid;
1212 qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
1214 qi->lqi_id.qid_uid = gid;
1215 qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
1221 * Concurrency: ->loo_object_release() is called under site spin-lock.
1223 static void osd_object_release(const struct lu_env *env,
1224 struct lu_object *l)
1229 * Concurrency: shouldn't matter.
1231 static int osd_object_print(const struct lu_env *env, void *cookie,
1232 lu_printer_t p, const struct lu_object *l)
1234 struct osd_object *o = osd_obj(l);
1235 struct iam_descr *d;
1237 if (o->oo_dir != NULL)
1238 d = o->oo_dir->od_container.ic_descr;
1241 return (*p)(env, cookie,
1242 LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
1244 o->oo_inode ? o->oo_inode->i_ino : 0UL,
1245 o->oo_inode ? o->oo_inode->i_generation : 0,
1246 d ? d->id_ops->id_name : "plain");
1249 #define GRANT_FOR_LOCAL_OIDS 32 /* 128kB for last_rcvd, quota files, ... */
1252 * Concurrency: shouldn't matter.
1254 int osd_statfs(const struct lu_env *env, struct dt_device *d,
1255 struct obd_statfs *sfs)
1257 struct osd_device *osd = osd_dt_dev(d);
1258 struct super_block *sb = osd_sb(osd);
1259 struct kstatfs *ksfs;
1262 if (unlikely(osd->od_mnt == NULL))
1263 return -EINPROGRESS;
1265 /* osd_lproc.c calls this without an env; allocate ksfs for that case */
1266 if (unlikely(env == NULL)) {
1267 OBD_ALLOC_PTR(ksfs);
1271 ksfs = &osd_oti_get(env)->oti_ksfs;
1274 spin_lock(&osd->od_osfs_lock);
1275 result = sb->s_op->statfs(sb->s_root, ksfs);
1276 if (likely(result == 0)) { /* N.B. statfs can't really fail */
1277 statfs_pack(sfs, ksfs);
1278 if (sb->s_flags & MS_RDONLY)
1279 sfs->os_state = OS_STATE_READONLY;
1282 spin_unlock(&osd->od_osfs_lock);
1284 if (unlikely(env == NULL))
1287 /* Reserve a small amount of space for local objects like last_rcvd,
1288 * llog, quota files, ... */
1289 if (sfs->os_bavail <= GRANT_FOR_LOCAL_OIDS) {
1292 sfs->os_bavail -= GRANT_FOR_LOCAL_OIDS;
1293 /** Take out metadata overhead for indirect blocks */
1294 sfs->os_bavail -= sfs->os_bavail >> (sb->s_blocksize_bits - 3);
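/* i.e. reserve bavail / (blocksize / 8) blocks, roughly one 8-byte metadata
 * entry per data block, as an estimate of extent/indirect block overhead. */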
1301 * Estimate the space needed for file creation. We assume the largest filename
1302 * is the decimal representation of 2^64 - 1, hence a filename of 20 chars.
1303 * This is 28 bytes per object, which is 28MB for 1M objects ... not so bad.
1305 #ifdef __LDISKFS_DIR_REC_LEN
1306 #define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
1308 #define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
1312 * Concurrency: doesn't access mutable data.
1314 static void osd_conf_get(const struct lu_env *env,
1315 const struct dt_device *dev,
1316 struct dt_device_param *param)
1318 struct super_block *sb = osd_sb(osd_dt_dev(dev));
1322 * XXX should be taken from not-yet-existing fs abstraction layer.
1324 param->ddp_max_name_len = LDISKFS_NAME_LEN;
1325 param->ddp_max_nlink = LDISKFS_LINK_MAX;
1326 param->ddp_block_shift = sb->s_blocksize_bits;
1327 param->ddp_mount_type = LDD_MT_LDISKFS;
1328 param->ddp_maxbytes = sb->s_maxbytes;
1329 /* The overhead estimate should be fairly accurate, so we take only a tiny
1330 * error margin, which also avoids fragmenting the filesystem too much */
1331 param->ddp_grant_reserved = 2; /* ends up being 1.9% after conversion */
1332 /* inodes are statically allocated, so per-inode space consumption
1333 * is the space consumed by the directory entry */
1334 param->ddp_inodespace = PER_OBJ_USAGE;
1335 /* per-fragment overhead to be used by the client code */
1336 param->ddp_grant_frag = 6 * LDISKFS_BLOCK_SIZE(sb);
1337 param->ddp_mntopts = 0;
1338 if (test_opt(sb, XATTR_USER))
1339 param->ddp_mntopts |= MNTOPT_USERXATTR;
1340 if (test_opt(sb, POSIX_ACL))
1341 param->ddp_mntopts |= MNTOPT_ACL;
1343 /* LOD might calculate the max stripe count based on max_ea_size,
1344 * so we need to take the overhead into account as well:
1345 * xattr_header + magic + xattr_entry_head */
1346 ea_overhead = sizeof(struct ldiskfs_xattr_header) + sizeof(__u32) +
1347 LDISKFS_XATTR_LEN(XATTR_NAME_MAX_LEN);
1349 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
1350 if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
1351 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE -
1355 param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
1359 * Concurrency: shouldn't matter.
1361 static int osd_sync(const struct lu_env *env, struct dt_device *d)
1365 CDEBUG(D_CACHE, "syncing OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
1367 rc = ldiskfs_force_commit(osd_sb(osd_dt_dev(d)));
1369 CDEBUG(D_CACHE, "synced OSD %s: rc = %d\n",
1370 LUSTRE_OSD_LDISKFS_NAME, rc);
1376 * Start commit for OSD device.
1378 * An implementation of dt_commit_async method for OSD device.
1379 * Asynchronously starts the underlying fs sync and thereby a transaction commit.
1382 * \param env environment
1383 * \param d dt device
1385 * \see dt_device_operations
1387 static int osd_commit_async(const struct lu_env *env,
1388 struct dt_device *d)
1390 struct super_block *s = osd_sb(osd_dt_dev(d));
1393 CDEBUG(D_HA, "async commit OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
1394 RETURN(s->s_op->sync_fs(s, 0));
1398 * Concurrency: shouldn't matter.
1401 static int osd_ro(const struct lu_env *env, struct dt_device *d)
1403 struct super_block *sb = osd_sb(osd_dt_dev(d));
1404 struct block_device *dev = sb->s_bdev;
1405 #ifdef HAVE_DEV_SET_RDONLY
1406 struct block_device *jdev = LDISKFS_SB(sb)->journal_bdev;
1409 int rc = -EOPNOTSUPP;
1413 #ifdef HAVE_DEV_SET_RDONLY
1414 CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
1416 if (jdev && (jdev != dev)) {
1417 CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
1419 dev_set_rdonly(jdev);
1421 CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
1422 dev_set_rdonly(dev);
1424 CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
1425 osd_dt_dev(d)->od_svname, (long)dev, rc);
1431 * Concurrency: serialization provided by callers.
1433 static int osd_init_capa_ctxt(const struct lu_env *env, struct dt_device *d,
1434 int mode, unsigned long timeout, __u32 alg,
1435 struct lustre_capa_key *keys)
1437 struct osd_device *dev = osd_dt_dev(d);
1440 dev->od_fl_capa = mode;
1441 dev->od_capa_timeout = timeout;
1442 dev->od_capa_alg = alg;
1443 dev->od_capa_keys = keys;
1448 * Note: we do not count into QUOTA here.
1449 * If we mount with --data_journal we may need more.
1451 const int osd_dto_credits_noquota[DTO_NR] = {
1454 * INDEX_EXTRA_TRANS_BLOCKS(8) +
1455 * SINGLEDATA_TRANS_BLOCKS(8)
1456 * XXX Note: maybe IAM needs more, since IAM has more levels than the ext3 htree.
1459 [DTO_INDEX_INSERT] = 16,
1462 * just modify a single entry, probably merging a few within a block
1464 [DTO_INDEX_DELETE] = 1,
1468 [DTO_INDEX_UPDATE] = 16,
1470 * 4(inode, inode bits, groups, GDT)
1471 * notice: OI updates are counted separately with DTO_INDEX_INSERT
1473 [DTO_OBJECT_CREATE] = 4,
1475 * 4(inode, inode bits, groups, GDT)
1476 * notice: OI updates are counted separately with DTO_INDEX_DELETE
1478 [DTO_OBJECT_DELETE] = 4,
1480 * Attr set credits (inode)
1482 [DTO_ATTR_SET_BASE] = 1,
1484 * Xattr set. The same as xattr of EXT3.
1485 * DATA_TRANS_BLOCKS(14)
1486 * XXX Note: in the original MDS implementation INDEX_EXTRA_TRANS_BLOCKS
1487 * is also counted in; it is not clear why.
1489 [DTO_XATTR_SET] = 14,
1491 * credits for inode change during write.
1493 [DTO_WRITE_BASE] = 3,
1495 * credits for single block write.
1497 [DTO_WRITE_BLOCK] = 14,
1499 * Attr set credits for chown.
1500 * These are extra credits for setattr; they are zero when quota is disabled
1502 [DTO_ATTR_SET_CHOWN] = 0
1505 static const struct dt_device_operations osd_dt_ops = {
1506 .dt_root_get = osd_root_get,
1507 .dt_statfs = osd_statfs,
1508 .dt_trans_create = osd_trans_create,
1509 .dt_trans_start = osd_trans_start,
1510 .dt_trans_stop = osd_trans_stop,
1511 .dt_trans_cb_add = osd_trans_cb_add,
1512 .dt_conf_get = osd_conf_get,
1513 .dt_sync = osd_sync,
1515 .dt_commit_async = osd_commit_async,
1516 .dt_init_capa_ctxt = osd_init_capa_ctxt,
1519 static void osd_object_read_lock(const struct lu_env *env,
1520 struct dt_object *dt, unsigned role)
1522 struct osd_object *obj = osd_dt_obj(dt);
1523 struct osd_thread_info *oti = osd_oti_get(env);
1525 LINVRNT(osd_invariant(obj));
1527 LASSERT(obj->oo_owner != env);
1528 down_read_nested(&obj->oo_sem, role);
1530 LASSERT(obj->oo_owner == NULL);
1534 static void osd_object_write_lock(const struct lu_env *env,
1535 struct dt_object *dt, unsigned role)
1537 struct osd_object *obj = osd_dt_obj(dt);
1538 struct osd_thread_info *oti = osd_oti_get(env);
1540 LINVRNT(osd_invariant(obj));
1542 LASSERT(obj->oo_owner != env);
1543 down_write_nested(&obj->oo_sem, role);
1545 LASSERT(obj->oo_owner == NULL);
1546 obj->oo_owner = env;
1550 static void osd_object_read_unlock(const struct lu_env *env,
1551 struct dt_object *dt)
1553 struct osd_object *obj = osd_dt_obj(dt);
1554 struct osd_thread_info *oti = osd_oti_get(env);
1556 LINVRNT(osd_invariant(obj));
1558 LASSERT(oti->oti_r_locks > 0);
1560 up_read(&obj->oo_sem);
1563 static void osd_object_write_unlock(const struct lu_env *env,
1564 struct dt_object *dt)
1566 struct osd_object *obj = osd_dt_obj(dt);
1567 struct osd_thread_info *oti = osd_oti_get(env);
1569 LINVRNT(osd_invariant(obj));
1571 LASSERT(obj->oo_owner == env);
1572 LASSERT(oti->oti_w_locks > 0);
1574 obj->oo_owner = NULL;
1575 up_write(&obj->oo_sem);
1578 static int osd_object_write_locked(const struct lu_env *env,
1579 struct dt_object *dt)
1581 struct osd_object *obj = osd_dt_obj(dt);
1583 LINVRNT(osd_invariant(obj));
1585 return obj->oo_owner == env;
1588 static int capa_is_sane(const struct lu_env *env,
1589 struct osd_device *dev,
1590 struct lustre_capa *capa,
1591 struct lustre_capa_key *keys)
1593 struct osd_thread_info *oti = osd_oti_get(env);
1594 struct lustre_capa *tcapa = &oti->oti_capa;
1595 struct obd_capa *oc;
1599 oc = capa_lookup(dev->od_capa_hash, capa, 0);
1601 if (capa_is_expired(oc)) {
1602 DEBUG_CAPA(D_ERROR, capa, "expired");
1609 if (capa_is_expired_sec(capa)) {
1610 DEBUG_CAPA(D_ERROR, capa, "expired");
1614 spin_lock(&capa_lock);
1615 for (i = 0; i < 2; i++) {
1616 if (keys[i].lk_keyid == capa->lc_keyid) {
1617 oti->oti_capa_key = keys[i];
1621 spin_unlock(&capa_lock);
1624 DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
1628 rc = capa_hmac(tcapa->lc_hmac, capa, oti->oti_capa_key.lk_key);
1632 if (memcmp(tcapa->lc_hmac, capa->lc_hmac, sizeof(capa->lc_hmac))) {
1633 DEBUG_CAPA(D_ERROR, capa, "HMAC mismatch");
1637 oc = capa_add(dev->od_capa_hash, capa);
1643 int osd_object_auth(const struct lu_env *env, struct dt_object *dt,
1644 struct lustre_capa *capa, __u64 opc)
1646 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1647 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
1648 struct lu_capainfo *lci;
1651 if (!osd->od_fl_capa)
1654 if (capa == BYPASS_CAPA)
1657 lci = lu_capainfo_get(env);
1658 if (unlikely(lci == NULL))
1661 if (lci->lci_auth == LC_ID_NONE)
1665 CERROR("%s: no capability provided for FID "DFID": rc = %d\n",
1666 osd_name(osd), PFID(fid), -EACCES);
1670 if (!lu_fid_eq(fid, &capa->lc_fid)) {
1671 DEBUG_CAPA(D_ERROR, capa, "fid "DFID" mismatch with",
1676 if (!capa_opc_supported(capa, opc)) {
1677 DEBUG_CAPA(D_ERROR, capa, "opc "LPX64" not supported by", opc);
1681 rc = capa_is_sane(env, osd, capa, osd->od_capa_keys);
1683 DEBUG_CAPA(D_ERROR, capa, "insane: rc = %d", rc);
1690 static struct timespec *osd_inode_time(const struct lu_env *env,
1691 struct inode *inode, __u64 seconds)
1693 struct osd_thread_info *oti = osd_oti_get(env);
1694 struct timespec *t = &oti->oti_time;
1696 t->tv_sec = seconds;
1698 *t = timespec_trunc(*t, inode->i_sb->s_time_gran);
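/* Truncate to the filesystem's timestamp granularity so the value we store
 * matches what a later getattr will return. */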
1703 static void osd_inode_getattr(const struct lu_env *env,
1704 struct inode *inode, struct lu_attr *attr)
1706 attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
1707 LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
1708 LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE |
1711 attr->la_atime = LTIME_S(inode->i_atime);
1712 attr->la_mtime = LTIME_S(inode->i_mtime);
1713 attr->la_ctime = LTIME_S(inode->i_ctime);
1714 attr->la_mode = inode->i_mode;
1715 attr->la_size = i_size_read(inode);
1716 attr->la_blocks = inode->i_blocks;
1717 attr->la_uid = i_uid_read(inode);
1718 attr->la_gid = i_gid_read(inode);
1719 attr->la_flags = LDISKFS_I(inode)->i_flags;
1720 attr->la_nlink = inode->i_nlink;
1721 attr->la_rdev = inode->i_rdev;
1722 attr->la_blksize = 1 << inode->i_blkbits;
1723 attr->la_blkbits = inode->i_blkbits;
1726 static int osd_attr_get(const struct lu_env *env,
1727 struct dt_object *dt,
1728 struct lu_attr *attr,
1729 struct lustre_capa *capa)
1731 struct osd_object *obj = osd_dt_obj(dt);
1733 LASSERT(dt_object_exists(dt));
1734 LASSERT(!dt_object_remote(dt));
1735 LINVRNT(osd_invariant(obj));
1737 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
1740 spin_lock(&obj->oo_guard);
1741 osd_inode_getattr(env, obj->oo_inode, attr);
1742 spin_unlock(&obj->oo_guard);
1746 static int osd_declare_attr_set(const struct lu_env *env,
1747 struct dt_object *dt,
1748 const struct lu_attr *attr,
1749 struct thandle *handle)
1751 struct osd_thandle *oh;
1752 struct osd_object *obj;
1753 struct osd_thread_info *info = osd_oti_get(env);
1754 struct lquota_id_info *qi = &info->oti_qi;
1762 LASSERT(dt != NULL);
1763 LASSERT(handle != NULL);
1765 obj = osd_dt_obj(dt);
1766 LASSERT(osd_invariant(obj));
1768 oh = container_of0(handle, struct osd_thandle, ot_super);
1769 LASSERT(oh->ot_handle == NULL);
1771 osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
1772 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
1774 if (attr == NULL || obj->oo_inode == NULL)
1777 bspace = obj->oo_inode->i_blocks;
1778 bspace <<= obj->oo_inode->i_sb->s_blocksize_bits;
1779 bspace = toqb(bspace);
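/* Estimated space usage of the inode in quota block units; this is the
 * amount charged to the new owner and released from the old one below. */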
1781 /* Changing ownership is always performed by the superuser, so it should
1784 * not fail with EDQUOT. We still need to call osd_declare_qid() to
1785 * calculate the journal credits for updating the quota accounting files
1786 * and to trigger quota space adjustment once the operation completes. */
1787 if (attr->la_valid & LA_UID || attr->la_valid & LA_GID) {
1789 uid = i_uid_read(obj->oo_inode);
1790 qi->lqi_type = USRQUOTA;
1791 enforce = (attr->la_valid & LA_UID) && (attr->la_uid != uid);
1792 /* inode accounting */
1793 qi->lqi_is_blk = false;
1795 /* one more inode for the new uid ... */
1796 qi->lqi_id.qid_uid = attr->la_uid;
1798 /* Reserve credits for the new uid */
1799 rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
1800 if (rc == -EDQUOT || rc == -EINPROGRESS)
1805 /* and one less inode for the current uid */
1806 qi->lqi_id.qid_uid = uid;
1808 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1809 if (rc == -EDQUOT || rc == -EINPROGRESS)
1814 /* block accounting */
1815 qi->lqi_is_blk = true;
1817 /* more blocks for the new uid ... */
1818 qi->lqi_id.qid_uid = attr->la_uid;
1819 qi->lqi_space = bspace;
1821 * Credits for the new uid have been reserved; re-use "obj"
1822 * to save on credit reservation.
1824 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1825 if (rc == -EDQUOT || rc == -EINPROGRESS)
1830 /* and finally less blocks for the current uid */
1831 qi->lqi_id.qid_uid = uid;
1832 qi->lqi_space = -bspace;
1833 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1834 if (rc == -EDQUOT || rc == -EINPROGRESS)
1840 gid = i_gid_read(obj->oo_inode);
1841 qi->lqi_type = GRPQUOTA;
1842 enforce = (attr->la_valid & LA_GID) && (attr->la_gid != gid);
1844 /* inode accounting */
1845 qi->lqi_is_blk = false;
1847 /* one more inode for the new gid ... */
1848 qi->lqi_id.qid_gid = attr->la_gid;
1850 rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
1851 if (rc == -EDQUOT || rc == -EINPROGRESS)
1856 /* and one less inode for the current gid */
1857 qi->lqi_id.qid_gid = gid;
1859 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1860 if (rc == -EDQUOT || rc == -EINPROGRESS)
1865 /* block accounting */
1866 qi->lqi_is_blk = true;
1868 /* more blocks for the new gid ... */
1869 qi->lqi_id.qid_gid = attr->la_gid;
1870 qi->lqi_space = bspace;
1871 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1872 if (rc == -EDQUOT || rc == -EINPROGRESS)
1877 /* and finally less blocks for the current gid */
1878 qi->lqi_id.qid_gid = gid;
1879 qi->lqi_space = -bspace;
1880 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1881 if (rc == -EDQUOT || rc == -EINPROGRESS)
1890 static int osd_inode_setattr(const struct lu_env *env,
1891 struct inode *inode, const struct lu_attr *attr)
1893 __u64 bits = attr->la_valid;
1895 /* Only allow set size for regular file */
1896 if (!S_ISREG(inode->i_mode))
1897 bits &= ~(LA_SIZE | LA_BLOCKS);
1902 if (bits & LA_ATIME)
1903 inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
1904 if (bits & LA_CTIME)
1905 inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
1906 if (bits & LA_MTIME)
1907 inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
1908 if (bits & LA_SIZE) {
1909 LDISKFS_I(inode)->i_disksize = attr->la_size;
1910 i_size_write(inode, attr->la_size);
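/* Keep the ldiskfs on-disk size (i_disksize) in sync with the in-core
 * i_size. */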
1914 /* OSD should not change "i_blocks" which is used by quota.
1915 * "i_blocks" should be changed by ldiskfs only. */
1916 if (bits & LA_BLOCKS)
1917 inode->i_blocks = attr->la_blocks;
1920 inode->i_mode = (inode->i_mode & S_IFMT) |
1921 (attr->la_mode & ~S_IFMT);
1923 i_uid_write(inode, attr->la_uid);
1925 i_gid_write(inode, attr->la_gid);
1926 if (bits & LA_NLINK)
1927 set_nlink(inode, attr->la_nlink);
1929 inode->i_rdev = attr->la_rdev;
1931 if (bits & LA_FLAGS) {
1932 /* always keep S_NOCMTIME */
1933 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
1939 static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
1941 if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
1942 (attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
1946 ll_vfs_dq_init(inode);
1948 if (attr->la_valid & LA_UID)
1949 iattr.ia_valid |= ATTR_UID;
1950 if (attr->la_valid & LA_GID)
1951 iattr.ia_valid |= ATTR_GID;
1952 iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
1953 iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);
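/* Let the VFS quota code transfer the charged usage from the old id(s) to
 * the new one(s). */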
1955 rc = ll_vfs_dq_transfer(inode, &iattr);
1957 CERROR("%s: quota transfer failed: rc = %d. Is quota "
1958 "enforcement enabled on the ldiskfs "
1959 "filesystem?\n", inode->i_sb->s_id, rc);
1966 static int osd_attr_set(const struct lu_env *env,
1967 struct dt_object *dt,
1968 const struct lu_attr *attr,
1969 struct thandle *handle,
1970 struct lustre_capa *capa)
1972 struct osd_object *obj = osd_dt_obj(dt);
1973 struct inode *inode;
1976 LASSERT(handle != NULL);
1977 LASSERT(dt_object_exists(dt));
1978 LASSERT(!dt_object_remote(dt));
1979 LASSERT(osd_invariant(obj));
1981 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
1984 osd_trans_exec_op(env, handle, OSD_OT_ATTR_SET);
1986 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING)) {
1987 struct osd_thread_info *oti = osd_oti_get(env);
1988 const struct lu_fid *fid0 = lu_object_fid(&dt->do_lu);
1989 struct lu_fid *fid1 = &oti->oti_fid;
1990 struct osd_inode_id *id = &oti->oti_id;
1991 struct iam_path_descr *ipd;
1992 struct iam_container *bag;
1993 struct osd_thandle *oh;
1996 fid_cpu_to_be(fid1, fid0);
1997 memset(id, 1, sizeof(*id));
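/* Fault injection: deliberately write a bogus OI mapping for this FID,
 * presumably so that OI scrub handling can be exercised in tests. */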
1998 bag = &osd_fid2oi(osd_dev(dt->do_lu.lo_dev),
1999 fid0)->oi_dir.od_container;
2000 ipd = osd_idx_ipd_get(env, bag);
2001 if (unlikely(ipd == NULL))
2004 oh = container_of0(handle, struct osd_thandle, ot_super);
2005 rc = iam_update(oh->ot_handle, bag, (const struct iam_key *)fid1,
2006 (const struct iam_rec *)id, ipd);
2007 osd_ipd_put(env, bag, ipd);
2008 return(rc > 0 ? 0 : rc);
2011 inode = obj->oo_inode;
2013 rc = osd_quota_transfer(inode, attr);
2017 spin_lock(&obj->oo_guard);
2018 rc = osd_inode_setattr(env, inode, attr);
2019 spin_unlock(&obj->oo_guard);
2022 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2026 static struct dentry *osd_child_dentry_get(const struct lu_env *env,
2027 struct osd_object *obj,
2028 const char *name, const int namelen)
2030 return osd_child_dentry_by_inode(env, obj->oo_inode, name, namelen);
2033 static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
2034 umode_t mode, struct dt_allocation_hint *hint,
2038 struct osd_device *osd = osd_obj2dev(obj);
2039 struct osd_thandle *oth;
2040 struct dt_object *parent = NULL;
2041 struct inode *inode;
2043 LINVRNT(osd_invariant(obj));
2044 LASSERT(obj->oo_inode == NULL);
2045 LASSERT(obj->oo_hl_head == NULL);
2047 if (S_ISDIR(mode) && ldiskfs_pdo) {
2048 obj->oo_hl_head =ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
2049 if (obj->oo_hl_head == NULL)
2053 oth = container_of(th, struct osd_thandle, ot_super);
2054 LASSERT(oth->ot_handle->h_transaction != NULL);
2056 if (hint && hint->dah_parent)
2057 parent = hint->dah_parent;
2059 inode = ldiskfs_create_inode(oth->ot_handle,
2060 parent ? osd_dt_obj(parent)->oo_inode :
2061 osd_sb(osd)->s_root->d_inode,
2063 if (!IS_ERR(inode)) {
2064 /* Do not update file c/mtime in ldiskfs.
2065 * NB: no lock is needed because there is no contention at this stage. */
2067 inode->i_flags |= S_NOCMTIME;
2069 /* A newly created object must be consistent,
2070 * so it is unnecessary to scrub it. */
2071 ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_NOSCRUB);
2072 obj->oo_inode = inode;
2075 if (obj->oo_hl_head != NULL) {
2076 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
2077 obj->oo_hl_head = NULL;
2079 result = PTR_ERR(inode);
2081 LINVRNT(osd_invariant(obj));
2089 static int osd_mkdir(struct osd_thread_info *info, struct osd_object *obj,
2090 struct lu_attr *attr,
2091 struct dt_allocation_hint *hint,
2092 struct dt_object_format *dof,
2096 struct osd_thandle *oth;
2097 __u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX));
2099 LASSERT(S_ISDIR(attr->la_mode));
2101 oth = container_of(th, struct osd_thandle, ot_super);
2102 LASSERT(oth->ot_handle->h_transaction != NULL);
2103 result = osd_mkfile(info, obj, mode, hint, th);
2108 static int osd_mk_index(struct osd_thread_info *info, struct osd_object *obj,
2109 struct lu_attr *attr,
2110 struct dt_allocation_hint *hint,
2111 struct dt_object_format *dof,
2115 struct osd_thandle *oth;
2116 const struct dt_index_features *feat = dof->u.dof_idx.di_feat;
2118 __u32 mode = (attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX));
2120 LASSERT(S_ISREG(attr->la_mode));
2122 oth = container_of(th, struct osd_thandle, ot_super);
2123 LASSERT(oth->ot_handle->h_transaction != NULL);
2125 result = osd_mkfile(info, obj, mode, hint, th);
2127 LASSERT(obj->oo_inode != NULL);
2128 if (feat->dif_flags & DT_IND_VARKEY)
2129 result = iam_lvar_create(obj->oo_inode,
2130 feat->dif_keysize_max,
2132 feat->dif_recsize_max,
2135 result = iam_lfix_create(obj->oo_inode,
2136 feat->dif_keysize_max,
2138 feat->dif_recsize_max,
2145 static int osd_mkreg(struct osd_thread_info *info, struct osd_object *obj,
2146 struct lu_attr *attr,
2147 struct dt_allocation_hint *hint,
2148 struct dt_object_format *dof,
2151 LASSERT(S_ISREG(attr->la_mode));
2152 return osd_mkfile(info, obj, (attr->la_mode &
2153 (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
2156 static int osd_mksym(struct osd_thread_info *info, struct osd_object *obj,
2157 struct lu_attr *attr,
2158 struct dt_allocation_hint *hint,
2159 struct dt_object_format *dof,
2162 LASSERT(S_ISLNK(attr->la_mode));
2163 return osd_mkfile(info, obj, (attr->la_mode &
2164 (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
2167 static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
2168 struct lu_attr *attr,
2169 struct dt_allocation_hint *hint,
2170 struct dt_object_format *dof,
2173 umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
2176 LINVRNT(osd_invariant(obj));
2177 LASSERT(obj->oo_inode == NULL);
2178 LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
2179 S_ISFIFO(mode) || S_ISSOCK(mode));
2181 result = osd_mkfile(info, obj, mode, hint, th);
2183 LASSERT(obj->oo_inode != NULL);
2185 * This inode should be marked dirty for i_rdev. Currently
2186 * that is done in the osd_attr_init().
2188 init_special_inode(obj->oo_inode, obj->oo_inode->i_mode,
2191 LINVRNT(osd_invariant(obj));
2195 typedef int (*osd_obj_type_f)(struct osd_thread_info *, struct osd_object *,
2197 struct dt_allocation_hint *hint,
2198 struct dt_object_format *dof,
2201 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
2203 osd_obj_type_f result;
2219 result = osd_mk_index;
2230 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
2231 struct dt_object *parent, struct dt_object *child,
2236 ah->dah_parent = parent;
2237 ah->dah_mode = child_mode;
2240 static void osd_attr_init(struct osd_thread_info *info, struct osd_object *obj,
2241 struct lu_attr *attr, struct dt_object_format *dof)
2243 struct inode *inode = obj->oo_inode;
2244 __u64 valid = attr->la_valid;
2247 attr->la_valid &= ~(LA_TYPE | LA_MODE);
2249 if (dof->dof_type != DFT_NODE)
2250 attr->la_valid &= ~LA_RDEV;
2251 if ((valid & LA_ATIME) && (attr->la_atime == LTIME_S(inode->i_atime)))
2252 attr->la_valid &= ~LA_ATIME;
2253 if ((valid & LA_CTIME) && (attr->la_ctime == LTIME_S(inode->i_ctime)))
2254 attr->la_valid &= ~LA_CTIME;
2255 if ((valid & LA_MTIME) && (attr->la_mtime == LTIME_S(inode->i_mtime)))
2256 attr->la_valid &= ~LA_MTIME;
2258 result = osd_quota_transfer(inode, attr);
2262 if (attr->la_valid != 0) {
2263 result = osd_inode_setattr(info->oti_env, inode, attr);
2265 * The osd_inode_setattr() should always succeed here. The
2266 * only error that could be returned is EDQUOT when we are
2267 * trying to change the UID or GID of the inode. However, this
2268 * should not happen since quota enforcement is no longer
2269 * enabled on ldiskfs (lquota takes care of it).
2271 LASSERTF(result == 0, "%d\n", result);
2272 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2275 attr->la_valid = valid;
2279 * Helper function for osd_object_create()
2281 * \retval 0, on success
2283 static int __osd_object_create(struct osd_thread_info *info,
2284 struct osd_object *obj, struct lu_attr *attr,
2285 struct dt_allocation_hint *hint,
2286 struct dt_object_format *dof,
2292 /* we drop umask so that permissions we pass are not affected */
2293 umask = current->fs->umask;
2294 current->fs->umask = 0;
2296 result = osd_create_type_f(dof->dof_type)(info, obj, attr, hint, dof,
2299 osd_attr_init(info, obj, attr, dof);
2300 osd_object_init0(obj);
2302 if (obj->oo_inode && (obj->oo_inode->i_state & I_NEW))
2303 unlock_new_inode(obj->oo_inode);
2306 /* restore previous umask value */
2307 current->fs->umask = umask;
2313 * Helper function for osd_object_create()
2315 * \retval 0, on success
2317 static int __osd_oi_insert(const struct lu_env *env, struct osd_object *obj,
2318 const struct lu_fid *fid, struct thandle *th)
2320 struct osd_thread_info *info = osd_oti_get(env);
2321 struct osd_inode_id *id = &info->oti_id;
2322 struct osd_device *osd = osd_obj2dev(obj);
2323 struct osd_thandle *oh;
2325 LASSERT(obj->oo_inode != NULL);
2327 oh = container_of0(th, struct osd_thandle, ot_super);
2328 LASSERT(oh->ot_handle);
2330 osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
2331 return osd_oi_insert(info, osd, fid, id, oh->ot_handle, OI_CHECK_FLD);
2334 int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
2335 obd_seq seq, struct lu_seq_range *range)
2337 struct seq_server_site *ss = osd_seq_site(osd);
2339 if (fid_seq_is_idif(seq)) {
2340 fld_range_set_ost(range);
2341 range->lsr_index = idif_ost_idx(seq);
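/* An IDIF sequence encodes the OST index directly, so no FLD lookup is
 * needed. */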
2345 if (!fid_seq_in_fldb(seq)) {
2346 fld_range_set_mdt(range);
2348 /* FIXME: if ss is NULL, we should not be getting lsr_index from it */
2350 range->lsr_index = ss->ss_node_id;
2354 LASSERT(ss != NULL);
2355 fld_range_set_any(range);
2356 /* OSD will only do local fld lookup */
2357 return fld_local_lookup(env, ss->ss_server_fld, seq, range);
2361 * Concurrency: no external locking is necessary.
2363 static int osd_declare_object_create(const struct lu_env *env,
2364 struct dt_object *dt,
2365 struct lu_attr *attr,
2366 struct dt_allocation_hint *hint,
2367 struct dt_object_format *dof,
2368 struct thandle *handle)
2370 struct osd_thandle *oh;
2374 LASSERT(handle != NULL);
2376 oh = container_of0(handle, struct osd_thandle, ot_super);
2377 LASSERT(oh->ot_handle == NULL);
2379 osd_trans_declare_op(env, oh, OSD_OT_CREATE,
2380 osd_dto_credits_noquota[DTO_OBJECT_CREATE]);
2381 /* Reusing an idle OI block may cause one additional OI block to be changed */
2383 osd_trans_declare_op(env, oh, OSD_OT_INSERT,
2384 osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
2386 /* If this is a directory, then we expect '.' and '..' to be inserted as
2387 * well. The one directory block always needs to be created for the
2388 * directory, so we could use DTO_WRITE_BASE here (GDT, block bitmap,
2389 * block); there is no danger of needing a tree for the first block. */
2391 if (attr && S_ISDIR(attr->la_mode)) {
2392 osd_trans_declare_op(env, oh, OSD_OT_INSERT,
2393 osd_dto_credits_noquota[DTO_WRITE_BASE]);
2394 osd_trans_declare_op(env, oh, OSD_OT_INSERT, 0);
2400 rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid, 1, oh,
2401 osd_dt_obj(dt), false, NULL, false);
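/*
 * In total the declaration above reserves: DTO_OBJECT_CREATE credits for the
 * inode itself, DTO_INDEX_INSERT + 1 credits for the OI entry (the extra one
 * covering a possibly reused idle OI block), an additional DTO_WRITE_BASE for
 * directories ('.', '..' and the first directory block), plus the inode
 * quota credits from osd_declare_inode_qid().
 */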
2408 static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
2409 struct lu_attr *attr,
2410 struct dt_allocation_hint *hint,
2411 struct dt_object_format *dof, struct thandle *th)
2413 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2414 struct osd_object *obj = osd_dt_obj(dt);
2415 struct osd_thread_info *info = osd_oti_get(env);
2419 LINVRNT(osd_invariant(obj));
2420 LASSERT(!dt_object_exists(dt));
2421 LASSERT(!dt_object_remote(dt));
2422 LASSERT(osd_write_locked(env, obj));
2423 LASSERT(th != NULL);
2425 if (unlikely(fid_is_acct(fid)))
2426 /* Quota files can't be created from the kernel any more;
2427 * 'tune2fs -O quota' will take care of creating them */
2430 osd_trans_exec_op(env, th, OSD_OT_CREATE);
2431 osd_trans_declare_rb(env, th, OSD_OT_REF_ADD);
2433 result = __osd_object_create(info, obj, attr, hint, dof, th);
2435 result = __osd_oi_insert(env, obj, fid, th);
2437 LASSERT(ergo(result == 0,
2438 dt_object_exists(dt) && !dt_object_remote(dt)));
2440 LASSERT(osd_invariant(obj));
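/*
 * A rough usage sketch, not taken from this file: callers are expected to
 * pair the declaration and execution phases inside a single transaction,
 * roughly as below (assuming the usual dt_trans_create()/dt_trans_start()/
 * dt_trans_stop() wrappers):
 *
 *	th = dt_trans_create(env, dev);
 *	rc = dt->do_ops->do_declare_create(env, dt, attr, hint, dof, th);
 *	rc = dt_trans_start(env, dev, th);
 *	rc = dt->do_ops->do_create(env, dt, attr, hint, dof, th);
 *	dt_trans_stop(env, dev, th);
 */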
2445 * Called to destroy on-disk representation of the object
2447 * Concurrency: must be locked
2449 static int osd_declare_object_destroy(const struct lu_env *env,
2450 struct dt_object *dt,
2453 struct osd_object *obj = osd_dt_obj(dt);
2454 struct inode *inode = obj->oo_inode;
2455 struct osd_thandle *oh;
2459 oh = container_of0(th, struct osd_thandle, ot_super);
2460 LASSERT(oh->ot_handle == NULL);
2463 osd_trans_declare_op(env, oh, OSD_OT_DESTROY,
2464 osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
2465 /* Recycling an idle OI leaf may cause three additional OI blocks to be changed */
2467 osd_trans_declare_op(env, oh, OSD_OT_DELETE,
2468 osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
2469 /* one less inode */
2470 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2471 -1, oh, obj, false, NULL, false);
2474 /* data to be truncated */
2475 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2476 0, oh, obj, true, NULL, false);
2480 static int osd_object_destroy(const struct lu_env *env,
2481 struct dt_object *dt,
2484 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2485 struct osd_object *obj = osd_dt_obj(dt);
2486 struct inode *inode = obj->oo_inode;
2487 struct osd_device *osd = osd_obj2dev(obj);
2488 struct osd_thandle *oh;
2492 oh = container_of0(th, struct osd_thandle, ot_super);
2493 LASSERT(oh->ot_handle);
2495 LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
2497 if (unlikely(fid_is_acct(fid)))
2500 if (S_ISDIR(inode->i_mode)) {
2501 LASSERT(osd_inode_unlinked(inode) || inode->i_nlink == 1 ||
2502 inode->i_nlink == 2);
2503 /* it will check/delete the inode from the remote parent;
2504 * how can this be optimized? XXX: unlink performance impact */
2505 result = osd_delete_from_remote_parent(env, osd, obj, oh);
2506 if (result != 0 && result != -ENOENT) {
2507 CERROR("%s: delete inode "DFID": rc = %d\n",
2508 osd_name(osd), PFID(fid), result);
2510 spin_lock(&obj->oo_guard);
2512 spin_unlock(&obj->oo_guard);
2513 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2516 osd_trans_exec_op(env, th, OSD_OT_DESTROY);
2518 result = osd_oi_delete(osd_oti_get(env), osd, fid, oh->ot_handle,
2521 /* XXX: add to ext3 orphan list */
2522 /* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
2524 /* not needed in the cache anymore */
2525 set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
2531 * Put the fid into lustre_mdt_attrs, and then place the structure in the
2532 * inode's EA. This fid should not be altered during the life time of the object.
2535 * \retval +ve, on success
2536 * \retval -ve, on error
2538 * FIXME: it would be preferable to use ldiskfs_xattr_set_handle() here
2540 int osd_ea_fid_set(struct osd_thread_info *info, struct inode *inode,
2541 const struct lu_fid *fid, __u32 compat, __u32 incompat)
2543 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
2547 if (OBD_FAIL_CHECK(OBD_FAIL_FID_INLMA))
2550 lustre_lma_init(lma, fid, compat, incompat);
2551 lustre_lma_swab(lma);
2553 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma, sizeof(*lma),
2555 /* LMA may already exist, but we need to check that all the
2556 * desired compat/incompat flags have been added. */
2557 if (unlikely(rc == -EEXIST)) {
2558 if (compat == 0 && incompat == 0)
2561 rc = __osd_xattr_get(inode, &info->oti_obj_dentry,
2562 XATTR_NAME_LMA, info->oti_mdt_attrs_old,
2567 lustre_lma_swab(lma);
2568 if (!(~lma->lma_compat & compat) &&
2569 !(~lma->lma_incompat & incompat))
2572 lma->lma_compat |= compat;
2573 lma->lma_incompat |= incompat;
2574 lustre_lma_swab(lma);
2575 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
2576 sizeof(*lma), XATTR_REPLACE);
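/*
 * Note on the -EEXIST branch above: if an LMA already exists, it is read
 * back and only the compat/incompat bits that are still missing are OR-ed
 * in before rewriting the EA with XATTR_REPLACE. Callers in this file use
 * osd_ea_fid_set() both to tag objects residing on OST (LMAC_FID_ON_OST)
 * and to mark local agent inodes (LMAI_AGENT).
 */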
2583 * ldiskfs supports a fid in the dirent; it is passed in dentry->d_fsdata.
2584 * lustre 1.8 also uses d_fsdata for passing other info to ldiskfs.
2585 * To stay compatible with the 1.8 ldiskfs driver we need to have a
2586 * magic number at the start of the fid data.
2587 * \ldiskfs_dentry_param is used only to pass the fid from osd to ldiskfs.
2590 static void osd_get_ldiskfs_dirent_param(struct ldiskfs_dentry_param *param,
2591 const struct lu_fid *fid)
2593 if (!fid_is_namespace_visible(fid) ||
2594 OBD_FAIL_CHECK(OBD_FAIL_FID_IGIF)) {
2595 param->edp_magic = 0;
2599 param->edp_magic = LDISKFS_LUFID_MAGIC;
2600 param->edp_len = sizeof(struct lu_fid) + 1;
2601 fid_cpu_to_be((struct lu_fid *)param->edp_data, (struct lu_fid *)fid);
2605 * Try to read the fid from inode ea into dt_rec.
2607 * \param fid object fid.
2609 * \retval 0 on success
2611 static int osd_ea_fid_get(const struct lu_env *env, struct osd_object *obj,
2612 __u32 ino, struct lu_fid *fid,
2613 struct osd_inode_id *id)
2615 struct osd_thread_info *info = osd_oti_get(env);
2616 struct inode *inode;
2619 osd_id_gen(id, ino, OSD_OII_NOGEN);
2620 inode = osd_iget_fid(info, osd_obj2dev(obj), id, fid);
2622 RETURN(PTR_ERR(inode));
2628 static int osd_add_dot_dotdot_internal(struct osd_thread_info *info,
2630 struct inode *parent_dir,
2631 const struct lu_fid *dot_fid,
2632 const struct lu_fid *dot_dot_fid,
2633 struct osd_thandle *oth)
2635 struct ldiskfs_dentry_param *dot_ldp;
2636 struct ldiskfs_dentry_param *dot_dot_ldp;
2638 dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
2639 osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
2641 dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
2642 dot_ldp->edp_magic = 0;
2643 return ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
2644 dir, dot_ldp, dot_dot_ldp);
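/*
 * In the helper above '.' is written without a fid (its edp_magic is
 * cleared), while '..' carries the parent fid through the ldiskfs dentry
 * parameter prepared by osd_get_ldiskfs_dirent_param().
 */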
2648 * Create a local agent inode for a remote entry
2650 static struct inode *osd_create_local_agent_inode(const struct lu_env *env,
2651 struct osd_device *osd,
2652 struct osd_object *pobj,
2653 const struct lu_fid *fid,
2657 struct osd_thread_info *info = osd_oti_get(env);
2658 struct inode *local;
2659 struct osd_thandle *oh;
2664 oh = container_of(th, struct osd_thandle, ot_super);
2665 LASSERT(oh->ot_handle->h_transaction != NULL);
2667 local = ldiskfs_create_inode(oh->ot_handle, pobj->oo_inode, type);
2668 if (IS_ERR(local)) {
2669 CERROR("%s: create local error %d\n", osd_name(osd),
2670 (int)PTR_ERR(local));
2674 /* Set special LMA flag for local agent inode */
2675 rc = osd_ea_fid_set(info, local, fid, 0, LMAI_AGENT);
2677 CERROR("%s: set LMA for "DFID" remote inode failed: rc = %d\n",
2678 osd_name(osd), PFID(fid), rc);
2679 RETURN(ERR_PTR(rc));
2685 rc = osd_add_dot_dotdot_internal(info, local, pobj->oo_inode,
2686 lu_object_fid(&pobj->oo_dt.do_lu),
2689 CERROR("%s: "DFID" add dot dotdot error: rc = %d\n",
2690 osd_name(osd), PFID(fid), rc);
2691 RETURN(ERR_PTR(rc));
2698 * Delete local agent inode for remote entry
2700 static int osd_delete_local_agent_inode(const struct lu_env *env,
2701 struct osd_device *osd,
2702 const struct lu_fid *fid,
2703 __u32 ino, struct osd_thandle *oh)
2705 struct osd_thread_info *oti = osd_oti_get(env);
2706 struct osd_inode_id *id = &oti->oti_id;
2707 struct inode *inode;
2710 id->oii_ino = le32_to_cpu(ino);
2711 id->oii_gen = OSD_OII_NOGEN;
2712 inode = osd_iget(oti, osd, id);
2713 if (IS_ERR(inode)) {
2714 CERROR("%s: iget error "DFID" id %u:%u\n", osd_name(osd),
2715 PFID(fid), id->oii_ino, id->oii_gen);
2716 RETURN(PTR_ERR(inode));
2720 mark_inode_dirty(inode);
2721 CDEBUG(D_INODE, "%s: delete remote inode "DFID" %lu\n",
2722 osd_name(osd), PFID(fid), inode->i_ino);
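/*
 * The inode number above is presumably taken straight from an on-disk
 * (little-endian) dirent, hence the le32_to_cpu(); no generation is
 * available there, so the agent inode is looked up with OSD_OII_NOGEN.
 */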
2728 * OSD layer object create function for interoperability mode (b11826).
2729 * This is mostly similar to osd_object_create(). The only difference is
2730 * that the fid is inserted into the inode's EA here.
2732 * \retval 0, on success
2733 * \retval -ve, on error
2735 static int osd_object_ea_create(const struct lu_env *env, struct dt_object *dt,
2736 struct lu_attr *attr,
2737 struct dt_allocation_hint *hint,
2738 struct dt_object_format *dof,
2741 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2742 struct osd_object *obj = osd_dt_obj(dt);
2743 struct osd_thread_info *info = osd_oti_get(env);
2748 LASSERT(osd_invariant(obj));
2749 LASSERT(!dt_object_exists(dt));
2750 LASSERT(!dt_object_remote(dt));
2751 LASSERT(osd_write_locked(env, obj));
2752 LASSERT(th != NULL);
2754 if (unlikely(fid_is_acct(fid)))
2755 /* Quota files can't be created from the kernel any more;
2756 * 'tune2fs -O quota' will take care of creating them */
2759 osd_trans_exec_op(env, th, OSD_OT_CREATE);
2760 osd_trans_declare_rb(env, th, OSD_OT_REF_ADD);
2762 result = __osd_object_create(info, obj, attr, hint, dof, th);
2764 result = osd_ea_fid_set(info, obj->oo_inode, fid,
2765 fid_is_on_ost(info, osd_obj2dev(obj),
2766 fid, OI_CHECK_FLD) ?
2767 LMAC_FID_ON_OST : 0, 0);
2770 result = __osd_oi_insert(env, obj, fid, th);
2772 LASSERT(ergo(result == 0,
2773 dt_object_exists(dt) && !dt_object_remote(dt)));
2774 LINVRNT(osd_invariant(obj));
2778 static int osd_declare_object_ref_add(const struct lu_env *env,
2779 struct dt_object *dt,
2780 struct thandle *handle)
2782 struct osd_thandle *oh;
2784 /* it's possible that object doesn't exist yet */
2785 LASSERT(handle != NULL);
2787 oh = container_of0(handle, struct osd_thandle, ot_super);
2788 LASSERT(oh->ot_handle == NULL);
2790 osd_trans_declare_op(env, oh, OSD_OT_REF_ADD,
2791 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2797 * Concurrency: @dt is write locked.
2799 static int osd_object_ref_add(const struct lu_env *env,
2800 struct dt_object *dt, struct thandle *th)
2802 struct osd_object *obj = osd_dt_obj(dt);
2803 struct inode *inode = obj->oo_inode;
2804 struct osd_thandle *oh;
2807 LINVRNT(osd_invariant(obj));
2808 LASSERT(dt_object_exists(dt));
2809 LASSERT(!dt_object_remote(dt));
2810 LASSERT(osd_write_locked(env, obj));
2811 LASSERT(th != NULL);
2813 oh = container_of0(th, struct osd_thandle, ot_super);
2814 LASSERT(oh->ot_handle != NULL);
2816 osd_trans_exec_op(env, th, OSD_OT_REF_ADD);
2818 CDEBUG(D_INODE, DFID" increase nlink %d\n",
2819 PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
2821 * The DIR_NLINK feature allows directories to exceed LDISKFS_LINK_MAX
2822 * (65000) subdirectories by storing "1" in i_nlink if the link count
2823 * would otherwise overflow. Directory traversal tools understand
2824 * that (st_nlink == 1) indicates that the filesystem does not track
2825 * the hard link count on the directory, and will not abort subdirectory
2826 * scanning early once (st_nlink - 2) subdirs have been found.
2828 * This also has to properly handle the case of inodes with nlink == 0
2829 * in case they are being linked into the PENDING directory
2831 spin_lock(&obj->oo_guard);
2832 if (unlikely(inode->i_nlink == 0))
2833 /* inc_nlink from 0 may cause WARN_ON */
2834 set_nlink(inode, 1);
2836 ldiskfs_inc_count(oh->ot_handle, inode);
2837 if (!S_ISDIR(inode->i_mode))
2838 LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
2840 spin_unlock(&obj->oo_guard);
2842 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2843 LINVRNT(osd_invariant(obj));
2848 static int osd_declare_object_ref_del(const struct lu_env *env,
2849 struct dt_object *dt,
2850 struct thandle *handle)
2852 struct osd_thandle *oh;
2854 LASSERT(dt_object_exists(dt));
2855 LASSERT(!dt_object_remote(dt));
2856 LASSERT(handle != NULL);
2858 oh = container_of0(handle, struct osd_thandle, ot_super);
2859 LASSERT(oh->ot_handle == NULL);
2861 osd_trans_declare_op(env, oh, OSD_OT_REF_DEL,
2862 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2868 * Concurrency: @dt is write locked.
2870 static int osd_object_ref_del(const struct lu_env *env, struct dt_object *dt,
2873 struct osd_object *obj = osd_dt_obj(dt);
2874 struct inode *inode = obj->oo_inode;
2875 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
2876 struct osd_thandle *oh;
2878 LINVRNT(osd_invariant(obj));
2879 LASSERT(dt_object_exists(dt));
2880 LASSERT(!dt_object_remote(dt));
2881 LASSERT(osd_write_locked(env, obj));
2882 LASSERT(th != NULL);
2884 oh = container_of0(th, struct osd_thandle, ot_super);
2885 LASSERT(oh->ot_handle != NULL);
2887 osd_trans_exec_op(env, th, OSD_OT_REF_DEL);
2889 spin_lock(&obj->oo_guard);
2890 /* This can be the result of an upgrade from an old Lustre version and
2891 * applies only to local files. Just skip this ref_del call;
2892 * ext4_unlink() only treats this as a warning, so don't LASSERT here. */
2893 if (inode->i_nlink == 0) {
2894 CDEBUG_LIMIT(fid_is_norm(lu_object_fid(&dt->do_lu)) ?
2895 D_ERROR : D_INODE, "%s: nlink == 0 on "DFID
2896 ", maybe an upgraded file? (LU-3915)\n",
2897 osd_name(osd), PFID(lu_object_fid(&dt->do_lu)));
2898 spin_unlock(&obj->oo_guard);
2902 CDEBUG(D_INODE, DFID" decrease nlink %d\n",
2903 PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
2905 ldiskfs_dec_count(oh->ot_handle, inode);
2906 spin_unlock(&obj->oo_guard);
2908 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2909 LINVRNT(osd_invariant(obj));
2915 * Get the 64-bit version for an inode.
2917 static int osd_object_version_get(const struct lu_env *env,
2918 struct dt_object *dt, dt_obj_version_t *ver)
2920 struct inode *inode = osd_dt_obj(dt)->oo_inode;
2922 CDEBUG(D_INODE, "Get version "LPX64" for inode %lu\n",
2923 LDISKFS_I(inode)->i_fs_version, inode->i_ino);
2924 *ver = LDISKFS_I(inode)->i_fs_version;
2929 * Concurrency: @dt is read locked.
2931 static int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
2932 struct lu_buf *buf, const char *name,
2933 struct lustre_capa *capa)
2935 struct osd_object *obj = osd_dt_obj(dt);
2936 struct inode *inode = obj->oo_inode;
2937 struct osd_thread_info *info = osd_oti_get(env);
2938 struct dentry *dentry = &info->oti_obj_dentry;
2940 /* version get is not a real XATTR but uses the xattr API */
2941 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2942 /* for version we are just using the xattr API but change an inode field instead */
2944 if (buf->lb_len == 0)
2945 return sizeof(dt_obj_version_t);
2947 if (buf->lb_len < sizeof(dt_obj_version_t))
2950 osd_object_version_get(env, dt, buf->lb_buf);
2952 return sizeof(dt_obj_version_t);
2955 LASSERT(dt_object_exists(dt));
2956 LASSERT(!dt_object_remote(dt));
2957 LASSERT(inode->i_op != NULL);
2958 LASSERT(inode->i_op->getxattr != NULL);
2960 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
2963 return __osd_xattr_get(inode, dentry, name, buf->lb_buf, buf->lb_len);
2967 static int osd_declare_xattr_set(const struct lu_env *env,
2968 struct dt_object *dt,
2969 const struct lu_buf *buf, const char *name,
2970 int fl, struct thandle *handle)
2972 struct osd_thandle *oh;
2974 struct super_block *sb = osd_sb(osd_dev(dt->do_lu.lo_dev));
2976 LASSERT(handle != NULL);
2978 oh = container_of0(handle, struct osd_thandle, ot_super);
2979 LASSERT(oh->ot_handle == NULL);
2981 /* optimistic optimization: LMA is set first and usually fits in the inode */
2982 if (strcmp(name, XATTR_NAME_LMA) == 0) {
2983 if (dt_object_exists(dt))
2987 } else if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2990 credits = osd_dto_credits_noquota[DTO_XATTR_SET];
2991 if (buf && buf->lb_len > sb->s_blocksize) {
2992 credits *= (buf->lb_len + sb->s_blocksize - 1) >>
2993 sb->s_blocksize_bits;
2996 * xattr set may involve inode quota change, reserve credits for
2997 * dquot_initialize()
2999 oh->ot_credits += LDISKFS_MAXQUOTAS_INIT_BLOCKS(sb);
3002 osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET, credits);
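/*
 * Worked example for the credit scaling above (hypothetical numbers): with a
 * 4096-byte blocksize, a 9000-byte xattr value gives
 * (9000 + 4095) >> 12 = 3, so the base DTO_XATTR_SET credits are tripled.
 */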
3008 * Set the 64-bit version for object
3010 static void osd_object_version_set(const struct lu_env *env,
3011 struct dt_object *dt,
3012 dt_obj_version_t *new_version)
3014 struct inode *inode = osd_dt_obj(dt)->oo_inode;
3016 CDEBUG(D_INODE, "Set version "LPX64" (old "LPX64") for inode %lu\n",
3017 *new_version, LDISKFS_I(inode)->i_fs_version, inode->i_ino);
3019 LDISKFS_I(inode)->i_fs_version = *new_version;
3020 /** Version is set after all inode operations are finished,
3021 * so we should mark it dirty here */
3022 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
3026 * Concurrency: @dt is write locked.
3028 static int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
3029 const struct lu_buf *buf, const char *name, int fl,
3030 struct thandle *handle, struct lustre_capa *capa)
3032 struct osd_object *obj = osd_dt_obj(dt);
3033 struct inode *inode = obj->oo_inode;
3034 struct osd_thread_info *info = osd_oti_get(env);
3038 LASSERT(handle != NULL);
3040 /* version set is not a real XATTR */
3041 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
3042 /* for version we are just using the xattr API but change an inode field instead */
3044 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
3045 osd_object_version_set(env, dt, buf->lb_buf);
3046 return sizeof(dt_obj_version_t);
3049 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
3052 CDEBUG(D_INODE, DFID" set xattr '%s' with size %zu\n",
3053 PFID(lu_object_fid(&dt->do_lu)), name, buf->lb_len);
3055 osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
3056 if (fl & LU_XATTR_REPLACE)
3057 fs_flags |= XATTR_REPLACE;
3059 if (fl & LU_XATTR_CREATE)
3060 fs_flags |= XATTR_CREATE;
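/*
 * The LMV case below also records LMAI_STRIPED in the object's LMA, so that
 * a striped directory can later be recognized from the inode's LMA as well,
 * independently of the LMV EA itself.
 */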
3062 if (strcmp(name, XATTR_NAME_LMV) == 0) {
3063 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
3066 rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
3070 lma->lma_incompat |= LMAI_STRIPED;
3071 lustre_lma_swab(lma);
3072 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
3073 sizeof(*lma), XATTR_REPLACE);
3078 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LINKEA_OVERFLOW) &&
3079 strcmp(name, XATTR_NAME_LINK) == 0)
3082 return __osd_xattr_set(info, inode, name, buf->lb_buf, buf->lb_len,
3087 * Concurrency: @dt is read locked.
3089 static int osd_xattr_list(const struct lu_env *env, struct dt_object *dt,
3090 struct lu_buf *buf, struct lustre_capa *capa)
3092 struct osd_object *obj = osd_dt_obj(dt);
3093 struct inode *inode = obj->oo_inode;
3094 struct osd_thread_info *info = osd_oti_get(env);
3095 struct dentry *dentry = &info->oti_obj_dentry;
3097 LASSERT(dt_object_exists(dt));
3098 LASSERT(!dt_object_remote(dt));
3099 LASSERT(inode->i_op != NULL);
3100 LASSERT(inode->i_op->listxattr != NULL);
3102 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
3105 dentry->d_inode = inode;
3106 dentry->d_sb = inode->i_sb;
3107 return inode->i_op->listxattr(dentry, buf->lb_buf, buf->lb_len);
3110 static int osd_declare_xattr_del(const struct lu_env *env,
3111 struct dt_object *dt, const char *name,
3112 struct thandle *handle)
3114 struct osd_thandle *oh;
3115 struct super_block *sb = osd_sb(osd_dev(dt->do_lu.lo_dev));
3117 LASSERT(dt_object_exists(dt));
3118 LASSERT(!dt_object_remote(dt));
3119 LASSERT(handle != NULL);
3121 oh = container_of0(handle, struct osd_thandle, ot_super);
3122 LASSERT(oh->ot_handle == NULL);
3124 osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
3125 osd_dto_credits_noquota[DTO_XATTR_SET]);
3127 * xattr del may involve inode quota change, reserve credits for
3128 * dquot_initialize()
3130 oh->ot_credits += LDISKFS_MAXQUOTAS_INIT_BLOCKS(sb);
3136 * Concurrency: @dt is write locked.
3138 static int osd_xattr_del(const struct lu_env *env, struct dt_object *dt,
3139 const char *name, struct thandle *handle,
3140 struct lustre_capa *capa)
3142 struct osd_object *obj = osd_dt_obj(dt);
3143 struct inode *inode = obj->oo_inode;
3144 struct osd_thread_info *info = osd_oti_get(env);
3145 struct dentry *dentry = &info->oti_obj_dentry;
3148 LASSERT(dt_object_exists(dt));
3149 LASSERT(!dt_object_remote(dt));
3150 LASSERT(inode->i_op != NULL);
3151 LASSERT(inode->i_op->removexattr != NULL);
3152 LASSERT(handle != NULL);
3154 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
3157 osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
3159 ll_vfs_dq_init(inode);
3160 dentry->d_inode = inode;
3161 dentry->d_sb = inode->i_sb;
3162 rc = inode->i_op->removexattr(dentry, name);
3166 static struct obd_capa *osd_capa_get(const struct lu_env *env,
3167 struct dt_object *dt,
3168 struct lustre_capa *old, __u64 opc)
3170 struct osd_thread_info *info = osd_oti_get(env);
3171 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
3172 struct osd_object *obj = osd_dt_obj(d