4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2014, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/osd/osd_handler.c
38 * Top-level entry points into osd module
40 * Author: Nikita Danilov <nikita@clusterfs.com>
41 * Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
44 #define DEBUG_SUBSYSTEM S_OSD
46 #include <linux/module.h>
47 #include <linux/user_namespace.h>
48 #ifdef HAVE_UIDGID_HEADER
49 # include <linux/uidgid.h>
52 /* LUSTRE_VERSION_CODE */
53 #include <lustre_ver.h>
54 /* prerequisite for linux/xattr.h */
55 #include <linux/types.h>
/* prerequisite for linux/xattr.h */
#include <linux/fs.h>
58 /* XATTR_{REPLACE,CREATE} */
59 #include <linux/xattr.h>
61 #include <ldiskfs/ldiskfs.h>
62 #include <ldiskfs/xattr.h>
65 * struct OBD_{ALLOC,FREE}*()
68 #include <obd_support.h>
69 /* struct ptlrpc_thread */
70 #include <lustre_net.h>
71 #include <lustre_fid.h>
73 #include <lustre_param.h>
75 #include "osd_internal.h"
76 #include "osd_dynlocks.h"
78 /* llo_* api support */
79 #include <md_object.h>
80 #include <lustre_quota.h>
82 #include <lustre_linkea.h>
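/*
 * Module parameters. ldiskfs_pdo enables parallel directory operations
 * (per-directory htree locking); ldiskfs_track_declares_assert, when set,
 * turns under-declared transaction credits into an LBUG instead of a
 * warning.
 */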
int ldiskfs_pdo = 1;
CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
		"ldiskfs with parallel directory operations");
88 int ldiskfs_track_declares_assert;
89 CFS_MODULE_PARM(ldiskfs_track_declares_assert, "i", int, 0644,
90 "LBUG during tracking of declares");
92 /* Slab to allocate dynlocks */
93 struct kmem_cache *dynlock_cachep;
95 /* Slab to allocate osd_it_ea */
96 struct kmem_cache *osd_itea_cachep;
98 static struct lu_kmem_descr ldiskfs_caches[] = {
100 .ckd_cache = &dynlock_cachep,
101 .ckd_name = "dynlock_cache",
102 .ckd_size = sizeof(struct dynlock_handle)
105 .ckd_cache = &osd_itea_cachep,
106 .ckd_name = "osd_itea_cache",
107 .ckd_size = sizeof(struct osd_it_ea)
114 static const char dot[] = ".";
115 static const char dotdot[] = "..";
116 static const char remote_obj_dir[] = "REM_OBJ_DIR";
118 static const struct lu_object_operations osd_lu_obj_ops;
119 static const struct dt_object_operations osd_obj_ops;
120 static const struct dt_object_operations osd_obj_ea_ops;
121 static const struct dt_object_operations osd_obj_otable_it_ops;
122 static const struct dt_index_operations osd_index_iam_ops;
123 static const struct dt_index_operations osd_index_ea_ops;
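/*
 * Map each declared operation type to the operation whose credits are used
 * when it has to be rolled back; OSD_OT_MAX means no rollback accounting is
 * tracked for that operation.
 */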
125 int osd_trans_declare_op2rb[] = {
126 [OSD_OT_ATTR_SET] = OSD_OT_ATTR_SET,
127 [OSD_OT_PUNCH] = OSD_OT_MAX,
128 [OSD_OT_XATTR_SET] = OSD_OT_XATTR_SET,
129 [OSD_OT_CREATE] = OSD_OT_DESTROY,
130 [OSD_OT_DESTROY] = OSD_OT_CREATE,
131 [OSD_OT_REF_ADD] = OSD_OT_REF_DEL,
132 [OSD_OT_REF_DEL] = OSD_OT_REF_ADD,
133 [OSD_OT_WRITE] = OSD_OT_WRITE,
134 [OSD_OT_INSERT] = OSD_OT_DELETE,
135 [OSD_OT_DELETE] = OSD_OT_INSERT,
136 [OSD_OT_QUOTA] = OSD_OT_MAX,
139 static int osd_has_index(const struct osd_object *obj)
141 return obj->oo_dt.do_index_ops != NULL;
144 static int osd_object_invariant(const struct lu_object *l)
146 return osd_invariant(osd_obj(l));
150 * Concurrency: doesn't matter
154 * Concurrency: doesn't matter
156 static int osd_write_locked(const struct lu_env *env, struct osd_object *o)
158 struct osd_thread_info *oti = osd_oti_get(env);
159 return oti->oti_w_locks > 0 && o->oo_owner == env;
163 * Concurrency: doesn't access mutable data
165 static int osd_root_get(const struct lu_env *env,
166 struct dt_device *dev, struct lu_fid *f)
168 lu_local_obj_fid(f, OSD_FS_ROOT_OID);
173 * OSD object methods.
177 * Concurrency: no concurrent access is possible that early in object
180 static struct lu_object *osd_object_alloc(const struct lu_env *env,
181 const struct lu_object_header *hdr,
184 struct osd_object *mo;
190 l = &mo->oo_dt.do_lu;
191 dt_object_init(&mo->oo_dt, NULL, d);
192 mo->oo_dt.do_ops = &osd_obj_ea_ops;
193 l->lo_ops = &osd_lu_obj_ops;
194 init_rwsem(&mo->oo_sem);
195 init_rwsem(&mo->oo_ext_idx_sem);
196 spin_lock_init(&mo->oo_guard);
203 int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
204 struct dentry *dentry, struct lustre_mdt_attrs *lma)
208 CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
209 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
210 info->oti_mdt_attrs_old, LMA_OLD_SIZE);
212 if ((void *)lma != (void *)info->oti_mdt_attrs_old)
213 memcpy(lma, info->oti_mdt_attrs_old, sizeof(*lma));
215 lustre_lma_swab(lma);
216 /* Check LMA compatibility */
217 if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
218 CWARN("%.16s: unsupported incompat LMA feature(s) %#x "
219 "for fid = "DFID", ino = %lu\n",
220 LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
221 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
222 PFID(&lma->lma_self_fid), inode->i_ino);
225 } else if (rc == 0) {
 * Retrieve the object's inode from the backend ldiskfs filesystem.
235 struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
236 struct osd_inode_id *id)
238 struct inode *inode = NULL;
240 inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
242 CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
243 id->oii_ino, PTR_ERR(inode));
244 } else if (id->oii_gen != OSD_OII_NOGEN &&
245 inode->i_generation != id->oii_gen) {
246 CDEBUG(D_INODE, "unmatched inode: ino = %u, oii_gen = %u, "
247 "i_generation = %u\n",
248 id->oii_ino, id->oii_gen, inode->i_generation);
250 inode = ERR_PTR(-ESTALE);
251 } else if (inode->i_nlink == 0) {
/* due to parallel readdir and unlink,
 * we may find a dead inode here. */
254 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
256 inode = ERR_PTR(-ESTALE);
257 } else if (is_bad_inode(inode)) {
258 CWARN("%.16s: bad inode: ino = %u\n",
259 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, id->oii_ino);
261 inode = ERR_PTR(-ENOENT);
263 if (id->oii_gen == OSD_OII_NOGEN)
264 osd_id_gen(id, inode->i_ino, inode->i_generation);
/* Do not update file c/mtime in ldiskfs.
 * NB: we don't hold any lock to protect this because we don't
 * have a reference on the osd_object yet, but contention with
 * another lookup + attr_set can't happen in the tiny window
 * between the if (...) check and setting S_NOCMTIME. */
271 if (!(inode->i_flags & S_NOCMTIME))
272 inode->i_flags |= S_NOCMTIME;
277 static struct inode *
278 osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
279 struct osd_inode_id *id, struct lu_fid *fid)
281 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
285 inode = osd_iget(info, dev, id);
289 rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
291 *fid = lma->lma_self_fid;
292 } else if (rc == -ENODATA) {
293 if (unlikely(inode == osd_sb(dev)->s_root->d_inode))
294 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
296 lu_igif_build(fid, inode->i_ino, inode->i_generation);
305 * \retval +v: new filter_fid, does not contain self-fid
306 * \retval 0: filter_fid_old, contains self-fid
307 * \retval -v: other failure cases
309 int osd_get_idif(struct osd_thread_info *info, struct inode *inode,
310 struct dentry *dentry, struct lu_fid *fid)
312 struct filter_fid_old *ff = &info->oti_ff;
313 struct ost_id *ostid = &info->oti_ostid;
316 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_FID, ff, sizeof(*ff));
317 if (rc == sizeof(*ff)) {
319 ostid_set_seq(ostid, le64_to_cpu(ff->ff_seq));
320 ostid_set_id(ostid, le64_to_cpu(ff->ff_objid));
321 /* XXX: should use real OST index in the future. LU-3569 */
322 ostid_to_fid(fid, ostid, 0);
323 } else if (rc == sizeof(struct filter_fid)) {
325 } else if (rc >= 0) {
332 static int osd_lma_self_repair(struct osd_thread_info *info,
333 struct osd_device *osd, struct inode *inode,
334 const struct lu_fid *fid, __u32 compat)
339 LASSERT(current->journal_info == NULL);
341 jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
342 osd_dto_credits_noquota[DTO_XATTR_SET]);
345 CWARN("%s: cannot start journal for lma_self_repair: rc = %d\n",
350 rc = osd_ea_fid_set(info, inode, fid, compat, 0);
352 CWARN("%s: cannot self repair the LMA: rc = %d\n",
354 ldiskfs_journal_stop(jh);
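/*
 * Check that the inode's LMA (or, for OST objects, the IDIF stored in the
 * "fid" xattr) matches the FID requested by the caller. A missing LMA may be
 * self-repaired; a mismatch is reported so that OI scrub can fix the stale
 * mapping.
 */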
358 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
360 struct osd_thread_info *info = osd_oti_get(env);
361 struct osd_device *osd = osd_obj2dev(obj);
362 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
363 struct inode *inode = obj->oo_inode;
364 struct dentry *dentry = &info->oti_obj_dentry;
365 struct lu_fid *fid = NULL;
366 const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
370 CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
371 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
372 info->oti_mdt_attrs_old, LMA_OLD_SIZE);
373 if (rc == -ENODATA && !fid_is_igif(rfid) && osd->od_check_ff) {
374 fid = &lma->lma_self_fid;
375 rc = osd_get_idif(info, inode, dentry, fid);
376 if ((rc > 0) || (rc == -ENODATA && osd->od_index_in_idif)) {
/* For the given OST-object, if it has neither LMA nor
 * FID in XATTR_NAME_FID, then the given FID (which is
 * contained in the @obj, from the client RPC for locating
 * the OST-object) is trusted. We use it to generate
 * the LMA. */
382 osd_lma_self_repair(info, osd, inode, rfid,
388 if (unlikely(rc == -ENODATA))
396 lustre_lma_swab(lma);
397 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
398 CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
399 CWARN("%s: unsupported incompat LMA feature(s) %#x for "
400 "fid = "DFID", ino = %lu\n", osd_name(osd),
401 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
402 PFID(rfid), inode->i_ino);
405 fid = &lma->lma_self_fid;
409 if (fid != NULL && unlikely(!lu_fid_eq(rfid, fid))) {
410 if (fid_is_idif(rfid) && fid_is_idif(fid)) {
411 struct ost_id *oi = &info->oti_ostid;
412 struct lu_fid *fid1 = &info->oti_fid3;
413 __u32 idx = fid_idif_ost_idx(rfid);
/* For an old IDIF, the OST index is not part of the IDIF,
 * which means that different OSTs may have the same IDIF.
 * In that case we need a compatibility check to make sure
 * the OI scrub is triggered properly. */
419 if (idx != 0 && fid_idif_ost_idx(fid) == 0) {
420 /* Given @rfid is new, LMA is old. */
421 fid_to_ostid(fid, oi);
422 ostid_to_fid(fid1, oi, idx);
423 if (lu_fid_eq(fid1, rfid)) {
424 if (osd->od_index_in_idif)
425 osd_lma_self_repair(info, osd,
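/*
 * Map a FID to an inode. Lookup order: per-thread OI cache, OI scrub pending
 * list, then the OI files. On a missing or stale mapping this may trigger
 * OI scrub or fall back to a lookup under "/REMOTE_PARENT_DIR".
 */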
439 static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
440 const struct lu_fid *fid,
441 const struct lu_object_conf *conf)
443 struct osd_thread_info *info;
444 struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
445 struct osd_device *dev;
446 struct osd_idmap_cache *oic;
447 struct osd_inode_id *id;
449 struct osd_scrub *scrub;
450 struct scrub_file *sf;
454 bool in_cache = false;
455 bool triggered = false;
458 LINVRNT(osd_invariant(obj));
459 LASSERT(obj->oo_inode == NULL);
460 LASSERTF(fid_is_sane(fid) || fid_is_idif(fid), DFID"\n", PFID(fid));
463 scrub = &dev->od_scrub;
464 sf = &scrub->os_file;
465 info = osd_oti_get(env);
467 oic = &info->oti_cache;
469 if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
/* The object is created as a locking anchor, or is about to be
 * created on disk. No need to call osd_oi_lookup() here because a
 * FID should never be re-used; if it really is a duplicate FID for
 * some unexpected reason, we should be able to detect it later via
 * do_create->osd_oi_insert(). */
477 if (conf != NULL && conf->loc_flags & LOC_F_NEW)
478 GOTO(out, result = 0);
480 /* Search order: 1. per-thread cache. */
481 if (lu_fid_eq(fid, &oic->oic_fid) &&
482 likely(oic->oic_dev == dev)) {
489 if (!list_empty(&scrub->os_inconsistent_items)) {
490 /* Search order: 2. OI scrub pending list. */
491 result = osd_oii_lookup(dev, fid, id);
496 /* Search order: 3. OI files. */
497 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
498 if (result == -ENOENT) {
499 if (!(fid_is_norm(fid) || fid_is_igif(fid)) ||
500 !ldiskfs_test_bit(osd_oi_fid2idx(dev,fid),
502 GOTO(out, result = 0);
513 inode = osd_iget(info, dev, id);
515 result = PTR_ERR(inode);
516 if (result != -ENOENT && result != -ESTALE)
520 fid_zero(&oic->oic_fid);
522 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
524 GOTO(out, result = (result == -ENOENT ? 0 : result));
/* The OI mapping is there, but the inode is NOT there.
 * Two possible cases for that:
 *
 * 1) Backup/restore invalidated the OI mapping.
 * 2) Someone unlinked the object but did NOT remove
 *    the OI mapping, e.g. by mounting the target device
 *    as ldiskfs and modifying it directly.
 *
 * Generally, when the device is mounted, it automatically
 * checks whether the system was restored from a file-level
 * backup. We trust that detection to distinguish the 1st
 * case from the 2nd. */
538 if (!(scrub->os_file.sf_flags & SF_INCONSISTENT))
539 GOTO(out, result = 0);
542 if (unlikely(triggered))
543 GOTO(out, result = saved);
546 if (thread_is_running(&scrub->os_thread)) {
547 result = -EINPROGRESS;
548 } else if (!dev->od_noscrub) {
549 /* Since we do not know the right OI mapping, we have
550 * to trigger OI scrub to scan the whole device. */
551 result = osd_scrub_start(dev, SS_AUTO_FULL |
552 SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT);
553 CDEBUG(D_LFSCK | D_CONSOLE, "%.16s: trigger OI "
554 "scrub by RPC for "DFID", rc = %d [1]\n",
555 osd_name(dev), PFID(fid), result);
556 if (result == 0 || result == -EALREADY)
557 result = -EINPROGRESS;
/* We still have a chance to get a valid inode: for an
 * object that is referenced by a remote name entry, the
 * object on the local MDT is linked under the directory
 * "/REMOTE_PARENT_DIR" with its FID string as the name.
 *
 * We do not know whether the object for the given FID is
 * referenced by some remote name entry or not, and
 * especially for DNE II, a multiply-linked object may
 * have name entries residing on many MDTs.
 *
 * To keep things simple, OSD does not distinguish further
 * and just looks up "/REMOTE_PARENT_DIR". Usually this
 * only happens for an RPC from another MDT during OI
 * scrub, or for a client-side RPC that carries only a FID,
 * such as FID-to-path, or from an old connected client. */
578 result = osd_lookup_in_remote_parent(info, dev, fid, id);
584 GOTO(out, result = saved);
587 obj->oo_inode = inode;
588 LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
590 result = osd_check_lma(env, obj);
593 obj->oo_inode = NULL;
595 if (result != -EREMCHG)
599 fid_zero(&oic->oic_fid);
601 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
605 if (result != -ENOENT)
608 if (!in_oi && (fid_is_norm(fid) || fid_is_igif(fid)) &&
609 ldiskfs_test_bit(osd_oi_fid2idx(dev, fid),
613 GOTO(out, result = 0);
616 obj->oo_compat_dot_created = 1;
617 obj->oo_compat_dotdot_created = 1;
619 if (!S_ISDIR(inode->i_mode) || !ldiskfs_pdo) /* done */
620 GOTO(out, result = 0);
622 LASSERT(obj->oo_hl_head == NULL);
623 obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
624 if (obj->oo_hl_head == NULL) {
625 obj->oo_inode = NULL;
627 GOTO(out, result = -ENOMEM);
629 GOTO(out, result = 0);
632 LINVRNT(osd_invariant(obj));
637 * Concurrency: shouldn't matter.
639 static void osd_object_init0(struct osd_object *obj)
641 LASSERT(obj->oo_inode != NULL);
642 obj->oo_dt.do_body_ops = &osd_body_ops;
643 obj->oo_dt.do_lu.lo_header->loh_attr |=
644 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
648 * Concurrency: no concurrent access is possible that early in object
651 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
652 const struct lu_object_conf *conf)
654 struct osd_object *obj = osd_obj(l);
657 LINVRNT(osd_invariant(obj));
659 if (fid_is_otable_it(&l->lo_header->loh_fid)) {
660 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
661 l->lo_header->loh_attr |= LOHA_EXISTS;
665 result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
666 obj->oo_dt.do_body_ops = &osd_body_ops_new;
667 if (result == 0 && obj->oo_inode != NULL)
668 osd_object_init0(obj);
670 LINVRNT(osd_invariant(obj));
675 * Concurrency: no concurrent access is possible that late in object
678 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
680 struct osd_object *obj = osd_obj(l);
682 LINVRNT(osd_invariant(obj));
684 dt_object_fini(&obj->oo_dt);
685 if (obj->oo_hl_head != NULL)
686 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
691 * Concurrency: no concurrent access is possible that late in object
694 static void osd_index_fini(struct osd_object *o)
696 struct iam_container *bag;
698 if (o->oo_dir != NULL) {
699 bag = &o->oo_dir->od_container;
700 if (o->oo_inode != NULL) {
701 if (bag->ic_object == o->oo_inode)
702 iam_container_fini(bag);
704 OBD_FREE_PTR(o->oo_dir);
710 * Concurrency: no concurrent access is possible that late in object
711 * life-cycle (for all existing callers, that is. New callers have to provide
712 * their own locking.)
714 static int osd_inode_unlinked(const struct inode *inode)
716 return inode->i_nlink == 0;
720 OSD_TXN_OI_DELETE_CREDITS = 20,
721 OSD_TXN_INODE_DELETE_CREDITS = 20
728 #if OSD_THANDLE_STATS
730 * Set time when the handle is allocated
732 static void osd_th_alloced(struct osd_thandle *oth)
734 oth->oth_alloced = cfs_time_current();
738 * Set time when the handle started
740 static void osd_th_started(struct osd_thandle *oth)
742 oth->oth_started = cfs_time_current();
746 * Helper function to convert time interval to microseconds packed in
749 static long interval_to_usec(cfs_time_t start, cfs_time_t end)
753 cfs_duration_usec(cfs_time_sub(end, start), &val);
754 return val.tv_sec * 1000000 + val.tv_usec;
 * Check whether we have been dealing with this handle for too long.
760 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
761 cfs_time_t alloced, cfs_time_t started,
764 cfs_time_t now = cfs_time_current();
766 LASSERT(dev != NULL);
768 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
769 interval_to_usec(alloced, started));
770 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
771 interval_to_usec(started, closed));
772 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
773 interval_to_usec(closed, now));
775 if (cfs_time_before(cfs_time_add(alloced, cfs_time_seconds(30)), now)) {
776 CWARN("transaction handle %p was open for too long: "
778 "alloced "CFS_TIME_T" ,"
779 "started "CFS_TIME_T" ,"
780 "closed "CFS_TIME_T"\n",
781 oth, now, alloced, started, closed);
782 libcfs_debug_dumpstack(NULL);
786 #define OSD_CHECK_SLOW_TH(oth, dev, expr) \
788 cfs_time_t __closed = cfs_time_current(); \
789 cfs_time_t __alloced = oth->oth_alloced; \
790 cfs_time_t __started = oth->oth_started; \
793 __osd_th_check_slow(oth, dev, __alloced, __started, __closed); \
796 #else /* OSD_THANDLE_STATS */
798 #define osd_th_alloced(h) do {} while(0)
799 #define osd_th_started(h) do {} while(0)
800 #define OSD_CHECK_SLOW_TH(oth, dev, expr) expr
802 #endif /* OSD_THANDLE_STATS */
805 * Concurrency: doesn't access mutable data.
807 static int osd_param_is_not_sane(const struct osd_device *dev,
808 const struct thandle *th)
810 struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
812 return oh->ot_credits > osd_transaction_size(dev);
816 * Concurrency: shouldn't matter.
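 *
 * Journal commit callback, invoked by ldiskfs once the transaction has
 * committed: it runs the registered dt_txn_commit_cb callbacks and drops
 * the references taken when the transaction was started.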
818 static void osd_trans_commit_cb(struct super_block *sb,
819 struct ldiskfs_journal_cb_entry *jcb, int error)
821 struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
822 struct thandle *th = &oh->ot_super;
823 struct lu_device *lud = &th->th_dev->dd_lu_dev;
824 struct dt_txn_commit_cb *dcb, *tmp;
826 LASSERT(oh->ot_handle == NULL);
829 CERROR("transaction @0x%p commit error: %d\n", th, error);
831 dt_txn_hook_commit(th);
833 /* call per-transaction callbacks if any */
834 list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
835 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
836 "commit callback entry: magic=%x name='%s'\n",
837 dcb->dcb_magic, dcb->dcb_name);
838 list_del_init(&dcb->dcb_linkage);
839 dcb->dcb_func(NULL, th, dcb, error);
842 lu_ref_del_at(&lud->ld_reference, &oh->ot_dev_link, "osd-tx", th);
846 lu_context_exit(&th->th_ctx);
847 lu_context_fini(&th->th_ctx);
851 static struct thandle *osd_trans_create(const struct lu_env *env,
854 struct osd_thread_info *oti = osd_oti_get(env);
855 struct osd_iobuf *iobuf = &oti->oti_iobuf;
856 struct osd_thandle *oh;
/* no pending IO in this thread should be left over from a previous request */
861 LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
863 th = ERR_PTR(-ENOMEM);
864 OBD_ALLOC_GFP(oh, sizeof *oh, GFP_NOFS);
866 oh->ot_quota_trans = &oti->oti_quota_trans;
867 memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
871 th->th_tags = LCT_TX_HANDLE;
873 INIT_LIST_HEAD(&oh->ot_dcb_list);
876 memset(oti->oti_declare_ops, 0,
877 sizeof(oti->oti_declare_ops));
878 memset(oti->oti_declare_ops_rb, 0,
879 sizeof(oti->oti_declare_ops_rb));
880 memset(oti->oti_declare_ops_cred, 0,
881 sizeof(oti->oti_declare_ops_cred));
882 oti->oti_rollback = false;
888 * Concurrency: shouldn't matter.
890 static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
893 struct osd_thread_info *oti = osd_oti_get(env);
894 struct osd_device *dev = osd_dt_dev(d);
896 struct osd_thandle *oh;
901 LASSERT(current->journal_info == NULL);
903 oh = container_of0(th, struct osd_thandle, ot_super);
905 LASSERT(oh->ot_handle == NULL);
907 rc = dt_txn_hook_start(env, d, th);
911 if (unlikely(osd_param_is_not_sane(dev, th))) {
912 static unsigned long last_printed;
913 static int last_credits;
915 CWARN("%.16s: too many transaction credits (%d > %d)\n",
916 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
918 osd_journal(dev)->j_max_transaction_buffers);
919 CWARN(" create: %u/%u, destroy: %u/%u\n",
920 oti->oti_declare_ops[OSD_OT_CREATE],
921 oti->oti_declare_ops_cred[OSD_OT_CREATE],
922 oti->oti_declare_ops[OSD_OT_DESTROY],
923 oti->oti_declare_ops_cred[OSD_OT_DESTROY]);
924 CWARN(" attr_set: %u/%u, xattr_set: %u/%u\n",
925 oti->oti_declare_ops[OSD_OT_ATTR_SET],
926 oti->oti_declare_ops_cred[OSD_OT_ATTR_SET],
927 oti->oti_declare_ops[OSD_OT_XATTR_SET],
928 oti->oti_declare_ops_cred[OSD_OT_XATTR_SET]);
929 CWARN(" write: %u/%u, punch: %u/%u, quota %u/%u\n",
930 oti->oti_declare_ops[OSD_OT_WRITE],
931 oti->oti_declare_ops_cred[OSD_OT_WRITE],
932 oti->oti_declare_ops[OSD_OT_PUNCH],
933 oti->oti_declare_ops_cred[OSD_OT_PUNCH],
934 oti->oti_declare_ops[OSD_OT_QUOTA],
935 oti->oti_declare_ops_cred[OSD_OT_QUOTA]);
936 CWARN(" insert: %u/%u, delete: %u/%u\n",
937 oti->oti_declare_ops[OSD_OT_INSERT],
938 oti->oti_declare_ops_cred[OSD_OT_INSERT],
939 oti->oti_declare_ops[OSD_OT_DELETE],
940 oti->oti_declare_ops_cred[OSD_OT_DELETE]);
941 CWARN(" ref_add: %u/%u, ref_del: %u/%u\n",
942 oti->oti_declare_ops[OSD_OT_REF_ADD],
943 oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
944 oti->oti_declare_ops[OSD_OT_REF_DEL],
945 oti->oti_declare_ops_cred[OSD_OT_REF_DEL]);
947 if (last_credits != oh->ot_credits &&
948 time_after(jiffies, last_printed +
949 msecs_to_jiffies(60 * MSEC_PER_SEC))) {
950 libcfs_debug_dumpstack(NULL);
951 last_credits = oh->ot_credits;
952 last_printed = jiffies;
/* XXX Limit the credits to 'max_transaction_buffers', and
 * let the underlying filesystem catch the error if we
 * really need that many credits.
 *
 * This should be removed once we can calculate the
 * credits precisely. */
960 oh->ot_credits = osd_transaction_size(dev);
 * XXX temporary stuff. Some abstraction layer should be used.
967 jh = osd_journal_start_sb(osd_sb(dev), LDISKFS_HT_MISC, oh->ot_credits);
971 LASSERT(oti->oti_txns == 0);
972 lu_context_init(&th->th_ctx, th->th_tags);
973 lu_context_enter(&th->th_ctx);
975 lu_device_get(&d->dd_lu_dev);
976 lu_ref_add_at(&d->dd_lu_dev.ld_reference, &oh->ot_dev_link,
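/*
 * Check whether the given FID sequence is served by this node by consulting
 * the (local) FLD; returns 1 when ss_node_id matches the index stored in the
 * FLD entry.
 */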
987 static int osd_seq_exists(const struct lu_env *env,
988 struct osd_device *osd, u64 seq)
990 struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
991 struct seq_server_site *ss = osd_seq_site(osd);
996 LASSERT(ss->ss_server_fld != NULL);
998 rc = osd_fld_lookup(env, osd, seq, range);
1001 CERROR("%s: can't lookup FLD sequence "LPX64
1002 ": rc = %d\n", osd_name(osd), seq, rc);
1006 RETURN(ss->ss_node_id == range->lsr_index);
1010 * Concurrency: shouldn't matter.
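 *
 * Stop a transaction: register the commit callback, run the stop hooks,
 * stop the journal handle, notify the quota slave, and wait for any direct
 * IO submitted by this thread under the handle to complete.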
1012 static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
1016 struct osd_thandle *oh;
1017 struct osd_thread_info *oti = osd_oti_get(env);
1018 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1019 struct osd_device *osd = osd_dt_dev(th->th_dev);
1020 struct qsd_instance *qsd = osd->od_quota_slave;
1021 struct lquota_trans *qtrans;
1024 oh = container_of0(th, struct osd_thandle, ot_super);
1026 qtrans = oh->ot_quota_trans;
1027 oh->ot_quota_trans = NULL;
1029 if (oh->ot_handle != NULL) {
1030 handle_t *hdl = oh->ot_handle;
 * Add the commit callback.
 * Notice we don't do this in osd_trans_start(), as the
 * underlying transaction can change during truncate.
1037 ldiskfs_journal_callback_add(hdl, osd_trans_commit_cb,
1040 LASSERT(oti->oti_txns == 1);
1042 rc = dt_txn_hook_stop(env, th);
1044 CERROR("%s: failed in transaction hook: rc = %d\n",
1047 /* hook functions might modify th_sync */
1048 hdl->h_sync = th->th_sync;
1050 oh->ot_handle = NULL;
1051 OSD_CHECK_SLOW_TH(oh, osd, rc = ldiskfs_journal_stop(hdl));
1053 CERROR("%s: failed to stop transaction: rc = %d\n",
1059 /* inform the quota slave device that the transaction is stopping */
1060 qsd_op_end(env, qsd, qtrans);
/* As we want journal IO and data IO to be concurrent, we don't block
 * awaiting data IO completion in osd_do_bio(); instead we wait here
 * once the transaction is submitted to the journal. Regular requests
 * don't do direct IO (except read/write), so this wait_event is
 * usually a no-op.
 *
 * IMPORTANT: we have to wait until any IO submitted by this thread is
 * completed, otherwise the iobuf may be corrupted by a different
 * request. */
1071 wait_event(iobuf->dr_wait,
1072 atomic_read(&iobuf->dr_numreqs) == 0);
1073 osd_fini_iobuf(osd, iobuf);
1075 rc = iobuf->dr_error;
1080 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
1082 struct osd_thandle *oh = container_of0(th, struct osd_thandle,
1085 LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
1086 LASSERT(&dcb->dcb_func != NULL);
1087 list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
1093 * Called just before object is freed. Releases all resources except for
1094 * object itself (that is released by osd_object_free()).
1096 * Concurrency: no concurrent access is possible that late in object
1099 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
1101 struct osd_object *obj = osd_obj(l);
1102 struct inode *inode = obj->oo_inode;
1104 LINVRNT(osd_invariant(obj));
 * If the object is unlinked, remove the fid->ino mapping from the object index.
1110 osd_index_fini(obj);
1111 if (inode != NULL) {
1112 struct qsd_instance *qsd = osd_obj2dev(obj)->od_quota_slave;
1113 qid_t uid = i_uid_read(inode);
1114 qid_t gid = i_gid_read(inode);
1117 obj->oo_inode = NULL;
1120 struct osd_thread_info *info = osd_oti_get(env);
1121 struct lquota_id_info *qi = &info->oti_qi;
1123 /* Release granted quota to master if necessary */
1124 qi->lqi_id.qid_uid = uid;
1125 qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
1127 qi->lqi_id.qid_uid = gid;
1128 qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
1134 * Concurrency: ->loo_object_release() is called under site spin-lock.
1136 static void osd_object_release(const struct lu_env *env,
1137 struct lu_object *l)
1142 * Concurrency: shouldn't matter.
1144 static int osd_object_print(const struct lu_env *env, void *cookie,
1145 lu_printer_t p, const struct lu_object *l)
1147 struct osd_object *o = osd_obj(l);
1148 struct iam_descr *d;
1150 if (o->oo_dir != NULL)
1151 d = o->oo_dir->od_container.ic_descr;
1154 return (*p)(env, cookie,
1155 LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
1157 o->oo_inode ? o->oo_inode->i_ino : 0UL,
1158 o->oo_inode ? o->oo_inode->i_generation : 0,
1159 d ? d->id_ops->id_name : "plain");
1162 #define GRANT_FOR_LOCAL_OIDS 32 /* 128kB for last_rcvd, quota files, ... */
1165 * Concurrency: shouldn't matter.
1167 int osd_statfs(const struct lu_env *env, struct dt_device *d,
1168 struct obd_statfs *sfs)
1170 struct osd_device *osd = osd_dt_dev(d);
1171 struct super_block *sb = osd_sb(osd);
1172 struct kstatfs *ksfs;
1175 if (unlikely(osd->od_mnt == NULL))
1176 return -EINPROGRESS;
/* osd_lproc.c calls this without an env; allocate ksfs for that case */
1179 if (unlikely(env == NULL)) {
1180 OBD_ALLOC_PTR(ksfs);
1184 ksfs = &osd_oti_get(env)->oti_ksfs;
1187 spin_lock(&osd->od_osfs_lock);
1188 result = sb->s_op->statfs(sb->s_root, ksfs);
1189 if (likely(result == 0)) { /* N.B. statfs can't really fail */
1190 statfs_pack(sfs, ksfs);
1191 if (sb->s_flags & MS_RDONLY)
1192 sfs->os_state = OS_STATE_READONLY;
1195 spin_unlock(&osd->od_osfs_lock);
1197 if (unlikely(env == NULL))
1200 /* Reserve a small amount of space for local objects like last_rcvd,
1201 * llog, quota files, ... */
1202 if (sfs->os_bavail <= GRANT_FOR_LOCAL_OIDS) {
1205 sfs->os_bavail -= GRANT_FOR_LOCAL_OIDS;
1206 /** Take out metadata overhead for indirect blocks */
1207 sfs->os_bavail -= sfs->os_bavail >> (sb->s_blocksize_bits - 3);
 * Estimate space needed for file creations. We assume the largest filename
 * is the decimal form of 2^64 - 1, hence a filename of 20 chars.
 * This is 28 bytes per object, i.e. 28MB for 1M objects ... not so bad.
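 * For example, assuming the standard ldiskfs dirent layout,
 * __LDISKFS_DIR_REC_LEN(20) = (20 + 8 + 3) & ~3 = 28 bytes per entry.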
1218 #ifdef __LDISKFS_DIR_REC_LEN
1219 #define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
1221 #define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
1225 * Concurrency: doesn't access mutable data.
1227 static void osd_conf_get(const struct lu_env *env,
1228 const struct dt_device *dev,
1229 struct dt_device_param *param)
1231 struct super_block *sb = osd_sb(osd_dt_dev(dev));
1235 * XXX should be taken from not-yet-existing fs abstraction layer.
1237 param->ddp_max_name_len = LDISKFS_NAME_LEN;
1238 param->ddp_max_nlink = LDISKFS_LINK_MAX;
1239 param->ddp_block_shift = sb->s_blocksize_bits;
1240 param->ddp_mount_type = LDD_MT_LDISKFS;
1241 param->ddp_maxbytes = sb->s_maxbytes;
/* The overhead estimate should be fairly accurate, so we take only a tiny
 * error margin, which also avoids fragmenting the filesystem too much */
1244 param->ddp_grant_reserved = 2; /* end up to be 1.9% after conversion */
/* inodes are statically allocated, so per-inode space consumption
 * is the space consumed by the directory entry */
1247 param->ddp_inodespace = PER_OBJ_USAGE;
1248 /* per-fragment overhead to be used by the client code */
1249 param->ddp_grant_frag = 6 * LDISKFS_BLOCK_SIZE(sb);
1250 param->ddp_mntopts = 0;
1251 if (test_opt(sb, XATTR_USER))
1252 param->ddp_mntopts |= MNTOPT_USERXATTR;
1253 if (test_opt(sb, POSIX_ACL))
1254 param->ddp_mntopts |= MNTOPT_ACL;
/* LOD might calculate the max stripe count based on max_ea_size,
 * so we need to take that overhead into account as well:
 * xattr_header + magic + xattr_entry_head */
1259 ea_overhead = sizeof(struct ldiskfs_xattr_header) + sizeof(__u32) +
1260 LDISKFS_XATTR_LEN(XATTR_NAME_MAX_LEN);
1262 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
1263 if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
1264 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE -
1268 param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
1272 * Concurrency: shouldn't matter.
1274 static int osd_sync(const struct lu_env *env, struct dt_device *d)
1278 CDEBUG(D_CACHE, "syncing OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
1280 rc = ldiskfs_force_commit(osd_sb(osd_dt_dev(d)));
1282 CDEBUG(D_CACHE, "synced OSD %s: rc = %d\n",
1283 LUSTRE_OSD_LDISKFS_NAME, rc);
1289 * Start commit for OSD device.
 * An implementation of the dt_commit_async method for the OSD device.
 * Asynchronously starts the underlying fs sync and thereby a transaction
 * commit.
1295 * \param env environment
1296 * \param d dt device
1298 * \see dt_device_operations
1300 static int osd_commit_async(const struct lu_env *env,
1301 struct dt_device *d)
1303 struct super_block *s = osd_sb(osd_dt_dev(d));
1306 CDEBUG(D_HA, "async commit OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
1307 RETURN(s->s_op->sync_fs(s, 0));
1311 * Concurrency: shouldn't matter.
1314 static int osd_ro(const struct lu_env *env, struct dt_device *d)
1316 struct super_block *sb = osd_sb(osd_dt_dev(d));
1317 struct block_device *dev = sb->s_bdev;
1318 #ifdef HAVE_DEV_SET_RDONLY
1319 struct block_device *jdev = LDISKFS_SB(sb)->journal_bdev;
1322 int rc = -EOPNOTSUPP;
1326 #ifdef HAVE_DEV_SET_RDONLY
1327 CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
1329 if (jdev && (jdev != dev)) {
1330 CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
1332 dev_set_rdonly(jdev);
1334 CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
1335 dev_set_rdonly(dev);
1337 CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
1338 osd_dt_dev(d)->od_svname, (long)dev, rc);
 * Note: quota credits are not counted here.
 * If we mount with --data_journal we may need more.
1347 const int osd_dto_credits_noquota[DTO_NR] = {
 * INDEX_EXTRA_TRANS_BLOCKS(8) +
 * SINGLEDATA_TRANS_BLOCKS(8)
 * XXX Note: maybe iam needs more, since iam has more levels than
 * the ext3 htree.
1355 [DTO_INDEX_INSERT] = 16,
 * just modify a single entry, probably merging a few within a block
1360 [DTO_INDEX_DELETE] = 1,
1364 [DTO_INDEX_UPDATE] = 16,
1366 * 4(inode, inode bits, groups, GDT)
1367 * notice: OI updates are counted separately with DTO_INDEX_INSERT
1369 [DTO_OBJECT_CREATE] = 4,
1371 * 4(inode, inode bits, groups, GDT)
1372 * notice: OI updates are counted separately with DTO_INDEX_DELETE
1374 [DTO_OBJECT_DELETE] = 4,
1376 * Attr set credits (inode)
1378 [DTO_ATTR_SET_BASE] = 1,
1380 * Xattr set. The same as xattr of EXT3.
1381 * DATA_TRANS_BLOCKS(14)
 * XXX Note: in the original MDS implementation INDEX_EXTRA_TRANS_BLOCKS
 * are also counted in. It is not clear why.
1385 [DTO_XATTR_SET] = 14,
1387 * credits for inode change during write.
1389 [DTO_WRITE_BASE] = 3,
1391 * credits for single block write.
1393 [DTO_WRITE_BLOCK] = 14,
 * Attr set credits for chown.
 * These are extra credits for setattr, and they are zero without quota
1398 [DTO_ATTR_SET_CHOWN] = 0
1401 static const struct dt_device_operations osd_dt_ops = {
1402 .dt_root_get = osd_root_get,
1403 .dt_statfs = osd_statfs,
1404 .dt_trans_create = osd_trans_create,
1405 .dt_trans_start = osd_trans_start,
1406 .dt_trans_stop = osd_trans_stop,
1407 .dt_trans_cb_add = osd_trans_cb_add,
1408 .dt_conf_get = osd_conf_get,
1409 .dt_sync = osd_sync,
1411 .dt_commit_async = osd_commit_async,
1414 static void osd_object_read_lock(const struct lu_env *env,
1415 struct dt_object *dt, unsigned role)
1417 struct osd_object *obj = osd_dt_obj(dt);
1418 struct osd_thread_info *oti = osd_oti_get(env);
1420 LINVRNT(osd_invariant(obj));
1422 LASSERT(obj->oo_owner != env);
1423 down_read_nested(&obj->oo_sem, role);
1425 LASSERT(obj->oo_owner == NULL);
1429 static void osd_object_write_lock(const struct lu_env *env,
1430 struct dt_object *dt, unsigned role)
1432 struct osd_object *obj = osd_dt_obj(dt);
1433 struct osd_thread_info *oti = osd_oti_get(env);
1435 LINVRNT(osd_invariant(obj));
1437 LASSERT(obj->oo_owner != env);
1438 down_write_nested(&obj->oo_sem, role);
1440 LASSERT(obj->oo_owner == NULL);
1441 obj->oo_owner = env;
1445 static void osd_object_read_unlock(const struct lu_env *env,
1446 struct dt_object *dt)
1448 struct osd_object *obj = osd_dt_obj(dt);
1449 struct osd_thread_info *oti = osd_oti_get(env);
1451 LINVRNT(osd_invariant(obj));
1453 LASSERT(oti->oti_r_locks > 0);
1455 up_read(&obj->oo_sem);
1458 static void osd_object_write_unlock(const struct lu_env *env,
1459 struct dt_object *dt)
1461 struct osd_object *obj = osd_dt_obj(dt);
1462 struct osd_thread_info *oti = osd_oti_get(env);
1464 LINVRNT(osd_invariant(obj));
1466 LASSERT(obj->oo_owner == env);
1467 LASSERT(oti->oti_w_locks > 0);
1469 obj->oo_owner = NULL;
1470 up_write(&obj->oo_sem);
1473 static int osd_object_write_locked(const struct lu_env *env,
1474 struct dt_object *dt)
1476 struct osd_object *obj = osd_dt_obj(dt);
1478 LINVRNT(osd_invariant(obj));
1480 return obj->oo_owner == env;
1483 static struct timespec *osd_inode_time(const struct lu_env *env,
1484 struct inode *inode, __u64 seconds)
1486 struct osd_thread_info *oti = osd_oti_get(env);
1487 struct timespec *t = &oti->oti_time;
1489 t->tv_sec = seconds;
1491 *t = timespec_trunc(*t, inode->i_sb->s_time_gran);
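/* Fill a lu_attr from the inode's in-core fields; the caller serializes
 * access with oo_guard. */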
1496 static void osd_inode_getattr(const struct lu_env *env,
1497 struct inode *inode, struct lu_attr *attr)
1499 attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
1500 LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
1501 LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE |
1504 attr->la_atime = LTIME_S(inode->i_atime);
1505 attr->la_mtime = LTIME_S(inode->i_mtime);
1506 attr->la_ctime = LTIME_S(inode->i_ctime);
1507 attr->la_mode = inode->i_mode;
1508 attr->la_size = i_size_read(inode);
1509 attr->la_blocks = inode->i_blocks;
1510 attr->la_uid = i_uid_read(inode);
1511 attr->la_gid = i_gid_read(inode);
1512 attr->la_flags = LDISKFS_I(inode)->i_flags;
1513 attr->la_nlink = inode->i_nlink;
1514 attr->la_rdev = inode->i_rdev;
1515 attr->la_blksize = 1 << inode->i_blkbits;
1516 attr->la_blkbits = inode->i_blkbits;
1519 static int osd_attr_get(const struct lu_env *env,
1520 struct dt_object *dt,
1521 struct lu_attr *attr)
1523 struct osd_object *obj = osd_dt_obj(dt);
1525 if (!dt_object_exists(dt))
1528 LASSERT(!dt_object_remote(dt));
1529 LINVRNT(osd_invariant(obj));
1531 spin_lock(&obj->oo_guard);
1532 osd_inode_getattr(env, obj->oo_inode, attr);
1533 spin_unlock(&obj->oo_guard);
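/*
 * Declare credits for an attribute update. When the UID or GID changes,
 * quota (both inode and block counts) is also reserved for the new owner
 * and adjusted for the old one, for both user and group quota.
 */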
1537 static int osd_declare_attr_set(const struct lu_env *env,
1538 struct dt_object *dt,
1539 const struct lu_attr *attr,
1540 struct thandle *handle)
1542 struct osd_thandle *oh;
1543 struct osd_object *obj;
1544 struct osd_thread_info *info = osd_oti_get(env);
1545 struct lquota_id_info *qi = &info->oti_qi;
1553 LASSERT(dt != NULL);
1554 LASSERT(handle != NULL);
1556 obj = osd_dt_obj(dt);
1557 LASSERT(osd_invariant(obj));
1559 oh = container_of0(handle, struct osd_thandle, ot_super);
1560 LASSERT(oh->ot_handle == NULL);
1562 osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
1563 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
1565 if (attr == NULL || obj->oo_inode == NULL)
1568 bspace = obj->oo_inode->i_blocks;
1569 bspace <<= obj->oo_inode->i_sb->s_blocksize_bits;
1570 bspace = toqb(bspace);
/* Changing ownership is always performed by the super user, so it
 * should not fail because of quota.
 *
 * We still need to call osd_declare_qid() to calculate the journal
 * credits for updating the quota accounting files and to trigger
 * quota space adjustment once the operation is completed. */
1578 if (attr->la_valid & LA_UID || attr->la_valid & LA_GID) {
1580 uid = i_uid_read(obj->oo_inode);
1581 qi->lqi_type = USRQUOTA;
1582 enforce = (attr->la_valid & LA_UID) && (attr->la_uid != uid);
1583 /* inode accounting */
1584 qi->lqi_is_blk = false;
1586 /* one more inode for the new uid ... */
1587 qi->lqi_id.qid_uid = attr->la_uid;
1589 /* Reserve credits for the new uid */
1590 rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
1591 if (rc == -EDQUOT || rc == -EINPROGRESS)
1596 /* and one less inode for the current uid */
1597 qi->lqi_id.qid_uid = uid;
1599 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1600 if (rc == -EDQUOT || rc == -EINPROGRESS)
1605 /* block accounting */
1606 qi->lqi_is_blk = true;
1608 /* more blocks for the new uid ... */
1609 qi->lqi_id.qid_uid = attr->la_uid;
1610 qi->lqi_space = bspace;
 * Credits for the new uid have been reserved; re-use "obj"
 * to save credit reservation. */
1615 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1616 if (rc == -EDQUOT || rc == -EINPROGRESS)
1621 /* and finally less blocks for the current uid */
1622 qi->lqi_id.qid_uid = uid;
1623 qi->lqi_space = -bspace;
1624 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1625 if (rc == -EDQUOT || rc == -EINPROGRESS)
1631 gid = i_gid_read(obj->oo_inode);
1632 qi->lqi_type = GRPQUOTA;
1633 enforce = (attr->la_valid & LA_GID) && (attr->la_gid != gid);
1635 /* inode accounting */
1636 qi->lqi_is_blk = false;
1638 /* one more inode for the new gid ... */
1639 qi->lqi_id.qid_gid = attr->la_gid;
1641 rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
1642 if (rc == -EDQUOT || rc == -EINPROGRESS)
1647 /* and one less inode for the current gid */
1648 qi->lqi_id.qid_gid = gid;
1650 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1651 if (rc == -EDQUOT || rc == -EINPROGRESS)
1656 /* block accounting */
1657 qi->lqi_is_blk = true;
1659 /* more blocks for the new gid ... */
1660 qi->lqi_id.qid_gid = attr->la_gid;
1661 qi->lqi_space = bspace;
1662 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1663 if (rc == -EDQUOT || rc == -EINPROGRESS)
1668 /* and finally less blocks for the current gid */
1669 qi->lqi_id.qid_gid = gid;
1670 qi->lqi_space = -bspace;
1671 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
1672 if (rc == -EDQUOT || rc == -EINPROGRESS)
1681 static int osd_inode_setattr(const struct lu_env *env,
1682 struct inode *inode, const struct lu_attr *attr)
1684 __u64 bits = attr->la_valid;
/* Only allow setting the size for regular files */
1687 if (!S_ISREG(inode->i_mode))
1688 bits &= ~(LA_SIZE | LA_BLOCKS);
1693 if (bits & LA_ATIME)
1694 inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
1695 if (bits & LA_CTIME)
1696 inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
1697 if (bits & LA_MTIME)
1698 inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
1699 if (bits & LA_SIZE) {
1700 LDISKFS_I(inode)->i_disksize = attr->la_size;
1701 i_size_write(inode, attr->la_size);
1705 /* OSD should not change "i_blocks" which is used by quota.
1706 * "i_blocks" should be changed by ldiskfs only. */
1707 if (bits & LA_BLOCKS)
1708 inode->i_blocks = attr->la_blocks;
1711 inode->i_mode = (inode->i_mode & S_IFMT) |
1712 (attr->la_mode & ~S_IFMT);
1714 i_uid_write(inode, attr->la_uid);
1716 i_gid_write(inode, attr->la_gid);
1717 if (bits & LA_NLINK)
1718 set_nlink(inode, attr->la_nlink);
1720 inode->i_rdev = attr->la_rdev;
1722 if (bits & LA_FLAGS) {
1723 /* always keep S_NOCMTIME */
1724 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
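/*
 * Transfer quota accounting from the old UID/GID to the new one via the
 * VFS dquot interface whenever the ownership actually changes.
 */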
1730 static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
1732 if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
1733 (attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
1737 ll_vfs_dq_init(inode);
1739 if (attr->la_valid & LA_UID)
1740 iattr.ia_valid |= ATTR_UID;
1741 if (attr->la_valid & LA_GID)
1742 iattr.ia_valid |= ATTR_GID;
1743 iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
1744 iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);
1746 rc = ll_vfs_dq_transfer(inode, &iattr);
1748 CERROR("%s: quota transfer failed: rc = %d. Is quota "
1749 "enforcement enabled on the ldiskfs "
1750 "filesystem?\n", inode->i_sb->s_id, rc);
1757 static int osd_attr_set(const struct lu_env *env,
1758 struct dt_object *dt,
1759 const struct lu_attr *attr,
1760 struct thandle *handle)
1762 struct osd_object *obj = osd_dt_obj(dt);
1763 struct inode *inode;
1766 if (!dt_object_exists(dt))
1769 LASSERT(handle != NULL);
1770 LASSERT(!dt_object_remote(dt));
1771 LASSERT(osd_invariant(obj));
1773 osd_trans_exec_op(env, handle, OSD_OT_ATTR_SET);
1775 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING)) {
1776 struct osd_thread_info *oti = osd_oti_get(env);
1777 const struct lu_fid *fid0 = lu_object_fid(&dt->do_lu);
1778 struct lu_fid *fid1 = &oti->oti_fid;
1779 struct osd_inode_id *id = &oti->oti_id;
1780 struct iam_path_descr *ipd;
1781 struct iam_container *bag;
1782 struct osd_thandle *oh;
1785 fid_cpu_to_be(fid1, fid0);
1786 memset(id, 1, sizeof(*id));
1787 bag = &osd_fid2oi(osd_dev(dt->do_lu.lo_dev),
1788 fid0)->oi_dir.od_container;
1789 ipd = osd_idx_ipd_get(env, bag);
1790 if (unlikely(ipd == NULL))
1793 oh = container_of0(handle, struct osd_thandle, ot_super);
1794 rc = iam_update(oh->ot_handle, bag, (const struct iam_key *)fid1,
1795 (const struct iam_rec *)id, ipd);
1796 osd_ipd_put(env, bag, ipd);
1797 return(rc > 0 ? 0 : rc);
1800 inode = obj->oo_inode;
1802 rc = osd_quota_transfer(inode, attr);
1806 spin_lock(&obj->oo_guard);
1807 rc = osd_inode_setattr(env, inode, attr);
1808 spin_unlock(&obj->oo_guard);
1811 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1815 static struct dentry *osd_child_dentry_get(const struct lu_env *env,
1816 struct osd_object *obj,
1817 const char *name, const int namelen)
1819 return osd_child_dentry_by_inode(env, obj->oo_inode, name, namelen);
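/*
 * Allocate a new inode with the given mode under the current transaction
 * handle, optionally using the allocation hint's parent for locality.
 * The osd_mkdir/osd_mk_index/osd_mkreg/osd_mksym/osd_mknod helpers below
 * all go through this function.
 */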
1822 static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
1823 umode_t mode, struct dt_allocation_hint *hint,
1827 struct osd_device *osd = osd_obj2dev(obj);
1828 struct osd_thandle *oth;
1829 struct dt_object *parent = NULL;
1830 struct inode *inode;
1832 LINVRNT(osd_invariant(obj));
1833 LASSERT(obj->oo_inode == NULL);
1834 LASSERT(obj->oo_hl_head == NULL);
1836 if (S_ISDIR(mode) && ldiskfs_pdo) {
1837 obj->oo_hl_head =ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
1838 if (obj->oo_hl_head == NULL)
1842 oth = container_of(th, struct osd_thandle, ot_super);
1843 LASSERT(oth->ot_handle->h_transaction != NULL);
1845 if (hint && hint->dah_parent)
1846 parent = hint->dah_parent;
1848 inode = ldiskfs_create_inode(oth->ot_handle,
1849 parent ? osd_dt_obj(parent)->oo_inode :
1850 osd_sb(osd)->s_root->d_inode,
1852 if (!IS_ERR(inode)) {
1853 /* Do not update file c/mtime in ldiskfs. */
1854 inode->i_flags |= S_NOCMTIME;
/* A newly created object must be consistent, so it is
 * unnecessary to scrub it. */
1858 ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_NOSCRUB);
1860 obj->oo_inode = inode;
1863 if (obj->oo_hl_head != NULL) {
1864 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
1865 obj->oo_hl_head = NULL;
1867 result = PTR_ERR(inode);
1869 LINVRNT(osd_invariant(obj));
1877 static int osd_mkdir(struct osd_thread_info *info, struct osd_object *obj,
1878 struct lu_attr *attr,
1879 struct dt_allocation_hint *hint,
1880 struct dt_object_format *dof,
1884 struct osd_thandle *oth;
1885 __u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX));
1887 LASSERT(S_ISDIR(attr->la_mode));
1889 oth = container_of(th, struct osd_thandle, ot_super);
1890 LASSERT(oth->ot_handle->h_transaction != NULL);
1891 result = osd_mkfile(info, obj, mode, hint, th);
1896 static int osd_mk_index(struct osd_thread_info *info, struct osd_object *obj,
1897 struct lu_attr *attr,
1898 struct dt_allocation_hint *hint,
1899 struct dt_object_format *dof,
1903 struct osd_thandle *oth;
1904 const struct dt_index_features *feat = dof->u.dof_idx.di_feat;
1906 __u32 mode = (attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX));
1908 LASSERT(S_ISREG(attr->la_mode));
1910 oth = container_of(th, struct osd_thandle, ot_super);
1911 LASSERT(oth->ot_handle->h_transaction != NULL);
1913 result = osd_mkfile(info, obj, mode, hint, th);
1915 LASSERT(obj->oo_inode != NULL);
1916 if (feat->dif_flags & DT_IND_VARKEY)
1917 result = iam_lvar_create(obj->oo_inode,
1918 feat->dif_keysize_max,
1920 feat->dif_recsize_max,
1923 result = iam_lfix_create(obj->oo_inode,
1924 feat->dif_keysize_max,
1926 feat->dif_recsize_max,
1933 static int osd_mkreg(struct osd_thread_info *info, struct osd_object *obj,
1934 struct lu_attr *attr,
1935 struct dt_allocation_hint *hint,
1936 struct dt_object_format *dof,
1939 LASSERT(S_ISREG(attr->la_mode));
1940 return osd_mkfile(info, obj, (attr->la_mode &
1941 (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
1944 static int osd_mksym(struct osd_thread_info *info, struct osd_object *obj,
1945 struct lu_attr *attr,
1946 struct dt_allocation_hint *hint,
1947 struct dt_object_format *dof,
1950 LASSERT(S_ISLNK(attr->la_mode));
1951 return osd_mkfile(info, obj, (attr->la_mode &
1952 (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
1955 static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
1956 struct lu_attr *attr,
1957 struct dt_allocation_hint *hint,
1958 struct dt_object_format *dof,
1961 umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
1964 LINVRNT(osd_invariant(obj));
1965 LASSERT(obj->oo_inode == NULL);
1966 LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
1967 S_ISFIFO(mode) || S_ISSOCK(mode));
1969 result = osd_mkfile(info, obj, mode, hint, th);
1971 LASSERT(obj->oo_inode != NULL);
1973 * This inode should be marked dirty for i_rdev. Currently
1974 * that is done in the osd_attr_init().
1976 init_special_inode(obj->oo_inode, obj->oo_inode->i_mode,
1979 LINVRNT(osd_invariant(obj));
1983 typedef int (*osd_obj_type_f)(struct osd_thread_info *, struct osd_object *,
1985 struct dt_allocation_hint *hint,
1986 struct dt_object_format *dof,
1989 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
1991 osd_obj_type_f result;
2007 result = osd_mk_index;
2018 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
2019 struct dt_object *parent, struct dt_object *child,
2024 ah->dah_parent = parent;
2025 ah->dah_mode = child_mode;
2028 static void osd_attr_init(struct osd_thread_info *info, struct osd_object *obj,
2029 struct lu_attr *attr, struct dt_object_format *dof)
2031 struct inode *inode = obj->oo_inode;
2032 __u64 valid = attr->la_valid;
2035 attr->la_valid &= ~(LA_TYPE | LA_MODE);
2037 if (dof->dof_type != DFT_NODE)
2038 attr->la_valid &= ~LA_RDEV;
2039 if ((valid & LA_ATIME) && (attr->la_atime == LTIME_S(inode->i_atime)))
2040 attr->la_valid &= ~LA_ATIME;
2041 if ((valid & LA_CTIME) && (attr->la_ctime == LTIME_S(inode->i_ctime)))
2042 attr->la_valid &= ~LA_CTIME;
2043 if ((valid & LA_MTIME) && (attr->la_mtime == LTIME_S(inode->i_mtime)))
2044 attr->la_valid &= ~LA_MTIME;
2046 result = osd_quota_transfer(inode, attr);
2050 if (attr->la_valid != 0) {
2051 result = osd_inode_setattr(info->oti_env, inode, attr);
2053 * The osd_inode_setattr() should always succeed here. The
2054 * only error that could be returned is EDQUOT when we are
2055 * trying to change the UID or GID of the inode. However, this
2056 * should not happen since quota enforcement is no longer
2057 * enabled on ldiskfs (lquota takes care of it).
2059 LASSERTF(result == 0, "%d\n", result);
2060 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2063 attr->la_valid = valid;
2067 * Helper function for osd_object_create()
2069 * \retval 0, on success
2071 static int __osd_object_create(struct osd_thread_info *info,
2072 struct osd_object *obj, struct lu_attr *attr,
2073 struct dt_allocation_hint *hint,
2074 struct dt_object_format *dof,
2080 /* we drop umask so that permissions we pass are not affected */
2081 umask = current->fs->umask;
2082 current->fs->umask = 0;
2084 result = osd_create_type_f(dof->dof_type)(info, obj, attr, hint, dof,
2086 if (likely(obj->oo_inode != NULL)) {
2087 LASSERT(obj->oo_inode->i_state & I_NEW);
2089 /* Unlock the inode before attr initialization to avoid
2090 * unnecessary dqget operations. LU-6378 */
2091 unlock_new_inode(obj->oo_inode);
2094 if (likely(result == 0)) {
2095 osd_attr_init(info, obj, attr, dof);
2096 osd_object_init0(obj);
2099 /* restore previous umask value */
2100 current->fs->umask = umask;
2106 * Helper function for osd_object_create()
2108 * \retval 0, on success
2110 static int __osd_oi_insert(const struct lu_env *env, struct osd_object *obj,
2111 const struct lu_fid *fid, struct thandle *th)
2113 struct osd_thread_info *info = osd_oti_get(env);
2114 struct osd_inode_id *id = &info->oti_id;
2115 struct osd_device *osd = osd_obj2dev(obj);
2116 struct osd_thandle *oh;
2118 LASSERT(obj->oo_inode != NULL);
2120 oh = container_of0(th, struct osd_thandle, ot_super);
2121 LASSERT(oh->ot_handle);
2123 osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
2124 return osd_oi_insert(info, osd, fid, id, oh->ot_handle, OI_CHECK_FLD);
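/*
 * Resolve which server owns the given sequence: IDIF sequences map directly
 * to an OST index, sequences outside the FLDB map to this node, and
 * everything else is resolved through the local FLD lookup.
 */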
2127 int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
2128 u64 seq, struct lu_seq_range *range)
2130 struct seq_server_site *ss = osd_seq_site(osd);
2132 if (fid_seq_is_idif(seq)) {
2133 fld_range_set_ost(range);
2134 range->lsr_index = idif_ost_idx(seq);
2138 if (!fid_seq_in_fldb(seq)) {
2139 fld_range_set_mdt(range);
/* FIXME: if ss is NULL, lsr_index is not supposed to be set
 * at all */
2143 range->lsr_index = ss->ss_node_id;
2147 LASSERT(ss != NULL);
2148 fld_range_set_any(range);
2149 /* OSD will only do local fld lookup */
2150 return fld_local_lookup(env, ss->ss_server_fld, seq, range);
2154 * Concurrency: no external locking is necessary.
2156 static int osd_declare_object_create(const struct lu_env *env,
2157 struct dt_object *dt,
2158 struct lu_attr *attr,
2159 struct dt_allocation_hint *hint,
2160 struct dt_object_format *dof,
2161 struct thandle *handle)
2163 struct osd_thandle *oh;
2167 LASSERT(handle != NULL);
2169 oh = container_of0(handle, struct osd_thandle, ot_super);
2170 LASSERT(oh->ot_handle == NULL);
2172 osd_trans_declare_op(env, oh, OSD_OT_CREATE,
2173 osd_dto_credits_noquota[DTO_OBJECT_CREATE]);
/* Reusing an idle OI block may cause one additional OI block
 * to be changed */
2176 osd_trans_declare_op(env, oh, OSD_OT_INSERT,
2177 osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
/* If this is a directory, then we expect "." and ".." to be inserted
 * as well. One directory block always needs to be created for the
 * directory, so we can use DTO_WRITE_BASE here (GDT, block bitmap,
 * block); there is no danger of needing an htree for the first block. */
2184 if (attr && S_ISDIR(attr->la_mode)) {
2185 osd_trans_declare_op(env, oh, OSD_OT_INSERT,
2186 osd_dto_credits_noquota[DTO_WRITE_BASE]);
2187 osd_trans_declare_op(env, oh, OSD_OT_INSERT, 0);
2193 rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid, 1, oh,
2194 osd_dt_obj(dt), false, NULL, false);
2201 static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
2202 struct lu_attr *attr,
2203 struct dt_allocation_hint *hint,
2204 struct dt_object_format *dof, struct thandle *th)
2206 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2207 struct osd_object *obj = osd_dt_obj(dt);
2208 struct osd_thread_info *info = osd_oti_get(env);
2212 if (dt_object_exists(dt))
2215 LINVRNT(osd_invariant(obj));
2216 LASSERT(!dt_object_remote(dt));
2217 LASSERT(osd_write_locked(env, obj));
2218 LASSERT(th != NULL);
2220 if (unlikely(fid_is_acct(fid)))
2221 /* Quota files can't be created from the kernel any more,
2222 * 'tune2fs -O quota' will take care of creating them */
2225 osd_trans_exec_op(env, th, OSD_OT_CREATE);
2226 osd_trans_declare_rb(env, th, OSD_OT_REF_ADD);
2228 result = __osd_object_create(info, obj, attr, hint, dof, th);
2230 result = __osd_oi_insert(env, obj, fid, th);
2232 LASSERT(ergo(result == 0,
2233 dt_object_exists(dt) && !dt_object_remote(dt)));
2235 LASSERT(osd_invariant(obj));
2240 * Called to destroy on-disk representation of the object
2242 * Concurrency: must be locked
2244 static int osd_declare_object_destroy(const struct lu_env *env,
2245 struct dt_object *dt,
2248 struct osd_object *obj = osd_dt_obj(dt);
2249 struct inode *inode = obj->oo_inode;
2250 struct osd_thandle *oh;
2254 oh = container_of0(th, struct osd_thandle, ot_super);
2255 LASSERT(oh->ot_handle == NULL);
2258 osd_trans_declare_op(env, oh, OSD_OT_DESTROY,
2259 osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
/* Recycling an idle OI leaf may cause three additional OI blocks
 * to be changed */
2262 osd_trans_declare_op(env, oh, OSD_OT_DELETE,
2263 osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
2264 /* one less inode */
2265 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2266 -1, oh, obj, false, NULL, false);
2269 /* data to be truncated */
2270 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2271 0, oh, obj, true, NULL, false);
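/*
 * Destroy the on-disk object: handle the remote-parent link for directories,
 * delete the FID->ino mapping from the OI, and mark the object so that it is
 * dropped from the LU cache.
 */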
2275 static int osd_object_destroy(const struct lu_env *env,
2276 struct dt_object *dt,
2279 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2280 struct osd_object *obj = osd_dt_obj(dt);
2281 struct inode *inode = obj->oo_inode;
2282 struct osd_device *osd = osd_obj2dev(obj);
2283 struct osd_thandle *oh;
2287 oh = container_of0(th, struct osd_thandle, ot_super);
2288 LASSERT(oh->ot_handle);
2290 LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
2292 if (unlikely(fid_is_acct(fid)))
2295 if (S_ISDIR(inode->i_mode)) {
2296 LASSERT(osd_inode_unlinked(inode) || inode->i_nlink == 1 ||
2297 inode->i_nlink == 2);
2298 /* it will check/delete the inode from the remote parent;
2299 * how can this be optimized? XXX: unlink performance impact */
2300 result = osd_delete_from_remote_parent(env, osd, obj, oh);
2301 if (result != 0 && result != -ENOENT) {
2302 CERROR("%s: delete inode "DFID": rc = %d\n",
2303 osd_name(osd), PFID(fid), result);
2305 spin_lock(&obj->oo_guard);
2307 spin_unlock(&obj->oo_guard);
2308 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2311 osd_trans_exec_op(env, th, OSD_OT_DESTROY);
2313 result = osd_oi_delete(osd_oti_get(env), osd, fid, oh->ot_handle,
2316 /* XXX: add to ext3 orphan list */
2317 /* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
2319 /* not needed in the cache anymore */
2320 set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
2326 * Put the fid into lustre_mdt_attrs, and then store the structure in the
2327 * inode's EA. This fid should not be altered during the lifetime
2330 * \retval +ve, on success
2331 * \retval -ve, on error
2333 * FIXME: it would be better to use ldiskfs_xattr_set_handle() here
2335 int osd_ea_fid_set(struct osd_thread_info *info, struct inode *inode,
2336 const struct lu_fid *fid, __u32 compat, __u32 incompat)
2338 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
2342 if (OBD_FAIL_CHECK(OBD_FAIL_FID_INLMA))
2345 lustre_lma_init(lma, fid, compat, incompat);
2346 lustre_lma_swab(lma);
2348 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma, sizeof(*lma),
2350 /* LMA may already exist, but we need to check that all the
2351 * desired compat/incompat flags have been added. */
2352 if (unlikely(rc == -EEXIST)) {
2353 if (compat == 0 && incompat == 0)
2356 rc = __osd_xattr_get(inode, &info->oti_obj_dentry,
2357 XATTR_NAME_LMA, info->oti_mdt_attrs_old,
2362 lustre_lma_swab(lma);
2363 if (!(~lma->lma_compat & compat) &&
2364 !(~lma->lma_incompat & incompat))
2367 lma->lma_compat |= compat;
2368 lma->lma_incompat |= incompat;
2369 lustre_lma_swab(lma);
2370 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
2371 sizeof(*lma), XATTR_REPLACE);
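/*
 * Editorial worked example of the flag check above (not original code):
 * "(~lma->lma_compat & compat) == 0" holds exactly when every bit requested
 * in @compat is already present in the on-disk LMA, e.g.
 *
 *	lma_compat = 0x05, compat = 0x01: ~0x05 & 0x01 == 0x00 -> nothing to do
 *	lma_compat = 0x05, compat = 0x02: ~0x05 & 0x02 == 0x02 -> OR in the new
 *	bits and rewrite the LMA with XATTR_REPLACE
 */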
2378 * ldiskfs supports a fid in the dirent; it is passed in dentry->d_fsdata.
2379 * Lustre 1.8 also uses d_fsdata for passing other info to ldiskfs.
2380 * To keep compatibility with the 1.8 ldiskfs driver we need a magic
2381 * number at the start of the fid data.
2382 * ldiskfs_dentry_param is used only to pass the fid from osd to ldiskfs.
2385 static void osd_get_ldiskfs_dirent_param(struct ldiskfs_dentry_param *param,
2386 const struct lu_fid *fid)
2388 if (!fid_is_namespace_visible(fid) ||
2389 OBD_FAIL_CHECK(OBD_FAIL_FID_IGIF)) {
2390 param->edp_magic = 0;
2394 param->edp_magic = LDISKFS_LUFID_MAGIC;
2395 param->edp_len = sizeof(struct lu_fid) + 1;
2396 fid_cpu_to_be((struct lu_fid *)param->edp_data, (struct lu_fid *)fid);
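/*
 * Editorial sketch of the parameter produced above (an interpretation of
 * the assignments, not taken from the original sources): the fid payload
 * handed to ldiskfs via dentry->d_fsdata looks roughly like
 *
 *	edp_magic = LDISKFS_LUFID_MAGIC
 *	edp_len   = sizeof(struct lu_fid) + 1
 *	edp_data  = the fid converted to big-endian by fid_cpu_to_be()
 *
 * so a 1.8-era ldiskfs driver can recognize (or safely ignore) the payload
 * by checking edp_magic first.
 */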
2400 * Try to read the fid from the inode EA into dt_rec.
2402 * \param fid object fid.
2404 * \retval 0 on success
2406 static int osd_ea_fid_get(const struct lu_env *env, struct osd_object *obj,
2407 __u32 ino, struct lu_fid *fid,
2408 struct osd_inode_id *id)
2410 struct osd_thread_info *info = osd_oti_get(env);
2411 struct inode *inode;
2414 osd_id_gen(id, ino, OSD_OII_NOGEN);
2415 inode = osd_iget_fid(info, osd_obj2dev(obj), id, fid);
2417 RETURN(PTR_ERR(inode));
2423 static int osd_add_dot_dotdot_internal(struct osd_thread_info *info,
2425 struct inode *parent_dir,
2426 const struct lu_fid *dot_fid,
2427 const struct lu_fid *dot_dot_fid,
2428 struct osd_thandle *oth)
2430 struct ldiskfs_dentry_param *dot_ldp;
2431 struct ldiskfs_dentry_param *dot_dot_ldp;
2433 dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
2434 osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
2436 dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
2437 dot_ldp->edp_magic = 0;
2438 return ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
2439 dir, dot_ldp, dot_dot_ldp);
2443 * Create a local agent inode for a remote entry
2445 static struct inode *osd_create_local_agent_inode(const struct lu_env *env,
2446 struct osd_device *osd,
2447 struct osd_object *pobj,
2448 const struct lu_fid *fid,
2452 struct osd_thread_info *info = osd_oti_get(env);
2453 struct inode *local;
2454 struct osd_thandle *oh;
2459 oh = container_of(th, struct osd_thandle, ot_super);
2460 LASSERT(oh->ot_handle->h_transaction != NULL);
2462 local = ldiskfs_create_inode(oh->ot_handle, pobj->oo_inode, type);
2463 if (IS_ERR(local)) {
2464 CERROR("%s: create local error %d\n", osd_name(osd),
2465 (int)PTR_ERR(local));
2469 ldiskfs_set_inode_state(local, LDISKFS_STATE_LUSTRE_NOSCRUB);
2470 unlock_new_inode(local);
2472 /* Set special LMA flag for local agent inode */
2473 rc = osd_ea_fid_set(info, local, fid, 0, LMAI_AGENT);
2475 CERROR("%s: set LMA for "DFID" remote inode failed: rc = %d\n",
2476 osd_name(osd), PFID(fid), rc);
2477 RETURN(ERR_PTR(rc));
2483 rc = osd_add_dot_dotdot_internal(info, local, pobj->oo_inode,
2484 lu_object_fid(&pobj->oo_dt.do_lu),
2487 CERROR("%s: "DFID" add dot dotdot error: rc = %d\n",
2488 osd_name(osd), PFID(fid), rc);
2489 RETURN(ERR_PTR(rc));
2496 * Delete the local agent inode for a remote entry
2498 static int osd_delete_local_agent_inode(const struct lu_env *env,
2499 struct osd_device *osd,
2500 const struct lu_fid *fid,
2501 __u32 ino, struct osd_thandle *oh)
2503 struct osd_thread_info *oti = osd_oti_get(env);
2504 struct osd_inode_id *id = &oti->oti_id;
2505 struct inode *inode;
2508 id->oii_ino = le32_to_cpu(ino);
2509 id->oii_gen = OSD_OII_NOGEN;
2510 inode = osd_iget(oti, osd, id);
2511 if (IS_ERR(inode)) {
2512 CERROR("%s: iget error "DFID" id %u:%u\n", osd_name(osd),
2513 PFID(fid), id->oii_ino, id->oii_gen);
2514 RETURN(PTR_ERR(inode));
2518 mark_inode_dirty(inode);
2519 CDEBUG(D_INODE, "%s: delete remote inode "DFID" %lu\n",
2520 osd_name(osd), PFID(fid), inode->i_ino);
2526 * OSD layer object create function for interoperability mode (b11826).
2527 * This is mostly similar to osd_object_create(); the only difference is that
2528 * the fid is inserted into the inode EA here.
2530 * \retval 0, on success
2531 * \retval -ve, on error
2533 static int osd_object_ea_create(const struct lu_env *env, struct dt_object *dt,
2534 struct lu_attr *attr,
2535 struct dt_allocation_hint *hint,
2536 struct dt_object_format *dof,
2539 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2540 struct osd_object *obj = osd_dt_obj(dt);
2541 struct osd_thread_info *info = osd_oti_get(env);
2546 if (dt_object_exists(dt))
2549 LASSERT(osd_invariant(obj));
2550 LASSERT(!dt_object_remote(dt));
2551 LASSERT(osd_write_locked(env, obj));
2552 LASSERT(th != NULL);
2554 if (unlikely(fid_is_acct(fid)))
2555 /* Quota files can't be created from the kernel any more,
2556 * 'tune2fs -O quota' will take care of creating them */
2559 osd_trans_exec_op(env, th, OSD_OT_CREATE);
2560 osd_trans_declare_rb(env, th, OSD_OT_REF_ADD);
2562 result = __osd_object_create(info, obj, attr, hint, dof, th);
2564 if (fid_is_idif(fid) &&
2565 !osd_dev(dt->do_lu.lo_dev)->od_index_in_idif) {
2566 struct lu_fid *tfid = &info->oti_fid;
2567 struct ost_id *oi = &info->oti_ostid;
2569 fid_to_ostid(fid, oi);
2570 ostid_to_fid(tfid, oi, 0);
2571 result = osd_ea_fid_set(info, obj->oo_inode, tfid,
2572 LMAC_FID_ON_OST, 0);
2574 result = osd_ea_fid_set(info, obj->oo_inode, fid,
2575 fid_is_on_ost(info, osd_obj2dev(obj),
2576 fid, OI_CHECK_FLD) ?
2577 LMAC_FID_ON_OST : 0, 0);
2582 result = __osd_oi_insert(env, obj, fid, th);
2584 LASSERT(ergo(result == 0,
2585 dt_object_exists(dt) && !dt_object_remote(dt)));
2586 LINVRNT(osd_invariant(obj));
2590 static int osd_declare_object_ref_add(const struct lu_env *env,
2591 struct dt_object *dt,
2592 struct thandle *handle)
2594 struct osd_thandle *oh;
2596 /* it's possible that object doesn't exist yet */
2597 LASSERT(handle != NULL);
2599 oh = container_of0(handle, struct osd_thandle, ot_super);
2600 LASSERT(oh->ot_handle == NULL);
2602 osd_trans_declare_op(env, oh, OSD_OT_REF_ADD,
2603 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2609 * Concurrency: @dt is write locked.
2611 static int osd_object_ref_add(const struct lu_env *env,
2612 struct dt_object *dt, struct thandle *th)
2614 struct osd_object *obj = osd_dt_obj(dt);
2615 struct inode *inode = obj->oo_inode;
2616 struct osd_thandle *oh;
2619 if (!dt_object_exists(dt))
2622 LINVRNT(osd_invariant(obj));
2623 LASSERT(!dt_object_remote(dt));
2624 LASSERT(osd_write_locked(env, obj));
2625 LASSERT(th != NULL);
2627 oh = container_of0(th, struct osd_thandle, ot_super);
2628 LASSERT(oh->ot_handle != NULL);
2630 osd_trans_exec_op(env, th, OSD_OT_REF_ADD);
2632 CDEBUG(D_INODE, DFID" increase nlink %d\n",
2633 PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
2635 * The DIR_NLINK feature allows directories to exceed LDISKFS_LINK_MAX
2636 * (65000) subdirectories by storing "1" in i_nlink if the link count
2637 * would otherwise overflow. Directory traversal tools understand
2638 * that (st_nlink == 1) indicates that the filesystem does not track
2639 * the hard link count on the directory, and will not abort subdirectory
2640 * scanning early once (st_nlink - 2) subdirs have been found.
2642 * This also has to properly handle the case of inodes with nlink == 0
2643 * in case they are being linked into the PENDING directory
2645 spin_lock(&obj->oo_guard);
2646 if (unlikely(inode->i_nlink == 0))
2647 /* inc_nlink from 0 may cause WARN_ON */
2648 set_nlink(inode, 1);
2650 ldiskfs_inc_count(oh->ot_handle, inode);
2651 if (!S_ISDIR(inode->i_mode))
2652 LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
2654 spin_unlock(&obj->oo_guard);
2656 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2657 LINVRNT(osd_invariant(obj));
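/*
 * Editorial example of the DIR_NLINK accounting described in the comment
 * above (approximate numbers, not from the original sources):
 *
 *	mkdir d        -> d: i_nlink == 2  ("." plus the entry in the parent)
 *	mkdir d/s1     -> d: i_nlink == 3  (one more for s1's "..")
 *	...            -> once the count would pass LDISKFS_LINK_MAX (65000),
 *	                  ldiskfs stores i_nlink == 1 and stops tracking
 *	                  subdirectory links for this directory.
 */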
2662 static int osd_declare_object_ref_del(const struct lu_env *env,
2663 struct dt_object *dt,
2664 struct thandle *handle)
2666 struct osd_thandle *oh;
2668 LASSERT(!dt_object_remote(dt));
2669 LASSERT(handle != NULL);
2671 oh = container_of0(handle, struct osd_thandle, ot_super);
2672 LASSERT(oh->ot_handle == NULL);
2674 osd_trans_declare_op(env, oh, OSD_OT_REF_DEL,
2675 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2681 * Concurrency: @dt is write locked.
2683 static int osd_object_ref_del(const struct lu_env *env, struct dt_object *dt,
2686 struct osd_object *obj = osd_dt_obj(dt);
2687 struct inode *inode = obj->oo_inode;
2688 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
2689 struct osd_thandle *oh;
2691 if (!dt_object_exists(dt))
2694 LINVRNT(osd_invariant(obj));
2695 LASSERT(!dt_object_remote(dt));
2696 LASSERT(osd_write_locked(env, obj));
2697 LASSERT(th != NULL);
2699 oh = container_of0(th, struct osd_thandle, ot_super);
2700 LASSERT(oh->ot_handle != NULL);
2702 osd_trans_exec_op(env, th, OSD_OT_REF_DEL);
2704 spin_lock(&obj->oo_guard);
2705 /* This can be the result of an upgrade from an old Lustre version and
2706 * applies only to local files. Just skip this ref_del call.
2707 * ext4_unlink() treats this only as a warning; don't LASSERT here. */
2708 if (inode->i_nlink == 0) {
2709 CDEBUG_LIMIT(fid_is_norm(lu_object_fid(&dt->do_lu)) ?
2710 D_ERROR : D_INODE, "%s: nlink == 0 on "DFID
2711 ", maybe an upgraded file? (LU-3915)\n",
2712 osd_name(osd), PFID(lu_object_fid(&dt->do_lu)));
2713 spin_unlock(&obj->oo_guard);
2717 CDEBUG(D_INODE, DFID" decrease nlink %d\n",
2718 PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
2720 ldiskfs_dec_count(oh->ot_handle, inode);
2721 spin_unlock(&obj->oo_guard);
2723 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2724 LINVRNT(osd_invariant(obj));
2730 * Get the 64-bit version for an inode.
2732 static int osd_object_version_get(const struct lu_env *env,
2733 struct dt_object *dt, dt_obj_version_t *ver)
2735 struct inode *inode = osd_dt_obj(dt)->oo_inode;
2737 CDEBUG(D_INODE, "Get version "LPX64" for inode %lu\n",
2738 LDISKFS_I(inode)->i_fs_version, inode->i_ino);
2739 *ver = LDISKFS_I(inode)->i_fs_version;
2744 * Concurrency: @dt is read locked.
2746 static int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
2747 struct lu_buf *buf, const char *name)
2749 struct osd_object *obj = osd_dt_obj(dt);
2750 struct inode *inode = obj->oo_inode;
2751 struct osd_thread_info *info = osd_oti_get(env);
2752 struct dentry *dentry = &info->oti_obj_dentry;
2754 /* getting the version is not a real XATTR but uses the xattr API */
2755 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2756 /* for version we are just using xattr API but change inode
2758 if (buf->lb_len == 0)
2759 return sizeof(dt_obj_version_t);
2761 if (buf->lb_len < sizeof(dt_obj_version_t))
2764 osd_object_version_get(env, dt, buf->lb_buf);
2766 return sizeof(dt_obj_version_t);
2769 if (!dt_object_exists(dt))
2772 LASSERT(!dt_object_remote(dt));
2773 LASSERT(inode->i_op != NULL);
2774 LASSERT(inode->i_op->getxattr != NULL);
2776 return __osd_xattr_get(inode, dentry, name, buf->lb_buf, buf->lb_len);
2780 static int osd_declare_xattr_set(const struct lu_env *env,
2781 struct dt_object *dt,
2782 const struct lu_buf *buf, const char *name,
2783 int fl, struct thandle *handle)
2785 struct osd_thandle *oh;
2787 struct super_block *sb = osd_sb(osd_dev(dt->do_lu.lo_dev));
2789 LASSERT(handle != NULL);
2791 oh = container_of0(handle, struct osd_thandle, ot_super);
2792 LASSERT(oh->ot_handle == NULL);
2794 /* optimistic optimization: LMA is set first and usually fits in the inode */
2795 if (strcmp(name, XATTR_NAME_LMA) == 0) {
2796 if (dt_object_exists(dt))
2800 } else if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2803 credits = osd_dto_credits_noquota[DTO_XATTR_SET];
2804 if (buf && buf->lb_len > sb->s_blocksize) {
2805 credits *= (buf->lb_len + sb->s_blocksize - 1) >>
2806 sb->s_blocksize_bits;
2809 * xattr set may involve inode quota change, reserve credits for
2810 * dquot_initialize()
2812 oh->ot_credits += LDISKFS_MAXQUOTAS_INIT_BLOCKS(sb);
2815 osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET, credits);
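/*
 * Editorial arithmetic example for the credit estimate above (illustrative
 * values, not from the original sources): with a 4096-byte block size
 * (s_blocksize_bits == 12) and a 10000-byte xattr value,
 *
 *	(10000 + 4096 - 1) >> 12 == 3
 *
 * so the base DTO_XATTR_SET credits are multiplied by 3 to cover external
 * xattr blocks, and LDISKFS_MAXQUOTAS_INIT_BLOCKS(sb) is added for a
 * possible dquot_initialize() of the inode.
 */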
2821 * Set the 64-bit version for object
2823 static void osd_object_version_set(const struct lu_env *env,
2824 struct dt_object *dt,
2825 dt_obj_version_t *new_version)
2827 struct inode *inode = osd_dt_obj(dt)->oo_inode;
2829 CDEBUG(D_INODE, "Set version "LPX64" (old "LPX64") for inode %lu\n",
2830 *new_version, LDISKFS_I(inode)->i_fs_version, inode->i_ino);
2832 LDISKFS_I(inode)->i_fs_version = *new_version;
2833 /** Version is set after all inode operations are finished,
2834 * so we should mark it dirty here */
2835 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2839 * Concurrency: @dt is write locked.
2841 static int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
2842 const struct lu_buf *buf, const char *name, int fl,
2843 struct thandle *handle)
2845 struct osd_object *obj = osd_dt_obj(dt);
2846 struct inode *inode = obj->oo_inode;
2847 struct osd_thread_info *info = osd_oti_get(env);
2851 LASSERT(handle != NULL);
2853 /* setting the version is not a real XATTR */
2854 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2855 /* for version we are just using xattr API but change inode
2857 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
2858 osd_object_version_set(env, dt, buf->lb_buf);
2859 return sizeof(dt_obj_version_t);
2862 CDEBUG(D_INODE, DFID" set xattr '%s' with size %zu\n",
2863 PFID(lu_object_fid(&dt->do_lu)), name, buf->lb_len);
2865 osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
2866 if (fl & LU_XATTR_REPLACE)
2867 fs_flags |= XATTR_REPLACE;
2869 if (fl & LU_XATTR_CREATE)
2870 fs_flags |= XATTR_CREATE;
2872 if (strcmp(name, XATTR_NAME_LMV) == 0) {
2873 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
2876 rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
2880 lma->lma_incompat |= LMAI_STRIPED;
2881 lustre_lma_swab(lma);
2882 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
2883 sizeof(*lma), XATTR_REPLACE);
2888 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LINKEA_OVERFLOW) &&
2889 strcmp(name, XATTR_NAME_LINK) == 0)
2892 return __osd_xattr_set(info, inode, name, buf->lb_buf, buf->lb_len,
2897 * Concurrency: @dt is read locked.
2899 static int osd_xattr_list(const struct lu_env *env, struct dt_object *dt,
2900 const struct lu_buf *buf)
2902 struct osd_object *obj = osd_dt_obj(dt);
2903 struct inode *inode = obj->oo_inode;
2904 struct osd_thread_info *info = osd_oti_get(env);
2905 struct dentry *dentry = &info->oti_obj_dentry;
2907 if (!dt_object_exists(dt))
2910 LASSERT(!dt_object_remote(dt));
2911 LASSERT(inode->i_op != NULL);
2912 LASSERT(inode->i_op->listxattr != NULL);
2914 dentry->d_inode = inode;
2915 dentry->d_sb = inode->i_sb;
2916 return inode->i_op->listxattr(dentry, buf->lb_buf, buf->lb_len);
2919 static int osd_declare_xattr_del(const struct lu_env *env,
2920 struct dt_object *dt, const char *name,
2921 struct thandle *handle)
2923 struct osd_thandle *oh;
2924 struct super_block *sb = osd_sb(osd_dev(dt->do_lu.lo_dev));
2926 LASSERT(!dt_object_remote(dt));
2927 LASSERT(handle != NULL);
2929 oh = container_of0(handle, struct osd_thandle, ot_super);
2930 LASSERT(oh->ot_handle == NULL);
2932 osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
2933 osd_dto_credits_noquota[DTO_XATTR_SET]);
2935 * xattr del may involve inode quota change, reserve credits for
2936 * dquot_initialize()
2938 oh->ot_credits += LDISKFS_MAXQUOTAS_INIT_BLOCKS(sb);
2944 * Concurrency: @dt is write locked.
2946 static int osd_xattr_del(const struct lu_env *env, struct dt_object *dt,
2947 const char *name, struct thandle *handle)
2949 struct osd_object *obj = osd_dt_obj(dt);
2950 struct inode *inode = obj->oo_inode;
2951 struct osd_thread_info *info = osd_oti_get(env);
2952 struct dentry *dentry = &info->oti_obj_dentry;
2955 if (!dt_object_exists(dt))
2958 LASSERT(!dt_object_remote(dt));
2959 LASSERT(inode->i_op != NULL);
2960 LASSERT(inode->i_op->removexattr != NULL);
2961 LASSERT(handle != NULL);
2963 osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
2965 ll_vfs_dq_init(inode);
2966 dentry->d_inode = inode;
2967 dentry->d_sb = inode->i_sb;
2968 rc = inode->i_op->removexattr(dentry, name);
2972 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
2973 __u64 start, __u64 end)
2975 struct osd_object *obj = osd_dt_obj(dt);
2976 struct inode *inode = obj->oo_inode;
2977 struct osd_thread_info *info = osd_oti_get(env);
2978 struct dentry *dentry = &info->oti_obj_dentry;
2979 struct file *file = &info->oti_file;
2984 dentry->d_inode = inode;
2985 dentry->d_sb = inode->i_sb;
2986 file->f_dentry = dentry;
2987 file->f_mapping = inode->i_mapping;
2988 file->f_op = inode->i_fop;
2989 set_file_inode(file, inode);
2991 rc = ll_vfs_fsync_range(file, start, end, 0);
3000 static int osd_iam_index_probe(const struct lu_env *env, struct osd_object *o,
3001 const struct dt_index_features *feat)
3003 struct iam_descr *descr;
3005 if (osd_object_is_root(o))
3006 return feat == &dt_directory_features;
3008 LASSERT(o->oo_dir != NULL);
3010 descr = o->oo_dir->od_container.ic_descr;
3011 if (feat == &dt_directory_features) {
3012 if (descr->id_rec_size == sizeof(struct osd_fid_pack))
3018 feat->dif_keysize_min <= descr->id_key_size &&
3019 descr->id_key_size <= feat->dif_keysize_max &&
3020 feat->dif_recsize_min <= descr->id_rec_size &&
3021 descr->id_rec_size <= feat->dif_recsize_max &&
3022 !(feat->dif_flags & (DT_IND_VARKEY |
3023 DT_IND_VARREC | DT_IND_NONUNQ)) &&
3024 ergo(feat->dif_flags & DT_IND_UPDATE,
3025 1 /* XXX check that object (and file system) is
3030 static int osd_iam_container_init(const struct lu_env *env,
3031 struct osd_object *obj,
3032 struct osd_directory *dir)
3034 struct iam_container *bag = &dir->od_container;
3037 result = iam_container_init(bag, &dir->od_descr, obj->oo_inode);
3041 result = iam_container_setup(bag);
3043 obj->oo_dt.do_index_ops = &osd_index_iam_ops;
3045 iam_container_fini(bag);
3052 * Concurrency: no external locking is necessary.
3054 static int osd_index_try(const struct lu_env *env, struct dt_object *dt,
3055 const struct dt_index_features *feat)
3059 struct osd_object *obj = osd_dt_obj(dt);
3061 LINVRNT(osd_invariant(obj));
3063 if (osd_object_is_root(obj)) {
3064 dt->do_index_ops = &osd_index_ea_ops;
3066 } else if (feat == &dt_directory_features) {
3067 dt->do_index_ops = &osd_index_ea_ops;
3068 if (obj->oo_inode != NULL && S_ISDIR(obj->oo_inode->i_mode))
3073 } else if (unlikely(feat == &dt_otable_features)) {
3074 dt->do_index_ops = &osd_otable_ops;
3076 } else if (unlikely(feat == &dt_acct_features)) {
3077 dt->do_index_ops = &osd_acct_index_ops;
3080 } else if (!osd_has_index(obj)) {
3081 struct osd_directory *dir;
3086 spin_lock(&obj->oo_guard);
3087 if (obj->oo_dir == NULL)
3091 * Concurrent thread allocated container data.
3094 spin_unlock(&obj->oo_guard);
3096 * Now, that we have container data, serialize its
3099 down_write(&obj->oo_ext_idx_sem);
3101 * recheck under lock.
3103 if (!osd_has_index(obj))
3104 result = osd_iam_container_init(env, obj,
3108 up_write(&obj->oo_ext_idx_sem);
3116 if (result == 0 && skip_iam == 0) {
3117 if (!osd_iam_index_probe(env, obj, feat))
3120 LINVRNT(osd_invariant(obj));
3122 if (result == 0 && is_quota_glb_feat(feat) &&
3123 fid_seq(lu_object_fid(&dt->do_lu)) == FID_SEQ_QUOTA_GLB)
3124 result = osd_quota_migration(env, dt, feat);
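/*
 * Editorial caller-side sketch (the dt_* wrappers and argument lists are
 * assumptions about the generic dt API, shown only to illustrate intent):
 * index methods become usable only after a successful ->do_index_try()
 * probe, e.g.
 *
 *	if (dt_try_as_dir(env, dir))    -- probes with dt_directory_features
 *		rc = dir->do_index_ops->dio_lookup(env, dir, rec, key);
 */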
3129 static int osd_otable_it_attr_get(const struct lu_env *env,
3130 struct dt_object *dt,
3131 struct lu_attr *attr)
3137 static const struct dt_object_operations osd_obj_ops = {
3138 .do_read_lock = osd_object_read_lock,
3139 .do_write_lock = osd_object_write_lock,
3140 .do_read_unlock = osd_object_read_unlock,
3141 .do_write_unlock = osd_object_write_unlock,
3142 .do_write_locked = osd_object_write_locked,
3143 .do_attr_get = osd_attr_get,
3144 .do_declare_attr_set = osd_declare_attr_set,
3145 .do_attr_set = osd_attr_set,
3146 .do_ah_init = osd_ah_init,
3147 .do_declare_create = osd_declare_object_create,
3148 .do_create = osd_object_create,
3149 .do_declare_destroy = osd_declare_object_destroy,
3150 .do_destroy = osd_object_destroy,
3151 .do_index_try = osd_index_try,
3152 .do_declare_ref_add = osd_declare_object_ref_add,
3153 .do_ref_add = osd_object_ref_add,
3154 .do_declare_ref_del = osd_declare_object_ref_del,
3155 .do_ref_del = osd_object_ref_del,
3156 .do_xattr_get = osd_xattr_get,
3157 .do_declare_xattr_set = osd_declare_xattr_set,
3158 .do_xattr_set = osd_xattr_set,
3159 .do_declare_xattr_del = osd_declare_xattr_del,
3160 .do_xattr_del = osd_xattr_del,
3161 .do_xattr_list = osd_xattr_list,
3162 .do_object_sync = osd_object_sync,
3166 * dt_object_operations for interoperability mode
3167 * (i.e. to run 2.0 mds on 1.8 disk) (b11826)
3169 static const struct dt_object_operations osd_obj_ea_ops = {
3170 .do_read_lock = osd_object_read_lock,
3171 .do_write_lock = osd_object_write_lock,
3172 .do_read_unlock = osd_object_read_unlock,
3173 .do_write_unlock = osd_object_write_unlock,
3174 .do_write_locked = osd_object_write_locked,
3175 .do_attr_get = osd_attr_get,
3176 .do_declare_attr_set = osd_declare_attr_set,
3177 .do_attr_set = osd_attr_set,
3178 .do_ah_init = osd_ah_init,
3179 .do_declare_create = osd_declare_object_create,
3180 .do_create = osd_object_ea_create,
3181 .do_declare_destroy = osd_declare_object_destroy,
3182 .do_destroy = osd_object_destroy,
3183 .do_index_try = osd_index_try,
3184 .do_declare_ref_add = osd_declare_object_ref_add,
3185 .do_ref_add = osd_object_ref_add,
3186 .do_declare_ref_del = osd_declare_object_ref_del,
3187 .do_ref_del = osd_object_ref_del,
3188 .do_xattr_get = osd_xattr_get,
3189 .do_declare_xattr_set = osd_declare_xattr_set,
3190 .do_xattr_set = osd_xattr_set,
3191 .do_declare_xattr_del = osd_declare_xattr_del,
3192 .do_xattr_del = osd_xattr_del,
3193 .do_xattr_list = osd_xattr_list,
3194 .do_object_sync = osd_object_sync,
3197 static const struct dt_object_operations osd_obj_otable_it_ops = {
3198 .do_attr_get = osd_otable_it_attr_get,
3199 .do_index_try = osd_index_try,
3202 static int osd_index_declare_iam_delete(const struct lu_env *env,
3203 struct dt_object *dt,
3204 const struct dt_key *key,
3205 struct thandle *handle)
3207 struct osd_thandle *oh;
3209 oh = container_of0(handle, struct osd_thandle, ot_super);
3210 LASSERT(oh->ot_handle == NULL);
3212 osd_trans_declare_op(env, oh, OSD_OT_DELETE,
3213 osd_dto_credits_noquota[DTO_INDEX_DELETE]);
3219 * delete a (key, value) pair from index \a dt specified by \a key
3221 * \param dt osd index object
3222 * \param key key for index
3223 * \param rec record reference
3224 * \param handle transaction handler
3227 * \retval -ve failure
3229 static int osd_index_iam_delete(const struct lu_env *env, struct dt_object *dt,
3230 const struct dt_key *key,
3231 struct thandle *handle)
3233 struct osd_thread_info *oti = osd_oti_get(env);
3234 struct osd_object *obj = osd_dt_obj(dt);
3235 struct osd_thandle *oh;
3236 struct iam_path_descr *ipd;
3237 struct iam_container *bag = &obj->oo_dir->od_container;
3241 if (!dt_object_exists(dt))
3244 LINVRNT(osd_invariant(obj));
3245 LASSERT(!dt_object_remote(dt));
3246 LASSERT(bag->ic_object == obj->oo_inode);
3247 LASSERT(handle != NULL);
3249 osd_trans_exec_op(env, handle, OSD_OT_DELETE);
3251 ipd = osd_idx_ipd_get(env, bag);
3252 if (unlikely(ipd == NULL))
3255 oh = container_of0(handle, struct osd_thandle, ot_super);
3256 LASSERT(oh->ot_handle != NULL);
3257 LASSERT(oh->ot_handle->h_transaction != NULL);
3259 if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
3260 /* swab quota uid/gid provided by caller */
3261 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
3262 key = (const struct dt_key *)&oti->oti_quota_id;
3265 rc = iam_delete(oh->ot_handle, bag, (const struct iam_key *)key, ipd);
3266 osd_ipd_put(env, bag, ipd);
3267 LINVRNT(osd_invariant(obj));
3271 static int osd_index_declare_ea_delete(const struct lu_env *env,
3272 struct dt_object *dt,
3273 const struct dt_key *key,
3274 struct thandle *handle)
3276 struct osd_thandle *oh;
3277 struct inode *inode;
3281 LASSERT(!dt_object_remote(dt));
3282 LASSERT(handle != NULL);
3284 oh = container_of0(handle, struct osd_thandle, ot_super);
3285 LASSERT(oh->ot_handle == NULL);
3287 osd_trans_declare_op(env, oh, OSD_OT_DELETE,
3288 osd_dto_credits_noquota[DTO_INDEX_DELETE]);
3290 inode = osd_dt_obj(dt)->oo_inode;
3293 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
3294 0, oh, osd_dt_obj(dt), true, NULL, false);
3298 static inline int osd_get_fid_from_dentry(struct ldiskfs_dir_entry_2 *de,
3301 struct osd_fid_pack *rec;
3304 if (de->file_type & LDISKFS_DIRENT_LUFID) {
3305 rec = (struct osd_fid_pack *) (de->name + de->name_len + 1);
3306 rc = osd_fid_unpack((struct lu_fid *)fid, rec);
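/*
 * Editorial sketch of the dirent layout assumed by the helper above (an
 * interpretation of the pointer arithmetic, not copied from the original
 * sources): when the LDISKFS_DIRENT_LUFID bit is set in file_type, a packed
 * fid is stored in the same directory entry, just past the name:
 *
 *	| inode | rec_len | name_len | file_type | name ... | osd_fid_pack |
 *	                      fid pack starts at de->name + de->name_len + 1
 */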
3311 static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
3312 const struct lu_fid *fid)
3314 struct seq_server_site *ss = osd_seq_site(osd);
3317 /* FID seqs not in FLDB, must be local seq */
3318 if (unlikely(!fid_seq_in_fldb(fid_seq(fid))))
3321 /* If the FLD has not been initialized yet, this can only happen during
3322 * initialization, likely during MGS initialization, and we assume
3323 * this is a local FID. */
3324 if (ss == NULL || ss->ss_server_fld == NULL)
3327 /* Only check the local FLDB here */
3328 if (osd_seq_exists(env, osd, fid_seq(fid)))
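/*
 * Editorial summary of the checks above (the RETURN statements are elided
 * in this listing, so this is an interpretation of intent): a sequence not
 * managed by the FLDB, an FLD that is not yet set up, or a sequence present
 * in the local FLDB all mean the FID is local; only a FLDB-managed sequence
 * that is absent locally is treated as remote.
 */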
3335 * Index delete function for interoperability mode (b11826).
3336 * It will remove the directory entry added by osd_index_ea_insert().
3337 * This entry is needed to maintain name->fid mapping.
3339 * \param key key, i.e. the file entry to be deleted
3341 * \retval 0, on success
3342 * \retval -ve, on error
3344 static int osd_index_ea_delete(const struct lu_env *env, struct dt_object *dt,
3345 const struct dt_key *key, struct thandle *handle)
3347 struct osd_object *obj = osd_dt_obj(dt);
3348 struct inode *dir = obj->oo_inode;
3349 struct dentry *dentry;
3350 struct osd_thandle *oh;
3351 struct ldiskfs_dir_entry_2 *de = NULL;
3352 struct buffer_head *bh;
3353 struct htree_lock *hlock = NULL;
3354 struct lu_fid *fid = &osd_oti_get(env)->oti_fid;
3355 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
3359 if (!dt_object_exists(dt))
3362 LINVRNT(osd_invariant(obj));
3363 LASSERT(!dt_object_remote(dt));
3364 LASSERT(handle != NULL);
3366 osd_trans_exec_op(env, handle, OSD_OT_DELETE);
3368 oh = container_of(handle, struct osd_thandle, ot_super);
3369 LASSERT(oh->ot_handle != NULL);
3370 LASSERT(oh->ot_handle->h_transaction != NULL);
3372 ll_vfs_dq_init(dir);
3373 dentry = osd_child_dentry_get(env, obj,
3374 (char *)key, strlen((char *)key));
3376 if (obj->oo_hl_head != NULL) {
3377 hlock = osd_oti_get(env)->oti_hlock;
3378 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
3379 dir, LDISKFS_HLOCK_DEL);
3381 down_write(&obj->oo_ext_idx_sem);
3384 bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
3388 /* If this is not the ".." entry, it might be a remote DNE
3389 * entry and we need to check if the FID is for a remote
3390 * MDT. If the FID is not in the directory entry (e.g.
3391 * upgraded 1.8 filesystem without dirdata enabled) then
3392 * we need to get the FID from the LMA. For a remote directory
3393 * there HAS to be an LMA, it cannot be an IGIF inode in this
3396 * Delete the entry before the agent inode in order to
3397 * simplify error handling. At worst an error after deleting
3398 * the entry first might leak the agent inode afterward. The
3399 * reverse would need filesystem abort in case of error deleting
3400 * the entry after the agent had been removed, or leave a
3401 * dangling entry pointing at a random inode. */
3402 if (strcmp((char *)key, dotdot) != 0) {
3403 LASSERT(de != NULL);
3404 rc = osd_get_fid_from_dentry(de, (struct dt_rec *)fid);
3405 /* If the FID is not in the dentry, try to get it from the LMA */
3406 if (rc == -ENODATA) {
3407 struct osd_inode_id *id;
3408 struct inode *inode;
3410 /* Before trying to get fid from the inode,
3411 * check whether the inode is valid.
3413 * If the inode has been deleted, do not go
3414 * ahead with osd_ea_fid_get, which would mark
3415 * the inode as a bad inode and might cause
3416 * the inode to be deleted incorrectly */
3417 inode = ldiskfs_iget(osd_sb(osd),
3418 le32_to_cpu(de->inode));
3419 if (IS_ERR(inode)) {
3420 CDEBUG(D_INODE, "%s: "DFID" get inode "
3421 "error.\n", osd_name(osd),
3423 rc = PTR_ERR(inode);
3425 if (likely(inode->i_nlink != 0)) {
3426 id = &osd_oti_get(env)->oti_id;
3427 rc = osd_ea_fid_get(env, obj,
3428 le32_to_cpu(de->inode),
3431 CDEBUG(D_INFO, "%s: %u "DFID
3434 le32_to_cpu(de->inode),
3442 unlikely(osd_remote_fid(env, osd, fid)))
3443 /* Need to delete agent inode */
3444 ino = le32_to_cpu(de->inode);
3446 rc = ldiskfs_delete_entry(oh->ot_handle, dir, de, bh);
3448 if (rc == 0 && unlikely(ino != 0)) {
3449 rc = osd_delete_local_agent_inode(env, osd, fid, ino,
3452 CERROR("%s: del local inode "DFID": rc = %d\n",
3453 osd_name(osd), PFID(fid), rc);
3459 ldiskfs_htree_unlock(hlock);
3461 up_write(&obj->oo_ext_idx_sem);
3466 /* For an inode on a remote MDT, ".." will point to the
3467 * /Agent directory; check whether it needs to be deleted
3468 * from the agent directory */
3469 if (unlikely(strcmp((char *)key, dotdot) == 0)) {
3470 rc = osd_delete_from_remote_parent(env, osd_obj2dev(obj), obj,
3472 if (rc != 0 && rc != -ENOENT) {
3473 CERROR("%s: delete agent inode "DFID": rc = %d\n",
3474 osd_name(osd), PFID(fid), rc);
3484 LASSERT(osd_invariant(obj));
3489 * Lookup index for \a key and copy record to \a rec.
3491 * \param dt osd index object
3492 * \param key key for index
3493 * \param rec record reference
3495 * \retval +ve success: exact match
3496 * \retval 0 return record with key not greater than \a key
3497 * \retval -ve failure
3499 static int osd_index_iam_lookup(const struct lu_env *env, struct dt_object *dt,
3500 struct dt_rec *rec, const struct dt_key *key)
3502 struct osd_object *obj = osd_dt_obj(dt);
3503 struct iam_path_descr *ipd;
3504 struct iam_container *bag = &obj->oo_dir->od_container;
3505 struct osd_thread_info *oti = osd_oti_get(env);
3506 struct iam_iterator *it = &oti->oti_idx_it;
3507 struct iam_rec *iam_rec;
3511 if (!dt_object_exists(dt))
3514 LASSERT(osd_invariant(obj));
3515 LASSERT(!dt_object_remote(dt));
3516 LASSERT(bag->ic_object == obj->oo_inode);
3518 ipd = osd_idx_ipd_get(env, bag);
3522 /* got the ipd; now we can start the iterator. */
3523 iam_it_init(it, bag, 0, ipd);
3525 if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
3526 /* swab quota uid/gid provided by caller */
3527 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
3528 key = (const struct dt_key *)&oti->oti_quota_id;
3531 rc = iam_it_get(it, (struct iam_key *)key);
3533 if (S_ISDIR(obj->oo_inode->i_mode))
3534 iam_rec = (struct iam_rec *)oti->oti_ldp;
3536 iam_rec = (struct iam_rec *) rec;
3538 iam_reccpy(&it->ii_path.ip_leaf, (struct iam_rec *)iam_rec);
3540 if (S_ISDIR(obj->oo_inode->i_mode))
3541 osd_fid_unpack((struct lu_fid *) rec,
3542 (struct osd_fid_pack *)iam_rec);
3543 else if (fid_is_quota(lu_object_fid(&dt->do_lu)))
3544 osd_quota_unpack(obj, rec);
3549 osd_ipd_put(env, bag, ipd);
3551 LINVRNT(osd_invariant(obj));
3556 static int osd_index_declare_iam_insert(const struct lu_env *env,
3557 struct dt_object *dt,
3558 const struct dt_rec *rec,
3559 const struct dt_key *key,
3560 struct thandle *handle)
3562 struct osd_thandle *oh;
3564 LASSERT(handle != NULL);
3566 oh = container_of0(handle, struct osd_thandle, ot_super);
3567 LASSERT(oh->ot_handle == NULL);
3569 osd_trans_declare_op(env, oh, OSD_OT_INSERT,
3570 osd_dto_credits_noquota[DTO_INDEX_INSERT]);
3576 * Inserts (key, value) pair in \a dt index object.
3578 * \param dt osd index object
3579 * \param key key for index
3580 * \param rec record reference
3581 * \param th transaction handler
3584 * \retval -ve failure
3586 static int osd_index_iam_insert(const struct lu_env *env, struct dt_object *dt,
3587 const struct dt_rec *rec,
3588 const struct dt_key *key, struct thandle *th,
3591 struct osd_object *obj = osd_dt_obj(dt);
3592 struct iam_path_descr *ipd;
3593 struct osd_thandle *oh;
3594 struct iam_container *bag;
3595 struct osd_thread_info *oti = osd_oti_get(env);
3596 struct iam_rec *iam_rec;
3600 if (!dt_object_exists(dt))
3603 LINVRNT(osd_invariant(obj));
3604 LASSERT(!dt_object_remote(dt));
3606 bag = &obj->oo_dir->od_container;
3607 LASSERT(bag->ic_object == obj->oo_inode);
3608 LASSERT(th != NULL);
3610 osd_trans_exec_op(env, th, OSD_OT_INSERT);
3612 ipd = osd_idx_ipd_get(env, bag);
3613 if (unlikely(ipd == NULL))
3616 oh = container_of0(th, struct osd_thandle, ot_super);
3617 LASSERT(oh->ot_handle != NULL);
3618 LASSERT(oh->ot_handle->h_transaction != NULL);
3619 if (S_ISDIR(obj->oo_inode->i_mode)) {
3620 iam_rec = (struct iam_rec *)oti->oti_ldp;
3621 osd_fid_pack((struct osd_fid_pack *)iam_rec, rec, &oti->oti_fid);
3622 } else if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
3623 /* pack quota uid/gid */
3624 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
3625 key = (const struct dt_key *)&oti->oti_quota_id;
3626 /* pack quota record */
3627 rec = osd_quota_pack(obj, rec, &oti->oti_quota_rec);
3628 iam_rec = (struct iam_rec *)rec;
3630 iam_rec = (struct iam_rec *)rec;
3633 rc = iam_insert(oh->ot_handle, bag, (const struct iam_key *)key,
3635 osd_ipd_put(env, bag, ipd);
3636 LINVRNT(osd_invariant(obj));
3641 * Calls ldiskfs_add_entry() to add a directory entry
3642 * into the directory. This is required for
3643 * interoperability mode (b11826)
3645 * \retval 0, on success
3646 * \retval -ve, on error
3648 static int __osd_ea_add_rec(struct osd_thread_info *info,