4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2016, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/osd/osd_handler.c
34 * Top-level entry points into osd module
36 * Author: Nikita Danilov <nikita@clusterfs.com>
37 * Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
40 #define DEBUG_SUBSYSTEM S_OSD
42 #include <linux/module.h>
43 #include <linux/user_namespace.h>
44 #ifdef HAVE_UIDGID_HEADER
45 # include <linux/uidgid.h>
48 /* LUSTRE_VERSION_CODE */
49 #include <lustre_ver.h>
50 /* prerequisite for linux/xattr.h */
51 #include <linux/types.h>
52 /* prerequisite for linux/xattr.h */
54 /* XATTR_{REPLACE,CREATE} */
55 #include <linux/xattr.h>
57 #include <ldiskfs/ldiskfs.h>
58 #include <ldiskfs/xattr.h>
59 #include <ldiskfs/ldiskfs_extents.h>
62 * struct OBD_{ALLOC,FREE}*()
65 #include <obd_support.h>
66 /* struct ptlrpc_thread */
67 #include <lustre_net.h>
68 #include <lustre_fid.h>
70 #include <lustre_param.h>
72 #include "osd_internal.h"
73 #include "osd_dynlocks.h"
75 /* llo_* api support */
76 #include <md_object.h>
77 #include <lustre_quota.h>
79 #include <lustre_linkea.h>
81 #define PFID_STRIPE_IDX_BITS 16
82 #define PFID_STRIPE_COUNT_MASK ((1 << PFID_STRIPE_IDX_BITS) - 1)
85 module_param(ldiskfs_pdo, int, 0644);
86 MODULE_PARM_DESC(ldiskfs_pdo, "ldiskfs with parallel directory operations");
88 int ldiskfs_track_declares_assert;
89 module_param(ldiskfs_track_declares_assert, int, 0644);
90 MODULE_PARM_DESC(ldiskfs_track_declares_assert, "LBUG during tracking of declares");
92 /* Slab to allocate dynlocks */
93 struct kmem_cache *dynlock_cachep;
95 /* Slab to allocate osd_it_ea */
96 struct kmem_cache *osd_itea_cachep;
98 static struct lu_kmem_descr ldiskfs_caches[] = {
100 .ckd_cache = &dynlock_cachep,
101 .ckd_name = "dynlock_cache",
102 .ckd_size = sizeof(struct dynlock_handle)
105 .ckd_cache = &osd_itea_cachep,
106 .ckd_name = "osd_itea_cache",
107 .ckd_size = sizeof(struct osd_it_ea)
114 static const char dot[] = ".";
115 static const char dotdot[] = "..";
116 static const char remote_obj_dir[] = "REM_OBJ_DIR";
118 static const struct lu_object_operations osd_lu_obj_ops;
119 static const struct dt_object_operations osd_obj_ops;
120 static const struct dt_object_operations osd_obj_ea_ops;
121 static const struct dt_object_operations osd_obj_otable_it_ops;
122 static const struct dt_index_operations osd_index_iam_ops;
123 static const struct dt_index_operations osd_index_ea_ops;
125 static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
126 const struct lu_fid *fid);
127 static int osd_process_scheduled_agent_removals(const struct lu_env *env,
128 struct osd_device *osd);
130 int osd_trans_declare_op2rb[] = {
131 [OSD_OT_ATTR_SET] = OSD_OT_ATTR_SET,
132 [OSD_OT_PUNCH] = OSD_OT_MAX,
133 [OSD_OT_XATTR_SET] = OSD_OT_XATTR_SET,
134 [OSD_OT_CREATE] = OSD_OT_DESTROY,
135 [OSD_OT_DESTROY] = OSD_OT_CREATE,
136 [OSD_OT_REF_ADD] = OSD_OT_REF_DEL,
137 [OSD_OT_REF_DEL] = OSD_OT_REF_ADD,
138 [OSD_OT_WRITE] = OSD_OT_WRITE,
139 [OSD_OT_INSERT] = OSD_OT_DELETE,
140 [OSD_OT_DELETE] = OSD_OT_INSERT,
141 [OSD_OT_QUOTA] = OSD_OT_MAX,
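/*
 * Illustrative sketch, kept out of the build with #if 0: the
 * osd_trans_declare_op2rb[] table above maps every declared operation to
 * the operation needed to roll it back, with OSD_OT_MAX marking entries
 * that cannot be rolled back.  The standalone example below shows the same
 * designated-initializer lookup-table pattern; all demo_* names are
 * hypothetical and not part of Lustre.
 */
#if 0
#include <stdio.h>

enum demo_op {
	DEMO_OT_CREATE,
	DEMO_OT_DESTROY,
	DEMO_OT_INSERT,
	DEMO_OT_DELETE,
	DEMO_OT_PUNCH,
	DEMO_OT_MAX
};

/* inverse operation for each op; DEMO_OT_MAX means "cannot be rolled back" */
static const enum demo_op demo_op2rb[DEMO_OT_MAX] = {
	[DEMO_OT_CREATE]  = DEMO_OT_DESTROY,
	[DEMO_OT_DESTROY] = DEMO_OT_CREATE,
	[DEMO_OT_INSERT]  = DEMO_OT_DELETE,
	[DEMO_OT_DELETE]  = DEMO_OT_INSERT,
	[DEMO_OT_PUNCH]   = DEMO_OT_MAX,
};

int main(void)
{
	enum demo_op op = DEMO_OT_INSERT;

	if (demo_op2rb[op] != DEMO_OT_MAX)
		printf("op %d rolls back via op %d\n", op, demo_op2rb[op]);
	else
		printf("op %d cannot be rolled back\n", op);
	return 0;
}
#endif /* illustrative example */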
144 static int osd_has_index(const struct osd_object *obj)
146 return obj->oo_dt.do_index_ops != NULL;
149 static int osd_object_invariant(const struct lu_object *l)
151 return osd_invariant(osd_obj(l));
155 * Concurrency: doesn't matter
159 * Concurrency: doesn't matter
161 static int osd_write_locked(const struct lu_env *env, struct osd_object *o)
163 struct osd_thread_info *oti = osd_oti_get(env);
164 return oti->oti_w_locks > 0 && o->oo_owner == env;
168 * Concurrency: doesn't access mutable data
170 static int osd_root_get(const struct lu_env *env,
171 struct dt_device *dev, struct lu_fid *f)
173 lu_local_obj_fid(f, OSD_FS_ROOT_OID);
178 * the following set of functions are used to maintain per-thread
179 * cache of FID->ino mapping. this mechanism is needed to resolve
180 * FID to inode at dt_insert() which in turn stores ino in the
181 * directory entries to keep ldiskfs compatible with ext[34].
182 * due to locking-originated restrictions we can't look up the ino
183 * using the LU cache (a deadlock is possible), and a lookup via the OI is
184 * quite expensive. so instead we maintain this cache and methods like
185 * dt_create() fill it. so in the majority of cases dt_insert() is
186 * able to find the needed mapping in a lockless manner.
188 static struct osd_idmap_cache *
189 osd_idc_find(const struct lu_env *env, struct osd_device *osd,
190 const struct lu_fid *fid)
192 struct osd_thread_info *oti = osd_oti_get(env);
193 struct osd_idmap_cache *idc = oti->oti_ins_cache;
195 for (i = 0; i < oti->oti_ins_cache_used; i++) {
196 if (!lu_fid_eq(&idc[i].oic_fid, fid))
198 if (idc[i].oic_dev != osd)
207 static struct osd_idmap_cache *
208 osd_idc_add(const struct lu_env *env, struct osd_device *osd,
209 const struct lu_fid *fid)
211 struct osd_thread_info *oti = osd_oti_get(env);
212 struct osd_idmap_cache *idc;
215 if (unlikely(oti->oti_ins_cache_used >= oti->oti_ins_cache_size)) {
216 i = oti->oti_ins_cache_size * 2;
218 i = OSD_INS_CACHE_SIZE;
219 OBD_ALLOC(idc, sizeof(*idc) * i);
221 return ERR_PTR(-ENOMEM);
222 if (oti->oti_ins_cache != NULL) {
223 memcpy(idc, oti->oti_ins_cache,
224 oti->oti_ins_cache_used * sizeof(*idc));
225 OBD_FREE(oti->oti_ins_cache,
226 oti->oti_ins_cache_used * sizeof(*idc));
228 oti->oti_ins_cache = idc;
229 oti->oti_ins_cache_size = i;
232 idc = oti->oti_ins_cache + oti->oti_ins_cache_used++;
235 idc->oic_lid.oii_ino = 0;
236 idc->oic_lid.oii_gen = 0;
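/*
 * Illustrative sketch, kept out of the build with #if 0: osd_idc_find() and
 * osd_idc_add() above implement a small per-thread array that caches
 * FID-to-inode mappings and doubles its backing storage when full, so that
 * dt_insert() can usually resolve a FID without locks or an OI lookup.  The
 * standalone example below shows the same find/grow/append pattern; all
 * demo_* names are hypothetical and a plain integer stands in for the FID.
 */
#if 0
#include <stdlib.h>
#include <string.h>

struct demo_entry {
	unsigned long long de_key;	/* stands in for the FID */
	unsigned long	   de_ino;	/* cached inode number   */
};

struct demo_cache {
	struct demo_entry *dc_slots;
	unsigned int	   dc_used;
	unsigned int	   dc_size;
};

/* linear scan; the cache is expected to stay small, so this is cheap */
static struct demo_entry *demo_find(struct demo_cache *c,
				    unsigned long long key)
{
	unsigned int i;

	for (i = 0; i < c->dc_used; i++)
		if (c->dc_slots[i].de_key == key)
			return &c->dc_slots[i];
	return NULL;
}

/* append a new entry, doubling the backing array when it is full */
static struct demo_entry *demo_add(struct demo_cache *c,
				   unsigned long long key)
{
	if (c->dc_used >= c->dc_size) {
		unsigned int nsize = c->dc_size ? c->dc_size * 2 : 4;
		struct demo_entry *nslots = calloc(nsize, sizeof(*nslots));

		if (nslots == NULL)
			return NULL;
		memcpy(nslots, c->dc_slots, c->dc_used * sizeof(*nslots));
		free(c->dc_slots);
		c->dc_slots = nslots;
		c->dc_size = nsize;
	}
	c->dc_slots[c->dc_used].de_key = key;
	c->dc_slots[c->dc_used].de_ino = 0;	/* filled in by the caller */
	return &c->dc_slots[c->dc_used++];
}

int main(void)
{
	struct demo_cache cache = { NULL, 0, 0 };
	struct demo_entry *e = demo_add(&cache, 0x200000401ULL);
	int found;

	if (e != NULL)
		e->de_ino = 12345;
	found = demo_find(&cache, 0x200000401ULL) != NULL;
	free(cache.dc_slots);
	return found ? 0 : 1;
}
#endif /* illustrative example */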
243 * lookup mapping for the given fid in the cache, initialize a
244 * new one if not found. the initialization checks whether the
245 * object is local or remote. for local objects, OI is used to
246 * learn ino/generation. the function is used when the caller
247 * has no information about the object, e.g. at dt_insert().
249 static struct osd_idmap_cache *
250 osd_idc_find_or_init(const struct lu_env *env, struct osd_device *osd,
251 const struct lu_fid *fid)
253 struct osd_idmap_cache *idc;
256 idc = osd_idc_find(env, osd, fid);
257 LASSERT(!IS_ERR(idc));
261 /* new mapping is needed */
262 idc = osd_idc_add(env, osd, fid);
267 rc = osd_remote_fid(env, osd, fid);
268 if (unlikely(rc < 0))
272 /* the object is local, lookup in OI */
273 /* XXX: probably cheaper to lookup in LU first? */
274 rc = osd_oi_lookup(osd_oti_get(env), osd, fid,
276 if (unlikely(rc < 0)) {
277 CERROR("can't lookup: rc = %d\n", rc);
281 /* the object is remote */
289 * lookup mapping for the given FID and fill it from the given object.
290 * the object is local by definition.
292 static int osd_idc_find_and_init(const struct lu_env *env,
293 struct osd_device *osd,
294 struct osd_object *obj)
296 const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
297 struct osd_idmap_cache *idc;
299 idc = osd_idc_find(env, osd, fid);
300 LASSERT(!IS_ERR(idc));
302 if (obj->oo_inode == NULL)
304 if (idc->oic_lid.oii_ino != obj->oo_inode->i_ino) {
305 LASSERT(idc->oic_lid.oii_ino == 0);
306 idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
307 idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
312 /* new mapping is needed */
313 idc = osd_idc_add(env, osd, fid);
317 if (obj->oo_inode != NULL) {
318 idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
319 idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
325 * OSD object methods.
329 * Concurrency: no concurrent access is possible that early in object
332 static struct lu_object *osd_object_alloc(const struct lu_env *env,
333 const struct lu_object_header *hdr,
336 struct osd_object *mo;
342 l = &mo->oo_dt.do_lu;
343 dt_object_init(&mo->oo_dt, NULL, d);
344 mo->oo_dt.do_ops = &osd_obj_ea_ops;
345 l->lo_ops = &osd_lu_obj_ops;
346 init_rwsem(&mo->oo_sem);
347 init_rwsem(&mo->oo_ext_idx_sem);
348 spin_lock_init(&mo->oo_guard);
349 INIT_LIST_HEAD(&mo->oo_xattr_list);
356 int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
357 struct dentry *dentry, struct lustre_ost_attrs *loa)
361 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
362 (void *)loa, sizeof(*loa));
364 struct lustre_mdt_attrs *lma = &loa->loa_lma;
366 if (rc < sizeof(*lma))
370 lustre_loa_swab(loa, true);
371 /* Check LMA compatibility */
372 if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
373 CWARN("%.16s: unsupported incompat LMA feature(s) %#x "
374 "for fid = "DFID", ino = %lu\n",
375 LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
376 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
377 PFID(&lma->lma_self_fid), inode->i_ino);
380 } else if (rc == 0) {
388 * retrieve object from backend ext fs.
390 struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
391 struct osd_inode_id *id)
394 struct inode *inode = NULL;
396 /* if we look for an inode within a running
397 * transaction, then we risk a deadlock */
398 /* osd_dirent_check_repair() breaks this */
399 /*LASSERT(current->journal_info == NULL);*/
401 inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
403 CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
404 id->oii_ino, PTR_ERR(inode));
405 } else if (id->oii_gen != OSD_OII_NOGEN &&
406 inode->i_generation != id->oii_gen) {
407 CDEBUG(D_INODE, "unmatched inode: ino = %u, oii_gen = %u, "
408 "i_generation = %u\n",
409 id->oii_ino, id->oii_gen, inode->i_generation);
411 inode = ERR_PTR(-ESTALE);
412 } else if (inode->i_nlink == 0) {
413 /* due to parallel readdir and unlink,
414 * we can have dead inode here. */
415 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
417 inode = ERR_PTR(-ESTALE);
418 } else if (is_bad_inode(inode)) {
419 CWARN("%.16s: bad inode: ino = %u\n",
420 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, id->oii_ino);
422 inode = ERR_PTR(-ENOENT);
423 } else if ((rc = osd_attach_jinode(inode))) {
427 ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
428 if (id->oii_gen == OSD_OII_NOGEN)
429 osd_id_gen(id, inode->i_ino, inode->i_generation);
431 /* Do not update file c/mtime in ldiskfs.
432 * NB: we don't have any lock to protect this because we don't
433 * have reference on osd_object now, but contention with
434 * another lookup + attr_set can't happen in the tiny window
435 * between if (...) and set S_NOCMTIME. */
436 if (!(inode->i_flags & S_NOCMTIME))
437 inode->i_flags |= S_NOCMTIME;
442 int osd_ldiskfs_add_entry(struct osd_thread_info *info, struct osd_device *osd,
443 handle_t *handle, struct dentry *child,
444 struct inode *inode, struct htree_lock *hlock)
448 rc = __ldiskfs_add_entry(handle, child, inode, hlock);
449 if (rc == -ENOBUFS || rc == -ENOSPC) {
450 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
451 struct inode *parent = child->d_parent->d_inode;
452 struct lu_fid *fid = NULL;
454 rc2 = osd_get_lma(info, parent, child->d_parent, loa);
456 fid = &loa->loa_lma.lma_self_fid;
457 } else if (rc2 == -ENODATA) {
458 if (unlikely(parent == inode->i_sb->s_root->d_inode)) {
459 fid = &info->oti_fid3;
460 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
461 } else if (!osd->od_is_ost && osd->od_index == 0) {
462 fid = &info->oti_fid3;
463 lu_igif_build(fid, parent->i_ino,
464 parent->i_generation);
469 CWARN("%s: directory (inode: %lu, FID: "DFID") %s "
470 "maximum entry limit\n",
471 osd_name(osd), parent->i_ino, PFID(fid),
472 rc == -ENOSPC ? "has reached" : "is approaching");
474 CWARN("%s: directory (inode: %lu, FID: unknown) %s "
475 "maximum entry limit\n",
476 osd_name(osd), parent->i_ino,
477 rc == -ENOSPC ? "has reached" : "is approaching");
479 /* ignore such error now */
488 static struct inode *
489 osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
490 struct osd_inode_id *id, struct lu_fid *fid)
492 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
496 inode = osd_iget(info, dev, id);
500 rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
502 *fid = loa->loa_lma.lma_self_fid;
503 } else if (rc == -ENODATA) {
504 if (unlikely(inode == osd_sb(dev)->s_root->d_inode))
505 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
507 lu_igif_build(fid, inode->i_ino, inode->i_generation);
515 static struct inode *osd_iget_check(struct osd_thread_info *info,
516 struct osd_device *dev,
517 const struct lu_fid *fid,
518 struct osd_inode_id *id,
525 /* The cached OI mapping is trustworthy. If we cannot locate the inode
526 * via the cached OI mapping, then return the failure to the caller
527 * directly without further OI checking. */
530 inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
533 if (!trusted && (rc == -ENOENT || rc == -ESTALE))
536 CDEBUG(D_INODE, "no inode for FID: "DFID", ino = %u, rc = %d\n",
537 PFID(fid), id->oii_ino, rc);
541 if (is_bad_inode(inode)) {
546 CDEBUG(D_INODE, "bad inode for FID: "DFID", ino = %u\n",
547 PFID(fid), id->oii_ino);
551 if (id->oii_gen != OSD_OII_NOGEN &&
552 inode->i_generation != id->oii_gen) {
557 CDEBUG(D_INODE, "unmatched inode for FID: "DFID", ino = %u, "
558 "oii_gen = %u, i_generation = %u\n", PFID(fid),
559 id->oii_ino, id->oii_gen, inode->i_generation);
563 if (inode->i_nlink == 0) {
568 CDEBUG(D_INODE, "stale inode for FID: "DFID", ino = %u\n",
569 PFID(fid), id->oii_ino);
573 ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
577 __u32 saved_ino = id->oii_ino;
578 __u32 saved_gen = id->oii_gen;
581 LASSERTF(rc == -ESTALE || rc == -ENOENT, "rc = %d\n", rc);
583 rc = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
584 /* XXX: There are four possible cases:
586 * Backup/restore caused the OI to become invalid.
588 * Someone unlinked the object but did NOT remove
589 * the OI mapping, e.g. by mounting the target device
590 * as ldiskfs and modifying something directly.
592 * Someone just removed the object between the
593 * former oi_lookup and the iget. That is normal.
594 * 4. Other failure cases.
596 * Generally, when the device is mounted, it will
597 * automatically check whether the system was restored from
598 * a file-level backup or not. We trust such detection
599 * to distinguish the 1st case from the 2nd case:
600 * if the OI files are consistent but may contain
601 * stale OI mappings because of case 2, if iget()
602 * returns -ENOENT or -ESTALE, then it should be
605 /* If the OI mapping was in the OI file before
606 * osd_iget_check(), but now it has disappeared,
607 * then it must have been removed by a race. That is a
608 * normal race case. */
611 /* The OI scrub updated the OI mapping by race.
612 * The new OI mapping must be valid. */
613 if (saved_ino != id->oii_ino || saved_gen != id->oii_gen) {
619 if (dev->od_scrub.os_file.sf_flags & SF_INCONSISTENT)
620 /* It can still be case 2, but we cannot
621 * distinguish it from case 1. So return
622 * -EREMCHG to block the current operation until
623 * the OI scrub has rebuilt the OI mappings. */
631 if (inode->i_generation == id->oii_gen)
636 if (id->oii_gen == OSD_OII_NOGEN)
637 osd_id_gen(id, inode->i_ino, inode->i_generation);
639 /* Do not update file c/mtime in ldiskfs.
640 * NB: we don't have any lock to protect this because we don't
641 * have reference on osd_object now, but contention with
642 * another lookup + attr_set can't happen in the tiny window
643 * between if (...) and set S_NOCMTIME. */
644 if (!(inode->i_flags & S_NOCMTIME))
645 inode->i_flags |= S_NOCMTIME;
662 * \retval +v: new filter_fid, does not contain self-fid
663 * \retval 0: filter_fid_old, contains self-fid
664 * \retval -v: other failure cases
666 int osd_get_idif(struct osd_thread_info *info, struct inode *inode,
667 struct dentry *dentry, struct lu_fid *fid)
669 struct filter_fid_old *ff = &info->oti_ff;
670 struct ost_id *ostid = &info->oti_ostid;
673 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_FID, ff, sizeof(*ff));
674 if (rc == sizeof(*ff)) {
676 ostid_set_seq(ostid, le64_to_cpu(ff->ff_seq));
677 rc = ostid_set_id(ostid, le64_to_cpu(ff->ff_objid));
679 * XXX: use 0 as the index for compatibility, the caller will
680 * handle index related issues when necessary.
683 ostid_to_fid(fid, ostid, 0);
684 } else if (rc == sizeof(struct filter_fid)) {
686 } else if (rc >= 0) {
693 static int osd_lma_self_repair(struct osd_thread_info *info,
694 struct osd_device *osd, struct inode *inode,
695 const struct lu_fid *fid, __u32 compat)
700 LASSERT(current->journal_info == NULL);
702 jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
703 osd_dto_credits_noquota[DTO_XATTR_SET]);
706 CWARN("%s: cannot start journal for lma_self_repair: rc = %d\n",
711 rc = osd_ea_fid_set(info, inode, fid, compat, 0);
713 CWARN("%s: cannot self repair the LMA: rc = %d\n",
715 ldiskfs_journal_stop(jh);
719 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
721 struct osd_thread_info *info = osd_oti_get(env);
722 struct osd_device *osd = osd_obj2dev(obj);
723 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
724 struct lustre_mdt_attrs *lma = &loa->loa_lma;
725 struct inode *inode = obj->oo_inode;
726 struct dentry *dentry = &info->oti_obj_dentry;
727 struct lu_fid *fid = NULL;
728 const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
732 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
733 (void *)loa, sizeof(*loa));
734 if (rc == -ENODATA && !fid_is_igif(rfid) && osd->od_check_ff) {
735 fid = &lma->lma_self_fid;
736 rc = osd_get_idif(info, inode, dentry, fid);
737 if ((rc > 0) || (rc == -ENODATA && osd->od_index_in_idif)) {
738 /* For the given OST-object, if it has neither LMA nor
739 * FID in XATTR_NAME_FID, then the given FID (which is
740 * contained in the @obj, from client RPC for locating
741 * the OST-object) is trusted. We use it to generate
743 osd_lma_self_repair(info, osd, inode, rfid,
754 lustre_lma_swab(lma);
755 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
756 (CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT) &&
757 S_ISREG(inode->i_mode)))) {
758 CWARN("%s: unsupported incompat LMA feature(s) %#x for "
759 "fid = "DFID", ino = %lu\n", osd_name(osd),
760 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
761 PFID(rfid), inode->i_ino);
764 fid = &lma->lma_self_fid;
768 if (fid != NULL && unlikely(!lu_fid_eq(rfid, fid))) {
769 if (fid_is_idif(rfid) && fid_is_idif(fid)) {
770 struct ost_id *oi = &info->oti_ostid;
771 struct lu_fid *fid1 = &info->oti_fid3;
772 __u32 idx = fid_idif_ost_idx(rfid);
774 /* For an old IDIF, the OST index is not part of the IDIF,
775 * meaning that different OSTs may have the same IDIFs.
776 * In such a case, we need to make a compatibility
777 * check to make sure the OI scrub is triggered properly. */
778 if (idx != 0 && fid_idif_ost_idx(fid) == 0) {
779 /* Given @rfid is new, LMA is old. */
780 fid_to_ostid(fid, oi);
781 ostid_to_fid(fid1, oi, idx);
782 if (lu_fid_eq(fid1, rfid)) {
783 if (osd->od_index_in_idif)
784 osd_lma_self_repair(info, osd,
798 struct osd_check_lmv_buf {
799 #ifdef HAVE_DIR_CONTEXT
800 /* please keep it as first member */
801 struct dir_context ctx;
803 struct osd_thread_info *oclb_info;
804 struct osd_device *oclb_dev;
805 struct osd_idmap_cache *oclb_oic;
809 * It is called internally by ->readdir() to pick out the
810 * local slave object's FID of the striped directory.
812 * \retval 1 found the local slave's FID
813 * \retval 0 continue to check next item
814 * \retval -ve for failure
816 #ifdef HAVE_FILLDIR_USE_CTX
817 static int osd_stripe_dir_filldir(struct dir_context *buf,
819 static int osd_stripe_dir_filldir(void *buf,
821 const char *name, int namelen,
822 loff_t offset, __u64 ino, unsigned d_type)
824 struct osd_check_lmv_buf *oclb = (struct osd_check_lmv_buf *)buf;
825 struct osd_thread_info *oti = oclb->oclb_info;
826 struct lu_fid *fid = &oti->oti_fid3;
827 struct osd_inode_id *id = &oti->oti_id3;
828 struct osd_device *dev = oclb->oclb_dev;
829 struct osd_idmap_cache *oic = oclb->oclb_oic;
837 sscanf(name + 1, SFID, RFID(fid));
838 if (!fid_is_sane(fid))
841 if (osd_remote_fid(oti->oti_env, dev, fid))
844 osd_id_gen(id, ino, OSD_OII_NOGEN);
845 inode = osd_iget(oti, dev, id);
847 return PTR_ERR(inode);
850 osd_add_oi_cache(oti, dev, id, fid);
854 rc = osd_oii_insert(dev, oic, true);
856 return rc == 0 ? 1 : rc;
859 /* When looking up an item under a striped directory, we need to locate the
860 * master MDT-object of the striped directory first, then the client will send
861 * a lookup (getattr_by_name) RPC to the MDT with some slave MDT-object's FID
862 * and the item's name. If the system is restored from an MDT file-level backup,
863 * then before the OI scrub has completely rebuilt the OI files, the OI mappings
864 * of the master MDT-object and slave MDT-object may be invalid. Usually, that
865 * is not a problem for the master MDT-object, because when locating the master
866 * MDT-object we do a name-based lookup (for the striped directory itself)
867 * first, and during that process we can set up the correct OI mapping for the
868 * master MDT-object. But it is troublesome for the slave MDT-object, because
869 * the client will not trigger a name-based lookup on the MDT to locate the slave
870 * MDT-object before locating an item under the striped directory. Then, in
871 * osd_fid_lookup(), it will find that the OI mapping for the slave MDT-object
872 * is invalid and will not know what the right OI mapping is, so the MDT has
873 * to return -EINPROGRESS to the client to notify it that the OI scrub is rebuilding
874 * the OI file and the related OI mapping is unknown yet, please try again later. The
875 * client will then retry the RPC again and again until the related OI mapping has
876 * been updated. That is quite inefficient.
878 * To resolve the above trouble, we handle it as the following two cases:
880 * 1) The slave MDT-object and the master MDT-object are on different MDTs.
881 * This is relatively easy. Being one of the remote MDT-objects, the slave MDT-object
882 * is linked under /REMOTE_PARENT_DIR with its FID string as the name.
883 * We can locate the slave MDT-object via a lookup in /REMOTE_PARENT_DIR
884 * directly. Please check osd_fid_lookup().
886 * 2) The slave MDT-object and the master MDT-object reside on the same MDT.
887 * In that case, while looking up the master MDT-object, we also look up the
888 * slave MDT-object via readdir against the master MDT-object, because the
889 * slave MDT-objects' information is stored as sub-directories with the name
890 * "${FID}:${index}". Then, when the local slave MDT-object is found, its OI
891 * mapping is recorded, so a subsequent osd_fid_lookup() will know
892 * the correct OI mapping for the slave MDT-object. */
893 static int osd_check_lmv(struct osd_thread_info *oti, struct osd_device *dev,
894 struct inode *inode, struct osd_idmap_cache *oic)
896 struct lu_buf *buf = &oti->oti_big_buf;
897 struct dentry *dentry = &oti->oti_obj_dentry;
898 struct file *filp = &oti->oti_file;
899 const struct file_operations *fops;
900 struct lmv_mds_md_v1 *lmv1;
901 struct osd_check_lmv_buf oclb = {
902 #ifdef HAVE_DIR_CONTEXT
903 .ctx.actor = osd_stripe_dir_filldir,
913 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMV, buf->lb_buf,
916 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMV, NULL, 0);
918 lu_buf_realloc(buf, rc);
919 if (buf->lb_buf == NULL)
920 GOTO(out, rc = -ENOMEM);
926 if (unlikely(rc == 0 || rc == -ENODATA))
932 if (unlikely(buf->lb_buf == NULL)) {
933 lu_buf_realloc(buf, rc);
934 if (buf->lb_buf == NULL)
935 GOTO(out, rc = -ENOMEM);
941 if (le32_to_cpu(lmv1->lmv_magic) != LMV_MAGIC_V1)
945 dentry->d_inode = inode;
946 dentry->d_sb = inode->i_sb;
948 filp->f_path.dentry = dentry;
949 filp->f_mode = FMODE_64BITHASH;
950 filp->f_mapping = inode->i_mapping;
952 filp->private_data = NULL;
953 set_file_inode(filp, inode);
955 #ifdef HAVE_DIR_CONTEXT
956 oclb.ctx.pos = filp->f_pos;
957 rc = fops->iterate(filp, &oclb.ctx);
958 filp->f_pos = oclb.ctx.pos;
960 rc = fops->readdir(filp, &oclb, osd_stripe_dir_filldir);
962 fops->release(inode, filp);
966 CDEBUG(D_LFSCK, "%.16s: fail to check LMV EA, inode = %lu/%u,"
968 LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
969 inode->i_ino, inode->i_generation,
970 PFID(&oic->oic_fid), rc);
977 static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
978 const struct lu_fid *fid,
979 const struct lu_object_conf *conf)
981 struct osd_thread_info *info;
982 struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
983 struct osd_device *dev;
984 struct osd_idmap_cache *oic;
985 struct osd_inode_id *id;
986 struct inode *inode = NULL;
987 struct osd_scrub *scrub;
988 struct scrub_file *sf;
989 __u32 flags = SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT | SS_AUTO_FULL;
996 bool updated = false;
999 LINVRNT(osd_invariant(obj));
1000 LASSERT(obj->oo_inode == NULL);
1001 LASSERTF(fid_is_sane(fid) || fid_is_idif(fid), DFID"\n", PFID(fid));
1003 dev = osd_dev(ldev);
1004 scrub = &dev->od_scrub;
1005 sf = &scrub->os_file;
1006 info = osd_oti_get(env);
1008 oic = &info->oti_cache;
1010 if (OBD_FAIL_CHECK(OBD_FAIL_SRV_ENOENT))
1013 /* For an object created as a locking anchor, or for an object about to
1014 * be created on disk, there is no need for osd_oi_lookup() here because a
1015 * FID should never be re-used. If it really is a duplicate FID for some
1016 * unexpected reason, we should be able to detect it later by calling
1017 * do_create->osd_oi_insert(). */
1018 if (conf != NULL && conf->loc_flags & LOC_F_NEW)
1019 GOTO(out, result = 0);
1021 /* Search order: 1. per-thread cache. */
1022 if (lu_fid_eq(fid, &oic->oic_fid) &&
1023 likely(oic->oic_dev == dev)) {
1029 if (!list_empty(&scrub->os_inconsistent_items)) {
1030 /* Search order: 2. OI scrub pending list. */
1031 result = osd_oii_lookup(dev, fid, id);
1036 /* The OI mapping in the OI file can be updated by the OI scrub
1037 * when we locate the inode via FID, so it may not be trustworthy. */
1040 /* Search order: 3. OI files. */
1041 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1042 if (result == -ENOENT) {
1043 if (!(fid_is_norm(fid) || fid_is_igif(fid)) ||
1044 fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) ||
1045 !ldiskfs_test_bit(osd_oi_fid2idx(dev,fid),
1047 GOTO(out, result = 0);
1056 inode = osd_iget_check(info, dev, fid, id, trusted);
1057 if (IS_ERR(inode)) {
1058 result = PTR_ERR(inode);
1059 if (result == -ENOENT || result == -ESTALE)
1060 GOTO(out, result = 0);
1062 if (result == -EREMCHG) {
1065 /* We still have a chance to get the valid inode: for an
1066 * object which is referenced by a remote name entry, the
1067 * object on the local MDT will be linked under the dir
1068 * "/REMOTE_PARENT_DIR" with its FID string as the name.
1070 * We do not know whether the object for the given FID
1071 * is referenced by some remote name entry or not, and
1072 * especially for DNE II, a multiply-linked object may
1073 * have many name entries residing on many MDTs.
1075 * To simplify the operation, OSD will not distinguish
1076 * further, it just looks up "/REMOTE_PARENT_DIR". Usually, this
1077 * only happens for an RPC from another MDT during the
1078 * OI scrub, or for a client-side RPC with FID only,
1079 * such as FID to path, or from an old connected client. */
1081 !fid_is_on_ost(info, dev, fid, OI_CHECK_FLD)) {
1082 rc1 = osd_lookup_in_remote_parent(info, dev,
1087 flags |= SS_AUTO_PARTIAL;
1088 flags &= ~SS_AUTO_FULL;
1093 if (thread_is_running(&scrub->os_thread)) {
1094 if (scrub->os_partial_scan &&
1095 !scrub->os_in_join) {
1098 if (inode != NULL && !IS_ERR(inode)) {
1101 osd_add_oi_cache(info, dev, id,
1103 osd_oii_insert(dev, oic, true);
1105 result = -EINPROGRESS;
1108 } else if (!dev->od_noscrub) {
1111 rc1 = osd_scrub_start(dev, flags);
1112 LCONSOLE_WARN("%.16s: trigger OI scrub by RPC "
1113 "for the "DFID" with flags 0x%x,"
1114 " rc = %d\n", osd_name(dev),
1115 PFID(fid), flags, rc1);
1116 if (rc1 == 0 || rc1 == -EALREADY) {
1117 if (inode != NULL && !IS_ERR(inode)) {
1120 osd_add_oi_cache(info, dev, id,
1122 osd_oii_insert(dev, oic, true);
1124 result = -EINPROGRESS;
1134 if (inode == NULL || IS_ERR(inode))
1136 } else if (remote) {
1140 obj->oo_inode = inode;
1141 LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
1143 result = osd_check_lma(env, obj);
1147 LASSERTF(id->oii_ino == inode->i_ino &&
1148 id->oii_gen == inode->i_generation,
1149 "locate wrong inode for FID: "DFID", %u/%u => %ld/%u\n",
1150 PFID(fid), id->oii_ino, id->oii_gen,
1151 inode->i_ino, inode->i_generation);
1153 saved_ino = inode->i_ino;
1154 saved_gen = inode->i_generation;
1156 if (unlikely(result == -ENODATA)) {
1157 /* If the OI scrub updated the OI mapping by race, it
1158 * must be valid. Trust the inode that has no LMA EA. */
1162 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1164 /* The OI mapping is still there, the inode is still
1165 * valid. It is just because the inode has no LMA EA. */
1166 if (saved_ino == id->oii_ino &&
1167 saved_gen == id->oii_gen)
1170 /* The OI scrub updated the OI mapping by race.
1171 * The new OI mapping must be valid. */
1174 obj->oo_inode = NULL;
1180 /* "result == -ENOENT" means that the OI mappinghas been
1181 * removed by race, so the inode belongs to other object.
1183 * Others error can be returned directly. */
1184 if (result == -ENOENT) {
1193 obj->oo_inode = NULL;
1195 if (result != -EREMCHG)
1200 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1201 /* "result == -ENOENT" means the cached OI mapping has been removed
1202 * from the OI file by race; the above inode belongs to another object. */
1203 if (result == -ENOENT) {
1206 GOTO(out, result = 0);
1212 if (saved_ino == id->oii_ino && saved_gen == id->oii_gen)
1215 /* The OI scrub updated the OI mapping by race.
1216 * The new OI mapping must be valid. */
1222 obj->oo_compat_dot_created = 1;
1223 obj->oo_compat_dotdot_created = 1;
1225 if (S_ISDIR(inode->i_mode) &&
1226 (flags & SS_AUTO_PARTIAL || sf->sf_status == SS_SCANNING))
1227 osd_check_lmv(info, dev, inode, oic);
1229 result = osd_attach_jinode(inode);
1231 obj->oo_inode = NULL;
1237 GOTO(out, result = 0);
1239 LASSERT(obj->oo_hl_head == NULL);
1240 obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
1241 if (obj->oo_hl_head == NULL) {
1242 obj->oo_inode = NULL;
1244 GOTO(out, result = -ENOMEM);
1246 GOTO(out, result = 0);
1249 if (result != 0 && trusted)
1250 fid_zero(&oic->oic_fid);
1252 LINVRNT(osd_invariant(obj));
1257 * Concurrency: shouldn't matter.
1259 static void osd_object_init0(struct osd_object *obj)
1261 LASSERT(obj->oo_inode != NULL);
1262 obj->oo_dt.do_body_ops = &osd_body_ops;
1263 obj->oo_dt.do_lu.lo_header->loh_attr |=
1264 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
1268 * Concurrency: no concurrent access is possible that early in object
1271 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
1272 const struct lu_object_conf *conf)
1274 struct osd_object *obj = osd_obj(l);
1277 LINVRNT(osd_invariant(obj));
1279 if (fid_is_otable_it(&l->lo_header->loh_fid)) {
1280 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
1281 l->lo_header->loh_attr |= LOHA_EXISTS;
1285 result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
1286 obj->oo_dt.do_body_ops = &osd_body_ops_new;
1287 if (result == 0 && obj->oo_inode != NULL) {
1288 struct osd_thread_info *oti = osd_oti_get(env);
1289 struct lustre_ost_attrs *loa = &oti->oti_ost_attrs;
1291 osd_object_init0(obj);
1292 result = osd_get_lma(oti, obj->oo_inode,
1293 &oti->oti_obj_dentry, loa);
1295 /* Convert LMAI flags to lustre LMA flags
1296 * and cache it to oo_lma_flags */
1298 lma_to_lustre_flags(loa->loa_lma.lma_incompat);
1299 } else if (result == -ENODATA) {
1304 LINVRNT(osd_invariant(obj));
1308 /* The first part of oxe_buf is the xattr name and is '\0' terminated.
1309 * The rest is the value, in binary mode; see the layout sketch below. */
1310 struct osd_xattr_entry {
1311 struct list_head oxe_list;
1315 struct rcu_head oxe_rcu;
1319 static int osd_oxc_get(struct osd_object *obj, const char *name,
1322 struct osd_xattr_entry *tmp;
1323 struct osd_xattr_entry *oxe = NULL;
1324 size_t namelen = strlen(name);
1329 list_for_each_entry_rcu(tmp, &obj->oo_xattr_list, oxe_list) {
1330 if (namelen == tmp->oxe_namelen &&
1331 strncmp(name, tmp->oxe_buf, namelen) == 0) {
1338 GOTO(out, rc = -ENOENT);
1340 if (!oxe->oxe_exist)
1341 GOTO(out, rc = -ENODATA);
1344 rc = oxe->oxe_len - sizeof(*oxe) - oxe->oxe_namelen - 1;
1347 if (buf->lb_buf == NULL)
1350 if (buf->lb_len < rc)
1351 GOTO(out, rc = -ERANGE);
1353 memcpy(buf->lb_buf, &oxe->oxe_buf[namelen + 1], rc);
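/*
 * Illustrative sketch, kept out of the build with #if 0: as described above
 * the struct osd_xattr_entry definition, a cached xattr is packed into one
 * allocation as "<name>\0<binary value>", and osd_oxc_get() recovers the
 * value length as oxe_len - sizeof(entry) - oxe_namelen - 1.  The standalone
 * example below shows the same packing arithmetic; all demo_* names are
 * hypothetical and not part of Lustre.
 */
#if 0
#include <stdlib.h>
#include <string.h>

struct demo_xentry {
	size_t dxe_len;			/* total allocation size     */
	size_t dxe_namelen;		/* strlen(name), without NUL */
	char   dxe_buf[];		/* "<name>\0<binary value>"  */
};

static struct demo_xentry *demo_xentry_pack(const char *name,
					    const void *val, size_t vallen)
{
	size_t namelen = strlen(name);
	size_t len = sizeof(struct demo_xentry) + namelen + 1 + vallen;
	struct demo_xentry *e = calloc(1, len);

	if (e == NULL)
		return NULL;
	e->dxe_len = len;
	e->dxe_namelen = namelen;
	memcpy(e->dxe_buf, name, namelen);	       /* NUL comes from calloc */
	memcpy(e->dxe_buf + namelen + 1, val, vallen); /* value after the NUL   */
	return e;
}

/* recover the value length the same way osd_oxc_get() does */
static size_t demo_xentry_vallen(const struct demo_xentry *e)
{
	return e->dxe_len - sizeof(*e) - e->dxe_namelen - 1;
}

int main(void)
{
	const char value[] = { 0x01, 0x02, 0x03 };
	struct demo_xentry *e = demo_xentry_pack("user.demo", value,
						 sizeof(value));
	int ok = e != NULL && demo_xentry_vallen(e) == sizeof(value);

	free(e);
	return ok ? 0 : 1;
}
#endif /* illustrative example */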
1361 static void osd_oxc_free(struct rcu_head *head)
1363 struct osd_xattr_entry *oxe;
1365 oxe = container_of(head, struct osd_xattr_entry, oxe_rcu);
1366 OBD_FREE(oxe, oxe->oxe_len);
1369 static void osd_oxc_add(struct osd_object *obj, const char *name,
1370 const char *buf, int buflen)
1372 struct osd_xattr_entry *oxe;
1373 struct osd_xattr_entry *old = NULL;
1374 struct osd_xattr_entry *tmp;
1375 size_t namelen = strlen(name);
1376 size_t len = sizeof(*oxe) + namelen + 1 + buflen;
1378 OBD_ALLOC(oxe, len);
1382 INIT_LIST_HEAD(&oxe->oxe_list);
1384 oxe->oxe_namelen = namelen;
1385 memcpy(oxe->oxe_buf, name, namelen);
1387 LASSERT(buf != NULL);
1388 memcpy(oxe->oxe_buf + namelen + 1, buf, buflen);
1389 oxe->oxe_exist = true;
1391 oxe->oxe_exist = false;
1394 /* this should be rarely called, just remove old and add new */
1395 spin_lock(&obj->oo_guard);
1396 list_for_each_entry(tmp, &obj->oo_xattr_list, oxe_list) {
1397 if (namelen == tmp->oxe_namelen &&
1398 strncmp(name, tmp->oxe_buf, namelen) == 0) {
1404 list_replace_rcu(&old->oxe_list, &oxe->oxe_list);
1405 call_rcu(&old->oxe_rcu, osd_oxc_free);
1407 list_add_tail_rcu(&oxe->oxe_list, &obj->oo_xattr_list);
1409 spin_unlock(&obj->oo_guard);
1412 static void osd_oxc_del(struct osd_object *obj, const char *name)
1414 struct osd_xattr_entry *oxe;
1415 size_t namelen = strlen(name);
1417 spin_lock(&obj->oo_guard);
1418 list_for_each_entry(oxe, &obj->oo_xattr_list, oxe_list) {
1419 if (namelen == oxe->oxe_namelen &&
1420 strncmp(name, oxe->oxe_buf, namelen) == 0) {
1421 list_del_rcu(&oxe->oxe_list);
1422 call_rcu(&oxe->oxe_rcu, osd_oxc_free);
1426 spin_unlock(&obj->oo_guard);
1429 static void osd_oxc_fini(struct osd_object *obj)
1431 struct osd_xattr_entry *oxe, *next;
1433 list_for_each_entry_safe(oxe, next, &obj->oo_xattr_list, oxe_list) {
1434 list_del(&oxe->oxe_list);
1435 OBD_FREE(oxe, oxe->oxe_len);
1440 * Concurrency: no concurrent access is possible that late in object
1443 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
1445 struct osd_object *obj = osd_obj(l);
1447 LINVRNT(osd_invariant(obj));
1450 dt_object_fini(&obj->oo_dt);
1451 if (obj->oo_hl_head != NULL)
1452 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
1457 * Concurrency: no concurrent access is possible that late in object
1460 static void osd_index_fini(struct osd_object *o)
1462 struct iam_container *bag;
1464 if (o->oo_dir != NULL) {
1465 bag = &o->oo_dir->od_container;
1466 if (o->oo_inode != NULL) {
1467 if (bag->ic_object == o->oo_inode)
1468 iam_container_fini(bag);
1470 OBD_FREE_PTR(o->oo_dir);
1476 * Concurrency: no concurrent access is possible that late in object
1477 * life-cycle (for all existing callers, that is. New callers have to provide
1478 * their own locking.)
1480 static int osd_inode_unlinked(const struct inode *inode)
1482 return inode->i_nlink == 0;
1486 OSD_TXN_OI_DELETE_CREDITS = 20,
1487 OSD_TXN_INODE_DELETE_CREDITS = 20
1494 #if OSD_THANDLE_STATS
1496 * Set time when the handle is allocated
1498 static void osd_th_alloced(struct osd_thandle *oth)
1500 oth->oth_alloced = ktime_get();
1504 * Set time when the handle started
1506 static void osd_th_started(struct osd_thandle *oth)
1508 oth->oth_started = ktime_get();
1512 * Check whether we have dealt with this handle for too long.
1514 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
1515 ktime_t alloced, ktime_t started,
1518 ktime_t now = ktime_get();
1520 LASSERT(dev != NULL);
1522 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
1523 ktime_us_delta(started, alloced));
1524 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
1525 ktime_us_delta(closed, started));
1526 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
1527 ktime_us_delta(now, closed));
1529 if (ktime_before(ktime_add_ns(alloced, 30 * NSEC_PER_SEC), now)) {
1530 CWARN("transaction handle %p was open for too long: now %lld, alloced %lld, started %lld, closed %lld\n",
1531 oth, now, alloced, started, closed);
1532 libcfs_debug_dumpstack(NULL);
1536 #define OSD_CHECK_SLOW_TH(oth, dev, expr) \
1538 ktime_t __closed = ktime_get(); \
1539 ktime_t __alloced = oth->oth_alloced; \
1540 ktime_t __started = oth->oth_started; \
1543 __osd_th_check_slow(oth, dev, __alloced, __started, __closed); \
1546 #else /* OSD_THANDLE_STATS */
1548 #define osd_th_alloced(h) do {} while(0)
1549 #define osd_th_started(h) do {} while(0)
1550 #define OSD_CHECK_SLOW_TH(oth, dev, expr) expr
1552 #endif /* OSD_THANDLE_STATS */
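/*
 * Illustrative sketch, kept out of the build with #if 0: when
 * OSD_THANDLE_STATS is enabled, OSD_CHECK_SLOW_TH() above wraps the
 * journal-stop expression, records a "closed" timestamp afterwards and
 * warns about handles that stayed open too long.  The standalone userspace
 * example below shows the same wrap-an-expression timing pattern using
 * clock_gettime(); all demo names are hypothetical and not part of Lustre.
 */
#if 0
#include <stdio.h>
#include <time.h>

static long long demo_now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* run "expr", then warn if more than "limit_ns" elapsed since "opened_ns" */
#define DEMO_CHECK_SLOW(opened_ns, limit_ns, expr)			\
do {									\
	long long __closed;						\
									\
	expr;								\
	__closed = demo_now_ns();					\
	if (__closed - (opened_ns) > (limit_ns))			\
		fprintf(stderr, "operation was open for %lld ns\n",	\
			__closed - (opened_ns));			\
} while (0)

int main(void)
{
	long long opened = demo_now_ns();

	/* pretend this is the expensive "stop the transaction handle" step */
	DEMO_CHECK_SLOW(opened, 30LL * 1000000000LL, (void)0);
	return 0;
}
#endif /* illustrative example */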
1555 * Concurrency: doesn't access mutable data.
1557 static int osd_param_is_not_sane(const struct osd_device *dev,
1558 const struct thandle *th)
1560 struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
1562 return oh->ot_credits > osd_transaction_size(dev);
1566 * Concurrency: shouldn't matter.
1568 static void osd_trans_commit_cb(struct super_block *sb,
1569 struct ldiskfs_journal_cb_entry *jcb, int error)
1571 struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
1572 struct thandle *th = &oh->ot_super;
1573 struct lu_device *lud = &th->th_dev->dd_lu_dev;
1574 struct dt_txn_commit_cb *dcb, *tmp;
1576 LASSERT(oh->ot_handle == NULL);
1579 CERROR("transaction @0x%p commit error: %d\n", th, error);
1581 dt_txn_hook_commit(th);
1583 /* call per-transaction callbacks if any */
1584 list_for_each_entry_safe(dcb, tmp, &oh->ot_commit_dcb_list,
1586 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
1587 "commit callback entry: magic=%x name='%s'\n",
1588 dcb->dcb_magic, dcb->dcb_name);
1589 list_del_init(&dcb->dcb_linkage);
1590 dcb->dcb_func(NULL, th, dcb, error);
1593 lu_ref_del_at(&lud->ld_reference, &oh->ot_dev_link, "osd-tx", th);
1597 lu_context_exit(&th->th_ctx);
1598 lu_context_fini(&th->th_ctx);
1602 #ifndef HAVE_SB_START_WRITE
1603 # define sb_start_write(sb) do {} while (0)
1604 # define sb_end_write(sb) do {} while (0)
1607 static struct thandle *osd_trans_create(const struct lu_env *env,
1608 struct dt_device *d)
1610 struct osd_thread_info *oti = osd_oti_get(env);
1611 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1612 struct osd_thandle *oh;
1617 CERROR("%s: someone try to start transaction under "
1618 "readonly mode, should be disabled.\n",
1619 osd_name(osd_dt_dev(d)));
1621 RETURN(ERR_PTR(-EROFS));
1624 /* no pending IO in this thread should be left from a previous request */
1625 LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
1627 sb_start_write(osd_sb(osd_dt_dev(d)));
1629 OBD_ALLOC_GFP(oh, sizeof *oh, GFP_NOFS);
1631 oh->ot_quota_trans = &oti->oti_quota_trans;
1632 memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
1636 th->th_tags = LCT_TX_HANDLE;
1638 INIT_LIST_HEAD(&oh->ot_commit_dcb_list);
1639 INIT_LIST_HEAD(&oh->ot_stop_dcb_list);
1642 memset(oti->oti_declare_ops, 0,
1643 sizeof(oti->oti_declare_ops));
1644 memset(oti->oti_declare_ops_cred, 0,
1645 sizeof(oti->oti_declare_ops_cred));
1646 memset(oti->oti_declare_ops_used, 0,
1647 sizeof(oti->oti_declare_ops_used));
1649 sb_end_write(osd_sb(osd_dt_dev(d)));
1650 th = ERR_PTR(-ENOMEM);
1655 void osd_trans_dump_creds(const struct lu_env *env, struct thandle *th)
1657 struct osd_thread_info *oti = osd_oti_get(env);
1658 struct osd_thandle *oh;
1660 oh = container_of0(th, struct osd_thandle, ot_super);
1661 LASSERT(oh != NULL);
1663 CWARN(" create: %u/%u/%u, destroy: %u/%u/%u\n",
1664 oti->oti_declare_ops[OSD_OT_CREATE],
1665 oti->oti_declare_ops_cred[OSD_OT_CREATE],
1666 oti->oti_declare_ops_used[OSD_OT_CREATE],
1667 oti->oti_declare_ops[OSD_OT_DESTROY],
1668 oti->oti_declare_ops_cred[OSD_OT_DESTROY],
1669 oti->oti_declare_ops_used[OSD_OT_DESTROY]);
1670 CWARN(" attr_set: %u/%u/%u, xattr_set: %u/%u/%u\n",
1671 oti->oti_declare_ops[OSD_OT_ATTR_SET],
1672 oti->oti_declare_ops_cred[OSD_OT_ATTR_SET],
1673 oti->oti_declare_ops_used[OSD_OT_ATTR_SET],
1674 oti->oti_declare_ops[OSD_OT_XATTR_SET],
1675 oti->oti_declare_ops_cred[OSD_OT_XATTR_SET],
1676 oti->oti_declare_ops_used[OSD_OT_XATTR_SET]);
1677 CWARN(" write: %u/%u/%u, punch: %u/%u/%u, quota %u/%u/%u\n",
1678 oti->oti_declare_ops[OSD_OT_WRITE],
1679 oti->oti_declare_ops_cred[OSD_OT_WRITE],
1680 oti->oti_declare_ops_used[OSD_OT_WRITE],
1681 oti->oti_declare_ops[OSD_OT_PUNCH],
1682 oti->oti_declare_ops_cred[OSD_OT_PUNCH],
1683 oti->oti_declare_ops_used[OSD_OT_PUNCH],
1684 oti->oti_declare_ops[OSD_OT_QUOTA],
1685 oti->oti_declare_ops_cred[OSD_OT_QUOTA],
1686 oti->oti_declare_ops_used[OSD_OT_QUOTA]);
1687 CWARN(" insert: %u/%u/%u, delete: %u/%u/%u\n",
1688 oti->oti_declare_ops[OSD_OT_INSERT],
1689 oti->oti_declare_ops_cred[OSD_OT_INSERT],
1690 oti->oti_declare_ops_used[OSD_OT_INSERT],
1691 oti->oti_declare_ops[OSD_OT_DELETE],
1692 oti->oti_declare_ops_cred[OSD_OT_DELETE],
1693 oti->oti_declare_ops_used[OSD_OT_DELETE]);
1694 CWARN(" ref_add: %u/%u/%u, ref_del: %u/%u/%u\n",
1695 oti->oti_declare_ops[OSD_OT_REF_ADD],
1696 oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
1697 oti->oti_declare_ops_used[OSD_OT_REF_ADD],
1698 oti->oti_declare_ops[OSD_OT_REF_DEL],
1699 oti->oti_declare_ops_cred[OSD_OT_REF_DEL],
1700 oti->oti_declare_ops_used[OSD_OT_REF_DEL]);
1704 * Concurrency: shouldn't matter.
1706 static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
1709 struct osd_thread_info *oti = osd_oti_get(env);
1710 struct osd_device *dev = osd_dt_dev(d);
1712 struct osd_thandle *oh;
1717 LASSERT(current->journal_info == NULL);
1719 oh = container_of0(th, struct osd_thandle, ot_super);
1720 LASSERT(oh != NULL);
1721 LASSERT(oh->ot_handle == NULL);
1723 rc = dt_txn_hook_start(env, d, th);
1727 if (unlikely(osd_param_is_not_sane(dev, th))) {
1728 static unsigned long last_printed;
1729 static int last_credits;
1731 /* don't make noise on tiny testing systems;
1732 * actual credits misuse will be caught anyway */
1733 if (last_credits != oh->ot_credits &&
1734 time_after(jiffies, last_printed +
1735 msecs_to_jiffies(60 * MSEC_PER_SEC)) &&
1736 osd_transaction_size(dev) > 512) {
1737 CWARN("%s: credits %u > trans_max %u\n", osd_name(dev),
1738 oh->ot_credits, osd_transaction_size(dev));
1739 osd_trans_dump_creds(env, th);
1740 libcfs_debug_dumpstack(NULL);
1741 last_credits = oh->ot_credits;
1742 last_printed = jiffies;
1744 /* XXX Limit the credits to 'max_transaction_buffers', and
1745 * let the underlying filesystem catch the error if
1746 * we really need so many credits.
1748 * This should be removed when we can calculate the
1749 * credits precisely. */
1750 oh->ot_credits = osd_transaction_size(dev);
1751 } else if (ldiskfs_track_declares_assert != 0) {
1752 /* reserve a few credits to prevent an assertion in JBD;
1753 * our debugging mechanism will be able to detect
1754 * overuse. this can help to debug single-update
1756 oh->ot_credits += 10;
1757 if (unlikely(osd_param_is_not_sane(dev, th)))
1758 oh->ot_credits = osd_transaction_size(dev);
1762 * XXX temporary stuff. Some abstraction layer should
1765 jh = osd_journal_start_sb(osd_sb(dev), LDISKFS_HT_MISC, oh->ot_credits);
1769 LASSERT(oti->oti_txns == 0);
1770 lu_context_init(&th->th_ctx, th->th_tags);
1771 lu_context_enter(&th->th_ctx);
1773 lu_device_get(&d->dd_lu_dev);
1774 lu_ref_add_at(&d->dd_lu_dev.ld_reference, &oh->ot_dev_link,
1785 static int osd_seq_exists(const struct lu_env *env,
1786 struct osd_device *osd, u64 seq)
1788 struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
1789 struct seq_server_site *ss = osd_seq_site(osd);
1793 LASSERT(ss != NULL);
1794 LASSERT(ss->ss_server_fld != NULL);
1796 rc = osd_fld_lookup(env, osd, seq, range);
1799 CERROR("%s: can't lookup FLD sequence %#llx: rc = %d\n",
1800 osd_name(osd), seq, rc);
1804 RETURN(ss->ss_node_id == range->lsr_index);
1807 static void osd_trans_stop_cb(struct osd_thandle *oth, int result)
1809 struct dt_txn_commit_cb *dcb;
1810 struct dt_txn_commit_cb *tmp;
1812 /* call per-transaction stop callbacks if any */
1813 list_for_each_entry_safe(dcb, tmp, &oth->ot_stop_dcb_list,
1815 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
1816 "commit callback entry: magic=%x name='%s'\n",
1817 dcb->dcb_magic, dcb->dcb_name);
1818 list_del_init(&dcb->dcb_linkage);
1819 dcb->dcb_func(NULL, &oth->ot_super, dcb, result);
1824 * Concurrency: shouldn't matter.
1826 static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
1829 int rc = 0, remove_agents = 0;
1830 struct osd_thandle *oh;
1831 struct osd_thread_info *oti = osd_oti_get(env);
1832 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1833 struct osd_device *osd = osd_dt_dev(th->th_dev);
1834 struct qsd_instance *qsd = osd->od_quota_slave;
1835 struct lquota_trans *qtrans;
1838 oh = container_of0(th, struct osd_thandle, ot_super);
1840 /* reset OI cache for safety */
1841 oti->oti_ins_cache_used = 0;
1843 remove_agents = oh->ot_remove_agents;
1845 qtrans = oh->ot_quota_trans;
1846 oh->ot_quota_trans = NULL;
1848 if (oh->ot_handle != NULL) {
1850 handle_t *hdl = oh->ot_handle;
1853 * add commit callback
1854 * notice we don't do this in osd_trans_start()
1855 * as underlying transaction can change during truncate
1857 ldiskfs_journal_callback_add(hdl, osd_trans_commit_cb,
1860 LASSERT(oti->oti_txns == 1);
1863 rc = dt_txn_hook_stop(env, th);
1865 CERROR("%s: failed in transaction hook: rc = %d\n",
1868 osd_trans_stop_cb(oh, rc);
1869 /* hook functions might modify th_sync */
1870 hdl->h_sync = th->th_sync;
1872 oh->ot_handle = NULL;
1873 OSD_CHECK_SLOW_TH(oh, osd, rc2 = ldiskfs_journal_stop(hdl));
1875 CERROR("%s: failed to stop transaction: rc = %d\n",
1876 osd_name(osd), rc2);
1880 osd_trans_stop_cb(oh, th->th_result);
1884 /* inform the quota slave device that the transaction is stopping */
1885 qsd_op_end(env, qsd, qtrans);
1887 /* as we want IO to the journal and data IO to be concurrent, we don't block
1888 * awaiting data IO completion in osd_do_bio(); instead we wait here
1889 * once the transaction is submitted to the journal. all regular requests
1890 * don't do direct IO (except read/write), thus this wait_event becomes
1893 * IMPORTANT: we have to wait till any IO submitted by the thread is
1894 * completed, otherwise the iobuf may be corrupted by a different request
1896 wait_event(iobuf->dr_wait,
1897 atomic_read(&iobuf->dr_numreqs) == 0);
1898 osd_fini_iobuf(osd, iobuf);
1900 rc = iobuf->dr_error;
1902 if (unlikely(remove_agents != 0))
1903 osd_process_scheduled_agent_removals(env, osd);
1905 sb_end_write(osd_sb(osd));
1910 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
1912 struct osd_thandle *oh = container_of0(th, struct osd_thandle,
1915 LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
1916 LASSERT(&dcb->dcb_func != NULL);
1917 if (dcb->dcb_flags & DCB_TRANS_STOP)
1918 list_add(&dcb->dcb_linkage, &oh->ot_stop_dcb_list);
1920 list_add(&dcb->dcb_linkage, &oh->ot_commit_dcb_list);
1926 * Called just before object is freed. Releases all resources except for
1927 * object itself (that is released by osd_object_free()).
1929 * Concurrency: no concurrent access is possible that late in object
1932 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
1934 struct osd_object *obj = osd_obj(l);
1935 struct inode *inode = obj->oo_inode;
1937 LINVRNT(osd_invariant(obj));
1940 * If object is unlinked remove fid->ino mapping from object index.
1943 osd_index_fini(obj);
1944 if (inode != NULL) {
1945 struct qsd_instance *qsd = osd_obj2dev(obj)->od_quota_slave;
1946 qid_t uid = i_uid_read(inode);
1947 qid_t gid = i_gid_read(inode);
1950 obj->oo_inode = NULL;
1953 struct osd_thread_info *info = osd_oti_get(env);
1954 struct lquota_id_info *qi = &info->oti_qi;
1956 /* Release granted quota to master if necessary */
1957 qi->lqi_id.qid_uid = uid;
1958 qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
1960 qi->lqi_id.qid_uid = gid;
1961 qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
1963 qi->lqi_id.qid_uid = i_projid_read(inode);
1964 qsd_op_adjust(env, qsd, &qi->lqi_id, PRJQUOTA);
1970 * Concurrency: ->loo_object_release() is called under site spin-lock.
1972 static void osd_object_release(const struct lu_env *env,
1973 struct lu_object *l)
1975 struct osd_object *o = osd_obj(l);
1976 /* nobody should be releasing a non-destroyed object with nlink=0;
1977 * the API allows this, but ldiskfs doesn't like it and then reports
1978 * this inode as deleted */
1979 if (unlikely(!o->oo_destroyed && o->oo_inode && o->oo_inode->i_nlink == 0))
1984 * Concurrency: shouldn't matter.
1986 static int osd_object_print(const struct lu_env *env, void *cookie,
1987 lu_printer_t p, const struct lu_object *l)
1989 struct osd_object *o = osd_obj(l);
1990 struct iam_descr *d;
1992 if (o->oo_dir != NULL)
1993 d = o->oo_dir->od_container.ic_descr;
1996 return (*p)(env, cookie,
1997 LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
1999 o->oo_inode ? o->oo_inode->i_ino : 0UL,
2000 o->oo_inode ? o->oo_inode->i_generation : 0,
2001 d ? d->id_ops->id_name : "plain");
2005 * Concurrency: shouldn't matter.
2007 int osd_statfs(const struct lu_env *env, struct dt_device *d,
2008 struct obd_statfs *sfs)
2010 struct osd_device *osd = osd_dt_dev(d);
2011 struct super_block *sb = osd_sb(osd);
2012 struct kstatfs *ksfs;
2016 if (unlikely(osd->od_mnt == NULL))
2017 return -EINPROGRESS;
2019 /* osd_lproc.c calls this without env; allocate ksfs for that case */
2020 if (unlikely(env == NULL)) {
2021 OBD_ALLOC_PTR(ksfs);
2025 ksfs = &osd_oti_get(env)->oti_ksfs;
2028 result = sb->s_op->statfs(sb->s_root, ksfs);
2032 statfs_pack(sfs, ksfs);
2033 if (unlikely(sb->s_flags & MS_RDONLY))
2034 sfs->os_state |= OS_STATE_READONLY;
2035 if (LDISKFS_HAS_INCOMPAT_FEATURE(sb,
2036 LDISKFS_FEATURE_INCOMPAT_EXTENTS))
2037 sfs->os_maxbytes = sb->s_maxbytes;
2039 sfs->os_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
2042 * Reserve some space so as to avoid fragmenting the filesystem too much.
2043 * Fragmentation not only impacts performance, but can also increase
2044 * metadata overhead significantly, causing grant calculation to be
2047 * Reserve 0.78% of total space, at least 8MB for small filesystems.
2049 CLASSERT(OSD_STATFS_RESERVED > LDISKFS_MAX_BLOCK_SIZE);
2050 reserved = OSD_STATFS_RESERVED >> sb->s_blocksize_bits;
2051 if (likely(sfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
2052 reserved = sfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
2054 sfs->os_blocks -= reserved;
2055 sfs->os_bfree -= min(reserved, sfs->os_bfree);
2056 sfs->os_bavail -= min(reserved, sfs->os_bavail);
2059 if (unlikely(env == NULL))
2065 * Estimate space needed for file creations. We assume the largest filename
2066 * is the decimal representation of 2^64 - 1, hence a filename of 20 chars.
2067 * This is 28 bytes per object, which is 28MB for 1M objects ... not so bad.
2069 #ifdef __LDISKFS_DIR_REC_LEN
2070 #define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
2072 #define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
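/*
 * Illustrative sketch, kept out of the build with #if 0: the 28 bytes quoted
 * in the comment above come from the ldiskfs directory record size, i.e. an
 * 8-byte dirent header plus the name length rounded up to a 4-byte boundary,
 * so a 20-character name (the decimal width of 2^64 - 1) costs 28 bytes.
 * The rounding formula below follows the usual ext4-style calculation and
 * should be treated as an assumption; DEMO_DIR_REC_LEN is hypothetical.
 */
#if 0
#include <stdio.h>

/* 8-byte dirent header + name length, rounded up to a 4-byte boundary */
#define DEMO_DIR_REC_LEN(namelen)	(((namelen) + 8 + 3) & ~3U)

int main(void)
{
	unsigned int per_obj = DEMO_DIR_REC_LEN(20);	/* 31 & ~3 = 28 */

	/* 1M entries * 28 bytes = 28,000,000 bytes, i.e. about 28 (decimal) MB */
	printf("%u bytes per entry, about %u MB per million objects\n",
	       per_obj, per_obj);
	return 0;
}
#endif /* illustrative example */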
2076 * Concurrency: doesn't access mutable data.
2078 static void osd_conf_get(const struct lu_env *env,
2079 const struct dt_device *dev,
2080 struct dt_device_param *param)
2082 struct super_block *sb = osd_sb(osd_dt_dev(dev));
2086 * XXX should be taken from not-yet-existing fs abstraction layer.
2088 param->ddp_max_name_len = LDISKFS_NAME_LEN;
2089 param->ddp_max_nlink = LDISKFS_LINK_MAX;
2090 param->ddp_symlink_max = sb->s_blocksize;
2091 param->ddp_mount_type = LDD_MT_LDISKFS;
2092 if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EXTENTS))
2093 param->ddp_maxbytes = sb->s_maxbytes;
2095 param->ddp_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
2096 /* inodes are statically allocated, so per-inode space consumption
2097 * is the space consumed by the directory entry */
2098 param->ddp_inodespace = PER_OBJ_USAGE;
2099 /* EXT_INIT_MAX_LEN is the theoretical maximum extent size (32k blocks
2100 * = 128MB) which is unlikely to be hit in real life. Report a smaller
2101 * maximum length so as not to undercount the actual number of extents
2102 * needed for writing a file. */
2103 param->ddp_max_extent_blks = EXT_INIT_MAX_LEN >> 2;
2104 /* worst-case extent insertion metadata overhead */
2105 param->ddp_extent_tax = 6 * LDISKFS_BLOCK_SIZE(sb);
2106 param->ddp_mntopts = 0;
2107 if (test_opt(sb, XATTR_USER))
2108 param->ddp_mntopts |= MNTOPT_USERXATTR;
2109 if (test_opt(sb, POSIX_ACL))
2110 param->ddp_mntopts |= MNTOPT_ACL;
2112 /* LOD might calculate the max stripe count based on max_ea_size,
2113 * so we need to take the overhead into account as well:
2114 * xattr_header + magic + xattr_entry_head */
2115 ea_overhead = sizeof(struct ldiskfs_xattr_header) + sizeof(__u32) +
2116 LDISKFS_XATTR_LEN(XATTR_NAME_MAX_LEN);
2118 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
2119 if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
2120 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE -
2124 param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
2128 * Concurrency: shouldn't matter.
2130 static int osd_sync(const struct lu_env *env, struct dt_device *d)
2134 CDEBUG(D_CACHE, "syncing OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
2136 rc = ldiskfs_force_commit(osd_sb(osd_dt_dev(d)));
2138 CDEBUG(D_CACHE, "synced OSD %s: rc = %d\n",
2139 LUSTRE_OSD_LDISKFS_NAME, rc);
2145 * Start commit for OSD device.
2147 * An implementation of dt_commit_async method for OSD device.
2148 * Asynchronously starts the underlying fs sync and thereby a transaction
2151 * \param env environment
2152 * \param d dt device
2154 * \see dt_device_operations
2156 static int osd_commit_async(const struct lu_env *env,
2157 struct dt_device *d)
2159 struct super_block *s = osd_sb(osd_dt_dev(d));
2162 CDEBUG(D_HA, "async commit OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
2163 RETURN(s->s_op->sync_fs(s, 0));
2167 * Concurrency: shouldn't matter.
2170 static int osd_ro(const struct lu_env *env, struct dt_device *d)
2172 struct super_block *sb = osd_sb(osd_dt_dev(d));
2173 struct block_device *dev = sb->s_bdev;
2174 #ifdef HAVE_DEV_SET_RDONLY
2175 struct block_device *jdev = LDISKFS_SB(sb)->journal_bdev;
2178 int rc = -EOPNOTSUPP;
2182 #ifdef HAVE_DEV_SET_RDONLY
2183 CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
2185 if (sb->s_op->freeze_fs) {
2186 rc = sb->s_op->freeze_fs(sb);
2191 if (jdev && (jdev != dev)) {
2192 CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
2194 dev_set_rdonly(jdev);
2196 CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
2197 dev_set_rdonly(dev);
2199 if (sb->s_op->unfreeze_fs)
2200 sb->s_op->unfreeze_fs(sb);
2205 CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
2206 osd_dt_dev(d)->od_svname, (long)dev, rc);
2212 * Note: we do not count QUOTA credits here.
2213 * If we mount with --data_journal we may need more.
2215 const int osd_dto_credits_noquota[DTO_NR] = {
2218 * INDEX_EXTRA_TRANS_BLOCKS(8) +
2219 * SINGLEDATA_TRANS_BLOCKS(8)
2220 * XXX Note: maybe iam needs more, since iam has more levels than
2223 [DTO_INDEX_INSERT] = 16,
2226 * just modify a single entry, probably merging a few within a block
2228 [DTO_INDEX_DELETE] = 1,
2232 [DTO_INDEX_UPDATE] = 16,
2234 * 4(inode, inode bits, groups, GDT)
2235 * notice: OI updates are counted separately with DTO_INDEX_INSERT
2237 [DTO_OBJECT_CREATE] = 4,
2239 * 4(inode, inode bits, groups, GDT)
2240 * notice: OI updates are counted separately with DTO_INDEX_DELETE
2242 [DTO_OBJECT_DELETE] = 4,
2244 * Attr set credits (inode)
2246 [DTO_ATTR_SET_BASE] = 1,
2248 * Xattr set. The same as xattr of EXT3.
2249 * DATA_TRANS_BLOCKS(14)
2250 * XXX Note: in the original MDS implementation INDEX_EXTRA_TRANS_BLOCKS
2251 * is also counted in. Do not know why.
2253 [DTO_XATTR_SET] = 14,
2255 * credits for inode change during write.
2257 [DTO_WRITE_BASE] = 3,
2259 * credits for single block write.
2261 [DTO_WRITE_BLOCK] = 14,
2263 * Attr set credits for chown.
2264 * These are extra credits for setattr, and they are zero without quota
2266 [DTO_ATTR_SET_CHOWN] = 0
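/*
 * Illustrative sketch, kept out of the build with #if 0: the
 * osd_dto_credits_noquota[] table above provides per-primitive journal
 * credit estimates, and a compound update is declared by summing the rows
 * it needs.  The standalone example below only illustrates that summation
 * with a few hypothetical rows; real declarations also account for OI
 * updates, quota and other details, and the demo names are not Lustre code.
 */
#if 0
#include <stdio.h>

enum demo_dto {
	DEMO_INDEX_INSERT,
	DEMO_OBJECT_CREATE,
	DEMO_ATTR_SET_BASE,
	DEMO_NR
};

/* same idea as osd_dto_credits_noquota[]: journal blocks per primitive op */
static const int demo_credits[DEMO_NR] = {
	[DEMO_INDEX_INSERT]  = 16,
	[DEMO_OBJECT_CREATE] = 4,
	[DEMO_ATTR_SET_BASE] = 1,
};

int main(void)
{
	/* declaring "create an object, set its attributes, link it into a dir" */
	int credits = demo_credits[DEMO_OBJECT_CREATE] +
		      demo_credits[DEMO_ATTR_SET_BASE] +
		      demo_credits[DEMO_INDEX_INSERT];

	printf("declared %d credits for the compound operation\n", credits);
	return 0;
}
#endif /* illustrative example */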
2269 static const struct dt_device_operations osd_dt_ops = {
2270 .dt_root_get = osd_root_get,
2271 .dt_statfs = osd_statfs,
2272 .dt_trans_create = osd_trans_create,
2273 .dt_trans_start = osd_trans_start,
2274 .dt_trans_stop = osd_trans_stop,
2275 .dt_trans_cb_add = osd_trans_cb_add,
2276 .dt_conf_get = osd_conf_get,
2277 .dt_sync = osd_sync,
2279 .dt_commit_async = osd_commit_async,
2282 static void osd_object_read_lock(const struct lu_env *env,
2283 struct dt_object *dt, unsigned role)
2285 struct osd_object *obj = osd_dt_obj(dt);
2286 struct osd_thread_info *oti = osd_oti_get(env);
2288 LINVRNT(osd_invariant(obj));
2290 LASSERT(obj->oo_owner != env);
2291 down_read_nested(&obj->oo_sem, role);
2293 LASSERT(obj->oo_owner == NULL);
2297 static void osd_object_write_lock(const struct lu_env *env,
2298 struct dt_object *dt, unsigned role)
2300 struct osd_object *obj = osd_dt_obj(dt);
2301 struct osd_thread_info *oti = osd_oti_get(env);
2303 LINVRNT(osd_invariant(obj));
2305 LASSERT(obj->oo_owner != env);
2306 down_write_nested(&obj->oo_sem, role);
2308 LASSERT(obj->oo_owner == NULL);
2309 obj->oo_owner = env;
2313 static void osd_object_read_unlock(const struct lu_env *env,
2314 struct dt_object *dt)
2316 struct osd_object *obj = osd_dt_obj(dt);
2317 struct osd_thread_info *oti = osd_oti_get(env);
2319 LINVRNT(osd_invariant(obj));
2321 LASSERT(oti->oti_r_locks > 0);
2323 up_read(&obj->oo_sem);
2326 static void osd_object_write_unlock(const struct lu_env *env,
2327 struct dt_object *dt)
2329 struct osd_object *obj = osd_dt_obj(dt);
2330 struct osd_thread_info *oti = osd_oti_get(env);
2332 LINVRNT(osd_invariant(obj));
2334 LASSERT(obj->oo_owner == env);
2335 LASSERT(oti->oti_w_locks > 0);
2337 obj->oo_owner = NULL;
2338 up_write(&obj->oo_sem);
2341 static int osd_object_write_locked(const struct lu_env *env,
2342 struct dt_object *dt)
2344 struct osd_object *obj = osd_dt_obj(dt);
2346 LINVRNT(osd_invariant(obj));
2348 return obj->oo_owner == env;
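/*
 * The lock helpers above implement dt object locking on top of the
 * oo_sem rw-semaphore: oo_owner records the environment that holds the
 * write lock (NULL otherwise), and the oti_r_locks/oti_w_locks counters
 * in osd_thread_info apparently back the sanity checks in the unlock
 * paths.
 */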
2351 static struct timespec *osd_inode_time(const struct lu_env *env,
2352 struct inode *inode, __u64 seconds)
2354 struct osd_thread_info *oti = osd_oti_get(env);
2355 struct timespec *t = &oti->oti_time;
2357 t->tv_sec = seconds;
2359 *t = timespec_trunc(*t, inode->i_sb->s_time_gran);
2363 static void osd_inode_getattr(const struct lu_env *env,
2364 struct inode *inode, struct lu_attr *attr)
2366 attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
2367 LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
2368 LA_PROJID | LA_FLAGS | LA_NLINK | LA_RDEV |
2369 LA_BLKSIZE | LA_TYPE;
2371 attr->la_atime = LTIME_S(inode->i_atime);
2372 attr->la_mtime = LTIME_S(inode->i_mtime);
2373 attr->la_ctime = LTIME_S(inode->i_ctime);
2374 attr->la_mode = inode->i_mode;
2375 attr->la_size = i_size_read(inode);
2376 attr->la_blocks = inode->i_blocks;
2377 attr->la_uid = i_uid_read(inode);
2378 attr->la_gid = i_gid_read(inode);
2379 attr->la_projid = i_projid_read(inode);
2380 attr->la_flags = ll_inode_to_ext_flags(inode->i_flags);
2381 attr->la_nlink = inode->i_nlink;
2382 attr->la_rdev = inode->i_rdev;
2383 attr->la_blksize = 1 << inode->i_blkbits;
2384 attr->la_blkbits = inode->i_blkbits;
2387 static int osd_attr_get(const struct lu_env *env,
2388 struct dt_object *dt,
2389 struct lu_attr *attr)
2391 struct osd_object *obj = osd_dt_obj(dt);
2393 if (unlikely(!dt_object_exists(dt)))
2395 if (unlikely(obj->oo_destroyed))
2398 LASSERT(!dt_object_remote(dt));
2399 LINVRNT(osd_invariant(obj));
2401 spin_lock(&obj->oo_guard);
2402 osd_inode_getattr(env, obj->oo_inode, attr);
2403 if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL)
2404 attr->la_flags |= LUSTRE_ORPHAN_FL;
2405 spin_unlock(&obj->oo_guard);
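/*
 * The attributes are copied from the inode under the oo_guard spinlock
 * so a concurrent setattr cannot be observed half-applied; the
 * LUSTRE_ORPHAN_FL bit cached in oo_lma_flags is merged into the
 * returned la_flags.
 */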
2410 static int osd_declare_attr_qid(const struct lu_env *env,
2411 struct osd_object *obj,
2412 struct osd_thandle *oh, long long bspace,
2413 qid_t old_id, qid_t new_id, bool enforce,
2417 struct osd_thread_info *info = osd_oti_get(env);
2418 struct lquota_id_info *qi = &info->oti_qi;
2420 qi->lqi_type = type;
2421 /* inode accounting */
2422 qi->lqi_is_blk = false;
2424 /* one more inode for the new id ... */
2425 qi->lqi_id.qid_uid = new_id;
2427 /* Reserve credits for the new id */
2428 rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
2429 if (rc == -EDQUOT || rc == -EINPROGRESS)
2434 /* and one less inode for the current id */
2435 qi->lqi_id.qid_uid = old_id;
2437 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2438 if (rc == -EDQUOT || rc == -EINPROGRESS)
2443 /* block accounting */
2444 qi->lqi_is_blk = true;
2446 /* more blocks for the new uid ... */
2447 qi->lqi_id.qid_uid = new_id;
2448 qi->lqi_space = bspace;
2450 * Credits for the new uid have been reserved, re-use "obj"
2451 * to save credit reservation.
2453 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2454 if (rc == -EDQUOT || rc == -EINPROGRESS)
2459 /* and finally fewer blocks for the current uid */
2460 qi->lqi_id.qid_uid = old_id;
2461 qi->lqi_space = -bspace;
2462 rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2463 if (rc == -EDQUOT || rc == -EINPROGRESS)
2469 static int osd_declare_attr_set(const struct lu_env *env,
2470 struct dt_object *dt,
2471 const struct lu_attr *attr,
2472 struct thandle *handle)
2474 struct osd_thandle *oh;
2475 struct osd_object *obj;
2483 LASSERT(dt != NULL);
2484 LASSERT(handle != NULL);
2486 obj = osd_dt_obj(dt);
2487 LASSERT(osd_invariant(obj));
2489 oh = container_of0(handle, struct osd_thandle, ot_super);
2490 LASSERT(oh->ot_handle == NULL);
2492 osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
2493 osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2495 osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
2496 osd_dto_credits_noquota[DTO_XATTR_SET]);
2498 if (attr == NULL || obj->oo_inode == NULL)
2501 bspace = obj->oo_inode->i_blocks;
2502 bspace <<= obj->oo_inode->i_sb->s_blocksize_bits;
2503 bspace = toqb(bspace);
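/*
 * bspace is the inode's current block usage converted into quota blocks;
 * osd_declare_attr_qid() then reserves four quota deltas per id type:
 * one more/one less inode, and bspace more/fewer blocks for the new and
 * old id respectively.
 */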
2505 /* Changing ownership is always performed by the super user, it should not
2508 * We still need to call the osd_declare_qid() to calculate the journal
2509 * credits for updating quota accounting files and to trigger quota
2510 * space adjustment once the operation is completed.*/
2511 if (attr->la_valid & LA_UID || attr->la_valid & LA_GID) {
2513 uid = i_uid_read(obj->oo_inode);
2514 enforce = (attr->la_valid & LA_UID) && (attr->la_uid != uid);
2515 rc = osd_declare_attr_qid(env, obj, oh, bspace, uid,
2516 attr->la_uid, enforce, USRQUOTA);
2520 gid = i_gid_read(obj->oo_inode);
2521 enforce = (attr->la_valid & LA_GID) && (attr->la_gid != gid);
2522 rc = osd_declare_attr_qid(env, obj, oh, bspace,
2523 i_gid_read(obj->oo_inode), attr->la_gid,
2529 #ifdef HAVE_PROJECT_QUOTA
2530 if (attr->la_valid & LA_PROJID) {
2531 __u32 projid = i_projid_read(obj->oo_inode);
2532 enforce = (attr->la_valid & LA_PROJID) &&
2533 (attr->la_projid != projid);
2534 rc = osd_declare_attr_qid(env, obj, oh, bspace,
2535 (qid_t)projid, (qid_t)attr->la_projid,
2544 static int osd_inode_setattr(const struct lu_env *env,
2545 struct inode *inode, const struct lu_attr *attr)
2547 __u64 bits = attr->la_valid;
2549 /* Only allow setting the size for regular files */
2550 if (!S_ISREG(inode->i_mode))
2551 bits &= ~(LA_SIZE | LA_BLOCKS);
2556 if (bits & LA_ATIME)
2557 inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
2558 if (bits & LA_CTIME)
2559 inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
2560 if (bits & LA_MTIME)
2561 inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
2562 if (bits & LA_SIZE) {
2563 spin_lock(&inode->i_lock);
2564 LDISKFS_I(inode)->i_disksize = attr->la_size;
2565 i_size_write(inode, attr->la_size);
2566 spin_unlock(&inode->i_lock);
2569 /* OSD should not change "i_blocks" which is used by quota.
2570 * "i_blocks" should be changed by ldiskfs only. */
2572 inode->i_mode = (inode->i_mode & S_IFMT) |
2573 (attr->la_mode & ~S_IFMT);
2575 i_uid_write(inode, attr->la_uid);
2577 i_gid_write(inode, attr->la_gid);
2578 if (bits & LA_PROJID)
2579 i_projid_write(inode, attr->la_projid);
2580 if (bits & LA_NLINK)
2581 set_nlink(inode, attr->la_nlink);
2583 inode->i_rdev = attr->la_rdev;
2585 if (bits & LA_FLAGS) {
2586 /* always keep S_NOCMTIME */
2587 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
2593 static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
2597 if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
2598 (attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
2601 ll_vfs_dq_init(inode);
2603 if (attr->la_valid & LA_UID)
2604 iattr.ia_valid |= ATTR_UID;
2605 if (attr->la_valid & LA_GID)
2606 iattr.ia_valid |= ATTR_GID;
2607 iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
2608 iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);
2610 rc = ll_vfs_dq_transfer(inode, &iattr);
2612 CERROR("%s: quota transfer failed: rc = %d. Is quota "
2613 "enforcement enabled on the ldiskfs "
2614 "filesystem?\n", inode->i_sb->s_id, rc);
2619 #ifdef HAVE_PROJECT_QUOTA
2620 /* Handle project id transfer here properly */
2621 if (attr->la_valid & LA_PROJID && attr->la_projid !=
2622 i_projid_read(inode)) {
2623 rc = __ldiskfs_ioctl_setproject(inode, attr->la_projid);
2625 CERROR("%s: quota transfer failed: rc = %d. Is quota "
2626 "enforcement enabled on the ldiskfs "
2627 "filesystem?\n", inode->i_sb->s_id, rc);
2635 static int osd_attr_set(const struct lu_env *env,
2636 struct dt_object *dt,
2637 const struct lu_attr *attr,
2638 struct thandle *handle)
2640 struct osd_object *obj = osd_dt_obj(dt);
2641 struct inode *inode;
2644 if (!dt_object_exists(dt))
2647 LASSERT(handle != NULL);
2648 LASSERT(!dt_object_remote(dt));
2649 LASSERT(osd_invariant(obj));
2651 osd_trans_exec_op(env, handle, OSD_OT_ATTR_SET);
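/*
 * The OBD_FAIL_OSD_FID_MAPPING branch below is a fault-injection path
 * (presumably exercised by LFSCK tests): instead of a normal attribute
 * update it overwrites the object's OI mapping with a bogus inode id
 * through iam_update().
 */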
2653 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING)) {
2654 struct osd_thread_info *oti = osd_oti_get(env);
2655 const struct lu_fid *fid0 = lu_object_fid(&dt->do_lu);
2656 struct lu_fid *fid1 = &oti->oti_fid;
2657 struct osd_inode_id *id = &oti->oti_id;
2658 struct iam_path_descr *ipd;
2659 struct iam_container *bag;
2660 struct osd_thandle *oh;
2663 fid_cpu_to_be(fid1, fid0);
2664 memset(id, 1, sizeof(*id));
2665 bag = &osd_fid2oi(osd_dev(dt->do_lu.lo_dev),
2666 fid0)->oi_dir.od_container;
2667 ipd = osd_idx_ipd_get(env, bag);
2668 if (unlikely(ipd == NULL))
2671 oh = container_of0(handle, struct osd_thandle, ot_super);
2672 rc = iam_update(oh->ot_handle, bag, (const struct iam_key *)fid1,
2673 (const struct iam_rec *)id, ipd);
2674 osd_ipd_put(env, bag, ipd);
2675 return(rc > 0 ? 0 : rc);
2678 inode = obj->oo_inode;
2680 rc = osd_quota_transfer(inode, attr);
2684 spin_lock(&obj->oo_guard);
2685 rc = osd_inode_setattr(env, inode, attr);
2686 spin_unlock(&obj->oo_guard);
2690 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2692 if (!(attr->la_valid & LA_FLAGS))
2695 /* Check whether there are extra flags that need to be set in the LMA */
2696 if (attr->la_flags & LUSTRE_LMA_FL_MASKS) {
2697 struct osd_thread_info *info = osd_oti_get(env);
2698 struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
2700 rc = osd_get_lma(info, inode, &info->oti_obj_dentry,
2701 &info->oti_ost_attrs);
2705 lma->lma_incompat |=
2706 lustre_to_lma_flags(attr->la_flags);
2707 lustre_lma_swab(lma);
2708 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA,
2709 lma, sizeof(*lma), XATTR_REPLACE);
2711 struct osd_device *osd = osd_obj2dev(obj);
2713 CWARN("%s: set "DFID" lma flags %u failed: rc = %d\n",
2714 osd_name(osd), PFID(lu_object_fid(&dt->do_lu)),
2715 lma->lma_incompat, rc);
2718 attr->la_flags & LUSTRE_LMA_FL_MASKS;
2720 osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
2723 osd_trans_exec_check(env, handle, OSD_OT_ATTR_SET);
2728 static struct dentry *osd_child_dentry_get(const struct lu_env *env,
2729 struct osd_object *obj,
2730 const char *name, const int namelen)
2732 return osd_child_dentry_by_inode(env, obj->oo_inode, name, namelen);
2735 static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
2736 umode_t mode, struct dt_allocation_hint *hint,
2740 struct osd_device *osd = osd_obj2dev(obj);
2741 struct osd_thandle *oth;
2742 struct dt_object *parent = NULL;
2743 struct inode *inode;
2745 LINVRNT(osd_invariant(obj));
2746 LASSERT(obj->oo_inode == NULL);
2747 LASSERT(obj->oo_hl_head == NULL);
2749 if (S_ISDIR(mode) && ldiskfs_pdo) {
2750 obj->oo_hl_head =ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
2751 if (obj->oo_hl_head == NULL)
2755 oth = container_of(th, struct osd_thandle, ot_super);
2756 LASSERT(oth->ot_handle->h_transaction != NULL);
2758 if (hint != NULL && hint->dah_parent != NULL &&
2759 !dt_object_remote(hint->dah_parent))
2760 parent = hint->dah_parent;
2762 inode = ldiskfs_create_inode(oth->ot_handle,
2763 parent ? osd_dt_obj(parent)->oo_inode :
2764 osd_sb(osd)->s_root->d_inode,
2766 if (!IS_ERR(inode)) {
2767 /* Do not update file c/mtime in ldiskfs. */
2768 inode->i_flags |= S_NOCMTIME;
2770 /* A newly created object must be consistent,
2771 * so it is unnecessary to scrub it. */
2772 ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_NOSCRUB);
2774 obj->oo_inode = inode;
2777 if (obj->oo_hl_head != NULL) {
2778 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
2779 obj->oo_hl_head = NULL;
2781 result = PTR_ERR(inode);
2783 LINVRNT(osd_invariant(obj));
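/*
 * osd_mkfile() allocates the backing inode with ldiskfs_create_inode(),
 * passing the parent from the allocation hint (or the filesystem root),
 * presumably to keep the new inode close to its parent; directories
 * additionally get an htree lock head when parallel directory operations
 * (ldiskfs_pdo) are enabled.
 */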
2791 static int osd_mkdir(struct osd_thread_info *info, struct osd_object *obj,
2792 struct lu_attr *attr,
2793 struct dt_allocation_hint *hint,
2794 struct dt_object_format *dof,
2798 struct osd_thandle *oth;
2799 __u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX | S_ISGID));
2801 LASSERT(S_ISDIR(attr->la_mode));
2803 oth = container_of(th, struct osd_thandle, ot_super);
2804 LASSERT(oth->ot_handle->h_transaction != NULL);
2805 result = osd_mkfile(info, obj, mode, hint, th);
2810 static int osd_mk_index(struct osd_thread_info *info, struct osd_object *obj,
2811 struct lu_attr *attr,
2812 struct dt_allocation_hint *hint,
2813 struct dt_object_format *dof,
2817 struct osd_thandle *oth;
2818 const struct dt_index_features *feat = dof->u.dof_idx.di_feat;
2820 __u32 mode = (attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX));
2822 LASSERT(S_ISREG(attr->la_mode));
2824 oth = container_of(th, struct osd_thandle, ot_super);
2825 LASSERT(oth->ot_handle->h_transaction != NULL);
2827 result = osd_mkfile(info, obj, mode, hint, th);
2829 LASSERT(obj->oo_inode != NULL);
2830 if (feat->dif_flags & DT_IND_VARKEY)
2831 result = iam_lvar_create(obj->oo_inode,
2832 feat->dif_keysize_max,
2834 feat->dif_recsize_max,
2837 result = iam_lfix_create(obj->oo_inode,
2838 feat->dif_keysize_max,
2840 feat->dif_recsize_max,
2847 static int osd_mkreg(struct osd_thread_info *info, struct osd_object *obj,
2848 struct lu_attr *attr,
2849 struct dt_allocation_hint *hint,
2850 struct dt_object_format *dof,
2853 LASSERT(S_ISREG(attr->la_mode));
2854 return osd_mkfile(info, obj, (attr->la_mode &
2855 (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
2858 static int osd_mksym(struct osd_thread_info *info, struct osd_object *obj,
2859 struct lu_attr *attr,
2860 struct dt_allocation_hint *hint,
2861 struct dt_object_format *dof,
2864 LASSERT(S_ISLNK(attr->la_mode));
2865 return osd_mkfile(info, obj, (attr->la_mode &
2866 (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
2869 static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
2870 struct lu_attr *attr,
2871 struct dt_allocation_hint *hint,
2872 struct dt_object_format *dof,
2875 umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
2878 LINVRNT(osd_invariant(obj));
2879 LASSERT(obj->oo_inode == NULL);
2880 LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
2881 S_ISFIFO(mode) || S_ISSOCK(mode));
2883 result = osd_mkfile(info, obj, mode, hint, th);
2885 LASSERT(obj->oo_inode != NULL);
2887 * This inode should be marked dirty for i_rdev. Currently
2888 * that is done in the osd_attr_init().
2890 init_special_inode(obj->oo_inode, obj->oo_inode->i_mode,
2893 LINVRNT(osd_invariant(obj));
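/*
 * The per-type helpers above (osd_mkdir, osd_mk_index, osd_mkreg,
 * osd_mksym, osd_mknod) all funnel through osd_mkfile(); the dispatch
 * function osd_create_type_f() below picks one of them based on the
 * requested dt_format_type.
 */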
2897 typedef int (*osd_obj_type_f)(struct osd_thread_info *, struct osd_object *,
2899 struct dt_allocation_hint *hint,
2900 struct dt_object_format *dof,
2903 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
2905 osd_obj_type_f result;
2921 result = osd_mk_index;
2932 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
2933 struct dt_object *parent, struct dt_object *child,
2938 ah->dah_parent = parent;
2939 ah->dah_mode = child_mode;
2941 if (parent != NULL && !dt_object_remote(parent)) {
2942 /* will help to find FID->ino at dt_insert("..") */
2943 struct osd_object *pobj = osd_dt_obj(parent);
2944 osd_idc_find_and_init(env, osd_obj2dev(pobj), pobj);
2948 static void osd_attr_init(struct osd_thread_info *info, struct osd_object *obj,
2949 struct lu_attr *attr, struct dt_object_format *dof)
2951 struct inode *inode = obj->oo_inode;
2952 __u64 valid = attr->la_valid;
2955 attr->la_valid &= ~(LA_TYPE | LA_MODE);
2957 if (dof->dof_type != DFT_NODE)
2958 attr->la_valid &= ~LA_RDEV;
2959 if ((valid & LA_ATIME) && (attr->la_atime == LTIME_S(inode->i_atime)))
2960 attr->la_valid &= ~LA_ATIME;
2961 if ((valid & LA_CTIME) && (attr->la_ctime == LTIME_S(inode->i_ctime)))
2962 attr->la_valid &= ~LA_CTIME;
2963 if ((valid & LA_MTIME) && (attr->la_mtime == LTIME_S(inode->i_mtime)))
2964 attr->la_valid &= ~LA_MTIME;
2966 result = osd_quota_transfer(inode, attr);
2970 if (attr->la_valid != 0) {
2971 result = osd_inode_setattr(info->oti_env, inode, attr);
2973 * The osd_inode_setattr() should always succeed here. The
2974 * only error that could be returned is EDQUOT when we are
2975 * trying to change the UID or GID of the inode. However, this
2976 * should not happen since quota enforcement is no longer
2977 * enabled on ldiskfs (lquota takes care of it).
2979 LASSERTF(result == 0, "%d\n", result);
2980 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2983 attr->la_valid = valid;
2987 * Helper function for osd_object_create()
2989 * \retval 0, on success
2991 static int __osd_object_create(struct osd_thread_info *info,
2992 struct osd_object *obj, struct lu_attr *attr,
2993 struct dt_allocation_hint *hint,
2994 struct dt_object_format *dof,
3000 osd_trans_exec_op(info->oti_env, th, OSD_OT_CREATE);
3002 /* we drop umask so that permissions we pass are not affected */
3003 umask = current->fs->umask;
3004 current->fs->umask = 0;
3006 result = osd_create_type_f(dof->dof_type)(info, obj, attr, hint, dof,
3008 if (likely(obj->oo_inode != NULL)) {
3009 LASSERT(obj->oo_inode->i_state & I_NEW);
3011 /* Unlock the inode before attr initialization to avoid
3012 * unnecessary dqget operations. LU-6378 */
3013 unlock_new_inode(obj->oo_inode);
3016 if (likely(result == 0)) {
3017 osd_attr_init(info, obj, attr, dof);
3018 osd_object_init0(obj);
3021 /* restore previous umask value */
3022 current->fs->umask = umask;
3024 osd_trans_exec_check(info->oti_env, th, OSD_OT_CREATE);
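/*
 * __osd_object_create() temporarily clears the caller's umask so the
 * requested mode is applied verbatim, dispatches to the matching create
 * helper, unlocks the new inode early to avoid unnecessary dqget
 * operations (LU-6378) and finally applies the remaining attributes
 * through osd_attr_init().
 */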
3030 * Helper function for osd_object_create()
3032 * \retval 0, on success
3034 static int __osd_oi_insert(const struct lu_env *env, struct osd_object *obj,
3035 const struct lu_fid *fid, struct thandle *th)
3037 struct osd_thread_info *info = osd_oti_get(env);
3038 struct osd_inode_id *id = &info->oti_id;
3039 struct osd_device *osd = osd_obj2dev(obj);
3040 struct osd_thandle *oh;
3043 LASSERT(obj->oo_inode != NULL);
3045 oh = container_of0(th, struct osd_thandle, ot_super);
3046 LASSERT(oh->ot_handle);
3047 osd_trans_exec_op(env, th, OSD_OT_INSERT);
3049 osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
3050 rc = osd_oi_insert(info, osd, fid, id, oh->ot_handle,
3051 OI_CHECK_FLD, NULL);
3052 osd_trans_exec_check(env, th, OSD_OT_INSERT);
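/*
 * The OI insert above records the FID -> ino/generation mapping in the
 * object index so the inode can later be looked up by FID; the credits
 * for this were declared as DTO_INDEX_INSERT (plus one block for OI
 * reuse, see osd_declare_object_create()).
 */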
3057 int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
3058 u64 seq, struct lu_seq_range *range)
3060 struct seq_server_site *ss = osd_seq_site(osd);
3062 if (fid_seq_is_idif(seq)) {
3063 fld_range_set_ost(range);
3064 range->lsr_index = idif_ost_idx(seq);
3068 if (!fid_seq_in_fldb(seq)) {
3069 fld_range_set_mdt(range);
3071 /* FIXME: if ss is NULL, supposedly we should not get the lsr_index
3073 range->lsr_index = ss->ss_node_id;
3077 LASSERT(ss != NULL);
3078 fld_range_set_any(range);
3079 /* OSD will only do local fld lookup */
3080 return fld_local_lookup(env, ss->ss_server_fld, seq, range);
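/*
 * Lookup summary: IDIF sequences map directly to the OST index encoded
 * in the sequence, sequences outside the FLD database are assumed to
 * live on the local MDT (ss_node_id), and anything else is resolved
 * through the local FLD server with fld_local_lookup().
 */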
3084 * Concurrency: no external locking is necessary.
3086 static int osd_declare_object_create(const struct lu_env *env,
3087 struct dt_object *dt,
3088 struct lu_attr *attr,
3089 struct dt_allocation_hint *hint,
3090 struct dt_object_format *dof,
3091 struct thandle *handle)
3093 struct osd_thandle *oh;
3097 LASSERT(handle != NULL);
3099 oh = container_of0(handle, struct osd_thandle, ot_super);
3100 LASSERT(oh->ot_handle == NULL);
3102 /* An EA object consumes more credits than a regular object: osd_mk_index
3103 * vs. osd_mkreg: osd_mk_index will create 2 blocks for the root_node and
3104 * the leaf_node, which could involve block, block bitmap, group and GDT
3105 * changes for each block, so add 4 * 2 credits in that case. */
3106 osd_trans_declare_op(env, oh, OSD_OT_CREATE,
3107 osd_dto_credits_noquota[DTO_OBJECT_CREATE] +
3108 ((dof->dof_type == DFT_INDEX) ? 4 * 2 : 0));
3109 /* Reusing an idle OI block may cause one additional OI block
3111 osd_trans_declare_op(env, oh, OSD_OT_INSERT,
3112 osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
3117 rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid,
3118 attr->la_projid, 1, oh, osd_dt_obj(dt),
3119 false, NULL, false);
3123 /* will help to find FID->ino mapping at dt_insert() */
3124 rc = osd_idc_find_and_init(env, osd_obj2dev(osd_dt_obj(dt)),
3130 static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
3131 struct lu_attr *attr,
3132 struct dt_allocation_hint *hint,
3133 struct dt_object_format *dof, struct thandle *th)
3135 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
3136 struct osd_object *obj = osd_dt_obj(dt);
3137 struct osd_thread_info *info = osd_oti_get(env);
3141 if (dt_object_exists(dt))
3144 LINVRNT(osd_invariant(obj));
3145 LASSERT(!dt_object_remote(dt));
3146 LASSERT(osd_write_locked(env, obj));
3147 LASSERT(th != NULL);
3149 if (unlikely(fid_is_acct(fid)))
3150 /* Quota files can't be created from the kernel any more,
3151 * 'tune2fs -O quota' will take care of creating them */
3154 result = __osd_object_create(info, obj, attr, hint, dof, th);
3156 result = __osd_oi_insert(env, obj, fid, th);
3157 if (obj->oo_dt.do_body_ops == &osd_body_ops_new)
3158 obj->oo_dt.do_body_ops = &osd_body_ops;
3160 LASSERT(ergo(result == 0,
3161 dt_object_exists(dt) && !dt_object_remote(dt)));
3163 LASSERT(osd_invariant(obj));
3168 * Called to destroy on-disk representation of the object
3170 * Concurrency: must be locked
3172 static int osd_declare_object_destroy(const struct lu_env *env,
3173 struct dt_object *dt,
3176 struct osd_object *obj = osd_dt_obj(dt);
3177 struct inode *inode = obj->oo_inode;
3178 struct osd_thandle *oh;
3185 oh = container_of0(th, struct osd_thandle, ot_super);
3186 LASSERT(oh->ot_handle == NULL);
3188 osd_trans_declare_op(env, oh, OSD_OT_DESTROY,
3189 osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
3190 /* Recycling an idle OI leaf may cause three additional OI blocks
3192 if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LOST_MDTOBJ2))
3193 osd_trans_declare_op(env, oh, OSD_OT_DELETE,
3194 osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
3195 /* one less inode */
3196 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
3197 i_projid_read(inode), -1, oh, obj, false,
3201 /* data to be truncated */
3202 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
3203 i_projid_read(inode), 0, oh, obj, true,
3208 /* will help to find FID->ino when this object is being
3209 * added to PENDING/ */
3210 rc = osd_idc_find_and_init(env, osd_obj2dev(obj), obj);
3215 static int osd_object_destroy(const struct lu_env *env,
3216 struct dt_object *dt,
3219 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
3220 struct osd_object *obj = osd_dt_obj(dt);
3221 struct inode *inode = obj->oo_inode;
3222 struct osd_device *osd = osd_obj2dev(obj);
3223 struct osd_thandle *oh;
3227 oh = container_of0(th, struct osd_thandle, ot_super);
3228 LASSERT(oh->ot_handle);
3230 LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
3232 if (unlikely(fid_is_acct(fid)))
3235 if (S_ISDIR(inode->i_mode)) {
3236 LASSERT(osd_inode_unlinked(inode) || inode->i_nlink == 1 ||
3237 inode->i_nlink == 2);
3238 /* it will check/delete the inode from the remote parent;
3239 * how can this be optimized? unlink performance impact XXX */
3240 result = osd_delete_from_remote_parent(env, osd, obj, oh);
3242 CERROR("%s: delete inode "DFID": rc = %d\n",
3243 osd_name(osd), PFID(fid), result);
3245 spin_lock(&obj->oo_guard);
3247 spin_unlock(&obj->oo_guard);
3248 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
3251 osd_trans_exec_op(env, th, OSD_OT_DESTROY);
3253 ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
3255 if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LOST_MDTOBJ2))
3256 result = osd_oi_delete(osd_oti_get(env), osd, fid,
3257 oh->ot_handle, OI_CHECK_FLD);
3259 osd_trans_exec_check(env, th, OSD_OT_DESTROY);
3260 /* XXX: add to ext3 orphan list */
3261 /* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
3263 /* not needed in the cache anymore */
3264 set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
3265 obj->oo_destroyed = 1;
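/*
 * Setting LU_OBJECT_HEARD_BANSHEE makes the LU cache drop this object
 * once the last reference is released, and oo_destroyed lets later
 * operations on a cached handle bail out early (see the check in
 * osd_attr_get()).
 */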
3271 * Put the fid into lustre_mdt_attrs, and then place the structure in the
3272 * inode's EA. This fid should not be altered during the lifetime
3275 * \retval +ve, on success
3276 * \retval -ve, on error
3278 * FIXME: It is good to have/use ldiskfs_xattr_set_handle() here
3280 int osd_ea_fid_set(struct osd_thread_info *info, struct inode *inode,
3281 const struct lu_fid *fid, __u32 compat, __u32 incompat)
3283 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
3284 struct lustre_mdt_attrs *lma = &loa->loa_lma;
3288 if (OBD_FAIL_CHECK(OBD_FAIL_FID_INLMA))
3291 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_OST_EA_FID_SET))
3294 lustre_loa_init(loa, fid, compat, incompat);
3295 lustre_loa_swab(loa, false);
3297 /* For an OST device with the default 256-byte inode size,
3298 * the PFID EA will be stored together with the LMA EA to avoid
3299 * performance trouble. Otherwise the PFID EA can be stored
3300 * independently. LU-8998 */
3301 if ((compat & LMAC_FID_ON_OST) &&
3302 LDISKFS_INODE_SIZE(inode->i_sb) <= 256)
3303 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, loa,
3304 sizeof(*loa), XATTR_CREATE);
3306 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
3307 sizeof(*lma), XATTR_CREATE);
3308 /* LMA may already exist, but we need to check that all the
3309 * desired compat/incompat flags have been added. */
3310 if (unlikely(rc == -EEXIST)) {
3311 rc = __osd_xattr_get(inode, &info->oti_obj_dentry,
3312 XATTR_NAME_LMA, (void *)loa, sizeof(*loa));
3316 if (rc < sizeof(*lma))
3319 lustre_loa_swab(loa, true);
3320 if (lu_fid_eq(fid, &lma->lma_self_fid) &&
3321 ((compat == 0 && incompat == 0) ||
3322 (!(~lma->lma_compat & compat) &&
3323 !(~lma->lma_incompat & incompat))))
3326 lma->lma_self_fid = *fid;
3327 lma->lma_compat |= compat;
3328 lma->lma_incompat |= incompat;
3329 if (rc == sizeof(*lma)) {
3330 lustre_lma_swab(lma);
3331 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
3332 sizeof(*lma), XATTR_REPLACE);
3334 lustre_loa_swab(loa, false);
3335 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, loa,
3336 sizeof(*loa), XATTR_REPLACE);
3344 * ldiskfs supports storing the fid in the dirent; it is passed in dentry->d_fsdata.
3345 * Lustre 1.8 also uses d_fsdata for passing other info to ldiskfs.
3346 * To stay compatible with the 1.8 ldiskfs driver we need to have a
3347 * magic number at the start of the fid data.
3348 * ldiskfs_dentry_param is used only to pass the fid from osd to ldiskfs.
3351 static void osd_get_ldiskfs_dirent_param(struct ldiskfs_dentry_param *param,
3352 const struct lu_fid *fid)
3354 if (!fid_is_namespace_visible(fid) ||
3355 OBD_FAIL_CHECK(OBD_FAIL_FID_IGIF)) {
3356 param->edp_magic = 0;
3360 param->edp_magic = LDISKFS_LUFID_MAGIC;
3361 param->edp_len = sizeof(struct lu_fid) + 1;
3362 fid_cpu_to_be((struct lu_fid *)param->edp_data, (struct lu_fid *)fid);
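/*
 * The resulting dentry param carries the FID in big-endian form with
 * edp_len = sizeof(struct lu_fid) + 1; FIDs that are not
 * namespace-visible (or with OBD_FAIL_FID_IGIF set) get edp_magic = 0,
 * so ldiskfs falls back to a plain dirent without a FID.
 */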
3366 * Try to read the fid from inode ea into dt_rec.
3368 * \param fid object fid.
3370 * \retval 0 on success
3372 static int osd_ea_fid_get(const struct lu_env *env, struct osd_object *obj,
3373 __u32 ino, struct lu_fid *fid,
3374 struct osd_inode_id *id)
3376 struct osd_thread_info *info = osd_oti_get(env);
3377 struct inode *inode;
3380 osd_id_gen(id, ino, OSD_OII_NOGEN);
3381 inode = osd_iget_fid(info, osd_obj2dev(obj), id, fid);
3383 RETURN(PTR_ERR(inode));
3389 static int osd_add_dot_dotdot_internal(struct osd_thread_info *info,
3391 struct inode *parent_dir,
3392 const struct lu_fid *dot_fid,
3393 const struct lu_fid *dot_dot_fid,
3394 struct osd_thandle *oth)
3396 struct ldiskfs_dentry_param *dot_ldp;
3397 struct ldiskfs_dentry_param *dot_dot_ldp;
3398 __u32 saved_nlink = dir->i_nlink;
3401 dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
3402 osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
3404 dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
3405 dot_ldp->edp_magic = 0;
3407 rc = ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
3408 dir, dot_ldp, dot_dot_ldp);
3409 /* The ldiskfs_add_dot_dotdot() may set dir->i_nlink to 2, then
3410 * the subsequent ref_add() will increase the dir->i_nlink
3411 * to 3. That is incorrect for a newly created directory.
3413 * It looks like a hack, because we want to make the OSD API
3414 * order-independent for a newly created directory object
3415 * between the dt_insert(..) and ref_add() operations.
3417 * Here, we only restore the in-RAM dir-inode's nlink attr,
3418 * because if the nlink attr is not 2, then there will be a
3419 * ref_add() call following the dt_insert(..), and such a call
3420 * will make both the in-RAM and on-disk dir-inode's nlink
3421 * attr be set to 2. LU-7447 */
3422 set_nlink(dir, saved_nlink);
3427 * Create a local agent inode for a remote entry
3429 static struct inode *osd_create_local_agent_inode(const struct lu_env *env,
3430 struct osd_device *osd,
3431 struct osd_object *pobj,
3432 const struct lu_fid *fid,
3436 struct osd_thread_info *info = osd_oti_get(env);
3437 struct inode *local;
3438 struct osd_thandle *oh;
3443 oh = container_of(th, struct osd_thandle, ot_super);
3444 LASSERT(oh->ot_handle->h_transaction != NULL);
3446 local = ldiskfs_create_inode(oh->ot_handle, pobj->oo_inode, type);
3447 if (IS_ERR(local)) {
3448 CERROR("%s: create local error %d\n", osd_name(osd),
3449 (int)PTR_ERR(local));
3453 /* restore i_gid in case S_ISGID is set; we will inherit S_ISGID and set
3454 * the correct gid on the remote file, not on the agent inode here */
3455 local->i_gid = current_fsgid();
3456 ldiskfs_set_inode_state(local, LDISKFS_STATE_LUSTRE_NOSCRUB);
3457 unlock_new_inode(local);
3459 /* Set special LMA flag for local agent inode */
3460 rc = osd_ea_fid_set(info, local, fid, 0, LMAI_AGENT);
3462 CERROR("%s: set LMA for "DFID" remote inode failed: rc = %d\n",
3463 osd_name(osd), PFID(fid), rc);
3464 RETURN(ERR_PTR(rc));
3470 rc = osd_add_dot_dotdot_internal(info, local, pobj->oo_inode,
3471 lu_object_fid(&pobj->oo_dt.do_lu),
3474 CERROR("%s: "DFID" add dot dotdot error: rc = %d\n",
3475 osd_name(osd), PFID(fid), rc);
3476 RETURN(ERR_PTR(rc));
3483 * When a direntry is deleted, we have to take care of a possible agent inode
3484 * referenced by it. Unfortunately we can't do this at that point:
3485 * iget() within a running transaction leads to deadlock, and we'd better
3486 * not call it on every delete declaration for performance reasons. So we put
3487 * a potential agent inode on a list and process it once the transaction
3488 * is over. Notice this is not any worse in terms of real orphans, as regular
3489 * object destroy doesn't put inodes on the on-disk orphan list. This should
3490 * be addressed separately.
3492 static int osd_schedule_agent_inode_removal(const struct lu_env *env,
3493 struct osd_thandle *oh,
3496 struct osd_device *osd = osd_dt_dev(oh->ot_super.th_dev);
3497 struct osd_obj_orphan *oor;
3504 oor->oor_env = (struct lu_env *)env;
3505 spin_lock(&osd->od_osfs_lock);
3506 list_add(&oor->oor_list, &osd->od_orphan_list);
3507 spin_unlock(&osd->od_osfs_lock);
3509 oh->ot_remove_agents = 1;
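/*
 * The orphan entry is queued on od_orphan_list under od_osfs_lock, and
 * ot_remove_agents presumably tells the transaction stop path to run
 * osd_process_scheduled_agent_removals() (below) once the handle has
 * been closed; that function only picks up entries queued by the same
 * environment.
 */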
3515 static int osd_process_scheduled_agent_removals(const struct lu_env *env,
3516 struct osd_device *osd)
3518 struct osd_thread_info *info = osd_oti_get(env);
3519 struct osd_obj_orphan *oor, *tmp;
3520 struct osd_inode_id id;
3521 struct list_head list;
3522 struct inode *inode;
3527 INIT_LIST_HEAD(&list);
3529 spin_lock(&osd->od_osfs_lock);
3530 list_for_each_entry_safe(oor, tmp, &osd->od_orphan_list, oor_list) {
3531 if (oor->oor_env == env) {
3532 list_del(&oor->oor_list);
3533 list_add(&oor->oor_list, &list);
3536 spin_unlock(&osd->od_osfs_lock);
3538 list_for_each_entry_safe(oor, tmp, &list, oor_list) {
3542 list_del(&oor->oor_list);
3545 osd_id_gen(&id, ino, OSD_OII_NOGEN);
3546 inode = osd_iget_fid(info, osd, &id, &fid);
3550 if (!osd_remote_fid(env, osd, &fid)) {
3555 jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC, 1);
3557 mark_inode_dirty(inode);
3558 ldiskfs_journal_stop(jh);
3566 * OSD layer object create function for interoperability mode (b11826).
3567 * This is mostly similar to osd_object_create(); the only difference is that
3568 * the fid is inserted into the inode EA here.
3570 * \retval 0, on success
3571 * \retval -ve, on error
3573 static int osd_object_ea_create(const struct lu_env *env, struct dt_object *dt,
3574 struct lu_attr *attr,
3575 struct dt_allocation_hint *hint,
3576 struct dt_object_format *dof,
3579 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
3580 struct osd_object *obj = osd_dt_obj(dt);
3581 struct osd_thread_info *info = osd_oti_get(env);
3582 int result, on_ost = 0;
3586 if (dt_object_exists(dt))
3589 LASSERT(osd_invariant(obj));
3590 LASSERT(!dt_object_remote(dt));
3591 LASSERT(osd_write_locked(env, obj));
3592 LASSERT(th != NULL);
3594 if (unlikely(fid_is_acct(fid)))
3595 /* Quota files can't be created from the kernel any more,
3596 * 'tune2fs -O quota' will take care of creating them */
3599 result = __osd_object_create(info, obj, attr, hint, dof, th);
3601 if (fid_is_idif(fid) &&
3602 !osd_dev(dt->do_lu.lo_dev)->od_index_in_idif) {
3603 struct lu_fid *tfid = &info->oti_fid;
3604 struct ost_id *oi = &info->oti_ostid;
3606 fid_to_ostid(fid, oi);
3607 ostid_to_fid(tfid, oi, 0);
3609 result = osd_ea_fid_set(info, obj->oo_inode, tfid,
3610 LMAC_FID_ON_OST, 0);
3612 on_ost = fid_is_on_ost(info, osd_obj2dev(obj),
3614 result = osd_ea_fid_set(info, obj->oo_inode, fid,
3615 on_ost ? LMAC_FID_ON_OST : 0,
3618 if (obj->oo_dt.do_body_ops == &osd_body_ops_new)
3619 obj->oo_dt.do_body_ops = &osd_body_ops;
3623 result = __osd_oi_insert(env, obj, fid, th);
3625 /* a small optimization - dt_insert() isn't usually applied
3626 * to OST objects, so we don't need to cache OI mapping for
3628 if (result == 0 && on_ost == 0) {
3629 struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
3630 result = osd_idc_find_and_init(env, osd, obj);
3631 LASSERT(result == 0);
3634 LASSERT(ergo(result == 0,
3635 dt_object_exists(dt) && !dt_object_remote(dt)));
3636 LINVRNT(osd_invariant(obj));