4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2015, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/osd/osd_handler.c
38 * Top-level entry points into osd module
40 * Author: Nikita Danilov <nikita@clusterfs.com>
41 * Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
44 #define DEBUG_SUBSYSTEM S_OSD
46 #include <linux/module.h>
47 #include <linux/user_namespace.h>
48 #ifdef HAVE_UIDGID_HEADER
49 # include <linux/uidgid.h>
52 /* LUSTRE_VERSION_CODE */
53 #include <lustre_ver.h>
54 /* prerequisite for linux/xattr.h */
55 #include <linux/types.h>
56 /* prerequisite for linux/xattr.h */
58 /* XATTR_{REPLACE,CREATE} */
59 #include <linux/xattr.h>
61 #include <ldiskfs/ldiskfs.h>
62 #include <ldiskfs/xattr.h>
63 #include <ldiskfs/ldiskfs_extents.h>
66 * struct OBD_{ALLOC,FREE}*()
69 #include <obd_support.h>
70 /* struct ptlrpc_thread */
71 #include <lustre_net.h>
72 #include <lustre_fid.h>
74 #include <lustre_param.h>
76 #include "osd_internal.h"
77 #include "osd_dynlocks.h"
79 /* llo_* api support */
80 #include <md_object.h>
81 #include <lustre_quota.h>
83 #include <lustre_linkea.h>
86 module_param(ldiskfs_pdo, int, 0644);
87 MODULE_PARM_DESC(ldiskfs_pdo, "ldiskfs with parallel directory operations");
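/* with mode 0644 the setting is also runtime-tunable, typically via
* /sys/module/osd_ldiskfs/parameters/ldiskfs_pdo (assuming the module is
* built as osd_ldiskfs) */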
89 int ldiskfs_track_declares_assert;
90 module_param(ldiskfs_track_declares_assert, int, 0644);
91 MODULE_PARM_DESC(ldiskfs_track_declares_assert, "LBUG during tracking of declares");
93 /* Slab to allocate dynlocks */
94 struct kmem_cache *dynlock_cachep;
96 /* Slab to allocate osd_it_ea */
97 struct kmem_cache *osd_itea_cachep;
99 static struct lu_kmem_descr ldiskfs_caches[] = {
101 .ckd_cache = &dynlock_cachep,
102 .ckd_name = "dynlock_cache",
103 .ckd_size = sizeof(struct dynlock_handle)
106 .ckd_cache = &osd_itea_cachep,
107 .ckd_name = "osd_itea_cache",
108 .ckd_size = sizeof(struct osd_it_ea)
115 static const char dot[] = ".";
116 static const char dotdot[] = "..";
117 static const char remote_obj_dir[] = "REM_OBJ_DIR";
119 static const struct lu_object_operations osd_lu_obj_ops;
120 static const struct dt_object_operations osd_obj_ops;
121 static const struct dt_object_operations osd_obj_ea_ops;
122 static const struct dt_object_operations osd_obj_otable_it_ops;
123 static const struct dt_index_operations osd_index_iam_ops;
124 static const struct dt_index_operations osd_index_ea_ops;
126 static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
127 const struct lu_fid *fid);
128 static int osd_process_scheduled_agent_removals(const struct lu_env *env,
129 struct osd_device *osd);
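/* Map each operation type to its rollback (reverse) counterpart: a create
* is undone by a destroy, an insert by a delete, and so on; OSD_OT_MAX
* marks operations (punch, quota) that have no reverse operation. The
* reverse operation touches the same blocks, so the credits declared for
* one direction are assumed to cover the other. */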
131 int osd_trans_declare_op2rb[] = {
132 [OSD_OT_ATTR_SET] = OSD_OT_ATTR_SET,
133 [OSD_OT_PUNCH] = OSD_OT_MAX,
134 [OSD_OT_XATTR_SET] = OSD_OT_XATTR_SET,
135 [OSD_OT_CREATE] = OSD_OT_DESTROY,
136 [OSD_OT_DESTROY] = OSD_OT_CREATE,
137 [OSD_OT_REF_ADD] = OSD_OT_REF_DEL,
138 [OSD_OT_REF_DEL] = OSD_OT_REF_ADD,
139 [OSD_OT_WRITE] = OSD_OT_WRITE,
140 [OSD_OT_INSERT] = OSD_OT_DELETE,
141 [OSD_OT_DELETE] = OSD_OT_INSERT,
142 [OSD_OT_QUOTA] = OSD_OT_MAX,
145 static int osd_has_index(const struct osd_object *obj)
147 return obj->oo_dt.do_index_ops != NULL;
150 static int osd_object_invariant(const struct lu_object *l)
152 return osd_invariant(osd_obj(l));
156 * Concurrency: doesn't matter
160 * Concurrency: doesn't matter
162 static int osd_write_locked(const struct lu_env *env, struct osd_object *o)
164 struct osd_thread_info *oti = osd_oti_get(env);
165 return oti->oti_w_locks > 0 && o->oo_owner == env;
169 * Concurrency: doesn't access mutable data
171 static int osd_root_get(const struct lu_env *env,
172 struct dt_device *dev, struct lu_fid *f)
174 lu_local_obj_fid(f, OSD_FS_ROOT_OID);
179 * The following set of functions is used to maintain a per-thread
180 * cache of FID->ino mappings. This mechanism is needed to resolve a
181 * FID to an inode at dt_insert() time, which in turn stores the ino in
182 * the directory entries to keep ldiskfs compatible with ext[34].
183 * Due to locking-originated restrictions we can't look the ino up
184 * using the LU cache (deadlock is possible), and lookup using OI is quite
185 * expensive. So instead we maintain this cache and methods like
186 * dt_create() fill it. Thus in the majority of cases dt_insert() is
187 * able to find the needed mapping in a lockless manner.
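*
* Usage sketch (illustrative only, not part of the build): a dt_insert()
* implementation can resolve a FID roughly as
*
*	idc = osd_idc_find_or_init(env, osd, fid);
*	if (IS_ERR(idc))
*		return PTR_ERR(idc);
*	... then store idc->oic_lid.oii_ino in the new directory entry;
*	a remote object leaves oic_lid zeroed (see osd_idc_add() below).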
189 static struct osd_idmap_cache *
190 osd_idc_find(const struct lu_env *env, struct osd_device *osd,
191 const struct lu_fid *fid)
193 struct osd_thread_info *oti = osd_oti_get(env);
194 struct osd_idmap_cache *idc = oti->oti_ins_cache;
196 for (i = 0; i < oti->oti_ins_cache_used; i++) {
197 if (!lu_fid_eq(&idc[i].oic_fid, fid))
199 if (idc[i].oic_dev != osd)
208 static struct osd_idmap_cache *
209 osd_idc_add(const struct lu_env *env, struct osd_device *osd,
210 const struct lu_fid *fid)
212 struct osd_thread_info *oti = osd_oti_get(env);
213 struct osd_idmap_cache *idc;
216 if (unlikely(oti->oti_ins_cache_used >= oti->oti_ins_cache_size)) {
217 i = oti->oti_ins_cache_size * 2;
219 i = OSD_INS_CACHE_SIZE;
220 OBD_ALLOC(idc, sizeof(*idc) * i);
222 return ERR_PTR(-ENOMEM);
223 if (oti->oti_ins_cache != NULL) {
224 memcpy(idc, oti->oti_ins_cache,
225 oti->oti_ins_cache_used * sizeof(*idc));
226 OBD_FREE(oti->oti_ins_cache,
227 oti->oti_ins_cache_used * sizeof(*idc));
229 oti->oti_ins_cache = idc;
230 oti->oti_ins_cache_size = i;
233 idc = oti->oti_ins_cache + oti->oti_ins_cache_used++;
236 idc->oic_lid.oii_ino = 0;
237 idc->oic_lid.oii_gen = 0;
244 * Look up the mapping for the given fid in the cache and initialize a
245 * new one if not found. The initialization checks whether the
246 * object is local or remote. For local objects, OI is used to
247 * learn the ino/generation. The function is used when the caller
248 * has no information about the object, e.g. at dt_insert().
250 static struct osd_idmap_cache *
251 osd_idc_find_or_init(const struct lu_env *env, struct osd_device *osd,
252 const struct lu_fid *fid)
254 struct osd_idmap_cache *idc;
257 idc = osd_idc_find(env, osd, fid);
258 LASSERT(!IS_ERR(idc));
262 /* new mapping is needed */
263 idc = osd_idc_add(env, osd, fid);
268 rc = osd_remote_fid(env, osd, fid);
269 if (unlikely(rc < 0))
273 /* the object is local, lookup in OI */
274 /* XXX: probably cheaper to lookup in LU first? */
275 rc = osd_oi_lookup(osd_oti_get(env), osd, fid,
277 if (unlikely(rc < 0)) {
278 CERROR("can't lookup: rc = %d\n", rc);
282 /* the object is remote */
290 * Look up the mapping for the given FID and fill it from the given object.
291 * The object is local by definition.
293 static int osd_idc_find_and_init(const struct lu_env *env,
294 struct osd_device *osd,
295 struct osd_object *obj)
297 const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
298 struct osd_idmap_cache *idc;
300 idc = osd_idc_find(env, osd, fid);
301 LASSERT(!IS_ERR(idc));
303 if (obj->oo_inode == NULL)
305 if (idc->oic_lid.oii_ino != obj->oo_inode->i_ino) {
306 LASSERT(idc->oic_lid.oii_ino == 0);
307 idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
308 idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
313 /* new mapping is needed */
314 idc = osd_idc_add(env, osd, fid);
318 if (obj->oo_inode != NULL) {
319 idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
320 idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
326 * OSD object methods.
330 * Concurrency: no concurrent access is possible that early in object
333 static struct lu_object *osd_object_alloc(const struct lu_env *env,
334 const struct lu_object_header *hdr,
337 struct osd_object *mo;
343 l = &mo->oo_dt.do_lu;
344 dt_object_init(&mo->oo_dt, NULL, d);
345 mo->oo_dt.do_ops = &osd_obj_ea_ops;
346 l->lo_ops = &osd_lu_obj_ops;
347 init_rwsem(&mo->oo_sem);
348 init_rwsem(&mo->oo_ext_idx_sem);
349 spin_lock_init(&mo->oo_guard);
350 INIT_LIST_HEAD(&mo->oo_xattr_list);
357 int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
358 struct dentry *dentry, struct lustre_mdt_attrs *lma)
362 CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
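/* read into the LMA_OLD_SIZE-sized scratch buffer so that an older, larger
* on-disk LMA layout still fits; only sizeof(*lma) bytes are copied out below */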
363 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
364 info->oti_mdt_attrs_old, LMA_OLD_SIZE);
366 if ((void *)lma != (void *)info->oti_mdt_attrs_old)
367 memcpy(lma, info->oti_mdt_attrs_old, sizeof(*lma));
369 lustre_lma_swab(lma);
370 /* Check LMA compatibility */
371 if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
372 CWARN("%.16s: unsupported incompat LMA feature(s) %#x "
373 "for fid = "DFID", ino = %lu\n",
374 LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
375 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
376 PFID(&lma->lma_self_fid), inode->i_ino);
379 } else if (rc == 0) {
387 * retrieve object from backend ext fs.
389 struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
390 struct osd_inode_id *id)
392 struct inode *inode = NULL;
394 /* if we look for an inode within a running
395 * transaction, then we risk a deadlock */
396 /* osd_dirent_check_repair() breaks this */
397 /*LASSERT(current->journal_info == NULL);*/
399 inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
401 CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
402 id->oii_ino, PTR_ERR(inode));
403 } else if (id->oii_gen != OSD_OII_NOGEN &&
404 inode->i_generation != id->oii_gen) {
405 CDEBUG(D_INODE, "unmatched inode: ino = %u, oii_gen = %u, "
406 "i_generation = %u\n",
407 id->oii_ino, id->oii_gen, inode->i_generation);
409 inode = ERR_PTR(-ESTALE);
410 } else if (inode->i_nlink == 0) {
411 /* due to parallel readdir and unlink,
412 * we can have a dead inode here. */
413 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
415 inode = ERR_PTR(-ESTALE);
416 } else if (is_bad_inode(inode)) {
417 CWARN("%.16s: bad inode: ino = %u\n",
418 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, id->oii_ino);
420 inode = ERR_PTR(-ENOENT);
422 ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
423 if (id->oii_gen == OSD_OII_NOGEN)
424 osd_id_gen(id, inode->i_ino, inode->i_generation);
426 /* Do not update file c/mtime in ldiskfs.
427 * NB: we don't have any lock to protect this because we don't
428 * hold a reference on the osd_object now, but contention with
429 * another lookup + attr_set can't happen in the tiny window
430 * between the if (...) check and setting S_NOCMTIME. */
431 if (!(inode->i_flags & S_NOCMTIME))
432 inode->i_flags |= S_NOCMTIME;
437 int osd_ldiskfs_add_entry(struct osd_thread_info *info,
438 handle_t *handle, struct dentry *child,
439 struct inode *inode, struct htree_lock *hlock)
443 rc = __ldiskfs_add_entry(handle, child, inode, hlock);
444 if (rc == -ENOBUFS || rc == -ENOSPC) {
445 char fidbuf[FID_LEN + 1];
446 struct lustre_mdt_attrs lma;
447 struct lu_fid fid = { };
449 struct dentry *p_dentry = child->d_parent;
451 rc2 = osd_get_lma(info, p_dentry->d_inode, p_dentry,
454 fid = lma.lma_self_fid;
455 snprintf(fidbuf, sizeof(fidbuf), DFID, PFID(&fid));
456 } else if (rc2 == -ENODATA) {
457 if (unlikely(p_dentry->d_inode ==
458 inode->i_sb->s_root->d_inode))
459 lu_local_obj_fid(&fid, OSD_FS_ROOT_OID);
460 else if (info->oti_dev && !info->oti_dev->od_is_ost &&
461 fid_seq_is_mdt0(fid_seq(&fid)))
462 lu_igif_build(&fid, p_dentry->d_inode->i_ino,
463 p_dentry->d_inode->i_generation);
464 snprintf(fidbuf, sizeof(fidbuf), DFID, PFID(&fid));
466 snprintf(fidbuf, FID_LEN, "%s", "unknown");
470 errstr = "has reached";
472 errstr = "is approaching";
473 CWARN("%.16s: directory (inode: %lu FID: %s) %s maximum entry limit\n",
474 LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
475 p_dentry->d_inode->i_ino, fidbuf, errstr);
476 /* ignore such error now */
484 static struct inode *
485 osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
486 struct osd_inode_id *id, struct lu_fid *fid)
488 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
492 inode = osd_iget(info, dev, id);
496 rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
498 *fid = lma->lma_self_fid;
499 } else if (rc == -ENODATA) {
500 if (unlikely(inode == osd_sb(dev)->s_root->d_inode))
501 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
503 lu_igif_build(fid, inode->i_ino, inode->i_generation);
511 static struct inode *osd_iget_check(struct osd_thread_info *info,
512 struct osd_device *dev,
513 const struct lu_fid *fid,
514 struct osd_inode_id *id,
521 /* The cached OI mapping is trustworthy. If we cannot locate the inode
522 * via the cached OI mapping, then return the failure to the caller
523 * directly without further OI checking. */
525 inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
528 if (cached || (rc != -ENOENT && rc != -ESTALE)) {
529 CDEBUG(D_INODE, "no inode: ino = %u, rc = %d\n",
538 if (is_bad_inode(inode)) {
541 CDEBUG(D_INODE, "bad inode: ino = %u\n", id->oii_ino);
549 if (id->oii_gen != OSD_OII_NOGEN &&
550 inode->i_generation != id->oii_gen) {
553 CDEBUG(D_INODE, "unmatched inode: ino = %u, "
554 "oii_gen = %u, i_generation = %u\n",
555 id->oii_ino, id->oii_gen, inode->i_generation);
563 if (inode->i_nlink == 0) {
566 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
574 ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
578 LASSERTF(rc == -ESTALE || rc == -ENOENT, "rc = %d\n", rc);
580 rc = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
581 /* XXX: There are four possible cases:
583 * 1. Backup/restore caused the OI to become invalid.
585 * 2. Someone unlinked the object but did NOT remove
586 * the OI mapping, e.g. by mounting the target device
587 * as ldiskfs and modifying something directly.
589 * 3. Someone just removed the object between the
590 * former oi_lookup and the iget. That is normal.
591 * 4. Other failure cases.
593 * Generally, when the device is mounted, it will
594 * automatically check whether the system was restored from
595 * a file-level backup or not. We trust such detection
596 * to distinguish the 1st case from the 2nd case. */
598 if (!IS_ERR(inode) && inode->i_generation != 0 &&
599 inode->i_generation == id->oii_gen)
600 /* "id->oii_gen != OSD_OII_NOGEN" is for
601 * "@cached == false" case. */
606 /* If the OI mapping was in the OI file before the
607 * osd_iget_check(), but now it has disappeared,
608 * then it must have been removed by a race. That is a
609 * normal race case. */
612 if (id->oii_gen == OSD_OII_NOGEN)
613 osd_id_gen(id, inode->i_ino, inode->i_generation);
615 /* Do not update file c/mtime in ldiskfs.
616 * NB: we don't have any lock to protect this because we don't
617 * hold a reference on the osd_object now, but contention with
618 * another lookup + attr_set can't happen in the tiny window
619 * between the if (...) check and setting S_NOCMTIME. */
620 if (!(inode->i_flags & S_NOCMTIME))
621 inode->i_flags |= S_NOCMTIME;
638 * \retval +v: new filter_fid, does not contain self-fid
639 * \retval 0: filter_fid_old, contains self-fid
640 * \retval -v: other failure cases
642 int osd_get_idif(struct osd_thread_info *info, struct inode *inode,
643 struct dentry *dentry, struct lu_fid *fid)
645 struct filter_fid_old *ff = &info->oti_ff;
646 struct ost_id *ostid = &info->oti_ostid;
649 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_FID, ff, sizeof(*ff));
650 if (rc == sizeof(*ff)) {
652 ostid_set_seq(ostid, le64_to_cpu(ff->ff_seq));
653 ostid_set_id(ostid, le64_to_cpu(ff->ff_objid));
654 /* XXX: use 0 as the index for compatibility; the caller will
655 * handle index-related issues when necessary. */
656 ostid_to_fid(fid, ostid, 0);
657 } else if (rc == sizeof(struct filter_fid)) {
659 } else if (rc >= 0) {
666 static int osd_lma_self_repair(struct osd_thread_info *info,
667 struct osd_device *osd, struct inode *inode,
668 const struct lu_fid *fid, __u32 compat)
673 LASSERT(current->journal_info == NULL);
675 jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
676 osd_dto_credits_noquota[DTO_XATTR_SET]);
679 CWARN("%s: cannot start journal for lma_self_repair: rc = %d\n",
684 rc = osd_ea_fid_set(info, inode, fid, compat, 0);
686 CWARN("%s: cannot self repair the LMA: rc = %d\n",
688 ldiskfs_journal_stop(jh);
692 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
694 struct osd_thread_info *info = osd_oti_get(env);
695 struct osd_device *osd = osd_obj2dev(obj);
696 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
697 struct inode *inode = obj->oo_inode;
698 struct dentry *dentry = &info->oti_obj_dentry;
699 struct lu_fid *fid = NULL;
700 const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
704 CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
705 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
706 info->oti_mdt_attrs_old, LMA_OLD_SIZE);
707 if (rc == -ENODATA && !fid_is_igif(rfid) && osd->od_check_ff) {
708 fid = &lma->lma_self_fid;
709 rc = osd_get_idif(info, inode, dentry, fid);
710 if ((rc > 0) || (rc == -ENODATA && osd->od_index_in_idif)) {
711 /* For the given OST-object, if it has neither LMA nor
712 * FID in XATTR_NAME_FID, then the given FID (which is
713 * contained in the @obj, from client RPC for locating
714 * the OST-object) is trusted. We use it to generate
716 osd_lma_self_repair(info, osd, inode, rfid,
727 lustre_lma_swab(lma);
728 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
729 CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
730 CWARN("%s: unsupported incompat LMA feature(s) %#x for "
731 "fid = "DFID", ino = %lu\n", osd_name(osd),
732 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
733 PFID(rfid), inode->i_ino);
736 fid = &lma->lma_self_fid;
740 if (fid != NULL && unlikely(!lu_fid_eq(rfid, fid))) {
741 if (fid_is_idif(rfid) && fid_is_idif(fid)) {
742 struct ost_id *oi = &info->oti_ostid;
743 struct lu_fid *fid1 = &info->oti_fid3;
744 __u32 idx = fid_idif_ost_idx(rfid);
746 /* For an old IDIF, the OST index is not part of the IDIF,
747 * meaning that different OSTs may have the same IDIFs.
748 * In such a case, we need to make a compatibility
749 * check to make sure the OI scrub is triggered properly. */
750 if (idx != 0 && fid_idif_ost_idx(fid) == 0) {
751 /* Given @rfid is new, LMA is old. */
752 fid_to_ostid(fid, oi);
753 ostid_to_fid(fid1, oi, idx);
754 if (lu_fid_eq(fid1, rfid)) {
755 if (osd->od_index_in_idif)
756 osd_lma_self_repair(info, osd,
770 struct osd_check_lmv_buf {
771 #ifdef HAVE_DIR_CONTEXT
772 /* please keep it as first member */
773 struct dir_context ctx;
775 struct osd_thread_info *oclb_info;
776 struct osd_device *oclb_dev;
777 struct osd_idmap_cache *oclb_oic;
781 * It is called internally by ->readdir() to filter out the
782 * local slave object's FID of the striped directory.
784 * \retval 1 found the local slave's FID
785 * \retval 0 continue to check next item
786 * \retval -ve for failure
788 #ifdef HAVE_FILLDIR_USE_CTX
789 static int osd_stripe_dir_filldir(struct dir_context *buf,
791 static int osd_stripe_dir_filldir(void *buf,
793 const char *name, int namelen,
794 loff_t offset, __u64 ino, unsigned d_type)
796 struct osd_check_lmv_buf *oclb = (struct osd_check_lmv_buf *)buf;
797 struct osd_thread_info *oti = oclb->oclb_info;
798 struct lu_fid *fid = &oti->oti_fid3;
799 struct osd_inode_id *id = &oti->oti_id3;
800 struct osd_device *dev = oclb->oclb_dev;
801 struct osd_idmap_cache *oic = oclb->oclb_oic;
809 sscanf(name + 1, SFID, RFID(fid));
810 if (!fid_is_sane(fid))
813 if (osd_remote_fid(oti->oti_env, dev, fid))
816 osd_id_gen(id, ino, OSD_OII_NOGEN);
817 inode = osd_iget(oti, dev, id);
819 return PTR_ERR(inode);
822 osd_add_oi_cache(oti, dev, id, fid);
826 rc = osd_oii_insert(dev, oic, true);
828 return rc == 0 ? 1 : rc;
831 /* When looking up an item under a striped directory, we first need to locate
832 * the master MDT-object of the striped directory; then the client will send a
833 * lookup (getattr_by_name) RPC to the MDT with some slave MDT-object's FID
834 * and the item's name. If the system is restored from an MDT file-level backup,
835 * then before the OI scrub has completely rebuilt the OI files, the OI mappings of
836 * the master MDT-object and slave MDT-object may be invalid. Usually, this is
837 * not a problem for the master MDT-object, because when locating the master
838 * MDT-object we do a name-based lookup (for the striped directory itself)
839 * first, and during that process we can set up the correct OI mapping for the
840 * master MDT-object. But it is trouble for the slave MDT-object, because
841 * the client will not trigger a name-based lookup on the MDT to locate the slave
842 * MDT-object before looking up an item under the striped directory; then in
843 * osd_fid_lookup() it will find that the OI mapping for the slave MDT-object
844 * is invalid and will not know what the right OI mapping is, so the MDT has
845 * to return -EINPROGRESS to the client to signal that the OI scrub is rebuilding
846 * the OI file, the related OI mapping is unknown yet, please try again later. And
847 * then the client will retry the RPC again and again until the related OI mapping
848 * has been updated. That is quite inefficient.
850 * To resolve the above trouble, we handle it as the following two cases:
852 * 1) The slave MDT-object and the master MDT-object are on different MDTs.
853 * This is relatively easy. Being one of the remote MDT-objects, the slave
854 * MDT-object is linked under /REMOTE_PARENT_DIR with its FID string as the name.
855 * We can locate the slave MDT-object via a lookup in /REMOTE_PARENT_DIR
856 * directly. Please check osd_fid_lookup().
858 * 2) The slave MDT-object and the master MDT-object reside on the same MDT.
859 * In that case, while looking up the master MDT-object, we will look up the
860 * slave MDT-object via readdir against the master MDT-object, because the
861 * slave MDT-objects' information is stored as sub-directories with the name
862 * "${FID}:${index}". Then, when the local slave MDT-object is found, its OI
863 * mapping will be recorded, and subsequent osd_fid_lookup() calls will know
864 * the correct OI mapping for the slave MDT-object. */
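/* osd_check_lmv() below implements case 2): it is invoked from
* osd_fid_lookup() for directories while the OI scrub is running (or has
* been triggered in partial mode); see the S_ISDIR() check in that function. */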
865 static int osd_check_lmv(struct osd_thread_info *oti, struct osd_device *dev,
866 struct inode *inode, struct osd_idmap_cache *oic)
868 struct lu_buf *buf = &oti->oti_big_buf;
869 struct dentry *dentry = &oti->oti_obj_dentry;
870 struct file *filp = &oti->oti_file;
871 const struct file_operations *fops;
872 struct lmv_mds_md_v1 *lmv1;
873 struct osd_check_lmv_buf oclb = {
874 #ifdef HAVE_DIR_CONTEXT
875 .ctx.actor = osd_stripe_dir_filldir,
885 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMV, buf->lb_buf,
888 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMV, NULL, 0);
890 lu_buf_realloc(buf, rc);
891 if (buf->lb_buf == NULL)
892 GOTO(out, rc = -ENOMEM);
898 if (unlikely(rc == 0 || rc == -ENODATA))
904 if (unlikely(buf->lb_buf == NULL)) {
905 lu_buf_realloc(buf, rc);
906 if (buf->lb_buf == NULL)
907 GOTO(out, rc = -ENOMEM);
913 if (le32_to_cpu(lmv1->lmv_magic) != LMV_MAGIC_V1)
917 dentry->d_inode = inode;
918 dentry->d_sb = inode->i_sb;
920 filp->f_path.dentry = dentry;
921 filp->f_mode = FMODE_64BITHASH;
922 filp->f_mapping = inode->i_mapping;
924 filp->private_data = NULL;
925 set_file_inode(filp, inode);
927 #ifdef HAVE_DIR_CONTEXT
928 oclb.ctx.pos = filp->f_pos;
929 rc = fops->iterate(filp, &oclb.ctx);
930 filp->f_pos = oclb.ctx.pos;
932 rc = fops->readdir(filp, &oclb, osd_stripe_dir_filldir);
934 fops->release(inode, filp);
938 CDEBUG(D_LFSCK, "%.16s: fail to check LMV EA, inode = %lu/%u,"
940 LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
941 inode->i_ino, inode->i_generation,
942 PFID(&oic->oic_fid), rc);
949 static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
950 const struct lu_fid *fid,
951 const struct lu_object_conf *conf)
953 struct osd_thread_info *info;
954 struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
955 struct osd_device *dev;
956 struct osd_idmap_cache *oic;
957 struct osd_inode_id *id;
958 struct inode *inode = NULL;
959 struct osd_scrub *scrub;
960 struct scrub_file *sf;
961 __u32 flags = SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT |
969 LINVRNT(osd_invariant(obj));
970 LASSERT(obj->oo_inode == NULL);
971 LASSERTF(fid_is_sane(fid) || fid_is_idif(fid), DFID"\n", PFID(fid));
974 scrub = &dev->od_scrub;
975 sf = &scrub->os_file;
976 info = osd_oti_get(env);
978 oic = &info->oti_cache;
980 if (OBD_FAIL_CHECK(OBD_FAIL_SRV_ENOENT))
983 /* The object is created as a locking anchor, or the object is to
984 * be created on disk. No need to call osd_oi_lookup() here because a FID
985 * should never be re-used; if it really is a duplicate FID for some
986 * unexpected reason, we should be able to detect it later by calling
987 * do_create->osd_oi_insert(). */
988 if (conf != NULL && conf->loc_flags & LOC_F_NEW)
989 GOTO(out, result = 0);
991 /* Search order: 1. per-thread cache. */
992 if (lu_fid_eq(fid, &oic->oic_fid) &&
993 likely(oic->oic_dev == dev)) {
999 if (!list_empty(&scrub->os_inconsistent_items)) {
1000 /* Search order: 2. OI scrub pending list. */
1001 result = osd_oii_lookup(dev, fid, id);
1007 /* Search order: 3. OI files. */
1008 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1009 if (result == -ENOENT) {
1010 if (!(fid_is_norm(fid) || fid_is_igif(fid)) ||
1011 fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) ||
1012 !ldiskfs_test_bit(osd_oi_fid2idx(dev,fid),
1014 GOTO(out, result = 0);
1023 inode = osd_iget_check(info, dev, fid, id, cached);
1024 if (IS_ERR(inode)) {
1025 result = PTR_ERR(inode);
1026 if (result == -ENOENT || result == -ESTALE)
1027 GOTO(out, result = -ENOENT);
1029 if (result == -EREMCHG) {
1032 /* We still have a chance to get the valid inode: for an
1033 * object which is referenced by a remote name entry, the
1034 * object on the local MDT will be linked under the dir
1035 * "/REMOTE_PARENT_DIR" with its FID string as the name.
1037 * We do not know whether the object for the given FID
1038 * is referenced by some remote name entry or not, and
1039 * especially for DNE II, a multiply-linked object may
1040 * have many name entries residing on many MDTs.
1042 * To simplify the operation, OSD will not distinguish
1043 * further, and just looks up "/REMOTE_PARENT_DIR". Usually this
1044 * only happens for an RPC from another MDT during the
1045 * OI scrub, or for a client-side RPC with FID only,
1046 * such as FID to path, or from an old connected client. */
1048 !fid_is_on_ost(info, dev, fid, OI_CHECK_FLD)) {
1049 rc1 = osd_lookup_in_remote_parent(info, dev,
1054 flags |= SS_AUTO_PARTIAL;
1055 flags &= ~SS_AUTO_FULL;
1060 if (thread_is_running(&scrub->os_thread)) {
1061 if (scrub->os_partial_scan &&
1062 !scrub->os_in_join) {
1065 if (inode != NULL && !IS_ERR(inode)) {
1068 osd_add_oi_cache(info, dev, id,
1070 osd_oii_insert(dev, oic, true);
1072 result = -EINPROGRESS;
1075 } else if (!dev->od_noscrub) {
1078 rc1 = osd_scrub_start(dev, flags);
1079 LCONSOLE_WARN("%.16s: trigger OI scrub by RPC "
1080 "for the "DFID" with flags 0x%x,"
1081 " rc = %d\n", osd_name(dev),
1082 PFID(fid), flags, rc1);
1083 if (rc1 == 0 || rc1 == -EALREADY) {
1084 if (inode != NULL && !IS_ERR(inode)) {
1087 osd_add_oi_cache(info, dev, id,
1089 osd_oii_insert(dev, oic, true);
1091 result = -EINPROGRESS;
1101 if (inode == NULL || IS_ERR(inode))
1103 } else if (remote) {
1107 obj->oo_inode = inode;
1108 LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
1110 result = osd_check_lma(env, obj);
1112 if (result == -ENODATA) {
1114 result = osd_oi_lookup(info, dev, fid, id,
1117 /* result == -ENOENT means that the OI
1118 * mapping has been removed by a race;
1119 * the target inode belongs to another
1122 * Other errors can also be returned
1125 obj->oo_inode = NULL;
1128 /* result == 0 means the cached OI
1129 * mapping is still in the OI file and
1130 * the target inode is valid. */
1133 /* The current OI mapping is from the OI file,
1134 * since the inode has been found via
1135 * osd_iget_check(), there is no need to recheck the OI. */
1143 obj->oo_inode = NULL;
1144 if (result != -EREMCHG)
1148 result = osd_oi_lookup(info, dev, fid, id,
1150 /* result == -ENOENT means the cached OI mapping
1151 * has been removed from the OI file by a race;
1152 * the above target inode belongs to another object.
1154 * Other errors can also be returned directly. */
1158 /* result == 0, goto trigger */
1160 /* The current OI mapping is from the OI file,
1161 * since the inode has been found via
1162 * osd_iget_check(), there is no need to recheck the OI. */
1169 obj->oo_compat_dot_created = 1;
1170 obj->oo_compat_dotdot_created = 1;
1172 if (S_ISDIR(inode->i_mode) &&
1173 (flags & SS_AUTO_PARTIAL || sf->sf_status == SS_SCANNING))
1174 osd_check_lmv(info, dev, inode, oic);
1177 GOTO(out, result = 0);
1179 LASSERT(obj->oo_hl_head == NULL);
1180 obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
1181 if (obj->oo_hl_head == NULL) {
1182 obj->oo_inode = NULL;
1184 GOTO(out, result = -ENOMEM);
1186 GOTO(out, result = 0);
1189 if (result != 0 && cached)
1190 fid_zero(&oic->oic_fid);
1192 LINVRNT(osd_invariant(obj));
1197 * Concurrency: shouldn't matter.
1199 static void osd_object_init0(struct osd_object *obj)
1201 LASSERT(obj->oo_inode != NULL);
1202 obj->oo_dt.do_body_ops = &osd_body_ops;
1203 obj->oo_dt.do_lu.lo_header->loh_attr |=
1204 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
1208 * Concurrency: no concurrent access is possible that early in object
1211 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
1212 const struct lu_object_conf *conf)
1214 struct osd_object *obj = osd_obj(l);
1217 LINVRNT(osd_invariant(obj));
1219 if (fid_is_otable_it(&l->lo_header->loh_fid)) {
1220 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
1221 l->lo_header->loh_attr |= LOHA_EXISTS;
1225 result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
1226 obj->oo_dt.do_body_ops = &osd_body_ops_new;
1227 if (result == 0 && obj->oo_inode != NULL) {
1228 struct osd_thread_info *oti = osd_oti_get(env);
1229 struct lustre_mdt_attrs *lma = &oti->oti_mdt_attrs;
1231 osd_object_init0(obj);
1232 result = osd_get_lma(oti, obj->oo_inode,
1233 &oti->oti_obj_dentry, lma);
1235 /* Convert LMAI flags to lustre LMA flags
1236 * and cache it to oo_lma_flags */
1238 lma_to_lustre_flags(lma->lma_incompat);
1239 } else if (result == -ENODATA) {
1244 LINVRNT(osd_invariant(obj));
1248 /* The first part of oxe_buf is the xattr name, '\0' terminated.
1249 * The remaining part is the value, in binary form. */
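/* Layout sketch (see osd_oxc_add() below): for the name "user.foo" and a
* 4-byte value, the entry occupies
* oxe_len = sizeof(struct osd_xattr_entry) + strlen("user.foo") + 1 + 4
* bytes, and the value starts at oxe_buf + oxe_namelen + 1. */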
1250 struct osd_xattr_entry {
1251 struct list_head oxe_list;
1255 struct rcu_head oxe_rcu;
1259 static struct osd_xattr_entry *osd_oxc_lookup(struct osd_object *obj,
1263 struct osd_xattr_entry *oxe;
1265 list_for_each_entry(oxe, &obj->oo_xattr_list, oxe_list) {
1266 if (namelen == oxe->oxe_namelen &&
1267 strncmp(name, oxe->oxe_buf, namelen) == 0)
1274 static int osd_oxc_get(struct osd_object *obj, const char *name,
1277 struct osd_xattr_entry *oxe;
1282 oxe = osd_oxc_lookup(obj, name, strlen(name));
1288 if (!oxe->oxe_exist) {
1293 vallen = oxe->oxe_len - sizeof(*oxe) - oxe->oxe_namelen - 1;
1294 LASSERT(vallen > 0);
1296 if (buf->lb_buf == NULL) {
1301 if (buf->lb_len < vallen) {
1306 memcpy(buf->lb_buf, oxe->oxe_buf + oxe->oxe_namelen + 1, vallen);
1312 static void osd_oxc_free(struct rcu_head *head)
1314 struct osd_xattr_entry *oxe;
1316 oxe = container_of(head, struct osd_xattr_entry, oxe_rcu);
1317 OBD_FREE(oxe, oxe->oxe_len);
1320 static inline void __osd_oxc_del(struct osd_object *obj, const char *name)
1322 struct osd_xattr_entry *oxe;
1324 oxe = osd_oxc_lookup(obj, name, strlen(name));
1326 list_del(&oxe->oxe_list);
1327 call_rcu(&oxe->oxe_rcu, osd_oxc_free);
1331 static void osd_oxc_add(struct osd_object *obj, const char *name,
1332 const char *buf, int buflen)
1334 struct osd_xattr_entry *oxe;
1335 size_t namelen = strlen(name);
1336 size_t len = sizeof(*oxe) + namelen + 1 + buflen;
1338 OBD_ALLOC(oxe, len);
1342 INIT_LIST_HEAD(&oxe->oxe_list);
1344 oxe->oxe_namelen = namelen;
1345 memcpy(oxe->oxe_buf, name, namelen);
1347 LASSERT(buf != NULL);
1348 memcpy(oxe->oxe_buf + namelen + 1, buf, buflen);
1349 oxe->oxe_exist = true;
1351 oxe->oxe_exist = false;
1354 /* this should rarely be called; just remove the old entry and add the new one */
1355 spin_lock(&obj->oo_guard);
1356 __osd_oxc_del(obj, name);
1357 list_add_tail(&oxe->oxe_list, &obj->oo_xattr_list);
1358 spin_unlock(&obj->oo_guard);
1361 static void osd_oxc_del(struct osd_object *obj, const char *name)
1363 spin_lock(&obj->oo_guard);
1364 __osd_oxc_del(obj, name);
1365 spin_unlock(&obj->oo_guard);
1368 static void osd_oxc_fini(struct osd_object *obj)
1370 struct osd_xattr_entry *oxe, *next;
1372 list_for_each_entry_safe(oxe, next, &obj->oo_xattr_list, oxe_list) {
1373 list_del(&oxe->oxe_list);
1374 OBD_FREE(oxe, oxe->oxe_len);
1379 * Concurrency: no concurrent access is possible that late in object
1382 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
1384 struct osd_object *obj = osd_obj(l);
1386 LINVRNT(osd_invariant(obj));
1389 dt_object_fini(&obj->oo_dt);
1390 if (obj->oo_hl_head != NULL)
1391 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
1396 * Concurrency: no concurrent access is possible that late in object
1399 static void osd_index_fini(struct osd_object *o)
1401 struct iam_container *bag;
1403 if (o->oo_dir != NULL) {
1404 bag = &o->oo_dir->od_container;
1405 if (o->oo_inode != NULL) {
1406 if (bag->ic_object == o->oo_inode)
1407 iam_container_fini(bag);
1409 OBD_FREE_PTR(o->oo_dir);
1415 * Concurrency: no concurrent access is possible that late in object
1416 * life-cycle (for all existing callers, that is. New callers have to provide
1417 * their own locking.)
1419 static int osd_inode_unlinked(const struct inode *inode)
1421 return inode->i_nlink == 0;
1425 OSD_TXN_OI_DELETE_CREDITS = 20,
1426 OSD_TXN_INODE_DELETE_CREDITS = 20
1433 #if OSD_THANDLE_STATS
1435 * Set time when the handle is allocated
1437 static void osd_th_alloced(struct osd_thandle *oth)
1439 oth->oth_alloced = cfs_time_current();
1443 * Set time when the handle started
1445 static void osd_th_started(struct osd_thandle *oth)
1447 oth->oth_started = cfs_time_current();
1451 * Helper function to convert time interval to microseconds packed in
1454 static long interval_to_usec(cfs_time_t start, cfs_time_t end)
1458 cfs_duration_usec(cfs_time_sub(end, start), &val);
1459 return val.tv_sec * 1000000 + val.tv_usec;
1463 * Check whether we have been dealing with this handle for too long.
1465 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
1466 cfs_time_t alloced, cfs_time_t started,
1469 cfs_time_t now = cfs_time_current();
1471 LASSERT(dev != NULL);
1473 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
1474 interval_to_usec(alloced, started));
1475 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
1476 interval_to_usec(started, closed));
1477 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
1478 interval_to_usec(closed, now));
1480 if (cfs_time_before(cfs_time_add(alloced, cfs_time_seconds(30)), now)) {
1481 CWARN("transaction handle %p was open for too long: "
1482 "now "CFS_TIME_T" ,"
1483 "alloced "CFS_TIME_T" ,"
1484 "started "CFS_TIME_T" ,"
1485 "closed "CFS_TIME_T"\n",
1486 oth, now, alloced, started, closed);
1487 libcfs_debug_dumpstack(NULL);
1491 #define OSD_CHECK_SLOW_TH(oth, dev, expr) \
1493 cfs_time_t __closed = cfs_time_current(); \
1494 cfs_time_t __alloced = oth->oth_alloced; \
1495 cfs_time_t __started = oth->oth_started; \
1498 __osd_th_check_slow(oth, dev, __alloced, __started, __closed); \
1501 #else /* OSD_THANDLE_STATS */
1503 #define osd_th_alloced(h) do {} while(0)
1504 #define osd_th_started(h) do {} while(0)
1505 #define OSD_CHECK_SLOW_TH(oth, dev, expr) expr
1507 #endif /* OSD_THANDLE_STATS */
1510 * Concurrency: doesn't access mutable data.
1512 static int osd_param_is_not_sane(const struct osd_device *dev,
1513 const struct thandle *th)
1515 struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
1517 return oh->ot_credits > osd_transaction_size(dev);
1521 * Concurrency: shouldn't matter.
1523 static void osd_trans_commit_cb(struct super_block *sb,
1524 struct ldiskfs_journal_cb_entry *jcb, int error)
1526 struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
1527 struct thandle *th = &oh->ot_super;
1528 struct lu_device *lud = &th->th_dev->dd_lu_dev;
1529 struct dt_txn_commit_cb *dcb, *tmp;
1531 LASSERT(oh->ot_handle == NULL);
1534 CERROR("transaction @0x%p commit error: %d\n", th, error);
1536 dt_txn_hook_commit(th);
1538 /* call per-transaction callbacks if any */
1539 list_for_each_entry_safe(dcb, tmp, &oh->ot_commit_dcb_list,
1541 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
1542 "commit callback entry: magic=%x name='%s'\n",
1543 dcb->dcb_magic, dcb->dcb_name);
1544 list_del_init(&dcb->dcb_linkage);
1545 dcb->dcb_func(NULL, th, dcb, error);
1548 lu_ref_del_at(&lud->ld_reference, &oh->ot_dev_link, "osd-tx", th);
1552 lu_context_exit(&th->th_ctx);
1553 lu_context_fini(&th->th_ctx);
1557 #ifndef HAVE_SB_START_WRITE
1558 # define sb_start_write(sb) do {} while (0)
1559 # define sb_end_write(sb) do {} while (0)
1562 static struct thandle *osd_trans_create(const struct lu_env *env,
1563 struct dt_device *d)
1565 struct osd_thread_info *oti = osd_oti_get(env);
1566 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1567 struct osd_thandle *oh;
1571 /* no pending IO in this thread should be left over from a previous request */
1572 LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
1574 sb_start_write(osd_sb(osd_dt_dev(d)));
1576 OBD_ALLOC_GFP(oh, sizeof *oh, GFP_NOFS);
1578 oh->ot_quota_trans = &oti->oti_quota_trans;
1579 memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
1583 th->th_tags = LCT_TX_HANDLE;
1585 INIT_LIST_HEAD(&oh->ot_commit_dcb_list);
1586 INIT_LIST_HEAD(&oh->ot_stop_dcb_list);
1589 memset(oti->oti_declare_ops, 0,
1590 sizeof(oti->oti_declare_ops));
1591 memset(oti->oti_declare_ops_cred, 0,
1592 sizeof(oti->oti_declare_ops_cred));
1593 memset(oti->oti_declare_ops_used, 0,
1594 sizeof(oti->oti_declare_ops_used));
1596 sb_end_write(osd_sb(osd_dt_dev(d)));
1597 th = ERR_PTR(-ENOMEM);
1602 void osd_trans_dump_creds(const struct lu_env *env, struct thandle *th)
1604 struct osd_thread_info *oti = osd_oti_get(env);
1605 struct osd_thandle *oh;
1607 oh = container_of0(th, struct osd_thandle, ot_super);
1608 LASSERT(oh != NULL);
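/* each triple below is printed as: times declared / credits reserved /
* credits used (oti_declare_ops / oti_declare_ops_cred / oti_declare_ops_used) */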
1610 CWARN(" create: %u/%u/%u, destroy: %u/%u/%u\n",
1611 oti->oti_declare_ops[OSD_OT_CREATE],
1612 oti->oti_declare_ops_cred[OSD_OT_CREATE],
1613 oti->oti_declare_ops_used[OSD_OT_CREATE],
1614 oti->oti_declare_ops[OSD_OT_DESTROY],
1615 oti->oti_declare_ops_cred[OSD_OT_DESTROY],
1616 oti->oti_declare_ops_used[OSD_OT_DESTROY]);
1617 CWARN(" attr_set: %u/%u/%u, xattr_set: %u/%u/%u\n",
1618 oti->oti_declare_ops[OSD_OT_ATTR_SET],
1619 oti->oti_declare_ops_cred[OSD_OT_ATTR_SET],
1620 oti->oti_declare_ops_used[OSD_OT_ATTR_SET],
1621 oti->oti_declare_ops[OSD_OT_XATTR_SET],
1622 oti->oti_declare_ops_cred[OSD_OT_XATTR_SET],
1623 oti->oti_declare_ops_used[OSD_OT_XATTR_SET]);
1624 CWARN(" write: %u/%u/%u, punch: %u/%u/%u, quota %u/%u/%u\n",
1625 oti->oti_declare_ops[OSD_OT_WRITE],
1626 oti->oti_declare_ops_cred[OSD_OT_WRITE],
1627 oti->oti_declare_ops_used[OSD_OT_WRITE],
1628 oti->oti_declare_ops[OSD_OT_PUNCH],
1629 oti->oti_declare_ops_cred[OSD_OT_PUNCH],
1630 oti->oti_declare_ops_used[OSD_OT_PUNCH],
1631 oti->oti_declare_ops[OSD_OT_QUOTA],
1632 oti->oti_declare_ops_cred[OSD_OT_QUOTA],
1633 oti->oti_declare_ops_used[OSD_OT_QUOTA]);
1634 CWARN(" insert: %u/%u/%u, delete: %u/%u/%u\n",
1635 oti->oti_declare_ops[OSD_OT_INSERT],
1636 oti->oti_declare_ops_cred[OSD_OT_INSERT],
1637 oti->oti_declare_ops_used[OSD_OT_INSERT],
1638 oti->oti_declare_ops[OSD_OT_DELETE],
1639 oti->oti_declare_ops_cred[OSD_OT_DELETE],
1640 oti->oti_declare_ops_used[OSD_OT_DELETE]);
1641 CWARN(" ref_add: %u/%u/%u, ref_del: %u/%u/%u\n",
1642 oti->oti_declare_ops[OSD_OT_REF_ADD],
1643 oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
1644 oti->oti_declare_ops_used[OSD_OT_REF_ADD],
1645 oti->oti_declare_ops[OSD_OT_REF_DEL],
1646 oti->oti_declare_ops_cred[OSD_OT_REF_DEL],
1647 oti->oti_declare_ops_used[OSD_OT_REF_DEL]);
1651 * Concurrency: shouldn't matter.
1653 static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
1656 struct osd_thread_info *oti = osd_oti_get(env);
1657 struct osd_device *dev = osd_dt_dev(d);
1659 struct osd_thandle *oh;
1664 LASSERT(current->journal_info == NULL);
1666 oh = container_of0(th, struct osd_thandle, ot_super);
1667 LASSERT(oh != NULL);
1668 LASSERT(oh->ot_handle == NULL);
1670 rc = dt_txn_hook_start(env, d, th);
1674 if (unlikely(osd_param_is_not_sane(dev, th))) {
1675 static unsigned long last_printed;
1676 static int last_credits;
1678 /* don't make noise on tiny testing systems;
1679 * actual credits misuse will be caught anyway */
1680 if (last_credits != oh->ot_credits &&
1681 time_after(jiffies, last_printed +
1682 msecs_to_jiffies(60 * MSEC_PER_SEC)) &&
1683 osd_transaction_size(dev) > 512) {
1684 osd_trans_dump_creds(env, th);
1685 libcfs_debug_dumpstack(NULL);
1686 last_credits = oh->ot_credits;
1687 last_printed = jiffies;
1689 /* XXX Limit the credits to 'max_transaction_buffers', and
1690 * let the underlying filesystem catch the error if
1691 * we really need so many credits.
1693 * This should be removed when we can calculate the
1694 * credits precisely. */
1695 oh->ot_credits = osd_transaction_size(dev);
1699 * XXX temporary stuff. Some abstraction layer should
1702 jh = osd_journal_start_sb(osd_sb(dev), LDISKFS_HT_MISC, oh->ot_credits);
1706 LASSERT(oti->oti_txns == 0);
1707 lu_context_init(&th->th_ctx, th->th_tags);
1708 lu_context_enter(&th->th_ctx);
1710 lu_device_get(&d->dd_lu_dev);
1711 lu_ref_add_at(&d->dd_lu_dev.ld_reference, &oh->ot_dev_link,
1722 static int osd_seq_exists(const struct lu_env *env,
1723 struct osd_device *osd, u64 seq)
1725 struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
1726 struct seq_server_site *ss = osd_seq_site(osd);
1730 LASSERT(ss != NULL);
1731 LASSERT(ss->ss_server_fld != NULL);
1733 rc = osd_fld_lookup(env, osd, seq, range);
1736 CERROR("%s: can't lookup FLD sequence "LPX64
1737 ": rc = %d\n", osd_name(osd), seq, rc);
1741 RETURN(ss->ss_node_id == range->lsr_index);
1744 static void osd_trans_stop_cb(struct osd_thandle *oth, int result)
1746 struct dt_txn_commit_cb *dcb;
1747 struct dt_txn_commit_cb *tmp;
1749 /* call per-transaction stop callbacks if any */
1750 list_for_each_entry_safe(dcb, tmp, &oth->ot_stop_dcb_list,
1752 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
1753 "commit callback entry: magic=%x name='%s'\n",
1754 dcb->dcb_magic, dcb->dcb_name);
1755 list_del_init(&dcb->dcb_linkage);
1756 dcb->dcb_func(NULL, &oth->ot_super, dcb, result);
1761 * Concurrency: shouldn't matter.
1763 static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
1766 int rc = 0, remove_agents = 0;
1767 struct osd_thandle *oh;
1768 struct osd_thread_info *oti = osd_oti_get(env);
1769 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1770 struct osd_device *osd = osd_dt_dev(th->th_dev);
1771 struct qsd_instance *qsd = osd->od_quota_slave;
1772 struct lquota_trans *qtrans;
1775 oh = container_of0(th, struct osd_thandle, ot_super);
1777 /* reset OI cache for safety */
1778 oti->oti_ins_cache_used = 0;
1780 remove_agents = oh->ot_remove_agents;
1782 qtrans = oh->ot_quota_trans;
1783 oh->ot_quota_trans = NULL;
1785 if (oh->ot_handle != NULL) {
1786 handle_t *hdl = oh->ot_handle;
1789 * add commit callback
1790 * notice we don't do this in osd_trans_start()
1791 * as underlying transaction can change during truncate
1793 ldiskfs_journal_callback_add(hdl, osd_trans_commit_cb,
1796 LASSERT(oti->oti_txns == 1);
1799 rc = dt_txn_hook_stop(env, th);
1801 CERROR("%s: failed in transaction hook: rc = %d\n",
1804 osd_trans_stop_cb(oh, rc);
1805 /* hook functions might modify th_sync */
1806 hdl->h_sync = th->th_sync;
1808 oh->ot_handle = NULL;
1809 OSD_CHECK_SLOW_TH(oh, osd, rc = ldiskfs_journal_stop(hdl));
1811 CERROR("%s: failed to stop transaction: rc = %d\n",
1814 osd_trans_stop_cb(oh, th->th_result);
1818 /* inform the quota slave device that the transaction is stopping */
1819 qsd_op_end(env, qsd, qtrans);
1821 /* as we want IO to the journal and data IO to be concurrent, we don't block
1822 * awaiting data IO completion in osd_do_bio(); instead we wait here
1823 * once the transaction is submitted to the journal. all regular requests
1824 * don't do direct IO (except read/write), thus this wait_event becomes
1827 * IMPORTANT: we have to wait till any IO submitted by the thread is
1828 * completed, otherwise the iobuf may be corrupted by a different request
1830 wait_event(iobuf->dr_wait,
1831 atomic_read(&iobuf->dr_numreqs) == 0);
1832 osd_fini_iobuf(osd, iobuf);
1834 rc = iobuf->dr_error;
1836 if (unlikely(remove_agents != 0))
1837 osd_process_scheduled_agent_removals(env, osd);
1839 sb_end_write(osd_sb(osd));
1844 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
1846 struct osd_thandle *oh = container_of0(th, struct osd_thandle,
1849 LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
1850 LASSERT(&dcb->dcb_func != NULL);
1851 if (dcb->dcb_flags & DCB_TRANS_STOP)
1852 list_add(&dcb->dcb_linkage, &oh->ot_stop_dcb_list);
1854 list_add(&dcb->dcb_linkage, &oh->ot_commit_dcb_list);
1860 * Called just before object is freed. Releases all resources except for
1861 * object itself (that is released by osd_object_free()).
1863 * Concurrency: no concurrent access is possible that late in object
1866 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
1868 struct osd_object *obj = osd_obj(l);
1869 struct inode *inode = obj->oo_inode;
1871 LINVRNT(osd_invariant(obj));
1874 * If object is unlinked remove fid->ino mapping from object index.
1877 osd_index_fini(obj);
1878 if (inode != NULL) {
1879 struct qsd_instance *qsd = osd_obj2dev(obj)->od_quota_slave;
1880 qid_t uid = i_uid_read(inode);
1881 qid_t gid = i_gid_read(inode);
1884 obj->oo_inode = NULL;
1887 struct osd_thread_info *info = osd_oti_get(env);
1888 struct lquota_id_info *qi = &info->oti_qi;
1890 /* Release granted quota to master if necessary */
1891 qi->lqi_id.qid_uid = uid;
1892 qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
1894 qi->lqi_id.qid_uid = gid;
1895 qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
1901 * Concurrency: ->loo_object_release() is called under site spin-lock.
1903 static void osd_object_release(const struct lu_env *env,
1904 struct lu_object *l)
1906 struct osd_object *o = osd_obj(l);
1907 /* nobody should be releasing a non-destroyed object with nlink=0;
1908 * the API allows this, but ldiskfs doesn't like it and then reports
1909 * this inode as deleted */
1910 if (unlikely(!o->oo_destroyed && o->oo_inode && o->oo_inode->i_nlink == 0))
1915 * Concurrency: shouldn't matter.
1917 static int osd_object_print(const struct lu_env *env, void *cookie,
1918 lu_printer_t p, const struct lu_object *l)
1920 struct osd_object *o = osd_obj(l);
1921 struct iam_descr *d;
1923 if (o->oo_dir != NULL)
1924 d = o->oo_dir->od_container.ic_descr;
1927 return (*p)(env, cookie,
1928 LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
1930 o->oo_inode ? o->oo_inode->i_ino : 0UL,
1931 o->oo_inode ? o->oo_inode->i_generation : 0,
1932 d ? d->id_ops->id_name : "plain");
1936 * Concurrency: shouldn't matter.
1938 int osd_statfs(const struct lu_env *env, struct dt_device *d,
1939 struct obd_statfs *sfs)
1941 struct osd_device *osd = osd_dt_dev(d);
1942 struct super_block *sb = osd_sb(osd);
1943 struct kstatfs *ksfs;
1947 if (unlikely(osd->od_mnt == NULL))
1948 return -EINPROGRESS;
1950 /* osd_lproc.c calls this without env; allocate ksfs for that case */
1951 if (unlikely(env == NULL)) {
1952 OBD_ALLOC_PTR(ksfs);
1956 ksfs = &osd_oti_get(env)->oti_ksfs;
1959 result = sb->s_op->statfs(sb->s_root, ksfs);
1963 statfs_pack(sfs, ksfs);
1964 if (unlikely(sb->s_flags & MS_RDONLY))
1965 sfs->os_state = OS_STATE_READONLY;
1966 if (LDISKFS_HAS_INCOMPAT_FEATURE(sb,
1967 LDISKFS_FEATURE_INCOMPAT_EXTENTS))
1968 sfs->os_maxbytes = sb->s_maxbytes;
1970 sfs->os_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
1973 * Reserve some space so as to avoid fragmenting the filesystem too much.
1974 * Fragmentation not only impacts performance, but can also increase
1975 * metadata overhead significantly, causing grant calculation to be
1978 * Reserve 0.78% of total space, at least 8MB for small filesystems.
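* (implemented below as os_blocks >> OSD_STATFS_RESERVED_SHIFT, with
* OSD_STATFS_RESERVED providing the 8MB floor; a right shift of 7 gives
* 1/128 ~= 0.78% of the total blocks)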
1980 CLASSERT(OSD_STATFS_RESERVED > LDISKFS_MAX_BLOCK_SIZE);
1981 reserved = OSD_STATFS_RESERVED >> sb->s_blocksize_bits;
1982 if (likely(sfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
1983 reserved = sfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
1985 sfs->os_blocks -= reserved;
1986 sfs->os_bfree -= min(reserved, sfs->os_bfree);
1987 sfs->os_bavail -= min(reserved, sfs->os_bavail);
1990 if (unlikely(env == NULL))
1996 * Estimate the space needed for file creations. We assume the largest
1997 * filename is an object number up to 2^64 - 1, hence a filename of 20 chars.
1998 * This is 28 bytes per object (8-byte dirent header + 20-char name, padded to a 4-byte boundary), which is 28MB for 1M objects ... not so bad.
2000 #ifdef __LDISKFS_DIR_REC_LEN
2001 #define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
2003 #define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
2007 * Concurrency: doesn't access mutable data.
2009 static void osd_conf_get(const struct lu_env *env,
2010 const struct dt_device *dev,
2011 struct dt_device_param *param)
2013 struct super_block *sb = osd_sb(osd_dt_dev(dev));
2017 * XXX should be taken from not-yet-existing fs abstraction layer.
2019 param->ddp_max_name_len = LDISKFS_NAME_LEN;
2020 param->ddp_max_nlink = LDISKFS_LINK_MAX;
2021 param->ddp_symlink_max = sb->s_blocksize;
2022 param->ddp_mount_type = LDD_MT_LDISKFS;
2023 if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EXTENTS))
2024 param->ddp_maxbytes = sb->s_maxbytes;
2026 param->ddp_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
2027 /* inodes are statically allocated, so per-inode space consumption
2028 * is the space consumed by the directory entry */
2029 param->ddp_inodespace = PER_OBJ_USAGE;
2030 /* EXT_INIT_MAX_LEN is the theoretical maximum extent size (32k blocks
2031 * = 128MB) which is unlikely to be hit in real life. Report a smaller
2032 * maximum length so as not to undercount the actual number of extents
2033 * needed for writing a file. */
2034 param->ddp_max_extent_blks = EXT_INIT_MAX_LEN >> 2;
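/* with EXT_INIT_MAX_LEN = 32k blocks this reports 8k blocks, i.e. 32MB
* per extent with 4KB blocks */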
2035 /* worst-case extent insertion metadata overhead */
2036 param->ddp_extent_tax = 6 * LDISKFS_BLOCK_SIZE(sb);
2037 param->ddp_mntopts = 0;
2038 if (test_opt(sb, XATTR_USER))
2039 param->ddp_mntopts |= MNTOPT_USERXATTR;
2040 if (test_opt(sb, POSIX_ACL))
2041 param->ddp_mntopts |= MNTOPT_ACL;
2043 /* LOD might calculate the max stripe count based on max_ea_size,
2044 * so we need to take the overhead into account as well:
2045 * xattr_header + magic + xattr_entry_head */
2046 ea_overhead = sizeof(struct ldiskfs_xattr_header) + sizeof(__u32) +
2047 LDISKFS_XATTR_LEN(XATTR_NAME_MAX_LEN);
2049 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
2050 if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
2051 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE -
2055 param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
2059 * Concurrency: shouldn't matter.
2061 static int osd_sync(const struct lu_env *env, struct dt_device *d)
2065 CDEBUG(D_CACHE, "syncing OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
2067 rc = ldiskfs_force_commit(osd_sb(osd_dt_dev(d)));
2069 CDEBUG(D_CACHE, "synced OSD %s: rc = %d\n",
2070 LUSTRE_OSD_LDISKFS_NAME, rc);
2076 * Start commit for OSD device.
2078 * An implementation of dt_commit_async method for OSD device.
2079 * Asynchronously starts the underlying fs sync and thereby a transaction
2082 * \param env environment
2083 * \param d dt device
2085 * \see dt_device_operations
2087 static int osd_commit_async(const struct lu_env *env,
2088 struct dt_device *d)
2090 struct super_block *s = osd_sb(osd_dt_dev(d));
2093 CDEBUG(D_HA, "async commit OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
2094 RETURN(s->s_op->sync_fs(s, 0));
2098 * Concurrency: shouldn't matter.
2101 static int osd_ro(const struct lu_env *env, struct dt_device *d)
2103 struct super_block *sb = osd_sb(osd_dt_dev(d));
2104 struct block_device *dev = sb->s_bdev;
2105 #ifdef HAVE_DEV_SET_RDONLY
2106 struct block_device *jdev = LDISKFS_SB(sb)->journal_bdev;
2109 int rc = -EOPNOTSUPP;
2113 #ifdef HAVE_DEV_SET_RDONLY
2114 CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
2116 if (sb->s_op->freeze_fs) {
2117 rc = sb->s_op->freeze_fs(sb);
2122 if (jdev && (jdev != dev)) {
2123 CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
2125 dev_set_rdonly(jdev);
2127 CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
2128 dev_set_rdonly(dev);
2130 if (sb->s_op->unfreeze_fs)
2131 sb->s_op->unfreeze_fs(sb);
2136 CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
2137 osd_dt_dev(d)->od_svname, (long)dev, rc);
2143 * Note: we do not count QUOTA credits here.
2144 * If we mount with --data_journal we may need more.
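* For example, each declared index insert reserves 16 credits
* (DTO_INDEX_INSERT below), while a plain attribute update reserves a
* single credit (DTO_ATTR_SET_BASE).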
2146 const int osd_dto_credits_noquota[DTO_NR] = {
2149 * INDEX_EXTRA_TRANS_BLOCKS(8) +
2150 * SINGLEDATA_TRANS_BLOCKS(8)
2151 * XXX Note: maybe iam needs more, since iam has more levels than
2154 [DTO_INDEX_INSERT] = 16,
2157 * just modify a single entry, probably merging a few within a block
2159 [DTO_INDEX_DELETE] = 1,
2163 [DTO_INDEX_UPDATE] = 16,
2165 * 4(inode, inode bits, groups, GDT)
2166 * notice: OI updates are counted separately with DTO_INDEX_INSERT
2168 [DTO_OBJECT_CREATE] = 4,
2170 * 4(inode, inode bits, groups, GDT)
2171 * notice: OI updates are counted separately with DTO_INDEX_DELETE
2173 [DTO_OBJECT_DELETE] = 4,
2175 * Attr set credits (inode)
2177 [DTO_ATTR_SET_BASE] = 1,
2179 * Xattr set. The same as xattr of EXT3.
2180 * DATA_TRANS_BLOCKS(14)
2181 * XXX Note: in the original MDS implementation INDEX_EXTRA_TRANS_BLOCKS
2182 * is also counted in. We do not know why.
2184 [DTO_XATTR_SET] = 14,
2186 * credits for inode change during write.
2188 [DTO_WRITE_BASE] = 3,
2190 * credits for single block write.
2192 [DTO_WRITE_BLOCK] = 14,
2194 * Attr set credits for chown.
2195 * These are extra credits for setattr, and they are zero without quota
2197 [DTO_ATTR_SET_CHOWN] = 0
2200 static const struct dt_device_operations osd_dt_ops = {
2201 .dt_root_get = osd_root_get,
2202 .dt_statfs = osd_statfs,
2203 .dt_trans_create = osd_trans_create,
2204 .dt_trans_start = osd_trans_start,
2205 .dt_trans_stop = osd_trans_stop,
2206 .dt_trans_cb_add = osd_trans_cb_add,
2207 .dt_conf_get = osd_conf_get,
2208 .dt_sync = osd_sync,
2210 .dt_commit_async = osd_commit_async,
2213 static void osd_object_read_lock(const struct lu_env *env,
2214 struct dt_object *dt, unsigned role)
2216 struct osd_object *obj = osd_dt_obj(dt);
2217 struct osd_thread_info *oti = osd_oti_get(env);
2219 LINVRNT(osd_invariant(obj));
2221 LASSERT(obj->oo_owner != env);
2222 down_read_nested(&obj->oo_sem, role);
2224 LASSERT(obj->oo_owner == NULL);
2228 static void osd_object_write_lock(const struct lu_env *env,
2229 struct dt_object *dt, unsigned role)
2231 struct osd_object *obj = osd_dt_obj(dt);
2232 struct osd_thread_info *oti = osd_oti_get(env);
2234 LINVRNT(osd_invariant(obj));
2236 LASSERT(obj->oo_owner != env);
2237 down_write_nested(&obj->oo_sem, role);
2239 LASSERT(obj->oo_owner == NULL);
2240 obj->oo_owner = env;
2244 static void osd_object_read_unlock(const struct lu_env *env,
2245 struct dt_object *dt)
2247 struct osd_object *obj = osd_dt_obj(dt);
2248 struct osd_thread_info *oti = osd_oti_get(env);
2250 LINVRNT(osd_invariant(obj));
2252 LASSERT(oti->oti_r_locks > 0);
2254 up_read(&obj->oo_sem);
2257 static void osd_object_write_unlock(const struct lu_env *env,
2258 struct dt_object *dt)
2260 struct osd_object *obj = osd_dt_obj(dt);
2261 struct osd_thread_info *oti = osd_oti_get(env);
2263 LINVRNT(osd_invariant(obj));
2265 LASSERT(obj->oo_owner == env);
2266 LASSERT(oti->oti_w_locks > 0);
2268 obj->oo_owner = NULL;
2269 up_write(&obj->oo_sem);
2272 static int osd_object_write_locked(const struct lu_env *env,
2273 struct dt_object *dt)
2275 struct osd_object *obj = osd_dt_obj(dt);
2277 LINVRNT(osd_invariant(obj));
2279 return obj->oo_owner == env;
static struct timespec *osd_inode_time(const struct lu_env *env,
                                       struct inode *inode, __u64 seconds)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct timespec *t = &oti->oti_time;

        t->tv_sec = seconds;
        t->tv_nsec = 0;
        *t = timespec_trunc(*t, inode->i_sb->s_time_gran);
        return t;
}

static void osd_inode_getattr(const struct lu_env *env,
                              struct inode *inode, struct lu_attr *attr)
{
        attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
                          LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
                          LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE |
                          LA_TYPE;

        attr->la_atime = LTIME_S(inode->i_atime);
        attr->la_mtime = LTIME_S(inode->i_mtime);
        attr->la_ctime = LTIME_S(inode->i_ctime);
        attr->la_mode = inode->i_mode;
        attr->la_size = i_size_read(inode);
        attr->la_blocks = inode->i_blocks;
        attr->la_uid = i_uid_read(inode);
        attr->la_gid = i_gid_read(inode);
        attr->la_flags = ll_inode_to_ext_flags(inode->i_flags);
        attr->la_nlink = inode->i_nlink;
        attr->la_rdev = inode->i_rdev;
        attr->la_blksize = 1 << inode->i_blkbits;
        attr->la_blkbits = inode->i_blkbits;
}

static int osd_attr_get(const struct lu_env *env,
                        struct dt_object *dt,
                        struct lu_attr *attr)
{
        struct osd_object *obj = osd_dt_obj(dt);

        if (unlikely(!dt_object_exists(dt)))
                return -ENOENT;
        if (unlikely(obj->oo_destroyed))
                return -ENOENT;

        LASSERT(!dt_object_remote(dt));
        LINVRNT(osd_invariant(obj));

        spin_lock(&obj->oo_guard);
        osd_inode_getattr(env, obj->oo_inode, attr);
        if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL)
                attr->la_flags |= LUSTRE_ORPHAN_FL;
        spin_unlock(&obj->oo_guard);

        return 0;
}

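/*
 * Declare the credits needed by a subsequent osd_attr_set(): the base inode
 * update, a possible LMA xattr update and, when the owner changes, the quota
 * bookkeeping for both the old and the new UID/GID (inode and block
 * accounting in each direction).  EDQUOT/EINPROGRESS from osd_declare_qid()
 * is not treated as fatal here, since ownership changes are performed by the
 * super user (see the comment in the function body).
 */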
static int osd_declare_attr_set(const struct lu_env *env,
                                struct dt_object *dt,
                                const struct lu_attr *attr,
                                struct thandle *handle)
{
        struct osd_thandle *oh;
        struct osd_object *obj;
        struct osd_thread_info *info = osd_oti_get(env);
        struct lquota_id_info *qi = &info->oti_qi;
        qid_t uid;
        qid_t gid;
        long long bspace;
        int rc = 0;
        bool enforce;

        LASSERT(dt != NULL);
        LASSERT(handle != NULL);

        obj = osd_dt_obj(dt);
        LASSERT(osd_invariant(obj));

        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);

        osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
                             osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);

        osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
                             osd_dto_credits_noquota[DTO_XATTR_SET]);

        if (attr == NULL || obj->oo_inode == NULL)
                return rc;

        bspace = obj->oo_inode->i_blocks;
        bspace <<= obj->oo_inode->i_sb->s_blocksize_bits;
        bspace = toqb(bspace);

        /* Changing ownership is always performed by the super user, so it
         * should not fail with EDQUOT.
         *
         * We still need to call osd_declare_qid() to calculate the journal
         * credits for updating quota accounting files and to trigger quota
         * space adjustment once the operation is completed. */
        if (attr->la_valid & LA_UID || attr->la_valid & LA_GID) {
                uid = i_uid_read(obj->oo_inode);
                qi->lqi_type = USRQUOTA;
                enforce = (attr->la_valid & LA_UID) && (attr->la_uid != uid);
                /* inode accounting */
                qi->lqi_is_blk = false;

                /* one more inode for the new uid ... */
                qi->lqi_id.qid_uid = attr->la_uid;
                qi->lqi_space = 1;
                /* Reserve credits for the new uid */
                rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
                if (rc == -EDQUOT || rc == -EINPROGRESS)
                        rc = 0;
                if (rc)
                        return rc;

                /* and one less inode for the current uid */
                qi->lqi_id.qid_uid = uid;
                qi->lqi_space = -1;
                rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
                if (rc == -EDQUOT || rc == -EINPROGRESS)
                        rc = 0;
                if (rc)
                        return rc;

                /* block accounting */
                qi->lqi_is_blk = true;

                /* more blocks for the new uid ... */
                qi->lqi_id.qid_uid = attr->la_uid;
                qi->lqi_space = bspace;
                /*
                 * Credits for the new uid have been reserved, re-use "obj"
                 * to save credit reservation.
                 */
                rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
                if (rc == -EDQUOT || rc == -EINPROGRESS)
                        rc = 0;
                if (rc)
                        return rc;

                /* and finally fewer blocks for the current uid */
                qi->lqi_id.qid_uid = uid;
                qi->lqi_space = -bspace;
                rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
                if (rc == -EDQUOT || rc == -EINPROGRESS)
                        rc = 0;
                if (rc)
                        return rc;

                gid = i_gid_read(obj->oo_inode);
                qi->lqi_type = GRPQUOTA;
                enforce = (attr->la_valid & LA_GID) && (attr->la_gid != gid);

                /* inode accounting */
                qi->lqi_is_blk = false;

                /* one more inode for the new gid ... */
                qi->lqi_id.qid_gid = attr->la_gid;
                qi->lqi_space = 1;
                rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
                if (rc == -EDQUOT || rc == -EINPROGRESS)
                        rc = 0;
                if (rc)
                        return rc;

                /* and one less inode for the current gid */
                qi->lqi_id.qid_gid = gid;
                qi->lqi_space = -1;
                rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
                if (rc == -EDQUOT || rc == -EINPROGRESS)
                        rc = 0;
                if (rc)
                        return rc;

                /* block accounting */
                qi->lqi_is_blk = true;

                /* more blocks for the new gid ... */
                qi->lqi_id.qid_gid = attr->la_gid;
                qi->lqi_space = bspace;
                rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
                if (rc == -EDQUOT || rc == -EINPROGRESS)
                        rc = 0;
                if (rc)
                        return rc;

                /* and finally fewer blocks for the current gid */
                qi->lqi_id.qid_gid = gid;
                qi->lqi_space = -bspace;
                rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
                if (rc == -EDQUOT || rc == -EINPROGRESS)
                        rc = 0;
        }

        return rc;
}

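/*
 * Apply the valid bits of a lu_attr to the in-core inode.  Size and blocks
 * are only honoured for regular files, and i_blocks itself is never written
 * here because it is maintained by ldiskfs and used by quota.
 */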
static int osd_inode_setattr(const struct lu_env *env,
                             struct inode *inode, const struct lu_attr *attr)
{
        __u64 bits = attr->la_valid;

        /* Only allow setting the size for regular files */
        if (!S_ISREG(inode->i_mode))
                bits &= ~(LA_SIZE | LA_BLOCKS);

        if (bits & LA_ATIME)
                inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
        if (bits & LA_CTIME)
                inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
        if (bits & LA_MTIME)
                inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
        if (bits & LA_SIZE) {
                LDISKFS_I(inode)->i_disksize = attr->la_size;
                i_size_write(inode, attr->la_size);
        }

        /* OSD should not change "i_blocks", which is used by quota.
         * "i_blocks" should be changed by ldiskfs only. */
        if (bits & LA_MODE)
                inode->i_mode = (inode->i_mode & S_IFMT) |
                                (attr->la_mode & ~S_IFMT);
        if (bits & LA_UID)
                i_uid_write(inode, attr->la_uid);
        if (bits & LA_GID)
                i_gid_write(inode, attr->la_gid);
        if (bits & LA_NLINK)
                set_nlink(inode, attr->la_nlink);
        if (bits & LA_RDEV)
                inode->i_rdev = attr->la_rdev;

        if (bits & LA_FLAGS) {
                /* always keep S_NOCMTIME */
                inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
                                 S_NOCMTIME;
        }
        return 0;
}

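/*
 * Transfer quota usage to a new owner when the UID or GID requested in
 * "attr" differs from what the inode currently carries.  This is a thin
 * wrapper around ll_vfs_dq_init()/ll_vfs_dq_transfer(); a failure usually
 * indicates that quota enforcement is not enabled on the ldiskfs filesystem.
 */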
static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
{
        if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
            (attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
                struct iattr iattr;
                int rc;

                ll_vfs_dq_init(inode);
                iattr.ia_valid = 0;
                if (attr->la_valid & LA_UID)
                        iattr.ia_valid |= ATTR_UID;
                if (attr->la_valid & LA_GID)
                        iattr.ia_valid |= ATTR_GID;
                iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
                iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);

                rc = ll_vfs_dq_transfer(inode, &iattr);
                if (rc) {
                        CERROR("%s: quota transfer failed: rc = %d. Is quota "
                               "enforcement enabled on the ldiskfs "
                               "filesystem?\n", inode->i_sb->s_id, rc);
                        return rc;
                }
        }
        return 0;
}

static int osd_attr_set(const struct lu_env *env,
                        struct dt_object *dt,
                        const struct lu_attr *attr,
                        struct thandle *handle)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct inode *inode;
        int rc;

        if (!dt_object_exists(dt))
                return -ENOENT;

        LASSERT(handle != NULL);
        LASSERT(!dt_object_remote(dt));
        LASSERT(osd_invariant(obj));

        osd_trans_exec_op(env, handle, OSD_OT_ATTR_SET);

        if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING)) {
                struct osd_thread_info *oti = osd_oti_get(env);
                const struct lu_fid *fid0 = lu_object_fid(&dt->do_lu);
                struct lu_fid *fid1 = &oti->oti_fid;
                struct osd_inode_id *id = &oti->oti_id;
                struct iam_path_descr *ipd;
                struct iam_container *bag;
                struct osd_thandle *oh;

                fid_cpu_to_be(fid1, fid0);
                memset(id, 1, sizeof(*id));
                bag = &osd_fid2oi(osd_dev(dt->do_lu.lo_dev),
                                  fid0)->oi_dir.od_container;
                ipd = osd_idx_ipd_get(env, bag);
                if (unlikely(ipd == NULL))
                        return -ENOMEM;

                oh = container_of0(handle, struct osd_thandle, ot_super);
                rc = iam_update(oh->ot_handle, bag,
                                (const struct iam_key *)fid1,
                                (const struct iam_rec *)id, ipd);
                osd_ipd_put(env, bag, ipd);
                return rc > 0 ? 0 : rc;
        }

        inode = obj->oo_inode;

        rc = osd_quota_transfer(inode, attr);
        if (rc)
                return rc;

        spin_lock(&obj->oo_guard);
        rc = osd_inode_setattr(env, inode, attr);
        spin_unlock(&obj->oo_guard);
        if (rc != 0)
                goto out;

        ll_dirty_inode(inode, I_DIRTY_DATASYNC);

        if (!(attr->la_valid & LA_FLAGS))
                goto out;

        /* Let's check if there are extra flags that need to be set into LMA */
        if (attr->la_flags & LUSTRE_LMA_FL_MASKS) {
                struct osd_thread_info *info = osd_oti_get(env);
                struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;

                rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
                if (rc != 0)
                        goto out;

                lma->lma_incompat |=
                        lustre_to_lma_flags(attr->la_flags);
                lustre_lma_swab(lma);
                rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA,
                                     lma, sizeof(*lma), XATTR_REPLACE);
                if (rc != 0) {
                        struct osd_device *osd = osd_obj2dev(obj);

                        CWARN("%s: set "DFID" lma flags %u failed: rc = %d\n",
                              osd_name(osd), PFID(lu_object_fid(&dt->do_lu)),
                              lma->lma_incompat, rc);
                } else {
                        obj->oo_lma_flags =
                                attr->la_flags & LUSTRE_LMA_FL_MASKS;
                }
                osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
        }
out:
        osd_trans_exec_check(env, handle, OSD_OT_ATTR_SET);

        return rc;
}

static struct dentry *osd_child_dentry_get(const struct lu_env *env,
                                           struct osd_object *obj,
                                           const char *name, const int namelen)
{
        return osd_child_dentry_by_inode(env, obj->oo_inode, name, namelen);
}

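/*
 * Allocate a new ldiskfs inode for "obj" with the given mode.  Directories
 * get an htree lock head first when parallel directory operations
 * (ldiskfs_pdo) are enabled.  The new inode is marked S_NOCMTIME (the OSD
 * manages c/mtime itself) and flagged LDISKFS_STATE_LUSTRE_NOSCRUB, since a
 * freshly created object cannot be inconsistent.  When the allocation hint
 * carries a local parent, the parent inode is passed to
 * ldiskfs_create_inode() so the new inode can be allocated near it.
 */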
static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
                      umode_t mode, struct dt_allocation_hint *hint,
                      struct thandle *th)
{
        int result;
        struct osd_device *osd = osd_obj2dev(obj);
        struct osd_thandle *oth;
        struct dt_object *parent = NULL;
        struct inode *inode;

        LINVRNT(osd_invariant(obj));
        LASSERT(obj->oo_inode == NULL);
        LASSERT(obj->oo_hl_head == NULL);

        if (S_ISDIR(mode) && ldiskfs_pdo) {
                obj->oo_hl_head =
                        ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
                if (obj->oo_hl_head == NULL)
                        return -ENOMEM;
        }

        oth = container_of(th, struct osd_thandle, ot_super);
        LASSERT(oth->ot_handle->h_transaction != NULL);

        if (hint != NULL && hint->dah_parent != NULL &&
            !dt_object_remote(hint->dah_parent))
                parent = hint->dah_parent;

        inode = ldiskfs_create_inode(oth->ot_handle,
                                     parent ? osd_dt_obj(parent)->oo_inode :
                                              osd_sb(osd)->s_root->d_inode,
                                     mode);
        if (!IS_ERR(inode)) {
                /* Do not update file c/mtime in ldiskfs. */
                inode->i_flags |= S_NOCMTIME;

                /* A newly created object must be consistent, so it is
                 * unnecessary to scrub against it. */
                ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_NOSCRUB);

                obj->oo_inode = inode;
                result = 0;
        } else {
                if (obj->oo_hl_head != NULL) {
                        ldiskfs_htree_lock_head_free(obj->oo_hl_head);
                        obj->oo_hl_head = NULL;
                }
                result = PTR_ERR(inode);
        }
        LINVRNT(osd_invariant(obj));
        return result;
}

static int osd_mkdir(struct osd_thread_info *info, struct osd_object *obj,
                     struct lu_attr *attr,
                     struct dt_allocation_hint *hint,
                     struct dt_object_format *dof,
                     struct thandle *th)
{
        int result;
        struct osd_thandle *oth;
        __u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX));

        LASSERT(S_ISDIR(attr->la_mode));

        oth = container_of(th, struct osd_thandle, ot_super);
        LASSERT(oth->ot_handle->h_transaction != NULL);
        result = osd_mkfile(info, obj, mode, hint, th);

        return result;
}

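/*
 * Create the backing inode for an index object and format it as an IAM
 * container: variable-size keys (DT_IND_VARKEY) use the "lvar" format,
 * otherwise the fixed-size "lfix" format is used, with key and record sizes
 * taken from the requested index features.
 */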
static int osd_mk_index(struct osd_thread_info *info, struct osd_object *obj,
                        struct lu_attr *attr,
                        struct dt_allocation_hint *hint,
                        struct dt_object_format *dof,
                        struct thandle *th)
{
        int result;
        struct osd_thandle *oth;
        const struct dt_index_features *feat = dof->u.dof_idx.di_feat;

        __u32 mode = (attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX));

        LASSERT(S_ISREG(attr->la_mode));

        oth = container_of(th, struct osd_thandle, ot_super);
        LASSERT(oth->ot_handle->h_transaction != NULL);

        result = osd_mkfile(info, obj, mode, hint, th);
        if (result == 0) {
                LASSERT(obj->oo_inode != NULL);
                if (feat->dif_flags & DT_IND_VARKEY)
                        result = iam_lvar_create(obj->oo_inode,
                                                 feat->dif_keysize_max,
                                                 feat->dif_ptrsize,
                                                 feat->dif_recsize_max,
                                                 oth->ot_handle);
                else
                        result = iam_lfix_create(obj->oo_inode,
                                                 feat->dif_keysize_max,
                                                 feat->dif_ptrsize,
                                                 feat->dif_recsize_max,
                                                 oth->ot_handle);
        }
        return result;
}

static int osd_mkreg(struct osd_thread_info *info, struct osd_object *obj,
                     struct lu_attr *attr,
                     struct dt_allocation_hint *hint,
                     struct dt_object_format *dof,
                     struct thandle *th)
{
        LASSERT(S_ISREG(attr->la_mode));
        return osd_mkfile(info, obj, (attr->la_mode &
                          (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
}

static int osd_mksym(struct osd_thread_info *info, struct osd_object *obj,
                     struct lu_attr *attr,
                     struct dt_allocation_hint *hint,
                     struct dt_object_format *dof,
                     struct thandle *th)
{
        LASSERT(S_ISLNK(attr->la_mode));
        return osd_mkfile(info, obj, (attr->la_mode &
                          (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
}

static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
                     struct lu_attr *attr,
                     struct dt_allocation_hint *hint,
                     struct dt_object_format *dof,
                     struct thandle *th)
{
        umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
        int result;

        LINVRNT(osd_invariant(obj));
        LASSERT(obj->oo_inode == NULL);
        LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
                S_ISFIFO(mode) || S_ISSOCK(mode));

        result = osd_mkfile(info, obj, mode, hint, th);
        if (result == 0) {
                LASSERT(obj->oo_inode != NULL);
                /*
                 * This inode should be marked dirty for i_rdev.  Currently
                 * that is done in osd_attr_init().
                 */
                init_special_inode(obj->oo_inode, obj->oo_inode->i_mode,
                                   attr->la_rdev);
        }
        LINVRNT(osd_invariant(obj));
        return result;
}

typedef int (*osd_obj_type_f)(struct osd_thread_info *, struct osd_object *,
                              struct lu_attr *,
                              struct dt_allocation_hint *hint,
                              struct dt_object_format *dof,
                              struct thandle *);

static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
{
        osd_obj_type_f result;

        switch (type) {
        case DFT_DIR:
                result = osd_mkdir;
                break;
        case DFT_REGULAR:
                result = osd_mkreg;
                break;
        case DFT_SYM:
                result = osd_mksym;
                break;
        case DFT_NODE:
                result = osd_mknod;
                break;
        case DFT_INDEX:
                result = osd_mk_index;
                break;
        default:
                LBUG();
                break;
        }
        return result;
}

static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
                        struct dt_object *parent, struct dt_object *child,
                        umode_t child_mode)
{
        LASSERT(ah);

        ah->dah_parent = parent;
        ah->dah_mode = child_mode;

        if (parent != NULL && !dt_object_remote(parent)) {
                /* will help to find FID->ino at dt_insert("..") */
                struct osd_object *pobj = osd_dt_obj(parent);

                osd_idc_find_and_init(env, osd_obj2dev(pobj), pobj);
        }
}

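/*
 * Initialize the attributes of a just-created object.  Type and mode were
 * already set when the inode was allocated, and timestamps that match what
 * the inode already carries are dropped from the valid mask; the remaining
 * attributes are pushed into the inode after transferring quota to the
 * requested owner.
 */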
static void osd_attr_init(struct osd_thread_info *info, struct osd_object *obj,
                          struct lu_attr *attr, struct dt_object_format *dof)
{
        struct inode *inode = obj->oo_inode;
        __u64 valid = attr->la_valid;
        int result;

        attr->la_valid &= ~(LA_TYPE | LA_MODE);

        if (dof->dof_type != DFT_NODE)
                attr->la_valid &= ~LA_RDEV;
        if ((valid & LA_ATIME) && (attr->la_atime == LTIME_S(inode->i_atime)))
                attr->la_valid &= ~LA_ATIME;
        if ((valid & LA_CTIME) && (attr->la_ctime == LTIME_S(inode->i_ctime)))
                attr->la_valid &= ~LA_CTIME;
        if ((valid & LA_MTIME) && (attr->la_mtime == LTIME_S(inode->i_mtime)))
                attr->la_valid &= ~LA_MTIME;

        result = osd_quota_transfer(inode, attr);
        if (result)
                return;

        if (attr->la_valid != 0) {
                result = osd_inode_setattr(info->oti_env, inode, attr);
                /*
                 * The osd_inode_setattr() should always succeed here.  The
                 * only error that could be returned is EDQUOT when we are
                 * trying to change the UID or GID of the inode.  However,
                 * this should not happen since quota enforcement is no
                 * longer enabled on ldiskfs (lquota takes care of it).
                 */
                LASSERTF(result == 0, "%d\n", result);
                ll_dirty_inode(inode, I_DIRTY_DATASYNC);
        }

        attr->la_valid = valid;
}

/**
 * Helper function for osd_object_create()
 *
 * \retval 0, on success
 */
static int __osd_object_create(struct osd_thread_info *info,
                               struct osd_object *obj, struct lu_attr *attr,
                               struct dt_allocation_hint *hint,
                               struct dt_object_format *dof,
                               struct thandle *th)
{
        int result;
        __u32 umask;

        osd_trans_exec_op(info->oti_env, th, OSD_OT_CREATE);

        /* we drop the umask so that the permissions we pass are not affected */
        umask = current->fs->umask;
        current->fs->umask = 0;

        result = osd_create_type_f(dof->dof_type)(info, obj, attr, hint, dof,
                                                  th);
        if (likely(obj->oo_inode != NULL)) {
                LASSERT(obj->oo_inode->i_state & I_NEW);

                /* Unlock the inode before attr initialization to avoid
                 * unnecessary dqget operations. LU-6378 */
                unlock_new_inode(obj->oo_inode);
        }

        if (likely(result == 0)) {
                osd_attr_init(info, obj, attr, dof);
                osd_object_init0(obj);
        }

        /* restore the previous umask value */
        current->fs->umask = umask;

        osd_trans_exec_check(info->oti_env, th, OSD_OT_CREATE);

        return result;
}

/**
 * Helper function for osd_object_create()
 *
 * \retval 0, on success
 */
static int __osd_oi_insert(const struct lu_env *env, struct osd_object *obj,
                           const struct lu_fid *fid, struct thandle *th)
{
        struct osd_thread_info *info = osd_oti_get(env);
        struct osd_inode_id *id = &info->oti_id;
        struct osd_device *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        int rc;

        LASSERT(obj->oo_inode != NULL);

        oh = container_of0(th, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle);
        osd_trans_exec_op(env, th, OSD_OT_INSERT);

        osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
        rc = osd_oi_insert(info, osd, fid, id, oh->ot_handle,
                           OI_CHECK_FLD, NULL);
        osd_trans_exec_check(env, th, OSD_OT_INSERT);

        return rc;
}

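/*
 * Resolve which server a FID sequence belongs to: IDIF sequences encode the
 * OST index directly, sequences outside the FLDB are treated as local MDT
 * sequences, and everything else is looked up in the locally cached FLD
 * (the OSD never performs a remote FLD lookup).
 */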
int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
                   u64 seq, struct lu_seq_range *range)
{
        struct seq_server_site *ss = osd_seq_site(osd);

        if (fid_seq_is_idif(seq)) {
                fld_range_set_ost(range);
                range->lsr_index = idif_ost_idx(seq);
                return 0;
        }

        if (!fid_seq_in_fldb(seq)) {
                fld_range_set_mdt(range);
                if (ss != NULL)
                        /* FIXME: if ss is NULL it is not supposed to need
                         * lsr_index, since ss means an OSD device instance */
                        range->lsr_index = ss->ss_node_id;
                return 0;
        }

        LASSERT(ss != NULL);
        fld_range_set_any(range);
        /* The OSD will only do a local fld lookup */
        return fld_local_lookup(env, ss->ss_server_fld, seq, range);
}

/*
 * Concurrency: no external locking is necessary.
 */
static int osd_declare_object_create(const struct lu_env *env,
                                     struct dt_object *dt,
                                     struct lu_attr *attr,
                                     struct dt_allocation_hint *hint,
                                     struct dt_object_format *dof,
                                     struct thandle *handle)
{
        struct osd_thandle *oh;
        int rc;

        LASSERT(handle != NULL);

        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);

        /* An EA (index) object consumes more credits than a regular object:
         * osd_mk_index() vs. osd_mkreg(): osd_mk_index() creates two blocks
         * for the root_node and the leaf_node, which could involve a block,
         * block bitmap, group and GDT change for each block, so add 4 * 2
         * credits in that case. */
        osd_trans_declare_op(env, oh, OSD_OT_CREATE,
                             osd_dto_credits_noquota[DTO_OBJECT_CREATE] +
                             ((dof->dof_type == DFT_INDEX) ? 4 * 2 : 0));
        /* Reusing an idle OI block may cause one additional OI block
         * to be changed. */
        osd_trans_declare_op(env, oh, OSD_OT_INSERT,
                             osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);

        rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid, 1, oh,
                                   osd_dt_obj(dt), false, NULL, false);
        if (rc != 0)
                return rc;

        /* will help to find FID->ino mapping at dt_insert() */
        rc = osd_idc_find_and_init(env, osd_obj2dev(osd_dt_obj(dt)),
                                   osd_dt_obj(dt));

        return rc;
}

static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
                             struct lu_attr *attr,
                             struct dt_allocation_hint *hint,
                             struct dt_object_format *dof, struct thandle *th)
{
        const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
        struct osd_object *obj = osd_dt_obj(dt);
        struct osd_thread_info *info = osd_oti_get(env);
        int result;

        if (dt_object_exists(dt))
                return -EEXIST;

        LINVRNT(osd_invariant(obj));
        LASSERT(!dt_object_remote(dt));
        LASSERT(osd_write_locked(env, obj));
        LASSERT(th != NULL);

        if (unlikely(fid_is_acct(fid)))
                /* Quota files can't be created from the kernel any more,
                 * 'tune2fs -O quota' will take care of creating them */
                return -EPERM;

        result = __osd_object_create(info, obj, attr, hint, dof, th);
        if (result == 0) {
                result = __osd_oi_insert(env, obj, fid, th);
                if (obj->oo_dt.do_body_ops == &osd_body_ops_new)
                        obj->oo_dt.do_body_ops = &osd_body_ops;
        }

        LASSERT(ergo(result == 0,
                     dt_object_exists(dt) && !dt_object_remote(dt)));

        LASSERT(osd_invariant(obj));
        return result;
}

/**
 * Called to destroy the on-disk representation of the object.
 *
 * Concurrency: must be locked
 */
static int osd_declare_object_destroy(const struct lu_env *env,
                                      struct dt_object *dt,
                                      struct thandle *th)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct inode *inode = obj->oo_inode;
        struct osd_thandle *oh;
        int rc;

        if (inode == NULL)
                return -ENOENT;

        oh = container_of0(th, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);

        osd_trans_declare_op(env, oh, OSD_OT_DESTROY,
                             osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
        /* Recycling an idle OI leaf may cause an additional three OI blocks
         * to be changed. */
        osd_trans_declare_op(env, oh, OSD_OT_DELETE,
                             osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
        /* one less inode */
        rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
                                   -1, oh, obj, false, NULL, false);
        if (rc)
                return rc;

        /* data to be truncated */
        rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
                                   0, oh, obj, true, NULL, false);
        if (rc)
                return rc;

        /* will help to find FID->ino when this object is being
         * added to PENDING/ */
        rc = osd_idc_find_and_init(env, osd_obj2dev(obj), obj);

        return rc;
}

static int osd_object_destroy(const struct lu_env *env,
                              struct dt_object *dt,
                              struct thandle *th)
{
        const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
        struct osd_object *obj = osd_dt_obj(dt);
        struct inode *inode = obj->oo_inode;
        struct osd_device *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;