* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <linux/fs.h>
/* XATTR_{REPLACE,CREATE} */
#include <linux/xattr.h>
-/* simple_mkdir() */
-#include <lvfs.h>
/*
* struct OBD_{ALLOC,FREE}*()
/* struct ptlrpc_thread */
#include <lustre_net.h>
#include <lustre_fid.h>
+/* process_config */
+#include <lustre_param.h>
#include "osd_internal.h"
+#include "osd_dynlocks.h"
/* llo_* api support */
#include <md_object.h>
#include <lustre_quota.h>
+#include <ldiskfs/xattr.h>
+
int ldiskfs_pdo = 1;
CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
"ldiskfs with parallel directory operations");
+int ldiskfs_track_declares_assert;
+CFS_MODULE_PARM(ldiskfs_track_declares_assert, "i", int, 0644,
+ "LBUG during tracking of declares");
+
+/* Slab to allocate dynlocks */
+struct kmem_cache *dynlock_cachep;
+
+static struct lu_kmem_descr ldiskfs_caches[] = {
+ {
+ .ckd_cache = &dynlock_cachep,
+ .ckd_name = "dynlock_cache",
+ .ckd_size = sizeof(struct dynlock_handle)
+ },
+ {
+ .ckd_cache = NULL
+ }
+};
+
static const char dot[] = ".";
static const char dotdot[] = "..";
static const char remote_obj_dir[] = "REM_OBJ_DIR";
static const struct dt_index_operations osd_index_iam_ops;
static const struct dt_index_operations osd_index_ea_ops;
-#ifdef OSD_TRACK_DECLARES
int osd_trans_declare_op2rb[] = {
[OSD_OT_ATTR_SET] = OSD_OT_ATTR_SET,
[OSD_OT_PUNCH] = OSD_OT_MAX,
[OSD_OT_WRITE] = OSD_OT_WRITE,
[OSD_OT_INSERT] = OSD_OT_DELETE,
[OSD_OT_DELETE] = OSD_OT_INSERT,
+ [OSD_OT_UPDATE] = OSD_OT_MAX,
[OSD_OT_QUOTA] = OSD_OT_MAX,
};
-#endif
static int osd_has_index(const struct osd_object *obj)
{
/*
* Concurrency: doesn't matter
*/
-static int osd_read_locked(const struct lu_env *env, struct osd_object *o)
-{
- return osd_oti_get(env)->oti_r_locks > 0;
-}
/*
* Concurrency: doesn't matter
}
}
-static inline int __osd_xattr_get(struct inode *inode, struct dentry *dentry,
- const char *name, void *buf, int len)
-{
- dentry->d_inode = inode;
- return inode->i_op->getxattr(dentry, name, buf, len);
-}
-
int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
struct dentry *dentry, struct lustre_mdt_attrs *lma)
{
int rc;
- rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA, (void *)lma,
- sizeof(*lma));
- if (rc == -ERANGE) {
- /* try with old lma size */
- rc = inode->i_op->getxattr(dentry, XATTR_NAME_LMA,
- info->oti_mdt_attrs_old,
- LMA_OLD_SIZE);
- if (rc > 0)
- memcpy(lma, info->oti_mdt_attrs_old, sizeof(*lma));
- }
+ CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
+ rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
+ info->oti_mdt_attrs_old, LMA_OLD_SIZE);
if (rc > 0) {
+ if ((void *)lma != (void *)info->oti_mdt_attrs_old)
+ memcpy(lma, info->oti_mdt_attrs_old, sizeof(*lma));
+ rc = 0;
+ lustre_lma_swab(lma);
/* Check LMA compatibility */
- if (lma->lma_incompat & ~cpu_to_le32(LMA_INCOMPAT_SUPP)) {
- CWARN("%.16s: unsupported incompat LMA feature(s) "
- "%lx/%#x\n",
+ if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
+ CWARN("%.16s: unsupported incompat LMA feature(s) %#x "
+ "for fid = "DFID", ino = %lu\n",
LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
- inode->i_ino, le32_to_cpu(lma->lma_incompat) &
- ~LMA_INCOMPAT_SUPP);
- rc = -ENOSYS;
- } else {
- lustre_lma_swab(lma);
- rc = 0;
+ lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
+ PFID(&lma->lma_self_fid), inode->i_ino);
+ rc = -EOPNOTSUPP;
}
} else if (rc == 0) {
rc = -ENODATA;
id->oii_ino, PTR_ERR(inode));
} else if (id->oii_gen != OSD_OII_NOGEN &&
inode->i_generation != id->oii_gen) {
- CDEBUG(D_INODE, "unmatched inode: ino = %u, gen0 = %u, "
- "gen1 = %u\n",
+ CDEBUG(D_INODE, "unmatched inode: ino = %u, oii_gen = %u, "
+ "i_generation = %u\n",
id->oii_ino, id->oii_gen, inode->i_generation);
iput(inode);
inode = ERR_PTR(-ESTALE);
/* due to parallel readdir and unlink,
* we can have dead inode here. */
CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
- make_bad_inode(inode);
iput(inode);
inode = ERR_PTR(-ESTALE);
} else if (is_bad_inode(inode)) {
return inode;
}
-static struct inode *
-osd_iget_verify(struct osd_thread_info *info, struct osd_device *dev,
- struct osd_inode_id *id, const struct lu_fid *fid)
+/**
+ * Read the inode for \a id from disk and sanity-check it against the
+ * OI (Object Index) mapping for \a fid.
+ *
+ * The inode is rejected (-ENOENT/-ESTALE) when iget fails, the inode is
+ * bad, its i_generation does not match \a id, or its nlink is zero. If
+ * the caller's mapping came from the OI files (\a in_oi), a failed check
+ * triggers a re-lookup in the OI to tell a normal concurrent removal
+ * apart from an inconsistent OI (-EREMCHG, which makes the caller start
+ * OI scrub). On success the generation is filled into \a id if unknown
+ * and S_NOCMTIME is set so ldiskfs does not update c/mtime itself.
+ *
+ * \retval valid inode pointer on success, ERR_PTR(-errno) on failure.
+ */
+static struct inode *osd_iget_check(struct osd_thread_info *info,
+ struct osd_device *dev,
+ const struct lu_fid *fid,
+ struct osd_inode_id *id,
+ bool in_oi)
{
- struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
- struct inode *inode;
- int rc;
+ struct inode *inode;
+ int rc = 0;
+ ENTRY;
- inode = osd_iget(info, dev, id);
- if (IS_ERR(inode))
- return inode;
+ inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
+ if (IS_ERR(inode)) {
+ rc = PTR_ERR(inode);
+ if (!in_oi || (rc != -ENOENT && rc != -ESTALE)) {
+ CDEBUG(D_INODE, "no inode: ino = %u, rc = %d\n",
+ id->oii_ino, rc);
- rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
- if (rc == -ENODATA)
- return inode;
+ GOTO(put, rc);
+ }
+
+ goto check_oi;
+ }
+ if (is_bad_inode(inode)) {
+ rc = -ENOENT;
+ if (!in_oi) {
+ CDEBUG(D_INODE, "bad inode: ino = %u\n", id->oii_ino);
+
+ GOTO(put, rc);
+ }
+
+ goto check_oi;
+ }
+
+ if (id->oii_gen != OSD_OII_NOGEN &&
+ inode->i_generation != id->oii_gen) {
+ rc = -ESTALE;
+ if (!in_oi) {
+ CDEBUG(D_INODE, "unmatched inode: ino = %u, "
+ "oii_gen = %u, i_generation = %u\n",
+ id->oii_ino, id->oii_gen, inode->i_generation);
+
+ GOTO(put, rc);
+ }
+
+ goto check_oi;
+ }
+
+ if (inode->i_nlink == 0) {
+ rc = -ENOENT;
+ if (!in_oi) {
+ CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
+
+ GOTO(put, rc);
+ }
+
+ goto check_oi;
+ }
+
+check_oi:
 if (rc != 0) {
- iput(inode);
- return ERR_PTR(rc);
+ LASSERTF(rc == -ESTALE || rc == -ENOENT, "rc = %d\n", rc);
+
+ rc = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
+ /* XXX: There are some possible cases:
+ * 1. rc = 0.
+ * Backup/restore caused the OI invalid.
+ * 2. rc = 0.
+ * Someone unlinked the object but NOT removed
+ * the OI mapping, such as mount target device
+ * as ldiskfs, and modify something directly.
+ * 3. rc = -ENOENT.
+ * Someone just removed the object between the
+ * former oi_lookup and the iget. It is normal.
+ * 4. Other failure cases.
+ *
+ * Generally, when the device is mounted, it will
+ * auto check whether the system is restored from
+ * file-level backup or not. We trust such detect
+ * to distinguish the 1st case from the 2nd case. */
+ if (rc == 0) {
+ if (!IS_ERR(inode) && inode->i_generation != 0 &&
+ inode->i_generation == id->oii_gen)
+ rc = -ENOENT;
+ else
+ rc = -EREMCHG;
+ }
+ } else {
+ if (id->oii_gen == OSD_OII_NOGEN)
+ osd_id_gen(id, inode->i_ino, inode->i_generation);
+
+ /* Do not update file c/mtime in ldiskfs.
+ * NB: we don't have any lock to protect this because we don't
+ * have reference on osd_object now, but contention with
+ * another lookup + attr_set can't happen in the tiny window
+ * between if (...) and set S_NOCMTIME. */
+ if (!(inode->i_flags & S_NOCMTIME))
+ inode->i_flags |= S_NOCMTIME;
 }
- if (!lu_fid_eq(fid, &lma->lma_self_fid)) {
- CDEBUG(D_LFSCK, "inconsistent obj: "DFID", %lu, "DFID"\n",
- PFID(&lma->lma_self_fid), inode->i_ino, PFID(fid));
- iput(inode);
- return ERR_PTR(-EREMCHG);
+ GOTO(put, rc);
+
+put:
+ if (rc != 0) {
+ if (!IS_ERR(inode))
+ iput(inode);
+
+ inode = ERR_PTR(rc);
 }
 return inode;
}
+/**
+ * Read the OST-object's "fid" xattr (XATTR_NAME_FID) and, when possible,
+ * reconstruct the object's self-FID from it.
+ *
+ * The xattr format is distinguished purely by its on-disk size: the old
+ * format (struct filter_fid_old) carries the object's seq/objid, which is
+ * converted into \a fid here; the new format (struct filter_fid) does not
+ * contain the self-fid at all.
+ *
+ * \retval +v: new filter_fid, does not contain self-fid
+ * \retval 0: filter_fid_old, contains self-fid, stored into \a fid
+ * \retval -v: other failure cases (xattr absent or of unexpected size)
+ */
+int osd_get_idif(struct osd_thread_info *info, struct inode *inode,
+ struct dentry *dentry, struct lu_fid *fid)
+{
+ struct filter_fid_old *ff = &info->oti_ff;
+ struct ost_id *ostid = &info->oti_ostid;
+ int rc;
+
+ rc = __osd_xattr_get(inode, dentry, XATTR_NAME_FID, ff, sizeof(*ff));
+ if (rc == sizeof(*ff)) {
+ rc = 0;
+ ostid_set_seq(ostid, le64_to_cpu(ff->ff_seq));
+ ostid_set_id(ostid, le64_to_cpu(ff->ff_objid));
+ /* XXX: should use real OST index in the future. LU-3569 */
+ ostid_to_fid(fid, ostid, 0);
+ } else if (rc == sizeof(struct filter_fid)) {
+ rc = 1;
+ } else if (rc >= 0) {
+ /* xattr present but of a size matching neither format */
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+/* Regenerate the LMA xattr of \a inode from the trusted FID \a fid (with
+ * compat flags \a compat) inside a private journal handle. Used when the
+ * on-disk LMA is missing or stale. Must not run nested inside another
+ * transaction (asserted via current->journal_info). Failures are only
+ * warned about since this is best-effort repair; returns 0 or -errno. */
+static int osd_lma_self_repair(struct osd_thread_info *info,
+ struct osd_device *osd, struct inode *inode,
+ const struct lu_fid *fid, __u32 compat)
+{
+ handle_t *jh;
+ int rc;
+
+ LASSERT(current->journal_info == NULL);
+
+ /* one xattr-set worth of credits is enough for writing the LMA */
+ jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
+ osd_dto_credits_noquota[DTO_XATTR_SET]);
+ if (IS_ERR(jh)) {
+ rc = PTR_ERR(jh);
+ CWARN("%s: cannot start journal for lma_self_repair: rc = %d\n",
+ osd_name(osd), rc);
+ return rc;
+ }
+
+ rc = osd_ea_fid_set(info, inode, fid, compat, 0);
+ if (rc != 0)
+ CWARN("%s: cannot self repair the LMA: rc = %d\n",
+ osd_name(osd), rc);
+ ldiskfs_journal_stop(jh);
+ return rc;
+}
+
+/**
+ * Verify that the LMA xattr stored in \a obj's inode agrees with the FID
+ * the object was looked up by.
+ *
+ * If the LMA is absent on a non-IGIF OST-object (and od_check_ff is set),
+ * fall back to the "fid" xattr via osd_get_idif() and, when allowed by
+ * od_lma_self_repair, regenerate the LMA from the request FID. Unsupported
+ * incompat LMA flags yield -EOPNOTSUPP; a self-fid mismatch yields -EREMCHG
+ * (which makes the caller trigger OI scrub), except for the old-IDIF case
+ * where the OST index was not yet encoded in the IDIF and the FIDs can be
+ * proven equivalent after re-encoding with the request's OST index.
+ *
+ * \retval 0 on success or when there is nothing to check (no LMA/FID data)
+ * \retval negative errno on failure or inconsistency
+ */
+static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
+{
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct osd_device *osd = osd_obj2dev(obj);
+ struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+ struct inode *inode = obj->oo_inode;
+ struct dentry *dentry = &info->oti_obj_dentry;
+ struct lu_fid *fid = NULL;
+ const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
+ int rc;
+ ENTRY;
+
+ /* read into the old-sized buffer, which covers current LMA too */
+ CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
+ rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
+ info->oti_mdt_attrs_old, LMA_OLD_SIZE);
+ if (rc == -ENODATA && !fid_is_igif(rfid) && osd->od_check_ff) {
+ fid = &lma->lma_self_fid;
+ rc = osd_get_idif(info, inode, dentry, fid);
+ if ((rc > 0) || (rc == -ENODATA && osd->od_lma_self_repair)) {
+ /* For the given OST-object, if it has neither LMA nor
+ * FID in XATTR_NAME_FID, then the given FID (which is
+ * contained in the @obj, from client RPC for locating
+ * the OST-object) is trusted. We use it to generate
+ * the LMA. */
+ osd_lma_self_repair(info, osd, inode, rfid,
+ fid_is_on_ost(info, osd, fid, OI_CHECK_FLD) ?
+ LMAC_FID_ON_OST : 0);
+ RETURN(0);
+ }
+ }
+
+ /* no LMA at all: nothing to verify */
+ if (unlikely(rc == -ENODATA))
+ RETURN(0);
+
+ if (rc < 0)
+ RETURN(rc);
+
+ if (rc > 0) {
+ rc = 0;
+ lustre_lma_swab(lma);
+ if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
+ CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
+ CWARN("%s: unsupported incompat LMA feature(s) %#x for "
+ "fid = "DFID", ino = %lu\n", osd_name(osd),
+ lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
+ PFID(rfid), inode->i_ino);
+ rc = -EOPNOTSUPP;
+ } else if (!(lma->lma_compat & LMAC_NOT_IN_OI)) {
+ /* object is tracked by the OI: compare self-fid below */
+ fid = &lma->lma_self_fid;
+ }
+ }
+
+ if (fid != NULL && unlikely(!lu_fid_eq(rfid, fid))) {
+ if (fid_is_idif(rfid) && fid_is_idif(fid)) {
+ struct ost_id *oi = &info->oti_ostid;
+ struct lu_fid *fid1 = &info->oti_fid3;
+ __u32 idx = fid_idif_ost_idx(rfid);
+
+ /* For old IDIF, the OST index is not part of the IDIF,
+ * Means that different OSTs may have the same IDIFs.
+ * Under such case, we need to make some compatible
+ * check to make sure to trigger OI scrub properly. */
+ if (idx != 0 && fid_idif_ost_idx(fid) == 0) {
+ /* Given @rfid is new, LMA is old. */
+ fid_to_ostid(fid, oi);
+ ostid_to_fid(fid1, oi, idx);
+ if (lu_fid_eq(fid1, rfid)) {
+ if (osd->od_lma_self_repair)
+ osd_lma_self_repair(info, osd,
+ inode, rfid,
+ LMAC_FID_ON_OST);
+ RETURN(0);
+ }
+ }
+ }
+
+ CDEBUG(D_INODE, "%s: FID "DFID" != self_fid "DFID"\n",
+ osd_name(osd), PFID(rfid), PFID(fid));
+ rc = -EREMCHG;
+ }
+
+ RETURN(rc);
+}
+
static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
const struct lu_fid *fid,
const struct lu_object_conf *conf)
struct osd_scrub *scrub;
struct scrub_file *sf;
int result;
- int verify = 0;
+ int saved = 0;
+ bool in_oi = false;
+ bool triggered = false;
ENTRY;
LINVRNT(osd_invariant(obj));
if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
RETURN(-ENOENT);
+ /* For the object is created as locking anchor, or for the object to
+ * be created on disk. No need to osd_oi_lookup() at here because FID
+ * should never be re-used; if it's really a duplicate FID from
+ * unexpected reason, we should be able to detect it later by calling
+ * do_create->osd_oi_insert(). */
+ if (conf != NULL && conf->loc_flags & LOC_F_NEW)
+ GOTO(out, result = 0);
+
/* Search order: 1. per-thread cache. */
- if (lu_fid_eq(fid, &oic->oic_fid)) {
+ if (lu_fid_eq(fid, &oic->oic_fid) &&
+ likely(oic->oic_dev == dev)) {
id = &oic->oic_lid;
goto iget;
}
id = &info->oti_id;
- if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
+ if (!list_empty(&scrub->os_inconsistent_items)) {
/* Search order: 2. OI scrub pending list. */
result = osd_oii_lookup(dev, fid, id);
if (result == 0)
goto iget;
}
- if (sf->sf_flags & SF_INCONSISTENT)
- verify = 1;
-
- /*
- * Objects are created as locking anchors or place holders for objects
- * yet to be created. No need to osd_oi_lookup() at here because FID
- * shouldn't never be re-used, if it's really a duplicate FID from
- * unexpected reason, we should be able to detect it later by calling
- * do_create->osd_oi_insert()
- */
- if (conf != NULL && (conf->loc_flags & LOC_F_NEW) != 0)
- GOTO(out, result = 0);
-
/* Search order: 3. OI files. */
- result = osd_oi_lookup(info, dev, fid, id, true);
+ result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
if (result == -ENOENT) {
if (!fid_is_norm(fid) ||
+ fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) ||
!ldiskfs_test_bit(osd_oi_fid2idx(dev,fid),
sf->sf_oi_bitmap))
GOTO(out, result = 0);
if (result != 0)
GOTO(out, result);
+ in_oi = true;
+
iget:
- if (verify == 0)
- inode = osd_iget(info, dev, id);
- else
- inode = osd_iget_verify(info, dev, id, fid);
+ inode = osd_iget_check(info, dev, fid, id, in_oi);
if (IS_ERR(inode)) {
result = PTR_ERR(inode);
if (result == -ENOENT || result == -ESTALE) {
- fid_zero(&oic->oic_fid);
- result = 0;
+ if (!in_oi)
+ fid_zero(&oic->oic_fid);
+
+ GOTO(out, result = -ENOENT);
} else if (result == -EREMCHG) {
trigger:
+ if (!in_oi)
+ fid_zero(&oic->oic_fid);
+
+ if (unlikely(triggered))
+ GOTO(out, result = saved);
+
+ triggered = true;
if (thread_is_running(&scrub->os_thread)) {
result = -EINPROGRESS;
} else if (!dev->od_noscrub) {
result = osd_scrub_start(dev);
- LCONSOLE_ERROR("%.16s: trigger OI scrub by RPC "
- "for "DFID", rc = %d [1]\n",
- LDISKFS_SB(osd_sb(dev))->s_es->\
- s_volume_name,PFID(fid), result);
+ LCONSOLE_WARN("%.16s: trigger OI scrub by RPC "
+ "for "DFID", rc = %d [1]\n",
+ osd_name(dev), PFID(fid),result);
if (result == 0 || result == -EALREADY)
result = -EINPROGRESS;
else
result = -EREMCHG;
}
+
+ /* We still have chance to get the valid inode: for the
+ * object which is referenced by remote name entry, the
+ * object on the local MDT will be linked under the dir
+ * of "/REMOTE_PARENT_DIR" with its FID string as name.
+ *
+ * We do not know whether the object for the given FID
+ * is referenced by some remote name entry or not, and
+ * especially for DNE II, a multiple-linked object may
+ * have many name entries reside on many MDTs.
+ *
+ * To simplify the operation, OSD will not distinguish
+ * more, just lookup "/REMOTE_PARENT_DIR". Usually, it
+ * only happened for the RPC from other MDT during the
+ * OI scrub, or for the client side RPC with FID only,
+ * such as FID to path, or from old connected client. */
+ saved = result;
+ result = osd_lookup_in_remote_parent(info, dev,
+ fid, id);
+ if (result == 0) {
+ in_oi = false;
+ goto iget;
+ }
+
+ result = saved;
}
GOTO(out, result);
obj->oo_inode = inode;
LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
+ result = osd_check_lma(env, obj);
+ if (result != 0) {
+ iput(inode);
+ obj->oo_inode = NULL;
+ if (result == -EREMCHG) {
+ if (!in_oi) {
+ result = osd_oi_lookup(info, dev, fid, id,
+ OI_CHECK_FLD);
+ if (result != 0) {
+ fid_zero(&oic->oic_fid);
+ GOTO(out, result);
+ }
+ }
+
+ goto trigger;
+ }
+
+ GOTO(out, result);
+ }
+
obj->oo_compat_dot_created = 1;
obj->oo_compat_dotdot_created = 1;
/*
* Concurrency: shouldn't matter.
*/
-#ifdef HAVE_LDISKFS_JOURNAL_CALLBACK_ADD
static void osd_trans_commit_cb(struct super_block *sb,
- struct journal_callback *jcb, int error)
-#else
-static void osd_trans_commit_cb(struct journal_callback *jcb, int error)
-#endif
+ struct ldiskfs_journal_cb_entry *jcb, int error)
{
struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
struct thandle *th = &oh->ot_super;
dt_txn_hook_commit(th);
/* call per-transaction callbacks if any */
- cfs_list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
+ list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
"commit callback entry: magic=%x name='%s'\n",
dcb->dcb_magic, dcb->dcb_name);
- cfs_list_del_init(&dcb->dcb_linkage);
+ list_del_init(&dcb->dcb_linkage);
dcb->dcb_func(NULL, th, dcb, error);
}
- lu_ref_del_at(&lud->ld_reference, oh->ot_dev_link, "osd-tx", th);
+ lu_ref_del_at(&lud->ld_reference, &oh->ot_dev_link, "osd-tx", th);
lu_device_put(lud);
th->th_dev = NULL;
lu_context_exit(&th->th_ctx);
lu_context_fini(&th->th_ctx);
- OBD_FREE_PTR(oh);
+ thandle_put(th);
}
static struct thandle *osd_trans_create(const struct lu_env *env,
- struct dt_device *d)
+ struct dt_device *d)
{
- struct osd_thread_info *oti = osd_oti_get(env);
- struct osd_iobuf *iobuf = &oti->oti_iobuf;
- struct osd_thandle *oh;
- struct thandle *th;
- ENTRY;
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_iobuf *iobuf = &oti->oti_iobuf;
+ struct osd_thandle *oh;
+ struct thandle *th;
+ ENTRY;
- /* on pending IO in this thread should left from prev. request */
- LASSERT(cfs_atomic_read(&iobuf->dr_numreqs) == 0);
+ /* on pending IO in this thread should left from prev. request */
+ LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
- th = ERR_PTR(-ENOMEM);
- OBD_ALLOC_GFP(oh, sizeof *oh, CFS_ALLOC_IO);
- if (oh != NULL) {
+ th = ERR_PTR(-ENOMEM);
+ OBD_ALLOC_GFP(oh, sizeof *oh, GFP_NOFS);
+ if (oh != NULL) {
oh->ot_quota_trans = &oti->oti_quota_trans;
memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
- th = &oh->ot_super;
- th->th_dev = d;
- th->th_result = 0;
- th->th_tags = LCT_TX_HANDLE;
- oh->ot_credits = 0;
- oti->oti_dev = osd_dt_dev(d);
- CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
- osd_th_alloced(oh);
-
- memset(oti->oti_declare_ops, 0, OSD_OT_MAX);
- memset(oti->oti_declare_ops_rb, 0, OSD_OT_MAX);
- memset(oti->oti_declare_ops_cred, 0, OSD_OT_MAX);
+ th = &oh->ot_super;
+ th->th_dev = d;
+ th->th_result = 0;
+ th->th_tags = LCT_TX_HANDLE;
+ oh->ot_credits = 0;
+ atomic_set(&th->th_refc, 1);
+ th->th_alloc_size = sizeof(*oh);
+ oti->oti_dev = osd_dt_dev(d);
+ INIT_LIST_HEAD(&oh->ot_dcb_list);
+ osd_th_alloced(oh);
+
+ memset(oti->oti_declare_ops, 0,
+ sizeof(oti->oti_declare_ops));
+ memset(oti->oti_declare_ops_rb, 0,
+ sizeof(oti->oti_declare_ops_rb));
+ memset(oti->oti_declare_ops_cred, 0,
+ sizeof(oti->oti_declare_ops_cred));
oti->oti_rollback = false;
- }
- RETURN(th);
+ }
+ RETURN(th);
}
/*
LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
oh->ot_credits,
osd_journal(dev)->j_max_transaction_buffers);
-#ifdef OSD_TRACK_DECLARES
- CWARN(" create: %u/%u, delete: %u/%u, destroy: %u/%u\n",
+ CWARN(" create: %u/%u, destroy: %u/%u\n",
oti->oti_declare_ops[OSD_OT_CREATE],
oti->oti_declare_ops_cred[OSD_OT_CREATE],
- oti->oti_declare_ops[OSD_OT_DELETE],
- oti->oti_declare_ops_cred[OSD_OT_DELETE],
oti->oti_declare_ops[OSD_OT_DESTROY],
oti->oti_declare_ops_cred[OSD_OT_DESTROY]);
CWARN(" attr_set: %u/%u, xattr_set: %u/%u\n",
CWARN(" insert: %u/%u, delete: %u/%u\n",
oti->oti_declare_ops[OSD_OT_INSERT],
oti->oti_declare_ops_cred[OSD_OT_INSERT],
- oti->oti_declare_ops[OSD_OT_DESTROY],
- oti->oti_declare_ops_cred[OSD_OT_DESTROY]);
+ oti->oti_declare_ops[OSD_OT_DELETE],
+ oti->oti_declare_ops_cred[OSD_OT_DELETE]);
CWARN(" ref_add: %u/%u, ref_del: %u/%u\n",
oti->oti_declare_ops[OSD_OT_REF_ADD],
oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
last_credits = oh->ot_credits;
last_printed = jiffies;
}
-#endif
/* XXX Limit the credits to 'max_transaction_buffers', and
* let the underlying filesystem to catch the error if
* we really need so many credits.
* XXX temporary stuff. Some abstraction layer should
* be used.
*/
- jh = ldiskfs_journal_start_sb(osd_sb(dev), oh->ot_credits);
+ jh = osd_journal_start_sb(osd_sb(dev), LDISKFS_HT_MISC, oh->ot_credits);
osd_th_started(oh);
if (!IS_ERR(jh)) {
oh->ot_handle = jh;
lu_context_enter(&th->th_ctx);
lu_device_get(&d->dd_lu_dev);
- oh->ot_dev_link = lu_ref_add(&d->dd_lu_dev.ld_reference,
- "osd-tx", th);
+ lu_ref_add_at(&d->dd_lu_dev.ld_reference, &oh->ot_dev_link,
+ "osd-tx", th);
oti->oti_txns++;
rc = 0;
} else {
RETURN(rc);
}
+/* Check whether FID sequence \a seq belongs to this node: look the
+ * sequence up in the FLD and compare the resulting index with the local
+ * node id. Returns 1 when the sequence is local (or when there is no
+ * seq_server_site at all -- presumably a standalone setup; confirm), and
+ * 0 when it maps elsewhere or the FLD lookup fails. */
+static int osd_seq_exists(const struct lu_env *env,
+ struct osd_device *osd, obd_seq seq)
+{
+ struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
+ struct seq_server_site *ss = osd_seq_site(osd);
+ int rc;
+ ENTRY;
+
+ if (ss == NULL)
+ RETURN(1);
+
+ rc = osd_fld_lookup(env, osd, seq, range);
+ if (rc != 0) {
+ /* -ENOENT just means the sequence is not mapped here */
+ if (rc != -ENOENT)
+ CERROR("%s: can't lookup FLD sequence "LPX64
+ ": rc = %d\n", osd_name(osd), seq, rc);
+ RETURN(0);
+ }
+
+ RETURN(ss->ss_node_id == range->lsr_index);
+}
+
/*
* Concurrency: shouldn't matter.
*/
-static int osd_trans_stop(const struct lu_env *env, struct thandle *th)
+static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
+ struct thandle *th)
{
- int rc = 0;
- struct osd_thandle *oh;
- struct osd_thread_info *oti = osd_oti_get(env);
- struct osd_iobuf *iobuf = &oti->oti_iobuf;
+ int rc = 0;
+ struct osd_thandle *oh;
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_iobuf *iobuf = &oti->oti_iobuf;
struct qsd_instance *qsd = oti->oti_dev->od_quota_slave;
- ENTRY;
+ struct lquota_trans *qtrans;
+ ENTRY;
- oh = container_of0(th, struct osd_thandle, ot_super);
+ oh = container_of0(th, struct osd_thandle, ot_super);
- if (qsd != NULL)
- /* inform the quota slave device that the transaction is
- * stopping */
- qsd_op_end(env, qsd, oh->ot_quota_trans);
+ qtrans = oh->ot_quota_trans;
oh->ot_quota_trans = NULL;
if (oh->ot_handle != NULL) {
* notice we don't do this in osd_trans_start()
* as underlying transaction can change during truncate
*/
- osd_journal_callback_set(hdl, osd_trans_commit_cb,
+ ldiskfs_journal_callback_add(hdl, osd_trans_commit_cb,
&oh->ot_jcb);
LASSERT(oti->oti_txns == 1);
if (rc != 0)
CERROR("Failure to stop transaction: %d\n", rc);
} else {
- OBD_FREE_PTR(oh);
+ thandle_put(&oh->ot_super);
}
- /* as we want IO to journal and data IO be concurrent, we don't block
- * awaiting data IO completion in osd_do_bio(), instead we wait here
- * once transaction is submitted to the journal. all reqular requests
- * don't do direct IO (except read/write), thus this wait_event becomes
- * no-op for them.
- *
- * IMPORTANT: we have to wait till any IO submited by the thread is
- * completed otherwise iobuf may be corrupted by different request
- */
- cfs_wait_event(iobuf->dr_wait,
- cfs_atomic_read(&iobuf->dr_numreqs) == 0);
- if (!rc)
- rc = iobuf->dr_error;
+ /* inform the quota slave device that the transaction is stopping */
+ qsd_op_end(env, qsd, qtrans);
- RETURN(rc);
+ /* as we want IO to journal and data IO be concurrent, we don't block
+ * awaiting data IO completion in osd_do_bio(), instead we wait here
+ * once transaction is submitted to the journal. all regular requests
+ * don't do direct IO (except read/write), thus this wait_event becomes
+ * no-op for them.
+ *
+ * IMPORTANT: we have to wait till any IO submitted by the thread is
+ * completed otherwise iobuf may be corrupted by different request
+ */
+ wait_event(iobuf->dr_wait,
+ atomic_read(&iobuf->dr_numreqs) == 0);
+ osd_fini_iobuf(oti->oti_dev, iobuf);
+ if (!rc)
+ rc = iobuf->dr_error;
+
+ RETURN(rc);
}
static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
LASSERT(&dcb->dcb_func != NULL);
- cfs_list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
+ list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
return 0;
}
osd_index_fini(obj);
if (inode != NULL) {
struct qsd_instance *qsd = osd_obj2dev(obj)->od_quota_slave;
- qid_t uid = inode->i_uid;
- qid_t gid = inode->i_gid;
+ qid_t uid = i_uid_read(inode);
+ qid_t gid = i_gid_read(inode);
iput(inode);
obj->oo_inode = NULL;
d ? d->id_ops->id_name : "plain");
}
+#define GRANT_FOR_LOCAL_OIDS 32 /* 128kB for last_rcvd, quota files, ... */
+
/*
* Concurrency: shouldn't matter.
*/
}
spin_lock(&osd->od_osfs_lock);
- /* cache 1 second */
- if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
- result = sb->s_op->statfs(sb->s_root, ksfs);
- if (likely(result == 0)) { /* N.B. statfs can't really fail */
- osd->od_osfs_age = cfs_time_current_64();
- statfs_pack(&osd->od_statfs, ksfs);
- if (sb->s_flags & MS_RDONLY)
- sfs->os_state = OS_STATE_READONLY;
- }
+ result = sb->s_op->statfs(sb->s_root, ksfs);
+ if (likely(result == 0)) { /* N.B. statfs can't really fail */
+ statfs_pack(sfs, ksfs);
+ if (sb->s_flags & MS_RDONLY)
+ sfs->os_state = OS_STATE_READONLY;
}
- if (likely(result == 0))
- *sfs = osd->od_statfs;
spin_unlock(&osd->od_osfs_lock);
- if (unlikely(env == NULL))
+ if (unlikely(env == NULL))
OBD_FREE_PTR(ksfs);
+ /* Reserve a small amount of space for local objects like last_rcvd,
+ * llog, quota files, ... */
+ if (sfs->os_bavail <= GRANT_FOR_LOCAL_OIDS) {
+ sfs->os_bavail = 0;
+ } else {
+ sfs->os_bavail -= GRANT_FOR_LOCAL_OIDS;
+ /** Take out metadata overhead for indirect blocks */
+ sfs->os_bavail -= sfs->os_bavail >> (sb->s_blocksize_bits - 3);
+ }
+
return result;
}
struct dt_device_param *param)
{
struct super_block *sb = osd_sb(osd_dt_dev(dev));
+ int ea_overhead;
/*
* XXX should be taken from not-yet-existing fs abstraction layer.
*/
- param->ddp_mnt = osd_dt_dev(dev)->od_mnt;
param->ddp_max_name_len = LDISKFS_NAME_LEN;
param->ddp_max_nlink = LDISKFS_LINK_MAX;
param->ddp_block_shift = sb->s_blocksize_bits;
if (test_opt(sb, POSIX_ACL))
param->ddp_mntopts |= MNTOPT_ACL;
+ /* LOD might calculate the max stripe count based on max_ea_size,
+ * so we need take account in the overhead as well,
+ * xattr_header + magic + xattr_entry_head */
+ ea_overhead = sizeof(struct ldiskfs_xattr_header) + sizeof(__u32) +
+ LDISKFS_XATTR_LEN(XATTR_NAME_MAX_LEN);
+
#if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
- if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
- param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE;
- else
+ if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
+ param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE -
+ ea_overhead;
+ else
#endif
- param->ddp_max_ea_size = sb->s_blocksize;
-
+ param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
}
/*
static int osd_ro(const struct lu_env *env, struct dt_device *d)
{
- struct super_block *sb = osd_sb(osd_dt_dev(d));
- int rc;
- ENTRY;
+ struct super_block *sb = osd_sb(osd_dt_dev(d));
+ struct block_device *dev = sb->s_bdev;
+#ifdef HAVE_DEV_SET_RDONLY
+ struct block_device *jdev = LDISKFS_SB(sb)->journal_bdev;
+ int rc = 0;
+#else
+ int rc = -EOPNOTSUPP;
+#endif
+ ENTRY;
+#ifdef HAVE_DEV_SET_RDONLY
CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
- rc = __lvfs_set_rdonly(sb->s_bdev, LDISKFS_SB(sb)->journal_bdev);
- RETURN(rc);
+ if (jdev && (jdev != dev)) {
+ CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
+ (long)jdev);
+ dev_set_rdonly(jdev);
+ }
+ CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
+ dev_set_rdonly(dev);
+#else
+ CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
+ osd_dt_dev(d)->od_svname, (long)dev, rc);
+#endif
+ RETURN(rc);
}
/*
* If we mount with --data_journal we may need more.
*/
const int osd_dto_credits_noquota[DTO_NR] = {
- /**
- * Insert/Delete.
- * INDEX_EXTRA_TRANS_BLOCKS(8) +
- * SINGLEDATA_TRANS_BLOCKS(8)
- * XXX Note: maybe iam need more, since iam have more level than
- * EXT3 htree.
- */
- [DTO_INDEX_INSERT] = 16,
- [DTO_INDEX_DELETE] = 16,
- /**
+ /**
+ * Insert.
+ * INDEX_EXTRA_TRANS_BLOCKS(8) +
+ * SINGLEDATA_TRANS_BLOCKS(8)
+ * XXX Note: maybe iam need more, since iam have more level than
+ * EXT3 htree.
+ */
+ [DTO_INDEX_INSERT] = 16,
+ /**
+ * Delete
+ * just modify a single entry, probably merge few within a block
+ */
+ [DTO_INDEX_DELETE] = 1,
+ /**
* Used for OI scrub
- */
- [DTO_INDEX_UPDATE] = 16,
- /**
- * Create a object. The same as create object in EXT3.
- * DATA_TRANS_BLOCKS(14) +
- * INDEX_EXTRA_BLOCKS(8) +
- * 3(inode bits, groups, GDT)
- */
- [DTO_OBJECT_CREATE] = 25,
- /**
- * XXX: real credits to be fixed
- */
- [DTO_OBJECT_DELETE] = 25,
- /**
- * Attr set credits (inode)
- */
- [DTO_ATTR_SET_BASE] = 1,
- /**
- * Xattr set. The same as xattr of EXT3.
- * DATA_TRANS_BLOCKS(14)
- * XXX Note: in original MDS implmentation INDEX_EXTRA_TRANS_BLOCKS
- * are also counted in. Do not know why?
- */
- [DTO_XATTR_SET] = 14,
- [DTO_LOG_REC] = 14,
- /**
- * credits for inode change during write.
- */
- [DTO_WRITE_BASE] = 3,
- /**
- * credits for single block write.
- */
- [DTO_WRITE_BLOCK] = 14,
- /**
- * Attr set credits for chown.
- * This is extra credits for setattr, and it is null without quota
- */
- [DTO_ATTR_SET_CHOWN]= 0
+ */
+ [DTO_INDEX_UPDATE] = 16,
+ /**
+ * 4(inode, inode bits, groups, GDT)
+ * notice: OI updates are counted separately with DTO_INDEX_INSERT
+ */
+ [DTO_OBJECT_CREATE] = 4,
+ /**
+ * 4(inode, inode bits, groups, GDT)
+ * notice: OI updates are counted separately with DTO_INDEX_DELETE
+ */
+ [DTO_OBJECT_DELETE] = 4,
+ /**
+ * Attr set credits (inode)
+ */
+ [DTO_ATTR_SET_BASE] = 1,
+ /**
+ * Xattr set. The same as xattr of EXT3.
+ * DATA_TRANS_BLOCKS(14)
+ * XXX Note: in original MDS implmentation INDEX_EXTRA_TRANS_BLOCKS
+ * are also counted in. Do not know why?
+ */
+ [DTO_XATTR_SET] = 14,
+ /**
+ * credits for inode change during write.
+ */
+ [DTO_WRITE_BASE] = 3,
+ /**
+ * credits for single block write.
+ */
+ [DTO_WRITE_BLOCK] = 14,
+ /**
+ * Attr set credits for chown.
+ * This is extra credits for setattr, and it is null without quota
+ */
+ [DTO_ATTR_SET_CHOWN] = 0
};
static const struct dt_device_operations osd_dt_ops = {
}
int osd_object_auth(const struct lu_env *env, struct dt_object *dt,
- struct lustre_capa *capa, __u64 opc)
+ struct lustre_capa *capa, __u64 opc)
{
- const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
- struct osd_device *dev = osd_dev(dt->do_lu.lo_dev);
- struct md_capainfo *ci;
- int rc;
+ const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
+ struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
+ struct lu_capainfo *lci;
+ int rc;
- if (!dev->od_fl_capa)
- return 0;
+ if (!osd->od_fl_capa)
+ return 0;
- if (capa == BYPASS_CAPA)
- return 0;
+ if (capa == BYPASS_CAPA)
+ return 0;
- ci = md_capainfo(env);
- if (unlikely(!ci))
- return 0;
+ lci = lu_capainfo_get(env);
+ if (unlikely(lci == NULL))
+ return 0;
- if (ci->mc_auth == LC_ID_NONE)
- return 0;
+ if (lci->lci_auth == LC_ID_NONE)
+ return 0;
- if (!capa) {
- CERROR("no capability is provided for fid "DFID"\n", PFID(fid));
- return -EACCES;
- }
+ if (capa == NULL) {
+ CERROR("%s: no capability provided for FID "DFID": rc = %d\n",
+ osd_name(osd), PFID(fid), -EACCES);
+ return -EACCES;
+ }
- if (!lu_fid_eq(fid, &capa->lc_fid)) {
- DEBUG_CAPA(D_ERROR, capa, "fid "DFID" mismatch with",
- PFID(fid));
- return -EACCES;
- }
+ if (!lu_fid_eq(fid, &capa->lc_fid)) {
+ DEBUG_CAPA(D_ERROR, capa, "fid "DFID" mismatch with",
+ PFID(fid));
+ return -EACCES;
+ }
- if (!capa_opc_supported(capa, opc)) {
- DEBUG_CAPA(D_ERROR, capa, "opc "LPX64" not supported by", opc);
- return -EACCES;
- }
+ if (!capa_opc_supported(capa, opc)) {
+ DEBUG_CAPA(D_ERROR, capa, "opc "LPX64" not supported by", opc);
+ return -EACCES;
+ }
- if ((rc = capa_is_sane(env, dev, capa, dev->od_capa_keys))) {
- DEBUG_CAPA(D_ERROR, capa, "insane (rc %d)", rc);
- return -EACCES;
- }
+ rc = capa_is_sane(env, osd, capa, osd->od_capa_keys);
+ if (rc != 0) {
+ DEBUG_CAPA(D_ERROR, capa, "insane: rc = %d", rc);
+ return -EACCES;
+ }
- return 0;
+ return 0;
}
static struct timespec *osd_inode_time(const struct lu_env *env,
static void osd_inode_getattr(const struct lu_env *env,
- struct inode *inode, struct lu_attr *attr)
-{
- attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
- LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
- LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE;
-
- attr->la_atime = LTIME_S(inode->i_atime);
- attr->la_mtime = LTIME_S(inode->i_mtime);
- attr->la_ctime = LTIME_S(inode->i_ctime);
- attr->la_mode = inode->i_mode;
- attr->la_size = i_size_read(inode);
- attr->la_blocks = inode->i_blocks;
- attr->la_uid = inode->i_uid;
- attr->la_gid = inode->i_gid;
- attr->la_flags = LDISKFS_I(inode)->i_flags;
- attr->la_nlink = inode->i_nlink;
- attr->la_rdev = inode->i_rdev;
- attr->la_blksize = 1 << inode->i_blkbits;
- attr->la_blkbits = inode->i_blkbits;
+ struct inode *inode, struct lu_attr *attr)
+{
+ attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
+ LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
+ LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE |
+ LA_TYPE;
+
+ attr->la_atime = LTIME_S(inode->i_atime);
+ attr->la_mtime = LTIME_S(inode->i_mtime);
+ attr->la_ctime = LTIME_S(inode->i_ctime);
+ attr->la_mode = inode->i_mode;
+ attr->la_size = i_size_read(inode);
+ attr->la_blocks = inode->i_blocks;
+ attr->la_uid = i_uid_read(inode);
+ attr->la_gid = i_gid_read(inode);
+ attr->la_flags = LDISKFS_I(inode)->i_flags;
+ attr->la_nlink = inode->i_nlink;
+ attr->la_rdev = inode->i_rdev;
+ attr->la_blksize = 1 << inode->i_blkbits;
+ attr->la_blkbits = inode->i_blkbits;
}
static int osd_attr_get(const struct lu_env *env,
{
struct osd_object *obj = osd_dt_obj(dt);
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LINVRNT(osd_invariant(obj));
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
struct osd_object *obj;
struct osd_thread_info *info = osd_oti_get(env);
struct lquota_id_info *qi = &info->oti_qi;
+ qid_t uid;
+ qid_t gid;
long long bspace;
int rc = 0;
bool allocated;
* credits for updating quota accounting files and to trigger quota
* space adjustment once the operation is completed.*/
if ((attr->la_valid & LA_UID) != 0 &&
- attr->la_uid != obj->oo_inode->i_uid) {
+ attr->la_uid != (uid = i_uid_read(obj->oo_inode))) {
qi->lqi_type = USRQUOTA;
/* inode accounting */
RETURN(rc);
/* and one less inode for the current uid */
- qi->lqi_id.qid_uid = obj->oo_inode->i_uid;
+ qi->lqi_id.qid_uid = uid;
qi->lqi_space = -1;
rc = osd_declare_qid(env, oh, qi, true, NULL);
if (rc == -EDQUOT || rc == -EINPROGRESS)
RETURN(rc);
/* and finally less blocks for the current owner */
- qi->lqi_id.qid_uid = obj->oo_inode->i_uid;
+ qi->lqi_id.qid_uid = uid;
qi->lqi_space = -bspace;
rc = osd_declare_qid(env, oh, qi, true, NULL);
if (rc == -EDQUOT || rc == -EINPROGRESS)
}
if (attr->la_valid & LA_GID &&
- attr->la_gid != obj->oo_inode->i_gid) {
+ attr->la_gid != (gid = i_gid_read(obj->oo_inode))) {
qi->lqi_type = GRPQUOTA;
/* inode accounting */
RETURN(rc);
/* and one less inode for the current gid */
- qi->lqi_id.qid_gid = obj->oo_inode->i_gid;
+ qi->lqi_id.qid_gid = gid;
qi->lqi_space = -1;
rc = osd_declare_qid(env, oh, qi, true, NULL);
if (rc == -EDQUOT || rc == -EINPROGRESS)
RETURN(rc);
/* and finally less blocks for the current owner */
- qi->lqi_id.qid_gid = obj->oo_inode->i_gid;
+ qi->lqi_id.qid_gid = gid;
qi->lqi_space = -bspace;
rc = osd_declare_qid(env, oh, qi, true, NULL);
if (rc == -EDQUOT || rc == -EINPROGRESS)
bits = attr->la_valid;
- LASSERT(!(bits & LA_TYPE)); /* Huh? You want too much. */
-
if (bits & LA_ATIME)
inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
if (bits & LA_CTIME)
if (bits & LA_BLOCKS)
inode->i_blocks = attr->la_blocks;
#endif
- if (bits & LA_MODE)
- inode->i_mode = (inode->i_mode & S_IFMT) |
- (attr->la_mode & ~S_IFMT);
- if (bits & LA_UID)
- inode->i_uid = attr->la_uid;
- if (bits & LA_GID)
- inode->i_gid = attr->la_gid;
- if (bits & LA_NLINK)
+ if (bits & LA_MODE)
+ inode->i_mode = (inode->i_mode & S_IFMT) |
+ (attr->la_mode & ~S_IFMT);
+ if (bits & LA_UID)
+ i_uid_write(inode, attr->la_uid);
+ if (bits & LA_GID)
+ i_gid_write(inode, attr->la_gid);
+ if (bits & LA_NLINK)
set_nlink(inode, attr->la_nlink);
- if (bits & LA_RDEV)
- inode->i_rdev = attr->la_rdev;
+ if (bits & LA_RDEV)
+ inode->i_rdev = attr->la_rdev;
if (bits & LA_FLAGS) {
/* always keep S_NOCMTIME */
static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
{
- if ((attr->la_valid & LA_UID && attr->la_uid != inode->i_uid) ||
- (attr->la_valid & LA_GID && attr->la_gid != inode->i_gid)) {
+ if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
+ (attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
struct iattr iattr;
int rc;
iattr.ia_valid |= ATTR_UID;
if (attr->la_valid & LA_GID)
iattr.ia_valid |= ATTR_GID;
- iattr.ia_uid = attr->la_uid;
- iattr.ia_gid = attr->la_gid;
+ iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
+ iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);
rc = ll_vfs_dq_transfer(inode, &iattr);
if (rc) {
int rc;
LASSERT(handle != NULL);
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(osd_invariant(obj));
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
osd_trans_exec_op(env, handle, OSD_OT_ATTR_SET);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING)) {
+ struct osd_thread_info *oti = osd_oti_get(env);
+ const struct lu_fid *fid0 = lu_object_fid(&dt->do_lu);
+ struct lu_fid *fid1 = &oti->oti_fid;
+ struct osd_inode_id *id = &oti->oti_id;
+ struct iam_path_descr *ipd;
+ struct iam_container *bag;
+ struct osd_thandle *oh;
+ int rc;
+
+ fid_cpu_to_be(fid1, fid0);
+ memset(id, 1, sizeof(*id));
+ bag = &osd_fid2oi(osd_dev(dt->do_lu.lo_dev),
+ fid0)->oi_dir.od_container;
+ ipd = osd_idx_ipd_get(env, bag);
+ if (unlikely(ipd == NULL))
+ RETURN(-ENOMEM);
+
+ oh = container_of0(handle, struct osd_thandle, ot_super);
+ rc = iam_update(oh->ot_handle, bag, (const struct iam_key *)fid1,
+ (const struct iam_rec *)id, ipd);
+ osd_ipd_put(env, bag, ipd);
+ return(rc > 0 ? 0 : rc);
+ }
+
inode = obj->oo_inode;
ll_vfs_dq_init(inode);
spin_unlock(&obj->oo_guard);
if (!rc)
- inode->i_sb->s_op->dirty_inode(inode);
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
return rc;
}
}
static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
- cfs_umode_t mode,
- struct dt_allocation_hint *hint,
- struct thandle *th)
+ umode_t mode, struct dt_allocation_hint *hint,
+ struct thandle *th)
{
int result;
struct osd_device *osd = osd_obj2dev(obj);
}
static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
- struct lu_attr *attr,
- struct dt_allocation_hint *hint,
- struct dt_object_format *dof,
- struct thandle *th)
+ struct lu_attr *attr,
+ struct dt_allocation_hint *hint,
+ struct dt_object_format *dof,
+ struct thandle *th)
{
- cfs_umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
- int result;
+ umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
+ int result;
- LINVRNT(osd_invariant(obj));
- LASSERT(obj->oo_inode == NULL);
+ LINVRNT(osd_invariant(obj));
+ LASSERT(obj->oo_inode == NULL);
LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
S_ISFIFO(mode) || S_ISSOCK(mode));
static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
struct dt_object *parent, struct dt_object *child,
- cfs_umode_t child_mode)
+ umode_t child_mode)
{
LASSERT(ah);
- memset(ah, 0, sizeof(*ah));
ah->dah_parent = parent;
ah->dah_mode = child_mode;
}
* enabled on ldiskfs (lquota takes care of it).
*/
LASSERTF(result == 0, "%d", result);
- inode->i_sb->s_op->dirty_inode(inode);
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
}
attr->la_valid = valid;
struct thandle *th)
{
int result;
+ __u32 umask;
+
+ /* we drop umask so that permissions we pass are not affected */
+ umask = current->fs->umask;
+ current->fs->umask = 0;
result = osd_create_type_f(dof->dof_type)(info, obj, attr, hint, dof,
th);
unlock_new_inode(obj->oo_inode);
}
- return result;
+ /* restore previous umask value */
+ current->fs->umask = umask;
+
+ return result;
}
/**
* \retval 0, on success
*/
static int __osd_oi_insert(const struct lu_env *env, struct osd_object *obj,
- const struct lu_fid *fid, struct thandle *th)
+ const struct lu_fid *fid, struct thandle *th)
{
- struct osd_thread_info *info = osd_oti_get(env);
- struct osd_inode_id *id = &info->oti_id;
- struct osd_device *osd = osd_obj2dev(obj);
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct osd_inode_id *id = &info->oti_id;
+ struct osd_device *osd = osd_obj2dev(obj);
+ struct osd_thandle *oh;
- LASSERT(obj->oo_inode != NULL);
+ LASSERT(obj->oo_inode != NULL);
+
+ oh = container_of0(th, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle);
osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
- return osd_oi_insert(info, osd, fid, id, th);
+ return osd_oi_insert(info, osd, fid, id, oh->ot_handle, OI_CHECK_FLD);
}
int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
- const struct lu_fid *fid, struct lu_seq_range *range)
+ obd_seq seq, struct lu_seq_range *range)
{
struct seq_server_site *ss = osd_seq_site(osd);
- int rc;
-
- if (fid_is_igif(fid)) {
- range->lsr_flags = LU_SEQ_RANGE_MDT;
- range->lsr_index = 0;
- return 0;
- }
- if (fid_is_idif(fid)) {
- range->lsr_flags = LU_SEQ_RANGE_OST;
- range->lsr_index = fid_idif_ost_idx(fid);
+ if (fid_seq_is_idif(seq)) {
+ fld_range_set_ost(range);
+ range->lsr_index = idif_ost_idx(seq);
return 0;
}
- if (!fid_is_norm(fid)) {
- range->lsr_flags = LU_SEQ_RANGE_MDT;
+ if (!fid_seq_in_fldb(seq)) {
+ fld_range_set_mdt(range);
if (ss != NULL)
/* FIXME: If ss is NULL, it suppose not get lsr_index
* at all */
}
LASSERT(ss != NULL);
- range->lsr_flags = -1;
- rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(fid), range);
- if (rc != 0) {
- CERROR("%s can not find "DFID": rc = %d\n",
- osd2lu_dev(osd)->ld_obd->obd_name, PFID(fid), rc);
- }
- return rc;
+ fld_range_set_any(range);
+ /* OSD will only do local fld lookup */
+ return fld_local_lookup(env, ss->ss_server_fld, seq, range);
}
-
+/*
+ * Concurrency: no external locking is necessary.
+ */
static int osd_declare_object_create(const struct lu_env *env,
struct dt_object *dt,
struct lu_attr *attr,
struct dt_object_format *dof,
struct thandle *handle)
{
- struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
struct osd_thandle *oh;
int rc;
ENTRY;
osd_trans_declare_op(env, oh, OSD_OT_CREATE,
osd_dto_credits_noquota[DTO_OBJECT_CREATE]);
- /* XXX: So far, only normal fid needs be inserted into the oi,
- * things could be changed later. Revise following code then. */
- if (fid_is_norm(lu_object_fid(&dt->do_lu)) &&
- !fid_is_on_ost(osd_oti_get(env), osd_dt_dev(handle->th_dev),
- lu_object_fid(&dt->do_lu))) {
- /* Reuse idle OI block may cause additional one OI block
- * to be changed. */
- osd_trans_declare_op(env, oh, OSD_OT_INSERT,
- osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
- }
+ /* Reuse idle OI block may cause additional one OI block
+ * to be changed. */
+ osd_trans_declare_op(env, oh, OSD_OT_INSERT,
+ osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
+
/* If this is directory, then we expect . and .. to be inserted as
* well. The one directory block always needs to be created for the
* directory, so we could use DTO_WRITE_BASE here (GDT, block bitmap,
if (rc != 0)
RETURN(rc);
- /* It does fld look up inside declare, and the result will be
- * added to fld cache, so the following fld lookup inside insert
- * does not need send RPC anymore, so avoid send rpc with holding
- * transaction */
- if (fid_is_norm(lu_object_fid(&dt->do_lu)) &&
- !fid_is_last_id(lu_object_fid(&dt->do_lu)))
- osd_fld_lookup(env, osd_dt_dev(handle->th_dev),
- lu_object_fid(&dt->do_lu), range);
-
-
RETURN(rc);
}
ENTRY;
LINVRNT(osd_invariant(obj));
- LASSERT(!dt_object_exists(dt));
+ LASSERT(!dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(osd_write_locked(env, obj));
LASSERT(th != NULL);
if (result == 0)
result = __osd_oi_insert(env, obj, fid, th);
- LASSERT(ergo(result == 0, dt_object_exists(dt)));
+ LASSERT(ergo(result == 0,
+ dt_object_exists(dt) && !dt_object_remote(dt)));
+
LASSERT(osd_invariant(obj));
RETURN(result);
}
LASSERT(oh->ot_handle == NULL);
LASSERT(inode);
- osd_trans_declare_op(env, oh, OSD_OT_DELETE,
+ osd_trans_declare_op(env, oh, OSD_OT_DESTROY,
osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
- /* XXX: So far, only normal fid needs to be inserted into the OI,
- * so only normal fid needs to be removed from the OI also.
- * Recycle idle OI leaf may cause additional three OI blocks
+ /* Recycle idle OI leaf may cause additional three OI blocks
* to be changed. */
- osd_trans_declare_op(env, oh, OSD_OT_DESTROY,
- fid_is_norm(lu_object_fid(&dt->do_lu)) ?
- osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3 : 0);
-
+ osd_trans_declare_op(env, oh, OSD_OT_DELETE,
+ osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
/* one less inode */
- rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, -1, oh,
- false, true, NULL, false);
+ rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
+ -1, oh, false, true, NULL, false);
if (rc)
RETURN(rc);
/* data to be truncated */
- rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
- true, true, NULL, false);
+ rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
+ 0, oh, true, true, NULL, false);
RETURN(rc);
}
if (unlikely(fid_is_acct(fid)))
RETURN(-EPERM);
- /* Parallel control for OI scrub. For most of cases, there is no
- * lock contention. So it will not affect unlink performance. */
- mutex_lock(&inode->i_mutex);
if (S_ISDIR(inode->i_mode)) {
- LASSERT(osd_inode_unlinked(inode) ||
- inode->i_nlink == 1);
+ LASSERT(osd_inode_unlinked(inode) || inode->i_nlink == 1 ||
+ inode->i_nlink == 2);
+ /* it will check/delete the inode from remote parent,
+ * how to optimize it? unlink performance impact XXX */
+ result = osd_delete_from_remote_parent(env, osd, obj, oh);
+ if (result != 0 && result != -ENOENT) {
+ CERROR("%s: delete inode "DFID": rc = %d\n",
+ osd_name(osd), PFID(fid), result);
+ }
spin_lock(&obj->oo_guard);
clear_nlink(inode);
spin_unlock(&obj->oo_guard);
- inode->i_sb->s_op->dirty_inode(inode);
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
}
osd_trans_exec_op(env, th, OSD_OT_DESTROY);
- result = osd_oi_delete(osd_oti_get(env), osd, fid, th);
- mutex_unlock(&inode->i_mutex);
+ result = osd_oi_delete(osd_oti_get(env), osd, fid, oh->ot_handle,
+ OI_CHECK_FLD);
/* XXX: add to ext3 orphan list */
/* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
RETURN(0);
}
-static inline int __osd_xattr_set(struct osd_thread_info *info,
- struct inode *inode, const char *name,
- const void *buf, int buflen, int fl)
-{
- struct dentry *dentry = &info->oti_child_dentry;
-
- ll_vfs_dq_init(inode);
- dentry->d_inode = inode;
- return inode->i_op->setxattr(dentry, name, buf, buflen, fl);
-}
-
/**
* Put the fid into lustre_mdt_attrs, and then place the structure
* inode's ea. This fid should not be altered during the life time
* FIXME: It is good to have/use ldiskfs_xattr_set_handle() here
*/
int osd_ea_fid_set(struct osd_thread_info *info, struct inode *inode,
- const struct lu_fid *fid)
+ const struct lu_fid *fid, __u32 compat, __u32 incompat)
{
struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
int rc;
+ ENTRY;
- lustre_lma_init(lma, fid);
+ if (OBD_FAIL_CHECK(OBD_FAIL_FID_INLMA))
+ RETURN(0);
+
+ lustre_lma_init(lma, fid, compat, incompat);
lustre_lma_swab(lma);
rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma, sizeof(*lma),
XATTR_CREATE);
- /* Someone may created the EA by race. */
- if (unlikely(rc == -EEXIST))
- rc = 0;
- return rc;
+ /* LMA may already exist, but we need to check that all the
+ * desired compat/incompat flags have been added. */
+ if (unlikely(rc == -EEXIST)) {
+ if (compat == 0 && incompat == 0)
+ RETURN(0);
+
+ rc = __osd_xattr_get(inode, &info->oti_obj_dentry,
+ XATTR_NAME_LMA, info->oti_mdt_attrs_old,
+ LMA_OLD_SIZE);
+ if (rc <= 0)
+ RETURN(-EINVAL);
+
+ lustre_lma_swab(lma);
+ if (!(~lma->lma_compat & compat) &&
+ !(~lma->lma_incompat & incompat))
+ RETURN(0);
+
+ lma->lma_compat |= compat;
+ lma->lma_incompat |= incompat;
+ lustre_lma_swab(lma);
+ rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
+ sizeof(*lma), XATTR_REPLACE);
+ }
+
+ RETURN(rc);
}
/**
void osd_get_ldiskfs_dirent_param(struct ldiskfs_dentry_param *param,
const struct dt_rec *fid)
{
- /* XXX: replace the check with "!fid_is_client_mdt_visible()"
- * when FID in OI file introduced for local object. */
- if (!fid_is_norm((const struct lu_fid *)fid) &&
- !fid_is_igif((const struct lu_fid *)fid)) {
+ if (!fid_is_namespace_visible((const struct lu_fid *)fid) ||
+ OBD_FAIL_CHECK(OBD_FAIL_FID_IGIF)) {
param->edp_magic = 0;
return;
}
}
/**
- * Try to read the fid from inode ea into dt_rec, if return value
- * i.e. rc is +ve, then we got fid, otherwise we will have to form igif
+ * Try to read the fid from inode ea into dt_rec.
*
* \param fid object fid.
*
RETURN(0);
}
+static int osd_add_dot_dotdot_internal(struct osd_thread_info *info,
+ struct inode *dir,
+ struct inode *parent_dir,
+ const struct dt_rec *dot_fid,
+ const struct dt_rec *dot_dot_fid,
+ struct osd_thandle *oth)
+{
+ struct ldiskfs_dentry_param *dot_ldp;
+ struct ldiskfs_dentry_param *dot_dot_ldp;
+
+ dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
+ osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
+
+ dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
+ dot_ldp->edp_magic = 0;
+ return ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
+ dir, dot_ldp, dot_dot_ldp);
+}
+
+/**
+ * Create an local agent inode for remote entry
+ */
+static struct inode *osd_create_local_agent_inode(const struct lu_env *env,
+ struct osd_device *osd,
+ struct osd_object *pobj,
+ const struct lu_fid *fid,
+ struct thandle *th)
+{
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct inode *local;
+ struct osd_thandle *oh;
+ int rc;
+ ENTRY;
+
+ LASSERT(th);
+ oh = container_of(th, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle->h_transaction != NULL);
+
+ /* FIXME: Insert index api needs to know the mode of
+ * the remote object. Just use S_IFDIR for now */
+ local = ldiskfs_create_inode(oh->ot_handle, pobj->oo_inode, S_IFDIR);
+ if (IS_ERR(local)) {
+ CERROR("%s: create local error %d\n", osd_name(osd),
+ (int)PTR_ERR(local));
+ RETURN(local);
+ }
+
+ /* Set special LMA flag for local agent inode */
+ rc = osd_ea_fid_set(info, local, fid, 0, LMAI_AGENT);
+ if (rc != 0) {
+ CERROR("%s: set LMA for "DFID" remote inode failed: rc = %d\n",
+ osd_name(osd), PFID(fid), rc);
+ RETURN(ERR_PTR(rc));
+ }
+
+ rc = osd_add_dot_dotdot_internal(info, local, pobj->oo_inode,
+ (const struct dt_rec *)lu_object_fid(&pobj->oo_dt.do_lu),
+ (const struct dt_rec *)fid, oh);
+ if (rc != 0) {
+ CERROR("%s: "DFID" add dot dotdot error: rc = %d\n",
+ osd_name(osd), PFID(fid), rc);
+ RETURN(ERR_PTR(rc));
+ }
+
+ RETURN(local);
+}
+
+/**
+ * Delete local agent inode for remote entry
+ */
+static int osd_delete_local_agent_inode(const struct lu_env *env,
+ struct osd_device *osd,
+ const struct lu_fid *fid,
+ __u32 ino, struct osd_thandle *oh)
+{
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_inode_id *id = &oti->oti_id;
+ struct inode *inode;
+ ENTRY;
+
+ id->oii_ino = le32_to_cpu(ino);
+ id->oii_gen = OSD_OII_NOGEN;
+ inode = osd_iget(oti, osd, id);
+ if (IS_ERR(inode)) {
+ CERROR("%s: iget error "DFID" id %u:%u\n", osd_name(osd),
+ PFID(fid), id->oii_ino, id->oii_gen);
+ RETURN(PTR_ERR(inode));
+ }
+
+ clear_nlink(inode);
+ mark_inode_dirty(inode);
+ CDEBUG(D_INODE, "%s: delete remote inode "DFID" %lu\n",
+ osd_name(osd), PFID(fid), inode->i_ino);
+ iput(inode);
+ RETURN(0);
+}
+
/**
* OSD layer object create function for interoperability mode (b11826).
* This is mostly similar to osd_object_create(). Only difference being, fid is
ENTRY;
LASSERT(osd_invariant(obj));
- LASSERT(!dt_object_exists(dt));
+ LASSERT(!dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(osd_write_locked(env, obj));
LASSERT(th != NULL);
osd_trans_declare_rb(env, th, OSD_OT_REF_ADD);
result = __osd_object_create(info, obj, attr, hint, dof, th);
- /* objects under osd root shld have igif fid, so dont add fid EA */
- /* For ost object, the fid will be stored during first write */
- if (result == 0 && fid_seq(fid) >= FID_SEQ_NORMAL &&
- !fid_is_on_ost(info, osd_dt_dev(th->th_dev), fid))
- result = osd_ea_fid_set(info, obj->oo_inode, fid);
+ if (result == 0)
+ result = osd_ea_fid_set(info, obj->oo_inode, fid,
+ fid_is_on_ost(info, osd_obj2dev(obj),
+ fid, OI_CHECK_FLD) ?
+ LMAC_FID_ON_OST : 0, 0);
- if (result == 0)
- result = __osd_oi_insert(env, obj, fid, th);
+ if (result == 0)
+ result = __osd_oi_insert(env, obj, fid, th);
- LASSERT(ergo(result == 0, dt_object_exists(dt)));
+ LASSERT(ergo(result == 0,
+ dt_object_exists(dt) && !dt_object_remote(dt)));
LINVRNT(osd_invariant(obj));
RETURN(result);
}
static int osd_object_ref_add(const struct lu_env *env,
struct dt_object *dt, struct thandle *th)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct inode *inode = obj->oo_inode;
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct osd_thandle *oh;
+ int rc = 0;
LINVRNT(osd_invariant(obj));
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(osd_write_locked(env, obj));
LASSERT(th != NULL);
+ oh = container_of0(th, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle != NULL);
+
osd_trans_exec_op(env, th, OSD_OT_REF_ADD);
+ CDEBUG(D_INODE, DFID" increase nlink %d\n",
+ PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
/*
- * DIR_NLINK feature is set for compatibility reasons if:
- * 1) nlinks > LDISKFS_LINK_MAX, or
- * 2) nlinks == 2, since this indicates i_nlink was previously 1.
+ * The DIR_NLINK feature allows directories to exceed LDISKFS_LINK_MAX
+ * (65000) subdirectories by storing "1" in i_nlink if the link count
+ * would otherwise overflow. Directory traversal tools understand
+ * that (st_nlink == 1) indicates that the filesystem does not track
+ * hard links count on the directory, and will not abort subdirectory
+ * scanning early once (st_nlink - 2) subdirs have been found.
*
- * It is easier to always set this flag (rather than check and set),
- * since it has less overhead, and the superblock will be dirtied
- * at some point. Both e2fsprogs and any Lustre-supported ldiskfs
- * do not actually care whether this flag is set or not.
+ * This also has to properly handle the case of inodes with nlink == 0
+ * in case they are being linked into the PENDING directory
*/
spin_lock(&obj->oo_guard);
- /* inc_nlink from 0 may cause WARN_ON */
- if(inode->i_nlink == 0)
- set_nlink(inode, 1);
- else
- inc_nlink(inode);
- if (S_ISDIR(inode->i_mode) && inode->i_nlink > 1) {
- if (inode->i_nlink >= LDISKFS_LINK_MAX ||
- inode->i_nlink == 2)
- set_nlink(inode, 1);
- }
+ ldiskfs_inc_count(oh->ot_handle, inode);
LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
spin_unlock(&obj->oo_guard);
- inode->i_sb->s_op->dirty_inode(inode);
+
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
LINVRNT(osd_invariant(obj));
- return 0;
+ return rc;
}
static int osd_declare_object_ref_del(const struct lu_env *env,
{
struct osd_thandle *oh;
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(handle != NULL);
oh = container_of0(handle, struct osd_thandle, ot_super);
static int osd_object_ref_del(const struct lu_env *env, struct dt_object *dt,
struct thandle *th)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct inode *inode = obj->oo_inode;
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
+ struct osd_thandle *oh;
- LINVRNT(osd_invariant(obj));
- LASSERT(dt_object_exists(dt));
- LASSERT(osd_write_locked(env, obj));
- LASSERT(th != NULL);
+ LINVRNT(osd_invariant(obj));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
+ LASSERT(osd_write_locked(env, obj));
+ LASSERT(th != NULL);
+
+ oh = container_of0(th, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle != NULL);
osd_trans_exec_op(env, th, OSD_OT_REF_DEL);
spin_lock(&obj->oo_guard);
- LASSERT(inode->i_nlink > 0);
- drop_nlink(inode);
- /* If this is/was a many-subdir directory (nlink > LDISKFS_LINK_MAX)
- * then the nlink count is 1. Don't let it be set to 0 or the directory
- * inode will be deleted incorrectly. */
- if (S_ISDIR(inode->i_mode) && inode->i_nlink == 0)
- set_nlink(inode, 1);
+ /* That can be result of upgrade from old Lustre version and
+ * applied only to local files. Just skip this ref_del call.
+ * ext4_unlink() only treats this as a warning, don't LASSERT here.*/
+ if (inode->i_nlink == 0) {
+ CDEBUG_LIMIT(fid_is_norm(lu_object_fid(&dt->do_lu)) ?
+ D_ERROR : D_INODE, "%s: nlink == 0 on "DFID
+ ", maybe an upgraded file? (LU-3915)\n",
+ osd_name(osd), PFID(lu_object_fid(&dt->do_lu)));
+ spin_unlock(&obj->oo_guard);
+ return 0;
+ }
+
+ CDEBUG(D_INODE, DFID" decrease nlink %d\n",
+ PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
+
+ ldiskfs_dec_count(oh->ot_handle, inode);
spin_unlock(&obj->oo_guard);
- inode->i_sb->s_op->dirty_inode(inode);
+
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
LINVRNT(osd_invariant(obj));
return 0;
return sizeof(dt_obj_version_t);
}
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(inode->i_op != NULL && inode->i_op->getxattr != NULL);
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
int fl, struct thandle *handle)
{
struct osd_thandle *oh;
+ int credits;
LASSERT(handle != NULL);
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
- strcmp(name, XATTR_NAME_VERSION) == 0 ?
- osd_dto_credits_noquota[DTO_ATTR_SET_BASE] :
- osd_dto_credits_noquota[DTO_XATTR_SET]);
+ /* optimistic optimization: LMA is set first and usually fit inode */
+ if (strcmp(name, XATTR_NAME_LMA) == 0) {
+ if (dt_object_exists(dt))
+ credits = 0;
+ else
+ credits = 1;
+ } else if (strcmp(name, XATTR_NAME_VERSION) == 0) {
+ credits = 1;
+ } else {
+ struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
+ struct super_block *sb = osd_sb(osd);
+ credits = osd_dto_credits_noquota[DTO_XATTR_SET];
+ if (buf && buf->lb_len > sb->s_blocksize) {
+ credits *= (buf->lb_len + sb->s_blocksize - 1) >>
+ sb->s_blocksize_bits;
+ }
+ }
+
+ osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET, credits);
return 0;
}
LDISKFS_I(inode)->i_fs_version = *new_version;
/** Version is set after all inode operations are finished,
* so we should mark it dirty here */
- inode->i_sb->s_op->dirty_inode(inode);
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
}
/*
struct inode *inode = obj->oo_inode;
struct osd_thread_info *info = osd_oti_get(env);
int fs_flags = 0;
+ ENTRY;
LASSERT(handle != NULL);
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
return -EACCES;
+ CDEBUG(D_INODE, DFID" set xattr '%s' with size %zd\n",
+ PFID(lu_object_fid(&dt->do_lu)), name, buf->lb_len);
+
osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
if (fl & LU_XATTR_REPLACE)
fs_flags |= XATTR_REPLACE;
if (fl & LU_XATTR_CREATE)
fs_flags |= XATTR_CREATE;
+ if (strcmp(name, XATTR_NAME_LMV) == 0) {
+ struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+ int rc;
+
+ rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
+ if (rc != 0)
+ RETURN(rc);
+
+ lma->lma_incompat |= LMAI_STRIPED;
+ lustre_lma_swab(lma);
+ rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
+ sizeof(*lma), XATTR_REPLACE);
+ if (rc != 0)
+ RETURN(rc);
+ }
+
return __osd_xattr_set(info, inode, name, buf->lb_buf, buf->lb_len,
fs_flags);
}
struct osd_thread_info *info = osd_oti_get(env);
struct dentry *dentry = &info->oti_obj_dentry;
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(inode->i_op != NULL && inode->i_op->listxattr != NULL);
- LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
return -EACCES;
dentry->d_inode = inode;
+ dentry->d_sb = inode->i_sb;
return inode->i_op->listxattr(dentry, buf->lb_buf, buf->lb_len);
}
{
struct osd_thandle *oh;
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(handle != NULL);
oh = container_of0(handle, struct osd_thandle, ot_super);
struct dentry *dentry = &info->oti_obj_dentry;
int rc;
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(inode->i_op != NULL && inode->i_op->removexattr != NULL);
LASSERT(handle != NULL);
ll_vfs_dq_init(inode);
dentry->d_inode = inode;
+ dentry->d_sb = inode->i_sb;
rc = inode->i_op->removexattr(dentry, name);
return rc;
}
static struct obd_capa *osd_capa_get(const struct lu_env *env,
- struct dt_object *dt,
- struct lustre_capa *old,
- __u64 opc)
+ struct dt_object *dt,
+ struct lustre_capa *old, __u64 opc)
{
- struct osd_thread_info *info = osd_oti_get(env);
- const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_device *dev = osd_obj2dev(obj);
- struct lustre_capa_key *key = &info->oti_capa_key;
- struct lustre_capa *capa = &info->oti_capa;
- struct obd_capa *oc;
- struct md_capainfo *ci;
- int rc;
- ENTRY;
+ struct osd_thread_info *info = osd_oti_get(env);
+ const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_device *osd = osd_obj2dev(obj);
+ struct lustre_capa_key *key = &info->oti_capa_key;
+ struct lustre_capa *capa = &info->oti_capa;
+ struct obd_capa *oc;
+ struct lu_capainfo *lci;
+ int rc;
+ ENTRY;
- if (!dev->od_fl_capa)
- RETURN(ERR_PTR(-ENOENT));
+ if (!osd->od_fl_capa)
+ RETURN(ERR_PTR(-ENOENT));
- LASSERT(dt_object_exists(dt));
- LINVRNT(osd_invariant(obj));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
+ LINVRNT(osd_invariant(obj));
- /* renewal sanity check */
- if (old && osd_object_auth(env, dt, old, opc))
- RETURN(ERR_PTR(-EACCES));
-
- ci = md_capainfo(env);
- if (unlikely(!ci))
- RETURN(ERR_PTR(-ENOENT));
-
- switch (ci->mc_auth) {
- case LC_ID_NONE:
- RETURN(NULL);
- case LC_ID_PLAIN:
- capa->lc_uid = obj->oo_inode->i_uid;
- capa->lc_gid = obj->oo_inode->i_gid;
- capa->lc_flags = LC_ID_PLAIN;
- break;
- case LC_ID_CONVERT: {
- __u32 d[4], s[4];
-
- s[0] = obj->oo_inode->i_uid;
- cfs_get_random_bytes(&(s[1]), sizeof(__u32));
- s[2] = obj->oo_inode->i_gid;
- cfs_get_random_bytes(&(s[3]), sizeof(__u32));
- rc = capa_encrypt_id(d, s, key->lk_key, CAPA_HMAC_KEY_MAX_LEN);
- if (unlikely(rc))
- RETURN(ERR_PTR(rc));
-
- capa->lc_uid = ((__u64)d[1] << 32) | d[0];
- capa->lc_gid = ((__u64)d[3] << 32) | d[2];
- capa->lc_flags = LC_ID_CONVERT;
- break;
- }
- default:
- RETURN(ERR_PTR(-EINVAL));
+ /* renewal sanity check */
+ if (old && osd_object_auth(env, dt, old, opc))
+ RETURN(ERR_PTR(-EACCES));
+
+ lci = lu_capainfo_get(env);
+ if (unlikely(lci == NULL))
+ RETURN(ERR_PTR(-ENOENT));
+
+ switch (lci->lci_auth) {
+ case LC_ID_NONE:
+ RETURN(NULL);
+ case LC_ID_PLAIN:
+ capa->lc_uid = i_uid_read(obj->oo_inode);
+ capa->lc_gid = i_gid_read(obj->oo_inode);
+ capa->lc_flags = LC_ID_PLAIN;
+ break;
+ case LC_ID_CONVERT: {
+ __u32 d[4], s[4];
+
+ s[0] = i_uid_read(obj->oo_inode);
+ cfs_get_random_bytes(&(s[1]), sizeof(__u32));
+ s[2] = i_gid_read(obj->oo_inode);
+ cfs_get_random_bytes(&(s[3]), sizeof(__u32));
+ rc = capa_encrypt_id(d, s, key->lk_key, CAPA_HMAC_KEY_MAX_LEN);
+ if (unlikely(rc))
+ RETURN(ERR_PTR(rc));
+
+ capa->lc_uid = ((__u64)d[1] << 32) | d[0];
+ capa->lc_gid = ((__u64)d[3] << 32) | d[2];
+ capa->lc_flags = LC_ID_CONVERT;
+ break;
}
+ default:
+ RETURN(ERR_PTR(-EINVAL));
+ }
- capa->lc_fid = *fid;
- capa->lc_opc = opc;
- capa->lc_flags |= dev->od_capa_alg << 24;
- capa->lc_timeout = dev->od_capa_timeout;
- capa->lc_expiry = 0;
+ capa->lc_fid = *fid;
+ capa->lc_opc = opc;
+ capa->lc_flags |= osd->od_capa_alg << 24;
+ capa->lc_timeout = osd->od_capa_timeout;
+ capa->lc_expiry = 0;
- oc = capa_lookup(dev->od_capa_hash, capa, 1);
- if (oc) {
- LASSERT(!capa_is_expired(oc));
- RETURN(oc);
- }
+ oc = capa_lookup(osd->od_capa_hash, capa, 1);
+ if (oc) {
+ LASSERT(!capa_is_expired(oc));
+ RETURN(oc);
+ }
spin_lock(&capa_lock);
- *key = dev->od_capa_keys[1];
+ *key = osd->od_capa_keys[1];
spin_unlock(&capa_lock);
- capa->lc_keyid = key->lk_keyid;
- capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;
+ capa->lc_keyid = key->lk_keyid;
+ capa->lc_expiry = cfs_time_current_sec() + osd->od_capa_timeout;
- rc = capa_hmac(capa->lc_hmac, capa, key->lk_key);
- if (rc) {
- DEBUG_CAPA(D_ERROR, capa, "HMAC failed: %d for", rc);
- RETURN(ERR_PTR(rc));
- }
+ rc = capa_hmac(capa->lc_hmac, capa, key->lk_key);
+ if (rc) {
+ DEBUG_CAPA(D_ERROR, capa, "HMAC failed: %d for", rc);
+ RETURN(ERR_PTR(rc));
+ }
- oc = capa_add(dev->od_capa_hash, capa);
- RETURN(oc);
+ oc = capa_add(osd->od_capa_hash, capa);
+ RETURN(oc);
}
-static int osd_object_sync(const struct lu_env *env, struct dt_object *dt)
+static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
+ __u64 start, __u64 end)
{
struct osd_object *obj = osd_dt_obj(dt);
struct inode *inode = obj->oo_inode;
ENTRY;
dentry->d_inode = inode;
+ dentry->d_sb = inode->i_sb;
file->f_dentry = dentry;
file->f_mapping = inode->i_mapping;
file->f_op = inode->i_fop;
+ set_file_inode(file, inode);
+
+#ifdef HAVE_FILE_FSYNC_4ARGS
+ rc = file->f_op->fsync(file, start, end, 0);
+#elif defined(HAVE_FILE_FSYNC_2ARGS)
+ mutex_lock(&inode->i_mutex);
+ rc = file->f_op->fsync(file, 0);
+ mutex_unlock(&inode->i_mutex);
+#else
mutex_lock(&inode->i_mutex);
rc = file->f_op->fsync(file, dentry, 0);
mutex_unlock(&inode->i_mutex);
+#endif
+
RETURN(rc);
}
struct osd_object *obj = osd_dt_obj(dt);
LINVRNT(osd_invariant(obj));
- LASSERT(dt_object_exists(dt));
if (osd_object_is_root(obj)) {
dt->do_index_ops = &osd_index_ea_ops;
result = 0;
} else if (feat == &dt_directory_features) {
dt->do_index_ops = &osd_index_ea_ops;
- if (S_ISDIR(obj->oo_inode->i_mode))
+ if (obj->oo_inode != NULL && S_ISDIR(obj->oo_inode->i_mode))
result = 0;
else
result = -ENOTDIR;
* recheck under lock.
*/
if (!osd_has_index(obj))
- result = osd_iam_container_init(env, obj, dir);
+ result = osd_iam_container_init(env, obj,
+ obj->oo_dir);
else
result = 0;
up_write(&obj->oo_ext_idx_sem);
}
LINVRNT(osd_invariant(obj));
- if (is_quota_glb_feat(feat))
+ if (result == 0 && is_quota_glb_feat(feat) &&
+ fid_seq(lu_object_fid(&dt->do_lu)) == FID_SEQ_QUOTA_GLB)
result = osd_quota_migration(env, dt, feat);
return result;
ENTRY;
LINVRNT(osd_invariant(obj));
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(bag->ic_object == obj->oo_inode);
LASSERT(handle != NULL);
int rc;
ENTRY;
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(handle != NULL);
oh = container_of0(handle, struct osd_thandle, ot_super);
inode = osd_dt_obj(dt)->oo_inode;
LASSERT(inode);
- rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
- true, true, NULL, false);
+ rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
+ 0, oh, true, true, NULL, false);
RETURN(rc);
}
rec = (struct osd_fid_pack *) (de->name + de->name_len + 1);
rc = osd_fid_unpack((struct lu_fid *)fid, rec);
}
- RETURN(rc);
+ return rc;
+}
+
+/* Decide whether @fid refers to an object on a remote MDT.
+ *
+ * Returns 1 when the FID's sequence is FLDB-managed but has no local
+ * sequence entry (i.e. the object lives on another MDT), 0 when the FID
+ * is local. */
+static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
+ struct lu_fid *fid)
+{
+ ENTRY;
+
+ /* FID seqs not in FLDB, must be local seq */
+ if (unlikely(!fid_seq_in_fldb(fid_seq(fid))))
+ RETURN(0);
+
+ /* The sequence is allocated on this OSD, so the object is local. */
+ if (osd_seq_exists(env, osd, fid_seq(fid)))
+ RETURN(0);
+
+ RETURN(1);
+}
/**
struct inode *dir = obj->oo_inode;
struct dentry *dentry;
struct osd_thandle *oh;
- struct ldiskfs_dir_entry_2 *de;
+ struct ldiskfs_dir_entry_2 *de = NULL;
struct buffer_head *bh;
struct htree_lock *hlock = NULL;
- int rc;
-
+ struct lu_fid *fid = &osd_oti_get(env)->oti_fid;
+ struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
+ int rc;
ENTRY;
LINVRNT(osd_invariant(obj));
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(handle != NULL);
osd_trans_exec_op(env, handle, OSD_OT_DELETE);
down_write(&obj->oo_ext_idx_sem);
}
- bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
+ bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
if (bh) {
- rc = ldiskfs_delete_entry(oh->ot_handle,
- dir, de, bh);
+ __u32 ino = 0;
+
+ /* If this is not the ".." entry, it might be a remote DNE
+ * entry and we need to check if the FID is for a remote
+ * MDT. If the FID is not in the directory entry (e.g.
+ * upgraded 1.8 filesystem without dirdata enabled) then
+ * we need to get the FID from the LMA. For a remote directory
+ * there HAS to be an LMA, it cannot be an IGIF inode in this
+ * case.
+ *
+ * Delete the entry before the agent inode in order to
+ * simplify error handling. At worst an error after deleting
+ * the entry first might leak the agent inode afterward. The
+ * reverse would need filesystem abort in case of error deleting
+ * the entry after the agent had been removed, or leave a
+ * dangling entry pointing at a random inode. */
+ if (strcmp((char *)key, dotdot) != 0) {
+ LASSERT(de != NULL);
+ rc = osd_get_fid_from_dentry(de, (struct dt_rec *)fid);
+ /* If Fid is not in dentry, try to get it from LMA */
+ if (rc == -ENODATA) {
+ struct osd_inode_id *id;
+ struct inode *inode;
+
+ /* Before trying to get fid from the inode,
+ * check whether the inode is valid.
+ *
+ * If the inode has been deleted, do not go
+ * ahead to do osd_ea_fid_get, which will set
+ * the inode to bad inode, which might cause
+ * the inode to be deleted uncorrectly */
+ inode = ldiskfs_iget(osd_sb(osd),
+ le32_to_cpu(de->inode));
+ if (IS_ERR(inode)) {
+ CDEBUG(D_INODE, "%s: "DFID"get inode"
+ "error.\n", osd_name(osd),
+ PFID(fid));
+ rc = PTR_ERR(inode);
+ } else {
+ if (likely(inode->i_nlink != 0)) {
+ id = &osd_oti_get(env)->oti_id;
+ rc = osd_ea_fid_get(env, obj,
+ le32_to_cpu(de->inode),
+ fid, id);
+ } else {
+ CDEBUG(D_INFO, "%s: %u "DFID
+ "deleted.\n",
+ osd_name(osd),
+ le32_to_cpu(de->inode),
+ PFID(fid));
+ rc = -ESTALE;
+ }
+ iput(inode);
+ }
+ }
+ if (rc == 0 &&
+ unlikely(osd_remote_fid(env, osd, fid)))
+ /* Need to delete agent inode */
+ ino = le32_to_cpu(de->inode);
+ }
+ rc = ldiskfs_delete_entry(oh->ot_handle, dir, de, bh);
brelse(bh);
+ if (rc == 0 && unlikely(ino != 0)) {
+ rc = osd_delete_local_agent_inode(env, osd, fid, ino,
+ oh);
+ if (rc != 0)
+ CERROR("%s: del local inode "DFID": rc = %d\n",
+ osd_name(osd), PFID(fid), rc);
+ }
} else {
rc = -ENOENT;
}
else
up_write(&obj->oo_ext_idx_sem);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ /* For inode on the remote MDT, .. will point to
+ * /Agent directory, Check whether it needs to delete
+ * from agent directory */
+ if (unlikely(strcmp((char *)key, dotdot) == 0)) {
+ rc = osd_delete_from_remote_parent(env, osd_obj2dev(obj), obj,
+ oh);
+ if (rc != 0 && rc != -ENOENT) {
+ CERROR("%s: delete agent inode "DFID": rc = %d\n",
+ osd_name(osd), PFID(fid), rc);
+ }
+
+ if (rc == -ENOENT)
+ rc = 0;
+
+ GOTO(out, rc);
+ }
+out:
+
LASSERT(osd_invariant(obj));
RETURN(rc);
}
ENTRY;
LASSERT(osd_invariant(obj));
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(bag->ic_object == obj->oo_inode);
if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_LOOKUP))
{
struct osd_thandle *oh;
- LASSERT(dt_object_exists(dt));
LASSERT(handle != NULL);
oh = container_of0(handle, struct osd_thandle, ot_super);
ENTRY;
LINVRNT(osd_invariant(obj));
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(bag->ic_object == obj->oo_inode);
LASSERT(th != NULL);
struct thandle *th)
{
struct inode *inode = dir->oo_inode;
- struct ldiskfs_dentry_param *dot_ldp;
- struct ldiskfs_dentry_param *dot_dot_ldp;
struct osd_thandle *oth;
int result = 0;
dir->oo_compat_dot_created = 1;
result = 0;
}
- } else if(strcmp(name, dotdot) == 0) {
+ } else if (strcmp(name, dotdot) == 0) {
if (!dir->oo_compat_dot_created)
return -EINVAL;
-
- dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
- osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
/* in case of rename, dotdot is already created */
- if (dir->oo_compat_dotdot_created)
+ if (dir->oo_compat_dotdot_created) {
return __osd_ea_add_rec(info, dir, parent_dir, name,
dot_dot_fid, NULL, th);
+ }
- dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
- dot_ldp->edp_magic = 0;
- result = ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
- inode, dot_ldp, dot_dot_ldp);
+ result = osd_add_dot_dotdot_internal(info, dir->oo_inode,
+ parent_dir, dot_fid,
+ dot_dot_fid, oth);
if (result == 0)
dir->oo_compat_dotdot_created = 1;
}
down_write(&pobj->oo_ext_idx_sem);
}
- rc = __osd_ea_add_rec(info, pobj, cinode, name, fid,
- hlock, th);
+ if (OBD_FAIL_CHECK(OBD_FAIL_FID_INDIR)) {
+ struct lu_fid *tfid = &info->oti_fid;
+
+ *tfid = *(const struct lu_fid *)fid;
+ tfid->f_ver = ~0;
+ rc = __osd_ea_add_rec(info, pobj, cinode, name,
+ (const struct dt_rec *)tfid,
+ hlock, th);
+ } else {
+ rc = __osd_ea_add_rec(info, pobj, cinode, name, fid,
+ hlock, th);
+ }
}
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
RETURN_EXIT;
again:
- rc = osd_oi_lookup(oti, dev, fid, id, true);
+ rc = osd_oi_lookup(oti, dev, fid, id, OI_CHECK_FLD);
if (rc != 0 && rc != -ENOENT)
RETURN_EXIT;
}
if (!dev->od_noscrub && ++once == 1) {
- CDEBUG(D_LFSCK, "Trigger OI scrub by RPC for "DFID"\n",
- PFID(fid));
rc = osd_scrub_start(dev);
- LCONSOLE_ERROR("%.16s: trigger OI scrub by RPC for "DFID
- ", rc = %d [2]\n",
- LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
- PFID(fid), rc);
+ LCONSOLE_WARN("%.16s: trigger OI scrub by RPC for "DFID
+ ", rc = %d [2]\n",
+ LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
+ PFID(fid), rc);
if (rc == 0)
goto again;
}
EXIT;
}
+/* Fault-injection helper used under OBD_FAIL_FID_LOOKUP: bypass the normal
+ * dirent-based FID lookup and instead read the FID directly from the
+ * inode's LMA xattr for inode number @ino.
+ *
+ * On success fills @fid and the idmap cache entry @oic; on failure zeroes
+ * oic_fid and returns a negative errno from osd_iget()/osd_get_lma(). */
+static int osd_fail_fid_lookup(struct osd_thread_info *oti,
+ struct osd_device *dev,
+ struct osd_idmap_cache *oic,
+ struct lu_fid *fid, __u32 ino)
+{
+ struct lustre_mdt_attrs *lma = &oti->oti_mdt_attrs;
+ struct inode *inode;
+ int rc;
+
+ /* OSD_OII_NOGEN: generation unknown, osd_iget() resolves by ino only. */
+ osd_id_gen(&oic->oic_lid, ino, OSD_OII_NOGEN);
+ inode = osd_iget(oti, dev, &oic->oic_lid);
+ if (IS_ERR(inode)) {
+ fid_zero(&oic->oic_fid);
+ return PTR_ERR(inode);
+ }
+
+ rc = osd_get_lma(oti, inode, &oti->oti_obj_dentry, lma);
+ iput(inode);
+ if (rc != 0)
+ fid_zero(&oic->oic_fid);
+ else
+ *fid = oic->oic_fid = lma->lma_self_fid;
+ return rc;
+}
+
+/* Record the FID <-> inode-id mapping in the per-thread OI cache so a
+ * subsequent object lookup in the same thread can skip the OI table.
+ * Always returns 0. */
+int osd_add_oi_cache(struct osd_thread_info *info, struct osd_device *osd,
+ struct osd_inode_id *id, const struct lu_fid *fid)
+{
+ CDEBUG(D_INODE, "add "DFID" %u:%u to info %p\n", PFID(fid),
+ id->oii_ino, id->oii_gen, info);
+ info->oti_cache.oic_lid = *id;
+ info->oti_cache.oic_fid = *fid;
+ info->oti_cache.oic_dev = osd;
+
+ return 0;
+}
+
/**
* Calls ->lookup() to find dentry. From dentry get inode and
* read inode's ea to get fid. This is required for interoperability
down_read(&obj->oo_ext_idx_sem);
}
- bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
+ bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
if (bh) {
struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_inode_id *id = &oti->oti_id;
struct osd_idmap_cache *oic = &oti->oti_cache;
struct osd_device *dev = osd_obj2dev(obj);
struct osd_scrub *scrub = &dev->od_scrub;
struct scrub_file *sf = &scrub->os_file;
ino = le32_to_cpu(de->inode);
+ if (OBD_FAIL_CHECK(OBD_FAIL_FID_LOOKUP)) {
+ brelse(bh);
+ rc = osd_fail_fid_lookup(oti, dev, oic, fid, ino);
+ GOTO(out, rc);
+ }
+
rc = osd_get_fid_from_dentry(de, rec);
/* done with de, release bh */
brelse(bh);
if (rc != 0)
- rc = osd_ea_fid_get(env, obj, ino, fid, &oic->oic_lid);
+ rc = osd_ea_fid_get(env, obj, ino, fid, id);
else
- osd_id_gen(&oic->oic_lid, ino, OSD_OII_NOGEN);
+ osd_id_gen(id, ino, OSD_OII_NOGEN);
if (rc != 0) {
fid_zero(&oic->oic_fid);
GOTO(out, rc);
}
- oic->oic_fid = *fid;
+ if (osd_remote_fid(env, dev, fid))
+ GOTO(out, rc = 0);
+
+ rc = osd_add_oi_cache(osd_oti_get(env), osd_obj2dev(obj), id,
+ fid);
+ if (rc != 0)
+ GOTO(out, rc);
if ((scrub->os_pos_current <= ino) &&
- (sf->sf_flags & SF_INCONSISTENT ||
+ ((sf->sf_flags & SF_INCONSISTENT) ||
+ (sf->sf_flags & SF_UPGRADE && fid_is_igif(fid)) ||
ldiskfs_test_bit(osd_oi_fid2idx(dev, fid),
sf->sf_oi_bitmap)))
osd_consistency_check(oti, dev, oic);
* in the cache, otherwise lu_object_alloc() crashes
* -bzzz
*/
- luch = lu_object_find_at(env, ludev, fid, NULL);
- if (!IS_ERR(luch)) {
- if (lu_object_exists(luch)) {
- lo = lu_object_locate(luch->lo_header, ludev->ld_type);
- if (lo != NULL)
- child = osd_obj(lo);
- else
- LU_OBJECT_DEBUG(D_ERROR, env, luch,
- "lu_object can't be located"
+ luch = lu_object_find_at(env, ludev->ld_site->ls_top_dev == NULL ?
+ ludev : ludev->ld_site->ls_top_dev,
+ fid, NULL);
+ if (!IS_ERR(luch)) {
+ if (lu_object_exists(luch)) {
+ lo = lu_object_locate(luch->lo_header, ludev->ld_type);
+ if (lo != NULL)
+ child = osd_obj(lo);
+ else
+ LU_OBJECT_DEBUG(D_ERROR, env, luch,
+ "lu_object can't be located"
DFID"\n", PFID(fid));
if (child == NULL) {
lu_object_put(env, luch);
child = ERR_PTR(-ENOENT);
}
- } else
- child = (void *)luch;
+ } else {
+ child = ERR_CAST(luch);
+ }
- return child;
+ return child;
}
/**
struct thandle *handle)
{
struct osd_thandle *oh;
- struct inode *inode;
+ struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
struct lu_fid *fid = (struct lu_fid *)rec;
int rc;
ENTRY;
- LASSERT(dt_object_exists(dt));
+ LASSERT(!dt_object_remote(dt));
LASSERT(handle != NULL);
oh = container_of0(handle, struct osd_thandle, ot_super);
osd_trans_declare_op(env, oh, OSD_OT_INSERT,
osd_dto_credits_noquota[DTO_INDEX_INSERT]);
- inode = osd_dt_obj(dt)->oo_inode;
- LASSERT(inode);
+ if (osd_dt_obj(dt)->oo_inode != NULL) {
+ struct inode *inode = osd_dt_obj(dt)->oo_inode;
+
+ /* We ignore block quota on meta pool (MDTs), so needn't
+ * calculate how many blocks will be consumed by this index
+ * insert */
+ rc = osd_declare_inode_qid(env, i_uid_read(inode),
+ i_gid_read(inode), 0,
+ oh, true, true, NULL, false);
+ }
- /* We ignore block quota on meta pool (MDTs), so needn't
- * calculate how many blocks will be consumed by this index
- * insert */
- rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
- true, true, NULL, false);
if (fid == NULL)
RETURN(0);
- /* It does fld look up inside declare, and the result will be
- * added to fld cache, so the following fld lookup inside insert
- * does not need send RPC anymore, so avoid send rpc with holding
- * transaction */
- LASSERTF(fid_is_sane(fid), "fid is insane"DFID"\n", PFID(fid));
- osd_fld_lookup(env, osd_dt_dev(handle->th_dev), fid,
- &osd_oti_get(env)->oti_seq_range);
+ rc = osd_remote_fid(env, osd, fid);
+ if (rc <= 0)
+ RETURN(rc);
+
+ rc = 0;
+
+ osd_trans_declare_op(env, oh, OSD_OT_CREATE,
+ osd_dto_credits_noquota[DTO_OBJECT_CREATE]);
+ osd_trans_declare_op(env, oh, OSD_OT_INSERT,
+ osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
+ osd_trans_declare_op(env, oh, OSD_OT_INSERT,
+ osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
RETURN(rc);
}
const struct dt_key *key, struct thandle *th,
struct lustre_capa *capa, int ignore_quota)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct lu_fid *fid = (struct lu_fid *) rec;
- const char *name = (const char *)key;
- struct osd_object *child;
- int rc;
-
- ENTRY;
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
+ struct lu_fid *fid = (struct lu_fid *) rec;
+ const char *name = (const char *)key;
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_inode_id *id = &oti->oti_id;
+ struct inode *child_inode = NULL;
+ struct osd_object *child = NULL;
+ int rc;
+ ENTRY;
LASSERT(osd_invariant(obj));
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(th != NULL);
osd_trans_exec_op(env, th, OSD_OT_INSERT);
if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_INSERT))
RETURN(-EACCES);
- child = osd_object_find(env, dt, fid);
- if (!IS_ERR(child)) {
- rc = osd_ea_add_rec(env, obj, child->oo_inode, name, rec, th);
- osd_object_put(env, child);
- } else {
- rc = PTR_ERR(child);
- }
+ LASSERTF(fid_is_sane(fid), "fid"DFID" is insane!", PFID(fid));
- LASSERT(osd_invariant(obj));
- RETURN(rc);
-}
+ rc = osd_remote_fid(env, osd, fid);
+ if (rc < 0) {
+ CERROR("%s: Can not find object "DFID" rc %d\n",
+ osd_name(osd), PFID(fid), rc);
+ RETURN(rc);
+ }
-/**
+ if (rc == 1) {
+ /* Insert remote entry */
+ if (strcmp(name, dotdot) == 0 && strlen(name) == 2) {
+ struct osd_mdobj_map *omm = osd->od_mdt_map;
+ struct osd_thandle *oh;
+
+ /* If parent on remote MDT, we need put this object
+ * under AGENT */
+ oh = container_of(th, typeof(*oh), ot_super);
+ rc = osd_add_to_remote_parent(env, osd, obj, oh);
+ if (rc != 0) {
+ CERROR("%s: add "DFID" error: rc = %d\n",
+ osd_name(osd),
+ PFID(lu_object_fid(&dt->do_lu)), rc);
+ RETURN(rc);
+ }
+
+ child_inode = igrab(omm->omm_remote_parent->d_inode);
+ } else {
+ child_inode = osd_create_local_agent_inode(env, osd,
+ obj, fid,
+ th);
+ if (IS_ERR(child_inode))
+ RETURN(PTR_ERR(child_inode));
+ }
+ } else {
+ /* Insert local entry */
+ child = osd_object_find(env, dt, fid);
+ if (IS_ERR(child)) {
+ CERROR("%s: Can not find object "DFID"%u:%u: rc = %d\n",
+ osd_name(osd), PFID(fid),
+ id->oii_ino, id->oii_gen,
+ (int)PTR_ERR(child));
+ RETURN(PTR_ERR(child));
+ }
+ child_inode = igrab(child->oo_inode);
+ }
+
+ rc = osd_ea_add_rec(env, obj, child_inode, name, rec, th);
+
+ CDEBUG(D_INODE, "parent %lu insert %s:%lu rc = %d\n",
+ obj->oo_inode->i_ino, name, child_inode->i_ino, rc);
+
+ iput(child_inode);
+ if (child != NULL)
+ osd_object_put(env, child);
+ LASSERT(osd_invariant(obj));
+ RETURN(rc);
+}
+
+/**
* Initialize osd Iterator for given osd index object.
*
* \param dt osd index object
*
* \param di osd iterator
*/
-
static void osd_it_iam_put(const struct lu_env *env, struct dt_it *di)
{
struct osd_it_iam *it = (struct osd_it_iam *)di;
return iam_it_key_size(&it->oi_it);
}
-static inline void osd_it_append_attrs(struct lu_dirent *ent, __u32 attr,
- int len, __u16 type)
+static inline void
+osd_it_append_attrs(struct lu_dirent *ent, int len, __u16 type)
{
- struct luda_type *lt;
- const unsigned align = sizeof(struct luda_type) - 1;
-
- /* check if file type is required */
- if (attr & LUDA_TYPE) {
- len = (len + align) & ~align;
+ /* check if file type is required */
+ if (ent->lde_attrs & LUDA_TYPE) {
+ struct luda_type *lt;
+ int align = sizeof(*lt) - 1;
- lt = (void *) ent->lde_name + len;
- lt->lt_type = cpu_to_le16(CFS_DTTOIF(type));
- ent->lde_attrs |= LUDA_TYPE;
- }
+ len = (len + align) & ~align;
+ lt = (struct luda_type *)(ent->lde_name + len);
+ lt->lt_type = cpu_to_le16(DTTOIF(type));
+ }
- ent->lde_attrs = cpu_to_le32(ent->lde_attrs);
+ ent->lde_attrs = cpu_to_le32(ent->lde_attrs);
}
/**
* build lu direct from backend fs dirent.
*/
-static inline void osd_it_pack_dirent(struct lu_dirent *ent,
- struct lu_fid *fid, __u64 offset,
- char *name, __u16 namelen,
- __u16 type, __u32 attr)
+static inline void
+osd_it_pack_dirent(struct lu_dirent *ent, struct lu_fid *fid, __u64 offset,
+ char *name, __u16 namelen, __u16 type, __u32 attr)
{
- fid_cpu_to_le(&ent->lde_fid, fid);
- ent->lde_attrs = LUDA_FID;
+ ent->lde_attrs = attr | LUDA_FID;
+ fid_cpu_to_le(&ent->lde_fid, fid);
- ent->lde_hash = cpu_to_le64(offset);
- ent->lde_reclen = cpu_to_le16(lu_dirent_calc_size(namelen, attr));
+ ent->lde_hash = cpu_to_le64(offset);
+ ent->lde_reclen = cpu_to_le16(lu_dirent_calc_size(namelen, attr));
- strncpy(ent->lde_name, name, namelen);
- ent->lde_namelen = cpu_to_le16(namelen);
+ strncpy(ent->lde_name, name, namelen);
+ ent->lde_name[namelen] = '\0';
+ ent->lde_namelen = cpu_to_le16(namelen);
- /* append lustre attributes */
- osd_it_append_attrs(ent, attr, namelen, type);
+ /* append lustre attributes */
+ osd_it_append_attrs(ent, namelen, type);
}
/**
struct osd_object *obj = osd_dt_obj(dt);
struct osd_thread_info *info = osd_oti_get(env);
struct osd_it_ea *it = &info->oti_it_ea;
+ struct file *file = &it->oie_file;
struct lu_object *lo = &dt->do_lu;
struct dentry *obj_dentry = &info->oti_it_dentry;
ENTRY;
it->oie_dirent = NULL;
it->oie_buf = info->oti_it_ea_buf;
it->oie_obj = obj;
- it->oie_file.f_pos = 0;
- it->oie_file.f_dentry = obj_dentry;
- if (attr & LUDA_64BITHASH)
- it->oie_file.f_mode |= FMODE_64BITHASH;
- else
- it->oie_file.f_mode |= FMODE_32BITHASH;
- it->oie_file.f_mapping = obj->oo_inode->i_mapping;
- it->oie_file.f_op = obj->oo_inode->i_fop;
- it->oie_file.private_data = NULL;
- lu_object_get(lo);
- RETURN((struct dt_it *) it);
+
+ /* Reset the "file" totally to avoid to reuse any old value from
+ * former readdir handling, the "file->f_pos" should be zero. */
+ memset(file, 0, sizeof(*file));
+ /* Only FMODE_64BITHASH or FMODE_32BITHASH should be set, NOT both. */
+ if (attr & LUDA_64BITHASH)
+ file->f_mode = FMODE_64BITHASH;
+ else
+ file->f_mode = FMODE_32BITHASH;
+ file->f_dentry = obj_dentry;
+ file->f_mapping = obj->oo_inode->i_mapping;
+ file->f_op = obj->oo_inode->i_fop;
+ set_file_inode(file, obj->oo_inode);
+
+ lu_object_get(lo);
+ RETURN((struct dt_it *) it);
}
/**
{
}
+/* Callback argument passed to the kernel readdir machinery: on kernels
+ * with the dir_context API the embedded ctx must be the first member so
+ * the struct can be recovered from the dir_context pointer. */
+struct osd_filldir_cbs {
+#ifdef HAVE_DIR_CONTEXT
+ struct dir_context ctx;
+#endif
+ struct osd_it_ea *it;	/* iterator to fill with dirents */
+};
/**
* It is called internally by ->readdir(). It fills the
* iterator's in-memory data structure with required
* \retval 0 on success
* \retval 1 on buffer full
*/
-static int osd_ldiskfs_filldir(char *buf, const char *name, int namelen,
+static int osd_ldiskfs_filldir(void *buf, const char *name, int namelen,
loff_t offset, __u64 ino,
unsigned d_type)
{
- struct osd_it_ea *it = (struct osd_it_ea *)buf;
+ struct osd_it_ea *it = ((struct osd_filldir_cbs *)buf)->it;
struct osd_object *obj = it->oie_obj;
struct osd_it_ea_dirent *ent = it->oie_dirent;
struct lu_fid *fid = &ent->oied_fid;
*
* \retval 0 on success
* \retval -ve on error
+ * \retval +1 reach the end of entry
*/
static int osd_ldiskfs_it_fill(const struct lu_env *env,
const struct dt_it *di)
struct osd_object *obj = it->oie_obj;
struct inode *inode = obj->oo_inode;
struct htree_lock *hlock = NULL;
- int result = 0;
+ struct file *filp = &it->oie_file;
+ int rc = 0;
+ struct osd_filldir_cbs buf = {
+#ifdef HAVE_DIR_CONTEXT
+ .ctx.actor = osd_ldiskfs_filldir,
+#endif
+ .it = it
+ };
ENTRY;
it->oie_dirent = it->oie_buf;
down_read(&obj->oo_ext_idx_sem);
}
- result = inode->i_fop->readdir(&it->oie_file, it,
- (filldir_t) osd_ldiskfs_filldir);
+#ifdef HAVE_DIR_CONTEXT
+ buf.ctx.pos = filp->f_pos;
+ rc = inode->i_fop->iterate(filp, &buf.ctx);
+ filp->f_pos = buf.ctx.pos;
+#else
+ rc = inode->i_fop->readdir(filp, &buf, osd_ldiskfs_filldir);
+#endif
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
else
up_read(&obj->oo_ext_idx_sem);
- if (it->oie_rd_dirent == 0) {
- result = -EIO;
- } else {
- it->oie_dirent = it->oie_buf;
- it->oie_it_dirent = 1;
- }
+ if (it->oie_rd_dirent == 0) {
+ /*If it does not get any dirent, it means it has been reached
+ *to the end of the dir */
+ it->oie_file.f_pos = ldiskfs_get_htree_eof(&it->oie_file);
+ if (rc == 0)
+ rc = 1;
+ } else {
+ it->oie_dirent = it->oie_buf;
+ it->oie_it_dirent = 1;
+ }
- RETURN(result);
+ RETURN(rc);
}
/**
return it->oie_dirent->oied_namelen;
}
+/* Overwrite the FID stored in an existing FID-in-dirent area of @de with
+ * @fid, under journal handle @jh. The entry must already carry the
+ * LDISKFS_DIRENT_LUFID payload (asserted below) — this only rewrites the
+ * packed FID in place, it never resizes the entry.
+ *
+ * Returns 0 on success or a negative errno from the journal calls. */
+static int
+osd_dirent_update(handle_t *jh, struct super_block *sb,
+ struct osd_it_ea_dirent *ent, struct lu_fid *fid,
+ struct buffer_head *bh, struct ldiskfs_dir_entry_2 *de)
+{
+ struct osd_fid_pack *rec;
+ int rc;
+ ENTRY;
+
+ LASSERT(de->file_type & LDISKFS_DIRENT_LUFID);
+ LASSERT(de->rec_len >= de->name_len + sizeof(struct osd_fid_pack));
+
+ rc = ldiskfs_journal_get_write_access(jh, bh);
+ if (rc != 0) {
+ CERROR("%.16s: fail to write access for update dirent: "
+ "name = %.*s, rc = %d\n",
+ LDISKFS_SB(sb)->s_es->s_volume_name,
+ ent->oied_namelen, ent->oied_name, rc);
+ RETURN(rc);
+ }
+
+ /* Packed FID lives just past the NUL-terminated name. */
+ rec = (struct osd_fid_pack *)(de->name + de->name_len + 1);
+ fid_cpu_to_be((struct lu_fid *)rec->fp_area, fid);
+ rc = ldiskfs_journal_dirty_metadata(jh, bh);
+ if (rc != 0)
+ CERROR("%.16s: fail to dirty metadata for update dirent: "
+ "name = %.*s, rc = %d\n",
+ LDISKFS_SB(sb)->s_es->s_volume_name,
+ ent->oied_namelen, ent->oied_name, rc);
+
+ RETURN(rc);
+}
+
+/* Return 1 if an on-disk dirent with record length @reclen can also hold
+ * a name of @namelen plus a trailing NUL and a packed FID, 0 otherwise. */
+static inline int
+osd_dirent_has_space(__u16 reclen, __u16 namelen, unsigned blocksize)
+{
+ if (ldiskfs_rec_len_from_disk(reclen, blocksize) >=
+ __LDISKFS_DIR_REC_LEN(namelen + 1 + sizeof(struct osd_fid_pack)))
+ return 1;
+ else
+ return 0;
+}
+
+/* Like osd_dirent_has_space() but for the "." / ".." entries, whose name
+ * length is the @dot_dotdot count itself (1 or 2, asserted below).
+ * Return 1 if the entry can hold the appended FID, 0 otherwise. */
+static inline int
+osd_dot_dotdot_has_space(struct ldiskfs_dir_entry_2 *de, int dot_dotdot)
+{
+ LASSERTF(dot_dotdot == 1 || dot_dotdot == 2,
+ "dot_dotdot = %d\n", dot_dotdot);
+
+ if (LDISKFS_DIR_REC_LEN(de) >=
+ __LDISKFS_DIR_REC_LEN(dot_dotdot + 1 + sizeof(struct osd_fid_pack)))
+ return 1;
+ else
+ return 0;
+}
+
+/* Ensure the name entry @ent carries a FID-in-dirent for @fid.
+ *
+ * If the existing entry has spare room the FID is appended in place;
+ * otherwise the entry is deleted and re-inserted with the FID attached.
+ * A no-op (returns 0) when the filesystem lacks the DIRDATA feature.
+ * Returns 0 on success or a negative errno; on a failed re-insert the
+ * name entry is lost (see the comment near the end). */
+static int
+osd_dirent_reinsert(const struct lu_env *env, handle_t *jh,
+ struct inode *dir, struct inode *inode,
+ struct osd_it_ea_dirent *ent, struct lu_fid *fid,
+ struct buffer_head *bh, struct ldiskfs_dir_entry_2 *de,
+ struct htree_lock *hlock)
+{
+ struct dentry *dentry;
+ struct osd_fid_pack *rec;
+ struct ldiskfs_dentry_param *ldp;
+ int rc;
+ ENTRY;
+
+ if (!LDISKFS_HAS_INCOMPAT_FEATURE(inode->i_sb,
+ LDISKFS_FEATURE_INCOMPAT_DIRDATA))
+ RETURN(0);
+
+ /* There is enough space to hold the FID-in-dirent. */
+ if (osd_dirent_has_space(de->rec_len, ent->oied_namelen,
+ dir->i_sb->s_blocksize)) {
+ rc = ldiskfs_journal_get_write_access(jh, bh);
+ if (rc != 0) {
+ CERROR("%.16s: fail to write access for reinsert "
+ "dirent: name = %.*s, rc = %d\n",
+ LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+ ent->oied_namelen, ent->oied_name, rc);
+ RETURN(rc);
+ }
+
+ /* NUL-terminate the name, then append the packed FID. */
+ de->name[de->name_len] = 0;
+ rec = (struct osd_fid_pack *)(de->name + de->name_len + 1);
+ rec->fp_len = sizeof(struct lu_fid) + 1;
+ fid_cpu_to_be((struct lu_fid *)rec->fp_area, fid);
+ de->file_type |= LDISKFS_DIRENT_LUFID;
+
+ rc = ldiskfs_journal_dirty_metadata(jh, bh);
+ if (rc != 0)
+ CERROR("%.16s: fail to dirty metadata for reinsert "
+ "dirent: name = %.*s, rc = %d\n",
+ LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+ ent->oied_namelen, ent->oied_name, rc);
+
+ RETURN(rc);
+ }
+
+ /* Not enough room: delete the entry and re-add it with the FID. */
+ rc = ldiskfs_delete_entry(jh, dir, de, bh);
+ if (rc != 0) {
+ CERROR("%.16s: fail to delete entry for reinsert dirent: "
+ "name = %.*s, rc = %d\n",
+ LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+ ent->oied_namelen, ent->oied_name, rc);
+ RETURN(rc);
+ }
+
+ dentry = osd_child_dentry_by_inode(env, dir, ent->oied_name,
+ ent->oied_namelen);
+ ldp = (struct ldiskfs_dentry_param *)osd_oti_get(env)->oti_ldp;
+ osd_get_ldiskfs_dirent_param(ldp, (const struct dt_rec *)fid);
+ dentry->d_fsdata = (void *)ldp;
+ ll_vfs_dq_init(dir);
+ rc = osd_ldiskfs_add_entry(jh, dentry, inode, hlock);
+ /* It is too bad, we cannot reinsert the name entry back.
+ * That means we lose it! */
+ if (rc != 0)
+ CERROR("%.16s: fail to insert entry for reinsert dirent: "
+ "name = %.*s, rc = %d\n",
+ LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+ ent->oied_namelen, ent->oied_name, rc);
+
+ RETURN(rc);
+}
+
+/* Verify — and, unless LUDA_VERIFY_DRYRUN is set, repair — the consistency
+ * between the current dirent's FID-in-dirent and the target inode's
+ * FID-in-LMA while iterating a directory (LFSCK namespace check).
+ *
+ * On return @fid holds the authoritative FID, @id the inode id, and @attr
+ * is or'ed with LUDA_REPAIR / LUDA_UPGRADE / LUDA_IGNORE to report what
+ * was done. Returns 0 (including benign skips) or a negative errno.
+ *
+ * Locking: repairs need a journal handle, which must be started before the
+ * PDO lock to avoid deadlock; since we do not know in advance whether a
+ * repair is needed, the function first runs lock-only (od_dirent_journal
+ * == 0) and restarts at "again:" with a journal once a repair is found. */
+static int
+osd_dirent_check_repair(const struct lu_env *env, struct osd_object *obj,
+ struct osd_it_ea *it, struct lu_fid *fid,
+ struct osd_inode_id *id, __u32 *attr)
+{
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+ struct osd_device *dev = osd_obj2dev(obj);
+ struct super_block *sb = osd_sb(dev);
+ const char *devname =
+ LDISKFS_SB(sb)->s_es->s_volume_name;
+ struct osd_it_ea_dirent *ent = it->oie_dirent;
+ struct inode *dir = obj->oo_inode;
+ struct htree_lock *hlock = NULL;
+ struct buffer_head *bh = NULL;
+ handle_t *jh = NULL;
+ struct ldiskfs_dir_entry_2 *de;
+ struct dentry *dentry;
+ struct inode *inode;
+ int credits;
+ int rc;
+ int dot_dotdot = 0;	/* 0 = regular name, 1 = ".", 2 = ".." */
+ bool dirty = false;
+ ENTRY;
+
+ if (ent->oied_name[0] == '.') {
+ if (ent->oied_namelen == 1)
+ dot_dotdot = 1;
+ else if (ent->oied_namelen == 2 && ent->oied_name[1] == '.')
+ dot_dotdot = 2;
+ }
+
+ dentry = osd_child_dentry_get(env, obj, ent->oied_name,
+ ent->oied_namelen);
+
+ /* We need to ensure that the name entry is still valid.
+ * Because it may be removed or renamed by other already.
+ *
+ * The unlink or rename operation will start journal before PDO lock,
+ * so to avoid deadlock, here we need to start journal handle before
+ * related PDO lock also. But because we do not know whether there
+ * will be something to be repaired before PDO lock, we just start
+ * journal without conditions.
+ *
+ * We may need to remove the name entry firstly, then insert back.
+ * One credit is for user quota file update.
+ * One credit is for group quota file update.
+ * Two credits are for dirty inode. */
+ credits = osd_dto_credits_noquota[DTO_INDEX_DELETE] +
+ osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1 + 1 + 2;
+
+again:
+ if (dev->od_dirent_journal) {
+ jh = osd_journal_start_sb(sb, LDISKFS_HT_MISC, credits);
+ if (IS_ERR(jh)) {
+ rc = PTR_ERR(jh);
+ CERROR("%.16s: fail to start trans for dirent "
+ "check_repair: credits %d, name %.*s, rc %d\n",
+ devname, credits, ent->oied_namelen,
+ ent->oied_name, rc);
+ RETURN(rc);
+ }
+
+ if (obj->oo_hl_head != NULL) {
+ hlock = osd_oti_get(env)->oti_hlock;
+ /* "0" means exclusive lock for the whole directory.
+ * We need to prevent others access such name entry
+ * during the delete + insert. Neither HLOCK_ADD nor
+ * HLOCK_DEL cannot guarantee the atomicity. */
+ ldiskfs_htree_lock(hlock, obj->oo_hl_head, dir, 0);
+ } else {
+ down_write(&obj->oo_ext_idx_sem);
+ }
+ } else {
+ /* Read-only pass: no journal, shared lookup lock only. */
+ if (obj->oo_hl_head != NULL) {
+ hlock = osd_oti_get(env)->oti_hlock;
+ ldiskfs_htree_lock(hlock, obj->oo_hl_head, dir,
+ LDISKFS_HLOCK_LOOKUP);
+ } else {
+ down_read(&obj->oo_ext_idx_sem);
+ }
+ }
+
+ bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
+ /* For dot/dotdot entry, if there is not enough space to hold the
+ * FID-in-dirent, just keep them there. It only happens when the
+ * device was upgraded from 1.8 or restored from MDT file-level backup.
+ * For the whole directory, only dot/dotdot entry have no FID-in-dirent
+ * and needs to get FID from LMA when readdir, it will not affect the
+ * performance much. */
+ if ((bh == NULL) || (le32_to_cpu(de->inode) != ent->oied_ino) ||
+ (dot_dotdot != 0 && !osd_dot_dotdot_has_space(de, dot_dotdot))) {
+ *attr |= LUDA_IGNORE;
+ GOTO(out_journal, rc = 0);
+ }
+
+ osd_id_gen(id, ent->oied_ino, OSD_OII_NOGEN);
+ inode = osd_iget(info, dev, id);
+ if (IS_ERR(inode)) {
+ rc = PTR_ERR(inode);
+ /* Entry's target vanished under us: skip, not an error. */
+ if (rc == -ENOENT || rc == -ESTALE) {
+ *attr |= LUDA_IGNORE;
+ rc = 0;
+ }
+
+ GOTO(out_journal, rc);
+ }
+
+ /* skip the REMOTE_PARENT_DIR. */
+ if (inode == dev->od_mdt_map->omm_remote_parent->d_inode)
+ GOTO(out_inode, rc = 0);
+
+ rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
+ if (rc == 0) {
+ LASSERT(!(lma->lma_compat & LMAC_NOT_IN_OI));
+
+ if (fid_is_sane(fid)) {
+ /* FID-in-dirent is valid. */
+ if (lu_fid_eq(fid, &lma->lma_self_fid))
+ GOTO(out_inode, rc = 0);
+
+ /* Do not repair under dryrun mode. */
+ if (*attr & LUDA_VERIFY_DRYRUN) {
+ *attr |= LUDA_REPAIR;
+ GOTO(out_inode, rc = 0);
+ }
+
+ /* Repair needed but no journal yet: drop everything
+ * and restart with a journal handle. */
+ if (!dev->od_dirent_journal) {
+ iput(inode);
+ brelse(bh);
+ if (hlock != NULL)
+ ldiskfs_htree_unlock(hlock);
+ else
+ up_read(&obj->oo_ext_idx_sem);
+ dev->od_dirent_journal = 1;
+ goto again;
+ }
+
+ *fid = lma->lma_self_fid;
+ dirty = true;
+ /* Update the FID-in-dirent. */
+ rc = osd_dirent_update(jh, sb, ent, fid, bh, de);
+ if (rc == 0)
+ *attr |= LUDA_REPAIR;
+ } else {
+ /* Do not repair under dryrun mode. */
+ if (*attr & LUDA_VERIFY_DRYRUN) {
+ *fid = lma->lma_self_fid;
+ *attr |= LUDA_REPAIR;
+ GOTO(out_inode, rc = 0);
+ }
+
+ if (!dev->od_dirent_journal) {
+ iput(inode);
+ brelse(bh);
+ if (hlock != NULL)
+ ldiskfs_htree_unlock(hlock);
+ else
+ up_read(&obj->oo_ext_idx_sem);
+ dev->od_dirent_journal = 1;
+ goto again;
+ }
+
+ *fid = lma->lma_self_fid;
+ dirty = true;
+ /* Append the FID-in-dirent. */
+ rc = osd_dirent_reinsert(env, jh, dir, inode, ent,
+ fid, bh, de, hlock);
+ if (rc == 0)
+ *attr |= LUDA_REPAIR;
+ }
+ } else if (rc == -ENODATA) {
+ /* No LMA xattr on the inode at all. */
+ /* Do not repair under dryrun mode. */
+ if (*attr & LUDA_VERIFY_DRYRUN) {
+ if (fid_is_sane(fid)) {
+ *attr |= LUDA_REPAIR;
+ } else {
+ lu_igif_build(fid, inode->i_ino,
+ inode->i_generation);
+ *attr |= LUDA_UPGRADE;
+ }
+ GOTO(out_inode, rc = 0);
+ }
+
+ if (!dev->od_dirent_journal) {
+ iput(inode);
+ brelse(bh);
+ if (hlock != NULL)
+ ldiskfs_htree_unlock(hlock);
+ else
+ up_read(&obj->oo_ext_idx_sem);
+ dev->od_dirent_journal = 1;
+ goto again;
+ }
+
+ dirty = true;
+ if (unlikely(fid_is_sane(fid))) {
+ /* FID-in-dirent exists, but FID-in-LMA is lost.
+ * Trust the FID-in-dirent, and add FID-in-LMA. */
+ rc = osd_ea_fid_set(info, inode, fid, 0, 0);
+ if (rc == 0)
+ *attr |= LUDA_REPAIR;
+ } else {
+ lu_igif_build(fid, inode->i_ino, inode->i_generation);
+ /* It is probably IGIF object. Only append the
+ * FID-in-dirent. OI scrub will process FID-in-LMA. */
+ rc = osd_dirent_reinsert(env, jh, dir, inode, ent,
+ fid, bh, de, hlock);
+ if (rc == 0)
+ *attr |= LUDA_UPGRADE;
+ }
+ }
+
+ GOTO(out_inode, rc);
+
+out_inode:
+ iput(inode);
+
+out_journal:
+ brelse(bh);
+ if (hlock != NULL) {
+ ldiskfs_htree_unlock(hlock);
+ } else {
+ if (dev->od_dirent_journal)
+ up_write(&obj->oo_ext_idx_sem);
+ else
+ up_read(&obj->oo_ext_idx_sem);
+ }
+ if (jh != NULL)
+ ldiskfs_journal_stop(jh);
+ /* Nothing needed repairing: fall back to the cheap no-journal mode
+ * for subsequent entries. */
+ if (rc >= 0 && !dirty)
+ dev->od_dirent_journal = 0;
+ return rc;
+}
/**
- * Returns the value (i.e. fid/igif) at current position from iterator's
- * in memory structure.
+ * Returns the value at current position from iterator's in memory structure.
*
* \param di struct osd_it_ea, iterator's in memory structure
* \param attr attr requested for dirent.
struct osd_scrub *scrub = &dev->od_scrub;
struct scrub_file *sf = &scrub->os_file;
struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_inode_id *id = &oti->oti_id;
struct osd_idmap_cache *oic = &oti->oti_cache;
struct lu_fid *fid = &it->oie_dirent->oied_fid;
struct lu_dirent *lde = (struct lu_dirent *)dtrec;
int rc = 0;
ENTRY;
- if (!fid_is_sane(fid)) {
- rc = osd_ea_fid_get(env, obj, ino, fid, &oic->oic_lid);
- if (rc != 0) {
- fid_zero(&oic->oic_fid);
- RETURN(rc);
+ if (attr & LUDA_VERIFY) {
+ attr |= LUDA_TYPE;
+ if (unlikely(ino == osd_sb(dev)->s_root->d_inode->i_ino)) {
+ attr |= LUDA_IGNORE;
+ rc = 0;
+ } else {
+ rc = osd_dirent_check_repair(env, obj, it, fid, id,
+ &attr);
}
} else {
- osd_id_gen(&oic->oic_lid, ino, OSD_OII_NOGEN);
+ attr &= ~LU_DIRENT_ATTRS_MASK;
+ if (!fid_is_sane(fid)) {
+ if (OBD_FAIL_CHECK(OBD_FAIL_FID_LOOKUP) &&
+ likely(it->oie_dirent->oied_namelen != 2 ||
+ it->oie_dirent->oied_name[0] != '.' ||
+ it->oie_dirent->oied_name[1] != '.'))
+ RETURN(-ENOENT);
+
+ rc = osd_ea_fid_get(env, obj, ino, fid, id);
+ } else {
+ osd_id_gen(id, ino, OSD_OII_NOGEN);
+ }
}
+ /* Pack the entry anyway, at least the offset is right. */
osd_it_pack_dirent(lde, fid, it->oie_dirent->oied_off,
it->oie_dirent->oied_name,
it->oie_dirent->oied_namelen,
it->oie_dirent->oied_type, attr);
- oic->oic_fid = *fid;
- if ((scrub->os_pos_current <= ino) &&
- (sf->sf_flags & SF_INCONSISTENT ||
+
+ if (rc < 0)
+ RETURN(rc);
+
+ if (osd_remote_fid(env, dev, fid))
+ RETURN(0);
+
+ if (likely(!(attr & LUDA_IGNORE)))
+ rc = osd_add_oi_cache(oti, dev, id, fid);
+
+ if (!(attr & LUDA_VERIFY) &&
+ (scrub->os_pos_current <= ino) &&
+ ((sf->sf_flags & SF_INCONSISTENT) ||
+ (sf->sf_flags & SF_UPGRADE && fid_is_igif(fid)) ||
ldiskfs_test_bit(osd_oi_fid2idx(dev, fid), sf->sf_oi_bitmap)))
osd_consistency_check(oti, dev, oic);
}
/**
+ * Returns the record size at current position.
+ *
+ * This function will return record(lu_dirent) size in bytes.
+ *
+ * \param[in] env execution environment
+ * \param[in] di iterator's in memory structure
+ * \param[in] attr attribute of the entry, only requires LUDA_TYPE to
+ * calculate the lu_dirent size.
+ *
+ * \retval	record size (in bytes & in memory) of the current lu_dirent
+ * entry.
+ */
+static int osd_it_ea_rec_size(const struct lu_env *env, const struct dt_it *di,
+ __u32 attr)
+{
+ struct osd_it_ea *it = (struct osd_it_ea *)di;
+
+ return lu_dirent_calc_size(it->oie_dirent->oied_namelen, attr);
+}
+
+/**
* Returns a cookie for current position of the iterator head, so that
* user can use this cookie to load/start the iterator next time.
*
it->oie_file.f_pos = hash;
rc = osd_ldiskfs_it_fill(env, di);
+ if (rc > 0)
+ rc = -ENODATA;
+
if (rc == 0)
rc = +1;
* mode (i.e. to run 2.0 mds on 1.8 disk) (b11826)
*/
static const struct dt_index_operations osd_index_ea_ops = {
- .dio_lookup = osd_index_ea_lookup,
- .dio_declare_insert = osd_index_declare_ea_insert,
- .dio_insert = osd_index_ea_insert,
- .dio_declare_delete = osd_index_declare_ea_delete,
- .dio_delete = osd_index_ea_delete,
- .dio_it = {
- .init = osd_it_ea_init,
- .fini = osd_it_ea_fini,
- .get = osd_it_ea_get,
- .put = osd_it_ea_put,
- .next = osd_it_ea_next,
- .key = osd_it_ea_key,
- .key_size = osd_it_ea_key_size,
- .rec = osd_it_ea_rec,
- .store = osd_it_ea_store,
- .load = osd_it_ea_load
- }
+ .dio_lookup = osd_index_ea_lookup,
+ .dio_declare_insert = osd_index_declare_ea_insert,
+ .dio_insert = osd_index_ea_insert,
+ .dio_declare_delete = osd_index_declare_ea_delete,
+ .dio_delete = osd_index_ea_delete,
+ .dio_it = {
+ .init = osd_it_ea_init,
+ .fini = osd_it_ea_fini,
+ .get = osd_it_ea_get,
+ .put = osd_it_ea_put,
+ .next = osd_it_ea_next,
+ .key = osd_it_ea_key,
+ .key_size = osd_it_ea_key_size,
+ .rec = osd_it_ea_rec,
+ .rec_size = osd_it_ea_rec_size,
+ .store = osd_it_ea_store,
+ .load = osd_it_ea_load
+ }
};
static void *osd_key_init(const struct lu_context *ctx,
static void osd_key_fini(const struct lu_context *ctx,
struct lu_context_key *key, void* data)
{
- struct osd_thread_info *info = data;
+ struct osd_thread_info *info = data;
- if (info->oti_hlock != NULL)
- ldiskfs_htree_lock_free(info->oti_hlock);
- OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
- OBD_FREE_PTR(info);
+ if (info->oti_hlock != NULL)
+ ldiskfs_htree_lock_free(info->oti_hlock);
+ OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
+ lu_buf_free(&info->oti_iobuf.dr_pg_buf);
+ lu_buf_free(&info->oti_iobuf.dr_bl_buf);
+ OBD_FREE_PTR(info);
}
static void osd_key_exit(const struct lu_context *ctx,
{
struct osd_device *osd = osd_dev(d);
- strncpy(osd->od_svname, name, MAX_OBD_NAME);
+ if (strlcpy(osd->od_svname, name, sizeof(osd->od_svname))
+ >= sizeof(osd->od_svname))
+ return -E2BIG;
return osd_procfs_init(osd, name);
}
-static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
+static int osd_fid_init(const struct lu_env *env, struct osd_device *osd)
{
+ struct seq_server_site *ss = osd_seq_site(osd);
+ int rc;
ENTRY;
- osd_scrub_cleanup(env, o);
+ if (osd->od_is_ost || osd->od_cl_seq != NULL)
+ RETURN(0);
+
+ if (unlikely(ss == NULL))
+ RETURN(-ENODEV);
+
+ OBD_ALLOC_PTR(osd->od_cl_seq);
+ if (osd->od_cl_seq == NULL)
+ RETURN(-ENOMEM);
+
+ rc = seq_client_init(osd->od_cl_seq, NULL, LUSTRE_SEQ_METADATA,
+ osd->od_svname, ss->ss_server_seq);
- if (o->od_fsops) {
- fsfilt_put_ops(o->od_fsops);
- o->od_fsops = NULL;
+ if (rc != 0) {
+ OBD_FREE_PTR(osd->od_cl_seq);
+ osd->od_cl_seq = NULL;
}
+ RETURN(rc);
+}
+
+static void osd_fid_fini(const struct lu_env *env, struct osd_device *osd)
+{
+ if (osd->od_cl_seq == NULL)
+ return;
+
+ seq_client_fini(osd->od_cl_seq);
+ OBD_FREE_PTR(osd->od_cl_seq);
+ osd->od_cl_seq = NULL;
+}
+
+static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
+{
+ ENTRY;
+
/* shutdown quota slave instance associated with the device */
if (o->od_quota_slave != NULL) {
qsd_fini(env, o->od_quota_slave);
o->od_quota_slave = NULL;
}
+ osd_fid_fini(env, o);
+
RETURN(0);
}
+static void osd_umount(const struct lu_env *env, struct osd_device *o)
+{
+ ENTRY;
+
+ if (o->od_mnt != NULL) {
+ shrink_dcache_sb(osd_sb(o));
+ osd_sync(env, &o->od_dt_dev);
+
+ mntput(o->od_mnt);
+ o->od_mnt = NULL;
+ }
+
+ EXIT;
+}
+
static int osd_mount(const struct lu_env *env,
struct osd_device *o, struct lustre_cfg *cfg)
{
struct file_system_type *type;
char *options = NULL;
char *str;
- int rc = 0;
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lu_fid *fid = &info->oti_fid;
+ struct inode *inode;
+ int rc = 0;
ENTRY;
if (o->od_mnt != NULL)
RETURN(-E2BIG);
strcpy(o->od_mntdev, dev);
- o->od_fsops = fsfilt_get_ops(mt_str(LDD_MT_LDISKFS));
- if (o->od_fsops == NULL) {
- CERROR("Can't find fsfilt_ldiskfs\n");
- RETURN(-ENOTSUPP);
- }
-
- OBD_PAGE_ALLOC(__page, CFS_ALLOC_STD);
+ OBD_PAGE_ALLOC(__page, GFP_IOFS);
if (__page == NULL)
GOTO(out, rc = -ENOMEM);
if (str)
lmd_flags = simple_strtoul(str + 1, NULL, 0);
opts = lustre_cfg_string(cfg, 3);
- page = (unsigned long)cfs_page_address(__page);
+ page = (unsigned long)page_address(__page);
options = (char *)page;
*options = '\0';
if (opts == NULL)
/* Glom up mount options */
if (*options != '\0')
strcat(options, ",");
- strlcat(options, "no_mbcache", CFS_PAGE_SIZE);
+ strlcat(options, "no_mbcache", PAGE_CACHE_SIZE);
type = get_fs_type("ldiskfs");
if (!type) {
}
o->od_mnt = vfs_kern_mount(type, s_flags, dev, options);
- cfs_module_put(type->owner);
+ module_put(type->owner);
if (IS_ERR(o->od_mnt)) {
rc = PTR_ERR(o->od_mnt);
- CERROR("%s: can't mount %s: %d\n", name, dev, rc);
o->od_mnt = NULL;
+ CERROR("%s: can't mount %s: %d\n", name, dev, rc);
GOTO(out, rc);
}
- if (lvfs_check_rdonly(o->od_mnt->mnt_sb->s_bdev)) {
+#ifdef HAVE_DEV_SET_RDONLY
+ if (dev_check_rdonly(o->od_mnt->mnt_sb->s_bdev)) {
CERROR("%s: underlying device %s is marked as read-only. "
"Setup failed\n", name, dev);
- mntput(o->od_mnt);
- o->od_mnt = NULL;
- GOTO(out, rc = -EROFS);
+ GOTO(out_mnt, rc = -EROFS);
}
+#endif
if (!LDISKFS_HAS_COMPAT_FEATURE(o->od_mnt->mnt_sb,
- LDISKFS_FEATURE_COMPAT_HAS_JOURNAL)) {
+ LDISKFS_FEATURE_COMPAT_HAS_JOURNAL)) {
CERROR("%s: device %s is mounted w/o journal\n", name, dev);
- mntput(o->od_mnt);
- o->od_mnt = NULL;
- GOTO(out, rc = -EINVAL);
+ GOTO(out_mnt, rc = -EINVAL);
+ }
+
+#ifdef LDISKFS_MOUNT_DIRDATA
+ if (LDISKFS_HAS_INCOMPAT_FEATURE(o->od_mnt->mnt_sb,
+ LDISKFS_FEATURE_INCOMPAT_DIRDATA))
+ LDISKFS_SB(osd_sb(o))->s_mount_opt |= LDISKFS_MOUNT_DIRDATA;
+ else if (!o->od_is_ost)
+ CWARN("%s: device %s was upgraded from Lustre-1.x without "
+ "enabling the dirdata feature. If you do not want to "
+ "downgrade to Lustre-1.x again, you can enable it via "
+ "'tune2fs -O dirdata device'\n", name, dev);
+#endif
+ inode = osd_sb(o)->s_root->d_inode;
+ lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
+ rc = osd_ea_fid_set(info, inode, fid, LMAC_NOT_IN_OI, 0);
+ if (rc != 0) {
+ CERROR("%s: failed to set lma on %s root inode\n", name, dev);
+ GOTO(out_mnt, rc);
}
- ldiskfs_set_inode_state(osd_sb(o)->s_root->d_inode,
- LDISKFS_STATE_LUSTRE_NO_OI);
if (lmd_flags & LMD_FLG_NOSCRUB)
o->od_noscrub = 1;
+ GOTO(out, rc = 0);
+
+out_mnt:
+ mntput(o->od_mnt);
+ o->od_mnt = NULL;
+
out:
if (__page)
OBD_PAGE_FREE(__page);
- if (rc)
- fsfilt_put_ops(o->od_fsops);
- RETURN(rc);
+ return rc;
}
static struct lu_device *osd_device_fini(const struct lu_env *env,
- struct lu_device *d)
+ struct lu_device *d)
{
- int rc;
- ENTRY;
-
- rc = osd_shutdown(env, osd_dev(d));
-
- osd_obj_map_fini(osd_dev(d));
-
- shrink_dcache_sb(osd_sb(osd_dev(d)));
- osd_sync(env, lu2dt_dev(d));
-
- rc = osd_procfs_fini(osd_dev(d));
- if (rc) {
- CERROR("proc fini error %d \n", rc);
- RETURN (ERR_PTR(rc));
- }
+ struct osd_device *o = osd_dev(d);
+ ENTRY;
- if (osd_dev(d)->od_mnt) {
- mntput(osd_dev(d)->od_mnt);
- osd_dev(d)->od_mnt = NULL;
- }
+ osd_shutdown(env, o);
+ osd_procfs_fini(o);
+ osd_scrub_cleanup(env, o);
+ osd_obj_map_fini(o);
+ osd_umount(env, o);
- RETURN(NULL);
+ RETURN(NULL);
}
static int osd_device_init0(const struct lu_env *env,
struct lu_device *l = osd2lu_dev(o);
struct osd_thread_info *info;
int rc;
+ int cplen = 0;
/* if the module was re-loaded, env can loose its keys */
rc = lu_env_refill((struct lu_env *) env);
spin_lock_init(&o->od_osfs_lock);
mutex_init(&o->od_otable_mutex);
- o->od_osfs_age = cfs_time_shift_64(-1000);
o->od_capa_hash = init_capa_hash();
if (o->od_capa_hash == NULL)
o->od_writethrough_cache = 1;
o->od_readcache_max_filesize = OSD_MAX_CACHE_SIZE;
- rc = osd_mount(env, o, cfg);
- if (rc)
+ cplen = strlcpy(o->od_svname, lustre_cfg_string(cfg, 4),
+ sizeof(o->od_svname));
+ if (cplen >= sizeof(o->od_svname)) {
+ rc = -E2BIG;
GOTO(out_capa, rc);
+ }
- CFS_INIT_LIST_HEAD(&o->od_ios_list);
- /* setup scrub, including OI files initialization */
- rc = osd_scrub_setup(env, o);
- if (rc < 0)
- GOTO(out_mnt, rc);
+ if (server_name_is_ost(o->od_svname))
+ o->od_is_ost = 1;
- strncpy(o->od_svname, lustre_cfg_string(cfg, 4),
- sizeof(o->od_svname) - 1);
+ rc = osd_mount(env, o, cfg);
+ if (rc != 0)
+ GOTO(out_capa, rc);
- rc = osd_obj_map_init(o);
+ rc = osd_obj_map_init(env, o);
if (rc != 0)
- GOTO(out_scrub, rc);
+ GOTO(out_mnt, rc);
rc = lu_site_init(&o->od_site, l);
- if (rc)
+ if (rc != 0)
GOTO(out_compat, rc);
o->od_site.ls_bottom_dev = l;
rc = lu_site_init_finish(&o->od_site);
- if (rc)
+ if (rc != 0)
+ GOTO(out_site, rc);
+
+ /* self-repair LMA by default */
+ o->od_lma_self_repair = 1;
+
+ INIT_LIST_HEAD(&o->od_ios_list);
+ /* setup scrub, including OI files initialization */
+ rc = osd_scrub_setup(env, o);
+ if (rc < 0)
GOTO(out_site, rc);
rc = osd_procfs_init(o, o->od_svname);
if (rc != 0) {
CERROR("%s: can't initialize procfs: rc = %d\n",
o->od_svname, rc);
- GOTO(out_site, rc);
+ GOTO(out_scrub, rc);
}
LASSERT(l->ld_site->ls_linkage.next && l->ld_site->ls_linkage.prev);
}
RETURN(0);
+
out_procfs:
osd_procfs_fini(o);
+out_scrub:
+ osd_scrub_cleanup(env, o);
out_site:
lu_site_fini(&o->od_site);
out_compat:
osd_obj_map_fini(o);
-out_scrub:
- osd_scrub_cleanup(env, o);
out_mnt:
- osd_oi_fini(info, o);
- osd_shutdown(env, o);
- mntput(o->od_mnt);
- o->od_mnt = NULL;
+ osd_umount(env, o);
out_capa:
cleanup_capa_hash(o->od_capa_hash);
out:
- RETURN(rc);
+ return rc;
}
static struct lu_device *osd_device_alloc(const struct lu_env *env,
static int osd_process_config(const struct lu_env *env,
struct lu_device *d, struct lustre_cfg *cfg)
{
- struct osd_device *o = osd_dev(d);
- int err;
- ENTRY;
+ struct osd_device *o = osd_dev(d);
+ int rc;
+ ENTRY;
- switch(cfg->lcfg_command) {
- case LCFG_SETUP:
- err = osd_mount(env, o, cfg);
- break;
- case LCFG_CLEANUP:
+ switch (cfg->lcfg_command) {
+ case LCFG_SETUP:
+ rc = osd_mount(env, o, cfg);
+ break;
+ case LCFG_CLEANUP:
lu_dev_del_linkage(d->ld_site, d);
- err = osd_shutdown(env, o);
+ rc = osd_shutdown(env, o);
break;
- default:
- err = -ENOSYS;
- }
+ case LCFG_PARAM:
+ LASSERT(&o->od_dt_dev);
+ rc = class_process_proc_seq_param(PARAM_OSD,
+ lprocfs_osd_obd_vars,
+ cfg, &o->od_dt_dev);
+ if (rc > 0 || rc == -ENOSYS)
+ rc = class_process_proc_seq_param(PARAM_OST,
+ lprocfs_osd_obd_vars,
+ cfg, &o->od_dt_dev);
+ break;
+ default:
+ rc = -ENOSYS;
+ }
- RETURN(err);
+ RETURN(rc);
}
static int osd_recovery_complete(const struct lu_env *env,
int result = 0;
ENTRY;
- if (dev->ld_site && lu_device_is_md(dev->ld_site->ls_top_dev)) {
- /* MDT/MDD still use old infrastructure to create
- * special files */
- result = llo_local_objects_setup(env, lu2md_dev(pdev),
- lu2dt_dev(dev));
- if (result)
+ if (osd->od_quota_slave != NULL) {
+ /* set up quota slave objects */
+ result = qsd_prepare(env, osd->od_quota_slave);
+ if (result != 0)
RETURN(result);
}
- if (osd->od_quota_slave != NULL)
- /* set up quota slave objects */
- result = qsd_prepare(env, osd->od_quota_slave);
+ result = osd_fid_init(env, osd);
RETURN(result);
}
+int osd_fid_alloc(const struct lu_env *env, struct obd_export *exp,
+ struct lu_fid *fid, struct md_op_data *op_data)
+{
+ struct osd_device *osd = osd_dev(exp->exp_obd->obd_lu_dev);
+
+ return seq_client_alloc_fid(env, osd->od_cl_seq, fid);
+}
+
static const struct lu_object_operations osd_lu_obj_ops = {
.loo_object_init = osd_object_init,
.loo_object_delete = osd_object_delete,
static struct obd_ops osd_obd_device_ops = {
.o_owner = THIS_MODULE,
.o_connect = osd_obd_connect,
- .o_disconnect = osd_obd_disconnect
+ .o_disconnect = osd_obd_disconnect,
+ .o_fid_alloc = osd_fid_alloc,
};
static int __init osd_mod_init(void)
{
- struct lprocfs_static_vars lvars;
+ int rc;
+
+ osd_oi_mod_init();
- osd_oi_mod_init();
- lprocfs_osd_init_vars(&lvars);
- return class_register_type(&osd_obd_device_ops, NULL, lvars.module_vars,
- LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);
+ rc = lu_kmem_init(ldiskfs_caches);
+ if (rc)
+ return rc;
+
+ rc = class_register_type(&osd_obd_device_ops, NULL, true,
+ lprocfs_osd_module_vars,
+#ifndef HAVE_ONLY_PROCFS_SEQ
+ NULL,
+#endif
+ LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);
+ if (rc)
+ lu_kmem_fini(ldiskfs_caches);
+ return rc;
}
static void __exit osd_mod_exit(void)
{
class_unregister_type(LUSTRE_OSD_LDISKFS_NAME);
+ lu_kmem_fini(ldiskfs_caches);
}
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");