/* struct ptlrpc_thread */
#include <lustre_net.h>
#include <lustre_fid.h>
+/* process_config */
+#include <lustre_param.h>
#include "osd_internal.h"
+#include "osd_dynlocks.h"
/* llo_* api support */
#include <md_object.h>
CFS_MODULE_PARM(ldiskfs_track_declares_assert, "i", int, 0644,
"LBUG during tracking of declares");
+/* Slab to allocate dynlocks */
+struct kmem_cache *dynlock_cachep;
+
+static struct lu_kmem_descr ldiskfs_caches[] = {
+ {
+ .ckd_cache = &dynlock_cachep,
+ .ckd_name = "dynlock_cache",
+ .ckd_size = sizeof(struct dynlock_handle)
+ },
+ { /* NULL ckd_cache is the sentinel terminating the table */
+ .ckd_cache = NULL
+ }
+};
+
static const char dot[] = ".";
static const char dotdot[] = "..";
static const char remote_obj_dir[] = "REM_OBJ_DIR";
[OSD_OT_WRITE] = OSD_OT_WRITE,
[OSD_OT_INSERT] = OSD_OT_DELETE,
[OSD_OT_DELETE] = OSD_OT_INSERT,
+ [OSD_OT_UPDATE] = OSD_OT_MAX,
[OSD_OT_QUOTA] = OSD_OT_MAX,
};
}
}
-static inline int __osd_xattr_get(struct inode *inode, struct dentry *dentry,
- const char *name, void *buf, int len)
-{
- dentry->d_inode = inode;
- dentry->d_sb = inode->i_sb;
- return inode->i_op->getxattr(dentry, name, buf, len);
-}
-
int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
struct dentry *dentry, struct lustre_mdt_attrs *lma)
{
id->oii_ino, PTR_ERR(inode));
} else if (id->oii_gen != OSD_OII_NOGEN &&
inode->i_generation != id->oii_gen) {
- CDEBUG(D_INODE, "unmatched inode: ino = %u, gen0 = %u, "
- "gen1 = %u\n",
+ CDEBUG(D_INODE, "unmatched inode: ino = %u, oii_gen = %u, "
+ "i_generation = %u\n",
id->oii_ino, id->oii_gen, inode->i_generation);
iput(inode);
inode = ERR_PTR(-ESTALE);
/* due to parallel readdir and unlink,
* we can have dead inode here. */
CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
- make_bad_inode(inode);
iput(inode);
inode = ERR_PTR(-ESTALE);
} else if (is_bad_inode(inode)) {
return inode;
}
-static struct inode *
-osd_iget_verify(struct osd_thread_info *info, struct osd_device *dev,
- struct osd_inode_id *id, const struct lu_fid *fid)
+static struct inode *osd_iget_check(struct osd_thread_info *info,
+ struct osd_device *dev,
+ const struct lu_fid *fid,
+ struct osd_inode_id *id,
+ bool in_oi) /* true iff @id came from an OI mapping */
{
- struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
- struct inode *inode;
- int rc;
+ struct inode *inode;
+ int rc = 0;
+ ENTRY;
- inode = osd_iget(info, dev, id);
- if (IS_ERR(inode))
- return inode;
+ inode = ldiskfs_iget(osd_sb(dev), id->oii_ino); /* raw iget: validation follows */
+ if (IS_ERR(inode)) {
+ rc = PTR_ERR(inode);
+ if (!in_oi || (rc != -ENOENT && rc != -ESTALE)) {
+ CDEBUG(D_INODE, "no inode: ino = %u, rc = %d\n",
+ id->oii_ino, rc);
- rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
- if (rc == -ENODATA)
- return inode;
+ GOTO(put, rc);
+ }
+
+ goto check_oi; /* mapping came from OI: re-verify it before failing */
+ }
+
+ if (is_bad_inode(inode)) {
+ rc = -ENOENT;
+ if (!in_oi) {
+ CDEBUG(D_INODE, "bad inode: ino = %u\n", id->oii_ino);
+
+ GOTO(put, rc);
+ }
+ goto check_oi;
+ }
+
+ if (id->oii_gen != OSD_OII_NOGEN &&
+ inode->i_generation != id->oii_gen) {
+ rc = -ESTALE; /* cached generation no longer matches the inode */
+ if (!in_oi) {
+ CDEBUG(D_INODE, "unmatched inode: ino = %u, "
+ "oii_gen = %u, i_generation = %u\n",
+ id->oii_ino, id->oii_gen, inode->i_generation);
+
+ GOTO(put, rc);
+ }
+
+ goto check_oi;
+ }
+
+ if (inode->i_nlink == 0) { /* object was unlinked in the meantime */
+ rc = -ENOENT;
+ if (!in_oi) {
+ CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
+
+ GOTO(put, rc);
+ }
+
+ goto check_oi;
+ }
+
+check_oi: /* rc == 0: inode passed all checks; rc != 0: consult the OI */
if (rc != 0) {
- iput(inode);
- return ERR_PTR(rc);
+ LASSERTF(rc == -ESTALE || rc == -ENOENT, "rc = %d\n", rc);
+
+ rc = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
+ /* XXX: There are some possible cases:
+ * 1. rc = 0.
+ * Backup/restore caused the OI invalid.
+ * 2. rc = 0.
+ * Someone unlinked the object but NOT removed
+ * the OI mapping, such as mount target device
+ * as ldiskfs, and modify something directly.
+ * 3. rc = -ENOENT.
+ * Someone just removed the object between the
+ * former oi_lookup and the iget. It is normal.
+ * 4. Other failure cases.
+ *
+ * Generally, when the device is mounted, it will
+ * auto check whether the system is restored from
+ * file-level backup or not. We trust such detect
+ * to distinguish the 1st case from the 2nd case. */
+ if (rc == 0) {
+ if (!IS_ERR(inode) && inode->i_generation != 0 &&
+ inode->i_generation == id->oii_gen)
+ rc = -ENOENT; /* OI agrees with inode: object is really gone */
+ else
+ rc = -EREMCHG; /* OI mapping changed: possible backup/restore */
+ }
+ } else {
+ if (id->oii_gen == OSD_OII_NOGEN)
+ osd_id_gen(id, inode->i_ino, inode->i_generation); /* report real generation */
+
+ /* Do not update file c/mtime in ldiskfs.
+ * NB: we don't have any lock to protect this because we don't
+ * have reference on osd_object now, but contention with
+ * another lookup + attr_set can't happen in the tiny window
+ * between if (...) and set S_NOCMTIME. */
+ if (!(inode->i_flags & S_NOCMTIME))
+ inode->i_flags |= S_NOCMTIME;
}
- if (!lu_fid_eq(fid, &lma->lma_self_fid)) {
- CDEBUG(D_LFSCK, "inconsistent obj: "DFID", %lu, "DFID"\n",
- PFID(&lma->lma_self_fid), inode->i_ino, PFID(fid));
- iput(inode);
- return ERR_PTR(-EREMCHG);
+ GOTO(put, rc);
+
+put:
+ if (rc != 0) {
+ if (!IS_ERR(inode))
+ iput(inode);
+
+ inode = ERR_PTR(rc);
}
return inode;
}
+/**
+ * Read the OST object's filter_fid xattr (XATTR_NAME_FID) and, for the
+ * old on-disk format, extract the object's self-FID into \a fid.
+ *
+ * \retval +v: new filter_fid, does not contain self-fid
+ * \retval 0: filter_fid_old, contains self-fid
+ * \retval -v: other failure cases
+ */
+int osd_get_idif(struct osd_thread_info *info, struct inode *inode,
+ struct dentry *dentry, struct lu_fid *fid)
+{
+ struct filter_fid_old *ff = &info->oti_ff;
+ struct ost_id *ostid = &info->oti_ostid;
+ int rc;
+
+ rc = __osd_xattr_get(inode, dentry, XATTR_NAME_FID, ff, sizeof(*ff));
+ if (rc == sizeof(*ff)) { /* old (larger) format: carries the self-FID */
+ rc = 0;
+ ostid_set_seq(ostid, le64_to_cpu(ff->ff_seq));
+ ostid_set_id(ostid, le64_to_cpu(ff->ff_objid));
+ /* XXX: should use real OST index in the future. LU-3569 */
+ ostid_to_fid(fid, ostid, 0);
+ } else if (rc == sizeof(struct filter_fid)) { /* new format: no self-FID inside */
+ rc = 1;
+ } else if (rc >= 0) { /* unexpected xattr size */
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int osd_lma_self_repair(struct osd_thread_info *info,
+ struct osd_device *osd, struct inode *inode,
+ const struct lu_fid *fid, __u32 compat)
+{
+ handle_t *jh;
+ int rc;
+
+ LASSERT(current->journal_info == NULL); /* must not nest inside a caller's transaction */
+
+ jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
+ osd_dto_credits_noquota[DTO_XATTR_SET]);
+ if (IS_ERR(jh)) {
+ rc = PTR_ERR(jh);
+ CWARN("%s: cannot start journal for lma_self_repair: rc = %d\n",
+ osd_name(osd), rc);
+ return rc;
+ }
+
+ rc = osd_ea_fid_set(info, inode, fid, compat, 0); /* rewrite LMA with trusted FID */
+ if (rc != 0)
+ CWARN("%s: cannot self repair the LMA: rc = %d\n",
+ osd_name(osd), rc);
+ ldiskfs_journal_stop(jh);
+ return rc;
+}
+
+static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
+{
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct osd_device *osd = osd_obj2dev(obj);
+ struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+ struct inode *inode = obj->oo_inode;
+ struct dentry *dentry = &info->oti_obj_dentry;
+ struct lu_fid *fid = NULL;
+ const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
+ int rc;
+ ENTRY;
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSD_COMPAT_INVALID_ENTRY))
+ RETURN(0); /* fault injection: skip the LMA check */
+
+ CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
+ rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
+ info->oti_mdt_attrs_old, LMA_OLD_SIZE);
+ if (rc == -ENODATA && !fid_is_igif(rfid) && osd->od_check_ff) { /* no LMA: try filter_fid */
+ fid = &lma->lma_self_fid;
+ rc = osd_get_idif(info, inode, dentry, fid);
+ if ((rc > 0) || (rc == -ENODATA && osd->od_lma_self_repair)) {
+ /* For the given OST-object, if it has neither LMA nor
+ * FID in XATTR_NAME_FID, then the given FID (which is
+ * contained in the @obj, from client RPC for locating
+ * the OST-object) is trusted. We use it to generate
+ * the LMA. */
+ osd_lma_self_repair(info, osd, inode, rfid,
+ fid_is_on_ost(info, osd, fid, OI_CHECK_FLD) ?
+ LMAC_FID_ON_OST : 0);
+ RETURN(0);
+ }
+ }
+
+ if (unlikely(rc == -ENODATA))
+ RETURN(0); /* no LMA xattr yet: nothing to verify */
+
+ if (rc < 0)
+ RETURN(rc);
+
+ if (rc > 0) {
+ rc = 0;
+ lustre_lma_swab(lma);
+ if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
+ CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
+ CWARN("%s: unsupported incompat LMA feature(s) %#x for "
+ "fid = "DFID", ino = %lu\n", osd_name(osd),
+ lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
+ PFID(rfid), inode->i_ino);
+ rc = -EOPNOTSUPP;
+ } else if (!(lma->lma_compat & LMAC_NOT_IN_OI)) {
+ fid = &lma->lma_self_fid; /* remember self-FID for the equality check */
+ }
+ }
+
+ if (fid != NULL && unlikely(!lu_fid_eq(rfid, fid))) {
+ if (fid_is_idif(rfid) && fid_is_idif(fid)) {
+ struct ost_id *oi = &info->oti_ostid;
+ struct lu_fid *fid1 = &info->oti_fid3;
+ __u32 idx = fid_idif_ost_idx(rfid);
+
+ /* For old IDIF, the OST index is not part of the IDIF,
+ * Means that different OSTs may have the same IDIFs.
+ * Under such case, we need to make some compatible
+ * check to make sure to trigger OI scrub properly. */
+ if (idx != 0 && fid_idif_ost_idx(fid) == 0) {
+ /* Given @rfid is new, LMA is old. */
+ fid_to_ostid(fid, oi);
+ ostid_to_fid(fid1, oi, idx);
+ if (lu_fid_eq(fid1, rfid)) {
+ if (osd->od_lma_self_repair)
+ osd_lma_self_repair(info, osd,
+ inode, rfid,
+ LMAC_FID_ON_OST);
+ RETURN(0);
+ }
+ }
+ }
+
+ CDEBUG(D_INODE, "%s: FID "DFID" != self_fid "DFID"\n",
+ osd_name(osd), PFID(rfid), PFID(fid));
+ rc = -EREMCHG; /* FID mismatch: caller may trigger OI scrub */
+ }
+
+ RETURN(rc);
+}
+
static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
const struct lu_fid *fid,
const struct lu_object_conf *conf)
struct osd_scrub *scrub;
struct scrub_file *sf;
int result;
- int verify = 0;
+ int saved = 0;
+ bool in_oi = false;
+ bool triggered = false;
ENTRY;
LINVRNT(osd_invariant(obj));
if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
RETURN(-ENOENT);
+ /* For an object created as a locking anchor, or for an object yet to
+ * be created on disk, there is no need to call osd_oi_lookup() here:
+ * a FID should never be re-used. If it really is a duplicate FID for
+ * some unexpected reason, we should be able to detect it later by
+ * calling do_create->osd_oi_insert(). */
+ if (conf != NULL && conf->loc_flags & LOC_F_NEW)
+ GOTO(out, result = 0);
+
/* Search order: 1. per-thread cache. */
- if (lu_fid_eq(fid, &oic->oic_fid)) {
+ if (lu_fid_eq(fid, &oic->oic_fid) &&
+ likely(oic->oic_dev == dev)) {
id = &oic->oic_lid;
goto iget;
}
goto iget;
}
- if (sf->sf_flags & SF_INCONSISTENT)
- verify = 1;
-
- /*
- * Objects are created as locking anchors or place holders for objects
- * yet to be created. No need to osd_oi_lookup() at here because FID
- * shouldn't never be re-used, if it's really a duplicate FID from
- * unexpected reason, we should be able to detect it later by calling
- * do_create->osd_oi_insert()
- */
- if (conf != NULL && conf->loc_flags & LOC_F_NEW)
- GOTO(out, result = 0);
-
/* Search order: 3. OI files. */
- result = osd_oi_lookup(info, dev, fid, id, true);
+ result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
if (result == -ENOENT) {
- if (!fid_is_norm(fid) || fid_is_on_ost(info, dev, fid) ||
+ if (!fid_is_norm(fid) ||
+ fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) ||
!ldiskfs_test_bit(osd_oi_fid2idx(dev,fid),
sf->sf_oi_bitmap))
GOTO(out, result = 0);
if (result != 0)
GOTO(out, result);
+ in_oi = true;
+
iget:
- if (verify == 0)
- inode = osd_iget(info, dev, id);
- else
- inode = osd_iget_verify(info, dev, id, fid);
+ inode = osd_iget_check(info, dev, fid, id, in_oi);
if (IS_ERR(inode)) {
result = PTR_ERR(inode);
if (result == -ENOENT || result == -ESTALE) {
- fid_zero(&oic->oic_fid);
- result = 0;
+ if (!in_oi)
+ fid_zero(&oic->oic_fid);
+
+ GOTO(out, result = -ENOENT);
} else if (result == -EREMCHG) {
trigger:
+ if (!in_oi)
+ fid_zero(&oic->oic_fid);
+
+ if (unlikely(triggered))
+ GOTO(out, result = saved);
+
+ triggered = true;
if (thread_is_running(&scrub->os_thread)) {
result = -EINPROGRESS;
} else if (!dev->od_noscrub) {
result = osd_scrub_start(dev);
- LCONSOLE_ERROR("%.16s: trigger OI scrub by RPC "
- "for "DFID", rc = %d [1]\n",
- LDISKFS_SB(osd_sb(dev))->s_es->\
- s_volume_name,PFID(fid), result);
+ LCONSOLE_WARN("%.16s: trigger OI scrub by RPC "
+ "for "DFID", rc = %d [1]\n",
+ osd_name(dev), PFID(fid),result);
if (result == 0 || result == -EALREADY)
result = -EINPROGRESS;
else
result = -EREMCHG;
}
+
+ /* We still have chance to get the valid inode: for the
+ * object which is referenced by remote name entry, the
+ * object on the local MDT will be linked under the dir
+ * of "/REMOTE_PARENT_DIR" with its FID string as name.
+ *
+ * We do not know whether the object for the given FID
+ * is referenced by some remote name entry or not, and
+ * especially for DNE II, a multiple-linked object may
+ * have many name entries reside on many MDTs.
+ *
+ * To simplify the operation, OSD will not distinguish
+ * more, just lookup "/REMOTE_PARENT_DIR". Usually, it
+ * only happened for the RPC from other MDT during the
+ * OI scrub, or for the client side RPC with FID only,
+ * such as FID to path, or from old connected client. */
+ saved = result;
+ result = osd_lookup_in_remote_parent(info, dev,
+ fid, id);
+ if (result == 0) {
+ in_oi = false;
+ goto iget;
+ }
+
+ result = saved;
}
GOTO(out, result);
obj->oo_inode = inode;
LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
+ result = osd_check_lma(env, obj);
+ if (result != 0) {
+ iput(inode);
+ obj->oo_inode = NULL;
+ if (result == -EREMCHG) {
+ if (!in_oi) {
+ result = osd_oi_lookup(info, dev, fid, id,
+ OI_CHECK_FLD);
+ if (result != 0) {
+ fid_zero(&oic->oic_fid);
+ GOTO(out, result);
+ }
+ }
+
+ goto trigger;
+ }
+
+ GOTO(out, result);
+ }
+
obj->oo_compat_dot_created = 1;
obj->oo_compat_dotdot_created = 1;
(LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
}
-static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
-{
- struct osd_thread_info *info = osd_oti_get(env);
- struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
- int rc;
- ENTRY;
-
- CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
- rc = __osd_xattr_get(obj->oo_inode, &info->oti_obj_dentry,
- XATTR_NAME_LMA, info->oti_mdt_attrs_old,
- LMA_OLD_SIZE);
- if (rc > 0) {
- rc = 0;
- lustre_lma_swab(lma);
- if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
- CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
- rc = -EOPNOTSUPP;
- CWARN("%s: unsupported incompat LMA feature(s) %#x for "
- "fid = "DFID", ino = %lu: rc = %d\n",
- osd_obj2dev(obj)->od_svname,
- lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
- PFID(lu_object_fid(&obj->oo_dt.do_lu)),
- obj->oo_inode->i_ino, rc);
- }
- if (unlikely(!lu_fid_eq(lu_object_fid(&obj->oo_dt.do_lu),
- &lma->lma_self_fid))) {
- CDEBUG(D_INODE, "%s: FID "DFID" != self_fid "DFID"\n",
- osd_obj2dev(obj)->od_svname,
- PFID(lu_object_fid(&obj->oo_dt.do_lu)),
- PFID(&lma->lma_self_fid));
- if (obj->oo_inode != NULL) {
- iput(obj->oo_inode);
- obj->oo_inode = NULL;
- }
- rc = -ESTALE;
- }
- } else if (rc == -ENODATA) {
- /* haven't initialize LMA xattr */
- rc = 0;
- }
-
- RETURN(rc);
-}
-
/*
* Concurrency: no concurrent access is possible that early in object
* life-cycle.
result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
obj->oo_dt.do_body_ops = &osd_body_ops_new;
- if (result == 0 && obj->oo_inode != NULL) {
- result = osd_check_lma(env, obj);
- if (result != 0)
- return result;
-
+ if (result == 0 && obj->oo_inode != NULL)
osd_object_init0(obj);
- }
LINVRNT(osd_invariant(obj));
return result;
lu_context_exit(&th->th_ctx);
lu_context_fini(&th->th_ctx);
- OBD_FREE_PTR(oh);
+ thandle_put(th);
}
static struct thandle *osd_trans_create(const struct lu_env *env,
- struct dt_device *d)
+ struct dt_device *d)
{
- struct osd_thread_info *oti = osd_oti_get(env);
- struct osd_iobuf *iobuf = &oti->oti_iobuf;
- struct osd_thandle *oh;
- struct thandle *th;
- ENTRY;
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_iobuf *iobuf = &oti->oti_iobuf;
+ struct osd_thandle *oh;
+ struct thandle *th;
+ ENTRY;
- /* on pending IO in this thread should left from prev. request */
- LASSERT(cfs_atomic_read(&iobuf->dr_numreqs) == 0);
+ /* on pending IO in this thread should left from prev. request */
+ LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
th = ERR_PTR(-ENOMEM);
- OBD_ALLOC_GFP(oh, sizeof *oh, CFS_ALLOC_IO);
+ OBD_ALLOC_GFP(oh, sizeof *oh, __GFP_IO);
if (oh != NULL) {
oh->ot_quota_trans = &oti->oti_quota_trans;
memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
th->th_result = 0;
th->th_tags = LCT_TX_HANDLE;
oh->ot_credits = 0;
- oti->oti_dev = osd_dt_dev(d);
+ atomic_set(&th->th_refc, 1);
+ th->th_alloc_size = sizeof(*oh);
+ oti->oti_dev = osd_dt_dev(d);
CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
osd_th_alloced(oh);
* XXX temporary stuff. Some abstraction layer should
* be used.
*/
- jh = ldiskfs_journal_start_sb(osd_sb(dev), oh->ot_credits);
+ jh = osd_journal_start_sb(osd_sb(dev), LDISKFS_HT_MISC, oh->ot_credits);
osd_th_started(oh);
if (!IS_ERR(jh)) {
oh->ot_handle = jh;
RETURN(rc);
}
+static int osd_seq_exists(const struct lu_env *env,
+ struct osd_device *osd, obd_seq seq)
+{
+ struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
+ struct seq_server_site *ss = osd_seq_site(osd);
+ int rc;
+ ENTRY;
+
+ if (ss == NULL)
+ RETURN(1); /* no sequence site: treat every sequence as local */
+
+ rc = osd_fld_lookup(env, osd, seq, range);
+ if (rc != 0) {
+ if (rc != -ENOENT)
+ CERROR("%s: can't lookup FLD sequence "LPX64
+ ": rc = %d\n", osd_name(osd), seq, rc);
+ RETURN(0); /* lookup failed: not treated as local */
+ }
+
+ RETURN(ss->ss_node_id == range->lsr_index); /* local iff this node serves the sequence */
+}
+
/*
* Concurrency: shouldn't matter.
*/
-static int osd_trans_stop(const struct lu_env *env, struct thandle *th)
+static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
+ struct thandle *th)
{
int rc = 0;
struct osd_thandle *oh;
if (rc != 0)
CERROR("Failure to stop transaction: %d\n", rc);
} else {
- OBD_FREE_PTR(oh);
+ thandle_put(&oh->ot_super);
}
- /* as we want IO to journal and data IO be concurrent, we don't block
- * awaiting data IO completion in osd_do_bio(), instead we wait here
- * once transaction is submitted to the journal. all reqular requests
- * don't do direct IO (except read/write), thus this wait_event becomes
- * no-op for them.
- *
- * IMPORTANT: we have to wait till any IO submited by the thread is
- * completed otherwise iobuf may be corrupted by different request
- */
- cfs_wait_event(iobuf->dr_wait,
- cfs_atomic_read(&iobuf->dr_numreqs) == 0);
- if (!rc)
- rc = iobuf->dr_error;
+ /* as we want IO to journal and data IO be concurrent, we don't block
+ * awaiting data IO completion in osd_do_bio(), instead we wait here
+ * once the transaction is submitted to the journal. all regular requests
+ * don't do direct IO (except read/write), thus this wait_event becomes
+ * a no-op for them.
+ *
+ * IMPORTANT: we have to wait till any IO submitted by the thread is
+ * completed, otherwise iobuf may be corrupted by a different request
+ */
+ wait_event(iobuf->dr_wait,
+ atomic_read(&iobuf->dr_numreqs) == 0);
+ if (!rc)
+ rc = iobuf->dr_error;
- RETURN(rc);
+ RETURN(rc);
}
static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
d ? d->id_ops->id_name : "plain");
}
+#define GRANT_FOR_LOCAL_OIDS 32 /* 128kB for last_rcvd, quota files, ... */
+
/*
* Concurrency: shouldn't matter.
*/
}
spin_lock(&osd->od_osfs_lock);
- /* cache 1 second */
- if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
- result = sb->s_op->statfs(sb->s_root, ksfs);
- if (likely(result == 0)) { /* N.B. statfs can't really fail */
- osd->od_osfs_age = cfs_time_current_64();
- statfs_pack(&osd->od_statfs, ksfs);
- if (sb->s_flags & MS_RDONLY)
- sfs->os_state = OS_STATE_READONLY;
- }
+ result = sb->s_op->statfs(sb->s_root, ksfs);
+ if (likely(result == 0)) { /* N.B. statfs can't really fail */
+ statfs_pack(sfs, ksfs);
+ if (sb->s_flags & MS_RDONLY)
+ sfs->os_state = OS_STATE_READONLY;
}
- if (likely(result == 0))
- *sfs = osd->od_statfs;
spin_unlock(&osd->od_osfs_lock);
- if (unlikely(env == NULL))
+ if (unlikely(env == NULL))
OBD_FREE_PTR(ksfs);
+ /* Reserve a small amount of space for local objects like last_rcvd,
+ * llog, quota files, ... */
+ if (sfs->os_bavail <= GRANT_FOR_LOCAL_OIDS) {
+ sfs->os_bavail = 0;
+ } else {
+ sfs->os_bavail -= GRANT_FOR_LOCAL_OIDS;
+ /** Take out metadata overhead for indirect blocks */
+ sfs->os_bavail -= sfs->os_bavail >> (sb->s_blocksize_bits - 3);
+ }
+
return result;
}
/*
* XXX should be taken from not-yet-existing fs abstraction layer.
*/
- param->ddp_mnt = osd_dt_dev(dev)->od_mnt;
param->ddp_max_name_len = LDISKFS_NAME_LEN;
param->ddp_max_nlink = LDISKFS_LINK_MAX;
param->ddp_block_shift = sb->s_blocksize_bits;
}
int osd_object_auth(const struct lu_env *env, struct dt_object *dt,
- struct lustre_capa *capa, __u64 opc)
+ struct lustre_capa *capa, __u64 opc)
{
- const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
- struct osd_device *dev = osd_dev(dt->do_lu.lo_dev);
- struct md_capainfo *ci;
- int rc;
+ const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
+ struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
+ struct lu_capainfo *lci;
+ int rc;
- if (!dev->od_fl_capa)
- return 0;
+ if (!osd->od_fl_capa)
+ return 0; /* capability checking disabled on this device */
- if (capa == BYPASS_CAPA)
- return 0;
+ if (capa == BYPASS_CAPA)
+ return 0; /* internal caller explicitly bypasses capability checks */
- ci = md_capainfo(env);
- if (unlikely(!ci))
- return 0;
+ lci = lu_capainfo_get(env);
+ if (unlikely(lci == NULL))
+ return 0;
- if (ci->mc_auth == LC_ID_NONE)
- return 0;
+ if (lci->lci_auth == LC_ID_NONE)
+ return 0; /* no ID-based authentication configured */
- if (!capa) {
- CERROR("no capability is provided for fid "DFID"\n", PFID(fid));
- return -EACCES;
- }
+ if (capa == NULL) {
+ CERROR("%s: no capability provided for FID "DFID": rc = %d\n",
+ osd_name(osd), PFID(fid), -EACCES);
+ return -EACCES;
+ }
- if (!lu_fid_eq(fid, &capa->lc_fid)) {
- DEBUG_CAPA(D_ERROR, capa, "fid "DFID" mismatch with",
- PFID(fid));
- return -EACCES;
- }
+ if (!lu_fid_eq(fid, &capa->lc_fid)) {
+ DEBUG_CAPA(D_ERROR, capa, "fid "DFID" mismatch with",
+ PFID(fid));
+ return -EACCES;
+ }
- if (!capa_opc_supported(capa, opc)) {
- DEBUG_CAPA(D_ERROR, capa, "opc "LPX64" not supported by", opc);
- return -EACCES;
- }
+ if (!capa_opc_supported(capa, opc)) {
+ DEBUG_CAPA(D_ERROR, capa, "opc "LPX64" not supported by", opc);
+ return -EACCES;
+ }
- if ((rc = capa_is_sane(env, dev, capa, dev->od_capa_keys))) {
- DEBUG_CAPA(D_ERROR, capa, "insane (rc %d)", rc);
- return -EACCES;
- }
+ rc = capa_is_sane(env, osd, capa, osd->od_capa_keys);
+ if (rc != 0) {
+ DEBUG_CAPA(D_ERROR, capa, "insane: rc = %d", rc);
+ return -EACCES;
+ }
- return 0;
+ return 0;
}
static struct timespec *osd_inode_time(const struct lu_env *env,
}
static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
- cfs_umode_t mode,
- struct dt_allocation_hint *hint,
- struct thandle *th)
+ umode_t mode, struct dt_allocation_hint *hint,
+ struct thandle *th)
{
int result;
struct osd_device *osd = osd_obj2dev(obj);
}
static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
- struct lu_attr *attr,
- struct dt_allocation_hint *hint,
- struct dt_object_format *dof,
- struct thandle *th)
+ struct lu_attr *attr,
+ struct dt_allocation_hint *hint,
+ struct dt_object_format *dof,
+ struct thandle *th)
{
- cfs_umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
- int result;
+ umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
+ int result;
- LINVRNT(osd_invariant(obj));
- LASSERT(obj->oo_inode == NULL);
+ LINVRNT(osd_invariant(obj));
+ LASSERT(obj->oo_inode == NULL);
LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
S_ISFIFO(mode) || S_ISSOCK(mode));
static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
struct dt_object *parent, struct dt_object *child,
- cfs_umode_t child_mode)
+ umode_t child_mode)
{
LASSERT(ah);
- memset(ah, 0, sizeof(*ah));
ah->dah_parent = parent;
ah->dah_mode = child_mode;
}
* \retval 0, on success
*/
static int __osd_oi_insert(const struct lu_env *env, struct osd_object *obj,
- const struct lu_fid *fid, struct thandle *th)
+ const struct lu_fid *fid, struct thandle *th)
{
- struct osd_thread_info *info = osd_oti_get(env);
- struct osd_inode_id *id = &info->oti_id;
- struct osd_device *osd = osd_obj2dev(obj);
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct osd_inode_id *id = &info->oti_id;
+ struct osd_device *osd = osd_obj2dev(obj);
+ struct osd_thandle *oh;
- LASSERT(obj->oo_inode != NULL);
+ LASSERT(obj->oo_inode != NULL);
+
+ oh = container_of0(th, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle); /* journal handle must already be started */
osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
- return osd_oi_insert(info, osd, fid, id, th);
+ return osd_oi_insert(info, osd, fid, id, oh->ot_handle, OI_CHECK_FLD);
}
int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
- const struct lu_fid *fid, struct lu_seq_range *range)
+ obd_seq seq, struct lu_seq_range *range)
{
struct seq_server_site *ss = osd_seq_site(osd);
- int rc;
- if (fid_is_idif(fid)) {
+ if (fid_seq_is_idif(seq)) {
fld_range_set_ost(range);
- range->lsr_index = fid_idif_ost_idx(fid);
+ range->lsr_index = idif_ost_idx(seq);
return 0;
}
- if (!fid_seq_in_fldb(fid_seq(fid))) {
+ if (!fid_seq_in_fldb(seq)) {
fld_range_set_mdt(range);
if (ss != NULL)
/* FIXME: If ss is NULL, it suppose not get lsr_index
LASSERT(ss != NULL);
fld_range_set_any(range);
- rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(fid), range);
- if (rc != 0) {
- CERROR("%s: cannot find FLD range for "DFID": rc = %d\n",
- osd_name(osd), PFID(fid), rc);
- }
- return rc;
+ /* OSD will only do local fld lookup */
+ return fld_local_lookup(env, ss->ss_server_fld, seq, range);
}
/*
struct dt_object_format *dof,
struct thandle *handle)
{
- struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
struct osd_thandle *oh;
int rc;
ENTRY;
osd_trans_declare_op(env, oh, OSD_OT_CREATE,
osd_dto_credits_noquota[DTO_OBJECT_CREATE]);
if (!fid_is_on_ost(osd_oti_get(env), osd_dt_dev(handle->th_dev),
- lu_object_fid(&dt->do_lu)))
+ lu_object_fid(&dt->do_lu), OI_CHECK_FLD))
/* Reuse idle OI block may cause additional one OI block
* to be changed. */
osd_trans_declare_op(env, oh, OSD_OT_INSERT,
if (rc != 0)
RETURN(rc);
- /* It does fld look up inside declare, and the result will be
- * added to fld cache, so the following fld lookup inside insert
- * does not need send RPC anymore, so avoid send rpc with holding
- * transaction */
- if (fid_is_norm(lu_object_fid(&dt->do_lu)) &&
- !fid_is_last_id(lu_object_fid(&dt->do_lu)))
- osd_fld_lookup(env, osd_dt_dev(handle->th_dev),
- lu_object_fid(&dt->do_lu), range);
-
-
RETURN(rc);
}
RETURN(-EPERM);
if (S_ISDIR(inode->i_mode)) {
- LASSERT(osd_inode_unlinked(inode) || inode->i_nlink == 1);
+ LASSERT(osd_inode_unlinked(inode) || inode->i_nlink == 1 ||
+ inode->i_nlink == 2);
/* it will check/delete the inode from remote parent,
* how to optimize it? unlink performance impaction XXX */
result = osd_delete_from_remote_parent(env, osd, obj, oh);
osd_trans_exec_op(env, th, OSD_OT_DESTROY);
- result = osd_oi_delete(osd_oti_get(env), osd, fid, th);
+ result = osd_oi_delete(osd_oti_get(env), osd, fid, oh->ot_handle,
+ OI_CHECK_FLD);
/* XXX: add to ext3 orphan list */
/* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
* FIXME: It is good to have/use ldiskfs_xattr_set_handle() here
*/
int osd_ea_fid_set(struct osd_thread_info *info, struct inode *inode,
- const struct lu_fid *fid, __u64 flags)
+ const struct lu_fid *fid, __u32 compat, __u32 incompat)
{
struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
int rc;
+ ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_FID_INLMA))
- return 0;
+ RETURN(0);
- lustre_lma_init(lma, fid, flags);
+ lustre_lma_init(lma, fid, compat, incompat);
lustre_lma_swab(lma);
rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma, sizeof(*lma),
XATTR_CREATE);
- /* Someone may created the EA by race. */
- if (unlikely(rc == -EEXIST))
- rc = 0;
- return rc;
+ /* The LMA may already exist, but we must still check that all the
+ * desired compat/incompat flags have been added. */
+ if (unlikely(rc == -EEXIST)) {
+ if (compat == 0 && incompat == 0)
+ RETURN(0); /* nothing specific requested: existing LMA is fine */
+
+ rc = __osd_xattr_get(inode, &info->oti_obj_dentry,
+ XATTR_NAME_LMA, info->oti_mdt_attrs_old,
+ LMA_OLD_SIZE);
+ if (rc <= 0)
+ RETURN(-EINVAL); /* existing LMA unreadable or empty */
+
+ lustre_lma_swab(lma); /* NOTE(review): assumes lma aliases oti_mdt_attrs_old — confirm */
+ if (!(~lma->lma_compat & compat) &&
+ !(~lma->lma_incompat & incompat))
+ RETURN(0); /* all requested flags already present */
+
+ lma->lma_compat |= compat;
+ lma->lma_incompat |= incompat;
+ lustre_lma_swab(lma);
+ rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
+ sizeof(*lma), XATTR_REPLACE);
+ }
+
+ RETURN(rc);
}
/**
}
/* Set special LMA flag for local agent inode */
- rc = osd_ea_fid_set(info, local, fid, LMAI_AGENT);
+ rc = osd_ea_fid_set(info, local, fid, 0, LMAI_AGENT);
if (rc != 0) {
CERROR("%s: set LMA for "DFID" remote inode failed: rc = %d\n",
osd_name(osd), PFID(fid), rc);
result = __osd_object_create(info, obj, attr, hint, dof, th);
if (result == 0)
- result = osd_ea_fid_set(info, obj->oo_inode, fid, 0);
+ result = osd_ea_fid_set(info, obj->oo_inode, fid,
+ fid_is_on_ost(info, osd_obj2dev(obj),
+ fid, OI_CHECK_FLD) ?
+ LMAC_FID_ON_OST : 0, 0);
if (result == 0)
result = __osd_oi_insert(env, obj, fid, th);
static int osd_object_ref_add(const struct lu_env *env,
struct dt_object *dt, struct thandle *th)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct inode *inode = obj->oo_inode;
- bool need_dirty = false;
- int rc = 0;
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct osd_thandle *oh;
+ int rc = 0;
LINVRNT(osd_invariant(obj));
LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(osd_write_locked(env, obj));
LASSERT(th != NULL);
+ oh = container_of0(th, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle != NULL); /* nlink update is journalled: handle must be started */
+
osd_trans_exec_op(env, th, OSD_OT_REF_ADD);
- /* This based on ldiskfs_inc_count(), which is not exported.
- *
+ /*
* The DIR_NLINK feature allows directories to exceed LDISKFS_LINK_MAX
* (65000) subdirectories by storing "1" in i_nlink if the link count
* would otherwise overflow. Directory tranversal tools understand
* in case they are being linked into the PENDING directory
*/
spin_lock(&obj->oo_guard);
- if (unlikely(!S_ISDIR(inode->i_mode) &&
- inode->i_nlink >= LDISKFS_LINK_MAX)) {
- /* MDD should have checked this, but good to be safe */
- rc = -EMLINK;
- } else if (unlikely(inode->i_nlink == 0 ||
- (S_ISDIR(inode->i_mode) &&
- inode->i_nlink >= LDISKFS_LINK_MAX))) {
- /* inc_nlink from 0 may cause WARN_ON */
- set_nlink(inode, 1);
- need_dirty = true;
- } else if (!S_ISDIR(inode->i_mode) ||
- (S_ISDIR(inode->i_mode) && inode->i_nlink >= 2)) {
- inc_nlink(inode);
- need_dirty = true;
- } /* else (S_ISDIR(inode->i_mode) && inode->i_nlink == 1) { ; } */
-
+ ldiskfs_inc_count(oh->ot_handle, inode); /* replaces the open-coded nlink logic removed above */
LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
spin_unlock(&obj->oo_guard);
- if (need_dirty)
- ll_dirty_inode(inode, I_DIRTY_DATASYNC);
-
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
LINVRNT(osd_invariant(obj));
return rc;
static int osd_object_ref_del(const struct lu_env *env, struct dt_object *dt,
struct thandle *th)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct inode *inode = obj->oo_inode;
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
+ struct osd_thandle *oh;
- LINVRNT(osd_invariant(obj));
+ LINVRNT(osd_invariant(obj));
LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
- LASSERT(osd_write_locked(env, obj));
- LASSERT(th != NULL);
+ LASSERT(osd_write_locked(env, obj));
+ LASSERT(th != NULL);
+
+ oh = container_of0(th, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle != NULL); /* journalled nlink update needs a started handle */
osd_trans_exec_op(env, th, OSD_OT_REF_DEL);
spin_lock(&obj->oo_guard);
- LASSERT(inode->i_nlink > 0);
-
- /* This based on ldiskfs_dec_count(), which is not exported.
- *
- * If a directory already has nlink == 1, then do not drop the nlink
- * count to 0, even temporarily, to avoid race conditions with other
- * threads not holding oo_guard seeing i_nlink == 0 in rare cases.
- *
- * nlink == 1 means the directory has/had > EXT4_LINK_MAX subdirs.
- * */
- if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 1) {
- drop_nlink(inode);
-
- spin_unlock(&obj->oo_guard);
- ll_dirty_inode(inode, I_DIRTY_DATASYNC);
- LINVRNT(osd_invariant(obj));
- } else {
+ /* This can be the result of an upgrade from an old Lustre version and
+ * applies only to local files. Just skip this ref_del call.
+ * ext4_unlink() only treats this as a warning, don't LASSERT here. */
+ if (inode->i_nlink == 0) {
+ CDEBUG_LIMIT(fid_is_norm(lu_object_fid(&dt->do_lu)) ?
+ D_ERROR : D_INODE, "%s: nlink == 0 on "DFID
+ ", maybe an upgraded file? (LU-3915)\n",
+ osd_name(osd), PFID(lu_object_fid(&dt->do_lu)));
spin_unlock(&obj->oo_guard);
+ return 0; /* silently skip: see comment above */
}
+ ldiskfs_dec_count(oh->ot_handle, inode); /* mirrors ldiskfs_inc_count() in ref_add */
+ spin_unlock(&obj->oo_guard);
+
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
+ LINVRNT(osd_invariant(obj));
+
+ return 0;
}
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
return -EACCES;
+ CDEBUG(D_INODE, DFID" set xattr '%s' with size %zd\n",
+ PFID(lu_object_fid(&dt->do_lu)), name, buf->lb_len);
+
osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
if (fl & LU_XATTR_REPLACE)
fs_flags |= XATTR_REPLACE;
}
static struct obd_capa *osd_capa_get(const struct lu_env *env,
- struct dt_object *dt,
- struct lustre_capa *old,
- __u64 opc)
+ struct dt_object *dt,
+ struct lustre_capa *old, __u64 opc)
{
- struct osd_thread_info *info = osd_oti_get(env);
- const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_device *dev = osd_obj2dev(obj);
- struct lustre_capa_key *key = &info->oti_capa_key;
- struct lustre_capa *capa = &info->oti_capa;
- struct obd_capa *oc;
- struct md_capainfo *ci;
- int rc;
- ENTRY;
+ /* scratch capa/key buffers live in per-thread info, not on the stack */
+ struct osd_thread_info *info = osd_oti_get(env);
+ const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_device *osd = osd_obj2dev(obj);
+ struct lustre_capa_key *key = &info->oti_capa_key;
+ struct lustre_capa *capa = &info->oti_capa;
+ struct obd_capa *oc;
+ struct lu_capainfo *lci;
+ int rc;
+ ENTRY;
- if (!dev->od_fl_capa)
- RETURN(ERR_PTR(-ENOENT));
+ /* capability support disabled on this device */
+ if (!osd->od_fl_capa)
+ RETURN(ERR_PTR(-ENOENT));
LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
- LINVRNT(osd_invariant(obj));
+ LINVRNT(osd_invariant(obj));
- /* renewal sanity check */
- if (old && osd_object_auth(env, dt, old, opc))
- RETURN(ERR_PTR(-EACCES));
-
- ci = md_capainfo(env);
- if (unlikely(!ci))
- RETURN(ERR_PTR(-ENOENT));
-
- switch (ci->mc_auth) {
- case LC_ID_NONE:
- RETURN(NULL);
- case LC_ID_PLAIN:
- capa->lc_uid = obj->oo_inode->i_uid;
- capa->lc_gid = obj->oo_inode->i_gid;
- capa->lc_flags = LC_ID_PLAIN;
- break;
- case LC_ID_CONVERT: {
- __u32 d[4], s[4];
-
- s[0] = obj->oo_inode->i_uid;
- cfs_get_random_bytes(&(s[1]), sizeof(__u32));
- s[2] = obj->oo_inode->i_gid;
- cfs_get_random_bytes(&(s[3]), sizeof(__u32));
- rc = capa_encrypt_id(d, s, key->lk_key, CAPA_HMAC_KEY_MAX_LEN);
- if (unlikely(rc))
- RETURN(ERR_PTR(rc));
-
- capa->lc_uid = ((__u64)d[1] << 32) | d[0];
- capa->lc_gid = ((__u64)d[3] << 32) | d[2];
- capa->lc_flags = LC_ID_CONVERT;
- break;
- }
- default:
- RETURN(ERR_PTR(-EINVAL));
+ /* renewal sanity check */
+ if (old && osd_object_auth(env, dt, old, opc))
+ RETURN(ERR_PTR(-EACCES));
+
+ /* per-env capability auth mode; lu_capainfo replaces old md_capainfo */
+ lci = lu_capainfo_get(env);
+ if (unlikely(lci == NULL))
+ RETURN(ERR_PTR(-ENOENT));
+
+ switch (lci->lci_auth) {
+ case LC_ID_NONE:
+ RETURN(NULL);
+ case LC_ID_PLAIN:
+ capa->lc_uid = obj->oo_inode->i_uid;
+ capa->lc_gid = obj->oo_inode->i_gid;
+ capa->lc_flags = LC_ID_PLAIN;
+ break;
+ case LC_ID_CONVERT: {
+ __u32 d[4], s[4];
+
+ /* encrypt uid/gid (padded with random words) into the capa id */
+ s[0] = obj->oo_inode->i_uid;
+ cfs_get_random_bytes(&(s[1]), sizeof(__u32));
+ s[2] = obj->oo_inode->i_gid;
+ cfs_get_random_bytes(&(s[3]), sizeof(__u32));
+ rc = capa_encrypt_id(d, s, key->lk_key, CAPA_HMAC_KEY_MAX_LEN);
+ if (unlikely(rc))
+ RETURN(ERR_PTR(rc));
+
+ capa->lc_uid = ((__u64)d[1] << 32) | d[0];
+ capa->lc_gid = ((__u64)d[3] << 32) | d[2];
+ capa->lc_flags = LC_ID_CONVERT;
+ break;
}
+ default:
+ RETURN(ERR_PTR(-EINVAL));
+ }
- capa->lc_fid = *fid;
- capa->lc_opc = opc;
- capa->lc_flags |= dev->od_capa_alg << 24;
- capa->lc_timeout = dev->od_capa_timeout;
- capa->lc_expiry = 0;
+ capa->lc_fid = *fid;
+ capa->lc_opc = opc;
+ capa->lc_flags |= osd->od_capa_alg << 24;
+ capa->lc_timeout = osd->od_capa_timeout;
+ capa->lc_expiry = 0;
- oc = capa_lookup(dev->od_capa_hash, capa, 1);
- if (oc) {
- LASSERT(!capa_is_expired(oc));
- RETURN(oc);
- }
+ /* reuse a cached, still-valid capability if one exists */
+ oc = capa_lookup(osd->od_capa_hash, capa, 1);
+ if (oc) {
+ LASSERT(!capa_is_expired(oc));
+ RETURN(oc);
+ }
spin_lock(&capa_lock);
- *key = dev->od_capa_keys[1];
+ *key = osd->od_capa_keys[1];
spin_unlock(&capa_lock);
- capa->lc_keyid = key->lk_keyid;
- capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;
+ capa->lc_keyid = key->lk_keyid;
+ capa->lc_expiry = cfs_time_current_sec() + osd->od_capa_timeout;
- rc = capa_hmac(capa->lc_hmac, capa, key->lk_key);
- if (rc) {
- DEBUG_CAPA(D_ERROR, capa, "HMAC failed: %d for", rc);
- RETURN(ERR_PTR(rc));
- }
+ /* sign the new capability with the current key */
+ rc = capa_hmac(capa->lc_hmac, capa, key->lk_key);
+ if (rc) {
+ DEBUG_CAPA(D_ERROR, capa, "HMAC failed: %d for", rc);
+ RETURN(ERR_PTR(rc));
+ }
- oc = capa_add(dev->od_capa_hash, capa);
- RETURN(oc);
+ oc = capa_add(osd->od_capa_hash, capa);
+ RETURN(oc);
}
static int osd_object_sync(const struct lu_env *env, struct dt_object *dt)
file->f_dentry = dentry;
file->f_mapping = inode->i_mapping;
file->f_op = inode->i_fop;
+ set_file_inode(file, inode);
#ifndef HAVE_FILE_FSYNC_4ARGS
mutex_lock(&inode->i_mutex);
#endif
static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
			  struct lu_fid *fid)
{
- struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
- struct seq_server_site *ss = osd_seq_site(osd);
- int rc;
ENTRY;
+ /* Returns 1 when @fid's sequence is not served locally (remote object),
+ * 0 when it is local. */
- /* Those FID seqs, which are not in FLDB, must be local seq */
- if (unlikely(!fid_seq_in_fldb(fid_seq(fid)) || ss == NULL))
+ /* FID seqs not in FLDB, must be local seq */
+ if (unlikely(!fid_seq_in_fldb(fid_seq(fid))))
RETURN(0);
- rc = osd_fld_lookup(env, osd, fid, range);
- if (rc != 0) {
- CERROR("%s: Can not lookup fld for "DFID"\n",
- osd_name(osd), PFID(fid));
- RETURN(rc);
- }
+ /* sequence is known to this OSD => the object is local */
+ if (osd_seq_exists(env, osd, fid_seq(fid)))
+ RETURN(0);
- RETURN(ss->ss_node_id != range->lsr_index);
+ RETURN(1);
}
/**
down_write(&obj->oo_ext_idx_sem);
}
- bh = ldiskfs_find_entry(dir, &dentry->d_name, &de, hlock);
+ bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
if (bh) {
__u32 ino = 0;
RETURN_EXIT;
again:
- rc = osd_oi_lookup(oti, dev, fid, id, true);
+ rc = osd_oi_lookup(oti, dev, fid, id, OI_CHECK_FLD);
if (rc != 0 && rc != -ENOENT)
RETURN_EXIT;
}
if (!dev->od_noscrub && ++once == 1) {
- CDEBUG(D_LFSCK, "Trigger OI scrub by RPC for "DFID"\n",
- PFID(fid));
rc = osd_scrub_start(dev);
- LCONSOLE_ERROR("%.16s: trigger OI scrub by RPC for "DFID
- ", rc = %d [2]\n",
- LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
- PFID(fid), rc);
+ LCONSOLE_WARN("%.16s: trigger OI scrub by RPC for "DFID
+ ", rc = %d [2]\n",
+ LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
+ PFID(fid), rc);
if (rc == 0)
goto again;
}
return rc;
}
-static int osd_add_oi_cache(struct osd_thread_info *info,
-			    struct osd_device *osd,
-			    struct osd_inode_id *id,
-			    struct lu_fid *fid)
+/* Record the FID => inode-id mapping in the per-thread OI cache.
+ * Exported (no longer static) so other osd files can populate the cache. */
+int osd_add_oi_cache(struct osd_thread_info *info, struct osd_device *osd,
+		     struct osd_inode_id *id, const struct lu_fid *fid)
{
	CDEBUG(D_INODE, "add "DFID" %u:%u to info %p\n", PFID(fid),
	       id->oii_ino, id->oii_gen, info);
	info->oti_cache.oic_lid = *id;
	info->oti_cache.oic_fid = *fid;
+	/* remember the owning device — presumably so a later lookup can
+	 * verify the cached entry belongs to this osd; TODO confirm */
+	info->oti_cache.oic_dev = osd;
	return 0;
}
down_read(&obj->oo_ext_idx_sem);
}
- bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
+ bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
if (bh) {
struct osd_thread_info *oti = osd_oti_get(env);
struct osd_inode_id *id = &oti->oti_id;
rc = osd_ea_fid_get(env, obj, ino, fid, id);
else
osd_id_gen(id, ino, OSD_OII_NOGEN);
- if (rc != 0 || osd_remote_fid(env, dev, fid)) {
+ if (rc != 0) {
fid_zero(&oic->oic_fid);
GOTO(out, rc);
}
+ if (osd_remote_fid(env, dev, fid))
+ GOTO(out, rc = 0);
+
rc = osd_add_oi_cache(osd_oti_get(env), osd_obj2dev(obj), id,
fid);
if (rc != 0)
lu_object_put(env, luch);
child = ERR_PTR(-ENOENT);
}
- } else
- child = (void *)luch;
+ } else {
+ child = ERR_CAST(luch);
+ }
- return child;
+ return child;
}
/**
CERROR("%s: Can not find object "DFID"%u:%u: rc = %d\n",
osd_name(osd), PFID(fid),
id->oii_ino, id->oii_gen,
- (int)PTR_ERR(child_inode));
- RETURN(PTR_ERR(child_inode));
+ (int)PTR_ERR(child));
+ RETURN(PTR_ERR(child));
}
child_inode = igrab(child->oo_inode);
}
file->f_dentry = obj_dentry;
file->f_mapping = obj->oo_inode->i_mapping;
file->f_op = obj->oo_inode->i_fop;
+ set_file_inode(file, obj->oo_inode);
+
lu_object_get(lo);
RETURN((struct dt_it *) it);
}
else
up_read(&obj->oo_ext_idx_sem);
- if (it->oie_rd_dirent == 0) {
- result = -EIO;
- } else {
- it->oie_dirent = it->oie_buf;
- it->oie_it_dirent = 1;
- }
+ if (it->oie_rd_dirent == 0) {
+ /* If no dirent was read, the iterator has reached
+ * the end of the directory. */
+ it->oie_file.f_pos = ldiskfs_get_htree_eof(&it->oie_file);
+ } else {
+ it->oie_dirent = it->oie_buf;
+ it->oie_it_dirent = 1;
+ }
- RETURN(result);
+ RETURN(result);
}
/**
again:
if (dev->od_dirent_journal) {
- jh = ldiskfs_journal_start_sb(sb, credits);
+ jh = osd_journal_start_sb(sb, LDISKFS_HT_MISC, credits);
if (IS_ERR(jh)) {
rc = PTR_ERR(jh);
CERROR("%.16s: fail to start trans for dirent "
}
}
- bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
+ bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
/* For dot/dotdot entry, if there is not enough space to hold the
* FID-in-dirent, just keep them there. It only happens when the
* device upgraded from 1.8 or restored from MDT file-level backup.
rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
if (rc == 0) {
+ LASSERT(!(lma->lma_compat & LMAC_NOT_IN_OI));
+
if (fid_is_sane(fid)) {
/* FID-in-dirent is valid. */
if (lu_fid_eq(fid, &lma->lma_self_fid))
if (unlikely(fid_is_sane(fid))) {
/* FID-in-dirent exists, but FID-in-LMA is lost.
* Trust the FID-in-dirent, and add FID-in-LMA. */
- rc = osd_ea_fid_set(info, inode, fid, 0);
+ rc = osd_ea_fid_set(info, inode, fid, 0, 0);
if (rc == 0)
*attr |= LUDA_REPAIR;
} else {
{
ENTRY;
- if (o->od_fsops) {
- fsfilt_put_ops(o->od_fsops);
- o->od_fsops = NULL;
- }
-
if (o->od_mnt != NULL) {
shrink_dcache_sb(osd_sb(o));
osd_sync(env, &o->od_dt_dev);
struct file_system_type *type;
char *options = NULL;
char *str;
- int rc = 0;
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lu_fid *fid = &info->oti_fid;
+ struct inode *inode;
+ int rc = 0;
ENTRY;
if (o->od_mnt != NULL)
RETURN(-E2BIG);
strcpy(o->od_mntdev, dev);
- o->od_fsops = fsfilt_get_ops(mt_str(LDD_MT_LDISKFS));
- if (IS_ERR(o->od_fsops)) {
- CERROR("%s: Can't find fsfilt_ldiskfs\n", name);
- o->od_fsops = NULL;
- RETURN(-ENOTSUPP);
- }
-
- OBD_PAGE_ALLOC(__page, CFS_ALLOC_STD);
+ OBD_PAGE_ALLOC(__page, GFP_IOFS);
if (__page == NULL)
GOTO(out, rc = -ENOMEM);
if (str)
lmd_flags = simple_strtoul(str + 1, NULL, 0);
opts = lustre_cfg_string(cfg, 3);
- page = (unsigned long)cfs_page_address(__page);
+ page = (unsigned long)page_address(__page);
options = (char *)page;
*options = '\0';
if (opts == NULL)
/* Glom up mount options */
if (*options != '\0')
strcat(options, ",");
- strlcat(options, "no_mbcache", CFS_PAGE_SIZE);
+ strlcat(options, "no_mbcache", PAGE_CACHE_SIZE);
type = get_fs_type("ldiskfs");
if (!type) {
}
o->od_mnt = vfs_kern_mount(type, s_flags, dev, options);
- cfs_module_put(type->owner);
+ module_put(type->owner);
if (IS_ERR(o->od_mnt)) {
rc = PTR_ERR(o->od_mnt);
- CERROR("%s: can't mount %s: %d\n", name, dev, rc);
o->od_mnt = NULL;
+ CERROR("%s: can't mount %s: %d\n", name, dev, rc);
GOTO(out, rc);
}
if (dev_check_rdonly(o->od_mnt->mnt_sb->s_bdev)) {
CERROR("%s: underlying device %s is marked as read-only. "
"Setup failed\n", name, dev);
- mntput(o->od_mnt);
- o->od_mnt = NULL;
- GOTO(out, rc = -EROFS);
+ GOTO(out_mnt, rc = -EROFS);
}
#endif
if (!LDISKFS_HAS_COMPAT_FEATURE(o->od_mnt->mnt_sb,
- LDISKFS_FEATURE_COMPAT_HAS_JOURNAL)) {
+ LDISKFS_FEATURE_COMPAT_HAS_JOURNAL)) {
CERROR("%s: device %s is mounted w/o journal\n", name, dev);
- mntput(o->od_mnt);
- o->od_mnt = NULL;
- GOTO(out, rc = -EINVAL);
+ GOTO(out_mnt, rc = -EINVAL);
+ }
+
+#ifdef LDISKFS_MOUNT_DIRDATA
+ if (LDISKFS_HAS_INCOMPAT_FEATURE(o->od_mnt->mnt_sb,
+ LDISKFS_FEATURE_INCOMPAT_DIRDATA))
+ LDISKFS_SB(osd_sb(o))->s_mount_opt |= LDISKFS_MOUNT_DIRDATA;
+#endif
+ inode = osd_sb(o)->s_root->d_inode;
+ lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
+ rc = osd_ea_fid_set(info, inode, fid, LMAC_NOT_IN_OI, 0);
+ if (rc != 0) {
+ CERROR("%s: failed to set lma on %s root inode\n", name, dev);
+ GOTO(out_mnt, rc);
}
- ldiskfs_set_inode_state(osd_sb(o)->s_root->d_inode,
- LDISKFS_STATE_LUSTRE_NO_OI);
if (lmd_flags & LMD_FLG_NOSCRUB)
o->od_noscrub = 1;
+ GOTO(out, rc = 0);
+
+out_mnt:
+ mntput(o->od_mnt);
+ o->od_mnt = NULL;
+
out:
if (__page)
OBD_PAGE_FREE(__page);
- if (rc)
- fsfilt_put_ops(o->od_fsops);
- RETURN(rc);
+ return rc;
}
static struct lu_device *osd_device_fini(const struct lu_env *env,
struct osd_device *o = osd_dev(d);
ENTRY;
- osd_procfs_fini(o);
osd_shutdown(env, o);
+ osd_procfs_fini(o);
osd_scrub_cleanup(env, o);
osd_obj_map_fini(o);
osd_umount(env, o);
spin_lock_init(&o->od_osfs_lock);
mutex_init(&o->od_otable_mutex);
- o->od_osfs_age = cfs_time_shift_64(-1000);
o->od_capa_hash = init_capa_hash();
if (o->od_capa_hash == NULL)
GOTO(out_mnt, rc);
}
+ if (server_name_is_ost(o->od_svname))
+ o->od_is_ost = 1;
+
rc = osd_obj_map_init(env, o);
if (rc != 0)
GOTO(out_mnt, rc);
if (rc != 0)
GOTO(out_site, rc);
+ /* self-repair LMA by default */
+ o->od_lma_self_repair = 1;
+
CFS_INIT_LIST_HEAD(&o->od_ios_list);
/* setup scrub, including OI files initialization */
rc = osd_scrub_setup(env, o);
static int osd_process_config(const struct lu_env *env,
struct lu_device *d, struct lustre_cfg *cfg)
{
- struct osd_device *o = osd_dev(d);
- int err;
- ENTRY;
+ struct osd_device *o = osd_dev(d);
+ int rc;
+ ENTRY;
- switch(cfg->lcfg_command) {
- case LCFG_SETUP:
- err = osd_mount(env, o, cfg);
- break;
- case LCFG_CLEANUP:
+ switch (cfg->lcfg_command) {
+ case LCFG_SETUP:
+ rc = osd_mount(env, o, cfg);
+ break;
+ case LCFG_CLEANUP:
lu_dev_del_linkage(d->ld_site, d);
- err = osd_shutdown(env, o);
+ rc = osd_shutdown(env, o);
break;
- default:
- err = -ENOSYS;
- }
+ case LCFG_PARAM:
+ LASSERT(&o->od_dt_dev);
+ rc = class_process_proc_param(PARAM_OSD, lprocfs_osd_obd_vars,
+ cfg, &o->od_dt_dev);
+ if (rc > 0 || rc == -ENOSYS)
+ rc = class_process_proc_param(PARAM_OST,
+ lprocfs_osd_obd_vars,
+ cfg, &o->od_dt_dev);
+ break;
+ default:
+ rc = -ENOSYS;
+ }
- RETURN(err);
+ RETURN(rc);
}
static int osd_recovery_complete(const struct lu_env *env,
static int __init osd_mod_init(void)
{
- struct lprocfs_static_vars lvars;
+ int rc;
+
+ osd_oi_mod_init();
- osd_oi_mod_init();
- lprocfs_osd_init_vars(&lvars);
- return class_register_type(&osd_obd_device_ops, NULL, lvars.module_vars,
- LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);
+ /* slab caches (dynlock) must exist before any osd device is set up */
+ rc = lu_kmem_init(ldiskfs_caches);
+ if (rc)
+ return rc;
+
+ rc = class_register_type(&osd_obd_device_ops, NULL, NULL,
+#ifndef HAVE_ONLY_PROCFS_SEQ
+ lprocfs_osd_module_vars,
+#endif
+ LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);
+ /* undo cache setup if type registration failed */
+ if (rc)
+ lu_kmem_fini(ldiskfs_caches);
+ return rc;
}
static void __exit osd_mod_exit(void)
{
	class_unregister_type(LUSTRE_OSD_LDISKFS_NAME);
+	/* tear down slab caches after the device type is unregistered */
+	lu_kmem_fini(ldiskfs_caches);
}
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");