/* struct ptlrpc_thread */
#include <lustre_net.h>
#include <lustre_fid.h>
+/* process_config */
+#include <lustre_param.h>
#include "osd_internal.h"
#include "osd_dynlocks.h"
#include <md_object.h>
#include <lustre_quota.h>
+#include <ldiskfs/xattr.h>
+
int ldiskfs_pdo = 1;
CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
"ldiskfs with parallel directory operations");
/*
* Concurrency: doesn't matter
*/
-static int osd_read_locked(const struct lu_env *env, struct osd_object *o)
-{
- return osd_oti_get(env)->oti_r_locks > 0;
-}
/*
* Concurrency: doesn't matter
id->oii_ino, PTR_ERR(inode));
} else if (id->oii_gen != OSD_OII_NOGEN &&
inode->i_generation != id->oii_gen) {
- CDEBUG(D_INODE, "unmatched inode: ino = %u, gen0 = %u, "
- "gen1 = %u\n",
+ CDEBUG(D_INODE, "unmatched inode: ino = %u, oii_gen = %u, "
+ "i_generation = %u\n",
id->oii_ino, id->oii_gen, inode->i_generation);
iput(inode);
inode = ERR_PTR(-ESTALE);
/* due to parallel readdir and unlink,
* we can have dead inode here. */
CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
- make_bad_inode(inode);
iput(inode);
inode = ERR_PTR(-ESTALE);
} else if (is_bad_inode(inode)) {
return inode;
}
+/*
+ * Obtain the inode referenced by @id and sanity-check it against the
+ * expected identity.
+ *
+ * On any mismatch (iget failure, bad inode, stale generation, zero
+ * nlink) the behaviour depends on @in_oi: when @id was taken from the
+ * OI table (or OI scrub pending list) the OI mapping is re-checked to
+ * distinguish a normally-removed object (-ENOENT) from an inconsistent
+ * OI mapping (-EREMCHG, which callers use to trigger OI scrub);
+ * otherwise the error is returned directly.
+ *
+ * \retval inode pointer on success (with S_NOCMTIME set)
+ * \retval ERR_PTR(-ENOENT/-ESTALE/-EREMCHG/...) on failure
+ */
+static struct inode *osd_iget_check(struct osd_thread_info *info,
+				    struct osd_device *dev,
+				    const struct lu_fid *fid,
+				    struct osd_inode_id *id,
+				    bool in_oi)
+{
+	struct inode *inode;
+	int rc = 0;
+	ENTRY;
+
+	inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
+	if (IS_ERR(inode)) {
+		rc = PTR_ERR(inode);
+		if (!in_oi || (rc != -ENOENT && rc != -ESTALE)) {
+			CDEBUG(D_INODE, "no inode: ino = %u, rc = %d\n",
+			       id->oii_ino, rc);
+
+			GOTO(put, rc);
+		}
+
+		goto check_oi;
+	}
+
+	if (is_bad_inode(inode)) {
+		rc = -ENOENT;
+		if (!in_oi) {
+			CDEBUG(D_INODE, "bad inode: ino = %u\n", id->oii_ino);
+
+			GOTO(put, rc);
+		}
+
+		goto check_oi;
+	}
+
+	if (id->oii_gen != OSD_OII_NOGEN &&
+	    inode->i_generation != id->oii_gen) {
+		rc = -ESTALE;
+		if (!in_oi) {
+			CDEBUG(D_INODE, "unmatched inode: ino = %u, "
+			       "oii_gen = %u, i_generation = %u\n",
+			       id->oii_ino, id->oii_gen, inode->i_generation);
+
+			GOTO(put, rc);
+		}
+
+		goto check_oi;
+	}
+
+	if (inode->i_nlink == 0) {
+		rc = -ENOENT;
+		if (!in_oi) {
+			CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
+
+			GOTO(put, rc);
+		}
+
+		goto check_oi;
+	}
+
+check_oi:
+	if (rc != 0) {
+		/* Only reached with @in_oi set and a missing/stale inode:
+		 * consult the OI mapping to decide the final error code. */
+		LASSERTF(rc == -ESTALE || rc == -ENOENT, "rc = %d\n", rc);
+
+		rc = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
+		/* XXX: There are some possible cases:
+		 *	1. rc = 0.
+		 *	   Backup/restore caused the OI invalid.
+		 *	2. rc = 0.
+		 *	   Someone unlinked the object but NOT removed
+		 *	   the OI mapping, such as mount target device
+		 *	   as ldiskfs, and modify something directly.
+		 *	3. rc = -ENOENT.
+		 *	   Someone just removed the object between the
+		 *	   former oi_lookup and the iget. It is normal.
+		 *	4. Other failure cases.
+		 *
+		 *	Generally, when the device is mounted, it will
+		 *	auto check whether the system is restored from
+		 *	file-level backup or not. We trust such detect
+		 *	to distinguish the 1st case from the 2nd case. */
+		if (rc == 0) {
+			if (!IS_ERR(inode) && inode->i_generation != 0 &&
+			    inode->i_generation == id->oii_gen)
+				rc = -ENOENT;
+			else
+				rc = -EREMCHG;
+		}
+	} else {
+		if (id->oii_gen == OSD_OII_NOGEN)
+			osd_id_gen(id, inode->i_ino, inode->i_generation);
+
+		/* Do not update file c/mtime in ldiskfs.
+		 * NB: we don't have any lock to protect this because we don't
+		 * have reference on osd_object now, but contention with
+		 * another lookup + attr_set can't happen in the tiny window
+		 * between if (...) and set S_NOCMTIME. */
+		if (!(inode->i_flags & S_NOCMTIME))
+			inode->i_flags |= S_NOCMTIME;
+	}
+
+	GOTO(put, rc);
+
+put:
+	if (rc != 0) {
+		if (!IS_ERR(inode))
+			iput(inode);
+
+		inode = ERR_PTR(rc);
+	}
+
+	return inode;
+}
+
/**
* \retval +v: new filter_fid, does not contain self-fid
* \retval 0: filter_fid_old, contains self-fid
return rc;
}
+/*
+ * Regenerate the LMA xattr of @inode from the trusted @fid inside a
+ * private journal transaction.
+ *
+ * @compat carries extra LMA compat flags (e.g. LMAC_FID_ON_OST) to be
+ * stored along with the FID.  Returns 0 on success or negative errno;
+ * failures are logged but left for the caller to decide on.
+ */
+static int osd_lma_self_repair(struct osd_thread_info *info,
+			       struct osd_device *osd, struct inode *inode,
+			       const struct lu_fid *fid, __u32 compat)
+{
+	handle_t *jh;
+	int rc;
+
+	/* must not be nested inside another journal transaction */
+	LASSERT(current->journal_info == NULL);
+
+	jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
+				  osd_dto_credits_noquota[DTO_XATTR_SET]);
+	if (IS_ERR(jh)) {
+		rc = PTR_ERR(jh);
+		CWARN("%s: cannot start journal for lma_self_repair: rc = %d\n",
+		      osd_name(osd), rc);
+		return rc;
+	}
+
+	rc = osd_ea_fid_set(info, inode, fid, compat, 0);
+	if (rc != 0)
+		CWARN("%s: cannot self repair the LMA: rc = %d\n",
+		      osd_name(osd), rc);
+	ldiskfs_journal_stop(jh);
+	return rc;
+}
+
static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
{
struct osd_thread_info *info = osd_oti_get(env);
struct inode *inode = obj->oo_inode;
struct dentry *dentry = &info->oti_obj_dentry;
struct lu_fid *fid = NULL;
+ const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
int rc;
ENTRY;
- if (OBD_FAIL_CHECK(OBD_FAIL_OSD_COMPAT_INVALID_ENTRY))
- RETURN(0);
-
CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
info->oti_mdt_attrs_old, LMA_OLD_SIZE);
- if (rc == -ENODATA && !fid_is_igif(lu_object_fid(&obj->oo_dt.do_lu)) &&
- osd->od_check_ff) {
+ if (rc == -ENODATA && !fid_is_igif(rfid) && osd->od_check_ff) {
fid = &lma->lma_self_fid;
rc = osd_get_idif(info, inode, dentry, fid);
if ((rc > 0) || (rc == -ENODATA && osd->od_lma_self_repair)) {
- handle_t *jh;
-
/* For the given OST-object, if it has neither LMA nor
* FID in XATTR_NAME_FID, then the given FID (which is
* contained in the @obj, from client RPC for locating
* the OST-object) is trusted. We use it to generate
* the LMA. */
-
- LASSERT(current->journal_info == NULL);
-
- jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
- osd_dto_credits_noquota[DTO_XATTR_SET]);
- if (IS_ERR(jh)) {
- CWARN("%s: cannot start journal for "
- "lma_self_repair: rc = %ld\n",
- osd_name(osd), PTR_ERR(jh));
- RETURN(0);
- }
-
- rc = osd_ea_fid_set(info, inode,
- lu_object_fid(&obj->oo_dt.do_lu),
- fid_is_on_ost(info, osd,
- lu_object_fid(&obj->oo_dt.do_lu),
- OI_CHECK_FLD) ?
- LMAC_FID_ON_OST : 0, 0);
- if (rc != 0)
- CWARN("%s: cannot self repair the LMA: "
- "rc = %d\n", osd_name(osd), rc);
- ldiskfs_journal_stop(jh);
+ osd_lma_self_repair(info, osd, inode, rfid,
+ fid_is_on_ost(info, osd, fid, OI_CHECK_FLD) ?
+ LMAC_FID_ON_OST : 0);
RETURN(0);
}
}
CWARN("%s: unsupported incompat LMA feature(s) %#x for "
"fid = "DFID", ino = %lu\n", osd_name(osd),
lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
- PFID(lu_object_fid(&obj->oo_dt.do_lu)),
- inode->i_ino);
+ PFID(rfid), inode->i_ino);
rc = -EOPNOTSUPP;
} else if (!(lma->lma_compat & LMAC_NOT_IN_OI)) {
fid = &lma->lma_self_fid;
}
}
- if (fid != NULL &&
- unlikely(!lu_fid_eq(lu_object_fid(&obj->oo_dt.do_lu), fid))) {
+ if (fid != NULL && unlikely(!lu_fid_eq(rfid, fid))) {
+ if (fid_is_idif(rfid) && fid_is_idif(fid)) {
+ struct ost_id *oi = &info->oti_ostid;
+ struct lu_fid *fid1 = &info->oti_fid3;
+ __u32 idx = fid_idif_ost_idx(rfid);
+
+ /* For old IDIF, the OST index is not part of the IDIF,
+ * Means that different OSTs may have the same IDIFs.
+ * Under such case, we need to make some compatible
+ * check to make sure to trigger OI scrub properly. */
+ if (idx != 0 && fid_idif_ost_idx(fid) == 0) {
+ /* Given @rfid is new, LMA is old. */
+ fid_to_ostid(fid, oi);
+ ostid_to_fid(fid1, oi, idx);
+ if (lu_fid_eq(fid1, rfid)) {
+ if (osd->od_lma_self_repair)
+ osd_lma_self_repair(info, osd,
+ inode, rfid,
+ LMAC_FID_ON_OST);
+ RETURN(0);
+ }
+ }
+ }
+
CDEBUG(D_INODE, "%s: FID "DFID" != self_fid "DFID"\n",
- osd_name(osd), PFID(lu_object_fid(&obj->oo_dt.do_lu)),
- PFID(&lma->lma_self_fid));
+ osd_name(osd), PFID(rfid), PFID(fid));
rc = -EREMCHG;
}
}
id = &info->oti_id;
- if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
+ if (!list_empty(&scrub->os_inconsistent_items)) {
/* Search order: 2. OI scrub pending list. */
result = osd_oii_lookup(dev, fid, id);
if (result == 0)
in_oi = true;
iget:
- inode = osd_iget(info, dev, id);
+ inode = osd_iget_check(info, dev, fid, id, in_oi);
if (IS_ERR(inode)) {
result = PTR_ERR(inode);
if (result == -ENOENT || result == -ESTALE) {
- if (!in_oi) {
+ if (!in_oi)
fid_zero(&oic->oic_fid);
- GOTO(out, result = -ENOENT);
- }
- /* XXX: There are three possible cases:
- * 1. Backup/restore caused the OI invalid.
- * 2. Someone unlinked the object but NOT removed
- * the OI mapping, such as mount target device
- * as ldiskfs, and modify something directly.
- * 3. Someone just removed the object between the
- * former oi_lookup and the iget. It is normal.
- *
- * It is diffcult to distinguish the 2nd from the
- * 1st case. Relatively speaking, the 1st case is
- * common than the 2nd case, trigger OI scrub. */
- result = osd_oi_lookup(info, dev, fid, id, true);
- if (result == 0)
- /* It is the case 1 or 2. */
- goto trigger;
+ GOTO(out, result = -ENOENT);
} else if (result == -EREMCHG) {
trigger:
+ if (!in_oi)
+ fid_zero(&oic->oic_fid);
+
if (unlikely(triggered))
GOTO(out, result = saved);
result = -EINPROGRESS;
} else if (!dev->od_noscrub) {
result = osd_scrub_start(dev);
- LCONSOLE_ERROR("%.16s: trigger OI scrub by RPC "
- "for "DFID", rc = %d [1]\n",
- LDISKFS_SB(osd_sb(dev))->s_es->\
- s_volume_name,PFID(fid), result);
+ LCONSOLE_WARN("%.16s: trigger OI scrub by RPC "
+ "for "DFID", rc = %d [1]\n",
+			      osd_name(dev), PFID(fid), result);
if (result == 0 || result == -EALREADY)
result = -EINPROGRESS;
else
if (result != 0) {
iput(inode);
obj->oo_inode = NULL;
- if (result == -EREMCHG)
+ if (result == -EREMCHG) {
+ if (!in_oi) {
+ result = osd_oi_lookup(info, dev, fid, id,
+ OI_CHECK_FLD);
+ if (result != 0) {
+ fid_zero(&oic->oic_fid);
+ GOTO(out, result);
+ }
+ }
+
goto trigger;
+ }
GOTO(out, result);
}
dt_txn_hook_commit(th);
/* call per-transaction callbacks if any */
- cfs_list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
+ list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
"commit callback entry: magic=%x name='%s'\n",
dcb->dcb_magic, dcb->dcb_name);
- cfs_list_del_init(&dcb->dcb_linkage);
+ list_del_init(&dcb->dcb_linkage);
dcb->dcb_func(NULL, th, dcb, error);
}
lu_context_exit(&th->th_ctx);
lu_context_fini(&th->th_ctx);
- OBD_FREE_PTR(oh);
+ thandle_put(th);
}
static struct thandle *osd_trans_create(const struct lu_env *env,
-                                        struct dt_device *d)
+					struct dt_device *d)
{
-        struct osd_thread_info *oti = osd_oti_get(env);
-        struct osd_iobuf       *iobuf = &oti->oti_iobuf;
-        struct osd_thandle     *oh;
-        struct thandle         *th;
-        ENTRY;
+	struct osd_thread_info	*oti = osd_oti_get(env);
+	struct osd_iobuf	*iobuf = &oti->oti_iobuf;
+	struct osd_thandle	*oh;
+	struct thandle		*th;
+	ENTRY;
-        /* on pending IO in this thread should left from prev. request */
-        LASSERT(cfs_atomic_read(&iobuf->dr_numreqs) == 0);
+	/* no pending IO in this thread should be left from prev. request */
+	LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
-        th = ERR_PTR(-ENOMEM);
-        OBD_ALLOC_GFP(oh, sizeof *oh, __GFP_IO);
-        if (oh != NULL) {
+	th = ERR_PTR(-ENOMEM);
+	OBD_ALLOC_GFP(oh, sizeof *oh, GFP_NOFS);
+	if (oh != NULL) {
		oh->ot_quota_trans = &oti->oti_quota_trans;
		memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
-                th = &oh->ot_super;
-                th->th_dev = d;
-                th->th_result = 0;
-                th->th_tags = LCT_TX_HANDLE;
-                oh->ot_credits = 0;
-                oti->oti_dev = osd_dt_dev(d);
-                CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
-                osd_th_alloced(oh);
+		th = &oh->ot_super;
+		th->th_dev = d;
+		th->th_result = 0;
+		th->th_tags = LCT_TX_HANDLE;
+		oh->ot_credits = 0;
+		/* initial reference; released via thandle_put() on commit */
+		atomic_set(&th->th_refc, 1);
+		th->th_alloc_size = sizeof(*oh);
+		oti->oti_dev = osd_dt_dev(d);
+		INIT_LIST_HEAD(&oh->ot_dcb_list);
+		osd_th_alloced(oh);
		memset(oti->oti_declare_ops, 0,
		       sizeof(oti->oti_declare_ops));
		memset(oti->oti_declare_ops_cred, 0,
		       sizeof(oti->oti_declare_ops_cred));
		oti->oti_rollback = false;
-        }
-        RETURN(th);
+	}
+	RETURN(th);
}
/*
LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
oh->ot_credits,
osd_journal(dev)->j_max_transaction_buffers);
- CWARN(" create: %u/%u, delete: %u/%u, destroy: %u/%u\n",
+ CWARN(" create: %u/%u, destroy: %u/%u\n",
oti->oti_declare_ops[OSD_OT_CREATE],
oti->oti_declare_ops_cred[OSD_OT_CREATE],
- oti->oti_declare_ops[OSD_OT_DELETE],
- oti->oti_declare_ops_cred[OSD_OT_DELETE],
oti->oti_declare_ops[OSD_OT_DESTROY],
oti->oti_declare_ops_cred[OSD_OT_DESTROY]);
CWARN(" attr_set: %u/%u, xattr_set: %u/%u\n",
CWARN(" insert: %u/%u, delete: %u/%u\n",
oti->oti_declare_ops[OSD_OT_INSERT],
oti->oti_declare_ops_cred[OSD_OT_INSERT],
- oti->oti_declare_ops[OSD_OT_DESTROY],
- oti->oti_declare_ops_cred[OSD_OT_DESTROY]);
+ oti->oti_declare_ops[OSD_OT_DELETE],
+ oti->oti_declare_ops_cred[OSD_OT_DELETE]);
CWARN(" ref_add: %u/%u, ref_del: %u/%u\n",
oti->oti_declare_ops[OSD_OT_REF_ADD],
oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
/*
* Concurrency: shouldn't matter.
*/
-static int osd_trans_stop(const struct lu_env *env, struct thandle *th)
+static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
+ struct thandle *th)
{
- int rc = 0;
- struct osd_thandle *oh;
- struct osd_thread_info *oti = osd_oti_get(env);
- struct osd_iobuf *iobuf = &oti->oti_iobuf;
+ int rc = 0;
+ struct osd_thandle *oh;
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_iobuf *iobuf = &oti->oti_iobuf;
struct qsd_instance *qsd = oti->oti_dev->od_quota_slave;
- ENTRY;
+ struct lquota_trans *qtrans;
+ ENTRY;
- oh = container_of0(th, struct osd_thandle, ot_super);
+ oh = container_of0(th, struct osd_thandle, ot_super);
- if (qsd != NULL)
- /* inform the quota slave device that the transaction is
- * stopping */
- qsd_op_end(env, qsd, oh->ot_quota_trans);
+ qtrans = oh->ot_quota_trans;
oh->ot_quota_trans = NULL;
if (oh->ot_handle != NULL) {
if (rc != 0)
CERROR("Failure to stop transaction: %d\n", rc);
} else {
- OBD_FREE_PTR(oh);
+ thandle_put(&oh->ot_super);
}
+ /* inform the quota slave device that the transaction is stopping */
+ qsd_op_end(env, qsd, qtrans);
+
/* as we want IO to journal and data IO be concurrent, we don't block
* awaiting data IO completion in osd_do_bio(), instead we wait here
* once transaction is submitted to the journal. all reqular requests
* completed otherwise iobuf may be corrupted by different request
*/
wait_event(iobuf->dr_wait,
- cfs_atomic_read(&iobuf->dr_numreqs) == 0);
+ atomic_read(&iobuf->dr_numreqs) == 0);
+ osd_fini_iobuf(oti->oti_dev, iobuf);
if (!rc)
rc = iobuf->dr_error;
LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
LASSERT(&dcb->dcb_func != NULL);
- cfs_list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
+ list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
return 0;
}
osd_index_fini(obj);
if (inode != NULL) {
struct qsd_instance *qsd = osd_obj2dev(obj)->od_quota_slave;
- qid_t uid = inode->i_uid;
- qid_t gid = inode->i_gid;
+ qid_t uid = i_uid_read(inode);
+ qid_t gid = i_gid_read(inode);
iput(inode);
obj->oo_inode = NULL;
d ? d->id_ops->id_name : "plain");
}
+#define GRANT_FOR_LOCAL_OIDS 32 /* 128kB for last_rcvd, quota files, ... */
+
/*
* Concurrency: shouldn't matter.
*/
}
spin_lock(&osd->od_osfs_lock);
- /* cache 1 second */
- if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
- result = sb->s_op->statfs(sb->s_root, ksfs);
- if (likely(result == 0)) { /* N.B. statfs can't really fail */
- osd->od_osfs_age = cfs_time_current_64();
- statfs_pack(&osd->od_statfs, ksfs);
- if (sb->s_flags & MS_RDONLY)
- sfs->os_state = OS_STATE_READONLY;
- }
+ result = sb->s_op->statfs(sb->s_root, ksfs);
+ if (likely(result == 0)) { /* N.B. statfs can't really fail */
+ statfs_pack(sfs, ksfs);
+ if (sb->s_flags & MS_RDONLY)
+ sfs->os_state = OS_STATE_READONLY;
}
- if (likely(result == 0))
- *sfs = osd->od_statfs;
spin_unlock(&osd->od_osfs_lock);
- if (unlikely(env == NULL))
+ if (unlikely(env == NULL))
OBD_FREE_PTR(ksfs);
+ /* Reserve a small amount of space for local objects like last_rcvd,
+ * llog, quota files, ... */
+ if (sfs->os_bavail <= GRANT_FOR_LOCAL_OIDS) {
+ sfs->os_bavail = 0;
+ } else {
+ sfs->os_bavail -= GRANT_FOR_LOCAL_OIDS;
+ /** Take out metadata overhead for indirect blocks */
+ sfs->os_bavail -= sfs->os_bavail >> (sb->s_blocksize_bits - 3);
+ }
+
return result;
}
struct dt_device_param *param)
{
struct super_block *sb = osd_sb(osd_dt_dev(dev));
+ int ea_overhead;
/*
* XXX should be taken from not-yet-existing fs abstraction layer.
if (test_opt(sb, POSIX_ACL))
param->ddp_mntopts |= MNTOPT_ACL;
+ /* LOD might calculate the max stripe count based on max_ea_size,
+ * so we need take account in the overhead as well,
+ * xattr_header + magic + xattr_entry_head */
+ ea_overhead = sizeof(struct ldiskfs_xattr_header) + sizeof(__u32) +
+ LDISKFS_XATTR_LEN(XATTR_NAME_MAX_LEN);
+
#if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
- if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
- param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE;
- else
+ if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
+ param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE -
+ ea_overhead;
+ else
#endif
- param->ddp_max_ea_size = sb->s_blocksize;
-
+ param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
}
/*
* If we mount with --data_journal we may need more.
*/
const int osd_dto_credits_noquota[DTO_NR] = {
-        /**
-         * Insert/Delete.
-         * INDEX_EXTRA_TRANS_BLOCKS(8) +
-         * SINGLEDATA_TRANS_BLOCKS(8)
-         * XXX Note: maybe iam need more, since iam have more level than
-         * EXT3 htree.
-         */
-        [DTO_INDEX_INSERT]  = 16,
-        [DTO_INDEX_DELETE]  = 16,
-        /**
+	/**
+	 * Insert.
+	 * INDEX_EXTRA_TRANS_BLOCKS(8) +
+	 * SINGLEDATA_TRANS_BLOCKS(8)
+	 * XXX Note: maybe iam need more, since iam have more level than
+	 * EXT3 htree.
+	 */
+	[DTO_INDEX_INSERT]  = 16,
+	/**
+	 * Delete
+	 * just modify a single entry, probably merge few within a block
+	 */
+	[DTO_INDEX_DELETE]  = 1,
+	/**
	 * Used for OI scrub
-         */
-        [DTO_INDEX_UPDATE]  = 16,
-        /**
-         * Create a object. The same as create object in EXT3.
-         * DATA_TRANS_BLOCKS(14) +
-         * INDEX_EXTRA_BLOCKS(8) +
-         * 3(inode bits, groups, GDT)
-         */
-        [DTO_OBJECT_CREATE] = 25,
-        /**
-         * XXX: real credits to be fixed
-         */
-        [DTO_OBJECT_DELETE] = 25,
-        /**
-         * Attr set credits (inode)
-         */
-        [DTO_ATTR_SET_BASE] = 1,
-        /**
-         * Xattr set. The same as xattr of EXT3.
-         * DATA_TRANS_BLOCKS(14)
-         * XXX Note: in original MDS implmentation INDEX_EXTRA_TRANS_BLOCKS
-         * are also counted in. Do not know why?
-         */
-        [DTO_XATTR_SET]     = 14,
-        [DTO_LOG_REC]       = 14,
-        /**
-         * credits for inode change during write.
-         */
-        [DTO_WRITE_BASE]    = 3,
-        /**
-         * credits for single block write.
-         */
-        [DTO_WRITE_BLOCK]   = 14,
-        /**
-         * Attr set credits for chown.
-         * This is extra credits for setattr, and it is null without quota
-         */
-        [DTO_ATTR_SET_CHOWN]= 0
+	 */
+	[DTO_INDEX_UPDATE]  = 16,
+	/**
+	 * 4(inode, inode bits, groups, GDT)
+	 * notice: OI updates are counted separately with DTO_INDEX_INSERT
+	 */
+	[DTO_OBJECT_CREATE] = 4,
+	/**
+	 * 4(inode, inode bits, groups, GDT)
+	 * notice: OI updates are counted separately with DTO_INDEX_DELETE
+	 */
+	[DTO_OBJECT_DELETE] = 4,
+	/**
+	 * Attr set credits (inode)
+	 */
+	[DTO_ATTR_SET_BASE] = 1,
+	/**
+	 * Xattr set. The same as xattr of EXT3.
+	 * DATA_TRANS_BLOCKS(14)
+	 * XXX Note: in original MDS implementation INDEX_EXTRA_TRANS_BLOCKS
+	 * are also counted in. Do not know why?
+	 */
+	[DTO_XATTR_SET]     = 14,
+	/**
+	 * credits for inode change during write.
+	 */
+	[DTO_WRITE_BASE]    = 3,
+	/**
+	 * credits for single block write.
+	 */
+	[DTO_WRITE_BLOCK]   = 14,
+	/**
+	 * Attr set credits for chown.
+	 * This is extra credits for setattr, and it is null without quota
+	 */
+	[DTO_ATTR_SET_CHOWN] = 0
};
static const struct dt_device_operations osd_dt_ops = {
+/* Copy every standard attribute of @inode into @attr and mark them all
+ * valid; uid/gid go through i_uid_read()/i_gid_read() so the code works
+ * with kuid/kgid-mapping kernels. */
static void osd_inode_getattr(const struct lu_env *env,
-                              struct inode *inode, struct lu_attr *attr)
-{
-        attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
-                          LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
-                          LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE |
-                          LA_TYPE;
-
-        attr->la_atime = LTIME_S(inode->i_atime);
-        attr->la_mtime = LTIME_S(inode->i_mtime);
-        attr->la_ctime = LTIME_S(inode->i_ctime);
-        attr->la_mode = inode->i_mode;
-        attr->la_size = i_size_read(inode);
-        attr->la_blocks = inode->i_blocks;
-        attr->la_uid = inode->i_uid;
-        attr->la_gid = inode->i_gid;
-        attr->la_flags = LDISKFS_I(inode)->i_flags;
-        attr->la_nlink = inode->i_nlink;
-        attr->la_rdev = inode->i_rdev;
-        attr->la_blksize = 1 << inode->i_blkbits;
-        attr->la_blkbits = inode->i_blkbits;
+			      struct inode *inode, struct lu_attr *attr)
+{
+	attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
+			  LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
+			  LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE |
+			  LA_TYPE;
+
+	attr->la_atime = LTIME_S(inode->i_atime);
+	attr->la_mtime = LTIME_S(inode->i_mtime);
+	attr->la_ctime = LTIME_S(inode->i_ctime);
+	attr->la_mode = inode->i_mode;
+	attr->la_size = i_size_read(inode);
+	attr->la_blocks = inode->i_blocks;
+	attr->la_uid = i_uid_read(inode);
+	attr->la_gid = i_gid_read(inode);
+	attr->la_flags = LDISKFS_I(inode)->i_flags;
+	attr->la_nlink = inode->i_nlink;
+	attr->la_rdev = inode->i_rdev;
+	attr->la_blksize = 1 << inode->i_blkbits;
+	attr->la_blkbits = inode->i_blkbits;
}
static int osd_attr_get(const struct lu_env *env,
struct osd_object *obj;
struct osd_thread_info *info = osd_oti_get(env);
struct lquota_id_info *qi = &info->oti_qi;
+ qid_t uid;
+ qid_t gid;
long long bspace;
int rc = 0;
bool allocated;
* credits for updating quota accounting files and to trigger quota
* space adjustment once the operation is completed.*/
if ((attr->la_valid & LA_UID) != 0 &&
- attr->la_uid != obj->oo_inode->i_uid) {
+ attr->la_uid != (uid = i_uid_read(obj->oo_inode))) {
qi->lqi_type = USRQUOTA;
/* inode accounting */
RETURN(rc);
/* and one less inode for the current uid */
- qi->lqi_id.qid_uid = obj->oo_inode->i_uid;
+ qi->lqi_id.qid_uid = uid;
qi->lqi_space = -1;
rc = osd_declare_qid(env, oh, qi, true, NULL);
if (rc == -EDQUOT || rc == -EINPROGRESS)
RETURN(rc);
/* and finally less blocks for the current owner */
- qi->lqi_id.qid_uid = obj->oo_inode->i_uid;
+ qi->lqi_id.qid_uid = uid;
qi->lqi_space = -bspace;
rc = osd_declare_qid(env, oh, qi, true, NULL);
if (rc == -EDQUOT || rc == -EINPROGRESS)
}
if (attr->la_valid & LA_GID &&
- attr->la_gid != obj->oo_inode->i_gid) {
+ attr->la_gid != (gid = i_gid_read(obj->oo_inode))) {
qi->lqi_type = GRPQUOTA;
/* inode accounting */
RETURN(rc);
/* and one less inode for the current gid */
- qi->lqi_id.qid_gid = obj->oo_inode->i_gid;
+ qi->lqi_id.qid_gid = gid;
qi->lqi_space = -1;
rc = osd_declare_qid(env, oh, qi, true, NULL);
if (rc == -EDQUOT || rc == -EINPROGRESS)
RETURN(rc);
/* and finally less blocks for the current owner */
- qi->lqi_id.qid_gid = obj->oo_inode->i_gid;
+ qi->lqi_id.qid_gid = gid;
qi->lqi_space = -bspace;
rc = osd_declare_qid(env, oh, qi, true, NULL);
if (rc == -EDQUOT || rc == -EINPROGRESS)
if (bits & LA_BLOCKS)
inode->i_blocks = attr->la_blocks;
#endif
- if (bits & LA_MODE)
- inode->i_mode = (inode->i_mode & S_IFMT) |
- (attr->la_mode & ~S_IFMT);
- if (bits & LA_UID)
- inode->i_uid = attr->la_uid;
- if (bits & LA_GID)
- inode->i_gid = attr->la_gid;
- if (bits & LA_NLINK)
+ if (bits & LA_MODE)
+ inode->i_mode = (inode->i_mode & S_IFMT) |
+ (attr->la_mode & ~S_IFMT);
+ if (bits & LA_UID)
+ i_uid_write(inode, attr->la_uid);
+ if (bits & LA_GID)
+ i_gid_write(inode, attr->la_gid);
+ if (bits & LA_NLINK)
set_nlink(inode, attr->la_nlink);
- if (bits & LA_RDEV)
- inode->i_rdev = attr->la_rdev;
+ if (bits & LA_RDEV)
+ inode->i_rdev = attr->la_rdev;
if (bits & LA_FLAGS) {
/* always keep S_NOCMTIME */
static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
{
- if ((attr->la_valid & LA_UID && attr->la_uid != inode->i_uid) ||
- (attr->la_valid & LA_GID && attr->la_gid != inode->i_gid)) {
+ if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
+ (attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
struct iattr iattr;
int rc;
iattr.ia_valid |= ATTR_UID;
if (attr->la_valid & LA_GID)
iattr.ia_valid |= ATTR_GID;
- iattr.ia_uid = attr->la_uid;
- iattr.ia_gid = attr->la_gid;
+ iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
+ iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);
rc = ll_vfs_dq_transfer(inode, &iattr);
if (rc) {
{
LASSERT(ah);
- memset(ah, 0, sizeof(*ah));
ah->dah_parent = parent;
ah->dah_mode = child_mode;
}
osd_trans_declare_op(env, oh, OSD_OT_CREATE,
osd_dto_credits_noquota[DTO_OBJECT_CREATE]);
- if (!fid_is_on_ost(osd_oti_get(env), osd_dt_dev(handle->th_dev),
- lu_object_fid(&dt->do_lu), OI_CHECK_FLD))
- /* Reuse idle OI block may cause additional one OI block
- * to be changed. */
- osd_trans_declare_op(env, oh, OSD_OT_INSERT,
- osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
+ /* Reuse idle OI block may cause additional one OI block
+ * to be changed. */
+ osd_trans_declare_op(env, oh, OSD_OT_INSERT,
+ osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
/* If this is directory, then we expect . and .. to be inserted as
* well. The one directory block always needs to be created for the
osd_trans_declare_op(env, oh, OSD_OT_DELETE,
osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
/* one less inode */
- rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, -1, oh,
- false, true, NULL, false);
+ rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
+ -1, oh, false, true, NULL, false);
if (rc)
RETURN(rc);
/* data to be truncated */
- rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
- true, true, NULL, false);
+ rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
+ 0, oh, true, true, NULL, false);
RETURN(rc);
}
osd_trans_exec_op(env, th, OSD_OT_REF_ADD);
+ CDEBUG(D_INODE, DFID" increase nlink %d\n",
+ PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
/*
* The DIR_NLINK feature allows directories to exceed LDISKFS_LINK_MAX
* (65000) subdirectories by storing "1" in i_nlink if the link count
return 0;
}
+ CDEBUG(D_INODE, DFID" decrease nlink %d\n",
+ PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
+
ldiskfs_dec_count(oh->ot_handle, inode);
spin_unlock(&obj->oo_guard);
int fl, struct thandle *handle)
{
struct osd_thandle *oh;
+ int credits;
LASSERT(handle != NULL);
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
- strcmp(name, XATTR_NAME_VERSION) == 0 ?
- osd_dto_credits_noquota[DTO_ATTR_SET_BASE] :
- osd_dto_credits_noquota[DTO_XATTR_SET]);
+ /* optimistic optimization: LMA is set first and usually fit inode */
+ if (strcmp(name, XATTR_NAME_LMA) == 0) {
+ if (dt_object_exists(dt))
+ credits = 0;
+ else
+ credits = 1;
+ } else if (strcmp(name, XATTR_NAME_VERSION) == 0) {
+ credits = 1;
+ } else {
+ struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
+ struct super_block *sb = osd_sb(osd);
+ credits = osd_dto_credits_noquota[DTO_XATTR_SET];
+ if (buf && buf->lb_len > sb->s_blocksize) {
+ credits *= (buf->lb_len + sb->s_blocksize - 1) >>
+ sb->s_blocksize_bits;
+ }
+ }
+
+ osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET, credits);
return 0;
}
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
return -EACCES;
+ CDEBUG(D_INODE, DFID" set xattr '%s' with size %zd\n",
+ PFID(lu_object_fid(&dt->do_lu)), name, buf->lb_len);
+
osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
if (fl & LU_XATTR_REPLACE)
fs_flags |= XATTR_REPLACE;
if (fl & LU_XATTR_CREATE)
fs_flags |= XATTR_CREATE;
+ if (strcmp(name, XATTR_NAME_LMV) == 0) {
+ struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+ int rc;
+
+ rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
+ if (rc != 0)
+ RETURN(rc);
+
+ lma->lma_incompat |= LMAI_STRIPED;
+ lustre_lma_swab(lma);
+ rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
+ sizeof(*lma), XATTR_REPLACE);
+ if (rc != 0)
+ RETURN(rc);
+ }
+
return __osd_xattr_set(info, inode, name, buf->lb_buf, buf->lb_len,
fs_flags);
}
LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(inode->i_op != NULL && inode->i_op->listxattr != NULL);
- LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
return -EACCES;
case LC_ID_NONE:
RETURN(NULL);
case LC_ID_PLAIN:
- capa->lc_uid = obj->oo_inode->i_uid;
- capa->lc_gid = obj->oo_inode->i_gid;
+ capa->lc_uid = i_uid_read(obj->oo_inode);
+ capa->lc_gid = i_gid_read(obj->oo_inode);
capa->lc_flags = LC_ID_PLAIN;
break;
case LC_ID_CONVERT: {
__u32 d[4], s[4];
- s[0] = obj->oo_inode->i_uid;
+ s[0] = i_uid_read(obj->oo_inode);
cfs_get_random_bytes(&(s[1]), sizeof(__u32));
- s[2] = obj->oo_inode->i_gid;
+		s[2] = i_gid_read(obj->oo_inode);
cfs_get_random_bytes(&(s[3]), sizeof(__u32));
rc = capa_encrypt_id(d, s, key->lk_key, CAPA_HMAC_KEY_MAX_LEN);
if (unlikely(rc))
RETURN(oc);
}
-static int osd_object_sync(const struct lu_env *env, struct dt_object *dt)
+static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
+ __u64 start, __u64 end)
{
struct osd_object *obj = osd_dt_obj(dt);
struct inode *inode = obj->oo_inode;
file->f_mapping = inode->i_mapping;
file->f_op = inode->i_fop;
set_file_inode(file, inode);
-#ifndef HAVE_FILE_FSYNC_4ARGS
+
+#ifdef HAVE_FILE_FSYNC_4ARGS
+ rc = file->f_op->fsync(file, start, end, 0);
+#elif defined(HAVE_FILE_FSYNC_2ARGS)
mutex_lock(&inode->i_mutex);
-#endif
- rc = do_fsync(file, 0);
-#ifndef HAVE_FILE_FSYNC_4ARGS
+ rc = file->f_op->fsync(file, 0);
+ mutex_unlock(&inode->i_mutex);
+#else
+ mutex_lock(&inode->i_mutex);
+ rc = file->f_op->fsync(file, dentry, 0);
mutex_unlock(&inode->i_mutex);
#endif
+
RETURN(rc);
}
* recheck under lock.
*/
if (!osd_has_index(obj))
- result = osd_iam_container_init(env, obj, dir);
+ result = osd_iam_container_init(env, obj,
+ obj->oo_dir);
else
result = 0;
up_write(&obj->oo_ext_idx_sem);
inode = osd_dt_obj(dt)->oo_inode;
LASSERT(inode);
- rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
- true, true, NULL, false);
+ rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
+ 0, oh, true, true, NULL, false);
RETURN(rc);
}
}
if (!dev->od_noscrub && ++once == 1) {
- CDEBUG(D_LFSCK, "Trigger OI scrub by RPC for "DFID"\n",
- PFID(fid));
rc = osd_scrub_start(dev);
- LCONSOLE_ERROR("%.16s: trigger OI scrub by RPC for "DFID
- ", rc = %d [2]\n",
- LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
- PFID(fid), rc);
+ LCONSOLE_WARN("%.16s: trigger OI scrub by RPC for "DFID
+ ", rc = %d [2]\n",
+ LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
+ PFID(fid), rc);
if (rc == 0)
goto again;
}
GOTO(out, rc);
}
+ if (osd_remote_fid(env, dev, fid))
+ GOTO(out, rc = 0);
+
rc = osd_add_oi_cache(osd_oti_get(env), osd_obj2dev(obj), id,
fid);
if (rc != 0)
* in the cache, otherwise lu_object_alloc() crashes
* -bzzz
*/
- luch = lu_object_find_at(env, ludev, fid, NULL);
- if (!IS_ERR(luch)) {
- if (lu_object_exists(luch)) {
- lo = lu_object_locate(luch->lo_header, ludev->ld_type);
- if (lo != NULL)
- child = osd_obj(lo);
- else
- LU_OBJECT_DEBUG(D_ERROR, env, luch,
- "lu_object can't be located"
+ luch = lu_object_find_at(env, ludev->ld_site->ls_top_dev == NULL ?
+ ludev : ludev->ld_site->ls_top_dev,
+ fid, NULL);
+ if (!IS_ERR(luch)) {
+ if (lu_object_exists(luch)) {
+ lo = lu_object_locate(luch->lo_header, ludev->ld_type);
+ if (lo != NULL)
+ child = osd_obj(lo);
+ else
+ LU_OBJECT_DEBUG(D_ERROR, env, luch,
+ "lu_object can't be located"
DFID"\n", PFID(fid));
if (child == NULL) {
int rc;
ENTRY;
- LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
+ LASSERT(!dt_object_remote(dt));
LASSERT(handle != NULL);
oh = container_of0(handle, struct osd_thandle, ot_super);
osd_trans_declare_op(env, oh, OSD_OT_INSERT,
osd_dto_credits_noquota[DTO_INDEX_INSERT]);
- if (osd_dt_obj(dt)->oo_inode == NULL) {
- const char *name = (const char *)key;
- /* Object is not being created yet. Only happens when
- * 1. declare directory create
- * 2. declare insert .
- * 3. declare insert ..
- */
- LASSERT(strcmp(name, dotdot) == 0 || strcmp(name, dot) == 0);
- } else {
+ if (osd_dt_obj(dt)->oo_inode != NULL) {
struct inode *inode = osd_dt_obj(dt)->oo_inode;
/* We ignore block quota on meta pool (MDTs), so needn't
* calculate how many blocks will be consumed by this index
* insert */
- rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0,
+ rc = osd_declare_inode_qid(env, i_uid_read(inode),
+ i_gid_read(inode), 0,
oh, true, true, NULL, false);
}
rc = osd_ea_add_rec(env, obj, child_inode, name, rec, th);
+ CDEBUG(D_INODE, "parent %lu insert %s:%lu rc = %d\n",
+ obj->oo_inode->i_ino, name, child_inode->i_ino, rc);
+
iput(child_inode);
if (child != NULL)
osd_object_put(env, child);
{
}
+struct osd_filldir_cbs {
+#ifdef HAVE_DIR_CONTEXT
+ struct dir_context ctx;
+#endif
+ struct osd_it_ea *it;
+};
/**
* It is called internally by ->readdir(). It fills the
* iterator's in-memory data structure with required
* \retval 0 on success
* \retval 1 on buffer full
*/
-static int osd_ldiskfs_filldir(char *buf, const char *name, int namelen,
+static int osd_ldiskfs_filldir(void *buf, const char *name, int namelen,
loff_t offset, __u64 ino,
unsigned d_type)
{
- struct osd_it_ea *it = (struct osd_it_ea *)buf;
+ struct osd_it_ea *it = ((struct osd_filldir_cbs *)buf)->it;
struct osd_object *obj = it->oie_obj;
struct osd_it_ea_dirent *ent = it->oie_dirent;
struct lu_fid *fid = &ent->oied_fid;
*
* \retval 0 on success
* \retval -ve on error
+ * \retval +1 reach the end of entry
*/
static int osd_ldiskfs_it_fill(const struct lu_env *env,
const struct dt_it *di)
struct osd_object *obj = it->oie_obj;
struct inode *inode = obj->oo_inode;
struct htree_lock *hlock = NULL;
- int result = 0;
+ struct file *filp = &it->oie_file;
+ int rc = 0;
+ struct osd_filldir_cbs buf = {
+#ifdef HAVE_DIR_CONTEXT
+ .ctx.actor = osd_ldiskfs_filldir,
+#endif
+ .it = it
+ };
ENTRY;
it->oie_dirent = it->oie_buf;
down_read(&obj->oo_ext_idx_sem);
}
- result = inode->i_fop->readdir(&it->oie_file, it,
- (filldir_t) osd_ldiskfs_filldir);
+#ifdef HAVE_DIR_CONTEXT
+ buf.ctx.pos = filp->f_pos;
+ rc = inode->i_fop->iterate(filp, &buf.ctx);
+ filp->f_pos = buf.ctx.pos;
+#else
+ rc = inode->i_fop->readdir(filp, &buf, osd_ldiskfs_filldir);
+#endif
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
else
up_read(&obj->oo_ext_idx_sem);
- if (it->oie_rd_dirent == 0) {
- result = -EIO;
- } else {
- it->oie_dirent = it->oie_buf;
- it->oie_it_dirent = 1;
- }
+ if (it->oie_rd_dirent == 0) {
+ /* If it does not get any dirent, it means it has
+ * reached the end of the dir */
+ it->oie_file.f_pos = ldiskfs_get_htree_eof(&it->oie_file);
+ if (rc == 0)
+ rc = 1;
+ } else {
+ it->oie_dirent = it->oie_buf;
+ it->oie_it_dirent = 1;
+ }
- RETURN(result);
+ RETURN(rc);
}
/**
} else {
attr &= ~LU_DIRENT_ATTRS_MASK;
if (!fid_is_sane(fid)) {
- if (OBD_FAIL_CHECK(OBD_FAIL_FID_LOOKUP))
+ if (OBD_FAIL_CHECK(OBD_FAIL_FID_LOOKUP) &&
+ likely(it->oie_dirent->oied_namelen != 2 ||
+ it->oie_dirent->oied_name[0] != '.' ||
+ it->oie_dirent->oied_name[1] != '.'))
RETURN(-ENOENT);
rc = osd_ea_fid_get(env, obj, ino, fid, id);
}
/**
+ * Returns the record size at the current position.
+ *
+ * This function will return record(lu_dirent) size in bytes.
+ *
+ * \param[in] env execution environment
+ * \param[in] di iterator's in memory structure
+ * \param[in] attr attribute of the entry, only requires LUDA_TYPE to
+ * calculate the lu_dirent size.
+ *
+ * \retval record size (in bytes, in memory) of the current
+ * lu_dirent entry.
+ */
+static int osd_it_ea_rec_size(const struct lu_env *env, const struct dt_it *di,
+ __u32 attr)
+{
+ struct osd_it_ea *it = (struct osd_it_ea *)di;
+
+ return lu_dirent_calc_size(it->oie_dirent->oied_namelen, attr);
+}
+
+/**
* Returns a cookie for current position of the iterator head, so that
* user can use this cookie to load/start the iterator next time.
*
it->oie_file.f_pos = hash;
rc = osd_ldiskfs_it_fill(env, di);
+ if (rc > 0)
+ rc = -ENODATA;
+
if (rc == 0)
rc = +1;
* mode (i.e. to run 2.0 mds on 1.8 disk) (b11826)
*/
static const struct dt_index_operations osd_index_ea_ops = {
- .dio_lookup = osd_index_ea_lookup,
- .dio_declare_insert = osd_index_declare_ea_insert,
- .dio_insert = osd_index_ea_insert,
- .dio_declare_delete = osd_index_declare_ea_delete,
- .dio_delete = osd_index_ea_delete,
- .dio_it = {
- .init = osd_it_ea_init,
- .fini = osd_it_ea_fini,
- .get = osd_it_ea_get,
- .put = osd_it_ea_put,
- .next = osd_it_ea_next,
- .key = osd_it_ea_key,
- .key_size = osd_it_ea_key_size,
- .rec = osd_it_ea_rec,
- .store = osd_it_ea_store,
- .load = osd_it_ea_load
- }
+ .dio_lookup = osd_index_ea_lookup,
+ .dio_declare_insert = osd_index_declare_ea_insert,
+ .dio_insert = osd_index_ea_insert,
+ .dio_declare_delete = osd_index_declare_ea_delete,
+ .dio_delete = osd_index_ea_delete,
+ .dio_it = {
+ .init = osd_it_ea_init,
+ .fini = osd_it_ea_fini,
+ .get = osd_it_ea_get,
+ .put = osd_it_ea_put,
+ .next = osd_it_ea_next,
+ .key = osd_it_ea_key,
+ .key_size = osd_it_ea_key_size,
+ .rec = osd_it_ea_rec,
+ .rec_size = osd_it_ea_rec_size,
+ .store = osd_it_ea_store,
+ .load = osd_it_ea_load
+ }
};
static void *osd_key_init(const struct lu_context *ctx,
return osd_procfs_init(osd, name);
}
+static int osd_fid_init(const struct lu_env *env, struct osd_device *osd)
+{
+ struct seq_server_site *ss = osd_seq_site(osd);
+ int rc;
+ ENTRY;
+
+ if (osd->od_is_ost || osd->od_cl_seq != NULL)
+ RETURN(0);
+
+ if (unlikely(ss == NULL))
+ RETURN(-ENODEV);
+
+ OBD_ALLOC_PTR(osd->od_cl_seq);
+ if (osd->od_cl_seq == NULL)
+ RETURN(-ENOMEM);
+
+ rc = seq_client_init(osd->od_cl_seq, NULL, LUSTRE_SEQ_METADATA,
+ osd->od_svname, ss->ss_server_seq);
+
+ if (rc != 0) {
+ OBD_FREE_PTR(osd->od_cl_seq);
+ osd->od_cl_seq = NULL;
+ }
+
+ RETURN(rc);
+}
+
+static void osd_fid_fini(const struct lu_env *env, struct osd_device *osd)
+{
+ if (osd->od_cl_seq == NULL)
+ return;
+
+ seq_client_fini(osd->od_cl_seq);
+ OBD_FREE_PTR(osd->od_cl_seq);
+ osd->od_cl_seq = NULL;
+}
+
static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
{
ENTRY;
o->od_quota_slave = NULL;
}
+ osd_fid_fini(env, o);
+
RETURN(0);
}
#endif
if (!LDISKFS_HAS_COMPAT_FEATURE(o->od_mnt->mnt_sb,
- LDISKFS_FEATURE_COMPAT_HAS_JOURNAL)) {
+ LDISKFS_FEATURE_COMPAT_HAS_JOURNAL)) {
CERROR("%s: device %s is mounted w/o journal\n", name, dev);
GOTO(out_mnt, rc = -EINVAL);
}
+#ifdef LDISKFS_MOUNT_DIRDATA
+ if (LDISKFS_HAS_INCOMPAT_FEATURE(o->od_mnt->mnt_sb,
+ LDISKFS_FEATURE_INCOMPAT_DIRDATA))
+ LDISKFS_SB(osd_sb(o))->s_mount_opt |= LDISKFS_MOUNT_DIRDATA;
+ else if (!o->od_is_ost)
+ CWARN("%s: device %s was upgraded from Lustre-1.x without "
+ "enabling the dirdata feature. If you do not want to "
+ "downgrade to Lustre-1.x again, you can enable it via "
+ "'tune2fs -O dirdata device'\n", name, dev);
+#endif
inode = osd_sb(o)->s_root->d_inode;
- ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_NO_OI);
lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
rc = osd_ea_fid_set(info, inode, fid, LMAC_NOT_IN_OI, 0);
if (rc != 0) {
struct osd_device *o = osd_dev(d);
ENTRY;
- osd_procfs_fini(o);
osd_shutdown(env, o);
+ osd_procfs_fini(o);
osd_scrub_cleanup(env, o);
osd_obj_map_fini(o);
osd_umount(env, o);
spin_lock_init(&o->od_osfs_lock);
mutex_init(&o->od_otable_mutex);
- o->od_osfs_age = cfs_time_shift_64(-1000);
o->od_capa_hash = init_capa_hash();
if (o->od_capa_hash == NULL)
o->od_writethrough_cache = 1;
o->od_readcache_max_filesize = OSD_MAX_CACHE_SIZE;
- rc = osd_mount(env, o, cfg);
- if (rc)
- GOTO(out_capa, rc);
-
cplen = strlcpy(o->od_svname, lustre_cfg_string(cfg, 4),
sizeof(o->od_svname));
if (cplen >= sizeof(o->od_svname)) {
rc = -E2BIG;
- GOTO(out_mnt, rc);
+ GOTO(out_capa, rc);
}
if (server_name_is_ost(o->od_svname))
o->od_is_ost = 1;
+ rc = osd_mount(env, o, cfg);
+ if (rc != 0)
+ GOTO(out_capa, rc);
+
rc = osd_obj_map_init(env, o);
if (rc != 0)
GOTO(out_mnt, rc);
/* self-repair LMA by default */
o->od_lma_self_repair = 1;
- CFS_INIT_LIST_HEAD(&o->od_ios_list);
+ INIT_LIST_HEAD(&o->od_ios_list);
/* setup scrub, including OI files initialization */
rc = osd_scrub_setup(env, o);
if (rc < 0)
static int osd_process_config(const struct lu_env *env,
struct lu_device *d, struct lustre_cfg *cfg)
{
- struct osd_device *o = osd_dev(d);
- int err;
- ENTRY;
+ struct osd_device *o = osd_dev(d);
+ int rc;
+ ENTRY;
- switch(cfg->lcfg_command) {
- case LCFG_SETUP:
- err = osd_mount(env, o, cfg);
- break;
- case LCFG_CLEANUP:
+ switch (cfg->lcfg_command) {
+ case LCFG_SETUP:
+ rc = osd_mount(env, o, cfg);
+ break;
+ case LCFG_CLEANUP:
lu_dev_del_linkage(d->ld_site, d);
- err = osd_shutdown(env, o);
+ rc = osd_shutdown(env, o);
break;
- default:
- err = -ENOSYS;
- }
+ case LCFG_PARAM:
+ LASSERT(&o->od_dt_dev);
+ rc = class_process_proc_seq_param(PARAM_OSD,
+ lprocfs_osd_obd_vars,
+ cfg, &o->od_dt_dev);
+ if (rc > 0 || rc == -ENOSYS)
+ rc = class_process_proc_seq_param(PARAM_OST,
+ lprocfs_osd_obd_vars,
+ cfg, &o->od_dt_dev);
+ break;
+ default:
+ rc = -ENOSYS;
+ }
- RETURN(err);
+ RETURN(rc);
}
static int osd_recovery_complete(const struct lu_env *env,
int result = 0;
ENTRY;
- if (osd->od_quota_slave != NULL)
+ if (osd->od_quota_slave != NULL) {
/* set up quota slave objects */
result = qsd_prepare(env, osd->od_quota_slave);
+ if (result != 0)
+ RETURN(result);
+ }
+
+ result = osd_fid_init(env, osd);
RETURN(result);
}
+int osd_fid_alloc(const struct lu_env *env, struct obd_export *exp,
+ struct lu_fid *fid, struct md_op_data *op_data)
+{
+ struct osd_device *osd = osd_dev(exp->exp_obd->obd_lu_dev);
+
+ return seq_client_alloc_fid(env, osd->od_cl_seq, fid);
+}
+
static const struct lu_object_operations osd_lu_obj_ops = {
.loo_object_init = osd_object_init,
.loo_object_delete = osd_object_delete,
static struct obd_ops osd_obd_device_ops = {
.o_owner = THIS_MODULE,
.o_connect = osd_obd_connect,
- .o_disconnect = osd_obd_disconnect
+ .o_disconnect = osd_obd_disconnect,
+ .o_fid_alloc = osd_fid_alloc,
};
static int __init osd_mod_init(void)
{
- struct lprocfs_static_vars lvars;
int rc;
osd_oi_mod_init();
- lprocfs_osd_init_vars(&lvars);
rc = lu_kmem_init(ldiskfs_caches);
if (rc)
return rc;
- rc = class_register_type(&osd_obd_device_ops, NULL, NULL,
+ rc = class_register_type(&osd_obd_device_ops, NULL, true,
+ lprocfs_osd_module_vars,
#ifndef HAVE_ONLY_PROCFS_SEQ
- lvars.module_vars,
+ NULL,
#endif
- LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);
+ LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);
if (rc)
lu_kmem_fini(ldiskfs_caches);
return rc;