Whamcloud - gitweb
git://git.whamcloud.com
/
fs
/
lustre-release.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
| inline |
side by side
LU-2675 mdt: add mbo_ prefix to members of struct mdt_body
[fs/lustre-release.git]
/
lustre
/
lmv
/
lmv_obd.c
diff --git
a/lustre/lmv/lmv_obd.c
b/lustre/lmv/lmv_obd.c
index
659d3de
..
8750215
100644
(file)
--- a/
lustre/lmv/lmv_obd.c
+++ b/
lustre/lmv/lmv_obd.c
@@
-514,6
+514,7
@@
static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
{
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
+ int orig_tgt_count = 0;
int rc = 0;
ENTRY;
@@
-585,14
+586,17
@@
static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
tgt->ltd_uuid = *uuidp;
tgt->ltd_active = 0;
lmv->tgts[index] = tgt;
- if (index >= lmv->desc.ld_tgt_count)
+ if (index >= lmv->desc.ld_tgt_count) {
+ orig_tgt_count = lmv->desc.ld_tgt_count;
lmv->desc.ld_tgt_count = index + 1;
+ }
if (lmv->connected) {
rc = lmv_connect_mdc(obd, tgt);
- if (rc) {
+ if (rc
!= 0
) {
spin_lock(&lmv->lmv_lock);
- lmv->desc.ld_tgt_count--;
+ if (lmv->desc.ld_tgt_count == index + 1)
+ lmv->desc.ld_tgt_count = orig_tgt_count;
memset(tgt, 0, sizeof(*tgt));
spin_unlock(&lmv->lmv_lock);
} else {
@@
-1353,7
+1357,7
@@
int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid,
int rc;
ENTRY;
- tgt = lmv_get_target(lmv, mds);
+ tgt = lmv_get_target(lmv, mds
, NULL
);
if (IS_ERR(tgt))
RETURN(PTR_ERR(tgt));
@@
-1700,27
+1704,39
@@
static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid)
static int lmv_find_cbdata(struct obd_export *exp, const struct lu_fid *fid,
ldlm_iterator_t it, void *data)
{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- __u32 i;
- int rc;
- ENTRY;
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ int i;
+ int tgt;
+ int rc;
+ ENTRY;
-
rc = lmv_check_connect(obd);
-
if (rc)
-
RETURN(rc);
+ rc = lmv_check_connect(obd);
+ if (rc)
+ RETURN(rc);
-
CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
+ CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
/*
* With DNE every object can have two locks in different namespaces:
* lookup lock in space of MDT storing direntry and update/open lock in
- * space of MDT storing inode.
+ * space of MDT storing inode. Try the MDT that the FID maps to first,
+ * since this can be easily found, and only try others if that fails.
*/
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
+ for (i = 0, tgt = lmv_find_target_index(lmv, fid);
+ i < lmv->desc.ld_tgt_count;
+ i++, tgt = (tgt + 1) % lmv->desc.ld_tgt_count) {
+ if (tgt < 0) {
+ CDEBUG(D_HA, "%s: "DFID" is inaccessible: rc = %d\n",
+ obd->obd_name, PFID(fid), tgt);
+ tgt = 0;
+ }
+
+ if (lmv->tgts[tgt] == NULL ||
+ lmv->tgts[tgt]->ltd_exp == NULL)
continue;
- rc = md_find_cbdata(lmv->tgts[i]->ltd_exp, fid, it, data);
+
+ rc = md_find_cbdata(lmv->tgts[tgt]->ltd_exp, fid, it, data);
if (rc)
RETURN(rc);
}
@@
-1770,7
+1786,7
@@
lmv_locate_target_for_name(struct lmv_obd *lmv, struct lmv_stripe_md *lsm,
RETURN((void *)oinfo);
*fid = oinfo->lmo_fid;
*mds = oinfo->lmo_mds;
- tgt = lmv_get_target(lmv, *mds);
+ tgt = lmv_get_target(lmv, *mds
, NULL
);
CDEBUG(D_INFO, "locate on mds %u "DFID"\n", *mds, PFID(fid));
return tgt;
@@
-1934,8
+1950,8
@@
lmv_getattr_name(struct obd_export *exp,struct md_op_data *op_data,
body = req_capsule_server_get(&(*preq)->rq_pill, &RMF_MDT_BODY);
LASSERT(body != NULL);
- if (body->valid & OBD_MD_MDS) {
- struct lu_fid rid = body->fid1;
+ if (body->
mbo_
valid & OBD_MD_MDS) {
+ struct lu_fid rid = body->
mbo_
fid1;
CDEBUG(D_INODE, "Request attrs for "DFID"\n",
PFID(&rid));
@@
-2331,33
+2347,65
@@
static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
#define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */
+/**
+ * This function will read an entry from a striped directory; basically, it
+ * will read entries from all of the stripes and choose the one closest to
+ * the required offset (&op_data->op_hash_offset). A few notes:
+ * 1. skip . and .. for non-zero stripes, because there can be only one .
+ * and .. in a directory.
+ * 2. op_data will be shared by all of the stripes, instead of allocating a
+ * new one, so it needs to be restored before being reused.
+ * 3. release the entry page if it is not being chosen.
+ *
+ * param[in] exp obd export referring to LMV
+ * param[in] op_data holds the MD parameters of read_entry.
+ * param[in] cb_op ldlm callback being used in enqueue in mdc_read_entry
+ * param[out] ldp the entry being read.
+ * param[out] ppage the page holding the entry. Note: because the entry
+ * will be accessed in the upper layer, we need to hold the
+ * page until the use of the entry is finished, see
+ * ll_dir_entry_next.
+ *
+ * retval =0 if the entry is retrieved successfully
+ * <0 if the entry cannot be retrieved.
+ */
#define NORMAL_MAX_STRIPES 4
-int lmv_read_entry(struct obd_export *exp, struct md_op_data *op_data,
- struct md_callback *cb_op, struct lu_dirent **ldp,
- struct page **ppage)
+static int lmv_read_striped_entry(struct obd_export *exp,
+ struct md_op_data *op_data,
+ struct md_callback *cb_op,
+ struct lu_dirent **ldp,
+ struct page **ppage)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_stripe_md *lsm = op_data->op_mea1;
+ struct lmv_tgt_desc *tgt;
struct lu_dirent *tmp_ents[NORMAL_MAX_STRIPES];
struct lu_dirent **ents = NULL;
+ struct lu_fid master_fid = op_data->op_fid1;
+ void *master_data = op_data->op_data;
+ __u64 last_idx = op_data->op_stripe_offset;
+ __u64 hash_offset = op_data->op_hash_offset;
+ __u32 same_hash_offset = op_data->op_same_hash_offset;
+ __u32 cli_flags = op_data->op_cli_flags;
int stripe_count;
__u64 min_hash;
+ int min_same_hash_offset = 0;
int min_idx = 0;
struct page *min_page = NULL;
int i;
int rc;
ENTRY;
+ LASSERT(lsm != NULL);
+
rc = lmv_check_connect(obd);
if (rc)
RETURN(rc);
- if (lsm == NULL)
- stripe_count = 1;
- else
- stripe_count = lsm->lsm_md_stripe_count;
-
+ /* . and .. will be stored on the master object, so we need to iterate
+ * over the master object as well */
+ stripe_count = lsm->lsm_md_stripe_count;
if (stripe_count > NORMAL_MAX_STRIPES) {
OBD_ALLOC(ents, sizeof(ents[0]) * stripe_count);
if (ents == NULL)
@@
-2369,56
+2417,145
@@
int lmv_read_entry(struct obd_export *exp, struct md_op_data *op_data,
min_hash = MDS_DIR_END_OFF;
for (i = 0; i < stripe_count; i++) {
- struct lmv_tgt_desc *tgt;
struct page *page = NULL;
- if (likely(lsm == NULL)) {
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt))
- GOTO(out, rc = PTR_ERR(tgt));
- LASSERT(op_data->op_data != NULL);
- } else {
- tgt = lmv_get_target(lmv, lsm->lsm_md_oinfo[i].lmo_mds);
- if (IS_ERR(tgt))
- GOTO(out, rc = PTR_ERR(tgt));
- op_data->op_fid1 = lsm->lsm_md_oinfo[i].lmo_fid;
- op_data->op_fid2 = lsm->lsm_md_oinfo[i].lmo_fid;
- op_data->op_stripe_offset = i;
- }
+ tgt = lmv_get_target(lmv, lsm->lsm_md_oinfo[i].lmo_mds, NULL);
+ if (IS_ERR(tgt))
+ GOTO(out, rc = PTR_ERR(tgt));
+ if (last_idx != i)
+ op_data->op_same_hash_offset = 0;
+ else
+ op_data->op_same_hash_offset = same_hash_offset;
+
+ /* op_data will be shared by each stripe, so we need to
+ * reset these values for each stripe */
+ op_data->op_stripe_offset = i;
+ op_data->op_hash_offset = hash_offset;
+ op_data->op_cli_flags = cli_flags;
+ op_data->op_fid1 = lsm->lsm_md_oinfo[i].lmo_fid;
+ op_data->op_fid2 = lsm->lsm_md_oinfo[i].lmo_fid;
+ op_data->op_data = lsm->lsm_md_oinfo[i].lmo_root;
+
+next:
rc = md_read_entry(tgt->ltd_exp, op_data, cb_op, &ents[i],
&page);
if (rc != 0)
GOTO(out, rc);
if (ents[i] != NULL &&
- le64_to_cpu(ents[i]->lde_hash) <= min_hash) {
- if (min_page != NULL)
- page_cache_release(min_page);
- min_page = page;
- min_hash = le64_to_cpu(ents[i]->lde_hash);
- min_idx = i;
+ (strncmp(ents[i]->lde_name, ".",
+ le16_to_cpu(ents[i]->lde_namelen)) == 0 ||
+ strncmp(ents[i]->lde_name, "..",
+ le16_to_cpu(ents[i]->lde_namelen)) == 0)) {
+ if (i == 0) {
+ /* replace . with master FID */
+ if (le16_to_cpu(ents[i]->lde_namelen) == 1)
+ fid_cpu_to_le(&ents[i]->lde_fid,
+ &master_fid);
+ else
+ fid_cpu_to_le(&ents[i]->lde_fid,
+ &op_data->op_fid3);
+ } else {
+ /* skip . and .. for other stripes */
+ op_data->op_cli_flags |= CLI_NEXT_ENTRY;
+ op_data->op_hash_offset =
+ le64_to_cpu(ents[i]->lde_hash);
+ kunmap(page);
+ page_cache_release(page);
+ goto next;
+ }
+ }
+
+ if (ents[i] != NULL) {
+ /* If the hash value of read_entry is equal to the
+ * current min_hash, which is very rare and only
+ * happens if two entries have the same hash value
+ * but are on different stripes, then we need to
+ * make sure these entries are being read forward,
+ * not backward, i.e. only reset the min_entry if the
+ * current stripe is ahead of the last entry. Note: if
+ * there are hash conflicts inside the entry, MDC
+ * (see mdc_read_entry) will resolve them. */
+ if (le64_to_cpu(ents[i]->lde_hash) < min_hash ||
+ (le64_to_cpu(ents[i]->lde_hash) == min_hash &&
+ i >= last_idx)) {
+ if (min_page != NULL) {
+ kunmap(min_page);
+ page_cache_release(min_page);
+ }
+ min_page = page;
+ min_hash = le64_to_cpu(ents[i]->lde_hash);
+ min_same_hash_offset =
+ op_data->op_same_hash_offset;
+ min_idx = i;
+ } else {
+ kunmap(page);
+ page_cache_release(page);
+ }
}
}
- if (min_hash != MDS_DIR_END_OFF)
+ if (min_hash != MDS_DIR_END_OFF)
{
*ldp = ents[min_idx];
- else
+ op_data->op_stripe_offset = min_idx;
+ op_data->op_same_hash_offset = min_same_hash_offset;
+ *ppage = min_page;
+ } else {
*ldp = NULL;
+ *ppage = NULL;
+ }
out:
+ /* We do not want to allocate md_op_data during each
+ * dir entry reading, so op_data will be shared by every stripe;
+ * we then need to restore it back to its original values before
+ * returning to the upper layer */
+ op_data->op_hash_offset = hash_offset;
+ op_data->op_fid1 = master_fid;
+ op_data->op_fid2 = master_fid;
+ op_data->op_data = master_data;
+ op_data->op_cli_flags = cli_flags;
if (stripe_count > NORMAL_MAX_STRIPES && ents != NULL)
OBD_FREE(ents, sizeof(ents[0]) * stripe_count);
if (rc != 0 && min_page != NULL) {
kunmap(min_page);
page_cache_release(min_page);
- } else {
- *ppage = min_page;
}
RETURN(rc);
}
+int lmv_read_entry(struct obd_export *exp, struct md_op_data *op_data,
+ struct md_callback *cb_op, struct lu_dirent **ldp,
+ struct page **ppage)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_stripe_md *lsm = op_data->op_mea1;
+ struct lmv_tgt_desc *tgt;
+ int rc;
+ ENTRY;
+
+ rc = lmv_check_connect(obd);
+ if (rc != 0)
+ RETURN(rc);
+
+ if (unlikely(lsm != NULL)) {
+ rc = lmv_read_striped_entry(exp, op_data, cb_op,
+ ldp, ppage);
+ RETURN(rc);
+ }
+
+ tgt = lmv_find_target(lmv, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ RETURN(PTR_ERR(tgt));
+
+ rc = md_read_entry(tgt->ltd_exp, op_data, cb_op, ldp,
+ ppage);
+ RETURN(rc);
+}
+
static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request)
{
@@
-2504,11
+2641,11
@@
retry:
RETURN(-EPROTO);
/* Not cross-ref case, just get out of here. */
- if (likely(!(body->valid & OBD_MD_MDS)))
+ if (likely(!(body->
mbo_
valid & OBD_MD_MDS)))
RETURN(0);
CDEBUG(D_INODE, "%s: try unlink to another MDT for "DFID"\n",
- exp->exp_obd->obd_name, PFID(&body->fid1));
+ exp->exp_obd->obd_name, PFID(&body->
mbo_
fid1));
/* This is a remote object, try remote MDT, Note: it may
* try more than 1 time here, Considering following case
@@
-2529,7
+2666,7
@@
retry:
*
* In theory, it might try unlimited time here, but it should
* be very rare case. */
- op_data->op_fid2 = body->fid1;
+ op_data->op_fid2 = body->
mbo_
fid1;
ptlrpc_req_finished(*request);
*request = NULL;
@@
-2832,6
+2969,9
@@
int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
RETURN(0);
}
+ if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_STRIPE)
+ RETURN(-EPERM);
+
/* Unpack memmd */
if (le32_to_cpu(lmm->lmv_magic) != LMV_MAGIC_V1 &&
le32_to_cpu(lmm->lmv_magic) != LMV_USER_MAGIC) {
@@
-2963,33
+3103,42
@@
ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags,
ldlm_policy_data_t *policy, ldlm_mode_t mode,
struct lustre_handle *lockh)
{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- ldlm_mode_t rc;
- __u32 i;
- ENTRY;
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ ldlm_mode_t rc;
+ int tgt;
+ int i;
+ ENTRY;
-
CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid));
+ CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid));
/*
- * With CMD every object can have two locks in different namespaces:
- * lookup lock in space of mds storing direntry and update/open lock in
- * space of mds storing inode. Thus we check all targets, not only that
- * one fid was created in.
- */
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- struct lmv_tgt_desc *tgt = lmv->tgts[i];
+ * With DNE every object can have two locks in different namespaces:
+ * lookup lock in space of MDT storing direntry and update/open lock in
+ * space of MDT storing inode. Try the MDT that the FID maps to first,
+ * since this can be easily found, and only try others if that fails.
+ */
+ for (i = 0, tgt = lmv_find_target_index(lmv, fid);
+ i < lmv->desc.ld_tgt_count;
+ i++, tgt = (tgt + 1) % lmv->desc.ld_tgt_count) {
+ if (tgt < 0) {
+ CDEBUG(D_HA, "%s: "DFID" is inaccessible: rc = %d\n",
+ obd->obd_name, PFID(fid), tgt);
+ tgt = 0;
+ }
- if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active)
+ if (lmv->tgts[tgt] == NULL ||
+ lmv->tgts[tgt]->ltd_exp == NULL ||
+ lmv->tgts[tgt]->ltd_active == 0)
continue;
- rc = md_lock_match(
tgt->ltd_exp, flags, fid, type, policy, mode
,
- lockh);
-
if (rc)
-
RETURN(rc);
-
}
+ rc = md_lock_match(
lmv->tgts[tgt]->ltd_exp, flags, fid
,
+
type, policy, mode,
lockh);
+ if (rc)
+ RETURN(rc);
+ }
-
RETURN(0);
+ RETURN(0);
}
int lmv_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
@@
-3151,6
+3300,22
@@
int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
RETURN(rc);
}
+int lmv_get_fid_from_lsm(struct obd_export *exp,
+ const struct lmv_stripe_md *lsm,
+ const char *name, int namelen, struct lu_fid *fid)
+{
+ const struct lmv_oinfo *oinfo;
+
+ LASSERT(lsm != NULL);
+ oinfo = lsm_name_to_stripe_info(lsm, name, namelen);
+ if (IS_ERR(oinfo))
+ return PTR_ERR(oinfo);
+
+ *fid = oinfo->lmo_fid;
+
+ RETURN(0);
+}
+
/**
* For lmv, only need to send request to master MDT, and the master MDT will
* process with other slave MDTs. The only exception is Q_GETOQUOTA for which
@@
-3325,7
+3490,8
@@
struct md_ops lmv_md_ops = {
.m_unpack_capa = lmv_unpack_capa,
.m_get_remote_perm = lmv_get_remote_perm,
.m_intent_getattr_async = lmv_intent_getattr_async,
- .m_revalidate_lock = lmv_revalidate_lock
+ .m_revalidate_lock = lmv_revalidate_lock,
+ .m_get_fid_from_lsm = lmv_get_fid_from_lsm,
};
int __init lmv_init(void)