#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
#include <linux/seq_file.h>
#include <linux/namei.h>
#else
#include <lclient.h>
#include <lustre_lite.h>
#include <lustre_fid.h>
+#include <lustre_ioctl.h>
#include "lmv_internal.h"
/* This hash is only for testing purpose */
return hash;
}
-int lmv_name_to_stripe_index(enum lmv_hash_type hashtype,
- unsigned int max_mdt_index,
+int lmv_name_to_stripe_index(__u32 lmv_hash_type, unsigned int stripe_count,
const char *name, int namelen)
{
int idx;
+ __u32 hash_type = lmv_hash_type & LMV_HASH_TYPE_MASK;
LASSERT(namelen > 0);
- if (max_mdt_index <= 1)
+ if (stripe_count <= 1)
return 0;
- switch (hashtype) {
+ /* for migrating object, always start from 0 stripe */
+ if (lmv_hash_type & LMV_HASH_FLAG_MIGRATION)
+ return 0;
+
+ switch (hash_type) {
case LMV_HASH_TYPE_ALL_CHARS:
- idx = lmv_hash_all_chars(max_mdt_index, name, namelen);
+ idx = lmv_hash_all_chars(stripe_count, name, namelen);
break;
case LMV_HASH_TYPE_FNV_1A_64:
- idx = lmv_hash_fnv1a(max_mdt_index, name, namelen);
+ idx = lmv_hash_fnv1a(stripe_count, name, namelen);
break;
- /* LMV_HASH_TYPE_MIGRATION means the file is being migrated,
- * and the file should be accessed by client, except for
- * lookup(see lmv_intent_lookup), return -EACCES here */
- case LMV_HASH_TYPE_MIGRATION:
- CERROR("%.*s is being migrated: rc = %d\n", namelen,
- name, -EACCES);
- return -EACCES;
default:
- CERROR("Unknown hash type 0x%x\n", hashtype);
+ CERROR("Unknown hash type 0x%x\n", hash_type);
return -EINVAL;
}
CDEBUG(D_INFO, "name %.*s hash_type %d idx %d\n", namelen, name,
- hashtype, idx);
+ hash_type, idx);
- LASSERT(idx < max_mdt_index);
return idx;
}
struct obd_uuid *cluuid, struct obd_connect_data *data,
void *localdata)
{
-#ifdef __KERNEL__
- struct proc_dir_entry *lmv_proc_dir;
-#endif
struct lmv_obd *lmv = &obd->u.lmv;
struct lustre_handle conn = { 0 };
int rc = 0;
if (data)
lmv->conn_data = *data;
-#ifdef __KERNEL__
- if (obd->obd_proc_private != NULL) {
- lmv_proc_dir = obd->obd_proc_private;
- } else {
- lmv_proc_dir = lprocfs_seq_register("target_obds",
- obd->obd_proc_entry,
- NULL, NULL);
- if (IS_ERR(lmv_proc_dir)) {
+ if (lmv->targets_proc_entry == NULL) {
+ lmv->targets_proc_entry = lprocfs_seq_register("target_obds",
+ obd->obd_proc_entry,
+ NULL, NULL);
+ if (IS_ERR(lmv->targets_proc_entry)) {
CERROR("could not register /proc/fs/lustre/%s/%s/target_obds.",
obd->obd_type->typ_name, obd->obd_name);
- lmv_proc_dir = NULL;
+ lmv->targets_proc_entry = NULL;
}
- obd->obd_proc_private = lmv_proc_dir;
}
-#endif
/*
* All real clients should perform actual connection right away, because
if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_REAL))
rc = lmv_check_connect(obd);
-#ifdef __KERNEL__
- if (rc && lmv_proc_dir) {
- lprocfs_remove(&lmv_proc_dir);
- obd->obd_proc_private = NULL;
- }
-#endif
- RETURN(rc);
+ if (rc && lmv->targets_proc_entry != NULL)
+ lprocfs_remove(&lmv->targets_proc_entry);
+ RETURN(rc);
}
static void lmv_set_timeouts(struct obd_device *obd)
int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
{
-#ifdef __KERNEL__
- struct proc_dir_entry *lmv_proc_dir;
-#endif
struct lmv_obd *lmv = &obd->u.lmv;
struct obd_uuid *cluuid = &lmv->cluuid;
struct obd_uuid lmv_mdc_uuid = { "LMV_MDC_UUID" };
}
}
- tgt->ltd_active = 1;
- tgt->ltd_exp = mdc_exp;
- lmv->desc.ld_active_tgt_count++;
+ tgt->ltd_active = 1;
+ tgt->ltd_exp = mdc_exp;
+ lmv->desc.ld_active_tgt_count++;
md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize,
lmv->max_cookiesize, lmv->max_def_cookiesize);
mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
atomic_read(&obd->obd_refcount));
-#ifdef __KERNEL__
- lmv_proc_dir = obd->obd_proc_private;
- if (lmv_proc_dir) {
+ if (lmv->targets_proc_entry != NULL) {
struct proc_dir_entry *mdc_symlink;
LASSERT(mdc_obd->obd_type != NULL);
LASSERT(mdc_obd->obd_type->typ_name != NULL);
mdc_symlink = lprocfs_add_symlink(mdc_obd->obd_name,
- lmv_proc_dir,
+ lmv->targets_proc_entry,
"../../../%s/%s",
mdc_obd->obd_type->typ_name,
mdc_obd->obd_name);
"/proc/fs/lustre/%s/%s/target_obds/%s.",
obd->obd_type->typ_name, obd->obd_name,
mdc_obd->obd_name);
- lprocfs_remove(&lmv_proc_dir);
- obd->obd_proc_private = NULL;
}
}
-#endif
RETURN(0);
}
{
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
+ int orig_tgt_count = 0;
int rc = 0;
ENTRY;
tgt->ltd_uuid = *uuidp;
tgt->ltd_active = 0;
lmv->tgts[index] = tgt;
- if (index >= lmv->desc.ld_tgt_count)
+ if (index >= lmv->desc.ld_tgt_count) {
+ orig_tgt_count = lmv->desc.ld_tgt_count;
lmv->desc.ld_tgt_count = index + 1;
+ }
if (lmv->connected) {
rc = lmv_connect_mdc(obd, tgt);
- if (rc) {
+ if (rc != 0) {
spin_lock(&lmv->lmv_lock);
- lmv->desc.ld_tgt_count--;
+ if (lmv->desc.ld_tgt_count == index + 1)
+ lmv->desc.ld_tgt_count = orig_tgt_count;
memset(tgt, 0, sizeof(*tgt));
spin_unlock(&lmv->lmv_lock);
} else {
static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
{
-#ifdef __KERNEL__
- struct proc_dir_entry *lmv_proc_dir;
-#endif
struct lmv_obd *lmv = &obd->u.lmv;
struct obd_device *mdc_obd;
int rc;
mdc_obd->obd_no_recov = obd->obd_no_recov;
}
-#ifdef __KERNEL__
- lmv_proc_dir = obd->obd_proc_private;
- if (lmv_proc_dir)
- lprocfs_remove_proc_entry(mdc_obd->obd_name, lmv_proc_dir);
-#endif
+ if (lmv->targets_proc_entry != NULL)
+ lprocfs_remove_proc_entry(mdc_obd->obd_name,
+ lmv->targets_proc_entry);
+
rc = obd_fid_fini(tgt->ltd_exp->exp_obd);
if (rc)
CERROR("Can't finanize fids factory\n");
lmv_disconnect_mdc(obd, lmv->tgts[i]);
}
-#ifdef __KERNEL__
- if (obd->obd_proc_private)
- lprocfs_remove((struct proc_dir_entry **)&obd->obd_proc_private);
+ if (lmv->targets_proc_entry != NULL)
+ lprocfs_remove(&lmv->targets_proc_entry);
else
CERROR("/proc/fs/lustre/%s/%s/target_obds missing\n",
obd->obd_type->typ_name, obd->obd_name);
-#endif
out_local:
/*
lum = op_data->op_data;
- if (lum->lum_stripe_offset != (__u32)-1) {
- *mds = lum->lum_stripe_offset;
+ if (le32_to_cpu(lum->lum_stripe_offset) != (__u32)-1) {
+ *mds = le32_to_cpu(lum->lum_stripe_offset);
} else {
/* -1 means default, which will be in the same MDT with
* the stripe */
*mds = op_data->op_mds;
- lum->lum_stripe_offset = op_data->op_mds;
+ lum->lum_stripe_offset = cpu_to_le32(op_data->op_mds);
}
} else {
/* Allocate new fid on target according to operation type and
int rc;
ENTRY;
- tgt = lmv_get_target(lmv, mds);
+ tgt = lmv_get_target(lmv, mds, NULL);
if (IS_ERR(tgt))
RETURN(PTR_ERR(tgt));
if (tgt->ltd_active == 0 || tgt->ltd_exp == NULL)
GOTO(out, rc = -ENODEV);
- /*
- * Asking underlaying tgt layer to allocate new fid.
- */
- rc = obd_fid_alloc(tgt->ltd_exp, fid, NULL);
- if (rc > 0) {
- LASSERT(fid_is_sane(fid));
- rc = 0;
- }
+ /*
+ * Asking underlying tgt layer to allocate new fid.
+ */
+ rc = obd_fid_alloc(NULL, tgt->ltd_exp, fid, NULL);
+ if (rc > 0) {
+ LASSERT(fid_is_sane(fid));
+ rc = 0;
+ }
EXIT;
out:
return rc;
}
-int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
- struct md_op_data *op_data)
+int lmv_fid_alloc(const struct lu_env *env, struct obd_export *exp,
+ struct lu_fid *fid, struct md_op_data *op_data)
{
struct obd_device *obd = class_exp2obd(exp);
struct lmv_obd *lmv = &obd->u.lmv;
static int lmv_find_cbdata(struct obd_export *exp, const struct lu_fid *fid,
ldlm_iterator_t it, void *data)
{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- __u32 i;
- int rc;
- ENTRY;
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ int i;
+ int tgt;
+ int rc;
+ ENTRY;
- rc = lmv_check_connect(obd);
- if (rc)
- RETURN(rc);
+ rc = lmv_check_connect(obd);
+ if (rc)
+ RETURN(rc);
- CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
+ CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
/*
* With DNE every object can have two locks in different namespaces:
* lookup lock in space of MDT storing direntry and update/open lock in
- * space of MDT storing inode.
+ * space of MDT storing inode. Try the MDT that the FID maps to first,
+ * since this can be easily found, and only try others if that fails.
*/
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
+ for (i = 0, tgt = lmv_find_target_index(lmv, fid);
+ i < lmv->desc.ld_tgt_count;
+ i++, tgt = (tgt + 1) % lmv->desc.ld_tgt_count) {
+ if (tgt < 0) {
+ CDEBUG(D_HA, "%s: "DFID" is inaccessible: rc = %d\n",
+ obd->obd_name, PFID(fid), tgt);
+ tgt = 0;
+ }
+
+ if (lmv->tgts[tgt] == NULL ||
+ lmv->tgts[tgt]->ltd_exp == NULL)
continue;
- rc = md_find_cbdata(lmv->tgts[i]->ltd_exp, fid, it, data);
+
+ rc = md_find_cbdata(lmv->tgts[tgt]->ltd_exp, fid, it, data);
if (rc)
RETURN(rc);
}
RETURN((void *)oinfo);
*fid = oinfo->lmo_fid;
*mds = oinfo->lmo_mds;
- tgt = lmv_get_target(lmv, *mds);
+ tgt = lmv_get_target(lmv, *mds, NULL);
CDEBUG(D_INFO, "locate on mds %u "DFID"\n", *mds, PFID(fid));
return tgt;
struct lmv_stripe_md *lsm = op_data->op_mea1;
struct lmv_tgt_desc *tgt;
- if (lsm == NULL || lsm->lsm_md_stripe_count <= 1 ||
- op_data->op_namelen == 0 ||
- lsm->lsm_md_magic == LMV_MAGIC_MIGRATE) {
+ if (lsm == NULL || op_data->op_namelen == 0) {
tgt = lmv_find_target(lmv, fid);
if (IS_ERR(tgt))
return tgt;
op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
op_data->op_mds);
- rc = lmv_fid_alloc(exp, &op_data->op_fid2, op_data);
+ rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
if (rc)
RETURN(rc);
}
static int
-lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
- struct lookup_intent *it, struct md_op_data *op_data,
- struct lustre_handle *lockh, void *lmm, int lmmsize,
- __u64 extra_lock_flags)
-{
- struct ptlrpc_request *req = it->d.lustre.it_data;
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lustre_handle plock;
- struct lmv_tgt_desc *tgt;
- struct md_op_data *rdata;
- struct lu_fid fid1;
- struct mdt_body *body;
- int rc = 0;
- int pmode;
- ENTRY;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body != NULL);
-
- if (!(body->valid & OBD_MD_MDS))
- RETURN(0);
-
- CDEBUG(D_INODE, "REMOTE_ENQUEUE '%s' on "DFID" -> "DFID"\n",
- LL_IT2STR(it), PFID(&op_data->op_fid1), PFID(&body->fid1));
-
- /*
- * We got LOOKUP lock, but we really need attrs.
- */
- pmode = it->d.lustre.it_lock_mode;
- LASSERT(pmode != 0);
- memcpy(&plock, lockh, sizeof(plock));
- it->d.lustre.it_lock_mode = 0;
- it->d.lustre.it_data = NULL;
- fid1 = body->fid1;
-
- ptlrpc_req_finished(req);
-
- tgt = lmv_find_target(lmv, &fid1);
- if (IS_ERR(tgt))
- GOTO(out, rc = PTR_ERR(tgt));
-
- OBD_ALLOC_PTR(rdata);
- if (rdata == NULL)
- GOTO(out, rc = -ENOMEM);
-
- rdata->op_fid1 = fid1;
- rdata->op_bias = MDS_CROSS_REF;
-
- rc = md_enqueue(tgt->ltd_exp, einfo, it, rdata, lockh,
- lmm, lmmsize, NULL, extra_lock_flags);
- OBD_FREE_PTR(rdata);
- EXIT;
-out:
- ldlm_lock_decref(&plock, pmode);
- return rc;
-}
-
-static int
lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
- struct lookup_intent *it, struct md_op_data *op_data,
- struct lustre_handle *lockh, void *lmm, int lmmsize,
- struct ptlrpc_request **req, __u64 extra_lock_flags)
+ const union ldlm_policy_data *policy,
+ struct lookup_intent *it, struct md_op_data *op_data,
+ struct lustre_handle *lockh, __u64 extra_lock_flags)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID" -> mds #%d\n",
LL_IT2STR(it), PFID(&op_data->op_fid1), tgt->ltd_idx);
- rc = md_enqueue(tgt->ltd_exp, einfo, it, op_data, lockh,
- lmm, lmmsize, req, extra_lock_flags);
+ rc = md_enqueue(tgt->ltd_exp, einfo, policy, it, op_data, lockh,
+ extra_lock_flags);
- if (rc == 0 && it && it->it_op == IT_OPEN) {
- rc = lmv_enqueue_remote(exp, einfo, it, op_data, lockh,
- lmm, lmmsize, extra_lock_flags);
- }
RETURN(rc);
}
static int
lmv_getattr_name(struct obd_export *exp,struct md_op_data *op_data,
- struct ptlrpc_request **request)
+ struct ptlrpc_request **preq)
{
struct ptlrpc_request *req = NULL;
struct obd_device *obd = exp->exp_obd;
op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
tgt->ltd_idx);
- rc = md_getattr_name(tgt->ltd_exp, op_data, request);
+ rc = md_getattr_name(tgt->ltd_exp, op_data, preq);
if (rc != 0)
RETURN(rc);
- body = req_capsule_server_get(&(*request)->rq_pill,
- &RMF_MDT_BODY);
+ body = req_capsule_server_get(&(*preq)->rq_pill, &RMF_MDT_BODY);
LASSERT(body != NULL);
- if (body->valid & OBD_MD_MDS) {
- struct lu_fid rid = body->fid1;
+ if (body->mbo_valid & OBD_MD_MDS) {
+ struct lu_fid rid = body->mbo_fid1;
CDEBUG(D_INODE, "Request attrs for "DFID"\n",
PFID(&rid));
tgt = lmv_find_target(lmv, &rid);
if (IS_ERR(tgt)) {
- ptlrpc_req_finished(*request);
+ ptlrpc_req_finished(*preq);
+ preq = NULL;
RETURN(PTR_ERR(tgt));
}
op_data->op_namelen = 0;
op_data->op_name = NULL;
rc = md_getattr_name(tgt->ltd_exp, op_data, &req);
- ptlrpc_req_finished(*request);
- *request = req;
+ ptlrpc_req_finished(*preq);
+ *preq = req;
}
RETURN(rc);
PFID(&op_data->op_fid2), op_data->op_namelen,
op_data->op_name, PFID(&op_data->op_fid1));
- op_data->op_fsuid = current_fsuid();
- op_data->op_fsgid = current_fsgid();
+ op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
op_data->op_cap = cfs_curproc_cap_pack();
if (op_data->op_mea2 != NULL) {
struct lmv_stripe_md *lsm = op_data->op_mea2;
if (rc)
RETURN(rc);
- op_data->op_fsuid = current_fsuid();
- op_data->op_fsgid = current_fsgid();
+ op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
op_data->op_cap = cfs_curproc_cap_pack();
if (op_data->op_cli_flags & CLI_MIGRATE) {
LASSERTF(fid_is_sane(&op_data->op_fid3), "invalid FID "DFID"\n",
PFID(&op_data->op_fid3));
- rc = lmv_fid_alloc(exp, &op_data->op_fid2, op_data);
+ rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
if (rc)
RETURN(rc);
src_tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid3);
#define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */
+/**
+ * This function will read an entry from a striped directory; basically, it
+ * will read entries from all of the stripes and choose the one closest to
+ * the required offset (&op_data->op_hash_offset). A few notes:
+ * 1. skip . and .. for non-zero stripes, because there can only be one .
+ *    and .. in a directory.
+ * 2. op_data will be shared by all of the stripes, instead of allocating a
+ *    new one, so it needs to be restored before being reused.
+ * 3. release the entry page if it is not the one being chosen.
+ *
+ * \param[in] exp	obd export referring to LMV
+ * \param[in] op_data	holds the MD parameters of read_entry
+ * \param[in] cb_op	ldlm callback used in enqueue in mdc_read_entry
+ * \param[out] ldp	the entry being read
+ * \param[out] ppage	the page holding the entry; note: because the entry
+ *			will be accessed in the upper layer, we need to hold
+ *			the page until the use of the entry is finished, see
+ *			ll_dir_entry_next
+ *
+ * \retval =0 if the entry is read successfully
+ *	   <0 if the entry cannot be read
+ */
#define NORMAL_MAX_STRIPES 4
-int lmv_read_entry(struct obd_export *exp, struct md_op_data *op_data,
- struct md_callback *cb_op, struct lu_dirent **ldp,
- struct page **ppage)
+static int lmv_read_striped_entry(struct obd_export *exp,
+ struct md_op_data *op_data,
+ struct md_callback *cb_op,
+ struct lu_dirent **ldp,
+ struct page **ppage)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_stripe_md *lsm = op_data->op_mea1;
+ struct lmv_tgt_desc *tgt;
struct lu_dirent *tmp_ents[NORMAL_MAX_STRIPES];
struct lu_dirent **ents = NULL;
+ struct lu_fid master_fid = op_data->op_fid1;
+ void *master_data = op_data->op_data;
+ __u64 last_idx = op_data->op_stripe_offset;
+ __u64 hash_offset = op_data->op_hash_offset;
+ __u32 same_hash_offset = op_data->op_same_hash_offset;
+ __u32 cli_flags = op_data->op_cli_flags;
int stripe_count;
__u64 min_hash;
+ int min_same_hash_offset = 0;
int min_idx = 0;
struct page *min_page = NULL;
int i;
int rc;
ENTRY;
+ LASSERT(lsm != NULL);
+
rc = lmv_check_connect(obd);
if (rc)
RETURN(rc);
- if (lsm == NULL)
- stripe_count = 1;
- else
- stripe_count = lsm->lsm_md_stripe_count;
-
+ /* . and .. will be stored on the master object, so we need to
+ * iterate over the master object as well */
+ stripe_count = lsm->lsm_md_stripe_count;
if (stripe_count > NORMAL_MAX_STRIPES) {
OBD_ALLOC(ents, sizeof(ents[0]) * stripe_count);
if (ents == NULL)
min_hash = MDS_DIR_END_OFF;
for (i = 0; i < stripe_count; i++) {
- struct lmv_tgt_desc *tgt;
struct page *page = NULL;
- if (likely(lsm == NULL)) {
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt))
- GOTO(out, rc = PTR_ERR(tgt));
- LASSERT(op_data->op_data != NULL);
- } else {
- tgt = lmv_get_target(lmv, lsm->lsm_md_oinfo[i].lmo_mds);
- if (IS_ERR(tgt))
- GOTO(out, rc = PTR_ERR(tgt));
- op_data->op_fid1 = lsm->lsm_md_oinfo[i].lmo_fid;
- op_data->op_fid2 = lsm->lsm_md_oinfo[i].lmo_fid;
- op_data->op_stripe_offset = i;
- }
+ tgt = lmv_get_target(lmv, lsm->lsm_md_oinfo[i].lmo_mds, NULL);
+ if (IS_ERR(tgt))
+ GOTO(out, rc = PTR_ERR(tgt));
+ if (last_idx != i)
+ op_data->op_same_hash_offset = 0;
+ else
+ op_data->op_same_hash_offset = same_hash_offset;
+
+ /* op_data will be shared by each stripe, so we need to
+ * reset these values for each stripe */
+ op_data->op_stripe_offset = i;
+ op_data->op_hash_offset = hash_offset;
+ op_data->op_cli_flags = cli_flags;
+ op_data->op_fid1 = lsm->lsm_md_oinfo[i].lmo_fid;
+ op_data->op_fid2 = lsm->lsm_md_oinfo[i].lmo_fid;
+ op_data->op_data = lsm->lsm_md_oinfo[i].lmo_root;
+
+next:
rc = md_read_entry(tgt->ltd_exp, op_data, cb_op, &ents[i],
&page);
if (rc != 0)
GOTO(out, rc);
if (ents[i] != NULL &&
- le64_to_cpu(ents[i]->lde_hash) <= min_hash) {
- if (min_page != NULL)
- page_cache_release(min_page);
- min_page = page;
- min_hash = le64_to_cpu(ents[i]->lde_hash);
- min_idx = i;
+ (strncmp(ents[i]->lde_name, ".",
+ le16_to_cpu(ents[i]->lde_namelen)) == 0 ||
+ strncmp(ents[i]->lde_name, "..",
+ le16_to_cpu(ents[i]->lde_namelen)) == 0)) {
+ if (i == 0) {
+ /* replace . with master FID */
+ if (le16_to_cpu(ents[i]->lde_namelen) == 1)
+ fid_cpu_to_le(&ents[i]->lde_fid,
+ &master_fid);
+ else
+ fid_cpu_to_le(&ents[i]->lde_fid,
+ &op_data->op_fid3);
+ } else {
+ /* skip . and .. for other stripes */
+ op_data->op_cli_flags |= CLI_NEXT_ENTRY;
+ op_data->op_hash_offset =
+ le64_to_cpu(ents[i]->lde_hash);
+ kunmap(page);
+ page_cache_release(page);
+ goto next;
+ }
+ }
+
+ if (ents[i] != NULL) {
+ /* If the hash value of read_entry is equal to the
+ * current min_hash, which is very rare and only
+ * happens if two entries have the same hash value
+ * but are on different stripes, then we need to
+ * make sure these entries are being read forward,
+ * not backward, i.e. only reset the min_entry if
+ * the current stripe is ahead of the last entry.
+ * Note: if there is a hash conflict inside the
+ * entry, MDC (see mdc_read_entry) will resolve it. */
+ if (le64_to_cpu(ents[i]->lde_hash) < min_hash ||
+ (le64_to_cpu(ents[i]->lde_hash) == min_hash &&
+ i >= last_idx)) {
+ if (min_page != NULL) {
+ kunmap(min_page);
+ page_cache_release(min_page);
+ }
+ min_page = page;
+ min_hash = le64_to_cpu(ents[i]->lde_hash);
+ min_same_hash_offset =
+ op_data->op_same_hash_offset;
+ min_idx = i;
+ } else {
+ kunmap(page);
+ page_cache_release(page);
+ }
}
}
- if (min_hash != MDS_DIR_END_OFF)
+ if (min_hash != MDS_DIR_END_OFF) {
*ldp = ents[min_idx];
- else
+ op_data->op_stripe_offset = min_idx;
+ op_data->op_same_hash_offset = min_same_hash_offset;
+ *ppage = min_page;
+ } else {
*ldp = NULL;
+ *ppage = NULL;
+ }
out:
+ /* We do not want to allocate md_op_data during each
+ * dir entry read, so op_data will be shared by every stripe,
+ * and we need to restore it back to its original value before
+ * returning to the upper layer */
+ op_data->op_hash_offset = hash_offset;
+ op_data->op_fid1 = master_fid;
+ op_data->op_fid2 = master_fid;
+ op_data->op_data = master_data;
+ op_data->op_cli_flags = cli_flags;
if (stripe_count > NORMAL_MAX_STRIPES && ents != NULL)
OBD_FREE(ents, sizeof(ents[0]) * stripe_count);
if (rc != 0 && min_page != NULL) {
kunmap(min_page);
page_cache_release(min_page);
- } else {
- *ppage = min_page;
}
RETURN(rc);
}
+int lmv_read_entry(struct obd_export *exp, struct md_op_data *op_data,
+ struct md_callback *cb_op, struct lu_dirent **ldp,
+ struct page **ppage)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_stripe_md *lsm = op_data->op_mea1;
+ struct lmv_tgt_desc *tgt;
+ int rc;
+ ENTRY;
+
+ rc = lmv_check_connect(obd);
+ if (rc != 0)
+ RETURN(rc);
+
+ if (unlikely(lsm != NULL)) {
+ rc = lmv_read_striped_entry(exp, op_data, cb_op,
+ ldp, ppage);
+ RETURN(rc);
+ }
+
+ tgt = lmv_find_target(lmv, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ RETURN(PTR_ERR(tgt));
+
+ rc = md_read_entry(tgt->ltd_exp, op_data, cb_op, ldp,
+ ppage);
+ RETURN(rc);
+}
+
static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request)
{
RETURN(PTR_ERR(tgt));
/* For striped dir, we need to locate the parent as well */
- if (op_data->op_mea1 != NULL &&
- op_data->op_mea1->lsm_md_stripe_count > 1) {
+ if (op_data->op_mea1 != NULL) {
struct lmv_tgt_desc *tmp;
LASSERT(op_data->op_name != NULL &&
RETURN(PTR_ERR(tgt));
}
- op_data->op_fsuid = current_fsuid();
- op_data->op_fsgid = current_fsgid();
+ op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
op_data->op_cap = cfs_curproc_cap_pack();
/*
RETURN(-EPROTO);
/* Not cross-ref case, just get out of here. */
- if (likely(!(body->valid & OBD_MD_MDS)))
+ if (likely(!(body->mbo_valid & OBD_MD_MDS)))
RETURN(0);
CDEBUG(D_INODE, "%s: try unlink to another MDT for "DFID"\n",
- exp->exp_obd->obd_name, PFID(&body->fid1));
+ exp->exp_obd->obd_name, PFID(&body->mbo_fid1));
/* This is a remote object, try remote MDT, Note: it may
* try more than 1 time here, Considering following case
*
* In theory, it might try unlimited time here, but it should
* be very rare case. */
- op_data->op_fid2 = body->fid1;
+ op_data->op_fid2 = body->mbo_fid1;
ptlrpc_req_finished(*request);
*request = NULL;
lsm->lsm_md_master_mdt_index = le32_to_cpu(lmm1->lmv_master_mdt_index);
lsm->lsm_md_hash_type = le32_to_cpu(lmm1->lmv_hash_type);
lsm->lsm_md_layout_version = le32_to_cpu(lmm1->lmv_layout_version);
+ fid_le_to_cpu(&lsm->lsm_md_master_fid, &lmm1->lmv_master_fid);
cplen = strlcpy(lsm->lsm_md_pool_name, lmm1->lmv_pool_name,
sizeof(lsm->lsm_md_pool_name));
+ if (!fid_is_sane(&lsm->lsm_md_master_fid))
+ RETURN(-EPROTO);
+
if (cplen >= sizeof(lsm->lsm_md_pool_name))
RETURN(-E2BIG);
if (lsm != NULL && lmm == NULL) {
#ifdef __KERNEL__
int i;
- for (i = 1; i < lsm->lsm_md_stripe_count; i++) {
- if (lsm->lsm_md_oinfo[i].lmo_root != NULL)
+ for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
+ /* For a migrating inode, the master stripe and the master
+ * object will be the same, so no iput is needed, see
+ * ll_update_lsm_md */
+ if (!(lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION &&
+ i == 0) && lsm->lsm_md_oinfo[i].lmo_root != NULL)
iput(lsm->lsm_md_oinfo[i].lmo_root);
}
#endif
RETURN(0);
}
+ if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_STRIPE)
+ RETURN(-EPERM);
+
/* Unpack memmd */
if (le32_to_cpu(lmm->lmv_magic) != LMV_MAGIC_V1 &&
- le32_to_cpu(lmm->lmv_magic) != LMV_MAGIC_MIGRATE &&
le32_to_cpu(lmm->lmv_magic) != LMV_USER_MAGIC) {
CERROR("%s: invalid lmv magic %x: rc = %d\n",
exp->exp_obd->obd_name, le32_to_cpu(lmm->lmv_magic),
RETURN(-EIO);
}
- if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_V1 ||
- le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_MIGRATE)
+ if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_V1)
lsm_size = lmv_stripe_md_size(lmv_mds_md_stripe_count_get(lmm));
else
/**
switch (le32_to_cpu(lmm->lmv_magic)) {
case LMV_MAGIC_V1:
- case LMV_MAGIC_MIGRATE:
rc = lmv_unpack_md_v1(exp, lsm, &lmm->lmv_md_v1);
break;
default:
ldlm_policy_data_t *policy, ldlm_mode_t mode,
struct lustre_handle *lockh)
{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- ldlm_mode_t rc;
- __u32 i;
- ENTRY;
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ ldlm_mode_t rc;
+ int tgt;
+ int i;
+ ENTRY;
- CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid));
+ CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid));
/*
- * With CMD every object can have two locks in different namespaces:
- * lookup lock in space of mds storing direntry and update/open lock in
- * space of mds storing inode. Thus we check all targets, not only that
- * one fid was created in.
- */
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- struct lmv_tgt_desc *tgt = lmv->tgts[i];
+ * With DNE every object can have two locks in different namespaces:
+ * lookup lock in space of MDT storing direntry and update/open lock in
+ * space of MDT storing inode. Try the MDT that the FID maps to first,
+ * since this can be easily found, and only try others if that fails.
+ */
+ for (i = 0, tgt = lmv_find_target_index(lmv, fid);
+ i < lmv->desc.ld_tgt_count;
+ i++, tgt = (tgt + 1) % lmv->desc.ld_tgt_count) {
+ if (tgt < 0) {
+ CDEBUG(D_HA, "%s: "DFID" is inaccessible: rc = %d\n",
+ obd->obd_name, PFID(fid), tgt);
+ tgt = 0;
+ }
- if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active)
+ if (lmv->tgts[tgt] == NULL ||
+ lmv->tgts[tgt]->ltd_exp == NULL ||
+ lmv->tgts[tgt]->ltd_active == 0)
continue;
- rc = md_lock_match(tgt->ltd_exp, flags, fid, type, policy, mode,
- lockh);
- if (rc)
- RETURN(rc);
- }
+ rc = md_lock_match(lmv->tgts[tgt]->ltd_exp, flags, fid,
+ type, policy, mode, lockh);
+ if (rc)
+ RETURN(rc);
+ }
- RETURN(0);
+ RETURN(0);
}
int lmv_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
RETURN(rc);
}
+int lmv_get_fid_from_lsm(struct obd_export *exp,
+ const struct lmv_stripe_md *lsm,
+ const char *name, int namelen, struct lu_fid *fid)
+{
+ const struct lmv_oinfo *oinfo;
+
+ LASSERT(lsm != NULL);
+ oinfo = lsm_name_to_stripe_info(lsm, name, namelen);
+ if (IS_ERR(oinfo))
+ return PTR_ERR(oinfo);
+
+ *fid = oinfo->lmo_fid;
+
+ RETURN(0);
+}
+
/**
* For lmv, only need to send request to master MDT, and the master MDT will
* process with other slave MDTs. The only exception is Q_GETOQUOTA for which
int lmv_update_lsm_md(struct obd_export *exp, struct lmv_stripe_md *lsm,
struct mdt_body *body, ldlm_blocking_callback cb_blocking)
{
- if (lsm->lsm_md_stripe_count <= 1)
- return 0;
-
return lmv_revalidate_slaves(exp, body, lsm, cb_blocking, 0);
}
.m_unpack_capa = lmv_unpack_capa,
.m_get_remote_perm = lmv_get_remote_perm,
.m_intent_getattr_async = lmv_intent_getattr_async,
- .m_revalidate_lock = lmv_revalidate_lock
+ .m_revalidate_lock = lmv_revalidate_lock,
+ .m_get_fid_from_lsm = lmv_get_fid_from_lsm,
};
int __init lmv_init(void)
{
- return class_register_type(&lmv_obd_ops, &lmv_md_ops, NULL,
+ return class_register_type(&lmv_obd_ops, &lmv_md_ops, true, NULL,
#ifndef HAVE_ONLY_PROCFS_SEQ
- NULL,
+ NULL,
#endif
- LUSTRE_LMV_NAME, NULL);
+ LUSTRE_LMV_NAME, NULL);
}
#ifdef __KERNEL__