if (lmv->lmv_tgts_kobj)
kobject_put(lmv->lmv_tgts_kobj);
- if (!lmv->connected)
- class_export_put(exp);
- rc = class_disconnect(exp);
lmv->connected = 0;
+ rc = class_disconnect(exp);
RETURN(rc);
}
case OBD_IOC_QUOTACTL: {
struct if_quotactl *qctl = karg;
struct obd_quotactl *oqctl;
+ struct obd_import *imp;
if (qctl->qc_valid == QC_MDTIDX) {
tgt = lmv_tgt(lmv, qctl->qc_idx);
RETURN(-EINVAL);
}
- if (!tgt || !tgt->ltd_exp)
+ if (!tgt)
+ RETURN(-ENODEV);
+
+ if (!tgt->ltd_exp)
RETURN(-EINVAL);
+ imp = class_exp2cliimp(tgt->ltd_exp);
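+ /* an inactive target with a non-idle import has no usable
+ * stats: identify the MDT and return -ENODATA */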
+ if (!tgt->ltd_active && imp->imp_state != LUSTRE_IMP_IDLE) {
+ qctl->qc_valid = QC_MDTIDX;
+ qctl->obd_uuid = tgt->ltd_uuid;
+ RETURN(-ENODATA);
+ }
+
OBD_ALLOC_PTR(oqctl);
if (!oqctl)
RETURN(-ENOMEM);
{
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_desc *desc;
- struct lnet_process_id lnet_id;
+ struct lnet_processid lnet_id;
int i = 0;
int rc;
* can distribute subdirs evenly from the beginning.
*/
while (LNetGetId(i++, &lnet_id) != -ENOENT) {
- if (lnet_id.nid != LNET_NID_LO_0) {
- lmv->lmv_qos_rr_index = (u32)lnet_id.nid;
+ if (!nid_is_lo0(&lnet_id.nid)) {
+ lmv->lmv_qos_rr_index = ntohl(lnet_id.nid.nid_addr[0]);
break;
}
}
ENTRY;
fld_client_fini(&lmv->lmv_fld);
+ fld_client_debugfs_fini(&lmv->lmv_fld);
+
+ lprocfs_obd_cleanup(obd);
+ lprocfs_free_md_stats(obd);
+
lmv_foreach_tgt_safe(lmv, tgt, tmp)
lmv_del_target(lmv, tgt);
lu_tgt_descs_fini(&lmv->lmv_mdt_descs);
/* choose initial MDT for this client */
for (i = 0;; i++) {
- struct lnet_process_id lnet_id;
+ struct lnet_processid lnet_id;
if (LNetGetId(i, &lnet_id) == -ENOENT)
break;
- if (lnet_id.nid != LNET_NID_LO_0) {
+ if (!nid_is_lo0(&lnet_id.nid)) {
/* We don't need a full 64-bit modulus, just enough
* to distribute the requests across MDTs evenly.
*/
- lmv->lmv_statfs_start = (u32)lnet_id.nid %
+ lmv->lmv_statfs_start = nidhash(&lnet_id.nid) %
lmv->lmv_mdt_count;
break;
}
__u32 i;
__u32 idx;
int rc = 0;
+ int err = 0;
ENTRY;
if (rc) {
CERROR("%s: can't stat MDS #%d: rc = %d\n",
tgt->ltd_exp->exp_obd->obd_name, i, rc);
+ err = rc;
+ /* Try another MDT */
+ if (flags & OBD_STATFS_SUM)
+ continue;
GOTO(out_free_temp, rc);
}
* clients can be mounted as long as MDT0 is in
* service */
*osfs = *temp;
- break;
+ GOTO(out_free_temp, rc);
}
if (i == 0) {
osfs->os_granted += temp->os_granted;
}
}
-
- EXIT;
+ /* stats are missing from some MDTs, so the data is incomplete */
+ if (err)
+ rc = err;
out_free_temp:
OBD_FREE(temp, sizeof(*temp));
- return rc;
+ RETURN(rc);
}
static int lmv_statfs_update(void *cookie, int rc)
RETURN(rc);
}
-static struct lu_tgt_desc *lmv_locate_tgt_qos(struct lmv_obd *lmv, __u32 *mdt)
+static struct lu_tgt_desc *lmv_locate_tgt_qos(struct lmv_obd *lmv, __u32 mdt,
+ unsigned short dir_depth)
{
struct lu_tgt_desc *tgt, *cur = NULL;
+ __u64 total_avail = 0;
__u64 total_weight = 0;
__u64 cur_weight = 0;
int total_usable = 0;
tgt->ltd_qos.ltq_usable = 1;
lu_tgt_qos_weight_calc(tgt);
- if (tgt->ltd_index == *mdt) {
+ if (tgt->ltd_index == mdt)
cur = tgt;
- cur_weight = tgt->ltd_qos.ltq_weight;
- }
+ total_avail += tgt->ltd_qos.ltq_avail;
total_weight += tgt->ltd_qos.ltq_weight;
total_usable++;
}
- /* if current MDT has higher-than-average space, stay on same MDT */
- rand = total_weight / total_usable;
- if (cur_weight >= rand) {
+ /* if the current MDT has above-average free space (within the QOS
+ * threshold), stay on the same MDT to avoid creating needless remote
+ * MDT directories. The factor "16 / (dir_depth + 10)" makes staying
+ * less likely for top level directories and more likely for lower
+ * levels.
+ */
+ rand = total_avail * 16 / (total_usable * (dir_depth + 10));
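+ /* e.g. with 4 usable MDTs the threshold at dir_depth 0 is
+ * total_avail * 16 / 40, i.e. 1.6x the per-MDT average, while at
+ * dir_depth 6 it equals the average exactly */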
+ if (cur && cur->ltd_qos.ltq_avail >= rand) {
tgt = cur;
- GOTO(unlock, rc = 0);
+ GOTO(unlock, tgt);
}
- cur_weight = 0;
rand = lu_prandom_u64_max(total_weight);
lmv_foreach_connected_tgt(lmv, tgt) {
if (cur_weight < rand)
continue;
- *mdt = tgt->ltd_index;
ltd_qos_update(&lmv->lmv_mdt_descs, tgt, &total_weight);
- GOTO(unlock, rc = 0);
+ GOTO(unlock, tgt);
}
/* no proper target found */
return tgt;
}
-static struct lu_tgt_desc *lmv_locate_tgt_rr(struct lmv_obd *lmv, __u32 *mdt)
+static struct lu_tgt_desc *lmv_locate_tgt_rr(struct lmv_obd *lmv)
{
struct lu_tgt_desc *tgt;
int i;
if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
continue;
- *mdt = tgt->ltd_index;
- lmv->lmv_qos_rr_index = (*mdt + 1) %
+ lmv->lmv_qos_rr_index = (tgt->ltd_index + 1) %
lmv->lmv_mdt_descs.ltd_tgts_size;
spin_unlock(&lmv->lmv_lock);
RETURN(ERR_PTR(-ENODEV));
}
+/* locate a less full MDT (avoid the most full MDT) */
+static struct lu_tgt_desc *lmv_locate_tgt_lf(struct lmv_obd *lmv)
+{
+ struct lu_tgt_desc *min = NULL;
+ struct lu_tgt_desc *tgt;
+ __u64 avail = 0;
+ __u64 rand;
+
+ ENTRY;
+
+ if (!ltd_qos_is_usable(&lmv->lmv_mdt_descs))
+ RETURN(ERR_PTR(-EAGAIN));
+
+ down_write(&lmv->lmv_qos.lq_rw_sem);
+
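+ /* recheck under lq_rw_sem, since target state may have changed
+ * while waiting for the lock */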
+ if (!ltd_qos_is_usable(&lmv->lmv_mdt_descs))
+ GOTO(unlock, tgt = ERR_PTR(-EAGAIN));
+
+ lmv_foreach_tgt(lmv, tgt) {
+ if (!tgt->ltd_exp || !tgt->ltd_active) {
+ tgt->ltd_qos.ltq_usable = 0;
+ continue;
+ }
+
+ tgt->ltd_qos.ltq_usable = 1;
+ lu_tgt_qos_weight_calc(tgt);
+ avail += tgt->ltd_qos.ltq_avail;
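+ /* remember the target with the least available space */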
+ if (!min || min->ltd_qos.ltq_avail > tgt->ltd_qos.ltq_avail)
+ min = tgt;
+ }
+
+ /* avoid the most full MDT */
+ if (min)
+ avail -= min->ltd_qos.ltq_avail;
+
+ rand = lu_prandom_u64_max(avail);
+ avail = 0;
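+ /* weighted random selection: each remaining target is chosen
+ * with probability proportional to its available space */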
+ lmv_foreach_connected_tgt(lmv, tgt) {
+ if (!tgt->ltd_qos.ltq_usable)
+ continue;
+
+ if (tgt == min)
+ continue;
+
+ avail += tgt->ltd_qos.ltq_avail;
+ if (avail < rand)
+ continue;
+
+ GOTO(unlock, tgt);
+ }
+
+ /* no proper target found */
+ GOTO(unlock, tgt = ERR_PTR(-EAGAIN));
+unlock:
+ up_write(&lmv->lmv_qos.lq_rw_sem);
+
+ RETURN(tgt);
+}
+
/* locate MDT by file name, for striped directory, the file name hash decides
* which stripe its dirent is stored in.
*/
return rc;
}
+/* mkdir by QoS upon 'lfs mkdir -i -1'.
+ *
+ * NB, mkdir by QoS only if the parent is not striped, to avoid creating
+ * remote directories under a striped directory.
+ */
static inline bool lmv_op_user_qos_mkdir(const struct md_op_data *op_data)
{
const struct lmv_user_md *lum = op_data->op_data;
+ if (op_data->op_code != LUSTRE_OPC_MKDIR)
+ return false;
+
+ if (lmv_dir_striped(op_data->op_mea1))
+ return false;
+
return (op_data->op_cli_flags & CLI_SET_MEA) && lum &&
le32_to_cpu(lum->lum_magic) == LMV_USER_MAGIC &&
le32_to_cpu(lum->lum_stripe_offset) == LMV_OFFSET_DEFAULT;
}
+/* mkdir by QoS if either ROOT or parent default LMV is space balanced. */
static inline bool lmv_op_default_qos_mkdir(const struct md_op_data *op_data)
{
const struct lmv_stripe_md *lsm = op_data->op_default_mea1;
- return lsm && lsm->lsm_md_master_mdt_index == LMV_OFFSET_DEFAULT;
-}
-
-/* mkdir by QoS in two cases:
- * 1. 'lfs mkdir -i -1'
- * 2. parent default LMV master_mdt_index is -1
- *
- * NB, mkdir by QoS only if parent is not striped, this is to avoid remote
- * directories under striped directory.
- */
-static inline bool lmv_op_qos_mkdir(const struct md_op_data *op_data)
-{
if (op_data->op_code != LUSTRE_OPC_MKDIR)
return false;
if (lmv_dir_striped(op_data->op_mea1))
return false;
- if (lmv_op_user_qos_mkdir(op_data))
- return true;
-
- if (lmv_op_default_qos_mkdir(op_data))
- return true;
-
- return false;
+ return (op_data->op_flags & MF_QOS_MKDIR) ||
+ (lsm && lsm->lsm_md_master_mdt_index == LMV_OFFSET_DEFAULT);
}
-/* if default LMV is set, and its index is LMV_OFFSET_DEFAULT, and
- * 1. max_inherit_rr is set and is not LMV_INHERIT_RR_NONE
+/* if parent default LMV is space balanced, and
+ * 1. max_inherit_rr is set
* 2. or parent is ROOT
- * mkdir roundrobin.
- * NB, this also needs to check server is balanced, which is checked by caller.
+ * mkdir roundrobin. Likewise, if the parent has no default LMV but the
+ * ROOT default LMV requests roundrobin mkdir, do the same.
+ * NB, this also requires the server to be balanced, which is checked by
+ * the caller.
*/
static inline bool lmv_op_default_rr_mkdir(const struct md_op_data *op_data)
{
if (!lmv_op_default_qos_mkdir(op_data))
return false;
- return lsm->lsm_md_max_inherit_rr != LMV_INHERIT_RR_NONE ||
+ return (op_data->op_flags & MF_RR_MKDIR) ||
+ (lsm && lsm->lsm_md_max_inherit_rr != LMV_INHERIT_RR_NONE) ||
fid_is_root(&op_data->op_fid1);
}
LMV_OFFSET_DEFAULT;
}
+/* locate MDT by space usage */
+static struct lu_tgt_desc *lmv_locate_tgt_by_space(struct lmv_obd *lmv,
+ struct md_op_data *op_data,
+ struct lmv_tgt_desc *tgt)
+{
+ struct lmv_tgt_desc *tmp = tgt;
+
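+ /* tmp holds the caller's target as a fallback in case QoS offers
+ * nothing better */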
+ tgt = lmv_locate_tgt_qos(lmv, op_data->op_mds, op_data->op_dir_depth);
+ if (tgt == ERR_PTR(-EAGAIN)) {
+ if (ltd_qos_is_balanced(&lmv->lmv_mdt_descs) &&
+ !lmv_op_default_rr_mkdir(op_data) &&
+ !lmv_op_user_qos_mkdir(op_data))
+ /* if not necessary, don't create remote directory. */
+ tgt = tmp;
+ else
+ tgt = lmv_locate_tgt_rr(lmv);
+ }
+
+ /*
+ * only update statfs after a QoS mkdir; this means the cached statfs
+ * may be stale, so the current mkdir may not follow QoS accurately,
+ * but that is not serious, and it avoids periodic statfs when the
+ * client doesn't mkdir by QoS.
+ */
+ if (!IS_ERR(tgt)) {
+ op_data->op_mds = tgt->ltd_index;
+ lmv_statfs_check_update(lmv2obd_dev(lmv), tgt);
+ }
+
+ return tgt;
+}
+
int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
const void *data, size_t datalen, umode_t mode, uid_t uid,
- gid_t gid, cfs_cap_t cap_effective, __u64 rdev,
+ gid_t gid, kernel_cap_t cap_effective, __u64 rdev,
struct ptlrpc_request **request)
{
struct obd_device *obd = exp->exp_obd;
if (IS_ERR(tgt))
RETURN(PTR_ERR(tgt));
+ /* the order to apply policy in mkdir:
+ * 1. is "lfs mkdir -i N"? mkdir on MDT N.
+ * 2. is "lfs mkdir -i -1"? mkdir by space usage.
+ * 3. is starting MDT specified in default LMV? mkdir on MDT N.
+ * 4. is default LMV space balanced? mkdir by space usage.
+ */
if (lmv_op_user_specific_mkdir(op_data)) {
struct lmv_user_md *lum = op_data->op_data;
tgt = lmv_tgt(lmv, op_data->op_mds);
if (!tgt)
RETURN(-ENODEV);
+ } else if (lmv_op_user_qos_mkdir(op_data)) {
+ tgt = lmv_locate_tgt_by_space(lmv, op_data, tgt);
+ if (IS_ERR(tgt))
+ RETURN(PTR_ERR(tgt));
} else if (lmv_op_default_specific_mkdir(op_data)) {
op_data->op_mds =
op_data->op_default_mea1->lsm_md_master_mdt_index;
tgt = lmv_tgt(lmv, op_data->op_mds);
if (!tgt)
RETURN(-ENODEV);
- } else if (lmv_op_qos_mkdir(op_data)) {
- struct lmv_tgt_desc *tmp = tgt;
-
- tgt = lmv_locate_tgt_qos(lmv, &op_data->op_mds);
- if (tgt == ERR_PTR(-EAGAIN)) {
- if (ltd_qos_is_balanced(&lmv->lmv_mdt_descs) &&
- !lmv_op_default_rr_mkdir(op_data) &&
- !lmv_op_user_qos_mkdir(op_data))
- /* if it's not necessary, don't create remote
- * directory.
- */
- tgt = tmp;
- else
- tgt = lmv_locate_tgt_rr(lmv, &op_data->op_mds);
- }
+ } else if (lmv_op_default_qos_mkdir(op_data)) {
+ tgt = lmv_locate_tgt_by_space(lmv, op_data, tgt);
if (IS_ERR(tgt))
RETURN(PTR_ERR(tgt));
-
- /*
- * only update statfs after QoS mkdir, this means the cached
- * statfs may be stale, and current mkdir may not follow QoS
- * accurately, but it's not serious, and avoids periodic statfs
- * when client doesn't mkdir by QoS.
- */
- lmv_statfs_check_update(obd, tgt);
}
retry:
ENTRY;
retry:
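+ /* for ".." locate the target directly by the parent FID rather
+ * than by name */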
- tgt = lmv_locate_tgt(lmv, op_data);
+ if (op_data->op_namelen == 2 &&
+ op_data->op_name[0] == '.' && op_data->op_name[1] == '.')
+ tgt = lmv_fid2tgt(lmv, &op_data->op_fid1);
+ else
+ tgt = lmv_locate_tgt(lmv, op_data);
if (IS_ERR(tgt))
RETURN(PTR_ERR(tgt));
op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
- op_data->op_cap = cfs_curproc_cap_pack();
+ op_data->op_cap = current_cap();
tgt = lmv_locate_tgt2(lmv, op_data);
if (IS_ERR(tgt))
RETURN(rc);
}
+/* check whether this migrates a top dir, i.e. a dir whose layout is not
+ * already changing */
+static inline bool lmv_op_topdir_migrate(const struct md_op_data *op_data)
+{
+ if (!S_ISDIR(op_data->op_mode))
+ return false;
+
+ if (lmv_dir_layout_changing(op_data->op_mea1))
+ return false;
+
+ return true;
+}
+
+/* migrate top dir to specific MDTs */
+static inline bool lmv_topdir_specific_migrate(const struct md_op_data *op_data)
+{
+ const struct lmv_user_md *lum = op_data->op_data;
+
+ if (!lmv_op_topdir_migrate(op_data))
+ return false;
+
+ return le32_to_cpu(lum->lum_stripe_offset) != LMV_OFFSET_DEFAULT;
+}
+
+/* migrate top dir in QoS mode if user issued "lfs migrate -m -1..." */
+static inline bool lmv_topdir_qos_migrate(const struct md_op_data *op_data)
+{
+ const struct lmv_user_md *lum = op_data->op_data;
+
+ if (!lmv_op_topdir_migrate(op_data))
+ return false;
+
+ return le32_to_cpu(lum->lum_stripe_offset) == LMV_OFFSET_DEFAULT;
+}
+
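+/* migrate a subdir (under a dir whose layout is changing) to the MDTs the
+ * user specified */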
+static inline bool lmv_subdir_specific_migrate(const struct md_op_data *op_data)
+{
+ const struct lmv_user_md *lum = op_data->op_data;
+
+ if (!S_ISDIR(op_data->op_mode))
+ return false;
+
+ if (!lmv_dir_layout_changing(op_data->op_mea1))
+ return false;
+
+ return le32_to_cpu(lum->lum_stripe_offset) != LMV_OFFSET_DEFAULT;
+}
+
static int lmv_migrate(struct obd_export *exp, struct md_op_data *op_data,
const char *name, size_t namelen,
struct ptlrpc_request **request)
op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
- op_data->op_cap = cfs_curproc_cap_pack();
+ op_data->op_cap = current_cap();
parent_tgt = lmv_fid2tgt(lmv, &op_data->op_fid1);
if (IS_ERR(parent_tgt))
tp_tgt = lmv_tgt(lmv, oinfo->lmo_mds);
if (!tp_tgt)
RETURN(-ENODEV);
+
+ /* parent unchanged and namespace-only update requested: nothing to do */
+ if (lu_fid_eq(&op_data->op_fid4, &op_data->op_fid2) &&
+ op_data->op_bias & MDS_MIGRATE_NSONLY)
+ RETURN(-EALREADY);
}
} else {
sp_tgt = parent_tgt;
if (IS_ERR(child_tgt))
RETURN(PTR_ERR(child_tgt));
- /* for directory, migrate to MDT specified by lum_stripe_offset;
- * otherwise migrate to the target stripe of parent, but parent
- * directory may have finished migration (normally current file too),
- * allocate FID on MDT lum_stripe_offset, and server will check
- * whether file was migrated already.
- */
- if (S_ISDIR(op_data->op_mode) || !tp_tgt) {
+ if (lmv_topdir_specific_migrate(op_data)) {
struct lmv_user_md *lum = op_data->op_data;
op_data->op_mds = le32_to_cpu(lum->lum_stripe_offset);
- } else {
+ } else if (lmv_topdir_qos_migrate(op_data)) {
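+ /* prefer a less full MDT, falling back to round-robin when QoS
+ * data is not usable */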
+ tgt = lmv_locate_tgt_lf(lmv);
+ if (tgt == ERR_PTR(-EAGAIN))
+ tgt = lmv_locate_tgt_rr(lmv);
+ if (IS_ERR(tgt))
+ RETURN(PTR_ERR(tgt));
+
+ op_data->op_mds = tgt->ltd_index;
+ } else if (lmv_subdir_specific_migrate(op_data)) {
+ struct lmv_user_md *lum = op_data->op_data;
+ __u32 i;
+
+ LASSERT(tp_tgt);
+ if (le32_to_cpu(lum->lum_magic) == LMV_USER_MAGIC_SPECIFIC) {
+ /* adjust MDTs in lum, since the subdir is located on the MDT
+ * of its parent stripe, not on the first specified MDT.
+ */
+ for (i = 0; i < le32_to_cpu(lum->lum_stripe_count);
+ i++) {
+ if (le32_to_cpu(lum->lum_objects[i].lum_mds) ==
+ tp_tgt->ltd_index)
+ break;
+ }
+
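+ /* the parent stripe's MDT must be among the specified MDTs */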
+ if (i == le32_to_cpu(lum->lum_stripe_count))
+ RETURN(-ENODEV);
+
+ lum->lum_objects[i].lum_mds =
+ lum->lum_objects[0].lum_mds;
+ lum->lum_objects[0].lum_mds =
+ cpu_to_le32(tp_tgt->ltd_index);
+ }
+ /* NB, the above adjusts subdir migration for a command like
+ * "lfs migrate -m 0,1,2 ...", but for a migration like
+ * "lfs migrate -m 0 -c 2 ...", the top dir is migrated to MDT0
+ * and MDT1, while its subdir may be migrated to MDT1 and MDT2.
+ */
+
+ lum->lum_stripe_offset = cpu_to_le32(tp_tgt->ltd_index);
+ op_data->op_mds = tp_tgt->ltd_index;
+ } else if (tp_tgt) {
op_data->op_mds = tp_tgt->ltd_index;
+ } else {
+ op_data->op_mds = sp_tgt->ltd_index;
}
+
rc = lmv_fid_alloc(NULL, exp, &target_fid, op_data);
if (rc)
RETURN(rc);
op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
- op_data->op_cap = cfs_curproc_cap_pack();
+ op_data->op_cap = current_cap();
op_data->op_name = new;
op_data->op_namelen = newlen;
struct lmv_dir_ctxt {
struct lmv_obd *ldc_lmv;
struct md_op_data *ldc_op_data;
- struct md_callback *ldc_cb_op;
+ struct md_readdir_info *ldc_mrinfo;
__u64 ldc_hash;
int ldc_count;
struct stripe_dirent ldc_stripes[0];
op_data->op_fid2 = oinfo->lmo_fid;
op_data->op_data = oinfo->lmo_root;
- rc = md_read_page(tgt->ltd_exp, op_data, ctxt->ldc_cb_op, hash,
+ rc = md_read_page(tgt->ltd_exp, op_data, ctxt->ldc_mrinfo, hash,
&stripe->sd_page);
op_data->op_fid1 = fid;
LASSERT(!ent);
/* treat error as eof, so dir can be partially accessed */
stripe->sd_eof = true;
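+ /* record rc so the caller knows the listing is partial */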
+ ctxt->ldc_mrinfo->mr_partial_readdir_rc = rc;
LCONSOLE_WARN("dir "DFID" stripe %d readdir failed: %d, "
"directory is partially accessed!\n",
PFID(&ctxt->ldc_op_data->op_fid1), stripe_index,
*
* \param[in] exp obd export refer to LMV
* \param[in] op_data holds the MD parameters of read_entry
- * \param[in] cb_op ldlm callback being used in enqueue in mdc_read_entry
+ * \param[in] mrinfo ldlm callback used in enqueue in mdc_read_entry;
+ * the partial readdir result will also be stored in it.
* \param[in] offset starting hash offset
* \param[out] ppage the page holding the entry. Note: because the entry
* will be accessed in the upper layer, we need to hold the
*/
static int lmv_striped_read_page(struct obd_export *exp,
struct md_op_data *op_data,
- struct md_callback *cb_op,
- __u64 offset, struct page **ppage)
+ struct md_readdir_info *mrinfo, __u64 offset,
+ struct page **ppage)
{
struct page *page = NULL;
struct lu_dirpage *dp;
GOTO(free_page, rc = -ENOMEM);
ctxt->ldc_lmv = &exp->exp_obd->u.lmv;
ctxt->ldc_op_data = op_data;
- ctxt->ldc_cb_op = cb_op;
+ ctxt->ldc_mrinfo = mrinfo;
ctxt->ldc_hash = offset;
ctxt->ldc_count = stripe_count;
}
static int lmv_read_page(struct obd_export *exp, struct md_op_data *op_data,
- struct md_callback *cb_op, __u64 offset,
+ struct md_readdir_info *mrinfo, __u64 offset,
struct page **ppage)
{
struct obd_device *obd = exp->exp_obd;
RETURN(-ENODATA);
if (unlikely(lmv_dir_striped(op_data->op_mea1))) {
- rc = lmv_striped_read_page(exp, op_data, cb_op, offset, ppage);
+ rc = lmv_striped_read_page(exp, op_data, mrinfo, offset, ppage);
RETURN(rc);
}
if (IS_ERR(tgt))
RETURN(PTR_ERR(tgt));
- rc = md_read_page(tgt->ltd_exp, op_data, cb_op, offset, ppage);
+ rc = md_read_page(tgt->ltd_exp, op_data, mrinfo, offset, ppage);
RETURN(rc);
}
op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
- op_data->op_cap = cfs_curproc_cap_pack();
+ op_data->op_cap = current_cap();
retry:
parent_tgt = lmv_locate_tgt(lmv, op_data);
{
ENTRY;
libcfs_kkuc_group_rem(&obd->obd_uuid, 0, KUC_GRP_HSM);
- fld_client_debugfs_fini(&obd->u.lmv.lmv_fld);
- lprocfs_obd_cleanup(obd);
- lprocfs_free_md_stats(obd);
RETURN(0);
}
exp->exp_connect_data = *(struct obd_connect_data *)val;
RETURN(rc);
} else if (KEY_IS(KEY_TGT_COUNT)) {
- *((int *)val) = lmv->lmv_mdt_descs.ltd_lmv_desc.ld_tgt_count;
+ *((int *)val) = lmv->lmv_mdt_descs.ltd_tgts_size;
RETURN(0);
}
lsm->lsm_md_magic = le32_to_cpu(lmm1->lmv_magic);
lsm->lsm_md_stripe_count = le32_to_cpu(lmm1->lmv_stripe_count);
lsm->lsm_md_master_mdt_index = le32_to_cpu(lmm1->lmv_master_mdt_index);
- if (OBD_FAIL_CHECK(OBD_FAIL_UNKNOWN_LMV_STRIPE))
- lsm->lsm_md_hash_type = LMV_HASH_TYPE_UNKNOWN;
+ if (CFS_FAIL_CHECK(OBD_FAIL_LMV_UNKNOWN_STRIPE))
+ lsm->lsm_md_hash_type = cfs_fail_val ?: LMV_HASH_TYPE_UNKNOWN;
else
lsm->lsm_md_hash_type = le32_to_cpu(lmm1->lmv_hash_type);
lsm->lsm_md_layout_version = le32_to_cpu(lmm1->lmv_layout_version);
{
const struct lmv_oinfo *oinfo;
- LASSERT(lmv_dir_striped(lsm));
+ if (!lmv_dir_striped(lsm))
+ RETURN(-ESTALE);
oinfo = lsm_name_to_stripe_info(lsm, name, namelen, false);
if (IS_ERR(oinfo))