/*
 * NOTE(review): this is a diff excerpt with elided context lines; the body
 * below is incomplete as shown.  Select an MDT for a new directory using
 * QOS (space-balanced) weights; *mdt carries the current/parent MDT index.
 */
static struct lu_tgt_desc *lmv_locate_tgt_qos(struct lmv_obd *lmv, __u32 *mdt)
{
struct lu_tgt_desc *tgt, *cur = NULL;
/* change: accumulate available space, not just weight, across usable MDTs */
+ __u64 total_avail = 0;
__u64 total_weight = 0;
__u64 cur_weight = 0;
int total_usable = 0;
tgt->ltd_qos.ltq_usable = 1;
lu_tgt_qos_weight_calc(tgt);
/* change: only remember the current MDT; its weight is no longer cached
 * because the stay-put test below now compares available space instead.
 */
- if (tgt->ltd_index == *mdt) {
+ if (tgt->ltd_index == *mdt)
cur = tgt;
- cur_weight = tgt->ltd_qos.ltq_weight;
- }
+ total_avail += tgt->ltd_qos.ltq_avail;
total_weight += tgt->ltd_qos.ltq_weight;
total_usable++;
}
- /* if current MDT has higher-than-average space, stay on same MDT */
- rand = total_weight / total_usable;
- if (cur_weight >= rand) {
+ /* if current MDT has above-average space, within range of the QOS
+ * threshold, stay on the same MDT to avoid creating needless remote
+ * MDT directories.
+ */
/* change: threshold is average avail scaled by (256 - lq_threshold_rr)/256,
 * i.e. a higher RR threshold makes staying on the current MDT easier.
 * Also fixes a latent NULL dereference: `cur` may be NULL when the parent
 * MDT was not among the usable targets, and is now checked explicitly.
 */
+ rand = total_avail * (256 - lmv->lmv_qos.lq_threshold_rr) /
+ (total_usable * 256);
+ if (cur && cur->ltd_qos.ltq_avail >= rand) {
tgt = cur;
GOTO(unlock, rc = 0);
}
- cur_weight = 0;
/* otherwise pick a target by weighted random selection (body elided) */
rand = lu_prandom_u64_max(total_weight);
lmv_foreach_connected_tgt(lmv, tgt) {
}
/*
 * NOTE(review): diff excerpt; middle of the body (tgt/rc setup) is elided.
 * Unpack lustre_md from a reply.  The diff switches the second parameter
 * from the raw ptlrpc_request to its req_capsule (pill), and forwards it
 * unchanged to md_get_lustre_md() — callers must be updated in lockstep.
 */
static int
-lmv_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
+lmv_get_lustre_md(struct obd_export *exp, struct req_capsule *pill,
struct obd_export *dt_exp, struct obd_export *md_exp,
struct lustre_md *md)
{
/* bail out if the target or its export is not set up */
if (!tgt || !tgt->ltd_exp)
return -EINVAL;
- return md_get_lustre_md(tgt->ltd_exp, req, dt_exp, md_exp, md);
+ return md_get_lustre_md(tgt->ltd_exp, pill, dt_exp, md_exp, md);
}
/* Release resources held by a lustre_md.  NOTE(review): body fully elided
 * in this diff excerpt — unchanged by the patch as far as visible here.
 */
static int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
}
/*
 * NOTE(review): diff excerpt; the middle of the body (ctgt/rc setup) is
 * elided.  Issue an async getattr intent.  The diff migrates the API from
 * md_enqueue_info (minfo/mi_data) to md_op_item (item/mop_data); the new
 * argument is forwarded to md_intent_getattr_async() — callers and the
 * md_ops vector must be converted together.
 */
static int lmv_intent_getattr_async(struct obd_export *exp,
- struct md_enqueue_info *minfo)
+ struct md_op_item *item)
{
- struct md_op_data *op_data = &minfo->mi_data;
+ struct md_op_data *op_data = &item->mop_data;
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *ptgt;
/* cross-MDT object: the child target differs from the parent's, so the
 * async getattr cannot be handled locally.
 */
if (ctgt != ptgt)
RETURN(-EREMOTE);
- rc = md_intent_getattr_async(ptgt->ltd_exp, minfo);
+ rc = md_intent_getattr_async(ptgt->ltd_exp, item);
RETURN(rc);
}