__u64 ba_max, ba_min, ba;
__u64 ia_max, ia_min, ia;
__u32 num_active;
- unsigned int i;
int prio_wide;
time64_t now, age;
__u32 maxage = lmv->desc.ld_qos_maxage;
now = ktime_get_real_seconds();
/* Calculate server penalty per object */
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- tgt = lmv->tgts[i];
- if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
+ lmv_foreach_tgt(lmv, tgt) {
+ if (!tgt->ltd_exp || !tgt->ltd_active)
continue;
/* bavail >> 16 to avoid overflow */
* we have to double the MDT penalty
*/
num_active = 2;
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- tgt = lmv->tgts[i];
- if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
+ lmv_foreach_tgt(lmv, tgt) {
+ if (!tgt->ltd_exp || !tgt->ltd_active)
continue;
tgt->ltd_qos.ltq_penalty_per_obj <<= 1;
{
struct lu_tgt_qos *ltq;
struct lu_svr_qos *svr;
- unsigned int i;
ENTRY;
*total_wt = 0;
/* Decrease all MDT penalties */
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- 	ltq = &lmv->tgts[i]->ltd_qos;
- 	if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
+ lmv_foreach_tgt(lmv, tgt) {
+ 	/* ltq must be re-derived from the iterator: it is read below
+ 	 * (ltq_penalty, ltq_usable, ltq_weight) and the old per-index
+ 	 * assignment was removed by this hunk — without this line ltq
+ 	 * would be used uninitialized/stale on every iteration.
+ 	 */
+ 	ltq = &tgt->ltd_qos;
+ 	if (!tgt->ltd_exp || !tgt->ltd_active)
continue;
if (ltq->ltq_penalty < ltq->ltq_penalty_per_obj)
else
ltq->ltq_penalty -= ltq->ltq_penalty_per_obj;
- lmv_qos_calc_weight(lmv->tgts[i]);
+ lmv_qos_calc_weight(tgt);
/* Recalc the total weight of usable osts */
if (ltq->ltq_usable)
CDEBUG(D_OTHER, "recalc tgt %d usable=%d avail=%llu"
" tgtppo=%llu tgtp=%llu svrppo=%llu"
" svrp=%llu wt=%llu\n",
- i, ltq->ltq_usable,
+ tgt->ltd_index, ltq->ltq_usable,
tgt_statfs_bavail(tgt) >> 10,
ltq->ltq_penalty_per_obj >> 10,
ltq->ltq_penalty >> 10,
__u64 total_weight = 0;
__u64 cur_weight = 0;
__u64 rand;
- int i;
int rc;
ENTRY;
if (rc)
GOTO(unlock, tgt = ERR_PTR(rc));
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- tgt = lmv->tgts[i];
- if (!tgt)
- continue;
-
+ lmv_foreach_tgt(lmv, tgt) {
tgt->ltd_qos.ltq_usable = 0;
if (!tgt->ltd_exp || !tgt->ltd_active)
continue;
rand = lu_prandom_u64_max(total_weight);
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- tgt = lmv->tgts[i];
-
- if (!tgt || !tgt->ltd_qos.ltq_usable)
+ lmv_foreach_tgt(lmv, tgt) {
+ if (!tgt->ltd_qos.ltq_usable)
continue;
cur_weight += tgt->ltd_qos.ltq_weight;
spin_lock(&lmv->lmv_qos.lq_rr.lqr_alloc);
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- tgt = lmv->tgts[(i + lmv->lmv_qos_rr_index) %
- lmv->desc.ld_tgt_count];
- if (tgt && tgt->ltd_exp && tgt->ltd_active) {
- *mdt = tgt->ltd_index;
- lmv->lmv_qos_rr_index =
- (i + lmv->lmv_qos_rr_index + 1) %
- lmv->desc.ld_tgt_count;
- spin_unlock(&lmv->lmv_qos.lq_rr.lqr_alloc);
-
- RETURN(tgt);
- }
+ tgt = lmv_tgt(lmv,
+ (i + lmv->lmv_qos_rr_index) % lmv->desc.ld_tgt_count);
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
+ continue;
+
+ *mdt = tgt->ltd_index;
+ lmv->lmv_qos_rr_index =
+ (i + lmv->lmv_qos_rr_index + 1) %
+ lmv->desc.ld_tgt_count;
+ spin_unlock(&lmv->lmv_qos.lq_rr.lqr_alloc);
+
+ RETURN(tgt);
}
spin_unlock(&lmv->lmv_qos.lq_rr.lqr_alloc);