LU-12624 obdclass: lu_tgt_descs cleanup
[fs/lustre-release.git] lustre/lod/lod_qos.c
index 739720f..1892dee 100644
@@ -71,7 +71,8 @@
  *
  * \param[in] env      execution environment for this thread
  * \param[in] d                LOD device
- * \param[in] index    index of OST target to check
+ * \param[in] ltd      target table
+ * \param[in] index    target index
  * \param[out] sfs     buffer for statfs data
  *
  * \retval 0           if the target is good
 
  */
 static int lod_statfs_and_check(const struct lu_env *env, struct lod_device *d,
-                               int index, struct obd_statfs *sfs)
+                               struct lu_tgt_descs *ltd, int index,
+                               struct obd_statfs *sfs)
 {
-       struct lod_tgt_desc *ost;
-       int                  rc;
+       struct lov_desc *desc = &ltd->ltd_lov_desc;
+       struct lu_tgt_desc *tgt = LTD_TGT(ltd, index);
+       int rc;
+
        ENTRY;
 
        LASSERT(d);
-       ost = OST_TGT(d,index);
-       LASSERT(ost);
+       LASSERT(tgt);
 
-       rc = dt_statfs(env, ost->ltd_ost, sfs);
+       rc = dt_statfs(env, tgt->ltd_tgt, sfs);
 
        if (rc == 0 && ((sfs->os_state & OS_STATE_ENOSPC) ||
            (sfs->os_state & OS_STATE_ENOINO && sfs->os_fprecreated == 0)))
@@ -107,36 +110,36 @@ static int lod_statfs_and_check(const struct lu_env *env, struct lod_device *d,
                rc = -ENOBUFS;
 
        /* check whether device has changed state (active, inactive) */
-       if (rc != 0 && ost->ltd_active) {
+       if (rc != 0 && tgt->ltd_active) {
                /* turned inactive? */
                spin_lock(&d->lod_lock);
-               if (ost->ltd_active) {
-                       ost->ltd_active = 0;
+               if (tgt->ltd_active) {
+                       tgt->ltd_active = 0;
                        if (rc == -ENOTCONN)
-                               ost->ltd_connecting = 1;
+                               tgt->ltd_connecting = 1;
 
-                       LASSERT(d->lod_desc.ld_active_tgt_count > 0);
-                       d->lod_desc.ld_active_tgt_count--;
-                       d->lod_qos.lq_dirty = 1;
-                       d->lod_qos.lq_rr.lqr_dirty = 1;
+                       LASSERT(desc->ld_active_tgt_count > 0);
+                       desc->ld_active_tgt_count--;
+                       ltd->ltd_qos.lq_dirty = 1;
+                       ltd->ltd_qos.lq_rr.lqr_dirty = 1;
                        CDEBUG(D_CONFIG, "%s: turns inactive\n",
-                              ost->ltd_exp->exp_obd->obd_name);
+                              tgt->ltd_exp->exp_obd->obd_name);
                }
                spin_unlock(&d->lod_lock);
-       } else if (rc == 0 && ost->ltd_active == 0) {
+       } else if (rc == 0 && tgt->ltd_active == 0) {
                /* turned active? */
-               LASSERTF(d->lod_desc.ld_active_tgt_count < d->lod_ostnr,
-                        "active tgt count %d, ost nr %d\n",
-                        d->lod_desc.ld_active_tgt_count, d->lod_ostnr);
+               LASSERTF(desc->ld_active_tgt_count < desc->ld_tgt_count,
+                        "active tgt count %d, tgt nr %d\n",
+                        desc->ld_active_tgt_count, desc->ld_tgt_count);
                spin_lock(&d->lod_lock);
-               if (ost->ltd_active == 0) {
-                       ost->ltd_active = 1;
-                       ost->ltd_connecting = 0;
-                       d->lod_desc.ld_active_tgt_count++;
-                       d->lod_qos.lq_dirty = 1;
-                       d->lod_qos.lq_rr.lqr_dirty = 1;
+               if (tgt->ltd_active == 0) {
+                       tgt->ltd_active = 1;
+                       tgt->ltd_connecting = 0;
+                       desc->ld_active_tgt_count++;
+                       ltd->ltd_qos.lq_dirty = 1;
+                       ltd->ltd_qos.lq_rr.lqr_dirty = 1;
                        CDEBUG(D_CONFIG, "%s: turns active\n",
-                              ost->ltd_exp->exp_obd->obd_name);
+                              tgt->ltd_exp->exp_obd->obd_name);
                }
                spin_unlock(&d->lod_lock);
        }
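
With the table now passed in explicitly, a caller picks the descriptor set it is iterating and hands it over together with the index. A minimal sketch of the OST-side call shape, matching the call sites updated further down in this patch (the wrapper name and "idx" are illustrative only):

        /*
         * Hypothetical wrapper just to show the new call shape of
         * lod_statfs_and_check(); the real call sites appear below.
         */
        static int lod_refresh_one_ost(const struct lu_env *env,
                                       struct lod_device *lod, int idx)
        {
                return lod_statfs_and_check(env, lod, &lod->lod_ost_descs, idx,
                                            &OST_TGT(lod, idx)->ltd_statfs);
        }
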
@@ -157,20 +160,21 @@ static int lod_statfs_and_check(const struct lu_env *env, struct lod_device *d,
 void lod_qos_statfs_update(const struct lu_env *env, struct lod_device *lod)
 {
        struct obd_device *obd = lod2obd(lod);
-       struct ost_pool *osts = &(lod->lod_pool_info);
+       struct lu_tgt_pool *osts = &lod->lod_ost_descs.ltd_tgt_pool;
        time64_t max_age;
        unsigned int i;
        u64 avail;
        int idx;
        ENTRY;
 
-       max_age = ktime_get_seconds() - 2 * lod->lod_desc.ld_qos_maxage;
+       max_age = ktime_get_seconds() -
+                 2 * lod->lod_ost_descs.ltd_lov_desc.ld_qos_maxage;
 
        if (obd->obd_osfs_age > max_age)
                /* statfs data are quite recent, don't need to refresh it */
                RETURN_EXIT;
 
-       down_write(&lod->lod_qos.lq_rw_sem);
+       down_write(&lod->lod_ost_descs.ltd_qos.lq_rw_sem);
 
        if (obd->obd_osfs_age > max_age)
                goto out;
@@ -178,262 +182,20 @@ void lod_qos_statfs_update(const struct lu_env *env, struct lod_device *lod)
        for (i = 0; i < osts->op_count; i++) {
                idx = osts->op_array[i];
                avail = OST_TGT(lod,idx)->ltd_statfs.os_bavail;
-               if (lod_statfs_and_check(env, lod, idx,
+               if (lod_statfs_and_check(env, lod, &lod->lod_ost_descs, idx,
                                         &OST_TGT(lod, idx)->ltd_statfs))
                        continue;
                if (OST_TGT(lod,idx)->ltd_statfs.os_bavail != avail)
                        /* recalculate weigths */
-                       lod->lod_qos.lq_dirty = 1;
+                       lod->lod_ost_descs.ltd_qos.lq_dirty = 1;
        }
        obd->obd_osfs_age = ktime_get_seconds();
 
 out:
-       up_write(&lod->lod_qos.lq_rw_sem);
+       up_write(&lod->lod_ost_descs.ltd_qos.lq_rw_sem);
        EXIT;
 }
 
-/**
- * Calculate per-OST and per-OSS penalties
- *
- * Re-calculate penalties when the configuration changes, active targets
- * change and after statfs refresh (all these are reflected by lq_dirty flag).
- * On every OST and OSS: decay the penalty by half for every 8x the update
- * interval that the device has been idle. That gives lots of time for the
- * statfs information to be updated (which the penalty is only a proxy for),
- * and avoids penalizing OSS/OSTs under light load.
- * See lod_qos_calc_weight() for how penalties are factored into the weight.
- *
- * \param[in] lod      LOD device
- *
- * \retval 0           on success
- * \retval -EAGAIN     the number of OSTs isn't enough
- */
-static int lod_qos_calc_ppo(struct lod_device *lod)
-{
-       struct lu_svr_qos *oss;
-       __u64 ba_max, ba_min, temp;
-       __u32 num_active;
-       unsigned int i;
-       int rc, prio_wide;
-       time64_t now, age;
-
-       ENTRY;
-
-       if (!lod->lod_qos.lq_dirty)
-               GOTO(out, rc = 0);
-
-       num_active = lod->lod_desc.ld_active_tgt_count - 1;
-       if (num_active < 1)
-               GOTO(out, rc = -EAGAIN);
-
-       /* find bavail on each OSS */
-       list_for_each_entry(oss, &lod->lod_qos.lq_svr_list, lsq_svr_list)
-               oss->lsq_bavail = 0;
-       lod->lod_qos.lq_active_svr_count = 0;
-
-       /*
-        * How badly user wants to select OSTs "widely" (not recently chosen
-        * and not on recent OSS's).  As opposed to "freely" (free space
-        * avail.) 0-256
-        */
-       prio_wide = 256 - lod->lod_qos.lq_prio_free;
-
-       ba_min = (__u64)(-1);
-       ba_max = 0;
-       now = ktime_get_real_seconds();
-       /* Calculate OST penalty per object
-        * (lod ref taken in lod_qos_prep_create())
-        */
-       cfs_foreach_bit(lod->lod_ost_bitmap, i) {
-               LASSERT(OST_TGT(lod,i));
-               temp = TGT_BAVAIL(i);
-               if (!temp)
-                       continue;
-               ba_min = min(temp, ba_min);
-               ba_max = max(temp, ba_max);
-
-               /* Count the number of usable OSS's */
-               if (OST_TGT(lod, i)->ltd_qos.ltq_svr->lsq_bavail == 0)
-                       lod->lod_qos.lq_active_svr_count++;
-               OST_TGT(lod, i)->ltd_qos.ltq_svr->lsq_bavail += temp;
-
-               /* per-OST penalty is prio * TGT_bavail / (num_ost - 1) / 2 */
-               temp >>= 1;
-               do_div(temp, num_active);
-               OST_TGT(lod,i)->ltd_qos.ltq_penalty_per_obj =
-                       (temp * prio_wide) >> 8;
-
-               age = (now - OST_TGT(lod,i)->ltd_qos.ltq_used) >> 3;
-               if (lod->lod_qos.lq_reset ||
-                   age > 32 * lod->lod_desc.ld_qos_maxage)
-                       OST_TGT(lod,i)->ltd_qos.ltq_penalty = 0;
-               else if (age > lod->lod_desc.ld_qos_maxage)
-                       /* Decay OST penalty. */
-                       OST_TGT(lod,i)->ltd_qos.ltq_penalty >>=
-                               (age / lod->lod_desc.ld_qos_maxage);
-       }
-
-       num_active = lod->lod_qos.lq_active_svr_count - 1;
-       if (num_active < 1) {
-               /* If there's only 1 OSS, we can't penalize it, so instead
-                  we have to double the OST penalty */
-               num_active = 1;
-               cfs_foreach_bit(lod->lod_ost_bitmap, i)
-                       OST_TGT(lod,i)->ltd_qos.ltq_penalty_per_obj <<= 1;
-       }
-
-       /* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */
-       list_for_each_entry(oss, &lod->lod_qos.lq_svr_list, lsq_svr_list) {
-               temp = oss->lsq_bavail >> 1;
-               do_div(temp, oss->lsq_tgt_count * num_active);
-               oss->lsq_penalty_per_obj = (temp * prio_wide) >> 8;
-
-               age = (now - oss->lsq_used) >> 3;
-               if (lod->lod_qos.lq_reset ||
-                   age > 32 * lod->lod_desc.ld_qos_maxage)
-                       oss->lsq_penalty = 0;
-               else if (age > lod->lod_desc.ld_qos_maxage)
-                       /* Decay OSS penalty. */
-                       oss->lsq_penalty >>= age / lod->lod_desc.ld_qos_maxage;
-       }
-
-       lod->lod_qos.lq_dirty = 0;
-       lod->lod_qos.lq_reset = 0;
-
-       /* If each ost has almost same free space,
-        * do rr allocation for better creation performance */
-       lod->lod_qos.lq_same_space = 0;
-       if ((ba_max * (256 - lod->lod_qos.lq_threshold_rr)) >> 8 < ba_min) {
-               lod->lod_qos.lq_same_space = 1;
-               /* Reset weights for the next time we enter qos mode */
-               lod->lod_qos.lq_reset = 1;
-       }
-       rc = 0;
-
-out:
-#ifndef FORCE_QOS
-       if (!rc && lod->lod_qos.lq_same_space)
-               RETURN(-EAGAIN);
-#endif
-       RETURN(rc);
-}
-
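
The penalty arithmetic removed above moves into the generic ltd_qos_penalties_calc() called further down in this patch. Assuming the generic code keeps the same formulas, the per-object penalty reduces to the sketch below; the helper name is an illustration, and the per-server penalty applies the same scaling to the server's aggregate bavail, additionally divided by its OST count:

        /* prio_wide * bavail / 2 / num_active, scaled back by 256 */
        static __u64 qos_penalty_per_obj(__u64 bavail, __u32 num_active,
                                         int prio_free)
        {
                int prio_wide = 256 - prio_free;  /* 0-256: "wide" vs "free" */
                __u64 temp = bavail >> 1;

                do_div(temp, num_active);         /* temp /= num_active */
                return (temp * prio_wide) >> 8;
        }

The accumulated penalty still decays by half for every 8x the qos_maxage interval a target sits idle, and is dropped entirely once the target has been idle long enough, as in the code above.
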
-/**
- * Calculate weight for a given OST target.
- *
- * The final OST weight is the number of bytes available minus the OST and
- * OSS penalties.  See lod_qos_calc_ppo() for how penalties are calculated.
- *
- * \param[in] lod      LOD device, where OST targets are listed
- * \param[in] i                OST target index
- *
- * \retval             0
- */
-static int lod_qos_calc_weight(struct lod_device *lod, int i)
-{
-       __u64 temp, temp2;
-
-       temp = TGT_BAVAIL(i);
-       temp2 = OST_TGT(lod, i)->ltd_qos.ltq_penalty +
-               OST_TGT(lod, i)->ltd_qos.ltq_svr->lsq_penalty;
-       if (temp < temp2)
-               OST_TGT(lod, i)->ltd_qos.ltq_weight = 0;
-       else
-               OST_TGT(lod, i)->ltd_qos.ltq_weight = temp - temp2;
-       return 0;
-}
-
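
The weight helper is superseded by the generic lu_tgt_qos_weight_calc() used later in this patch. Under the assumption that it mirrors the code removed above, the computation is simply the free space minus both penalties, floored at zero:

        /* illustrative sketch of the weight formula */
        static __u64 qos_weight(__u64 bavail, __u64 tgt_penalty,
                                __u64 svr_penalty)
        {
                __u64 penalty = tgt_penalty + svr_penalty;

                return bavail < penalty ? 0 : bavail - penalty;
        }
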
-/**
- * Re-calculate weights.
- *
- * The function is called when some OST target was used for a new object. In
- * this case we should re-calculate all the weights to keep new allocations
- * balanced well.
- *
- * \param[in] lod      LOD device
- * \param[in] osts     OST pool where a new object was placed
- * \param[in] index    OST target where a new object was placed
- * \param[out] total_wt        new total weight for the pool
- *
- * \retval             0
- */
-static int lod_qos_used(struct lod_device *lod, struct ost_pool *osts,
-                       __u32 index, __u64 *total_wt)
-{
-       struct lod_tgt_desc *ost;
-       struct lu_svr_qos  *oss;
-       unsigned int j;
-       ENTRY;
-
-       ost = OST_TGT(lod,index);
-       LASSERT(ost);
-
-       /* Don't allocate on this devuce anymore, until the next alloc_qos */
-       ost->ltd_qos.ltq_usable = 0;
-
-       oss = ost->ltd_qos.ltq_svr;
-
-       /* Decay old penalty by half (we're adding max penalty, and don't
-          want it to run away.) */
-       ost->ltd_qos.ltq_penalty >>= 1;
-       oss->lsq_penalty >>= 1;
-
-       /* mark the OSS and OST as recently used */
-       ost->ltd_qos.ltq_used = oss->lsq_used = ktime_get_real_seconds();
-
-       /* Set max penalties for this OST and OSS */
-       ost->ltd_qos.ltq_penalty +=
-               ost->ltd_qos.ltq_penalty_per_obj * lod->lod_ostnr;
-       oss->lsq_penalty += oss->lsq_penalty_per_obj *
-               lod->lod_qos.lq_active_svr_count;
-
-       /* Decrease all OSS penalties */
-       list_for_each_entry(oss, &lod->lod_qos.lq_svr_list, lsq_svr_list) {
-               if (oss->lsq_penalty < oss->lsq_penalty_per_obj)
-                       oss->lsq_penalty = 0;
-               else
-                       oss->lsq_penalty -= oss->lsq_penalty_per_obj;
-       }
-
-       *total_wt = 0;
-       /* Decrease all OST penalties */
-       for (j = 0; j < osts->op_count; j++) {
-               int i;
-
-               i = osts->op_array[j];
-               if (!cfs_bitmap_check(lod->lod_ost_bitmap, i))
-                       continue;
-
-               ost = OST_TGT(lod,i);
-               LASSERT(ost);
-
-               if (ost->ltd_qos.ltq_penalty <
-                               ost->ltd_qos.ltq_penalty_per_obj)
-                       ost->ltd_qos.ltq_penalty = 0;
-               else
-                       ost->ltd_qos.ltq_penalty -=
-                               ost->ltd_qos.ltq_penalty_per_obj;
-
-               lod_qos_calc_weight(lod, i);
-
-               /* Recalc the total weight of usable osts */
-               if (ost->ltd_qos.ltq_usable)
-                       *total_wt += ost->ltd_qos.ltq_weight;
-
-               QOS_DEBUG("recalc tgt %d usable=%d avail=%llu"
-                         " ostppo=%llu ostp=%llu ossppo=%llu"
-                         " ossp=%llu wt=%llu\n",
-                         i, ost->ltd_qos.ltq_usable, TGT_BAVAIL(i) >> 10,
-                         ost->ltd_qos.ltq_penalty_per_obj >> 10,
-                         ost->ltd_qos.ltq_penalty >> 10,
-                         ost->ltd_qos.ltq_svr->lsq_penalty_per_obj >> 10,
-                         ost->ltd_qos.ltq_svr->lsq_penalty >> 10,
-                         ost->ltd_qos.ltq_weight >> 10);
-       }
-
-       RETURN(0);
-}
-
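
lod_qos_used() is replaced by ltd_qos_update(), called from the allocation loop below. A hedged sketch of the per-target bookkeeping it performs once a target has been picked; the ltq_* field names come from the code removed above, while the struct and function names here are assumptions:

        static void qos_tgt_used(struct lu_tgt_qos *ltq, __u32 tgt_count)
        {
                ltq->ltq_usable = 0;     /* skip it until the next QoS pass */
                ltq->ltq_penalty >>= 1;  /* decay the old penalty by half */
                ltq->ltq_used = ktime_get_real_seconds();
                /* charge the full per-object penalty for this allocation */
                ltq->ltq_penalty += ltq->ltq_penalty_per_obj * tgt_count;
        }

The server side does the same with lsq_penalty, and every other target's penalty is then reduced by one penalty_per_obj before the total weight is recomputed.
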
 #define LOV_QOS_EMPTY ((__u32)-1)
 
 /**
@@ -452,7 +214,7 @@ static int lod_qos_used(struct lod_device *lod, struct ost_pool *osts,
  * \retval 0           on success
  * \retval -ENOMEM     fails to allocate the array
  */
-static int lod_qos_calc_rr(struct lod_device *lod, struct ost_pool *src_pool,
+static int lod_qos_calc_rr(struct lod_device *lod, struct lu_tgt_pool *src_pool,
                           struct lu_qos_rr *lqr)
 {
        struct lu_svr_qos  *oss;
@@ -468,7 +230,7 @@ static int lod_qos_calc_rr(struct lod_device *lod, struct ost_pool *src_pool,
        }
 
        /* Do actual allocation. */
-       down_write(&lod->lod_qos.lq_rw_sem);
+       down_write(&lod->lod_ost_descs.ltd_qos.lq_rw_sem);
 
        /*
         * Check again. While we were sleeping on @lq_rw_sem something could
@@ -476,7 +238,7 @@ static int lod_qos_calc_rr(struct lod_device *lod, struct ost_pool *src_pool,
         */
        if (!lqr->lqr_dirty) {
                LASSERT(lqr->lqr_pool.op_size);
-               up_write(&lod->lod_qos.lq_rw_sem);
+               up_write(&lod->lod_ost_descs.ltd_qos.lq_rw_sem);
                RETURN(0);
        }
 
@@ -489,7 +251,7 @@ static int lod_qos_calc_rr(struct lod_device *lod, struct ost_pool *src_pool,
        lqr->lqr_pool.op_count = real_count;
        rc = lod_ost_pool_extend(&lqr->lqr_pool, real_count);
        if (rc) {
-               up_write(&lod->lod_qos.lq_rw_sem);
+               up_write(&lod->lod_ost_descs.ltd_qos.lq_rw_sem);
                RETURN(rc);
        }
        for (i = 0; i < lqr->lqr_pool.op_count; i++)
@@ -497,7 +259,8 @@ static int lod_qos_calc_rr(struct lod_device *lod, struct ost_pool *src_pool,
 
        /* Place all the OSTs from 1 OSS at the same time. */
        placed = 0;
-       list_for_each_entry(oss, &lod->lod_qos.lq_svr_list, lsq_svr_list) {
+       list_for_each_entry(oss, &lod->lod_ost_descs.ltd_qos.lq_svr_list,
+                           lsq_svr_list) {
                int j = 0;
 
                for (i = 0; i < lqr->lqr_pool.op_count; i++) {
@@ -508,7 +271,7 @@ static int lod_qos_calc_rr(struct lod_device *lod, struct ost_pool *src_pool,
                                continue;
 
                        ost = OST_TGT(lod,src_pool->op_array[i]);
-                       LASSERT(ost && ost->ltd_ost);
+                       LASSERT(ost && ost->ltd_tgt);
                        if (ost->ltd_qos.ltq_svr != oss)
                                continue;
 
@@ -524,7 +287,7 @@ static int lod_qos_calc_rr(struct lod_device *lod, struct ost_pool *src_pool,
        }
 
        lqr->lqr_dirty = 0;
-       up_write(&lod->lod_qos.lq_rw_sem);
+       up_write(&lod->lod_ost_descs.ltd_qos.lq_rw_sem);
 
        if (placed != real_count) {
                /* This should never happen */
@@ -580,12 +343,12 @@ static struct dt_object *lod_qos_declare_object_on(const struct lu_env *env,
        ENTRY;
 
        LASSERT(d);
-       LASSERT(ost_idx < d->lod_osts_size);
+       LASSERT(ost_idx < d->lod_ost_descs.ltd_tgts_size);
        ost = OST_TGT(d,ost_idx);
        LASSERT(ost);
-       LASSERT(ost->ltd_ost);
+       LASSERT(ost->ltd_tgt);
 
-       nd = &ost->ltd_ost->dd_lu_dev;
+       nd = &ost->ltd_tgt->dd_lu_dev;
 
        /*
         * allocate anonymous object with zero fid, real fid
@@ -831,7 +594,7 @@ static int lod_check_and_reserve_ost(const struct lu_env *env,
        int rc;
        ENTRY;
 
-       rc = lod_statfs_and_check(env, lod, ost_idx, sfs);
+       rc = lod_statfs_and_check(env, lod, &lod->lod_ost_descs, ost_idx, sfs);
        if (rc)
                RETURN(rc);
 
@@ -936,9 +699,9 @@ static int lod_alloc_rr(const struct lu_env *env, struct lod_object *lo,
        struct lod_device *m = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev);
        struct obd_statfs *sfs = &lod_env_info(env)->lti_osfs;
        struct pool_desc  *pool = NULL;
-       struct ost_pool   *osts;
+       struct lu_tgt_pool *osts;
        struct lu_qos_rr *lqr;
-       unsigned int    i, array_idx;
+       unsigned int i, array_idx;
        __u32 ost_start_idx_temp;
        __u32 stripe_idx = 0;
        __u32 stripe_count, stripe_count_min, ost_idx;
@@ -960,8 +723,8 @@ static int lod_alloc_rr(const struct lu_env *env, struct lod_object *lo,
                osts = &(pool->pool_obds);
                lqr = &(pool->pool_rr);
        } else {
-               osts = &(m->lod_pool_info);
-               lqr = &(m->lod_qos.lq_rr);
+               osts = &m->lod_ost_descs.ltd_tgt_pool;
+               lqr = &(m->lod_ost_descs.ltd_qos.lq_rr);
        }
 
        rc = lod_qos_calc_rr(m, osts, lqr);
@@ -972,7 +735,7 @@ static int lod_alloc_rr(const struct lu_env *env, struct lod_object *lo,
        if (rc)
                GOTO(out, rc);
 
-       down_read(&m->lod_qos.lq_rw_sem);
+       down_read(&m->lod_ost_descs.ltd_qos.lq_rw_sem);
        spin_lock(&lqr->lqr_alloc);
        if (--lqr->lqr_start_count <= 0) {
                lqr->lqr_start_idx = prandom_u32_max(osts->op_count);
@@ -1041,7 +804,7 @@ repeat_find:
        }
 
        spin_unlock(&lqr->lqr_alloc);
-       up_read(&m->lod_qos.lq_rw_sem);
+       up_read(&m->lod_ost_descs.ltd_qos.lq_rw_sem);
 
        /* If there are enough OSTs, a component with overstriping requested
         * will not actually end up overstriped.  The comp should reflect this.
@@ -1156,7 +919,8 @@ static int lod_alloc_ost_list(const struct lu_env *env, struct lod_object *lo,
                        break;
                }
 
-               rc = lod_statfs_and_check(env, m, ost_idx, sfs);
+               rc = lod_statfs_and_check(env, m, &m->lod_ost_descs, ost_idx,
+                                         sfs);
                if (rc < 0) /* this OSP doesn't feel well */
                        break;
 
@@ -1219,8 +983,8 @@ static int lod_alloc_specific(const struct lu_env *env, struct lod_object *lo,
        unsigned int i, array_idx, ost_count;
        int rc, stripe_num = 0;
        int speed = 0;
-       struct pool_desc  *pool = NULL;
-       struct ost_pool   *osts;
+       struct pool_desc *pool = NULL;
+       struct lu_tgt_pool *osts;
        int stripes_per_ost = 1;
        bool overstriped = false;
        ENTRY;
@@ -1239,7 +1003,7 @@ static int lod_alloc_specific(const struct lu_env *env, struct lod_object *lo,
                down_read(&pool_tgt_rw_sem(pool));
                osts = &(pool->pool_obds);
        } else {
-               osts = &(m->lod_pool_info);
+               osts = &m->lod_ost_descs.ltd_tgt_pool;
        }
 
        ost_count = osts->op_count;
@@ -1300,7 +1064,8 @@ repeat_find:
                 * start OST, then it can be skipped, otherwise skip it only
                 * if it is inactive/recovering/out-of-space." */
 
-               rc = lod_statfs_and_check(env, m, ost_idx, sfs);
+               rc = lod_statfs_and_check(env, m, &m->lod_ost_descs, ost_idx,
+                                         sfs);
                if (rc) {
                        /* this OSP doesn't feel well */
                        continue;
@@ -1365,36 +1130,6 @@ out:
 }
 
 /**
- * Check whether QoS allocation should be used.
- *
- * A simple helper to decide when QoS allocation should be used:
- * if it's just a single available target or the used space is
- * evenly distributed among the targets at the moment, then QoS
- * allocation algorithm should not be used.
- *
- * \param[in] lod      LOD device
- *
- * \retval 0           should not be used
- * \retval 1           should be used
- */
-static inline int lod_qos_is_usable(struct lod_device *lod)
-{
-#ifdef FORCE_QOS
-       /* to be able to debug QoS code */
-       return 1;
-#endif
-
-       /* Detect -EAGAIN early, before expensive lock is taken. */
-       if (!lod->lod_qos.lq_dirty && lod->lod_qos.lq_same_space)
-               return 0;
-
-       if (lod->lod_desc.ld_active_tgt_count < 2)
-               return 0;
-
-       return 1;
-}
-
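
The open-coded check is replaced by ltd_qos_is_usable() on the generic descriptor table (see the calls below). A minimal sketch, assuming it keeps the logic of the helper removed above and ignoring the FORCE_QOS debug case:

        static bool qos_is_usable(struct lu_tgt_descs *ltd)
        {
                /* detect -EAGAIN early: space is already evenly used */
                if (!ltd->ltd_qos.lq_dirty && ltd->ltd_qos.lq_same_space)
                        return false;
                /* fewer than two active targets: nothing to balance */
                if (ltd->ltd_lov_desc.ld_active_tgt_count < 2)
                        return false;
                return true;
        }
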
-/**
  * Allocate a striping using an algorithm with weights.
  *
  * The function allocates OST objects to create a striping. The algorithm
@@ -1441,7 +1176,7 @@ static int lod_alloc_qos(const struct lu_env *env, struct lod_object *lo,
        struct dt_object *o;
        __u64 total_weight = 0;
        struct pool_desc *pool = NULL;
-       struct ost_pool *osts;
+       struct lu_tgt_pool *osts;
        unsigned int i;
        __u32 nfound, good_osts, stripe_count, stripe_count_min;
        bool overstriped = false;
@@ -1463,11 +1198,11 @@ static int lod_alloc_qos(const struct lu_env *env, struct lod_object *lo,
                down_read(&pool_tgt_rw_sem(pool));
                osts = &(pool->pool_obds);
        } else {
-               osts = &(lod->lod_pool_info);
+               osts = &lod->lod_ost_descs.ltd_tgt_pool;
        }
 
        /* Detect -EAGAIN early, before expensive lock is taken. */
-       if (!lod_qos_is_usable(lod))
+       if (!ltd_qos_is_usable(&lod->lod_ost_descs))
                GOTO(out_nolock, rc = -EAGAIN);
 
        if (lod_comp->llc_pattern & LOV_PATTERN_OVERSTRIPING)
@@ -1475,16 +1210,16 @@ static int lod_alloc_qos(const struct lu_env *env, struct lod_object *lo,
                        (lod_comp->llc_stripe_count - 1)/osts->op_count + 1;
 
        /* Do actual allocation, use write lock here. */
-       down_write(&lod->lod_qos.lq_rw_sem);
+       down_write(&lod->lod_ost_descs.ltd_qos.lq_rw_sem);
 
        /*
         * Check again, while we were sleeping on @lq_rw_sem things could
         * change.
         */
-       if (!lod_qos_is_usable(lod))
+       if (!ltd_qos_is_usable(&lod->lod_ost_descs))
                GOTO(out, rc = -EAGAIN);
 
-       rc = lod_qos_calc_ppo(lod);
+       rc = ltd_qos_penalties_calc(&lod->lod_ost_descs);
        if (rc)
                GOTO(out, rc);
 
@@ -1501,7 +1236,8 @@ static int lod_alloc_qos(const struct lu_env *env, struct lod_object *lo,
                ost = OST_TGT(lod, osts->op_array[i]);
                ost->ltd_qos.ltq_usable = 0;
 
-               rc = lod_statfs_and_check(env, lod, osts->op_array[i], sfs);
+               rc = lod_statfs_and_check(env, lod, &lod->lod_ost_descs,
+                                         osts->op_array[i], sfs);
                if (rc) {
                        /* this OSP doesn't feel well */
                        continue;
@@ -1517,7 +1253,7 @@ static int lod_alloc_qos(const struct lu_env *env, struct lod_object *lo,
                        continue;
 
                ost->ltd_qos.ltq_usable = 1;
-               lod_qos_calc_weight(lod, osts->op_array[i]);
+               lu_tgt_qos_weight_calc(ost);
                total_weight += ost->ltd_qos.ltq_weight;
 
                good_osts++;
@@ -1594,7 +1330,7 @@ static int lod_alloc_qos(const struct lu_env *env, struct lod_object *lo,
                        lod_qos_ost_in_use(env, nfound, idx);
                        stripe[nfound] = o;
                        ost_indices[nfound] = idx;
-                       lod_qos_used(lod, osts, idx, &total_weight);
+                       ltd_qos_update(&lod->lod_ost_descs, ost, &total_weight);
                        nfound++;
                        rc = 0;
                        break;
@@ -1623,8 +1359,8 @@ static int lod_alloc_qos(const struct lu_env *env, struct lod_object *lo,
                }
 
                /* makes sense to rebalance next time */
-               lod->lod_qos.lq_dirty = 1;
-               lod->lod_qos.lq_same_space = 0;
+               lod->lod_ost_descs.ltd_qos.lq_dirty = 1;
+               lod->lod_ost_descs.ltd_qos.lq_same_space = 0;
 
                rc = -EAGAIN;
        }
@@ -1636,7 +1372,7 @@ static int lod_alloc_qos(const struct lu_env *env, struct lod_object *lo,
                lod_comp->llc_pattern &= ~LOV_PATTERN_OVERSTRIPING;
 
 out:
-       up_write(&lod->lod_qos.lq_rw_sem);
+       up_write(&lod->lod_ost_descs.ltd_qos.lq_rw_sem);
 
 out_nolock:
        if (pool != NULL) {
@@ -1675,12 +1411,16 @@ __u16 lod_get_stripe_count(struct lod_device *lod, struct lod_object *lo,
 
 
        if (!stripe_count)
-               stripe_count = lod->lod_desc.ld_default_stripe_count;
+               stripe_count =
+                       lod->lod_ost_descs.ltd_lov_desc.ld_default_stripe_count;
        if (!stripe_count)
                stripe_count = 1;
        /* Overstriping allows more stripes than targets */
-       if (stripe_count > lod->lod_desc.ld_active_tgt_count && !overstriping)
-               stripe_count = lod->lod_desc.ld_active_tgt_count;
+       if (stripe_count >
+               lod->lod_ost_descs.ltd_lov_desc.ld_active_tgt_count &&
+           !overstriping)
+               stripe_count =
+                       lod->lod_ost_descs.ltd_lov_desc.ld_active_tgt_count;
 
        if (lo->ldo_is_composite) {
                struct lod_layout_component *lod_comp;
@@ -1896,7 +1636,7 @@ int lod_qos_parse_config(const struct lu_env *env, struct lod_object *lo,
 {
        struct lod_layout_component *lod_comp;
        struct lod_device *d = lu2lod_dev(lod2lu_obj(lo)->lo_dev);
-       struct lov_desc *desc = &d->lod_desc;
+       struct lov_desc *desc = &d->lod_ost_descs.ltd_lov_desc;
        struct lov_user_md_v1 *v1 = NULL;
        struct lov_user_md_v3 *v3 = NULL;
        struct lov_comp_md_v1 *comp_v1 = NULL;
@@ -2112,16 +1852,16 @@ free_comp:
 int lod_prepare_avoidance(const struct lu_env *env, struct lod_object *lo)
 {
        struct lod_device *lod = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev);
-       struct lod_tgt_descs *ltds = &lod->lod_ost_descs;
        struct lod_avoid_guide *lag = &lod_env_info(env)->lti_avoid;
        struct cfs_bitmap *bitmap = NULL;
        __u32 *new_oss = NULL;
 
-       lag->lag_ost_avail = ltds->ltd_tgtnr;
+       lag->lag_ost_avail = lod->lod_ost_count;
 
        /* reset OSS avoid guide array */
        lag->lag_oaa_count = 0;
-       if (lag->lag_oss_avoid_array && lag->lag_oaa_size < ltds->ltd_tgtnr) {
+       if (lag->lag_oss_avoid_array &&
+           lag->lag_oaa_size < lod->lod_ost_count) {
                OBD_FREE(lag->lag_oss_avoid_array,
                         sizeof(__u32) * lag->lag_oaa_size);
                lag->lag_oss_avoid_array = NULL;
@@ -2130,7 +1870,7 @@ int lod_prepare_avoidance(const struct lu_env *env, struct lod_object *lo)
 
        /* init OST avoid guide bitmap */
        if (lag->lag_ost_avoid_bitmap) {
-               if (ltds->ltd_tgtnr <= lag->lag_ost_avoid_bitmap->size) {
+               if (lod->lod_ost_count <= lag->lag_ost_avoid_bitmap->size) {
                        CFS_RESET_BITMAP(lag->lag_ost_avoid_bitmap);
                } else {
                        CFS_FREE_BITMAP(lag->lag_ost_avoid_bitmap);
@@ -2139,7 +1879,7 @@ int lod_prepare_avoidance(const struct lu_env *env, struct lod_object *lo)
        }
 
        if (!lag->lag_ost_avoid_bitmap) {
-               bitmap = CFS_ALLOCATE_BITMAP(ltds->ltd_tgtnr);
+               bitmap = CFS_ALLOCATE_BITMAP(lod->lod_ost_count);
                if (!bitmap)
                        return -ENOMEM;
        }
@@ -2151,7 +1891,7 @@ int lod_prepare_avoidance(const struct lu_env *env, struct lod_object *lo)
                 * using OST count to allocate the array to store the OSS
                 * id.
                 */
-               OBD_ALLOC(new_oss, sizeof(*new_oss) * ltds->ltd_tgtnr);
+               OBD_ALLOC(new_oss, sizeof(*new_oss) * lod->lod_ost_count);
                if (!new_oss) {
                        CFS_FREE_BITMAP(bitmap);
                        return -ENOMEM;
@@ -2160,7 +1900,7 @@ int lod_prepare_avoidance(const struct lu_env *env, struct lod_object *lo)
 
        if (new_oss) {
                lag->lag_oss_avoid_array = new_oss;
-               lag->lag_oaa_size = ltds->ltd_tgtnr;
+               lag->lag_oaa_size = lod->lod_ost_count;
        }
        if (bitmap)
                lag->lag_ost_avoid_bitmap = bitmap;
@@ -2320,7 +2060,7 @@ int lod_qos_prep_create(const struct lu_env *env, struct lod_object *lo,
                lod_getref(&d->lod_ost_descs);
                /* XXX: support for non-0 files w/o objects */
                CDEBUG(D_OTHER, "tgt_count %d stripe_count %d\n",
-                               d->lod_desc.ld_tgt_count, stripe_len);
+                      d->lod_ost_count, stripe_len);
 
                if (lod_comp->llc_ostlist.op_array &&
                    lod_comp->llc_ostlist.op_count) {
@@ -2414,7 +2154,7 @@ int lod_prepare_create(const struct lu_env *env, struct lod_object *lo,
        /* no OST available */
        /* XXX: should we be waiting a bit to prevent failures during
         * cluster initialization? */
-       if (d->lod_ostnr == 0)
+       if (!d->lod_ost_count)
                RETURN(-EIO);
 
        /*