+/* #define QOS_DEBUG 1 */
+#define D_QOS D_OTHER
+
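+/* Free space in bytes on OST @i: available blocks times block size,
+ taken from the most recently cached statfs data for that target */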
+#define TGT_BAVAIL(i) (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bavail *\
+ lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bsize)
+
+
+int qos_add_tgt(struct obd_device *obd, __u32 index)
+{
+ struct lov_obd *lov = &obd->u.lov;
+ struct lov_qos_oss *oss, *temposs;
+ struct obd_export *exp = lov->lov_tgts[index]->ltd_exp;
+ int rc = 0, found = 0;
+ ENTRY;
+
+ /* We only need this QOS struct on the MDT, not on clients - but we
+ * may not have registered the LOV's observer yet, so there is no
+ * way to tell here which one we are */
+ if (!exp || !exp->exp_connection) {
+ CERROR("Missing connection\n");
+ RETURN(-ENOTCONN);
+ }
+
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_mutex_lock(&lov->lov_lock);
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ if (obd_uuid_equals(&oss->lqo_uuid,
+ &exp->exp_connection->c_remote_uuid)) {
+ found++;
+ break;
+ }
+ }
+
+ if (!found) {
+ OBD_ALLOC_PTR(oss);
+ if (!oss)
+ GOTO(out, rc = -ENOMEM);
+ memcpy(&oss->lqo_uuid,
+ &exp->exp_connection->c_remote_uuid,
+ sizeof(oss->lqo_uuid));
+ } else {
+ /* The OST count for this OSS is about to change, so unlink
+ it and re-insert it in sorted order below */
+ cfs_list_del(&oss->lqo_oss_list);
+ }
+
+ oss->lqo_ost_count++;
+ lov->lov_tgts[index]->ltd_qos.ltq_oss = oss;
+
+ /* Keep the list sorted by OST count, largest first. Find the
+ first entry that we're bigger than... */
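+ /* e.g. with existing OSSs holding 5, 3, and 2 OSTs, an OSS that
+ has just reached 4 OSTs is re-inserted ahead of the 3-OST entry */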
+ cfs_list_for_each_entry(temposs, &lov->lov_qos.lq_oss_list,
+ lqo_oss_list) {
+ if (oss->lqo_ost_count > temposs->lqo_ost_count)
+ break;
+ }
+ /* ...and insert before it. If the list is empty or we're the
+ smallest, the loop runs off the end, temposs holds the list
+ head, and we add at the tail. */
+ cfs_list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);
+
+ lov->lov_qos.lq_dirty = 1;
+ lov->lov_qos.lq_rr.lqr_dirty = 1;
+
+ CDEBUG(D_QOS, "add tgt %s to OSS %s (%d OSTs)\n",
+ obd_uuid2str(&lov->lov_tgts[index]->ltd_uuid),
+ obd_uuid2str(&oss->lqo_uuid),
+ oss->lqo_ost_count);
+
+out:
+ cfs_mutex_unlock(&lov->lov_lock);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
+ RETURN(rc);
+}
+
+int qos_del_tgt(struct obd_device *obd, struct lov_tgt_desc *tgt)
+{
+ struct lov_obd *lov = &obd->u.lov;
+ struct lov_qos_oss *oss;
+ int rc = 0;
+ ENTRY;
+
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
+
+ oss = tgt->ltd_qos.ltq_oss;
+ if (!oss)
+ GOTO(out, rc = -ENOENT);
+
+ oss->lqo_ost_count--;
+ if (oss->lqo_ost_count == 0) {
+ CDEBUG(D_QOS, "removing OSS %s\n",
+ obd_uuid2str(&oss->lqo_uuid));
+ cfs_list_del(&oss->lqo_oss_list);
+ OBD_FREE_PTR(oss);
+ }
+ /* Clear the back-pointer so a stale target reference can't
+ reach a freed OSS struct */
+ tgt->ltd_qos.ltq_oss = NULL;
+
+ lov->lov_qos.lq_dirty = 1;
+ lov->lov_qos.lq_rr.lqr_dirty = 1;
+out:
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
+ RETURN(rc);
+}
+
+/* Recalculate the per-object penalties for OSSs and OSTs; these
+ depend on the amount of free space on each OST within each OSS */
+static int qos_calc_ppo(struct obd_device *obd)
+{
+ struct lov_obd *lov = &obd->u.lov;
+ struct lov_qos_oss *oss;
+ __u64 ba_max, ba_min, temp;
+ __u32 num_active;
+ int rc, i, prio_wide;
+ time_t now, age;
+ ENTRY;
+
+ if (!lov->lov_qos.lq_dirty)
+ GOTO(out, rc = 0);
+
+ /* The per-OST penalty below divides by (active targets - 1), so
+ QOS needs at least two active targets. num_active is unsigned,
+ so compare before subtracting to avoid wrapping at zero. */
+ if (lov->desc.ld_active_tgt_count < 2)
+ GOTO(out, rc = -EAGAIN);
+ num_active = lov->desc.ld_active_tgt_count - 1;
+
+ /* find bavail on each OSS */
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ oss->lqo_bavail = 0;
+ }
+ lov->lov_qos.lq_active_oss_count = 0;
+
+ /* prio_wide measures how strongly the user prefers OSTs to be
+ selected "widely" (not recently chosen, and not on recently used
+ OSSs) as opposed to "freely" (purely by available free space).
+ Range 0-256. */
+ prio_wide = 256 - lov->lov_qos.lq_prio_free;
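+ /* e.g. lq_prio_free = 192 (75% emphasis on free space) gives
+ prio_wide = 64, scaling each penalty below to 64/256 = 1/4 of
+ its full value */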
+
+ ba_min = (__u64)(-1);
+ ba_max = 0;
+ now = cfs_time_current_sec();
+ /* Calculate OST penalty per object */
+ /* (lov ref taken in alloc_qos) */
+ for (i = 0; i < lov->desc.ld_tgt_count; i++) {
+ if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
+ continue;
+ temp = TGT_BAVAIL(i);
+ if (!temp)
+ continue;
+ ba_min = min(temp, ba_min);
+ ba_max = max(temp, ba_max);
+
+ /* Count each usable OSS exactly once: the first active OST
+ on an OSS still sees lqo_bavail == 0 */
+ if (lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail == 0)
+ lov->lov_qos.lq_active_oss_count++;
+ lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail += temp;
+
+ /* per-OST penalty is prio * TGT_bavail / (num_ost - 1) / 2 */
+ temp >>= 1;
+ lov_do_div64(temp, num_active);
+ lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj =
+ (temp * prio_wide) >> 8;
+
+ age = (now - lov->lov_tgts[i]->ltd_qos.ltq_used) >> 3;
+ if (lov->lov_qos.lq_reset || age > 32 * lov->desc.ld_qos_maxage)
+ lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
+ else if (age > lov->desc.ld_qos_maxage)
+ /* Decay the penalty by half for every 8x the update
+ * interval that the device has been idle. That gives
+ * lots of time for the statfs information to be
+ * updated (which the penalty is only a proxy for),
+ * and avoids penalizing OSS/OSTs under light load. */
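+ /* e.g. with ld_qos_maxage = 5, an OST idle for 80 seconds has
+ * age (80 >> 3) = 10 and its penalty is shifted right by
+ * 10 / 5 = 2, i.e. divided by 4 */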
+ lov->lov_tgts[i]->ltd_qos.ltq_penalty >>=
+ (age / lov->desc.ld_qos_maxage);
+ }
+
+ /* As above, compare before subtracting: num_active is unsigned */
+ num_active = lov->lov_qos.lq_active_oss_count - 1;
+ if (lov->lov_qos.lq_active_oss_count < 2) {
+ /* If there's only 1 OSS, we can't penalize it, so instead
+ we have to double the OST penalty */
+ num_active = 1;
+ for (i = 0; i < lov->desc.ld_tgt_count; i++) {
+ if (lov->lov_tgts[i] == NULL)
+ continue;
+ lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj <<= 1;
+ }
+ }
+
+ /* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ temp = oss->lqo_bavail >> 1;
+ lov_do_div64(temp, oss->lqo_ost_count * num_active);
+ oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8;
+
+ age = (now - oss->lqo_used) >> 3;
+ if (lov->lov_qos.lq_reset || age > 32 * lov->desc.ld_qos_maxage)
+ oss->lqo_penalty = 0;
+ else if (age > lov->desc.ld_qos_maxage)
+ /* Decay the penalty by half for every 8x the update
+ * interval that the device has been idle. That gives
+ * lots of time for the statfs information to be
+ * updated (which the penalty is only a proxy for),
+ * and avoids penalizing OSS/OSTs under light load. */
+ oss->lqo_penalty >>= (age / lov->desc.ld_qos_maxage);
+ }
+
+ lov->lov_qos.lq_dirty = 0;
+ lov->lov_qos.lq_reset = 0;
+
+ /* If all OSTs have nearly the same amount of free space, use
+ * round-robin allocation for better object creation performance */
+ lov->lov_qos.lq_same_space = 0;
+ if ((ba_max * (256 - lov->lov_qos.lq_threshold_rr)) >> 8 < ba_min) {
+ lov->lov_qos.lq_same_space = 1;
+ /* Reset weights for the next time we enter qos mode */
+ lov->lov_qos.lq_reset = 1;
+ }
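+ /* e.g. with lq_threshold_rr = 26 (~10% of 256), RR mode is used
+ whenever ba_min >= ba_max * 230/256, i.e. the OST with the
+ least free space still has at least ~90% as much free space
+ as the one with the most */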
+ rc = 0;
+
+out:
+ if (!rc && lov->lov_qos.lq_same_space)
+ RETURN(-EAGAIN);
+ RETURN(rc);
+}
+
+static int qos_calc_weight(struct lov_obd *lov, int i)
+{
+ __u64 temp, temp2;
+
+ /* Final ost weight = TGT_BAVAIL - ost_penalty - oss_penalty */
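+ /* e.g. 100 GiB free with a 10 GiB OST penalty and a 30 GiB OSS
+ penalty yields a weight of 60 GiB; if the penalties exceed the
+ free space, the weight clamps to 0 and the OST is skipped until
+ they decay */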
+ temp = TGT_BAVAIL(i);
+ temp2 = lov->lov_tgts[i]->ltd_qos.ltq_penalty +
+ lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty;
+ if (temp < temp2)
+ lov->lov_tgts[i]->ltd_qos.ltq_weight = 0;
+ else
+ lov->lov_tgts[i]->ltd_qos.ltq_weight = temp - temp2;
+ return 0;
+}
+
+/* We just used this index for a stripe; adjust everyone's weights */
+static int qos_used(struct lov_obd *lov, struct ost_pool *osts,
+ __u32 index, __u64 *total_wt)
+{
+ struct lov_qos_oss *oss;
+ int j;
+ ENTRY;
+
+ /* Don't allocate on this OST again until the next alloc_qos pass */
+ lov->lov_tgts[index]->ltd_qos.ltq_usable = 0;
+
+ oss = lov->lov_tgts[index]->ltd_qos.ltq_oss;
+
+ /* Decay the old penalty by half, since we're about to add the
+ maximum penalty below and don't want it to run away */
+ lov->lov_tgts[index]->ltd_qos.ltq_penalty >>= 1;
+ oss->lqo_penalty >>= 1;
+
+ /* mark the OSS and OST as recently used */
+ lov->lov_tgts[index]->ltd_qos.ltq_used =
+ oss->lqo_used = cfs_time_current_sec();
+
+ /* Set max penalties for this OST and OSS */
+ lov->lov_tgts[index]->ltd_qos.ltq_penalty +=
+ lov->lov_tgts[index]->ltd_qos.ltq_penalty_per_obj *
+ lov->desc.ld_active_tgt_count;
+ oss->lqo_penalty += oss->lqo_penalty_per_obj *
+ lov->lov_qos.lq_active_oss_count;
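+ /* Since the penalty drops by penalty_per_obj on each subsequent
+ allocation, a just-used OST needs roughly one full round of
+ allocations across all active targets before its weight fully
+ recovers */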
+
+ /* Decrease all OSS penalties */
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ if (oss->lqo_penalty < oss->lqo_penalty_per_obj)
+ oss->lqo_penalty = 0;
+ else
+ oss->lqo_penalty -= oss->lqo_penalty_per_obj;
+ }
+
+ *total_wt = 0;
+ /* Decrease all OST penalties */
+ for (j = 0; j < osts->op_count; j++) {
+ int i;
+
+ i = osts->op_array[j];
+ if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
+ continue;
+ if (lov->lov_tgts[i]->ltd_qos.ltq_penalty <
+ lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj)
+ lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
+ else
+ lov->lov_tgts[i]->ltd_qos.ltq_penalty -=
+ lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj;
+
+ qos_calc_weight(lov, i);
+
+ /* Recalc the total weight of usable osts */
+ if (lov->lov_tgts[i]->ltd_qos.ltq_usable)
+ *total_wt += lov->lov_tgts[i]->ltd_qos.ltq_weight;
+
+#ifdef QOS_DEBUG
+ CDEBUG(D_QOS, "recalc tgt %d usable=%d avail="LPU64
+ " ostppo="LPU64" ostp="LPU64" ossppo="LPU64
+ " ossp="LPU64" wt="LPU64"\n",
+ i, lov->lov_tgts[i]->ltd_qos.ltq_usable,
+ TGT_BAVAIL(i) >> 10,
+ lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj >> 10,
+ lov->lov_tgts[i]->ltd_qos.ltq_penalty >> 10,
+ lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty_per_obj>>10,
+ lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty >> 10,
+ lov->lov_tgts[i]->ltd_qos.ltq_weight >> 10);
+#endif
+ }
+
+ RETURN(0);
+}
+
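+/* Marker for a round-robin array slot that has not been filled yet */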
+#define LOV_QOS_EMPTY ((__u32)-1)
+/* compute optimal round-robin order, based on OSTs per OSS */
+static int qos_calc_rr(struct lov_obd *lov, struct ost_pool *src_pool,
+ struct lov_qos_rr *lqr)
+{
+ struct lov_qos_oss *oss;
+ unsigned placed, real_count;
+ int i, rc;
+ ENTRY;
+
+ if (!lqr->lqr_dirty) {
+ LASSERT(lqr->lqr_pool.op_size);
+ RETURN(0);
+ }
+
+ /* Take the write lock before (re)building the RR array. */
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
+
+ /*
+ * Check again. While we were sleeping on @lq_rw_sem something could
+ * change.
+ */
+ if (!lqr->lqr_dirty) {
+ LASSERT(lqr->lqr_pool.op_size);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
+ RETURN(0);
+ }
+
+ real_count = src_pool->op_count;
+
+ /* Mark every slot in the pool array empty */
+ /* alloc_rr holds a read lock on the pool, so nobody is adding to
+ or deleting from the pool. The lq_rw_sem ensures that nobody
+ else is reading. */
+ lqr->lqr_pool.op_count = real_count;
+ rc = lov_ost_pool_extend(&lqr->lqr_pool, real_count);
+ if (rc) {
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
+ RETURN(rc);
+ }
+ for (i = 0; i < lqr->lqr_pool.op_count; i++)
+ lqr->lqr_pool.op_array[i] = LOV_QOS_EMPTY;
+
+ /* Place all the OSTs from 1 OSS at the same time. */
+ placed = 0;
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ int j = 0;
+ for (i = 0; i < lqr->lqr_pool.op_count; i++) {
+ if (lov->lov_tgts[src_pool->op_array[i]] &&
+ (lov->lov_tgts[src_pool->op_array[i]]->ltd_qos.ltq_oss == oss)) {
+ /* Evenly space this OSS's OSTs across the array */
+ int next = j * lqr->lqr_pool.op_count / oss->lqo_ost_count;
+ while (lqr->lqr_pool.op_array[next] !=
+ LOV_QOS_EMPTY)
+ next = (next + 1) % lqr->lqr_pool.op_count;
+ lqr->lqr_pool.op_array[next] = src_pool->op_array[i];
+ j++;
+ placed++;
+ }
+ }
+ }
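+ /* e.g. two OSSs with three OSTs each in a six-slot array: the
+ first OSS takes slots 0, 2, 4; the second aims at the same
+ slots, collides, and is bumped to 1, 3, 5, giving the
+ interleaving A B A B A B */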
+
+ lqr->lqr_dirty = 0;
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
+
+ if (placed != real_count) {
+ /* This should never happen */
+ LCONSOLE_ERROR_MSG(0x14e, "Failed to place all OSTs in the "
+ "round-robin list (%d of %d).\n",
+ placed, real_count);
+ for (i = 0; i < lqr->lqr_pool.op_count; i++) {
+ LCONSOLE(D_WARNING, "rr #%d ost idx=%d\n", i,
+ lqr->lqr_pool.op_array[i]);
+ }
+ lqr->lqr_dirty = 1;
+ RETURN(-EAGAIN);
+ }
+
+#ifdef QOS_DEBUG
+ for (i = 0; i < lqr->lqr_pool.op_count; i++) {
+ LCONSOLE(D_QOS, "rr #%d ost idx=%d\n", i,
+ lqr->lqr_pool.op_array[i]);
+ }
+#endif
+
+ RETURN(0);
+}
+
+