/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_LOV
#ifdef __KERNEL__
#include <libcfs/libcfs.h>
#else
#include <liblustre.h>
#endif

#include <obd_class.h>
#include <obd_lov.h>
#include "lov_internal.h"

/* #define QOS_DEBUG 1 */
#define D_QOS D_OTHER

#define TGT_BAVAIL(i) (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bavail *\
                       lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bsize)
#define TGT_FFREE(i)  (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_ffree)
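
/* Note: os_bavail from statfs is a count of free blocks, so TGT_BAVAIL()
 * converts it to free bytes by multiplying by the block size, while
 * TGT_FFREE() is simply the free inode count on the target. */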

int qos_add_tgt(struct obd_device *obd, __u32 index)
{
        struct lov_obd *lov = &obd->u.lov;
        struct lov_qos_oss *oss, *temposs;
        struct obd_export *exp = lov->lov_tgts[index]->ltd_exp;
        int rc = 0, found = 0;
        ENTRY;

        /* We only need this QOS struct on MDT, not clients - but we may not
         * have registered the LOV's observer yet, so there's no way to know */
        if (!exp || !exp->exp_connection) {
                CERROR("Missing connection\n");
                RETURN(-ENOTCONN);
        }

        down_write(&lov->lov_qos.lq_rw_sem);
        mutex_down(&lov->lov_lock);
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                if (obd_uuid_equals(&oss->lqo_uuid,
                                    &exp->exp_connection->c_remote_uuid)) {
                        found = 1;
                        break;
                }
        }

        if (!found) {
                OBD_ALLOC(oss, sizeof(*oss));
                if (!oss)
                        GOTO(out, rc = -ENOMEM);
                memcpy(&oss->lqo_uuid,
                       &exp->exp_connection->c_remote_uuid,
                       sizeof(oss->lqo_uuid));
        } else {
                /* Assume we have to move this one */
                list_del(&oss->lqo_oss_list);
        }

        oss->lqo_ost_count++;
        lov->lov_tgts[index]->ltd_qos.ltq_oss = oss;

        /* Add sorted by # of OSTs.  Find the first entry that we're
           bigger than. */
        list_for_each_entry(temposs, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                if (oss->lqo_ost_count > temposs->lqo_ost_count)
                        break;
        }
        /* ...and add before it.  If we're the first or smallest, temposs
           points to the list head, and we add to the end. */
        list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);

        lov->lov_qos.lq_dirty = 1;
        lov->lov_qos.lq_rr.lqr_dirty = 1;

        CDEBUG(D_QOS, "add tgt %s to OSS %s (%d OSTs)\n",
               obd_uuid2str(&lov->lov_tgts[index]->ltd_uuid),
               obd_uuid2str(&oss->lqo_uuid), oss->lqo_ost_count);

out:
        mutex_up(&lov->lov_lock);
        up_write(&lov->lov_qos.lq_rw_sem);
        RETURN(rc);
}

int qos_del_tgt(struct obd_device *obd, __u32 index)
{
        struct lov_obd *lov = &obd->u.lov;
        struct lov_qos_oss *oss;
        int rc = 0;
        ENTRY;

        if (!lov->lov_tgts[index])
                RETURN(0);

        down_write(&lov->lov_qos.lq_rw_sem);

        oss = lov->lov_tgts[index]->ltd_qos.ltq_oss;
        if (!oss)
                GOTO(out, rc = -ENOENT);

        oss->lqo_ost_count--;
        if (oss->lqo_ost_count == 0) {
                CDEBUG(D_QOS, "removing OSS %s\n",
                       obd_uuid2str(&oss->lqo_uuid));
                list_del(&oss->lqo_oss_list);
                OBD_FREE_PTR(oss);
        }

        lov->lov_qos.lq_dirty = 1;
        lov->lov_qos.lq_rr.lqr_dirty = 1;
out:
        up_write(&lov->lov_qos.lq_rw_sem);
        RETURN(rc);
}

/* Recalculate per-object penalties for OSSs and OSTs,
   depends on size of each ost in an oss */
static int qos_calc_ppo(struct obd_device *obd)
{
        struct lov_obd *lov = &obd->u.lov;
        struct lov_qos_oss *oss;
        __u64 ba_max, ba_min, temp;
        int num_active;
        int rc, i, prio_wide;
        time_t now, age;
        ENTRY;

        if (!lov->lov_qos.lq_dirty)
                GOTO(out, rc = 0);

        num_active = lov->desc.ld_active_tgt_count - 1;
        if (num_active < 1)
                GOTO(out, rc = -EAGAIN);

        /* find bavail on each OSS */
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                oss->lqo_bavail = 0;
        }
        lov->lov_qos.lq_active_oss_count = 0;

        /* How badly user wants to select osts "widely" (not recently chosen
           and not on recent oss's), as opposed to "freely" (based on free
           space available).  Scaled to the range 0-256. */
        prio_wide = 256 - lov->lov_qos.lq_prio_free;
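
        /* Worked example (illustrative numbers): lq_prio_free holds the
         * free-space priority scaled to 0-256.  If it were set to 232
         * (~91%), then prio_wide = 24, i.e. ~9% of the weighting favors
         * "wide" placement and ~91% favors free space. */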

        ba_min = (__u64)(-1);
        ba_max = 0;
        now = cfs_time_current_sec();
        /* Calculate OST penalty per object */
        /* (lov ref taken in alloc_qos) */
        for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
                        continue;
                temp = TGT_BAVAIL(i);
                if (!temp)
                        continue;
                ba_min = min(temp, ba_min);
                ba_max = max(temp, ba_max);

                /* Count the number of usable OSS's */
                if (lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail == 0)
                        lov->lov_qos.lq_active_oss_count++;
                lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail += temp;

                /* per-OST penalty is prio * TGT_bavail / (num_ost - 1) / 2 */
                temp >>= 1;
                do_div(temp, num_active);
                lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj =
                        (temp * prio_wide) >> 8;
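
                /* Worked example (illustrative numbers): with 1TB free on
                 * this OST, 10 active OSTs (num_active = 9) and
                 * prio_wide = 24, penalty_per_obj is roughly
                 * (1TB/2/9) * 24/256, i.e. on the order of 5GB per object
                 * created. */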

                age = (now - lov->lov_tgts[i]->ltd_qos.ltq_used) >> 3;
                if (lov->lov_qos.lq_reset || age > 32 * lov->desc.ld_qos_maxage)
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
                else if (age > lov->desc.ld_qos_maxage)
                        /* Decay the penalty by half for every 8x the update
                         * interval that the device has been idle.  That gives
                         * lots of time for the statfs information to be
                         * updated (which the penalty is only a proxy for),
                         * and avoids penalizing OSS/OSTs under light load. */
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty >>=
                                (age / lov->desc.ld_qos_maxage);
        }
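
        /* Decay example: age is the idle time divided by 8, so an OST idle
         * for 24 update intervals has age = 3 * ld_qos_maxage and its
         * penalty is shifted right by 3, i.e. cut to 1/8; past 256 idle
         * intervals (age > 32 * ld_qos_maxage) the penalty is simply reset
         * to zero above. */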

        num_active = lov->lov_qos.lq_active_oss_count - 1;
        if (num_active < 1) {
                /* If there's only 1 OSS, we can't penalize it, so instead
                   we have to double the OST penalty */
                num_active = 1;
                for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                        if (lov->lov_tgts[i] == NULL)
                                continue;
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj <<= 1;
                }
        }

        /* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                temp = oss->lqo_bavail >> 1;
                do_div(temp, oss->lqo_ost_count * num_active);
                oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8;

                age = (now - oss->lqo_used) >> 3;
                if (lov->lov_qos.lq_reset || age > 32 * lov->desc.ld_qos_maxage)
                        oss->lqo_penalty = 0;
                else if (age > lov->desc.ld_qos_maxage)
                        /* Decay the penalty by half for every 8x the update
                         * interval that the device has been idle.  That gives
                         * lots of time for the statfs information to be
                         * updated (which the penalty is only a proxy for),
                         * and avoids penalizing OSS/OSTs under light load. */
                        oss->lqo_penalty >>= (age / lov->desc.ld_qos_maxage);
        }

        lov->lov_qos.lq_dirty = 0;
        lov->lov_qos.lq_reset = 0;

        /* If each ost has almost the same free space,
         * do rr allocation for better creation performance */
        lov->lov_qos.lq_same_space = 0;
        temp = ba_max - ba_min;
        ba_min = (ba_min * 51) >> 8; /* 51/256 = .20 */
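        /* Arithmetic check: 51/256 = 0.19921875, so ba_min now holds ~20%
         * of the smallest OST's free space.  E.g. with ba_max = 1.1TB and
         * ba_min = 1.0TB, temp = 0.1TB is below the ~0.2TB threshold and
         * the targets count as evenly filled. */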
        if (temp < ba_min) {
                /* Difference is less than 20% */
                lov->lov_qos.lq_same_space = 1;
                /* Reset weights for the next time we enter qos mode */
                lov->lov_qos.lq_reset = 1;
        }
        rc = 0;
out:
        if (!rc && lov->lov_qos.lq_same_space)
                RETURN(-EAGAIN);
        RETURN(rc);
}

static int qos_calc_weight(struct lov_obd *lov, int i)
{
        __u64 temp, temp2;

        /* Final ost weight = TGT_BAVAIL - ost_penalty - oss_penalty */
        temp = TGT_BAVAIL(i);
        temp2 = lov->lov_tgts[i]->ltd_qos.ltq_penalty +
                lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty;
        if (temp < temp2)
                lov->lov_tgts[i]->ltd_qos.ltq_weight = 0;
        else
                lov->lov_tgts[i]->ltd_qos.ltq_weight = temp - temp2;
        return 0;
}
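
/* Weight example for qos_calc_weight() above (illustrative numbers): an
 * OST with 1TB free, an OST penalty of 50GB and an OSS penalty of 100GB
 * gets a weight of ~850GB; if the penalties ever exceed the free space,
 * the weight clamps to 0 rather than wrapping the unsigned subtraction. */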

/* We just used this index for a stripe; adjust everyone's weights */
static int qos_used(struct lov_obd *lov, struct ost_pool *osts,
                    __u32 index, __u64 *total_wt)
{
        struct lov_qos_oss *oss;
        int j;
        ENTRY;

        /* Don't allocate from this stripe anymore, until the next alloc_qos */
        lov->lov_tgts[index]->ltd_qos.ltq_usable = 0;

        oss = lov->lov_tgts[index]->ltd_qos.ltq_oss;

        /* Decay old penalty by half (we're adding max penalty, and don't
           want it to run away.) */
        lov->lov_tgts[index]->ltd_qos.ltq_penalty >>= 1;
        oss->lqo_penalty >>= 1;

        /* mark the OSS and OST as recently used */
        lov->lov_tgts[index]->ltd_qos.ltq_used =
                oss->lqo_used = cfs_time_current_sec();

        /* Set max penalties for this OST and OSS */
        lov->lov_tgts[index]->ltd_qos.ltq_penalty +=
                lov->lov_tgts[index]->ltd_qos.ltq_penalty_per_obj *
                lov->desc.ld_active_tgt_count;
        oss->lqo_penalty += oss->lqo_penalty_per_obj *
                lov->lov_qos.lq_active_oss_count;
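
        /* Worked example (illustrative numbers): continuing the ~5GB
         * penalty_per_obj case with 10 active OSTs, one allocation adds
         * ~50GB of penalty to this OST (after the old penalty was halved
         * above), so its weight drops sharply until the penalty decays. */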

        /* Decrease all OSS penalties */
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                if (oss->lqo_penalty < oss->lqo_penalty_per_obj)
                        oss->lqo_penalty = 0;
                else
                        oss->lqo_penalty -= oss->lqo_penalty_per_obj;
        }

        *total_wt = 0;
        /* Decrease all OST penalties */
        for (j = 0; j < osts->op_count; j++) {
                int i;

                i = osts->op_array[j];
                if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
                        continue;
                if (lov->lov_tgts[i]->ltd_qos.ltq_penalty <
                    lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj)
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
                else
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty -=
                                lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj;

                qos_calc_weight(lov, i);

                /* Recalc the total weight of usable osts */
                if (lov->lov_tgts[i]->ltd_qos.ltq_usable)
                        *total_wt += lov->lov_tgts[i]->ltd_qos.ltq_weight;
349 CDEBUG(D_QOS, "recalc tgt %d usable=%d avail="LPU64
350 " ostppo="LPU64" ostp="LPU64" ossppo="LPU64
351 " ossp="LPU64" wt="LPU64"\n",
352 i, lov->lov_tgts[i]->ltd_qos.ltq_usable,
354 lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj >> 10,
355 lov->lov_tgts[i]->ltd_qos.ltq_penalty >> 10,
356 lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty_per_obj>>10,
357 lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty >> 10,
358 lov->lov_tgts[i]->ltd_qos.ltq_weight >> 10);

#define LOV_QOS_EMPTY ((__u32)-1)
/* compute optimal round-robin order, based on OSTs per OSS */
static int qos_calc_rr(struct lov_obd *lov, struct ost_pool *src_pool,
                       struct lov_qos_rr *lqr)
{
        struct lov_qos_oss *oss;
        unsigned placed, real_count;
        int i, rc;
        ENTRY;

        if (!lqr->lqr_dirty) {
                LASSERT(lqr->lqr_pool.op_size);
                RETURN(0);
        }

        /* Do actual allocation. */
        down_write(&lov->lov_qos.lq_rw_sem);

        /*
         * Check again.  While we were sleeping on @lq_rw_sem something could
         * change.
         */
        if (!lqr->lqr_dirty) {
                LASSERT(lqr->lqr_pool.op_size);
                up_write(&lov->lov_qos.lq_rw_sem);
                RETURN(0);
        }

        real_count = src_pool->op_count;

        /* Zero the pool array */
        /* alloc_rr is holding a read lock on the pool, so nobody is adding/
           deleting from the pool.  The lq_rw_sem ensures that nobody else
           is reading. */
        lqr->lqr_pool.op_count = real_count;
        rc = lov_ost_pool_extend(&lqr->lqr_pool, real_count);
        if (rc) {
                up_write(&lov->lov_qos.lq_rw_sem);
                RETURN(rc);
        }
        for (i = 0; i < lqr->lqr_pool.op_count; i++)
                lqr->lqr_pool.op_array[i] = LOV_QOS_EMPTY;

        /* Place all the OSTs from 1 OSS at the same time. */
        placed = 0;
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                int j = 0;
                for (i = 0; i < lqr->lqr_pool.op_count; i++) {
                        if (lov->lov_tgts[src_pool->op_array[i]] &&
                            (lov->lov_tgts[src_pool->op_array[i]]->ltd_qos.ltq_oss == oss)) {
                                /* Evenly space these OSTs across arrayspace */
                                int next = j * lqr->lqr_pool.op_count /
                                        oss->lqo_ost_count;
                                while (lqr->lqr_pool.op_array[next] !=
                                       LOV_QOS_EMPTY)
                                        next = (next + 1) %
                                                lqr->lqr_pool.op_count;
                                lqr->lqr_pool.op_array[next] =
                                        src_pool->op_array[i];
                                j++;
                                placed++;
                        }
                }
        }

        lqr->lqr_dirty = 0;
        up_write(&lov->lov_qos.lq_rw_sem);
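
        /* Placement example for the loop above (hypothetical config): two
         * OSSs, one with OSTs A0,A1,A2 and one with B0,B1, give
         * op_count = 5.  The 3-OST OSS is placed first at slots 0*5/3=0,
         * 1*5/3=1, 2*5/3=3; the 2-OST OSS then lands at 0->taken->1->
         * taken->2 and 1*5/2=2->taken->3->taken->4.  The result,
         * A0 A1 B0 A2 B1, means consecutive stripes usually hit different
         * servers. */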

        if (placed != real_count) {
                /* This should never happen */
                LCONSOLE_ERROR_MSG(0x14e, "Failed to place all OSTs in the "
                                   "round-robin list (%d of %d).\n",
                                   placed, real_count);
                for (i = 0; i < lqr->lqr_pool.op_count; i++) {
                        LCONSOLE(D_WARNING, "rr #%d ost idx=%d\n", i,
                                 lqr->lqr_pool.op_array[i]);
                }
                lqr->lqr_dirty = 1;
                RETURN(-EAGAIN);
        }

#ifdef QOS_DEBUG
        for (i = 0; i < lqr->lqr_pool.op_count; i++) {
                LCONSOLE(D_QOS, "rr #%d ost idx=%d\n", i,
                         lqr->lqr_pool.op_array[i]);
        }
#endif

        RETURN(0);
}

void qos_shrink_lsm(struct lov_request_set *set)
{
        struct lov_stripe_md *lsm = set->set_oi->oi_md, *lsm_new;
        /* XXX LOV STACKING call into osc for sizes */
        unsigned oldsize, newsize;

        if (set->set_oti && set->set_cookies && set->set_cookie_sent) {
                struct llog_cookie *cookies;
                oldsize = lsm->lsm_stripe_count * sizeof(*cookies);
                newsize = set->set_count * sizeof(*cookies);

                cookies = set->set_cookies;
                oti_alloc_cookies(set->set_oti, set->set_count);
                if (set->set_oti->oti_logcookies) {
                        memcpy(set->set_oti->oti_logcookies, cookies, newsize);
                        OBD_FREE(cookies, oldsize);
                        set->set_cookies = set->set_oti->oti_logcookies;
                } else {
                        CWARN("'leaking' %d bytes\n", oldsize - newsize);
                }
        }

        CWARN("using fewer stripes for object "LPU64": old %u new %u\n",
              lsm->lsm_object_id, lsm->lsm_stripe_count, set->set_count);
        LASSERT(lsm->lsm_stripe_count >= set->set_count);

        newsize = lov_stripe_md_size(set->set_count);
        OBD_ALLOC(lsm_new, newsize);
        if (lsm_new != NULL) {
                int i;
                memcpy(lsm_new, lsm, sizeof(*lsm));
                for (i = 0; i < lsm->lsm_stripe_count; i++) {
                        if (i < set->set_count) {
                                lsm_new->lsm_oinfo[i] = lsm->lsm_oinfo[i];
                                continue;
                        }
                        OBD_SLAB_FREE(lsm->lsm_oinfo[i], lov_oinfo_slab,
                                      sizeof(struct lov_oinfo));
                }
                lsm_new->lsm_stripe_count = set->set_count;
                OBD_FREE(lsm, sizeof(struct lov_stripe_md) +
                         lsm->lsm_stripe_count * sizeof(struct lov_oinfo *));
                set->set_oi->oi_md = lsm_new;
        } else {
                CWARN("'leaking' few bytes\n");
        }
}

int qos_remedy_create(struct lov_request_set *set, struct lov_request *req)
{
        struct lov_stripe_md *lsm = set->set_oi->oi_md;
        struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
        unsigned ost_idx, ost_count = lov->desc.ld_tgt_count;
        int stripe, i, rc = -EIO;
        ENTRY;

        ost_idx = (req->rq_idx + lsm->lsm_stripe_count) % ost_count;
        for (i = 0; i < ost_count; i++, ost_idx = (ost_idx + 1) % ost_count) {
                if (!lov->lov_tgts[ost_idx] ||
                    !lov->lov_tgts[ost_idx]->ltd_active)
                        continue;
                /* check if an object has been created on this ost */
                for (stripe = 0; stripe < lsm->lsm_stripe_count; stripe++) {
                        if (stripe == req->rq_stripe)
                                continue;
                        if (ost_idx == lsm->lsm_oinfo[stripe]->loi_ost_idx)
                                break;
                }

                if (stripe >= lsm->lsm_stripe_count) {
                        req->rq_idx = ost_idx;
                        rc = obd_create(lov->lov_tgts[ost_idx]->ltd_exp,
                                        req->rq_oi.oi_oa, &req->rq_oi.oi_md,
                                        set->set_oti);
                        if (!rc)
                                break;
                }
        }
        RETURN(rc);
}

static int min_stripe_count(int stripe_cnt, int flags)
{
        return (flags & LOV_USES_DEFAULT_STRIPE ?
                stripe_cnt - (stripe_cnt / 4) : stripe_cnt);
}
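
/* Example for min_stripe_count() above: with LOV_USES_DEFAULT_STRIPE set
 * and a requested stripe_cnt of 4, this returns 3; i.e. default-striped
 * files tolerate up to a 25% shortfall in available OSTs, while explicitly
 * requested striping must be satisfied exactly. */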

#define LOV_CREATE_RESEED_MULT 4
#define LOV_CREATE_RESEED_MIN  1000
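/* Reseed example: with 10 usable OSTs, alloc_rr() below reseeds its
 * starting index after (1000/10 + 4) * 10 = 1040 allocations, keeping the
 * reseed period roughly proportional to LOV_CREATE_RESEED_MIN regardless
 * of OST count. */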
/* Allocate objects on osts with round-robin algorithm */
static int alloc_rr(struct lov_obd *lov, int *idx_arr, int *stripe_cnt,
                    char *poolname, int flags)
{
        unsigned array_idx, ost_idx;
        int i, rc;
        int ost_start_idx_temp;
        int speed = 0;
        int stripe_cnt_min = min_stripe_count(*stripe_cnt, flags);
        int *idx_pos;
        struct pool_desc *pool;
        struct ost_pool *osts;
        struct lov_qos_rr *lqr;
        ENTRY;

        pool = lov_find_pool(lov, poolname);
        if (pool == NULL) {
                osts = &(lov->lov_packed);
                lqr = &(lov->lov_qos.lq_rr);
        } else {
                read_lock(&pool_tgt_rwlock(pool));
                osts = &(pool->pool_obds);
                lqr = &(pool->pool_rr);
        }

        rc = qos_calc_rr(lov, osts, lqr);
        if (rc)
                GOTO(out, rc);

        if (--lqr->lqr_start_count <= 0) {
                lqr->lqr_start_idx = ll_rand() % osts->op_count;
                lqr->lqr_start_count =
                        (LOV_CREATE_RESEED_MIN / max(osts->op_count, 1U) +
                         LOV_CREATE_RESEED_MULT) * max(osts->op_count, 1U);
        } else if (stripe_cnt_min >= osts->op_count ||
                   lqr->lqr_start_idx > osts->op_count) {
                /* If we have allocated from all of the OSTs, slowly
                 * precess the next start if the OST/stripe count isn't
                 * already doing this for us. */
                lqr->lqr_start_idx %= osts->op_count;
                if (*stripe_cnt > 1 && (osts->op_count % (*stripe_cnt)) != 1)
                        ++lqr->lqr_offset_idx;
        }
        down_read(&lov->lov_qos.lq_rw_sem);
        ost_start_idx_temp = lqr->lqr_start_idx;

repeat_find:
        array_idx = (lqr->lqr_start_idx + lqr->lqr_offset_idx) % osts->op_count;
        idx_pos = idx_arr;
#ifdef QOS_DEBUG
        CDEBUG(D_QOS, "pool '%s' want %d startidx %d startcnt %d offset %d "
               "active %d count %d arrayidx %d\n", poolname,
               *stripe_cnt, lqr->lqr_start_idx, lqr->lqr_start_count,
               lqr->lqr_offset_idx, osts->op_count, osts->op_count, array_idx);
#endif

        for (i = 0; i < osts->op_count;
             i++, array_idx = (array_idx + 1) % osts->op_count) {
                ++lqr->lqr_start_idx;
                ost_idx = lqr->lqr_pool.op_array[array_idx];
#ifdef QOS_DEBUG
                CDEBUG(D_QOS, "#%d strt %d act %d strp %d ary %d idx %d\n",
                       i, lqr->lqr_start_idx,
                       ((ost_idx != LOV_QOS_EMPTY) && lov->lov_tgts[ost_idx]) ?
                       lov->lov_tgts[ost_idx]->ltd_active : 0,
                       idx_pos - idx_arr, array_idx, ost_idx);
#endif
                if ((ost_idx == LOV_QOS_EMPTY) || !lov->lov_tgts[ost_idx] ||
                    !lov->lov_tgts[ost_idx]->ltd_active)
                        continue;

                /* Fail Check before osc_precreate() is called
                   so we can only 'fail' single OSC. */
                if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
                        continue;

                /* Drop slow OSCs if we can */
                if (obd_precreate(lov->lov_tgts[ost_idx]->ltd_exp) > speed)
                        continue;

                *idx_pos = ost_idx;
                idx_pos++;
                /* We have enough stripes */
                if (idx_pos - idx_arr == *stripe_cnt)
                        break;
        }
        if ((speed < 2) && (idx_pos - idx_arr < stripe_cnt_min)) {
                /* Try again, allowing slower OSCs */
                speed++;
                lqr->lqr_start_idx = ost_start_idx_temp;
                goto repeat_find;
        }

        up_read(&lov->lov_qos.lq_rw_sem);

        *stripe_cnt = idx_pos - idx_arr;
out:
        if (pool != NULL)
                read_unlock(&pool_tgt_rwlock(pool));
        RETURN(rc);
}

/* alloc objects on osts with specific stripe offset */
static int alloc_specific(struct lov_obd *lov, struct lov_stripe_md *lsm,
                          int *idx_arr)
{
        unsigned ost_idx, array_idx, ost_count;
        int i, rc, speed = 0;
        int *idx_pos;
        struct pool_desc *pool;
        struct ost_pool *osts;
        ENTRY;

        pool = lov_find_pool(lov, lsm->lsm_pool_name);
        if (pool == NULL) {
                osts = &(lov->lov_packed);
        } else {
                read_lock(&pool_tgt_rwlock(pool));
                osts = &(pool->pool_obds);
        }

        ost_count = osts->op_count;

repeat_find:
        /* search loi_ost_idx in ost array */
        idx_pos = idx_arr;
        for (i = 0; i < ost_count; i++) {
                if (osts->op_array[i] == lsm->lsm_oinfo[0]->loi_ost_idx) {
                        array_idx = i;
                        break;
                }
        }
        if (i == ost_count) {
                CERROR("Start index %d not found in pool '%s'\n",
                       lsm->lsm_oinfo[0]->loi_ost_idx, lsm->lsm_pool_name);
                GOTO(out, rc = -EINVAL);
        }

        for (i = 0; i < ost_count;
             i++, array_idx = (array_idx + 1) % ost_count) {
                ost_idx = osts->op_array[array_idx];

                if (!lov->lov_tgts[ost_idx] ||
                    !lov->lov_tgts[ost_idx]->ltd_active) {
                        continue;
                }

                /* Fail Check before osc_precreate() is called
                   so we can only 'fail' single OSC. */
                if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
                        continue;

                /* Drop slow OSCs if we can, but not for requested start idx */
                if ((obd_precreate(lov->lov_tgts[ost_idx]->ltd_exp) > speed) &&
                    (i != 0 || speed < 2))
                        continue;

                *idx_pos = ost_idx;
                idx_pos++;
                /* We have enough stripes */
                if (idx_pos - idx_arr == lsm->lsm_stripe_count)
                        GOTO(out, rc = 0);
        }
        if (speed < 2) {
                /* Try again, allowing slower OSCs */
                speed++;
                goto repeat_find;
        }

        /* If we were passed specific striping params, then a failure to
         * meet those requirements is an error, since we can't reallocate
         * that memory (it might be part of a larger array or something).
         *
         * We can only get here if lsm_stripe_count was originally > 1.
         */
        CERROR("can't lstripe objid "LPX64": have %d want %u\n",
               lsm->lsm_object_id, (int)(idx_pos - idx_arr),
               lsm->lsm_stripe_count);
        rc = -EFBIG;
out:
        if (pool != NULL)
                read_unlock(&pool_tgt_rwlock(pool));
        RETURN(rc);
}

/* Alloc objects on osts with optimization based on:
   - free space
   - network resources (shared OSS's)
*/
static int alloc_qos(struct obd_export *exp, int *idx_arr, int *stripe_cnt,
                     char *poolname, int flags)
{
        struct lov_obd *lov = &exp->exp_obd->u.lov;
        static time_t last_warn = 0;
        time_t now = cfs_time_current_sec();
        __u64 total_bavail, total_weight = 0;
        int nfound, good_osts, i, warn = 0, rc = 0;
        int stripe_cnt_min = min_stripe_count(*stripe_cnt, flags);
        struct pool_desc *pool;
        struct ost_pool *osts;
        struct lov_qos_rr *lqr;
        ENTRY;

        if (stripe_cnt_min < 1)
                RETURN(-EINVAL);

        pool = lov_find_pool(lov, poolname);
        if (pool == NULL) {
                osts = &(lov->lov_packed);
                lqr = &(lov->lov_qos.lq_rr);
        } else {
                read_lock(&pool_tgt_rwlock(pool));
                osts = &(pool->pool_obds);
                lqr = &(pool->pool_rr);
        }

        lov_getref(exp->exp_obd);

        /* Detect -EAGAIN early, before expensive lock is taken. */
        if (!lov->lov_qos.lq_dirty && lov->lov_qos.lq_same_space)
                GOTO(out_nolock, rc = -EAGAIN);

        /* Do actual allocation, use write lock here. */
        down_write(&lov->lov_qos.lq_rw_sem);

        /*
         * Check again, while we were sleeping on @lq_rw_sem things could
         * change.
         */
        if (!lov->lov_qos.lq_dirty && lov->lov_qos.lq_same_space)
                GOTO(out, rc = -EAGAIN);

        if (lov->desc.ld_active_tgt_count < 2)
                GOTO(out, rc = -EAGAIN);

        rc = qos_calc_ppo(exp->exp_obd);
        if (rc)
                GOTO(out, rc);

        total_bavail = 0;
        good_osts = 0;
        /* Warn users about zero available space/inode every 30 min */
        if (cfs_time_sub(now, last_warn) > 60 * 30)
                warn = 1;
        /* Find all the OSTs that are valid stripe candidates */
        for (i = 0; i < osts->op_count; i++) {
                __u64 bavail;

                if (!lov->lov_tgts[osts->op_array[i]] ||
                    !lov->lov_tgts[osts->op_array[i]]->ltd_active)
                        continue;
                bavail = TGT_BAVAIL(osts->op_array[i]);
                if (!bavail) {
                        if (warn) {
                                CDEBUG(D_QOS, "no free space on %s\n",
                                       obd_uuid2str(&lov->lov_tgts[osts->op_array[i]]->ltd_uuid));
                                last_warn = now;
                        }
                        continue;
                }
                if (!TGT_FFREE(osts->op_array[i])) {
                        if (warn) {
                                CDEBUG(D_QOS, "no free inodes on %s\n",
                                       obd_uuid2str(&lov->lov_tgts[osts->op_array[i]]->ltd_uuid));
                                last_warn = now;
                        }
                        continue;
                }

                /* Fail Check before osc_precreate() is called
                   so we can only 'fail' single OSC. */
                if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && osts->op_array[i] == 0)
                        continue;

                if (obd_precreate(lov->lov_tgts[osts->op_array[i]]->ltd_exp) > 2)
                        continue;

                lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_usable = 1;
                qos_calc_weight(lov, osts->op_array[i]);
                total_bavail += bavail;
                total_weight += lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_weight;

                good_osts++;
        }
830 CDEBUG(D_QOS, "found %d good osts\n", good_osts);
833 if (good_osts < stripe_cnt_min)
834 GOTO(out, rc = -EAGAIN);
837 GOTO(out, rc = -ENOSPC);
839 /* We have enough osts */
840 if (good_osts < *stripe_cnt)
841 *stripe_cnt = good_osts;
844 GOTO(out, rc = -EAGAIN);

        /* Find enough OSTs with weighted random allocation. */
        nfound = 0;
        while (nfound < *stripe_cnt) {
                __u64 rand, cur_weight;

                cur_weight = 0;
                rc = -ENODEV;

                if (total_weight) {
#if BITS_PER_LONG == 32
                        rand = ll_rand() % (unsigned)total_weight;
                        /* If total_weight > 32-bit, first generate the high
                         * 32 bits of the random number, then add in the low
                         * 32 bits (truncated to the upper limit, if needed) */
                        if (total_weight > 0xffffffffULL)
                                rand = (__u64)(ll_rand() %
                                        (unsigned)(total_weight >> 32)) << 32;
                        else
                                rand = 0;

                        if (rand == (total_weight & 0xffffffff00000000ULL))
                                rand |= ll_rand() % (unsigned)total_weight;
                        else
                                rand |= ll_rand();
#else
                        rand = ((__u64)ll_rand() << 32 | ll_rand()) %
                                total_weight;
#endif
                } else {
                        rand = 0;
                }
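
                /* Note: ll_rand() yields a 32-bit value, so on 32-bit
                 * kernels a 64-bit rand below total_weight is assembled
                 * from two draws (high word bounded by total_weight >> 32,
                 * low word either full-range or bounded when the high
                 * words match); otherwise two draws are concatenated and
                 * reduced with a single modulo. */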

                /* On average, this will hit larger-weighted osts more often.
                   0-weight osts will always get used last (only when rand=0).*/
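                /* Selection example (illustrative weights): for three usable
                 * OSTs with weights 5, 3 and 2 units, rand is drawn from
                 * [0,10); a draw of 6 walks the cumulative weights 5, 8, ...
                 * and stops at the second OST (8 >= 6), so each OST is
                 * picked with probability roughly proportional to its
                 * weight. */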
                for (i = 0; i < osts->op_count; i++) {
                        if (!lov->lov_tgts[osts->op_array[i]] ||
                            !lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_usable)
                                continue;

                        cur_weight += lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_weight;
#ifdef QOS_DEBUG
                        CDEBUG(D_QOS, "stripe_cnt=%d nfound=%d cur_weight="LPU64
                               " rand="LPU64" total_weight="LPU64"\n",
                               *stripe_cnt, nfound, cur_weight, rand,
                               total_weight);
#endif
                        if (cur_weight >= rand) {
#ifdef QOS_DEBUG
                                CDEBUG(D_QOS, "assigned stripe=%d to idx=%d\n",
                                       nfound, osts->op_array[i]);
#endif
                                idx_arr[nfound++] = osts->op_array[i];
                                qos_used(lov, osts, osts->op_array[i],
                                         &total_weight);
                                rc = 0;
                                break;
                        }
                }
                /* should never satisfy below condition */
                if (rc) {
                        CERROR("Didn't find any OSTs?\n");
                        break;
                }
        }
        LASSERT(nfound == *stripe_cnt);

out:
        up_write(&lov->lov_qos.lq_rw_sem);
out_nolock:
        if (pool != NULL)
                read_unlock(&pool_tgt_rwlock(pool));
        if (rc == -EAGAIN)
                rc = alloc_rr(lov, idx_arr, stripe_cnt, poolname, flags);
        lov_putref(exp->exp_obd);
        RETURN(rc);
}

/* return new alloced stripe count on success */
static int alloc_idx_array(struct obd_export *exp, struct lov_stripe_md *lsm,
                           int newea, int **idx_arr, int *arr_cnt, int flags)
{
        struct lov_obd *lov = &exp->exp_obd->u.lov;
        int stripe_cnt = lsm->lsm_stripe_count;
        int i, rc = 0;
        int *tmp_arr = NULL;
        ENTRY;

        *arr_cnt = stripe_cnt;
        OBD_ALLOC(tmp_arr, *arr_cnt * sizeof(int));
        if (tmp_arr == NULL)
                RETURN(-ENOMEM);
        for (i = 0; i < *arr_cnt; i++)
                tmp_arr[i] = -1;

        if (newea ||
            lsm->lsm_oinfo[0]->loi_ost_idx >= lov->desc.ld_tgt_count)
                rc = alloc_qos(exp, tmp_arr, &stripe_cnt,
                               lsm->lsm_pool_name, flags);
        else
                rc = alloc_specific(lov, lsm, tmp_arr);

        if (rc)
                GOTO(out_arr, rc);

        *idx_arr = tmp_arr;
        RETURN(stripe_cnt);
out_arr:
        OBD_FREE(tmp_arr, *arr_cnt * sizeof(int));
        *arr_cnt = 0;
        RETURN(rc);
}

static void free_idx_array(int *idx_arr, int arr_cnt)
{
        if (arr_cnt)
                OBD_FREE(idx_arr, arr_cnt * sizeof(int));
}

int qos_prep_create(struct obd_export *exp, struct lov_request_set *set)
{
        struct lov_obd *lov = &exp->exp_obd->u.lov;
        struct lov_stripe_md *lsm;
        struct obdo *src_oa = set->set_oi->oi_oa;
        struct obd_trans_info *oti = set->set_oti;
        int i, stripes, rc = 0, newea = 0;
        int flag = LOV_USES_ASSIGNED_STRIPE;
        int *idx_arr = NULL, idx_cnt = 0;
        ENTRY;

        LASSERT(src_oa->o_valid & OBD_MD_FLID);
        LASSERT(src_oa->o_valid & OBD_MD_FLGROUP);

        if (set->set_oi->oi_md == NULL) {
                int stripes_def = lov_get_stripecnt(lov, 0);

                /* If the MDS file was truncated up to some size, stripe over
                 * enough OSTs to allow the file to be created at that size.
                 * This may mean we use more than the default # of stripes. */
                if (src_oa->o_valid & OBD_MD_FLSIZE) {
                        obd_size min_bavail = LUSTRE_STRIPE_MAXBYTES;

                        /* Find a small number of stripes we can use
                           (up to # of active osts). */
                        stripes = 1;
                        for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                                if (!lov->lov_tgts[i] ||
                                    !lov->lov_tgts[i]->ltd_active)
                                        continue;
                                min_bavail = min(min_bavail, TGT_BAVAIL(i));
                                if (min_bavail * stripes > src_oa->o_size)
                                        break;
                                stripes++;
                        }

                        if (stripes < stripes_def)
                                stripes = stripes_def;
                } else {
                        flag = LOV_USES_DEFAULT_STRIPE;
                        stripes = stripes_def;
                }
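
                /* Sizing example (illustrative numbers): creating a file
                 * already truncated to o_size = 10GB when the smallest
                 * active OST has ~4GB free stops the loop above at
                 * stripes = 3, since 4GB * 3 > 10GB; the result is then
                 * raised to the default stripe count if it came out
                 * smaller. */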

                rc = lov_alloc_memmd(&set->set_oi->oi_md, stripes,
                                     lov->desc.ld_pattern ?
                                     lov->desc.ld_pattern : LOV_PATTERN_RAID0,
                                     LOV_MAGIC);
                if (rc < 0)
                        GOTO(out_err, rc);
                newea = 1;
                rc = 0;
        }

        lsm = set->set_oi->oi_md;
        lsm->lsm_object_id = src_oa->o_id;
        lsm->lsm_object_gr = src_oa->o_gr;

        if (!lsm->lsm_stripe_size)
                lsm->lsm_stripe_size = lov->desc.ld_default_stripe_size;
        if (!lsm->lsm_pattern) {
                LASSERT(lov->desc.ld_pattern);
                lsm->lsm_pattern = lov->desc.ld_pattern;
        }

        stripes = alloc_idx_array(exp, lsm, newea, &idx_arr, &idx_cnt, flag);
        if (stripes <= 0)
                GOTO(out_err, rc = stripes ? stripes : -EIO);
        LASSERTF(stripes <= lsm->lsm_stripe_count,
                 "requested %d allocated %d\n",
                 lsm->lsm_stripe_count, stripes);

        for (i = 0; i < stripes; i++) {
                struct lov_request *req;
                int ost_idx = idx_arr[i];
                LASSERT(ost_idx >= 0);

                OBD_ALLOC(req, sizeof(*req));
                if (req == NULL)
                        GOTO(out_err, rc = -ENOMEM);
                lov_set_add_req(req, set);

                req->rq_buflen = sizeof(*req->rq_oi.oi_md);
                OBD_ALLOC(req->rq_oi.oi_md, req->rq_buflen);
                if (req->rq_oi.oi_md == NULL)
                        GOTO(out_err, rc = -ENOMEM);

                OBDO_ALLOC(req->rq_oi.oi_oa);
                if (req->rq_oi.oi_oa == NULL)
                        GOTO(out_err, rc = -ENOMEM);

                req->rq_idx = ost_idx;
                req->rq_stripe = i;
                /* create data objects with "parent" OA */
                memcpy(req->rq_oi.oi_oa, src_oa, sizeof(*req->rq_oi.oi_oa));

                /* XXX When we start creating objects on demand, we need to
                 * make sure that we always create the object on the
                 * stripe which holds the existing file size.
                 */
                if (src_oa->o_valid & OBD_MD_FLSIZE) {
                        req->rq_oi.oi_oa->o_size =
                                lov_size_to_stripe(lsm, src_oa->o_size, i);

                        CDEBUG(D_INODE, "stripe %d has size "LPU64"/"LPU64"\n",
                               i, req->rq_oi.oi_oa->o_size, src_oa->o_size);
                }
        }
        LASSERT(set->set_count == stripes);

        if (stripes < lsm->lsm_stripe_count)
                qos_shrink_lsm(set);

        if (oti && (src_oa->o_valid & OBD_MD_FLCOOKIE)) {
                oti_alloc_cookies(oti, set->set_count);
                if (!oti->oti_logcookies)
                        GOTO(out_err, rc = -ENOMEM);
                set->set_cookies = oti->oti_logcookies;
        }
out_err:
        if (newea && rc)
                obd_free_memmd(exp, &set->set_oi->oi_md);
        if (idx_arr)
                free_idx_array(idx_arr, idx_cnt);
        RETURN(rc);
}

void qos_update(struct lov_obd *lov)
{
        ENTRY;
        lov->lov_qos.lq_dirty = 1;
        EXIT;
}