/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#define DEBUG_SUBSYSTEM S_LOV

#ifdef __KERNEL__
#include <libcfs/libcfs.h>
#else
#include <liblustre.h>
#endif

#include <obd_class.h>
#include <obd_lov.h>
#include <lustre/lustre_idl.h>
#include "lov_internal.h"

/* #define QOS_DEBUG 1 */
#define D_QOS D_OTHER
#define TGT_BAVAIL(i) (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bavail *\
                       lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bsize)
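
/* Worked example (illustrative numbers): an OST reporting
 * os_bavail = 2621440 free blocks of os_bsize = 4096 bytes gives
 * TGT_BAVAIL(i) = 2621440 * 4096 = 10 GiB of available space. */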

int qos_add_tgt(struct obd_device *obd, __u32 index)
{
        struct lov_obd *lov = &obd->u.lov;
        struct lov_qos_oss *oss, *temposs;
        struct obd_export *exp = lov->lov_tgts[index]->ltd_exp;
        int rc = 0, found = 0;
        ENTRY;

        /* We only need this QOS struct on the MDT, not on clients - but we
         * may not have registered the LOV's observer yet, so there is no
         * way to know. */
        if (!exp || !exp->exp_connection) {
                CERROR("Missing connection\n");
                RETURN(-ENOTCONN);
        }

        cfs_down_write(&lov->lov_qos.lq_rw_sem);
        cfs_mutex_down(&lov->lov_lock);
        cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                if (obd_uuid_equals(&oss->lqo_uuid,
                                    &exp->exp_connection->c_remote_uuid)) {
                        found++;
                        break;
                }
        }

        if (!found) {
                OBD_ALLOC_PTR(oss);
                if (!oss)
                        GOTO(out, rc = -ENOMEM);
                memcpy(&oss->lqo_uuid,
                       &exp->exp_connection->c_remote_uuid,
                       sizeof(oss->lqo_uuid));
        } else {
                /* Assume we have to move this one */
                cfs_list_del(&oss->lqo_oss_list);
        }

        oss->lqo_ost_count++;
        lov->lov_tgts[index]->ltd_qos.ltq_oss = oss;

        /* Add sorted by # of OSTs.  Find the first entry that we're
           bigger than... */
        cfs_list_for_each_entry(temposs, &lov->lov_qos.lq_oss_list,
                                lqo_oss_list)
                if (oss->lqo_ost_count > temposs->lqo_ost_count)
                        break;

        /* ...and add before it.  If we're the first or smallest, temposs
           points to the list head, and we add to the end. */
        cfs_list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);

        lov->lov_qos.lq_dirty = 1;
        lov->lov_qos.lq_rr.lqr_dirty = 1;

        CDEBUG(D_QOS, "add tgt %s to OSS %s (%d OSTs)\n",
               obd_uuid2str(&lov->lov_tgts[index]->ltd_uuid),
               obd_uuid2str(&oss->lqo_uuid),
               oss->lqo_ost_count);

out:
        cfs_mutex_up(&lov->lov_lock);
        cfs_up_write(&lov->lov_qos.lq_rw_sem);
        RETURN(rc);
}

int qos_del_tgt(struct obd_device *obd, struct lov_tgt_desc *tgt)
{
        struct lov_obd *lov = &obd->u.lov;
        struct lov_qos_oss *oss;
        int rc = 0;
        ENTRY;

        cfs_down_write(&lov->lov_qos.lq_rw_sem);

        oss = tgt->ltd_qos.ltq_oss;
        if (!oss)
                GOTO(out, rc = -ENOENT);

        oss->lqo_ost_count--;
        if (oss->lqo_ost_count == 0) {
                CDEBUG(D_QOS, "removing OSS %s\n",
                       obd_uuid2str(&oss->lqo_uuid));
                cfs_list_del(&oss->lqo_oss_list);
                OBD_FREE_PTR(oss);
        }

        lov->lov_qos.lq_dirty = 1;
        lov->lov_qos.lq_rr.lqr_dirty = 1;
out:
        cfs_up_write(&lov->lov_qos.lq_rw_sem);
        RETURN(rc);
}

/* Recalculate the per-object penalties for OSSs and OSTs;
   the penalty depends on the size of each OST within its OSS */
static int qos_calc_ppo(struct obd_device *obd)
{
        struct lov_obd *lov = &obd->u.lov;
        struct lov_qos_oss *oss;
        __u64 ba_max, ba_min, temp;
        int num_active;
        int rc, i, prio_wide;
        time_t now, age;
        ENTRY;

        if (!lov->lov_qos.lq_dirty)
                GOTO(out, rc = 0);

        num_active = lov->desc.ld_active_tgt_count - 1;
        if (num_active < 1)
                GOTO(out, rc = -EAGAIN);

        /* find bavail on each OSS */
        cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list)
                oss->lqo_bavail = 0;

        lov->lov_qos.lq_active_oss_count = 0;

        /* How badly the user wants to select OSTs "widely" (not recently
           chosen and not on recently used OSSs), as opposed to "freely"
           (based on free space).  Range: 0-256. */
        prio_wide = 256 - lov->lov_qos.lq_prio_free;

        ba_min = (__u64)(-1);
        ba_max = 0;
        now = cfs_time_current_sec();
        /* Calculate OST penalty per object */
        /* (lov ref taken in alloc_qos) */
        for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
                        continue;
                temp = TGT_BAVAIL(i);
                if (!temp)
                        continue;
                ba_min = min(temp, ba_min);
                ba_max = max(temp, ba_max);

                /* Count the number of usable OSSs */
                if (lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail == 0)
                        lov->lov_qos.lq_active_oss_count++;
                lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail += temp;

                /* per-OST penalty is prio * TGT_bavail / (num_ost - 1) / 2 */
                temp >>= 1;
                do_div(temp, num_active);
                lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj =
                        (temp * prio_wide) >> 8;

                age = (now - lov->lov_tgts[i]->ltd_qos.ltq_used) >> 3;
                if (lov->lov_qos.lq_reset || age > 32 * lov->desc.ld_qos_maxage)
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
                else if (age > lov->desc.ld_qos_maxage)
                        /* Decay the penalty by half for every 8x the update
                         * interval that the device has been idle.  That gives
                         * lots of time for the statfs information to be
                         * updated (which the penalty is only a proxy for),
                         * and avoids penalizing OSS/OSTs under light load. */
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty >>=
                                (age / lov->desc.ld_qos_maxage);
        }
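
        /* Illustrative numbers for the formulas above (assumed, not from
         * the original source): with lq_prio_free = 128, prio_wide = 128.
         * An OST with 8 GiB available among num_active = 7 then gets
         * penalty_per_obj = (((8 GiB >> 1) / 7) * 128) >> 8 ~= 293 MiB,
         * and a device idle for 8 * ld_qos_maxage seconds has its
         * accumulated penalty halved on the next recalculation. */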

        num_active = lov->lov_qos.lq_active_oss_count - 1;
        if (num_active < 1) {
                /* If there's only 1 OSS, we can't penalize it, so instead
                   we have to double the OST penalty */
                num_active = 1;
                for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                        if (lov->lov_tgts[i] == NULL)
                                continue;
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj <<= 1;
                }
        }

        /* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */
        cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                temp = oss->lqo_bavail >> 1;
                do_div(temp, oss->lqo_ost_count * num_active);
                oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8;

                age = (now - oss->lqo_used) >> 3;
                if (lov->lov_qos.lq_reset || age > 32 * lov->desc.ld_qos_maxage)
                        oss->lqo_penalty = 0;
                else if (age > lov->desc.ld_qos_maxage)
                        /* Decay the penalty by half for every 8x the update
                         * interval that the device has been idle.  That gives
                         * lots of time for the statfs information to be
                         * updated (which the penalty is only a proxy for),
                         * and avoids penalizing OSS/OSTs under light load. */
                        oss->lqo_penalty >>= (age / lov->desc.ld_qos_maxage);
        }

        lov->lov_qos.lq_dirty = 0;
        lov->lov_qos.lq_reset = 0;

        /* If all OSTs have almost the same amount of free space, use
         * round-robin allocation for better creation performance */
        lov->lov_qos.lq_same_space = 0;
        if ((ba_max * (256 - lov->lov_qos.lq_threshold_rr)) >> 8 < ba_min) {
                lov->lov_qos.lq_same_space = 1;
                /* Reset weights for the next time we enter qos mode */
                lov->lov_qos.lq_reset = 1;
        }
        rc = 0;
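
        /* Illustrative threshold (assumed value, not from the original
         * source): with lq_threshold_rr = 43 the test above reads
         * "ba_max * 213 / 256 < ba_min", i.e. the OSTs count as equal and
         * QOS yields to round-robin whenever the emptiest OST is within
         * roughly 17% of the fullest one. */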

out:
        if (!rc && lov->lov_qos.lq_same_space)
                RETURN(-EAGAIN);
        RETURN(rc);
}

static int qos_calc_weight(struct lov_obd *lov, int i)
{
        __u64 temp, temp2;

        /* Final OST weight = TGT_BAVAIL - ost_penalty - oss_penalty */
        temp = TGT_BAVAIL(i);
        temp2 = lov->lov_tgts[i]->ltd_qos.ltq_penalty +
                lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty;
        if (temp < temp2)
                lov->lov_tgts[i]->ltd_qos.ltq_weight = 0;
        else
                lov->lov_tgts[i]->ltd_qos.ltq_weight = temp - temp2;
        return 0;
}
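
/* Example (illustrative numbers): an OST with 8 GiB available, a 1 GiB
 * OST penalty and a 2 GiB OSS penalty weighs 8 - 3 = 5 GiB; when the
 * combined penalties exceed the available space, the weight clamps to
 * zero rather than going negative. */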

/* We just used this index for a stripe; adjust everyone's weights */
static int qos_used(struct lov_obd *lov, struct ost_pool *osts,
                    __u32 index, __u64 *total_wt)
{
        struct lov_qos_oss *oss;
        int j;
        ENTRY;

        /* Don't allocate from this OST anymore, until the next alloc_qos */
        lov->lov_tgts[index]->ltd_qos.ltq_usable = 0;

        oss = lov->lov_tgts[index]->ltd_qos.ltq_oss;

        /* Decay old penalty by half (we're adding max penalty, and don't
           want it to run away.) */
        lov->lov_tgts[index]->ltd_qos.ltq_penalty >>= 1;
        oss->lqo_penalty >>= 1;

        /* mark the OSS and OST as recently used */
        lov->lov_tgts[index]->ltd_qos.ltq_used =
                oss->lqo_used = cfs_time_current_sec();

        /* Set max penalties for this OST and OSS */
        lov->lov_tgts[index]->ltd_qos.ltq_penalty +=
                lov->lov_tgts[index]->ltd_qos.ltq_penalty_per_obj *
                lov->desc.ld_active_tgt_count;
        oss->lqo_penalty += oss->lqo_penalty_per_obj *
                lov->lov_qos.lq_active_oss_count;

        /* Decrease all OSS penalties */
        cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                if (oss->lqo_penalty < oss->lqo_penalty_per_obj)
                        oss->lqo_penalty = 0;
                else
                        oss->lqo_penalty -= oss->lqo_penalty_per_obj;
        }

        *total_wt = 0;
        /* Decrease all OST penalties */
        for (j = 0; j < osts->op_count; j++) {
                int i;

                i = osts->op_array[j];
                if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
                        continue;
                if (lov->lov_tgts[i]->ltd_qos.ltq_penalty <
                    lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj)
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
                else
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty -=
                                lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj;

                qos_calc_weight(lov, i);

                /* Recalc the total weight of usable OSTs */
                if (lov->lov_tgts[i]->ltd_qos.ltq_usable)
                        *total_wt += lov->lov_tgts[i]->ltd_qos.ltq_weight;

#ifdef QOS_DEBUG
                CDEBUG(D_QOS, "recalc tgt %d usable=%d avail="LPU64
                       " ostppo="LPU64" ostp="LPU64" ossppo="LPU64
                       " ossp="LPU64" wt="LPU64"\n",
                       i, lov->lov_tgts[i]->ltd_qos.ltq_usable,
                       TGT_BAVAIL(i) >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_penalty >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty_per_obj >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_weight >> 10);
#endif
        }

        RETURN(0);
}
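
/* Net effect (illustrative summary, not from the original source): the
 * chosen OST's penalty jumps by penalty_per_obj * ld_active_tgt_count
 * and its OSS's by lqo_penalty_per_obj * lq_active_oss_count, while
 * every other OST/OSS sheds one penalty_per_obj.  After roughly one
 * full pass of allocations over the active targets, the bump has been
 * decremented away and the OST competes at full weight again. */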

#define LOV_QOS_EMPTY ((__u32)-1)
/* compute optimal round-robin order, based on OSTs per OSS */
static int qos_calc_rr(struct lov_obd *lov, struct ost_pool *src_pool,
                       struct lov_qos_rr *lqr)
{
        struct lov_qos_oss *oss;
        unsigned placed, real_count;
        int i, rc;
        ENTRY;

        if (!lqr->lqr_dirty) {
                LASSERT(lqr->lqr_pool.op_size);
                RETURN(0);
        }

        /* Do the actual allocation. */
        cfs_down_write(&lov->lov_qos.lq_rw_sem);

        /*
         * Check again.  While we were sleeping on @lq_rw_sem something could
         * have changed.
         */
        if (!lqr->lqr_dirty) {
                LASSERT(lqr->lqr_pool.op_size);
                cfs_up_write(&lov->lov_qos.lq_rw_sem);
                RETURN(0);
        }

        real_count = src_pool->op_count;

        /* Zero the pool array */
        /* alloc_rr is holding a read lock on the pool, so nobody is adding/
           deleting from the pool.  The lq_rw_sem ensures that nobody else
           is reading. */
        lqr->lqr_pool.op_count = real_count;
        rc = lov_ost_pool_extend(&lqr->lqr_pool, real_count);
        if (rc) {
                cfs_up_write(&lov->lov_qos.lq_rw_sem);
                RETURN(rc);
        }
        for (i = 0; i < lqr->lqr_pool.op_count; i++)
                lqr->lqr_pool.op_array[i] = LOV_QOS_EMPTY;

        /* Place all the OSTs from 1 OSS at the same time. */
        placed = 0;
        cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                int j = 0;
                for (i = 0; i < lqr->lqr_pool.op_count; i++) {
                        if (lov->lov_tgts[src_pool->op_array[i]] &&
                            (lov->lov_tgts[src_pool->op_array[i]]->ltd_qos.ltq_oss == oss)) {
                                /* Evenly space these OSTs across arrayspace */
                                int next = j * lqr->lqr_pool.op_count / oss->lqo_ost_count;
                                while (lqr->lqr_pool.op_array[next] !=
                                       LOV_QOS_EMPTY)
                                        next = (next + 1) % lqr->lqr_pool.op_count;
                                lqr->lqr_pool.op_array[next] = src_pool->op_array[i];
                                j++;
                                placed++;
                        }
                }
        }
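
        /* Example spacing (illustrative): 6 OSTs on OSS A and 2 on OSS B
         * give op_count = 8.  A's OSTs (j = 0..5) land in slots
         * j * 8 / 6 = 0,1,2,4,5,6; B's (j = 0..1) hash to slots 0 and 4,
         * already taken, so the while loop advances them to 3 and 7.  The
         * resulting order A,A,A,B,A,A,A,B spreads consecutive creations
         * across both OSSs. */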

        lqr->lqr_dirty = 0;
        cfs_up_write(&lov->lov_qos.lq_rw_sem);

        if (placed != real_count) {
                /* This should never happen */
                LCONSOLE_ERROR_MSG(0x14e, "Failed to place all OSTs in the "
                                   "round-robin list (%d of %d).\n",
                                   placed, real_count);
                for (i = 0; i < lqr->lqr_pool.op_count; i++) {
                        LCONSOLE(D_WARNING, "rr #%d ost idx=%d\n", i,
                                 lqr->lqr_pool.op_array[i]);
                }
                RETURN(-EAGAIN);
        }

#ifdef QOS_DEBUG
        for (i = 0; i < lqr->lqr_pool.op_count; i++) {
                LCONSOLE(D_QOS, "rr #%d ost idx=%d\n", i,
                         lqr->lqr_pool.op_array[i]);
        }
#endif

        RETURN(0);
}

void qos_shrink_lsm(struct lov_request_set *set)
{
        struct lov_stripe_md *lsm = set->set_oi->oi_md, *lsm_new;
        /* XXX LOV STACKING call into osc for sizes */
        unsigned oldsize, newsize;

        if (set->set_oti && set->set_cookies && set->set_cookie_sent) {
                struct llog_cookie *cookies;
                oldsize = lsm->lsm_stripe_count * sizeof(*cookies);
                newsize = set->set_count * sizeof(*cookies);

                cookies = set->set_cookies;
                oti_alloc_cookies(set->set_oti, set->set_count);
                if (set->set_oti->oti_logcookies) {
                        memcpy(set->set_oti->oti_logcookies, cookies, newsize);
                        OBD_FREE_LARGE(cookies, oldsize);
                        set->set_cookies = set->set_oti->oti_logcookies;
                } else {
                        CWARN("'leaking' %d bytes\n", oldsize - newsize);
                }
        }

        CWARN("using fewer stripes for object "LPU64": old %u new %u\n",
              lsm->lsm_object_id, lsm->lsm_stripe_count, set->set_count);
        LASSERT(lsm->lsm_stripe_count >= set->set_count);

        newsize = lov_stripe_md_size(set->set_count);
        OBD_ALLOC_LARGE(lsm_new, newsize);
        if (lsm_new != NULL) {
                int i;
                memcpy(lsm_new, lsm, sizeof(*lsm));
                for (i = 0; i < lsm->lsm_stripe_count; i++) {
                        if (i < set->set_count) {
                                lsm_new->lsm_oinfo[i] = lsm->lsm_oinfo[i];
                                continue;
                        }
                        OBD_SLAB_FREE(lsm->lsm_oinfo[i], lov_oinfo_slab,
                                      sizeof(struct lov_oinfo));
                }
                lsm_new->lsm_stripe_count = set->set_count;
                OBD_FREE_LARGE(lsm, sizeof(struct lov_stripe_md) +
                               lsm->lsm_stripe_count * sizeof(struct lov_oinfo *));
                set->set_oi->oi_md = lsm_new;
        } else {
                CWARN("'leaking' a few bytes\n");
        }
}

/* Check whether we can create the object on the OST (referred to by ost_idx)
 * Return:
 *      0: created the object.
 *      other value: did not create the object.
 */
static int lov_check_and_create_object(struct lov_obd *lov, int ost_idx,
                                       struct lov_stripe_md *lsm,
                                       struct lov_request *req,
                                       struct obd_trans_info *oti)
{
        int stripe;
        int rc = -EIO;
        ENTRY;

        CDEBUG(D_QOS, "Check and create on idx %d\n", ost_idx);
        if (!lov->lov_tgts[ost_idx] ||
            !lov->lov_tgts[ost_idx]->ltd_active)
                RETURN(rc);

        /* check whether an object has already been created on this OST */
        for (stripe = 0; stripe < lsm->lsm_stripe_count; stripe++) {
                /* we already have an object at this stripe */
                if (ost_idx == lsm->lsm_oinfo[stripe]->loi_ost_idx)
                        break;
        }

        if (stripe >= lsm->lsm_stripe_count) {
                req->rq_idx = ost_idx;
                rc = obd_create(lov->lov_tgts[ost_idx]->ltd_exp,
                                req->rq_oi.oi_oa, &req->rq_oi.oi_md,
                                oti);
        }
        RETURN(rc);
}

int qos_remedy_create(struct lov_request_set *set, struct lov_request *req)
{
        struct lov_stripe_md *lsm = set->set_oi->oi_md;
        struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
        unsigned ost_idx = 0, ost_count;
        struct pool_desc *pool;
        struct ost_pool *osts = NULL;
        int i, rc = -EIO;
        ENTRY;

        /* First check whether we can create the objects on the pool */
        pool = lov_find_pool(lov, lsm->lsm_pool_name);
        if (pool != NULL) {
                cfs_down_read(&pool_tgt_rw_sem(pool));
                osts = &(pool->pool_obds);
                ost_count = osts->op_count;
                for (i = 0; i < ost_count; i++) {
                        ost_idx = osts->op_array[i];
                        rc = lov_check_and_create_object(lov, ost_idx, lsm,
                                                         req, set->set_oti);
                        if (rc == 0)
                                break;
                }
                cfs_up_read(&pool_tgt_rw_sem(pool));
                lov_pool_putref(pool);
                RETURN(rc);
        }

        ost_count = lov->desc.ld_tgt_count;
        /* Then check whether we can create the objects on other OSTs */
        ost_idx = (req->rq_idx + lsm->lsm_stripe_count) % ost_count;
        for (i = 0; i < ost_count; i++, ost_idx = (ost_idx + 1) % ost_count) {
                rc = lov_check_and_create_object(lov, ost_idx, lsm, req,
                                                 set->set_oti);
                if (rc == 0)
                        break;
        }

        RETURN(rc);
}

static int min_stripe_count(int stripe_cnt, int flags)
{
        return (flags & LOV_USES_DEFAULT_STRIPE ?
                stripe_cnt - (stripe_cnt / 4) : stripe_cnt);
}
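
/* For example (illustrative): a request for 8 stripes with
 * LOV_USES_DEFAULT_STRIPE set may be satisfied by as few as
 * 8 - 8/4 = 6 stripes, while explicitly requested striping must get
 * all 8. */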

#define LOV_CREATE_RESEED_MULT 30
#define LOV_CREATE_RESEED_MIN  2000
/* Allocate objects on OSTs with the round-robin algorithm */
static int alloc_rr(struct lov_obd *lov, int *idx_arr, int *stripe_cnt,
                    char *poolname, int flags)
{
        unsigned array_idx;
        int i, rc, *idx_pos;
        __u32 ost_idx;
        int ost_start_idx_temp;
        int speed = 0;
        int stripe_cnt_min = min_stripe_count(*stripe_cnt, flags);
        struct pool_desc *pool;
        struct ost_pool *osts;
        struct lov_qos_rr *lqr;
        ENTRY;

        pool = lov_find_pool(lov, poolname);
        if (pool == NULL) {
                osts = &(lov->lov_packed);
                lqr = &(lov->lov_qos.lq_rr);
        } else {
                cfs_down_read(&pool_tgt_rw_sem(pool));
                osts = &(pool->pool_obds);
                lqr = &(pool->pool_rr);
        }

        rc = qos_calc_rr(lov, osts, lqr);
        if (rc)
                GOTO(out, rc);

        if (--lqr->lqr_start_count <= 0) {
                lqr->lqr_start_idx = cfs_rand() % osts->op_count;
                lqr->lqr_start_count =
                        (LOV_CREATE_RESEED_MIN / max(osts->op_count, 1U) +
                         LOV_CREATE_RESEED_MULT) * max(osts->op_count, 1U);
        } else if (stripe_cnt_min >= osts->op_count ||
                   lqr->lqr_start_idx > osts->op_count) {
                /* If we have allocated from all of the OSTs, slowly
                 * precess the next start if the OST/stripe count isn't
                 * already doing this for us. */
                lqr->lqr_start_idx %= osts->op_count;
                if (*stripe_cnt > 1 && (osts->op_count % (*stripe_cnt)) != 1)
                        ++lqr->lqr_offset_idx;
        }
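
        /* Reseed example (illustrative): with op_count = 10 the start
         * index is re-randomized every (2000 / 10 + 30) * 10 = 2300
         * allocations, per the LOV_CREATE_RESEED_* constants above. */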
        cfs_down_read(&lov->lov_qos.lq_rw_sem);
        ost_start_idx_temp = lqr->lqr_start_idx;

repeat_find:
        array_idx = (lqr->lqr_start_idx + lqr->lqr_offset_idx) % osts->op_count;
        idx_pos = idx_arr;

#ifdef QOS_DEBUG
        CDEBUG(D_QOS, "pool '%s' want %d startidx %d startcnt %d offset %d "
               "active %d count %d arrayidx %d\n", poolname,
               *stripe_cnt, lqr->lqr_start_idx, lqr->lqr_start_count,
               lqr->lqr_offset_idx, osts->op_count, osts->op_count, array_idx);
#endif

        for (i = 0; i < osts->op_count;
             i++, array_idx = (array_idx + 1) % osts->op_count) {
                ++lqr->lqr_start_idx;
                ost_idx = lqr->lqr_pool.op_array[array_idx];
#ifdef QOS_DEBUG
                CDEBUG(D_QOS, "#%d strt %d act %d strp %d ary %d idx %d\n",
                       i, lqr->lqr_start_idx,
                       ((ost_idx != LOV_QOS_EMPTY) && lov->lov_tgts[ost_idx]) ?
                       lov->lov_tgts[ost_idx]->ltd_active : 0,
                       idx_pos - idx_arr, array_idx, ost_idx);
#endif
                if ((ost_idx == LOV_QOS_EMPTY) || !lov->lov_tgts[ost_idx] ||
                    !lov->lov_tgts[ost_idx]->ltd_active)
                        continue;

                /* Fail check before osc_precreate() is called
                   so that we can 'fail' only a single OSC. */
                if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
                        continue;

                /* Drop slow OSCs if we can */
                if (obd_precreate(lov->lov_tgts[ost_idx]->ltd_exp) > speed)
                        continue;

                *idx_pos = ost_idx;
                idx_pos++;
                /* We have enough stripes */
                if (idx_pos - idx_arr == *stripe_cnt)
                        break;
        }
        if ((speed < 2) && (idx_pos - idx_arr < stripe_cnt_min)) {
                /* Try again, allowing slower OSCs */
                speed++;
                lqr->lqr_start_idx = ost_start_idx_temp;
                goto repeat_find;
        }

        cfs_up_read(&lov->lov_qos.lq_rw_sem);

        *stripe_cnt = idx_pos - idx_arr;
out:
        if (pool != NULL) {
                cfs_up_read(&pool_tgt_rw_sem(pool));
                /* put back the ref taken by lov_find_pool() */
                lov_pool_putref(pool);
        }

        RETURN(rc);
}

/* Allocate objects on OSTs starting at a specific stripe offset */
static int alloc_specific(struct lov_obd *lov, struct lov_stripe_md *lsm,
                          int *idx_arr)
{
        unsigned ost_idx, array_idx, ost_count;
        int i, rc, *idx_pos;
        int speed = 0;
        struct pool_desc *pool;
        struct ost_pool *osts;
        ENTRY;

        pool = lov_find_pool(lov, lsm->lsm_pool_name);
        if (pool == NULL) {
                osts = &(lov->lov_packed);
        } else {
                cfs_down_read(&pool_tgt_rw_sem(pool));
                osts = &(pool->pool_obds);
        }

        ost_count = osts->op_count;

repeat_find:
        /* search loi_ost_idx in the OST array */
        array_idx = 0;
        for (i = 0; i < ost_count; i++) {
                if (osts->op_array[i] == lsm->lsm_oinfo[0]->loi_ost_idx) {
                        array_idx = i;
                        break;
                }
        }
        if (i == ost_count) {
                CERROR("Start index %d not found in pool '%s'\n",
                       lsm->lsm_oinfo[0]->loi_ost_idx, lsm->lsm_pool_name);
                GOTO(out, rc = -EINVAL);
        }

        idx_pos = idx_arr;
        for (i = 0; i < ost_count;
             i++, array_idx = (array_idx + 1) % ost_count) {
                ost_idx = osts->op_array[array_idx];

                if (!lov->lov_tgts[ost_idx] ||
                    !lov->lov_tgts[ost_idx]->ltd_active) {
                        continue;
                }

                /* Fail check before osc_precreate() is called
                   so that we can 'fail' only a single OSC. */
                if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
                        continue;

                /* Drop slow OSCs if we can, but not for the requested start
                 * idx.
                 *
                 * This means "if OSC is slow and it is not the requested
                 * start OST, then it can be skipped, otherwise skip it only
                 * if it is inactive/recovering/out-of-space." */
                if ((obd_precreate(lov->lov_tgts[ost_idx]->ltd_exp) > speed) &&
                    (i != 0 || speed >= 2))
                        continue;

                *idx_pos = ost_idx;
                idx_pos++;
                /* We have enough stripes */
                if (idx_pos - idx_arr == lsm->lsm_stripe_count)
                        GOTO(out, rc = 0);
        }
        if (speed < 2) {
                /* Try again, allowing slower OSCs */
                speed++;
                goto repeat_find;
        }

        /* If we were passed specific striping params, then a failure to
         * meet those requirements is an error, since we can't reallocate
         * that memory (it might be part of a larger array or something).
         *
         * We can only get here if lsm_stripe_count was originally > 1.
         */
        CERROR("can't lstripe objid "LPX64": have %d want %u\n",
               lsm->lsm_object_id, (int)(idx_pos - idx_arr),
               lsm->lsm_stripe_count);
        rc = -EFBIG;
out:
        if (pool != NULL) {
                cfs_up_read(&pool_tgt_rw_sem(pool));
                /* put back the ref taken by lov_find_pool() */
                lov_pool_putref(pool);
        }
        RETURN(rc);
}

/* Allocate objects on OSTs with optimization based on:
   - free space
   - network resources (shared OSSs)
*/
static int alloc_qos(struct obd_export *exp, int *idx_arr, int *stripe_cnt,
                     char *poolname, int flags)
{
        struct lov_obd *lov = &exp->exp_obd->u.lov;
        __u64 total_weight = 0;
        int nfound, good_osts, i, rc = 0;
        int stripe_cnt_min = min_stripe_count(*stripe_cnt, flags);
        struct pool_desc *pool;
        struct ost_pool *osts;
        struct lov_qos_rr *lqr;
        ENTRY;

        if (stripe_cnt_min < 1)
                RETURN(-EINVAL);

        pool = lov_find_pool(lov, poolname);
        if (pool == NULL) {
                osts = &(lov->lov_packed);
                lqr = &(lov->lov_qos.lq_rr);
        } else {
                cfs_down_read(&pool_tgt_rw_sem(pool));
                osts = &(pool->pool_obds);
                lqr = &(pool->pool_rr);
        }

        obd_getref(exp->exp_obd);

        /* wait for fresh statfs info if needed; the rpcs are sent in
         * lov_create() */
        qos_statfs_update(exp->exp_obd,
                          cfs_time_shift_64(-2 * lov->desc.ld_qos_maxage), 1);

        /* Detect -EAGAIN early, before the expensive lock is taken. */
        if (!lov->lov_qos.lq_dirty && lov->lov_qos.lq_same_space)
                GOTO(out_nolock, rc = -EAGAIN);

        /* Do the actual allocation; take the write lock here. */
        cfs_down_write(&lov->lov_qos.lq_rw_sem);

        /*
         * Check again: while we were sleeping on @lq_rw_sem things could
         * have changed.
         */
        if (!lov->lov_qos.lq_dirty && lov->lov_qos.lq_same_space)
                GOTO(out, rc = -EAGAIN);

        if (lov->desc.ld_active_tgt_count < 2)
                GOTO(out, rc = -EAGAIN);

        rc = qos_calc_ppo(exp->exp_obd);
        if (rc)
                GOTO(out, rc);

        good_osts = 0;
        /* Find all the OSTs that are valid stripe candidates */
        for (i = 0; i < osts->op_count; i++) {
                if (!lov->lov_tgts[osts->op_array[i]] ||
                    !lov->lov_tgts[osts->op_array[i]]->ltd_active)
                        continue;

                /* Fail check before osc_precreate() is called
                   so that we can 'fail' only a single OSC. */
                if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) &&
                    osts->op_array[i] == 0)
                        continue;

                if (obd_precreate(lov->lov_tgts[osts->op_array[i]]->ltd_exp) > 2)
                        continue;

                lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_usable = 1;
                qos_calc_weight(lov, osts->op_array[i]);
                total_weight += lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_weight;

                good_osts++;
        }

#ifdef QOS_DEBUG
        CDEBUG(D_QOS, "found %d good osts\n", good_osts);
#endif

        if (good_osts < stripe_cnt_min)
                GOTO(out, rc = -EAGAIN);

        /* We have enough OSTs; cap the stripe count at what is usable */
        if (good_osts < *stripe_cnt)
                *stripe_cnt = good_osts;

        if (!*stripe_cnt)
                GOTO(out, rc = -EAGAIN);

        /* Find enough OSTs with weighted random allocation. */
        nfound = 0;
        while (nfound < *stripe_cnt) {
                __u64 rand, cur_weight;

                cur_weight = 0;
                rc = -ENODEV;

                if (total_weight) {
#if BITS_PER_LONG == 32
                        rand = cfs_rand() % (unsigned)total_weight;
                        /* If total_weight > 32-bit, first generate the high
                         * 32 bits of the random number, then add in the low
                         * 32 bits (truncated to the upper limit, if needed) */
                        if (total_weight > 0xffffffffULL)
                                rand = (__u64)(cfs_rand() %
                                       (unsigned)(total_weight >> 32)) << 32;
                        else
                                rand = 0;

                        if (rand == (total_weight & 0xffffffff00000000ULL))
                                rand |= cfs_rand() % (unsigned)total_weight;
                        else
                                rand |= cfs_rand();
#else
                        rand = ((__u64)cfs_rand() << 32 | cfs_rand()) %
                                total_weight;
#endif
                } else {
                        rand = 0;
                }
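
                /* Illustrative draw (assumes the reconstructed else-branches
                 * above): for total_weight = 0x2C0000000 (11 GiB) on a
                 * 32-bit kernel, the high half is cfs_rand() % 2 shifted
                 * left 32 bits (0 or 0x100000000); that never equals the
                 * 0x200000000 top chunk, so the low 32 bits come from a
                 * full cfs_rand() and the result stays below 0x200000000 -
                 * slightly truncated, as the comment above concedes.
                 * 64-bit kernels do a single 64-bit modulo instead. */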

                /* On average, this will hit larger-weighted OSTs more often.
                   0-weight OSTs will always get used last (only when rand=0). */
                for (i = 0; i < osts->op_count; i++) {
                        if (!lov->lov_tgts[osts->op_array[i]] ||
                            !lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_usable)
                                continue;

                        cur_weight += lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_weight;
#ifdef QOS_DEBUG
                        CDEBUG(D_QOS, "stripe_cnt=%d nfound=%d cur_weight="LPU64
                               " rand="LPU64" total_weight="LPU64"\n",
                               *stripe_cnt, nfound, cur_weight, rand, total_weight);
#endif
                        if (cur_weight >= rand) {
#ifdef QOS_DEBUG
                                CDEBUG(D_QOS, "assigned stripe=%d to idx=%d\n",
                                       nfound, osts->op_array[i]);
#endif
                                idx_arr[nfound++] = osts->op_array[i];
                                qos_used(lov, osts, osts->op_array[i], &total_weight);
                                rc = 0;
                                break;
                        }
                }
                /* we should never satisfy the condition below */
                if (rc) {
                        CERROR("Didn't find any OSTs?\n");
                        break;
                }
        }
        LASSERT(nfound == *stripe_cnt);
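
        /* Selection example (illustrative): three usable OSTs with weights
         * 5, 3 and 2 give total_weight = 10.  rand values 0-5 pick the
         * first OST, 6-8 the second and 9 the third, so the chance of
         * selection grows with weight; qos_used() then re-weights
         * everything before the next stripe is placed. */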

out:
        cfs_up_write(&lov->lov_qos.lq_rw_sem);

out_nolock:
        if (pool != NULL) {
                cfs_up_read(&pool_tgt_rw_sem(pool));
                /* put back the ref taken by lov_find_pool() */
                lov_pool_putref(pool);
        }

        if (rc == -EAGAIN)
                rc = alloc_rr(lov, idx_arr, stripe_cnt, poolname, flags);

        obd_putref(exp->exp_obd);
        RETURN(rc);
}

/* return the newly allocated stripe count on success */
static int alloc_idx_array(struct obd_export *exp, struct lov_stripe_md *lsm,
                           int newea, int **idx_arr, int *arr_cnt, int flags)
{
        struct lov_obd *lov = &exp->exp_obd->u.lov;
        int stripe_cnt = lsm->lsm_stripe_count;
        int i, rc = 0;
        int *tmp_arr = NULL;
        ENTRY;

        *arr_cnt = stripe_cnt;
        OBD_ALLOC(tmp_arr, *arr_cnt * sizeof(int));
        if (tmp_arr == NULL)
                RETURN(-ENOMEM);
        for (i = 0; i < *arr_cnt; i++)
                tmp_arr[i] = -1;

        if (newea ||
            lsm->lsm_oinfo[0]->loi_ost_idx >= lov->desc.ld_tgt_count)
                rc = alloc_qos(exp, tmp_arr, &stripe_cnt,
                               lsm->lsm_pool_name, flags);
        else
                rc = alloc_specific(lov, lsm, tmp_arr);

        if (rc)
                GOTO(out_arr, rc);

        *idx_arr = tmp_arr;
        RETURN(stripe_cnt);
out_arr:
        OBD_FREE(tmp_arr, *arr_cnt * sizeof(int));
        *arr_cnt = 0;
        RETURN(rc);
}

static void free_idx_array(int *idx_arr, int arr_cnt)
{
        if (arr_cnt)
                OBD_FREE(idx_arr, arr_cnt * sizeof(int));
}

int qos_prep_create(struct obd_export *exp, struct lov_request_set *set)
{
        struct lov_obd *lov = &exp->exp_obd->u.lov;
        struct lov_stripe_md *lsm;
        struct obdo *src_oa = set->set_oi->oi_oa;
        struct obd_trans_info *oti = set->set_oti;
        int i, stripes, rc = 0, newea = 0;
        int flag = LOV_USES_ASSIGNED_STRIPE;
        int *idx_arr = NULL, idx_cnt = 0;
        ENTRY;

        LASSERT(src_oa->o_valid & OBD_MD_FLID);
        LASSERT(src_oa->o_valid & OBD_MD_FLGROUP);

        if (set->set_oi->oi_md == NULL) {
                int stripes_def = lov_get_stripecnt(lov, 0);

                /* If the MDS file was truncated up to some size, stripe over
                 * enough OSTs to allow the file to be created at that size.
                 * This may mean we use more than the default # of stripes. */
                if (src_oa->o_valid & OBD_MD_FLSIZE) {
                        obd_size min_bavail = LUSTRE_STRIPE_MAXBYTES;

                        /* Find a small number of stripes we can use
                           (up to # of active OSTs). */
                        stripes = 1;
                        for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                                if (!lov->lov_tgts[i] ||
                                    !lov->lov_tgts[i]->ltd_active)
                                        continue;
                                min_bavail = min(min_bavail, TGT_BAVAIL(i));
                                if (min_bavail * stripes > src_oa->o_size)
                                        break;
                                stripes++;
                        }

                        if (stripes < stripes_def)
                                stripes = stripes_def;
                } else {
                        flag = LOV_USES_DEFAULT_STRIPE;
                        stripes = stripes_def;
                }
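
                /* Sizing example (illustrative): for a file truncated to
                 * 25 GiB with at least 10 GiB free on every active OST,
                 * the loop above stops at stripes = 3, since
                 * 10 GiB * 3 > 25 GiB. */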

                rc = lov_alloc_memmd(&set->set_oi->oi_md, stripes,
                                     lov->desc.ld_pattern ?
                                     lov->desc.ld_pattern : LOV_PATTERN_RAID0,
                                     LOV_MAGIC);
                if (rc < 0)
                        GOTO(out_err, rc);
                newea = 1;
                rc = 0;
        }

        lsm = set->set_oi->oi_md;
        lsm->lsm_object_id = src_oa->o_id;
        lsm->lsm_object_seq = src_oa->o_seq;

        if (!lsm->lsm_stripe_size)
                lsm->lsm_stripe_size = lov->desc.ld_default_stripe_size;
        if (!lsm->lsm_pattern) {
                LASSERT(lov->desc.ld_pattern);
                lsm->lsm_pattern = lov->desc.ld_pattern;
        }

        stripes = alloc_idx_array(exp, lsm, newea, &idx_arr, &idx_cnt, flag);
        if (stripes <= 0)
                GOTO(out_err, rc = stripes ? stripes : -EIO);
        LASSERTF(stripes <= lsm->lsm_stripe_count,
                 "requested %d allocated %d\n",
                 lsm->lsm_stripe_count, stripes);

        for (i = 0; i < stripes; i++) {
                struct lov_request *req;
                int ost_idx = idx_arr[i];
                LASSERT(ost_idx >= 0);

                OBD_ALLOC(req, sizeof(*req));
                if (req == NULL)
                        GOTO(out_err, rc = -ENOMEM);
                lov_set_add_req(req, set);

                req->rq_buflen = sizeof(*req->rq_oi.oi_md);
                OBD_ALLOC_LARGE(req->rq_oi.oi_md, req->rq_buflen);
                if (req->rq_oi.oi_md == NULL)
                        GOTO(out_err, rc = -ENOMEM);

                OBDO_ALLOC(req->rq_oi.oi_oa);
                if (req->rq_oi.oi_oa == NULL)
                        GOTO(out_err, rc = -ENOMEM);

                req->rq_idx = ost_idx;

                /* create data objects with "parent" OA */
                memcpy(req->rq_oi.oi_oa, src_oa, sizeof(*req->rq_oi.oi_oa));
                req->rq_oi.oi_cb_up = cb_create_update;

                /* XXX When we start creating objects on demand, we need to
                 * make sure that we always create the object on the
                 * stripe which holds the existing file size.
                 */
                if (src_oa->o_valid & OBD_MD_FLSIZE) {
                        req->rq_oi.oi_oa->o_size =
                                lov_size_to_stripe(lsm, src_oa->o_size, i);

                        CDEBUG(D_INODE, "stripe %d has size "LPU64"/"LPU64"\n",
                               i, req->rq_oi.oi_oa->o_size, src_oa->o_size);
                }
        }
        LASSERT(set->set_count == stripes);

        if (stripes < lsm->lsm_stripe_count)
                qos_shrink_lsm(set);
        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LOV_PREP_CREATE)) {
                qos_shrink_lsm(set);
                rc = -EIO;
        }

        if (oti && (src_oa->o_valid & OBD_MD_FLCOOKIE)) {
                oti_alloc_cookies(oti, set->set_count);
                if (!oti->oti_logcookies)
                        GOTO(out_err, rc = -ENOMEM);
                set->set_cookies = oti->oti_logcookies;
        }
out_err:
        if (newea && rc)
                obd_free_memmd(exp, &set->set_oi->oi_md);
        if (idx_arr != NULL)
                free_idx_array(idx_arr, idx_cnt);
        RETURN(rc);
}

void qos_update(struct lov_obd *lov)
{
        lov->lov_qos.lq_dirty = 1;
}

void qos_statfs_done(struct lov_obd *lov)
{
        LASSERT(lov->lov_qos.lq_statfs_in_progress);
        cfs_down_write(&lov->lov_qos.lq_rw_sem);
        lov->lov_qos.lq_statfs_in_progress = 0;
        /* wake up any threads waiting for the statfs rpcs to complete */
        cfs_waitq_signal(&lov->lov_qos.lq_statfs_waitq);
        cfs_up_write(&lov->lov_qos.lq_rw_sem);
}

static int qos_statfs_ready(struct obd_device *obd, __u64 max_age)
{
        struct lov_obd *lov = &obd->u.lov;
        int rc;
        ENTRY;

        cfs_down_read(&lov->lov_qos.lq_rw_sem);
        rc = lov->lov_qos.lq_statfs_in_progress == 0 ||
             cfs_time_beforeq_64(max_age, obd->obd_osfs_age);
        cfs_up_read(&lov->lov_qos.lq_rw_sem);
        RETURN(rc);
}

/*
 * Update statfs data if the current osfs age is older than max_age.
 * If wait is not set, it means that we are called from lov_create()
 * and we should just issue the rpcs without waiting for them to complete.
 * If wait is set, we are called from alloc_qos() and we just have
 * to wait for the request set to complete.
 */
void qos_statfs_update(struct obd_device *obd, __u64 max_age, int wait)
{
        struct lov_obd *lov = &obd->u.lov;
        struct obd_info *oinfo;
        int rc = 0;
        struct ptlrpc_request_set *set = NULL;
        ENTRY;

        if (cfs_time_beforeq_64(max_age, obd->obd_osfs_age))
                /* statfs data is quite recent, no need to refresh it */
                RETURN_EXIT;

        if (!wait && lov->lov_qos.lq_statfs_in_progress)
                /* statfs already in progress */
                RETURN_EXIT;

        cfs_down_write(&lov->lov_qos.lq_rw_sem);
        if (lov->lov_qos.lq_statfs_in_progress) {
                cfs_up_write(&lov->lov_qos.lq_rw_sem);
                goto out;
        }
        /* no statfs in flight, send rpcs */
        lov->lov_qos.lq_statfs_in_progress = 1;
        cfs_up_write(&lov->lov_qos.lq_rw_sem);

        if (wait)
                CDEBUG(D_QOS, "%s: did not manage to get fresh statfs data "
                       "in a timely manner (osfs age "LPU64", max age "LPU64")"
                       ", sending new statfs rpcs\n",
                       obd_uuid2str(&lov->desc.ld_uuid), obd->obd_osfs_age,
                       max_age);

        /* need to send statfs rpcs */
        CDEBUG(D_QOS, "sending new statfs requests\n");
        memset(lov->lov_qos.lq_statfs_data, 0,
               sizeof(*lov->lov_qos.lq_statfs_data));
        oinfo = &lov->lov_qos.lq_statfs_data->lsd_oi;
        oinfo->oi_osfs = &lov->lov_qos.lq_statfs_data->lsd_statfs;
        oinfo->oi_flags = OBD_STATFS_NODELAY;
        set = ptlrpc_prep_set();
        if (!set)
                GOTO(out_failed, rc = -ENOMEM);

        rc = obd_statfs_async(obd, oinfo, max_age, set);
        if (rc || cfs_list_empty(&set->set_requests)) {
                if (rc)
                        CWARN("statfs failed with %d\n", rc);
                GOTO(out_failed, rc);
        }
        /* send requests via ptlrpcd */
        oinfo->oi_flags |= OBD_STATFS_PTLRPCD;
        ptlrpcd_add_rqset(set);
        GOTO(out, rc);

out_failed:
        cfs_down_write(&lov->lov_qos.lq_rw_sem);
        lov->lov_qos.lq_statfs_in_progress = 0;
        /* wake up any threads waiting for the statfs rpcs to complete */
        cfs_waitq_signal(&lov->lov_qos.lq_statfs_waitq);
        cfs_up_write(&lov->lov_qos.lq_rw_sem);
out:
        if (set)
                ptlrpc_set_destroy(set);
        if (wait) {
                struct l_wait_info lwi = { 0 };
                CDEBUG(D_QOS, "waiting for statfs requests to complete\n");
                l_wait_event(lov->lov_qos.lq_statfs_waitq,
                             qos_statfs_ready(obd, max_age), &lwi);
                if (cfs_time_before_64(obd->obd_osfs_age, max_age))
                        CDEBUG(D_QOS, "%s: still no fresh statfs data after "
                               "waiting (osfs age "LPU64", max age "LPU64")\n",
                               obd_uuid2str(&lov->desc.ld_uuid),
                               obd->obd_osfs_age, max_age);
        }
        EXIT;
}