/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *
 * This file is part of the Lustre file system, http://www.lustre.org
 * Lustre is a trademark of Cluster File Systems, Inc.
 *
 * You may have signed or agreed to another license before downloading
 * this software.  If so, you are bound by the terms and conditions
 * of that agreement, and the following does not apply to you.  See the
 * LICENSE file included with this distribution for more information.
 *
 * If you did not agree to a different license, then this copy of Lustre
 * is open source software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * In either case, Lustre is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * license text for more details.
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_LOV

#ifdef __KERNEL__
#include <libcfs/libcfs.h>
#else
#include <liblustre.h>
#endif

#include <obd_class.h>
#include <obd_lov.h>
#include "lov_internal.h"

/* #define QOS_DEBUG 1 */
#define D_QOS D_OTHER
#define TGT_BAVAIL(i) (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bavail*\
                       lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bsize)
#define TGT_FFREE(i)  (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_ffree)
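/* Note (added for clarity, not in the original source): TGT_BAVAIL yields
 * free bytes on OST i (free blocks scaled by the block size) and TGT_FFREE
 * yields free inodes; both read the statfs data cached in the target's
 * obd_device rather than issuing a fresh statfs. */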
int qos_add_tgt(struct obd_device *obd, __u32 index)
{
        struct lov_obd *lov = &obd->u.lov;
        struct lov_qos_oss *oss, *temposs;
        struct obd_export *exp = lov->lov_tgts[index]->ltd_exp;
        int rc = 0, found = 0;
        ENTRY;

        /* We only need this QOS struct on MDT, not clients - but we may not
         * have registered the LOV's observer yet, so there's no way to know */
        if (!exp || !exp->exp_connection) {
                CERROR("Missing connection\n");
                RETURN(-ENOTCONN);
        }

        down_write(&lov->lov_qos.lq_rw_sem);
        mutex_down(&lov->lov_lock);
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                if (obd_uuid_equals(&oss->lqo_uuid,
                                    &exp->exp_connection->c_remote_uuid)) {
                        found++;
                        break;
                }
        }

        if (!found) {
                OBD_ALLOC(oss, sizeof(*oss));
                if (!oss)
                        GOTO(out, rc = -ENOMEM);
                memcpy(&oss->lqo_uuid,
                       &exp->exp_connection->c_remote_uuid,
                       sizeof(oss->lqo_uuid));
        } else {
                /* Assume we have to move this one */
                list_del(&oss->lqo_oss_list);
        }

        oss->lqo_ost_count++;
        lov->lov_tgts[index]->ltd_qos.ltq_oss = oss;

        /* Add sorted by # of OSTs.  Find the first entry that we're
           bigger than... */
        list_for_each_entry(temposs, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                if (oss->lqo_ost_count > temposs->lqo_ost_count)
                        break;
        }
        /* ...and add before it.  If we're the first or smallest, temposs
           points to the list head, and we add to the end. */
        list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);
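        /*
         * Illustrative trace (not from the original source): with three
         * OSSs already in the list holding 4, 2 and 1 OSTs, an OSS that
         * just reached 3 OSTs stops at the "2" entry and is inserted in
         * front of it, giving 4, 3, 2, 1 -- so the list stays sorted by
         * descending OST count.
         */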
        lov->lov_qos.lq_dirty = 1;
        lov->lov_qos.lq_dirty_rr = 1;

        CDEBUG(D_QOS, "add tgt %s to OSS %s (%d OSTs)\n",
               obd_uuid2str(&lov->lov_tgts[index]->ltd_uuid),
               obd_uuid2str(&oss->lqo_uuid),
               oss->lqo_ost_count);

out:
        mutex_up(&lov->lov_lock);
        up_write(&lov->lov_qos.lq_rw_sem);
        RETURN(rc);
}
int qos_del_tgt(struct obd_device *obd, __u32 index)
{
        struct lov_obd *lov = &obd->u.lov;
        struct lov_qos_oss *oss;
        int rc = 0;
        ENTRY;

        down_write(&lov->lov_qos.lq_rw_sem);

        oss = lov->lov_tgts[index]->ltd_qos.ltq_oss;
        if (!oss)
                GOTO(out, rc = -ENOENT);

        oss->lqo_ost_count--;
        if (oss->lqo_ost_count == 0) {
                CDEBUG(D_QOS, "removing OSS %s\n",
                       obd_uuid2str(&oss->lqo_uuid));
                list_del(&oss->lqo_oss_list);
                OBD_FREE(oss, sizeof(*oss));
        }

        lov->lov_qos.lq_dirty = 1;
        lov->lov_qos.lq_dirty_rr = 1;
out:
        up_write(&lov->lov_qos.lq_rw_sem);
        RETURN(rc);
}
/* Recalculate per-object penalties for OSSs and OSTs,
   depends on size of each ost in an oss */
static int qos_calc_ppo(struct obd_device *obd)
{
        struct lov_obd *lov = &obd->u.lov;
        struct lov_qos_oss *oss;
        __u64 ba_max, ba_min, temp;
        int num_active;
        int rc, i, prio_wide;
        ENTRY;

        if (!lov->lov_qos.lq_dirty)
                GOTO(out, rc = 0);

        num_active = lov->desc.ld_active_tgt_count - 1;
        if (num_active < 1)
                GOTO(out, rc = -EAGAIN);

        /* find bavail on each OSS */
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                oss->lqo_bavail = 0;
        }
        lov->lov_qos.lq_active_oss_count = 0;

        /* How badly user wants to select osts "widely" (not recently chosen
           and not on recent oss's).  As opposed to "freely" (free space
           avail.)  0-256. */
        prio_wide = 256 - lov->lov_qos.lq_prio_free;

        ba_min = (__u64)(-1);
        ba_max = 0;

        /* Calculate OST penalty per object */
        /* (lov ref taken in alloc_qos) */
        for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
                        continue;
                temp = TGT_BAVAIL(i);
                if (!temp)
                        continue;
                ba_min = min(temp, ba_min);
                ba_max = max(temp, ba_max);

                /* Count the number of usable OSS's */
                if (lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail == 0)
                        lov->lov_qos.lq_active_oss_count++;
                lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail += temp;

                /* per-OST penalty is prio * TGT_bavail / (num_ost - 1) / 2 */
                temp >>= 1;
                do_div(temp, num_active);
                lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj =
                        (temp * prio_wide) >> 8;
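                /*
                 * Worked example (illustrative numbers only): with
                 * lq_prio_free = 128, prio_wide = 256 - 128 = 128.  An OST
                 * with 700GB available among 8 active OSTs (num_active = 7)
                 * gets temp = 350GB / 7 = 50GB, so its per-object penalty
                 * is (50GB * 128) >> 8 = 25GB of weight charged for each
                 * object created on it.
                 */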
                if (lov->lov_qos.lq_reset == 0)
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
        }

        num_active = lov->lov_qos.lq_active_oss_count - 1;
        if (num_active < 1) {
                /* If there's only 1 OSS, we can't penalize it, so instead
                   we have to double the OST penalty */
                num_active = 1;
                for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                        if (!lov->lov_tgts[i])
                                continue;
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj <<= 1;
                }
        }

        /* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                temp = oss->lqo_bavail >> 1;
                do_div(temp, oss->lqo_ost_count * num_active);
                oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8;
                if (lov->lov_qos.lq_reset == 0)
                        oss->lqo_penalty = 0;
        }

        lov->lov_qos.lq_dirty = 0;
        lov->lov_qos.lq_reset = 0;

        /* If each ost has almost same free space,
         * do rr allocation for better creation performance */
        lov->lov_qos.lq_same_space = 0;
        temp = ba_max - ba_min;
        ba_min = (ba_min * 51) >> 8;     /* 51/256 = .20 */
        if (temp < ba_min) {
                /* Difference is less than 20% */
                lov->lov_qos.lq_same_space = 1;
                /* Reset weights for the next time we enter qos mode */
                lov->lov_qos.lq_reset = 0;
        }
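        /*
         * Illustrative check (numbers not from the source): ba_min = 100GB
         * scales to (100GB * 51) >> 8 ~= 19.9GB.  If ba_max = 119GB, the
         * spread ba_max - ba_min = 19GB falls below that threshold, so the
         * OSTs count as evenly filled and round-robin allocation is used.
         */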
        rc = 0;

out:
        if (!rc && lov->lov_qos.lq_same_space)
                RETURN(-EAGAIN);
        RETURN(rc);
}
static int qos_calc_weight(struct lov_obd *lov, int i)
{
        __u64 temp, temp2;

        /* Final ost weight = TGT_BAVAIL - ost_penalty - oss_penalty */
        temp = TGT_BAVAIL(i);
        temp2 = lov->lov_tgts[i]->ltd_qos.ltq_penalty +
                lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty;
        if (temp < temp2)
                lov->lov_tgts[i]->ltd_qos.ltq_weight = 0;
        else
                lov->lov_tgts[i]->ltd_qos.ltq_weight = temp - temp2;
        return 0;
}
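/*
 * Illustrative weighting (numbers invented for clarity): an OST with 500GB
 * free, a 25GB OST penalty and a 15GB OSS penalty ends up with weight
 * 500 - (25 + 15) = 460GB; if the penalties ever exceed the free space,
 * the weight clamps to 0 rather than wrapping the unsigned subtraction.
 */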
/* We just used this index for a stripe; adjust everyone's weights */
static int qos_used(struct lov_obd *lov, __u32 index, __u64 *total_wt)
{
        struct lov_qos_oss *oss;
        int i;
        ENTRY;

        /* Don't allocate from this stripe anymore, until the next alloc_qos */
        lov->lov_tgts[index]->ltd_qos.ltq_usable = 0;

        oss = lov->lov_tgts[index]->ltd_qos.ltq_oss;

        /* Decay old penalty by half (we're adding max penalty, and don't
           want it to run away.) */
        lov->lov_tgts[index]->ltd_qos.ltq_penalty >>= 1;
        oss->lqo_penalty >>= 1;

        /* Set max penalties for this OST and OSS */
        lov->lov_tgts[index]->ltd_qos.ltq_penalty +=
                lov->lov_tgts[index]->ltd_qos.ltq_penalty_per_obj *
                lov->desc.ld_active_tgt_count;
        oss->lqo_penalty += oss->lqo_penalty_per_obj *
                lov->lov_qos.lq_active_oss_count;
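        /*
         * Example of the decay (illustrative only): with penalty_per_obj =
         * 25GB and 8 active OSTs, each use charges 200GB.  An OST used
         * twice in a row carries 200/2 + 200 = 300GB; halving before each
         * charge keeps the running penalty below twice the maximum charge
         * instead of letting it grow without bound.
         */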
        /* Decrease all OSS penalties */
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                if (oss->lqo_penalty < oss->lqo_penalty_per_obj)
                        oss->lqo_penalty = 0;
                else
                        oss->lqo_penalty -= oss->lqo_penalty_per_obj;
        }

        *total_wt = 0;
        /* Decrease all OST penalties */
        for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
                        continue;
                if (lov->lov_tgts[i]->ltd_qos.ltq_penalty <
                    lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj)
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
                else
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty -=
                                lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj;

                qos_calc_weight(lov, i);

                /* Recalc the total weight of usable osts */
                if (lov->lov_tgts[i]->ltd_qos.ltq_usable)
                        *total_wt += lov->lov_tgts[i]->ltd_qos.ltq_weight;

#ifdef QOS_DEBUG
                CDEBUG(D_QOS, "recalc tgt %d avail="LPU64
                       " ostppo="LPU64" ostp="LPU64" ossppo="LPU64
                       " ossp="LPU64" wt="LPU64"\n",
                       i, TGT_BAVAIL(i) >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_penalty >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty_per_obj >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_weight >> 10);
#endif
        }

        RETURN(0);
}
#define LOV_QOS_EMPTY ((__u32)-1)
/* compute optimal round-robin order, based on OSTs per OSS */
static int qos_calc_rr(struct lov_obd *lov)
{
        struct lov_qos_oss *oss;
        unsigned ost_count, placed;
        int i;
        ENTRY;

        if (!lov->lov_qos.lq_dirty_rr) {
                LASSERT(lov->lov_qos.lq_rr_size);
                RETURN(0);
        }

        down_write(&lov->lov_qos.lq_rw_sem);
        ost_count = lov->desc.ld_tgt_count;

        if (lov->lov_qos.lq_rr_size)
                OBD_FREE(lov->lov_qos.lq_rr_array, lov->lov_qos.lq_rr_size);
        lov->lov_qos.lq_rr_size = ost_count *
                sizeof(lov->lov_qos.lq_rr_array[0]);
        OBD_ALLOC(lov->lov_qos.lq_rr_array, lov->lov_qos.lq_rr_size);
        if (!lov->lov_qos.lq_rr_array) {
                lov->lov_qos.lq_rr_size = 0;
                up_write(&lov->lov_qos.lq_rw_sem);
                RETURN(-ENOMEM);
        }

        for (i = 0; i < ost_count; i++)
                lov->lov_qos.lq_rr_array[i] = LOV_QOS_EMPTY;

        /* Place all the OSTs from 1 OSS at the same time. */
        placed = 0;
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                int j = 0;
                for (i = 0; i < ost_count; i++) {
                        if (lov->lov_tgts[i] &&
                            lov->lov_tgts[i]->ltd_qos.ltq_oss == oss) {
                                /* Evenly space these OSTs across arrayspace */
                                int next = j * ost_count / oss->lqo_ost_count;
                                while (lov->lov_qos.lq_rr_array[next] !=
                                       LOV_QOS_EMPTY)
                                        next = (next + 1) % ost_count;
                                lov->lov_qos.lq_rr_array[next] = i;
                                j++;
                                placed++;
                        }
                }
                LASSERT(j == oss->lqo_ost_count);
        }
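        /*
         * Illustrative layout (not from the source): two OSSs with two
         * OSTs each (ost_count = 4).  The first OSS lands in slots
         * j*4/2 = 0 and 2; the second computes the same slots, finds them
         * taken, and slides forward to 1 and 3.  The resulting order
         * A B A B alternates servers, so consecutive stripes never share
         * an OSS.
         */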
        lov->lov_qos.lq_dirty_rr = 0;
        up_write(&lov->lov_qos.lq_rw_sem);

        if (placed != ost_count) {
                /* This should never happen */
                LCONSOLE_ERROR("Failed to place all OSTs in the round-robin "
                               "list (%d of %d).\n", placed, ost_count);
                for (i = 0; i < ost_count; i++) {
                        LCONSOLE(D_WARNING, "rr #%d ost idx=%d\n", i,
                                 lov->lov_qos.lq_rr_array[i]);
                }
                lov->lov_qos.lq_dirty_rr = 1;
                RETURN(-EAGAIN);
        }

#ifdef QOS_DEBUG
        for (i = 0; i < ost_count; i++) {
                LCONSOLE(D_QOS, "rr #%d ost idx=%d\n", i,
                         lov->lov_qos.lq_rr_array[i]);
        }
#endif

        RETURN(0);
}
void qos_shrink_lsm(struct lov_request_set *set)
{
        struct lov_stripe_md *lsm = set->set_oi->oi_md, *lsm_new;
        /* XXX LOV STACKING call into osc for sizes */
        unsigned oldsize, newsize;

        if (set->set_oti && set->set_cookies && set->set_cookie_sent) {
                struct llog_cookie *cookies;
                oldsize = lsm->lsm_stripe_count * sizeof(*cookies);
                newsize = set->set_count * sizeof(*cookies);

                cookies = set->set_cookies;
                oti_alloc_cookies(set->set_oti, set->set_count);
                if (set->set_oti->oti_logcookies) {
                        memcpy(set->set_oti->oti_logcookies, cookies, newsize);
                        OBD_FREE(cookies, oldsize);
                        set->set_cookies = set->set_oti->oti_logcookies;
                } else {
                        CWARN("'leaking' %d bytes\n", oldsize - newsize);
                }
        }

        CWARN("using fewer stripes for object "LPU64": old %u new %u\n",
              lsm->lsm_object_id, lsm->lsm_stripe_count, set->set_count);
        LASSERT(lsm->lsm_stripe_count >= set->set_count);

        newsize = lov_stripe_md_size(set->set_count);
        OBD_ALLOC(lsm_new, newsize);
        if (lsm_new != NULL) {
                int i;

                memcpy(lsm_new, lsm, sizeof(*lsm));
                for (i = 0; i < lsm->lsm_stripe_count; i++) {
                        if (i < set->set_count) {
                                lsm_new->lsm_oinfo[i] = lsm->lsm_oinfo[i];
                                continue;
                        }
                        OBD_SLAB_FREE(lsm->lsm_oinfo[i], lov_oinfo_slab,
                                      sizeof(struct lov_oinfo));
                }
                lsm_new->lsm_stripe_count = set->set_count;
                OBD_FREE(lsm, sizeof(struct lov_stripe_md) +
                         lsm->lsm_stripe_count * sizeof(struct lov_oinfo *));
                set->set_oi->oi_md = lsm_new;
        } else {
                CWARN("'leaking' few bytes\n");
        }
}
int qos_remedy_create(struct lov_request_set *set, struct lov_request *req)
{
        struct lov_stripe_md *lsm = set->set_oi->oi_md;
        struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
        unsigned ost_idx, ost_count = lov->desc.ld_tgt_count;
        int stripe, i, rc = -EIO;
        ENTRY;

        ost_idx = (req->rq_idx + lsm->lsm_stripe_count) % ost_count;
        for (i = 0; i < ost_count; i++, ost_idx = (ost_idx + 1) % ost_count) {
                if (!lov->lov_tgts[ost_idx] ||
                    !lov->lov_tgts[ost_idx]->ltd_active)
                        continue;
                /* check if an object has already been created on this ost */
                for (stripe = 0; stripe < lsm->lsm_stripe_count; stripe++) {
                        if (stripe == req->rq_stripe)
                                continue;
                        if (ost_idx == lsm->lsm_oinfo[stripe]->loi_ost_idx)
                                break;
                }

                if (stripe >= lsm->lsm_stripe_count) {
                        req->rq_idx = ost_idx;
                        rc = obd_create(lov->lov_tgts[ost_idx]->ltd_exp,
                                        req->rq_oi.oi_oa, &req->rq_oi.oi_md,
                                        set->set_oti);
                        if (!rc)
                                break;
                }
        }
        RETURN(rc);
}
#define LOV_CREATE_RESEED_MULT 4
#define LOV_CREATE_RESEED_MIN  1000
/* Allocate objects on osts with round-robin algorithm */
static int alloc_rr(struct lov_obd *lov, int *idx_arr, int *stripe_cnt)
{
        unsigned array_idx, ost_count = lov->desc.ld_tgt_count;
        unsigned ost_active_count = lov->desc.ld_active_tgt_count;
        int i, *idx_pos = idx_arr;
        __u32 ost_idx;
        ENTRY;

        i = qos_calc_rr(lov);
        if (i)
                RETURN(i);

        if (--lov->lov_start_count <= 0) {
                lov->lov_start_idx = ll_rand() % ost_count;
                lov->lov_start_count =
                        (LOV_CREATE_RESEED_MIN / max(ost_active_count, 1U) +
                         LOV_CREATE_RESEED_MULT) * max(ost_active_count, 1U);
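                /*
                 * Illustrative reseed interval (numbers not from the
                 * source): with 10 active OSTs this works out to
                 * (1000 / 10 + 4) * 10 = 1040 allocations before the start
                 * index is re-randomized, i.e. roughly
                 * LOV_CREATE_RESEED_MIN creations plus a few extra rounds
                 * regardless of OST count.
                 */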
        } else if (*stripe_cnt >= ost_active_count ||
                   lov->lov_start_idx > ost_count) {
                /* If we have allocated from all of the OSTs, slowly
                   precess the next start */
                lov->lov_start_idx %= ost_count;
                ++lov->lov_offset_idx;
        }
        array_idx = (lov->lov_start_idx + lov->lov_offset_idx) % ost_count;
#ifdef QOS_DEBUG
        CDEBUG(D_QOS, "want %d startidx %d startcnt %d offset %d arrayidx %d\n",
               *stripe_cnt, lov->lov_start_idx, lov->lov_start_count,
               lov->lov_offset_idx, array_idx);
#endif

        down_read(&lov->lov_qos.lq_rw_sem);
        for (i = 0; i < ost_count; i++, array_idx=(array_idx + 1) % ost_count) {
                ++lov->lov_start_idx;
                ost_idx = lov->lov_qos.lq_rr_array[array_idx];
#ifdef QOS_DEBUG
                CDEBUG(D_QOS, "#%d strt %d act %d strp %d ary %d idx %d\n",
                       i, lov->lov_start_idx,
                       lov->lov_tgts[ost_idx] ?
                       lov->lov_tgts[ost_idx]->ltd_active : 0,
                       idx_pos - idx_arr, array_idx, ost_idx);
#endif
                if (!lov->lov_tgts[ost_idx] ||
                    !lov->lov_tgts[ost_idx]->ltd_active)
                        continue;
                *idx_pos = ost_idx;
                idx_pos++;
                /* We have enough stripes */
                if (idx_pos - idx_arr == *stripe_cnt)
                        break;
        }
        up_read(&lov->lov_qos.lq_rw_sem);

        *stripe_cnt = idx_pos - idx_arr;
        RETURN(0);
}
/* alloc objects on osts with specific stripe offset */
static int alloc_specific(struct lov_obd *lov, struct lov_stripe_md *lsm,
                          int *idx_arr)
{
        unsigned ost_idx, ost_count = lov->desc.ld_tgt_count;
        int i, *idx_pos = idx_arr;
        ENTRY;

        ost_idx = lsm->lsm_oinfo[0]->loi_ost_idx;
        for (i = 0; i < ost_count; i++, ost_idx = (ost_idx + 1) % ost_count) {
                if (!lov->lov_tgts[ost_idx] ||
                    !lov->lov_tgts[ost_idx]->ltd_active) {
                        continue;
                }
                *idx_pos = ost_idx;
                idx_pos++;
                /* got enough ost */
                if (idx_pos - idx_arr == lsm->lsm_stripe_count)
                        RETURN(0);
        }
        /* If we were passed specific striping params, then a failure to
         * meet those requirements is an error, since we can't reallocate
         * that memory (it might be part of a larger array or something).
         *
         * We can only get here if lsm_stripe_count was originally > 1.
         */
        CERROR("can't lstripe objid "LPX64": have "LPSZ" want %u\n",
               lsm->lsm_object_id, idx_pos - idx_arr, lsm->lsm_stripe_count);
        RETURN(-EFBIG);
}
/* Alloc objects on osts with optimization based on:
   - free space
   - network resources (shared OSS's)
*/
static int alloc_qos(struct obd_export *exp, int *idx_arr, int *stripe_cnt)
{
        struct lov_obd *lov = &exp->exp_obd->u.lov;
        static time_t last_warn = 0;
        time_t now = cfs_time_current_sec();
        __u64 total_bavail, total_weight = 0;
        __u32 ost_count;
        int nfound, good_osts, i, warn = 0, rc = 0;
        ENTRY;

        lov_getref(exp->exp_obd);
        down_write(&lov->lov_qos.lq_rw_sem);

        ost_count = lov->desc.ld_tgt_count;

        if (lov->desc.ld_active_tgt_count < 2)
                GOTO(out, rc = -EAGAIN);

        rc = qos_calc_ppo(exp->exp_obd);
        if (rc)
                GOTO(out, rc);

        total_bavail = 0;
        good_osts = 0;
        /* Warn users about zero available space/inode every 30 min */
        if (cfs_time_sub(now, last_warn) > 60 * 30)
                warn = 1;
        /* Find all the OSTs that are valid stripe candidates */
        for (i = 0; i < ost_count; i++) {
                __u64 bavail;

                if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
                        continue;
                bavail = TGT_BAVAIL(i);
                if (!bavail) {
                        if (warn) {
                                CDEBUG(D_QOS, "no free space on %s\n",
                                     obd_uuid2str(&lov->lov_tgts[i]->ltd_uuid));
                                last_warn = now;
                        }
                        continue;
                }
                if (!TGT_FFREE(i)) {
                        if (warn) {
                                CDEBUG(D_QOS, "no free inodes on %s\n",
                                     obd_uuid2str(&lov->lov_tgts[i]->ltd_uuid));
                                last_warn = now;
                        }
                        continue;
                }

                lov->lov_tgts[i]->ltd_qos.ltq_usable = 1;
                qos_calc_weight(lov, i);
                total_bavail += bavail;
                total_weight += lov->lov_tgts[i]->ltd_qos.ltq_weight;

                good_osts++;
        }

        if (!total_bavail)
                GOTO(out, rc = -ENOSPC);

        /* if we don't have enough good OSTs, we reduce the stripe count. */
        if (good_osts < *stripe_cnt)
                *stripe_cnt = good_osts;

        if (!*stripe_cnt)
                GOTO(out, rc = -EAGAIN);
        /* Find enough OSTs with weighted random allocation. */
        nfound = 0;
        while (nfound < *stripe_cnt) {
                __u64 rand, cur_weight;

                cur_weight = 0;
                rc = -ENODEV;

                if (total_weight) {
#if BITS_PER_LONG == 32
                        /* If total_weight > 32-bit, first generate the high
                         * 32 bits of the random number, then add in the low
                         * 32 bits (truncated to the upper limit, if needed) */
                        if (total_weight > 0xffffffffULL)
                                rand = (__u64)(ll_rand() %
                                        (unsigned)(total_weight >> 32)) << 32;
                        else
                                rand = 0;

                        if (rand == (total_weight & 0xffffffff00000000ULL))
                                rand |= ll_rand() % (unsigned)total_weight;
                        else
                                rand |= ll_rand();
#else
                        rand = ((__u64)ll_rand() << 32 | ll_rand()) %
                                total_weight;
#endif
                } else {
                        rand = 0;
                }
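                /*
                 * Illustrative composition on a 32-bit arch (numbers made
                 * up): for total_weight = 6GB, total_weight >> 32 == 1, so
                 * the high half is ll_rand() % 1 = 0; the high bits of rand
                 * then differ from those of total_weight, so a full 32-bit
                 * ll_rand() is ORed in, giving a value in [0, 4GB), safely
                 * below total_weight.
                 */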
                /* On average, this will hit larger-weighted osts more often.
                   0-weight osts will always get used last (only when rand=0).*/
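                /*
                 * Example walk (weights invented): with usable weights
                 * 10, 30 and 60 and rand drawn from [0, 100), cur_weight
                 * reaches rand at the first OST for rand < 10, the second
                 * for rand < 40, and the third otherwise, so each OST is
                 * chosen in proportion to its weight.
                 */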
                for (i = 0; i < ost_count; i++) {
                        if (!lov->lov_tgts[i] ||
                            !lov->lov_tgts[i]->ltd_qos.ltq_usable)
                                continue;
                        cur_weight += lov->lov_tgts[i]->ltd_qos.ltq_weight;
                        if (cur_weight >= rand) {
#ifdef QOS_DEBUG
                                CDEBUG(D_QOS, "assigned stripe=%d to idx=%d\n",
                                       nfound, i);
#endif
                                idx_arr[nfound++] = i;
                                qos_used(lov, i, &total_weight);
                                rc = 0;
                                break;
                        }
                }
                /* this should never happen */
                if (rc) {
                        CERROR("Didn't find any OSTs?\n");
                        break;
                }
        }
        LASSERT(nfound == *stripe_cnt);

out:
        up_write(&lov->lov_qos.lq_rw_sem);

        if (rc == -EAGAIN)
                rc = alloc_rr(lov, idx_arr, stripe_cnt);

        lov_putref(exp->exp_obd);
        RETURN(rc);
}
/* return new alloced stripe count on success */
static int alloc_idx_array(struct obd_export *exp, struct lov_stripe_md *lsm,
                           int newea, int **idx_arr, int *arr_cnt)
{
        struct lov_obd *lov = &exp->exp_obd->u.lov;
        int stripe_cnt = lsm->lsm_stripe_count;
        int i, rc = 0;
        int *tmp_arr = NULL;
        ENTRY;

        *arr_cnt = stripe_cnt;
        OBD_ALLOC(tmp_arr, *arr_cnt * sizeof(int));
        if (tmp_arr == NULL)
                RETURN(-ENOMEM);
        for (i = 0; i < *arr_cnt; i++)
                tmp_arr[i] = -1;

        if (newea ||
            lsm->lsm_oinfo[0]->loi_ost_idx >= lov->desc.ld_tgt_count)
                rc = alloc_qos(exp, tmp_arr, &stripe_cnt);
        else
                rc = alloc_specific(lov, lsm, tmp_arr);

        if (rc)
                GOTO(out_arr, rc);

        *idx_arr = tmp_arr;
        RETURN(stripe_cnt);

out_arr:
        OBD_FREE(tmp_arr, *arr_cnt * sizeof(int));
        *arr_cnt = 0;
        RETURN(rc);
}
static void free_idx_array(int *idx_arr, int arr_cnt)
{
        if (arr_cnt)
                OBD_FREE(idx_arr, arr_cnt * sizeof(int));
}
int qos_prep_create(struct obd_export *exp, struct lov_request_set *set)
{
        struct lov_obd *lov = &exp->exp_obd->u.lov;
        struct lov_stripe_md *lsm;
        struct obdo *src_oa = set->set_oi->oi_oa;
        struct obd_trans_info *oti = set->set_oti;
        int i, stripes, rc = 0, newea = 0;
        int *idx_arr = NULL, idx_cnt = 0;
        ENTRY;

        LASSERT(src_oa->o_valid & OBD_MD_FLID);

        if (set->set_oi->oi_md == NULL) {
                int stripe_cnt = lov_get_stripecnt(lov, 0);

                /* If the MDS file was truncated up to some size, stripe over
                 * enough OSTs to allow the file to be created at that size.
                 * This may mean we use more than the default # of stripes. */
                if (src_oa->o_valid & OBD_MD_FLSIZE) {
                        obd_size min_bavail = LUSTRE_STRIPE_MAXBYTES;

                        /* Find a small number of stripes we can use
                           (up to # of active osts). */
                        stripes = 1;
                        lov_getref(exp->exp_obd);
                        for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                                if (!lov->lov_tgts[i] ||
                                    !lov->lov_tgts[i]->ltd_active)
                                        continue;
                                min_bavail = min(min_bavail, TGT_BAVAIL(i));
                                if (min_bavail * stripes > src_oa->o_size)
                                        break;
                                stripes++;
                        }
                        lov_putref(exp->exp_obd);
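                        /*
                         * Worked example (invented sizes): with o_size =
                         * 10GB and at least 4GB free on every active OST,
                         * stripes grows until min_bavail * stripes exceeds
                         * the file size: 4GB * 3 = 12GB > 10GB, so three
                         * stripes suffice to hold the truncated size.
                         */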
                        if (stripes < stripe_cnt)
                                stripes = stripe_cnt;
                } else {
                        stripes = stripe_cnt;
                }

                rc = lov_alloc_memmd(&set->set_oi->oi_md, stripes,
                                     lov->desc.ld_pattern ?
                                     lov->desc.ld_pattern : LOV_PATTERN_RAID0,
                                     LOV_MAGIC);
                if (rc < 0)
                        GOTO(out_err, rc);
                newea = 1;
                rc = 0;
        }

        lsm = set->set_oi->oi_md;
        lsm->lsm_object_id = src_oa->o_id;
        if (!lsm->lsm_stripe_size)
                lsm->lsm_stripe_size = lov->desc.ld_default_stripe_size;
        if (!lsm->lsm_pattern) {
                LASSERT(lov->desc.ld_pattern);
                lsm->lsm_pattern = lov->desc.ld_pattern;
        }

        stripes = alloc_idx_array(exp, lsm, newea, &idx_arr, &idx_cnt);
        LASSERT(stripes <= lsm->lsm_stripe_count);
        if (stripes <= 0)
                GOTO(out_err, rc = stripes ? stripes : -EIO);

        for (i = 0; i < stripes; i++) {
                struct lov_request *req;
                int ost_idx = idx_arr[i];
                LASSERT(ost_idx >= 0);

                OBD_ALLOC(req, sizeof(*req));
                if (req == NULL)
                        GOTO(out_err, rc = -ENOMEM);
                lov_set_add_req(req, set);

                req->rq_buflen = sizeof(*req->rq_oi.oi_md);
                OBD_ALLOC(req->rq_oi.oi_md, req->rq_buflen);
                if (req->rq_oi.oi_md == NULL)
                        GOTO(out_err, rc = -ENOMEM);

                req->rq_oi.oi_oa = obdo_alloc();
                if (req->rq_oi.oi_oa == NULL)
                        GOTO(out_err, rc = -ENOMEM);

                req->rq_idx = ost_idx;
                req->rq_stripe = i;
                /* create data objects with "parent" OA */
                memcpy(req->rq_oi.oi_oa, src_oa, sizeof(*req->rq_oi.oi_oa));

                /* XXX When we start creating objects on demand, we need to
                 * make sure that we always create the object on the
                 * stripe which holds the existing file size.
                 */
                if (src_oa->o_valid & OBD_MD_FLSIZE) {
                        req->rq_oi.oi_oa->o_size =
                                lov_size_to_stripe(lsm, src_oa->o_size, i);

                        CDEBUG(D_INODE, "stripe %d has size "LPU64"/"LPU64"\n",
                               i, req->rq_oi.oi_oa->o_size, src_oa->o_size);
                }
        }
        LASSERT(set->set_count == stripes);

        if (stripes < lsm->lsm_stripe_count)
                qos_shrink_lsm(set);

        if (oti && (src_oa->o_valid & OBD_MD_FLCOOKIE)) {
                oti_alloc_cookies(oti, set->set_count);
                if (!oti->oti_logcookies)
                        GOTO(out_err, rc = -ENOMEM);
                set->set_cookies = oti->oti_logcookies;
        }
out_err:
        if (newea && rc)
                obd_free_memmd(exp, &set->set_oi->oi_md);
        free_idx_array(idx_arr, idx_cnt);
        RETURN(rc);
}
void qos_update(struct lov_obd *lov)
{
        ENTRY;
        lov->lov_qos.lq_dirty = 1;
        EXIT;
}