lustre/lov/lov_qos.c (fs/lustre-release.git, commit d020dd73d11208865d0b9efa9636cc4bc4c77a7b)
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  */
36
37 #ifndef EXPORT_SYMTAB
38 # define EXPORT_SYMTAB
39 #endif
40 #define DEBUG_SUBSYSTEM S_LOV
41
42 #ifdef __KERNEL__
43 #include <libcfs/libcfs.h>
44 #else
45 #include <liblustre.h>
46 #endif
47
48 #include <obd_class.h>
49 #include <obd_lov.h>
50 #include "lov_internal.h"
51
52 /* #define QOS_DEBUG 1 */
53 #define D_QOS D_OTHER
54
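/* Available bytes and free inodes on OST index @i, taken from the statfs
 * data cached in the target's OBD device. */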
55 #define TGT_BAVAIL(i) (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bavail *\
56                        lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bsize)
57 #define TGT_FFREE(i)  (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_ffree)
58
59
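/* Register the OST at @index with its parent OSS for QoS accounting.
 * OSSes are identified by the remote connection UUID; the OSS list is kept
 * sorted by descending OST count, and the QoS and RR state is marked dirty. */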
60 int qos_add_tgt(struct obd_device *obd, __u32 index)
61 {
62         struct lov_obd *lov = &obd->u.lov;
63         struct lov_qos_oss *oss, *temposs;
64         struct obd_export *exp = lov->lov_tgts[index]->ltd_exp;
65         int rc = 0, found = 0;
66         ENTRY;
67
68         /* We only need this QOS struct on MDT, not clients - but we may not
69          * have registered the LOV's observer yet, so there's no way to know */
70         if (!exp || !exp->exp_connection) {
71                 CERROR("Missing connection\n");
72                 RETURN(-ENOTCONN);
73         }
74
75         down_write(&lov->lov_qos.lq_rw_sem);
76         mutex_down(&lov->lov_lock);
77         list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
78                 if (obd_uuid_equals(&oss->lqo_uuid,
79                                     &exp->exp_connection->c_remote_uuid)) {
80                         found++;
81                         break;
82                 }
83         }
84
85         if (!found) {
86                 OBD_ALLOC_PTR(oss);
87                 if (!oss)
88                         GOTO(out, rc = -ENOMEM);
89                 memcpy(&oss->lqo_uuid,
90                        &exp->exp_connection->c_remote_uuid,
91                        sizeof(oss->lqo_uuid));
92         } else {
93                 /* Assume we have to move this one */
94                 list_del(&oss->lqo_oss_list);
95         }
96
97         oss->lqo_ost_count++;
98         lov->lov_tgts[index]->ltd_qos.ltq_oss = oss;
99
100         /* Add sorted by # of OSTs.  Find the first entry that we're
101            bigger than... */
102         list_for_each_entry(temposs, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
103                 if (oss->lqo_ost_count > temposs->lqo_ost_count)
104                         break;
105         }
106         /* ...and add before it.  If we're the first or smallest, temposs
107            points to the list head, and we add to the end. */
108         list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);
109
110         lov->lov_qos.lq_dirty = 1;
111         lov->lov_qos.lq_rr.lqr_dirty = 1;
112
113         CDEBUG(D_QOS, "add tgt %s to OSS %s (%d OSTs)\n",
114                obd_uuid2str(&lov->lov_tgts[index]->ltd_uuid),
115                obd_uuid2str(&oss->lqo_uuid),
116                oss->lqo_ost_count);
117
118 out:
119         mutex_up(&lov->lov_lock);
120         up_write(&lov->lov_qos.lq_rw_sem);
121         RETURN(rc);
122 }
123
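/* Drop OST @tgt from its OSS's QoS accounting; free the OSS entry once its
 * last OST is gone, and mark the QoS and RR state dirty. */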
124 int qos_del_tgt(struct obd_device *obd, struct lov_tgt_desc *tgt)
125 {
126         struct lov_obd *lov = &obd->u.lov;
127         struct lov_qos_oss *oss;
128         int rc = 0;
129         ENTRY;
130
131         down_write(&lov->lov_qos.lq_rw_sem);
132
133         oss = tgt->ltd_qos.ltq_oss;
134         if (!oss)
135                 GOTO(out, rc = -ENOENT);
136
137         oss->lqo_ost_count--;
138         if (oss->lqo_ost_count == 0) {
139                 CDEBUG(D_QOS, "removing OSS %s\n",
140                        obd_uuid2str(&oss->lqo_uuid));
141                 list_del(&oss->lqo_oss_list);
142                 OBD_FREE_PTR(oss);
143         }
144
145         lov->lov_qos.lq_dirty = 1;
146         lov->lov_qos.lq_rr.lqr_dirty = 1;
147 out:
148         up_write(&lov->lov_qos.lq_rw_sem);
149         RETURN(rc);
150 }
151
152 /* Recalculate per-object penalties for OSSs and OSTs,
153    which depend on the size of each OST in an OSS */
154 static int qos_calc_ppo(struct obd_device *obd)
155 {
156         struct lov_obd *lov = &obd->u.lov;
157         struct lov_qos_oss *oss;
158         __u64 ba_max, ba_min, temp;
159         __u32 num_active;
160         int rc, i, prio_wide;
161         time_t now, age;
162         ENTRY;
163
164         if (!lov->lov_qos.lq_dirty)
165                 GOTO(out, rc = 0);
166
167         num_active = lov->desc.ld_active_tgt_count - 1;
168         if (num_active < 1)
169                 GOTO(out, rc = -EAGAIN);
170
171         /* find bavail on each OSS */
172         list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
173                 oss->lqo_bavail = 0;
174         }
175         lov->lov_qos.lq_active_oss_count = 0;
176
177         /* How strongly the user wants to select OSTs "widely" (not recently
178            chosen and not on recently used OSSes), as opposed to "freely"
179            (by available free space).  Range 0-256. */
180         prio_wide = 256 - lov->lov_qos.lq_prio_free;
181
182         ba_min = (__u64)(-1);
183         ba_max = 0;
184         now = cfs_time_current_sec();
185         /* Calculate OST penalty per object */
186         /* (lov ref taken in alloc_qos) */
187         for (i = 0; i < lov->desc.ld_tgt_count; i++) {
188                 if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
189                         continue;
190                 temp = TGT_BAVAIL(i);
191                 if (!temp)
192                         continue;
193                 ba_min = min(temp, ba_min);
194                 ba_max = max(temp, ba_max);
195
196                 /* Count the number of usable OSSes */
197                 if (lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail == 0)
198                         lov->lov_qos.lq_active_oss_count++;
199                 lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail += temp;
200
201                 /* per-OST penalty is prio * TGT_bavail / (num_ost - 1) / 2 */
202                 temp >>= 1;
203                 do_div(temp, num_active);
204                 lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj =
205                         (temp * prio_wide) >> 8;
206
207                 age = (now - lov->lov_tgts[i]->ltd_qos.ltq_used) >> 3;
208                 if (lov->lov_qos.lq_reset || age > 32 * lov->desc.ld_qos_maxage)
209                         lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
210                 else if (age > lov->desc.ld_qos_maxage)
211                         /* Decay the penalty by half for every 8x the update
212                          * interval that the device has been idle.  That gives
213                          * lots of time for the statfs information to be
214                          * updated (which the penalty is only a proxy for),
215                          * and avoids penalizing OSS/OSTs under light load. */
216                         lov->lov_tgts[i]->ltd_qos.ltq_penalty >>=
217                                 (age / lov->desc.ld_qos_maxage);
218         }
219
220         num_active = lov->lov_qos.lq_active_oss_count - 1;
221         if (num_active < 1) {
222                 /* If there's only 1 OSS, we can't penalize it, so instead
223                    we have to double the OST penalty */
224                 num_active = 1;
225                 for (i = 0; i < lov->desc.ld_tgt_count; i++) {
226                         if (lov->lov_tgts[i] == NULL)
227                                 continue;
228                         lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj <<= 1;
229                 }
230         }
231
232         /* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */
233         list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
234                 temp = oss->lqo_bavail >> 1;
235                 do_div(temp, oss->lqo_ost_count * num_active);
236                 oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8;
237
238                 age = (now - oss->lqo_used) >> 3;
239                 if (lov->lov_qos.lq_reset || age > 32 * lov->desc.ld_qos_maxage)
240                         oss->lqo_penalty = 0;
241                 else if (age > lov->desc.ld_qos_maxage)
242                         /* Decay the penalty by half for every 8x the update
243                          * interval that the device has been idle.  That gives
244                          * lots of time for the statfs information to be
245                          * updated (which the penalty is only a proxy for),
246                          * and avoids penalizing OSS/OSTs under light load. */
247                         oss->lqo_penalty >>= (age / lov->desc.ld_qos_maxage);
248         }
249
250         lov->lov_qos.lq_dirty = 0;
251         lov->lov_qos.lq_reset = 0;
252
253         /* If all OSTs have almost the same amount of free space,
254          * do RR allocation for better creation performance */
255         lov->lov_qos.lq_same_space = 0;
256         temp = ba_max - ba_min;
257         ba_min = (ba_min * 51) >> 8;     /* 51/256 = .20 */
258         if (temp < ba_min) {
259                 /* Difference is less than 20% */
260                 lov->lov_qos.lq_same_space = 1;
261                 /* Reset weights for the next time we enter qos mode */
262                 lov->lov_qos.lq_reset = 1;
263         }
264         rc = 0;
265
266 out:
267         if (!rc && lov->lov_qos.lq_same_space)
268                 RETURN(-EAGAIN);
269         RETURN(rc);
270 }
271
272 static int qos_calc_weight(struct lov_obd *lov, int i)
273 {
274         __u64 temp, temp2;
275
276         /* Final ost weight = TGT_BAVAIL - ost_penalty - oss_penalty */
277         temp = TGT_BAVAIL(i);
278         temp2 = lov->lov_tgts[i]->ltd_qos.ltq_penalty +
279                 lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty;
280         if (temp < temp2)
281                 lov->lov_tgts[i]->ltd_qos.ltq_weight = 0;
282         else
283                 lov->lov_tgts[i]->ltd_qos.ltq_weight = temp - temp2;
284         return 0;
285 }
286
287 /* We just used this index for a stripe; adjust everyone's weights */
288 static int qos_used(struct lov_obd *lov, struct ost_pool *osts,
289                     __u32 index, __u64 *total_wt)
290 {
291         struct lov_qos_oss *oss;
292         int j;
293         ENTRY;
294
295         /* Don't allocate on this OST again until the next alloc_qos */
296         lov->lov_tgts[index]->ltd_qos.ltq_usable = 0;
297
298         oss = lov->lov_tgts[index]->ltd_qos.ltq_oss;
299
300         /* Decay old penalty by half (we're adding max penalty, and don't
301            want it to run away.) */
302         lov->lov_tgts[index]->ltd_qos.ltq_penalty >>= 1;
303         oss->lqo_penalty >>= 1;
304
305         /* mark the OSS and OST as recently used */
306         lov->lov_tgts[index]->ltd_qos.ltq_used =
307                 oss->lqo_used = cfs_time_current_sec();
308
309         /* Set max penalties for this OST and OSS */
310         lov->lov_tgts[index]->ltd_qos.ltq_penalty +=
311                 lov->lov_tgts[index]->ltd_qos.ltq_penalty_per_obj *
312                 lov->desc.ld_active_tgt_count;
313         oss->lqo_penalty += oss->lqo_penalty_per_obj *
314                 lov->lov_qos.lq_active_oss_count;
315
316         /* Decrease all OSS penalties */
317         list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
318                 if (oss->lqo_penalty < oss->lqo_penalty_per_obj)
319                         oss->lqo_penalty = 0;
320                 else
321                         oss->lqo_penalty -= oss->lqo_penalty_per_obj;
322         }
323
324         *total_wt = 0;
325         /* Decrease all OST penalties */
326         for (j = 0; j < osts->op_count; j++) {
327                 int i;
328
329                 i = osts->op_array[j];
330                 if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
331                         continue;
332                 if (lov->lov_tgts[i]->ltd_qos.ltq_penalty <
333                     lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj)
334                         lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
335                 else
336                         lov->lov_tgts[i]->ltd_qos.ltq_penalty -=
337                         lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj;
338
339                 qos_calc_weight(lov, i);
340
341                 /* Recalc the total weight of usable osts */
342                 if (lov->lov_tgts[i]->ltd_qos.ltq_usable)
343                         *total_wt += lov->lov_tgts[i]->ltd_qos.ltq_weight;
344
345 #ifdef QOS_DEBUG
346                 CDEBUG(D_QOS, "recalc tgt %d usable=%d avail="LPU64
347                        " ostppo="LPU64" ostp="LPU64" ossppo="LPU64
348                        " ossp="LPU64" wt="LPU64"\n",
349                        i, lov->lov_tgts[i]->ltd_qos.ltq_usable,
350                        TGT_BAVAIL(i) >> 10,
351                        lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj >> 10,
352                        lov->lov_tgts[i]->ltd_qos.ltq_penalty >> 10,
353                        lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty_per_obj>>10,
354                        lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty >> 10,
355                        lov->lov_tgts[i]->ltd_qos.ltq_weight >> 10);
356 #endif
357         }
358
359         RETURN(0);
360 }
361
362 #define LOV_QOS_EMPTY ((__u32)-1)
363 /* compute optimal round-robin order, based on OSTs per OSS */
364 static int qos_calc_rr(struct lov_obd *lov, struct ost_pool *src_pool,
365                        struct lov_qos_rr *lqr)
366 {
367         struct lov_qos_oss *oss;
368         unsigned placed, real_count;
369         int i, rc;
370         ENTRY;
371
372         if (!lqr->lqr_dirty) {
373                 LASSERT(lqr->lqr_pool.op_size);
374                 RETURN(0);
375         }
376
377         /* Do actual allocation. */
378         down_write(&lov->lov_qos.lq_rw_sem);
379
380         /*
381          * Check again. While we were sleeping on @lq_rw_sem, something could
382          * have changed.
383          */
384         if (!lqr->lqr_dirty) {
385                 LASSERT(lqr->lqr_pool.op_size);
386                 up_write(&lov->lov_qos.lq_rw_sem);
387                 RETURN(0);
388         }
389
390         real_count = src_pool->op_count;
391
392         /* Reset the pool array to empty */
393         /* alloc_rr is holding a read lock on the pool, so nobody is adding/
394            deleting from the pool. The lq_rw_sem ensures that nobody else
395            is reading. */
396         lqr->lqr_pool.op_count = real_count;
397         rc = lov_ost_pool_extend(&lqr->lqr_pool, real_count);
398         if (rc) {
399                 up_write(&lov->lov_qos.lq_rw_sem);
400                 RETURN(rc);
401         }
402         for (i = 0; i < lqr->lqr_pool.op_count; i++)
403                 lqr->lqr_pool.op_array[i] = LOV_QOS_EMPTY;
404
405         /* Place all the OSTs from 1 OSS at the same time. */
406         placed = 0;
407         list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
408                 int j = 0;
409                 for (i = 0; i < lqr->lqr_pool.op_count; i++) {
410                         if (lov->lov_tgts[src_pool->op_array[i]] &&
411                             (lov->lov_tgts[src_pool->op_array[i]]->ltd_qos.ltq_oss == oss)) {
412                               /* Evenly space these OSTs across arrayspace */
413                               int next = j * lqr->lqr_pool.op_count / oss->lqo_ost_count;
414                               while (lqr->lqr_pool.op_array[next] !=
415                                      LOV_QOS_EMPTY)
416                                         next = (next + 1) % lqr->lqr_pool.op_count;
417                               lqr->lqr_pool.op_array[next] = src_pool->op_array[i];
418                               j++;
419                               placed++;
420                         }
421                 }
422         }
423
424         lqr->lqr_dirty = 0;
425         up_write(&lov->lov_qos.lq_rw_sem);
426
427         if (placed != real_count) {
428                 /* This should never happen */
429                 LCONSOLE_ERROR_MSG(0x14e, "Failed to place all OSTs in the "
430                                    "round-robin list (%d of %d).\n",
431                                    placed, real_count);
432                 for (i = 0; i < lqr->lqr_pool.op_count; i++) {
433                         LCONSOLE(D_WARNING, "rr #%d ost idx=%d\n", i,
434                                  lqr->lqr_pool.op_array[i]);
435                 }
436                 lqr->lqr_dirty = 1;
437                 RETURN(-EAGAIN);
438         }
439
440 #ifdef QOS_DEBUG
441         for (i = 0; i < lqr->lqr_pool.op_count; i++) {
442                 LCONSOLE(D_QOS, "rr #%d ost idx=%d\n", i,
443                          lqr->lqr_pool.op_array[i]);
444         }
445 #endif
446
447         RETURN(0);
448 }
449
450
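/* Shrink the stripe metadata when fewer objects were created than the lsm
 * was allocated for: keep the first set_count llog cookies and lov_oinfo
 * slots and release the rest. */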
451 void qos_shrink_lsm(struct lov_request_set *set)
452 {
453         struct lov_stripe_md *lsm = set->set_oi->oi_md, *lsm_new;
454         /* XXX LOV STACKING call into osc for sizes */
455         unsigned oldsize, newsize;
456
457         if (set->set_oti && set->set_cookies && set->set_cookie_sent) {
458                 struct llog_cookie *cookies;
459                 oldsize = lsm->lsm_stripe_count * sizeof(*cookies);
460                 newsize = set->set_count * sizeof(*cookies);
461
462                 cookies = set->set_cookies;
463                 oti_alloc_cookies(set->set_oti, set->set_count);
464                 if (set->set_oti->oti_logcookies) {
465                         memcpy(set->set_oti->oti_logcookies, cookies, newsize);
466                         OBD_FREE(cookies, oldsize);
467                         set->set_cookies = set->set_oti->oti_logcookies;
468                 } else {
469                         CWARN("'leaking' %d bytes\n", oldsize - newsize);
470                 }
471         }
472
473         CWARN("using fewer stripes for object "LPU64": old %u new %u\n",
474               lsm->lsm_object_id, lsm->lsm_stripe_count, set->set_count);
475         LASSERT(lsm->lsm_stripe_count >= set->set_count);
476
477         newsize = lov_stripe_md_size(set->set_count);
478         OBD_ALLOC(lsm_new, newsize);
479         if (lsm_new != NULL) {
480                 int i;
481                 memcpy(lsm_new, lsm, sizeof(*lsm));
482                 for (i = 0; i < lsm->lsm_stripe_count; i++) {
483                         if (i < set->set_count) {
484                                 lsm_new->lsm_oinfo[i] = lsm->lsm_oinfo[i];
485                                 continue;
486                         }
487                         OBD_SLAB_FREE(lsm->lsm_oinfo[i], lov_oinfo_slab,
488                                       sizeof(struct lov_oinfo));
489                 }
490                 lsm_new->lsm_stripe_count = set->set_count;
491                 OBD_FREE(lsm, sizeof(struct lov_stripe_md) +
492                          lsm->lsm_stripe_count * sizeof(struct lov_oinfo *));
493                 set->set_oi->oi_md = lsm_new;
494         } else {
495                 CWARN("'leaking' few bytes\n");
496         }
497 }
498
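/* Find a replacement OST for @req: scan for the next active OST that does
 * not already hold one of this file's stripes and create the object there. */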
499 int qos_remedy_create(struct lov_request_set *set, struct lov_request *req)
500 {
501         struct lov_stripe_md *lsm = set->set_oi->oi_md;
502         struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
503         unsigned ost_idx, ost_count = lov->desc.ld_tgt_count;
504         int stripe, i, rc = -EIO;
505         ENTRY;
506
507         ost_idx = (req->rq_idx + lsm->lsm_stripe_count) % ost_count;
508         for (i = 0; i < ost_count; i++, ost_idx = (ost_idx + 1) % ost_count) {
509                 if (!lov->lov_tgts[ost_idx] ||
510                     !lov->lov_tgts[ost_idx]->ltd_active)
511                         continue;
512                 /* check if an object has already been created on this OST */
513                 for (stripe = 0; stripe < lsm->lsm_stripe_count; stripe++) {
514                         if (stripe == req->rq_stripe)
515                                 continue;
516                         if (ost_idx == lsm->lsm_oinfo[stripe]->loi_ost_idx)
517                                 break;
518                 }
519
520                 if (stripe >= lsm->lsm_stripe_count) {
521                         req->rq_idx = ost_idx;
522                         rc = obd_create(lov->lov_tgts[ost_idx]->ltd_exp,
523                                         req->rq_oi.oi_oa, &req->rq_oi.oi_md,
524                                         set->set_oti);
525                         if (!rc)
526                                 break;
527                 }
528         }
529         RETURN(rc);
530 }
531
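/* With LOV_USES_DEFAULT_STRIPE we may return up to 25% fewer stripes than
 * requested; an explicitly requested stripe count must be met exactly. */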
532 static int min_stripe_count(int stripe_cnt, int flags)
533 {
534         return (flags & LOV_USES_DEFAULT_STRIPE ?
535                 stripe_cnt - (stripe_cnt / 4) : stripe_cnt);
536 }
537
538 #define LOV_CREATE_RESEED_MULT 4
539 #define LOV_CREATE_RESEED_MIN  1000
540 /* Allocate objects on OSTs using the round-robin algorithm */
541 static int alloc_rr(struct lov_obd *lov, int *idx_arr, int *stripe_cnt,
542                     char *poolname, int flags)
543 {
544         unsigned array_idx;
545         int i, rc, *idx_pos;
546         __u32 ost_idx;
547         int ost_start_idx_temp;
548         int speed = 0;
549         int stripe_cnt_min = min_stripe_count(*stripe_cnt, flags);
550         struct pool_desc *pool;
551         struct ost_pool *osts;
552         struct lov_qos_rr *lqr;
553         ENTRY;
554
555         pool = lov_find_pool(lov, poolname);
556         if (pool == NULL) {
557                 osts = &(lov->lov_packed);
558                 lqr = &(lov->lov_qos.lq_rr);
559         } else {
560                 down_read(&pool_tgt_rw_sem(pool));
561                 osts = &(pool->pool_obds);
562                 lqr = &(pool->pool_rr);
563         }
564
565         rc = qos_calc_rr(lov, osts, lqr);
566         if (rc)
567                 GOTO(out, rc);
568
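        /* After lqr_start_count creates, reseed the round-robin starting
         * index at a random OST so allocations don't always begin from the
         * same place. */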
569         if (--lqr->lqr_start_count <= 0) {
570                 lqr->lqr_start_idx = ll_rand() % osts->op_count;
571                 lqr->lqr_start_count =
572                         (LOV_CREATE_RESEED_MIN / max(osts->op_count, 1U) +
573                          LOV_CREATE_RESEED_MULT) * max(osts->op_count, 1U);
574         } else if (stripe_cnt_min >= osts->op_count ||
575                    lqr->lqr_start_idx > osts->op_count) {
576                 /* If we have allocated from all of the OSTs, slowly
577                  * precess the next start if the OST/stripe count isn't
578                  * already doing this for us. */
579                 lqr->lqr_start_idx %= osts->op_count;
580                 if (*stripe_cnt > 1 && (osts->op_count % (*stripe_cnt)) != 1)
581                         ++lqr->lqr_offset_idx;
582         }
583         down_read(&lov->lov_qos.lq_rw_sem);
584         ost_start_idx_temp = lqr->lqr_start_idx;
585
586 repeat_find:
587         array_idx = (lqr->lqr_start_idx + lqr->lqr_offset_idx) % osts->op_count;
588         idx_pos = idx_arr;
589 #ifdef QOS_DEBUG
590         CDEBUG(D_QOS, "pool '%s' want %d startidx %d startcnt %d offset %d "
591                "active %d count %d arrayidx %d\n", poolname,
592                *stripe_cnt, lqr->lqr_start_idx, lqr->lqr_start_count,
593                lqr->lqr_offset_idx, osts->op_count, osts->op_count, array_idx);
594 #endif
595
596         for (i = 0; i < osts->op_count;
597                     i++, array_idx=(array_idx + 1) % osts->op_count) {
598                 ++lqr->lqr_start_idx;
599                 ost_idx = lqr->lqr_pool.op_array[array_idx];
600 #ifdef QOS_DEBUG
601                 CDEBUG(D_QOS, "#%d strt %d act %d strp %d ary %d idx %d\n",
602                        i, lqr->lqr_start_idx,
603                        ((ost_idx != LOV_QOS_EMPTY) && lov->lov_tgts[ost_idx]) ?
604                        lov->lov_tgts[ost_idx]->ltd_active : 0,
605                        idx_pos - idx_arr, array_idx, ost_idx);
606 #endif
607                 if ((ost_idx == LOV_QOS_EMPTY) || !lov->lov_tgts[ost_idx] ||
608                     !lov->lov_tgts[ost_idx]->ltd_active)
609                         continue;
610
611                 /* Fail check before osc_precreate() is called
612                    so we can only 'fail' a single OSC. */
613                 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
614                         continue;
615
616                 /* Drop slow OSCs if we can */
617                 if (obd_precreate(lov->lov_tgts[ost_idx]->ltd_exp) > speed)
618                         continue;
619
620                 *idx_pos = ost_idx;
621                 idx_pos++;
622                 /* We have enough stripes */
623                 if (idx_pos - idx_arr == *stripe_cnt)
624                         break;
625         }
626         if ((speed < 2) && (idx_pos - idx_arr < stripe_cnt_min)) {
627                 /* Try again, allowing slower OSCs */
628                 speed++;
629                 lqr->lqr_start_idx = ost_start_idx_temp;
630                 goto repeat_find;
631         }
632
633         up_read(&lov->lov_qos.lq_rw_sem);
634
635         *stripe_cnt = idx_pos - idx_arr;
636 out:
637         if (pool != NULL) {
638                 up_read(&pool_tgt_rw_sem(pool));
639                 /* put back ref got by lov_find_pool() */
640                 lh_put(lov->lov_pools_hash_body, &pool->pool_hash);
641         }
642
643         RETURN(rc);
644 }
645
646 /* Allocate objects on OSTs, starting from the requested stripe offset */
647 static int alloc_specific(struct lov_obd *lov, struct lov_stripe_md *lsm,
648                           int *idx_arr)
649 {
650         unsigned ost_idx, array_idx, ost_count;
651         int i, rc, *idx_pos;
652         int speed = 0;
653         struct pool_desc *pool;
654         struct ost_pool *osts;
655         ENTRY;
656
657         pool = lov_find_pool(lov, lsm->lsm_pool_name);
658         if (pool == NULL) {
659                 osts = &(lov->lov_packed);
660         } else {
661                 down_read(&pool_tgt_rw_sem(pool));
662                 osts = &(pool->pool_obds);
663         }
664
665         ost_count = osts->op_count;
666
667 repeat_find:
668         /* search loi_ost_idx in ost array */
669         array_idx = 0;
670         for (i = 0; i < ost_count; i++) {
671                 if (osts->op_array[i] == lsm->lsm_oinfo[0]->loi_ost_idx) {
672                         array_idx = i;
673                         break;
674                 }
675         }
676         if (i == ost_count) {
677                 CERROR("Start index %d not found in pool '%s'\n",
678                        lsm->lsm_oinfo[0]->loi_ost_idx, lsm->lsm_pool_name);
679                 GOTO(out, rc = -EINVAL);
680         }
681
682         idx_pos = idx_arr;
683         for (i = 0; i < ost_count;
684              i++, array_idx = (array_idx + 1) % ost_count) {
685                 ost_idx = osts->op_array[array_idx];
686
687                 if (!lov->lov_tgts[ost_idx] ||
688                     !lov->lov_tgts[ost_idx]->ltd_active) {
689                         continue;
690                 }
691
692                 /* Fail check before osc_precreate() is called
693                    so we can only 'fail' a single OSC. */
694                 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
695                         continue;
696
697                 /* Drop slow OSCs if we can, but not for requested start idx.
698                  *
699                  * This means "if OSC is slow and it is not the requested
700                  * start OST, then it can be skipped, otherwise skip it only
701                  * if it is inactive/recovering/out-of-space." */
702                 if ((obd_precreate(lov->lov_tgts[ost_idx]->ltd_exp) > speed) &&
703                     (i != 0 || speed >= 2))
704                         continue;
705
706                 *idx_pos = ost_idx;
707                 idx_pos++;
708                 /* We have enough stripes */
709                 if (idx_pos - idx_arr == lsm->lsm_stripe_count)
710                         GOTO(out, rc = 0);
711         }
712         if (speed < 2) {
713                 /* Try again, allowing slower OSCs */
714                 speed++;
715                 goto repeat_find;
716         }
717
718         /* If we were passed specific striping params, then a failure to
719          * meet those requirements is an error, since we can't reallocate
720          * that memory (it might be part of a larger array or something).
721          *
722          * We can only get here if lsm_stripe_count was originally > 1.
723          */
724         CERROR("can't lstripe objid "LPX64": have %d want %u\n",
725                lsm->lsm_object_id, (int)(idx_pos - idx_arr),
726                lsm->lsm_stripe_count);
727         rc = -EFBIG;
728 out:
729         if (pool != NULL) {
730                 up_read(&pool_tgt_rw_sem(pool));
731                 /* put back ref got by lov_find_pool() */
732                 lh_put(lov->lov_pools_hash_body, &pool->pool_hash);
733         }
734
735         RETURN(rc);
736 }
737
738 /* Allocate objects on OSTs with optimization based on:
739    - free space
740    - network resources (shared OSSes)
741 */
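/* Each usable OST is weighted by its free space minus accumulated OST and
 * OSS penalties; stripes are then picked by weighted random selection, with
 * qos_used() marking each winner unusable and bumping its penalties.  When
 * free space is nearly balanced or fewer than two OSTs are active, -EAGAIN
 * falls back to round-robin allocation (alloc_rr) at the end of this
 * function. */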
742 static int alloc_qos(struct obd_export *exp, int *idx_arr, int *stripe_cnt,
743                      char *poolname, int flags)
744 {
745         struct lov_obd *lov = &exp->exp_obd->u.lov;
746         static time_t last_warn = 0;
747         time_t now = cfs_time_current_sec();
748         __u64 total_bavail, total_weight = 0;
749         int nfound, good_osts, i, warn = 0, rc = 0;
750         int stripe_cnt_min = min_stripe_count(*stripe_cnt, flags);
751         struct pool_desc *pool;
752         struct ost_pool *osts;
753         struct lov_qos_rr *lqr;
754         ENTRY;
755
756         if (stripe_cnt_min < 1)
757                 RETURN(-EINVAL);
758
759         pool = lov_find_pool(lov, poolname);
760         if (pool == NULL) {
761                 osts = &(lov->lov_packed);
762                 lqr = &(lov->lov_qos.lq_rr);
763         } else {
764                 down_read(&pool_tgt_rw_sem(pool));
765                 osts = &(pool->pool_obds);
766                 lqr = &(pool->pool_rr);
767         }
768
769         lov_getref(exp->exp_obd);
770
771         /* Detect -EAGAIN early, before expensive lock is taken. */
772         if (!lov->lov_qos.lq_dirty && lov->lov_qos.lq_same_space)
773                 GOTO(out_nolock, rc = -EAGAIN);
774
775         /* Do actual allocation, use write lock here. */
776         down_write(&lov->lov_qos.lq_rw_sem);
777
778         /*
779          * Check again: while we were sleeping on @lq_rw_sem, things could
780          * have changed.
781          */
782         if (!lov->lov_qos.lq_dirty && lov->lov_qos.lq_same_space)
783                 GOTO(out, rc = -EAGAIN);
784
785         if (lov->desc.ld_active_tgt_count < 2)
786                 GOTO(out, rc = -EAGAIN);
787
788         rc = qos_calc_ppo(exp->exp_obd);
789         if (rc)
790                 GOTO(out, rc);
791
792         total_bavail = 0;
793         good_osts = 0;
794         /* Warn users about zero available space/inodes at most every 30 min */
795         if (cfs_time_sub(now, last_warn) > 60 * 30)
796                 warn = 1;
797         /* Find all the OSTs that are valid stripe candidates */
798         for (i = 0; i < osts->op_count; i++) {
799                 __u64 bavail;
800
801                 if (!lov->lov_tgts[osts->op_array[i]] ||
802                     !lov->lov_tgts[osts->op_array[i]]->ltd_active)
803                         continue;
804                 bavail = TGT_BAVAIL(osts->op_array[i]);
805                 if (!bavail) {
806                         if (warn) {
807                                 CDEBUG(D_QOS, "no free space on %s\n",
808                                      obd_uuid2str(&lov->lov_tgts[osts->op_array[i]]->ltd_uuid));
809                                 last_warn = now;
810                         }
811                         continue;
812                 }
813                 if (!TGT_FFREE(osts->op_array[i])) {
814                         if (warn) {
815                                 CDEBUG(D_QOS, "no free inodes on %s\n",
816                                      obd_uuid2str(&lov->lov_tgts[osts->op_array[i]]->ltd_uuid));
817                                 last_warn = now;
818                         }
819                         continue;
820                 }
821
822                 /* Fail check before osc_precreate() is called
823                    so we can only 'fail' a single OSC. */
824                 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && osts->op_array[i] == 0)
825                         continue;
826
827                 if (obd_precreate(lov->lov_tgts[osts->op_array[i]]->ltd_exp) > 2)
828                         continue;
829
830                 lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_usable = 1;
831                 qos_calc_weight(lov, osts->op_array[i]);
832                 total_bavail += bavail;
833                 total_weight += lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_weight;
834
835                 good_osts++;
836         }
837
838 #ifdef QOS_DEBUG
839         CDEBUG(D_QOS, "found %d good osts\n", good_osts);
840 #endif
841
842         if (good_osts < stripe_cnt_min)
843                 GOTO(out, rc = -EAGAIN);
844
845         if (!total_bavail)
846                 GOTO(out, rc = -ENOSPC);
847
848         /* We have enough osts */
849         if (good_osts < *stripe_cnt)
850                 *stripe_cnt = good_osts;
851
852         if (!*stripe_cnt)
853                 GOTO(out, rc = -EAGAIN);
854
855         /* Find enough OSTs with weighted random allocation. */
856         nfound = 0;
857         while (nfound < *stripe_cnt) {
858                 __u64 rand, cur_weight;
859
860                 cur_weight = 0;
861                 rc = -ENODEV;
862
863                 if (total_weight) {
864 #if BITS_PER_LONG == 32
865                         rand = ll_rand() % (unsigned)total_weight;
866                         /* If total_weight > 32-bit, first generate the high
867                          * 32 bits of the random number, then add in the low
868                          * 32 bits (truncated to the upper limit, if needed) */
869                         if (total_weight > 0xffffffffULL)
870                                 rand = (__u64)(ll_rand() %
871                                           (unsigned)(total_weight >> 32)) << 32;
872                         else
873                                 rand = 0;
874
875                         if (rand == (total_weight & 0xffffffff00000000ULL))
876                                 rand |= ll_rand() % (unsigned)total_weight;
877                         else
878                                 rand |= ll_rand();
879
880 #else
881                         rand = ((__u64)ll_rand() << 32 | ll_rand()) %
882                                 total_weight;
883 #endif
884                 } else {
885                         rand = 0;
886                 }
887
888                 /* On average, this will hit larger-weighted osts more often.
889                    0-weight osts will always get used last (only when rand=0).*/
890                 for (i = 0; i < osts->op_count; i++) {
891                         if (!lov->lov_tgts[osts->op_array[i]] ||
892                             !lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_usable)
893                                 continue;
894
895                         cur_weight += lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_weight;
896 #ifdef QOS_DEBUG
897                         CDEBUG(D_QOS, "stripe_cnt=%d nfound=%d cur_weight="LPU64
898                                       " rand="LPU64" total_weight="LPU64"\n",
899                                *stripe_cnt, nfound, cur_weight, rand, total_weight);
900 #endif
901                         if (cur_weight >= rand) {
902 #ifdef QOS_DEBUG
903                                 CDEBUG(D_QOS, "assigned stripe=%d to idx=%d\n",
904                                        nfound, osts->op_array[i]);
905 #endif
906                                 idx_arr[nfound++] = osts->op_array[i];
907                                 qos_used(lov, osts, osts->op_array[i], &total_weight);
908                                 rc = 0;
909                                 break;
910                         }
911                 }
912                 /* we should never hit the condition below */
913                 if (rc) {
914                         CERROR("Didn't find any OSTs?\n");
915                         break;
916                 }
917         }
918         LASSERT(nfound == *stripe_cnt);
919
920 out:
921         up_write(&lov->lov_qos.lq_rw_sem);
922
923 out_nolock:
924         if (pool != NULL) {
925                 up_read(&pool_tgt_rw_sem(pool));
926                 /* put back ref got by lov_find_pool() */
927                 lh_put(lov->lov_pools_hash_body, &pool->pool_hash);
928         }
929
930         if (rc == -EAGAIN)
931                 rc = alloc_rr(lov, idx_arr, stripe_cnt, poolname, flags);
932
933         lov_putref(exp->exp_obd);
934         RETURN(rc);
935 }
936
937 /* Return the newly allocated stripe count on success */
938 static int alloc_idx_array(struct obd_export *exp, struct lov_stripe_md *lsm,
939                            int newea, int **idx_arr, int *arr_cnt, int flags)
940 {
941         struct lov_obd *lov = &exp->exp_obd->u.lov;
942         int stripe_cnt = lsm->lsm_stripe_count;
943         int i, rc = 0;
944         int *tmp_arr = NULL;
945         ENTRY;
946
947         *arr_cnt = stripe_cnt;
948         OBD_ALLOC(tmp_arr, *arr_cnt * sizeof(int));
949         if (tmp_arr == NULL)
950                 RETURN(-ENOMEM);
951         for (i = 0; i < *arr_cnt; i++)
952                 tmp_arr[i] = -1;
953
954         if (newea ||
955             lsm->lsm_oinfo[0]->loi_ost_idx >= lov->desc.ld_tgt_count)
956                 rc = alloc_qos(exp, tmp_arr, &stripe_cnt,
957                                lsm->lsm_pool_name, flags);
958         else
959                 rc = alloc_specific(lov, lsm, tmp_arr);
960
961         if (rc)
962                 GOTO(out_arr, rc);
963
964         *idx_arr = tmp_arr;
965         RETURN(stripe_cnt);
966 out_arr:
967         OBD_FREE(tmp_arr, *arr_cnt * sizeof(int));
968         *arr_cnt = 0;
969         RETURN(rc);
970 }
971
972 static void free_idx_array(int *idx_arr, int arr_cnt)
973 {
974         if (arr_cnt)
975                 OBD_FREE(idx_arr, arr_cnt * sizeof(int));
976 }
977
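/* Prepare the request set for object creation: choose a stripe count
 * (honouring the file size if given), allocate the stripe-to-OST mapping via
 * alloc_idx_array(), and build one lov_request per stripe with a copy of the
 * parent obdo. */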
978 int qos_prep_create(struct obd_export *exp, struct lov_request_set *set)
979 {
980         struct lov_obd *lov = &exp->exp_obd->u.lov;
981         struct lov_stripe_md *lsm;
982         struct obdo *src_oa = set->set_oi->oi_oa;
983         struct obd_trans_info *oti = set->set_oti;
984         int i, stripes, rc = 0, newea = 0;
985         int flag = LOV_USES_ASSIGNED_STRIPE;
986         int *idx_arr = NULL, idx_cnt = 0;
987         ENTRY;
988
989         LASSERT(src_oa->o_valid & OBD_MD_FLID);
990         LASSERT(src_oa->o_valid & OBD_MD_FLGROUP);
991
992         if (set->set_oi->oi_md == NULL) {
993                 int stripes_def = lov_get_stripecnt(lov, 0);
994
995                 /* If the MDS file was truncated up to some size, stripe over
996                  * enough OSTs to allow the file to be created at that size.
997                  * This may mean we use more than the default # of stripes. */
998                 if (src_oa->o_valid & OBD_MD_FLSIZE) {
999                         obd_size min_bavail = LUSTRE_STRIPE_MAXBYTES;
1000
1001                         /* Find a small number of stripes we can use
1002                            (up to # of active osts). */
1003                         stripes = 1;
1004                         for (i = 0; i < lov->desc.ld_tgt_count; i++) {
1005                                 if (!lov->lov_tgts[i] ||
1006                                     !lov->lov_tgts[i]->ltd_active)
1007                                         continue;
1008                                 min_bavail = min(min_bavail, TGT_BAVAIL(i));
1009                                 if (min_bavail * stripes > src_oa->o_size)
1010                                         break;
1011                                 stripes++;
1012                         }
1013
1014                         if (stripes < stripes_def)
1015                                 stripes = stripes_def;
1016                 } else {
1017                          flag = LOV_USES_DEFAULT_STRIPE;
1018                          stripes = stripes_def;
1019                 }
1020
1021                 rc = lov_alloc_memmd(&set->set_oi->oi_md, stripes,
1022                                      lov->desc.ld_pattern ?
1023                                      lov->desc.ld_pattern : LOV_PATTERN_RAID0,
1024                                      LOV_MAGIC);
1025                 if (rc < 0)
1026                         GOTO(out_err, rc);
1027                 newea = 1;
1028                 rc = 0;
1029         }
1030
1031         lsm = set->set_oi->oi_md;
1032         lsm->lsm_object_id = src_oa->o_id;
1033         lsm->lsm_object_gr = src_oa->o_gr;
1034
1035         if (!lsm->lsm_stripe_size)
1036                 lsm->lsm_stripe_size = lov->desc.ld_default_stripe_size;
1037         if (!lsm->lsm_pattern) {
1038                 LASSERT(lov->desc.ld_pattern);
1039                 lsm->lsm_pattern = lov->desc.ld_pattern;
1040         }
1041
1042         stripes = alloc_idx_array(exp, lsm, newea, &idx_arr, &idx_cnt, flag);
1043         if (stripes <= 0)
1044                 GOTO(out_err, rc = stripes ? stripes : -EIO);
1045         LASSERTF(stripes <= lsm->lsm_stripe_count,"requested %d allocated %d\n",
1046                  lsm->lsm_stripe_count, stripes);
1047
1048         for (i = 0; i < stripes; i++) {
1049                 struct lov_request *req;
1050                 int ost_idx = idx_arr[i];
1051                 LASSERT(ost_idx >= 0);
1052
1053                 OBD_ALLOC(req, sizeof(*req));
1054                 if (req == NULL)
1055                         GOTO(out_err, rc = -ENOMEM);
1056                 lov_set_add_req(req, set);
1057
1058                 req->rq_buflen = sizeof(*req->rq_oi.oi_md);
1059                 OBD_ALLOC(req->rq_oi.oi_md, req->rq_buflen);
1060                 if (req->rq_oi.oi_md == NULL)
1061                         GOTO(out_err, rc = -ENOMEM);
1062
1063                 OBDO_ALLOC(req->rq_oi.oi_oa);
1064                 if (req->rq_oi.oi_oa == NULL)
1065                         GOTO(out_err, rc = -ENOMEM);
1066
1067                 req->rq_idx = ost_idx;
1068                 req->rq_stripe = i;
1069                 /* create data objects with "parent" OA */
1070                 memcpy(req->rq_oi.oi_oa, src_oa, sizeof(*req->rq_oi.oi_oa));
1071
1072                 /* XXX When we start creating objects on demand, we need to
1073                  *     make sure that we always create the object on the
1074                  *     stripe which holds the existing file size.
1075                  */
1076                 if (src_oa->o_valid & OBD_MD_FLSIZE) {
1077                         req->rq_oi.oi_oa->o_size =
1078                                 lov_size_to_stripe(lsm, src_oa->o_size, i);
1079
1080                         CDEBUG(D_INODE, "stripe %d has size "LPU64"/"LPU64"\n",
1081                                i, req->rq_oi.oi_oa->o_size, src_oa->o_size);
1082                 }
1083         }
1084         LASSERT(set->set_count == stripes);
1085
1086         if (stripes < lsm->lsm_stripe_count)
1087                 qos_shrink_lsm(set);
1088
1089         if (oti && (src_oa->o_valid & OBD_MD_FLCOOKIE)) {
1090                 oti_alloc_cookies(oti, set->set_count);
1091                 if (!oti->oti_logcookies)
1092                         GOTO(out_err, rc = -ENOMEM);
1093                 set->set_cookies = oti->oti_logcookies;
1094         }
1095 out_err:
1096         if (newea && rc)
1097                 obd_free_memmd(exp, &set->set_oi->oi_md);
1098         if (idx_arr)
1099                 free_idx_array(idx_arr, idx_cnt);
1100         EXIT;
1101         return rc;
1102 }
1103
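/* Mark the QoS data dirty so per-object penalties are recalculated on the
 * next allocation. */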
1104 void qos_update(struct lov_obd *lov)
1105 {
1106         ENTRY;
1107         lov->lov_qos.lq_dirty = 1;
1108 }