/* lustre/lov/lov_qos.c */
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_LOV

#ifdef __KERNEL__
#include <libcfs/libcfs.h>
#else
#include <liblustre.h>
#endif

#include <obd_class.h>
#include <obd_lov.h>
#include "lov_internal.h"

/* #define QOS_DEBUG 1 */
#define D_QOS D_OTHER

#define TGT_BAVAIL(i) (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bavail *\
                       lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bsize)
#define TGT_FFREE(i)  (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_ffree)
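/* Note: TGT_BAVAIL() is the free space on OST i in bytes (free blocks times
 * block size from the target's cached statfs data), and TGT_FFREE() is its
 * free inode count; both feed the allocation decisions below. */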


int qos_add_tgt(struct obd_device *obd, __u32 index)
{
        struct lov_obd *lov = &obd->u.lov;
        struct lov_qos_oss *oss, *temposs;
        struct obd_export *exp = lov->lov_tgts[index]->ltd_exp;
        int rc = 0, found = 0;
        ENTRY;

        /* We only need this QOS struct on MDT, not clients - but we may not
         * have registered the LOV's observer yet, so there's no way to know */
        if (!exp || !exp->exp_connection) {
                CERROR("Missing connection\n");
                RETURN(-ENOTCONN);
        }

        down_write(&lov->lov_qos.lq_rw_sem);
        mutex_down(&lov->lov_lock);
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                if (obd_uuid_equals(&oss->lqo_uuid,
                                    &exp->exp_connection->c_remote_uuid)) {
                        found++;
                        break;
                }
        }

        if (!found) {
                OBD_ALLOC_PTR(oss);
                if (!oss)
                        GOTO(out, rc = -ENOMEM);
                memcpy(&oss->lqo_uuid,
                       &exp->exp_connection->c_remote_uuid,
                       sizeof(oss->lqo_uuid));
        } else {
                /* Assume we have to move this one */
                list_del(&oss->lqo_oss_list);
        }

        oss->lqo_ost_count++;
        lov->lov_tgts[index]->ltd_qos.ltq_oss = oss;

        /* Add sorted by # of OSTs.  Find the first entry that we're
           bigger than... */
        list_for_each_entry(temposs, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                if (oss->lqo_ost_count > temposs->lqo_ost_count)
                        break;
        }
        /* ...and add before it.  If we're the first or smallest, temposs
           points to the list head, and we add to the end. */
        list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);

        lov->lov_qos.lq_dirty = 1;
        lov->lov_qos.lq_rr.lqr_dirty = 1;

        CDEBUG(D_QOS, "add tgt %s to OSS %s (%d OSTs)\n",
               obd_uuid2str(&lov->lov_tgts[index]->ltd_uuid),
               obd_uuid2str(&oss->lqo_uuid),
               oss->lqo_ost_count);

out:
        mutex_up(&lov->lov_lock);
        up_write(&lov->lov_qos.lq_rw_sem);
        RETURN(rc);
}

int qos_del_tgt(struct obd_device *obd, __u32 index)
{
        struct lov_obd *lov = &obd->u.lov;
        struct lov_qos_oss *oss;
        int rc = 0;
        ENTRY;

        if (!lov->lov_tgts[index])
                RETURN(0);

        down_write(&lov->lov_qos.lq_rw_sem);

        oss = lov->lov_tgts[index]->ltd_qos.ltq_oss;
        if (!oss)
                GOTO(out, rc = -ENOENT);

        oss->lqo_ost_count--;
        if (oss->lqo_ost_count == 0) {
                CDEBUG(D_QOS, "removing OSS %s\n",
                       obd_uuid2str(&oss->lqo_uuid));
                list_del(&oss->lqo_oss_list);
                OBD_FREE_PTR(oss);
        }

        lov->lov_qos.lq_dirty = 1;
        lov->lov_qos.lq_rr.lqr_dirty = 1;
out:
        up_write(&lov->lov_qos.lq_rw_sem);
        RETURN(rc);
}

/* Recalculate per-object penalties for OSSs and OSTs; the penalty for
   each OST (and for its OSS) depends on how much space that OST has */
static int qos_calc_ppo(struct obd_device *obd)
{
        struct lov_obd *lov = &obd->u.lov;
        struct lov_qos_oss *oss;
        __u64 ba_max, ba_min, temp;
        __u32 num_active;
        int rc, i, prio_wide;
        ENTRY;

        if (!lov->lov_qos.lq_dirty)
                GOTO(out, rc = 0);

        num_active = lov->desc.ld_active_tgt_count - 1;
        if (num_active < 1)
                GOTO(out, rc = -EAGAIN);

        /* find bavail on each OSS */
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                oss->lqo_bavail = 0;
        }
        lov->lov_qos.lq_active_oss_count = 0;

        /* How strongly the user wants OSTs selected "widely" (not recently
           chosen, and not on recently used OSSes) as opposed to "freely"
           (by available free space).  Scale is 0-256. */
        prio_wide = 256 - lov->lov_qos.lq_prio_free;
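        /* Illustrative example: with lq_prio_free set to 232 of 256 (~91%
         * priority on free space), prio_wide is 24, so the penalties computed
         * below are scaled by 24/256 and free space dominates placement; with
         * lq_prio_free at 0, the penalties get full weight and selection
         * spreads more "widely" across targets and servers. */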

        ba_min = (__u64)(-1);
        ba_max = 0;
        /* Calculate OST penalty per object */
        /* (lov ref taken in alloc_qos) */
        for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
                        continue;
                temp = TGT_BAVAIL(i);
                if (!temp)
                        continue;
                ba_min = min(temp, ba_min);
                ba_max = max(temp, ba_max);

                /* Count the number of usable OSS's */
                if (lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail == 0)
                        lov->lov_qos.lq_active_oss_count++;
                lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail += temp;

                /* per-OST penalty is prio * TGT_bavail / (num_ost - 1) / 2 */
                temp >>= 1;
                do_div(temp, num_active);
                lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj =
                        (temp * prio_wide) >> 8;

                if (lov->lov_qos.lq_reset == 0)
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
        }
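        /* Worked example (illustrative): with prio_wide = 24, four active
         * OSTs (num_active = 3) and 1 TB available on a given OST, its
         * per-object penalty is (1 TB / 2 / 3) * 24 / 256, i.e. roughly
         * 16 GB of weight charged for every object placed on it. */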

        num_active = lov->lov_qos.lq_active_oss_count - 1;
        if (num_active < 1) {
                /* If there's only 1 OSS, we can't penalize it, so instead
                   we have to double the OST penalty */
                num_active = 1;
                for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                        if (lov->lov_tgts[i] == NULL)
                                continue;
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj <<= 1;
                }
        }

        /* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                temp = oss->lqo_bavail >> 1;
                do_div(temp, oss->lqo_ost_count * num_active);
                oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8;
                if (lov->lov_qos.lq_reset == 0)
                        oss->lqo_penalty = 0;
        }
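        /* The per-OSS penalty mirrors the per-OST one, but uses the free
         * space of the whole server spread over its OSTs and over the other
         * active OSSes, so creating an object on any OST of a busy server
         * lowers the weight of all of that server's OSTs. */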

        lov->lov_qos.lq_dirty = 0;
        lov->lov_qos.lq_reset = 0;

        /* If all OSTs have nearly the same amount of free space,
         * do round-robin allocation instead, for better creation performance */
        lov->lov_qos.lq_same_space = 0;
        temp = ba_max - ba_min;
        ba_min = (ba_min * 51) >> 8;     /* 51/256 = .20 */
        if (temp < ba_min) {
                /* Difference is less than 20% */
                lov->lov_qos.lq_same_space = 1;
                /* Reset weights for the next time we enter qos mode */
                lov->lov_qos.lq_reset = 0;
        }
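        /* Example (illustrative): if the OST with the least free space has
         * 800 GB available and the one with the most has 940 GB, the 140 GB
         * spread is below the 20% threshold of 160 GB (800 GB * 51 / 256),
         * so lq_same_space is set and callers fall back to round-robin. */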
        rc = 0;

out:
        if (!rc && lov->lov_qos.lq_same_space)
                RETURN(-EAGAIN);
        RETURN(rc);
}

static int qos_calc_weight(struct lov_obd *lov, int i)
{
        __u64 temp, temp2;

        /* Final ost weight = TGT_BAVAIL - ost_penalty - oss_penalty */
        temp = TGT_BAVAIL(i);
        temp2 = lov->lov_tgts[i]->ltd_qos.ltq_penalty +
                lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty;
        if (temp < temp2)
                lov->lov_tgts[i]->ltd_qos.ltq_weight = 0;
        else
                lov->lov_tgts[i]->ltd_qos.ltq_weight = temp - temp2;
        return 0;
}
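/* For instance (illustrative numbers), an OST with 1 TB available and
 * accumulated OST/OSS penalties of 16 GB and 8 GB ends up with a weight of
 * roughly 1000 GB; recently used targets carry larger penalties and hence
 * lower weights, making them less likely to be picked in alloc_qos(). */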

/* We just used this index for a stripe; adjust everyone's weights */
static int qos_used(struct lov_obd *lov, struct ost_pool *osts,
                    __u32 index, __u64 *total_wt)
{
        struct lov_qos_oss *oss;
        int j;
        ENTRY;

        /* Don't allocate from this stripe anymore, until the next alloc_qos */
        lov->lov_tgts[index]->ltd_qos.ltq_usable = 0;

        oss = lov->lov_tgts[index]->ltd_qos.ltq_oss;

        /* Decay old penalty by half (we're adding max penalty, and don't
           want it to run away.) */
        lov->lov_tgts[index]->ltd_qos.ltq_penalty >>= 1;
        oss->lqo_penalty >>= 1;

        /* Set max penalties for this OST and OSS */
        lov->lov_tgts[index]->ltd_qos.ltq_penalty +=
                lov->lov_tgts[index]->ltd_qos.ltq_penalty_per_obj *
                lov->desc.ld_active_tgt_count;
        oss->lqo_penalty += oss->lqo_penalty_per_obj *
                lov->lov_qos.lq_active_oss_count;
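        /* Net effect: the chosen OST and OSS pay a large penalty up front
         * (per-object penalty times the number of active targets/servers),
         * which is halved the next time they are chosen and shrinks by one
         * per-object penalty for every object allocated elsewhere, in the
         * loops below. */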

        /* Decrease all OSS penalties */
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                if (oss->lqo_penalty < oss->lqo_penalty_per_obj)
                        oss->lqo_penalty = 0;
                else
                        oss->lqo_penalty -= oss->lqo_penalty_per_obj;
        }

        *total_wt = 0;
        /* Decrease all OST penalties */
        for (j = 0; j < osts->op_count; j++) {
                int i;

                i = osts->op_array[j];
                if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
                        continue;
                if (lov->lov_tgts[i]->ltd_qos.ltq_penalty <
                    lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj)
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
                else
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty -=
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj;

                qos_calc_weight(lov, i);

                /* Recalc the total weight of usable osts */
                if (lov->lov_tgts[i]->ltd_qos.ltq_usable)
                        *total_wt += lov->lov_tgts[i]->ltd_qos.ltq_weight;

#ifdef QOS_DEBUG
                CDEBUG(D_QOS, "recalc tgt %d usable=%d avail="LPU64
                       " ostppo="LPU64" ostp="LPU64" ossppo="LPU64
                       " ossp="LPU64" wt="LPU64"\n",
                       i, lov->lov_tgts[i]->ltd_qos.ltq_usable,
                       TGT_BAVAIL(i) >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_penalty >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty_per_obj>>10,
                       lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_weight >> 10);
#endif
        }

        RETURN(0);
}

#define LOV_QOS_EMPTY ((__u32)-1)
/* compute optimal round-robin order, based on OSTs per OSS */
static int qos_calc_rr(struct lov_obd *lov, struct ost_pool *src_pool,
                       struct lov_qos_rr *lqr)
{
        struct lov_qos_oss *oss;
        unsigned placed, real_count;
        int i, rc;
        ENTRY;

        if (!lqr->lqr_dirty) {
                LASSERT(lqr->lqr_pool.op_size);
                RETURN(0);
        }

        /* Do actual allocation. */
        down_write(&lov->lov_qos.lq_rw_sem);

        /*
         * Check again. While we were sleeping on @lq_rw_sem something could
         * change.
         */
        if (!lqr->lqr_dirty) {
                LASSERT(lqr->lqr_pool.op_size);
                up_write(&lov->lov_qos.lq_rw_sem);
                RETURN(0);
        }

        if (lqr->lqr_pool.op_size)
                lov_ost_pool_free(&lqr->lqr_pool);
        rc = lov_ost_pool_init(&lqr->lqr_pool, src_pool->op_count);
        if (rc) {
                up_write(&lov->lov_qos.lq_rw_sem);
                RETURN(rc);
        }

        for (i = 0; i < src_pool->op_count; i++)
                lqr->lqr_pool.op_array[i] = LOV_QOS_EMPTY;
        lqr->lqr_pool.op_count = src_pool->op_count;

        /* Place all the OSTs from 1 OSS at the same time. */
        real_count = lqr->lqr_pool.op_count;
        placed = 0;
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                int j = 0;
                for (i = 0; i < lqr->lqr_pool.op_count; i++) {
                        if (lov->lov_tgts[src_pool->op_array[i]] &&
                            (lov->lov_tgts[src_pool->op_array[i]]->ltd_qos.ltq_oss == oss)) {
                              /* Evenly space these OSTs across the array */
                              int next = j * lqr->lqr_pool.op_count / oss->lqo_ost_count;
                              while (lqr->lqr_pool.op_array[next] !=
                                     LOV_QOS_EMPTY)
                                        next = (next + 1) % lqr->lqr_pool.op_count;
                              lqr->lqr_pool.op_array[next] = src_pool->op_array[i];
                              j++;
                              placed++;
                        }
                }
        }
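        /* Illustrative example: with two OSSes of three OSTs each
         * (op_count = 6), the first OSS lands its OSTs in slots 0, 2 and 4
         * (j * 6 / 3), and the second OSS finds those slots taken and slides
         * into 1, 3 and 5, giving an alternating A B A B A B round-robin
         * order across servers. */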

        lqr->lqr_dirty = 0;
        up_write(&lov->lov_qos.lq_rw_sem);

        if (placed != real_count) {
                /* This should never happen */
                LCONSOLE_ERROR_MSG(0x14e, "Failed to place all OSTs in the "
                                   "round-robin list (%d of %d).\n",
                                   placed, real_count);
                for (i = 0; i < lqr->lqr_pool.op_count; i++) {
                        LCONSOLE(D_WARNING, "rr #%d ost idx=%d\n", i,
                                 lqr->lqr_pool.op_array[i]);
                }
                lqr->lqr_dirty = 1;
                RETURN(-EAGAIN);
        }

#ifdef QOS_DEBUG
        for (i = 0; i < lqr->lqr_pool.op_count; i++) {
                LCONSOLE(D_QOS, "rr #%d ost idx=%d\n", i,
                         lqr->lqr_pool.op_array[i]);
        }
#endif

        RETURN(0);
}


void qos_shrink_lsm(struct lov_request_set *set)
{
        struct lov_stripe_md *lsm = set->set_oi->oi_md, *lsm_new;
        /* XXX LOV STACKING call into osc for sizes */
        unsigned oldsize, newsize;

        if (set->set_oti && set->set_cookies && set->set_cookie_sent) {
                struct llog_cookie *cookies;
                oldsize = lsm->lsm_stripe_count * sizeof(*cookies);
                newsize = set->set_count * sizeof(*cookies);

                cookies = set->set_cookies;
                oti_alloc_cookies(set->set_oti, set->set_count);
                if (set->set_oti->oti_logcookies) {
                        memcpy(set->set_oti->oti_logcookies, cookies, newsize);
                        OBD_FREE(cookies, oldsize);
                        set->set_cookies = set->set_oti->oti_logcookies;
                } else {
                        CWARN("'leaking' %d bytes\n", oldsize - newsize);
                }
        }

        CWARN("using fewer stripes for object "LPU64": old %u new %u\n",
              lsm->lsm_object_id, lsm->lsm_stripe_count, set->set_count);
        LASSERT(lsm->lsm_stripe_count >= set->set_count);

        newsize = lov_stripe_md_size(set->set_count);
        OBD_ALLOC(lsm_new, newsize);
        if (lsm_new != NULL) {
                int i;
                memcpy(lsm_new, lsm, sizeof(*lsm));
                for (i = 0; i < lsm->lsm_stripe_count; i++) {
                        if (i < set->set_count) {
                                lsm_new->lsm_oinfo[i] = lsm->lsm_oinfo[i];
                                continue;
                        }
                        OBD_SLAB_FREE(lsm->lsm_oinfo[i], lov_oinfo_slab,
                                      sizeof(struct lov_oinfo));
                }
                lsm_new->lsm_stripe_count = set->set_count;
                OBD_FREE(lsm, sizeof(struct lov_stripe_md) +
                         lsm->lsm_stripe_count * sizeof(struct lov_oinfo *));
                set->set_oi->oi_md = lsm_new;
        } else {
                CWARN("'leaking' few bytes\n");
        }
}

int qos_remedy_create(struct lov_request_set *set, struct lov_request *req)
{
        struct lov_stripe_md *lsm = set->set_oi->oi_md;
        struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
        unsigned ost_idx, ost_count = lov->desc.ld_tgt_count;
        int stripe, i, rc = -EIO;
        ENTRY;

        ost_idx = (req->rq_idx + lsm->lsm_stripe_count) % ost_count;
        for (i = 0; i < ost_count; i++, ost_idx = (ost_idx + 1) % ost_count) {
                if (!lov->lov_tgts[ost_idx] ||
                    !lov->lov_tgts[ost_idx]->ltd_active)
                        continue;
                /* check whether an object has already been created on this OST */
                for (stripe = 0; stripe < lsm->lsm_stripe_count; stripe++) {
                        if (stripe == req->rq_stripe)
                                continue;
                        if (ost_idx == lsm->lsm_oinfo[stripe]->loi_ost_idx)
                                break;
                }

                if (stripe >= lsm->lsm_stripe_count) {
                        req->rq_idx = ost_idx;
                        rc = obd_create(lov->lov_tgts[ost_idx]->ltd_exp,
                                        req->rq_oi.oi_oa, &req->rq_oi.oi_md,
                                        set->set_oti);
                        if (!rc)
                                break;
                }
        }
        RETURN(rc);
}

static int min_stripe_count(int stripe_cnt, int flags)
{
        return (flags & LOV_USES_DEFAULT_STRIPE ?
                stripe_cnt - (stripe_cnt / 4) : stripe_cnt);
}
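/* Example: a request using the default stripe count of 4 may succeed with as
 * few as 3 stripes (4 - 4/4), whereas explicitly requested striping must be
 * satisfied in full. */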

#define LOV_CREATE_RESEED_MULT 4
#define LOV_CREATE_RESEED_MIN  1000
/* Allocate objects on osts with round-robin algorithm */
static int alloc_rr(struct lov_obd *lov, int *idx_arr, int *stripe_cnt,
                    char *poolname, int flags)
{
        unsigned array_idx;
        int i, *idx_pos;
        __u32 ost_idx;
        int ost_start_idx_temp;
        int speed = 0;
        int stripe_cnt_min = min_stripe_count(*stripe_cnt, flags);
        struct pool_desc *pool;
        struct ost_pool *osts;
        struct lov_qos_rr *lqr;
        ENTRY;

        pool = lov_find_pool(lov, poolname);
        if (pool == NULL) {
                osts = &(lov->lov_packed);
                lqr = &(lov->lov_qos.lq_rr);
        } else {
                read_lock(&pool_tgt_rwlock(pool));
                osts = &(pool->pool_obds);
                lqr = &(pool->pool_rr);
        }

        i = qos_calc_rr(lov, osts, lqr);
        if (i) {
                if (pool != NULL)
                        read_unlock(&pool_tgt_rwlock(pool));
                RETURN(i);
        }

        if (--lqr->lqr_start_count <= 0) {
                lqr->lqr_start_idx = ll_rand() % osts->op_count;
                lqr->lqr_start_count =
                        (LOV_CREATE_RESEED_MIN / max(osts->op_count, 1U) +
                         LOV_CREATE_RESEED_MULT) * max(osts->op_count, 1U);
        } else if (stripe_cnt_min >= osts->op_count ||
                   lqr->lqr_start_idx > osts->op_count) {
                /* If we have allocated from all of the OSTs, slowly
                 * precess the next start if the OST/stripe count isn't
                 * already doing this for us. */
                lqr->lqr_start_idx %= osts->op_count;
                if (*stripe_cnt > 1 && (osts->op_count % (*stripe_cnt)) != 1)
                        ++lqr->lqr_offset_idx;
        }
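        /* Illustrative numbers: with 10 usable OSTs the start index is
         * re-randomized roughly every (1000 / 10 + 4) * 10 = 1040 calls
         * (i.e. file creations); between reseeds the offset precession above
         * keeps consecutive files from always starting on the same OST. */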
        down_read(&lov->lov_qos.lq_rw_sem);
        ost_start_idx_temp = lqr->lqr_start_idx;

repeat_find:
        array_idx = (lqr->lqr_start_idx + lqr->lqr_offset_idx) % osts->op_count;
        idx_pos = idx_arr;
#ifdef QOS_DEBUG
        CDEBUG(D_QOS, "pool '%s' want %d startidx %d startcnt %d offset %d "
               "active %d count %d arrayidx %d\n", poolname,
               *stripe_cnt, lqr->lqr_start_idx, lqr->lqr_start_count,
               lqr->lqr_offset_idx, osts->op_count, osts->op_count, array_idx);
#endif

        for (i = 0; i < osts->op_count;
                    i++, array_idx=(array_idx + 1) % osts->op_count) {
                ++lqr->lqr_start_idx;
                ost_idx = lqr->lqr_pool.op_array[array_idx];
#ifdef QOS_DEBUG
                CDEBUG(D_QOS, "#%d strt %d act %d strp %d ary %d idx %d\n",
                       i, lqr->lqr_start_idx,
                       ((ost_idx != LOV_QOS_EMPTY) && lov->lov_tgts[ost_idx]) ?
                       lov->lov_tgts[ost_idx]->ltd_active : 0,
                       idx_pos - idx_arr, array_idx, ost_idx);
#endif
                if ((ost_idx == LOV_QOS_EMPTY) || !lov->lov_tgts[ost_idx] ||
                    !lov->lov_tgts[ost_idx]->ltd_active)
                        continue;

                /* Fail check before osc_precreate() is called,
                   so that we can only 'fail' a single OSC. */
                if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
                        continue;

                /* Drop slow OSCs if we can */
                if (obd_precreate(lov->lov_tgts[ost_idx]->ltd_exp) > speed)
                        continue;

                *idx_pos = ost_idx;
                idx_pos++;
                /* We have enough stripes */
                if (idx_pos - idx_arr == *stripe_cnt)
                        break;
        }
        if ((speed < 2) && (idx_pos - idx_arr < stripe_cnt_min)) {
                /* Try again, allowing slower OSCs */
                speed++;
                lqr->lqr_start_idx = ost_start_idx_temp;
                goto repeat_find;
        }

        if (pool != NULL)
                read_unlock(&pool_tgt_rwlock(pool));

        up_read(&lov->lov_qos.lq_rw_sem);

        *stripe_cnt = idx_pos - idx_arr;
        RETURN(0);
}

/* alloc objects on osts with specific stripe offset */
static int alloc_specific(struct lov_obd *lov, struct lov_stripe_md *lsm,
                          int *idx_arr)
{
        unsigned ost_idx, array_idx, ost_count;
        int i, *idx_pos;
        int speed = 0;
        struct pool_desc *pool = NULL;
        struct ost_pool *osts;
        ENTRY;

        pool = lov_find_pool(lov, lsm->lsm_pool_name);
        if (pool == NULL) {
                osts = &(lov->lov_packed);
        } else {
                read_lock(&pool_tgt_rwlock(pool));
                osts = &(pool->pool_obds);
        }

        ost_count = osts->op_count;

repeat_find:
        /* search loi_ost_idx in ost array */
        array_idx = 0;
        for (i = 0; i < ost_count; i++) {
                if (osts->op_array[i] == lsm->lsm_oinfo[0]->loi_ost_idx) {
                        array_idx = i;
                        break;
                }
        }
        if (i == ost_count) {
                if (pool != NULL)
                        read_unlock(&pool_tgt_rwlock(pool));
                CERROR("Start index %d not found in pool '%s'\n",
                       lsm->lsm_oinfo[0]->loi_ost_idx, lsm->lsm_pool_name);
                RETURN(-EINVAL);
        }

        idx_pos = idx_arr;
        for (i = 0; i < ost_count;
             i++, array_idx = (array_idx + 1) % ost_count) {
                ost_idx = osts->op_array[array_idx];

                if (!lov->lov_tgts[ost_idx] ||
                    !lov->lov_tgts[ost_idx]->ltd_active) {
                        continue;
                }

                /* Fail check before osc_precreate() is called,
                   so that we can only 'fail' a single OSC. */
                if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
                        continue;

                /* Drop slow OSCs if we can, but not for requested start idx */
                if ((obd_precreate(lov->lov_tgts[ost_idx]->ltd_exp) > speed) &&
                    (i != 0 || speed < 2))
                        continue;

                *idx_pos = ost_idx;
                idx_pos++;
                /* We have enough stripes */
                if (idx_pos - idx_arr == lsm->lsm_stripe_count) {
                        if (pool != NULL)
                                read_unlock(&pool_tgt_rwlock(pool));
                        RETURN(0);
                }
        }
        if (speed < 2) {
                /* Try again, allowing slower OSCs */
                speed++;
                goto repeat_find;
        }

        /* If we were passed specific striping params, then a failure to
         * meet those requirements is an error, since we can't reallocate
         * that memory (it might be part of a larger array or something).
         *
         * We can only get here if lsm_stripe_count was originally > 1.
         */
        CERROR("can't lstripe objid "LPX64": have %d want %u\n",
               lsm->lsm_object_id, (int)(idx_pos - idx_arr),
               lsm->lsm_stripe_count);

        if (pool != NULL)
                read_unlock(&pool_tgt_rwlock(pool));

        RETURN(-EFBIG);
}

/* Alloc objects on osts with optimization based on:
   - free space
   - network resources (shared OSS's)
*/
static int alloc_qos(struct obd_export *exp, int *idx_arr, int *stripe_cnt,
                     char *poolname, int flags)
{
        struct lov_obd *lov = &exp->exp_obd->u.lov;
        static time_t last_warn = 0;
        time_t now = cfs_time_current_sec();
        __u64 total_bavail, total_weight = 0;
        int nfound, good_osts, i, warn = 0, rc = 0;
        int stripe_cnt_min = min_stripe_count(*stripe_cnt, flags);
        struct pool_desc *pool;
        struct ost_pool *osts;
        struct lov_qos_rr *lqr;
        ENTRY;

        if (stripe_cnt_min < 1)
                GOTO(out_nolock, rc = -EINVAL);

        pool = lov_find_pool(lov, poolname);
        if (pool == NULL) {
                osts = &(lov->lov_packed);
                lqr = &(lov->lov_qos.lq_rr);
        } else {
                read_lock(&pool_tgt_rwlock(pool));
                osts = &(pool->pool_obds);
                lqr = &(pool->pool_rr);
        }

        lov_getref(exp->exp_obd);

        /* Detect -EAGAIN early, before expensive lock is taken. */
        if (!lov->lov_qos.lq_dirty && lov->lov_qos.lq_same_space)
                GOTO(out_nolock, rc = -EAGAIN);

        /* Do actual allocation, use write lock here. */
        down_write(&lov->lov_qos.lq_rw_sem);

        /*
         * Check again, while we were sleeping on @lq_rw_sem things could
         * change.
         */
        if (!lov->lov_qos.lq_dirty && lov->lov_qos.lq_same_space)
                GOTO(out, rc = -EAGAIN);

        if (lov->desc.ld_active_tgt_count < 2)
                GOTO(out, rc = -EAGAIN);

        rc = qos_calc_ppo(exp->exp_obd);
        if (rc)
                GOTO(out, rc);

        total_bavail = 0;
        good_osts = 0;
        /* Warn users about zero available space/inode every 30 min */
        if (cfs_time_sub(now, last_warn) > 60 * 30)
                warn = 1;
        /* Find all the OSTs that are valid stripe candidates */
        for (i = 0; i < osts->op_count; i++) {
                __u64 bavail;

                if (!lov->lov_tgts[osts->op_array[i]] ||
                    !lov->lov_tgts[osts->op_array[i]]->ltd_active)
                        continue;
                bavail = TGT_BAVAIL(osts->op_array[i]);
                if (!bavail) {
                        if (warn) {
                                CDEBUG(D_QOS, "no free space on %s\n",
                                     obd_uuid2str(&lov->lov_tgts[osts->op_array[i]]->ltd_uuid));
                                last_warn = now;
                        }
                        continue;
                }
                if (!TGT_FFREE(osts->op_array[i])) {
                        if (warn) {
                                CDEBUG(D_QOS, "no free inodes on %s\n",
                                     obd_uuid2str(&lov->lov_tgts[osts->op_array[i]]->ltd_uuid));
                                last_warn = now;
                        }
                        continue;
                }

                /* Fail check before osc_precreate() is called,
                   so that we can only 'fail' a single OSC. */
                if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && osts->op_array[i] == 0)
                        continue;

                if (obd_precreate(lov->lov_tgts[osts->op_array[i]]->ltd_exp) > 2)
                        continue;

                lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_usable = 1;
                qos_calc_weight(lov, osts->op_array[i]);
                total_bavail += bavail;
                total_weight += lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_weight;

                good_osts++;
        }

#ifdef QOS_DEBUG
        CDEBUG(D_QOS, "found %d good osts\n", good_osts);
#endif

        if (good_osts < stripe_cnt_min)
                GOTO(out, rc = -EAGAIN);

        if (!total_bavail)
                GOTO(out, rc = -ENOSPC);

        /* We have enough osts */
        if (good_osts < *stripe_cnt)
                *stripe_cnt = good_osts;

        if (!*stripe_cnt)
                GOTO(out, rc = -EAGAIN);

        /* Find enough OSTs with weighted random allocation. */
        nfound = 0;
        while (nfound < *stripe_cnt) {
                __u64 rand, cur_weight;

                cur_weight = 0;
                rc = -ENODEV;

                if (total_weight) {
#if BITS_PER_LONG == 32
                        rand = ll_rand() % (unsigned)total_weight;
                        /* If total_weight > 32-bit, first generate the high
                         * 32 bits of the random number, then add in the low
                         * 32 bits (truncated to the upper limit, if needed) */
                        if (total_weight > 0xffffffffULL)
                                rand = (__u64)(ll_rand() %
                                          (unsigned)(total_weight >> 32)) << 32;
                        else
                                rand = 0;

                        if (rand == (total_weight & 0xffffffff00000000ULL))
                                rand |= ll_rand() % (unsigned)total_weight;
                        else
                                rand |= ll_rand();

#else
                        rand = ((__u64)ll_rand() << 32 | ll_rand()) %
                                total_weight;
#endif
                } else {
                        rand = 0;
                }
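                /* Illustrative example: three usable OSTs with weights of
                 * 50, 30 and 20 GB give total_weight = 100 GB; rand falls in
                 * [0, total_weight) and the scan below picks the first OST
                 * whose running weight sum reaches rand, so OSTs are chosen
                 * with probability roughly proportional to their weights. */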

                /* On average, this will hit larger-weighted osts more often.
                   0-weight osts will always get used last (only when rand=0).*/
                for (i = 0; i < osts->op_count; i++) {
                        if (!lov->lov_tgts[osts->op_array[i]] ||
                            !lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_usable)
                                continue;

                        cur_weight += lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_weight;
#ifdef QOS_DEBUG
                        CDEBUG(D_QOS, "stripe_cnt=%d nfound=%d cur_weight="LPU64
                                      " rand="LPU64" total_weight="LPU64"\n",
                               *stripe_cnt, nfound, cur_weight, rand, total_weight);
#endif
                        if (cur_weight >= rand) {
#ifdef QOS_DEBUG
                                CDEBUG(D_QOS, "assigned stripe=%d to idx=%d\n",
                                       nfound, osts->op_array[i]);
#endif
                                idx_arr[nfound++] = osts->op_array[i];
                                qos_used(lov, osts, osts->op_array[i], &total_weight);
                                rc = 0;
                                break;
                        }
                }
                /* we should never trip this: an OST must have been picked above */
                if (rc) {
                        CERROR("Didn't find any OSTs?\n");
                        break;
                }
        }
        LASSERT(nfound == *stripe_cnt);

out:
        if (pool != NULL)
                read_unlock(&pool_tgt_rwlock(pool));

        up_write(&lov->lov_qos.lq_rw_sem);

out_nolock:
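        /* Any -EAGAIN from above (free space nearly even across OSTs, fewer
         * than two active targets, or too few "good" OSTs) falls back to
         * plain round-robin allocation. */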
        if (rc == -EAGAIN)
                rc = alloc_rr(lov, idx_arr, stripe_cnt, poolname, flags);

        lov_putref(exp->exp_obd);
        RETURN(rc);
}

/* return the newly allocated stripe count on success */
static int alloc_idx_array(struct obd_export *exp, struct lov_stripe_md *lsm,
                           int newea, int **idx_arr, int *arr_cnt, int flags)
{
        struct lov_obd *lov = &exp->exp_obd->u.lov;
        int stripe_cnt = lsm->lsm_stripe_count;
        int i, rc = 0;
        int *tmp_arr = NULL;
        ENTRY;

        *arr_cnt = stripe_cnt;
        OBD_ALLOC(tmp_arr, *arr_cnt * sizeof(int));
        if (tmp_arr == NULL)
                RETURN(-ENOMEM);
        for (i = 0; i < *arr_cnt; i++)
                tmp_arr[i] = -1;

        if (newea ||
            lsm->lsm_oinfo[0]->loi_ost_idx >= lov->desc.ld_tgt_count)
                rc = alloc_qos(exp, tmp_arr, &stripe_cnt,
                               lsm->lsm_pool_name, flags);
        else
                rc = alloc_specific(lov, lsm, tmp_arr);

        if (rc)
                GOTO(out_arr, rc);

        *idx_arr = tmp_arr;
        RETURN(stripe_cnt);
out_arr:
        OBD_FREE(tmp_arr, *arr_cnt * sizeof(int));
        *arr_cnt = 0;
        RETURN(rc);
}

static void free_idx_array(int *idx_arr, int arr_cnt)
{
        if (arr_cnt)
                OBD_FREE(idx_arr, arr_cnt * sizeof(int));
}

int qos_prep_create(struct obd_export *exp, struct lov_request_set *set)
{
        struct lov_obd *lov = &exp->exp_obd->u.lov;
        struct lov_stripe_md *lsm;
        struct obdo *src_oa = set->set_oi->oi_oa;
        struct obd_trans_info *oti = set->set_oti;
        int i, stripes, rc = 0, newea = 0;
        int flag = LOV_USES_ASSIGNED_STRIPE;
        int *idx_arr = NULL, idx_cnt = 0;
        ENTRY;

        LASSERT(src_oa->o_valid & OBD_MD_FLID);
        LASSERT(src_oa->o_valid & OBD_MD_FLGROUP);

        if (set->set_oi->oi_md == NULL) {
                int stripes_def = lov_get_stripecnt(lov, 0);

                /* If the MDS file was truncated up to some size, stripe over
                 * enough OSTs to allow the file to be created at that size.
                 * This may mean we use more than the default # of stripes. */
                if (src_oa->o_valid & OBD_MD_FLSIZE) {
                        obd_size min_bavail = LUSTRE_STRIPE_MAXBYTES;

                        /* Find a small number of stripes we can use
                           (up to # of active osts). */
                        stripes = 1;
                        lov_getref(exp->exp_obd);
                        for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                                if (!lov->lov_tgts[i] ||
                                    !lov->lov_tgts[i]->ltd_active)
                                        continue;
                                min_bavail = min(min_bavail, TGT_BAVAIL(i));
                                if (min_bavail * stripes > src_oa->o_size)
                                        break;
                                stripes++;
                        }
                        lov_putref(exp->exp_obd);
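                        /* Illustrative example: for a file already truncated
                         * to 10 GB where every active OST has about 2 GB
                         * free, the loop above stops at 6 stripes (the
                         * smallest count with stripes * min_bavail > size),
                         * bounded by the number of active OSTs. */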

                        if (stripes < stripes_def)
                                stripes = stripes_def;
                } else {
                         flag = LOV_USES_DEFAULT_STRIPE;
                         stripes = stripes_def;
                }

                rc = lov_alloc_memmd(&set->set_oi->oi_md, stripes,
                                     lov->desc.ld_pattern ?
                                     lov->desc.ld_pattern : LOV_PATTERN_RAID0,
                                     LOV_MAGIC);
                if (rc < 0)
                        GOTO(out_err, rc);
                newea = 1;
                rc = 0;
        }

        lsm = set->set_oi->oi_md;
        lsm->lsm_object_id = src_oa->o_id;
        lsm->lsm_object_gr = src_oa->o_gr;

        if (!lsm->lsm_stripe_size)
                lsm->lsm_stripe_size = lov->desc.ld_default_stripe_size;
        if (!lsm->lsm_pattern) {
                LASSERT(lov->desc.ld_pattern);
                lsm->lsm_pattern = lov->desc.ld_pattern;
        }

        stripes = alloc_idx_array(exp, lsm, newea, &idx_arr, &idx_cnt, flag);
        if (stripes <= 0)
                GOTO(out_err, rc = stripes ? stripes : -EIO);
        LASSERTF(stripes <= lsm->lsm_stripe_count,"requested %d allocated %d\n",
                 lsm->lsm_stripe_count, stripes);

        for (i = 0; i < stripes; i++) {
                struct lov_request *req;
                int ost_idx = idx_arr[i];
                LASSERT(ost_idx >= 0);

                OBD_ALLOC(req, sizeof(*req));
                if (req == NULL)
                        GOTO(out_err, rc = -ENOMEM);
                lov_set_add_req(req, set);

                req->rq_buflen = sizeof(*req->rq_oi.oi_md);
                OBD_ALLOC(req->rq_oi.oi_md, req->rq_buflen);
                if (req->rq_oi.oi_md == NULL)
                        GOTO(out_err, rc = -ENOMEM);

                OBDO_ALLOC(req->rq_oi.oi_oa);
                if (req->rq_oi.oi_oa == NULL)
                        GOTO(out_err, rc = -ENOMEM);

                req->rq_idx = ost_idx;
                req->rq_stripe = i;
                /* create data objects with "parent" OA */
                memcpy(req->rq_oi.oi_oa, src_oa, sizeof(*req->rq_oi.oi_oa));

                /* XXX When we start creating objects on demand, we need to
                 *     make sure that we always create the object on the
                 *     stripe which holds the existing file size.
                 */
                if (src_oa->o_valid & OBD_MD_FLSIZE) {
                        req->rq_oi.oi_oa->o_size =
                                lov_size_to_stripe(lsm, src_oa->o_size, i);

                        CDEBUG(D_INODE, "stripe %d has size "LPU64"/"LPU64"\n",
                               i, req->rq_oi.oi_oa->o_size, src_oa->o_size);
                }
        }
        LASSERT(set->set_count == stripes);

        if (stripes < lsm->lsm_stripe_count)
                qos_shrink_lsm(set);

        if (oti && (src_oa->o_valid & OBD_MD_FLCOOKIE)) {
                oti_alloc_cookies(oti, set->set_count);
                if (!oti->oti_logcookies)
                        GOTO(out_err, rc = -ENOMEM);
                set->set_cookies = oti->oti_logcookies;
        }
out_err:
        if (newea && rc)
                obd_free_memmd(exp, &set->set_oi->oi_md);
        if (idx_arr)
                free_idx_array(idx_arr, idx_cnt);
        EXIT;
        return rc;
}

void qos_update(struct lov_obd *lov)
{
        ENTRY;
        lov->lov_qos.lq_dirty = 1;