lustre/lov/lov_qos.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *
 *   This file is part of the Lustre file system, http://www.lustre.org
 *   Lustre is a trademark of Cluster File Systems, Inc.
 *
 *   You may have signed or agreed to another license before downloading
 *   this software.  If so, you are bound by the terms and conditions
 *   of that agreement, and the following does not apply to you.  See the
 *   LICENSE file included with this distribution for more information.
 *
 *   If you did not agree to a different license, then this copy of Lustre
 *   is open source software; you can redistribute it and/or modify it
 *   under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   In either case, Lustre is distributed in the hope that it will be
 *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   license text for more details.
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_LOV

#ifdef __KERNEL__
#include <libcfs/libcfs.h>
#else
#include <liblustre.h>
#endif

#include <obd_class.h>
#include <obd_lov.h>
#include "lov_internal.h"

/* #define QOS_DEBUG 1 */
#define D_QOS D_OTHER

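/* Available bytes and free inode count for target @i, read from the
 * obd_statfs data cached in the target's obd_device (obd_osfs); the
 * values are whatever the most recent statfs refresh left there. */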
#define TGT_BAVAIL(i) (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bavail * \
                       lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bsize)
#define TGT_FFREE(i)  (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_ffree)


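/* Register OST @index with its OSS in the QOS data.  OSTs whose exports
 * share the same remote server UUID are grouped under a single
 * lov_qos_oss entry, so load can later be penalized per server as well
 * as per target. */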
int qos_add_tgt(struct obd_device *obd, __u32 index)
{
        struct lov_obd *lov = &obd->u.lov;
        struct lov_qos_oss *oss, *temposs;
        struct obd_export *exp = lov->lov_tgts[index]->ltd_exp;
        int rc = 0, found = 0;
        ENTRY;

        /* We only need this QOS struct on MDT, not clients - but we may not
           have registered the LOV's observer yet, so there's no way to know */
        if (!exp || !exp->exp_connection) {
                CERROR("Missing connection\n");
                RETURN(-ENOTCONN);
        }

        down_write(&lov->lov_qos.lq_rw_sem);
        mutex_down(&lov->lov_lock);
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                if (obd_uuid_equals(&oss->lqo_uuid,
                                    &exp->exp_connection->c_remote_uuid)) {
                        found++;
                        break;
                }
        }

        if (!found) {
                OBD_ALLOC_PTR(oss);
                if (!oss)
                        GOTO(out, rc = -ENOMEM);
                memcpy(&oss->lqo_uuid,
                       &exp->exp_connection->c_remote_uuid,
                       sizeof(oss->lqo_uuid));
        } else {
                /* Assume we have to move this one */
                list_del(&oss->lqo_oss_list);
        }

        oss->lqo_ost_count++;
        lov->lov_tgts[index]->ltd_qos.ltq_oss = oss;

        /* Add sorted by # of OSTs.  Find the first entry that we're
           bigger than... */
        list_for_each_entry(temposs, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                if (oss->lqo_ost_count > temposs->lqo_ost_count)
                        break;
        }
        /* ...and add before it.  If we're the first or smallest, temposs
           points to the list head, and we add to the end. */
        list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);

        lov->lov_qos.lq_dirty = 1;
        lov->lov_qos.lq_dirty_rr = 1;

        CDEBUG(D_QOS, "add tgt %s to OSS %s (%d OSTs)\n",
               obd_uuid2str(&lov->lov_tgts[index]->ltd_uuid),
               obd_uuid2str(&oss->lqo_uuid),
               oss->lqo_ost_count);

out:
        mutex_up(&lov->lov_lock);
        up_write(&lov->lov_qos.lq_rw_sem);
        RETURN(rc);
}

int qos_del_tgt(struct obd_device *obd, __u32 index)
{
        struct lov_obd *lov = &obd->u.lov;
        struct lov_qos_oss *oss;
        int rc = 0;
        ENTRY;

        if (!lov->lov_tgts[index])
                RETURN(0);

        down_write(&lov->lov_qos.lq_rw_sem);

        oss = lov->lov_tgts[index]->ltd_qos.ltq_oss;
        if (!oss)
                GOTO(out, rc = -ENOENT);

        oss->lqo_ost_count--;
        if (oss->lqo_ost_count == 0) {
                CDEBUG(D_QOS, "removing OSS %s\n",
                       obd_uuid2str(&oss->lqo_uuid));
                list_del(&oss->lqo_oss_list);
                OBD_FREE_PTR(oss);
        }

        lov->lov_qos.lq_dirty = 1;
        lov->lov_qos.lq_dirty_rr = 1;
out:
        up_write(&lov->lov_qos.lq_rw_sem);
        RETURN(rc);
}

/* Recalculate the per-object penalties for OSSes and OSTs; they depend
   on the size of each OST within its OSS. */
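/*
 * The penalties charged below per created object are:
 *   OST:  prio_wide/256 * bavail(ost)/2 / (active_ost_count - 1)
 *   OSS:  prio_wide/256 * bavail(oss)/2 / oss_ost_count / (active_oss_count - 1)
 * where prio_wide = 256 - lq_prio_free.
 */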
static int qos_calc_ppo(struct obd_device *obd)
{
        struct lov_obd *lov = &obd->u.lov;
        struct lov_qos_oss *oss;
        __u64 ba_max, ba_min, temp;
        __u32 num_active;
        int rc, i, prio_wide;
        ENTRY;

        if (!lov->lov_qos.lq_dirty)
                GOTO(out, rc = 0);

        num_active = lov->desc.ld_active_tgt_count - 1;
        if (num_active < 1)
                GOTO(out, rc = -EAGAIN);

        /* find bavail on each OSS */
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                oss->lqo_bavail = 0;
        }
        lov->lov_qos.lq_active_oss_count = 0;

        /* How strongly the user wants OSTs selected "widely" (not recently
           chosen, and not on a recently used OSS), as opposed to "freely"
           (by available free space).  Range 0-256. */
        prio_wide = 256 - lov->lov_qos.lq_prio_free;

        ba_min = (__u64)(-1);
        ba_max = 0;
        /* Calculate OST penalty per object */
        /* (lov ref taken in alloc_qos) */
        for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
                        continue;
                temp = TGT_BAVAIL(i);
                if (!temp)
                        continue;
                ba_min = min(temp, ba_min);
                ba_max = max(temp, ba_max);

                /* Count the number of usable OSS's */
                if (lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail == 0)
                        lov->lov_qos.lq_active_oss_count++;
                lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail += temp;

                /* per-OST penalty is prio * TGT_bavail / (num_ost - 1) / 2 */
                temp >>= 1;
                do_div(temp, num_active);
                lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj =
                        (temp * prio_wide) >> 8;

                if (lov->lov_qos.lq_reset == 0)
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
        }

        num_active = lov->lov_qos.lq_active_oss_count - 1;
        if (num_active < 1) {
                /* If there's only 1 OSS, we can't penalize it, so instead
                   we have to double the OST penalty */
                num_active = 1;
                for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                        if (lov->lov_tgts[i] == NULL)
                                continue;
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj <<= 1;
                }
        }

        /* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                temp = oss->lqo_bavail >> 1;
                do_div(temp, oss->lqo_ost_count * num_active);
                oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8;
                if (lov->lov_qos.lq_reset == 0)
                        oss->lqo_penalty = 0;
        }

        lov->lov_qos.lq_dirty = 0;
        lov->lov_qos.lq_reset = 0;

        /* If all OSTs have nearly the same amount of free space, use
         * round-robin allocation for better creation performance */
        lov->lov_qos.lq_same_space = 0;
        temp = ba_max - ba_min;
        ba_min = (ba_min * 51) >> 8;     /* 51/256 = .20 */
        if (temp < ba_min) {
                /* Difference is less than 20% */
                lov->lov_qos.lq_same_space = 1;
                /* Reset weights for the next time we enter qos mode */
                lov->lov_qos.lq_reset = 0;
        }
        rc = 0;

out:
        if (!rc && lov->lov_qos.lq_same_space)
                RETURN(-EAGAIN);
        RETURN(rc);
}

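/* Weight of OST @i for the next allocation: its available space minus
 * the accumulated per-OST and per-OSS penalties, clamped at zero.  A
 * larger weight makes the OST more likely to be picked. */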
static int qos_calc_weight(struct lov_obd *lov, int i)
{
        __u64 temp, temp2;

        /* Final ost weight = TGT_BAVAIL - ost_penalty - oss_penalty */
        temp = TGT_BAVAIL(i);
        temp2 = lov->lov_tgts[i]->ltd_qos.ltq_penalty +
                lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty;
        if (temp < temp2)
                lov->lov_tgts[i]->ltd_qos.ltq_weight = 0;
        else
                lov->lov_tgts[i]->ltd_qos.ltq_weight = temp - temp2;
        return 0;
}

/* We just used this index for a stripe; adjust everyone's weights */
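/* The OST that was just picked gets the maximum penalty (its decayed old
 * penalty plus penalty_per_obj * active target count), and likewise for
 * its OSS; every other OST and OSS has one penalty_per_obj subtracted.
 * A freshly used target therefore regains its full weight only after
 * roughly one complete round of allocations. */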
static int qos_used(struct lov_obd *lov, __u32 index, __u64 *total_wt)
{
        struct lov_qos_oss *oss;
        int i;
        ENTRY;

        /* Don't pick this OST again until the next alloc_qos */
        lov->lov_tgts[index]->ltd_qos.ltq_usable = 0;

        oss = lov->lov_tgts[index]->ltd_qos.ltq_oss;

        /* Decay old penalty by half (we're adding max penalty, and don't
           want it to run away.) */
        lov->lov_tgts[index]->ltd_qos.ltq_penalty >>= 1;
        oss->lqo_penalty >>= 1;

        /* Set max penalties for this OST and OSS */
        lov->lov_tgts[index]->ltd_qos.ltq_penalty +=
                lov->lov_tgts[index]->ltd_qos.ltq_penalty_per_obj *
                lov->desc.ld_active_tgt_count;
        oss->lqo_penalty += oss->lqo_penalty_per_obj *
                lov->lov_qos.lq_active_oss_count;

        /* Decrease all OSS penalties */
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                if (oss->lqo_penalty < oss->lqo_penalty_per_obj)
                        oss->lqo_penalty = 0;
                else
                        oss->lqo_penalty -= oss->lqo_penalty_per_obj;
        }

        *total_wt = 0;
        /* Decrease all OST penalties */
        for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
                        continue;
                if (lov->lov_tgts[i]->ltd_qos.ltq_penalty <
                    lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj)
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
                else
                        lov->lov_tgts[i]->ltd_qos.ltq_penalty -=
                                lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj;

                qos_calc_weight(lov, i);

                /* Recalc the total weight of usable osts */
                if (lov->lov_tgts[i]->ltd_qos.ltq_usable)
                        *total_wt += lov->lov_tgts[i]->ltd_qos.ltq_weight;

#ifdef QOS_DEBUG
                CDEBUG(D_QOS, "recalc tgt %d avail="LPU64
                       " ostppo="LPU64" ostp="LPU64" ossppo="LPU64
                       " ossp="LPU64" wt="LPU64"\n",
                       i, TGT_BAVAIL(i) >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_penalty >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty_per_obj >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty >> 10,
                       lov->lov_tgts[i]->ltd_qos.ltq_weight >> 10);
#endif
        }

        RETURN(0);
}

#define LOV_QOS_EMPTY ((__u32)-1)
/* compute optimal round-robin order, based on OSTs per OSS */
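/* For example, assuming five OSTs with indices 0-2 on one OSS and 3-4 on
 * another, the placement loop below fills lq_rr_array as 0 1 3 2 4: OSTs
 * that share a server are spread through the list rather than clustered
 * together. */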
static int qos_calc_rr(struct lov_obd *lov)
{
        struct lov_qos_oss *oss;
        unsigned ost_count, placed, real_count;
        int i;
        ENTRY;

        if (!lov->lov_qos.lq_dirty_rr) {
                LASSERT(lov->lov_qos.lq_rr_size);
                RETURN(0);
        }

        /* Do the actual allocation. */
        down_write(&lov->lov_qos.lq_rw_sem);

        /*
         * Check again: while we were sleeping on @lq_rw_sem something
         * could have changed.
         */
        if (!lov->lov_qos.lq_dirty_rr) {
                LASSERT(lov->lov_qos.lq_rr_size);
                up_write(&lov->lov_qos.lq_rw_sem);
                RETURN(0);
        }

        ost_count = lov->desc.ld_tgt_count;

        if (lov->lov_qos.lq_rr_size)
                OBD_FREE(lov->lov_qos.lq_rr_array, lov->lov_qos.lq_rr_size);
        lov->lov_qos.lq_rr_size = ost_count *
                sizeof(lov->lov_qos.lq_rr_array[0]);
        OBD_ALLOC(lov->lov_qos.lq_rr_array, lov->lov_qos.lq_rr_size);
        if (!lov->lov_qos.lq_rr_array) {
                lov->lov_qos.lq_rr_size = 0;
                up_write(&lov->lov_qos.lq_rw_sem);
                RETURN(-ENOMEM);
        }

        real_count = 0;
        for (i = 0; i < ost_count; i++) {
                lov->lov_qos.lq_rr_array[i] = LOV_QOS_EMPTY;
                if (lov->lov_tgts[i])
                        real_count++;
        }

        /* Place all the OSTs from 1 OSS at the same time. */
        placed = 0;
        list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
                int j = 0;
                for (i = 0; i < ost_count; i++) {
                      LASSERT(lov->lov_tgts[i] != NULL);
                      if (lov->lov_tgts[i]->ltd_qos.ltq_oss == oss) {
                              /* Evenly space these OSTs across the array */
                              int next = j * ost_count / oss->lqo_ost_count;
                              LASSERT(next < ost_count);
                              while (lov->lov_qos.lq_rr_array[next] !=
                                     LOV_QOS_EMPTY)
                                      next = (next + 1) % ost_count;
                              lov->lov_qos.lq_rr_array[next] = i;
                              j++;
                              placed++;
                      }
                }
                LASSERT(j == oss->lqo_ost_count);
        }

        lov->lov_qos.lq_dirty_rr = 0;
        up_write(&lov->lov_qos.lq_rw_sem);

        if (placed != real_count) {
                /* This should never happen */
                LCONSOLE_ERROR_MSG(0x14e, "Failed to place all OSTs in the "
                                   "round-robin list (%d of %d).\n",
                                   placed, real_count);
                for (i = 0; i < ost_count; i++) {
                        LCONSOLE(D_WARNING, "rr #%d ost idx=%d\n", i,
                                 lov->lov_qos.lq_rr_array[i]);
                }
                lov->lov_qos.lq_dirty_rr = 1;
                RETURN(-EAGAIN);
        }

#ifdef QOS_DEBUG
        for (i = 0; i < ost_count; i++) {
                LCONSOLE(D_QOS, "rr #%d ost idx=%d\n", i,
                         lov->lov_qos.lq_rr_array[i]);
        }
#endif

        RETURN(0);
}


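/* Fewer objects were created than the striping metadata asked for: copy
 * the llog cookies and lov_oinfo slots that are actually in use into a
 * smaller lov_stripe_md and free the leftovers. */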
void qos_shrink_lsm(struct lov_request_set *set)
{
        struct lov_stripe_md *lsm = set->set_oi->oi_md, *lsm_new;
        /* XXX LOV STACKING call into osc for sizes */
        unsigned oldsize, newsize;

        if (set->set_oti && set->set_cookies && set->set_cookie_sent) {
                struct llog_cookie *cookies;
                oldsize = lsm->lsm_stripe_count * sizeof(*cookies);
                newsize = set->set_count * sizeof(*cookies);

                cookies = set->set_cookies;
                oti_alloc_cookies(set->set_oti, set->set_count);
                if (set->set_oti->oti_logcookies) {
                        memcpy(set->set_oti->oti_logcookies, cookies, newsize);
                        OBD_FREE(cookies, oldsize);
                        set->set_cookies = set->set_oti->oti_logcookies;
                } else {
                        CWARN("'leaking' %d bytes\n", oldsize - newsize);
                }
        }

        CWARN("using fewer stripes for object "LPX64": old %u new %u\n",
              lsm->lsm_object_id, lsm->lsm_stripe_count, set->set_count);
        LASSERT(lsm->lsm_stripe_count >= set->set_count);

        newsize = lov_stripe_md_size(set->set_count);
        OBD_ALLOC(lsm_new, newsize);
        if (lsm_new != NULL) {
                int i;
                memcpy(lsm_new, lsm, sizeof(*lsm));
                for (i = 0; i < lsm->lsm_stripe_count; i++) {
                        if (i < set->set_count) {
                                lsm_new->lsm_oinfo[i] = lsm->lsm_oinfo[i];
                                continue;
                        }
                        OBD_SLAB_FREE(lsm->lsm_oinfo[i], lov_oinfo_slab,
                                      sizeof(struct lov_oinfo));
                }
                lsm_new->lsm_stripe_count = set->set_count;
                OBD_FREE(lsm, sizeof(struct lov_stripe_md) +
                         lsm->lsm_stripe_count * sizeof(struct lov_oinfo *));
                set->set_oi->oi_md = lsm_new;
        } else {
                CWARN("'leaking' a few bytes\n");
        }
}

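/* Retry creating the object for @req on another active OST that does not
 * already hold a stripe of this file, scanning round-robin from the
 * originally assigned index. */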
int qos_remedy_create(struct lov_request_set *set, struct lov_request *req)
{
        struct lov_stripe_md *lsm = set->set_oi->oi_md;
        struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
        unsigned ost_idx, ost_count = lov->desc.ld_tgt_count;
        int stripe, i, rc = -EIO;
        ENTRY;

        ost_idx = (req->rq_idx + lsm->lsm_stripe_count) % ost_count;
        for (i = 0; i < ost_count; i++, ost_idx = (ost_idx + 1) % ost_count) {
                if (!lov->lov_tgts[ost_idx] ||
                    !lov->lov_tgts[ost_idx]->ltd_active)
                        continue;
                /* check whether an object has already been created on this OST */
                for (stripe = 0; stripe < lsm->lsm_stripe_count; stripe++) {
                        if (stripe == req->rq_stripe)
                                continue;
                        if (ost_idx == lsm->lsm_oinfo[stripe]->loi_ost_idx)
                                break;
                }

                if (stripe >= lsm->lsm_stripe_count) {
                        req->rq_idx = ost_idx;
                        rc = obd_create(lov->lov_tgts[ost_idx]->ltd_exp,
                                        req->rq_oi.oi_oa, &req->rq_oi.oi_md,
                                        set->set_oti);
                        if (!rc)
                                break;
                }
        }
        RETURN(rc);
}

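/* Minimum acceptable stripe count: when the default striping is used we
 * will settle for at least 3/4 of the requested stripes; an explicitly
 * requested stripe count must be met exactly. */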
static int min_stripe_count(int stripe_cnt, int flags)
{
        return (flags & LOV_USES_DEFAULT_STRIPE ?
                stripe_cnt - (stripe_cnt / 4) : stripe_cnt);
}

#define LOV_CREATE_RESEED_MULT 4
#define LOV_CREATE_RESEED_MIN  1000
/* Allocate objects on osts with round-robin algorithm */
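/* lov_start_idx walks the precomputed lq_rr_array; every lov_start_count
 * allocations it is reseeded to a random position so that the
 * round-robin pattern does not stay correlated forever. */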
static int alloc_rr(struct lov_obd *lov, int *idx_arr, int *stripe_cnt,
                    int flags)
{
        unsigned array_idx, ost_count = lov->desc.ld_tgt_count;
        unsigned ost_active_count = lov->desc.ld_active_tgt_count;
        int i, *idx_pos;
        __u32 ost_idx;
        int ost_start_idx_temp;
        int speed = 0;
        int stripe_cnt_min = min_stripe_count(*stripe_cnt, flags);
        ENTRY;

        i = qos_calc_rr(lov);
        if (i)
                RETURN(i);

        if (--lov->lov_start_count <= 0) {
                lov->lov_start_idx = ll_rand() % ost_count;
                lov->lov_start_count =
                        (LOV_CREATE_RESEED_MIN / max(ost_active_count, 1U) +
                         LOV_CREATE_RESEED_MULT) * max(ost_active_count, 1U);
        } else if (stripe_cnt_min >= ost_active_count ||
                   lov->lov_start_idx > ost_count) {
                /* If we have allocated from all of the OSTs, slowly
                   precess the next start */
                lov->lov_start_idx %= ost_count;
                if (stripe_cnt_min > 1 &&
                    (ost_active_count % stripe_cnt_min) != 1)
                        ++lov->lov_offset_idx;
        }
        down_read(&lov->lov_qos.lq_rw_sem);
        ost_start_idx_temp = lov->lov_start_idx;

repeat_find:
        array_idx = (lov->lov_start_idx + lov->lov_offset_idx) % ost_count;
        idx_pos = idx_arr;
#ifdef QOS_DEBUG
        CDEBUG(D_QOS, "want %d startidx %d startcnt %d offset %d arrayidx %d\n",
               stripe_cnt_min, lov->lov_start_idx, lov->lov_start_count,
               lov->lov_offset_idx, array_idx);
#endif

        for (i = 0; i < ost_count; i++, array_idx=(array_idx + 1) % ost_count) {
                ++lov->lov_start_idx;
                ost_idx = lov->lov_qos.lq_rr_array[array_idx];
#ifdef QOS_DEBUG
                CDEBUG(D_QOS, "#%d strt %d act %d strp %d ary %d idx %d\n",
                       i, lov->lov_start_idx,
                       ((ost_idx != LOV_QOS_EMPTY) && lov->lov_tgts[ost_idx]) ?
                       lov->lov_tgts[ost_idx]->ltd_active : 0,
                       idx_pos - idx_arr, array_idx, ost_idx);
#endif
                if ((ost_idx == LOV_QOS_EMPTY) || !lov->lov_tgts[ost_idx] ||
                    !lov->lov_tgts[ost_idx]->ltd_active)
                        continue;
                /* Fault-injection check before osc_precreate() is called,
                   so we only 'fail' a single OSC. */
                if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
                        continue;

                /* Drop slow OSCs if we can */
                if (obd_precreate(lov->lov_tgts[ost_idx]->ltd_exp, speed == 0) >
                                  speed)
                        continue;

                *idx_pos = ost_idx;
                idx_pos++;
                /* We have enough stripes */
                if (idx_pos - idx_arr == *stripe_cnt)
                        break;
        }
        if ((speed < 2) && (idx_pos - idx_arr < stripe_cnt_min)) {
                /* Try again, allowing slower OSCs */
                speed++;
                lov->lov_start_idx = ost_start_idx_temp;
                goto repeat_find;
        }

        up_read(&lov->lov_qos.lq_rw_sem);

        *stripe_cnt = idx_pos - idx_arr;
        RETURN(0);
}

/* alloc objects on osts with specific stripe offset */
static int alloc_specific(struct lov_obd *lov, struct lov_stripe_md *lsm,
                          int *idx_arr)
{
        unsigned ost_idx, ost_count = lov->desc.ld_tgt_count;
        int i, *idx_pos;
        int speed = 0;
        ENTRY;

repeat_find:
        idx_pos = idx_arr;
        ost_idx = lsm->lsm_oinfo[0]->loi_ost_idx;
        for (i = 0; i < ost_count; i++, ost_idx = (ost_idx + 1) % ost_count) {
                if (!lov->lov_tgts[ost_idx] ||
                    !lov->lov_tgts[ost_idx]->ltd_active) {
                        continue;
                }

                /* Fault-injection check before osc_precreate() is called,
                   so we only 'fail' a single OSC. */
                if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
                        continue;

                /* Drop slow OSCs if we can */
                if (obd_precreate(lov->lov_tgts[ost_idx]->ltd_exp, speed == 0) >
                    speed)
                        continue;

                *idx_pos = ost_idx;
                idx_pos++;
                /* We have enough stripes */
                if (idx_pos - idx_arr == lsm->lsm_stripe_count)
                        RETURN(0);
        }
        if (speed < 2) {
                /* Try again, allowing slower OSCs */
                speed++;
                goto repeat_find;
        }

        /* If we were passed specific striping params, then a failure to
         * meet those requirements is an error, since we can't reallocate
         * that memory (it might be part of a larger array or something).
         *
         * We can only get here if lsm_stripe_count was originally > 1.
         */
        CERROR("can't lstripe objid "LPX64": have "LPSZ" want %u\n",
               lsm->lsm_object_id, idx_pos - idx_arr, lsm->lsm_stripe_count);
        RETURN(-EFBIG);
}

/* Alloc objects on osts with optimization based on:
   - free space
   - network resources (shared OSS's)
*/
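/* Each usable OST gets a weight of (available bytes - OST penalty - OSS
 * penalty); stripes are then picked by weighted random sampling, and
 * qos_used() lowers the winner's weight before the next pick.  New
 * objects are therefore steered toward emptier OSTs on less recently
 * used servers.  Falls back to round-robin (-EAGAIN) when free space is
 * evenly balanced or there are too few active targets. */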
static int alloc_qos(struct obd_export *exp, int *idx_arr, int *stripe_cnt,
                     int flags)
{
        struct lov_obd *lov = &exp->exp_obd->u.lov;
        static time_t last_warn = 0;
        time_t now = cfs_time_current_sec();
        __u64 total_bavail, total_weight = 0;
        __u32 ost_count;
        int nfound, good_osts, i, warn = 0, rc = 0;
        int stripe_cnt_min = min_stripe_count(*stripe_cnt, flags);
        ENTRY;

        if (stripe_cnt_min < 1)
                GOTO(out, rc = -EINVAL);

        lov_getref(exp->exp_obd);

        /* Detect -EAGAIN early, before expensive lock is taken. */
        if (!lov->lov_qos.lq_dirty && lov->lov_qos.lq_same_space)
                GOTO(out, rc = -EAGAIN);

        /* Do the actual allocation; take the write lock here. */
        down_write(&lov->lov_qos.lq_rw_sem);

        /*
         * Check again: while we were sleeping on @lq_rw_sem things could
         * have changed.
         */
        if (!lov->lov_qos.lq_dirty && lov->lov_qos.lq_same_space) {
                up_write(&lov->lov_qos.lq_rw_sem);
                GOTO(out, rc = -EAGAIN);
        }
        ost_count = lov->desc.ld_tgt_count;

        if (lov->desc.ld_active_tgt_count < 2)
                GOTO(out_up_write, rc = -EAGAIN);

        rc = qos_calc_ppo(exp->exp_obd);
        if (rc)
                GOTO(out_up_write, rc);

        total_bavail = 0;
        good_osts = 0;
        /* Warn users about zero available space/inode every 30 min */
        if (cfs_time_sub(now, last_warn) > 60 * 30)
                warn = 1;
        /* Find all the OSTs that are valid stripe candidates */
        for (i = 0; i < ost_count; i++) {
                __u64 bavail;

                if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
                        continue;
                bavail = TGT_BAVAIL(i);
                if (!bavail) {
                        if (warn) {
                                CDEBUG(D_QOS, "no free space on %s\n",
                                       obd_uuid2str(&lov->lov_tgts[i]->ltd_uuid));
                                last_warn = now;
                        }
                        continue;
                }
                if (!TGT_FFREE(i)) {
                        if (warn) {
                                CDEBUG(D_QOS, "no free inodes on %s\n",
                                       obd_uuid2str(&lov->lov_tgts[i]->ltd_uuid));
                                last_warn = now;
                        }
                        continue;
                }

                /* Fault-injection check before osc_precreate() is called,
                   so we only 'fail' a single OSC. */
                if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && i == 0)
                        continue;

                if (obd_precreate(lov->lov_tgts[i]->ltd_exp, 1) >= 2)
                        continue;

                lov->lov_tgts[i]->ltd_qos.ltq_usable = 1;
                qos_calc_weight(lov, i);
                total_bavail += bavail;
                total_weight += lov->lov_tgts[i]->ltd_qos.ltq_weight;

                good_osts++;
        }

        if (!total_bavail)
                GOTO(out_up_write, rc = -ENOSPC);

        if (good_osts < stripe_cnt_min)
                GOTO(out_up_write, rc = -EAGAIN);

        /* We have enough osts */
        if (good_osts < *stripe_cnt)
                *stripe_cnt = good_osts;

        if (!*stripe_cnt)
                GOTO(out_up_write, rc = -EAGAIN);

        /* Find enough OSTs with weighted random allocation. */
        nfound = 0;
        while (nfound < *stripe_cnt) {
                __u64 rand, cur_weight = 0;

                rc = -ENODEV;

                if (total_weight) {
#if BITS_PER_LONG == 32
                        rand = ll_rand() % (unsigned)total_weight;
                        /* If total_weight > 32-bit, first generate the high
                         * 32 bits of the random number, then add in the low
                         * 32 bits (truncated to the upper limit, if needed) */
                        if (total_weight > 0xffffffffULL)
                                rand = (__u64)(ll_rand() %
                                          (unsigned)(total_weight >> 32)) << 32;
                        else
                                rand = 0;

                        if (rand == (total_weight & 0xffffffff00000000ULL))
                                rand |= ll_rand() % (unsigned)total_weight;
                        else
                                rand |= ll_rand();
#else
                        rand = ((__u64)ll_rand() << 32 | ll_rand()) %
                                total_weight;
#endif
                } else {
                        rand = 0;
                }

                /* On average, this will hit larger-weighted osts more often.
                   0-weight osts will always get used last (only when rand=0).*/
                for (i = 0; i < ost_count; i++) {
                        if (!lov->lov_tgts[i] ||
                            !lov->lov_tgts[i]->ltd_qos.ltq_usable)
                                continue;

                        cur_weight += lov->lov_tgts[i]->ltd_qos.ltq_weight;
                        if (cur_weight >= rand) {
#ifdef QOS_DEBUG
                                CDEBUG(D_QOS, "assigned stripe=%d to idx=%d\n",
                                       nfound, i);
#endif
                                idx_arr[nfound++] = i;
                                qos_used(lov, i, &total_weight);
                                rc = 0;
                                break;
                        }
                }
                /* This should never trigger: an OST should always be found above */
                if (rc) {
                        CERROR("Didn't find any OSTs?\n");
                        break;
                }
        }
        LASSERT(nfound == *stripe_cnt);

out_up_write:
        up_write(&lov->lov_qos.lq_rw_sem);

out:
        if (rc == -EAGAIN)
                rc = alloc_rr(lov, idx_arr, stripe_cnt, flags);

        lov_putref(exp->exp_obd);
        RETURN(rc);
}

/* Returns the newly allocated stripe count on success */
static int alloc_idx_array(struct obd_export *exp, struct lov_stripe_md *lsm,
                           int newea, int **idx_arr, int *arr_cnt, int flags)
{
        struct lov_obd *lov = &exp->exp_obd->u.lov;
        int stripe_cnt = lsm->lsm_stripe_count;
        int i, rc = 0;
        int *tmp_arr = NULL;
        ENTRY;

        *arr_cnt = stripe_cnt;
        OBD_ALLOC(tmp_arr, *arr_cnt * sizeof(int));
        if (tmp_arr == NULL)
                RETURN(-ENOMEM);
        for (i = 0; i < *arr_cnt; i++)
                tmp_arr[i] = -1;

        if (newea ||
            lsm->lsm_oinfo[0]->loi_ost_idx >= lov->desc.ld_tgt_count)
                rc = alloc_qos(exp, tmp_arr, &stripe_cnt, flags);
        else
                rc = alloc_specific(lov, lsm, tmp_arr);

        if (rc)
                GOTO(out_arr, rc);

        *idx_arr = tmp_arr;
        RETURN(stripe_cnt);
out_arr:
        OBD_FREE(tmp_arr, *arr_cnt * sizeof(int));
        *arr_cnt = 0;
        RETURN(rc);
}

static void free_idx_array(int *idx_arr, int arr_cnt)
{
        if (arr_cnt)
                OBD_FREE(idx_arr, arr_cnt * sizeof(int));
}

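/* Build one lov_request per stripe for an object create: choose the
 * stripe count, allocate the striping metadata if the caller did not
 * supply any, pick the OST indices (QOS or specific allocation), and
 * clone the parent obdo for each stripe. */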
int qos_prep_create(struct obd_export *exp, struct lov_request_set *set)
{
        struct lov_obd *lov = &exp->exp_obd->u.lov;
        struct lov_stripe_md *lsm;
        struct obdo *src_oa = set->set_oi->oi_oa;
        struct obd_trans_info *oti = set->set_oti;
        int i, stripes, rc = 0, newea = 0;
        int *idx_arr, idx_cnt = 0;
        int flag = LOV_USES_ASSIGNED_STRIPE;
        ENTRY;

        LASSERT(src_oa->o_valid & OBD_MD_FLID);
        LASSERT(src_oa->o_valid & OBD_MD_FLGROUP);

        if (set->set_oi->oi_md == NULL) {
                int stripes_def = lov_get_stripecnt(lov, 0);

                /* If the MDS file was truncated up to some size, stripe over
                 * enough OSTs to allow the file to be created at that size.
                 * This may mean we use more than the default # of stripes. */
                if (src_oa->o_valid & OBD_MD_FLSIZE) {
                        obd_size min_bavail = LUSTRE_STRIPE_MAXBYTES;

                        /* Find a small number of stripes we can use
                           (up to # of active osts). */
                        stripes = 1;
                        lov_getref(exp->exp_obd);
                        for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                                if (!lov->lov_tgts[i] ||
                                    !lov->lov_tgts[i]->ltd_active)
                                        continue;
                                min_bavail = min(min_bavail, TGT_BAVAIL(i));
                                if (min_bavail * stripes > src_oa->o_size)
                                        break;
                                stripes++;
                        }
                        lov_putref(exp->exp_obd);

                        if (stripes < stripes_def)
                                stripes = stripes_def;
                } else {
                        flag = LOV_USES_DEFAULT_STRIPE;
                        stripes = stripes_def;
                }

                rc = lov_alloc_memmd(&set->set_oi->oi_md, stripes,
                                     lov->desc.ld_pattern ?
                                     lov->desc.ld_pattern : LOV_PATTERN_RAID0,
                                     LOV_MAGIC);
                if (rc < 0)
                        GOTO(out_err, rc);
                newea = 1;
                rc = 0;
        }

        lsm = set->set_oi->oi_md;
        lsm->lsm_object_id = src_oa->o_id;
        lsm->lsm_object_gr = src_oa->o_gr;

        if (!lsm->lsm_stripe_size)
                lsm->lsm_stripe_size = lov->desc.ld_default_stripe_size;
        if (!lsm->lsm_pattern) {
                LASSERT(lov->desc.ld_pattern);
                lsm->lsm_pattern = lov->desc.ld_pattern;
        }

        stripes = alloc_idx_array(exp, lsm, newea, &idx_arr, &idx_cnt, flag);
        if (stripes <= 0)
                GOTO(out_err, rc = stripes ? stripes : -EIO);
        LASSERTF(stripes <= lsm->lsm_stripe_count,"requested %d allocated %d\n",
                 lsm->lsm_stripe_count, stripes);

        for (i = 0; i < stripes; i++) {
                struct lov_request *req;
                int ost_idx = idx_arr[i];
                LASSERT(ost_idx >= 0);

                OBD_ALLOC(req, sizeof(*req));
                if (req == NULL)
                        GOTO(out_err, rc = -ENOMEM);
                lov_set_add_req(req, set);

                req->rq_buflen = sizeof(*req->rq_oi.oi_md);
                OBD_ALLOC(req->rq_oi.oi_md, req->rq_buflen);
                if (req->rq_oi.oi_md == NULL)
                        GOTO(out_err, rc = -ENOMEM);

                OBDO_ALLOC(req->rq_oi.oi_oa);
                if (req->rq_oi.oi_oa == NULL)
                        GOTO(out_err, rc = -ENOMEM);

                req->rq_idx = ost_idx;
                req->rq_stripe = i;
                /* create data objects with "parent" OA */
                memcpy(req->rq_oi.oi_oa, src_oa, sizeof(*req->rq_oi.oi_oa));

                /* XXX When we start creating objects on demand, we need to
                 *     make sure that we always create the object on the
                 *     stripe which holds the existing file size.
                 */
                if (src_oa->o_valid & OBD_MD_FLSIZE) {
                        req->rq_oi.oi_oa->o_size =
                                lov_size_to_stripe(lsm, src_oa->o_size, i);

                        CDEBUG(D_INODE, "stripe %d has size "LPU64"/"LPU64"\n",
                               i, req->rq_oi.oi_oa->o_size, src_oa->o_size);
                }
        }
        LASSERT(set->set_count == stripes);

        if (stripes < lsm->lsm_stripe_count)
                qos_shrink_lsm(set);

        if (oti && (src_oa->o_valid & OBD_MD_FLCOOKIE)) {
                oti_alloc_cookies(oti, set->set_count);
                if (!oti->oti_logcookies)
                        GOTO(out_err, rc = -ENOMEM);
                set->set_cookies = oti->oti_logcookies;
        }
out_err:
        if (newea && rc)
                obd_free_memmd(exp, &set->set_oi->oi_md);
        free_idx_array(idx_arr, idx_cnt);
        EXIT;
        return rc;
}

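/* Mark the QOS data dirty so penalties and weights are recomputed on the
 * next allocation. */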
void qos_update(struct lov_obd *lov)
{
        ENTRY;
        lov->lov_qos.lq_dirty = 1;
        EXIT;
}