fs/lustre-release.git: lustre/lov/lov_qos.c (0397878a1112817d5ee63a2ea6f4a88616b9de55)
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright  2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  */
36
37 #ifndef EXPORT_SYMTAB
38 # define EXPORT_SYMTAB
39 #endif
40 #define DEBUG_SUBSYSTEM S_LOV
41
42 #ifdef __KERNEL__
43 #include <libcfs/libcfs.h>
44 #else
45 #include <liblustre.h>
46 #endif
47
48 #include <obd_class.h>
49 #include <obd_lov.h>
50 #include <lustre/lustre_idl.h>
51 #include "lov_internal.h"
52
53 /* #define QOS_DEBUG 1 */
54 #define D_QOS D_OTHER
55
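/* Available bytes on OST i, taken from the statfs data cached on its OSC
 * export (obd_osfs): free blocks times block size.  E.g. (illustrative
 * values only) os_bavail = 2^28 blocks with os_bsize = 4096 is 1 TiB. */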
56 #define TGT_BAVAIL(i)  (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bavail*\
57                         lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bsize)
58
59 int qos_add_tgt(struct obd_device *obd, __u32 index)
60 {
61         struct lov_obd *lov = &obd->u.lov;
62         struct lov_qos_oss *oss, *temposs;
63         struct obd_export *exp = lov->lov_tgts[index]->ltd_exp;
64         int rc = 0, found = 0;
65         ENTRY;
66
67         /* We only need this QOS struct on MDT, not clients - but we may not
68          * have registered the LOV's observer yet, so there's no way to know */
69         if (!exp || !exp->exp_connection) {
70                 CERROR("Missing connection\n");
71                 RETURN(-ENOTCONN);
72         }
73
74         down_write(&lov->lov_qos.lq_rw_sem);
75         mutex_down(&lov->lov_lock);
76         list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
77                 if (obd_uuid_equals(&oss->lqo_uuid,
78                                     &exp->exp_connection->c_remote_uuid)) {
79                         found++;
80                         break;
81                 }
82         }
83
84         if (!found) {
85                 OBD_ALLOC_PTR(oss);
86                 if (!oss)
87                         GOTO(out, rc = -ENOMEM);
88                 memcpy(&oss->lqo_uuid,
89                        &exp->exp_connection->c_remote_uuid,
90                        sizeof(oss->lqo_uuid));
91         } else {
92                 /* Assume we have to move this one */
93                 list_del(&oss->lqo_oss_list);
94         }
95
96         oss->lqo_ost_count++;
97         lov->lov_tgts[index]->ltd_qos.ltq_oss = oss;
98
99         /* Add sorted by # of OSTs.  Find the first entry that we're
100            bigger than... */
101         list_for_each_entry(temposs, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
102                 if (oss->lqo_ost_count > temposs->lqo_ost_count)
103                         break;
104         }
105         /* ...and add before it.  If we're the first or smallest, temposs
106            points to the list head, and we add to the end. */
107         list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);
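        /* The OSS list stays sorted by descending lqo_ost_count; qos_calc_rr()
         * walks it in that order, so the OSSs with the most OSTs get first
         * pick of slots when their OSTs are spread across the RR array. */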
108
109         lov->lov_qos.lq_dirty = 1;
110         lov->lov_qos.lq_rr.lqr_dirty = 1;
111
112         CDEBUG(D_QOS, "add tgt %s to OSS %s (%d OSTs)\n",
113                obd_uuid2str(&lov->lov_tgts[index]->ltd_uuid),
114                obd_uuid2str(&oss->lqo_uuid),
115                oss->lqo_ost_count);
116
117 out:
118         mutex_up(&lov->lov_lock);
119         up_write(&lov->lov_qos.lq_rw_sem);
120         RETURN(rc);
121 }
122
123 int qos_del_tgt(struct obd_device *obd, struct lov_tgt_desc *tgt)
124 {
125         struct lov_obd *lov = &obd->u.lov;
126         struct lov_qos_oss *oss;
127         int rc = 0;
128         ENTRY;
129
130         down_write(&lov->lov_qos.lq_rw_sem);
131
132         oss = tgt->ltd_qos.ltq_oss;
133         if (!oss)
134                 GOTO(out, rc = -ENOENT);
135
136         oss->lqo_ost_count--;
137         if (oss->lqo_ost_count == 0) {
138                 CDEBUG(D_QOS, "removing OSS %s\n",
139                        obd_uuid2str(&oss->lqo_uuid));
140                 list_del(&oss->lqo_oss_list);
141                 OBD_FREE_PTR(oss);
142         }
143
144         lov->lov_qos.lq_dirty = 1;
145         lov->lov_qos.lq_rr.lqr_dirty = 1;
146 out:
147         up_write(&lov->lov_qos.lq_rw_sem);
148         RETURN(rc);
149 }
150
151 /* Recalculate per-object penalties for OSSs and OSTs,
152    based on the size of each OST within its OSS */
153 static int qos_calc_ppo(struct obd_device *obd)
154 {
155         struct lov_obd *lov = &obd->u.lov;
156         struct lov_qos_oss *oss;
157         __u64 ba_max, ba_min, temp;
158         __u32 num_active;
159         int rc, i, prio_wide;
160         time_t now, age;
161         ENTRY;
162
163         if (!lov->lov_qos.lq_dirty)
164                 GOTO(out, rc = 0);
165
166         num_active = lov->desc.ld_active_tgt_count - 1;
167         if (num_active < 1)
168                 GOTO(out, rc = -EAGAIN);
169
170         /* find bavail on each OSS */
171         list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
172                 oss->lqo_bavail = 0;
173         }
174         lov->lov_qos.lq_active_oss_count = 0;
175
176         /* How strongly the user wants OSTs selected "widely" (not recently
177            chosen, and not on recently used OSSs) as opposed to "freely"
178            (by available free space).  Range 0-256. */
179         prio_wide = 256 - lov->lov_qos.lq_prio_free;
180
181         ba_min = (__u64)(-1);
182         ba_max = 0;
183         now = cfs_time_current_sec();
184         /* Calculate OST penalty per object */
185         /* (lov ref taken in alloc_qos) */
186         for (i = 0; i < lov->desc.ld_tgt_count; i++) {
187                 if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
188                         continue;
189                 temp = TGT_BAVAIL(i);
190                 if (!temp)
191                         continue;
192                 ba_min = min(temp, ba_min);
193                 ba_max = max(temp, ba_max);
194
195                 /* Count the number of usable OSS's */
196                 if (lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail == 0)
197                         lov->lov_qos.lq_active_oss_count++;
198                 lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail += temp;
199
200                 /* per-OST penalty is prio * TGT_bavail / (num_ost - 1) / 2 */
201                 temp >>= 1;
202                 do_div(temp, num_active);
203                 lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj =
204                         (temp * prio_wide) >> 8;
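                /* Illustrative numbers (not defaults taken from this code):
                 * with lq_prio_free = 232 (~91%) prio_wide is 24; for
                 * TGT_BAVAIL = 1 TiB and num_active = 9 this works out to
                 * (2^39 / 9) * 24 / 256, i.e. roughly 5.3 GiB per object. */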
205
206                 age = (now - lov->lov_tgts[i]->ltd_qos.ltq_used) >> 3;
207                 if (lov->lov_qos.lq_reset || age > 32 * lov->desc.ld_qos_maxage)
208                         lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
209                 else if (age > lov->desc.ld_qos_maxage)
210                         /* Decay the penalty by half for every 8x the update
211                          * interval that the device has been idle.  That gives
212                          * lots of time for the statfs information to be
213                          * updated (which the penalty is only a proxy for),
214                          * and avoids penalizing OSS/OSTs under light load. */
215                         lov->lov_tgts[i]->ltd_qos.ltq_penalty >>=
216                                 (age / lov->desc.ld_qos_maxage);
217         }
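        /* Illustrative decay timeline, assuming ld_qos_maxage = 5 s: since
         * age = idle_time >> 3, an OST idle for up to 40 s keeps its penalty,
         * one idle for 80 s has it quartered (age / maxage = 2), and one idle
         * for more than 1280 s (age > 32 * maxage) has it cleared entirely. */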
218
219         num_active = lov->lov_qos.lq_active_oss_count - 1;
220         if (num_active < 1) {
221                 /* If there's only 1 OSS, we can't penalize it, so instead
222                    we have to double the OST penalty */
223                 num_active = 1;
224                 for (i = 0; i < lov->desc.ld_tgt_count; i++)
225                         if (lov->lov_tgts[i])
226                             lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj <<= 1;
227         }
228
229         /* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */
230         list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
231                 temp = oss->lqo_bavail >> 1;
232                 do_div(temp, oss->lqo_ost_count * num_active);
233                 oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8;
234
235                 age = (now - oss->lqo_used) >> 3;
236                 if (lov->lov_qos.lq_reset || age > 32 * lov->desc.ld_qos_maxage)
237                         oss->lqo_penalty = 0;
238                 else if (age > lov->desc.ld_qos_maxage)
239                         /* Decay the penalty by half for every 8x the update
240                          * interval that the device has been idle.  That gives
241                          * lots of time for the statfs information to be
242                          * updated (which the penalty is only a proxy for),
243                          * and avoids penalizing OSS/OSTs under light load. */
244                         oss->lqo_penalty >>= (age / lov->desc.ld_qos_maxage);
245         }
246
247         lov->lov_qos.lq_dirty = 0;
248         lov->lov_qos.lq_reset = 0;
249
250         /* If all OSTs have almost the same free space,
251          * use rr allocation for better creation performance */
252         lov->lov_qos.lq_same_space = 0;
253         if ((ba_max * (256 - lov->lov_qos.lq_threshold_rr)) >> 8 < ba_min) {
254                 lov->lov_qos.lq_same_space = 1;
255                 /* Reset weights for the next time we enter qos mode */
256                 lov->lov_qos.lq_reset = 1;
257         }
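        /* E.g. (illustrative value), with lq_threshold_rr = 43 (~17% of 256)
         * the OSTs count as having the "same" free space, and rr allocation is
         * used, whenever the emptiest OST still has more than ~83% of the free
         * space of the fullest one. */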
258         rc = 0;
259
260 out:
261         if (!rc && lov->lov_qos.lq_same_space)
262                 RETURN(-EAGAIN);
263         RETURN(rc);
264 }
265
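/* An OST's weight is simply its available space minus its own penalty and its
 * OSS's penalty, floored at zero.  E.g. (illustrative): 1 TiB available with a
 * 5 GiB OST penalty and a 2 GiB OSS penalty gives a weight of ~1017 GiB. */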
266 static int qos_calc_weight(struct lov_obd *lov, int i)
267 {
268         __u64 temp, temp2;
269
270         /* Final ost weight = TGT_BAVAIL - ost_penalty - oss_penalty */
271         temp = TGT_BAVAIL(i);
272         temp2 = lov->lov_tgts[i]->ltd_qos.ltq_penalty +
273                 lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty;
274         if (temp < temp2)
275                 lov->lov_tgts[i]->ltd_qos.ltq_weight = 0;
276         else
277                 lov->lov_tgts[i]->ltd_qos.ltq_weight = temp - temp2;
278         return 0;
279 }
280
281 /* We just used this index for a stripe; adjust everyone's weights */
282 static int qos_used(struct lov_obd *lov, struct ost_pool *osts,
283                     __u32 index, __u64 *total_wt)
284 {
285         struct lov_qos_oss *oss;
286         int j;
287         ENTRY;
288
289         /* Don't allocate on this OST again until the next alloc_qos */
290         lov->lov_tgts[index]->ltd_qos.ltq_usable = 0;
291
292         oss = lov->lov_tgts[index]->ltd_qos.ltq_oss;
293
294         /* Decay old penalty by half (we're adding max penalty, and don't
295            want it to run away.) */
296         lov->lov_tgts[index]->ltd_qos.ltq_penalty >>= 1;
297         oss->lqo_penalty >>= 1;
298
299         /* mark the OSS and OST as recently used */
300         lov->lov_tgts[index]->ltd_qos.ltq_used =
301                 oss->lqo_used = cfs_time_current_sec();
302
303         /* Add the maximum penalty to this OST and its OSS */
304         lov->lov_tgts[index]->ltd_qos.ltq_penalty +=
305                 lov->lov_tgts[index]->ltd_qos.ltq_penalty_per_obj *
306                 lov->desc.ld_active_tgt_count;
307         oss->lqo_penalty += oss->lqo_penalty_per_obj *
308                 lov->lov_qos.lq_active_oss_count;
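        /* Every later qos_used() call subtracts one penalty_per_obj again, so
         * charging per_obj times the active target (resp. OSS) count here
         * means the penalty drains back to zero after roughly that many
         * further object allocations. */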
309
310         /* Decrease all OSS penalties */
311         list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
312                 if (oss->lqo_penalty < oss->lqo_penalty_per_obj)
313                         oss->lqo_penalty = 0;
314                 else
315                         oss->lqo_penalty -= oss->lqo_penalty_per_obj;
316         }
317
318         *total_wt = 0;
319         /* Decrease all OST penalties */
320         for (j = 0; j < osts->op_count; j++) {
321                 int i;
322
323                 i = osts->op_array[j];
324                 if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
325                         continue;
326                 if (lov->lov_tgts[i]->ltd_qos.ltq_penalty <
327                     lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj)
328                         lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
329                 else
330                         lov->lov_tgts[i]->ltd_qos.ltq_penalty -=
331                         lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj;
332
333                 qos_calc_weight(lov, i);
334
335                 /* Recalc the total weight of usable osts */
336                 if (lov->lov_tgts[i]->ltd_qos.ltq_usable)
337                         *total_wt += lov->lov_tgts[i]->ltd_qos.ltq_weight;
338
339 #ifdef QOS_DEBUG
340                 CDEBUG(D_QOS, "recalc tgt %d usable=%d avail="LPU64
341                        " ostppo="LPU64" ostp="LPU64" ossppo="LPU64
342                        " ossp="LPU64" wt="LPU64"\n",
343                        i, lov->lov_tgts[i]->ltd_qos.ltq_usable,
344                        TGT_BAVAIL(i) >> 10,
345                        lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj >> 10,
346                        lov->lov_tgts[i]->ltd_qos.ltq_penalty >> 10,
347                        lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty_per_obj>>10,
348                        lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty >> 10,
349                        lov->lov_tgts[i]->ltd_qos.ltq_weight >> 10);
350 #endif
351         }
352
353         RETURN(0);
354 }
355
356 #define LOV_QOS_EMPTY ((__u32)-1)
357 /* compute optimal round-robin order, based on OSTs per OSS
358  */
359 static int qos_calc_rr(struct lov_obd *lov, struct ost_pool *src_pool,
360                        struct lov_qos_rr *lqr)
361 {
362         struct lov_qos_oss *oss;
363         unsigned placed, real_count;
364         int i, rc;
365         ENTRY;
366
367         if (!lqr->lqr_dirty) {
368                 LASSERT(lqr->lqr_pool.op_size);
369                 RETURN(0);
370         }
371
372         /* Do actual allocation. */
373         down_write(&lov->lov_qos.lq_rw_sem);
374
375         real_count = src_pool->op_count;
376
377         /* Zero the pool array */
378         /* alloc_rr is holding a read lock on the pool, so nobody is adding
379            to or deleting from the pool.  The lq_rw_sem ensures that nobody
380            else is reading. */
381         lqr->lqr_pool.op_count = real_count;
382         rc = lov_ost_pool_extend(&lqr->lqr_pool, real_count);
383         if (rc) {
384                 up_write(&lov->lov_qos.lq_rw_sem);
385                 RETURN(rc);
386         }
387         for (i = 0; i < lqr->lqr_pool.op_count; i++)
388                 lqr->lqr_pool.op_array[i] = LOV_QOS_EMPTY;
389
390         /* Place all the OSTs from 1 OSS at the same time. */
391         placed = 0;
392         list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
393                 int j = 0;
394                 for (i = 0; i < lqr->lqr_pool.op_count; i++) {
395                         if (lov->lov_tgts[src_pool->op_array[i]] &&
396                             (lov->lov_tgts[src_pool->op_array[i]]->ltd_qos.ltq_oss == oss)) {
397                               /* Evenly space these OSTs across the array */
398                               int next = j * lqr->lqr_pool.op_count / oss->lqo_ost_count;
399                               while (lqr->lqr_pool.op_array[next] !=
400                                      LOV_QOS_EMPTY)
401                                       next = (next + 1) % lqr->lqr_pool.op_count;
402                               lqr->lqr_pool.op_array[next] = src_pool->op_array[i];
403                               j++;
404                               placed++;
405                         }
406                 }
407         }
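        /* Illustrative layout (made-up configuration): two OSSs, A with 4 OSTs
         * and B with 2, all six in the source pool.  A is placed first (larger
         * lqo_ost_count) into slots 0, 1, 3, 4; B's OSTs then land in the
         * remaining slots 2 and 5, giving the order A A B A A B. */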
408
409         lqr->lqr_dirty = 0;
410         up_write(&lov->lov_qos.lq_rw_sem);
411
412         if (placed != real_count) {
413                 /* This should never happen */
414                 LCONSOLE_ERROR_MSG(0x14e, "Failed to place all OSTs in the "
415                                    "round-robin list (%d of %d).\n",
416                                    placed, real_count);
417                 for (i = 0; i < lqr->lqr_pool.op_count; i++) {
418                         LCONSOLE(D_WARNING, "rr #%d ost idx=%d\n", i,
419                                  lqr->lqr_pool.op_array[i]);
420                 }
421                 lqr->lqr_dirty = 1;
422                 RETURN(-EAGAIN);
423         }
424
425 #ifdef QOS_DEBUG
426         for (i = 0; i < lqr->lqr_pool.op_count; i++) {
427                 LCONSOLE(D_QOS, "rr #%d ost idx=%d\n", i,
428                          lqr->lqr_pool.op_array[i]);
429         }
430 #endif
431
432         RETURN(0);
433 }
434
435
436 void qos_shrink_lsm(struct lov_request_set *set)
437 {
438         struct lov_stripe_md *lsm = set->set_oi->oi_md, *lsm_new;
439         /* XXX LOV STACKING call into osc for sizes */
440         unsigned oldsize, newsize;
441
442         if (set->set_oti && set->set_cookies && set->set_cookie_sent) {
443                 struct llog_cookie *cookies;
444                 oldsize = lsm->lsm_stripe_count * sizeof(*cookies);
445                 newsize = set->set_count * sizeof(*cookies);
446
447                 cookies = set->set_cookies;
448                 oti_alloc_cookies(set->set_oti, set->set_count);
449                 if (set->set_oti->oti_logcookies) {
450                         memcpy(set->set_oti->oti_logcookies, cookies, newsize);
451                         OBD_FREE(cookies, oldsize);
452                         set->set_cookies = set->set_oti->oti_logcookies;
453                 } else {
454                         CWARN("'leaking' %d bytes\n", oldsize - newsize);
455                 }
456         }
457
458         CWARN("using fewer stripes for object "LPU64": old %u new %u\n",
459               lsm->lsm_object_id, lsm->lsm_stripe_count, set->set_count);
460         LASSERT(lsm->lsm_stripe_count >= set->set_count);
461
462         newsize = lov_stripe_md_size(set->set_count);
463         OBD_ALLOC(lsm_new, newsize);
464         if (lsm_new != NULL) {
465                 int i;
466                 memcpy(lsm_new, lsm, sizeof(*lsm));
467                 for (i = 0; i < lsm->lsm_stripe_count; i++) {
468                         if (i < set->set_count) {
469                                 lsm_new->lsm_oinfo[i] = lsm->lsm_oinfo[i];
470                                 continue;
471                         }
472                         OBD_SLAB_FREE(lsm->lsm_oinfo[i], lov_oinfo_slab,
473                                       sizeof(struct lov_oinfo));
474                 }
475                 lsm_new->lsm_stripe_count = set->set_count;
476                 OBD_FREE(lsm, sizeof(struct lov_stripe_md) +
477                          lsm->lsm_stripe_count * sizeof(struct lov_oinfo *));
478                 set->set_oi->oi_md = lsm_new;
479         } else {
480                 CWARN("'leaking' a few bytes\n");
481         }
482 }
483
484 int qos_remedy_create(struct lov_request_set *set, struct lov_request *req)
485 {
486         struct lov_stripe_md *lsm = set->set_oi->oi_md;
487         struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
488         unsigned ost_idx, ost_count = lov->desc.ld_tgt_count;
489         int stripe, i, rc = -EIO;
490         ENTRY;
491
492         ost_idx = (req->rq_idx + lsm->lsm_stripe_count) % ost_count;
493         for (i = 0; i < ost_count; i++, ost_idx = (ost_idx + 1) % ost_count) {
494                 if (!lov->lov_tgts[ost_idx] ||
495                     !lov->lov_tgts[ost_idx]->ltd_active)
496                         continue;
497                 /* check whether an object has already been created on this ost */
498                 for (stripe = 0; stripe < lsm->lsm_stripe_count; stripe++) {
499                         /* this is the stripe whose create failed; skip it */
500                         if (stripe == req->rq_stripe)
501                                 continue;
502                         /* this OST already holds an object for another stripe */
503                         if (ost_idx == lsm->lsm_oinfo[stripe]->loi_ost_idx)
504                                 break;
505                 }
506                 if (stripe >= lsm->lsm_stripe_count) {
507                         req->rq_idx = ost_idx;
508                         rc = obd_create(lov->lov_tgts[ost_idx]->ltd_exp,
509                                         req->rq_oi.oi_oa, &req->rq_oi.oi_md,
510                                         set->set_oti);
511                         if (!rc)
512                                 break;
513                 }
514         }
515         RETURN(rc);
516 }
517
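/* When the caller is using the filesystem default striping
 * (LOV_USES_DEFAULT_STRIPE), up to a quarter of the requested stripes may be
 * dropped if too few OSTs qualify, e.g. a default request for 4 stripes is
 * still satisfied with 3. */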
518 static int min_stripe_count(int stripe_cnt, int flags)
519 {
520         return (flags & LOV_USES_DEFAULT_STRIPE ?
521                 stripe_cnt - (stripe_cnt / 4) : stripe_cnt);
522 }
523
524 #define LOV_CREATE_RESEED_MULT 4
525 #define LOV_CREATE_RESEED_MIN  1000
526 /* Allocate objects on OSTs with the round-robin algorithm */
527 static int alloc_rr(struct lov_obd *lov, int *idx_arr, int *stripe_cnt,
528                     char *poolname, int flags)
529 {
530         unsigned array_idx;
531         int i, rc, *idx_pos;
532         __u32 ost_idx;
533         int ost_start_idx_temp;
534         int speed = 0;
535         int stripe_cnt_min = min_stripe_count(*stripe_cnt, flags);
536         struct pool_desc *pool;
537         struct ost_pool *osts;
538         struct lov_qos_rr *lqr;
539         ENTRY;
540
541         pool = lov_find_pool(lov, poolname);
542         if (pool == NULL) {
543                 osts = &(lov->lov_packed);
544                 lqr = &(lov->lov_qos.lq_rr);
545         } else {
546                 down_read(&pool_tgt_rw_sem(pool));
547                 osts = &(pool->pool_obds);
548                 lqr = &(pool->pool_rr);
549         }
550
551         rc = qos_calc_rr(lov, osts, lqr);
552         if (rc)
553                 GOTO(out, rc);
554
555         if (--lqr->lqr_start_count <= 0) {
556                 lqr->lqr_start_idx = ll_rand() % osts->op_count;
557                 lqr->lqr_start_count =
558                         (LOV_CREATE_RESEED_MIN / max(osts->op_count, 1U) +
559                          LOV_CREATE_RESEED_MULT) * max(osts->op_count, 1U);
560         } else if (stripe_cnt_min >= osts->op_count ||
561                    lqr->lqr_start_idx > osts->op_count) {
562                 /* If we have allocated from all of the OSTs, slowly
563                  * precess the next start if the OST/stripe count isn't
564                  * already doing this for us. */
565                 lqr->lqr_start_idx %= osts->op_count;
566                 if (*stripe_cnt > 1 && (osts->op_count % (*stripe_cnt)) != 1)
567                         ++lqr->lqr_offset_idx;
568         }
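        /* Given the constants above, the RR starting index is re-randomized
         * roughly every LOV_CREATE_RESEED_MIN creations plus
         * LOV_CREATE_RESEED_MULT passes over the OST list, so allocation does
         * not keep favouring the same starting OST indefinitely. */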
569         down_read(&lov->lov_qos.lq_rw_sem);
570         ost_start_idx_temp = lqr->lqr_start_idx;
571
572 repeat_find:
573         array_idx = (lqr->lqr_start_idx + lqr->lqr_offset_idx) % osts->op_count;
574         idx_pos = idx_arr;
575 #ifdef QOS_DEBUG
576         CDEBUG(D_QOS, "pool '%s' want %d startidx %d startcnt %d offset %d "
577                "active %d count %d arrayidx %d\n", poolname,
578                *stripe_cnt, lqr->lqr_start_idx, lqr->lqr_start_count,
579                lqr->lqr_offset_idx, osts->op_count, osts->op_count, array_idx);
580 #endif
581
582         for (i = 0; i < osts->op_count;
583                     i++, array_idx=(array_idx + 1) % osts->op_count) {
584                 ++lqr->lqr_start_idx;
585                 ost_idx = lqr->lqr_pool.op_array[array_idx];
586 #ifdef QOS_DEBUG
587                 CDEBUG(D_QOS, "#%d strt %d act %d strp %d ary %d idx %d\n",
588                        i, lqr->lqr_start_idx,
589                        ((ost_idx != LOV_QOS_EMPTY) && lov->lov_tgts[ost_idx]) ?
590                        lov->lov_tgts[ost_idx]->ltd_active : 0,
591                        idx_pos - idx_arr, array_idx, ost_idx);
592 #endif
593                 if ((ost_idx == LOV_QOS_EMPTY) || !lov->lov_tgts[ost_idx] ||
594                     !lov->lov_tgts[ost_idx]->ltd_active)
595                         continue;
596
597                 /* Fail check before osc_precreate() is called,
598                    so we can 'fail' only a single OSC. */
599                 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
600                         continue;
601
602                 /* Drop slow OSCs if we can */
603                 if (obd_precreate(lov->lov_tgts[ost_idx]->ltd_exp) > speed)
604                         continue;
605
606                 *idx_pos = ost_idx;
607                 idx_pos++;
608                 /* We have enough stripes */
609                 if (idx_pos - idx_arr == *stripe_cnt)
610                         break;
611         }
612         if ((speed < 2) && (idx_pos - idx_arr < stripe_cnt_min)) {
613                 /* Try again, allowing slower OSCs */
614                 speed++;
615                 lqr->lqr_start_idx = ost_start_idx_temp;
616                 goto repeat_find;
617         }
618
619         up_read(&lov->lov_qos.lq_rw_sem);
620
621         *stripe_cnt = idx_pos - idx_arr;
622 out:
623         if (pool != NULL) {
624                 up_read(&pool_tgt_rw_sem(pool));
625                 /* put back ref got by lov_find_pool() */
626                 lov_pool_putref(pool);
627         }
628
629         RETURN(rc);
630 }
631
632 /* Allocate objects on OSTs starting at a specific stripe offset */
633 static int alloc_specific(struct lov_obd *lov, struct lov_stripe_md *lsm,
634                           int *idx_arr)
635 {
636         unsigned ost_idx, array_idx, ost_count;
637         int i, rc, *idx_pos;
638         int speed = 0;
639         struct pool_desc *pool;
640         struct ost_pool *osts;
641         ENTRY;
642
643         pool = lov_find_pool(lov, lsm->lsm_pool_name);
644         if (pool == NULL) {
645                 osts = &(lov->lov_packed);
646         } else {
647                 down_read(&pool_tgt_rw_sem(pool));
648                 osts = &(pool->pool_obds);
649         }
650
651         ost_count = osts->op_count;
652
653 repeat_find:
654         /* search loi_ost_idx in ost array */
655         array_idx = 0;
656         for (i = 0; i < ost_count; i++) {
657                 if (osts->op_array[i] == lsm->lsm_oinfo[0]->loi_ost_idx) {
658                         array_idx = i;
659                         break;
660                 }
661         }
662         if (i == ost_count) {
663                 CERROR("Start index %d not found in pool '%s'\n",
664                        lsm->lsm_oinfo[0]->loi_ost_idx, lsm->lsm_pool_name);
665                 GOTO(out, rc = -EINVAL);
666         }
667
668         idx_pos = idx_arr;
669         for (i = 0; i < ost_count;
670              i++, array_idx = (array_idx + 1) % ost_count) {
671                 ost_idx = osts->op_array[array_idx];
672
673                 if (!lov->lov_tgts[ost_idx] ||
674                     !lov->lov_tgts[ost_idx]->ltd_active) {
675                         continue;
676                 }
677
678                 /* Fail check before osc_precreate() is called,
679                    so we can 'fail' only a single OSC. */
680                 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
681                         continue;
682
683                 /* Drop slow OSCs if we can, but not for requested start idx.
684                  *
685                  * This means "if OSC is slow and it is not the requested
686                  * start OST, then it can be skipped, otherwise skip it only
687                  * if it is inactive/recovering/out-of-space." */
688                 if ((obd_precreate(lov->lov_tgts[ost_idx]->ltd_exp) > speed) &&
689                     (i != 0 || speed >= 2))
690                         continue;
691
692                 *idx_pos = ost_idx;
693                 idx_pos++;
694                 /* We have enough stripes */
695                 if (idx_pos - idx_arr == lsm->lsm_stripe_count)
696                         GOTO(out, rc = 0);
697         }
698         if (speed < 2) {
699                 /* Try again, allowing slower OSCs */
700                 speed++;
701                 goto repeat_find;
702         }
703
704         /* If we were passed specific striping params, then a failure to
705          * meet those requirements is an error, since we can't reallocate
706          * that memory (it might be part of a larger array or something).
707          *
708          * We can only get here if lsm_stripe_count was originally > 1.
709          */
710         CERROR("can't lstripe objid "LPX64": have %d want %u\n",
711                lsm->lsm_object_id, (int)(idx_pos - idx_arr),
712                lsm->lsm_stripe_count);
713         rc = -EFBIG;
714 out:
715         if (pool != NULL) {
716                 up_read(&pool_tgt_rw_sem(pool));
717                 /* put back ref got by lov_find_pool() */
718                 lov_pool_putref(pool);
719         }
720         RETURN(rc);
721 }
722
723 /* Alloc objects on osts with optimization based on:
724    - free space
725    - network resources (shared OSS's)
726 */
727 static int alloc_qos(struct obd_export *exp, int *idx_arr, int *stripe_cnt,
728                      char *poolname, int flags)
729 {
730         struct lov_obd *lov = &exp->exp_obd->u.lov;
731         __u64 total_weight = 0;
732         int nfound, good_osts, i, rc = 0;
733         int stripe_cnt_min = min_stripe_count(*stripe_cnt, flags);
734         struct pool_desc *pool;
735         struct ost_pool *osts;
736         struct lov_qos_rr *lqr;
737         ENTRY;
738
739         if (stripe_cnt_min < 1)
740                 RETURN(-EINVAL);
741
742         pool = lov_find_pool(lov, poolname);
743         if (pool == NULL) {
744                 osts = &(lov->lov_packed);
745                 lqr = &(lov->lov_qos.lq_rr);
746         } else {
747                 down_read(&pool_tgt_rw_sem(pool));
748                 osts = &(pool->pool_obds);
749                 lqr = &(pool->pool_rr);
750         }
751
752         obd_getref(exp->exp_obd);
753         /* wait for fresh statfs info if needed, the rpcs are sent in
754          * lov_create() */
755         qos_statfs_update(exp->exp_obd,
756                           cfs_time_shift_64(-2 * lov->desc.ld_qos_maxage), 1);
757
758         down_write(&lov->lov_qos.lq_rw_sem);
759
760         if (lov->desc.ld_active_tgt_count < 2)
761                 GOTO(out, rc = -EAGAIN);
762
763         rc = qos_calc_ppo(exp->exp_obd);
764         if (rc)
765                 GOTO(out, rc);
766
767         good_osts = 0;
768         /* Find all the OSTs that are valid stripe candidates */
769         for (i = 0; i < osts->op_count; i++) {
770                 if (!lov->lov_tgts[osts->op_array[i]] ||
771                     !lov->lov_tgts[osts->op_array[i]]->ltd_active)
772                         continue;
773
774                 /* Fail check before osc_precreate() is called,
775                    so we can 'fail' only a single OSC. */
776                 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && osts->op_array[i] == 0)
777                         continue;
778
779                 if (obd_precreate(lov->lov_tgts[osts->op_array[i]]->ltd_exp) > 2)
780                         continue;
781
782                 lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_usable = 1;
783                 qos_calc_weight(lov, osts->op_array[i]);
784                 total_weight += lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_weight;
785
786                 good_osts++;
787         }
788
789 #ifdef QOS_DEBUG
790         CDEBUG(D_QOS, "found %d good osts\n", good_osts);
791 #endif
792
793         if (good_osts < stripe_cnt_min)
794                 GOTO(out, rc = -EAGAIN);
795
796         /* We have enough osts */
797         if (good_osts < *stripe_cnt)
798                 *stripe_cnt = good_osts;
799
800         /* Find enough OSTs with weighted random allocation. */
801         nfound = 0;
802         while (nfound < *stripe_cnt) {
803                 __u64 rand, cur_weight;
804
805                 cur_weight = 0;
806                 rc = -ENODEV;
807
808                 if (total_weight) {
809 #if BITS_PER_LONG == 32
810                         rand = ll_rand() % (unsigned)total_weight;
811                         /* If total_weight > 32-bit, first generate the high
812                          * 32 bits of the random number, then add in the low
813                          * 32 bits (truncated to the upper limit, if needed) */
814                         if (total_weight > 0xffffffffULL)
815                                 rand = (__u64)(ll_rand() %
816                                           (unsigned)(total_weight >> 32)) << 32;
817                         else
818                                 rand = 0;
819
820                         if (rand == (total_weight & 0xffffffff00000000ULL))
821                                 rand |= ll_rand() % (unsigned)total_weight;
822                         else
823                                 rand |= ll_rand();
824
825 #else
826                         rand = ((__u64)ll_rand() << 32 | ll_rand()) %
827                                 total_weight;
828 #endif
829                 } else {
830                         rand = 0;
831                 }
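                /* rand is now (roughly) uniform in [0, total_weight).  The
                 * scan below accumulates ltq_weight over the usable OSTs and
                 * picks the first one whose running total reaches rand, so an
                 * OST is chosen with probability proportional to its weight. */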
832
833                 /* On average, this will hit larger-weighted osts more often.
834                    0-weight osts will always get used last (only when rand=0).*/
835                 for (i = 0; i < osts->op_count; i++) {
836                         if (!lov->lov_tgts[osts->op_array[i]] ||
837                             !lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_usable)
838                                 continue;
839
840                         cur_weight += lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_weight;
841 #ifdef QOS_DEBUG
842                         CDEBUG(D_QOS, "stripe_cnt=%d nfound=%d cur_weight="LPU64
843                                       " rand="LPU64" total_weight="LPU64"\n",
844                                *stripe_cnt, nfound, cur_weight, rand, total_weight);
845 #endif
846                         if (cur_weight >= rand) {
847 #ifdef QOS_DEBUG
848                                 CDEBUG(D_QOS, "assigned stripe=%d to idx=%d\n",
849                                        nfound, osts->op_array[i]);
850 #endif
851                                 idx_arr[nfound++] = osts->op_array[i];
852                                 qos_used(lov, osts, osts->op_array[i], &total_weight);
853                                 rc = 0;
854                                 break;
855                         }
856                 }
857                 if (rc) {
858                         CDEBUG(D_QOS, "Didn't find any OSTs? Reduce total weight\n");
859                         if (total_weight == 0)
860                                 break;
861                         else
862                                 total_weight = 0;
863                 }
864         }
865
866         LASSERT(nfound == *stripe_cnt);
867
868 out:
869         up_write(&lov->lov_qos.lq_rw_sem);
870
871         if (pool != NULL) {
872                 up_read(&pool_tgt_rw_sem(pool));
873                 /* put back ref got by lov_find_pool() */
874                 lov_pool_putref(pool);
875         }
876
877         if (rc == -EAGAIN)
878                 rc = alloc_rr(lov, idx_arr, stripe_cnt, poolname, flags);
879
880         obd_putref(exp->exp_obd);
881         RETURN(rc);
882 }
883
884 /* return the newly allocated stripe count on success */
885 static int alloc_idx_array(struct obd_export *exp, struct lov_stripe_md *lsm,
886                            int newea, int **idx_arr, int *arr_cnt, int flags)
887 {
888         struct lov_obd *lov = &exp->exp_obd->u.lov;
889         int stripe_cnt = lsm->lsm_stripe_count;
890         int i, rc = 0;
891         int *tmp_arr = NULL;
892         ENTRY;
893
894         *arr_cnt = stripe_cnt;
895         OBD_ALLOC(tmp_arr, *arr_cnt * sizeof(int));
896         if (tmp_arr == NULL)
897                 RETURN(-ENOMEM);
898         for (i = 0; i < *arr_cnt; i++)
899                 tmp_arr[i] = -1;
900
901         if (newea ||
902             lsm->lsm_oinfo[0]->loi_ost_idx >= lov->desc.ld_tgt_count)
903                 rc = alloc_qos(exp, tmp_arr, &stripe_cnt,
904                                lsm->lsm_pool_name, flags);
905         else
906                 rc = alloc_specific(lov, lsm, tmp_arr);
907
908         if (rc)
909                 GOTO(out_arr, rc);
910
911         *idx_arr = tmp_arr;
912         RETURN(stripe_cnt);
913 out_arr:
914         OBD_FREE(tmp_arr, *arr_cnt * sizeof(int));
915         *arr_cnt = 0;
916         RETURN(rc);
917 }
918
919 static void free_idx_array(int *idx_arr, int arr_cnt)
920 {
921         if (arr_cnt)
922                 OBD_FREE(idx_arr, arr_cnt * sizeof(int));
923 }
924
925 int qos_prep_create(struct obd_export *exp, struct lov_request_set *set)
926 {
927         struct lov_obd *lov = &exp->exp_obd->u.lov;
928         struct lov_stripe_md *lsm;
929         struct obdo *src_oa = set->set_oi->oi_oa;
930         struct obd_trans_info *oti = set->set_oti;
931         int i, stripes, rc = 0, newea = 0;
932         int flag = LOV_USES_ASSIGNED_STRIPE;
933         int *idx_arr = NULL, idx_cnt = 0;
934         ENTRY;
935
936         LASSERT(src_oa->o_valid & OBD_MD_FLID);
937
938         if (set->set_oi->oi_md == NULL) {
939                 int stripes_def = lov_get_stripecnt(lov, 0);
940
941                 /* If the MDS file was truncated up to some size, stripe over
942                  * enough OSTs to allow the file to be created at that size.
943                  * This may mean we use more than the default # of stripes. */
944                 if (src_oa->o_valid & OBD_MD_FLSIZE) {
945                         obd_size min_bavail = LUSTRE_STRIPE_MAXBYTES;
946
947                         /* Find a small number of stripes we can use
948                            (up to # of active osts). */
949                         stripes = 1;
950                         for (i = 0; i < lov->desc.ld_tgt_count; i++) {
951                                 if (!lov->lov_tgts[i] ||
952                                     !lov->lov_tgts[i]->ltd_active)
953                                         continue;
954                                 min_bavail = min(min_bavail, TGT_BAVAIL(i));
955                                 if (min_bavail * stripes > src_oa->o_size)
956                                         break;
957                                 stripes++;
958                         }
959
960                         if (stripes < stripes_def)
961                                 stripes = stripes_def;
962                 } else {
963                          flag = LOV_USES_DEFAULT_STRIPE;
964                          stripes = stripes_def;
965                 }
966
967                 rc = lov_alloc_memmd(&set->set_oi->oi_md, stripes,
968                                      lov->desc.ld_pattern ?
969                                      lov->desc.ld_pattern : LOV_PATTERN_RAID0,
970                                      LOV_MAGIC);
971                 if (rc < 0)
972                         GOTO(out_err, rc);
973                 newea = 1;
974                 rc = 0;
975         }
976
977         lsm = set->set_oi->oi_md;
978         lsm->lsm_object_id = src_oa->o_id;
979         if (!lsm->lsm_stripe_size)
980                 lsm->lsm_stripe_size = lov->desc.ld_default_stripe_size;
981         if (!lsm->lsm_pattern) {
982                 LASSERT(lov->desc.ld_pattern);
983                 lsm->lsm_pattern = lov->desc.ld_pattern;
984         }
985
986         stripes = alloc_idx_array(exp, lsm, newea, &idx_arr, &idx_cnt, flag);
987         if (stripes <= 0)
988                 GOTO(out_err, rc = stripes ? stripes : -EIO);
989         LASSERTF(stripes <= lsm->lsm_stripe_count,"requested %d allocated %d\n",
990                  lsm->lsm_stripe_count, stripes);
991
992         for (i = 0; i < stripes; i++) {
993                 struct lov_request *req;
994                 int ost_idx = idx_arr[i];
995                 LASSERT(ost_idx >= 0);
996
997                 OBD_ALLOC(req, sizeof(*req));
998                 if (req == NULL)
999                         GOTO(out_err, rc = -ENOMEM);
1000                 lov_set_add_req(req, set);
1001
1002                 req->rq_buflen = sizeof(*req->rq_oi.oi_md);
1003                 OBD_ALLOC(req->rq_oi.oi_md, req->rq_buflen);
1004                 if (req->rq_oi.oi_md == NULL)
1005                         GOTO(out_err, rc = -ENOMEM);
1006
1007                 OBDO_ALLOC(req->rq_oi.oi_oa);
1008                 if (req->rq_oi.oi_oa == NULL)
1009                         GOTO(out_err, rc = -ENOMEM);
1010
1011                 req->rq_idx = ost_idx;
1012                 req->rq_stripe = i;
1013                 /* create data objects with "parent" OA */
1014                 memcpy(req->rq_oi.oi_oa, src_oa, sizeof(*req->rq_oi.oi_oa));
1015                 req->rq_oi.oi_cb_up = cb_create_update;
1016
1017                 /* XXX When we start creating objects on demand, we need to
1018                  *     make sure that we always create the object on the
1019                  *     stripe which holds the existing file size.
1020                  */
1021                 if (src_oa->o_valid & OBD_MD_FLSIZE) {
1022                         req->rq_oi.oi_oa->o_size =
1023                                 lov_size_to_stripe(lsm, src_oa->o_size, i);
1024
1025                         CDEBUG(D_INODE, "stripe %d has size "LPU64"/"LPU64"\n",
1026                                i, req->rq_oi.oi_oa->o_size, src_oa->o_size);
1027                 }
1028         }
1029         LASSERT(set->set_count == stripes);
1030
1031         if (stripes < lsm->lsm_stripe_count)
1032                 qos_shrink_lsm(set);
1033         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LOV_PREP_CREATE)) {
1034                 qos_shrink_lsm(set);
1035                 rc = -EIO;
1036         }
1037
1038         if (oti && (src_oa->o_valid & OBD_MD_FLCOOKIE)) {
1039                 oti_alloc_cookies(oti, set->set_count);
1040                 if (!oti->oti_logcookies)
1041                         GOTO(out_err, rc = -ENOMEM);
1042                 set->set_cookies = oti->oti_logcookies;
1043         }
1044 out_err:
1045         if (newea && rc)
1046                 obd_free_memmd(exp, &set->set_oi->oi_md);
1047         if (idx_arr)
1048                 free_idx_array(idx_arr, idx_cnt);
1049         EXIT;
1050         return rc;
1051 }
1052
1053 void qos_update(struct lov_obd *lov)
1054 {
1055         ENTRY;
1056         lov->lov_qos.lq_dirty = 1;
1057 }
1058
1059 void qos_statfs_done(struct lov_obd *lov)
1060 {
1061         LASSERT(lov->lov_qos.lq_statfs_in_progress);
1062         down_write(&lov->lov_qos.lq_rw_sem);
1063         lov->lov_qos.lq_statfs_in_progress = 0;
1064         /* wake up any threads waiting for the statfs rpcs to complete */
1065         cfs_waitq_signal(&lov->lov_qos.lq_statfs_waitq);
1066         up_write(&lov->lov_qos.lq_rw_sem);
1067 }
1068
1069 static int qos_statfs_ready(struct obd_device *obd, __u64 max_age)
1070 {
1071         struct lov_obd         *lov = &obd->u.lov;
1072         int rc;
1073         ENTRY;
1074         down_read(&lov->lov_qos.lq_rw_sem);
1075         rc = lov->lov_qos.lq_statfs_in_progress == 0 ||
1076              cfs_time_beforeq_64(max_age, obd->obd_osfs_age);
1077         up_read(&lov->lov_qos.lq_rw_sem);
1078         RETURN(rc);
1079 }
1080
1081 /*
1082  * Update statfs data if the current osfs age is older than max_age.
1083  * If wait is not set, it means that we are called from lov_create()
1084  * and we should just issue the rpcs without waiting for them to complete.
1085  * If wait is set, we are called from alloc_qos() and we just have
1086  * to wait for the request set to complete.
1087  */
1088 void qos_statfs_update(struct obd_device *obd, __u64 max_age, int wait)
1089 {
1090         struct lov_obd         *lov = &obd->u.lov;
1091         struct obd_info        *oinfo;
1092         int                     rc = 0;
1093         struct ptlrpc_request_set *set = NULL;
1094         ENTRY;
1095
1096         if (cfs_time_beforeq_64(max_age, obd->obd_osfs_age))
1097                 /* statfs data is recent enough; no need to refresh it */
1098                 RETURN_EXIT;
1099
1100         if (!wait && lov->lov_qos.lq_statfs_in_progress)
1101                 /* statfs already in progress */
1102                 RETURN_EXIT;
1103
1104         down_write(&lov->lov_qos.lq_rw_sem);
1105         if (lov->lov_qos.lq_statfs_in_progress) {
1106                 up_write(&lov->lov_qos.lq_rw_sem);
1107                 GOTO(out, rc = 0);
1108         }
1109         /* no statfs in flight, send rpcs */
1110         lov->lov_qos.lq_statfs_in_progress = 1;
1111         up_write(&lov->lov_qos.lq_rw_sem);
1112
1113         if (wait)
1114                 CDEBUG(D_QOS, "%s: did not manage to get fresh statfs data "
1115                        "in a timely manner (osfs age "LPU64", max age "LPU64")"
1116                        ", sending new statfs rpcs\n",
1117                        obd_uuid2str(&lov->desc.ld_uuid), obd->obd_osfs_age,
1118                        max_age);
1119
1120         /* need to send statfs rpcs */
1121         CDEBUG(D_QOS, "sending new statfs requests\n");
1122         memset(lov->lov_qos.lq_statfs_data, 0,
1123                sizeof(*lov->lov_qos.lq_statfs_data));
1124         oinfo = &lov->lov_qos.lq_statfs_data->lsd_oi;
1125         oinfo->oi_osfs = &lov->lov_qos.lq_statfs_data->lsd_statfs;
1126         oinfo->oi_flags = OBD_STATFS_NODELAY;
1127         set = ptlrpc_prep_set();
1128         if (!set)
1129                 GOTO(out_failed, rc = -ENOMEM);
1130
1131         rc = obd_statfs_async(obd, oinfo, max_age, set);
1132         if (rc || list_empty(&set->set_requests)) {
1133                 if (rc)
1134                         CWARN("statfs failed with %d\n", rc);
1135                 GOTO(out_failed, rc);
1136         }
1137         /* send requests via ptlrpcd */
1138         oinfo->oi_flags |= OBD_STATFS_PTLRPCD;
1139         ptlrpcd_add_rqset(set);
1140         GOTO(out, rc);
1141
1142 out_failed:
1143         down_write(&lov->lov_qos.lq_rw_sem);
1144         lov->lov_qos.lq_statfs_in_progress = 0;
1145         /* wake up any threads waiting for the statfs rpcs to complete */
1146         cfs_waitq_signal(&lov->lov_qos.lq_statfs_waitq);
1147         up_write(&lov->lov_qos.lq_rw_sem);
1148         wait = 0;
1149 out:
1150         if (set)
1151                 ptlrpc_set_destroy(set);
1152         if (wait) {
1153                 struct l_wait_info lwi = { 0 };
1154                 CDEBUG(D_QOS, "waiting for statfs requests to complete\n");
1155                 l_wait_event(lov->lov_qos.lq_statfs_waitq,
1156                              qos_statfs_ready(obd, max_age), &lwi);
1157                 if (cfs_time_before_64(obd->obd_osfs_age, max_age))
1158                         CDEBUG(D_QOS, "%s: still no fresh statfs data after "
1159                                       "waiting (osfs age "LPU64", max age "
1160                                       LPU64")\n",
1161                                       obd_uuid2str(&lov->desc.ld_uuid),
1162                                       obd->obd_osfs_age, max_age);
1163         }
1164 }