lustre/lov/lov_qos.c (fs/lustre-release.git, commit a2578664eae8e839fa7a53243764ad67407fd20f)
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
30  * Use is subject to license terms.
31  *
32  * Copyright (c) 2011, 2012, Whamcloud, Inc.
33  */
34 /*
35  * This file is part of Lustre, http://www.lustre.org/
36  * Lustre is a trademark of Sun Microsystems, Inc.
37  */
38
39 #ifndef EXPORT_SYMTAB
40 # define EXPORT_SYMTAB
41 #endif
42 #define DEBUG_SUBSYSTEM S_LOV
43
44 #ifdef __KERNEL__
45 #include <libcfs/libcfs.h>
46 #else
47 #include <liblustre.h>
48 #endif
49
50 #include <obd_class.h>
51 #include <obd_lov.h>
52 #include <lustre/lustre_idl.h>
53 #include "lov_internal.h"
54
55 /* #define QOS_DEBUG 1 */
56 #define D_QOS D_OTHER
57
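/* Free space currently known for OST i, in bytes: the "blocks available"
 * count times the block size cached from the last statfs of that target. */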
58 #define TGT_BAVAIL(i) (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bavail *\
59                        lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bsize)
60
61
62 int qos_add_tgt(struct obd_device *obd, __u32 index)
63 {
64         struct lov_obd *lov = &obd->u.lov;
65         struct lov_qos_oss *oss, *temposs;
66         struct obd_export *exp = lov->lov_tgts[index]->ltd_exp;
67         int rc = 0, found = 0;
68         ENTRY;
69
70         /* We only need this QOS struct on MDT, not clients - but we may not
71          * have registered the LOV's observer yet, so there's no way to know */
72         if (!exp || !exp->exp_connection) {
73                 CERROR("Missing connection\n");
74                 RETURN(-ENOTCONN);
75         }
76
77         cfs_down_write(&lov->lov_qos.lq_rw_sem);
78         cfs_mutex_down(&lov->lov_lock);
79         cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
80                 if (obd_uuid_equals(&oss->lqo_uuid,
81                                     &exp->exp_connection->c_remote_uuid)) {
82                         found++;
83                         break;
84                 }
85         }
86
87         if (!found) {
88                 OBD_ALLOC_PTR(oss);
89                 if (!oss)
90                         GOTO(out, rc = -ENOMEM);
91                 memcpy(&oss->lqo_uuid,
92                        &exp->exp_connection->c_remote_uuid,
93                        sizeof(oss->lqo_uuid));
94         } else {
95                 /* Assume we have to move this one */
96                 cfs_list_del(&oss->lqo_oss_list);
97         }
98
99         oss->lqo_ost_count++;
100         lov->lov_tgts[index]->ltd_qos.ltq_oss = oss;
101
102         /* Add sorted by # of OSTs.  Find the first entry that we're
103            bigger than... */
104         cfs_list_for_each_entry(temposs, &lov->lov_qos.lq_oss_list,
105                                 lqo_oss_list) {
106                 if (oss->lqo_ost_count > temposs->lqo_ost_count)
107                         break;
108         }
109         /* ...and add before it.  If we're the first or smallest, temposs
110            points to the list head, and we add to the end. */
111         cfs_list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);
112
113         lov->lov_qos.lq_dirty = 1;
114         lov->lov_qos.lq_rr.lqr_dirty = 1;
115
116         CDEBUG(D_QOS, "add tgt %s to OSS %s (%d OSTs)\n",
117                obd_uuid2str(&lov->lov_tgts[index]->ltd_uuid),
118                obd_uuid2str(&oss->lqo_uuid),
119                oss->lqo_ost_count);
120
121 out:
122         cfs_mutex_up(&lov->lov_lock);
123         cfs_up_write(&lov->lov_qos.lq_rw_sem);
124         RETURN(rc);
125 }
126
127 int qos_del_tgt(struct obd_device *obd, struct lov_tgt_desc *tgt)
128 {
129         struct lov_obd *lov = &obd->u.lov;
130         struct lov_qos_oss *oss;
131         int rc = 0;
132         ENTRY;
133
134         cfs_down_write(&lov->lov_qos.lq_rw_sem);
135
136         oss = tgt->ltd_qos.ltq_oss;
137         if (!oss)
138                 GOTO(out, rc = -ENOENT);
139
140         oss->lqo_ost_count--;
141         if (oss->lqo_ost_count == 0) {
142                 CDEBUG(D_QOS, "removing OSS %s\n",
143                        obd_uuid2str(&oss->lqo_uuid));
144                 cfs_list_del(&oss->lqo_oss_list);
145                 OBD_FREE_PTR(oss);
146         }
147
148         lov->lov_qos.lq_dirty = 1;
149         lov->lov_qos.lq_rr.lqr_dirty = 1;
150 out:
151         cfs_up_write(&lov->lov_qos.lq_rw_sem);
152         RETURN(rc);
153 }
154
155 /* Recalculate per-object penalties for OSSs and OSTs,
156    based on the free space of each OST in an OSS */
157 static int qos_calc_ppo(struct obd_device *obd)
158 {
159         struct lov_obd *lov = &obd->u.lov;
160         struct lov_qos_oss *oss;
161         __u64 ba_max, ba_min, temp;
162         __u32 num_active;
163         int rc, i, prio_wide;
164         time_t now, age;
165         ENTRY;
166
167         if (!lov->lov_qos.lq_dirty)
168                 GOTO(out, rc = 0);
169
170         num_active = lov->desc.ld_active_tgt_count - 1;
171         if (num_active < 1)
172                 GOTO(out, rc = -EAGAIN);
173
174         /* find bavail on each OSS */
175         cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
176                 oss->lqo_bavail = 0;
177         }
178         lov->lov_qos.lq_active_oss_count = 0;
179
180         /* How strongly the user wants OSTs selected "widely" (not recently
181            chosen and not on recently used OSSs), as opposed to "freely"
182            (by available space).  Range 0-256. */
183         prio_wide = 256 - lov->lov_qos.lq_prio_free;
184
185         ba_min = (__u64)(-1);
186         ba_max = 0;
187         now = cfs_time_current_sec();
188         /* Calculate OST penalty per object */
189         /* (lov ref taken in alloc_qos) */
190         for (i = 0; i < lov->desc.ld_tgt_count; i++) {
191                 if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
192                         continue;
193                 temp = TGT_BAVAIL(i);
194                 if (!temp)
195                         continue;
196                 ba_min = min(temp, ba_min);
197                 ba_max = max(temp, ba_max);
198
199                 /* Count the number of usable OSS's */
200                 if (lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail == 0)
201                         lov->lov_qos.lq_active_oss_count++;
202                 lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail += temp;
203
204                 /* per-OST penalty is prio * TGT_bavail / (num_ost - 1) / 2 */
205                 temp >>= 1;
206                 do_div(temp, num_active);
207                 lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj =
208                         (temp * prio_wide) >> 8;
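                /* For illustration: with prio_wide = 24 (lq_prio_free = 232,
                 * i.e. ~91% weight on free space), an OST with 1 TB free and
                 * 9 other active OSTs gets a per-object penalty of roughly
                 * 1 TB / 2 / 9 * 24 / 256 ~= 5.2 GB. */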
209
210                 age = (now - lov->lov_tgts[i]->ltd_qos.ltq_used) >> 3;
211                 if (lov->lov_qos.lq_reset || age > 32 * lov->desc.ld_qos_maxage)
212                         lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
213                 else if (age > lov->desc.ld_qos_maxage)
214                         /* Decay the penalty by half for every 8x the update
215                          * interval that the device has been idle.  That gives
216                          * lots of time for the statfs information to be
217                          * updated (which the penalty is only a proxy for),
218                          * and avoids penalizing OSS/OSTs under light load. */
219                         lov->lov_tgts[i]->ltd_qos.ltq_penalty >>=
220                                 (age / lov->desc.ld_qos_maxage);
221         }
222
223         num_active = lov->lov_qos.lq_active_oss_count - 1;
224         if (num_active < 1) {
225                 /* If there's only 1 OSS, we can't penalize it, so instead
226                    we have to double the OST penalty */
227                 num_active = 1;
228                 for (i = 0; i < lov->desc.ld_tgt_count; i++) {
229                         if (lov->lov_tgts[i] == NULL)
230                                 continue;
231                         lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj <<= 1;
232                 }
233         }
234
235         /* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */
236         cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
237                 temp = oss->lqo_bavail >> 1;
238                 do_div(temp, oss->lqo_ost_count * num_active);
239                 oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8;
240
241                 age = (now - oss->lqo_used) >> 3;
242                 if (lov->lov_qos.lq_reset || age > 32 * lov->desc.ld_qos_maxage)
243                         oss->lqo_penalty = 0;
244                 else if (age > lov->desc.ld_qos_maxage)
245                         /* Decay the penalty by half for every 8x the update
246                          * interval that the device has been idle.  That gives
247                          * lots of time for the statfs information to be
248                          * updated (which the penalty is only a proxy for),
249                          * and avoids penalizing OSS/OSTs under light load. */
250                         oss->lqo_penalty >>= (age / lov->desc.ld_qos_maxage);
251         }
252
253         lov->lov_qos.lq_dirty = 0;
254         lov->lov_qos.lq_reset = 0;
255
256         /* If all OSTs have nearly the same amount of free space, use
257          * round-robin allocation for better create performance */
258         lov->lov_qos.lq_same_space = 0;
259         if ((ba_max * (256 - lov->lov_qos.lq_threshold_rr)) >> 8 < ba_min) {
260                 lov->lov_qos.lq_same_space = 1;
261                 /* Reset weights for the next time we enter qos mode */
262                 lov->lov_qos.lq_reset = 1;
263         }
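        /* For example, with lq_threshold_rr = 43 (~17%), the test above is
         * ba_min > ba_max * 213/256, i.e. round-robin is used whenever the
         * emptiest OST still has more than ~83% of the free space of the
         * fullest one. */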
264         rc = 0;
265
266 out:
267         if (!rc && lov->lov_qos.lq_same_space)
268                 RETURN(-EAGAIN);
269         RETURN(rc);
270 }
271
272 static int qos_calc_weight(struct lov_obd *lov, int i)
273 {
274         __u64 temp, temp2;
275
276         /* Final ost weight = TGT_BAVAIL - ost_penalty - oss_penalty */
277         temp = TGT_BAVAIL(i);
278         temp2 = lov->lov_tgts[i]->ltd_qos.ltq_penalty +
279                 lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty;
280         if (temp < temp2)
281                 lov->lov_tgts[i]->ltd_qos.ltq_weight = 0;
282         else
283                 lov->lov_tgts[i]->ltd_qos.ltq_weight = temp - temp2;
284         return 0;
285 }
286
287 /* We just used this index for a stripe; adjust everyone's weights */
288 static int qos_used(struct lov_obd *lov, struct ost_pool *osts,
289                     __u32 index, __u64 *total_wt)
290 {
291         struct lov_qos_oss *oss;
292         int j;
293         ENTRY;
294
295         /* Don't allocate from this stripe anymore, until the next alloc_qos */
296         lov->lov_tgts[index]->ltd_qos.ltq_usable = 0;
297
298         oss = lov->lov_tgts[index]->ltd_qos.ltq_oss;
299
300         /* Decay old penalty by half (we're adding max penalty, and don't
301            want it to run away.) */
302         lov->lov_tgts[index]->ltd_qos.ltq_penalty >>= 1;
303         oss->lqo_penalty >>= 1;
304
305         /* mark the OSS and OST as recently used */
306         lov->lov_tgts[index]->ltd_qos.ltq_used =
307                 oss->lqo_used = cfs_time_current_sec();
308
309         /* Set max penalties for this OST and OSS */
310         lov->lov_tgts[index]->ltd_qos.ltq_penalty +=
311                 lov->lov_tgts[index]->ltd_qos.ltq_penalty_per_obj *
312                 lov->desc.ld_active_tgt_count;
313         oss->lqo_penalty += oss->lqo_penalty_per_obj *
314                 lov->lov_qos.lq_active_oss_count;
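        /* Net effect: a just-used OST's penalty jumps by penalty_per_obj times
         * the number of active targets, and each later qos_used() call takes
         * one penalty_per_obj back off, so roughly a full round of allocations
         * passes before this OST competes at full weight again. */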
315
316         /* Decrease all OSS penalties */
317         cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
318                 if (oss->lqo_penalty < oss->lqo_penalty_per_obj)
319                         oss->lqo_penalty = 0;
320                 else
321                         oss->lqo_penalty -= oss->lqo_penalty_per_obj;
322         }
323
324         *total_wt = 0;
325         /* Decrease all OST penalties */
326         for (j = 0; j < osts->op_count; j++) {
327                 int i;
328
329                 i = osts->op_array[j];
330                 if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
331                         continue;
332                 if (lov->lov_tgts[i]->ltd_qos.ltq_penalty <
333                     lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj)
334                         lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
335                 else
336                         lov->lov_tgts[i]->ltd_qos.ltq_penalty -=
337                         lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj;
338
339                 qos_calc_weight(lov, i);
340
341                 /* Recalc the total weight of usable osts */
342                 if (lov->lov_tgts[i]->ltd_qos.ltq_usable)
343                         *total_wt += lov->lov_tgts[i]->ltd_qos.ltq_weight;
344
345 #ifdef QOS_DEBUG
346                 CDEBUG(D_QOS, "recalc tgt %d usable=%d avail="LPU64
347                        " ostppo="LPU64" ostp="LPU64" ossppo="LPU64
348                        " ossp="LPU64" wt="LPU64"\n",
349                        i, lov->lov_tgts[i]->ltd_qos.ltq_usable,
350                        TGT_BAVAIL(i) >> 10,
351                        lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj >> 10,
352                        lov->lov_tgts[i]->ltd_qos.ltq_penalty >> 10,
353                        lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty_per_obj>>10,
354                        lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty >> 10,
355                        lov->lov_tgts[i]->ltd_qos.ltq_weight >> 10);
356 #endif
357         }
358
359         RETURN(0);
360 }
361
362 #define LOV_QOS_EMPTY ((__u32)-1)
363 /* compute optimal round-robin order, based on OSTs per OSS */
364 static int qos_calc_rr(struct lov_obd *lov, struct ost_pool *src_pool,
365                        struct lov_qos_rr *lqr)
366 {
367         struct lov_qos_oss *oss;
368         unsigned placed, real_count;
369         int i, rc;
370         ENTRY;
371
372         if (!lqr->lqr_dirty) {
373                 LASSERT(lqr->lqr_pool.op_size);
374                 RETURN(0);
375         }
376
377         /* Do actual allocation. */
378         cfs_down_write(&lov->lov_qos.lq_rw_sem);
379
380         /*
381          * Check again. While we were sleeping on @lq_rw_sem something could
382          * change.
383          */
384         if (!lqr->lqr_dirty) {
385                 LASSERT(lqr->lqr_pool.op_size);
386                 cfs_up_write(&lov->lov_qos.lq_rw_sem);
387                 RETURN(0);
388         }
389
390         real_count = src_pool->op_count;
391
392         /* Zero the pool array */
393         /* alloc_rr is holding a read lock on the pool, so nobody is adding/
394            deleting from the pool. The lq_rw_sem ensures that nobody else
395            is reading. */
396         lqr->lqr_pool.op_count = real_count;
397         rc = lov_ost_pool_extend(&lqr->lqr_pool, real_count);
398         if (rc) {
399                 cfs_up_write(&lov->lov_qos.lq_rw_sem);
400                 RETURN(rc);
401         }
402         for (i = 0; i < lqr->lqr_pool.op_count; i++)
403                 lqr->lqr_pool.op_array[i] = LOV_QOS_EMPTY;
404
405         /* Place all the OSTs from 1 OSS at the same time. */
406         placed = 0;
407         cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
408                 int j = 0;
409                 for (i = 0; i < lqr->lqr_pool.op_count; i++) {
410                         if (lov->lov_tgts[src_pool->op_array[i]] &&
411                             (lov->lov_tgts[src_pool->op_array[i]]->ltd_qos.ltq_oss == oss)) {
412                               /* Evenly space these OSTs across the array */
413                               int next = j * lqr->lqr_pool.op_count / oss->lqo_ost_count;
414                               while (lqr->lqr_pool.op_array[next] !=
415                                      LOV_QOS_EMPTY)
416                                         next = (next + 1) % lqr->lqr_pool.op_count;
417                               lqr->lqr_pool.op_array[next] = src_pool->op_array[i];
418                               j++;
419                               placed++;
420                         }
421                 }
422         }
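        /* Example of the spreading above: two OSSs with two OSTs each and a
         * four-slot array.  The first OSS places its OSTs at slots 0 and 2;
         * the second computes the same slots, finds them taken and slides to
         * slots 1 and 3, giving the interleaved order A0 B0 A1 B1. */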
423
424         lqr->lqr_dirty = 0;
425         cfs_up_write(&lov->lov_qos.lq_rw_sem);
426
427         if (placed != real_count) {
428                 /* This should never happen */
429                 LCONSOLE_ERROR_MSG(0x14e, "Failed to place all OSTs in the "
430                                    "round-robin list (%d of %d).\n",
431                                    placed, real_count);
432                 for (i = 0; i < lqr->lqr_pool.op_count; i++) {
433                         LCONSOLE(D_WARNING, "rr #%d ost idx=%d\n", i,
434                                  lqr->lqr_pool.op_array[i]);
435                 }
436                 lqr->lqr_dirty = 1;
437                 RETURN(-EAGAIN);
438         }
439
440 #ifdef QOS_DEBUG
441         for (i = 0; i < lqr->lqr_pool.op_count; i++) {
442                 LCONSOLE(D_QOS, "rr #%d ost idx=%d\n", i,
443                          lqr->lqr_pool.op_array[i]);
444         }
445 #endif
446
447         RETURN(0);
448 }
449
450
451 void qos_shrink_lsm(struct lov_request_set *set)
452 {
453         struct lov_stripe_md *lsm = set->set_oi->oi_md, *lsm_new;
454         /* XXX LOV STACKING call into osc for sizes */
455         unsigned oldsize, newsize;
456
457         if (set->set_oti && set->set_cookies && set->set_cookie_sent) {
458                 struct llog_cookie *cookies;
459                 oldsize = lsm->lsm_stripe_count * sizeof(*cookies);
460                 newsize = set->set_count * sizeof(*cookies);
461
462                 cookies = set->set_cookies;
463                 oti_alloc_cookies(set->set_oti, set->set_count);
464                 if (set->set_oti->oti_logcookies) {
465                         memcpy(set->set_oti->oti_logcookies, cookies, newsize);
466                         OBD_FREE_LARGE(cookies, oldsize);
467                         set->set_cookies = set->set_oti->oti_logcookies;
468                 } else {
469                         CWARN("'leaking' %d bytes\n", oldsize - newsize);
470                 }
471         }
472
473         CWARN("using fewer stripes for object "LPU64": old %u new %u\n",
474               lsm->lsm_object_id, lsm->lsm_stripe_count, set->set_count);
475         LASSERT(lsm->lsm_stripe_count >= set->set_count);
476
477         newsize = lov_stripe_md_size(set->set_count);
478         OBD_ALLOC_LARGE(lsm_new, newsize);
479         if (lsm_new != NULL) {
480                 int i;
481                 memcpy(lsm_new, lsm, sizeof(*lsm));
482                 for (i = 0; i < lsm->lsm_stripe_count; i++) {
483                         if (i < set->set_count) {
484                                 lsm_new->lsm_oinfo[i] = lsm->lsm_oinfo[i];
485                                 continue;
486                         }
487                         OBD_SLAB_FREE(lsm->lsm_oinfo[i], lov_oinfo_slab,
488                                       sizeof(struct lov_oinfo));
489                 }
490                 lsm_new->lsm_stripe_count = set->set_count;
491                 OBD_FREE_LARGE(lsm, sizeof(struct lov_stripe_md) +
492                                lsm->lsm_stripe_count*sizeof(struct lov_oinfo*));
493                 set->set_oi->oi_md = lsm_new;
494         } else {
495                 CWARN("'leaking' a few bytes\n");
496         }
497 }
498
499 /**
500  * Check whether we can create the object on the OST referred to by ost_idx
501  * \retval:
502  *          0: the object was created.
503  *          other value: the object was not created.
504  */
505 static int lov_check_and_create_object(struct lov_obd *lov, int ost_idx,
506                                        struct lov_stripe_md *lsm,
507                                        struct lov_request *req,
508                                        struct obd_trans_info *oti)
509 {
510         __u16 stripe;
511         int rc = -EIO;
512         ENTRY;
513
514         CDEBUG(D_QOS, "Check and create on idx %d \n", ost_idx);
515         if (!lov->lov_tgts[ost_idx] ||
516             !lov->lov_tgts[ost_idx]->ltd_active)
517                 RETURN(rc);
518
519         /* check whether an object has already been created on this OST */
520         for (stripe = 0; stripe < lsm->lsm_stripe_count; stripe++) {
521                 /* already have object at this stripe */
522                 if (ost_idx == lsm->lsm_oinfo[stripe]->loi_ost_idx)
523                         break;
524         }
525
526         if (stripe >= lsm->lsm_stripe_count) {
527                 req->rq_idx = ost_idx;
528                 rc = obd_create(lov->lov_tgts[ost_idx]->ltd_exp,
529                                 req->rq_oi.oi_oa, &req->rq_oi.oi_md,
530                                 oti);
531         }
532         RETURN(rc);
533 }
534
535 int qos_remedy_create(struct lov_request_set *set, struct lov_request *req)
536 {
537         struct lov_stripe_md *lsm = set->set_oi->oi_md;
538         struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
539         unsigned ost_idx = 0, ost_count;
540         struct pool_desc *pool;
541         struct ost_pool *osts = NULL;
542         int i, rc = -EIO;
543         ENTRY;
544
545         /* First check whether we can create the objects on the pool */
546         pool = lov_find_pool(lov, lsm->lsm_pool_name);
547         if (pool != NULL) {
548                 cfs_down_read(&pool_tgt_rw_sem(pool));
549                 osts = &(pool->pool_obds);
550                 ost_count = osts->op_count;
551                 for (i = 0; i < ost_count; i++, ost_idx = osts->op_array[i]) {
552                         rc = lov_check_and_create_object(lov, ost_idx, lsm, req,
553                                                          set->set_oti);
554                         if (rc == 0)
555                                 break;
556                 }
557                 cfs_up_read(&pool_tgt_rw_sem(pool));
558                 lov_pool_putref(pool);
559                 RETURN(rc);
560         }
561
562         ost_count = lov->desc.ld_tgt_count;
563         /* Then check whether we can create the objects on other OSTs */
564         ost_idx = (req->rq_idx + lsm->lsm_stripe_count) % ost_count;
565         for (i = 0; i < ost_count; i++, ost_idx = (ost_idx + 1) % ost_count) {
566                 rc = lov_check_and_create_object(lov, ost_idx, lsm, req,
567                                                  set->set_oti);
568
569                 if (rc == 0)
570                         break;
571         }
572
573         RETURN(rc);
574 }
575
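/* With default striping (LOV_USES_DEFAULT_STRIPE) up to a quarter of the
 * requested stripes may be dropped, e.g. a request for 8 is satisfied by 6. */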
576 static int min_stripe_count(int stripe_cnt, int flags)
577 {
578         return (flags & LOV_USES_DEFAULT_STRIPE ?
579                 stripe_cnt - (stripe_cnt / 4) : stripe_cnt);
580 }
581
582 #define LOV_CREATE_RESEED_MULT 30
583 #define LOV_CREATE_RESEED_MIN  2000
584 /* Allocate objects on OSTs with the round-robin algorithm */
585 static int alloc_rr(struct lov_obd *lov, int *idx_arr, int *stripe_cnt,
586                     char *poolname, int flags)
587 {
588         unsigned array_idx;
589         int i, rc, *idx_pos;
590         __u32 ost_idx;
591         int ost_start_idx_temp;
592         int speed = 0;
593         int stripe_cnt_min = min_stripe_count(*stripe_cnt, flags);
594         struct pool_desc *pool;
595         struct ost_pool *osts;
596         struct lov_qos_rr *lqr;
597         ENTRY;
598
599         pool = lov_find_pool(lov, poolname);
600         if (pool == NULL) {
601                 osts = &(lov->lov_packed);
602                 lqr = &(lov->lov_qos.lq_rr);
603         } else {
604                 cfs_down_read(&pool_tgt_rw_sem(pool));
605                 osts = &(pool->pool_obds);
606                 lqr = &(pool->pool_rr);
607         }
608
609         rc = qos_calc_rr(lov, osts, lqr);
610         if (rc)
611                 GOTO(out, rc);
612
613         if (--lqr->lqr_start_count <= 0) {
614                 lqr->lqr_start_idx = cfs_rand() % osts->op_count;
615                 lqr->lqr_start_count =
616                         (LOV_CREATE_RESEED_MIN / max(osts->op_count, 1U) +
617                          LOV_CREATE_RESEED_MULT) * max(osts->op_count, 1U);
618         } else if (stripe_cnt_min >= osts->op_count ||
619                    lqr->lqr_start_idx > osts->op_count) {
620                 /* If we have allocated from all of the OSTs, slowly
621                  * precess the next start if the OST/stripe count isn't
622                  * already doing this for us. */
623                 lqr->lqr_start_idx %= osts->op_count;
624                 if (*stripe_cnt > 1 && (osts->op_count % (*stripe_cnt)) != 1)
625                         ++lqr->lqr_offset_idx;
626         }
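        /* e.g. with 10 OSTs in the pool a fresh random start index is picked
         * once every (2000 / 10 + 30) * 10 = 2300 allocations */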
627         cfs_down_read(&lov->lov_qos.lq_rw_sem);
628         ost_start_idx_temp = lqr->lqr_start_idx;
629
630 repeat_find:
631         array_idx = (lqr->lqr_start_idx + lqr->lqr_offset_idx) % osts->op_count;
632         idx_pos = idx_arr;
633 #ifdef QOS_DEBUG
634         CDEBUG(D_QOS, "pool '%s' want %d startidx %d startcnt %d offset %d "
635                "active %d count %d arrayidx %d\n", poolname,
636                *stripe_cnt, lqr->lqr_start_idx, lqr->lqr_start_count,
637                lqr->lqr_offset_idx, osts->op_count, osts->op_count, array_idx);
638 #endif
639
640         for (i = 0; i < osts->op_count;
641                     i++, array_idx=(array_idx + 1) % osts->op_count) {
642                 ++lqr->lqr_start_idx;
643                 ost_idx = lqr->lqr_pool.op_array[array_idx];
644 #ifdef QOS_DEBUG
645                 CDEBUG(D_QOS, "#%d strt %d act %d strp %d ary %d idx %d\n",
646                        i, lqr->lqr_start_idx,
647                        ((ost_idx != LOV_QOS_EMPTY) && lov->lov_tgts[ost_idx]) ?
648                        lov->lov_tgts[ost_idx]->ltd_active : 0,
649                        idx_pos - idx_arr, array_idx, ost_idx);
650 #endif
651                 if ((ost_idx == LOV_QOS_EMPTY) || !lov->lov_tgts[ost_idx] ||
652                     !lov->lov_tgts[ost_idx]->ltd_active)
653                         continue;
654
655                 /* Fail Check before osc_precreate() is called
656                    so we can only 'fail' a single OSC. */
657                 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
658                         continue;
659
660                 /* Drop slow OSCs if we can */
661                 if (obd_precreate(lov->lov_tgts[ost_idx]->ltd_exp) > speed)
662                         continue;
663
664                 *idx_pos = ost_idx;
665                 idx_pos++;
666                 /* We have enough stripes */
667                 if (idx_pos - idx_arr == *stripe_cnt)
668                         break;
669         }
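        /* obd_precreate() returns a "slowness" estimate for the OSC (lower is
         * better); a pass with a given speed skips any OST reporting a larger
         * value, and if too few stripes were found the scan is retried below
         * with a relaxed cutoff (speed is raised at most twice). */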
670         if ((speed < 2) && (idx_pos - idx_arr < stripe_cnt_min)) {
671                 /* Try again, allowing slower OSCs */
672                 speed++;
673                 lqr->lqr_start_idx = ost_start_idx_temp;
674                 goto repeat_find;
675         }
676
677         cfs_up_read(&lov->lov_qos.lq_rw_sem);
678
679         *stripe_cnt = idx_pos - idx_arr;
680 out:
681         if (pool != NULL) {
682                 cfs_up_read(&pool_tgt_rw_sem(pool));
683                 /* put back ref got by lov_find_pool() */
684                 lov_pool_putref(pool);
685         }
686
687         RETURN(rc);
688 }
689
690 /* Allocate objects on OSTs with a specific stripe offset */
691 static int alloc_specific(struct lov_obd *lov, struct lov_stripe_md *lsm,
692                           int *idx_arr)
693 {
694         unsigned ost_idx, array_idx, ost_count;
695         int i, rc, *idx_pos;
696         int speed = 0;
697         struct pool_desc *pool;
698         struct ost_pool *osts;
699         ENTRY;
700
701         pool = lov_find_pool(lov, lsm->lsm_pool_name);
702         if (pool == NULL) {
703                 osts = &(lov->lov_packed);
704         } else {
705                 cfs_down_read(&pool_tgt_rw_sem(pool));
706                 osts = &(pool->pool_obds);
707         }
708
709         ost_count = osts->op_count;
710
711 repeat_find:
712         /* search loi_ost_idx in ost array */
713         array_idx = 0;
714         for (i = 0; i < ost_count; i++) {
715                 if (osts->op_array[i] == lsm->lsm_oinfo[0]->loi_ost_idx) {
716                         array_idx = i;
717                         break;
718                 }
719         }
720         if (i == ost_count) {
721                 CERROR("Start index %d not found in pool '%s'\n",
722                        lsm->lsm_oinfo[0]->loi_ost_idx, lsm->lsm_pool_name);
723                 GOTO(out, rc = -EINVAL);
724         }
725
726         idx_pos = idx_arr;
727         for (i = 0; i < ost_count;
728              i++, array_idx = (array_idx + 1) % ost_count) {
729                 ost_idx = osts->op_array[array_idx];
730
731                 if (!lov->lov_tgts[ost_idx] ||
732                     !lov->lov_tgts[ost_idx]->ltd_active) {
733                         continue;
734                 }
735
736                 /* Fail Check before osc_precreate() is called
737                    so we can only 'fail' a single OSC. */
738                 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
739                         continue;
740
741                 /* Drop slow OSCs if we can, but not for requested start idx.
742                  *
743                  * This means "if OSC is slow and it is not the requested
744                  * start OST, then it can be skipped, otherwise skip it only
745                  * if it is inactive/recovering/out-of-space." */
746                 if ((obd_precreate(lov->lov_tgts[ost_idx]->ltd_exp) > speed) &&
747                     (i != 0 || speed >= 2))
748                         continue;
749
750                 *idx_pos = ost_idx;
751                 idx_pos++;
752                 /* We have enough stripes */
753                 if (idx_pos - idx_arr == lsm->lsm_stripe_count)
754                         GOTO(out, rc = 0);
755         }
756         if (speed < 2) {
757                 /* Try again, allowing slower OSCs */
758                 speed++;
759                 goto repeat_find;
760         }
761
762         /* If we were passed specific striping params, then a failure to
763          * meet those requirements is an error, since we can't reallocate
764          * that memory (it might be part of a larger array or something).
765          *
766          * We can only get here if lsm_stripe_count was originally > 1.
767          */
768         CERROR("can't lstripe objid "LPX64": have %d want %u\n",
769                lsm->lsm_object_id, (int)(idx_pos - idx_arr),
770                lsm->lsm_stripe_count);
771         rc = -EFBIG;
772 out:
773         if (pool != NULL) {
774                 cfs_up_read(&pool_tgt_rw_sem(pool));
775                 /* put back ref got by lov_find_pool() */
776                 lov_pool_putref(pool);
777         }
778
779         RETURN(rc);
780 }
781
782 /* Alloc objects on osts with optimization based on:
783    - free space
784    - network resources (shared OSS's)
785 */
786 static int alloc_qos(struct obd_export *exp, int *idx_arr, int *stripe_cnt,
787                      char *poolname, int flags)
788 {
789         struct lov_obd *lov = &exp->exp_obd->u.lov;
790         __u64 total_weight = 0;
791         int nfound, good_osts, i, rc = 0;
792         int stripe_cnt_min = min_stripe_count(*stripe_cnt, flags);
793         struct pool_desc *pool;
794         struct ost_pool *osts;
795         ENTRY;
796
797         if (stripe_cnt_min < 1)
798                 RETURN(-EINVAL);
799
800         pool = lov_find_pool(lov, poolname);
801         if (pool == NULL) {
802                 osts = &(lov->lov_packed);
803         } else {
804                 cfs_down_read(&pool_tgt_rw_sem(pool));
805                 osts = &(pool->pool_obds);
806         }
807
808         obd_getref(exp->exp_obd);
809
810         /* wait for fresh statfs info if needed, the rpcs are sent in
811          * lov_create() */
812         qos_statfs_update(exp->exp_obd,
813                           cfs_time_shift_64(-2 * lov->desc.ld_qos_maxage), 1);
814
815         /* Detect -EAGAIN early, before expensive lock is taken. */
816         if (!lov->lov_qos.lq_dirty && lov->lov_qos.lq_same_space)
817                 GOTO(out_nolock, rc = -EAGAIN);
818
819         /* Do actual allocation, use write lock here. */
820         cfs_down_write(&lov->lov_qos.lq_rw_sem);
821
822         /*
823          * Check again, while we were sleeping on @lq_rw_sem things could
824          * change.
825          */
826         if (!lov->lov_qos.lq_dirty && lov->lov_qos.lq_same_space)
827                 GOTO(out, rc = -EAGAIN);
828
829         if (lov->desc.ld_active_tgt_count < 2)
830                 GOTO(out, rc = -EAGAIN);
831
832         rc = qos_calc_ppo(exp->exp_obd);
833         if (rc)
834                 GOTO(out, rc);
835
836         good_osts = 0;
837         /* Find all the OSTs that are valid stripe candidates */
838         for (i = 0; i < osts->op_count; i++) {
839                 if (!lov->lov_tgts[osts->op_array[i]] ||
840                     !lov->lov_tgts[osts->op_array[i]]->ltd_active)
841                         continue;
842
843                 /* Fail Check before osc_precreate() is called
844                    so we can only 'fail' a single OSC. */
845                 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && osts->op_array[i] == 0)
846                         continue;
847
848                 if (obd_precreate(lov->lov_tgts[osts->op_array[i]]->ltd_exp) > 2)
849                         continue;
850
851                 lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_usable = 1;
852                 qos_calc_weight(lov, osts->op_array[i]);
853                 total_weight += lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_weight;
854
855                 good_osts++;
856         }
857
858 #ifdef QOS_DEBUG
859         CDEBUG(D_QOS, "found %d good osts\n", good_osts);
860 #endif
861
862         if (good_osts < stripe_cnt_min)
863                 GOTO(out, rc = -EAGAIN);
864
865         /* We have enough osts */
866         if (good_osts < *stripe_cnt)
867                 *stripe_cnt = good_osts;
868
869         if (!*stripe_cnt)
870                 GOTO(out, rc = -EAGAIN);
871
872         /* Find enough OSTs with weighted random allocation. */
873         nfound = 0;
874         while (nfound < *stripe_cnt) {
875                 __u64 rand, cur_weight;
876
877                 cur_weight = 0;
878                 rc = -ENODEV;
879
880                 if (total_weight) {
881 #if BITS_PER_LONG == 32
882                         rand = cfs_rand() % (unsigned)total_weight;
883                         /* If total_weight > 32-bit, first generate the high
884                          * 32 bits of the random number, then add in the low
885                          * 32 bits (truncated to the upper limit, if needed) */
886                         if (total_weight > 0xffffffffULL)
887                                 rand = (__u64)(cfs_rand() %
888                                           (unsigned)(total_weight >> 32)) << 32;
889                         else
890                                 rand = 0;
891
892                         if (rand == (total_weight & 0xffffffff00000000ULL))
893                                 rand |= cfs_rand() % (unsigned)total_weight;
894                         else
895                                 rand |= cfs_rand();
896
897 #else
898                         rand = ((__u64)cfs_rand() << 32 | cfs_rand()) %
899                                 total_weight;
900 #endif
901                 } else {
902                         rand = 0;
903                 }
904
905                 /* On average, this will hit larger-weighted osts more often.
906                    0-weight osts will always get used last (only when rand=0).*/
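                /* e.g. three usable OSTs with weights 50, 30 and 20 (total
                   100): rand <= 50 picks the first, 51-80 the second and
                   81-99 the third, roughly in proportion to weight. */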
907                 for (i = 0; i < osts->op_count; i++) {
908                         if (!lov->lov_tgts[osts->op_array[i]] ||
909                             !lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_usable)
910                                 continue;
911
912                         cur_weight += lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_weight;
913 #ifdef QOS_DEBUG
914                         CDEBUG(D_QOS, "stripe_cnt=%d nfound=%d cur_weight="LPU64
915                                       " rand="LPU64" total_weight="LPU64"\n",
916                                *stripe_cnt, nfound, cur_weight, rand, total_weight);
917 #endif
918                         if (cur_weight >= rand) {
919 #ifdef QOS_DEBUG
920                                 CDEBUG(D_QOS, "assigned stripe=%d to idx=%d\n",
921                                        nfound, osts->op_array[i]);
922 #endif
923                                 idx_arr[nfound++] = osts->op_array[i];
924                                 qos_used(lov, osts, osts->op_array[i], &total_weight);
925                                 rc = 0;
926                                 break;
927                         }
928                 }
929                 /* the condition below should never be true */
930                 if (rc) {
931                         CERROR("Didn't find any OSTs?\n");
932                         break;
933                 }
934         }
935         LASSERT(nfound == *stripe_cnt);
936
937 out:
938         cfs_up_write(&lov->lov_qos.lq_rw_sem);
939
940 out_nolock:
941         if (pool != NULL) {
942                 cfs_up_read(&pool_tgt_rw_sem(pool));
943                 /* put back ref got by lov_find_pool() */
944                 lov_pool_putref(pool);
945         }
946
947         if (rc == -EAGAIN)
948                 rc = alloc_rr(lov, idx_arr, stripe_cnt, poolname, flags);
949
950         obd_putref(exp->exp_obd);
951         RETURN(rc);
952 }
953
954 /* returns the newly allocated stripe count on success */
955 static int alloc_idx_array(struct obd_export *exp, struct lov_stripe_md *lsm,
956                            int newea, int **idx_arr, int *arr_cnt, int flags)
957 {
958         struct lov_obd *lov = &exp->exp_obd->u.lov;
959         int stripe_cnt = lsm->lsm_stripe_count;
960         int i, rc = 0;
961         int *tmp_arr = NULL;
962         ENTRY;
963
964         *arr_cnt = stripe_cnt;
965         OBD_ALLOC(tmp_arr, *arr_cnt * sizeof(int));
966         if (tmp_arr == NULL)
967                 RETURN(-ENOMEM);
968         for (i = 0; i < *arr_cnt; i++)
969                 tmp_arr[i] = -1;
970
971         if (newea ||
972             lsm->lsm_oinfo[0]->loi_ost_idx >= lov->desc.ld_tgt_count)
973                 rc = alloc_qos(exp, tmp_arr, &stripe_cnt,
974                                lsm->lsm_pool_name, flags);
975         else
976                 rc = alloc_specific(lov, lsm, tmp_arr);
977
978         if (rc)
979                 GOTO(out_arr, rc);
980
981         *idx_arr = tmp_arr;
982         RETURN(stripe_cnt);
983 out_arr:
984         OBD_FREE(tmp_arr, *arr_cnt * sizeof(int));
985         *arr_cnt = 0;
986         RETURN(rc);
987 }
988
989 static void free_idx_array(int *idx_arr, int arr_cnt)
990 {
991         if (arr_cnt)
992                 OBD_FREE(idx_arr, arr_cnt * sizeof(int));
993 }
994
995 int qos_prep_create(struct obd_export *exp, struct lov_request_set *set)
996 {
997         struct lov_obd *lov = &exp->exp_obd->u.lov;
998         struct lov_stripe_md *lsm;
999         struct obdo *src_oa = set->set_oi->oi_oa;
1000         struct obd_trans_info *oti = set->set_oti;
1001         int i, stripes, rc = 0, newea = 0;
1002         int flag = LOV_USES_ASSIGNED_STRIPE;
1003         int *idx_arr = NULL, idx_cnt = 0;
1004         ENTRY;
1005
1006         LASSERT(src_oa->o_valid & OBD_MD_FLID);
1007         LASSERT(src_oa->o_valid & OBD_MD_FLGROUP);
1008
1009         if (set->set_oi->oi_md == NULL) {
1010                 __u16 stripes_def = lov_get_stripecnt(lov, LOV_MAGIC, 0);
1011
1012                 /* If the MDS file was truncated up to some size, stripe over
1013                  * enough OSTs to allow the file to be created at that size.
1014                  * This may mean we use more than the default # of stripes. */
1015                 if (src_oa->o_valid & OBD_MD_FLSIZE) {
1016                         obd_size min_bavail = LUSTRE_STRIPE_MAXBYTES;
1017
1018                         /* Find a small number of stripes we can use
1019                            (up to # of active osts). */
1020                         stripes = 1;
1021                         for (i = 0; i < lov->desc.ld_tgt_count; i++) {
1022                                 if (!lov->lov_tgts[i] ||
1023                                     !lov->lov_tgts[i]->ltd_active)
1024                                         continue;
1025                                 min_bavail = min(min_bavail, TGT_BAVAIL(i));
1026                                 if (min_bavail * stripes > src_oa->o_size)
1027                                         break;
1028                                 stripes++;
1029                         }
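                        /* e.g. for a 100 GB file when the emptiest active OST
                         * has 30 GB free this settles on 4 stripes (given at
                         * least 4 active OSTs) */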
1030
1031                         if (stripes < stripes_def)
1032                                 stripes = stripes_def;
1033                 } else {
1034                         flag = LOV_USES_DEFAULT_STRIPE;
1035                         stripes = stripes_def;
1036                 }
1037
1038                 rc = lov_alloc_memmd(&set->set_oi->oi_md, stripes,
1039                                      lov->desc.ld_pattern ?
1040                                      lov->desc.ld_pattern : LOV_PATTERN_RAID0,
1041                                      LOV_MAGIC);
1042                 if (rc < 0)
1043                         GOTO(out_err, rc);
1044                 newea = 1;
1045                 rc = 0;
1046         }
1047
1048         lsm = set->set_oi->oi_md;
1049         lsm->lsm_object_id = src_oa->o_id;
1050         lsm->lsm_object_seq = src_oa->o_seq;
1051         lsm->lsm_layout_gen = 0; /* actual generation set in mdd_lov_create() */
1052
1053         if (!lsm->lsm_stripe_size)
1054                 lsm->lsm_stripe_size = lov->desc.ld_default_stripe_size;
1055         if (!lsm->lsm_pattern) {
1056                 LASSERT(lov->desc.ld_pattern);
1057                 lsm->lsm_pattern = lov->desc.ld_pattern;
1058         }
1059
1060         stripes = alloc_idx_array(exp, lsm, newea, &idx_arr, &idx_cnt, flag);
1061         if (stripes <= 0)
1062                 GOTO(out_err, rc = stripes ? stripes : -EIO);
1063         LASSERTF(stripes <= lsm->lsm_stripe_count,"requested %d allocated %d\n",
1064                  lsm->lsm_stripe_count, stripes);
1065
1066         for (i = 0; i < stripes; i++) {
1067                 struct lov_request *req;
1068                 int ost_idx = idx_arr[i];
1069                 LASSERT(ost_idx >= 0);
1070
1071                 OBD_ALLOC(req, sizeof(*req));
1072                 if (req == NULL)
1073                         GOTO(out_err, rc = -ENOMEM);
1074                 lov_set_add_req(req, set);
1075
1076                 req->rq_buflen = sizeof(*req->rq_oi.oi_md);
1077                 OBD_ALLOC_LARGE(req->rq_oi.oi_md, req->rq_buflen);
1078                 if (req->rq_oi.oi_md == NULL)
1079                         GOTO(out_err, rc = -ENOMEM);
1080
1081                 OBDO_ALLOC(req->rq_oi.oi_oa);
1082                 if (req->rq_oi.oi_oa == NULL)
1083                         GOTO(out_err, rc = -ENOMEM);
1084
1085                 req->rq_idx = ost_idx;
1086                 req->rq_stripe = i;
1087                 /* create data objects with "parent" OA */
1088                 memcpy(req->rq_oi.oi_oa, src_oa, sizeof(*req->rq_oi.oi_oa));
1089                 req->rq_oi.oi_cb_up = cb_create_update;
1090
1091                 /* XXX When we start creating objects on demand, we need to
1092                  *     make sure that we always create the object on the
1093                  *     stripe which holds the existing file size.
1094                  */
1095                 if (src_oa->o_valid & OBD_MD_FLSIZE) {
1096                         req->rq_oi.oi_oa->o_size =
1097                                 lov_size_to_stripe(lsm, src_oa->o_size, i);
1098
1099                         CDEBUG(D_INODE, "stripe %d has size "LPU64"/"LPU64"\n",
1100                                i, req->rq_oi.oi_oa->o_size, src_oa->o_size);
1101                 }
1102         }
1103         LASSERT(set->set_count == stripes);
1104
1105         if (stripes < lsm->lsm_stripe_count)
1106                 qos_shrink_lsm(set);
1107         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LOV_PREP_CREATE)) {
1108                 qos_shrink_lsm(set);
1109                 rc = -EIO;
1110         }
1111
1112         if (oti && (src_oa->o_valid & OBD_MD_FLCOOKIE)) {
1113                 oti_alloc_cookies(oti, set->set_count);
1114                 if (!oti->oti_logcookies)
1115                         GOTO(out_err, rc = -ENOMEM);
1116                 set->set_cookies = oti->oti_logcookies;
1117         }
1118 out_err:
1119         if (newea && rc)
1120                 obd_free_memmd(exp, &set->set_oi->oi_md);
1121         if (idx_arr)
1122                 free_idx_array(idx_arr, idx_cnt);
1123         EXIT;
1124         return rc;
1125 }
1126
1127 void qos_update(struct lov_obd *lov)
1128 {
1129         ENTRY;
1130         lov->lov_qos.lq_dirty = 1;
1131 }
1132
1133 void qos_statfs_done(struct lov_obd *lov)
1134 {
1135         cfs_down_write(&lov->lov_qos.lq_rw_sem);
1136         if (lov->lov_qos.lq_statfs_in_progress) {
1137                 lov->lov_qos.lq_statfs_in_progress = 0;
1138                 /* wake up any threads waiting for the statfs rpcs to complete*/
1139                 cfs_waitq_signal(&lov->lov_qos.lq_statfs_waitq);
1140         }
1141         cfs_up_write(&lov->lov_qos.lq_rw_sem);
1142 }
1143
1144 static int qos_statfs_ready(struct obd_device *obd, __u64 max_age)
1145 {
1146         struct lov_obd         *lov = &obd->u.lov;
1147         int rc;
1148         ENTRY;
1149         cfs_down_read(&lov->lov_qos.lq_rw_sem);
1150         rc = lov->lov_qos.lq_statfs_in_progress == 0 ||
1151              cfs_time_beforeq_64(max_age, obd->obd_osfs_age);
1152         cfs_up_read(&lov->lov_qos.lq_rw_sem);
1153         RETURN(rc);
1154 }
1155
1156 /*
1157  * Update statfs data if the current osfs age is older than max_age.
1158  * If wait is not set, it means that we are called from lov_create()
1159  * and we should just issue the rpcs without waiting for them to complete.
1160  * If wait is set, we are called from alloc_qos() and we just have
1161  * to wait for the request set to complete.
1162  */
1163 void qos_statfs_update(struct obd_device *obd, __u64 max_age, int wait)
1164 {
1165         struct lov_obd         *lov = &obd->u.lov;
1166         struct obd_info        *oinfo;
1167         int                     rc = 0;
1168         struct ptlrpc_request_set *set = NULL;
1169         ENTRY;
1170
1171         if (cfs_time_beforeq_64(max_age, obd->obd_osfs_age))
1172                 /* statfs data are quite recent, don't need to refresh it */
1173                 RETURN_EXIT;
1174
1175         if (!wait && lov->lov_qos.lq_statfs_in_progress)
1176                 /* statfs already in progress */
1177                 RETURN_EXIT;
1178
1179         cfs_down_write(&lov->lov_qos.lq_rw_sem);
1180         if (lov->lov_qos.lq_statfs_in_progress) {
1181                 cfs_up_write(&lov->lov_qos.lq_rw_sem);
1182                 GOTO(out, rc = 0);
1183         }
1184         /* no statfs in flight, send rpcs */
1185         lov->lov_qos.lq_statfs_in_progress = 1;
1186         cfs_up_write(&lov->lov_qos.lq_rw_sem);
1187
1188         if (wait)
1189                 CDEBUG(D_QOS, "%s: did not manage to get fresh statfs data "
1190                        "in a timely manner (osfs age "LPU64", max age "LPU64")"
1191                        ", sending new statfs rpcs\n",
1192                        obd_uuid2str(&lov->desc.ld_uuid), obd->obd_osfs_age,
1193                        max_age);
1194
1195         /* need to send statfs rpcs */
1196         CDEBUG(D_QOS, "sending new statfs requests\n");
1197         memset(lov->lov_qos.lq_statfs_data, 0,
1198                sizeof(*lov->lov_qos.lq_statfs_data));
1199         oinfo = &lov->lov_qos.lq_statfs_data->lsd_oi;
1200         oinfo->oi_osfs = &lov->lov_qos.lq_statfs_data->lsd_statfs;
1201         oinfo->oi_flags = OBD_STATFS_NODELAY;
1202         set = ptlrpc_prep_set();
1203         if (!set)
1204                 GOTO(out_failed, rc = -ENOMEM);
1205
1206         rc = obd_statfs_async(obd, oinfo, max_age, set);
1207         if (rc || cfs_list_empty(&set->set_requests)) {
1208                 if (rc)
1209                         CWARN("statfs failed with %d\n", rc);
1210                 GOTO(out_failed, rc);
1211         }
1212         /* send requests via ptlrpcd */
1213         oinfo->oi_flags |= OBD_STATFS_PTLRPCD;
1214         ptlrpcd_add_rqset(set);
1215         GOTO(out, rc);
1216
1217 out_failed:
1218         cfs_down_write(&lov->lov_qos.lq_rw_sem);
1219         lov->lov_qos.lq_statfs_in_progress = 0;
1220         /* wake up any threads waiting for the statfs rpcs to complete */
1221         cfs_waitq_signal(&lov->lov_qos.lq_statfs_waitq);
1222         cfs_up_write(&lov->lov_qos.lq_rw_sem);
1223         wait = 0;
1224 out:
1225         if (set)
1226                 ptlrpc_set_destroy(set);
1227         if (wait) {
1228                 struct l_wait_info lwi = { 0 };
1229                 CDEBUG(D_QOS, "waiting for statfs requests to complete\n");
1230                 l_wait_event(lov->lov_qos.lq_statfs_waitq,
1231                              qos_statfs_ready(obd, max_age), &lwi);
1232                 if (cfs_time_before_64(obd->obd_osfs_age, max_age))
1233                         CDEBUG(D_QOS, "%s: still no fresh statfs data after "
1234                                       "waiting (osfs age "LPU64", max age "
1235                                       LPU64")\n",
1236                                       obd_uuid2str(&lov->desc.ld_uuid),
1237                                       obd->obd_osfs_age, max_age);
1238         }
1239 }