1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License version 2 for more details.  A copy is
14  * included in the COPYING file that accompanied this code.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2009 Sun Microsystems, Inc. All rights reserved
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/lod/lod_qos.c
33  *
34  * Implementation of the different allocation algorithms used
35  * to distribute objects and data among OSTs.
36  */
37
38 #define DEBUG_SUBSYSTEM S_LOV
39
40 #include <asm/div64.h>
41 #include <libcfs/libcfs.h>
42 #include <uapi/linux/lustre/lustre_idl.h>
43 #include <lustre_swab.h>
44 #include <obd_class.h>
45
46 #include "lod_internal.h"
47
48 /*
49  * force QoS policy (not RR) to be used for testing purposes
50  */
51 #define FORCE_QOS_
52
53 #define D_QOS   D_OTHER
54
55 #define QOS_DEBUG(fmt, ...)     CDEBUG(D_QOS, fmt, ## __VA_ARGS__)
56 #define QOS_CONSOLE(fmt, ...)   LCONSOLE(D_QOS, fmt, ## __VA_ARGS__)
57
58 #define TGT_BAVAIL(i) (OST_TGT(lod,i)->ltd_statfs.os_bavail * \
59                        OST_TGT(lod,i)->ltd_statfs.os_bsize)
60
61 /**
62  * Add a new target to Quality of Service (QoS) target table.
63  *
64  * Add a new OST target to the structure representing an OSS. Resort the list
65  * of known OSSs by the number of OSTs attached to each OSS. The OSS list is
66  * protected internally and no external locking is required.
67  *
68  * \param[in] lod               LOD device
69  * \param[in] ost_desc          OST description
70  *
71  * \retval 0                    on success
72  * \retval -ENOMEM              on error
73  */
74 int qos_add_tgt(struct lod_device *lod, struct lod_tgt_desc *ost_desc)
75 {
76         struct lod_qos_oss *oss = NULL, *temposs;
77         struct obd_export  *exp = ost_desc->ltd_exp;
78         int                 rc = 0, found = 0;
79         struct list_head   *list;
80         __u32 id = 0;
81         ENTRY;
82
83         down_write(&lod->lod_qos.lq_rw_sem);
84         /*
85          * a somewhat hacky approach to learn the NID of the corresponding
86          * connection, but there is no official API to access this kind of
87          * information through the OSD API.
88          */
89         list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) {
90                 if (obd_uuid_equals(&oss->lqo_uuid,
91                                     &exp->exp_connection->c_remote_uuid)) {
92                         found++;
93                         break;
94                 }
95                 if (oss->lqo_id > id)
96                         id = oss->lqo_id;
97         }
98
99         if (!found) {
100                 OBD_ALLOC_PTR(oss);
101                 if (!oss)
102                         GOTO(out, rc = -ENOMEM);
103                 memcpy(&oss->lqo_uuid, &exp->exp_connection->c_remote_uuid,
104                        sizeof(oss->lqo_uuid));
105                 ++id;
106                 oss->lqo_id = id;
107         } else {
108                 /* Assume we have to move this one */
109                 list_del(&oss->lqo_oss_list);
110         }
111
112         oss->lqo_ost_count++;
113         ost_desc->ltd_qos.ltq_oss = oss;
114
115         CDEBUG(D_QOS, "add tgt %s to OSS %s (%d OSTs)\n",
116                obd_uuid2str(&ost_desc->ltd_uuid), obd_uuid2str(&oss->lqo_uuid),
117                oss->lqo_ost_count);
118
119         /* Add sorted by # of OSTs.  Find the first entry that we're
120            bigger than... */
121         list = &lod->lod_qos.lq_oss_list;
122         list_for_each_entry(temposs, list, lqo_oss_list) {
123                 if (oss->lqo_ost_count > temposs->lqo_ost_count)
124                         break;
125         }
126         /* ...and add before it.  If we're the first or smallest, temposs
127            points to the list head, and we add to the end. */
128         list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);
129
130         lod->lod_qos.lq_dirty = 1;
131         lod->lod_qos.lq_rr.lqr_dirty = 1;
132
133 out:
134         up_write(&lod->lod_qos.lq_rw_sem);
135         RETURN(rc);
136 }
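/*
 * Added illustration (commentary only, not from the original source): assume
 * the OSS list currently holds entries with lqo_ost_count 5, 3 and 2, kept in
 * descending order.  Registering one more OST on the middle OSS removes that
 * entry (leaving 5, 2), bumps its count to 4, and the insertion loop above
 * then stops at the first entry with a smaller count (the 2), so
 * list_add_tail() re-inserts it just before that entry, restoring the
 * descending order 5, 4, 2.
 */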
137
138 /**
139  * Remove OST target from QoS table.
140  *
141  * Removes given OST target from QoS table and releases related OSS structure
142  * if no OSTs remain on the OSS.
143  *
144  * \param[in] lod               LOD device
145  * \param[in] ost_desc          OST description
146  *
147  * \retval 0                    on success
148  * \retval -ENOENT              if no OSS was found
149  */
150 int qos_del_tgt(struct lod_device *lod, struct lod_tgt_desc *ost_desc)
151 {
152         struct lod_qos_oss *oss;
153         int                 rc = 0;
154         ENTRY;
155
156         down_write(&lod->lod_qos.lq_rw_sem);
157         oss = ost_desc->ltd_qos.ltq_oss;
158         if (!oss)
159                 GOTO(out, rc = -ENOENT);
160
161         oss->lqo_ost_count--;
162         if (oss->lqo_ost_count == 0) {
163                 CDEBUG(D_QOS, "removing OSS %s\n",
164                        obd_uuid2str(&oss->lqo_uuid));
165                 list_del(&oss->lqo_oss_list);
166                 ost_desc->ltd_qos.ltq_oss = NULL;
167                 OBD_FREE_PTR(oss);
168         }
169
170         lod->lod_qos.lq_dirty = 1;
171         lod->lod_qos.lq_rr.lqr_dirty = 1;
172 out:
173         up_write(&lod->lod_qos.lq_rw_sem);
174         RETURN(rc);
175 }
176
177 /**
178  * Check whether the target is available for new OST objects.
179  *
180  * Request statfs data from the given target and verify it's active and not
181  * read-only. If so, then it can be used to place new OST objects. This
182  * function also maintains the number of active/inactive targets and sets
183  * dirty flags if those numbers change so others can run re-balance procedures.
184  * No external locking is required.
185  *
186  * \param[in] env       execution environment for this thread
187  * \param[in] d         LOD device
188  * \param[in] index     index of OST target to check
189  * \param[out] sfs      buffer for statfs data
190  *
191  * \retval 0            if the target is good
192  * \retval negative     negated errno on error
193  *
194  */
195 static int lod_statfs_and_check(const struct lu_env *env, struct lod_device *d,
196                                 int index, struct obd_statfs *sfs)
197 {
198         struct lod_tgt_desc *ost;
199         int                  rc;
200         ENTRY;
201
202         LASSERT(d);
203         ost = OST_TGT(d,index);
204         LASSERT(ost);
205
206         rc = dt_statfs(env, ost->ltd_ost, sfs);
207
208         if (rc == 0 && ((sfs->os_state & OS_STATE_ENOSPC) ||
209             (sfs->os_state & OS_STATE_ENOINO && sfs->os_fprecreated == 0)))
210                 RETURN(-ENOSPC);
211
212         if (rc && rc != -ENOTCONN)
213                 CERROR("%s: statfs: rc = %d\n", lod2obd(d)->obd_name, rc);
214
215         /* If the OST is readonly then we can't allocate objects there */
216         if (sfs->os_state & OS_STATE_READONLY)
217                 rc = -EROFS;
218
219         /* object precreation is skipped on the OST with max_create_count=0 */
220         if (sfs->os_state & OS_STATE_NOPRECREATE)
221                 rc = -ENOBUFS;
222
223         /* check whether device has changed state (active, inactive) */
224         if (rc != 0 && ost->ltd_active) {
225                 /* turned inactive? */
226                 spin_lock(&d->lod_lock);
227                 if (ost->ltd_active) {
228                         ost->ltd_active = 0;
229                         if (rc == -ENOTCONN)
230                                 ost->ltd_connecting = 1;
231
232                         LASSERT(d->lod_desc.ld_active_tgt_count > 0);
233                         d->lod_desc.ld_active_tgt_count--;
234                         d->lod_qos.lq_dirty = 1;
235                         d->lod_qos.lq_rr.lqr_dirty = 1;
236                         CDEBUG(D_CONFIG, "%s: turns inactive\n",
237                                ost->ltd_exp->exp_obd->obd_name);
238                 }
239                 spin_unlock(&d->lod_lock);
240         } else if (rc == 0 && ost->ltd_active == 0) {
241                 /* turned active? */
242                 LASSERTF(d->lod_desc.ld_active_tgt_count < d->lod_ostnr,
243                          "active tgt count %d, ost nr %d\n",
244                          d->lod_desc.ld_active_tgt_count, d->lod_ostnr);
245                 spin_lock(&d->lod_lock);
246                 if (ost->ltd_active == 0) {
247                         ost->ltd_active = 1;
248                         ost->ltd_connecting = 0;
249                         d->lod_desc.ld_active_tgt_count++;
250                         d->lod_qos.lq_dirty = 1;
251                         d->lod_qos.lq_rr.lqr_dirty = 1;
252                         CDEBUG(D_CONFIG, "%s: turns active\n",
253                                ost->ltd_exp->exp_obd->obd_name);
254                 }
255                 spin_unlock(&d->lod_lock);
256         }
257
258         RETURN(rc);
259 }
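/*
 * Added summary (not from the original source) of how lod_statfs_and_check()
 * maps target state to its return code:
 *
 *   OS_STATE_ENOSPC, or OS_STATE_ENOINO with no precreated objects -> -ENOSPC
 *   OS_STATE_READONLY                                              -> -EROFS
 *   OS_STATE_NOPRECREATE (max_create_count=0)                      -> -ENOBUFS
 *   dt_statfs() failure (e.g. -ENOTCONN while reconnecting)        -> that rc
 *
 * Any non-zero result makes the target unusable for the current allocation
 * and may flip ltd_active, which dirties the QoS and round-robin state.
 */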
260
261 /**
262  * Maintain per-target statfs data.
263  *
264  * The function refreshes statfs data for all the targets every N seconds.
265  * The actual N is controlled via procfs and set to LOV_DESC_QOS_MAXAGE_DEFAULT
266  * initially.
267  *
268  * \param[in] env       execution environment for this thread
269  * \param[in] lod       LOD device
270  */
271 void lod_qos_statfs_update(const struct lu_env *env, struct lod_device *lod)
272 {
273         struct obd_device *obd = lod2obd(lod);
274         struct ost_pool *osts = &(lod->lod_pool_info);
275         time64_t max_age;
276         unsigned int i;
277         u64 avail;
278         int idx;
279         ENTRY;
280
281         max_age = ktime_get_seconds() - 2 * lod->lod_desc.ld_qos_maxage;
282
283         if (obd->obd_osfs_age > max_age)
284                 /* statfs data is quite recent, no need to refresh it */
285                 RETURN_EXIT;
286
287         down_write(&lod->lod_qos.lq_rw_sem);
288
289         if (obd->obd_osfs_age > max_age)
290                 goto out;
291
292         for (i = 0; i < osts->op_count; i++) {
293                 idx = osts->op_array[i];
294                 avail = OST_TGT(lod,idx)->ltd_statfs.os_bavail;
295                 if (lod_statfs_and_check(env, lod, idx,
296                                          &OST_TGT(lod, idx)->ltd_statfs))
297                         continue;
298                 if (OST_TGT(lod,idx)->ltd_statfs.os_bavail != avail)
299                         /* recalculate weights */
300                         lod->lod_qos.lq_dirty = 1;
301         }
302         obd->obd_osfs_age = ktime_get_seconds();
303
304 out:
305         up_write(&lod->lod_qos.lq_rw_sem);
306         EXIT;
307 }
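/*
 * Added worked example (hypothetical value): with ld_qos_maxage set to 5
 * seconds, max_age = now - 10s, so the pool-wide statfs walk above runs at
 * most once every 10 seconds regardless of how many allocations are in
 * flight; the second age check under lq_rw_sem ensures that only one of the
 * racing threads actually performs the refresh.
 */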
308
309 /**
310  * Calculate per-OST and per-OSS penalties
311  *
312  * Re-calculate penalties when the configuration changes, active targets
313  * change and after statfs refresh (all these are reflected by lq_dirty flag).
314  * On every OST and OSS: decay the penalty by half for every 8x the update
315  * interval that the device has been idle. That gives lots of time for the
316  * statfs information to be updated (which the penalty is only a proxy for),
317  * and avoids penalizing OSS/OSTs under light load.
318  * See lod_qos_calc_weight() for how penalties are factored into the weight.
319  *
320  * \param[in] lod       LOD device
321  *
322  * \retval 0            on success
323  * \retval -EAGAIN      if there are not enough active OSTs
324  */
325 static int lod_qos_calc_ppo(struct lod_device *lod)
326 {
327         struct lod_qos_oss *oss;
328         __u64               ba_max, ba_min, temp;
329         __u32               num_active;
330         unsigned int        i;
331         int                 rc, prio_wide;
332         time64_t            now, age;
333         ENTRY;
334
335         if (!lod->lod_qos.lq_dirty)
336                 GOTO(out, rc = 0);
337
338         num_active = lod->lod_desc.ld_active_tgt_count - 1;
339         if (num_active < 1)
340                 GOTO(out, rc = -EAGAIN);
341
342         /* find bavail on each OSS */
343         list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list)
344                             oss->lqo_bavail = 0;
345         lod->lod_qos.lq_active_oss_count = 0;
346
347         /*
348          * How badly the user wants to select OSTs "widely" (not recently
349          * chosen and not on recently used OSSs), as opposed to "freely"
350          * (by available free space).  Range: 0-256
351          */
352         prio_wide = 256 - lod->lod_qos.lq_prio_free;
353
354         ba_min = (__u64)(-1);
355         ba_max = 0;
356         now = ktime_get_real_seconds();
357         /* Calculate OST penalty per object
358          * (lod ref taken in lod_qos_prep_create())
359          */
360         cfs_foreach_bit(lod->lod_ost_bitmap, i) {
361                 LASSERT(OST_TGT(lod,i));
362                 temp = TGT_BAVAIL(i);
363                 if (!temp)
364                         continue;
365                 ba_min = min(temp, ba_min);
366                 ba_max = max(temp, ba_max);
367
368                 /* Count the number of usable OSS's */
369                 if (OST_TGT(lod,i)->ltd_qos.ltq_oss->lqo_bavail == 0)
370                         lod->lod_qos.lq_active_oss_count++;
371                 OST_TGT(lod,i)->ltd_qos.ltq_oss->lqo_bavail += temp;
372
373                 /* per-OST penalty is prio * TGT_bavail / (num_ost - 1) / 2 */
374                 temp >>= 1;
375                 do_div(temp, num_active);
376                 OST_TGT(lod,i)->ltd_qos.ltq_penalty_per_obj =
377                         (temp * prio_wide) >> 8;
378
379                 age = (now - OST_TGT(lod,i)->ltd_qos.ltq_used) >> 3;
380                 if (lod->lod_qos.lq_reset ||
381                     age > 32 * lod->lod_desc.ld_qos_maxage)
382                         OST_TGT(lod,i)->ltd_qos.ltq_penalty = 0;
383                 else if (age > lod->lod_desc.ld_qos_maxage)
384                         /* Decay OST penalty. */
385                         OST_TGT(lod,i)->ltd_qos.ltq_penalty >>=
386                                 (age / lod->lod_desc.ld_qos_maxage);
387         }
388
389         num_active = lod->lod_qos.lq_active_oss_count - 1;
390         if (num_active < 1) {
391                 /* If there's only 1 OSS, we can't penalize it, so instead
392                    we have to double the OST penalty */
393                 num_active = 1;
394                 cfs_foreach_bit(lod->lod_ost_bitmap, i)
395                         OST_TGT(lod,i)->ltd_qos.ltq_penalty_per_obj <<= 1;
396         }
397
398         /* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */
399         list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) {
400                 temp = oss->lqo_bavail >> 1;
401                 do_div(temp, oss->lqo_ost_count * num_active);
402                 oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8;
403
404                 age = (now - oss->lqo_used) >> 3;
405                 if (lod->lod_qos.lq_reset ||
406                     age > 32 * lod->lod_desc.ld_qos_maxage)
407                         oss->lqo_penalty = 0;
408                 else if (age > lod->lod_desc.ld_qos_maxage)
409                         /* Decay OSS penalty. */
410                         oss->lqo_penalty >>= age / lod->lod_desc.ld_qos_maxage;
411         }
412
413         lod->lod_qos.lq_dirty = 0;
414         lod->lod_qos.lq_reset = 0;
415
416         /* If each OST has almost the same free space,
417          * do RR allocation for better creation performance */
418         lod->lod_qos.lq_same_space = 0;
419         if ((ba_max * (256 - lod->lod_qos.lq_threshold_rr)) >> 8 < ba_min) {
420                 lod->lod_qos.lq_same_space = 1;
421                 /* Reset weights for the next time we enter qos mode */
422                 lod->lod_qos.lq_reset = 1;
423         }
424         rc = 0;
425
426 out:
427 #ifndef FORCE_QOS
428         if (!rc && lod->lod_qos.lq_same_space)
429                 RETURN(-EAGAIN);
430 #endif
431         RETURN(rc);
432 }
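/*
 * Added worked example (all figures hypothetical): with lq_prio_free = 192
 * the "wide" priority is prio_wide = 256 - 192 = 64.  For an OST with 1 TiB
 * available on a filesystem with 5 active OSTs (num_active = 4):
 *
 *   ltq_penalty_per_obj = ((1 TiB / 2) / 4) * 64 / 256 = 32 GiB
 *
 * The accumulated ltq_penalty ages as age = (now - ltq_used) / 8 and is
 * shifted right by age / ld_qos_maxage, i.e. it halves for every eight
 * update intervals the target stays idle, and is cleared entirely once the
 * idle time exceeds 32 * 8 = 256 intervals or when lq_reset is set.
 */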
433
434 /**
435  * Calculate weight for a given OST target.
436  *
437  * The final OST weight is the number of bytes available minus the OST and
438  * OSS penalties.  See lod_qos_calc_ppo() for how penalties are calculated.
439  *
440  * \param[in] lod       LOD device, where OST targets are listed
441  * \param[in] i         OST target index
442  *
443  * \retval              0
444  */
445 static int lod_qos_calc_weight(struct lod_device *lod, int i)
446 {
447         __u64 temp, temp2;
448
449         temp = TGT_BAVAIL(i);
450         temp2 = OST_TGT(lod,i)->ltd_qos.ltq_penalty +
451                 OST_TGT(lod,i)->ltd_qos.ltq_oss->lqo_penalty;
452         if (temp < temp2)
453                 OST_TGT(lod,i)->ltd_qos.ltq_weight = 0;
454         else
455                 OST_TGT(lod,i)->ltd_qos.ltq_weight = temp - temp2;
456         return 0;
457 }
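/*
 * Added worked example (hypothetical numbers): an OST with 1 TiB available,
 * ltq_penalty = 96 GiB and an OSS penalty of 32 GiB ends up with
 *
 *   ltq_weight = 1024 GiB - (96 GiB + 32 GiB) = 896 GiB
 *
 * while a target whose combined penalties exceed its free space is clamped
 * to a weight of 0 and is effectively skipped until the penalties decay.
 */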
458
459 /**
460  * Re-calculate weights.
461  *
462  * The function is called when some OST target was used for a new object. In
463  * this case we should re-calculate all the weights to keep new allocations
464  * balanced well.
465  *
466  * \param[in] lod       LOD device
467  * \param[in] osts      OST pool where a new object was placed
468  * \param[in] index     OST target where a new object was placed
469  * \param[out] total_wt new total weight for the pool
470  *
471  * \retval              0
472  */
473 static int lod_qos_used(struct lod_device *lod, struct ost_pool *osts,
474                         __u32 index, __u64 *total_wt)
475 {
476         struct lod_tgt_desc *ost;
477         struct lod_qos_oss  *oss;
478         unsigned int j;
479         ENTRY;
480
481         ost = OST_TGT(lod,index);
482         LASSERT(ost);
483
484         /* Don't allocate on this device anymore, until the next alloc_qos */
485         ost->ltd_qos.ltq_usable = 0;
486
487         oss = ost->ltd_qos.ltq_oss;
488
489         /* Decay old penalty by half (we're adding max penalty, and don't
490            want it to run away.) */
491         ost->ltd_qos.ltq_penalty >>= 1;
492         oss->lqo_penalty >>= 1;
493
494         /* mark the OSS and OST as recently used */
495         ost->ltd_qos.ltq_used = oss->lqo_used = ktime_get_real_seconds();
496
497         /* Set max penalties for this OST and OSS */
498         ost->ltd_qos.ltq_penalty +=
499                 ost->ltd_qos.ltq_penalty_per_obj * lod->lod_ostnr;
500         oss->lqo_penalty += oss->lqo_penalty_per_obj *
501                 lod->lod_qos.lq_active_oss_count;
502
503         /* Decrease all OSS penalties */
504         list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) {
505                 if (oss->lqo_penalty < oss->lqo_penalty_per_obj)
506                         oss->lqo_penalty = 0;
507                 else
508                         oss->lqo_penalty -= oss->lqo_penalty_per_obj;
509         }
510
511         *total_wt = 0;
512         /* Decrease all OST penalties */
513         for (j = 0; j < osts->op_count; j++) {
514                 int i;
515
516                 i = osts->op_array[j];
517                 if (!cfs_bitmap_check(lod->lod_ost_bitmap, i))
518                         continue;
519
520                 ost = OST_TGT(lod,i);
521                 LASSERT(ost);
522
523                 if (ost->ltd_qos.ltq_penalty <
524                                 ost->ltd_qos.ltq_penalty_per_obj)
525                         ost->ltd_qos.ltq_penalty = 0;
526                 else
527                         ost->ltd_qos.ltq_penalty -=
528                                 ost->ltd_qos.ltq_penalty_per_obj;
529
530                 lod_qos_calc_weight(lod, i);
531
532                 /* Recalc the total weight of usable osts */
533                 if (ost->ltd_qos.ltq_usable)
534                         *total_wt += ost->ltd_qos.ltq_weight;
535
536                 QOS_DEBUG("recalc tgt %d usable=%d avail=%llu"
537                           " ostppo=%llu ostp=%llu ossppo=%llu"
538                           " ossp=%llu wt=%llu\n",
539                           i, ost->ltd_qos.ltq_usable, TGT_BAVAIL(i) >> 10,
540                           ost->ltd_qos.ltq_penalty_per_obj >> 10,
541                           ost->ltd_qos.ltq_penalty >> 10,
542                           ost->ltd_qos.ltq_oss->lqo_penalty_per_obj >> 10,
543                           ost->ltd_qos.ltq_oss->lqo_penalty >> 10,
544                           ost->ltd_qos.ltq_weight >> 10);
545         }
546
547         RETURN(0);
548 }
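/*
 * Added note (continuing the hypothetical example above): a freshly used OST
 * is charged ltq_penalty_per_obj * lod_ostnr (32 GiB * 5 = 160 GiB here),
 * while every call also subtracts one penalty_per_obj from each target in
 * the pool, so it takes on the order of lod_ostnr further allocations
 * elsewhere, or the time-based decay, before that target competes at full
 * weight again.
 */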
549
550 void lod_qos_rr_init(struct lod_qos_rr *lqr)
551 {
552         spin_lock_init(&lqr->lqr_alloc);
553         lqr->lqr_dirty = 1;
554 }
555
556
557 #define LOV_QOS_EMPTY ((__u32)-1)
558
559 /**
560  * Calculate optimal round-robin order with regard to OSSes.
561  *
562  * Place all the OSTs from pool \a src_pool in a special array to be used for
563  * round-robin (RR) stripe allocation.  The placement algorithm interleaves
564  * OSTs from the different OSSs so that RR allocation can balance OSSs evenly.
565  * Resorts the targets when the number of active targets changes (because of
566  * a new target or activation/deactivation).
567  *
568  * \param[in] lod       LOD device
569  * \param[in] src_pool  OST pool
570  * \param[in] lqr       round-robin list
571  *
572  * \retval 0            on success
573  * \retval -ENOMEM      fails to allocate the array
574  */
575 static int lod_qos_calc_rr(struct lod_device *lod, struct ost_pool *src_pool,
576                            struct lod_qos_rr *lqr)
577 {
578         struct lod_qos_oss  *oss;
579         struct lod_tgt_desc *ost;
580         unsigned placed, real_count;
581         unsigned int i;
582         int rc;
583         ENTRY;
584
585         if (!lqr->lqr_dirty) {
586                 LASSERT(lqr->lqr_pool.op_size);
587                 RETURN(0);
588         }
589
590         /* Do actual allocation. */
591         down_write(&lod->lod_qos.lq_rw_sem);
592
593         /*
594          * Check again. While we were sleeping on @lq_rw_sem something could
595          * change.
596          */
597         if (!lqr->lqr_dirty) {
598                 LASSERT(lqr->lqr_pool.op_size);
599                 up_write(&lod->lod_qos.lq_rw_sem);
600                 RETURN(0);
601         }
602
603         real_count = src_pool->op_count;
604
605         /* Zero the pool array */
606         /* alloc_rr is holding a read lock on the pool, so nobody is adding/
607            deleting from the pool. The lq_rw_sem ensures that nobody else
608            is reading. */
609         lqr->lqr_pool.op_count = real_count;
610         rc = lod_ost_pool_extend(&lqr->lqr_pool, real_count);
611         if (rc) {
612                 up_write(&lod->lod_qos.lq_rw_sem);
613                 RETURN(rc);
614         }
615         for (i = 0; i < lqr->lqr_pool.op_count; i++)
616                 lqr->lqr_pool.op_array[i] = LOV_QOS_EMPTY;
617
618         /* Place all the OSTs from 1 OSS at the same time. */
619         placed = 0;
620         list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) {
621                 int j = 0;
622
623                 for (i = 0; i < lqr->lqr_pool.op_count; i++) {
624                         int next;
625
626                         if (!cfs_bitmap_check(lod->lod_ost_bitmap,
627                                                 src_pool->op_array[i]))
628                                 continue;
629
630                         ost = OST_TGT(lod,src_pool->op_array[i]);
631                         LASSERT(ost && ost->ltd_ost);
632                         if (ost->ltd_qos.ltq_oss != oss)
633                                 continue;
634
635                         /* Evenly space these OSTs across arrayspace */
636                         next = j * lqr->lqr_pool.op_count / oss->lqo_ost_count;
637                         while (lqr->lqr_pool.op_array[next] != LOV_QOS_EMPTY)
638                                 next = (next + 1) % lqr->lqr_pool.op_count;
639
640                         lqr->lqr_pool.op_array[next] = src_pool->op_array[i];
641                         j++;
642                         placed++;
643                 }
644         }
645
646         lqr->lqr_dirty = 0;
647         up_write(&lod->lod_qos.lq_rw_sem);
648
649         if (placed != real_count) {
650                 /* This should never happen */
651                 LCONSOLE_ERROR_MSG(0x14e, "Failed to place all OSTs in the "
652                                    "round-robin list (%d of %d).\n",
653                                    placed, real_count);
654                 for (i = 0; i < lqr->lqr_pool.op_count; i++) {
655                         LCONSOLE(D_WARNING, "rr #%d ost idx=%d\n", i,
656                                  lqr->lqr_pool.op_array[i]);
657                 }
658                 lqr->lqr_dirty = 1;
659                 RETURN(-EAGAIN);
660         }
661
662 #if 0
663         for (i = 0; i < lqr->lqr_pool.op_count; i++)
664                 QOS_CONSOLE("rr #%d ost idx=%d\n", i, lqr->lqr_pool.op_array[i]);
665 #endif
666
667         RETURN(0);
668 }
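/*
 * Added illustration of the interleaving (not from the original source):
 * with two OSSs A and B of three OSTs each, the "evenly space" step places
 * A's OSTs at slots 0 * 6/3 = 0, 1 * 6/3 = 2 and 2 * 6/3 = 4, and B's OSTs
 * probe forward into slots 1, 3 and 5, producing an op_array of
 * A0 B0 A1 B1 A2 B2.  Round-robin allocation over that array therefore
 * alternates between the two servers instead of draining one OSS first.
 */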
669
670 /**
671  * Instantiate and declare creation of a new object.
672  *
673  * The function instantiates LU representation for a new object on the
674  * specified device. Also it declares an intention to create that
675  * object on the storage target.
676  *
677  * Note that lu_object_anon() is used, which is a trick with regard to the
678  * LU/OSD infrastructure: in the existing precreation framework we can't
679  * assign a FID at this point; that is done later, once a transaction is
680  * started. So this special method instantiates a FID-less object in the
681  * cache, and later it gets a FID and a proper placement in the LU cache.
682  *
683  * \param[in] env       execution environment for this thread
684  * \param[in] d         LOD device
685  * \param[in] ost_idx   OST target index where the object is being created
686  * \param[in] th        transaction handle
687  *
688  * \retval              object ptr on success, ERR_PTR() otherwise
689  */
690 static struct dt_object *lod_qos_declare_object_on(const struct lu_env *env,
691                                                    struct lod_device *d,
692                                                    __u32 ost_idx,
693                                                    struct thandle *th)
694 {
695         struct lod_tgt_desc *ost;
696         struct lu_object *o, *n;
697         struct lu_device *nd;
698         struct dt_object *dt;
699         int               rc;
700         ENTRY;
701
702         LASSERT(d);
703         LASSERT(ost_idx < d->lod_osts_size);
704         ost = OST_TGT(d,ost_idx);
705         LASSERT(ost);
706         LASSERT(ost->ltd_ost);
707
708         nd = &ost->ltd_ost->dd_lu_dev;
709
710         /*
711          * allocate anonymous object with zero fid, real fid
712          * will be assigned by OSP within transaction
713          * XXX: to be fixed with fully-functional OST fids
714          */
715         o = lu_object_anon(env, nd, NULL);
716         if (IS_ERR(o))
717                 GOTO(out, dt = ERR_PTR(PTR_ERR(o)));
718
719         n = lu_object_locate(o->lo_header, nd->ld_type);
720         if (unlikely(n == NULL)) {
721                 CERROR("can't find slice\n");
722                 lu_object_put(env, o);
723                 GOTO(out, dt = ERR_PTR(-EINVAL));
724         }
725
726         dt = container_of(n, struct dt_object, do_lu);
727
728         rc = lod_sub_declare_create(env, dt, NULL, NULL, NULL, th);
729         if (rc < 0) {
730                 CDEBUG(D_OTHER, "can't declare creation on #%u: %d\n",
731                        ost_idx, rc);
732                 lu_object_put(env, o);
733                 dt = ERR_PTR(rc);
734         }
735
736 out:
737         RETURN(dt);
738 }
739
740 /**
741  * Calculate a minimum acceptable stripe count.
742  *
743  * Return an acceptable stripe count depending on flag LOV_USES_DEFAULT_STRIPE:
744  * all stripes or 3/4 of stripes.
745  *
746  * \param[in] stripe_count      number of stripes requested
747  * \param[in] flags             0 or LOV_USES_DEFAULT_STRIPE
748  *
749  * \retval                      acceptable stripecount
750  */
751 static int min_stripe_count(__u32 stripe_count, int flags)
752 {
753         return (flags & LOV_USES_DEFAULT_STRIPE ?
754                 stripe_count - (stripe_count / 4) : stripe_count);
755 }
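/*
 * Added example: a default-striped request for 8 stripes is considered
 * satisfied with 8 - 8/4 = 6 stripes when LOV_USES_DEFAULT_STRIPE is set,
 * while an explicitly requested stripe count must be met in full.
 */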
756
757 #define LOV_CREATE_RESEED_MULT 30
758 #define LOV_CREATE_RESEED_MIN  2000
759
760 /**
761  * Initialize temporary OST-in-use array.
762  *
763  * Allocate or extend the array used to mark targets already assigned to a new
764  * striping so they are not used more than once.
765  *
766  * \param[in] env       execution environment for this thread
767  * \param[in] stripes   number of items needed in the array
768  *
769  * \retval 0            on success
770  * \retval -ENOMEM      on error
771  */
772 static inline int lod_qos_ost_in_use_clear(const struct lu_env *env,
773                                            __u32 stripes)
774 {
775         struct lod_thread_info *info = lod_env_info(env);
776
777         if (info->lti_ea_store_size < sizeof(int) * stripes)
778                 lod_ea_store_resize(info, stripes * sizeof(int));
779         if (info->lti_ea_store_size < sizeof(int) * stripes) {
780                 CERROR("can't allocate memory for ost-in-use array\n");
781                 return -ENOMEM;
782         }
783         memset(info->lti_ea_store, -1, sizeof(int) * stripes);
784         return 0;
785 }
786
787 /**
788  * Remember a target in the array of used targets.
789  *
790  * Mark the given target as used for a new striping being created. The status
791  * of an OST in a striping can be checked with lod_qos_is_ost_used().
792  *
793  * \param[in] env       execution environment for this thread
794  * \param[in] idx       index in the array
795  * \param[in] ost       OST target index to mark as used
796  */
797 static inline void lod_qos_ost_in_use(const struct lu_env *env,
798                                       int idx, int ost)
799 {
800         struct lod_thread_info *info = lod_env_info(env);
801         int *osts = info->lti_ea_store;
802
803         LASSERT(info->lti_ea_store_size >= idx * sizeof(int));
804         osts[idx] = ost;
805 }
806
807 /**
808  * Check whether an OST is used in a striping.
809  *
810  * Checks whether OST with the given index is marked as used in the temporary
811  * array (see lod_qos_ost_in_use()).
812  *
813  * \param[in] env       execution environment for this thread
814  * \param[in] ost       OST target index to check
815  * \param[in] stripes   the number of items used in the array already
816  *
817  * \retval 0            not used
818  * \retval 1            used
819  */
820 static int lod_qos_is_ost_used(const struct lu_env *env, int ost, __u32 stripes)
821 {
822         struct lod_thread_info *info = lod_env_info(env);
823         int *osts = info->lti_ea_store;
824         __u32 j;
825
826         for (j = 0; j < stripes; j++) {
827                 if (osts[j] == ost)
828                         return 1;
829         }
830         return 0;
831 }
832
833 static inline bool
834 lod_obj_is_ost_use_skip_cb(const struct lu_env *env, struct lod_object *lo,
835                            int comp_idx, struct lod_obj_stripe_cb_data *data)
836 {
837         struct lod_layout_component *comp = &lo->ldo_comp_entries[comp_idx];
838
839         return comp->llc_ost_indices == NULL;
840 }
841
842 static inline int
843 lod_obj_is_ost_use_cb(const struct lu_env *env, struct lod_object *lo,
844                       int comp_idx, struct lod_obj_stripe_cb_data *data)
845 {
846         struct lod_layout_component *comp = &lo->ldo_comp_entries[comp_idx];
847         int i;
848
849         for (i = 0; i < comp->llc_stripe_count; i++) {
850                 if (comp->llc_ost_indices[i] == data->locd_ost_index) {
851                         data->locd_ost_index = -1;
852                         return -EEXIST;
853                 }
854         }
855
856         return 0;
857 }
858
859 /**
860  * Check whether an OST is used in a composite layout
861  *
862  * \param[in] lo        lod object
863  * \param[in] ost       OST target index to check
864  *
865  * \retval false        not used
866  * \retval true         used
867  */
868 static inline bool lod_comp_is_ost_used(const struct lu_env *env,
869                                        struct lod_object *lo, int ost)
870 {
871         struct lod_obj_stripe_cb_data data = { { 0 } };
872
873         data.locd_ost_index = ost;
874         data.locd_comp_skip_cb = lod_obj_is_ost_use_skip_cb;
875         data.locd_comp_cb = lod_obj_is_ost_use_cb;
876
877         (void)lod_obj_for_each_stripe(env, lo, NULL, &data);
878
879         return data.locd_ost_index == -1;
880 }
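/*
 * Added note: lod_comp_is_ost_used() walks the object's components with
 * locd_ost_index set to the OST of interest; the callback resets it to -1
 * and stops the walk with -EEXIST as soon as a match is found, so a final
 * value of -1 signals that the OST already backs a component of this file.
 */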
881
882 static inline void lod_avoid_update(struct lod_object *lo,
883                                     struct lod_avoid_guide *lag)
884 {
885         if (!lod_is_flr(lo))
886                 return;
887
888         lag->lag_ost_avail--;
889 }
890
891 static inline bool lod_should_avoid_ost(struct lod_object *lo,
892                                         struct lod_avoid_guide *lag,
893                                         __u32 index)
894 {
895         struct lod_device *lod = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev);
896         struct lod_tgt_desc *ost = OST_TGT(lod, index);
897         struct lod_qos_oss *lqo = ost->ltd_qos.ltq_oss;
898         bool used = false;
899         int i;
900
901         if (!cfs_bitmap_check(lod->lod_ost_bitmap, index))
902                 return true;
903
904         /**
905          * we've tried our best, all available OSTs have been used in
906          * overlapped components in the other mirror
907          */
908         if (lag->lag_ost_avail == 0)
909                 return false;
910
911         /* check OSS use */
912         for (i = 0; i < lag->lag_oaa_count; i++) {
913                 if (lag->lag_oss_avoid_array[i] == lqo->lqo_id) {
914                         used = true;
915                         break;
916                 }
917         }
918         /**
919          * if the OSS on which OST[index] resides has not been used, we'd
920          * like to use it
921          */
922         if (!used)
923                 return false;
924
925         /* if the OSS has been used, check whether the OST has been used */
926         if (!cfs_bitmap_check(lag->lag_ost_avoid_bitmap, index))
927                 used = false;
928         else
929                 QOS_DEBUG("OST%d: has been used in overlapped component "
930                           "in other mirror\n", index);
931         return used;
932 }
933
934 static int lod_check_and_reserve_ost(const struct lu_env *env,
935                                      struct lod_object *lo,
936                                      struct obd_statfs *sfs, __u32 ost_idx,
937                                      __u32 speed, __u32 *s_idx,
938                                      struct dt_object **stripe,
939                                      __u32 *ost_indices,
940                                      struct thandle *th)
941 {
942         struct lod_device *lod = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev);
943         struct lod_avoid_guide *lag = &lod_env_info(env)->lti_avoid;
944         struct dt_object   *o;
945         __u32 stripe_idx = *s_idx;
946         int rc;
947
948         rc = lod_statfs_and_check(env, lod, ost_idx, sfs);
949         if (rc) {
950                 /* this OSP doesn't feel well */
951                 goto out_return;
952         }
953
954         /*
955          * We expect the number of precreated objects in os_fprecreated at
956          * the first iteration; skip OSPs with no objects ready
957          */
958         if (sfs->os_fprecreated == 0 && speed == 0) {
959                 QOS_DEBUG("#%d: precreation is empty\n", ost_idx);
960                 goto out_return;
961         }
962
963         /*
964          * try to use another OSP if this one is degraded
965          */
966         if (sfs->os_state & OS_STATE_DEGRADED && speed < 2) {
967                 QOS_DEBUG("#%d: degraded\n", ost_idx);
968                 goto out_return;
969         }
970
971         /*
972          * try not to allocate on an OST which has been used by another
973          * component
974          */
975         if (speed == 0 && lod_comp_is_ost_used(env, lo, ost_idx)) {
976                 QOS_DEBUG("#%d: used by other component\n", ost_idx);
977                 goto out_return;
978         }
979
980         /**
981          * try not to allocate on OSTs used by conflicting components of
982          * other mirrors on the first and second passes.
983          */
984         if (speed < 2 && lod_should_avoid_ost(lo, lag, ost_idx)) {
985                 QOS_DEBUG("#%d: used by overlapped component of other mirror\n",
986                           ost_idx);
987                 goto out_return;
988         }
989         /*
990          * do not put >1 objects on a single OST
991          */
992         if (lod_qos_is_ost_used(env, ost_idx, stripe_idx))
993                 goto out_return;
994
995         o = lod_qos_declare_object_on(env, lod, ost_idx, th);
996         if (IS_ERR(o)) {
997                 CDEBUG(D_OTHER, "can't declare new object on #%u: %d\n",
998                        ost_idx, (int) PTR_ERR(o));
999                 rc = PTR_ERR(o);
1000                 goto out_return;
1001         }
1002
1003         /*
1004          * We've successfully declared (reserved) an object
1005          */
1006         lod_avoid_update(lo, lag);
1007         lod_qos_ost_in_use(env, stripe_idx, ost_idx);
1008         stripe[stripe_idx] = o;
1009         ost_indices[stripe_idx] = ost_idx;
1010         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_LOV_CREATE_RACE, 2);
1011         stripe_idx++;
1012         *s_idx = stripe_idx;
1013
1014 out_return:
1015         return rc;
1016 }
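/*
 * Added summary of the "speed" passes driven by the callers (not from the
 * original source):
 *
 *   speed 0 - strictest pass: skip targets with no precreated objects,
 *             targets already used by another component, degraded targets
 *             and targets used by overlapped components of other mirrors;
 *   speed 1 - also accept targets with an empty precreate pool or already
 *             used by other components, but still avoid degraded targets
 *             and mirror conflicts;
 *   speed 2 - accept anything that passes lod_statfs_and_check() and is not
 *             already part of this striping.
 */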
1017
1018 /**
1019  * Allocate a striping using round-robin algorithm.
1020  *
1021  * Allocates a new striping using the round-robin algorithm. The function refreshes
1022  * all the internal structures (statfs cache, array of available OSTs sorted
1023  * with regard to OSS, etc). The number of stripes required is taken from the
1024  * object (must be prepared by the caller), but can change if the flag
1025  * LOV_USES_DEFAULT_STRIPE is supplied. The caller should ensure nobody else
1026  * is trying to create a striping on the object in parallel. All the internal
1027  * structures (like pools, etc) are protected and no additional locking is
1028  * required. The function succeeds even if only a single stripe is allocated.
1029  * To save time we give priority to targets which already have objects precreated.
1030  * Full OSTs are skipped (see lod_qos_dev_is_full() for the details).
1031  *
1032  * \param[in] env               execution environment for this thread
1033  * \param[in] lo                LOD object
1034  * \param[out] stripe           striping created
1035  * \param[out] ost_indices      ost indices of striping created
1036  * \param[in] flags             allocation flags (0 or LOV_USES_DEFAULT_STRIPE)
1037  * \param[in] th                transaction handle
1038  * \param[in] comp_idx          index of ldo_comp_entries
1039  *
1040  * \retval 0            on success
1041  * \retval -ENOSPC      if not enough OSTs are found
1042  * \retval negative     negated errno for other failures
1043  */
1044 static int lod_alloc_rr(const struct lu_env *env, struct lod_object *lo,
1045                         struct dt_object **stripe, __u32 *ost_indices,
1046                         int flags, struct thandle *th, int comp_idx)
1047 {
1048         struct lod_layout_component *lod_comp;
1049         struct lod_device *m = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev);
1050         struct obd_statfs *sfs = &lod_env_info(env)->lti_osfs;
1051         struct pool_desc  *pool = NULL;
1052         struct ost_pool   *osts;
1053         struct lod_qos_rr *lqr;
1054         unsigned int    i, array_idx;
1055         __u32 ost_start_idx_temp;
1056         __u32 stripe_idx = 0;
1057         __u32 stripe_count, stripe_count_min, ost_idx;
1058         int rc, speed = 0, ost_connecting = 0;
1059         ENTRY;
1060
1061         LASSERT(lo->ldo_comp_cnt > comp_idx && lo->ldo_comp_entries != NULL);
1062         lod_comp = &lo->ldo_comp_entries[comp_idx];
1063         stripe_count = lod_comp->llc_stripe_count;
1064         stripe_count_min = min_stripe_count(stripe_count, flags);
1065
1066         if (lod_comp->llc_pool != NULL)
1067                 pool = lod_find_pool(m, lod_comp->llc_pool);
1068
1069         if (pool != NULL) {
1070                 down_read(&pool_tgt_rw_sem(pool));
1071                 osts = &(pool->pool_obds);
1072                 lqr = &(pool->pool_rr);
1073         } else {
1074                 osts = &(m->lod_pool_info);
1075                 lqr = &(m->lod_qos.lq_rr);
1076         }
1077
1078         rc = lod_qos_calc_rr(m, osts, lqr);
1079         if (rc)
1080                 GOTO(out, rc);
1081
1082         rc = lod_qos_ost_in_use_clear(env, stripe_count);
1083         if (rc)
1084                 GOTO(out, rc);
1085
1086         down_read(&m->lod_qos.lq_rw_sem);
1087         spin_lock(&lqr->lqr_alloc);
1088         if (--lqr->lqr_start_count <= 0) {
1089                 lqr->lqr_start_idx = cfs_rand() % osts->op_count;
1090                 lqr->lqr_start_count =
1091                         (LOV_CREATE_RESEED_MIN / max(osts->op_count, 1U) +
1092                          LOV_CREATE_RESEED_MULT) * max(osts->op_count, 1U);
1093         } else if (stripe_count_min >= osts->op_count ||
1094                         lqr->lqr_start_idx > osts->op_count) {
1095                 /* If we have allocated from all of the OSTs, slowly
1096                  * precess the next start if the OST/stripe count isn't
1097                  * already doing this for us. */
1098                 lqr->lqr_start_idx %= osts->op_count;
1099                 if (stripe_count > 1 && (osts->op_count % stripe_count) != 1)
1100                         ++lqr->lqr_offset_idx;
1101         }
1102         ost_start_idx_temp = lqr->lqr_start_idx;
1103
1104 repeat_find:
1105
1106         QOS_DEBUG("pool '%s' want %d start_idx %d start_count %d offset %d "
1107                   "active %d count %d\n",
1108                   lod_comp->llc_pool ? lod_comp->llc_pool : "",
1109                   stripe_count, lqr->lqr_start_idx, lqr->lqr_start_count,
1110                   lqr->lqr_offset_idx, osts->op_count, osts->op_count);
1111
1112         for (i = 0; i < osts->op_count && stripe_idx < stripe_count; i++) {
1113                 array_idx = (lqr->lqr_start_idx + lqr->lqr_offset_idx) %
1114                                 osts->op_count;
1115                 ++lqr->lqr_start_idx;
1116                 ost_idx = lqr->lqr_pool.op_array[array_idx];
1117
1118                 QOS_DEBUG("#%d strt %d act %d strp %d ary %d idx %d\n",
1119                           i, lqr->lqr_start_idx, /* XXX: active*/ 0,
1120                           stripe_idx, array_idx, ost_idx);
1121
1122                 if ((ost_idx == LOV_QOS_EMPTY) ||
1123                     !cfs_bitmap_check(m->lod_ost_bitmap, ost_idx))
1124                         continue;
1125
1126                 /* Fail Check before osc_precreate() is called
1127                    so we can only 'fail' single OSC. */
1128                 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
1129                         continue;
1130
1131                 spin_unlock(&lqr->lqr_alloc);
1132                 rc = lod_check_and_reserve_ost(env, lo, sfs, ost_idx, speed,
1133                                                &stripe_idx, stripe, ost_indices,
1134                                                th);
1135                 spin_lock(&lqr->lqr_alloc);
1136
1137                 if (rc != 0 && OST_TGT(m, ost_idx)->ltd_connecting)
1138                         ost_connecting = 1;
1139         }
1140         if ((speed < 2) && (stripe_idx < stripe_count_min)) {
1141                 /* Try again, allowing slower OSCs */
1142                 speed++;
1143                 lqr->lqr_start_idx = ost_start_idx_temp;
1144
1145                 ost_connecting = 0;
1146                 goto repeat_find;
1147         }
1148
1149         spin_unlock(&lqr->lqr_alloc);
1150         up_read(&m->lod_qos.lq_rw_sem);
1151
1152         if (stripe_idx) {
1153                 lod_comp->llc_stripe_count = stripe_idx;
1154                 /* at least one stripe is allocated */
1155                 rc = 0;
1156         } else {
1157                 /* nobody provided us with a single object */
1158                 if (ost_connecting)
1159                         rc = -EINPROGRESS;
1160                 else
1161                         rc = -ENOSPC;
1162         }
1163
1164 out:
1165         if (pool != NULL) {
1166                 up_read(&pool_tgt_rw_sem(pool));
1167                 /* put back ref got by lod_find_pool() */
1168                 lod_pool_putref(pool);
1169         }
1170
1171         RETURN(rc);
1172 }
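/*
 * Added worked example of the reseed logic (hypothetical pool size): with
 * osts->op_count = 32, lqr_start_count is reseeded to
 * (2000 / 32 + 30) * 32 = 2944 allocations, after which lqr_start_idx is
 * re-randomized so that a long-running MDT does not keep the same rotation
 * phase forever.
 */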
1173
1174 /**
1175  * Allocate a specific striping layout on a user defined set of OSTs.
1176  *
1177  * Allocates new striping using the OST index range provided by the data from
1178  * the lmm_objects contained in the lov_user_md passed to this method. Full
1179  * OSTs are not considered. The exact order of OSTs requested by the user
1180  * is respected as much as possible depending on OST status. The number of
1181  * stripes needed and stripe offset are taken from the object. If that number
1182  * cannot be met, then the function returns an error and it's the
1183  * caller's responsibility to release the stripes allocated. All the internal
1184  * structures are protected, but no concurrent allocation is allowed on the
1185  * same objects.
1186  *
1187  * \param[in] env               execution environment for this thread
1188  * \param[in] lo                LOD object
1189  * \param[out] stripe           striping created
1190  * \param[out] ost_indices      ost indices of striping created
1191  * \param[in] th                transaction handle
1192  * \param[in] comp_idx          index of ldo_comp_entries
1193  *
1194  * \retval 0            on success
1195  * \retval -ENODEV      OST index does not exist on file system
1196  * \retval -EINVAL      requested OST index is invalid
1197  * \retval negative     negated errno on error
1198  */
1199 static int lod_alloc_ost_list(const struct lu_env *env, struct lod_object *lo,
1200                               struct dt_object **stripe, __u32 *ost_indices,
1201                               struct thandle *th, int comp_idx)
1202 {
1203         struct lod_layout_component *lod_comp;
1204         struct lod_device       *m = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev);
1205         struct obd_statfs       *sfs = &lod_env_info(env)->lti_osfs;
1206         struct dt_object        *o;
1207         unsigned int            array_idx = 0;
1208         int                     stripe_count = 0;
1209         int                     i;
1210         int                     rc = -EINVAL;
1211         ENTRY;
1212
1213         /* for specific OSTs layout */
1214         LASSERT(lo->ldo_comp_cnt > comp_idx && lo->ldo_comp_entries != NULL);
1215         lod_comp = &lo->ldo_comp_entries[comp_idx];
1216         LASSERT(lod_comp->llc_ostlist.op_array);
1217
1218         rc = lod_qos_ost_in_use_clear(env, lod_comp->llc_stripe_count);
1219         if (rc < 0)
1220                 RETURN(rc);
1221
1222         for (i = 0; i < lod_comp->llc_stripe_count; i++) {
1223                 if (lod_comp->llc_ostlist.op_array[i] ==
1224                     lod_comp->llc_stripe_offset) {
1225                         array_idx = i;
1226                         break;
1227                 }
1228         }
1229         if (i == lod_comp->llc_stripe_count) {
1230                 CDEBUG(D_OTHER,
1231                        "%s: start index %d not in the specified list of OSTs\n",
1232                        lod2obd(m)->obd_name, lod_comp->llc_stripe_offset);
1233                 RETURN(-EINVAL);
1234         }
1235
1236         for (i = 0; i < lod_comp->llc_stripe_count;
1237              i++, array_idx = (array_idx + 1) % lod_comp->llc_stripe_count) {
1238                 __u32 ost_idx = lod_comp->llc_ostlist.op_array[array_idx];
1239
1240                 if (!cfs_bitmap_check(m->lod_ost_bitmap, ost_idx)) {
1241                         rc = -ENODEV;
1242                         break;
1243                 }
1244
1245                 /*
1246                  * do not put >1 objects on a single OST
1247                  */
1248                 if (lod_qos_is_ost_used(env, ost_idx, stripe_count)) {
1249                         rc = -EINVAL;
1250                         break;
1251                 }
1252
1253                 rc = lod_statfs_and_check(env, m, ost_idx, sfs);
1254                 if (rc < 0) /* this OSP doesn't feel well */
1255                         break;
1256
1257                 o = lod_qos_declare_object_on(env, m, ost_idx, th);
1258                 if (IS_ERR(o)) {
1259                         rc = PTR_ERR(o);
1260                         CDEBUG(D_OTHER,
1261                                "%s: can't declare new object on #%u: %d\n",
1262                                lod2obd(m)->obd_name, ost_idx, rc);
1263                         break;
1264                 }
1265
1266                 /*
1267                  * We've successfully declared (reserved) an object
1268                  */
1269                 lod_qos_ost_in_use(env, stripe_count, ost_idx);
1270                 stripe[stripe_count] = o;
1271                 ost_indices[stripe_count] = ost_idx;
1272                 stripe_count++;
1273         }
1274
1275         RETURN(rc);
1276 }
1277
1278 /**
1279  * Allocate a striping on a predefined set of OSTs.
1280  *
1281  * Allocates new layout starting from OST index in lo->ldo_stripe_offset.
1282  * Full OSTs are not considered. The exact order of OSTs is not important and
1283  * varies depending on OST status. The allocation procedure prefers the targets
1284  * with precreated objects ready. The number of stripes needed and stripe
1285  * offset are taken from the object. If that number cannot be met, then the
1286  * function returns an error and it's the caller's responsibility to
1287  * release the stripes allocated. All the internal structures are protected,
1288  * but no concurrent allocation is allowed on the same objects.
1289  *
1290  * \param[in] env               execution environment for this thread
1291  * \param[in] lo                LOD object
1292  * \param[out] stripe           striping created
1293  * \param[out] ost_indices      ost indices of striping created
1294  * \param[in] flags             not used
1295  * \param[in] th                transaction handle
1296  * \param[in] comp_idx          index of ldo_comp_entries
1297  *
1298  * \retval 0            on success
1299  * \retval -ENOSPC      if no OST objects are available at all
1300  * \retval -EFBIG       if not enough OST objects are found
1301  * \retval -EINVAL      requested offset is invalid
1302  * \retval negative     errno on failure
1303  */
1304 static int lod_alloc_specific(const struct lu_env *env, struct lod_object *lo,
1305                               struct dt_object **stripe, __u32 *ost_indices,
1306                               int flags, struct thandle *th, int comp_idx)
1307 {
1308         struct lod_layout_component *lod_comp;
1309         struct lod_device *m = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev);
1310         struct obd_statfs *sfs = &lod_env_info(env)->lti_osfs;
1311         struct dt_object  *o;
1312         __u32              ost_idx;
1313         unsigned int       i, array_idx, ost_count;
1314         int                rc, stripe_num = 0;
1315         int                speed = 0;
1316         struct pool_desc  *pool = NULL;
1317         struct ost_pool   *osts;
1318         ENTRY;
1319
1320         LASSERT(lo->ldo_comp_cnt > comp_idx && lo->ldo_comp_entries != NULL);
1321         lod_comp = &lo->ldo_comp_entries[comp_idx];
1322
1323         rc = lod_qos_ost_in_use_clear(env, lod_comp->llc_stripe_count);
1324         if (rc)
1325                 GOTO(out, rc);
1326
1327         if (lod_comp->llc_pool != NULL)
1328                 pool = lod_find_pool(m, lod_comp->llc_pool);
1329
1330         if (pool != NULL) {
1331                 down_read(&pool_tgt_rw_sem(pool));
1332                 osts = &(pool->pool_obds);
1333         } else {
1334                 osts = &(m->lod_pool_info);
1335         }
1336
1337         ost_count = osts->op_count;
1338
1339 repeat_find:
1340         /* search loi_ost_idx in ost array */
1341         array_idx = 0;
1342         for (i = 0; i < ost_count; i++) {
1343                 if (osts->op_array[i] == lod_comp->llc_stripe_offset) {
1344                         array_idx = i;
1345                         break;
1346                 }
1347         }
1348         if (i == ost_count) {
1349                 CERROR("Start index %d not found in pool '%s'\n",
1350                        lod_comp->llc_stripe_offset,
1351                        lod_comp->llc_pool ? lod_comp->llc_pool : "");
1352                 GOTO(out, rc = -EINVAL);
1353         }
1354
1355         for (i = 0; i < ost_count;
1356                         i++, array_idx = (array_idx + 1) % ost_count) {
1357                 ost_idx = osts->op_array[array_idx];
1358
1359                 if (!cfs_bitmap_check(m->lod_ost_bitmap, ost_idx))
1360                         continue;
1361
1362                 /* Fail Check before osc_precreate() is called
1363                    so we can only 'fail' single OSC. */
1364                 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
1365                         continue;
1366
1367                 /*
1368                  * do not put >1 objects on a single OST
1369                  */
1370                 if (lod_qos_is_ost_used(env, ost_idx, stripe_num))
1371                         continue;
1372
1373                 /*
1374                  * try not to allocate on an OST used by another component
1375                  */
1376                 if (speed == 0 && i != 0 &&
1377                     lod_comp_is_ost_used(env, lo, ost_idx))
1378                         continue;
1379
1380                 /* Drop slow OSCs if we can, but not for requested start idx.
1381                  *
1382                  * This means "if OSC is slow and it is not the requested
1383                  * start OST, then it can be skipped, otherwise skip it only
1384                  * if it is inactive/recovering/out-of-space." */
1385
1386                 rc = lod_statfs_and_check(env, m, ost_idx, sfs);
1387                 if (rc) {
1388                         /* this OSP doesn't feel well */
1389                         continue;
1390                 }
1391
1392                 /*
1393                  * We expect number of precreated objects at the first
1394                  * iteration.  Skip OSPs with no objects ready.  Don't apply
1395                  * this logic to OST specified with stripe_offset.
1396                  */
1397                 if (i != 0 && sfs->os_fprecreated == 0 && speed == 0)
1398                         continue;
1399
1400                 o = lod_qos_declare_object_on(env, m, ost_idx, th);
1401                 if (IS_ERR(o)) {
1402                         CDEBUG(D_OTHER, "can't declare new object on #%u: %d\n",
1403                                ost_idx, (int) PTR_ERR(o));
1404                         continue;
1405                 }
1406
1407                 /*
1408                  * We've successfully declared (reserved) an object
1409                  */
1410                 lod_qos_ost_in_use(env, stripe_num, ost_idx);
1411                 stripe[stripe_num] = o;
1412                 ost_indices[stripe_num] = ost_idx;
1413                 stripe_num++;
1414
1415                 /* We have enough stripes */
1416                 if (stripe_num == lod_comp->llc_stripe_count)
1417                         GOTO(out, rc = 0);
1418         }
1419         if (speed < 2) {
1420                 /* Try again, allowing slower OSCs */
1421                 speed++;
1422                 goto repeat_find;
1423         }
1424
1425         /* If we were passed specific striping params, then a failure to
1426          * meet those requirements is an error, since we can't reallocate
1427          * that memory (it might be part of a larger array).
1428          */
1429         CERROR("can't lstripe objid "DFID": have %d want %u\n",
1430                PFID(lu_object_fid(lod2lu_obj(lo))), stripe_num,
1431                lod_comp->llc_stripe_count);
1432         rc = stripe_num == 0 ? -ENOSPC : -EFBIG;
1433 out:
1434         if (pool != NULL) {
1435                 up_read(&pool_tgt_rw_sem(pool));
1436                 /* put back ref got by lod_find_pool() */
1437                 lod_pool_putref(pool);
1438         }
1439
1440         RETURN(rc);
1441 }
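/*
 * Illustrative sketch (not part of the original source): the allocation loop
 * above walks the pool array circularly from the requested start index and
 * repeats with progressively relaxed skip criteria ("speed").  A minimal
 * standalone form of that pattern, with hypothetical is_usable() and
 * reserve() helpers (is_usable() is assumed to also reject targets reserved
 * on an earlier pass), could look like:
 *
 *	static unsigned int pick_targets(const unsigned int *tgts,
 *					 unsigned int count, unsigned int start,
 *					 unsigned int want, unsigned int *out)
 *	{
 *		unsigned int found = 0, speed, i, idx;
 *
 *		for (speed = 0; speed < 3 && found < want; speed++) {
 *			for (i = 0, idx = start;
 *			     i < count && found < want;
 *			     i++, idx = (idx + 1) % count) {
 *				if (!is_usable(tgts[idx], speed))
 *					continue;
 *				if (reserve(tgts[idx]) == 0)
 *					out[found++] = tgts[idx];
 *			}
 *		}
 *		return found;
 *	}
 */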
1442
1443 /**
1444  * Check whether QoS allocation should be used.
1445  *
1446  * A simple helper to decide when QoS allocation should be used:
1447  * if there is just a single available target, or the used space is
1448  * currently distributed evenly among the targets, then the QoS
1449  * allocation algorithm should not be used.
1450  *
1451  * \param[in] lod       LOD device
1452  *
1453  * \retval 0            should not be used
1454  * \retval 1            should be used
1455  */
1456 static inline int lod_qos_is_usable(struct lod_device *lod)
1457 {
1458 #ifdef FORCE_QOS
1459         /* to be able to debug QoS code */
1460         return 1;
1461 #endif
1462
1463         /* Detect -EAGAIN early, before expensive lock is taken. */
1464         if (!lod->lod_qos.lq_dirty && lod->lod_qos.lq_same_space)
1465                 return 0;
1466
1467         if (lod->lod_desc.ld_active_tgt_count < 2)
1468                 return 0;
1469
1470         return 1;
1471 }
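/*
 * Callers treat a "not usable" answer as a cue to fall back to round-robin
 * allocation: lod_alloc_qos() returns -EAGAIN in that case, and
 * lod_qos_prep_create() then retries the allocation with lod_alloc_rr().
 */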
1472
1473 /**
1474  * Allocate a striping using an algorithm with weights.
1475  *
1476  * The function allocates OST objects to create a striping. The algorithm
1477  * used is based on weights (currently derived from the free space only) and
1478  * tries to ensure the space is used evenly across OSTs and OSSs. The striping
1479  * configuration (# of stripes, offset, pool) is taken from the object and
1480  * is prepared by the caller.
1481  *
1482  * If LOV_USES_DEFAULT_STRIPE is not passed and the prepared configuration
1483  * can't be met due to too few OSTs, then allocation fails. If the flag is
1484  * passed and fewer than 3/4 of the requested number of stripes can be
1485  * allocated, then allocation also fails.
1486  *
1487  * No concurrent allocation is allowed on the object and this must be ensured
1488  * by the caller. All the internal structures are protected by the function.
1489  *
1490  * The algorithm has two steps: find available OSTs and calculate their
1491  * weights, then select the OSTs with their weights used as the probability.
1492  * An OST with a higher weight is proportionately more likely to be selected
1493  * than one with a lower weight.
1494  *
1495  * \param[in] env               execution environment for this thread
1496  * \param[in] lo                LOD object
1497  * \param[out] stripe           striping created
1498  * \param[out] ost_indices      ost indices of striping created
1499  * \param[in] flags             0 or LOV_USES_DEFAULT_STRIPE
1500  * \param[in] th                transaction handle
1501  * \param[in] comp_idx          index of ldo_comp_entries
1502  *
1503  * \retval 0            on success
1504  * \retval -EAGAIN      not enough OSTs are found for specified stripe count
1505  * \retval -EINVAL      requested OST index is invalid
1506  * \retval negative     errno on failure
1507  */
1508 static int lod_alloc_qos(const struct lu_env *env, struct lod_object *lo,
1509                          struct dt_object **stripe, __u32 *ost_indices,
1510                          int flags, struct thandle *th, int comp_idx)
1511 {
1512         struct lod_layout_component *lod_comp;
1513         struct lod_device *lod = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev);
1514         struct obd_statfs *sfs = &lod_env_info(env)->lti_osfs;
1515         struct lod_avoid_guide *lag = &lod_env_info(env)->lti_avoid;
1516         struct lod_tgt_desc *ost;
1517         struct dt_object *o;
1518         __u64 total_weight = 0;
1519         struct pool_desc *pool = NULL;
1520         struct ost_pool *osts;
1521         unsigned int i;
1522         __u32 nfound, good_osts, stripe_count, stripe_count_min;
1523         int rc = 0;
1524         ENTRY;
1525
1526         LASSERT(lo->ldo_comp_cnt > comp_idx && lo->ldo_comp_entries != NULL);
1527         lod_comp = &lo->ldo_comp_entries[comp_idx];
1528         stripe_count = lod_comp->llc_stripe_count;
1529         stripe_count_min = min_stripe_count(stripe_count, flags);
1530         if (stripe_count_min < 1)
1531                 RETURN(-EINVAL);
1532
1533         if (lod_comp->llc_pool != NULL)
1534                 pool = lod_find_pool(lod, lod_comp->llc_pool);
1535
1536         if (pool != NULL) {
1537                 down_read(&pool_tgt_rw_sem(pool));
1538                 osts = &(pool->pool_obds);
1539         } else {
1540                 osts = &(lod->lod_pool_info);
1541         }
1542
1543         /* Detect -EAGAIN early, before expensive lock is taken. */
1544         if (!lod_qos_is_usable(lod))
1545                 GOTO(out_nolock, rc = -EAGAIN);
1546
1547         /* Do actual allocation, use write lock here. */
1548         down_write(&lod->lod_qos.lq_rw_sem);
1549
1550         /*
1551          * Check again: while we were sleeping on @lq_rw_sem things could
1552          * have changed.
1553          */
1554         if (!lod_qos_is_usable(lod))
1555                 GOTO(out, rc = -EAGAIN);
1556
1557         rc = lod_qos_calc_ppo(lod);
1558         if (rc)
1559                 GOTO(out, rc);
1560
1561         rc = lod_qos_ost_in_use_clear(env, lod_comp->llc_stripe_count);
1562         if (rc)
1563                 GOTO(out, rc);
1564
1565         good_osts = 0;
1566         /* Find all the OSTs that are valid stripe candidates */
1567         for (i = 0; i < osts->op_count; i++) {
1568                 if (!cfs_bitmap_check(lod->lod_ost_bitmap, osts->op_array[i]))
1569                         continue;
1570
1571                 ost = OST_TGT(lod, osts->op_array[i]);
1572                 ost->ltd_qos.ltq_usable = 0;
1573
1574                 rc = lod_statfs_and_check(env, lod, osts->op_array[i], sfs);
1575                 if (rc) {
1576                         /* this OSP doesn't feel well */
1577                         continue;
1578                 }
1579
1580                 if (sfs->os_state & OS_STATE_DEGRADED)
1581                         continue;
1582
1583                 /* Fail check before osc_precreate() is called
1584                    so we can only 'fail' a single OSC. */
1585                 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) &&
1586                                    osts->op_array[i] == 0)
1587                         continue;
1588
1589                 ost->ltd_qos.ltq_usable = 1;
1590                 lod_qos_calc_weight(lod, osts->op_array[i]);
1591                 total_weight += ost->ltd_qos.ltq_weight;
1592
1593                 good_osts++;
1594         }
1595
1596         QOS_DEBUG("found %d good osts\n", good_osts);
1597
1598         if (good_osts < stripe_count_min)
1599                 GOTO(out, rc = -EAGAIN);
1600
1601         /* We have enough OSTs; cap the stripe count at what is available */
1602         if (good_osts < stripe_count)
1603                 stripe_count = good_osts;
1604
1605         /* Find enough OSTs with weighted random allocation. */
1606         nfound = 0;
1607         while (nfound < stripe_count) {
1608                 __u64 rand, cur_weight;
1609
1610                 cur_weight = 0;
1611                 rc = -ENOSPC;
1612
1613                 if (total_weight) {
1614 #if BITS_PER_LONG == 32
1615                         rand = cfs_rand() % (unsigned)total_weight;
1616                         /* If total_weight > 32-bit, first generate the high
1617                          * 32 bits of the random number, then add in the low
1618                          * 32 bits (truncated to the upper limit, if needed) */
1619                         if (total_weight > 0xffffffffULL)
1620                                 rand = (__u64)(cfs_rand() %
1621                                         (unsigned)(total_weight >> 32)) << 32;
1622                         else
1623                                 rand = 0;
1624
1625                         if (rand == (total_weight & 0xffffffff00000000ULL))
1626                                 rand |= cfs_rand() % (unsigned)total_weight;
1627                         else
1628                                 rand |= cfs_rand();
1629
1630 #else
1631                         rand = ((__u64)cfs_rand() << 32 | cfs_rand()) %
1632                                 total_weight;
1633 #endif
1634                 } else {
1635                         rand = 0;
1636                 }
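                /* rand is now (approximately) uniform in [0, total_weight);
                 * the cumulative-weight scan below therefore selects each
                 * usable OST with probability roughly proportional to its
                 * weight. */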
1637
1638                 /* On average, this will hit larger-weighted OSTs more often.
1639                  * 0-weight OSTs will always get used last (only when rand=0) */
1640                 for (i = 0; i < osts->op_count; i++) {
1641                         __u32 idx = osts->op_array[i];
1642
1643                         if (lod_should_avoid_ost(lo, lag, idx))
1644                                 continue;
1645
1646                         ost = OST_TGT(lod, idx);
1647
1648                         if (!ost->ltd_qos.ltq_usable)
1649                                 continue;
1650
1651                         cur_weight += ost->ltd_qos.ltq_weight;
1652                         QOS_DEBUG("stripe_count=%d nfound=%d cur_weight=%llu "
1653                                   "rand=%llu total_weight=%llu\n",
1654                                   stripe_count, nfound, cur_weight, rand,
1655                                   total_weight);
1656
1657                         if (cur_weight < rand)
1658                                 continue;
1659
1660                         QOS_DEBUG("stripe=%d to idx=%d\n", nfound, idx);
1661                         /*
1662                          * do not put >1 objects on a single OST
1663                          */
1664                         if (lod_qos_is_ost_used(env, idx, nfound) ||
1665                             lod_comp_is_ost_used(env, lo, idx))
1666                                 continue;
1667
1668                         o = lod_qos_declare_object_on(env, lod, idx, th);
1669                         if (IS_ERR(o)) {
1670                                 QOS_DEBUG("can't declare object on #%u: %d\n",
1671                                           idx, (int) PTR_ERR(o));
1672                                 continue;
1673                         }
1674
1675                         lod_avoid_update(lo, lag);
1676                         lod_qos_ost_in_use(env, nfound, idx);
1677                         stripe[nfound] = o;
1678                         ost_indices[nfound] = idx;
1679                         lod_qos_used(lod, osts, idx, &total_weight);
1680                         nfound++;
1681                         rc = 0;
1682                         break;
1683                 }
1684
1685                 if (rc) {
1686                         /* no OST found on this iteration, give up */
1687                         break;
1688                 }
1689         }
1690
1691         if (unlikely(nfound != stripe_count)) {
1692                 /*
1693                  * when the decision to use the weighted algorithm was made
1694                  * we had enough suitable OSPs, but this state can change at
1695                  * any time (no space on an OST, broken connection, etc.),
1696                  * so an OSP may fail to provide us with an object because
1697                  * its state has just changed
1698                  */
1699                 QOS_DEBUG("%s: wanted %d objects, found only %d\n",
1700                           lod2obd(lod)->obd_name, stripe_count, nfound);
1701                 for (i = 0; i < nfound; i++) {
1702                         LASSERT(stripe[i] != NULL);
1703                         dt_object_put(env, stripe[i]);
1704                         stripe[i] = NULL;
1705                 }
1706
1707                 /* makes sense to rebalance next time */
1708                 lod->lod_qos.lq_dirty = 1;
1709                 lod->lod_qos.lq_same_space = 0;
1710
1711                 rc = -EAGAIN;
1712         }
1713
1714 out:
1715         up_write(&lod->lod_qos.lq_rw_sem);
1716
1717 out_nolock:
1718         if (pool != NULL) {
1719                 up_read(&pool_tgt_rw_sem(pool));
1720                 /* put back ref got by lod_find_pool() */
1721                 lod_pool_putref(pool);
1722         }
1723
1724         RETURN(rc);
1725 }
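/*
 * Illustrative sketch (not part of the original source): the selection loop
 * above is weighted random sampling: draw a value uniformly in
 * [0, total_weight), then walk the candidates accumulating their weights and
 * take the first one whose running sum reaches the drawn value.  A minimal
 * standalone version in plain C (rand() is used only for illustration):
 *
 *	#include <stdlib.h>
 *
 *	// Returns an index in [0, count); index i is picked with probability
 *	// of roughly weight[i] / total.  Caller guarantees count > 0 and that
 *	// total is the sum of weight[0..count-1].
 *	static unsigned int weighted_pick(const unsigned long long *weight,
 *					  unsigned int count,
 *					  unsigned long long total)
 *	{
 *		unsigned long long draw = total ? rand() % total : 0;
 *		unsigned long long cur = 0;
 *		unsigned int i;
 *
 *		for (i = 0; i < count; i++) {
 *			cur += weight[i];
 *			if (cur >= draw)	// mirrors "cur_weight < rand -> skip"
 *				return i;
 *		}
 *		return count - 1;	// not reached while total > 0
 *	}
 */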
1726
1727 /**
1728  * Find largest stripe count the caller can use.
1729  *
1730  * Find the maximal possible stripe count not greater than \a stripe_count.
1731  * Sometimes the suggested stripe count can't be reached for a number of
1732  * reasons: not enough active OSTs, or the backend does not support EAs
1733  * that large. If the passed count is 0, then the filesystem default is used.
1734  *
1735  * \param[in] lod       LOD device
1736  * \param[in] lo        The lod_object
1737  * \param[in] stripe_count      count the caller would like to use
1738  *
1739  * \retval              the maximum usable stripe count
1740  */
1741 __u16 lod_get_stripe_count(struct lod_device *lod, struct lod_object *lo,
1742                            __u16 stripe_count)
1743 {
1744         __u32 max_stripes = LOV_MAX_STRIPE_COUNT_OLD;
1745
1746         if (!stripe_count)
1747                 stripe_count = lod->lod_desc.ld_default_stripe_count;
1748         if (stripe_count > lod->lod_desc.ld_active_tgt_count)
1749                 stripe_count = lod->lod_desc.ld_active_tgt_count;
1750         if (!stripe_count)
1751                 stripe_count = 1;
1752
1753         /* the stripe count is limited by the EA size the OSD can handle */
1754         if (lod->lod_osd_max_easize > 0) {
1755                 unsigned int easize = lod->lod_osd_max_easize;
1756                 int i;
1757
1758                 if (lo->ldo_is_composite) {
1759                         struct lod_layout_component *lod_comp;
1760                         unsigned int header_sz = sizeof(struct lov_comp_md_v1);
1761
1762                         header_sz += sizeof(struct lov_comp_md_entry_v1) *
1763                                         lo->ldo_comp_cnt;
1764                         for (i = 0; i < lo->ldo_comp_cnt; i++) {
1765                                 lod_comp = &lo->ldo_comp_entries[i];
1766                                 if (lod_comp->llc_flags & LCME_FL_INIT)
1767                                         header_sz += lov_mds_md_size(
1768                                                 lod_comp->llc_stripe_count,
1769                                                 LOV_MAGIC_V3);
1770                         }
1771                         if (easize > header_sz)
1772                                 easize -= header_sz;
1773                         else
1774                                 easize = 0;
1775                 }
1776
1777                 max_stripes = lov_mds_md_max_stripe_count(easize, LOV_MAGIC_V3);
1778         }
1779
1780         return (stripe_count < max_stripes) ? stripe_count : max_stripes;
1781 }
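/*
 * Worked example (added for illustration): for a composite file the EA budget
 * available for a new component is the OSD limit minus the layout overhead
 * accounted for above, i.e.
 *
 *	easize_left = lod_osd_max_easize
 *		    - sizeof(struct lov_comp_md_v1)
 *		    - ldo_comp_cnt * sizeof(struct lov_comp_md_entry_v1)
 *		    - sum of lov_mds_md_size(llc_stripe_count, LOV_MAGIC_V3)
 *		      over the already-initialized components
 *
 * and the returned stripe count is then capped at
 * lov_mds_md_max_stripe_count(easize_left, LOV_MAGIC_V3).  So a small OSD EA
 * limit, or many already-initialized components, directly reduces how many
 * stripes a new component can get.
 */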
1782
1783 /**
1784  * Create an in-core representation for a fully-defined striping
1785  *
1786  * When the caller passes a fully-defined striping (i.e. everything including
1787  * the OST object FIDs is defined), we still need to instantiate the LU-cache
1788  * with the objects representing the defined stripes. This function completes
1789  * that task.
1790  *
1791  * \param[in] env       execution environment for this thread
1792  * \param[in] mo        LOD object
1793  * \param[in] buf       buffer containing the striping
1794  *
1795  * \retval 0            on success
1796  * \retval negative     negated errno on error
1797  */
1798 int lod_use_defined_striping(const struct lu_env *env,
1799                              struct lod_object *mo,
1800                              const struct lu_buf *buf)
1801 {
1802         struct lod_layout_component *lod_comp;
1803         struct lov_mds_md_v1   *v1 = buf->lb_buf;
1804         struct lov_mds_md_v3   *v3 = buf->lb_buf;
1805         struct lov_comp_md_v1  *comp_v1 = NULL;
1806         struct lov_ost_data_v1 *objs;
1807         __u32   magic;
1808         __u16   comp_cnt;
1809         __u16   mirror_cnt;
1810         int     rc = 0, i;
1811         ENTRY;
1812
1813         mutex_lock(&mo->ldo_layout_mutex);
1814         lod_striping_free_nolock(env, mo);
1815
1816         magic = le32_to_cpu(v1->lmm_magic) & ~LOV_MAGIC_DEFINED;
1817
1818         if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3 &&
1819             magic != LOV_MAGIC_COMP_V1)
1820                 GOTO(unlock, rc = -EINVAL);
1821
1822         if (magic == LOV_MAGIC_COMP_V1) {
1823                 comp_v1 = buf->lb_buf;
1824                 comp_cnt = le16_to_cpu(comp_v1->lcm_entry_count);
1825                 if (comp_cnt == 0)
1826                         GOTO(unlock, rc = -EINVAL);
1827                 mirror_cnt = le16_to_cpu(comp_v1->lcm_mirror_count) + 1;
1828                 mo->ldo_flr_state = le16_to_cpu(comp_v1->lcm_flags) &
1829                                         LCM_FL_FLR_MASK;
1830                 mo->ldo_is_composite = 1;
1831         } else {
1832                 mo->ldo_is_composite = 0;
1833                 comp_cnt = 1;
1834                 mirror_cnt = 0;
1835         }
1836         mo->ldo_layout_gen = le16_to_cpu(v1->lmm_layout_gen);
1837
1838         rc = lod_alloc_comp_entries(mo, mirror_cnt, comp_cnt);
1839         if (rc)
1840                 GOTO(unlock, rc);
1841
1842         for (i = 0; i < comp_cnt; i++) {
1843                 struct lu_extent *ext;
1844                 char    *pool_name;
1845                 __u32   offs;
1846
1847                 lod_comp = &mo->ldo_comp_entries[i];
1848
1849                 if (mo->ldo_is_composite) {
1850                         offs = le32_to_cpu(comp_v1->lcm_entries[i].lcme_offset);
1851                         v1 = (struct lov_mds_md_v1 *)((char *)comp_v1 + offs);
1852                         magic = le32_to_cpu(v1->lmm_magic);
1853
1854                         ext = &comp_v1->lcm_entries[i].lcme_extent;
1855                         lod_comp->llc_extent.e_start =
1856                                 le64_to_cpu(ext->e_start);
1857                         lod_comp->llc_extent.e_end = le64_to_cpu(ext->e_end);
1858                         lod_comp->llc_flags =
1859                                 le32_to_cpu(comp_v1->lcm_entries[i].lcme_flags);
1860                         lod_comp->llc_id =
1861                                 le32_to_cpu(comp_v1->lcm_entries[i].lcme_id);
1862                         if (lod_comp->llc_id == LCME_ID_INVAL)
1863                                 GOTO(out, rc = -EINVAL);
1864                 }
1865
1866                 pool_name = NULL;
1867                 if (magic == LOV_MAGIC_V1) {
1868                         objs = &v1->lmm_objects[0];
1869                 } else if (magic == LOV_MAGIC_V3) {
1870                         objs = &v3->lmm_objects[0];
1871                         if (v3->lmm_pool_name[0] != '\0')
1872                                 pool_name = v3->lmm_pool_name;
1873                 } else {
1874                         CDEBUG(D_LAYOUT, "Invalid magic %x\n", magic);
1875                         GOTO(out, rc = -EINVAL);
1876                 }
1877
1878                 lod_comp->llc_pattern = le32_to_cpu(v1->lmm_pattern);
1879                 lod_comp->llc_stripe_size = le32_to_cpu(v1->lmm_stripe_size);
1880                 lod_comp->llc_stripe_count = le16_to_cpu(v1->lmm_stripe_count);
1881                 lod_comp->llc_layout_gen = le16_to_cpu(v1->lmm_layout_gen);
1882                 /**
1883                  * The stripe_offset of an uninitialized component is stored
1884                  * in lmm_layout_gen
1885                  */
1886                 if (mo->ldo_is_composite && !lod_comp_inited(lod_comp))
1887                         lod_comp->llc_stripe_offset = lod_comp->llc_layout_gen;
1888                 lod_obj_set_pool(mo, i, pool_name);
1889
1890                 if ((!mo->ldo_is_composite || lod_comp_inited(lod_comp)) &&
1891                     !(lod_comp->llc_pattern & LOV_PATTERN_F_RELEASED) &&
1892                     !(lod_comp->llc_pattern & LOV_PATTERN_MDT)) {
1893                         rc = lod_initialize_objects(env, mo, objs, i);
1894                         if (rc)
1895                                 GOTO(out, rc);
1896                 }
1897         }
1898
1899         rc = lod_fill_mirrors(mo);
1900         GOTO(out, rc);
1901 out:
1902         if (rc)
1903                 lod_striping_free_nolock(env, mo);
1904 unlock:
1905         mutex_unlock(&mo->ldo_layout_mutex);
1906
1907         RETURN(rc);
1908 }
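/*
 * Buffer layout handled above in the composite case, as implied by the
 * lcme_offset arithmetic: a struct lov_comp_md_v1 header is followed by
 * lcm_entry_count entries, and each entry's lcme_offset points at the
 * per-component lov_mds_md_v1/v3 blob within the same buffer:
 *
 *	+----------------------+
 *	| lov_comp_md_v1       |
 *	|  lcm_entries[0..N-1] |---lcme_offset---+
 *	+----------------------+                 |
 *	| lov_mds_md_v1/v3     |<----------------+
 *	|  (component 0)       |
 *	+----------------------+
 *	| ...                  |
 *	+----------------------+
 */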
1909
1910 /**
1911  * Parse suggested striping configuration.
1912  *
1913  * The caller gets a suggested striping configuration from a number of sources,
1914  * including per-directory defaults and applications. Then it needs to verify
1915  * that the suggested striping is valid, fill in any missing bits and store the
1916  * resulting configuration in the object to be used by the allocator later. It
1917  * must not be called concurrently against the same object. It's OK to provide a
1918  * fully-defined striping.
1919  *
1920  * \param[in] env       execution environment for this thread
1921  * \param[in] lo        LOD object
1922  * \param[in] buf       buffer containing the striping
1923  *
1924  * \retval 0            on success
1925  * \retval negative     negated errno on error
1926  */
1927 int lod_qos_parse_config(const struct lu_env *env, struct lod_object *lo,
1928                          const struct lu_buf *buf)
1929 {
1930         struct lod_layout_component *lod_comp;
1931         struct lod_device       *d = lu2lod_dev(lod2lu_obj(lo)->lo_dev);
1932         struct lov_desc         *desc = &d->lod_desc;
1933         struct lov_user_md_v1   *v1 = NULL;
1934         struct lov_user_md_v3   *v3 = NULL;
1935         struct lov_comp_md_v1   *comp_v1 = NULL;
1936         char    def_pool[LOV_MAXPOOLNAME + 1];
1937         __u32   magic;
1938         __u16   comp_cnt;
1939         __u16   mirror_cnt;
1940         int     i, rc;
1941         ENTRY;
1942
1943         if (buf == NULL || buf->lb_buf == NULL || buf->lb_len == 0)
1944                 RETURN(0);
1945
1946         memset(def_pool, 0, sizeof(def_pool));
1947         if (lo->ldo_comp_entries != NULL)
1948                 lod_layout_get_pool(lo->ldo_comp_entries, lo->ldo_comp_cnt,
1949                                     def_pool, sizeof(def_pool));
1950
1951         /* free default striping info */
1952         lod_free_comp_entries(lo);
1953
1954         rc = lod_verify_striping(d, lo, buf, false);
1955         if (rc)
1956                 RETURN(-EINVAL);
1957
1958         v3 = buf->lb_buf;
1959         v1 = buf->lb_buf;
1960         comp_v1 = buf->lb_buf;
1961         magic = v1->lmm_magic;
1962
1963         if (unlikely(le32_to_cpu(magic) & LOV_MAGIC_DEFINED)) {
1964                 /* try to use as fully defined striping */
1965                 rc = lod_use_defined_striping(env, lo, buf);
1966                 RETURN(rc);
1967         }
1968
1969         switch (magic) {
1970         case __swab32(LOV_USER_MAGIC_V1):
1971                 lustre_swab_lov_user_md_v1(v1);
1972                 magic = v1->lmm_magic;
1973                 /* fall through */
1974         case LOV_USER_MAGIC_V1:
1975                 break;
1976         case __swab32(LOV_USER_MAGIC_V3):
1977                 lustre_swab_lov_user_md_v3(v3);
1978                 magic = v3->lmm_magic;
1979                 /* fall through */
1980         case LOV_USER_MAGIC_V3:
1981                 break;
1982         case __swab32(LOV_USER_MAGIC_SPECIFIC):
1983                 lustre_swab_lov_user_md_v3(v3);
1984                 lustre_swab_lov_user_md_objects(v3->lmm_objects,
1985                                                 v3->lmm_stripe_count);
1986                 magic = v3->lmm_magic;
1987                 /* fall through */
1988         case LOV_USER_MAGIC_SPECIFIC:
1989                 break;
1990         case __swab32(LOV_USER_MAGIC_COMP_V1):
1991                 lustre_swab_lov_comp_md_v1(comp_v1);
1992                 magic = comp_v1->lcm_magic;
1993                 /* fall through */
1994         case LOV_USER_MAGIC_COMP_V1:
1995                 break;
1996         default:
1997                 CERROR("%s: unrecognized magic %X\n",
1998                        lod2obd(d)->obd_name, magic);
1999                 RETURN(-EINVAL);
2000         }
2001
2002         lustre_print_user_md(D_OTHER, v1, "parse config");
2003
2004         if (magic == LOV_USER_MAGIC_COMP_V1) {
2005                 comp_cnt = comp_v1->lcm_entry_count;
2006                 if (comp_cnt == 0)
2007                         RETURN(-EINVAL);
2008                 mirror_cnt =  comp_v1->lcm_mirror_count + 1;
2009                 if (mirror_cnt > 1)
2010                         lo->ldo_flr_state = LCM_FL_RDONLY;
2011                 lo->ldo_is_composite = 1;
2012         } else {
2013                 comp_cnt = 1;
2014                 mirror_cnt = 0;
2015                 lo->ldo_is_composite = 0;
2016         }
2017
2018         rc = lod_alloc_comp_entries(lo, mirror_cnt, comp_cnt);
2019         if (rc)
2020                 RETURN(rc);
2021
2022         LASSERT(lo->ldo_comp_entries);
2023
2024         for (i = 0; i < comp_cnt; i++) {
2025                 struct pool_desc        *pool;
2026                 struct lu_extent        *ext;
2027                 char    *pool_name;
2028
2029                 lod_comp = &lo->ldo_comp_entries[i];
2030
2031                 if (lo->ldo_is_composite) {
2032                         v1 = (struct lov_user_md *)((char *)comp_v1 +
2033                                         comp_v1->lcm_entries[i].lcme_offset);
2034                         ext = &comp_v1->lcm_entries[i].lcme_extent;
2035                         lod_comp->llc_extent = *ext;
2036                         lod_comp->llc_flags =
2037                                 comp_v1->lcm_entries[i].lcme_flags &
2038                                         LCME_USER_FLAGS;
2039                 }
2040
2041                 pool_name = NULL;
2042                 if (v1->lmm_magic == LOV_USER_MAGIC_V3 ||
2043                     v1->lmm_magic == LOV_USER_MAGIC_SPECIFIC) {
2044                         int j;
2045
2046                         v3 = (struct lov_user_md_v3 *)v1;
2047                         if (v3->lmm_pool_name[0] != '\0')
2048                                 pool_name = v3->lmm_pool_name;
2049
2050                         if (v3->lmm_magic == LOV_USER_MAGIC_SPECIFIC) {
2051                                 if (v3->lmm_stripe_offset == LOV_OFFSET_DEFAULT)
2052                                         v3->lmm_stripe_offset =
2053                                                 v3->lmm_objects[0].l_ost_idx;
2054
2055                                 /* copy ost list from lmm */
2056                                 lod_comp->llc_ostlist.op_count =
2057                                         v3->lmm_stripe_count;
2058                                 lod_comp->llc_ostlist.op_size =
2059                                         v3->lmm_stripe_count * sizeof(__u32);
2060                                 OBD_ALLOC(lod_comp->llc_ostlist.op_array,
2061                                           lod_comp->llc_ostlist.op_size);
2062                                 if (!lod_comp->llc_ostlist.op_array)
2063                                         GOTO(free_comp, rc = -ENOMEM);
2064
2065                                 for (j = 0; j < v3->lmm_stripe_count; j++)
2066                                         lod_comp->llc_ostlist.op_array[j] =
2067                                                 v3->lmm_objects[j].l_ost_idx;
2068                         }
2069                 }
2070
2071                 if (pool_name == NULL && def_pool[0] != '\0')
2072                         pool_name = def_pool;
2073
2074                 if (v1->lmm_pattern == 0)
2075                         v1->lmm_pattern = LOV_PATTERN_RAID0;
2076                 if (lov_pattern(v1->lmm_pattern) != LOV_PATTERN_RAID0 &&
2077                     lov_pattern(v1->lmm_pattern) != LOV_PATTERN_MDT) {
2078                         CDEBUG(D_LAYOUT, "%s: invalid pattern: %x\n",
2079                                lod2obd(d)->obd_name, v1->lmm_pattern);
2080                         GOTO(free_comp, rc = -EINVAL);
2081                 }
2082
2083                 lod_comp->llc_pattern = v1->lmm_pattern;
2084                 lod_comp->llc_stripe_size = desc->ld_default_stripe_size;
2085                 if (v1->lmm_stripe_size)
2086                         lod_comp->llc_stripe_size = v1->lmm_stripe_size;
2087
2088                 lod_comp->llc_stripe_count = desc->ld_default_stripe_count;
2089                 if (v1->lmm_stripe_count ||
2090                     lov_pattern(v1->lmm_pattern) == LOV_PATTERN_MDT)
2091                         lod_comp->llc_stripe_count = v1->lmm_stripe_count;
2092
2093                 lod_comp->llc_stripe_offset = v1->lmm_stripe_offset;
2094                 lod_obj_set_pool(lo, i, pool_name);
2095
2096                 LASSERT(ergo(lov_pattern(lod_comp->llc_pattern) ==
2097                              LOV_PATTERN_MDT, lod_comp->llc_stripe_count == 0));
2098
2099                 if (pool_name == NULL)
2100                         continue;
2101
2102                 /* In the function below, .hs_keycmp resolves to
2103                  * pool_hashkey_keycmp() */
2104                 /* coverity[overrun-buffer-val] */
2105                 pool = lod_find_pool(d, pool_name);
2106                 if (pool == NULL)
2107                         continue;
2108
2109                 if (lod_comp->llc_stripe_offset != LOV_OFFSET_DEFAULT) {
2110                         rc = lod_check_index_in_pool(
2111                                         lod_comp->llc_stripe_offset, pool);
2112                         if (rc < 0) {
2113                                 lod_pool_putref(pool);
2114                                 CDEBUG(D_LAYOUT, "%s: invalid offset, %u\n",
2115                                        lod2obd(d)->obd_name,
2116                                        lod_comp->llc_stripe_offset);
2117                                 GOTO(free_comp, rc = -EINVAL);
2118                         }
2119                 }
2120
2121                 if (lod_comp->llc_stripe_count > pool_tgt_count(pool))
2122                         lod_comp->llc_stripe_count = pool_tgt_count(pool);
2123
2124                 lod_pool_putref(pool);
2125         }
2126
2127         RETURN(0);
2128
2129 free_comp:
2130         lod_free_comp_entries(lo);
2131         RETURN(rc);
2132 }
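/*
 * Illustrative sketch (not part of the original source): a caller that wants
 * specific OSTs passes a lov_user_md_v3 with LOV_USER_MAGIC_SPECIFIC and an
 * explicit object list; the parser above copies lmm_objects[].l_ost_idx into
 * llc_ostlist.  Roughly (allocation and layout of the trailing objects array
 * are up to the caller; the numeric values below are only examples):
 *
 *	struct lov_user_md_v3 *v3;	// buffer followed by lmm_objects[2]
 *
 *	v3->lmm_magic = LOV_USER_MAGIC_SPECIFIC;
 *	v3->lmm_pattern = LOV_PATTERN_RAID0;
 *	v3->lmm_stripe_count = 2;
 *	v3->lmm_stripe_size = 1048576;		// 1 MiB
 *	v3->lmm_stripe_offset = LOV_OFFSET_DEFAULT;
 *	v3->lmm_objects[0].l_ost_idx = 3;
 *	v3->lmm_objects[1].l_ost_idx = 7;
 */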
2133
2134 /**
2135  * prepare enough OST avoidance bitmap space
2136  */
2137 int lod_prepare_avoidance(const struct lu_env *env, struct lod_object *lo)
2138 {
2139         struct lod_device *lod = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev);
2140         struct lod_tgt_descs *ltds = &lod->lod_ost_descs;
2141         struct lod_avoid_guide *lag = &lod_env_info(env)->lti_avoid;
2142         struct cfs_bitmap *bitmap = NULL;
2143         __u32 *new_oss = NULL;
2144
2145         lag->lag_ost_avail = ltds->ltd_tgtnr;
2146
2147         /* reset OSS avoid guide array */
2148         lag->lag_oaa_count = 0;
2149         if (lag->lag_oss_avoid_array && lag->lag_oaa_size < ltds->ltd_tgtnr) {
2150                 OBD_FREE(lag->lag_oss_avoid_array,
2151                          sizeof(__u32) * lag->lag_oaa_size);
2152                 lag->lag_oss_avoid_array = NULL;
2153                 lag->lag_oaa_size = 0;
2154         }
2155
2156         /* init OST avoid guide bitmap */
2157         if (lag->lag_ost_avoid_bitmap) {
2158                 if (ltds->ltd_tgtnr <= lag->lag_ost_avoid_bitmap->size) {
2159                         CFS_RESET_BITMAP(lag->lag_ost_avoid_bitmap);
2160                 } else {
2161                         CFS_FREE_BITMAP(lag->lag_ost_avoid_bitmap);
2162                         lag->lag_ost_avoid_bitmap = NULL;
2163                 }
2164         }
2165
2166         if (!lag->lag_ost_avoid_bitmap) {
2167                 bitmap = CFS_ALLOCATE_BITMAP(ltds->ltd_tgtnr);
2168                 if (!bitmap)
2169                         return -ENOMEM;
2170         }
2171
2172         if (!lag->lag_oss_avoid_array) {
2173                 /**
2174                  * usually there are multiple OSTs in one OSS, but we don't
2175                  * know the exact OSS count, so we choose a safe upper bound
2176                  * and use the OST count to size the array that stores the
2177                  * OSS ids.
2178                  */
2179                 OBD_ALLOC(new_oss, sizeof(*new_oss) * ltds->ltd_tgtnr);
2180                 if (!new_oss) {
2181                         CFS_FREE_BITMAP(bitmap);
2182                         return -ENOMEM;
2183                 }
2184         }
2185
2186         if (new_oss) {
2187                 lag->lag_oss_avoid_array = new_oss;
2188                 lag->lag_oaa_size = ltds->ltd_tgtnr;
2189         }
2190         if (bitmap)
2191                 lag->lag_ost_avoid_bitmap = bitmap;
2192
2193         return 0;
2194 }
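/*
 * Note: the avoidance state in lti_avoid is sized to the current OST count
 * (ltd_tgtnr); the bitmap and the OSS id array are only reallocated above
 * when the target count has grown beyond what was allocated before.
 */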
2195
2196 /**
2197  * Collect information about the OSTs and OSSs used by the overlapping
2198  * components of other mirrors
2199  */
2200 void lod_collect_avoidance(struct lod_object *lo, struct lod_avoid_guide *lag,
2201                            int comp_idx)
2202 {
2203         struct lod_device *lod = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev);
2204         struct lod_layout_component *lod_comp = &lo->ldo_comp_entries[comp_idx];
2205         struct cfs_bitmap *bitmap = lag->lag_ost_avoid_bitmap;
2206         int i, j;
2207
2208         /* iterate mirrors */
2209         for (i = 0; i < lo->ldo_mirror_count; i++) {
2210                 struct lod_layout_component *comp;
2211
2212                 /**
2213                  * skip the mirror containing component[comp_idx]; we only
2214                  * collect OST info for the conflicting components in other
2215                  * mirrors, so that during a read, if the OSTs of one mirror's
2216                  * component are not available, we still have another mirror
2217                  * with different OSTs to read the data from.
2218                  */
2219                 comp = &lo->ldo_comp_entries[lo->ldo_mirrors[i].lme_start];
2220                 if (comp->llc_id == LCME_ID_INVAL ||
2221                     mirror_id_of(comp->llc_id) ==
2222                                                 mirror_id_of(lod_comp->llc_id))
2223                         continue;
2224
2225                 /* iterate components of a mirror */
2226                 lod_foreach_mirror_comp(comp, lo, i) {
2227                         /* skip non-overlapped or un-instantiated components */
2228                         if (!lu_extent_is_overlapped(&comp->llc_extent,
2229                                                      &lod_comp->llc_extent) ||
2230                             !lod_comp_inited(comp) || !comp->llc_stripe)
2231                                 continue;
2232
2233                         /**
2234                          * collect the used OST indices and OSS info from a
2235                          * component
2236                          */
2237                         for (j = 0; j < comp->llc_stripe_count; j++) {
2238                                 struct lod_tgt_desc *ost;
2239                                 struct lod_qos_oss *lqo;
2240                                 int k;
2241
2242                                 ost = OST_TGT(lod, comp->llc_ost_indices[j]);
2243                                 lqo = ost->ltd_qos.ltq_oss;
2244
2245                                 if (cfs_bitmap_check(bitmap, ost->ltd_index))
2246                                         continue;
2247
2248                                 cfs_bitmap_set(bitmap, ost->ltd_index);
2249                                 lag->lag_ost_avail--;
2250
2251                                 for (k = 0; k < lag->lag_oaa_count; k++) {
2252                                         if (lag->lag_oss_avoid_array[k] ==
2253                                             lqo->lqo_id)
2254                                                 break;
2255                                 }
2256                                 if (k == lag->lag_oaa_count) {
2257                                         lag->lag_oss_avoid_array[k] =
2258                                                                 lqo->lqo_id;
2259                                         lag->lag_oaa_count++;
2260                                 }
2261                         }
2262                 }
2263         }
2264 }
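/*
 * Typical calling sequence for FLR allocation, as used by
 * lod_qos_prep_create() below: lod_prepare_avoidance() sizes the guide,
 * lod_collect_avoidance() fills it from the other mirrors, and
 * lod_alloc_qos() then consults it through lod_should_avoid_ost().
 */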
2265
2266 /**
2267  * Create a striping for an object.
2268  *
2269  * The function creates a new striping for the object. The QoS algorithm is
2270  * tried first unless free space is distributed evenly among the OSTs, in which
2271  * case the RR algorithm is preferred for its better internal concurrency
2272  * (QoS is serialized). The caller must ensure no concurrent calls to the function
2273  * are made against the same object.
2274  *
2275  * \param[in] env       execution environment for this thread
2276  * \param[in] lo        LOD object
2277  * \param[in] attr      attributes OST objects will be declared with
2278  * \param[in] th        transaction handle
2279  * \param[in] comp_idx  index of ldo_comp_entries
2280  *
2281  * \retval 0            on success
2282  * \retval negative     negated errno on error
2283  */
2284 int lod_qos_prep_create(const struct lu_env *env, struct lod_object *lo,
2285                         struct lu_attr *attr, struct thandle *th,
2286                         int comp_idx)
2287 {
2288         struct lod_layout_component *lod_comp;
2289         struct lod_device      *d = lu2lod_dev(lod2lu_obj(lo)->lo_dev);
2290         int                     stripe_len;
2291         int                     flag = LOV_USES_ASSIGNED_STRIPE;
2292         int                     i, rc = 0;
2293         struct lod_avoid_guide *lag = &lod_env_info(env)->lti_avoid;
2294         struct dt_object **stripe = NULL;
2295         __u32 *ost_indices = NULL;
2296         ENTRY;
2297
2298         LASSERT(lo);
2299         LASSERT(lo->ldo_comp_cnt > comp_idx && lo->ldo_comp_entries != NULL);
2300         lod_comp = &lo->ldo_comp_entries[comp_idx];
2301
2302         /* A released component is being created */
2303         if (lod_comp->llc_pattern & LOV_PATTERN_F_RELEASED)
2304                 RETURN(0);
2305
2306         /* A Data-on-MDT component is being created */
2307         if (lov_pattern(lod_comp->llc_pattern) == LOV_PATTERN_MDT)
2308                 RETURN(0);
2309
2310         if (likely(lod_comp->llc_stripe == NULL)) {
2311                 /*
2312                  * no striping has been created so far
2313                  */
2314                 LASSERT(lod_comp->llc_stripe_count);
2315                 /*
2316                  * statfs and check OST targets now, since ld_active_tgt_count
2317                  * could be changed if some OSTs are [de]activated manually.
2318                  */
2319                 lod_qos_statfs_update(env, d);
2320                 stripe_len = lod_get_stripe_count(d, lo,
2321                                                   lod_comp->llc_stripe_count);
2322                 if (stripe_len == 0)
2323                         GOTO(out, rc = -ERANGE);
2324                 lod_comp->llc_stripe_count = stripe_len;
2325                 OBD_ALLOC(stripe, sizeof(stripe[0]) * stripe_len);
2326                 if (stripe == NULL)
2327                         GOTO(out, rc = -ENOMEM);
2328                 OBD_ALLOC(ost_indices, sizeof(*ost_indices) * stripe_len);
2329                 if (!ost_indices)
2330                         GOTO(out, rc = -ENOMEM);
2331
2332                 lod_getref(&d->lod_ost_descs);
2333                 /* XXX: support for non-0 files w/o objects */
2334                 CDEBUG(D_OTHER, "tgt_count %d stripe_count %d\n",
2335                                 d->lod_desc.ld_tgt_count, stripe_len);
2336
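                /* Allocation strategy: an explicit OST list wins; a default
                 * stripe offset uses the QoS allocator and falls back to
                 * round-robin on -EAGAIN; an explicit stripe offset uses
                 * lod_alloc_specific(). */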
2337                 if (lod_comp->llc_ostlist.op_array) {
2338                         rc = lod_alloc_ost_list(env, lo, stripe, ost_indices,
2339                                                 th, comp_idx);
2340                 } else if (lod_comp->llc_stripe_offset == LOV_OFFSET_DEFAULT) {
2341                         /**
2342                          * collect OSTs and OSSs used in other mirrors whose
2343                          * components overlap with ldo_comp_entries[comp_idx]
2344                          */
2345                         rc = lod_prepare_avoidance(env, lo);
2346                         if (rc)
2347                                 GOTO(put_ldts, rc);
2348
2349                         lod_collect_avoidance(lo, lag, comp_idx);
2350
2351                         rc = lod_alloc_qos(env, lo, stripe, ost_indices, flag,
2352                                            th, comp_idx);
2353                         if (rc == -EAGAIN)
2354                                 rc = lod_alloc_rr(env, lo, stripe, ost_indices,
2355                                                   flag, th, comp_idx);
2356                 } else {
2357                         rc = lod_alloc_specific(env, lo, stripe, ost_indices,
2358                                                 flag, th, comp_idx);
2359                 }
2360 put_ldts:
2361                 lod_putref(d, &d->lod_ost_descs);
2362                 if (rc < 0) {
2363                         for (i = 0; i < stripe_len; i++)
2364                                 if (stripe[i] != NULL)
2365                                         dt_object_put(env, stripe[i]);
2366                         lod_comp->llc_stripe_count = 0;
2367                 } else {
2368                         lod_comp->llc_stripe = stripe;
2369                         lod_comp->llc_ost_indices = ost_indices;
2370                         lod_comp->llc_stripes_allocated = stripe_len;
2371                 }
2372         } else {
2373                 /*
2374                  * lod_qos_parse_config() recognized the supplied buf as a
2375                  * predefined striping (not a hint), so it allocated all the
2376                  * objects already; now we need to create them
2377                  */
2378                 for (i = 0; i < lod_comp->llc_stripe_count; i++) {
2379                         struct dt_object  *o;
2380
2381                         o = lod_comp->llc_stripe[i];
2382                         LASSERT(o);
2383
2384                         rc = lod_sub_declare_create(env, o, attr, NULL,
2385                                                     NULL, th);
2386                         if (rc < 0) {
2387                                 CERROR("can't declare create: %d\n", rc);
2388                                 break;
2389                         }
2390                 }
2391                 /**
2392                  * Clear LCME_FL_INIT for the component so that
2393                  * lod_striping_create() can create the striping objects
2394                  * in replay.
2395                  */
2396                 lod_comp_unset_init(lod_comp);
2397         }
2398
2399 out:
2400         if (rc < 0) {
2401                 if (stripe)
2402                         OBD_FREE(stripe, sizeof(stripe[0]) * stripe_len);
2403                 if (ost_indices)
2404                         OBD_FREE(ost_indices,
2405                                  sizeof(*ost_indices) * stripe_len);
2406         }
2407         RETURN(rc);
2408 }
2409
2410 int lod_prepare_create(const struct lu_env *env, struct lod_object *lo,
2411                        struct lu_attr *attr, const struct lu_buf *buf,
2412                        struct thandle *th)
2413
2414 {
2415         struct lod_device *d = lu2lod_dev(lod2lu_obj(lo)->lo_dev);
2416         uint64_t size = 0;
2417         int i;
2418         int rc;
2419         ENTRY;
2420
2421         LASSERT(lo);
2422
2423         /* no OST available */
2424         /* XXX: should we be waiting a bit to prevent failures during
2425          * cluster initialization? */
2426         if (d->lod_ostnr == 0)
2427                 RETURN(-EIO);
2428
2429         /*
2430          * by this time, the object's ldo_stripe_count and ldo_stripe_size
2431          * contain the default values for striping, taken from the parent
2432          * or from the filesystem defaults
2433          *
2434          * in case the caller is passing a lovea with a new striping config,
2435          * we may need to parse the lovea and apply the new configuration
2436          */
2437         rc = lod_qos_parse_config(env, lo, buf);
2438         if (rc)
2439                 RETURN(rc);
2440
2441         if (attr->la_valid & LA_SIZE)
2442                 size = attr->la_size;
2443
2444         /**
2445          * prepare OST object creation for the components covering the file's
2446          * size; the 1st component (including the plain layout file case) is
2447          * always instantiated (see the example below).
2448          */
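        /*
         * Example: for a composite file with components covering [0, 1M) and
         * [1M, EOF), a create with la_size = 2M instantiates both components,
         * while la_size = 0 instantiates only the first one (e_start == 0).
         */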
2449         for (i = 0; i < lo->ldo_comp_cnt; i++) {
2450                 struct lod_layout_component *lod_comp;
2451                 struct lu_extent *extent;
2452
2453                 lod_comp = &lo->ldo_comp_entries[i];
2454                 extent = &lod_comp->llc_extent;
2455                 CDEBUG(D_QOS, "%lld [%lld, %lld)\n",
2456                        size, extent->e_start, extent->e_end);
2457                 if (!lo->ldo_is_composite || size >= extent->e_start) {
2458                         rc = lod_qos_prep_create(env, lo, attr, th, i);
2459                         if (rc)
2460                                 break;
2461                 }
2462         }
2463
2464         RETURN(rc);
2465 }