LU-9859 libcfs: delete libcfs/linux/libcfs.h
[fs/lustre-release.git] / lustre / lov / lov_object.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * Implementation of cl_object for LOV layer.
33  *
34  *   Author: Nikita Danilov <nikita.danilov@sun.com>
35  *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
36  */
37
38 #define DEBUG_SUBSYSTEM S_LOV
39
40 #include <linux/random.h>
41
42 #include "lov_cl_internal.h"
43
44 static inline struct lov_device *lov_object_dev(struct lov_object *obj)
45 {
46         return lu2lov_dev(obj->lo_cl.co_lu.lo_dev);
47 }
48
49 /** \addtogroup lov
50  *  @{
51  */
52
53 /*****************************************************************************
54  *
55  * Layout operations.
56  *
57  */
58
59 struct lov_layout_operations {
60         int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
61                         struct lov_object *lov, struct lov_stripe_md *lsm,
62                         const struct cl_object_conf *conf,
63                         union lov_layout_state *state);
64         int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
65                            union lov_layout_state *state);
66         void (*llo_fini)(const struct lu_env *env, struct lov_object *lov,
67                          union lov_layout_state *state);
68         int  (*llo_print)(const struct lu_env *env, void *cookie,
69                           lu_printer_t p, const struct lu_object *o);
70         int  (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
71                               struct cl_page *page, pgoff_t index);
72         int  (*llo_lock_init)(const struct lu_env *env,
73                               struct cl_object *obj, struct cl_lock *lock,
74                               const struct cl_io *io);
75         int  (*llo_io_init)(const struct lu_env *env,
76                             struct cl_object *obj, struct cl_io *io);
77         int  (*llo_getattr)(const struct lu_env *env, struct cl_object *obj,
78                             struct cl_attr *attr);
79 };
80
81 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
82
83 static void lov_lsm_put(struct lov_stripe_md *lsm)
84 {
85         if (lsm != NULL)
86                 lov_free_memmd(&lsm);
87 }
88
89 /*****************************************************************************
90  *
91  * Lov object layout operations.
92  *
93  */
94
95 static struct cl_object *lov_sub_find(const struct lu_env *env,
96                                       struct cl_device *dev,
97                                       const struct lu_fid *fid,
98                                       const struct cl_object_conf *conf)
99 {
100         struct lu_object *o;
101
102         ENTRY;
103
104         o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
105         LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
106         RETURN(lu2cl(o));
107 }
108
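/*
 * Adjust per-page slice bookkeeping for a stripe sub-object.
 *
 * When \a stripe is NULL, return the page-slice space still available in the
 * top object's page buffer after the lov_page slice itself.  Otherwise shift
 * every slice offset of \a stripe by the parent's coh_page_bufsize (so that
 * sub-object slices land after the parent's slices) and return the stripe's
 * own coh_page_bufsize.
 */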
109 static int lov_page_slice_fixup(struct lov_object *lov,
110                                 struct cl_object *stripe)
111 {
112         struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
113         struct cl_object *o;
114
115         if (stripe == NULL)
116                 return hdr->coh_page_bufsize - lov->lo_cl.co_slice_off -
117                        cfs_size_round(sizeof(struct lov_page));
118
119         cl_object_for_each(o, stripe)
120                 o->co_slice_off += hdr->coh_page_bufsize;
121
122         return cl_object_header(stripe)->coh_page_bufsize;
123 }
124
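/*
 * Attach a freshly found stripe sub-object to its parent LOV object: record
 * the parent cl_object_header in the sub-object header (protected by
 * ->coh_attr_guard) and remember the composite entry/stripe index.  If the
 * sub-object is already owned by another LOV object, return -EAGAIN when the
 * owner's layout is known to be stale, or -EIO otherwise.
 */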
125 static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
126                         struct cl_object *subobj, struct lov_oinfo *oinfo,
127                         int idx)
128 {
129         struct cl_object_header *hdr;
130         struct cl_object_header *subhdr;
131         struct cl_object_header *parent;
132         int entry = lov_comp_entry(idx);
133         int stripe = lov_comp_stripe(idx);
134         int result;
135
136         if (OBD_FAIL_CHECK(OBD_FAIL_LOV_INIT)) {
137                 /* For sanity:test_206.
138                  * Do not leave the object in cache to avoid accessing
139                  * freed memory. This is because osc_object is referring to
140                  * lov_oinfo of lsm_stripe_data which will be freed due to
141                  * this failure. */
142                 cl_object_kill(env, subobj);
143                 cl_object_put(env, subobj);
144                 return -EIO;
145         }
146
147         hdr = cl_object_header(lov2cl(lov));
148         subhdr = cl_object_header(subobj);
149
150         CDEBUG(D_INODE, DFID"@%p[%d:%d] -> "DFID"@%p: ostid: "DOSTID
151                " ost idx: %d gen: %d\n",
152                PFID(lu_object_fid(&subobj->co_lu)), subhdr, entry, stripe,
153                PFID(lu_object_fid(lov2lu(lov))), hdr, POSTID(&oinfo->loi_oi),
154                oinfo->loi_ost_idx, oinfo->loi_ost_gen);
155
156         /* reuse ->coh_attr_guard to protect coh_parent change */
157         spin_lock(&subhdr->coh_attr_guard);
158         parent = subhdr->coh_parent;
159         if (parent == NULL) {
160                 struct lovsub_object *lso = cl2lovsub(subobj);
161
162                 subhdr->coh_parent = hdr;
163                 spin_unlock(&subhdr->coh_attr_guard);
164                 subhdr->coh_nesting = hdr->coh_nesting + 1;
165                 lu_object_ref_add(&subobj->co_lu, "lov-parent", lov);
166                 lso->lso_super = lov;
167                 lso->lso_index = idx;
168                 result = 0;
169         } else {
170                 struct lu_object  *old_obj;
171                 struct lov_object *old_lov;
172                 unsigned int mask = D_INODE;
173
174                 spin_unlock(&subhdr->coh_attr_guard);
175                 old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
176                 LASSERT(old_obj != NULL);
177                 old_lov = cl2lov(lu2cl(old_obj));
178                 if (old_lov->lo_layout_invalid) {
179                         /* the object's layout has already changed but isn't
180                          * refreshed */
181                         lu_object_unhash(env, &subobj->co_lu);
182                         result = -EAGAIN;
183                 } else {
184                         mask = D_ERROR;
185                         result = -EIO;
186                 }
187
188                 LU_OBJECT_DEBUG(mask, env, &subobj->co_lu,
189                                 "stripe %d is already owned.", idx);
190                 LU_OBJECT_DEBUG(mask, env, old_obj, "owned.");
191                 LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n");
192                 cl_object_put(env, subobj);
193         }
194         return result;
195 }
196
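/*
 * Instantiate the stripe sub-objects of a plain RAID0 layout entry and hook
 * them into ->lo_sub[].  A stripe that turns out to be owned by a stale
 * layout (-EAGAIN from lov_init_sub()) is simply retried.  On success the
 * size of the per-page slice consumed by the stripes is returned, so that
 * the caller can grow coh_page_bufsize accordingly.
 */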
197 static int lov_init_raid0(const struct lu_env *env, struct lov_device *dev,
198                           struct lov_object *lov, unsigned int index,
199                           const struct cl_object_conf *conf,
200                           struct lov_layout_entry *lle)
201 {
202         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
203         struct lov_thread_info *lti = lov_env_info(env);
204         struct cl_object_conf *subconf = &lti->lti_stripe_conf;
205         struct lu_fid *ofid = &lti->lti_fid;
206         struct cl_object *stripe;
207         struct lov_stripe_md_entry *lse  = lov_lse(lov, index);
208         int result;
209         int psz, sz;
210         int i;
211
212         ENTRY;
213
214         spin_lock_init(&r0->lo_sub_lock);
215         r0->lo_nr = lse->lsme_stripe_count;
216         LASSERT(r0->lo_nr <= lov_targets_nr(dev));
217
218         OBD_ALLOC_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
219         if (r0->lo_sub == NULL)
220                 GOTO(out, result = -ENOMEM);
221
222         psz = 0;
223         result = 0;
224         memset(subconf, 0, sizeof(*subconf));
225
226         /*
227          * Create stripe cl_objects.
228          */
229         for (i = 0; i < r0->lo_nr; ++i) {
230                 struct cl_device *subdev;
231                 struct lov_oinfo *oinfo = lse->lsme_oinfo[i];
232                 int ost_idx = oinfo->loi_ost_idx;
233
234                 if (lov_oinfo_is_dummy(oinfo))
235                         continue;
236
237                 result = ostid_to_fid(ofid, &oinfo->loi_oi, oinfo->loi_ost_idx);
238                 if (result != 0)
239                         GOTO(out, result);
240
241                 if (dev->ld_target[ost_idx] == NULL) {
242                         CERROR("%s: OST %04x is not initialized\n",
243                                lov2obd(dev->ld_lov)->obd_name, ost_idx);
244                         GOTO(out, result = -EIO);
245                 }
246
247                 subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
248                 subconf->u.coc_oinfo = oinfo;
249                 LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
250                 /* In the function below, .hs_keycmp resolves to
251                  * lu_obj_hop_keycmp() */
252                 /* coverity[overrun-buffer-val] */
253                 stripe = lov_sub_find(env, subdev, ofid, subconf);
254                 if (IS_ERR(stripe))
255                         GOTO(out, result = PTR_ERR(stripe));
256
257                 result = lov_init_sub(env, lov, stripe, oinfo,
258                                       lov_comp_index(index, i));
259                 if (result == -EAGAIN) { /* try again */
260                         --i;
261                         result = 0;
262                         continue;
263                 }
264
265                 if (result == 0) {
266                         r0->lo_sub[i] = cl2lovsub(stripe);
267
268                         sz = lov_page_slice_fixup(lov, stripe);
269                         LASSERT(ergo(psz > 0, psz == sz));
270                         psz = sz;
271                 }
272         }
273         if (result == 0)
274                 result = psz;
275 out:
276         RETURN(result);
277 }
278
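/*
 * Tear down a single stripe sub-object: kill it, drop the parent's reference
 * and then wait on the site's "marche funebre" wait queue until
 * lovsub_object_free() has cleared the corresponding ->lo_sub[] slot, so the
 * stripe is guaranteed to be gone before the slot is reused.
 */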
279 static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
280                                struct lov_layout_raid0 *r0,
281                                struct lovsub_object *los, int idx)
282 {
283         struct cl_object        *sub;
284         struct lu_site          *site;
285         struct lu_site_bkt_data *bkt;
286         wait_queue_t          *waiter;
287
288         LASSERT(r0->lo_sub[idx] == los);
289
290         sub  = lovsub2cl(los);
291         site = sub->co_lu.lo_dev->ld_site;
292         bkt  = lu_site_bkt_from_fid(site, &sub->co_lu.lo_header->loh_fid);
293
294         cl_object_kill(env, sub);
295         /* release a reference to the sub-object and ... */
296         lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
297         cl_object_put(env, sub);
298
299         /* ... wait until it is actually destroyed---sub-object clears its
300          * ->lo_sub[] slot in lovsub_object_free() */
301         if (r0->lo_sub[idx] == los) {
302                 waiter = &lov_env_info(env)->lti_waiter;
303                 init_waitqueue_entry(waiter, current);
304                 add_wait_queue(&bkt->lsb_marche_funebre, waiter);
305                 set_current_state(TASK_UNINTERRUPTIBLE);
306                 while (1) {
307                         /* this wait-queue is signaled at the end of
308                          * lu_object_free(). */
309                         set_current_state(TASK_UNINTERRUPTIBLE);
310                         spin_lock(&r0->lo_sub_lock);
311                         if (r0->lo_sub[idx] == los) {
312                                 spin_unlock(&r0->lo_sub_lock);
313                                 schedule();
314                         } else {
315                                 spin_unlock(&r0->lo_sub_lock);
316                                 set_current_state(TASK_RUNNING);
317                                 break;
318                         }
319                 }
320                 remove_wait_queue(&bkt->lsb_marche_funebre, waiter);
321         }
322         LASSERT(r0->lo_sub[idx] == NULL);
323 }
324
325 static void lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
326                              struct lov_layout_entry *lle)
327 {
328         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
329
330         ENTRY;
331
332         if (r0->lo_sub != NULL) {
333                 int i;
334
335                 for (i = 0; i < r0->lo_nr; ++i) {
336                         struct lovsub_object *los = r0->lo_sub[i];
337
338                         if (los != NULL) {
339                                 cl_object_prune(env, &los->lso_cl);
340                                 /*
341                                  * If the top-level object is to be evicted from
342                                  * the cache, so are its sub-objects.
343                                  */
344                                 lov_subobject_kill(env, lov, r0, los, i);
345                         }
346                 }
347         }
348
349         EXIT;
350 }
351
352 static void lov_fini_raid0(const struct lu_env *env,
353                            struct lov_layout_entry *lle)
354 {
355         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
356
357         if (r0->lo_sub != NULL) {
358                 OBD_FREE_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
359                 r0->lo_sub = NULL;
360         }
361 }
362
363 static int lov_print_raid0(const struct lu_env *env, void *cookie,
364                            lu_printer_t p, const struct lov_layout_entry *lle)
365 {
366         const struct lov_layout_raid0 *r0 = &lle->lle_raid0;
367         int i;
368
369         for (i = 0; i < r0->lo_nr; ++i) {
370                 struct lu_object *sub;
371
372                 if (r0->lo_sub[i] != NULL) {
373                         sub = lovsub2lu(r0->lo_sub[i]);
374                         lu_object_print(env, cookie, p, sub);
375                 } else {
376                         (*p)(env, cookie, "sub %d absent\n", i);
377                 }
378         }
379         return 0;
380 }
381
382 static int lov_attr_get_raid0(const struct lu_env *env, struct lov_object *lov,
383                               unsigned int index, struct lov_layout_entry *lle,
384                               struct cl_attr **lov_attr)
385 {
386         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
387         struct lov_stripe_md *lsm = lov->lo_lsm;
388         struct ost_lvb *lvb = &lov_env_info(env)->lti_lvb;
389         struct cl_attr *attr = &r0->lo_attr;
390         __u64 kms = 0;
391         int result = 0;
392
393         if (r0->lo_attr_valid) {
394                 *lov_attr = attr;
395                 return 0;
396         }
397
398         memset(lvb, 0, sizeof(*lvb));
399
400         /* XXX: timestamps can be negative per sanity:test_39m;
401          * how can that be? */
402         lvb->lvb_atime = LLONG_MIN;
403         lvb->lvb_ctime = LLONG_MIN;
404         lvb->lvb_mtime = LLONG_MIN;
405
406         /*
407          * XXX that should be replaced with a loop over sub-objects,
408          * doing cl_object_attr_get() on them. But for now, let's
409          * reuse old lov code.
410          */
411
412         /*
413          * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
414          * happy. It's not needed, because new code uses
415          * ->coh_attr_guard spin-lock to protect consistency of
416          * sub-object attributes.
417          */
418         lov_stripe_lock(lsm);
419         result = lov_merge_lvb_kms(lsm, index, lvb, &kms);
420         lov_stripe_unlock(lsm);
421         if (result == 0) {
422                 cl_lvb2attr(attr, lvb);
423                 attr->cat_kms = kms;
424                 r0->lo_attr_valid = 1;
425                 *lov_attr = attr;
426         }
427
428         return result;
429 }
430
431 static struct lov_comp_layout_entry_ops raid0_ops = {
432         .lco_init      = lov_init_raid0,
433         .lco_fini      = lov_fini_raid0,
434         .lco_getattr   = lov_attr_get_raid0,
435 };
436
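/*
 * lco_getattr implementation for a Data-on-MDT layout entry.  Attributes are
 * taken from the lov_oinfo lvb cached for the MDT sub-object; the size is
 * clamped to the component end because a DoM component can temporarily look
 * bigger than its stripe after a client setattr RPC (see the comment below).
 */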
437 static int lov_attr_get_dom(const struct lu_env *env, struct lov_object *lov,
438                             unsigned int index, struct lov_layout_entry *lle,
439                             struct cl_attr **lov_attr)
440 {
441         struct lov_layout_dom *dom = &lle->lle_dom;
442         struct lov_oinfo *loi = dom->lo_loi;
443         struct cl_attr *attr = &dom->lo_dom_r0.lo_attr;
444
445         if (dom->lo_dom_r0.lo_attr_valid) {
446                 *lov_attr = attr;
447                 return 0;
448         }
449
450         if (OST_LVB_IS_ERR(loi->loi_lvb.lvb_blocks))
451                 return OST_LVB_GET_ERR(loi->loi_lvb.lvb_blocks);
452
453         cl_lvb2attr(attr, &loi->loi_lvb);
454
455         /* DoM component size can be bigger than stripe size after
456          * client's setattr RPC, so do not count anything beyond
457          * component end. Alternatively, check that limit on server
458          * and do not allow size overflow there. */
459         if (attr->cat_size > lle->lle_extent->e_end)
460                 attr->cat_size = lle->lle_extent->e_end;
461
462         attr->cat_kms = attr->cat_size;
463
464         dom->lo_dom_r0.lo_attr_valid = 1;
465         *lov_attr = attr;
466
467         return 0;
468 }
469
470 /**
471  * Lookup FLD to get MDS index of the given DOM object FID.
472  *
473  * \param[in]  ld       LOV device
474  * \param[in]  fid      FID to lookup
475  * \param[out] nr       index in the MDC array of the matching target
476  *
477  * \retval              0 and \a nr filled with the MDC array index if successful
478  * \retval              negative value on error
479  */
480 static int lov_fld_lookup(struct lov_device *ld, const struct lu_fid *fid,
481                           __u32 *nr)
482 {
483         __u32 mds_idx;
484         int i, rc;
485
486         ENTRY;
487
488         rc = fld_client_lookup(&ld->ld_lmv->u.lmv.lmv_fld, fid_seq(fid),
489                                &mds_idx, LU_SEQ_RANGE_MDT, NULL);
490         if (rc) {
491                 CERROR("%s: error while looking for mds number. Seq %#llx"
492                        ", err = %d\n", lu_dev_name(cl2lu_dev(&ld->ld_cl)),
493                        fid_seq(fid), rc);
494                 RETURN(rc);
495         }
496
497         CDEBUG(D_INODE, "FLD lookup got mds #%x for fid="DFID"\n",
498                mds_idx, PFID(fid));
499
500         /* find proper MDC device in the array */
501         for (i = 0; i < ld->ld_md_tgts_nr; i++) {
502                 if (ld->ld_md_tgts[i].ldm_mdc != NULL &&
503                     ld->ld_md_tgts[i].ldm_idx == mds_idx)
504                         break;
505         }
506
507         if (i == ld->ld_md_tgts_nr) {
508                 CERROR("%s: cannot find corresponding MDC device for mds #%x "
509                        "for fid="DFID"\n", lu_dev_name(cl2lu_dev(&ld->ld_cl)),
510                        mds_idx, PFID(fid));
511                 rc = -EINVAL;
512         } else {
513                 *nr = i;
514         }
515         RETURN(rc);
516 }
517
518 /**
519  * Implementation of lov_comp_layout_entry_ops::lco_init for DOM object.
520  *
521  * Init the DOM object for the first time. It also prepares a RAID0 entry
522  * for it so that common methods can be shared with ordinary RAID0 entries.
523  *
524  * \param[in] env       execution environment
525  * \param[in] dev       LOV device
526  * \param[in] lov       LOV object
527  * \param[in] index     Composite layout entry index in LSM
528  * \param[in] lle       Composite LOV layout entry
529  */
530 static int lov_init_dom(const struct lu_env *env, struct lov_device *dev,
531                         struct lov_object *lov, unsigned int index,
532                         const struct cl_object_conf *conf,
533                         struct lov_layout_entry *lle)
534 {
535         struct lov_thread_info *lti = lov_env_info(env);
536         struct lov_stripe_md_entry *lsme = lov_lse(lov, index);
537         struct cl_object *clo;
538         struct lu_object *o = lov2lu(lov);
539         const struct lu_fid *fid = lu_object_fid(o);
540         struct cl_device *mdcdev;
541         struct lov_oinfo *loi = NULL;
542         struct cl_object_conf *sconf = &lti->lti_stripe_conf;
543
544         int rc;
545         __u32 idx = 0;
546
547         ENTRY;
548
549         LASSERT(index == 0);
550
551         /* find proper MDS device */
552         rc = lov_fld_lookup(dev, fid, &idx);
553         if (rc)
554                 RETURN(rc);
555
556         LASSERTF(dev->ld_md_tgts[idx].ldm_mdc != NULL,
557                  "LOV md target[%u] is NULL\n", idx);
558
559         /* check lsm is DOM, more checks are needed */
560         LASSERT(lsme->lsme_stripe_count == 0);
561
562         /*
563          * Create lower cl_objects.
564          */
565         mdcdev = dev->ld_md_tgts[idx].ldm_mdc;
566
567         LASSERTF(mdcdev != NULL, "non-initialized mdc subdev\n");
568
569         /* DoM object has no oinfo in LSM entry, create it exclusively */
570         OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, GFP_NOFS);
571         if (loi == NULL)
572                 RETURN(-ENOMEM);
573
574         fid_to_ostid(lu_object_fid(lov2lu(lov)), &loi->loi_oi);
575
576         sconf->u.coc_oinfo = loi;
577 again:
578         clo = lov_sub_find(env, mdcdev, fid, sconf);
579         if (IS_ERR(clo))
580                 GOTO(out, rc = PTR_ERR(clo));
581
582         rc = lov_init_sub(env, lov, clo, loi, lov_comp_index(index, 0));
583         if (rc == -EAGAIN) /* try again */
584                 goto again;
585         else if (rc != 0)
586                 GOTO(out, rc);
587
588         lle->lle_dom.lo_dom = cl2lovsub(clo);
589         spin_lock_init(&lle->lle_dom.lo_dom_r0.lo_sub_lock);
590         lle->lle_dom.lo_dom_r0.lo_nr = 1;
591         lle->lle_dom.lo_dom_r0.lo_sub = &lle->lle_dom.lo_dom;
592         lle->lle_dom.lo_loi = loi;
593
594         rc = lov_page_slice_fixup(lov, clo);
595         RETURN(rc);
596
597 out:
598         if (loi != NULL)
599                 OBD_SLAB_FREE_PTR(loi, lov_oinfo_slab);
600         return rc;
601 }
602
603 /**
604  * Implementation of lov_comp_layout_entry_ops::lco_fini for the DOM object.
605  *
606  * Finish the DOM layout entry and free related memory.
607  *
608  * \param[in] env       execution environment
609  * \param[in] lle       composite LOV layout entry holding the DOM
610  *                      sub-object and its lov_oinfo
611  */
612 static void lov_fini_dom(const struct lu_env *env,
613                          struct lov_layout_entry *lle)
614 {
615         if (lle->lle_dom.lo_dom != NULL)
616                 lle->lle_dom.lo_dom = NULL;
617         if (lle->lle_dom.lo_loi != NULL)
618                 OBD_SLAB_FREE_PTR(lle->lle_dom.lo_loi, lov_oinfo_slab);
619 }
620
621 static struct lov_comp_layout_entry_ops dom_ops = {
622         .lco_init = lov_init_dom,
623         .lco_fini = lov_fini_dom,
624         .lco_getattr = lov_attr_get_dom,
625 };
626
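/*
 * llo_init implementation for composite (LLT_COMP) layouts.  It builds the
 * lo_entries and lo_mirrors arrays from the LSM, dispatches every initialized
 * entry to its lco_init method (RAID0 or DOM), sanity-checks the FLR mirror
 * count and finally picks the preferred mirror.
 */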
627 static int lov_init_composite(const struct lu_env *env, struct lov_device *dev,
628                               struct lov_object *lov, struct lov_stripe_md *lsm,
629                               const struct cl_object_conf *conf,
630                               union lov_layout_state *state)
631 {
632         struct lov_layout_composite *comp = &state->composite;
633         struct lov_layout_entry *lle;
634         struct lov_mirror_entry *lre;
635         unsigned int entry_count;
636         unsigned int psz = 0;
637         unsigned int mirror_count;
638         int flr_state = lsm->lsm_flags & LCM_FL_FLR_MASK;
639         int result = 0;
640         int i, j;
641
642         ENTRY;
643
644         LASSERT(lsm->lsm_entry_count > 0);
645         LASSERT(lov->lo_lsm == NULL);
646         lov->lo_lsm = lsm_addref(lsm);
647         lov->lo_layout_invalid = true;
648
649         dump_lsm(D_INODE, lsm);
650
651         entry_count = lsm->lsm_entry_count;
652
653         spin_lock_init(&comp->lo_write_lock);
654         comp->lo_flags = lsm->lsm_flags;
655         comp->lo_mirror_count = lsm->lsm_mirror_count + 1;
656         comp->lo_entry_count = lsm->lsm_entry_count;
657         comp->lo_preferred_mirror = -1;
658
659         if (equi(flr_state == LCM_FL_NOT_FLR, comp->lo_mirror_count > 1))
660                 RETURN(-EINVAL);
661
662         OBD_ALLOC(comp->lo_mirrors,
663                   comp->lo_mirror_count * sizeof(*comp->lo_mirrors));
664         if (comp->lo_mirrors == NULL)
665                 RETURN(-ENOMEM);
666
667         OBD_ALLOC(comp->lo_entries, entry_count * sizeof(*comp->lo_entries));
668         if (comp->lo_entries == NULL)
669                 RETURN(-ENOMEM);
670
671         /* Initialize all entry types and extent data first */
672         for (i = 0, j = 0, mirror_count = 1; i < entry_count; i++) {
673                 int mirror_id = 0;
674
675                 lle = &comp->lo_entries[i];
676
677                 lle->lle_lsme = lsm->lsm_entries[i];
678                 lle->lle_type = lov_entry_type(lle->lle_lsme);
679                 switch (lle->lle_type) {
680                 case LOV_PATTERN_RAID0:
681                         lle->lle_comp_ops = &raid0_ops;
682                         break;
683                 case LOV_PATTERN_MDT:
684                         lle->lle_comp_ops = &dom_ops;
685                         break;
686                 default:
687                         CERROR("%s: unknown composite layout entry type %i\n",
688                                lov2obd(dev->ld_lov)->obd_name,
689                                lsm->lsm_entries[i]->lsme_pattern);
690                         dump_lsm(D_ERROR, lsm);
691                         RETURN(-EIO);
692                 }
693
694                 lle->lle_extent = &lle->lle_lsme->lsme_extent;
695                 lle->lle_valid = !(lle->lle_lsme->lsme_flags & LCME_FL_STALE);
696
697                 if (flr_state != LCM_FL_NOT_FLR)
698                         mirror_id = mirror_id_of(lle->lle_lsme->lsme_id);
699
700                 lre = &comp->lo_mirrors[j];
701                 if (i > 0) {
702                         if (mirror_id == lre->lre_mirror_id) {
703                                 lre->lre_valid |= lle->lle_valid;
704                                 lre->lre_stale |= !lle->lle_valid;
705                                 lre->lre_end = i;
706                                 continue;
707                         }
708
709                         /* new mirror detected, assume that the mirrors
710                          * are sorted in layout */
711                         ++mirror_count;
712                         ++j;
713                         if (j >= comp->lo_mirror_count)
714                                 break;
715
716                         lre = &comp->lo_mirrors[j];
717                 }
718
719                 /* entries must be sorted by mirrors */
720                 lre->lre_mirror_id = mirror_id;
721                 lre->lre_start = lre->lre_end = i;
722                 lre->lre_preferred = (lle->lle_lsme->lsme_flags &
723                                         LCME_FL_PREFERRED);
724                 lre->lre_valid = lle->lle_valid;
725                 lre->lre_stale = !lle->lle_valid;
726         }
727
728         /* sanity check for FLR */
729         if (mirror_count != comp->lo_mirror_count) {
730                 CDEBUG(D_INODE, DFID
731                        " doesn't have the # of mirrors it claims, %u/%u\n",
732                        PFID(lu_object_fid(lov2lu(lov))), mirror_count,
733                        comp->lo_mirror_count + 1);
734
735                 GOTO(out, result = -EINVAL);
736         }
737
738         lov_foreach_layout_entry(lov, lle) {
739                 int index = lov_layout_entry_index(lov, lle);
740
741                 /*
742                  * If the component has not been initialized on the MDS side,
743                  * for a PFL layout the components beyond this one will be
744                  * dynamically initialized later on file write/trunc ops.
745                  */
746                 if (!lsme_inited(lle->lle_lsme))
747                         continue;
748
749                 result = lle->lle_comp_ops->lco_init(env, dev, lov, index,
750                                                      conf, lle);
751                 if (result < 0)
752                         break;
753
754                 LASSERT(ergo(psz > 0, psz == result));
755                 psz = result;
756         }
757
758         if (psz > 0)
759                 cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;
760
761         /* decide the preferred mirror */
762         mirror_count = 0, i = 0;
763         lov_foreach_mirror_entry(lov, lre) {
764                 i++;
765                 if (lre->lre_stale)
766                         continue;
767
768                 mirror_count++; /* valid mirror */
769
770                 if (lre->lre_preferred || comp->lo_preferred_mirror < 0)
771                         comp->lo_preferred_mirror = i - 1;
772         }
773         if (mirror_count == 0) {
774                 CDEBUG(D_INODE, DFID
775                        " doesn't have any valid mirrors\n",
776                        PFID(lu_object_fid(lov2lu(lov))));
777
778                 GOTO(out, result = -EINVAL);
779         }
780
781         if (OBD_FAIL_CHECK(OBD_FAIL_FLR_RANDOM_PICK_MIRROR)) {
782                 unsigned int seq;
783
784                 get_random_bytes(&seq, sizeof(seq));
785                 seq %= mirror_count;
786
787                 i = 0;
788                 lov_foreach_mirror_entry(lov, lre) {
789                         i++;
790                         if (lre->lre_stale)
791                                 continue;
792
793                         if (!seq--) {
794                                 comp->lo_preferred_mirror = i - 1;
795                                 break;
796                         }
797                 }
798         }
799
800         LASSERT(comp->lo_preferred_mirror >= 0);
801
802         EXIT;
803 out:
804         return result > 0 ? 0 : result;
805 }
806
807 static int lov_init_empty(const struct lu_env *env, struct lov_device *dev,
808                           struct lov_object *lov, struct lov_stripe_md *lsm,
809                           const struct cl_object_conf *conf,
810                           union lov_layout_state *state)
811 {
812         return 0;
813 }
814
815 static int lov_init_released(const struct lu_env *env,
816                              struct lov_device *dev, struct lov_object *lov,
817                              struct lov_stripe_md *lsm,
818                              const struct cl_object_conf *conf,
819                              union lov_layout_state *state)
820 {
821         LASSERT(lsm != NULL);
822         LASSERT(lsm->lsm_is_released);
823         LASSERT(lov->lo_lsm == NULL);
824
825         lov->lo_lsm = lsm_addref(lsm);
826         return 0;
827 }
828
829 static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
830                             union lov_layout_state *state)
831 {
832         LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
833
834         lov_layout_wait(env, lov);
835         return 0;
836 }
837
838 static int lov_delete_composite(const struct lu_env *env,
839                                 struct lov_object *lov,
840                                 union lov_layout_state *state)
841 {
842         struct lov_layout_entry *entry;
843         struct lov_layout_composite *comp = &state->composite;
844
845         ENTRY;
846
847         dump_lsm(D_INODE, lov->lo_lsm);
848
849         lov_layout_wait(env, lov);
850         if (comp->lo_entries)
851                 lov_foreach_layout_entry(lov, entry)
852                         lov_delete_raid0(env, lov, entry);
853
854         RETURN(0);
855 }
856
857 static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov,
858                            union lov_layout_state *state)
859 {
860         LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
861 }
862
863 static void lov_fini_composite(const struct lu_env *env,
864                                struct lov_object *lov,
865                                union lov_layout_state *state)
866 {
867         struct lov_layout_composite *comp = &state->composite;
868         ENTRY;
869
870         if (comp->lo_entries != NULL) {
871                 struct lov_layout_entry *entry;
872
873                 lov_foreach_layout_entry(lov, entry)
874                         entry->lle_comp_ops->lco_fini(env, entry);
875
876                 OBD_FREE(comp->lo_entries,
877                          comp->lo_entry_count * sizeof(*comp->lo_entries));
878                 comp->lo_entries = NULL;
879         }
880
881         if (comp->lo_mirrors != NULL) {
882                 OBD_FREE(comp->lo_mirrors,
883                          comp->lo_mirror_count * sizeof(*comp->lo_mirrors));
884                 comp->lo_mirrors = NULL;
885         }
886
887         memset(comp, 0, sizeof(*comp));
888
889         dump_lsm(D_INODE, lov->lo_lsm);
890         lov_free_memmd(&lov->lo_lsm);
891
892         EXIT;
893 }
894
895 static void lov_fini_released(const struct lu_env *env, struct lov_object *lov,
896                                 union lov_layout_state *state)
897 {
898         ENTRY;
899         dump_lsm(D_INODE, lov->lo_lsm);
900         lov_free_memmd(&lov->lo_lsm);
901         EXIT;
902 }
903
904 static int lov_print_empty(const struct lu_env *env, void *cookie,
905                            lu_printer_t p, const struct lu_object *o)
906 {
907         (*p)(env, cookie, "empty %d\n", lu2lov(o)->lo_layout_invalid);
908         return 0;
909 }
910
911 static int lov_print_composite(const struct lu_env *env, void *cookie,
912                                lu_printer_t p, const struct lu_object *o)
913 {
914         struct lov_object *lov = lu2lov(o);
915         struct lov_stripe_md *lsm = lov->lo_lsm;
916         int i;
917
918         (*p)(env, cookie, "entries: %d, %s, lsm{%p 0x%08X %d %u}:\n",
919              lsm->lsm_entry_count,
920              lov->lo_layout_invalid ? "invalid" : "valid", lsm,
921              lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
922              lsm->lsm_layout_gen);
923
924         for (i = 0; i < lsm->lsm_entry_count; i++) {
925                 struct lov_stripe_md_entry *lse = lsm->lsm_entries[i];
926                 struct lov_layout_entry *lle = lov_entry(lov, i);
927
928                 (*p)(env, cookie,
929                      DEXT ": { 0x%08X, %u, %#x, %u, %#x, %u, %u }\n",
930                      PEXT(&lse->lsme_extent), lse->lsme_magic,
931                      lse->lsme_id, lse->lsme_pattern, lse->lsme_layout_gen,
932                      lse->lsme_flags, lse->lsme_stripe_count,
933                      lse->lsme_stripe_size);
934                 lov_print_raid0(env, cookie, p, lle);
935         }
936
937         return 0;
938 }
939
940 static int lov_print_released(const struct lu_env *env, void *cookie,
941                                 lu_printer_t p, const struct lu_object *o)
942 {
943         struct lov_object       *lov = lu2lov(o);
944         struct lov_stripe_md    *lsm = lov->lo_lsm;
945
946         (*p)(env, cookie,
947                 "released: %s, lsm{%p 0x%08X %d %u}:\n",
948                 lov->lo_layout_invalid ? "invalid" : "valid", lsm,
949                 lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
950                 lsm->lsm_layout_gen);
951         return 0;
952 }
953
954 /**
955  * Implements cl_object_operations::coo_attr_get() method for an object
956  * without stripes (LLT_EMPTY layout type).
957  *
958  * The only attribute this layer is authoritative for in this case is
959  * cl_attr::cat_blocks, which is 0.
960  */
961 static int lov_attr_get_empty(const struct lu_env *env, struct cl_object *obj,
962                               struct cl_attr *attr)
963 {
964         attr->cat_blocks = 0;
965         return 0;
966 }
967
968 static int lov_attr_get_composite(const struct lu_env *env,
969                                   struct cl_object *obj,
970                                   struct cl_attr *attr)
971 {
972         struct lov_object       *lov = cl2lov(obj);
973         struct lov_layout_entry *entry;
974         int                      result = 0;
975
976         ENTRY;
977
978         attr->cat_size = 0;
979         attr->cat_blocks = 0;
980         lov_foreach_layout_entry(lov, entry) {
981                 struct cl_attr *lov_attr = NULL;
982                 int index = lov_layout_entry_index(lov, entry);
983
984                 if (!entry->lle_valid)
985                         continue;
986
987                 /* PFL: This component has not been init-ed. */
988                 if (!lsm_entry_inited(lov->lo_lsm, index))
989                         continue;
990
991                 result = entry->lle_comp_ops->lco_getattr(env, lov, index,
992                                                           entry, &lov_attr);
993                 if (result < 0)
994                         RETURN(result);
995
996                 if (lov_attr == NULL)
997                         continue;
998
999                 CDEBUG(D_INODE, "COMP ID #%i: s=%llu m=%llu a=%llu c=%llu "
1000                        "b=%llu\n", index - 1, lov_attr->cat_size,
1001                        lov_attr->cat_mtime, lov_attr->cat_atime,
1002                        lov_attr->cat_ctime, lov_attr->cat_blocks);
1003
1004                 /* merge results */
1005                 attr->cat_blocks += lov_attr->cat_blocks;
1006                 if (attr->cat_size < lov_attr->cat_size)
1007                         attr->cat_size = lov_attr->cat_size;
1008                 if (attr->cat_kms < lov_attr->cat_kms)
1009                         attr->cat_kms = lov_attr->cat_kms;
1010                 if (attr->cat_atime < lov_attr->cat_atime)
1011                         attr->cat_atime = lov_attr->cat_atime;
1012                 if (attr->cat_ctime < lov_attr->cat_ctime)
1013                         attr->cat_ctime = lov_attr->cat_ctime;
1014                 if (attr->cat_mtime < lov_attr->cat_mtime)
1015                         attr->cat_mtime = lov_attr->cat_mtime;
1016         }
1017
1018         RETURN(0);
1019 }
1020
1021 static const struct lov_layout_operations lov_dispatch[] = {
1022         [LLT_EMPTY] = {
1023                 .llo_init      = lov_init_empty,
1024                 .llo_delete    = lov_delete_empty,
1025                 .llo_fini      = lov_fini_empty,
1026                 .llo_print     = lov_print_empty,
1027                 .llo_page_init = lov_page_init_empty,
1028                 .llo_lock_init = lov_lock_init_empty,
1029                 .llo_io_init   = lov_io_init_empty,
1030                 .llo_getattr   = lov_attr_get_empty,
1031         },
1032         [LLT_RELEASED] = {
1033                 .llo_init      = lov_init_released,
1034                 .llo_delete    = lov_delete_empty,
1035                 .llo_fini      = lov_fini_released,
1036                 .llo_print     = lov_print_released,
1037                 .llo_page_init = lov_page_init_empty,
1038                 .llo_lock_init = lov_lock_init_empty,
1039                 .llo_io_init   = lov_io_init_released,
1040                 .llo_getattr   = lov_attr_get_empty,
1041         },
1042         [LLT_COMP] = {
1043                 .llo_init      = lov_init_composite,
1044                 .llo_delete    = lov_delete_composite,
1045                 .llo_fini      = lov_fini_composite,
1046                 .llo_print     = lov_print_composite,
1047                 .llo_page_init = lov_page_init_composite,
1048                 .llo_lock_init = lov_lock_init_composite,
1049                 .llo_io_init   = lov_io_init_composite,
1050                 .llo_getattr   = lov_attr_get_composite,
1051         },
1052 };
1053
1054 /**
1055  * Performs a double-dispatch based on the layout type of an object.
1056  */
1057 #define LOV_2DISPATCH_NOLOCK(obj, op, ...)              \
1058 ({                                                      \
1059         struct lov_object *__obj = (obj);               \
1060         enum lov_layout_type __llt;                     \
1061                                                         \
1062         __llt = __obj->lo_type;                         \
1063         LASSERT(__llt < ARRAY_SIZE(lov_dispatch));      \
1064         lov_dispatch[__llt].op(__VA_ARGS__);            \
1065 })
1066
1067 /**
1068  * Return lov_layout_type associated with a given lsm
1069  */
1070 static enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
1071 {
1072         if (lsm == NULL)
1073                 return LLT_EMPTY;
1074
1075         if (lsm->lsm_is_released)
1076                 return LLT_RELEASED;
1077
1078         if (lsm->lsm_magic == LOV_MAGIC_V1 ||
1079             lsm->lsm_magic == LOV_MAGIC_V3 ||
1080             lsm->lsm_magic == LOV_MAGIC_COMP_V1)
1081                 return LLT_COMP;
1082
1083         return LLT_EMPTY;
1084 }
1085
1086 static inline void lov_conf_freeze(struct lov_object *lov)
1087 {
1088         CDEBUG(D_INODE, "To take share lov(%p) owner %p/%p\n",
1089                 lov, lov->lo_owner, current);
1090         if (lov->lo_owner != current)
1091                 down_read(&lov->lo_type_guard);
1092 }
1093
1094 static inline void lov_conf_thaw(struct lov_object *lov)
1095 {
1096         CDEBUG(D_INODE, "To release share lov(%p) owner %p/%p\n",
1097                 lov, lov->lo_owner, current);
1098         if (lov->lo_owner != current)
1099                 up_read(&lov->lo_type_guard);
1100 }
1101
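/*
 * Like LOV_2DISPATCH_NOLOCK(), but optionally takes the layout type guard for
 * reading (via lov_conf_freeze()/lov_conf_thaw()) around the dispatch when
 * \a lock is non-zero.
 */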
1102 #define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...)                       \
1103 ({                                                                      \
1104         struct lov_object                      *__obj = (obj);          \
1105         int                                     __lock = !!(lock);      \
1106         typeof(lov_dispatch[0].op(__VA_ARGS__)) __result;               \
1107                                                                         \
1108         if (__lock)                                                     \
1109                 lov_conf_freeze(__obj);                                 \
1110         __result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__);          \
1111         if (__lock)                                                     \
1112                 lov_conf_thaw(__obj);                                   \
1113         __result;                                                       \
1114 })
1115
1116 /**
1117  * Performs a locked double-dispatch based on the layout type of an object.
1118  */
1119 #define LOV_2DISPATCH(obj, op, ...)                     \
1120         LOV_2DISPATCH_MAYLOCK(obj, op, 1, __VA_ARGS__)
1121
1122 #define LOV_2DISPATCH_VOID(obj, op, ...)                                \
1123 do {                                                                    \
1124         struct lov_object                      *__obj = (obj);          \
1125         enum lov_layout_type                    __llt;                  \
1126                                                                         \
1127         lov_conf_freeze(__obj);                                         \
1128         __llt = __obj->lo_type;                                         \
1129         LASSERT(__llt < ARRAY_SIZE(lov_dispatch));                      \
1130         lov_dispatch[__llt].op(__VA_ARGS__);                            \
1131         lov_conf_thaw(__obj);                                           \
1132 } while (0)
1133
1134 static void lov_conf_lock(struct lov_object *lov)
1135 {
1136         LASSERT(lov->lo_owner != current);
1137         down_write(&lov->lo_type_guard);
1138         LASSERT(lov->lo_owner == NULL);
1139         lov->lo_owner = current;
1140         CDEBUG(D_INODE, "Took exclusive lov(%p) owner %p\n",
1141                 lov, lov->lo_owner);
1142 }
1143
1144 static void lov_conf_unlock(struct lov_object *lov)
1145 {
1146         CDEBUG(D_INODE, "To release exclusive lov(%p) owner %p\n",
1147                 lov, lov->lo_owner);
1148         lov->lo_owner = NULL;
1149         up_write(&lov->lo_type_guard);
1150 }
1151
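/*
 * Block until all IO against \a lov has drained (lo_active_ios reaches
 * zero); used before a layout can be deleted or replaced.
 */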
1152 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
1153 {
1154         struct l_wait_info lwi = { 0 };
1155         ENTRY;
1156
1157         while (atomic_read(&lov->lo_active_ios) > 0) {
1158                 CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
1159                         PFID(lu_object_fid(lov2lu(lov))),
1160                         atomic_read(&lov->lo_active_ios));
1161
1162                 l_wait_event(lov->lo_waitq,
1163                              atomic_read(&lov->lo_active_ios) == 0, &lwi);
1164         }
1165         RETURN(0);
1166 }
1167
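/*
 * Switch \a lov to the layout described by \a lsm: prune cached pages, let
 * the old layout's llo_delete()/llo_fini() release its state, then install
 * the new layout type and run its llo_init().  If initializing the new
 * layout fails, the object is downgraded to LLT_EMPTY.
 */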
1168 static int lov_layout_change(const struct lu_env *unused,
1169                              struct lov_object *lov, struct lov_stripe_md *lsm,
1170                              const struct cl_object_conf *conf)
1171 {
1172         enum lov_layout_type llt = lov_type(lsm);
1173         union lov_layout_state *state = &lov->u;
1174         const struct lov_layout_operations *old_ops;
1175         const struct lov_layout_operations *new_ops;
1176         struct lov_device *lov_dev = lov_object_dev(lov);
1177         struct lu_env *env;
1178         __u16 refcheck;
1179         int rc;
1180         ENTRY;
1181
1182         LASSERT(lov->lo_type < ARRAY_SIZE(lov_dispatch));
1183
1184         env = cl_env_get(&refcheck);
1185         if (IS_ERR(env))
1186                 RETURN(PTR_ERR(env));
1187
1188         LASSERT(llt < ARRAY_SIZE(lov_dispatch));
1189
1190         CDEBUG(D_INODE, DFID" from %s to %s\n",
1191                PFID(lu_object_fid(lov2lu(lov))),
1192                llt2str(lov->lo_type), llt2str(llt));
1193
1194         old_ops = &lov_dispatch[lov->lo_type];
1195         new_ops = &lov_dispatch[llt];
1196
1197         rc = cl_object_prune(env, &lov->lo_cl);
1198         if (rc != 0)
1199                 GOTO(out, rc);
1200
1201         rc = old_ops->llo_delete(env, lov, &lov->u);
1202         if (rc != 0)
1203                 GOTO(out, rc);
1204
1205         old_ops->llo_fini(env, lov, &lov->u);
1206
1207         LASSERT(atomic_read(&lov->lo_active_ios) == 0);
1208
1209         CDEBUG(D_INODE, DFID "Apply new layout lov %p, type %d\n",
1210                PFID(lu_object_fid(lov2lu(lov))), lov, llt);
1211
1212         /* page bufsize fixup */
1213         cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
1214                 lov_page_slice_fixup(lov, NULL);
1215
1216         lov->lo_type = llt;
1217         rc = new_ops->llo_init(env, lov_dev, lov, lsm, conf, state);
1218         if (rc != 0) {
1219                 struct obd_device *obd = lov2obd(lov_dev->ld_lov);
1220
1221                 CERROR("%s: cannot apply new layout on "DFID" : rc = %d\n",
1222                        obd->obd_name, PFID(lu_object_fid(lov2lu(lov))), rc);
1223                 new_ops->llo_delete(env, lov, state);
1224                 new_ops->llo_fini(env, lov, state);
1225                 /* this file becomes an EMPTY file. */
1226                 lov->lo_type = LLT_EMPTY;
1227                 GOTO(out, rc);
1228         }
1229
1230 out:
1231         cl_env_put(env, &refcheck);
1232         RETURN(rc);
1233 }
1234
1235 /*****************************************************************************
1236  *
1237  * Lov object operations.
1238  *
1239  */
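/*
 * Initialize a newly allocated LOV object: unpack the layout buffer (if any)
 * carried in the configuration and run the llo_init() method matching the
 * resulting layout type.
 */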
1240 int lov_object_init(const struct lu_env *env, struct lu_object *obj,
1241                     const struct lu_object_conf *conf)
1242 {
1243         struct lov_object            *lov   = lu2lov(obj);
1244         struct lov_device            *dev   = lov_object_dev(lov);
1245         const struct cl_object_conf  *cconf = lu2cl_conf(conf);
1246         union lov_layout_state       *set   = &lov->u;
1247         const struct lov_layout_operations *ops;
1248         struct lov_stripe_md *lsm = NULL;
1249         int rc;
1250         ENTRY;
1251
1252         init_rwsem(&lov->lo_type_guard);
1253         atomic_set(&lov->lo_active_ios, 0);
1254         init_waitqueue_head(&lov->lo_waitq);
1255         cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
1256
1257         lov->lo_type = LLT_EMPTY;
1258         if (cconf->u.coc_layout.lb_buf != NULL) {
1259                 lsm = lov_unpackmd(dev->ld_lov,
1260                                    cconf->u.coc_layout.lb_buf,
1261                                    cconf->u.coc_layout.lb_len);
1262                 if (IS_ERR(lsm))
1263                         RETURN(PTR_ERR(lsm));
1264
1265                 dump_lsm(D_INODE, lsm);
1266         }
1267
1268         /* no locking is necessary, as object is being created */
1269         lov->lo_type = lov_type(lsm);
1270         ops = &lov_dispatch[lov->lo_type];
1271         rc = ops->llo_init(env, dev, lov, lsm, cconf, set);
1272         if (rc != 0)
1273                 GOTO(out_lsm, rc);
1274
1275 out_lsm:
1276         lov_lsm_put(lsm);
1277
1278         RETURN(rc);
1279 }
1280
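/*
 * Apply a layout configuration operation to the LOV object.  Under the
 * exclusive layout lock it either marks the layout invalid
 * (OBJECT_CONF_INVALIDATE), waits for in-flight IO (OBJECT_CONF_WAIT), or
 * applies a new layout (OBJECT_CONF_SET); a layout change is refused with
 * -EBUSY while there is still active IO against the object.
 */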
1281 static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
1282                         const struct cl_object_conf *conf)
1283 {
1284         struct lov_stripe_md    *lsm = NULL;
1285         struct lov_object       *lov = cl2lov(obj);
1286         int                      result = 0;
1287         ENTRY;
1288
1289         if (conf->coc_opc == OBJECT_CONF_SET &&
1290             conf->u.coc_layout.lb_buf != NULL) {
1291                 lsm = lov_unpackmd(lov_object_dev(lov)->ld_lov,
1292                                    conf->u.coc_layout.lb_buf,
1293                                    conf->u.coc_layout.lb_len);
1294                 if (IS_ERR(lsm))
1295                         RETURN(PTR_ERR(lsm));
1296                 dump_lsm(D_INODE, lsm);
1297         }
1298
1299         lov_conf_lock(lov);
1300         if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
1301                 lov->lo_layout_invalid = true;
1302                 GOTO(out, result = 0);
1303         }
1304
1305         if (conf->coc_opc == OBJECT_CONF_WAIT) {
1306                 if (lov->lo_layout_invalid &&
1307                     atomic_read(&lov->lo_active_ios) > 0) {
1308                         lov_conf_unlock(lov);
1309                         result = lov_layout_wait(env, lov);
1310                         lov_conf_lock(lov);
1311                 }
1312                 GOTO(out, result);
1313         }
1314
1315         LASSERT(conf->coc_opc == OBJECT_CONF_SET);
1316
1317         if ((lsm == NULL && lov->lo_lsm == NULL) ||
1318             ((lsm != NULL && lov->lo_lsm != NULL) &&
1319              (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
1320              (lov->lo_lsm->lsm_entries[0]->lsme_pattern ==
1321               lsm->lsm_entries[0]->lsme_pattern))) {
1322                 /* same version of layout */
1323                 lov->lo_layout_invalid = false;
1324                 GOTO(out, result = 0);
1325         }
1326
1327         /* will change layout - check if there still exists active IO. */
1328         if (atomic_read(&lov->lo_active_ios) > 0) {
1329                 lov->lo_layout_invalid = true;
1330                 GOTO(out, result = -EBUSY);
1331         }
1332
1333         result = lov_layout_change(env, lov, lsm, conf);
1334         lov->lo_layout_invalid = result != 0;
1335         EXIT;
1336
1337 out:
1338         lov_conf_unlock(lov);
1339         lov_lsm_put(lsm);
1340         CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
1341                PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
1342         RETURN(result);
1343 }
1344
1345 static void lov_object_delete(const struct lu_env *env, struct lu_object *obj)
1346 {
1347         struct lov_object *lov = lu2lov(obj);
1348
1349         ENTRY;
1350         LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
1351         EXIT;
1352 }
1353
1354 static void lov_object_free(const struct lu_env *env, struct lu_object *obj)
1355 {
1356         struct lov_object *lov = lu2lov(obj);
1357
1358         ENTRY;
1359         LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
1360         lu_object_fini(obj);
1361         OBD_SLAB_FREE_PTR(lov, lov_object_kmem);
1362         EXIT;
1363 }
1364
1365 static int lov_object_print(const struct lu_env *env, void *cookie,
1366                             lu_printer_t p, const struct lu_object *o)
1367 {
1368         return LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
1369 }
1370
1371 int lov_page_init(const struct lu_env *env, struct cl_object *obj,
1372                   struct cl_page *page, pgoff_t index)
1373 {
1374         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page,
1375                                     index);
1376 }
1377
1378 /**
1379  * Implements cl_object_operations::clo_io_init() method for lov
1380  * layer. Dispatches to the appropriate layout io initialization method.
1381  */
1382 int lov_io_init(const struct lu_env *env, struct cl_object *obj,
1383                 struct cl_io *io)
1384 {
1385         CL_IO_SLICE_CLEAN(lov_env_io(env), lis_preserved);
1386
1387         CDEBUG(D_INODE, DFID "io %p type %d ignore/verify layout %d/%d\n",
1388                PFID(lu_object_fid(&obj->co_lu)), io, io->ci_type,
1389                io->ci_ignore_layout, io->ci_verify_layout);
1390
1391         /* CIT_MISC IOs with ci_ignore_layout set are usually invoked from
1392          * the OSC layer. They shouldn't take the lov layout conf lock in that
1393          * case, because as long as the OSC object exists, the layout can't
1394          * be reconfigured. */
1395         return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_io_init,
1396                         !(io->ci_ignore_layout && io->ci_type == CIT_MISC),
1397                         env, obj, io);
1398 }
1399
1400 /**
1401  * An implementation of cl_object_operations::clo_attr_get() method for lov
1402  * layer. For raid0 layout this collects and merges attributes of all
1403  * sub-objects.
1404  */
1405 static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
1406                         struct cl_attr *attr)
1407 {
1408         /* do not take lock, as this function is called under a
1409          * spin-lock. Layout is protected from changing by ongoing IO. */
1410         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
1411 }
1412
1413 static int lov_attr_update(const struct lu_env *env, struct cl_object *obj,
1414                            const struct cl_attr *attr, unsigned valid)
1415 {
1416         /*
1417          * No dispatch is required here, as no layout implements this.
1418          */
1419         return 0;
1420 }
1421
1422 int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
1423                   struct cl_lock *lock, const struct cl_io *io)
1424 {
1425         /* No need to lock because we've taken one refcount of layout.  */
1426         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_lock_init, env, obj, lock,
1427                                     io);
1428 }
1429
1430 /**
1431  * We calculate on which OST the mapping will end. If the length of the
1432  * mapping is greater than (stripe_size * stripe_count) then the
1433  * last_stripe will be the one just before start_stripe. Otherwise we check
1434  * whether the mapping intersects each OST and find last_stripe.
1435  * This function returns the last_stripe and also sets the stripe_count
1436  * over which the mapping is spread.
1437  *
1438  * \param lsm [in]              striping information for the file
1439  * \param index [in]            stripe component index
1440  * \param ext [in]              logical extent of mapping
1441  * \param start_stripe [in]     starting stripe of the mapping
1442  * \param stripe_count [out]    the number of stripes across which to map is
1443  *                              returned
1444  *
1445  * \retval last_stripe          return the last stripe of the mapping
1446  */
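/*
 * Illustrative example with hypothetical values (not taken from the code
 * below): with lsme_stripe_count = 4, stripe_size = 1 MiB and
 * start_stripe = 1, a mapping longer than 4 MiB wraps around every stripe,
 * so last_stripe = 0 (the stripe just before start_stripe) and
 * *stripe_count = 4. A shorter mapping touching only stripes 1 and 2 stops
 * at the first stripe it does not intersect, giving last_stripe = 2 and
 * *stripe_count = 2.
 */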
1447 static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm, int index,
1448                                    struct lu_extent *ext,
1449                                    int start_stripe, int *stripe_count)
1450 {
1451         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1452         int last_stripe;
1453         u64 obd_start;
1454         u64 obd_end;
1455         int i, j;
1456
1457         if (ext->e_end - ext->e_start >
1458             lsme->lsme_stripe_size * lsme->lsme_stripe_count) {
1459                 last_stripe = (start_stripe < 1 ? lsme->lsme_stripe_count - 1 :
1460                                                   start_stripe - 1);
1461                 *stripe_count = lsme->lsme_stripe_count;
1462         } else {
1463                 for (j = 0, i = start_stripe; j < lsme->lsme_stripe_count;
1464                      i = (i + 1) % lsme->lsme_stripe_count, j++) {
1465                         if ((lov_stripe_intersects(lsm, index,  i, ext,
1466                                                    &obd_start, &obd_end)) == 0)
1467                                 break;
1468                 }
1469                 *stripe_count = j;
1470                 last_stripe = (start_stripe + j - 1) % lsme->lsme_stripe_count;
1471         }
1472
1473         return last_stripe;
1474 }
1475
1476 /**
1477  * Set fe_device and copy extents from local buffer into main return buffer.
1478  *
1479  * \param fiemap [out]          fiemap to hold all extents
1480  * \param lcl_fm_ext [in]       array of fiemap extents obtained from the OSC layer
1481  * \param ost_index [in]        OST index to be written into the fm_device
1482  *                              field for each extent
1483  * \param ext_count [in]        number of extents to be copied
1484  * \param current_extent [in]   where to start copying in the extent array
1485  */
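/*
 * Sketch of the copy-offset calculation, assuming fiemap_count_to_size(n)
 * evaluates to sizeof(struct fiemap) + n * sizeof(struct fiemap_extent)
 * (stated here only for illustration): the extents already returned occupy
 * fm_extents[0..current_extent-1], so the destination computed below is
 *
 *   (char *)fiemap + sizeof(struct fiemap) +
 *           current_extent * sizeof(struct fiemap_extent)
 *
 * i.e. the first free slot in the caller's extent array.
 */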
1486 static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap,
1487                                          struct fiemap_extent *lcl_fm_ext,
1488                                          int ost_index, unsigned int ext_count,
1489                                          int current_extent)
1490 {
1491         char            *to;
1492         unsigned int    ext;
1493
1494         for (ext = 0; ext < ext_count; ext++) {
1495                 lcl_fm_ext[ext].fe_device = ost_index;
1496                 lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
1497         }
1498
1499         /* Copy fm_extent's from fm_local to return buffer */
1500         to = (char *)fiemap + fiemap_count_to_size(current_extent);
1501         memcpy(to, lcl_fm_ext, ext_count * sizeof(struct fiemap_extent));
1502 }
1503
1504 #define FIEMAP_BUFFER_SIZE 4096
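/*
 * Rough sizing note, assuming the standard <linux/fiemap.h> layout of a
 * 32-byte struct fiemap header and 56-byte struct fiemap_extent entries
 * (an assumption, not verified here): a 4096-byte local buffer holds
 * (4096 - 32) / 56 = 72 extents per sub-request sent to an OST.
 */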
1505
1506 /**
1507  * Non-zero fe_logical indicates that this is a continuation FIEMAP
1508  * call. The local end offset and the device are sent in the first
1509  * fm_extent. This function determines the stripe number from the OST index
1510  * saved in fe_device and returns that starting stripe via *start_stripe.
1511  *
1512  * This function returns fm_end_offset which is the in-OST offset at which
1513  * mapping should be restarted. If fm_end_offset=0 is returned then the
1514  * caller will re-calculate the proper offset in the next stripe.
1515  * Note that the first extent is passed to lov_get_info via the value field.
1516  *
1517  * \param fiemap [in]           fiemap request header
1518  * \param lsm [in]              striping information for the file
1519  * \param index [in]            stripe component index
1520  * \param ext [in]              logical extent of mapping
1521  * \param start_stripe [out]    starting stripe of the mapping is returned here
1522  */
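/*
 * Example of a continuation call with hypothetical values: the previous
 * FIEMAP call stopped after an extent ending at in-OST offset 0x100000 on
 * the OST whose index was saved in fe_device. The loop below maps that OST
 * index back to a stripe number; if the remaining extent still intersects
 * that stripe, mapping resumes there at offset 0x100000, otherwise it
 * resumes at offset 0 of the following stripe.
 */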
1523 static u64 fiemap_calc_fm_end_offset(struct fiemap *fiemap,
1524                                      struct lov_stripe_md *lsm,
1525                                      int index, struct lu_extent *ext,
1526                                      int *start_stripe)
1527 {
1528         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1529         u64 local_end = fiemap->fm_extents[0].fe_logical;
1530         u64 lun_start;
1531         u64 lun_end;
1532         u64 fm_end_offset;
1533         int stripe_no = -1;
1534         int i;
1535
1536         if (fiemap->fm_extent_count == 0 ||
1537             fiemap->fm_extents[0].fe_logical == 0)
1538                 return 0;
1539
1540         /* Find out stripe_no from ost_index saved in the fe_device */
1541         for (i = 0; i < lsme->lsme_stripe_count; i++) {
1542                 struct lov_oinfo *oinfo = lsme->lsme_oinfo[i];
1543
1544                 if (lov_oinfo_is_dummy(oinfo))
1545                         continue;
1546
1547                 if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) {
1548                         stripe_no = i;
1549                         break;
1550                 }
1551         }
1552
1553         if (stripe_no == -1)
1554                 return -EINVAL;
1555
1556         /* If we have finished mapping on previous device, shift logical
1557          * offset to start of next device */
1558         if (lov_stripe_intersects(lsm, index, stripe_no, ext,
1559                                    &lun_start, &lun_end) != 0 &&
1560             local_end < lun_end) {
1561                 fm_end_offset = local_end;
1562                 *start_stripe = stripe_no;
1563         } else {
1564                 /* This is a special value to indicate that caller should
1565                  * calculate offset in next stripe. */
1566                 fm_end_offset = 0;
1567                 *start_stripe = (stripe_no + 1) % lsme->lsme_stripe_count;
1568         }
1569
1570         return fm_end_offset;
1571 }
1572
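/*
 * Descriptive summary of how the fields below are used in this file:
 * fs_fm is the per-OST scratch fiemap buffer, fs_ext the logical extent
 * currently being mapped, fs_cur_extent/fs_cnt_need track how many extents
 * have been copied out and how many may still be requested per sub-call,
 * fs_end_offset is the in-OST restart offset for continuation calls, and
 * fs_device_done/fs_finish_stripe/fs_enough record why iteration stopped.
 */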
1573 struct fiemap_state {
1574         struct fiemap           *fs_fm;
1575         struct lu_extent        fs_ext;
1576         u64                     fs_length;
1577         u64                     fs_end_offset;
1578         int                     fs_cur_extent;
1579         int                     fs_cnt_need;
1580         int                     fs_start_stripe;
1581         int                     fs_last_stripe;
1582         bool                    fs_device_done;
1583         bool                    fs_finish_stripe;
1584         bool                    fs_enough;
1585 };
1586
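/*
 * lov_find_subobj() looks up the cl_object backing one stripe of one layout
 * component. The @index argument is the packed value built by
 * lov_comp_index(entry, stripe); lov_comp_entry() and lov_comp_stripe()
 * unpack it again (the exact packing is defined in the LOV headers and not
 * assumed here).
 */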
1587 static struct cl_object *lov_find_subobj(const struct lu_env *env,
1588                                          struct lov_object *lov,
1589                                          struct lov_stripe_md *lsm,
1590                                          int index)
1591 {
1592         struct lov_device       *dev = lu2lov_dev(lov2lu(lov)->lo_dev);
1593         struct lov_thread_info  *lti = lov_env_info(env);
1594         struct lu_fid           *ofid = &lti->lti_fid;
1595         struct lov_oinfo        *oinfo;
1596         struct cl_device        *subdev;
1597         int                     entry = lov_comp_entry(index);
1598         int                     stripe = lov_comp_stripe(index);
1599         int                     ost_idx;
1600         int                     rc;
1601         struct cl_object        *result;
1602
1603         if (lov->lo_type != LLT_COMP)
1604                 GOTO(out, result = NULL);
1605
1606         if (entry >= lsm->lsm_entry_count ||
1607             stripe >= lsm->lsm_entries[entry]->lsme_stripe_count)
1608                 GOTO(out, result = NULL);
1609
1610         oinfo = lsm->lsm_entries[entry]->lsme_oinfo[stripe];
1611         ost_idx = oinfo->loi_ost_idx;
1612         rc = ostid_to_fid(ofid, &oinfo->loi_oi, ost_idx);
1613         if (rc != 0)
1614                 GOTO(out, result = NULL);
1615
1616         subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
1617         result = lov_sub_find(env, subdev, ofid, NULL);
1618 out:
1619         if (result == NULL)
1620                 result = ERR_PTR(-EINVAL);
1621         return result;
1622 }
1623
1624 int fiemap_for_stripe(const struct lu_env *env, struct cl_object *obj,
1625                       struct lov_stripe_md *lsm, struct fiemap *fiemap,
1626                       size_t *buflen, struct ll_fiemap_info_key *fmkey,
1627                       int index, int stripeno, struct fiemap_state *fs)
1628 {
1629         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1630         struct cl_object *subobj;
1631         struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov;
1632         struct fiemap_extent *fm_ext = &fs->fs_fm->fm_extents[0];
1633         u64 req_fm_len; /* Stores length of required mapping */
1634         u64 len_mapped_single_call;
1635         u64 lun_start;
1636         u64 lun_end;
1637         u64 obd_object_end;
1638         unsigned int ext_count;
1639         /* EOF for object */
1640         bool ost_eof = false;
1641         /* done with required mapping for this OST? */
1642         bool ost_done = false;
1643         int ost_index;
1644         int rc = 0;
1645
1646         fs->fs_device_done = false;
1647         /* Find out range of mapping on this stripe */
1648         if ((lov_stripe_intersects(lsm, index, stripeno, &fs->fs_ext,
1649                                    &lun_start, &obd_object_end)) == 0)
1650                 return 0;
1651
1652         if (lov_oinfo_is_dummy(lsme->lsme_oinfo[stripeno]))
1653                 return -EIO;
1654
1655         /* If this is a continuation FIEMAP call and we are on
1656          * starting stripe then lun_start needs to be set to
1657          * end_offset */
1658         if (fs->fs_end_offset != 0 && stripeno == fs->fs_start_stripe)
1659                 lun_start = fs->fs_end_offset;
1660         lun_end = lov_size_to_stripe(lsm, index, fs->fs_ext.e_end, stripeno);
1661         if (lun_start == lun_end)
1662                 return 0;
1663
1664         req_fm_len = obd_object_end - lun_start;
1665         fs->fs_fm->fm_length = 0;
1666         len_mapped_single_call = 0;
1667
1668         /* find lovsub object */
1669         subobj = lov_find_subobj(env, cl2lov(obj), lsm,
1670                                  lov_comp_index(index, stripeno));
1671         if (IS_ERR(subobj))
1672                 return PTR_ERR(subobj);
1673         /* If the output buffer is very large and the objects have many
1674          * extents we may need to loop on a single OST repeatedly */
1675         do {
1676                 if (fiemap->fm_extent_count > 0) {
1677                         /* Don't get too many extents. */
1678                         if (fs->fs_cur_extent + fs->fs_cnt_need >
1679                             fiemap->fm_extent_count)
1680                                 fs->fs_cnt_need = fiemap->fm_extent_count -
1681                                                   fs->fs_cur_extent;
1682                 }
1683
1684                 lun_start += len_mapped_single_call;
1685                 fs->fs_fm->fm_length = req_fm_len - len_mapped_single_call;
1686                 req_fm_len = fs->fs_fm->fm_length;
1687                 /*
1688                  * If we've already collected enough extents, request one
1689                  * more to see whether we coincidentally reached the end of
1690                  * the available extents, so FIEMAP_EXTENT_LAST can be set.
1691                  */
1692                 fs->fs_fm->fm_extent_count = fs->fs_enough ?
1693                                              1 : fs->fs_cnt_need;
1694                 fs->fs_fm->fm_mapped_extents = 0;
1695                 fs->fs_fm->fm_flags = fiemap->fm_flags;
1696
1697                 ost_index = lsme->lsme_oinfo[stripeno]->loi_ost_idx;
1698
1699                 if (ost_index < 0 || ost_index >= lov->desc.ld_tgt_count)
1700                         GOTO(obj_put, rc = -EINVAL);
1701                 /* If OST is inactive, return extent with UNKNOWN flag. */
1702                 if (!lov->lov_tgts[ost_index]->ltd_active) {
1703                         fs->fs_fm->fm_flags |= FIEMAP_EXTENT_LAST;
1704                         fs->fs_fm->fm_mapped_extents = 1;
1705
1706                         fm_ext[0].fe_logical = lun_start;
1707                         fm_ext[0].fe_length = obd_object_end - lun_start;
1708                         fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
1709
1710                         goto inactive_tgt;
1711                 }
1712
1713                 fs->fs_fm->fm_start = lun_start;
1714                 fs->fs_fm->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
1715                 memcpy(&fmkey->lfik_fiemap, fs->fs_fm, sizeof(*fs->fs_fm));
1716                 *buflen = fiemap_count_to_size(fs->fs_fm->fm_extent_count);
1717
1718                 rc = cl_object_fiemap(env, subobj, fmkey, fs->fs_fm, buflen);
1719                 if (rc != 0)
1720                         GOTO(obj_put, rc);
1721 inactive_tgt:
1722                 ext_count = fs->fs_fm->fm_mapped_extents;
1723                 if (ext_count == 0) {
1724                         ost_done = true;
1725                         fs->fs_device_done = true;
1726                         /* If the last stripe has a hole at the end,
1727                          * we need to return */
1728                         if (stripeno == fs->fs_last_stripe) {
1729                                 fiemap->fm_mapped_extents = 0;
1730                                 fs->fs_finish_stripe = true;
1731                                 GOTO(obj_put, rc);
1732                         }
1733                         break;
1734                 } else if (fs->fs_enough) {
1735                         /*
1736                          * We've collected enough extents and there are
1737                          * more extents after it.
1738                          */
1739                         GOTO(obj_put, rc);
1740                 }
1741
1742                 /* If we just need the number of extents, go to the next device */
1743                 if (fiemap->fm_extent_count == 0) {
1744                         fs->fs_cur_extent += ext_count;
1745                         break;
1746                 }
1747
1748                 /* prepare to copy the retrieved map extents */
1749                 len_mapped_single_call = fm_ext[ext_count - 1].fe_logical +
1750                                          fm_ext[ext_count - 1].fe_length -
1751                                          lun_start;
1752
1753                 /* Have we finished mapping on this device? */
1754                 if (req_fm_len <= len_mapped_single_call) {
1755                         ost_done = true;
1756                         fs->fs_device_done = true;
1757                 }
1758
1759                 /* Clear the EXTENT_LAST flag which can be present on
1760                  * the last extent */
1761                 if (fm_ext[ext_count - 1].fe_flags & FIEMAP_EXTENT_LAST)
1762                         fm_ext[ext_count - 1].fe_flags &= ~FIEMAP_EXTENT_LAST;
1763                 if (lov_stripe_size(lsm, index,
1764                                     fm_ext[ext_count - 1].fe_logical +
1765                                     fm_ext[ext_count - 1].fe_length,
1766                                     stripeno) >= fmkey->lfik_oa.o_size) {
1767                         ost_eof = true;
1768                         fs->fs_device_done = true;
1769                 }
1770
1771                 fiemap_prepare_and_copy_exts(fiemap, fm_ext, ost_index,
1772                                              ext_count, fs->fs_cur_extent);
1773                 fs->fs_cur_extent += ext_count;
1774
1775                 /* Ran out of available extents? */
1776                 if (fs->fs_cur_extent >= fiemap->fm_extent_count)
1777                         fs->fs_enough = true;
1778         } while (!ost_done && !ost_eof);
1779
1780         if (stripeno == fs->fs_last_stripe)
1781                 fs->fs_finish_stripe = true;
1782 obj_put:
1783         cl_object_put(env, subobj);
1784
1785         return rc;
1786 }
1787
1788 /**
1789  * Break down the FIEMAP request and send appropriate calls to individual OSTs.
1790  * This also handles the restarting of FIEMAP calls in case mapping overflows
1791  * the available number of extents in single call.
1792  *
1793  * \param env [in]              lustre environment
1794  * \param obj [in]              file object
1795  * \param fmkey [in]            fiemap request header and other info
1796  * \param fiemap [out]          fiemap buffer holding the retrieved map extents
1797  * \param buflen [in/out]       max buffer length of @fiemap; when iterating over
1798  *                              each OST it limits the maximum mapping requested
1799  * \retval 0    success
1800  * \retval < 0  error
1801  */
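/*
 * Rough flow of lov_object_fiemap() below, as a descriptive summary only:
 * take a reference on the layout, reject layouts the caller cannot
 * interpret (multiple stripes without FIEMAP_FLAG_DEVICE_ORDER, DOM),
 * short-circuit released files, then for each layout component covering
 * [fm_start, fm_start + fm_length) compute the start/last stripe and the
 * restart offset, and call fiemap_for_stripe() per stripe until the request
 * is fully mapped or the caller's extent buffer is full.
 */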
1802 static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj,
1803                              struct ll_fiemap_info_key *fmkey,
1804                              struct fiemap *fiemap, size_t *buflen)
1805 {
1806         struct lov_stripe_md_entry *lsme;
1807         struct lov_stripe_md *lsm;
1808         struct fiemap *fm_local = NULL;
1809         loff_t whole_start;
1810         loff_t whole_end;
1811         int entry;
1812         int start_entry;
1813         int end_entry;
1814         int cur_stripe = 0;
1815         int stripe_count;
1816         unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
1817         int rc = 0;
1818         struct fiemap_state fs = { 0 };
1819         ENTRY;
1820
1821         lsm = lov_lsm_addref(cl2lov(obj));
1822         if (lsm == NULL)
1823                 RETURN(-ENODATA);
1824
1825         if (!(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) {
1826                 /*
1827                  * If the entry count > 1 or the stripe_count > 1 and the
1828                  * application does not understand the DEVICE_ORDER flag,
1829                  * it cannot interpret the extents correctly.
1830                  */
1831                 if (lsm->lsm_entry_count > 1 ||
1832                     (lsm->lsm_entry_count == 1 &&
1833                      lsm->lsm_entries[0]->lsme_stripe_count > 1))
1834                         GOTO(out_lsm, rc = -ENOTSUPP);
1835         }
1836
1837         /* No support for DOM layout yet. */
1838         if (lsme_is_dom(lsm->lsm_entries[0]))
1839                 GOTO(out_lsm, rc = -ENOTSUPP);
1840
1841         if (lsm->lsm_is_released) {
1842                 if (fiemap->fm_start < fmkey->lfik_oa.o_size) {
1843                         /*
1844                          * Released file: return a minimal FIEMAP if the
1845                          * request starts within the file size.
1846                          */
1847                         fiemap->fm_mapped_extents = 1;
1848                         fiemap->fm_extents[0].fe_logical = fiemap->fm_start;
1849                         if (fiemap->fm_start + fiemap->fm_length <
1850                             fmkey->lfik_oa.o_size)
1851                                 fiemap->fm_extents[0].fe_length =
1852                                         fiemap->fm_length;
1853                         else
1854                                 fiemap->fm_extents[0].fe_length =
1855                                         fmkey->lfik_oa.o_size -
1856                                         fiemap->fm_start;
1857                         fiemap->fm_extents[0].fe_flags |=
1858                                 FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST;
1859                 }
1860                 GOTO(out_lsm, rc = 0);
1861         }
1862
1863         /* Shrink the local buffer if fm_extent_count extents need less space. */
1864         if (fiemap_count_to_size(fiemap->fm_extent_count) < buffer_size)
1865                 buffer_size = fiemap_count_to_size(fiemap->fm_extent_count);
1866
1867         OBD_ALLOC_LARGE(fm_local, buffer_size);
1868         if (fm_local == NULL)
1869                 GOTO(out_lsm, rc = -ENOMEM);
1870
1871         /*
1872          * If the requested extent count does not fit in the supplied buffer,
1873          * reduce it to what the buffer can hold.
1874          */
1875         if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen)
1876                 fiemap->fm_extent_count = fiemap_size_to_count(*buflen);
1877         if (fiemap->fm_extent_count == 0)
1878                 fs.fs_cnt_need = 0;
1879
1880         fs.fs_enough = false;
1881         fs.fs_cur_extent = 0;
1882         fs.fs_fm = fm_local;
1883         fs.fs_cnt_need = fiemap_size_to_count(buffer_size);
1884
1885         whole_start = fiemap->fm_start;
1886         /* whole_start is beyond the end of the file */
1887         if (whole_start > fmkey->lfik_oa.o_size)
1888                 GOTO(out_fm_local, rc = -EINVAL);
1889         whole_end = (fiemap->fm_length == OBD_OBJECT_EOF) ?
1890                                         fmkey->lfik_oa.o_size :
1891                                         whole_start + fiemap->fm_length - 1;
1892         /*
1893          * If fiemap->fm_length != OBD_OBJECT_EOF but whole_end exceeds the
1894          * file size, clamp whole_end to the file size.
1895          */
1896         if (whole_end > fmkey->lfik_oa.o_size)
1897                 whole_end = fmkey->lfik_oa.o_size;
1898
1899         start_entry = lov_lsm_entry(lsm, whole_start);
1900         end_entry = lov_lsm_entry(lsm, whole_end);
1901         if (end_entry == -1)
1902                 end_entry = lsm->lsm_entry_count - 1;
1903
1904         if (start_entry == -1 || end_entry == -1)
1905                 GOTO(out_fm_local, rc = -EINVAL);
1906
1907         /* TODO: rewrite it with lov_foreach_io_layout() */
1908         for (entry = start_entry; entry <= end_entry; entry++) {
1909                 lsme = lsm->lsm_entries[entry];
1910
1911                 if (!lsme_inited(lsme))
1912                         break;
1913
1914                 if (entry == start_entry)
1915                         fs.fs_ext.e_start = whole_start;
1916                 else
1917                         fs.fs_ext.e_start = lsme->lsme_extent.e_start;
1918                 if (entry == end_entry)
1919                         fs.fs_ext.e_end = whole_end;
1920                 else
1921                         fs.fs_ext.e_end = lsme->lsme_extent.e_end - 1;
1922                 fs.fs_length = fs.fs_ext.e_end - fs.fs_ext.e_start + 1;
1923
1924                 /* Calculate start stripe, last stripe and length of mapping */
1925                 fs.fs_start_stripe = lov_stripe_number(lsm, entry,
1926                                                        fs.fs_ext.e_start);
1927                 fs.fs_last_stripe = fiemap_calc_last_stripe(lsm, entry,
1928                                         &fs.fs_ext, fs.fs_start_stripe,
1929                                         &stripe_count);
1930                 fs.fs_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, entry,
1931                                         &fs.fs_ext, &fs.fs_start_stripe);
1932                 /* Check each stripe */
1933                 for (cur_stripe = fs.fs_start_stripe; stripe_count > 0;
1934                      --stripe_count,
1935                      cur_stripe = (cur_stripe + 1) % lsme->lsme_stripe_count) {
1936                         rc = fiemap_for_stripe(env, obj, lsm, fiemap, buflen,
1937                                                fmkey, entry, cur_stripe, &fs);
1938                         if (rc < 0)
1939                                 GOTO(out_fm_local, rc);
1940                         if (fs.fs_enough)
1941                                 GOTO(finish, rc);
1942                         if (fs.fs_finish_stripe)
1943                                 break;
1944                 } /* for each stripe */
1945         } /* for covering layout component */
1946         /*
1947          * We've traversed all components; set @entry to the last component
1948          * entry for the last-stripe check below.
1949          */
1950         entry--;
1951 finish:
1952         /* Indicate that we are returning device offsets unless the file has
1953          * only a single stripe */
1954         if (lsm->lsm_entry_count > 1 ||
1955             (lsm->lsm_entry_count == 1 &&
1956              lsm->lsm_entries[0]->lsme_stripe_count > 1))
1957                 fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
1958
1959         if (fiemap->fm_extent_count == 0)
1960                 goto skip_last_device_calc;
1961
1962         /* Check if we have reached the last stripe and whether mapping for that
1963          * stripe is done. */
1964         if ((cur_stripe == fs.fs_last_stripe) && fs.fs_device_done)
1965                 fiemap->fm_extents[fs.fs_cur_extent - 1].fe_flags |=
1966                                                              FIEMAP_EXTENT_LAST;
1967 skip_last_device_calc:
1968         fiemap->fm_mapped_extents = fs.fs_cur_extent;
1969 out_fm_local:
1970         OBD_FREE_LARGE(fm_local, buffer_size);
1971
1972 out_lsm:
1973         lov_lsm_put(lsm);
1974         return rc;
1975 }
1976
1977 static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
1978                                 struct lov_user_md __user *lum, size_t size)
1979 {
1980         struct lov_object       *lov = cl2lov(obj);
1981         struct lov_stripe_md    *lsm;
1982         int                     rc = 0;
1983         ENTRY;
1984
1985         lsm = lov_lsm_addref(lov);
1986         if (lsm == NULL)
1987                 RETURN(-ENODATA);
1988
1989         rc = lov_getstripe(env, cl2lov(obj), lsm, lum, size);
1990         lov_lsm_put(lsm);
1991         RETURN(rc);
1992 }
1993
1994 static int lov_object_layout_get(const struct lu_env *env,
1995                                  struct cl_object *obj,
1996                                  struct cl_layout *cl)
1997 {
1998         struct lov_object *lov = cl2lov(obj);
1999         struct lov_stripe_md *lsm = lov_lsm_addref(lov);
2000         struct lu_buf *buf = &cl->cl_buf;
2001         ssize_t rc;
2002         ENTRY;
2003
2004         if (lsm == NULL) {
2005                 cl->cl_size = 0;
2006                 cl->cl_layout_gen = CL_LAYOUT_GEN_EMPTY;
2007
2008                 RETURN(0);
2009         }
2010
2011         cl->cl_size = lov_comp_md_size(lsm);
2012         cl->cl_layout_gen = lsm->lsm_layout_gen;
2013         cl->cl_dom_comp_size = 0;
2014         if (lsm_is_composite(lsm->lsm_magic)) {
2015                 struct lov_stripe_md_entry *lsme = lsm->lsm_entries[0];
2016
2017                 cl->cl_is_composite = true;
2018
2019                 if (lsme_is_dom(lsme))
2020                         cl->cl_dom_comp_size = lsme->lsme_extent.e_end;
2021         } else {
2022                 cl->cl_is_composite = false;
2023         }
2024
2025         rc = lov_lsm_pack(lsm, buf->lb_buf, buf->lb_len);
2026         lov_lsm_put(lsm);
2027
2028         RETURN(rc < 0 ? rc : 0);
2029 }
2030
2031 static loff_t lov_object_maxbytes(struct cl_object *obj)
2032 {
2033         struct lov_object *lov = cl2lov(obj);
2034         struct lov_stripe_md *lsm = lov_lsm_addref(lov);
2035         loff_t maxbytes;
2036
2037         if (lsm == NULL)
2038                 return LLONG_MAX;
2039
2040         maxbytes = lsm->lsm_maxbytes;
2041
2042         lov_lsm_put(lsm);
2043
2044         return maxbytes;
2045 }
2046
2047 static const struct cl_object_operations lov_ops = {
2048         .coo_page_init    = lov_page_init,
2049         .coo_lock_init    = lov_lock_init,
2050         .coo_io_init      = lov_io_init,
2051         .coo_attr_get     = lov_attr_get,
2052         .coo_attr_update  = lov_attr_update,
2053         .coo_conf_set     = lov_conf_set,
2054         .coo_getstripe    = lov_object_getstripe,
2055         .coo_layout_get   = lov_object_layout_get,
2056         .coo_maxbytes     = lov_object_maxbytes,
2057         .coo_fiemap       = lov_object_fiemap,
2058 };
2059
2060 static const struct lu_object_operations lov_lu_obj_ops = {
2061         .loo_object_init      = lov_object_init,
2062         .loo_object_delete    = lov_object_delete,
2063         .loo_object_release   = NULL,
2064         .loo_object_free      = lov_object_free,
2065         .loo_object_print     = lov_object_print,
2066         .loo_object_invariant = NULL
2067 };
2068
2069 struct lu_object *lov_object_alloc(const struct lu_env *env,
2070                                    const struct lu_object_header *unused,
2071                                    struct lu_device *dev)
2072 {
2073         struct lov_object *lov;
2074         struct lu_object  *obj;
2075
2076         ENTRY;
2077         OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, GFP_NOFS);
2078         if (lov != NULL) {
2079                 obj = lov2lu(lov);
2080                 lu_object_init(obj, NULL, dev);
2081                 lov->lo_cl.co_ops = &lov_ops;
2082                 lov->lo_type = -1; /* invalid, to catch uninitialized type */
2083                 /*
2084                  * object io operation vector (cl_object::co_iop) is installed
2085                  * later in lov_object_init(), as different vectors are used
2086                  * for object with different layouts.
2087                  */
2088                 obj->lo_ops = &lov_lu_obj_ops;
2089         } else
2090                 obj = NULL;
2091         RETURN(obj);
2092 }
2093
2094 struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
2095 {
2096         struct lov_stripe_md *lsm = NULL;
2097
2098         lov_conf_freeze(lov);
2099         if (lov->lo_lsm != NULL) {
2100                 lsm = lsm_addref(lov->lo_lsm);
2101                 CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
2102                         lsm, atomic_read(&lsm->lsm_refc),
2103                         lov->lo_layout_invalid, current);
2104         }
2105         lov_conf_thaw(lov);
2106         return lsm;
2107 }
2108
2109 int lov_read_and_clear_async_rc(struct cl_object *clob)
2110 {
2111         struct lu_object *luobj;
2112         int rc = 0;
2113         ENTRY;
2114
2115         luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
2116                                  &lov_device_type);
2117         if (luobj != NULL) {
2118                 struct lov_object *lov = lu2lov(luobj);
2119
2120                 lov_conf_freeze(lov);
2121                 switch (lov->lo_type) {
2122                 case LLT_COMP: {
2123                         struct lov_stripe_md *lsm;
2124                         int i;
2125
2126                         lsm = lov->lo_lsm;
2127                         LASSERT(lsm != NULL);
2128                         for (i = 0; i < lsm->lsm_entry_count; i++) {
2129                                 struct lov_stripe_md_entry *lse =
2130                                                 lsm->lsm_entries[i];
2131                                 int j;
2132
2133                                 if (!lsme_inited(lse))
2134                                         break;
2135
2136                                 for (j = 0; j < lse->lsme_stripe_count; j++) {
2137                                         struct lov_oinfo *loi =
2138                                                         lse->lsme_oinfo[j];
2139
2140                                         if (lov_oinfo_is_dummy(loi))
2141                                                 continue;
2142
2143                                         if (loi->loi_ar.ar_rc && !rc)
2144                                                 rc = loi->loi_ar.ar_rc;
2145                                         loi->loi_ar.ar_rc = 0;
2146                                 }
2147                         }
2148                 }
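                /* fallthrough - the composite case shares the break below */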
2149                 case LLT_RELEASED:
2150                 case LLT_EMPTY:
2151                         break;
2152                 default:
2153                         LBUG();
2154                 }
2155                 lov_conf_thaw(lov);
2156         }
2157         RETURN(rc);
2158 }
2159 EXPORT_SYMBOL(lov_read_and_clear_async_rc);
2160
2161 /** @} lov */