1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * Implementation of cl_object for LOV layer.
33  *
34  *   Author: Nikita Danilov <nikita.danilov@sun.com>
35  *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
36  */
37
38 #define DEBUG_SUBSYSTEM S_LOV
39
40 #include "lov_cl_internal.h"
41
42 static inline struct lov_device *lov_object_dev(struct lov_object *obj)
43 {
44         return lu2lov_dev(obj->lo_cl.co_lu.lo_dev);
45 }
46
47 /** \addtogroup lov
48  *  @{
49  */
50
51 /*****************************************************************************
52  *
53  * Layout operations.
54  *
55  */
56
57 struct lov_layout_operations {
58         int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
59                         struct lov_object *lov, struct lov_stripe_md *lsm,
60                         const struct cl_object_conf *conf,
61                         union lov_layout_state *state);
62         int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
63                            union lov_layout_state *state);
64         void (*llo_fini)(const struct lu_env *env, struct lov_object *lov,
65                          union lov_layout_state *state);
66         int  (*llo_print)(const struct lu_env *env, void *cookie,
67                           lu_printer_t p, const struct lu_object *o);
68         int  (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
69                               struct cl_page *page, pgoff_t index);
70         int  (*llo_lock_init)(const struct lu_env *env,
71                               struct cl_object *obj, struct cl_lock *lock,
72                               const struct cl_io *io);
73         int  (*llo_io_init)(const struct lu_env *env,
74                             struct cl_object *obj, struct cl_io *io);
75         int  (*llo_getattr)(const struct lu_env *env, struct cl_object *obj,
76                             struct cl_attr *attr);
77 };
78
79 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
80
81 static void lov_lsm_put(struct lov_stripe_md *lsm)
82 {
83         if (lsm != NULL)
84                 lov_free_memmd(&lsm);
85 }
86
87 /*****************************************************************************
88  *
89  * Lov object layout operations.
90  *
91  */
92
93 static struct cl_object *lov_sub_find(const struct lu_env *env,
94                                       struct cl_device *dev,
95                                       const struct lu_fid *fid,
96                                       const struct cl_object_conf *conf)
97 {
98         struct lu_object *o;
99
100         ENTRY;
101
102         o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
103         LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
104         RETURN(lu2cl(o));
105 }
106
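/*
 * Shift the page-slice offsets of a stripe's sub-objects by the parent
 * object's current page buffer size and return the stripe's own page buffer
 * size.  When @stripe is NULL, return the space that remains in the parent's
 * page buffer after the lov_page slice itself.
 */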
107 static int lov_page_slice_fixup(struct lov_object *lov,
108                                 struct cl_object *stripe)
109 {
110         struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
111         struct cl_object *o;
112
113         if (stripe == NULL)
114                 return hdr->coh_page_bufsize - lov->lo_cl.co_slice_off -
115                        cfs_size_round(sizeof(struct lov_page));
116
117         cl_object_for_each(o, stripe)
118                 o->co_slice_off += hdr->coh_page_bufsize;
119
120         return cl_object_header(stripe)->coh_page_bufsize;
121 }
122
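/*
 * Attach a stripe sub-object to its LOV parent object.  Returns -EAGAIN if
 * the stripe is still owned by a stale parent whose layout has changed, and
 * -EIO if it is owned by another valid parent.
 */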
123 static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
124                         struct cl_object *subobj, struct lov_oinfo *oinfo,
125                         int idx)
126 {
127         struct cl_object_header *hdr;
128         struct cl_object_header *subhdr;
129         struct cl_object_header *parent;
130         int entry = lov_comp_entry(idx);
131         int stripe = lov_comp_stripe(idx);
132         int result;
133
134         if (OBD_FAIL_CHECK(OBD_FAIL_LOV_INIT)) {
135                 /* For sanity:test_206.
136                  * Do not leave the object in cache to avoid accessing
137                  * freed memory. This is because osc_object is referring to
138                  * lov_oinfo of lsm_stripe_data which will be freed due to
139                  * this failure. */
140                 cl_object_kill(env, subobj);
141                 cl_object_put(env, subobj);
142                 return -EIO;
143         }
144
145         hdr = cl_object_header(lov2cl(lov));
146         subhdr = cl_object_header(subobj);
147
148         CDEBUG(D_INODE, DFID"@%p[%d:%d] -> "DFID"@%p: ostid: "DOSTID
149                " ost idx: %d gen: %d\n",
150                PFID(lu_object_fid(&subobj->co_lu)), subhdr, entry, stripe,
151                PFID(lu_object_fid(lov2lu(lov))), hdr, POSTID(&oinfo->loi_oi),
152                oinfo->loi_ost_idx, oinfo->loi_ost_gen);
153
154         /* reuse ->coh_attr_guard to protect coh_parent change */
155         spin_lock(&subhdr->coh_attr_guard);
156         parent = subhdr->coh_parent;
157         if (parent == NULL) {
158                 struct lovsub_object *lso = cl2lovsub(subobj);
159
160                 subhdr->coh_parent = hdr;
161                 spin_unlock(&subhdr->coh_attr_guard);
162                 subhdr->coh_nesting = hdr->coh_nesting + 1;
163                 lu_object_ref_add(&subobj->co_lu, "lov-parent", lov);
164                 lso->lso_super = lov;
165                 lso->lso_index = idx;
166                 result = 0;
167         } else {
168                 struct lu_object  *old_obj;
169                 struct lov_object *old_lov;
170                 unsigned int mask = D_INODE;
171
172                 spin_unlock(&subhdr->coh_attr_guard);
173                 old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
174                 LASSERT(old_obj != NULL);
175                 old_lov = cl2lov(lu2cl(old_obj));
176                 if (old_lov->lo_layout_invalid) {
177                         /* the object's layout has already changed but has
178                          * not been refreshed yet */
179                         lu_object_unhash(env, &subobj->co_lu);
180                         result = -EAGAIN;
181                 } else {
182                         mask = D_ERROR;
183                         result = -EIO;
184                 }
185
186                 LU_OBJECT_DEBUG(mask, env, &subobj->co_lu,
187                                 "stripe %d is already owned.", idx);
188                 LU_OBJECT_DEBUG(mask, env, old_obj, "owned.");
189                 LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n");
190                 cl_object_put(env, subobj);
191         }
192         return result;
193 }
194
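/*
 * Instantiate the stripe sub-objects of a RAID0 layout entry and attach them
 * to the LOV object.  On success, the page slice size required by the
 * stripes is returned.
 */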
195 static int lov_init_raid0(const struct lu_env *env, struct lov_device *dev,
196                           struct lov_object *lov, unsigned int index,
197                           const struct cl_object_conf *conf,
198                           struct lov_layout_entry *lle)
199 {
200         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
201         struct lov_thread_info *lti = lov_env_info(env);
202         struct cl_object_conf *subconf = &lti->lti_stripe_conf;
203         struct lu_fid *ofid = &lti->lti_fid;
204         struct cl_object *stripe;
205         struct lov_stripe_md_entry *lse  = lov_lse(lov, index);
206         int result;
207         int psz, sz;
208         int i;
209
210         ENTRY;
211
212         spin_lock_init(&r0->lo_sub_lock);
213         r0->lo_nr = lse->lsme_stripe_count;
214         LASSERT(r0->lo_nr <= lov_targets_nr(dev));
215
216         OBD_ALLOC_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
217         if (r0->lo_sub == NULL)
218                 GOTO(out, result = -ENOMEM);
219
220         psz = 0;
221         result = 0;
222         memset(subconf, 0, sizeof(*subconf));
223
224         /*
225          * Create stripe cl_objects.
226          */
227         for (i = 0; i < r0->lo_nr; ++i) {
228                 struct cl_device *subdev;
229                 struct lov_oinfo *oinfo = lse->lsme_oinfo[i];
230                 int ost_idx = oinfo->loi_ost_idx;
231
232                 if (lov_oinfo_is_dummy(oinfo))
233                         continue;
234
235                 result = ostid_to_fid(ofid, &oinfo->loi_oi, oinfo->loi_ost_idx);
236                 if (result != 0)
237                         GOTO(out, result);
238
239                 if (dev->ld_target[ost_idx] == NULL) {
240                         CERROR("%s: OST %04x is not initialized\n",
241                                lov2obd(dev->ld_lov)->obd_name, ost_idx);
242                         GOTO(out, result = -EIO);
243                 }
244
245                 subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
246                 subconf->u.coc_oinfo = oinfo;
247                 LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
248                 /* In the function below, .hs_keycmp resolves to
249                  * lu_obj_hop_keycmp() */
250                 /* coverity[overrun-buffer-val] */
251                 stripe = lov_sub_find(env, subdev, ofid, subconf);
252                 if (IS_ERR(stripe))
253                         GOTO(out, result = PTR_ERR(stripe));
254
255                 result = lov_init_sub(env, lov, stripe, oinfo,
256                                       lov_comp_index(index, i));
257                 if (result == -EAGAIN) { /* try again */
258                         --i;
259                         result = 0;
260                         continue;
261                 }
262
263                 if (result == 0) {
264                         r0->lo_sub[i] = cl2lovsub(stripe);
265
266                         sz = lov_page_slice_fixup(lov, stripe);
267                         LASSERT(ergo(psz > 0, psz == sz));
268                         psz = sz;
269                 }
270         }
271         if (result == 0)
272                 result = psz;
273 out:
274         RETURN(result);
275 }
276
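/*
 * Kill a single stripe sub-object and wait until it is actually freed, i.e.
 * until lovsub_object_free() has cleared the corresponding ->lo_sub[] slot.
 */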
277 static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
278                                struct lov_layout_raid0 *r0,
279                                struct lovsub_object *los, int idx)
280 {
281         struct cl_object        *sub;
282         struct lu_site          *site;
283         struct lu_site_bkt_data *bkt;
284         wait_queue_t          *waiter;
285
286         LASSERT(r0->lo_sub[idx] == los);
287
288         sub  = lovsub2cl(los);
289         site = sub->co_lu.lo_dev->ld_site;
290         bkt  = lu_site_bkt_from_fid(site, &sub->co_lu.lo_header->loh_fid);
291
292         cl_object_kill(env, sub);
293         /* release a reference to the sub-object and ... */
294         lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
295         cl_object_put(env, sub);
296
297         /* ... wait until it is actually destroyed---sub-object clears its
298          * ->lo_sub[] slot in lovsub_object_free() */
299         if (r0->lo_sub[idx] == los) {
300                 waiter = &lov_env_info(env)->lti_waiter;
301                 init_waitqueue_entry(waiter, current);
302                 add_wait_queue(&bkt->lsb_marche_funebre, waiter);
303                 set_current_state(TASK_UNINTERRUPTIBLE);
304                 while (1) {
305                         /* this wait-queue is signaled at the end of
306                          * lu_object_free(). */
307                         set_current_state(TASK_UNINTERRUPTIBLE);
308                         spin_lock(&r0->lo_sub_lock);
309                         if (r0->lo_sub[idx] == los) {
310                                 spin_unlock(&r0->lo_sub_lock);
311                                 schedule();
312                         } else {
313                                 spin_unlock(&r0->lo_sub_lock);
314                                 set_current_state(TASK_RUNNING);
315                                 break;
316                         }
317                 }
318                 remove_wait_queue(&bkt->lsb_marche_funebre, waiter);
319         }
320         LASSERT(r0->lo_sub[idx] == NULL);
321 }
322
323 static void lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
324                              struct lov_layout_entry *lle)
325 {
326         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
327
328         ENTRY;
329
330         if (r0->lo_sub != NULL) {
331                 int i;
332
333                 for (i = 0; i < r0->lo_nr; ++i) {
334                         struct lovsub_object *los = r0->lo_sub[i];
335
336                         if (los != NULL) {
337                                 cl_object_prune(env, &los->lso_cl);
338                                 /*
339                                  * If top-level object is to be evicted from
340                                  * the cache, so are its sub-objects.
341                                  */
342                                 lov_subobject_kill(env, lov, r0, los, i);
343                         }
344                 }
345         }
346
347         EXIT;
348 }
349
350 static void lov_fini_raid0(const struct lu_env *env,
351                            struct lov_layout_entry *lle)
352 {
353         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
354
355         if (r0->lo_sub != NULL) {
356                 OBD_FREE_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
357                 r0->lo_sub = NULL;
358         }
359 }
360
361 static int lov_print_raid0(const struct lu_env *env, void *cookie,
362                            lu_printer_t p, const struct lov_layout_entry *lle)
363 {
364         const struct lov_layout_raid0 *r0 = &lle->lle_raid0;
365         int i;
366
367         for (i = 0; i < r0->lo_nr; ++i) {
368                 struct lu_object *sub;
369
370                 if (r0->lo_sub[i] != NULL) {
371                         sub = lovsub2lu(r0->lo_sub[i]);
372                         lu_object_print(env, cookie, p, sub);
373                 } else {
374                         (*p)(env, cookie, "sub %d absent\n", i);
375                 }
376         }
377         return 0;
378 }
379
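/*
 * Merge the LVB data and known minimum size (KMS) of all stripes of a RAID0
 * layout entry into the entry's cached cl_attr.  The merged result is kept
 * in lo_attr and reused while lo_attr_valid is set.
 */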
380 static int lov_attr_get_raid0(const struct lu_env *env, struct lov_object *lov,
381                               unsigned int index, struct lov_layout_entry *lle,
382                               struct cl_attr **lov_attr)
383 {
384         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
385         struct lov_stripe_md *lsm = lov->lo_lsm;
386         struct ost_lvb *lvb = &lov_env_info(env)->lti_lvb;
387         struct cl_attr *attr = &r0->lo_attr;
388         __u64 kms = 0;
389         int result = 0;
390
391         if (r0->lo_attr_valid) {
392                 *lov_attr = attr;
393                 return 0;
394         }
395
396         memset(lvb, 0, sizeof(*lvb));
397
398         /* XXX: timestamps can be made negative by sanity:test_39m,
399          * how can that be? */
400         lvb->lvb_atime = LLONG_MIN;
401         lvb->lvb_ctime = LLONG_MIN;
402         lvb->lvb_mtime = LLONG_MIN;
403
404         /*
405          * XXX that should be replaced with a loop over sub-objects,
406          * doing cl_object_attr_get() on them. But for now, let's
407          * reuse old lov code.
408          */
409
410         /*
411          * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
412          * happy. It's not needed, because new code uses
413          * ->coh_attr_guard spin-lock to protect consistency of
414          * sub-object attributes.
415          */
416         lov_stripe_lock(lsm);
417         result = lov_merge_lvb_kms(lsm, index, lvb, &kms);
418         lov_stripe_unlock(lsm);
419         if (result == 0) {
420                 cl_lvb2attr(attr, lvb);
421                 attr->cat_kms = kms;
422                 r0->lo_attr_valid = 1;
423                 *lov_attr = attr;
424         }
425
426         return result;
427 }
428
429 static struct lov_comp_layout_entry_ops raid0_ops = {
430         .lco_init      = lov_init_raid0,
431         .lco_fini      = lov_fini_raid0,
432         .lco_getattr   = lov_attr_get_raid0,
433 };
434
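/*
 * Return the attributes of the Data-on-MDT component from its cached LVB,
 * clamping the size to the component end so that data beyond the DoM extent
 * is not counted.
 */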
435 static int lov_attr_get_dom(const struct lu_env *env, struct lov_object *lov,
436                             unsigned int index, struct lov_layout_entry *lle,
437                             struct cl_attr **lov_attr)
438 {
439         struct lov_layout_dom *dom = &lle->lle_dom;
440         struct lov_oinfo *loi = dom->lo_loi;
441         struct cl_attr *attr = &dom->lo_dom_r0.lo_attr;
442
443         if (dom->lo_dom_r0.lo_attr_valid) {
444                 *lov_attr = attr;
445                 return 0;
446         }
447
448         if (OST_LVB_IS_ERR(loi->loi_lvb.lvb_blocks))
449                 return OST_LVB_GET_ERR(loi->loi_lvb.lvb_blocks);
450
451         cl_lvb2attr(attr, &loi->loi_lvb);
452
453         /* DoM component size can be bigger than stripe size after
454          * client's setattr RPC, so do not count anything beyond
455          * component end. Alternatively, check that limit on server
456          * and do not allow size overflow there. */
457         if (attr->cat_size > lle->lle_extent->e_end)
458                 attr->cat_size = lle->lle_extent->e_end;
459
460         attr->cat_kms = attr->cat_size;
461
462         dom->lo_dom_r0.lo_attr_valid = 1;
463         *lov_attr = attr;
464
465         return 0;
466 }
467
468 /**
469  * Look up the FLD to get the MDS index of the given DOM object FID.
470  *
471  * \param[in]  ld       LOV device
472  * \param[in]  fid      FID to look up
473  * \param[out] nr       index in the MDC array, returned to the caller
474  *
475  * \retval              0 and \a nr filled with the MDC array index if successful
476  * \retval              negative value on error
477  */
478 static int lov_fld_lookup(struct lov_device *ld, const struct lu_fid *fid,
479                           __u32 *nr)
480 {
481         __u32 mds_idx;
482         int i, rc;
483
484         ENTRY;
485
486         rc = fld_client_lookup(&ld->ld_lmv->u.lmv.lmv_fld, fid_seq(fid),
487                                &mds_idx, LU_SEQ_RANGE_MDT, NULL);
488         if (rc) {
489                 CERROR("%s: error while looking for mds number. Seq %#llx"
490                        ", err = %d\n", lu_dev_name(cl2lu_dev(&ld->ld_cl)),
491                        fid_seq(fid), rc);
492                 RETURN(rc);
493         }
494
495         CDEBUG(D_INODE, "FLD lookup got mds #%x for fid="DFID"\n",
496                mds_idx, PFID(fid));
497
498         /* find proper MDC device in the array */
499         for (i = 0; i < ld->ld_md_tgts_nr; i++) {
500                 if (ld->ld_md_tgts[i].ldm_mdc != NULL &&
501                     ld->ld_md_tgts[i].ldm_idx == mds_idx)
502                         break;
503         }
504
505         if (i == ld->ld_md_tgts_nr) {
506                 CERROR("%s: cannot find corresponding MDC device for mds #%x "
507                        "for fid="DFID"\n", lu_dev_name(cl2lu_dev(&ld->ld_cl)),
508                        mds_idx, PFID(fid));
509                 rc = -EINVAL;
510         } else {
511                 *nr = i;
512         }
513         RETURN(rc);
514 }
515
516 /**
517  * Implementation of lov_comp_layout_entry_ops::lco_init for DOM object.
518  *
519  * Init the DOM object for the first time. It also prepares a RAID0 entry
520  * for it, to be used by the methods shared with ordinary RAID0 entries.
521  *
522  * \param[in] env       execution environment
523  * \param[in] dev       LOV device
524  * \param[in] lov       LOV object
525  * \param[in] index     Composite layout entry index in LSM
526  * \param[in] lle       Composite LOV layout entry
527  */
528 static int lov_init_dom(const struct lu_env *env, struct lov_device *dev,
529                         struct lov_object *lov, unsigned int index,
530                         const struct cl_object_conf *conf,
531                         struct lov_layout_entry *lle)
532 {
533         struct lov_thread_info *lti = lov_env_info(env);
534         struct lov_stripe_md_entry *lsme = lov_lse(lov, index);
535         struct cl_object *clo;
536         struct lu_object *o = lov2lu(lov);
537         const struct lu_fid *fid = lu_object_fid(o);
538         struct cl_device *mdcdev;
539         struct lov_oinfo *loi = NULL;
540         struct cl_object_conf *sconf = &lti->lti_stripe_conf;
541
542         int rc;
543         __u32 idx = 0;
544
545         ENTRY;
546
547         LASSERT(index == 0);
548
549         /* find proper MDS device */
550         rc = lov_fld_lookup(dev, fid, &idx);
551         if (rc)
552                 RETURN(rc);
553
554         LASSERTF(dev->ld_md_tgts[idx].ldm_mdc != NULL,
555                  "LOV md target[%u] is NULL\n", idx);
556
557         /* check that the lsm entry is a DoM one; more checks are needed */
558         LASSERT(lsme->lsme_stripe_count == 0);
559
560         /*
561          * Create lower cl_objects.
562          */
563         mdcdev = dev->ld_md_tgts[idx].ldm_mdc;
564
565         LASSERTF(mdcdev != NULL, "non-initialized mdc subdev\n");
566
567         /* a DoM object has no oinfo in the LSM entry, so allocate one here */
568         OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, GFP_NOFS);
569         if (loi == NULL)
570                 RETURN(-ENOMEM);
571
572         fid_to_ostid(lu_object_fid(lov2lu(lov)), &loi->loi_oi);
573
574         sconf->u.coc_oinfo = loi;
575 again:
576         clo = lov_sub_find(env, mdcdev, fid, sconf);
577         if (IS_ERR(clo))
578                 GOTO(out, rc = PTR_ERR(clo));
579
580         rc = lov_init_sub(env, lov, clo, loi, lov_comp_index(index, 0));
581         if (rc == -EAGAIN) /* try again */
582                 goto again;
583         else if (rc != 0)
584                 GOTO(out, rc);
585
586         lle->lle_dom.lo_dom = cl2lovsub(clo);
587         spin_lock_init(&lle->lle_dom.lo_dom_r0.lo_sub_lock);
588         lle->lle_dom.lo_dom_r0.lo_nr = 1;
589         lle->lle_dom.lo_dom_r0.lo_sub = &lle->lle_dom.lo_dom;
590         lle->lle_dom.lo_loi = loi;
591
592         rc = lov_page_slice_fixup(lov, clo);
593         RETURN(rc);
594
595 out:
596         if (loi != NULL)
597                 OBD_SLAB_FREE_PTR(loi, lov_oinfo_slab);
598         return rc;
599 }
600
601 /**
602  * Implementation of lov_comp_layout_entry_ops::lco_fini for a DOM object.
603  *
604  * Finish the DOM layout entry: drop the cached MDT sub-object pointer and
605  * free the lov_oinfo allocated in lov_init_dom().
606  *
607  * \param[in] env       execution environment
608  * \param[in] lle       composite LOV layout entry
609  */
610 static void lov_fini_dom(const struct lu_env *env,
611                          struct lov_layout_entry *lle)
612 {
613         if (lle->lle_dom.lo_dom != NULL)
614                 lle->lle_dom.lo_dom = NULL;
615         if (lle->lle_dom.lo_loi != NULL)
616                 OBD_SLAB_FREE_PTR(lle->lle_dom.lo_loi, lov_oinfo_slab);
617 }
618
619 static struct lov_comp_layout_entry_ops dom_ops = {
620         .lco_init = lov_init_dom,
621         .lco_fini = lov_fini_dom,
622         .lco_getattr = lov_attr_get_dom,
623 };
624
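/*
 * Implementation of lov_layout_operations::llo_init for composite layouts
 * (PFL/FLR): set up every layout entry and mirror descriptor from the LSM
 * and choose the preferred mirror.
 */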
625 static int lov_init_composite(const struct lu_env *env, struct lov_device *dev,
626                               struct lov_object *lov, struct lov_stripe_md *lsm,
627                               const struct cl_object_conf *conf,
628                               union lov_layout_state *state)
629 {
630         struct lov_layout_composite *comp = &state->composite;
631         struct lov_layout_entry *lle;
632         struct lov_mirror_entry *lre;
633         unsigned int entry_count;
634         unsigned int psz = 0;
635         unsigned int mirror_count;
636         int flr_state = lsm->lsm_flags & LCM_FL_FLR_MASK;
637         int result = 0;
638         int i, j;
639
640         ENTRY;
641
642         LASSERT(lsm->lsm_entry_count > 0);
643         LASSERT(lov->lo_lsm == NULL);
644         lov->lo_lsm = lsm_addref(lsm);
645         lov->lo_layout_invalid = true;
646
647         dump_lsm(D_INODE, lsm);
648
649         entry_count = lsm->lsm_entry_count;
650
651         spin_lock_init(&comp->lo_write_lock);
652         comp->lo_flags = lsm->lsm_flags;
653         comp->lo_mirror_count = lsm->lsm_mirror_count + 1;
654         comp->lo_entry_count = lsm->lsm_entry_count;
655         comp->lo_preferred_mirror = -1;
656
657         if (equi(flr_state == LCM_FL_NOT_FLR, comp->lo_mirror_count > 1))
658                 RETURN(-EINVAL);
659
660         OBD_ALLOC(comp->lo_mirrors,
661                   comp->lo_mirror_count * sizeof(*comp->lo_mirrors));
662         if (comp->lo_mirrors == NULL)
663                 RETURN(-ENOMEM);
664
665         OBD_ALLOC(comp->lo_entries, entry_count * sizeof(*comp->lo_entries));
666         if (comp->lo_entries == NULL)
667                 RETURN(-ENOMEM);
668
669         /* Initialize all entry types and extent data first */
670         for (i = 0, j = 0, mirror_count = 1; i < entry_count; i++) {
671                 int mirror_id = 0;
672
673                 lle = &comp->lo_entries[i];
674
675                 lle->lle_lsme = lsm->lsm_entries[i];
676                 lle->lle_type = lov_entry_type(lle->lle_lsme);
677                 switch (lle->lle_type) {
678                 case LOV_PATTERN_RAID0:
679                         lle->lle_comp_ops = &raid0_ops;
680                         break;
681                 case LOV_PATTERN_MDT:
682                         lle->lle_comp_ops = &dom_ops;
683                         break;
684                 default:
685                         CERROR("%s: unknown composite layout entry type %i\n",
686                                lov2obd(dev->ld_lov)->obd_name,
687                                lsm->lsm_entries[i]->lsme_pattern);
688                         dump_lsm(D_ERROR, lsm);
689                         RETURN(-EIO);
690                 }
691
692                 lle->lle_extent = &lle->lle_lsme->lsme_extent;
693                 lle->lle_valid = !(lle->lle_lsme->lsme_flags & LCME_FL_STALE);
694
695                 if (flr_state != LCM_FL_NOT_FLR)
696                         mirror_id = mirror_id_of(lle->lle_lsme->lsme_id);
697
698                 lre = &comp->lo_mirrors[j];
699                 if (i > 0) {
700                         if (mirror_id == lre->lre_mirror_id) {
701                                 lre->lre_valid |= lle->lle_valid;
702                                 lre->lre_stale |= !lle->lle_valid;
703                                 lre->lre_end = i;
704                                 continue;
705                         }
706
707                         /* new mirror detected, assume that the mirrors
708                          * are sorted in the layout */
709                         ++mirror_count;
710                         ++j;
711                         if (j >= comp->lo_mirror_count)
712                                 break;
713
714                         lre = &comp->lo_mirrors[j];
715                 }
716
717                 /* entries must be sorted by mirrors */
718                 lre->lre_mirror_id = mirror_id;
719                 lre->lre_start = lre->lre_end = i;
720                 lre->lre_preferred = (lle->lle_lsme->lsme_flags &
721                                         LCME_FL_PREFERRED);
722                 lre->lre_valid = lle->lle_valid;
723                 lre->lre_stale = !lle->lle_valid;
724         }
725
726         /* sanity check for FLR */
727         if (mirror_count != comp->lo_mirror_count) {
728                 CDEBUG(D_INODE, DFID
729                        " doesn't have the # of mirrors it claims, %u/%u\n",
730                        PFID(lu_object_fid(lov2lu(lov))), mirror_count,
731                        comp->lo_mirror_count + 1);
732
733                 GOTO(out, result = -EINVAL);
734         }
735
736         lov_foreach_layout_entry(lov, lle) {
737                 int index = lov_layout_entry_index(lov, lle);
738
739                 /*
740                  * If this component has not been instantiated on the MDS
741                  * side, then for a PFL layout the components beyond it will
742                  * be instantiated dynamically later, on file write/trunc ops.
743                  */
744                 if (!lsme_inited(lle->lle_lsme))
745                         continue;
746
747                 result = lle->lle_comp_ops->lco_init(env, dev, lov, index,
748                                                      conf, lle);
749                 if (result < 0)
750                         break;
751
752                 LASSERT(ergo(psz > 0, psz == result));
753                 psz = result;
754         }
755
756         if (psz > 0)
757                 cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;
758
759         /* decide the preferred mirror */
760         mirror_count = 0, i = 0;
761         lov_foreach_mirror_entry(lov, lre) {
762                 i++;
763                 if (lre->lre_stale)
764                         continue;
765
766                 mirror_count++; /* valid mirror */
767
768                 if (lre->lre_preferred || comp->lo_preferred_mirror < 0)
769                         comp->lo_preferred_mirror = i - 1;
770         }
771         if (mirror_count == 0) {
772                 CDEBUG(D_INODE, DFID
773                        " doesn't have any valid mirrors\n",
774                        PFID(lu_object_fid(lov2lu(lov))));
775
776                 GOTO(out, result = -EINVAL);
777         }
778
779         if (OBD_FAIL_CHECK(OBD_FAIL_FLR_RANDOM_PICK_MIRROR)) {
780                 unsigned int seq;
781
782                 get_random_bytes(&seq, sizeof(seq));
783                 seq %= mirror_count;
784
785                 i = 0;
786                 lov_foreach_mirror_entry(lov, lre) {
787                         i++;
788                         if (lre->lre_stale)
789                                 continue;
790
791                         if (!seq--) {
792                                 comp->lo_preferred_mirror = i - 1;
793                                 break;
794                         }
795                 }
796         }
797
798         LASSERT(comp->lo_preferred_mirror >= 0);
799
800         EXIT;
801 out:
802         return result > 0 ? 0 : result;
803 }
804
805 static int lov_init_empty(const struct lu_env *env, struct lov_device *dev,
806                           struct lov_object *lov, struct lov_stripe_md *lsm,
807                           const struct cl_object_conf *conf,
808                           union lov_layout_state *state)
809 {
810         return 0;
811 }
812
813 static int lov_init_released(const struct lu_env *env,
814                              struct lov_device *dev, struct lov_object *lov,
815                              struct lov_stripe_md *lsm,
816                              const struct cl_object_conf *conf,
817                              union lov_layout_state *state)
818 {
819         LASSERT(lsm != NULL);
820         LASSERT(lsm->lsm_is_released);
821         LASSERT(lov->lo_lsm == NULL);
822
823         lov->lo_lsm = lsm_addref(lsm);
824         return 0;
825 }
826
827 static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
828                             union lov_layout_state *state)
829 {
830         LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
831
832         lov_layout_wait(env, lov);
833         return 0;
834 }
835
836 static int lov_delete_composite(const struct lu_env *env,
837                                 struct lov_object *lov,
838                                 union lov_layout_state *state)
839 {
840         struct lov_layout_entry *entry;
841         struct lov_layout_composite *comp = &state->composite;
842
843         ENTRY;
844
845         dump_lsm(D_INODE, lov->lo_lsm);
846
847         lov_layout_wait(env, lov);
848         if (comp->lo_entries)
849                 lov_foreach_layout_entry(lov, entry)
850                         lov_delete_raid0(env, lov, entry);
851
852         RETURN(0);
853 }
854
855 static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov,
856                            union lov_layout_state *state)
857 {
858         LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
859 }
860
861 static void lov_fini_composite(const struct lu_env *env,
862                                struct lov_object *lov,
863                                union lov_layout_state *state)
864 {
865         struct lov_layout_composite *comp = &state->composite;
866         ENTRY;
867
868         if (comp->lo_entries != NULL) {
869                 struct lov_layout_entry *entry;
870
871                 lov_foreach_layout_entry(lov, entry)
872                         entry->lle_comp_ops->lco_fini(env, entry);
873
874                 OBD_FREE(comp->lo_entries,
875                          comp->lo_entry_count * sizeof(*comp->lo_entries));
876                 comp->lo_entries = NULL;
877         }
878
879         if (comp->lo_mirrors != NULL) {
880                 OBD_FREE(comp->lo_mirrors,
881                          comp->lo_mirror_count * sizeof(*comp->lo_mirrors));
882                 comp->lo_mirrors = NULL;
883         }
884
885         memset(comp, 0, sizeof(*comp));
886
887         dump_lsm(D_INODE, lov->lo_lsm);
888         lov_free_memmd(&lov->lo_lsm);
889
890         EXIT;
891 }
892
893 static void lov_fini_released(const struct lu_env *env, struct lov_object *lov,
894                                 union lov_layout_state *state)
895 {
896         ENTRY;
897         dump_lsm(D_INODE, lov->lo_lsm);
898         lov_free_memmd(&lov->lo_lsm);
899         EXIT;
900 }
901
902 static int lov_print_empty(const struct lu_env *env, void *cookie,
903                            lu_printer_t p, const struct lu_object *o)
904 {
905         (*p)(env, cookie, "empty %d\n", lu2lov(o)->lo_layout_invalid);
906         return 0;
907 }
908
909 static int lov_print_composite(const struct lu_env *env, void *cookie,
910                                lu_printer_t p, const struct lu_object *o)
911 {
912         struct lov_object *lov = lu2lov(o);
913         struct lov_stripe_md *lsm = lov->lo_lsm;
914         int i;
915
916         (*p)(env, cookie, "entries: %d, %s, lsm{%p 0x%08X %d %u}:\n",
917              lsm->lsm_entry_count,
918              lov->lo_layout_invalid ? "invalid" : "valid", lsm,
919              lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
920              lsm->lsm_layout_gen);
921
922         for (i = 0; i < lsm->lsm_entry_count; i++) {
923                 struct lov_stripe_md_entry *lse = lsm->lsm_entries[i];
924                 struct lov_layout_entry *lle = lov_entry(lov, i);
925
926                 (*p)(env, cookie,
927                      DEXT ": { 0x%08X, %u, %#x, %u, %#x, %u, %u }\n",
928                      PEXT(&lse->lsme_extent), lse->lsme_magic,
929                      lse->lsme_id, lse->lsme_pattern, lse->lsme_layout_gen,
930                      lse->lsme_flags, lse->lsme_stripe_count,
931                      lse->lsme_stripe_size);
932                 lov_print_raid0(env, cookie, p, lle);
933         }
934
935         return 0;
936 }
937
938 static int lov_print_released(const struct lu_env *env, void *cookie,
939                                 lu_printer_t p, const struct lu_object *o)
940 {
941         struct lov_object       *lov = lu2lov(o);
942         struct lov_stripe_md    *lsm = lov->lo_lsm;
943
944         (*p)(env, cookie,
945                 "released: %s, lsm{%p 0x%08X %d %u}:\n",
946                 lov->lo_layout_invalid ? "invalid" : "valid", lsm,
947                 lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
948                 lsm->lsm_layout_gen);
949         return 0;
950 }
951
952 /**
953  * Implements cl_object_operations::coo_attr_get() method for an object
954  * without stripes (LLT_EMPTY layout type).
955  *
956  * The only attribute this layer is authoritative for in this case is
957  * cl_attr::cat_blocks, which is 0.
958  */
959 static int lov_attr_get_empty(const struct lu_env *env, struct cl_object *obj,
960                               struct cl_attr *attr)
961 {
962         attr->cat_blocks = 0;
963         return 0;
964 }
965
966 static int lov_attr_get_composite(const struct lu_env *env,
967                                   struct cl_object *obj,
968                                   struct cl_attr *attr)
969 {
970         struct lov_object       *lov = cl2lov(obj);
971         struct lov_layout_entry *entry;
972         int                      result = 0;
973
974         ENTRY;
975
976         attr->cat_size = 0;
977         attr->cat_blocks = 0;
978         lov_foreach_layout_entry(lov, entry) {
979                 struct cl_attr *lov_attr = NULL;
980                 int index = lov_layout_entry_index(lov, entry);
981
982                 if (!entry->lle_valid)
983                         continue;
984
985                 /* PFL: This component has not been init-ed. */
986                 if (!lsm_entry_inited(lov->lo_lsm, index))
987                         continue;
988
989                 result = entry->lle_comp_ops->lco_getattr(env, lov, index,
990                                                           entry, &lov_attr);
991                 if (result < 0)
992                         RETURN(result);
993
994                 if (lov_attr == NULL)
995                         continue;
996
997                 CDEBUG(D_INODE, "COMP ID #%i: s=%llu m=%llu a=%llu c=%llu "
998                        "b=%llu\n", index - 1, lov_attr->cat_size,
999                        lov_attr->cat_mtime, lov_attr->cat_atime,
1000                        lov_attr->cat_ctime, lov_attr->cat_blocks);
1001
1002                 /* merge results */
1003                 attr->cat_blocks += lov_attr->cat_blocks;
1004                 if (attr->cat_size < lov_attr->cat_size)
1005                         attr->cat_size = lov_attr->cat_size;
1006                 if (attr->cat_kms < lov_attr->cat_kms)
1007                         attr->cat_kms = lov_attr->cat_kms;
1008                 if (attr->cat_atime < lov_attr->cat_atime)
1009                         attr->cat_atime = lov_attr->cat_atime;
1010                 if (attr->cat_ctime < lov_attr->cat_ctime)
1011                         attr->cat_ctime = lov_attr->cat_ctime;
1012                 if (attr->cat_mtime < lov_attr->cat_mtime)
1013                         attr->cat_mtime = lov_attr->cat_mtime;
1014         }
1015
1016         RETURN(0);
1017 }
1018
1019 static const struct lov_layout_operations lov_dispatch[] = {
1020         [LLT_EMPTY] = {
1021                 .llo_init      = lov_init_empty,
1022                 .llo_delete    = lov_delete_empty,
1023                 .llo_fini      = lov_fini_empty,
1024                 .llo_print     = lov_print_empty,
1025                 .llo_page_init = lov_page_init_empty,
1026                 .llo_lock_init = lov_lock_init_empty,
1027                 .llo_io_init   = lov_io_init_empty,
1028                 .llo_getattr   = lov_attr_get_empty,
1029         },
1030         [LLT_RELEASED] = {
1031                 .llo_init      = lov_init_released,
1032                 .llo_delete    = lov_delete_empty,
1033                 .llo_fini      = lov_fini_released,
1034                 .llo_print     = lov_print_released,
1035                 .llo_page_init = lov_page_init_empty,
1036                 .llo_lock_init = lov_lock_init_empty,
1037                 .llo_io_init   = lov_io_init_released,
1038                 .llo_getattr   = lov_attr_get_empty,
1039         },
1040         [LLT_COMP] = {
1041                 .llo_init      = lov_init_composite,
1042                 .llo_delete    = lov_delete_composite,
1043                 .llo_fini      = lov_fini_composite,
1044                 .llo_print     = lov_print_composite,
1045                 .llo_page_init = lov_page_init_composite,
1046                 .llo_lock_init = lov_lock_init_composite,
1047                 .llo_io_init   = lov_io_init_composite,
1048                 .llo_getattr   = lov_attr_get_composite,
1049         },
1050 };
1051
1052 /**
1053  * Performs a double-dispatch based on the layout type of an object.
1054  */
1055 #define LOV_2DISPATCH_NOLOCK(obj, op, ...)              \
1056 ({                                                      \
1057         struct lov_object *__obj = (obj);               \
1058         enum lov_layout_type __llt;                     \
1059                                                         \
1060         __llt = __obj->lo_type;                         \
1061         LASSERT(__llt < ARRAY_SIZE(lov_dispatch));      \
1062         lov_dispatch[__llt].op(__VA_ARGS__);            \
1063 })
1064
1065 /**
1066  * Return lov_layout_type associated with a given lsm
1067  */
1068 static enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
1069 {
1070         if (lsm == NULL)
1071                 return LLT_EMPTY;
1072
1073         if (lsm->lsm_is_released)
1074                 return LLT_RELEASED;
1075
1076         if (lsm->lsm_magic == LOV_MAGIC_V1 ||
1077             lsm->lsm_magic == LOV_MAGIC_V3 ||
1078             lsm->lsm_magic == LOV_MAGIC_COMP_V1)
1079                 return LLT_COMP;
1080
1081         return LLT_EMPTY;
1082 }
1083
1084 static inline void lov_conf_freeze(struct lov_object *lov)
1085 {
1086         CDEBUG(D_INODE, "To take share lov(%p) owner %p/%p\n",
1087                 lov, lov->lo_owner, current);
1088         if (lov->lo_owner != current)
1089                 down_read(&lov->lo_type_guard);
1090 }
1091
1092 static inline void lov_conf_thaw(struct lov_object *lov)
1093 {
1094         CDEBUG(D_INODE, "To release share lov(%p) owner %p/%p\n",
1095                 lov, lov->lo_owner, current);
1096         if (lov->lo_owner != current)
1097                 up_read(&lov->lo_type_guard);
1098 }
1099
1100 #define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...)                       \
1101 ({                                                                      \
1102         struct lov_object                      *__obj = (obj);          \
1103         int                                     __lock = !!(lock);      \
1104         typeof(lov_dispatch[0].op(__VA_ARGS__)) __result;               \
1105                                                                         \
1106         if (__lock)                                                     \
1107                 lov_conf_freeze(__obj);                                 \
1108         __result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__);          \
1109         if (__lock)                                                     \
1110                 lov_conf_thaw(__obj);                                   \
1111         __result;                                                       \
1112 })
1113
1114 /**
1115  * Performs a locked double-dispatch based on the layout type of an object.
1116  */
1117 #define LOV_2DISPATCH(obj, op, ...)                     \
1118         LOV_2DISPATCH_MAYLOCK(obj, op, 1, __VA_ARGS__)
1119
1120 #define LOV_2DISPATCH_VOID(obj, op, ...)                                \
1121 do {                                                                    \
1122         struct lov_object                      *__obj = (obj);          \
1123         enum lov_layout_type                    __llt;                  \
1124                                                                         \
1125         lov_conf_freeze(__obj);                                         \
1126         __llt = __obj->lo_type;                                         \
1127         LASSERT(__llt < ARRAY_SIZE(lov_dispatch));                      \
1128         lov_dispatch[__llt].op(__VA_ARGS__);                            \
1129         lov_conf_thaw(__obj);                                           \
1130 } while (0)
1131
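/*
 * Take the layout type guard exclusively and record the current task as
 * owner, so that nested dispatches from the same thread skip the read lock
 * in lov_conf_freeze().
 */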
1132 static void lov_conf_lock(struct lov_object *lov)
1133 {
1134         LASSERT(lov->lo_owner != current);
1135         down_write(&lov->lo_type_guard);
1136         LASSERT(lov->lo_owner == NULL);
1137         lov->lo_owner = current;
1138         CDEBUG(D_INODE, "Took exclusive lov(%p) owner %p\n",
1139                 lov, lov->lo_owner);
1140 }
1141
1142 static void lov_conf_unlock(struct lov_object *lov)
1143 {
1144         CDEBUG(D_INODE, "To release exclusive lov(%p) owner %p\n",
1145                 lov, lov->lo_owner);
1146         lov->lo_owner = NULL;
1147         up_write(&lov->lo_type_guard);
1148 }
1149
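/* Wait until all IOs active against this object have completed. */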
1150 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
1151 {
1152         struct l_wait_info lwi = { 0 };
1153         ENTRY;
1154
1155         while (atomic_read(&lov->lo_active_ios) > 0) {
1156                 CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
1157                         PFID(lu_object_fid(lov2lu(lov))),
1158                         atomic_read(&lov->lo_active_ios));
1159
1160                 l_wait_event(lov->lo_waitq,
1161                              atomic_read(&lov->lo_active_ios) == 0, &lwi);
1162         }
1163         RETURN(0);
1164 }
1165
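/*
 * Switch the LOV object to the layout described by @lsm: prune cached pages,
 * tear down the old layout state and initialize the new one.  If applying
 * the new layout fails, the object is left as an LLT_EMPTY file.
 */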
1166 static int lov_layout_change(const struct lu_env *unused,
1167                              struct lov_object *lov, struct lov_stripe_md *lsm,
1168                              const struct cl_object_conf *conf)
1169 {
1170         enum lov_layout_type llt = lov_type(lsm);
1171         union lov_layout_state *state = &lov->u;
1172         const struct lov_layout_operations *old_ops;
1173         const struct lov_layout_operations *new_ops;
1174         struct lov_device *lov_dev = lov_object_dev(lov);
1175         struct lu_env *env;
1176         __u16 refcheck;
1177         int rc;
1178         ENTRY;
1179
1180         LASSERT(lov->lo_type < ARRAY_SIZE(lov_dispatch));
1181
1182         env = cl_env_get(&refcheck);
1183         if (IS_ERR(env))
1184                 RETURN(PTR_ERR(env));
1185
1186         LASSERT(llt < ARRAY_SIZE(lov_dispatch));
1187
1188         CDEBUG(D_INODE, DFID" from %s to %s\n",
1189                PFID(lu_object_fid(lov2lu(lov))),
1190                llt2str(lov->lo_type), llt2str(llt));
1191
1192         old_ops = &lov_dispatch[lov->lo_type];
1193         new_ops = &lov_dispatch[llt];
1194
1195         rc = cl_object_prune(env, &lov->lo_cl);
1196         if (rc != 0)
1197                 GOTO(out, rc);
1198
1199         rc = old_ops->llo_delete(env, lov, &lov->u);
1200         if (rc != 0)
1201                 GOTO(out, rc);
1202
1203         old_ops->llo_fini(env, lov, &lov->u);
1204
1205         LASSERT(atomic_read(&lov->lo_active_ios) == 0);
1206
1207         CDEBUG(D_INODE, DFID "Apply new layout lov %p, type %d\n",
1208                PFID(lu_object_fid(lov2lu(lov))), lov, llt);
1209
1210         /* page bufsize fixup */
1211         cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
1212                 lov_page_slice_fixup(lov, NULL);
1213
1214         lov->lo_type = llt;
1215         rc = new_ops->llo_init(env, lov_dev, lov, lsm, conf, state);
1216         if (rc != 0) {
1217                 struct obd_device *obd = lov2obd(lov_dev->ld_lov);
1218
1219                 CERROR("%s: cannot apply new layout on "DFID" : rc = %d\n",
1220                        obd->obd_name, PFID(lu_object_fid(lov2lu(lov))), rc);
1221                 new_ops->llo_delete(env, lov, state);
1222                 new_ops->llo_fini(env, lov, state);
1223                 /* this file becomes an EMPTY file. */
1224                 lov->lo_type = LLT_EMPTY;
1225                 GOTO(out, rc);
1226         }
1227
1228 out:
1229         cl_env_put(env, &refcheck);
1230         RETURN(rc);
1231 }
1232
1233 /*****************************************************************************
1234  *
1235  * Lov object operations.
1236  *
1237  */
1238 int lov_object_init(const struct lu_env *env, struct lu_object *obj,
1239                     const struct lu_object_conf *conf)
1240 {
1241         struct lov_object            *lov   = lu2lov(obj);
1242         struct lov_device            *dev   = lov_object_dev(lov);
1243         const struct cl_object_conf  *cconf = lu2cl_conf(conf);
1244         union lov_layout_state       *set   = &lov->u;
1245         const struct lov_layout_operations *ops;
1246         struct lov_stripe_md *lsm = NULL;
1247         int rc;
1248         ENTRY;
1249
1250         init_rwsem(&lov->lo_type_guard);
1251         atomic_set(&lov->lo_active_ios, 0);
1252         init_waitqueue_head(&lov->lo_waitq);
1253         cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
1254
1255         lov->lo_type = LLT_EMPTY;
1256         if (cconf->u.coc_layout.lb_buf != NULL) {
1257                 lsm = lov_unpackmd(dev->ld_lov,
1258                                    cconf->u.coc_layout.lb_buf,
1259                                    cconf->u.coc_layout.lb_len);
1260                 if (IS_ERR(lsm))
1261                         RETURN(PTR_ERR(lsm));
1262
1263                 dump_lsm(D_INODE, lsm);
1264         }
1265
1266         /* no locking is necessary, as object is being created */
1267         lov->lo_type = lov_type(lsm);
1268         ops = &lov_dispatch[lov->lo_type];
1269         rc = ops->llo_init(env, dev, lov, lsm, cconf, set);
1270         if (rc != 0)
1271                 GOTO(out_lsm, rc);
1272
1273 out_lsm:
1274         lov_lsm_put(lsm);
1275
1276         RETURN(rc);
1277 }
1278
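/*
 * Handle a layout configuration request from the layers above: invalidate
 * the current layout, wait for in-flight IO to drain, or apply a new layout
 * via lov_layout_change().
 */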
1279 static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
1280                         const struct cl_object_conf *conf)
1281 {
1282         struct lov_stripe_md    *lsm = NULL;
1283         struct lov_object       *lov = cl2lov(obj);
1284         int                      result = 0;
1285         ENTRY;
1286
1287         if (conf->coc_opc == OBJECT_CONF_SET &&
1288             conf->u.coc_layout.lb_buf != NULL) {
1289                 lsm = lov_unpackmd(lov_object_dev(lov)->ld_lov,
1290                                    conf->u.coc_layout.lb_buf,
1291                                    conf->u.coc_layout.lb_len);
1292                 if (IS_ERR(lsm))
1293                         RETURN(PTR_ERR(lsm));
1294                 dump_lsm(D_INODE, lsm);
1295         }
1296
1297         lov_conf_lock(lov);
1298         if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
1299                 lov->lo_layout_invalid = true;
1300                 GOTO(out, result = 0);
1301         }
1302
1303         if (conf->coc_opc == OBJECT_CONF_WAIT) {
1304                 if (lov->lo_layout_invalid &&
1305                     atomic_read(&lov->lo_active_ios) > 0) {
1306                         lov_conf_unlock(lov);
1307                         result = lov_layout_wait(env, lov);
1308                         lov_conf_lock(lov);
1309                 }
1310                 GOTO(out, result);
1311         }
1312
1313         LASSERT(conf->coc_opc == OBJECT_CONF_SET);
1314
1315         if ((lsm == NULL && lov->lo_lsm == NULL) ||
1316             ((lsm != NULL && lov->lo_lsm != NULL) &&
1317              (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
1318              (lov->lo_lsm->lsm_entries[0]->lsme_pattern ==
1319               lsm->lsm_entries[0]->lsme_pattern))) {
1320                 /* same version of layout */
1321                 lov->lo_layout_invalid = false;
1322                 GOTO(out, result = 0);
1323         }
1324
1325         /* will change layout - check whether there is still active IO. */
1326         if (atomic_read(&lov->lo_active_ios) > 0) {
1327                 lov->lo_layout_invalid = true;
1328                 GOTO(out, result = -EBUSY);
1329         }
1330
1331         result = lov_layout_change(env, lov, lsm, conf);
1332         lov->lo_layout_invalid = result != 0;
1333         EXIT;
1334
1335 out:
1336         lov_conf_unlock(lov);
1337         lov_lsm_put(lsm);
1338         CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
1339                PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
1340         RETURN(result);
1341 }
1342
1343 static void lov_object_delete(const struct lu_env *env, struct lu_object *obj)
1344 {
1345         struct lov_object *lov = lu2lov(obj);
1346
1347         ENTRY;
1348         LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
1349         EXIT;
1350 }
1351
1352 static void lov_object_free(const struct lu_env *env, struct lu_object *obj)
1353 {
1354         struct lov_object *lov = lu2lov(obj);
1355
1356         ENTRY;
1357         LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
1358         lu_object_fini(obj);
1359         OBD_SLAB_FREE_PTR(lov, lov_object_kmem);
1360         EXIT;
1361 }
1362
1363 static int lov_object_print(const struct lu_env *env, void *cookie,
1364                             lu_printer_t p, const struct lu_object *o)
1365 {
1366         return LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
1367 }
1368
1369 int lov_page_init(const struct lu_env *env, struct cl_object *obj,
1370                   struct cl_page *page, pgoff_t index)
1371 {
1372         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page,
1373                                     index);
1374 }
1375
1376 /**
1377  * Implements cl_object_operations::clo_io_init() method for lov
1378  * layer. Dispatches to the appropriate layout io initialization method.
1379  */
1380 int lov_io_init(const struct lu_env *env, struct cl_object *obj,
1381                 struct cl_io *io)
1382 {
1383         CL_IO_SLICE_CLEAN(lov_env_io(env), lis_preserved);
1384
1385         CDEBUG(D_INODE, DFID "io %p type %d ignore/verify layout %d/%d\n",
1386                PFID(lu_object_fid(&obj->co_lu)), io, io->ci_type,
1387                io->ci_ignore_layout, io->ci_verify_layout);
1388
1389         /* IOs of type CIT_MISC with ci_ignore_layout set are usually invoked
1390          * from the OSC layer. They should not take the lov layout conf lock
1391          * in that case, because as long as the OSC object exists, the layout
1392          * cannot be reconfigured. */
1393         return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_io_init,
1394                         !(io->ci_ignore_layout && io->ci_type == CIT_MISC),
1395                         env, obj, io);
1396 }
1397
1398 /**
1399  * An implementation of cl_object_operations::clo_attr_get() method for lov
1400  * layer. For raid0 layout this collects and merges attributes of all
1401  * sub-objects.
1402  */
1403 static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
1404                         struct cl_attr *attr)
1405 {
1406         /* do not take lock, as this function is called under a
1407          * spin-lock. Layout is protected from changing by ongoing IO. */
1408         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
1409 }
1410
1411 static int lov_attr_update(const struct lu_env *env, struct cl_object *obj,
1412                            const struct cl_attr *attr, unsigned valid)
1413 {
1414         /*
1415          * No dispatch is required here, as no layout implements this.
1416          */
1417         return 0;
1418 }
1419
1420 int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
1421                   struct cl_lock *lock, const struct cl_io *io)
1422 {
1423         /* No need to lock because we hold a reference on the layout. */
1424         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_lock_init, env, obj, lock,
1425                                     io);
1426 }
1427
1428 /**
1429  * Calculate on which OST the mapping will end. If the length of the
1430  * mapping is greater than (stripe_size * stripe_count), then last_stripe
1431  * is the one just before start_stripe. Otherwise we check whether the
1432  * mapping intersects each OST and find last_stripe.
1433  * This function returns last_stripe and also sets the stripe count
1434  * over which the mapping is spread.
1435  *
1436  * \param lsm [in]              striping information for the file
1437  * \param index [in]            stripe component index
1438  * \param ext [in]              logical extent of mapping
1439  * \param start_stripe [in]     starting stripe of the mapping
1440  * \param stripe_count [out]    the number of stripes across which to map is
1441  *                              returned
1442  *
1443  * \retval last_stripe          return the last stripe of the mapping
1444  */
1445 static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm, int index,
1446                                    struct lu_extent *ext,
1447                                    int start_stripe, int *stripe_count)
1448 {
1449         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1450         int last_stripe;
1451         u64 obd_start;
1452         u64 obd_end;
1453         int i, j;
1454
1455         if (ext->e_end - ext->e_start >
1456             lsme->lsme_stripe_size * lsme->lsme_stripe_count) {
1457                 last_stripe = (start_stripe < 1 ? lsme->lsme_stripe_count - 1 :
1458                                                   start_stripe - 1);
1459                 *stripe_count = lsme->lsme_stripe_count;
1460         } else {
1461                 for (j = 0, i = start_stripe; j < lsme->lsme_stripe_count;
1462                      i = (i + 1) % lsme->lsme_stripe_count, j++) {
1463                         if ((lov_stripe_intersects(lsm, index,  i, ext,
1464                                                    &obd_start, &obd_end)) == 0)
1465                                 break;
1466                 }
1467                 *stripe_count = j;
1468                 last_stripe = (start_stripe + j - 1) % lsme->lsme_stripe_count;
1469         }
1470
1471         return last_stripe;
1472 }
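
/*
 * Editor's illustration (not part of the original source): a minimal,
 * self-contained sketch of the "mapping spans every stripe" branch of
 * fiemap_calc_last_stripe() above, assuming a hypothetical layout with
 * four 1 MiB stripes.  The lov_stripe_intersects() walk taken by the
 * other branch is not modelled here.
 */
#if 0	/* illustrative only */
static int fiemap_last_stripe_example(void)
{
	const unsigned long long stripe_size = 1ULL << 20;	/* 1 MiB, assumed */
	const int stripe_count = 4;				/* assumed layout */
	const unsigned long long ext_len = 6ULL << 20;		/* 6 MiB mapping */
	int start_stripe = 1;
	int last_stripe;

	if (ext_len > stripe_size * stripe_count)
		/* every stripe is covered: the last one precedes the start */
		last_stripe = start_stripe < 1 ? stripe_count - 1
					       : start_stripe - 1;
	else
		last_stripe = start_stripe;	/* would walk the stripes instead */

	return last_stripe;			/* 0 in this example */
}
#endif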
1473
1474 /**
1475  * Set fe_device and copy extents from local buffer into main return buffer.
1476  *
1477  * \param fiemap [out]          fiemap to hold all extents
1478  * \param lcl_fm_ext [in]       array of fiemap extents obtained from the OSC layer
1479  * \param ost_index [in]        OST index to be written into the fm_device
1480  *                              field for each extent
1481  * \param ext_count [in]        number of extents to be copied
1482  * \param current_extent [in]   where to start copying in the extent array
1483  */
1484 static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap,
1485                                          struct fiemap_extent *lcl_fm_ext,
1486                                          int ost_index, unsigned int ext_count,
1487                                          int current_extent)
1488 {
1489         char            *to;
1490         unsigned int    ext;
1491
1492         for (ext = 0; ext < ext_count; ext++) {
1493                 lcl_fm_ext[ext].fe_device = ost_index;
1494                 lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
1495         }
1496
1497         /* Copy the fm_extents from the local buffer into the return buffer */
1498         to = (char *)fiemap + fiemap_count_to_size(current_extent);
1499         memcpy(to, lcl_fm_ext, ext_count * sizeof(struct fiemap_extent));
1500 }
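
/*
 * Editor's note (illustrative, not part of the original source): the copy
 * above relies on fiemap_count_to_size(n) covering the fiemap header plus
 * n extents, so writing at fiemap_count_to_size(current_extent) appends
 * right after the extents already filled in.  A sketch of that arithmetic,
 * assuming the helper is equivalent to the expression below:
 */
#if 0	/* illustrative only */
#include <linux/fiemap.h>
#include <linux/types.h>

static size_t fiemap_count_to_size_sketch(unsigned int extent_count)
{
	/* fixed header followed by a flexible array of extents */
	return sizeof(struct fiemap) +
	       extent_count * sizeof(struct fiemap_extent);
}
#endif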
1501
1502 #define FIEMAP_BUFFER_SIZE 4096
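/*
 * Editor's note: with common 64-bit layouts (a 32-byte struct fiemap header
 * and 56-byte struct fiemap_extent entries, sizes assumed), a 4096-byte
 * local buffer holds roughly (4096 - 32) / 56 = 72 extents per sub-request;
 * fs_cnt_need below is initialised from this buffer size.
 */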
1503
1504 /**
1505  * A non-zero fe_logical in the first fm_extent indicates a continuation
1506  * FIEMAP call: the in-stripe end offset of the previous call and its OST
1507  * device index are carried in that extent. From fe_device this function
1508  * works out the stripe on which mapping is to be restarted and returns it
1509  * through *start_stripe.
1510  *
1511  * It returns fm_end_offset, the in-OST offset at which mapping should be
1512  * restarted; if 0 is returned, the caller re-calculates the offset in the next stripe.
1513  * Note that the first extent is passed to lov_get_info via the value field.
1514  *
1515  * \param fiemap [in]           fiemap request header
1516  * \param lsm [in]              striping information for the file
1517  * \param index [in]            stripe component index
1518  * \param ext [in]              logical extent of mapping
1519  * \param start_stripe [out]    the stripe from which mapping is to restart
1520  */
1521 static u64 fiemap_calc_fm_end_offset(struct fiemap *fiemap,
1522                                      struct lov_stripe_md *lsm,
1523                                      int index, struct lu_extent *ext,
1524                                      int *start_stripe)
1525 {
1526         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1527         u64 local_end = fiemap->fm_extents[0].fe_logical;
1528         u64 lun_start;
1529         u64 lun_end;
1530         u64 fm_end_offset;
1531         int stripe_no = -1;
1532         int i;
1533
1534         if (fiemap->fm_extent_count == 0 ||
1535             fiemap->fm_extents[0].fe_logical == 0)
1536                 return 0;
1537
1538         /* Find out stripe_no from ost_index saved in the fe_device */
1539         for (i = 0; i < lsme->lsme_stripe_count; i++) {
1540                 struct lov_oinfo *oinfo = lsme->lsme_oinfo[i];
1541
1542                 if (lov_oinfo_is_dummy(oinfo))
1543                         continue;
1544
1545                 if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) {
1546                         stripe_no = i;
1547                         break;
1548                 }
1549         }
1550
1551         if (stripe_no == -1)
1552                 return -EINVAL;
1553
1554         /* If mapping on the previous call's stripe is not yet finished,
1555          * resume there; otherwise shift to the start of the next stripe. */
1556         if (lov_stripe_intersects(lsm, index, stripe_no, ext,
1557                                    &lun_start, &lun_end) != 0 &&
1558             local_end < lun_end) {
1559                 fm_end_offset = local_end;
1560                 *start_stripe = stripe_no;
1561         } else {
1562                 /* Special value indicating that the caller should
1563                  * calculate the offset in the next stripe. */
1564                 fm_end_offset = 0;
1565                 *start_stripe = (stripe_no + 1) % lsme->lsme_stripe_count;
1566         }
1567
1568         return fm_end_offset;
1569 }
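
/*
 * Editor's worked example (assumed values, not from the original source):
 * suppose the previous FIEMAP call ended with an extent on OST index 7 at
 * in-stripe offset 0x300000, and stripe 2 of this component lives on OST 7.
 * The caller passes those two values back in fm_extents[0].fe_device and
 * fe_logical; the loop above maps fe_device 7 to stripe_no 2, and because
 * 0x300000 is still below that stripe's end offset for the requested
 * extent, mapping resumes on stripe 2 at offset 0x300000.  Had the stripe
 * already been exhausted, 0 would be returned and mapping would resume at
 * the start of stripe 3.
 */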
1570
1571 struct fiemap_state {
1572         struct fiemap           *fs_fm;          /* local fiemap buffer */
1573         struct lu_extent        fs_ext;          /* logical extent being mapped */
1574         u64                     fs_length;       /* length of fs_ext */
1575         u64                     fs_end_offset;   /* in-OST offset to resume from */
1576         int                     fs_cur_extent;   /* extents collected so far */
1577         int                     fs_cnt_need;     /* extents to request next call */
1578         int                     fs_start_stripe; /* first stripe of the mapping */
1579         int                     fs_last_stripe;  /* last stripe of the mapping */
1580         bool                    fs_device_done;  /* mapping on this OST is done */
1581         bool                    fs_finish_stripe;/* reached the last stripe */
1582         bool                    fs_enough;       /* collected enough extents */
1583 };
1584
1585 static struct cl_object *lov_find_subobj(const struct lu_env *env,
1586                                          struct lov_object *lov,
1587                                          struct lov_stripe_md *lsm,
1588                                          int index)
1589 {
1590         struct lov_device       *dev = lu2lov_dev(lov2lu(lov)->lo_dev);
1591         struct lov_thread_info  *lti = lov_env_info(env);
1592         struct lu_fid           *ofid = &lti->lti_fid;
1593         struct lov_oinfo        *oinfo;
1594         struct cl_device        *subdev;
1595         int                     entry = lov_comp_entry(index);
1596         int                     stripe = lov_comp_stripe(index);
1597         int                     ost_idx;
1598         int                     rc;
1599         struct cl_object        *result;
1600
1601         if (lov->lo_type != LLT_COMP)
1602                 GOTO(out, result = NULL);
1603
1604         if (entry >= lsm->lsm_entry_count ||
1605             stripe >= lsm->lsm_entries[entry]->lsme_stripe_count)
1606                 GOTO(out, result = NULL);
1607
1608         oinfo = lsm->lsm_entries[entry]->lsme_oinfo[stripe];
1609         ost_idx = oinfo->loi_ost_idx;
1610         rc = ostid_to_fid(ofid, &oinfo->loi_oi, ost_idx);
1611         if (rc != 0)
1612                 GOTO(out, result = NULL);
1613
1614         subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
1615         result = lov_sub_find(env, subdev, ofid, NULL);
1616 out:
1617         if (result == NULL)
1618                 result = ERR_PTR(-EINVAL);
1619         return result;
1620 }
1621
1622 int fiemap_for_stripe(const struct lu_env *env, struct cl_object *obj,
1623                       struct lov_stripe_md *lsm, struct fiemap *fiemap,
1624                       size_t *buflen, struct ll_fiemap_info_key *fmkey,
1625                       int index, int stripeno, struct fiemap_state *fs)
1626 {
1627         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1628         struct cl_object *subobj;
1629         struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov;
1630         struct fiemap_extent *fm_ext = &fs->fs_fm->fm_extents[0];
1631         u64 req_fm_len; /* Stores length of required mapping */
1632         u64 len_mapped_single_call;
1633         u64 lun_start;
1634         u64 lun_end;
1635         u64 obd_object_end;
1636         unsigned int ext_count;
1637         /* EOF for object */
1638         bool ost_eof = false;
1639         /* done with required mapping for this OST? */
1640         bool ost_done = false;
1641         int ost_index;
1642         int rc = 0;
1643
1644         fs->fs_device_done = false;
1645         /* Find out range of mapping on this stripe */
1646         if ((lov_stripe_intersects(lsm, index, stripeno, &fs->fs_ext,
1647                                    &lun_start, &obd_object_end)) == 0)
1648                 return 0;
1649
1650         if (lov_oinfo_is_dummy(lsme->lsme_oinfo[stripeno]))
1651                 return -EIO;
1652
1653         /* If this is a continuation FIEMAP call and we are on the
1654          * starting stripe, lun_start needs to be set to the saved
1655          * end offset. */
1656         if (fs->fs_end_offset != 0 && stripeno == fs->fs_start_stripe)
1657                 lun_start = fs->fs_end_offset;
1658         lun_end = lov_size_to_stripe(lsm, index, fs->fs_ext.e_end, stripeno);
1659         if (lun_start == lun_end)
1660                 return 0;
1661
1662         req_fm_len = obd_object_end - lun_start;
1663         fs->fs_fm->fm_length = 0;
1664         len_mapped_single_call = 0;
1665
1666         /* find the lovsub object */
1667         subobj = lov_find_subobj(env, cl2lov(obj), lsm,
1668                                  lov_comp_index(index, stripeno));
1669         if (IS_ERR(subobj))
1670                 return PTR_ERR(subobj);
1671         /* If the output buffer is very large and the objects have many
1672          * extents we may need to loop on a single OST repeatedly */
1673         do {
1674                 if (fiemap->fm_extent_count > 0) {
1675                         /* Don't get too many extents. */
1676                         if (fs->fs_cur_extent + fs->fs_cnt_need >
1677                             fiemap->fm_extent_count)
1678                                 fs->fs_cnt_need = fiemap->fm_extent_count -
1679                                                   fs->fs_cur_extent;
1680                 }
1681
1682                 lun_start += len_mapped_single_call;
1683                 fs->fs_fm->fm_length = req_fm_len - len_mapped_single_call;
1684                 req_fm_len = fs->fs_fm->fm_length;
1685                 /*
1686                  * If we have already collected enough extents, request one
1687                  * more to check whether we happen to have reached the end of
1688                  * all available extents, so that FIEMAP_EXTENT_LAST can be set.
1689                  */
1690                 fs->fs_fm->fm_extent_count = fs->fs_enough ?
1691                                              1 : fs->fs_cnt_need;
1692                 fs->fs_fm->fm_mapped_extents = 0;
1693                 fs->fs_fm->fm_flags = fiemap->fm_flags;
1694
1695                 ost_index = lsme->lsme_oinfo[stripeno]->loi_ost_idx;
1696
1697                 if (ost_index < 0 || ost_index >= lov->desc.ld_tgt_count)
1698                         GOTO(obj_put, rc = -EINVAL);
1699                 /* If OST is inactive, return extent with UNKNOWN flag. */
1700                 if (!lov->lov_tgts[ost_index]->ltd_active) {
1701                         fs->fs_fm->fm_flags |= FIEMAP_EXTENT_LAST;
1702                         fs->fs_fm->fm_mapped_extents = 1;
1703
1704                         fm_ext[0].fe_logical = lun_start;
1705                         fm_ext[0].fe_length = obd_object_end - lun_start;
1706                         fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
1707
1708                         goto inactive_tgt;
1709                 }
1710
1711                 fs->fs_fm->fm_start = lun_start;
1712                 fs->fs_fm->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
1713                 memcpy(&fmkey->lfik_fiemap, fs->fs_fm, sizeof(*fs->fs_fm));
1714                 *buflen = fiemap_count_to_size(fs->fs_fm->fm_extent_count);
1715
1716                 rc = cl_object_fiemap(env, subobj, fmkey, fs->fs_fm, buflen);
1717                 if (rc != 0)
1718                         GOTO(obj_put, rc);
1719 inactive_tgt:
1720                 ext_count = fs->fs_fm->fm_mapped_extents;
1721                 if (ext_count == 0) {
1722                         ost_done = true;
1723                         fs->fs_device_done = true;
1724                         /* If the last stripe has a hole at the end,
1725                          * we need to return. */
1726                         if (stripeno == fs->fs_last_stripe) {
1727                                 fiemap->fm_mapped_extents = 0;
1728                                 fs->fs_finish_stripe = true;
1729                                 GOTO(obj_put, rc);
1730                         }
1731                         break;
1732                 } else if (fs->fs_enough) {
1733                         /*
1734                          * We have collected enough extents and there are
1735                          * more extents remaining after them.
1736                          */
1737                         GOTO(obj_put, rc);
1738                 }
1739
1740                 /* If we only need the number of extents, go to the next device */
1741                 if (fiemap->fm_extent_count == 0) {
1742                         fs->fs_cur_extent += ext_count;
1743                         break;
1744                 }
1745
1746                 /* prepare to copy the retrieved map extents */
1747                 len_mapped_single_call = fm_ext[ext_count - 1].fe_logical +
1748                                          fm_ext[ext_count - 1].fe_length -
1749                                          lun_start;
1750
1751                 /* Have we finished mapping on this device? */
1752                 if (req_fm_len <= len_mapped_single_call) {
1753                         ost_done = true;
1754                         fs->fs_device_done = true;
1755                 }
1756
1757                 /* Clear the EXTENT_LAST flag which can be present on
1758                  * the last extent */
1759                 if (fm_ext[ext_count - 1].fe_flags & FIEMAP_EXTENT_LAST)
1760                         fm_ext[ext_count - 1].fe_flags &= ~FIEMAP_EXTENT_LAST;
1761                 if (lov_stripe_size(lsm, index,
1762                                     fm_ext[ext_count - 1].fe_logical +
1763                                     fm_ext[ext_count - 1].fe_length,
1764                                     stripeno) >= fmkey->lfik_oa.o_size) {
1765                         ost_eof = true;
1766                         fs->fs_device_done = true;
1767                 }
1768
1769                 fiemap_prepare_and_copy_exts(fiemap, fm_ext, ost_index,
1770                                              ext_count, fs->fs_cur_extent);
1771                 fs->fs_cur_extent += ext_count;
1772
1773                 /* Ran out of available extents? */
1774                 if (fs->fs_cur_extent >= fiemap->fm_extent_count)
1775                         fs->fs_enough = true;
1776         } while (!ost_done && !ost_eof);
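
        /*
         * Editor's summary (descriptive comment, not original): the loop above
         * keeps polling a single stripe until (a) the OST reports no further
         * extents or (b) the requested range on this stripe is fully mapped
         * (both cases set ost_done and fs_device_done), (c) the mapping runs
         * past the object size (ost_eof and fs_device_done), or (d) the
         * caller's extent budget is exhausted (fs_enough), in which case one
         * extra single-extent probe checks whether any extents remain before
         * bailing out.
         */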
1777
1778         if (stripeno == fs->fs_last_stripe)
1779                 fs->fs_finish_stripe = true;
1780 obj_put:
1781         cl_object_put(env, subobj);
1782
1783         return rc;
1784 }
1785
1786 /**
1787  * Break down the FIEMAP request and send appropriate calls to individual OSTs.
1788  * This also handles restarting FIEMAP calls in case the mapping overflows
1789  * the number of extents available in a single call.
1790  *
1791  * \param env [in]              lustre environment
1792  * \param obj [in]              file object
1793  * \param fmkey [in]            fiemap request header and other info
1794  * \param fiemap [out]          fiemap buffer holding the retrieved extents
1795  * \param buflen [in/out]       max buffer length of @fiemap; while iterating
1796  *                              over each OST it limits the map size requested
1797  * \retval 0    success
1798  * \retval < 0  error
1799  */
1800 static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj,
1801                              struct ll_fiemap_info_key *fmkey,
1802                              struct fiemap *fiemap, size_t *buflen)
1803 {
1804         struct lov_stripe_md_entry *lsme;
1805         struct lov_stripe_md *lsm;
1806         struct fiemap *fm_local = NULL;
1807         loff_t whole_start;
1808         loff_t whole_end;
1809         int entry;
1810         int start_entry;
1811         int end_entry;
1812         int cur_stripe = 0;
1813         int stripe_count;
1814         unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
1815         int rc = 0;
1816         struct fiemap_state fs = { 0 };
1817         ENTRY;
1818
1819         lsm = lov_lsm_addref(cl2lov(obj));
1820         if (lsm == NULL)
1821                 RETURN(-ENODATA);
1822
1823         if (!(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) {
1824                 /*
1825                  * If the file has more than one component, or more than one
1826                  * stripe, and the application does not understand the
1827                  * DEVICE_ORDER flag, it cannot interpret the extents correctly.
1828                  */
1829                 if (lsm->lsm_entry_count > 1 ||
1830                     (lsm->lsm_entry_count == 1 &&
1831                      lsm->lsm_entries[0]->lsme_stripe_count > 1))
1832                         GOTO(out_lsm, rc = -ENOTSUPP);
1833         }
1834
1835         /* No support for DOM layout yet. */
1836         if (lsme_is_dom(lsm->lsm_entries[0]))
1837                 GOTO(out_lsm, rc = -ENOTSUPP);
1838
1839         if (lsm->lsm_is_released) {
1840                 if (fiemap->fm_start < fmkey->lfik_oa.o_size) {
1841                         /*
1842                          * Released file: return a minimal FIEMAP if the
1843                          * request fits within the file size.
1844                          */
1845                         fiemap->fm_mapped_extents = 1;
1846                         fiemap->fm_extents[0].fe_logical = fiemap->fm_start;
1847                         if (fiemap->fm_start + fiemap->fm_length <
1848                             fmkey->lfik_oa.o_size)
1849                                 fiemap->fm_extents[0].fe_length =
1850                                         fiemap->fm_length;
1851                         else
1852                                 fiemap->fm_extents[0].fe_length =
1853                                         fmkey->lfik_oa.o_size -
1854                                         fiemap->fm_start;
1855                         fiemap->fm_extents[0].fe_flags |=
1856                                 FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST;
1857                 }
1858                 GOTO(out_lsm, rc = 0);
1859         }
1860
1861         /* A smaller buffer is enough to hold fm_extent_count extents. */
1862         if (fiemap_count_to_size(fiemap->fm_extent_count) < buffer_size)
1863                 buffer_size = fiemap_count_to_size(fiemap->fm_extent_count);
1864
1865         OBD_ALLOC_LARGE(fm_local, buffer_size);
1866         if (fm_local == NULL)
1867                 GOTO(out_lsm, rc = -ENOMEM);
1868
1869         /*
1870          * The requested extent count exceeds what the passed-in buffer can
1871          * hold, so shrink our ambition.
1872          */
1873         if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen)
1874                 fiemap->fm_extent_count = fiemap_size_to_count(*buflen);
1875         if (fiemap->fm_extent_count == 0)
1876                 fs.fs_cnt_need = 0;
1877
1878         fs.fs_enough = false;
1879         fs.fs_cur_extent = 0;
1880         fs.fs_fm = fm_local;
1881         fs.fs_cnt_need = fiemap_size_to_count(buffer_size);
1882
1883         whole_start = fiemap->fm_start;
1884         /* whole_start is beyond the end of the file */
1885         if (whole_start > fmkey->lfik_oa.o_size)
1886                 GOTO(out_fm_local, rc = -EINVAL);
1887         whole_end = (fiemap->fm_length == OBD_OBJECT_EOF) ?
1888                                         fmkey->lfik_oa.o_size :
1889                                         whole_start + fiemap->fm_length - 1;
1890         /*
1891          * If fiemap->fm_length != OBD_OBJECT_EOF but whole_end exceeds the
1892          * file size, clamp whole_end to the file size.
1893          */
1894         if (whole_end > fmkey->lfik_oa.o_size)
1895                 whole_end = fmkey->lfik_oa.o_size;
1896
1897         start_entry = lov_lsm_entry(lsm, whole_start);
1898         end_entry = lov_lsm_entry(lsm, whole_end);
1899         if (end_entry == -1)
1900                 end_entry = lsm->lsm_entry_count - 1;
1901
1902         if (start_entry == -1 || end_entry == -1)
1903                 GOTO(out_fm_local, rc = -EINVAL);
1904
1905         /* TODO: rewrite it with lov_foreach_io_layout() */
1906         for (entry = start_entry; entry <= end_entry; entry++) {
1907                 lsme = lsm->lsm_entries[entry];
1908
1909                 if (!lsme_inited(lsme))
1910                         break;
1911
1912                 if (entry == start_entry)
1913                         fs.fs_ext.e_start = whole_start;
1914                 else
1915                         fs.fs_ext.e_start = lsme->lsme_extent.e_start;
1916                 if (entry == end_entry)
1917                         fs.fs_ext.e_end = whole_end;
1918                 else
1919                         fs.fs_ext.e_end = lsme->lsme_extent.e_end - 1;
1920                 fs.fs_length = fs.fs_ext.e_end - fs.fs_ext.e_start + 1;
1921
1922                 /* Calculate start stripe, last stripe and length of mapping */
1923                 fs.fs_start_stripe = lov_stripe_number(lsm, entry,
1924                                                        fs.fs_ext.e_start);
1925                 fs.fs_last_stripe = fiemap_calc_last_stripe(lsm, entry,
1926                                         &fs.fs_ext, fs.fs_start_stripe,
1927                                         &stripe_count);
1928                 fs.fs_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, entry,
1929                                         &fs.fs_ext, &fs.fs_start_stripe);
1930                 /* Check each stripe */
1931                 for (cur_stripe = fs.fs_start_stripe; stripe_count > 0;
1932                      --stripe_count,
1933                      cur_stripe = (cur_stripe + 1) % lsme->lsme_stripe_count) {
1934                         rc = fiemap_for_stripe(env, obj, lsm, fiemap, buflen,
1935                                                fmkey, entry, cur_stripe, &fs);
1936                         if (rc < 0)
1937                                 GOTO(out_fm_local, rc);
1938                         if (fs.fs_enough)
1939                                 GOTO(finish, rc);
1940                         if (fs.fs_finish_stripe)
1941                                 break;
1942                 } /* for each stripe */
1943         } /* for covering layout component */
1944         /*
1945          * We have traversed all components; step @entry back to the last
1946          * component visited, for the last-stripe check below.
1947          */
1948         entry--;
1949 finish:
1950         /* Indicate that we are returning device offsets unless the file has
1951          * just a single stripe. */
1952         if (lsm->lsm_entry_count > 1 ||
1953             (lsm->lsm_entry_count == 1 &&
1954              lsm->lsm_entries[0]->lsme_stripe_count > 1))
1955                 fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
1956
1957         if (fiemap->fm_extent_count == 0)
1958                 goto skip_last_device_calc;
1959
1960         /* Check if we have reached the last stripe and whether mapping for that
1961          * stripe is done. */
1962         if ((cur_stripe == fs.fs_last_stripe) && fs.fs_device_done)
1963                 fiemap->fm_extents[fs.fs_cur_extent - 1].fe_flags |=
1964                                                              FIEMAP_EXTENT_LAST;
1965 skip_last_device_calc:
1966         fiemap->fm_mapped_extents = fs.fs_cur_extent;
1967 out_fm_local:
1968         OBD_FREE_LARGE(fm_local, buffer_size);
1969
1970 out_lsm:
1971         lov_lsm_put(lsm);
1972         return rc;
1973 }
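
/*
 * Editor's usage sketch (not part of the original source): how a user-space
 * caller might exercise the FIEMAP path above.  It uses only the standard
 * FS_IOC_FIEMAP ioctl and <linux/fiemap.h>; the Lustre-specific
 * FIEMAP_FLAG_DEVICE_ORDER flag referenced above comes from the Lustre user
 * headers, so it is left commented out here as an assumption.
 */
#if 0	/* illustrative only, user-space code */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

static int dump_extents(const char *path, unsigned int nr)
{
	size_t len = sizeof(struct fiemap) + nr * sizeof(struct fiemap_extent);
	struct fiemap *fm = calloc(1, len);
	int fd = open(path, O_RDONLY);
	unsigned int i;
	int rc = -1;

	if (fd < 0 || fm == NULL)
		goto out;

	fm->fm_start = 0;
	fm->fm_length = ~0ULL;		/* map the whole file */
	fm->fm_extent_count = nr;
	/* fm->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;  Lustre extension flag */

	rc = ioctl(fd, FS_IOC_FIEMAP, fm);
	if (rc < 0)
		goto out;

	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("extent %u: device %u logical %llu length %llu flags %#x\n",
		       i, fm->fm_extents[i].fe_device,
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       fm->fm_extents[i].fe_flags);
out:
	if (fd >= 0)
		close(fd);
	free(fm);
	return rc;
}
#endif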
1974
1975 static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
1976                                 struct lov_user_md __user *lum, size_t size)
1977 {
1978         struct lov_object       *lov = cl2lov(obj);
1979         struct lov_stripe_md    *lsm;
1980         int                     rc = 0;
1981         ENTRY;
1982
1983         lsm = lov_lsm_addref(lov);
1984         if (lsm == NULL)
1985                 RETURN(-ENODATA);
1986
1987         rc = lov_getstripe(env, cl2lov(obj), lsm, lum, size);
1988         lov_lsm_put(lsm);
1989         RETURN(rc);
1990 }
1991
1992 static int lov_object_layout_get(const struct lu_env *env,
1993                                  struct cl_object *obj,
1994                                  struct cl_layout *cl)
1995 {
1996         struct lov_object *lov = cl2lov(obj);
1997         struct lov_stripe_md *lsm = lov_lsm_addref(lov);
1998         struct lu_buf *buf = &cl->cl_buf;
1999         ssize_t rc;
2000         ENTRY;
2001
2002         if (lsm == NULL) {
2003                 cl->cl_size = 0;
2004                 cl->cl_layout_gen = CL_LAYOUT_GEN_EMPTY;
2005
2006                 RETURN(0);
2007         }
2008
2009         cl->cl_size = lov_comp_md_size(lsm);
2010         cl->cl_layout_gen = lsm->lsm_layout_gen;
2011         cl->cl_dom_comp_size = 0;
2012         if (lsm_is_composite(lsm->lsm_magic)) {
2013                 struct lov_stripe_md_entry *lsme = lsm->lsm_entries[0];
2014
2015                 cl->cl_is_composite = true;
2016
2017                 if (lsme_is_dom(lsme))
2018                         cl->cl_dom_comp_size = lsme->lsme_extent.e_end;
2019         } else {
2020                 cl->cl_is_composite = false;
2021         }
2022
2023         rc = lov_lsm_pack(lsm, buf->lb_buf, buf->lb_len);
2024         lov_lsm_put(lsm);
2025
2026         RETURN(rc < 0 ? rc : 0);
2027 }
2028
2029 static loff_t lov_object_maxbytes(struct cl_object *obj)
2030 {
2031         struct lov_object *lov = cl2lov(obj);
2032         struct lov_stripe_md *lsm = lov_lsm_addref(lov);
2033         loff_t maxbytes;
2034
2035         if (lsm == NULL)
2036                 return LLONG_MAX;
2037
2038         maxbytes = lsm->lsm_maxbytes;
2039
2040         lov_lsm_put(lsm);
2041
2042         return maxbytes;
2043 }
2044
2045 static const struct cl_object_operations lov_ops = {
2046         .coo_page_init    = lov_page_init,
2047         .coo_lock_init    = lov_lock_init,
2048         .coo_io_init      = lov_io_init,
2049         .coo_attr_get     = lov_attr_get,
2050         .coo_attr_update  = lov_attr_update,
2051         .coo_conf_set     = lov_conf_set,
2052         .coo_getstripe    = lov_object_getstripe,
2053         .coo_layout_get   = lov_object_layout_get,
2054         .coo_maxbytes     = lov_object_maxbytes,
2055         .coo_fiemap       = lov_object_fiemap,
2056 };
2057
2058 static const struct lu_object_operations lov_lu_obj_ops = {
2059         .loo_object_init      = lov_object_init,
2060         .loo_object_delete    = lov_object_delete,
2061         .loo_object_release   = NULL,
2062         .loo_object_free      = lov_object_free,
2063         .loo_object_print     = lov_object_print,
2064         .loo_object_invariant = NULL
2065 };
2066
2067 struct lu_object *lov_object_alloc(const struct lu_env *env,
2068                                    const struct lu_object_header *unused,
2069                                    struct lu_device *dev)
2070 {
2071         struct lov_object *lov;
2072         struct lu_object  *obj;
2073
2074         ENTRY;
2075         OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, GFP_NOFS);
2076         if (lov != NULL) {
2077                 obj = lov2lu(lov);
2078                 lu_object_init(obj, NULL, dev);
2079                 lov->lo_cl.co_ops = &lov_ops;
2080                 lov->lo_type = -1; /* invalid, to catch uninitialized type */
2081                 /*
2082                  * object io operation vector (cl_object::co_iop) is installed
2083                  * later in lov_object_init(), as different vectors are used
2084                  * for object with different layouts.
2085                  */
2086                 obj->lo_ops = &lov_lu_obj_ops;
2087         } else
2088                 obj = NULL;
2089         RETURN(obj);
2090 }
2091
2092 struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
2093 {
2094         struct lov_stripe_md *lsm = NULL;
2095
2096         lov_conf_freeze(lov);
2097         if (lov->lo_lsm != NULL) {
2098                 lsm = lsm_addref(lov->lo_lsm);
2099                 CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
2100                         lsm, atomic_read(&lsm->lsm_refc),
2101                         lov->lo_layout_invalid, current);
2102         }
2103         lov_conf_thaw(lov);
2104         return lsm;
2105 }
2106
2107 int lov_read_and_clear_async_rc(struct cl_object *clob)
2108 {
2109         struct lu_object *luobj;
2110         int rc = 0;
2111         ENTRY;
2112
2113         luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
2114                                  &lov_device_type);
2115         if (luobj != NULL) {
2116                 struct lov_object *lov = lu2lov(luobj);
2117
2118                 lov_conf_freeze(lov);
2119                 switch (lov->lo_type) {
2120                 case LLT_COMP: {
2121                         struct lov_stripe_md *lsm;
2122                         int i;
2123
2124                         lsm = lov->lo_lsm;
2125                         LASSERT(lsm != NULL);
2126                         for (i = 0; i < lsm->lsm_entry_count; i++) {
2127                                 struct lov_stripe_md_entry *lse =
2128                                                 lsm->lsm_entries[i];
2129                                 int j;
2130
2131                                 if (!lsme_inited(lse))
2132                                         break;
2133
2134                                 for (j = 0; j < lse->lsme_stripe_count; j++) {
2135                                         struct lov_oinfo *loi =
2136                                                         lse->lsme_oinfo[j];
2137
2138                                         if (lov_oinfo_is_dummy(loi))
2139                                                 continue;
2140
2141                                         if (loi->loi_ar.ar_rc && !rc)
2142                                                 rc = loi->loi_ar.ar_rc;
2143                                         loi->loi_ar.ar_rc = 0;
2144                                 }
2145                         }
2146                 }
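                /* fallthrough: nothing more to do for a composite layout */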
2147                 case LLT_RELEASED:
2148                 case LLT_EMPTY:
2149                         break;
2150                 default:
2151                         LBUG();
2152                 }
2153                 lov_conf_thaw(lov);
2154         }
2155         RETURN(rc);
2156 }
2157 EXPORT_SYMBOL(lov_read_and_clear_async_rc);
2158
2159 /** @} lov */