lustre/lov/lov_object.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * Implementation of cl_object for LOV layer.
32  *
33  *   Author: Nikita Danilov <nikita.danilov@sun.com>
34  *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
35  */
36
37 #define DEBUG_SUBSYSTEM S_LOV
38
39 #include <linux/random.h>
40
41 #include "lov_cl_internal.h"
42
43 static inline struct lov_device *lov_object_dev(struct lov_object *obj)
44 {
45         return lu2lov_dev(obj->lo_cl.co_lu.lo_dev);
46 }
47
48 /** \addtogroup lov
49  *  @{
50  */
51
52 /*****************************************************************************
53  *
54  * Layout operations.
55  *
56  */
57
58 struct lov_layout_operations {
59         int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
60                         struct lov_object *lov, struct lov_stripe_md *lsm,
61                         const struct cl_object_conf *conf,
62                         union lov_layout_state *state);
63         int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
64                            union lov_layout_state *state);
65         void (*llo_fini)(const struct lu_env *env, struct lov_object *lov,
66                          union lov_layout_state *state);
67         int  (*llo_print)(const struct lu_env *env, void *cookie,
68                           lu_printer_t p, const struct lu_object *o);
69         int  (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
70                               struct cl_page *page, pgoff_t index);
71         int  (*llo_lock_init)(const struct lu_env *env,
72                               struct cl_object *obj, struct cl_lock *lock,
73                               const struct cl_io *io);
74         int  (*llo_io_init)(const struct lu_env *env,
75                             struct cl_object *obj, struct cl_io *io);
76         int  (*llo_getattr)(const struct lu_env *env, struct cl_object *obj,
77                             struct cl_attr *attr);
78         int  (*llo_flush)(const struct lu_env *env, struct cl_object *obj,
79                           struct ldlm_lock *lock);
80 };
81
82 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
83 static struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov);
84
85 static void lov_lsm_put(struct lov_stripe_md *lsm)
86 {
87         if (lsm != NULL)
88                 lov_free_memmd(&lsm);
89 }
90
91 /*****************************************************************************
92  *
93  * Lov object layout operations.
94  *
95  */
96
97 static struct cl_object *lov_sub_find(const struct lu_env *env,
98                                       struct cl_device *dev,
99                                       const struct lu_fid *fid,
100                                       const struct cl_object_conf *conf)
101 {
102         struct lu_object *o;
103
104         ENTRY;
105
106         o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
107         LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
108         RETURN(lu2cl(o));
109 }
110
111 static int lov_page_slice_fixup(struct lov_object *lov,
112                                 struct cl_object *stripe)
113 {
114         struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
115         struct cl_object *o;
116
117         if (stripe == NULL)
118                 return hdr->coh_page_bufsize - lov->lo_cl.co_slice_off;
119
120         cl_object_for_each(o, stripe)
121                 o->co_slice_off += hdr->coh_page_bufsize;
122
123         return cl_object_header(stripe)->coh_page_bufsize;
124 }
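/*
 * Editorial sketch (not part of the original source): what the fixup above
 * does, with purely illustrative numbers.  Suppose the top object's page
 * buffer already holds the vvp and lov slices in coh_page_bufsize = 192
 * bytes, and a stripe sub-object keeps an osc slice at co_slice_off = 64
 * within its own header.  After cl_object_for_each() shifts every slice of
 * the stripe by the parent's coh_page_bufsize, that osc slice sits at
 * 192 + 64 = 256 in the combined per-page buffer, and the stripe's own
 * coh_page_bufsize is returned so the caller can grow the parent's buffer.
 */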
125
126 static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
127                         struct cl_object *subobj, struct lov_oinfo *oinfo,
128                         int idx)
129 {
130         struct cl_object_header *hdr;
131         struct cl_object_header *subhdr;
132         struct cl_object_header *parent;
133         int entry = lov_comp_entry(idx);
134         int stripe = lov_comp_stripe(idx);
135         int result;
136
137         if (CFS_FAIL_CHECK(OBD_FAIL_LOV_INIT)) {
138                 /* For sanity test_206.
139                  * Do not leave the object in cache to avoid accessing
140                  * freed memory. This is because the osc_object refers to
141                  * the lov_oinfo in lsm_stripe_data, which will be freed
142                  * due to this failure. */
143                 cl_object_kill(env, subobj);
144                 cl_object_put(env, subobj);
145                 return -EIO;
146         }
147
148         hdr = cl_object_header(lov2cl(lov));
149         subhdr = cl_object_header(subobj);
150
151         CDEBUG(D_INODE, DFID"@%p[%d:%d] -> "DFID"@%p: ostid: "DOSTID
152                " ost idx: %d gen: %d\n",
153                PFID(lu_object_fid(&subobj->co_lu)), subhdr, entry, stripe,
154                PFID(lu_object_fid(lov2lu(lov))), hdr, POSTID(&oinfo->loi_oi),
155                oinfo->loi_ost_idx, oinfo->loi_ost_gen);
156
157         /* reuse ->coh_attr_guard to protect coh_parent change */
158         spin_lock(&subhdr->coh_attr_guard);
159         parent = subhdr->coh_parent;
160         if (parent == NULL) {
161                 struct lovsub_object *lso = cl2lovsub(subobj);
162
163                 subhdr->coh_parent = hdr;
164                 spin_unlock(&subhdr->coh_attr_guard);
165                 subhdr->coh_nesting = hdr->coh_nesting + 1;
166                 lu_object_ref_add(&subobj->co_lu, "lov-parent", lov);
167                 lso->lso_super = lov;
168                 lso->lso_index = idx;
169                 result = 0;
170         } else {
171                 struct lu_object  *old_obj;
172                 struct lov_object *old_lov;
173                 unsigned int mask = D_INODE;
174
175                 spin_unlock(&subhdr->coh_attr_guard);
176                 old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
177                 LASSERT(old_obj != NULL);
178                 old_lov = cl2lov(lu2cl(old_obj));
179                 if (test_bit(LO_LAYOUT_INVALID, &old_lov->lo_obj_flags)) {
180                         /* the object's layout has already changed but isn't
181                          * refreshed */
182                         lu_object_unhash(env, &subobj->co_lu);
183                         result = -EAGAIN;
184                 } else {
185                         mask = D_ERROR;
186                         result = -EIO;
187                 }
188
189                 LU_OBJECT_DEBUG(mask, env, &subobj->co_lu,
190                                 "stripe %d is already owned.", idx);
191                 LU_OBJECT_DEBUG(mask, env, old_obj, "owned.");
192                 LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n");
193                 cl_object_put(env, subobj);
194         }
195         return result;
196 }
197
198 static int lov_init_raid0(const struct lu_env *env, struct lov_device *dev,
199                           struct lov_object *lov, unsigned int index,
200                           const struct cl_object_conf *conf,
201                           struct lov_layout_entry *lle)
202 {
203         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
204         struct lov_thread_info *lti = lov_env_info(env);
205         struct cl_object_conf *subconf = &lti->lti_stripe_conf;
206         struct lu_fid *ofid = &lti->lti_fid;
207         struct cl_object *stripe;
208         struct lov_stripe_md_entry *lse  = lov_lse(lov, index);
209         int result;
210         int psz, sz;
211         int i;
212
213         ENTRY;
214
215         spin_lock_init(&r0->lo_sub_lock);
216         r0->lo_nr = lse->lsme_stripe_count;
217
218         OBD_ALLOC_PTR_ARRAY_LARGE(r0->lo_sub, r0->lo_nr);
219         if (r0->lo_sub == NULL)
220                 GOTO(out, result = -ENOMEM);
221
222         psz = 0;
223         result = 0;
224         memset(subconf, 0, sizeof(*subconf));
225
226         /*
227          * Create stripe cl_objects.
228          */
229         for (i = 0; i < r0->lo_nr; ++i) {
230                 struct cl_device *subdev;
231                 struct lov_oinfo *oinfo = lse->lsme_oinfo[i];
232                 int ost_idx = oinfo->loi_ost_idx;
233                 struct obd_export *exp;
234
235                 if (lov_oinfo_is_dummy(oinfo))
236                         continue;
237
238                 result = ostid_to_fid(ofid, &oinfo->loi_oi, oinfo->loi_ost_idx);
239                 if (result != 0)
240                         GOTO(out, result);
241
242                 if (dev->ld_target[ost_idx] == NULL) {
243                         CERROR("%s: OST %04x is not initialized\n",
244                                lov2obd(dev->ld_lov)->obd_name, ost_idx);
245                         GOTO(out, result = -EIO);
246                 }
247
248                 exp = dev->ld_lov->lov_tgts[ost_idx]->ltd_exp;
249                 if (likely(exp)) {
250                         /* the more non-rotational (fast) OSTs, the better */
251                         if (exp->exp_obd->obd_osfs.os_state & OS_STATFS_NONROT)
252                                 lle->lle_preference++;
253                 }
254
255                 subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
256                 subconf->u.coc_oinfo = oinfo;
257                 LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
258                 /* In the function below, .hs_keycmp resolves to
259                  * lu_obj_hop_keycmp() */
260                 stripe = lov_sub_find(env, subdev, ofid, subconf);
261                 if (IS_ERR(stripe))
262                         GOTO(out, result = PTR_ERR(stripe));
263
264                 result = lov_init_sub(env, lov, stripe, oinfo,
265                                       lov_comp_index(index, i));
266                 if (result == -EAGAIN) { /* try again */
267                         --i;
268                         result = 0;
269                         continue;
270                 }
271
272                 if (result == 0) {
273                         r0->lo_sub[i] = cl2lovsub(stripe);
274
275                         sz = lov_page_slice_fixup(lov, stripe);
276                         LASSERT(ergo(psz > 0, psz == sz));
277                         psz = sz;
278                 }
279         }
280         if (result == 0)
281                 result = psz;
282 out:
283         RETURN(result);
284 }
285
286 static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
287                                struct lov_layout_raid0 *r0,
288                                struct lovsub_object *los, int idx)
289 {
290         struct cl_object        *sub;
291         struct lu_site          *site;
292         wait_queue_head_t *wq;
293
294         LASSERT(r0->lo_sub[idx] == los);
295
296         sub = lovsub2cl(los);
297         site = sub->co_lu.lo_dev->ld_site;
298         wq = lu_site_wq_from_fid(site, &sub->co_lu.lo_header->loh_fid);
299
300         cl_object_kill(env, sub);
301         /* release a reference to the sub-object and ... */
302         lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
303         cl_object_put(env, sub);
304
305         /* ... wait until it is actually destroyed---sub-object clears its
306          * ->lo_sub[] slot in lovsub_object_free() */
307         wait_event(*wq, r0->lo_sub[idx] != los);
308         LASSERT(r0->lo_sub[idx] == NULL);
309 }
310
311 static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
312                              struct lov_layout_entry *lle)
313 {
314         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
315         int rc;
316
317         ENTRY;
318
319         if (r0->lo_sub != NULL) {
320                 int i;
321
322                 for (i = 0; i < r0->lo_nr; ++i) {
323                         struct lovsub_object *los = r0->lo_sub[i];
324
325                         if (los != NULL) {
326                                 rc = cl_object_prune(env, &los->lso_cl);
327                                 if (rc)
328                                         RETURN(rc);
329                                 /*
330                                  * If top-level object is to be evicted from
331                                  * the cache, so are its sub-objects.
332                                  */
333                                 lov_subobject_kill(env, lov, r0, los, i);
334                         }
335                 }
336         }
337
338         RETURN(0);
339 }
340
341 static void lov_fini_raid0(const struct lu_env *env,
342                            struct lov_layout_entry *lle)
343 {
344         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
345
346         if (r0->lo_sub != NULL) {
347                 OBD_FREE_PTR_ARRAY_LARGE(r0->lo_sub, r0->lo_nr);
348                 r0->lo_sub = NULL;
349         }
350 }
351
352 static int lov_print_raid0(const struct lu_env *env, void *cookie,
353                            lu_printer_t p, const struct lov_layout_entry *lle)
354 {
355         const struct lov_layout_raid0 *r0 = &lle->lle_raid0;
356         int i;
357
358         for (i = 0; i < r0->lo_nr; ++i) {
359                 struct lu_object *sub;
360
361                 if (r0->lo_sub[i] != NULL) {
362                         sub = lovsub2lu(r0->lo_sub[i]);
363                         lu_object_print(env, cookie, p, sub);
364                 } else {
365                         (*p)(env, cookie, "sub %d absent\n", i);
366                 }
367         }
368         return 0;
369 }
370
371 static int lov_attr_get_raid0(const struct lu_env *env, struct lov_object *lov,
372                               unsigned int index, struct lov_layout_entry *lle,
373                               struct cl_attr **lov_attr)
374 {
375         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
376         struct lov_stripe_md *lsm = lov->lo_lsm;
377         struct cl_attr *attr = &r0->lo_attr;
378         int result = 0;
379
380         if (r0->lo_attr_valid) {
381                 *lov_attr = attr;
382                 return 0;
383         }
384
385         /*
386          * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
387          * happy. It's not needed, because new code uses
388          * ->coh_attr_guard spin-lock to protect consistency of
389          * sub-object attributes.
390          */
391         lov_stripe_lock(lsm);
392         result = lov_merge_lvb_kms(lsm, index, attr);
393         lov_stripe_unlock(lsm);
394         if (result == 0) {
395                 r0->lo_attr_valid = 1;
396                 *lov_attr = attr;
397         }
398
399         return result;
400 }
401
402 static struct lov_comp_layout_entry_ops raid0_ops = {
403         .lco_init      = lov_init_raid0,
404         .lco_fini      = lov_fini_raid0,
405         .lco_getattr   = lov_attr_get_raid0,
406 };
407
408 static int lov_attr_get_dom(const struct lu_env *env, struct lov_object *lov,
409                             unsigned int index, struct lov_layout_entry *lle,
410                             struct cl_attr **lov_attr)
411 {
412         struct lov_layout_dom *dom = &lle->lle_dom;
413         struct lov_oinfo *loi = dom->lo_loi;
414         struct cl_attr *attr = &dom->lo_dom_r0.lo_attr;
415
416         if (dom->lo_dom_r0.lo_attr_valid) {
417                 *lov_attr = attr;
418                 return 0;
419         }
420
421         if (OST_LVB_IS_ERR(loi->loi_lvb.lvb_blocks))
422                 return OST_LVB_GET_ERR(loi->loi_lvb.lvb_blocks);
423
424         cl_lvb2attr(attr, &loi->loi_lvb);
425
426         /* DoM component size can be bigger than stripe size after
427          * client's setattr RPC, so do not count anything beyond
428          * component end. Alternatively, check that limit on server
429          * and do not allow size overflow there. */
430         if (attr->cat_size > lle->lle_extent->e_end)
431                 attr->cat_size = lle->lle_extent->e_end;
432
433         attr->cat_kms = attr->cat_size;
434
435         dom->lo_dom_r0.lo_attr_valid = 1;
436         *lov_attr = attr;
437
438         return 0;
439 }
440
441 /**
442  * Look up the FLD to get the MDS index for the given DOM object FID.
443  *
444  * \param[in]  ld       LOV device
445  * \param[in]  fid      FID to look up
446  * \param[out] nr       index in the MDC array corresponding to that MDS
447  *
448  * \retval              0, with \a nr filled with the MDC array index, on success
449  * \retval              negative value on error
450  */
451 static int lov_fld_lookup(struct lov_device *ld, const struct lu_fid *fid,
452                           __u32 *nr)
453 {
454         __u32 mds_idx;
455         int i, rc;
456
457         ENTRY;
458
459         rc = fld_client_lookup(&ld->ld_lmv->u.lmv.lmv_fld, fid_seq(fid),
460                                &mds_idx, LU_SEQ_RANGE_MDT, NULL);
461         if (rc) {
462                 CERROR("%s: error while looking for mds number. Seq %#llx"
463                        ", err = %d\n", lu_dev_name(cl2lu_dev(&ld->ld_cl)),
464                        fid_seq(fid), rc);
465                 RETURN(rc);
466         }
467
468         CDEBUG(D_INODE, "FLD lookup got mds #%x for fid="DFID"\n",
469                mds_idx, PFID(fid));
470
471         /* find proper MDC device in the array */
472         for (i = 0; i < ld->ld_md_tgts_nr; i++) {
473                 if (ld->ld_md_tgts[i].ldm_mdc != NULL &&
474                     ld->ld_md_tgts[i].ldm_idx == mds_idx)
475                         break;
476         }
477
478         if (i == ld->ld_md_tgts_nr) {
479                 CERROR("%s: cannot find corresponding MDC device for mds #%x "
480                        "for fid="DFID"\n", lu_dev_name(cl2lu_dev(&ld->ld_cl)),
481                        mds_idx, PFID(fid));
482                 rc = -EINVAL;
483         } else {
484                 *nr = i;
485         }
486         RETURN(rc);
487 }
488
489 /**
490  * Implementation of lov_comp_layout_entry_ops::lco_init for DOM object.
491  *
492  * Initialize the DOM object for the first time. It also prepares a RAID0
493  * entry so it can share common methods with ordinary RAID0 layout entries.
494  *
495  * \param[in] env       execution environment
496  * \param[in] dev       LOV device
497  * \param[in] lov       LOV object
498  * \param[in] index     Composite layout entry index in LSM
499  * \param[in] lle       Composite LOV layout entry
500  */
501 static int lov_init_dom(const struct lu_env *env, struct lov_device *dev,
502                         struct lov_object *lov, unsigned int index,
503                         const struct cl_object_conf *conf,
504                         struct lov_layout_entry *lle)
505 {
506         struct lov_thread_info *lti = lov_env_info(env);
507         struct lov_stripe_md_entry *lsme = lov_lse(lov, index);
508         struct cl_object *clo;
509         struct lu_object *o = lov2lu(lov);
510         const struct lu_fid *fid = lu_object_fid(o);
511         struct cl_device *mdcdev;
512         struct lov_oinfo *loi = NULL;
513         struct cl_object_conf *sconf = &lti->lti_stripe_conf;
514         int rc;
515         __u32 idx = 0;
516
517         ENTRY;
518
519         /* With FLR the DOM entry may not be LSM entry 0, but it must start at offset 0 */
520         if (unlikely(lle->lle_extent->e_start != 0)) {
521                 CERROR("%s: DOM entry must be the first stripe in a mirror\n",
522                        lov2obd(dev->ld_lov)->obd_name);
523                 dump_lsm(D_ERROR, lov->lo_lsm);
524                 RETURN(-EINVAL);
525         }
526
527         /* find proper MDS device */
528         rc = lov_fld_lookup(dev, fid, &idx);
529         if (rc)
530                 RETURN(rc);
531
532         LASSERTF(dev->ld_md_tgts[idx].ldm_mdc != NULL,
533                  "LOV md target[%u] is NULL\n", idx);
534
535         /* check lsm is DOM, more checks are needed */
536         LASSERT(lsme->lsme_stripe_count == 0);
537
538         /*
539          * Create lower cl_objects.
540          */
541         mdcdev = dev->ld_md_tgts[idx].ldm_mdc;
542
543         LASSERTF(mdcdev != NULL, "non-initialized mdc subdev\n");
544
545         /* A DoM object has no oinfo in its LSM entry, so allocate one here */
546         OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, GFP_NOFS);
547         if (loi == NULL)
548                 RETURN(-ENOMEM);
549
550         fid_to_ostid(lu_object_fid(lov2lu(lov)), &loi->loi_oi);
551
552         sconf->u.coc_oinfo = loi;
553 again:
554         clo = lov_sub_find(env, mdcdev, fid, sconf);
555         if (IS_ERR(clo))
556                 GOTO(out, rc = PTR_ERR(clo));
557
558         rc = lov_init_sub(env, lov, clo, loi, lov_comp_index(index, 0));
559         if (rc == -EAGAIN) /* try again */
560                 goto again;
561         else if (rc != 0)
562                 GOTO(out, rc);
563
564         lle->lle_dom.lo_dom = cl2lovsub(clo);
565         spin_lock_init(&lle->lle_dom.lo_dom_r0.lo_sub_lock);
566         lle->lle_dom.lo_dom_r0.lo_nr = 1;
567         lle->lle_dom.lo_dom_r0.lo_sub = &lle->lle_dom.lo_dom;
568         lle->lle_dom.lo_loi = loi;
569
570         rc = lov_page_slice_fixup(lov, clo);
571         RETURN(rc);
572
573 out:
574         if (loi != NULL)
575                 OBD_SLAB_FREE_PTR(loi, lov_oinfo_slab);
576         return rc;
577 }
578
579 /**
580  * Implementation of lov_comp_layout_entry_ops::lco_fini for a DOM object.
581  *
582  * Finish the DOM object and free related memory.
583  *
584  * \param[in] env       execution environment
585  * \param[in] lle       composite LOV layout entry whose DOM state is
586  *                      being released
587  */
588 static void lov_fini_dom(const struct lu_env *env,
589                          struct lov_layout_entry *lle)
590 {
591         if (lle->lle_dom.lo_dom != NULL)
592                 lle->lle_dom.lo_dom = NULL;
593         if (lle->lle_dom.lo_loi != NULL)
594                 OBD_SLAB_FREE_PTR(lle->lle_dom.lo_loi, lov_oinfo_slab);
595 }
596
597 static struct lov_comp_layout_entry_ops dom_ops = {
598         .lco_init = lov_init_dom,
599         .lco_fini = lov_fini_dom,
600         .lco_getattr = lov_attr_get_dom,
601 };
602
603 static int lov_init_composite(const struct lu_env *env, struct lov_device *dev,
604                               struct lov_object *lov, struct lov_stripe_md *lsm,
605                               const struct cl_object_conf *conf,
606                               union lov_layout_state *state)
607 {
608         struct lov_layout_composite *comp = &state->composite;
609         struct lov_layout_entry *lle;
610         struct lov_mirror_entry *lre;
611         unsigned int entry_count;
612         unsigned int psz = 0;
613         unsigned int mirror_count;
614         int flr_state = lsm->lsm_flags & LCM_FL_FLR_MASK;
615         int result = 0;
616         unsigned int seq;
617         int i, j, preference;
618         u64 dom_size = 0;
619
620         ENTRY;
621
622         LASSERT(lsm->lsm_entry_count > 0);
623         LASSERT(lov->lo_lsm == NULL);
624         lov->lo_lsm = lsm_addref(lsm);
625         set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
626
627         dump_lsm(D_INODE, lsm);
628
629         entry_count = lsm->lsm_entry_count;
630
631         comp->lo_flags = lsm->lsm_flags;
632         comp->lo_mirror_count = lsm->lsm_mirror_count + 1;
633         comp->lo_entry_count = lsm->lsm_entry_count;
634         comp->lo_preferred_mirror = -1;
635
636         if (equi(flr_state == LCM_FL_NONE, comp->lo_mirror_count > 1))
637                 RETURN(-EINVAL);
638
639         OBD_ALLOC_PTR_ARRAY(comp->lo_mirrors, comp->lo_mirror_count);
640         if (comp->lo_mirrors == NULL)
641                 RETURN(-ENOMEM);
642
643         OBD_ALLOC_PTR_ARRAY(comp->lo_entries, entry_count);
644         if (comp->lo_entries == NULL)
645                 RETURN(-ENOMEM);
646
647         /* Initialize all entry types and extent data first */
648         for (i = 0, j = 0, mirror_count = 1; i < entry_count; i++) {
649                 int mirror_id = 0;
650
651                 lle = &comp->lo_entries[i];
652
653                 lle->lle_lsme = lsm->lsm_entries[i];
654                 lle->lle_type = lov_entry_type(lle->lle_lsme);
655                 lle->lle_preference = 0;
656                 switch (lle->lle_type) {
657                 case LOV_PATTERN_RAID0:
658                         lle->lle_comp_ops = &raid0_ops;
659                         break;
660                 case LOV_PATTERN_MDT:
661                         /* Allowed to have several DOM stripes in different
662                          * mirrors with the same DoM size.
663                          */
664                         if (!dom_size) {
665                                 dom_size = lle->lle_lsme->lsme_extent.e_end;
666                         } else if (dom_size !=
667                                    lle->lle_lsme->lsme_extent.e_end) {
668                                 CERROR("%s: DOM entries with different sizes\n",
669                                        lov2obd(dev->ld_lov)->obd_name);
670                                 dump_lsm(D_ERROR, lsm);
671                                 RETURN(-EINVAL);
672                         }
673                         lle->lle_comp_ops = &dom_ops;
674                         break;
675                 case LOV_PATTERN_FOREIGN:
676                         lle->lle_comp_ops = NULL;
677                         break;
678                 default:
679                         CERROR("%s: unknown composite layout entry type %i\n",
680                                lov2obd(dev->ld_lov)->obd_name,
681                                lsm->lsm_entries[i]->lsme_pattern);
682                         dump_lsm(D_ERROR, lsm);
683                         RETURN(-EIO);
684                 }
685
686                 lle->lle_extent = &lle->lle_lsme->lsme_extent;
687                 lle->lle_valid = !(lle->lle_lsme->lsme_flags & LCME_FL_STALE);
688
689                 if (flr_state != LCM_FL_NONE)
690                         mirror_id = mirror_id_of(lle->lle_lsme->lsme_id);
691
692                 lre = &comp->lo_mirrors[j];
693                 if (i > 0) {
694                         if (mirror_id == lre->lre_mirror_id) {
695                                 lre->lre_valid |= lle->lle_valid;
696                                 lre->lre_stale |= !lle->lle_valid;
697                                 lre->lre_foreign |=
698                                         lsme_is_foreign(lle->lle_lsme);
699                                 lre->lre_end = i;
700                                 continue;
701                         }
702
703                         /* new mirror detected, assume that the mirrors
704                          * are sorted in the layout */
705                         ++mirror_count;
706                         ++j;
707                         if (j >= comp->lo_mirror_count)
708                                 break;
709
710                         lre = &comp->lo_mirrors[j];
711                 }
712
713                 /* entries must be sorted by mirrors */
714                 lre->lre_mirror_id = mirror_id;
715                 lre->lre_start = lre->lre_end = i;
716                 lre->lre_preference = lle->lle_lsme->lsme_flags &
717                                         LCME_FL_PREF_RD ? 1000 : 0;
718                 lre->lre_valid = lle->lle_valid;
719                 lre->lre_stale = !lle->lle_valid;
720                 lre->lre_foreign = lsme_is_foreign(lle->lle_lsme);
721         }
722
723         /* sanity check for FLR */
724         if (mirror_count != comp->lo_mirror_count) {
725                 CDEBUG(D_INODE, DFID
726                        " doesn't have the # of mirrors it claims, %u/%u\n",
727                        PFID(lu_object_fid(lov2lu(lov))), mirror_count,
728                        comp->lo_mirror_count + 1);
729
730                 GOTO(out, result = -EINVAL);
731         }
732
733         lov_foreach_layout_entry(lov, lle) {
734                 int index = lov_layout_entry_index(lov, lle);
735
736                 /**
737                  * For a PFL layout, if this component has not been
738                  * instantiated on the MDS side, components beyond this one
739                  * will be instantiated dynamically later on write/truncate.
740                  */
741                 if (!lsme_inited(lle->lle_lsme))
742                         continue;
743
744                 if (lsme_is_foreign(lle->lle_lsme))
745                         continue;
746
747                 result = lle->lle_comp_ops->lco_init(env, dev, lov, index,
748                                                      conf, lle);
749                 if (result < 0)
750                         break;
751
752                 LASSERT(ergo(psz > 0, psz == result));
753                 psz = result;
754         }
755
756         if (psz > 0)
757                 cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;
758
759         /* Decide the preferred mirror from a hash of the lov_object, so that
760          * different clients prefer different mirrors for reads (see below). */
761         mirror_count = 0;
762         preference = -1;
763         seq = cfs_hash_long((unsigned long)lov, 8);
764         for (i = 0; i < comp->lo_mirror_count; i++) {
765                 unsigned int idx = (i + seq) % comp->lo_mirror_count;
766
767                 lre = lov_mirror_entry(lov, idx);
768                 if (lre->lre_stale)
769                         continue;
770
771                 if (lre->lre_foreign)
772                         continue;
773
774                 mirror_count++; /* valid mirror */
775
776                 /* aggregated preference of all involved OSTs */
777                 for (j = lre->lre_start; j <= lre->lre_end; j++) {
778                         lre->lre_preference +=
779                                 comp->lo_entries[j].lle_preference;
780                 }
781
782                 if (lre->lre_preference > preference) {
783                         preference = lre->lre_preference;
784                         comp->lo_preferred_mirror = idx;
785                 }
786         }
787         if (!mirror_count) {
788                 CDEBUG(D_INODE, DFID
789                        " doesn't have any valid mirrors\n",
790                        PFID(lu_object_fid(lov2lu(lov))));
791
792                 comp->lo_preferred_mirror = 0;
793         }
794
795         LASSERT(comp->lo_preferred_mirror >= 0);
796
797         EXIT;
798 out:
799         return result > 0 ? 0 : result;
800 }
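/*
 * Editorial sketch (not part of the original source): the preferred-mirror
 * selection above starts scanning at a per-object offset, seq, derived from
 * hashing the lov_object pointer.  For example, with 3 mirrors and seq = 7
 * the candidates are visited as idx = (i + 7) % 3, i.e. 1, 2, 0, so
 * different client objects tend to settle on different valid mirrors while
 * stale and foreign mirrors are always skipped.
 */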
801
802 static int lov_init_empty(const struct lu_env *env, struct lov_device *dev,
803                           struct lov_object *lov, struct lov_stripe_md *lsm,
804                           const struct cl_object_conf *conf,
805                           union lov_layout_state *state)
806 {
807         return 0;
808 }
809
810 static int lov_init_released(const struct lu_env *env,
811                              struct lov_device *dev, struct lov_object *lov,
812                              struct lov_stripe_md *lsm,
813                              const struct cl_object_conf *conf,
814                              union lov_layout_state *state)
815 {
816         LASSERT(lsm != NULL);
817         LASSERT(lsm->lsm_is_released);
818         LASSERT(lov->lo_lsm == NULL);
819
820         lov->lo_lsm = lsm_addref(lsm);
821         return 0;
822 }
823
824 static int lov_init_foreign(const struct lu_env *env,
825                             struct lov_device *dev, struct lov_object *lov,
826                             struct lov_stripe_md *lsm,
827                             const struct cl_object_conf *conf,
828                             union lov_layout_state *state)
829 {
830         LASSERT(lsm != NULL);
831         LASSERT(lov->lo_type == LLT_FOREIGN);
832         LASSERT(lov->lo_lsm == NULL);
833
834         lov->lo_lsm = lsm_addref(lsm);
835         return 0;
836 }
837
838 static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
839                             union lov_layout_state *state)
840 {
841         LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED ||
842                 lov->lo_type == LLT_FOREIGN);
843
844         lov_layout_wait(env, lov);
845         return 0;
846 }
847
848 static int lov_delete_composite(const struct lu_env *env,
849                                 struct lov_object *lov,
850                                 union lov_layout_state *state)
851 {
852         struct lov_layout_entry *entry;
853         int rc;
854
855         ENTRY;
856
857         dump_lsm(D_INODE, lov->lo_lsm);
858
859         lov_layout_wait(env, lov);
860         lov_foreach_layout_entry(lov, entry) {
861                 if (entry->lle_lsme && lsme_is_foreign(entry->lle_lsme))
862                         continue;
863
864                 rc = lov_delete_raid0(env, lov, entry);
865                 if (rc)
866                         RETURN(rc);
867         }
868
869         RETURN(0);
870 }
871
872 static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov,
873                            union lov_layout_state *state)
874 {
875         LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
876 }
877
878 static void lov_fini_composite(const struct lu_env *env,
879                                struct lov_object *lov,
880                                union lov_layout_state *state)
881 {
882         struct lov_layout_composite *comp = &state->composite;
883         ENTRY;
884
885         if (comp->lo_entries != NULL) {
886                 struct lov_layout_entry *entry;
887
888                 lov_foreach_layout_entry(lov, entry)
889                         if (entry->lle_comp_ops)
890                                 entry->lle_comp_ops->lco_fini(env, entry);
891
892                 OBD_FREE_PTR_ARRAY(comp->lo_entries, comp->lo_entry_count);
893                 comp->lo_entries = NULL;
894         }
895
896         if (comp->lo_mirrors != NULL) {
897                 OBD_FREE_PTR_ARRAY(comp->lo_mirrors, comp->lo_mirror_count);
898                 comp->lo_mirrors = NULL;
899         }
900
901         memset(comp, 0, sizeof(*comp));
902
903         dump_lsm(D_INODE, lov->lo_lsm);
904         lov_free_memmd(&lov->lo_lsm);
905
906         EXIT;
907 }
908
909 static void lov_fini_released(const struct lu_env *env, struct lov_object *lov,
910                                 union lov_layout_state *state)
911 {
912         ENTRY;
913         dump_lsm(D_INODE, lov->lo_lsm);
914         lov_free_memmd(&lov->lo_lsm);
915         EXIT;
916 }
917
918 static int lov_print_empty(const struct lu_env *env, void *cookie,
919                            lu_printer_t p, const struct lu_object *o)
920 {
921         (*p)(env, cookie, "empty %d\n",
922              test_bit(LO_LAYOUT_INVALID, &lu2lov(o)->lo_obj_flags));
923         return 0;
924 }
925
926 static int lov_print_composite(const struct lu_env *env, void *cookie,
927                                lu_printer_t p, const struct lu_object *o)
928 {
929         struct lov_object *lov = lu2lov(o);
930         struct lov_stripe_md *lsm = lov->lo_lsm;
931         int i;
932
933         (*p)(env, cookie, "entries: %d, %s, lsm{%p 0x%08X %d %u}:\n",
934              lsm->lsm_entry_count,
935              test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags) ? "invalid" :
936              "valid", lsm, lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
937              lsm->lsm_layout_gen);
938
939         for (i = 0; i < lsm->lsm_entry_count; i++) {
940                 struct lov_stripe_md_entry *lse = lsm->lsm_entries[i];
941                 struct lov_layout_entry *lle = lov_entry(lov, i);
942
943                 (*p)(env, cookie,
944                      DEXT ": { 0x%08X, %u, %#x, %u, %#x, %u, %u }\n",
945                      PEXT(&lse->lsme_extent), lse->lsme_magic,
946                      lse->lsme_id, lse->lsme_pattern, lse->lsme_layout_gen,
947                      lse->lsme_flags, lse->lsme_stripe_count,
948                      lse->lsme_stripe_size);
949
950                 if (!lsme_is_foreign(lse))
951                         lov_print_raid0(env, cookie, p, lle);
952         }
953
954         return 0;
955 }
956
957 static int lov_print_released(const struct lu_env *env, void *cookie,
958                                 lu_printer_t p, const struct lu_object *o)
959 {
960         struct lov_object       *lov = lu2lov(o);
961         struct lov_stripe_md    *lsm = lov->lo_lsm;
962
963         (*p)(env, cookie,
964                 "released: %s, lsm{%p 0x%08X %d %u}:\n",
965                 test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags) ? "invalid" :
966                 "valid", lsm, lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
967                 lsm->lsm_layout_gen);
968         return 0;
969 }
970
971 static int lov_print_foreign(const struct lu_env *env, void *cookie,
972                                 lu_printer_t p, const struct lu_object *o)
973 {
974         struct lov_object       *lov = lu2lov(o);
975         struct lov_stripe_md    *lsm = lov->lo_lsm;
976
977         (*p)(env, cookie,
978                 "foreign: %s, lsm{%p 0x%08X %d %u}:\n",
979                 test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags) ?
980                 "invalid" : "valid", lsm,
981                 lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
982                 lsm->lsm_layout_gen);
983         (*p)(env, cookie,
984                 "raw_ea_content '%.*s'\n",
985                 (int)lsm->lsm_foreign_size, (char *)lsm_foreign(lsm));
986         return 0;
987 }
988
989 /**
990  * Implements cl_object_operations::coo_attr_get() method for an object
991  * without stripes (LLT_EMPTY layout type).
992  *
993  * The only attribute this layer is authoritative for in this case is
994  * cl_attr::cat_blocks, which is 0.
995  */
996 static int lov_attr_get_empty(const struct lu_env *env, struct cl_object *obj,
997                               struct cl_attr *attr)
998 {
999         attr->cat_blocks = 0;
1000         return 0;
1001 }
1002
1003 static int lov_attr_get_composite(const struct lu_env *env,
1004                                   struct cl_object *obj,
1005                                   struct cl_attr *attr)
1006 {
1007         struct lov_object       *lov = cl2lov(obj);
1008         struct lov_layout_entry *entry;
1009         int                      result = 0;
1010
1011         ENTRY;
1012
1013         attr->cat_size = 0;
1014         attr->cat_blocks = 0;
1015         attr->cat_kms = 0;
1016
1017         lov_foreach_layout_entry(lov, entry) {
1018                 struct cl_attr *lov_attr = NULL;
1019                 int index = lov_layout_entry_index(lov, entry);
1020
1021                 if (!entry->lle_valid)
1022                         continue;
1023
1024                 /* PFL: This component has not been init-ed. */
1025                 if (!lsm_entry_inited(lov->lo_lsm, index))
1026                         continue;
1027
1028                 result = entry->lle_comp_ops->lco_getattr(env, lov, index,
1029                                                           entry, &lov_attr);
1030                 if (result < 0)
1031                         RETURN(result);
1032
1033                 if (lov_attr == NULL)
1034                         continue;
1035
1036                 CDEBUG(D_INODE, "COMP ID #%i: s=%llu m=%llu a=%llu c=%llu "
1037                        "b=%llu\n", index - 1, lov_attr->cat_size,
1038                        lov_attr->cat_mtime, lov_attr->cat_atime,
1039                        lov_attr->cat_ctime, lov_attr->cat_blocks);
1040
1041                 /* merge results */
1042                 if (lov_attr->cat_kms_valid)
1043                         attr->cat_kms_valid = 1;
1044                 attr->cat_blocks += lov_attr->cat_blocks;
1045                 if (attr->cat_size < lov_attr->cat_size)
1046                         attr->cat_size = lov_attr->cat_size;
1047                 if (attr->cat_kms < lov_attr->cat_kms)
1048                         attr->cat_kms = lov_attr->cat_kms;
1049                 if (attr->cat_atime < lov_attr->cat_atime)
1050                         attr->cat_atime = lov_attr->cat_atime;
1051                 if (attr->cat_ctime < lov_attr->cat_ctime)
1052                         attr->cat_ctime = lov_attr->cat_ctime;
1053                 if (attr->cat_mtime < lov_attr->cat_mtime)
1054                         attr->cat_mtime = lov_attr->cat_mtime;
1055         }
1056
1057         RETURN(0);
1058 }
1059
1060 static int lov_flush_composite(const struct lu_env *env,
1061                                struct cl_object *obj,
1062                                struct ldlm_lock *lock)
1063 {
1064         struct lov_object *lov = cl2lov(obj);
1065         struct lov_layout_entry *lle;
1066         int rc = -ENODATA;
1067
1068         ENTRY;
1069
1070         lov_foreach_layout_entry(lov, lle) {
1071                 if (!lsme_is_dom(lle->lle_lsme))
1072                         continue;
1073                 rc = cl_object_flush(env, lovsub2cl(lle->lle_dom.lo_dom), lock);
1074                 break;
1075         }
1076
1077         RETURN(rc);
1078 }
1079
1080 static int lov_flush_empty(const struct lu_env *env, struct cl_object *obj,
1081                            struct ldlm_lock *lock)
1082 {
1083         return 0;
1084 }
1085
1086 static const struct lov_layout_operations lov_dispatch[] = {
1087         [LLT_EMPTY] = {
1088                 .llo_init      = lov_init_empty,
1089                 .llo_delete    = lov_delete_empty,
1090                 .llo_fini      = lov_fini_empty,
1091                 .llo_print     = lov_print_empty,
1092                 .llo_page_init = lov_page_init_empty,
1093                 .llo_lock_init = lov_lock_init_empty,
1094                 .llo_io_init   = lov_io_init_empty,
1095                 .llo_getattr   = lov_attr_get_empty,
1096                 .llo_flush     = lov_flush_empty,
1097         },
1098         [LLT_RELEASED] = {
1099                 .llo_init      = lov_init_released,
1100                 .llo_delete    = lov_delete_empty,
1101                 .llo_fini      = lov_fini_released,
1102                 .llo_print     = lov_print_released,
1103                 .llo_page_init = lov_page_init_empty,
1104                 .llo_lock_init = lov_lock_init_empty,
1105                 .llo_io_init   = lov_io_init_released,
1106                 .llo_getattr   = lov_attr_get_empty,
1107                 .llo_flush     = lov_flush_empty,
1108         },
1109         [LLT_COMP] = {
1110                 .llo_init      = lov_init_composite,
1111                 .llo_delete    = lov_delete_composite,
1112                 .llo_fini      = lov_fini_composite,
1113                 .llo_print     = lov_print_composite,
1114                 .llo_page_init = lov_page_init_composite,
1115                 .llo_lock_init = lov_lock_init_composite,
1116                 .llo_io_init   = lov_io_init_composite,
1117                 .llo_getattr   = lov_attr_get_composite,
1118                 .llo_flush     = lov_flush_composite,
1119         },
1120         [LLT_FOREIGN] = {
1121                 .llo_init      = lov_init_foreign,
1122                 .llo_delete    = lov_delete_empty,
1123                 .llo_fini      = lov_fini_released,
1124                 .llo_print     = lov_print_foreign,
1125                 .llo_page_init = lov_page_init_foreign,
1126                 .llo_lock_init = lov_lock_init_empty,
1127                 .llo_io_init   = lov_io_init_empty,
1128                 .llo_getattr   = lov_attr_get_empty,
1129                 .llo_flush     = lov_flush_empty,
1130         },
1131 };
1132
1133 /**
1134  * Performs a double-dispatch based on the layout type of an object.
1135  */
1136 #define LOV_2DISPATCH_NOLOCK(obj, op, ...)              \
1137 ({                                                      \
1138         struct lov_object *__obj = (obj);               \
1139         enum lov_layout_type __llt;                     \
1140                                                         \
1141         __llt = __obj->lo_type;                         \
1142         LASSERT(__llt < ARRAY_SIZE(lov_dispatch));      \
1143         lov_dispatch[__llt].op(__VA_ARGS__);            \
1144 })
1145
1146 /**
1147  * Return lov_layout_type associated with a given lsm
1148  */
1149 static enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
1150 {
1151         if (lsm == NULL)
1152                 return LLT_EMPTY;
1153
1154         if (lsm->lsm_is_released)
1155                 return LLT_RELEASED;
1156
1157         if (lsm->lsm_magic == LOV_MAGIC_V1 ||
1158             lsm->lsm_magic == LOV_MAGIC_V3 ||
1159             lsm->lsm_magic == LOV_MAGIC_COMP_V1)
1160                 return LLT_COMP;
1161
1162         if (lsm->lsm_magic == LOV_MAGIC_FOREIGN)
1163                 return LLT_FOREIGN;
1164
1165         return LLT_EMPTY;
1166 }
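/*
 * Editorial sketch (not part of the original source): how lov_type() and
 * the lov_dispatch[] table combine.  lov_object_init() below does roughly
 * the following:
 *
 *      const struct lov_layout_operations *ops;
 *
 *      lov->lo_type = lov_type(lsm);
 *      ops = &lov_dispatch[lov->lo_type];
 *      rc = ops->llo_init(env, dev, lov, lsm, cconf, &lov->u);
 */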
1167
1168 static inline void lov_conf_freeze(struct lov_object *lov)
1169 {
1170         CDEBUG(D_INODE, "To take share lov(%p) owner %p/%p\n",
1171                 lov, lov->lo_owner, current);
1172         if (lov->lo_owner != current)
1173                 down_read(&lov->lo_type_guard);
1174 }
1175
1176 static inline void lov_conf_thaw(struct lov_object *lov)
1177 {
1178         CDEBUG(D_INODE, "To release share lov(%p) owner %p/%p\n",
1179                 lov, lov->lo_owner, current);
1180         if (lov->lo_owner != current)
1181                 up_read(&lov->lo_type_guard);
1182 }
1183
1184 #define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...)                       \
1185 ({                                                                      \
1186         struct lov_object                      *__obj = (obj);          \
1187         int                                     __lock = !!(lock);      \
1188         typeof(lov_dispatch[0].op(__VA_ARGS__)) __result;               \
1189                                                                         \
1190         if (__lock)                                                     \
1191                 lov_conf_freeze(__obj);                                 \
1192         __result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__);          \
1193         if (__lock)                                                     \
1194                 lov_conf_thaw(__obj);                                   \
1195         __result;                                                       \
1196 })
1197
1198 /**
1199  * Performs a locked double-dispatch based on the layout type of an object.
1200  */
1201 #define LOV_2DISPATCH(obj, op, ...)                     \
1202         LOV_2DISPATCH_MAYLOCK(obj, op, 1, __VA_ARGS__)
1203
1204 #define LOV_2DISPATCH_VOID(obj, op, ...)                                \
1205 do {                                                                    \
1206         struct lov_object                      *__obj = (obj);          \
1207         enum lov_layout_type                    __llt;                  \
1208                                                                         \
1209         lov_conf_freeze(__obj);                                         \
1210         __llt = __obj->lo_type;                                         \
1211         LASSERT(__llt < ARRAY_SIZE(lov_dispatch));                      \
1212         lov_dispatch[__llt].op(__VA_ARGS__);                            \
1213         lov_conf_thaw(__obj);                                           \
1214 } while (0)
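/*
 * Editorial sketch (not part of the original source): hypothetical callers
 * of the dispatch macros above.  LOV_2DISPATCH takes the lov configuration
 * read lock around the per-layout-type call, LOV_2DISPATCH_NOLOCK does not,
 * and LOV_2DISPATCH_VOID is the locked form for methods returning void:
 *
 *      rc = LOV_2DISPATCH(cl2lov(obj), llo_getattr, env, obj, attr);
 *      LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
 */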
1215
1216 static void lov_conf_lock(struct lov_object *lov)
1217 {
1218         LASSERT(lov->lo_owner != current);
1219         down_write(&lov->lo_type_guard);
1220         LASSERT(lov->lo_owner == NULL);
1221         lov->lo_owner = current;
1222         CDEBUG(D_INODE, "Took exclusive lov(%p) owner %p\n",
1223                 lov, lov->lo_owner);
1224 }
1225
1226 static void lov_conf_unlock(struct lov_object *lov)
1227 {
1228         CDEBUG(D_INODE, "To release exclusive lov(%p) owner %p\n",
1229                 lov, lov->lo_owner);
1230         lov->lo_owner = NULL;
1231         up_write(&lov->lo_type_guard);
1232 }
1233
1234 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
1235 {
1236         ENTRY;
1237
1238         while (atomic_read(&lov->lo_active_ios) > 0) {
1239                 CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
1240                         PFID(lu_object_fid(lov2lu(lov))),
1241                         atomic_read(&lov->lo_active_ios));
1242
1243                 wait_event_idle(lov->lo_waitq,
1244                                 atomic_read(&lov->lo_active_ios) == 0);
1245         }
1246         RETURN(0);
1247 }
1248
1249 static int lov_layout_change(const struct lu_env *unused,
1250                              struct lov_object *lov, struct lov_stripe_md *lsm,
1251                              const struct cl_object_conf *conf)
1252 {
1253         enum lov_layout_type llt = lov_type(lsm);
1254         union lov_layout_state *state = &lov->u;
1255         const struct lov_layout_operations *old_ops;
1256         const struct lov_layout_operations *new_ops;
1257         struct lov_device *lov_dev = lov_object_dev(lov);
1258         struct lu_env *env;
1259         __u16 refcheck;
1260         int rc;
1261         ENTRY;
1262
1263         LASSERT(lov->lo_type < ARRAY_SIZE(lov_dispatch));
1264
1265         env = cl_env_get(&refcheck);
1266         if (IS_ERR(env))
1267                 RETURN(PTR_ERR(env));
1268
1269         LASSERT(llt < ARRAY_SIZE(lov_dispatch));
1270
1271         CDEBUG(D_INODE, DFID" from %s to %s\n",
1272                PFID(lu_object_fid(lov2lu(lov))),
1273                llt2str(lov->lo_type), llt2str(llt));
1274
1275         old_ops = &lov_dispatch[lov->lo_type];
1276         new_ops = &lov_dispatch[llt];
1277
1278         rc = cl_object_prune(env, &lov->lo_cl);
1279         if (rc != 0)
1280                 GOTO(out, rc);
1281
1282         rc = old_ops->llo_delete(env, lov, &lov->u);
1283         if (rc != 0)
1284                 GOTO(out, rc);
1285
1286         old_ops->llo_fini(env, lov, &lov->u);
1287
1288         LASSERT(atomic_read(&lov->lo_active_ios) == 0);
1289
1290         CDEBUG(D_INODE, DFID "Apply new layout lov %p, type %d\n",
1291                PFID(lu_object_fid(lov2lu(lov))), lov, llt);
1292
1293         /* page bufsize fixup */
1294         cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
1295                 lov_page_slice_fixup(lov, NULL);
1296
1297         lov->lo_type = llt;
1298         rc = new_ops->llo_init(env, lov_dev, lov, lsm, conf, state);
1299         if (rc != 0) {
1300                 struct obd_device *obd = lov2obd(lov_dev->ld_lov);
1301
1302                 CERROR("%s: cannot apply new layout on "DFID" : rc = %d\n",
1303                        obd->obd_name, PFID(lu_object_fid(lov2lu(lov))), rc);
1304                 new_ops->llo_delete(env, lov, state);
1305                 new_ops->llo_fini(env, lov, state);
1306                 /* this file becomes an EMPTY file. */
1307                 lov->lo_type = LLT_EMPTY;
1308                 GOTO(out, rc);
1309         }
1310
1311 out:
1312         cl_env_put(env, &refcheck);
1313         RETURN(rc);
1314 }
1315
1316 /*****************************************************************************
1317  *
1318  * Lov object operations.
1319  *
1320  */
1321 static int lov_object_init(const struct lu_env *env, struct lu_object *obj,
1322                            const struct lu_object_conf *conf)
1323 {
1324         struct lov_object            *lov   = lu2lov(obj);
1325         struct lov_device            *dev   = lov_object_dev(lov);
1326         const struct cl_object_conf  *cconf = lu2cl_conf(conf);
1327         union lov_layout_state       *set   = &lov->u;
1328         const struct lov_layout_operations *ops;
1329         struct lov_stripe_md *lsm = NULL;
1330         int rc;
1331         ENTRY;
1332
1333         init_rwsem(&lov->lo_type_guard);
1334         atomic_set(&lov->lo_active_ios, 0);
1335         init_waitqueue_head(&lov->lo_waitq);
1336         cl_object_page_init(lu2cl(obj), 0);
1337
1338         lov->lo_type = LLT_EMPTY;
1339         if (cconf->u.coc_layout.lb_buf != NULL) {
1340                 lsm = lov_unpackmd(dev->ld_lov,
1341                                    cconf->u.coc_layout.lb_buf,
1342                                    cconf->u.coc_layout.lb_len);
1343                 if (IS_ERR(lsm))
1344                         RETURN(PTR_ERR(lsm));
1345
1346                 dump_lsm(D_INODE, lsm);
1347         }
1348
1349         /* no locking is necessary, as object is being created */
1350         lov->lo_type = lov_type(lsm);
1351         ops = &lov_dispatch[lov->lo_type];
1352         rc = ops->llo_init(env, dev, lov, lsm, cconf, set);
1353         if (rc != 0)
1354                 GOTO(out_lsm, rc);
1355
1356 out_lsm:
1357         lov_lsm_put(lsm);
1358
1359         RETURN(rc);
1360 }
1361
1362 static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
1363                         const struct cl_object_conf *conf)
1364 {
1365         struct lov_stripe_md    *lsm = NULL;
1366         struct lov_object       *lov = cl2lov(obj);
1367         int                      result = 0;
1368         struct cl_object *top = cl_object_top(obj);
1369         bool unlock_inode = false;
1370         bool lock_inode_size = false;
1371         bool lock_layout = false;
1372         ENTRY;
1373
1374         if (conf->coc_opc == OBJECT_CONF_SET &&
1375             conf->u.coc_layout.lb_buf != NULL) {
1376                 lsm = lov_unpackmd(lov_object_dev(lov)->ld_lov,
1377                                    conf->u.coc_layout.lb_buf,
1378                                    conf->u.coc_layout.lb_len);
1379                 if (IS_ERR(lsm))
1380                         RETURN(PTR_ERR(lsm));
1381                 dump_lsm(D_INODE, lsm);
1382         }
1383
1384         if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
1385                 set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
1386                 GOTO(out_lsm, result = 0);
1387         }
1388
1389 retry:
1390         lov_conf_lock(lov);
1391         if (conf->coc_opc == OBJECT_CONF_WAIT) {
1392                 if (test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags) &&
1393                     atomic_read(&lov->lo_active_ios) > 0) {
1394                         lov_conf_unlock(lov);
1395                         result = lov_layout_wait(env, lov);
1396                         lov_conf_lock(lov);
1397                 }
1398                 GOTO(out, result);
1399         }
1400
1401         LASSERT(conf->coc_opc == OBJECT_CONF_SET);
1402
1403         /*
1404          * Don't apply a stale layout, which can show up here if the reply
1405          * was returned without an LDLM lock.
1406          * XXX: can we roll back in case of recovery?
1407          */
1408         if (lsm && lov->lo_lsm) {
1409                 u32 oldgen = lov->lo_lsm->lsm_layout_gen &= ~LU_LAYOUT_RESYNC;
1410                 u32 newgen = lsm->lsm_layout_gen & ~LU_LAYOUT_RESYNC;
1411
1412                 if (newgen < oldgen) {
1413                         CDEBUG(D_HA, "skip old for "DFID": %d < %d\n",
1414                                PFID(lu_object_fid(lov2lu(lov))),
1415                                (int)newgen, (int)oldgen);
1416                         GOTO(out, result = 0);
1417                 }
1418         }
1419
1420         if ((lsm == NULL && lov->lo_lsm == NULL) ||
1421             ((lsm != NULL && lov->lo_lsm != NULL) &&
1422              (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
1423              (lov->lo_lsm->lsm_flags == lsm->lsm_flags) &&
1424              (lov->lo_lsm->lsm_entries[0]->lsme_pattern ==
1425               lsm->lsm_entries[0]->lsme_pattern))) {
1426                 /* same version of layout */
1427                 clear_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
1428                 GOTO(out, result = 0);
1429         }
1430
1431         /* will change layout - check whether there is still active IO. */
1432         if (atomic_read(&lov->lo_active_ios) > 0) {
1433                 set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
1434                 GOTO(out, result = -EBUSY);
1435         }
1436
1437         if (conf->coc_try) {
1438                 set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
1439                 GOTO(out, result = -ERESTARTSYS);
1440         }
1441
1442         result = lov_layout_change(env, lov, lsm, conf);
1443         if (result) {
1444                 if (result == -EAGAIN) {
1445                         /**
1446                          * We need to drop the lov conf lock and take the
1447                          * inode lock. We may already hold the inode's size
1448                          * mutex and/or layout mutex, so the following lock
1449                          * order must be kept to avoid deadlock:
1450                          *   inode lock        (ll_inode_lock())
1451                          *   inode size lock   (ll_inode_size_lock())
1452                          *   inode layout lock (ll_layout_refresh())
1453                          *   lov conf lock     (lov_conf_lock())
1454                          *
1455                          * e.g.
1456                          *   vfs_setxattr                inode locked
1457                          *     ll_lov_setstripe_ea_info  inode size locked
1458                          *       ll_prep_inode
1459                          *         ll_file_inode_init
1460                          *           cl_conf_set
1461                          *             lov_conf_set      lov conf locked
1462                          *
1463                          *   ll_migrate                  inode locked
1464                          *     ...
1465                          *       ll_layout_refresh       inode layout locked
1466                          *         ll_layout_conf
1467                          *           cl_conf_set
1468                          *             lov_conf_set      lov conf locked
1469                          */
1470                         lov_conf_unlock(lov);
1471                         if (cl_object_inode_ops(env, top, COIO_LAYOUT_UNLOCK,
1472                                                 NULL) == 0)
1473                                 lock_layout = true;
1474                         if (cl_object_inode_ops(env, top, COIO_SIZE_UNLOCK,
1475                                                 NULL) == 0)
1476                                 lock_inode_size = true;
1477
1478                         /* take lock in order */
1479                         if (cl_object_inode_ops(
1480                                         env, top, COIO_INODE_LOCK, NULL) == 0)
1481                                 unlock_inode = true;
1482                         if (lock_inode_size)
1483                                 cl_object_inode_ops(env, top, COIO_SIZE_LOCK,
1484                                                     NULL);
1485                         if (lock_layout)
1486                                 cl_object_inode_ops(env, top, COIO_LAYOUT_LOCK,
1487                                                     NULL);
1488                         goto retry;
1489                 }
1490                 set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
1491         } else {
1492                 clear_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
1493         }
1494         EXIT;
1495
1496 out:
1497         lov_conf_unlock(lov);
1498         if (unlock_inode)
1499                 cl_object_inode_ops(env, top, COIO_INODE_UNLOCK, NULL);
1500 out_lsm:
1501         lov_lsm_put(lsm);
1502         CDEBUG(D_INODE, DFID" lo_layout_invalid=%u\n",
1503                PFID(lu_object_fid(lov2lu(lov))),
1504                test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags));
1505         RETURN(result);
1506 }
1507
1508 static void lov_object_delete(const struct lu_env *env, struct lu_object *obj)
1509 {
1510         struct lov_object *lov = lu2lov(obj);
1511
1512         ENTRY;
1513         LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
1514         EXIT;
1515 }
1516
1517 static void lov_object_free(const struct lu_env *env, struct lu_object *obj)
1518 {
1519         struct lov_object *lov = lu2lov(obj);
1520
1521         ENTRY;
1522         LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
1523         lu_object_fini(obj);
1524         OBD_SLAB_FREE_PTR(lov, lov_object_kmem);
1525         EXIT;
1526 }
1527
1528 static int lov_object_print(const struct lu_env *env, void *cookie,
1529                             lu_printer_t p, const struct lu_object *o)
1530 {
1531         return LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
1532 }
1533
1534 static int lov_page_init(const struct lu_env *env, struct cl_object *obj,
1535                          struct cl_page *page, pgoff_t index)
1536 {
1537         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page,
1538                                     index);
1539 }
1540
1541 /**
1542  * Implements cl_object_operations::clo_io_init() method for lov
1543  * layer. Dispatches to the appropriate layout io initialization method.
1544  */
1545 static int lov_io_init(const struct lu_env *env, struct cl_object *obj,
1546                        struct cl_io *io)
1547 {
1548         CL_IO_SLICE_CLEAN(lov_env_io(env), lis_preserved);
1549
1550         CDEBUG(D_INODE, DFID "io %p type %d ignore/verify layout %d/%d\n",
1551                PFID(lu_object_fid(&obj->co_lu)), io, io->ci_type,
1552                io->ci_ignore_layout, io->ci_verify_layout);
1553
1554         /* IOs of type CIT_MISC with ci_ignore_layout set are usually invoked
1555          * from the OSC layer. The lov layout conf lock should not be taken in
1556          * that case, because as long as the OSC object exists, the layout
1557          * cannot be reconfigured. */
1558         return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_io_init,
1559                         !(io->ci_ignore_layout && io->ci_type == CIT_MISC),
1560                         env, obj, io);
1561 }
1562
1563 /**
1564  * An implementation of cl_object_operations::clo_attr_get() method for lov
1565  * layer. For raid0 layout this collects and merges attributes of all
1566  * sub-objects.
1567  */
1568 static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
1569                         struct cl_attr *attr)
1570 {
1571         /* do not take lock, as this function is called under a
1572          * spin-lock. Layout is protected from changing by ongoing IO. */
1573         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
1574 }
1575
1576 static int lov_attr_update(const struct lu_env *env, struct cl_object *obj,
1577                            const struct cl_attr *attr, unsigned valid)
1578 {
1579         /*
1580          * No dispatch is required here, as no layout implements this.
1581          */
1582         return 0;
1583 }
1584
1585 static int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
1586                          struct cl_lock *lock, const struct cl_io *io)
1587 {
1588         /* No need to lock because we've taken one refcount of layout.  */
1589         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_lock_init, env, obj, lock,
1590                                     io);
1591 }
1592
1593 /**
1594  * We calculate on which OST the mapping will end. If the length of the
1595  * mapping is greater than (stripe_size * stripe_count) then last_stripe
1596  * will be the one just before start_stripe. Otherwise we check whether the
1597  * mapping intersects each OST and find last_stripe that way.
1598  * This function returns the last_stripe and also sets the stripe_count
1599  * over which the mapping is spread.
1600  *
1601  * \param lsm [in]              striping information for the file
1602  * \param index [in]            stripe component index
1603  * \param ext [in]              logical extent of mapping
1604  * \param start_stripe [in]     starting stripe of the mapping
1605  * \param stripe_count [out]    the number of stripes across which to map is
1606  *                              returned
1607  *
1608  * \retval last_stripe          return the last stripe of the mapping
1609  */
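/*
 * Worked example (hypothetical values): with lsme_stripe_count = 4 and
 * lsme_stripe_size = 1MB, a mapping of [0MB, 8MB) starting on stripe 0
 * spans more than stripe_size * stripe_count, so last_stripe becomes
 * stripe 3 and *stripe_count is set to 4.
 */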
1610 static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm, int index,
1611                                    struct lu_extent *ext,
1612                                    int start_stripe, int *stripe_count)
1613 {
1614         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1615         int init_stripe;
1616         int last_stripe;
1617         int i, j;
1618
1619         init_stripe = lov_stripe_number(lsm, index, ext->e_start);
1620
1621         if (ext->e_end - ext->e_start >
1622             lsme->lsme_stripe_size * lsme->lsme_stripe_count) {
1623                 if (init_stripe == start_stripe) {
1624                         last_stripe = (start_stripe < 1) ?
1625                                 lsme->lsme_stripe_count - 1 : start_stripe - 1;
1626                         *stripe_count = lsme->lsme_stripe_count;
1627                 } else if (init_stripe < start_stripe) {
1628                         last_stripe = (init_stripe < 1) ?
1629                                 lsme->lsme_stripe_count - 1 : init_stripe - 1;
1630                         *stripe_count = lsme->lsme_stripe_count -
1631                                         (start_stripe - init_stripe);
1632                 } else {
1633                         last_stripe = init_stripe - 1;
1634                         *stripe_count = init_stripe - start_stripe;
1635                 }
1636         } else {
1637                 for (j = 0, i = start_stripe; j < lsme->lsme_stripe_count;
1638                      i = (i + 1) % lsme->lsme_stripe_count, j++) {
1639                         if (!lov_stripe_intersects(lsm, index, i, ext, NULL,
1640                                                    NULL))
1641                                 break;
1642                         if ((start_stripe != init_stripe) && (i == init_stripe))
1643                                 break;
1644                 }
1645                 *stripe_count = j;
1646                 last_stripe = (start_stripe + j - 1) % lsme->lsme_stripe_count;
1647         }
1648
1649         return last_stripe;
1650 }
1651
1652 /**
1653  * Set fe_device and copy extents from local buffer into main return buffer.
1654  *
1655  * \param fiemap [out]          fiemap to hold all extents
1656  * \param lcl_fm_ext [in]       array of fiemap extents obtained from the OSC layer
1657  * \param ost_index [in]        OST index to be written into the fm_device
1658  *                              field for each extent
1659  * \param ext_count [in]        number of extents to be copied
1660  * \param current_extent [in]   where to start copying in the extent array
1661  */
1662 static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap,
1663                                          struct fiemap_extent *lcl_fm_ext,
1664                                          int ost_index, unsigned int ext_count,
1665                                          int current_extent, int abs_stripeno)
1666 {
1667         char            *to;
1668         unsigned int    ext;
1669
1670         for (ext = 0; ext < ext_count; ext++) {
1671                 set_fe_device_stripenr(&lcl_fm_ext[ext], ost_index,
1672                                        abs_stripeno);
1673                 lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
1674         }
1675
1676         /* Copy fm_extents from fm_local to the return buffer */
1677         to = (char *)fiemap + fiemap_count_to_size(current_extent);
1678         memcpy(to, lcl_fm_ext, ext_count * sizeof(struct fiemap_extent));
1679 }
1680
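/* Size in bytes of the temporary local buffer used to fetch extents from each OST */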
1681 #define FIEMAP_BUFFER_SIZE 4096
1682
1683 /**
1684  * Non-zero fe_logical indicates that this is a continuation FIEMAP
1685  * call. The local end offset and the device are sent in the first
1686  * fm_extent. This function calculates the stripe number from the index.
1687  * This function returns a stripe_no on which mapping is to be restarted.
1688  *
1689  * This function returns fm_end_offset which is the in-OST offset at which
1690  * mapping should be restarted. If fm_end_offset=0 is returned then caller
1691  * will re-calculate proper offset in next stripe.
1692  * Note that the first extent is passed to lov_get_info via the value field.
1693  *
1694  * \param fiemap [in]           fiemap request header
1695  * \param lsm [in]              striping information for the file
1696  * \param index [in]            stripe component index
1697  * \param ext [in]              logical extent of mapping
1698  * \param start_stripe [in/out] starting stripe; may be advanced to the next stripe
1699  */
1700 static u64 fiemap_calc_fm_end_offset(struct fiemap *fiemap,
1701                                      struct lov_stripe_md *lsm,
1702                                      int index, struct lu_extent *ext,
1703                                      int *start_stripe)
1704 {
1705         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1706         u64 local_end = fiemap->fm_extents[0].fe_logical;
1707         u64 lun_end;
1708         u64 fm_end_offset;
1709         int stripe_no = -1;
1710
1711         if (fiemap->fm_extent_count == 0 ||
1712             fiemap->fm_extents[0].fe_logical == 0)
1713                 return 0;
1714
1715         stripe_no = *start_stripe;
1716
1717         if (stripe_no == -1)
1718                 return -EINVAL;
1719
1720         /* If we have finished mapping on previous device, shift logical
1721          * offset to start of next device */
1722         if (lov_stripe_intersects(lsm, index, stripe_no, ext, NULL, &lun_end) &&
1723             local_end < lun_end) {
1724                 fm_end_offset = local_end;
1725         } else {
1726                 /* This is a special value to indicate that caller should
1727                  * calculate offset in next stripe. */
1728                 fm_end_offset = 0;
1729                 *start_stripe = (stripe_no + 1) % lsme->lsme_stripe_count;
1730         }
1731
1732         return fm_end_offset;
1733 }
1734
1735 struct fiemap_state {
1736         struct fiemap           *fs_fm;
1737         struct lu_extent        fs_ext;         /* current entry extent */
1738         u64                     fs_length;
1739         u64                     fs_end_offset;  /* last iteration offset */
1740         int                     fs_cur_extent;  /* collected exts so far */
1741         int                     fs_cnt_need;    /* # of extents buf can hold */
1742         int                     fs_start_stripe;
1743         int                     fs_last_stripe;
1744         bool                    fs_device_done; /* enough for this OST */
1745         bool                    fs_finish_stripe; /* reached fs_last_stripe */
1746         bool                    fs_enough;      /* enough for this call */
1747 };
1748
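/*
 * Find the sub-object backing the stripe identified by the composite @index,
 * which packs a component entry and a stripe number (see lov_comp_index()).
 * Returns a referenced cl_object on success, or ERR_PTR(-EINVAL) if the
 * layout is not composite, the index is out of range, or the sub-object FID
 * cannot be built.
 */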
1749 static struct cl_object *lov_find_subobj(const struct lu_env *env,
1750                                          struct lov_object *lov,
1751                                          struct lov_stripe_md *lsm,
1752                                          int index)
1753 {
1754         struct lov_device       *dev = lu2lov_dev(lov2lu(lov)->lo_dev);
1755         struct lov_thread_info  *lti = lov_env_info(env);
1756         struct lu_fid           *ofid = &lti->lti_fid;
1757         struct lov_oinfo        *oinfo;
1758         struct cl_device        *subdev;
1759         int                     entry = lov_comp_entry(index);
1760         int                     stripe = lov_comp_stripe(index);
1761         int                     ost_idx;
1762         int                     rc;
1763         struct cl_object        *result;
1764
1765         if (lov->lo_type != LLT_COMP)
1766                 GOTO(out, result = NULL);
1767
1768         if (entry >= lsm->lsm_entry_count ||
1769             stripe >= lsm->lsm_entries[entry]->lsme_stripe_count)
1770                 GOTO(out, result = NULL);
1771
1772         oinfo = lsm->lsm_entries[entry]->lsme_oinfo[stripe];
1773         ost_idx = oinfo->loi_ost_idx;
1774         rc = ostid_to_fid(ofid, &oinfo->loi_oi, ost_idx);
1775         if (rc != 0)
1776                 GOTO(out, result = NULL);
1777
1778         subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
1779         result = lov_sub_find(env, subdev, ofid, NULL);
1780 out:
1781         if (result == NULL)
1782                 result = ERR_PTR(-EINVAL);
1783         return result;
1784 }
1785
1786 static int fiemap_for_stripe(const struct lu_env *env, struct cl_object *obj,
1787                              struct lov_stripe_md *lsm, struct fiemap *fiemap,
1788                              size_t *buflen, struct ll_fiemap_info_key *fmkey,
1789                              int index, int stripe_last, int stripeno,
1790                              struct fiemap_state *fs)
1791 {
1792         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1793         struct cl_object *subobj;
1794         struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov;
1795         struct fiemap_extent *fm_ext = &fs->fs_fm->fm_extents[0];
1796         u64 req_fm_len; /* max requested extent coverage */
1797         u64 len_mapped_single_call;
1798         u64 obd_start;
1799         u64 obd_end;
1800         unsigned int ext_count;
1801         /* EOF for object */
1802         bool ost_eof = false;
1803         /* done with required mapping for this OST? */
1804         bool ost_done = false;
1805         int ost_index;
1806         int rc = 0;
1807
1808         fs->fs_device_done = false;
1809         /* Find out range of mapping on this stripe */
1810         if ((lov_stripe_intersects(lsm, index, stripeno, &fs->fs_ext,
1811                                    &obd_start, &obd_end)) == 0)
1812                 return 0;
1813
1814         if (lov_oinfo_is_dummy(lsme->lsme_oinfo[stripeno]))
1815                 return -EIO;
1816
1817         /* If this is a continuation FIEMAP call and we are on
1818          * starting stripe then obd_start needs to be set to
1819          * end_offset */
1820         if (fs->fs_end_offset != 0 && stripeno == fs->fs_start_stripe)
1821                 obd_start = fs->fs_end_offset;
1822
1823         if (lov_size_to_stripe(lsm, index, fs->fs_ext.e_end, stripeno) ==
1824             obd_start)
1825                 return 0;
1826
1827         req_fm_len = obd_end - obd_start + 1;
1828         fs->fs_fm->fm_length = 0;
1829         len_mapped_single_call = 0;
1830
1831         /* find lovsub object */
1832         subobj = lov_find_subobj(env, cl2lov(obj), lsm,
1833                                  lov_comp_index(index, stripeno));
1834         if (IS_ERR(subobj))
1835                 return PTR_ERR(subobj);
1836         /* If the output buffer is very large and the objects have many
1837          * extents we may need to loop on a single OST repeatedly */
1838         do {
1839                 if (fiemap->fm_extent_count > 0) {
1840                         /* Don't get too many extents. */
1841                         if (fs->fs_cur_extent + fs->fs_cnt_need >
1842                             fiemap->fm_extent_count)
1843                                 fs->fs_cnt_need = fiemap->fm_extent_count -
1844                                                   fs->fs_cur_extent;
1845                 }
1846
1847                 obd_start += len_mapped_single_call;
1848                 fs->fs_fm->fm_length = req_fm_len - len_mapped_single_call;
1849                 req_fm_len = fs->fs_fm->fm_length;
1850                 /**
1851                  * If we have already collected enough extents, request one
1852                  * more to check whether we happen to have reached the end of
1853                  * all available extents, so that FIEMAP_EXTENT_LAST can be set.
1854                  */
1855                 fs->fs_fm->fm_extent_count = fs->fs_enough ?
1856                                              1 : fs->fs_cnt_need;
1857                 fs->fs_fm->fm_mapped_extents = 0;
1858                 fs->fs_fm->fm_flags = fiemap->fm_flags;
1859
1860                 ost_index = lsme->lsme_oinfo[stripeno]->loi_ost_idx;
1861
1862                 if (ost_index < 0 || ost_index >= lov->desc.ld_tgt_count)
1863                         GOTO(obj_put, rc = -EINVAL);
1864                 /* If OST is inactive, return extent with UNKNOWN flag. */
1865                 if (!lov->lov_tgts[ost_index]->ltd_active) {
1866                         fs->fs_fm->fm_flags |= FIEMAP_EXTENT_LAST;
1867                         fs->fs_fm->fm_mapped_extents = 1;
1868
1869                         fm_ext[0].fe_logical = obd_start;
1870                         fm_ext[0].fe_length = obd_end - obd_start + 1;
1871                         fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
1872
1873                         goto inactive_tgt;
1874                 }
1875
1876                 fs->fs_fm->fm_start = obd_start;
1877                 fs->fs_fm->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
1878                 memcpy(&fmkey->lfik_fiemap, fs->fs_fm, sizeof(*fs->fs_fm));
1879                 *buflen = fiemap_count_to_size(fs->fs_fm->fm_extent_count);
1880
1881                 rc = cl_object_fiemap(env, subobj, fmkey, fs->fs_fm, buflen);
1882                 if (rc != 0)
1883                         GOTO(obj_put, rc);
1884 inactive_tgt:
1885                 ext_count = fs->fs_fm->fm_mapped_extents;
1886                 if (ext_count == 0) {
1887                         ost_done = true;
1888                         fs->fs_device_done = true;
1889                         /* If the last stripe has a hole at the end,
1890                          * we need to return */
1891                         if (stripeno == fs->fs_last_stripe) {
1892                                 fiemap->fm_mapped_extents = 0;
1893                                 fs->fs_finish_stripe = true;
1894                                 GOTO(obj_put, rc);
1895                         }
1896                         break;
1897                 } else if (fs->fs_enough) {
1898                         /*
1899                          * We've collected enough extents and there are
1900                          * more extents after it.
1901                          */
1902                         GOTO(obj_put, rc);
1903                 }
1904
1905                 /* If we just need the number of extents, go to the next device */
1906                 if (fiemap->fm_extent_count == 0) {
1907                         fs->fs_cur_extent += ext_count;
1908                         break;
1909                 }
1910
1911                 /* prepare to copy retrieved map extents */
1912                 len_mapped_single_call = fm_ext[ext_count - 1].fe_logical +
1913                                          fm_ext[ext_count - 1].fe_length -
1914                                          obd_start;
1915
1916                 /* Have we finished mapping on this device? */
1917                 if (req_fm_len <= len_mapped_single_call) {
1918                         ost_done = true;
1919                         fs->fs_device_done = true;
1920                 }
1921
1922                 /* Clear the EXTENT_LAST flag which can be present on
1923                  * the last extent */
1924                 if (fm_ext[ext_count - 1].fe_flags & FIEMAP_EXTENT_LAST)
1925                         fm_ext[ext_count - 1].fe_flags &= ~FIEMAP_EXTENT_LAST;
1926                 if (lov_stripe_size(lsm, index,
1927                                     fm_ext[ext_count - 1].fe_logical +
1928                                     fm_ext[ext_count - 1].fe_length,
1929                                     stripeno) >= fmkey->lfik_oa.o_size) {
1930                         ost_eof = true;
1931                         fs->fs_device_done = true;
1932                 }
1933
1934                 fiemap_prepare_and_copy_exts(fiemap, fm_ext, ost_index,
1935                                              ext_count, fs->fs_cur_extent,
1936                                              stripe_last + stripeno);
1937                 fs->fs_cur_extent += ext_count;
1938
1939                 /* Ran out of available extents? */
1940                 if (fs->fs_cur_extent >= fiemap->fm_extent_count)
1941                         fs->fs_enough = true;
1942         } while (!ost_done && !ost_eof);
1943
1944         if (stripeno == fs->fs_last_stripe)
1945                 fs->fs_finish_stripe = true;
1946 obj_put:
1947         cl_object_put(env, subobj);
1948
1949         return rc;
1950 }
1951
1952 /**
1953  * Break down the FIEMAP request and send appropriate calls to individual OSTs.
1954  * This also handles the restarting of FIEMAP calls in case mapping overflows
1955  * the available number of extents in a single call.
1956  *
1957  * \param env [in]              lustre environment
1958  * \param obj [in]              file object
1959  * \param fmkey [in]            fiemap request header and other info
1960  * \param fiemap [out]          fiemap buffer holding retrieved map extents
1961  * \param buflen [in/out]       max buffer length of @fiemap; when iterating
1962  *                              over each OST it limits the maximum mapping needed
1963  * \retval 0    success
1964  * \retval < 0  error
1965  */
1966 static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj,
1967                              struct ll_fiemap_info_key *fmkey,
1968                              struct fiemap *fiemap, size_t *buflen)
1969 {
1970         struct lov_stripe_md_entry *lsme;
1971         struct lov_stripe_md *lsm;
1972         struct fiemap *fm_local = NULL;
1973         loff_t whole_start;
1974         loff_t whole_end;
1975         int entry;
1976         int start_entry = -1;
1977         int end_entry;
1978         int cur_stripe = 0;
1979         int stripe_count;
1980         unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
1981         int rc = 0;
1982         struct fiemap_state fs = { 0 };
1983         struct lu_extent range;
1984         int cur_ext;
1985         int stripe_last = 0;
1986         int start_stripe = 0;
1987         bool resume = false;
1988         ENTRY;
1989
1990         lsm = lov_lsm_addref(cl2lov(obj));
1991         if (lsm == NULL) {
1992                 /* no extent: there is no object for mapping */
1993                 fiemap->fm_mapped_extents = 0;
1994                 return 0;
1995         }
1996
1997         if (!(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) {
1998                 /**
1999                  * If the entry count > 1 or stripe_count > 1 and the
2000                  * application does not understand DEVICE_ORDER flag,
2001                  * it cannot interpret the extents correctly.
2002                  */
2003                 if (lsm->lsm_entry_count > 1 ||
2004                     (lsm->lsm_entry_count == 1 &&
2005                      lsm->lsm_entries[0]->lsme_stripe_count > 1))
2006                         GOTO(out_lsm, rc = -EOPNOTSUPP);
2007         }
2008
2009         /* No support for DOM layout yet. */
2010         if (lsme_is_dom(lsm->lsm_entries[0]))
2011                 GOTO(out_lsm, rc = -EOPNOTSUPP);
2012
2013         if (lsm->lsm_is_released) {
2014                 if (fiemap->fm_start < fmkey->lfik_oa.o_size) {
2015                         /**
2016                          * released file, return a minimal FIEMAP if
2017                          * request fits in file-size.
2018                          */
2019                         fiemap->fm_mapped_extents = 1;
2020                         fiemap->fm_extents[0].fe_logical = fiemap->fm_start;
2021                         if (fiemap->fm_start + fiemap->fm_length <
2022                             fmkey->lfik_oa.o_size)
2023                                 fiemap->fm_extents[0].fe_length =
2024                                         fiemap->fm_length;
2025                         else
2026                                 fiemap->fm_extents[0].fe_length =
2027                                         fmkey->lfik_oa.o_size -
2028                                         fiemap->fm_start;
2029                         fiemap->fm_extents[0].fe_flags |=
2030                                 FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST;
2031                 }
2032                 GOTO(out_lsm, rc = 0);
2033         }
2034
2035         /* Shrink the local buffer if fm_extent_count extents need less space. */
2036         if (fiemap_count_to_size(fiemap->fm_extent_count) < buffer_size)
2037                 buffer_size = fiemap_count_to_size(fiemap->fm_extent_count);
2038
2039         OBD_ALLOC_LARGE(fm_local, buffer_size);
2040         if (fm_local == NULL)
2041                 GOTO(out_lsm, rc = -ENOMEM);
2042
2043         /**
2044          * Requested extent count exceeds the fiemap buffer size, shrink our
2045          * ambition.
2046          */
2047         if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen)
2048                 fiemap->fm_extent_count = fiemap_size_to_count(*buflen);
2049
2050         fs.fs_enough = false;
2051         fs.fs_cur_extent = 0;
2052         fs.fs_fm = fm_local;
2053         fs.fs_cnt_need = fiemap_size_to_count(buffer_size);
2054
2055         whole_start = fiemap->fm_start;
2056         /* whole_start is beyond the end of the file */
2057         if (whole_start > fmkey->lfik_oa.o_size)
2058                 GOTO(out_fm_local, rc = -EINVAL);
2059         whole_end = (fiemap->fm_length == OBD_OBJECT_EOF) ?
2060                                         fmkey->lfik_oa.o_size + 1 :
2061                                         whole_start + fiemap->fm_length;
2062         /**
2063          * If fiemap->fm_length != OBD_OBJECT_EOF but whole_end exceeds file
2064          * size
2065          */
2066         if (whole_end > fmkey->lfik_oa.o_size + 1)
2067                 whole_end = fmkey->lfik_oa.o_size + 1;
2068
2069         /**
2070          * the high 16 bits of fe_device record the stripe the last call
2071          * stopped at; we continue from there in this call.
2072          */
2073         if (fiemap->fm_extent_count && fiemap->fm_extents[0].fe_logical) {
2074                 resume = true;
2075                 stripe_last = get_fe_stripenr(&fiemap->fm_extents[0]);
2076         }
2077         /**
2078          * stripe_last records the stripe number we had processed up to in
2079          * the last call
2080          */
2081         end_entry = lsm->lsm_entry_count - 1;
2082         cur_stripe = 0;
2083         for (entry = 0; entry <= end_entry; entry++) {
2084                 lsme = lsm->lsm_entries[entry];
2085                 if (cur_stripe + lsme->lsme_stripe_count >= stripe_last) {
2086                         start_entry = entry;
2087                         start_stripe = stripe_last - cur_stripe;
2088                         break;
2089                 }
2090
2091                 cur_stripe += lsme->lsme_stripe_count;
2092         }
2093         if (start_entry == -1) {
2094                 CERROR(DFID": FIEMAP does not init start entry, cur_stripe=%d, "
2095                        "stripe_last=%d\n", PFID(lu_object_fid(&obj->co_lu)),
2096                        cur_stripe, stripe_last);
2097                 GOTO(out_fm_local, rc = -EINVAL);
2098         }
2099         /**
2100          * @start_entry and @start_stripe record the position where fiemap
2101          * resumes; @stripe_last keeps track of the absolute stripe position
2102          * being processed. @resume indicates that @start_stripe is honored.
2103          */
2104
2105         range.e_start = whole_start;
2106         range.e_end = whole_end;
2107
2108         for (entry = start_entry; entry <= end_entry; entry++) {
2109                 /* remember to update stripe_last accordingly */
2110                 lsme = lsm->lsm_entries[entry];
2111
2112                 /* FLR could contain component holes between entries */
2113                 if (!lsme_inited(lsme)) {
2114                         stripe_last += lsme->lsme_stripe_count;
2115                         resume = false;
2116                         continue;
2117                 }
2118
2119                 if (!lu_extent_is_overlapped(&range, &lsme->lsme_extent)) {
2120                         stripe_last += lsme->lsme_stripe_count;
2121                         resume = false;
2122                         continue;
2123                 }
2124
2125                 /* prepare for a component entry iteration */
2126                 if (lsme->lsme_extent.e_start > whole_start)
2127                         fs.fs_ext.e_start = lsme->lsme_extent.e_start;
2128                 else
2129                         fs.fs_ext.e_start = whole_start;
2130                 if (lsme->lsme_extent.e_end > whole_end)
2131                         fs.fs_ext.e_end = whole_end;
2132                 else
2133                         fs.fs_ext.e_end = lsme->lsme_extent.e_end;
2134
2135                 /* Calculate start stripe, last stripe and length of mapping */
2136                 if (resume) {
2137                         fs.fs_start_stripe = start_stripe;
2138                         /* put stripe_last to the first stripe of the comp */
2139                         stripe_last -= start_stripe;
2140                         resume = false;
2141                 } else {
2142                         fs.fs_start_stripe = lov_stripe_number(lsm, entry,
2143                                                         fs.fs_ext.e_start);
2144                 }
2145                 fs.fs_last_stripe = fiemap_calc_last_stripe(lsm, entry,
2146                                         &fs.fs_ext, fs.fs_start_stripe,
2147                                         &stripe_count);
2148                 /**
2149                  * When a new mirror component starts being processed, reset
2150                  * fs.fs_end_offset so that fiemap_for_stripe() starts from
2151                  * the overlapping extent; otherwise it starts from
2152                  * fs.fs_end_offset.
2153                  */
2154                 if (entry > start_entry && lsme->lsme_extent.e_start == 0) {
2155                         /* new mirror */
2156                         fs.fs_end_offset = 0;
2157                 } else {
2158                         fs.fs_end_offset = fiemap_calc_fm_end_offset(fiemap,
2159                                                 lsm, entry, &fs.fs_ext,
2160                                                 &fs.fs_start_stripe);
2161                 }
2162
2163                 /* Check each stripe */
2164                 for (cur_stripe = fs.fs_start_stripe; stripe_count > 0;
2165                      --stripe_count,
2166                      cur_stripe = (cur_stripe + 1) % lsme->lsme_stripe_count) {
2167                         /* reset fs_finish_stripe */
2168                         fs.fs_finish_stripe = false;
2169                         rc = fiemap_for_stripe(env, obj, lsm, fiemap, buflen,
2170                                                fmkey, entry, stripe_last,
2171                                                cur_stripe, &fs);
2172                         if (rc < 0)
2173                                 GOTO(out_fm_local, rc);
2174                         if (fs.fs_enough) {
2175                                 stripe_last += cur_stripe;
2176                                 GOTO(finish, rc);
2177                         }
2178                         if (fs.fs_finish_stripe)
2179                                 break;
2180                 } /* for each stripe */
2181                 stripe_last += lsme->lsme_stripe_count;
2182         } /* for covering layout component entry */
2183
2184 finish:
2185         if (fs.fs_cur_extent > 0)
2186                 cur_ext = fs.fs_cur_extent - 1;
2187         else
2188                 cur_ext = 0;
2189
2190         /* done all the processing */
2191         if (entry > end_entry)
2192                 fiemap->fm_extents[cur_ext].fe_flags |= FIEMAP_EXTENT_LAST;
2193
2194         /* Indicate that we are returning device offsets unless file just has
2195          * single stripe */
2196         if (lsm->lsm_entry_count > 1 ||
2197             (lsm->lsm_entry_count == 1 &&
2198              lsm->lsm_entries[0]->lsme_stripe_count > 1))
2199                 fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
2200
2201         if (fiemap->fm_extent_count == 0)
2202                 goto skip_last_device_calc;
2203
2204 skip_last_device_calc:
2205         fiemap->fm_mapped_extents = fs.fs_cur_extent;
2206 out_fm_local:
2207         OBD_FREE_LARGE(fm_local, buffer_size);
2208
2209 out_lsm:
2210         lov_lsm_put(lsm);
2211         return rc;
2212 }
2213
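/*
 * Implements cl_object_operations::coo_getstripe(): copy the current layout
 * (LOV EA) to the user-space buffer @lum, or return -ENODATA if the object
 * has no layout attached.
 */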
2214 static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
2215                                 struct lov_user_md __user *lum, size_t size)
2216 {
2217         struct lov_object       *lov = cl2lov(obj);
2218         struct lov_stripe_md    *lsm;
2219         int                     rc = 0;
2220         ENTRY;
2221
2222         lsm = lov_lsm_addref(lov);
2223         if (lsm == NULL)
2224                 RETURN(-ENODATA);
2225
2226         rc = lov_getstripe(env, cl2lov(obj), lsm, lum, size);
2227         lov_lsm_put(lsm);
2228         RETURN(rc);
2229 }
2230
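/*
 * Implements cl_object_operations::coo_layout_get(): report the layout size,
 * generation and flags, and pack the layout into cl->cl_buf. Returns the
 * number of bytes packed, 0 for an empty layout, or a negative error code.
 */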
2231 static int lov_object_layout_get(const struct lu_env *env,
2232                                  struct cl_object *obj,
2233                                  struct cl_layout *cl)
2234 {
2235         struct lov_object *lov = cl2lov(obj);
2236         struct lov_stripe_md *lsm = lov_lsm_addref(lov);
2237         struct lu_buf *buf = &cl->cl_buf;
2238         ssize_t rc;
2239         ENTRY;
2240
2241         if (lsm == NULL) {
2242                 cl->cl_size = 0;
2243                 cl->cl_layout_gen = CL_LAYOUT_GEN_EMPTY;
2244
2245                 RETURN(0);
2246         }
2247
2248         cl->cl_size = lov_comp_md_size(lsm);
2249         cl->cl_layout_gen = lsm->lsm_layout_gen;
2250         cl->cl_is_released = lsm->lsm_is_released;
2251         cl->cl_is_composite = lsm_is_composite(lsm->lsm_magic);
2252
2253         rc = lov_lsm_pack(lsm, buf->lb_buf, buf->lb_len);
2254         lov_lsm_put(lsm);
2255
2256         /* return error or number of bytes */
2257         RETURN(rc);
2258 }
2259
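/*
 * Return the maximum file size supported by the current layout, or LLONG_MAX
 * when the object has no layout.
 */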
2260 static loff_t lov_object_maxbytes(struct cl_object *obj)
2261 {
2262         struct lov_object *lov = cl2lov(obj);
2263         struct lov_stripe_md *lsm = lov_lsm_addref(lov);
2264         loff_t maxbytes;
2265
2266         if (lsm == NULL)
2267                 return LLONG_MAX;
2268
2269         maxbytes = lsm->lsm_maxbytes;
2270
2271         lov_lsm_put(lsm);
2272
2273         return maxbytes;
2274 }
2275
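/* Dispatch an ldlm lock flush to the layout-specific llo_flush() method. */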
2276 static int lov_object_flush(const struct lu_env *env, struct cl_object *obj,
2277                             struct ldlm_lock *lock)
2278 {
2279         return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_flush, true, env, obj,
2280                                      lock);
2281 }
2282
2283 static const struct cl_object_operations lov_ops = {
2284         .coo_page_init    = lov_page_init,
2285         .coo_lock_init    = lov_lock_init,
2286         .coo_io_init      = lov_io_init,
2287         .coo_attr_get     = lov_attr_get,
2288         .coo_attr_update  = lov_attr_update,
2289         .coo_conf_set     = lov_conf_set,
2290         .coo_getstripe    = lov_object_getstripe,
2291         .coo_layout_get   = lov_object_layout_get,
2292         .coo_maxbytes     = lov_object_maxbytes,
2293         .coo_fiemap       = lov_object_fiemap,
2294         .coo_object_flush = lov_object_flush
2295 };
2296
2297 static const struct lu_object_operations lov_lu_obj_ops = {
2298         .loo_object_init        = lov_object_init,
2299         .loo_object_delete      = lov_object_delete,
2300         .loo_object_release     = NULL,
2301         .loo_object_free        = lov_object_free,
2302         .loo_object_print       = lov_object_print,
2303         .loo_object_invariant   = NULL,
2304 };
2305
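/*
 * Allocate and minimally initialize a new lov object; the layout-specific
 * operations and state are installed later in lov_object_init().
 */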
2306 struct lu_object *lov_object_alloc(const struct lu_env *env,
2307                                    const struct lu_object_header *unused,
2308                                    struct lu_device *dev)
2309 {
2310         struct lov_object *lov;
2311         struct lu_object  *obj;
2312
2313         ENTRY;
2314         OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, GFP_NOFS);
2315         if (lov != NULL) {
2316                 obj = lov2lu(lov);
2317                 lu_object_init(obj, NULL, dev);
2318                 lov->lo_cl.co_ops = &lov_ops;
2319                 lov->lo_type = -1; /* invalid, to catch uninitialized type */
2320                 /*
2321                  * object io operation vector (cl_object::co_iop) is installed
2322                  * later in lov_object_init(), as different vectors are used
2323                  * for object with different layouts.
2324                  */
2325                 obj->lo_ops = &lov_lu_obj_ops;
2326         } else
2327                 obj = NULL;
2328         RETURN(obj);
2329 }
2330
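/*
 * Take a reference on the object's current striping (lo_lsm) under the layout
 * configuration freeze; returns NULL if no layout is attached.
 */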
2331 static struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
2332 {
2333         struct lov_stripe_md *lsm = NULL;
2334
2335         lov_conf_freeze(lov);
2336         if (lov->lo_lsm != NULL) {
2337                 lsm = lsm_addref(lov->lo_lsm);
2338                 CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
2339                         lsm, atomic_read(&lsm->lsm_refc),
2340                         test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags),
2341                         current);
2342         }
2343         lov_conf_thaw(lov);
2344         return lsm;
2345 }
2346
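/*
 * Return the first recorded asynchronous write error from any stripe of the
 * object and clear the stored per-stripe errors; returns 0 if there is none.
 */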
2347 int lov_read_and_clear_async_rc(struct cl_object *clob)
2348 {
2349         struct lu_object *luobj;
2350         int rc = 0;
2351         ENTRY;
2352
2353         luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
2354                                  &lov_device_type);
2355         if (luobj != NULL) {
2356                 struct lov_object *lov = lu2lov(luobj);
2357
2358                 lov_conf_freeze(lov);
2359                 switch (lov->lo_type) {
2360                 case LLT_COMP: {
2361                         struct lov_stripe_md *lsm;
2362                         int i;
2363
2364                         lsm = lov->lo_lsm;
2365                         LASSERT(lsm != NULL);
2366                         for (i = 0; i < lsm->lsm_entry_count; i++) {
2367                                 struct lov_stripe_md_entry *lse =
2368                                                 lsm->lsm_entries[i];
2369                                 int j;
2370
2371                                 if (!lsme_inited(lse))
2372                                         break;
2373
2374                                 for (j = 0; j < lse->lsme_stripe_count; j++) {
2375                                         struct lov_oinfo *loi =
2376                                                         lse->lsme_oinfo[j];
2377
2378                                         if (lov_oinfo_is_dummy(loi))
2379                                                 continue;
2380
2381                                         if (loi->loi_ar.ar_rc && !rc)
2382                                                 rc = loi->loi_ar.ar_rc;
2383                                         loi->loi_ar.ar_rc = 0;
2384                                 }
2385                         }
2386                 }
2387                 fallthrough;
2388                 case LLT_RELEASED:
2389                 case LLT_EMPTY:
2390                 case LLT_FOREIGN:
2391                         break;
2392                 default:
2393                         LBUG();
2394                 }
2395                 lov_conf_thaw(lov);
2396         }
2397         RETURN(rc);
2398 }
2399 EXPORT_SYMBOL(lov_read_and_clear_async_rc);
2400
2401 /** @} lov */