LU-12610 llite: remove OBD_ -> CFS_ macros
fs/lustre-release.git: lustre/lov/lov_object.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * Implementation of cl_object for LOV layer.
32  *
33  *   Author: Nikita Danilov <nikita.danilov@sun.com>
34  *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
35  */
36
37 #define DEBUG_SUBSYSTEM S_LOV
38
39 #include <linux/random.h>
40
41 #include "lov_cl_internal.h"
42
43 static inline struct lov_device *lov_object_dev(struct lov_object *obj)
44 {
45         return lu2lov_dev(obj->lo_cl.co_lu.lo_dev);
46 }
47
48 /** \addtogroup lov
49  *  @{
50  */
51
52 /*****************************************************************************
53  *
54  * Layout operations.
55  *
56  */
57
58 struct lov_layout_operations {
59         int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
60                         struct lov_object *lov, struct lov_stripe_md *lsm,
61                         const struct cl_object_conf *conf,
62                         union lov_layout_state *state);
63         int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
64                            union lov_layout_state *state);
65         void (*llo_fini)(const struct lu_env *env, struct lov_object *lov,
66                          union lov_layout_state *state);
67         int  (*llo_print)(const struct lu_env *env, void *cookie,
68                           lu_printer_t p, const struct lu_object *o);
69         int  (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
70                               struct cl_page *page, pgoff_t index);
71         int  (*llo_lock_init)(const struct lu_env *env,
72                               struct cl_object *obj, struct cl_lock *lock,
73                               const struct cl_io *io);
74         int  (*llo_io_init)(const struct lu_env *env,
75                             struct cl_object *obj, struct cl_io *io);
76         int  (*llo_getattr)(const struct lu_env *env, struct cl_object *obj,
77                             struct cl_attr *attr);
78         int  (*llo_flush)(const struct lu_env *env, struct cl_object *obj,
79                           struct ldlm_lock *lock);
80 };
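/*
 * Each layout type (LLT_EMPTY, LLT_RELEASED, LLT_COMP, LLT_FOREIGN) supplies
 * one lov_layout_operations vector; the vectors are collected in the
 * lov_dispatch[] table below and selected at run time by the
 * LOV_2DISPATCH*() macros based on lov_object::lo_type.
 */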
81
82 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
83 static struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov);
84
85 static void lov_lsm_put(struct lov_stripe_md *lsm)
86 {
87         if (lsm != NULL)
88                 lov_free_memmd(&lsm);
89 }
90
91 /*****************************************************************************
92  *
93  * Lov object layout operations.
94  *
95  */
96
97 static struct cl_object *lov_sub_find(const struct lu_env *env,
98                                       struct cl_device *dev,
99                                       const struct lu_fid *fid,
100                                       const struct cl_object_conf *conf)
101 {
102         struct lu_object *o;
103
104         ENTRY;
105
106         o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
107         LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
108         RETURN(lu2cl(o));
109 }
110
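/*
 * Helper used when attaching or detaching stripe sub-objects: with a stripe,
 * shift each of the stripe's cl_object slice offsets past the parent's page
 * buffer and return the stripe's page buffer size; with stripe == NULL,
 * return the size of the parent's page buffer from the lov slice onward.
 */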
111 static int lov_page_slice_fixup(struct lov_object *lov,
112                                 struct cl_object *stripe)
113 {
114         struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
115         struct cl_object *o;
116
117         if (stripe == NULL)
118                 return hdr->coh_page_bufsize - lov->lo_cl.co_slice_off;
119
120         cl_object_for_each(o, stripe)
121                 o->co_slice_off += hdr->coh_page_bufsize;
122
123         return cl_object_header(stripe)->coh_page_bufsize;
124 }
125
126 static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
127                         struct cl_object *subobj, struct lov_oinfo *oinfo,
128                         int idx)
129 {
130         struct cl_object_header *hdr;
131         struct cl_object_header *subhdr;
132         struct cl_object_header *parent;
133         int entry = lov_comp_entry(idx);
134         int stripe = lov_comp_stripe(idx);
135         int result;
136
137         if (CFS_FAIL_CHECK(OBD_FAIL_LOV_INIT)) {
138                 /* For sanity test_206:
139                  * do not leave the object in cache, to avoid accessing
140                  * freed memory. The osc_object still refers to the
141                  * lov_oinfo of lsm_stripe_data, which will be freed
142                  * because of this failure. */
143                 cl_object_kill(env, subobj);
144                 cl_object_put(env, subobj);
145                 return -EIO;
146         }
147
148         hdr = cl_object_header(lov2cl(lov));
149         subhdr = cl_object_header(subobj);
150
151         CDEBUG(D_INODE, DFID"@%p[%d:%d] -> "DFID"@%p: ostid: "DOSTID
152                " ost idx: %d gen: %d\n",
153                PFID(lu_object_fid(&subobj->co_lu)), subhdr, entry, stripe,
154                PFID(lu_object_fid(lov2lu(lov))), hdr, POSTID(&oinfo->loi_oi),
155                oinfo->loi_ost_idx, oinfo->loi_ost_gen);
156
157         /* reuse ->coh_attr_guard to protect coh_parent change */
158         spin_lock(&subhdr->coh_attr_guard);
159         parent = subhdr->coh_parent;
160         if (parent == NULL) {
161                 struct lovsub_object *lso = cl2lovsub(subobj);
162
163                 subhdr->coh_parent = hdr;
164                 spin_unlock(&subhdr->coh_attr_guard);
165                 subhdr->coh_nesting = hdr->coh_nesting + 1;
166                 lu_object_ref_add(&subobj->co_lu, "lov-parent", lov);
167                 lso->lso_super = lov;
168                 lso->lso_index = idx;
169                 result = 0;
170         } else {
171                 struct lu_object  *old_obj;
172                 struct lov_object *old_lov;
173                 unsigned int mask = D_INODE;
174
175                 spin_unlock(&subhdr->coh_attr_guard);
176                 old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
177                 LASSERT(old_obj != NULL);
178                 old_lov = cl2lov(lu2cl(old_obj));
179                 if (test_bit(LO_LAYOUT_INVALID, &old_lov->lo_obj_flags)) {
180                         /* the object's layout has already changed but isn't
181                          * refreshed */
182                         lu_object_unhash(env, &subobj->co_lu);
183                         result = -EAGAIN;
184                 } else {
185                         mask = D_ERROR;
186                         result = -EIO;
187                 }
188
189                 LU_OBJECT_DEBUG(mask, env, &subobj->co_lu,
190                                 "stripe %d is already owned.", idx);
191                 LU_OBJECT_DEBUG(mask, env, old_obj, "owned.");
192                 LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n");
193                 cl_object_put(env, subobj);
194         }
195         return result;
196 }
197
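/*
 * Instantiate the sub-objects of a RAID0 layout entry: allocate the lo_sub[]
 * array, look up one cl_object per stripe (skipping dummy oinfos) and bind
 * it to this lov object via lov_init_sub(). Returns the page slice size
 * consumed by a stripe on success, or a negative errno on failure.
 */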
198 static int lov_init_raid0(const struct lu_env *env, struct lov_device *dev,
199                           struct lov_object *lov, unsigned int index,
200                           const struct cl_object_conf *conf,
201                           struct lov_layout_entry *lle)
202 {
203         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
204         struct lov_thread_info *lti = lov_env_info(env);
205         struct cl_object_conf *subconf = &lti->lti_stripe_conf;
206         struct lu_fid *ofid = &lti->lti_fid;
207         struct cl_object *stripe;
208         struct lov_stripe_md_entry *lse  = lov_lse(lov, index);
209         int result;
210         int psz, sz;
211         int i;
212
213         ENTRY;
214
215         spin_lock_init(&r0->lo_sub_lock);
216         r0->lo_nr = lse->lsme_stripe_count;
217
218         OBD_ALLOC_PTR_ARRAY_LARGE(r0->lo_sub, r0->lo_nr);
219         if (r0->lo_sub == NULL)
220                 GOTO(out, result = -ENOMEM);
221
222         psz = 0;
223         result = 0;
224         memset(subconf, 0, sizeof(*subconf));
225
226         /*
227          * Create stripe cl_objects.
228          */
229         for (i = 0; i < r0->lo_nr; ++i) {
230                 struct cl_device *subdev;
231                 struct lov_oinfo *oinfo = lse->lsme_oinfo[i];
232                 int ost_idx = oinfo->loi_ost_idx;
233                 struct obd_export *exp;
234
235                 if (lov_oinfo_is_dummy(oinfo))
236                         continue;
237
238                 result = ostid_to_fid(ofid, &oinfo->loi_oi, oinfo->loi_ost_idx);
239                 if (result != 0)
240                         GOTO(out, result);
241
242                 if (dev->ld_target[ost_idx] == NULL) {
243                         CERROR("%s: OST %04x is not initialized\n",
244                                lov2obd(dev->ld_lov)->obd_name, ost_idx);
245                         GOTO(out, result = -EIO);
246                 }
247
248                 exp = dev->ld_lov->lov_tgts[ost_idx]->ltd_exp;
249                 if (likely(exp)) {
250                         /* the more fast (non-rotational) OSTs, the better */
251                         if (exp->exp_obd->obd_osfs.os_state & OS_STATFS_NONROT)
252                                 lle->lle_preference++;
253                 }
254
255                 subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
256                 subconf->u.coc_oinfo = oinfo;
257                 LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
258                 /* In the function below, .hs_keycmp resolves to
259                  * lu_obj_hop_keycmp() */
260                 /* coverity[overrun-buffer-val] */
261                 stripe = lov_sub_find(env, subdev, ofid, subconf);
262                 if (IS_ERR(stripe))
263                         GOTO(out, result = PTR_ERR(stripe));
264
265                 result = lov_init_sub(env, lov, stripe, oinfo,
266                                       lov_comp_index(index, i));
267                 if (result == -EAGAIN) { /* try again */
268                         --i;
269                         result = 0;
270                         continue;
271                 }
272
273                 if (result == 0) {
274                         r0->lo_sub[i] = cl2lovsub(stripe);
275
276                         sz = lov_page_slice_fixup(lov, stripe);
277                         LASSERT(ergo(psz > 0, psz == sz));
278                         psz = sz;
279                 }
280         }
281         if (result == 0)
282                 result = psz;
283 out:
284         RETURN(result);
285 }
286
287 static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
288                                struct lov_layout_raid0 *r0,
289                                struct lovsub_object *los, int idx)
290 {
291         struct cl_object        *sub;
292         struct lu_site          *site;
293         wait_queue_head_t *wq;
294
295         LASSERT(r0->lo_sub[idx] == los);
296
297         sub = lovsub2cl(los);
298         site = sub->co_lu.lo_dev->ld_site;
299         wq = lu_site_wq_from_fid(site, &sub->co_lu.lo_header->loh_fid);
300
301         cl_object_kill(env, sub);
302         /* release a reference to the sub-object and ... */
303         lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
304         cl_object_put(env, sub);
305
306         /* ... wait until it is actually destroyed---sub-object clears its
307          * ->lo_sub[] slot in lovsub_object_free() */
308         wait_event(*wq, r0->lo_sub[idx] != los);
309         LASSERT(r0->lo_sub[idx] == NULL);
310 }
311
312 static void lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
313                              struct lov_layout_entry *lle)
314 {
315         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
316
317         ENTRY;
318
319         if (r0->lo_sub != NULL) {
320                 int i;
321
322                 for (i = 0; i < r0->lo_nr; ++i) {
323                         struct lovsub_object *los = r0->lo_sub[i];
324
325                         if (los != NULL) {
326                                 cl_object_prune(env, &los->lso_cl);
327                                 /*
328                                  * If top-level object is to be evicted from
329                                  * the cache, so are its sub-objects.
330                                  */
331                                 lov_subobject_kill(env, lov, r0, los, i);
332                         }
333                 }
334         }
335
336         EXIT;
337 }
338
339 static void lov_fini_raid0(const struct lu_env *env,
340                            struct lov_layout_entry *lle)
341 {
342         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
343
344         if (r0->lo_sub != NULL) {
345                 OBD_FREE_PTR_ARRAY_LARGE(r0->lo_sub, r0->lo_nr);
346                 r0->lo_sub = NULL;
347         }
348 }
349
350 static int lov_print_raid0(const struct lu_env *env, void *cookie,
351                            lu_printer_t p, const struct lov_layout_entry *lle)
352 {
353         const struct lov_layout_raid0 *r0 = &lle->lle_raid0;
354         int i;
355
356         for (i = 0; i < r0->lo_nr; ++i) {
357                 struct lu_object *sub;
358
359                 if (r0->lo_sub[i] != NULL) {
360                         sub = lovsub2lu(r0->lo_sub[i]);
361                         lu_object_print(env, cookie, p, sub);
362                 } else {
363                         (*p)(env, cookie, "sub %d absent\n", i);
364                 }
365         }
366         return 0;
367 }
368
369 static int lov_attr_get_raid0(const struct lu_env *env, struct lov_object *lov,
370                               unsigned int index, struct lov_layout_entry *lle,
371                               struct cl_attr **lov_attr)
372 {
373         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
374         struct lov_stripe_md *lsm = lov->lo_lsm;
375         struct cl_attr *attr = &r0->lo_attr;
376         int result = 0;
377
378         if (r0->lo_attr_valid) {
379                 *lov_attr = attr;
380                 return 0;
381         }
382
383         /*
384          * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
385          * happy. It's not needed, because new code uses
386          * ->coh_attr_guard spin-lock to protect consistency of
387          * sub-object attributes.
388          */
389         lov_stripe_lock(lsm);
390         result = lov_merge_lvb_kms(lsm, index, attr);
391         lov_stripe_unlock(lsm);
392         if (result == 0) {
393                 r0->lo_attr_valid = 1;
394                 *lov_attr = attr;
395         }
396
397         return result;
398 }
399
400 static struct lov_comp_layout_entry_ops raid0_ops = {
401         .lco_init      = lov_init_raid0,
402         .lco_fini      = lov_fini_raid0,
403         .lco_getattr   = lov_attr_get_raid0,
404 };
405
406 static int lov_attr_get_dom(const struct lu_env *env, struct lov_object *lov,
407                             unsigned int index, struct lov_layout_entry *lle,
408                             struct cl_attr **lov_attr)
409 {
410         struct lov_layout_dom *dom = &lle->lle_dom;
411         struct lov_oinfo *loi = dom->lo_loi;
412         struct cl_attr *attr = &dom->lo_dom_r0.lo_attr;
413
414         if (dom->lo_dom_r0.lo_attr_valid) {
415                 *lov_attr = attr;
416                 return 0;
417         }
418
419         if (OST_LVB_IS_ERR(loi->loi_lvb.lvb_blocks))
420                 return OST_LVB_GET_ERR(loi->loi_lvb.lvb_blocks);
421
422         cl_lvb2attr(attr, &loi->loi_lvb);
423
424         /* DoM component size can be bigger than stripe size after
425          * client's setattr RPC, so do not count anything beyond
426          * component end. Alternatively, check that limit on server
427          * and do not allow size overflow there. */
428         if (attr->cat_size > lle->lle_extent->e_end)
429                 attr->cat_size = lle->lle_extent->e_end;
430
431         attr->cat_kms = attr->cat_size;
432
433         dom->lo_dom_r0.lo_attr_valid = 1;
434         *lov_attr = attr;
435
436         return 0;
437 }
438
439 /**
440  * Lookup FLD to get MDS index of the given DOM object FID.
441  *
442  * \param[in]  ld       LOV device
443  * \param[in]  fid      FID to lookup
444  * \param[out] nr       index in the MDC array, filled on success
445  *
446  * \retval              0 and \a nr set to the MDC array index if successful
447  * \retval              negative value on error
448  */
449 static int lov_fld_lookup(struct lov_device *ld, const struct lu_fid *fid,
450                           __u32 *nr)
451 {
452         __u32 mds_idx;
453         int i, rc;
454
455         ENTRY;
456
457         rc = fld_client_lookup(&ld->ld_lmv->u.lmv.lmv_fld, fid_seq(fid),
458                                &mds_idx, LU_SEQ_RANGE_MDT, NULL);
459         if (rc) {
460                 CERROR("%s: error while looking for mds number. Seq %#llx"
461                        ", err = %d\n", lu_dev_name(cl2lu_dev(&ld->ld_cl)),
462                        fid_seq(fid), rc);
463                 RETURN(rc);
464         }
465
466         CDEBUG(D_INODE, "FLD lookup got mds #%x for fid="DFID"\n",
467                mds_idx, PFID(fid));
468
469         /* find proper MDC device in the array */
470         for (i = 0; i < ld->ld_md_tgts_nr; i++) {
471                 if (ld->ld_md_tgts[i].ldm_mdc != NULL &&
472                     ld->ld_md_tgts[i].ldm_idx == mds_idx)
473                         break;
474         }
475
476         if (i == ld->ld_md_tgts_nr) {
477                 CERROR("%s: cannot find corresponding MDC device for mds #%x "
478                        "for fid="DFID"\n", lu_dev_name(cl2lu_dev(&ld->ld_cl)),
479                        mds_idx, PFID(fid));
480                 rc = -EINVAL;
481         } else {
482                 *nr = i;
483         }
484         RETURN(rc);
485 }
486
487 /**
488  * Implementation of lov_comp_layout_entry_ops::lco_init for DOM object.
489  *
490  * Init the DOM object for the first time. It also prepares a RAID0 entry
491  * for it, to be used by the methods shared with ordinary RAID0 entries.
492  *
493  * \param[in] env       execution environment
494  * \param[in] dev       LOV device
495  * \param[in] lov       LOV object
496  * \param[in] index     Composite layout entry index in LSM
497  * \param[in] lle       Composite LOV layout entry
498  */
499 static int lov_init_dom(const struct lu_env *env, struct lov_device *dev,
500                         struct lov_object *lov, unsigned int index,
501                         const struct cl_object_conf *conf,
502                         struct lov_layout_entry *lle)
503 {
504         struct lov_thread_info *lti = lov_env_info(env);
505         struct lov_stripe_md_entry *lsme = lov_lse(lov, index);
506         struct cl_object *clo;
507         struct lu_object *o = lov2lu(lov);
508         const struct lu_fid *fid = lu_object_fid(o);
509         struct cl_device *mdcdev;
510         struct lov_oinfo *loi = NULL;
511         struct cl_object_conf *sconf = &lti->lti_stripe_conf;
512         int rc;
513         __u32 idx = 0;
514
515         ENTRY;
516
517         /* with FLR the DOM entry may not be at index zero, but it must start from offset 0 */
518         if (unlikely(lle->lle_extent->e_start != 0)) {
519                 CERROR("%s: DOM entry must be the first stripe in a mirror\n",
520                        lov2obd(dev->ld_lov)->obd_name);
521                 dump_lsm(D_ERROR, lov->lo_lsm);
522                 RETURN(-EINVAL);
523         }
524
525         /* find proper MDS device */
526         rc = lov_fld_lookup(dev, fid, &idx);
527         if (rc)
528                 RETURN(rc);
529
530         LASSERTF(dev->ld_md_tgts[idx].ldm_mdc != NULL,
531                  "LOV md target[%u] is NULL\n", idx);
532
533         /* check that the lsm entry is DoM; more checks may be needed */
534         LASSERT(lsme->lsme_stripe_count == 0);
535
536         /*
537          * Create lower cl_objects.
538          */
539         mdcdev = dev->ld_md_tgts[idx].ldm_mdc;
540
541         LASSERTF(mdcdev != NULL, "non-initialized mdc subdev\n");
542
543         /* a DoM object has no oinfo in its LSM entry, so allocate one for it */
544         OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, GFP_NOFS);
545         if (loi == NULL)
546                 RETURN(-ENOMEM);
547
548         fid_to_ostid(lu_object_fid(lov2lu(lov)), &loi->loi_oi);
549
550         sconf->u.coc_oinfo = loi;
551 again:
552         clo = lov_sub_find(env, mdcdev, fid, sconf);
553         if (IS_ERR(clo))
554                 GOTO(out, rc = PTR_ERR(clo));
555
556         rc = lov_init_sub(env, lov, clo, loi, lov_comp_index(index, 0));
557         if (rc == -EAGAIN) /* try again */
558                 goto again;
559         else if (rc != 0)
560                 GOTO(out, rc);
561
562         lle->lle_dom.lo_dom = cl2lovsub(clo);
563         spin_lock_init(&lle->lle_dom.lo_dom_r0.lo_sub_lock);
564         lle->lle_dom.lo_dom_r0.lo_nr = 1;
565         lle->lle_dom.lo_dom_r0.lo_sub = &lle->lle_dom.lo_dom;
566         lle->lle_dom.lo_loi = loi;
567
568         rc = lov_page_slice_fixup(lov, clo);
569         RETURN(rc);
570
571 out:
572         if (loi != NULL)
573                 OBD_SLAB_FREE_PTR(loi, lov_oinfo_slab);
574         return rc;
575 }
576
577 /**
578  * Implementation of lov_comp_layout_entry_ops::lco_fini for a DOM object.
579  *
580  * Finish the DOM object and free related memory.
581  *
582  * \param[in] env       execution environment
583  * \param[in] lle       composite LOV layout entry holding the DOM
584  *                      state to clean up
585  */
586 static void lov_fini_dom(const struct lu_env *env,
587                          struct lov_layout_entry *lle)
588 {
589         if (lle->lle_dom.lo_dom != NULL)
590                 lle->lle_dom.lo_dom = NULL;
591         if (lle->lle_dom.lo_loi != NULL)
592                 OBD_SLAB_FREE_PTR(lle->lle_dom.lo_loi, lov_oinfo_slab);
593 }
594
595 static struct lov_comp_layout_entry_ops dom_ops = {
596         .lco_init = lov_init_dom,
597         .lco_fini = lov_fini_dom,
598         .lco_getattr = lov_attr_get_dom,
599 };
600
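/*
 * Set up a composite (PFL/FLR) layout: take a reference on the LSM, allocate
 * the per-entry and per-mirror arrays, classify every layout entry as RAID0,
 * DoM or foreign, group entries into mirrors, initialize each instantiated
 * entry through its lco_init method, and finally pick a preferred mirror
 * based on the per-OST preferences and a per-client hash of the object.
 */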
601 static int lov_init_composite(const struct lu_env *env, struct lov_device *dev,
602                               struct lov_object *lov, struct lov_stripe_md *lsm,
603                               const struct cl_object_conf *conf,
604                               union lov_layout_state *state)
605 {
606         struct lov_layout_composite *comp = &state->composite;
607         struct lov_layout_entry *lle;
608         struct lov_mirror_entry *lre;
609         unsigned int entry_count;
610         unsigned int psz = 0;
611         unsigned int mirror_count;
612         int flr_state = lsm->lsm_flags & LCM_FL_FLR_MASK;
613         int result = 0;
614         unsigned int seq;
615         int i, j, preference;
616         __u64 dom_size = 0;
617
618         ENTRY;
619
620         LASSERT(lsm->lsm_entry_count > 0);
621         LASSERT(lov->lo_lsm == NULL);
622         lov->lo_lsm = lsm_addref(lsm);
623         set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
624
625         dump_lsm(D_INODE, lsm);
626
627         entry_count = lsm->lsm_entry_count;
628
629         comp->lo_flags = lsm->lsm_flags;
630         comp->lo_mirror_count = lsm->lsm_mirror_count + 1;
631         comp->lo_entry_count = lsm->lsm_entry_count;
632         comp->lo_preferred_mirror = -1;
633
634         if (equi(flr_state == LCM_FL_NONE, comp->lo_mirror_count > 1))
635                 RETURN(-EINVAL);
636
637         OBD_ALLOC_PTR_ARRAY(comp->lo_mirrors, comp->lo_mirror_count);
638         if (comp->lo_mirrors == NULL)
639                 RETURN(-ENOMEM);
640
641         OBD_ALLOC_PTR_ARRAY(comp->lo_entries, entry_count);
642         if (comp->lo_entries == NULL)
643                 RETURN(-ENOMEM);
644
645         /* Initialize all entry types and extent data first */
646         for (i = 0, j = 0, mirror_count = 1; i < entry_count; i++) {
647                 int mirror_id = 0;
648
649                 lle = &comp->lo_entries[i];
650
651                 lle->lle_lsme = lsm->lsm_entries[i];
652                 lle->lle_type = lov_entry_type(lle->lle_lsme);
653                 lle->lle_preference = 0;
654                 switch (lle->lle_type) {
655                 case LOV_PATTERN_RAID0:
656                         lle->lle_comp_ops = &raid0_ops;
657                         break;
658                 case LOV_PATTERN_MDT:
659                         /* Several DOM stripes are allowed in different
660                          * mirrors, provided they all have the same DoM size.
661                          */
662                         if (!dom_size) {
663                                 dom_size = lle->lle_lsme->lsme_extent.e_end;
664                         } else if (dom_size !=
665                                    lle->lle_lsme->lsme_extent.e_end) {
666                                 CERROR("%s: DOM entries with different sizes\n",
667                                        lov2obd(dev->ld_lov)->obd_name);
668                                 dump_lsm(D_ERROR, lsm);
669                                 RETURN(-EINVAL);
670                         }
671                         lle->lle_comp_ops = &dom_ops;
672                         break;
673                 case LOV_PATTERN_FOREIGN:
674                         lle->lle_comp_ops = NULL;
675                         break;
676                 default:
677                         CERROR("%s: unknown composite layout entry type %i\n",
678                                lov2obd(dev->ld_lov)->obd_name,
679                                lsm->lsm_entries[i]->lsme_pattern);
680                         dump_lsm(D_ERROR, lsm);
681                         RETURN(-EIO);
682                 }
683
684                 lle->lle_extent = &lle->lle_lsme->lsme_extent;
685                 lle->lle_valid = !(lle->lle_lsme->lsme_flags & LCME_FL_STALE);
686
687                 if (flr_state != LCM_FL_NONE)
688                         mirror_id = mirror_id_of(lle->lle_lsme->lsme_id);
689
690                 lre = &comp->lo_mirrors[j];
691                 if (i > 0) {
692                         if (mirror_id == lre->lre_mirror_id) {
693                                 lre->lre_valid |= lle->lle_valid;
694                                 lre->lre_stale |= !lle->lle_valid;
695                                 lre->lre_foreign |=
696                                         lsme_is_foreign(lle->lle_lsme);
697                                 lre->lre_end = i;
698                                 continue;
699                         }
700
701                         /* new mirror detected, assume that the mirrors
702                          * are sorted in the layout */
703                         ++mirror_count;
704                         ++j;
705                         if (j >= comp->lo_mirror_count)
706                                 break;
707
708                         lre = &comp->lo_mirrors[j];
709                 }
710
711                 /* entries must be sorted by mirrors */
712                 lre->lre_mirror_id = mirror_id;
713                 lre->lre_start = lre->lre_end = i;
714                 lre->lre_preference = lle->lle_lsme->lsme_flags &
715                                         LCME_FL_PREF_RD ? 1000 : 0;
716                 lre->lre_valid = lle->lle_valid;
717                 lre->lre_stale = !lle->lle_valid;
718                 lre->lre_foreign = lsme_is_foreign(lle->lle_lsme);
719         }
720
721         /* sanity check for FLR */
722         if (mirror_count != comp->lo_mirror_count) {
723                 CDEBUG(D_INODE, DFID
724                        " doesn't have the # of mirrors it claims, %u/%u\n",
725                        PFID(lu_object_fid(lov2lu(lov))), mirror_count,
726                        comp->lo_mirror_count + 1);
727
728                 GOTO(out, result = -EINVAL);
729         }
730
731         lov_foreach_layout_entry(lov, lle) {
732                 int index = lov_layout_entry_index(lov, lle);
733
734                 /**
735                  * For a PFL layout, if this component has not been
736                  * instantiated on the MDS side, the components beyond it
737                  * will be instantiated dynamically on later write/truncate ops.
738                  */
739                 if (!lsme_inited(lle->lle_lsme))
740                         continue;
741
742                 if (lsme_is_foreign(lle->lle_lsme))
743                         continue;
744
745                 result = lle->lle_comp_ops->lco_init(env, dev, lov, index,
746                                                      conf, lle);
747                 if (result < 0)
748                         break;
749
750                 LASSERT(ergo(psz > 0, psz == result));
751                 psz = result;
752         }
753
754         if (psz > 0)
755                 cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;
756
757         /* Decide the preferred mirror. Use the hash value of the lov_object
758          * so that different clients use different mirrors for reads. */
759         mirror_count = 0;
760         preference = -1;
761         seq = cfs_hash_long((unsigned long)lov, 8);
762         for (i = 0; i < comp->lo_mirror_count; i++) {
763                 unsigned int idx = (i + seq) % comp->lo_mirror_count;
764
765                 lre = lov_mirror_entry(lov, idx);
766                 if (lre->lre_stale)
767                         continue;
768
769                 if (lre->lre_foreign)
770                         continue;
771
772                 mirror_count++; /* valid mirror */
773
774                 /* aggregated preference of all involved OSTs */
775                 for (j = lre->lre_start; j <= lre->lre_end; j++) {
776                         lre->lre_preference +=
777                                 comp->lo_entries[j].lle_preference;
778                 }
779
780                 if (lre->lre_preference > preference) {
781                         preference = lre->lre_preference;
782                         comp->lo_preferred_mirror = idx;
783                 }
784         }
785         if (!mirror_count) {
786                 CDEBUG(D_INODE, DFID
787                        " doesn't have any valid mirrors\n",
788                        PFID(lu_object_fid(lov2lu(lov))));
789
790                 comp->lo_preferred_mirror = 0;
791         }
792
793         LASSERT(comp->lo_preferred_mirror >= 0);
794
795         EXIT;
796 out:
797         return result > 0 ? 0 : result;
798 }
799
800 static int lov_init_empty(const struct lu_env *env, struct lov_device *dev,
801                           struct lov_object *lov, struct lov_stripe_md *lsm,
802                           const struct cl_object_conf *conf,
803                           union lov_layout_state *state)
804 {
805         return 0;
806 }
807
808 static int lov_init_released(const struct lu_env *env,
809                              struct lov_device *dev, struct lov_object *lov,
810                              struct lov_stripe_md *lsm,
811                              const struct cl_object_conf *conf,
812                              union lov_layout_state *state)
813 {
814         LASSERT(lsm != NULL);
815         LASSERT(lsm->lsm_is_released);
816         LASSERT(lov->lo_lsm == NULL);
817
818         lov->lo_lsm = lsm_addref(lsm);
819         return 0;
820 }
821
822 static int lov_init_foreign(const struct lu_env *env,
823                             struct lov_device *dev, struct lov_object *lov,
824                             struct lov_stripe_md *lsm,
825                             const struct cl_object_conf *conf,
826                             union lov_layout_state *state)
827 {
828         LASSERT(lsm != NULL);
829         LASSERT(lov->lo_type == LLT_FOREIGN);
830         LASSERT(lov->lo_lsm == NULL);
831
832         lov->lo_lsm = lsm_addref(lsm);
833         return 0;
834 }
835
836 static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
837                             union lov_layout_state *state)
838 {
839         LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED ||
840                 lov->lo_type == LLT_FOREIGN);
841
842         lov_layout_wait(env, lov);
843         return 0;
844 }
845
846 static int lov_delete_composite(const struct lu_env *env,
847                                 struct lov_object *lov,
848                                 union lov_layout_state *state)
849 {
850         struct lov_layout_entry *entry;
851
852         ENTRY;
853
854         dump_lsm(D_INODE, lov->lo_lsm);
855
856         lov_layout_wait(env, lov);
857         lov_foreach_layout_entry(lov, entry) {
858                 if (entry->lle_lsme && lsme_is_foreign(entry->lle_lsme))
859                         continue;
860
861                 lov_delete_raid0(env, lov, entry);
862         }
863
864         RETURN(0);
865 }
866
867 static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov,
868                            union lov_layout_state *state)
869 {
870         LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
871 }
872
873 static void lov_fini_composite(const struct lu_env *env,
874                                struct lov_object *lov,
875                                union lov_layout_state *state)
876 {
877         struct lov_layout_composite *comp = &state->composite;
878         ENTRY;
879
880         if (comp->lo_entries != NULL) {
881                 struct lov_layout_entry *entry;
882
883                 lov_foreach_layout_entry(lov, entry)
884                         if (entry->lle_comp_ops)
885                                 entry->lle_comp_ops->lco_fini(env, entry);
886
887                 OBD_FREE_PTR_ARRAY(comp->lo_entries, comp->lo_entry_count);
888                 comp->lo_entries = NULL;
889         }
890
891         if (comp->lo_mirrors != NULL) {
892                 OBD_FREE_PTR_ARRAY(comp->lo_mirrors, comp->lo_mirror_count);
893                 comp->lo_mirrors = NULL;
894         }
895
896         memset(comp, 0, sizeof(*comp));
897
898         dump_lsm(D_INODE, lov->lo_lsm);
899         lov_free_memmd(&lov->lo_lsm);
900
901         EXIT;
902 }
903
904 static void lov_fini_released(const struct lu_env *env, struct lov_object *lov,
905                                 union lov_layout_state *state)
906 {
907         ENTRY;
908         dump_lsm(D_INODE, lov->lo_lsm);
909         lov_free_memmd(&lov->lo_lsm);
910         EXIT;
911 }
912
913 static int lov_print_empty(const struct lu_env *env, void *cookie,
914                            lu_printer_t p, const struct lu_object *o)
915 {
916         (*p)(env, cookie, "empty %d\n",
917              test_bit(LO_LAYOUT_INVALID, &lu2lov(o)->lo_obj_flags));
918         return 0;
919 }
920
921 static int lov_print_composite(const struct lu_env *env, void *cookie,
922                                lu_printer_t p, const struct lu_object *o)
923 {
924         struct lov_object *lov = lu2lov(o);
925         struct lov_stripe_md *lsm = lov->lo_lsm;
926         int i;
927
928         (*p)(env, cookie, "entries: %d, %s, lsm{%p 0x%08X %d %u}:\n",
929              lsm->lsm_entry_count,
930              test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags) ? "invalid" :
931              "valid", lsm, lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
932              lsm->lsm_layout_gen);
933
934         for (i = 0; i < lsm->lsm_entry_count; i++) {
935                 struct lov_stripe_md_entry *lse = lsm->lsm_entries[i];
936                 struct lov_layout_entry *lle = lov_entry(lov, i);
937
938                 (*p)(env, cookie,
939                      DEXT ": { 0x%08X, %u, %#x, %u, %#x, %u, %u }\n",
940                      PEXT(&lse->lsme_extent), lse->lsme_magic,
941                      lse->lsme_id, lse->lsme_pattern, lse->lsme_layout_gen,
942                      lse->lsme_flags, lse->lsme_stripe_count,
943                      lse->lsme_stripe_size);
944
945                 if (!lsme_is_foreign(lse))
946                         lov_print_raid0(env, cookie, p, lle);
947         }
948
949         return 0;
950 }
951
952 static int lov_print_released(const struct lu_env *env, void *cookie,
953                                 lu_printer_t p, const struct lu_object *o)
954 {
955         struct lov_object       *lov = lu2lov(o);
956         struct lov_stripe_md    *lsm = lov->lo_lsm;
957
958         (*p)(env, cookie,
959                 "released: %s, lsm{%p 0x%08X %d %u}:\n",
960                 test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags) ? "invalid" :
961                 "valid", lsm, lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
962                 lsm->lsm_layout_gen);
963         return 0;
964 }
965
966 static int lov_print_foreign(const struct lu_env *env, void *cookie,
967                                 lu_printer_t p, const struct lu_object *o)
968 {
969         struct lov_object       *lov = lu2lov(o);
970         struct lov_stripe_md    *lsm = lov->lo_lsm;
971
972         (*p)(env, cookie,
973                 "foreign: %s, lsm{%p 0x%08X %d %u}:\n",
974                 test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags) ?
975                 "invalid" : "valid", lsm,
976                 lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
977                 lsm->lsm_layout_gen);
978         (*p)(env, cookie,
979                 "raw_ea_content '%.*s'\n",
980                 (int)lsm->lsm_foreign_size, (char *)lsm_foreign(lsm));
981         return 0;
982 }
983
984 /**
985  * Implements cl_object_operations::coo_attr_get() method for an object
986  * without stripes (LLT_EMPTY layout type).
987  *
988  * The only attribute this layer is authoritative for in this case is
989  * cl_attr::cat_blocks, which is 0.
990  */
991 static int lov_attr_get_empty(const struct lu_env *env, struct cl_object *obj,
992                               struct cl_attr *attr)
993 {
994         attr->cat_blocks = 0;
995         return 0;
996 }
997
998 static int lov_attr_get_composite(const struct lu_env *env,
999                                   struct cl_object *obj,
1000                                   struct cl_attr *attr)
1001 {
1002         struct lov_object       *lov = cl2lov(obj);
1003         struct lov_layout_entry *entry;
1004         int                      result = 0;
1005
1006         ENTRY;
1007
1008         attr->cat_size = 0;
1009         attr->cat_blocks = 0;
1010         attr->cat_kms = 0;
1011
1012         lov_foreach_layout_entry(lov, entry) {
1013                 struct cl_attr *lov_attr = NULL;
1014                 int index = lov_layout_entry_index(lov, entry);
1015
1016                 if (!entry->lle_valid)
1017                         continue;
1018
1019                 /* PFL: This component has not been init-ed. */
1020                 if (!lsm_entry_inited(lov->lo_lsm, index))
1021                         continue;
1022
1023                 result = entry->lle_comp_ops->lco_getattr(env, lov, index,
1024                                                           entry, &lov_attr);
1025                 if (result < 0)
1026                         RETURN(result);
1027
1028                 if (lov_attr == NULL)
1029                         continue;
1030
1031                 CDEBUG(D_INODE, "COMP ID #%i: s=%llu m=%llu a=%llu c=%llu "
1032                        "b=%llu\n", index - 1, lov_attr->cat_size,
1033                        lov_attr->cat_mtime, lov_attr->cat_atime,
1034                        lov_attr->cat_ctime, lov_attr->cat_blocks);
1035
1036                 /* merge results */
1037                 if (lov_attr->cat_kms_valid)
1038                         attr->cat_kms_valid = 1;
1039                 attr->cat_blocks += lov_attr->cat_blocks;
1040                 if (attr->cat_size < lov_attr->cat_size)
1041                         attr->cat_size = lov_attr->cat_size;
1042                 if (attr->cat_kms < lov_attr->cat_kms)
1043                         attr->cat_kms = lov_attr->cat_kms;
1044                 if (attr->cat_atime < lov_attr->cat_atime)
1045                         attr->cat_atime = lov_attr->cat_atime;
1046                 if (attr->cat_ctime < lov_attr->cat_ctime)
1047                         attr->cat_ctime = lov_attr->cat_ctime;
1048                 if (attr->cat_mtime < lov_attr->cat_mtime)
1049                         attr->cat_mtime = lov_attr->cat_mtime;
1050         }
1051
1052         RETURN(0);
1053 }
1054
1055 static int lov_flush_composite(const struct lu_env *env,
1056                                struct cl_object *obj,
1057                                struct ldlm_lock *lock)
1058 {
1059         struct lov_object *lov = cl2lov(obj);
1060         struct lov_layout_entry *lle;
1061         int rc = -ENODATA;
1062
1063         ENTRY;
1064
1065         lov_foreach_layout_entry(lov, lle) {
1066                 if (!lsme_is_dom(lle->lle_lsme))
1067                         continue;
1068                 rc = cl_object_flush(env, lovsub2cl(lle->lle_dom.lo_dom), lock);
1069                 break;
1070         }
1071
1072         RETURN(rc);
1073 }
1074
1075 static int lov_flush_empty(const struct lu_env *env, struct cl_object *obj,
1076                            struct ldlm_lock *lock)
1077 {
1078         return 0;
1079 }
1080
1081 static const struct lov_layout_operations lov_dispatch[] = {
1082         [LLT_EMPTY] = {
1083                 .llo_init      = lov_init_empty,
1084                 .llo_delete    = lov_delete_empty,
1085                 .llo_fini      = lov_fini_empty,
1086                 .llo_print     = lov_print_empty,
1087                 .llo_page_init = lov_page_init_empty,
1088                 .llo_lock_init = lov_lock_init_empty,
1089                 .llo_io_init   = lov_io_init_empty,
1090                 .llo_getattr   = lov_attr_get_empty,
1091                 .llo_flush     = lov_flush_empty,
1092         },
1093         [LLT_RELEASED] = {
1094                 .llo_init      = lov_init_released,
1095                 .llo_delete    = lov_delete_empty,
1096                 .llo_fini      = lov_fini_released,
1097                 .llo_print     = lov_print_released,
1098                 .llo_page_init = lov_page_init_empty,
1099                 .llo_lock_init = lov_lock_init_empty,
1100                 .llo_io_init   = lov_io_init_released,
1101                 .llo_getattr   = lov_attr_get_empty,
1102                 .llo_flush     = lov_flush_empty,
1103         },
1104         [LLT_COMP] = {
1105                 .llo_init      = lov_init_composite,
1106                 .llo_delete    = lov_delete_composite,
1107                 .llo_fini      = lov_fini_composite,
1108                 .llo_print     = lov_print_composite,
1109                 .llo_page_init = lov_page_init_composite,
1110                 .llo_lock_init = lov_lock_init_composite,
1111                 .llo_io_init   = lov_io_init_composite,
1112                 .llo_getattr   = lov_attr_get_composite,
1113                 .llo_flush     = lov_flush_composite,
1114         },
1115         [LLT_FOREIGN] = {
1116                 .llo_init      = lov_init_foreign,
1117                 .llo_delete    = lov_delete_empty,
1118                 .llo_fini      = lov_fini_released,
1119                 .llo_print     = lov_print_foreign,
1120                 .llo_page_init = lov_page_init_foreign,
1121                 .llo_lock_init = lov_lock_init_empty,
1122                 .llo_io_init   = lov_io_init_empty,
1123                 .llo_getattr   = lov_attr_get_empty,
1124                 .llo_flush     = lov_flush_empty,
1125         },
1126 };
1127
1128 /**
1129  * Performs a double-dispatch based on the layout type of an object.
1130  */
1131 #define LOV_2DISPATCH_NOLOCK(obj, op, ...)              \
1132 ({                                                      \
1133         struct lov_object *__obj = (obj);               \
1134         enum lov_layout_type __llt;                     \
1135                                                         \
1136         __llt = __obj->lo_type;                         \
1137         LASSERT(__llt < ARRAY_SIZE(lov_dispatch));      \
1138         lov_dispatch[__llt].op(__VA_ARGS__);            \
1139 })
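/*
 * For example (a minimal sketch of how the table is used), lov_object_print()
 * below forwards to the per-layout method without taking lo_type_guard:
 *
 *	LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
 */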
1140
1141 /**
1142  * Return lov_layout_type associated with a given lsm
1143  */
1144 static enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
1145 {
1146         if (lsm == NULL)
1147                 return LLT_EMPTY;
1148
1149         if (lsm->lsm_is_released)
1150                 return LLT_RELEASED;
1151
1152         if (lsm->lsm_magic == LOV_MAGIC_V1 ||
1153             lsm->lsm_magic == LOV_MAGIC_V3 ||
1154             lsm->lsm_magic == LOV_MAGIC_COMP_V1)
1155                 return LLT_COMP;
1156
1157         if (lsm->lsm_magic == LOV_MAGIC_FOREIGN)
1158                 return LLT_FOREIGN;
1159
1160         return LLT_EMPTY;
1161 }
1162
1163 static inline void lov_conf_freeze(struct lov_object *lov)
1164 {
1165         CDEBUG(D_INODE, "To take share lov(%p) owner %p/%p\n",
1166                 lov, lov->lo_owner, current);
1167         if (lov->lo_owner != current)
1168                 down_read(&lov->lo_type_guard);
1169 }
1170
1171 static inline void lov_conf_thaw(struct lov_object *lov)
1172 {
1173         CDEBUG(D_INODE, "To release share lov(%p) owner %p/%p\n",
1174                 lov, lov->lo_owner, current);
1175         if (lov->lo_owner != current)
1176                 up_read(&lov->lo_type_guard);
1177 }
1178
1179 #define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...)                       \
1180 ({                                                                      \
1181         struct lov_object                      *__obj = (obj);          \
1182         int                                     __lock = !!(lock);      \
1183         typeof(lov_dispatch[0].op(__VA_ARGS__)) __result;               \
1184                                                                         \
1185         if (__lock)                                                     \
1186                 lov_conf_freeze(__obj);                                 \
1187         __result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__);          \
1188         if (__lock)                                                     \
1189                 lov_conf_thaw(__obj);                                   \
1190         __result;                                                       \
1191 })
1192
1193 /**
1194  * Performs a locked double-dispatch based on the layout type of an object.
1195  */
1196 #define LOV_2DISPATCH(obj, op, ...)                     \
1197         LOV_2DISPATCH_MAYLOCK(obj, op, 1, __VA_ARGS__)
1198
1199 #define LOV_2DISPATCH_VOID(obj, op, ...)                                \
1200 do {                                                                    \
1201         struct lov_object                      *__obj = (obj);          \
1202         enum lov_layout_type                    __llt;                  \
1203                                                                         \
1204         lov_conf_freeze(__obj);                                         \
1205         __llt = __obj->lo_type;                                         \
1206         LASSERT(__llt < ARRAY_SIZE(lov_dispatch));                      \
1207         lov_dispatch[__llt].op(__VA_ARGS__);                            \
1208         lov_conf_thaw(__obj);                                           \
1209 } while (0)
1210
1211 static void lov_conf_lock(struct lov_object *lov)
1212 {
1213         LASSERT(lov->lo_owner != current);
1214         down_write(&lov->lo_type_guard);
1215         LASSERT(lov->lo_owner == NULL);
1216         lov->lo_owner = current;
1217         CDEBUG(D_INODE, "Took exclusive lov(%p) owner %p\n",
1218                 lov, lov->lo_owner);
1219 }
1220
1221 static void lov_conf_unlock(struct lov_object *lov)
1222 {
1223         CDEBUG(D_INODE, "To release exclusive lov(%p) owner %p\n",
1224                 lov, lov->lo_owner);
1225         lov->lo_owner = NULL;
1226         up_write(&lov->lo_type_guard);
1227 }
1228
1229 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
1230 {
1231         ENTRY;
1232
1233         while (atomic_read(&lov->lo_active_ios) > 0) {
1234                 CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
1235                         PFID(lu_object_fid(lov2lu(lov))),
1236                         atomic_read(&lov->lo_active_ios));
1237
1238                 wait_event_idle(lov->lo_waitq,
1239                                 atomic_read(&lov->lo_active_ios) == 0);
1240         }
1241         RETURN(0);
1242 }
1243
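/*
 * Switch this object to the layout described by \a lsm: prune cached pages,
 * run the old layout's llo_delete/llo_fini methods, adjust the page buffer
 * size, then install the new type and call its llo_init. On failure the
 * object falls back to LLT_EMPTY. Called with the layout configuration
 * lock held exclusively (see lov_conf_set()).
 */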
1244 static int lov_layout_change(const struct lu_env *unused,
1245                              struct lov_object *lov, struct lov_stripe_md *lsm,
1246                              const struct cl_object_conf *conf)
1247 {
1248         enum lov_layout_type llt = lov_type(lsm);
1249         union lov_layout_state *state = &lov->u;
1250         const struct lov_layout_operations *old_ops;
1251         const struct lov_layout_operations *new_ops;
1252         struct lov_device *lov_dev = lov_object_dev(lov);
1253         struct lu_env *env;
1254         __u16 refcheck;
1255         int rc;
1256         ENTRY;
1257
1258         LASSERT(lov->lo_type < ARRAY_SIZE(lov_dispatch));
1259
1260         env = cl_env_get(&refcheck);
1261         if (IS_ERR(env))
1262                 RETURN(PTR_ERR(env));
1263
1264         LASSERT(llt < ARRAY_SIZE(lov_dispatch));
1265
1266         CDEBUG(D_INODE, DFID" from %s to %s\n",
1267                PFID(lu_object_fid(lov2lu(lov))),
1268                llt2str(lov->lo_type), llt2str(llt));
1269
1270         old_ops = &lov_dispatch[lov->lo_type];
1271         new_ops = &lov_dispatch[llt];
1272
1273         rc = cl_object_prune(env, &lov->lo_cl);
1274         if (rc != 0)
1275                 GOTO(out, rc);
1276
1277         rc = old_ops->llo_delete(env, lov, &lov->u);
1278         if (rc != 0)
1279                 GOTO(out, rc);
1280
1281         old_ops->llo_fini(env, lov, &lov->u);
1282
1283         LASSERT(atomic_read(&lov->lo_active_ios) == 0);
1284
1285         CDEBUG(D_INODE, DFID "Apply new layout lov %p, type %d\n",
1286                PFID(lu_object_fid(lov2lu(lov))), lov, llt);
1287
1288         /* page bufsize fixup */
1289         cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
1290                 lov_page_slice_fixup(lov, NULL);
1291
1292         lov->lo_type = llt;
1293         rc = new_ops->llo_init(env, lov_dev, lov, lsm, conf, state);
1294         if (rc != 0) {
1295                 struct obd_device *obd = lov2obd(lov_dev->ld_lov);
1296
1297                 CERROR("%s: cannot apply new layout on "DFID" : rc = %d\n",
1298                        obd->obd_name, PFID(lu_object_fid(lov2lu(lov))), rc);
1299                 new_ops->llo_delete(env, lov, state);
1300                 new_ops->llo_fini(env, lov, state);
1301                 /* this file becomes an EMPTY file. */
1302                 lov->lo_type = LLT_EMPTY;
1303                 GOTO(out, rc);
1304         }
1305
1306 out:
1307         cl_env_put(env, &refcheck);
1308         RETURN(rc);
1309 }
1310
1311 /*****************************************************************************
1312  *
1313  * Lov object operations.
1314  *
1315  */
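/*
 * lu_object_operations::loo_object_init() for lov: set up the type guard,
 * IO counter and wait queue, unpack the layout buffer from the object
 * configuration (if any) into an lsm, and dispatch to the matching
 * llo_init() method. The reference obtained from lov_unpackmd() is dropped
 * at the end; llo_init() methods take their own reference if they keep
 * the lsm.
 */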
1316 static int lov_object_init(const struct lu_env *env, struct lu_object *obj,
1317                            const struct lu_object_conf *conf)
1318 {
1319         struct lov_object            *lov   = lu2lov(obj);
1320         struct lov_device            *dev   = lov_object_dev(lov);
1321         const struct cl_object_conf  *cconf = lu2cl_conf(conf);
1322         union lov_layout_state       *set   = &lov->u;
1323         const struct lov_layout_operations *ops;
1324         struct lov_stripe_md *lsm = NULL;
1325         int rc;
1326         ENTRY;
1327
1328         init_rwsem(&lov->lo_type_guard);
1329         atomic_set(&lov->lo_active_ios, 0);
1330         init_waitqueue_head(&lov->lo_waitq);
1331         cl_object_page_init(lu2cl(obj), 0);
1332
1333         lov->lo_type = LLT_EMPTY;
1334         if (cconf->u.coc_layout.lb_buf != NULL) {
1335                 lsm = lov_unpackmd(dev->ld_lov,
1336                                    cconf->u.coc_layout.lb_buf,
1337                                    cconf->u.coc_layout.lb_len);
1338                 if (IS_ERR(lsm))
1339                         RETURN(PTR_ERR(lsm));
1340
1341                 dump_lsm(D_INODE, lsm);
1342         }
1343
1344         /* no locking is necessary, as object is being created */
1345         lov->lo_type = lov_type(lsm);
1346         ops = &lov_dispatch[lov->lo_type];
1347         rc = ops->llo_init(env, dev, lov, lsm, cconf, set);
1348         if (rc != 0)
1349                 GOTO(out_lsm, rc);
1350
1351 out_lsm:
1352         lov_lsm_put(lsm);
1353
1354         RETURN(rc);
1355 }
1356
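/*
 * cl_object_operations::coo_conf_set() for lov. OBJECT_CONF_INVALIDATE only
 * marks the current layout invalid; OBJECT_CONF_WAIT blocks until in-flight
 * IO against an invalid layout drains; OBJECT_CONF_SET compares the new
 * layout against the current one (generation, flags, first entry pattern)
 * and, if they differ and no IO is active, switches layouts via
 * lov_layout_change().
 */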
1357 static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
1358                         const struct cl_object_conf *conf)
1359 {
1360         struct lov_stripe_md    *lsm = NULL;
1361         struct lov_object       *lov = cl2lov(obj);
1362         int                      result = 0;
1363         ENTRY;
1364
1365         if (conf->coc_opc == OBJECT_CONF_SET &&
1366             conf->u.coc_layout.lb_buf != NULL) {
1367                 lsm = lov_unpackmd(lov_object_dev(lov)->ld_lov,
1368                                    conf->u.coc_layout.lb_buf,
1369                                    conf->u.coc_layout.lb_len);
1370                 if (IS_ERR(lsm))
1371                         RETURN(PTR_ERR(lsm));
1372                 dump_lsm(D_INODE, lsm);
1373         }
1374
1375         if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
1376                 set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
1377                 GOTO(out_lsm, result = 0);
1378         }
1379
1380         lov_conf_lock(lov);
1381         if (conf->coc_opc == OBJECT_CONF_WAIT) {
1382                 if (test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags) &&
1383                     atomic_read(&lov->lo_active_ios) > 0) {
1384                         lov_conf_unlock(lov);
1385                         result = lov_layout_wait(env, lov);
1386                         lov_conf_lock(lov);
1387                 }
1388                 GOTO(out, result);
1389         }
1390
1391         LASSERT(conf->coc_opc == OBJECT_CONF_SET);
1392
1393         /*
1394          * don't apply an old layout, which may show up here
1395          * if it was returned without an ldlm lock.
1396          * XXX: can we rollback in case of recovery?
1397          */
1398         if (lsm && lov->lo_lsm) {
1399                 u32 oldgen = lov->lo_lsm->lsm_layout_gen &= ~LU_LAYOUT_RESYNC;
1400                 u32 newgen = lsm->lsm_layout_gen & ~LU_LAYOUT_RESYNC;
1401
1402                 if (newgen < oldgen) {
1403                         CDEBUG(D_HA, "skip old for "DFID": %d < %d\n",
1404                                PFID(lu_object_fid(lov2lu(lov))),
1405                                (int)newgen, (int)oldgen);
1406                         GOTO(out, result = 0);
1407                 }
1408         }
1409
1410         if ((lsm == NULL && lov->lo_lsm == NULL) ||
1411             ((lsm != NULL && lov->lo_lsm != NULL) &&
1412              (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
1413              (lov->lo_lsm->lsm_flags == lsm->lsm_flags) &&
1414              (lov->lo_lsm->lsm_entries[0]->lsme_pattern ==
1415               lsm->lsm_entries[0]->lsme_pattern))) {
1416                 /* same version of layout */
1417                 clear_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
1418                 GOTO(out, result = 0);
1419         }
1420
1421         /* the layout is about to change - check whether there is still active IO */
1422         if (atomic_read(&lov->lo_active_ios) > 0) {
1423                 set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
1424                 GOTO(out, result = -EBUSY);
1425         }
1426
1427         result = lov_layout_change(env, lov, lsm, conf);
1428         if (result)
1429                 set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
1430         else
1431                 clear_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
1432         EXIT;
1433
1434 out:
1435         lov_conf_unlock(lov);
1436 out_lsm:
1437         lov_lsm_put(lsm);
1438         CDEBUG(D_INODE, DFID" lo_layout_invalid=%u\n",
1439                PFID(lu_object_fid(lov2lu(lov))),
1440                test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags));
1441         RETURN(result);
1442 }
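
The generation comparison in lov_conf_set() above can be read in isolation as the
sketch below; this is a hypothetical helper (not part of lov_object.c) illustrating
why LU_LAYOUT_RESYNC is masked out: the resync marker must not make an otherwise
older layout generation look newer.

/* Illustrative helper (sketch only): compare two layout generations while
 * ignoring the resync marker bit, as the check above does. */
static bool lov_layout_gen_is_older(u32 new_gen, u32 old_gen)
{
        return (new_gen & ~LU_LAYOUT_RESYNC) < (old_gen & ~LU_LAYOUT_RESYNC);
}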
1443
1444 static void lov_object_delete(const struct lu_env *env, struct lu_object *obj)
1445 {
1446         struct lov_object *lov = lu2lov(obj);
1447
1448         ENTRY;
1449         LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
1450         EXIT;
1451 }
1452
1453 static void lov_object_free(const struct lu_env *env, struct lu_object *obj)
1454 {
1455         struct lov_object *lov = lu2lov(obj);
1456
1457         ENTRY;
1458         LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
1459         lu_object_fini(obj);
1460         OBD_SLAB_FREE_PTR(lov, lov_object_kmem);
1461         EXIT;
1462 }
1463
1464 static int lov_object_print(const struct lu_env *env, void *cookie,
1465                             lu_printer_t p, const struct lu_object *o)
1466 {
1467         return LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
1468 }
1469
1470 static int lov_page_init(const struct lu_env *env, struct cl_object *obj,
1471                          struct cl_page *page, pgoff_t index)
1472 {
1473         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page,
1474                                     index);
1475 }
1476
1477 /**
1478  * Implements the cl_object_operations::clo_io_init() method for the lov
1479  * layer. Dispatches to the layout-specific io initialization method.
1480  */
1481 static int lov_io_init(const struct lu_env *env, struct cl_object *obj,
1482                        struct cl_io *io)
1483 {
1484         CL_IO_SLICE_CLEAN(lov_env_io(env), lis_preserved);
1485
1486         CDEBUG(D_INODE, DFID "io %p type %d ignore/verify layout %d/%d\n",
1487                PFID(lu_object_fid(&obj->co_lu)), io, io->ci_type,
1488                io->ci_ignore_layout, io->ci_verify_layout);
1489
1490         /* IOs of type CIT_MISC with ci_ignore_layout set are usually invoked
1491          * from the OSC layer. They shouldn't take the lov layout conf lock in
1492          * that case, because as long as the OSC object exists, the layout
1493          * can't be reconfigured. */
1494         return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_io_init,
1495                         !(io->ci_ignore_layout && io->ci_type == CIT_MISC),
1496                         env, obj, io);
1497 }
1498
1499 /**
1500  * An implementation of the cl_object_operations::clo_attr_get() method for
1501  * the lov layer. For a raid0 layout this collects and merges the attributes
1502  * of all sub-objects.
1503  */
1504 static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
1505                         struct cl_attr *attr)
1506 {
1507         /* do not take the layout lock, as this function is called under a
1508          * spin-lock; the layout cannot change while IO is ongoing. */
1509         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
1510 }
1511
1512 static int lov_attr_update(const struct lu_env *env, struct cl_object *obj,
1513                            const struct cl_attr *attr, unsigned valid)
1514 {
1515         /*
1516          * No dispatch is required here, as no layout implements this.
1517          */
1518         return 0;
1519 }
1520
1521 static int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
1522                          struct cl_lock *lock, const struct cl_io *io)
1523 {
1524         /* No need to lock because we hold a reference on the layout. */
1525         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_lock_init, env, obj, lock,
1526                                     io);
1527 }
1528
1529 /**
1530  * We calculate on which OST the mapping will end. If the length of the
1531  * mapping is greater than (stripe_size * stripe_count) then last_stripe
1532  * will be the one just before start_stripe. Otherwise we check whether
1533  * the mapping intersects each OST and find last_stripe.
1534  * This function returns the last_stripe and also sets the stripe_count
1535  * over which the mapping is spread
1536  *
1537  * \param lsm [in]              striping information for the file
1538  * \param index [in]            stripe component index
1539  * \param ext [in]              logical extent of mapping
1540  * \param start_stripe [in]     starting stripe of the mapping
1541  * \param stripe_count [out]    returns the number of stripes across which
1542  *                              the mapping is spread
1543  *
1544  * \retval last_stripe          return the last stripe of the mapping
1545  */
1546 static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm, int index,
1547                                    struct lu_extent *ext,
1548                                    int start_stripe, int *stripe_count)
1549 {
1550         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1551         int init_stripe;
1552         int last_stripe;
1553         int i, j;
1554
1555         init_stripe = lov_stripe_number(lsm, index, ext->e_start);
1556
1557         if (ext->e_end - ext->e_start >
1558             lsme->lsme_stripe_size * lsme->lsme_stripe_count) {
1559                 if (init_stripe == start_stripe) {
1560                         last_stripe = (start_stripe < 1) ?
1561                                 lsme->lsme_stripe_count - 1 : start_stripe - 1;
1562                         *stripe_count = lsme->lsme_stripe_count;
1563                 } else if (init_stripe < start_stripe) {
1564                         last_stripe = (init_stripe < 1) ?
1565                                 lsme->lsme_stripe_count - 1 : init_stripe - 1;
1566                         *stripe_count = lsme->lsme_stripe_count -
1567                                         (start_stripe - init_stripe);
1568                 } else {
1569                         last_stripe = init_stripe - 1;
1570                         *stripe_count = init_stripe - start_stripe;
1571                 }
1572         } else {
1573                 for (j = 0, i = start_stripe; j < lsme->lsme_stripe_count;
1574                      i = (i + 1) % lsme->lsme_stripe_count, j++) {
1575                         if (!lov_stripe_intersects(lsm, index,  i, ext, NULL,
1576                                                    NULL))
1577                                 break;
1578                         if ((start_stripe != init_stripe) && (i == init_stripe))
1579                                 break;
1580                 }
1581                 *stripe_count = j;
1582                 last_stripe = (start_stripe + j - 1) % lsme->lsme_stripe_count;
1583         }
1584
1585         return last_stripe;
1586 }
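
A worked example of the first branch above, with purely illustrative values: for
lsme_stripe_size = 1 MiB, lsme_stripe_count = 4 and an 8 MiB extent whose
start_stripe equals init_stripe (say stripe 0), the extent is longer than
stripe_size * stripe_count, so every stripe is touched: *stripe_count becomes 4
and last_stripe becomes 3, i.e. the stripe just before start_stripe after
wrap-around. The hypothetical snippet below captures only that wrap-around step.

/* Sketch of the "mapping covers all stripes" case above (hypothetical helper). */
static int example_wrap_last_stripe(int start_stripe, int stripe_count)
{
        /* e.g. start_stripe = 0, stripe_count = 4 -> last stripe is 3 */
        return (start_stripe < 1) ? stripe_count - 1 : start_stripe - 1;
}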
1587
1588 /**
1589  * Set fe_device and copy extents from local buffer into main return buffer.
1590  *
1591  * \param fiemap [out]          fiemap to hold all extents
1592  * \param lcl_fm_ext [in]       array of fiemap extents received from the OSC layer
1593  * \param ost_index [in]        OST index to be written into the fm_device
1594  *                              field for each extent
1595  * \param ext_count [in]        number of extents to be copied
1596  * \param current_extent [in]   where to start copying in the extent array
1597  */
1598 static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap,
1599                                          struct fiemap_extent *lcl_fm_ext,
1600                                          int ost_index, unsigned int ext_count,
1601                                          int current_extent, int abs_stripeno)
1602 {
1603         char            *to;
1604         unsigned int    ext;
1605
1606         for (ext = 0; ext < ext_count; ext++) {
1607                 set_fe_device_stripenr(&lcl_fm_ext[ext], ost_index,
1608                                        abs_stripeno);
1609                 lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
1610         }
1611
1612         /* Copy fm_extent's from fm_local to return buffer */
1613         to = (char *)fiemap + fiemap_count_to_size(current_extent);
1614         memcpy(to, lcl_fm_ext, ext_count * sizeof(struct fiemap_extent));
1615 }
1616
1617 #define FIEMAP_BUFFER_SIZE 4096
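
fiemap_count_to_size() and fiemap_size_to_count(), used throughout the FIEMAP code
below, convert between an extent count and the byte size of a struct fiemap header
followed by that many struct fiemap_extent entries. A plausible sketch of their
shape is given here for orientation; the authoritative definitions live in the
Lustre fiemap headers.

/* Sketch only - see the real helpers in the Lustre/kernel fiemap headers. */
static inline u64 fiemap_count_to_size_sketch(u64 extent_count)
{
        return sizeof(struct fiemap) +
               extent_count * sizeof(struct fiemap_extent);
}

static inline unsigned int fiemap_size_to_count_sketch(u64 array_size)
{
        return (array_size - sizeof(struct fiemap)) /
               sizeof(struct fiemap_extent);
}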
1618
1619 /**
1620  * A non-zero fe_logical indicates that this is a continuation FIEMAP
1621  * call: the in-stripe end offset reached by the previous call is passed
1622  * in the first fm_extent, and *start_stripe names the stripe on which
1623  * that call stopped.
1624  *
1625  * This function returns fm_end_offset, the in-OST offset at which the
1626  * mapping should be restarted. A return value of 0 tells the caller to
1627  * recalculate the proper offset in the next stripe.
1628  * Note that the first extent is passed to lov_get_info via the value field.
1629  *
1630  * \param fiemap [in]           fiemap request header
1631  * \param lsm [in]              striping information for the file
1632  * \param index [in]            stripe component index
1633  * \param ext [in]              logical extent of mapping
1634  * \param start_stripe [in/out] stripe from which mapping resumes; advanced when mapping moves to the next stripe
1635  */
1636 static u64 fiemap_calc_fm_end_offset(struct fiemap *fiemap,
1637                                      struct lov_stripe_md *lsm,
1638                                      int index, struct lu_extent *ext,
1639                                      int *start_stripe)
1640 {
1641         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1642         u64 local_end = fiemap->fm_extents[0].fe_logical;
1643         u64 lun_end;
1644         u64 fm_end_offset;
1645         int stripe_no = -1;
1646
1647         if (fiemap->fm_extent_count == 0 ||
1648             fiemap->fm_extents[0].fe_logical == 0)
1649                 return 0;
1650
1651         stripe_no = *start_stripe;
1652
1653         if (stripe_no == -1)
1654                 return -EINVAL;
1655
1656         /* If we have finished mapping on previous device, shift logical
1657          * offset to start of next device */
1658         if (lov_stripe_intersects(lsm, index, stripe_no, ext, NULL, &lun_end) &&
1659             local_end < lun_end) {
1660                 fm_end_offset = local_end;
1661         } else {
1662                 /* This is a special value to indicate that caller should
1663                  * calculate offset in next stripe. */
1664                 fm_end_offset = 0;
1665                 *start_stripe = (stripe_no + 1) % lsme->lsme_stripe_count;
1666         }
1667
1668         return fm_end_offset;
1669 }
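
A concrete resumption example for the function above, using illustrative values
only: suppose the previous call stopped on stripe 2 with an in-stripe end offset
of 3 MiB recorded in fm_extents[0].fe_logical.

/*
 * Illustrative resumption cases (hypothetical values):
 *   - stripe 2 still intersects the requested extent beyond 3 MiB:
 *       fm_end_offset = 3 MiB, mapping resumes inside stripe 2;
 *   - stripe 2 is already exhausted:
 *       fm_end_offset = 0, *start_stripe advances to stripe 3 and the
 *       caller recomputes the proper in-stripe offset there.
 */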
1670
1671 struct fiemap_state {
1672         struct fiemap           *fs_fm;
1673         struct lu_extent        fs_ext;         /* current entry extent */
1674         u64                     fs_length;
1675         u64                     fs_end_offset;  /* last iteration offset */
1676         int                     fs_cur_extent;  /* collected exts so far */
1677         int                     fs_cnt_need;    /* # of extents buf can hold */
1678         int                     fs_start_stripe;
1679         int                     fs_last_stripe;
1680         bool                    fs_device_done; /* enough for this OST */
1681         bool                    fs_finish_stripe; /* reached fs_last_stripe */
1682         bool                    fs_enough;      /* enough for this call */
1683 };
1684
1685 static struct cl_object *lov_find_subobj(const struct lu_env *env,
1686                                          struct lov_object *lov,
1687                                          struct lov_stripe_md *lsm,
1688                                          int index)
1689 {
1690         struct lov_device       *dev = lu2lov_dev(lov2lu(lov)->lo_dev);
1691         struct lov_thread_info  *lti = lov_env_info(env);
1692         struct lu_fid           *ofid = &lti->lti_fid;
1693         struct lov_oinfo        *oinfo;
1694         struct cl_device        *subdev;
1695         int                     entry = lov_comp_entry(index);
1696         int                     stripe = lov_comp_stripe(index);
1697         int                     ost_idx;
1698         int                     rc;
1699         struct cl_object        *result;
1700
1701         if (lov->lo_type != LLT_COMP)
1702                 GOTO(out, result = NULL);
1703
1704         if (entry >= lsm->lsm_entry_count ||
1705             stripe >= lsm->lsm_entries[entry]->lsme_stripe_count)
1706                 GOTO(out, result = NULL);
1707
1708         oinfo = lsm->lsm_entries[entry]->lsme_oinfo[stripe];
1709         ost_idx = oinfo->loi_ost_idx;
1710         rc = ostid_to_fid(ofid, &oinfo->loi_oi, ost_idx);
1711         if (rc != 0)
1712                 GOTO(out, result = NULL);
1713
1714         subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
1715         result = lov_sub_find(env, subdev, ofid, NULL);
1716 out:
1717         if (result == NULL)
1718                 result = ERR_PTR(-EINVAL);
1719         return result;
1720 }
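
lov_comp_entry(), lov_comp_stripe() and lov_comp_index(), used above and in
fiemap_for_stripe(), pack and unpack a (component entry, stripe) pair into a
single integer index. Assuming the usual split of entry into the upper half and
stripe into the lower half of the index (an assumption; the real helpers live in
lov_cl_internal.h), the round trip would look like:

/* Sketch under an assumed 16/16 bit split; not the authoritative definition. */
static inline int comp_index_sketch(int entry, int stripe)
{
        return (entry << 16) | stripe;
}
#define comp_entry_sketch(index)        ((index) >> 16)
#define comp_stripe_sketch(index)       ((index) & 0xffff)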
1721
1722 static int fiemap_for_stripe(const struct lu_env *env, struct cl_object *obj,
1723                              struct lov_stripe_md *lsm, struct fiemap *fiemap,
1724                              size_t *buflen, struct ll_fiemap_info_key *fmkey,
1725                              int index, int stripe_last, int stripeno,
1726                              struct fiemap_state *fs)
1727 {
1728         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1729         struct cl_object *subobj;
1730         struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov;
1731         struct fiemap_extent *fm_ext = &fs->fs_fm->fm_extents[0];
1732         u64 req_fm_len; /* max requested extent coverage */
1733         u64 len_mapped_single_call;
1734         u64 obd_start;
1735         u64 obd_end;
1736         unsigned int ext_count;
1737         /* EOF for object */
1738         bool ost_eof = false;
1739         /* done with required mapping for this OST? */
1740         bool ost_done = false;
1741         int ost_index;
1742         int rc = 0;
1743
1744         fs->fs_device_done = false;
1745         /* Find out range of mapping on this stripe */
1746         if ((lov_stripe_intersects(lsm, index, stripeno, &fs->fs_ext,
1747                                    &obd_start, &obd_end)) == 0)
1748                 return 0;
1749
1750         if (lov_oinfo_is_dummy(lsme->lsme_oinfo[stripeno]))
1751                 return -EIO;
1752
1753         /* If this is a continuation FIEMAP call and we are on
1754          * starting stripe then obd_start needs to be set to
1755          * end_offset */
1756         if (fs->fs_end_offset != 0 && stripeno == fs->fs_start_stripe)
1757                 obd_start = fs->fs_end_offset;
1758
1759         if (lov_size_to_stripe(lsm, index, fs->fs_ext.e_end, stripeno) ==
1760             obd_start)
1761                 return 0;
1762
1763         req_fm_len = obd_end - obd_start + 1;
1764         fs->fs_fm->fm_length = 0;
1765         len_mapped_single_call = 0;
1766
1767         /* find the lovsub object */
1768         subobj = lov_find_subobj(env, cl2lov(obj), lsm,
1769                                  lov_comp_index(index, stripeno));
1770         if (IS_ERR(subobj))
1771                 return PTR_ERR(subobj);
1772         /* If the output buffer is very large and the objects have many
1773          * extents we may need to loop on a single OST repeatedly */
1774         do {
1775                 if (fiemap->fm_extent_count > 0) {
1776                         /* Don't get too many extents. */
1777                         if (fs->fs_cur_extent + fs->fs_cnt_need >
1778                             fiemap->fm_extent_count)
1779                                 fs->fs_cnt_need = fiemap->fm_extent_count -
1780                                                   fs->fs_cur_extent;
1781                 }
1782
1783                 obd_start += len_mapped_single_call;
1784                 fs->fs_fm->fm_length = req_fm_len - len_mapped_single_call;
1785                 req_fm_len = fs->fs_fm->fm_length;
1786                 /**
1787                  * If we've already collected enough extents, request one
1788                  * more to see whether we happen to have reached the end of
1789                  * all available extents, so that FIEMAP_EXTENT_LAST can be set.
1790                  */
1791                 fs->fs_fm->fm_extent_count = fs->fs_enough ?
1792                                              1 : fs->fs_cnt_need;
1793                 fs->fs_fm->fm_mapped_extents = 0;
1794                 fs->fs_fm->fm_flags = fiemap->fm_flags;
1795
1796                 ost_index = lsme->lsme_oinfo[stripeno]->loi_ost_idx;
1797
1798                 if (ost_index < 0 || ost_index >= lov->desc.ld_tgt_count)
1799                         GOTO(obj_put, rc = -EINVAL);
1800                 /* If OST is inactive, return extent with UNKNOWN flag. */
1801                 if (!lov->lov_tgts[ost_index]->ltd_active) {
1802                         fs->fs_fm->fm_flags |= FIEMAP_EXTENT_LAST;
1803                         fs->fs_fm->fm_mapped_extents = 1;
1804
1805                         fm_ext[0].fe_logical = obd_start;
1806                         fm_ext[0].fe_length = obd_end - obd_start + 1;
1807                         fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
1808
1809                         goto inactive_tgt;
1810                 }
1811
1812                 fs->fs_fm->fm_start = obd_start;
1813                 fs->fs_fm->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
1814                 memcpy(&fmkey->lfik_fiemap, fs->fs_fm, sizeof(*fs->fs_fm));
1815                 *buflen = fiemap_count_to_size(fs->fs_fm->fm_extent_count);
1816
1817                 rc = cl_object_fiemap(env, subobj, fmkey, fs->fs_fm, buflen);
1818                 if (rc != 0)
1819                         GOTO(obj_put, rc);
1820 inactive_tgt:
1821                 ext_count = fs->fs_fm->fm_mapped_extents;
1822                 if (ext_count == 0) {
1823                         ost_done = true;
1824                         fs->fs_device_done = true;
1825                         /* If the last stripe has a hole at the end,
1826                          * we need to return */
1827                         if (stripeno == fs->fs_last_stripe) {
1828                                 fiemap->fm_mapped_extents = 0;
1829                                 fs->fs_finish_stripe = true;
1830                                 GOTO(obj_put, rc);
1831                         }
1832                         break;
1833                 } else if (fs->fs_enough) {
1834                         /*
1835                          * We've collected enough extents and there are
1836                          * more extents after it.
1837                          */
1838                         GOTO(obj_put, rc);
1839                 }
1840
1841                 /* If we only need the number of extents, go to the next device */
1842                 if (fiemap->fm_extent_count == 0) {
1843                         fs->fs_cur_extent += ext_count;
1844                         break;
1845                 }
1846
1847                 /* prepare to copy retrieved map extents */
1848                 len_mapped_single_call = fm_ext[ext_count - 1].fe_logical +
1849                                          fm_ext[ext_count - 1].fe_length -
1850                                          obd_start;
1851
1852                 /* Have we finished mapping on this device? */
1853                 if (req_fm_len <= len_mapped_single_call) {
1854                         ost_done = true;
1855                         fs->fs_device_done = true;
1856                 }
1857
1858                 /* Clear the EXTENT_LAST flag which can be present on
1859                  * the last extent */
1860                 if (fm_ext[ext_count - 1].fe_flags & FIEMAP_EXTENT_LAST)
1861                         fm_ext[ext_count - 1].fe_flags &= ~FIEMAP_EXTENT_LAST;
1862                 if (lov_stripe_size(lsm, index,
1863                                     fm_ext[ext_count - 1].fe_logical +
1864                                     fm_ext[ext_count - 1].fe_length,
1865                                     stripeno) >= fmkey->lfik_oa.o_size) {
1866                         ost_eof = true;
1867                         fs->fs_device_done = true;
1868                 }
1869
1870                 fiemap_prepare_and_copy_exts(fiemap, fm_ext, ost_index,
1871                                              ext_count, fs->fs_cur_extent,
1872                                              stripe_last + stripeno);
1873                 fs->fs_cur_extent += ext_count;
1874
1875                 /* Ran out of available extents? */
1876                 if (fs->fs_cur_extent >= fiemap->fm_extent_count)
1877                         fs->fs_enough = true;
1878         } while (!ost_done && !ost_eof);
1879
1880         if (stripeno == fs->fs_last_stripe)
1881                 fs->fs_finish_stripe = true;
1882 obj_put:
1883         cl_object_put(env, subobj);
1884
1885         return rc;
1886 }
1887
1888 /**
1889  * Break down the FIEMAP request and send appropriate calls to individual OSTs.
1890  * This also handles the restarting of FIEMAP calls in case the mapping
1891  * overflows the number of extents available in a single call.
1892  *
1893  * \param env [in]              lustre environment
1894  * \param obj [in]              file object
1895  * \param fmkey [in]            fiemap request header and other info
1896  * \param fiemap [out]          fiemap buffer holding the retrieved map extents
1897  * \param buflen [in/out]       max buffer length of @fiemap; while iterating
1898  *                              over each OST it limits the maximum mapping needed
1899  * \retval 0    success
1900  * \retval < 0  error
1901  */
1902 static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj,
1903                              struct ll_fiemap_info_key *fmkey,
1904                              struct fiemap *fiemap, size_t *buflen)
1905 {
1906         struct lov_stripe_md_entry *lsme;
1907         struct lov_stripe_md *lsm;
1908         struct fiemap *fm_local = NULL;
1909         loff_t whole_start;
1910         loff_t whole_end;
1911         int entry;
1912         int start_entry = -1;
1913         int end_entry;
1914         int cur_stripe = 0;
1915         int stripe_count;
1916         unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
1917         int rc = 0;
1918         struct fiemap_state fs = { 0 };
1919         struct lu_extent range;
1920         int cur_ext;
1921         int stripe_last = 0;
1922         int start_stripe = 0;
1923         bool resume = false;
1924         ENTRY;
1925
1926         lsm = lov_lsm_addref(cl2lov(obj));
1927         if (lsm == NULL) {
1928                 /* no extent: there is no object for mapping */
1929                 fiemap->fm_mapped_extents = 0;
1930                 return 0;
1931         }
1932
1933         if (!(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) {
1934                 /**
1935                  * If the entry count > 1 or stripe_count > 1 and the
1936                  * application does not understand DEVICE_ORDER flag,
1937                  * it cannot interpret the extents correctly.
1938                  */
1939                 if (lsm->lsm_entry_count > 1 ||
1940                     (lsm->lsm_entry_count == 1 &&
1941                      lsm->lsm_entries[0]->lsme_stripe_count > 1))
1942                         GOTO(out_lsm, rc = -ENOTSUPP);
1943         }
1944
1945         /* No support for DOM layout yet. */
1946         if (lsme_is_dom(lsm->lsm_entries[0]))
1947                 GOTO(out_lsm, rc = -ENOTSUPP);
1948
1949         if (lsm->lsm_is_released) {
1950                 if (fiemap->fm_start < fmkey->lfik_oa.o_size) {
1951                         /**
1952                          * released file: return a minimal FIEMAP if the
1953                          * requested range is within the file size.
1954                          */
1955                         fiemap->fm_mapped_extents = 1;
1956                         fiemap->fm_extents[0].fe_logical = fiemap->fm_start;
1957                         if (fiemap->fm_start + fiemap->fm_length <
1958                             fmkey->lfik_oa.o_size)
1959                                 fiemap->fm_extents[0].fe_length =
1960                                         fiemap->fm_length;
1961                         else
1962                                 fiemap->fm_extents[0].fe_length =
1963                                         fmkey->lfik_oa.o_size -
1964                                         fiemap->fm_start;
1965                         fiemap->fm_extents[0].fe_flags |=
1966                                 FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST;
1967                 }
1968                 GOTO(out_lsm, rc = 0);
1969         }
1970
1971         /* shrink buffer_size if fm_extent_count extents fit in less space */
1972         if (fiemap_count_to_size(fiemap->fm_extent_count) < buffer_size)
1973                 buffer_size = fiemap_count_to_size(fiemap->fm_extent_count);
1974
1975         OBD_ALLOC_LARGE(fm_local, buffer_size);
1976         if (fm_local == NULL)
1977                 GOTO(out_lsm, rc = -ENOMEM);
1978
1979         /**
1980          * Requested extent count exceeds the fiemap buffer size, shrink our
1981          * ambition.
1982          */
1983         if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen)
1984                 fiemap->fm_extent_count = fiemap_size_to_count(*buflen);
1985
1986         fs.fs_enough = false;
1987         fs.fs_cur_extent = 0;
1988         fs.fs_fm = fm_local;
1989         fs.fs_cnt_need = fiemap_size_to_count(buffer_size);
1990
1991         whole_start = fiemap->fm_start;
1992         /* whole_start is beyond the end of the file */
1993         if (whole_start > fmkey->lfik_oa.o_size)
1994                 GOTO(out_fm_local, rc = -EINVAL);
1995         whole_end = (fiemap->fm_length == OBD_OBJECT_EOF) ?
1996                                         fmkey->lfik_oa.o_size + 1 :
1997                                         whole_start + fiemap->fm_length;
1998         /**
1999          * If fiemap->fm_length != OBD_OBJECT_EOF but whole_end exceeds the
2000          * file size, clamp whole_end to the file size.
2001          */
2002         if (whole_end > fmkey->lfik_oa.o_size + 1)
2003                 whole_end = fmkey->lfik_oa.o_size + 1;
2004
2005         /**
2006          * the high 16 bits of fe_device record the stripe reached by the
2007          * last call; we continue from there in this call.
2008          */
2009         if (fiemap->fm_extent_count && fiemap->fm_extents[0].fe_logical) {
2010                 resume = true;
2011                 stripe_last = get_fe_stripenr(&fiemap->fm_extents[0]);
2012         }
2013         /**
2014          * stripe_last records the stripe number that was being processed
2015          * in the last call
2016          */
2017         end_entry = lsm->lsm_entry_count - 1;
2018         cur_stripe = 0;
2019         for (entry = 0; entry <= end_entry; entry++) {
2020                 lsme = lsm->lsm_entries[entry];
2021                 if (cur_stripe + lsme->lsme_stripe_count >= stripe_last) {
2022                         start_entry = entry;
2023                         start_stripe = stripe_last - cur_stripe;
2024                         break;
2025                 }
2026
2027                 cur_stripe += lsme->lsme_stripe_count;
2028         }
2029         if (start_entry == -1) {
2030                 CERROR(DFID": FIEMAP does not init start entry, cur_stripe=%d, "
2031                        "stripe_last=%d\n", PFID(lu_object_fid(&obj->co_lu)),
2032                        cur_stripe, stripe_last);
2033                 GOTO(out_fm_local, rc = -EINVAL);
2034         }
2035         /**
2036          * @start_entry and @start_stripe record the position at which fiemap
2037          * resumes. @stripe_last keeps recording the absolute position
2038          * we are processing. @resume indicates we should honor @start_stripe.
2039          */
2040
2041         range.e_start = whole_start;
2042         range.e_end = whole_end;
2043
2044         for (entry = start_entry; entry <= end_entry; entry++) {
2045                 /* remember to update stripe_last accordingly */
2046                 lsme = lsm->lsm_entries[entry];
2047
2048                 /* FLR could contain component holes between entries */
2049                 if (!lsme_inited(lsme)) {
2050                         stripe_last += lsme->lsme_stripe_count;
2051                         resume = false;
2052                         continue;
2053                 }
2054
2055                 if (!lu_extent_is_overlapped(&range, &lsme->lsme_extent)) {
2056                         stripe_last += lsme->lsme_stripe_count;
2057                         resume = false;
2058                         continue;
2059                 }
2060
2061                 /* prepare for a component entry iteration */
2062                 if (lsme->lsme_extent.e_start > whole_start)
2063                         fs.fs_ext.e_start = lsme->lsme_extent.e_start;
2064                 else
2065                         fs.fs_ext.e_start = whole_start;
2066                 if (lsme->lsme_extent.e_end > whole_end)
2067                         fs.fs_ext.e_end = whole_end;
2068                 else
2069                         fs.fs_ext.e_end = lsme->lsme_extent.e_end;
2070
2071                 /* Calculate start stripe, last stripe and length of mapping */
2072                 if (resume) {
2073                         fs.fs_start_stripe = start_stripe;
2074                         /* rewind stripe_last to the first stripe of this component */
2075                         stripe_last -= start_stripe;
2076                         resume = false;
2077                 } else {
2078                         fs.fs_start_stripe = lov_stripe_number(lsm, entry,
2079                                                         fs.fs_ext.e_start);
2080                 }
2081                 fs.fs_last_stripe = fiemap_calc_last_stripe(lsm, entry,
2082                                         &fs.fs_ext, fs.fs_start_stripe,
2083                                         &stripe_count);
2084                 /**
2085                  * A new mirror component is under process, reset
2086                  * fs.fs_end_offset and then fiemap_for_stripe() starts from
2087                  * the overlapping extent, otherwise starts from
2088                  * fs.fs_end_offset.
2089                  */
2090                 if (entry > start_entry && lsme->lsme_extent.e_start == 0) {
2091                         /* new mirror */
2092                         fs.fs_end_offset = 0;
2093                 } else {
2094                         fs.fs_end_offset = fiemap_calc_fm_end_offset(fiemap,
2095                                                 lsm, entry, &fs.fs_ext,
2096                                                 &fs.fs_start_stripe);
2097                 }
2098
2099                 /* Check each stripe */
2100                 for (cur_stripe = fs.fs_start_stripe; stripe_count > 0;
2101                      --stripe_count,
2102                      cur_stripe = (cur_stripe + 1) % lsme->lsme_stripe_count) {
2103                         /* reset fs_finish_stripe */
2104                         fs.fs_finish_stripe = false;
2105                         rc = fiemap_for_stripe(env, obj, lsm, fiemap, buflen,
2106                                                fmkey, entry, stripe_last,
2107                                                cur_stripe, &fs);
2108                         if (rc < 0)
2109                                 GOTO(out_fm_local, rc);
2110                         if (fs.fs_enough) {
2111                                 stripe_last += cur_stripe;
2112                                 GOTO(finish, rc);
2113                         }
2114                         if (fs.fs_finish_stripe)
2115                                 break;
2116                 } /* for each stripe */
2117                 stripe_last += lsme->lsme_stripe_count;
2118         } /* for covering layout component entry */
2119
2120 finish:
2121         if (fs.fs_cur_extent > 0)
2122                 cur_ext = fs.fs_cur_extent - 1;
2123         else
2124                 cur_ext = 0;
2125
2126         /* done all the processing */
2127         if (entry > end_entry)
2128                 fiemap->fm_extents[cur_ext].fe_flags |= FIEMAP_EXTENT_LAST;
2129
2130         /* Indicate that we are returning device offsets unless the file has
2131          * just a single stripe */
2132         if (lsm->lsm_entry_count > 1 ||
2133             (lsm->lsm_entry_count == 1 &&
2134              lsm->lsm_entries[0]->lsme_stripe_count > 1))
2135                 fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
2136
2137         if (fiemap->fm_extent_count == 0)
2138                 goto skip_last_device_calc;
2139
2140 skip_last_device_calc:
2141         fiemap->fm_mapped_extents = fs.fs_cur_extent;
2142 out_fm_local:
2143         OBD_FREE_LARGE(fm_local, buffer_size);
2144
2145 out_lsm:
2146         lov_lsm_put(lsm);
2147         return rc;
2148 }
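
For context, lov_object_fiemap() is ultimately driven by the standard FIEMAP
ioctl from userspace. The minimal, hypothetical userspace sketch below uses only
the generic linux/fiemap.h API (nothing Lustre-specific) and requests just the
number of mapped extents, which exercises the fm_extent_count == 0 handling above.

/* Hypothetical userspace sketch: count the mapped extents of an open file. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

static int get_extent_count(int fd)
{
        struct fiemap fm;

        memset(&fm, 0, sizeof(fm));
        fm.fm_start = 0;
        fm.fm_length = FIEMAP_MAX_OFFSET;       /* map the whole file */
        fm.fm_extent_count = 0;                 /* only count extents */

        if (ioctl(fd, FS_IOC_FIEMAP, &fm) < 0)
                return -1;

        return fm.fm_mapped_extents;
}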
2149
2150 static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
2151                                 struct lov_user_md __user *lum, size_t size)
2152 {
2153         struct lov_object       *lov = cl2lov(obj);
2154         struct lov_stripe_md    *lsm;
2155         int                     rc = 0;
2156         ENTRY;
2157
2158         lsm = lov_lsm_addref(lov);
2159         if (lsm == NULL)
2160                 RETURN(-ENODATA);
2161
2162         rc = lov_getstripe(env, cl2lov(obj), lsm, lum, size);
2163         lov_lsm_put(lsm);
2164         RETURN(rc);
2165 }
2166
2167 static int lov_object_layout_get(const struct lu_env *env,
2168                                  struct cl_object *obj,
2169                                  struct cl_layout *cl)
2170 {
2171         struct lov_object *lov = cl2lov(obj);
2172         struct lov_stripe_md *lsm = lov_lsm_addref(lov);
2173         struct lu_buf *buf = &cl->cl_buf;
2174         ssize_t rc;
2175         ENTRY;
2176
2177         if (lsm == NULL) {
2178                 cl->cl_size = 0;
2179                 cl->cl_layout_gen = CL_LAYOUT_GEN_EMPTY;
2180
2181                 RETURN(0);
2182         }
2183
2184         cl->cl_size = lov_comp_md_size(lsm);
2185         cl->cl_layout_gen = lsm->lsm_layout_gen;
2186         cl->cl_is_released = lsm->lsm_is_released;
2187         cl->cl_is_composite = lsm_is_composite(lsm->lsm_magic);
2188
2189         rc = lov_lsm_pack(lsm, buf->lb_buf, buf->lb_len);
2190         lov_lsm_put(lsm);
2191
2192         /* return error or number of bytes */
2193         RETURN(rc);
2194 }
2195
2196 static loff_t lov_object_maxbytes(struct cl_object *obj)
2197 {
2198         struct lov_object *lov = cl2lov(obj);
2199         struct lov_stripe_md *lsm = lov_lsm_addref(lov);
2200         loff_t maxbytes;
2201
2202         if (lsm == NULL)
2203                 return LLONG_MAX;
2204
2205         maxbytes = lsm->lsm_maxbytes;
2206
2207         lov_lsm_put(lsm);
2208
2209         return maxbytes;
2210 }
2211
2212 static int lov_object_flush(const struct lu_env *env, struct cl_object *obj,
2213                             struct ldlm_lock *lock)
2214 {
2215         return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_flush, true, env, obj,
2216                                      lock);
2217 }
2218
2219 static const struct cl_object_operations lov_ops = {
2220         .coo_page_init    = lov_page_init,
2221         .coo_lock_init    = lov_lock_init,
2222         .coo_io_init      = lov_io_init,
2223         .coo_attr_get     = lov_attr_get,
2224         .coo_attr_update  = lov_attr_update,
2225         .coo_conf_set     = lov_conf_set,
2226         .coo_getstripe    = lov_object_getstripe,
2227         .coo_layout_get   = lov_object_layout_get,
2228         .coo_maxbytes     = lov_object_maxbytes,
2229         .coo_fiemap       = lov_object_fiemap,
2230         .coo_object_flush = lov_object_flush
2231 };
2232
2233 static const struct lu_object_operations lov_lu_obj_ops = {
2234         .loo_object_init        = lov_object_init,
2235         .loo_object_delete      = lov_object_delete,
2236         .loo_object_release     = NULL,
2237         .loo_object_free        = lov_object_free,
2238         .loo_object_print       = lov_object_print,
2239         .loo_object_invariant   = NULL,
2240 };
2241
2242 struct lu_object *lov_object_alloc(const struct lu_env *env,
2243                                    const struct lu_object_header *unused,
2244                                    struct lu_device *dev)
2245 {
2246         struct lov_object *lov;
2247         struct lu_object  *obj;
2248
2249         ENTRY;
2250         OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, GFP_NOFS);
2251         if (lov != NULL) {
2252                 obj = lov2lu(lov);
2253                 lu_object_init(obj, NULL, dev);
2254                 lov->lo_cl.co_ops = &lov_ops;
2255                 lov->lo_type = -1; /* invalid, to catch uninitialized type */
2256                 /*
2257                  * object io operation vector (cl_object::co_iop) is installed
2258                  * later in lov_object_init(), as different vectors are used
2259                  * for object with different layouts.
2260                  */
2261                 obj->lo_ops = &lov_lu_obj_ops;
2262         } else
2263                 obj = NULL;
2264         RETURN(obj);
2265 }
2266
2267 static struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
2268 {
2269         struct lov_stripe_md *lsm = NULL;
2270
2271         lov_conf_freeze(lov);
2272         if (lov->lo_lsm != NULL) {
2273                 lsm = lsm_addref(lov->lo_lsm);
2274                 CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
2275                         lsm, atomic_read(&lsm->lsm_refc),
2276                         test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags),
2277                         current);
2278         }
2279         lov_conf_thaw(lov);
2280         return lsm;
2281 }
2282
2283 int lov_read_and_clear_async_rc(struct cl_object *clob)
2284 {
2285         struct lu_object *luobj;
2286         int rc = 0;
2287         ENTRY;
2288
2289         luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
2290                                  &lov_device_type);
2291         if (luobj != NULL) {
2292                 struct lov_object *lov = lu2lov(luobj);
2293
2294                 lov_conf_freeze(lov);
2295                 switch (lov->lo_type) {
2296                 case LLT_COMP: {
2297                         struct lov_stripe_md *lsm;
2298                         int i;
2299
2300                         lsm = lov->lo_lsm;
2301                         LASSERT(lsm != NULL);
2302                         for (i = 0; i < lsm->lsm_entry_count; i++) {
2303                                 struct lov_stripe_md_entry *lse =
2304                                                 lsm->lsm_entries[i];
2305                                 int j;
2306
2307                                 if (!lsme_inited(lse))
2308                                         break;
2309
2310                                 for (j = 0; j < lse->lsme_stripe_count; j++) {
2311                                         struct lov_oinfo *loi =
2312                                                         lse->lsme_oinfo[j];
2313
2314                                         if (lov_oinfo_is_dummy(loi))
2315                                                 continue;
2316
2317                                         if (loi->loi_ar.ar_rc && !rc)
2318                                                 rc = loi->loi_ar.ar_rc;
2319                                         loi->loi_ar.ar_rc = 0;
2320                                 }
2321                         }
2322                 }
2323                 fallthrough;
2324                 case LLT_RELEASED:
2325                 case LLT_EMPTY:
2326                 case LLT_FOREIGN:
2327                         break;
2328                 default:
2329                         LBUG();
2330                 }
2331                 lov_conf_thaw(lov);
2332         }
2333         RETURN(rc);
2334 }
2335 EXPORT_SYMBOL(lov_read_and_clear_async_rc);
2336
2337 /** @} lov */