lustre/lov/lov_object.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * Implementation of cl_object for LOV layer.
32  *
33  *   Author: Nikita Danilov <nikita.danilov@sun.com>
34  *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
35  */
36
37 #define DEBUG_SUBSYSTEM S_LOV
38
39 #include <linux/random.h>
40
41 #include "lov_cl_internal.h"
42
43 static inline struct lov_device *lov_object_dev(struct lov_object *obj)
44 {
45         return lu2lov_dev(obj->lo_cl.co_lu.lo_dev);
46 }
47
48 /** \addtogroup lov
49  *  @{
50  */
51
52 /**
53  * Layout operations.
54  */
55 struct lov_layout_operations {
56         int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
57                         struct lov_object *lov, struct lov_stripe_md *lsm,
58                         const struct cl_object_conf *conf,
59                         union lov_layout_state *state);
60         int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
61                           union lov_layout_state *state);
62         void (*llo_fini)(const struct lu_env *env, struct lov_object *lov,
63                          union lov_layout_state *state);
64         int  (*llo_print)(const struct lu_env *env, void *cookie,
65                           lu_printer_t p, const struct lu_object *o);
66         int  (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
67                               struct cl_page *page, pgoff_t index);
68         int  (*llo_lock_init)(const struct lu_env *env,
69                               struct cl_object *obj, struct cl_lock *lock,
70                               const struct cl_io *io);
71         int  (*llo_io_init)(const struct lu_env *env,
72                             struct cl_object *obj, struct cl_io *io);
73         int  (*llo_getattr)(const struct lu_env *env, struct cl_object *obj,
74                             struct cl_attr *attr);
75         int  (*llo_flush)(const struct lu_env *env, struct cl_object *obj,
76                           struct ldlm_lock *lock);
77 };
78
79 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
80 static struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov);
81
82 static void lov_lsm_put(struct lov_stripe_md *lsm)
83 {
84         if (lsm != NULL)
85                 lov_free_memmd(&lsm);
86 }
87
88 /**
89  * Lov object layout operations.
90  */
91 static struct cl_object *lov_sub_find(const struct lu_env *env,
92                                       struct cl_device *dev,
93                                       const struct lu_fid *fid,
94                                       const struct cl_object_conf *conf)
95 {
96         struct lu_object *o;
97
98         ENTRY;
99
100         o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
101         LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
102         RETURN(lu2cl(o));
103 }
104
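/*
 * Stack a stripe sub-object's page slices under this LOV object by shifting
 * their slice offsets, and return the page buffer size that stripe adds.
 * With a NULL stripe, return the slice space accounted beyond the LOV slice
 * itself (used by lov_layout_change() to undo the fixup).
 */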
105 static int lov_page_slice_fixup(struct lov_object *lov,
106                                 struct cl_object *stripe)
107 {
108         struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
109         struct cl_object *o;
110
111         if (stripe == NULL)
112                 return hdr->coh_page_bufsize - lov->lo_cl.co_slice_off;
113
114         cl_object_for_each(o, stripe)
115                 o->co_slice_off += hdr->coh_page_bufsize;
116
117         return cl_object_header(stripe)->coh_page_bufsize;
118 }
119
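/*
 * Attach a stripe sub-object to its parent LOV object: record the parent
 * header and nesting level in the sub-object and the composite index in the
 * lovsub_object. If the sub-object is already owned by a stale layout,
 * return -EAGAIN so the caller retries the lookup; a conflict with a valid
 * owner is reported as -EIO.
 */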
120 static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
121                         struct cl_object *subobj, struct lov_oinfo *oinfo,
122                         int idx)
123 {
124         struct cl_object_header *hdr;
125         struct cl_object_header *subhdr;
126         struct cl_object_header *parent;
127         int entry = lov_comp_entry(idx);
128         int stripe = lov_comp_stripe(idx);
129         int result;
130
131         if (CFS_FAIL_CHECK(OBD_FAIL_LOV_INIT)) {
132                 /* For sanity:test_206.
133                  * Do not leave the object in cache to avoid accessing
134                  * freed memory. This is because the osc_object refers to
135                  * the lov_oinfo of lsm_stripe_data, which will be freed
136                  * due to this failure. */
137                 cl_object_kill(env, subobj);
138                 cl_object_put(env, subobj);
139                 return -EIO;
140         }
141
142         hdr = cl_object_header(lov2cl(lov));
143         subhdr = cl_object_header(subobj);
144
145         CDEBUG(D_INODE, DFID"@%p[%d:%d] -> "DFID"@%p: ostid: "DOSTID
146                " ost idx: %d gen: %d\n",
147                PFID(lu_object_fid(&subobj->co_lu)), subhdr, entry, stripe,
148                PFID(lu_object_fid(lov2lu(lov))), hdr, POSTID(&oinfo->loi_oi),
149                oinfo->loi_ost_idx, oinfo->loi_ost_gen);
150
151         /* reuse ->coh_attr_guard to protect coh_parent change */
152         spin_lock(&subhdr->coh_attr_guard);
153         parent = subhdr->coh_parent;
154         if (parent == NULL) {
155                 struct lovsub_object *lso = cl2lovsub(subobj);
156
157                 subhdr->coh_parent = hdr;
158                 spin_unlock(&subhdr->coh_attr_guard);
159                 subhdr->coh_nesting = hdr->coh_nesting + 1;
160                 lu_object_ref_add(&subobj->co_lu, "lov-parent", lov);
161                 lso->lso_super = lov;
162                 lso->lso_index = idx;
163                 result = 0;
164         } else {
165                 struct lu_object  *old_obj;
166                 struct lov_object *old_lov;
167
168                 spin_unlock(&subhdr->coh_attr_guard);
169                 old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
170                 LASSERT(old_obj != NULL);
171                 old_lov = cl2lov(lu2cl(old_obj));
172                 if (test_bit(LO_LAYOUT_INVALID, &old_lov->lo_obj_flags)) {
173                         /* the object's layout has already changed but isn't
174                          * refreshed */
175                         lu_object_unhash(env, &subobj->co_lu);
176                         result = -EAGAIN;
177                 } else {
178                         result = -EIO;
179                 }
180
181                 LU_OBJECT_DEBUG(D_INODE, env, &subobj->co_lu,
182                                 "stripe %d is already owned.", idx);
183                 LU_OBJECT_DEBUG(D_INODE, env, old_obj, "owned.");
184                 LU_OBJECT_HEADER(D_INODE, env, lov2lu(lov), "try to own.\n");
185                 cl_object_put(env, subobj);
186         }
187         return result;
188 }
189
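/*
 * Instantiate all stripe sub-objects of a plain RAID0 layout entry and wire
 * them into r0->lo_sub[]; non-rotational OSTs bump the layout entry
 * preference. Returns the page slice size consumed by the stripes on
 * success, or a negative errno.
 */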
190 static int lov_init_raid0(const struct lu_env *env, struct lov_device *dev,
191                           struct lov_object *lov, unsigned int index,
192                           const struct cl_object_conf *conf,
193                           struct lov_layout_entry *lle)
194 {
195         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
196         struct lov_thread_info *lti = lov_env_info(env);
197         struct cl_object_conf *subconf = &lti->lti_stripe_conf;
198         struct lu_fid *ofid = &lti->lti_fid;
199         struct cl_object *stripe;
200         struct lov_stripe_md_entry *lse  = lov_lse(lov, index);
201         int result;
202         int psz, sz;
203         int i;
204
205         ENTRY;
206
207         spin_lock_init(&r0->lo_sub_lock);
208         r0->lo_nr = lse->lsme_stripe_count;
209
210         OBD_ALLOC_PTR_ARRAY_LARGE(r0->lo_sub, r0->lo_nr);
211         if (r0->lo_sub == NULL)
212                 GOTO(out, result = -ENOMEM);
213
214         psz = 0;
215         result = 0;
216         memset(subconf, 0, sizeof(*subconf));
217
218         /*
219          * Create stripe cl_objects.
220          */
221         for (i = 0; i < r0->lo_nr; ++i) {
222                 struct cl_device *subdev;
223                 struct lov_oinfo *oinfo = lse->lsme_oinfo[i];
224                 int ost_idx = oinfo->loi_ost_idx;
225                 struct obd_export *exp;
226
227                 if (lov_oinfo_is_dummy(oinfo))
228                         continue;
229
230                 result = ostid_to_fid(ofid, &oinfo->loi_oi, oinfo->loi_ost_idx);
231                 if (result != 0)
232                         GOTO(out, result);
233
234                 if (dev->ld_target[ost_idx] == NULL) {
235                         CERROR("%s: OST %04x is not initialized\n",
236                                lov2obd(dev->ld_lov)->obd_name, ost_idx);
237                         GOTO(out, result = -EIO);
238                 }
239
240                 exp = dev->ld_lov->lov_tgts[ost_idx]->ltd_exp;
241                 if (likely(exp)) {
242                         /* the more non-rotational (fast) OSTs, the better */
243                         if (exp->exp_obd->obd_osfs.os_state & OS_STATFS_NONROT)
244                                 lle->lle_preference++;
245                 }
246
247                 subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
248                 subconf->u.coc_oinfo = oinfo;
249                 LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
250                 /* In the function below, .hs_keycmp resolves to
251                  * lu_obj_hop_keycmp() */
252                 stripe = lov_sub_find(env, subdev, ofid, subconf);
253                 if (IS_ERR(stripe))
254                         GOTO(out, result = PTR_ERR(stripe));
255
256                 result = lov_init_sub(env, lov, stripe, oinfo,
257                                       lov_comp_index(index, i));
258                 if (result == -EAGAIN) { /* try again */
259                         --i;
260                         result = 0;
261                         continue;
262                 }
263
264                 if (result == 0) {
265                         r0->lo_sub[i] = cl2lovsub(stripe);
266
267                         sz = lov_page_slice_fixup(lov, stripe);
268                         LASSERT(ergo(psz > 0, psz == sz));
269                         psz = sz;
270                 }
271         }
272         if (result == 0)
273                 result = psz;
274 out:
275         RETURN(result);
276 }
277
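/*
 * Evict one stripe sub-object from the cache and wait until it is actually
 * freed, i.e. until lovsub_object_free() clears the matching ->lo_sub[]
 * slot.
 */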
278 static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
279                                struct lov_layout_raid0 *r0,
280                                struct lovsub_object *los, int idx)
281 {
282         struct cl_object        *sub;
283         struct lu_site          *site;
284         wait_queue_head_t *wq;
285
286         LASSERT(r0->lo_sub[idx] == los);
287
288         sub = lovsub2cl(los);
289         site = sub->co_lu.lo_dev->ld_site;
290         wq = lu_site_wq_from_fid(site, &sub->co_lu.lo_header->loh_fid);
291
292         cl_object_kill(env, sub);
293         /* release a reference to the sub-object and ... */
294         lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
295         cl_object_put(env, sub);
296
297         /* ... wait until it is actually destroyed---sub-object clears its
298          * ->lo_sub[] slot in lovsub_object_free() */
299         wait_event(*wq, r0->lo_sub[idx] != los);
300         LASSERT(r0->lo_sub[idx] == NULL);
301 }
302
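/*
 * Prune and kill every cached stripe sub-object of a RAID0 layout entry;
 * called before the top-level layout is deleted or replaced.
 */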
303 static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
304                              struct lov_layout_entry *lle)
305 {
306         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
307         int rc;
308
309         ENTRY;
310
311         if (r0->lo_sub != NULL) {
312                 int i;
313
314                 for (i = 0; i < r0->lo_nr; ++i) {
315                         struct lovsub_object *los = r0->lo_sub[i];
316
317                         if (los != NULL) {
318                                 rc = cl_object_prune(env, &los->lso_cl);
319                                 if (rc)
320                                         RETURN(rc);
321                                 /*
322                                  * If top-level object is to be evicted from
323                                  * the cache, so are its sub-objects.
324                                  */
325                                 lov_subobject_kill(env, lov, r0, los, i);
326                         }
327                 }
328         }
329
330         RETURN(0);
331 }
332
333 static void lov_fini_raid0(const struct lu_env *env,
334                            struct lov_layout_entry *lle)
335 {
336         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
337
338         if (r0->lo_sub != NULL) {
339                 OBD_FREE_PTR_ARRAY_LARGE(r0->lo_sub, r0->lo_nr);
340                 r0->lo_sub = NULL;
341         }
342 }
343
344 static int lov_print_raid0(const struct lu_env *env, void *cookie,
345                            lu_printer_t p, const struct lov_layout_entry *lle)
346 {
347         const struct lov_layout_raid0 *r0 = &lle->lle_raid0;
348         int i;
349
350         for (i = 0; i < r0->lo_nr; ++i) {
351                 struct lu_object *sub;
352
353                 if (r0->lo_sub[i] != NULL) {
354                         sub = lovsub2lu(r0->lo_sub[i]);
355                         lu_object_print(env, cookie, p, sub);
356                 } else {
357                         (*p)(env, cookie, "sub %d absent\n", i);
358                 }
359         }
360         return 0;
361 }
362
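/*
 * Return the cached attributes of a RAID0 layout entry, merging the LVB/KMS
 * data of all stripes on first use and caching the result in lo_attr.
 */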
363 static int lov_attr_get_raid0(const struct lu_env *env, struct lov_object *lov,
364                               unsigned int index, struct lov_layout_entry *lle,
365                               struct cl_attr **lov_attr)
366 {
367         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
368         struct lov_stripe_md *lsm = lov->lo_lsm;
369         struct cl_attr *attr = &r0->lo_attr;
370         int result = 0;
371
372         if (r0->lo_attr_valid) {
373                 *lov_attr = attr;
374                 return 0;
375         }
376
377         /*
378          * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
379          * happy. It's not needed, because new code uses
380          * ->coh_attr_guard spin-lock to protect consistency of
381          * sub-object attributes.
382          */
383         lov_stripe_lock(lsm);
384         result = lov_merge_lvb_kms(lsm, index, attr);
385         lov_stripe_unlock(lsm);
386         if (result == 0) {
387                 r0->lo_attr_valid = 1;
388                 *lov_attr = attr;
389         }
390
391         return result;
392 }
393
394 static struct lov_comp_layout_entry_ops raid0_ops = {
395         .lco_init      = lov_init_raid0,
396         .lco_fini      = lov_fini_raid0,
397         .lco_getattr   = lov_attr_get_raid0,
398 };
399
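/*
 * Return the attributes of a Data-on-MDT component, taken from the cached
 * LVB of its single MDT stripe and clamped to the component end.
 */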
400 static int lov_attr_get_dom(const struct lu_env *env, struct lov_object *lov,
401                             unsigned int index, struct lov_layout_entry *lle,
402                             struct cl_attr **lov_attr)
403 {
404         struct lov_layout_dom *dom = &lle->lle_dom;
405         struct lov_oinfo *loi = dom->lo_loi;
406         struct cl_attr *attr = &dom->lo_dom_r0.lo_attr;
407
408         if (dom->lo_dom_r0.lo_attr_valid) {
409                 *lov_attr = attr;
410                 return 0;
411         }
412
413         if (OST_LVB_IS_ERR(loi->loi_lvb.lvb_blocks))
414                 return OST_LVB_GET_ERR(loi->loi_lvb.lvb_blocks);
415
416         cl_lvb2attr(attr, &loi->loi_lvb);
417
418         /* DoM component size can be bigger than stripe size after
419          * client's setattr RPC, so do not count anything beyond
420          * component end. Alternatively, check that limit on server
421          * and do not allow size overflow there. */
422         if (attr->cat_size > lle->lle_extent->e_end)
423                 attr->cat_size = lle->lle_extent->e_end;
424
425         attr->cat_kms = attr->cat_size;
426
427         dom->lo_dom_r0.lo_attr_valid = 1;
428         *lov_attr = attr;
429
430         return 0;
431 }
432
433 /**
434  * Lookup FLD to get MDS index of the given DOM object FID.
435  *
436  * \param[in]  ld       LOV device
437  * \param[in]  fid      FID to lookup
438  * \param[out] nr       index in the MDC array, returned to the caller
439  *
440  * \retval              0 and \a nr filled with the MDC array index if successful
441  * \retval              negative value on error
442  */
443 static int lov_fld_lookup(struct lov_device *ld, const struct lu_fid *fid,
444                           __u32 *nr)
445 {
446         __u32 mds_idx;
447         int i, rc;
448
449         ENTRY;
450
451         rc = fld_client_lookup(&ld->ld_lmv->u.lmv.lmv_fld, fid_seq(fid),
452                                &mds_idx, LU_SEQ_RANGE_MDT, NULL);
453         if (rc) {
454                 CERROR("%s: error while looking for mds number. Seq %#llx"
455                        ", err = %d\n", lu_dev_name(cl2lu_dev(&ld->ld_cl)),
456                        fid_seq(fid), rc);
457                 RETURN(rc);
458         }
459
460         CDEBUG(D_INODE, "FLD lookup got mds #%x for fid="DFID"\n",
461                mds_idx, PFID(fid));
462
463         /* find proper MDC device in the array */
464         for (i = 0; i < ld->ld_md_tgts_nr; i++) {
465                 if (ld->ld_md_tgts[i].ldm_mdc != NULL &&
466                     ld->ld_md_tgts[i].ldm_idx == mds_idx)
467                         break;
468         }
469
470         if (i == ld->ld_md_tgts_nr) {
471                 CERROR("%s: cannot find corresponding MDC device for mds #%x "
472                        "for fid="DFID"\n", lu_dev_name(cl2lu_dev(&ld->ld_cl)),
473                        mds_idx, PFID(fid));
474                 rc = -EINVAL;
475         } else {
476                 *nr = i;
477         }
478         RETURN(rc);
479 }
480
481 /**
482  * Implementation of lov_comp_layout_entry_ops::lco_init for DOM object.
483  *
484  * Initialize the DOM object for the first time. It also prepares a RAID0
485  * entry for it so that the common RAID0 layout entry methods can be reused.
486  *
487  * \param[in] env       execution environment
488  * \param[in] dev       LOV device
489  * \param[in] lov       LOV object
490  * \param[in] index     Composite layout entry index in LSM
491  * \param[in] lle       Composite LOV layout entry
492  */
493 static int lov_init_dom(const struct lu_env *env, struct lov_device *dev,
494                         struct lov_object *lov, unsigned int index,
495                         const struct cl_object_conf *conf,
496                         struct lov_layout_entry *lle)
497 {
498         struct lov_thread_info *lti = lov_env_info(env);
499         struct lov_stripe_md_entry *lsme = lov_lse(lov, index);
500         struct cl_object *clo;
501         struct lu_object *o = lov2lu(lov);
502         const struct lu_fid *fid = lu_object_fid(o);
503         struct cl_device *mdcdev;
504         struct lov_oinfo *loi = NULL;
505         struct cl_object_conf *sconf = &lti->lti_stripe_conf;
506         int rc;
507         __u32 idx = 0;
508
509         ENTRY;
510
511         /* DOM entry may not be at index 0 due to FLR but must start at offset 0 */
512         if (unlikely(lle->lle_extent->e_start != 0)) {
513                 CERROR("%s: DOM entry must be the first stripe in a mirror\n",
514                        lov2obd(dev->ld_lov)->obd_name);
515                 dump_lsm(D_ERROR, lov->lo_lsm);
516                 RETURN(-EINVAL);
517         }
518
519         /* find proper MDS device */
520         rc = lov_fld_lookup(dev, fid, &idx);
521         if (rc)
522                 RETURN(rc);
523
524         LASSERTF(dev->ld_md_tgts[idx].ldm_mdc != NULL,
525                  "LOV md target[%u] is NULL\n", idx);
526
527         /* check that the LSM entry is DoM; more checks are needed */
528         LASSERT(lsme->lsme_stripe_count == 0);
529
530         /*
531          * Create lower cl_objects.
532          */
533         mdcdev = dev->ld_md_tgts[idx].ldm_mdc;
534
535         LASSERTF(mdcdev != NULL, "non-initialized mdc subdev\n");
536
537         /* the DoM object has no oinfo in the LSM entry, so allocate one here */
538         OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, GFP_NOFS);
539         if (loi == NULL)
540                 RETURN(-ENOMEM);
541
542         fid_to_ostid(lu_object_fid(lov2lu(lov)), &loi->loi_oi);
543
544         sconf->u.coc_oinfo = loi;
545 again:
546         clo = lov_sub_find(env, mdcdev, fid, sconf);
547         if (IS_ERR(clo))
548                 GOTO(out, rc = PTR_ERR(clo));
549
550         rc = lov_init_sub(env, lov, clo, loi, lov_comp_index(index, 0));
551         if (rc == -EAGAIN) /* try again */
552                 goto again;
553         else if (rc != 0)
554                 GOTO(out, rc);
555
556         lle->lle_dom.lo_dom = cl2lovsub(clo);
557         spin_lock_init(&lle->lle_dom.lo_dom_r0.lo_sub_lock);
558         lle->lle_dom.lo_dom_r0.lo_nr = 1;
559         lle->lle_dom.lo_dom_r0.lo_sub = &lle->lle_dom.lo_dom;
560         lle->lle_dom.lo_loi = loi;
561
562         rc = lov_page_slice_fixup(lov, clo);
563         RETURN(rc);
564
565 out:
566         if (loi != NULL)
567                 OBD_SLAB_FREE_PTR(loi, lov_oinfo_slab);
568         return rc;
569 }
570
571 /**
572  * Implementation of lov_comp_layout_entry_ops::lco_fini for the DOM object.
573  *
574  * Finish the DOM object and free related memory.
575  *
576  * \param[in] env       execution environment
577  * \param[in] lle       Composite LOV layout entry
579  */
580 static void lov_fini_dom(const struct lu_env *env,
581                          struct lov_layout_entry *lle)
582 {
583         if (lle->lle_dom.lo_dom != NULL)
584                 lle->lle_dom.lo_dom = NULL;
585         if (lle->lle_dom.lo_loi != NULL)
586                 OBD_SLAB_FREE_PTR(lle->lle_dom.lo_loi, lov_oinfo_slab);
587 }
588
589 static struct lov_comp_layout_entry_ops dom_ops = {
590         .lco_init = lov_init_dom,
591         .lco_fini = lov_fini_dom,
592         .lco_getattr = lov_attr_get_dom,
593 };
594
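/*
 * Implementation of lov_layout_operations::llo_init for composite (plain,
 * PFL and FLR) layouts: allocate the layout entry and mirror arrays,
 * initialize each supported component through its lov_comp_layout_entry_ops,
 * and select the preferred mirror for reads.
 */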
595 static int lov_init_composite(const struct lu_env *env, struct lov_device *dev,
596                               struct lov_object *lov, struct lov_stripe_md *lsm,
597                               const struct cl_object_conf *conf,
598                               union lov_layout_state *state)
599 {
600         struct lov_layout_composite *comp = &state->composite;
601         struct lov_layout_entry *lle;
602         struct lov_mirror_entry *lre;
603         unsigned int entry_count;
604         unsigned int psz = 0;
605         unsigned int mirror_count;
606         int flr_state = lsm->lsm_flags & LCM_FL_FLR_MASK;
607         int result = 0;
608         unsigned int seq;
609         int i, j, preference;
610         __u64 dom_size = 0;
611
612         ENTRY;
613
614         LASSERT(lsm->lsm_entry_count > 0);
615         LASSERT(lov->lo_lsm == NULL);
616         lov->lo_lsm = lsm_addref(lsm);
617         set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
618
619         dump_lsm(D_INODE, lsm);
620
621         entry_count = lsm->lsm_entry_count;
622
623         comp->lo_flags = lsm->lsm_flags;
624         comp->lo_mirror_count = lsm->lsm_mirror_count + 1;
625         comp->lo_entry_count = lsm->lsm_entry_count;
626         comp->lo_preferred_mirror = -1;
627
628         if (equi(flr_state == LCM_FL_NONE, comp->lo_mirror_count > 1))
629                 RETURN(-EINVAL);
630
631         OBD_ALLOC_PTR_ARRAY(comp->lo_mirrors, comp->lo_mirror_count);
632         if (comp->lo_mirrors == NULL)
633                 RETURN(-ENOMEM);
634
635         OBD_ALLOC_PTR_ARRAY(comp->lo_entries, entry_count);
636         if (comp->lo_entries == NULL)
637                 RETURN(-ENOMEM);
638
639         /* Initiate all entry types and extents data at first */
640         for (i = 0, j = 0, mirror_count = 1; i < entry_count; i++) {
641                 int mirror_id = 0;
642
643                 lle = &comp->lo_entries[i];
644
645                 lle->lle_lsme = lsm->lsm_entries[i];
646                 lle->lle_type = lov_entry_type(lle->lle_lsme);
647                 lle->lle_preference = 0;
648                 switch (lle->lle_type) {
649                 case LOV_PATTERN_RAID0:
650                         lle->lle_comp_ops = &raid0_ops;
651                         break;
652                 case LOV_PATTERN_MDT:
653                         /* Allowed to have several DOM stripes in different
654                          * mirrors with the same DoM size.
655                          */
656                         if (!dom_size) {
657                                 dom_size = lle->lle_lsme->lsme_extent.e_end;
658                         } else if (dom_size !=
659                                    lle->lle_lsme->lsme_extent.e_end) {
660                                 CERROR("%s: DOM entries with different sizes\n",
661                                        lov2obd(dev->ld_lov)->obd_name);
662                                 dump_lsm(D_ERROR, lsm);
663                                 RETURN(-EINVAL);
664                         }
665                         lle->lle_comp_ops = &dom_ops;
666                         break;
667                 case LOV_PATTERN_FOREIGN:
668                         lle->lle_comp_ops = NULL;
669                         break;
670                 default: {
671                         static int nr;
672                         static ktime_t time2_clear_nr;
673                         ktime_t now = ktime_get();
674
675                         lle->lle_comp_ops = NULL;
676
677                         /* limit this message to 20 occurrences within 24h */
678                         if (ktime_after(now, time2_clear_nr)) {
679                                 nr = 0;
680                                 time2_clear_nr = ktime_add_ms(now,
681                                                       24 * 3600 * MSEC_PER_SEC);
682                         }
683                         if (nr++ < 20) {
684                                 CWARN("%s: unknown layout entry %d pattern %#x,"
685                                       " possibly an unrecognized component set"
686                                       " by another client; skipping to the"
687                                       " next component\n",
688                                         lov2obd(dev->ld_lov)->obd_name,
689                                         i,
690                                         lsm->lsm_entries[i]->lsme_pattern);
691                                 dump_lsm(D_ERROR, lsm);
692                         }
693                 }
694                 }
695
696                 lle->lle_extent = &lle->lle_lsme->lsme_extent;
697                 if (!lov_pattern_supported(
698                                 lov_pattern(lle->lle_lsme->lsme_pattern)) ||
699                     !lov_supported_comp_magic(lle->lle_lsme->lsme_magic))
700                         lle->lle_valid = 0;
701                 else
702                         lle->lle_valid =
703                                 !(lle->lle_lsme->lsme_flags & LCME_FL_STALE);
704
705                 if (flr_state != LCM_FL_NONE)
706                         mirror_id = mirror_id_of(lle->lle_lsme->lsme_id);
707
708                 lre = &comp->lo_mirrors[j];
709                 if (i > 0) {
710                         if (mirror_id == lre->lre_mirror_id) {
711                                 lre->lre_valid |= lle->lle_valid;
712                                 lre->lre_stale |= !lle->lle_valid;
713                                 lre->lre_foreign |=
714                                         lsme_is_foreign(lle->lle_lsme);
715                                 lre->lre_end = i;
716                                 continue;
717                         }
718
719                         /* new mirror detected, assume that the mirrors
720                          * are sorted in the layout */
721                         ++mirror_count;
722                         ++j;
723                         if (j >= comp->lo_mirror_count)
724                                 break;
725
726                         lre = &comp->lo_mirrors[j];
727                 }
728
729                 /* entries must be sorted by mirrors */
730                 lre->lre_mirror_id = mirror_id;
731                 lre->lre_start = lre->lre_end = i;
732                 lre->lre_preference = lle->lle_lsme->lsme_flags &
733                                         LCME_FL_PREF_RD ? 1000 : 0;
734                 lre->lre_valid = lle->lle_valid;
735                 lre->lre_stale = !lle->lle_valid;
736                 lre->lre_foreign = lsme_is_foreign(lle->lle_lsme);
737         }
738
739         /* sanity check for FLR */
740         if (mirror_count != comp->lo_mirror_count) {
741                 CDEBUG(D_INODE, DFID
742                        " doesn't have the # of mirrors it claims, %u/%u\n",
743                        PFID(lu_object_fid(lov2lu(lov))), mirror_count,
744                        comp->lo_mirror_count + 1);
745
746                 GOTO(out, result = -EINVAL);
747         }
748
749         lov_foreach_layout_entry(lov, lle) {
750                 int index = lov_layout_entry_index(lov, lle);
751
752                 /*
753                  * If the component has not been instantiated on the MDS side
754                  * then, for a PFL layout, the components beyond this one will
755                  * be instantiated dynamically later on file write/trunc ops.
756                  */
757                 if (!lsme_inited(lle->lle_lsme))
758                         continue;
759
760                 if (lsme_is_foreign(lle->lle_lsme))
761                         continue;
762
763                 if (!lov_pattern_supported(
764                                 lov_pattern(lle->lle_lsme->lsme_pattern)) ||
765                     !lov_supported_comp_magic(lle->lle_lsme->lsme_magic))
766                         continue;
767
768                 result = lle->lle_comp_ops->lco_init(env, dev, lov, index,
769                                                      conf, lle);
770                 if (result < 0)
771                         break;
772
773                 LASSERT(ergo(psz > 0, psz == result));
774                 psz = result;
775         }
776
777         if (psz > 0)
778                 cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;
779
780         /* Decide the preferred mirror. Use the hash value of the lov_object
781          * so that different clients use different mirrors for reads. */
782         mirror_count = 0;
783         preference = -1;
784         seq = cfs_hash_long((unsigned long)lov, 8);
785         for (i = 0; i < comp->lo_mirror_count; i++) {
786                 unsigned int idx = (i + seq) % comp->lo_mirror_count;
787
788                 lre = lov_mirror_entry(lov, idx);
789                 if (lre->lre_stale)
790                         continue;
791
792                 if (lre->lre_foreign)
793                         continue;
794
795                 if (!lre->lre_valid)
796                         continue;
797
798                 mirror_count++; /* valid mirror */
799
800                 /* aggregated preference of all involved OSTs */
801                 for (j = lre->lre_start; j <= lre->lre_end; j++) {
802                         lre->lre_preference +=
803                                 comp->lo_entries[j].lle_preference;
804                 }
805
806                 if (lre->lre_preference > preference) {
807                         preference = lre->lre_preference;
808                         comp->lo_preferred_mirror = idx;
809                 }
810         }
811         if (!mirror_count) {
812                 CDEBUG(D_INODE, DFID
813                        " doesn't have any valid mirrors\n",
814                        PFID(lu_object_fid(lov2lu(lov))));
815
816                 comp->lo_preferred_mirror = 0;
817         }
818
819         LASSERT(comp->lo_preferred_mirror >= 0);
820
821         EXIT;
822 out:
823         return result > 0 ? 0 : result;
824 }
825
826 static int lov_init_empty(const struct lu_env *env, struct lov_device *dev,
827                           struct lov_object *lov, struct lov_stripe_md *lsm,
828                           const struct cl_object_conf *conf,
829                           union lov_layout_state *state)
830 {
831         return 0;
832 }
833
834 static int lov_init_released(const struct lu_env *env,
835                              struct lov_device *dev, struct lov_object *lov,
836                              struct lov_stripe_md *lsm,
837                              const struct cl_object_conf *conf,
838                              union lov_layout_state *state)
839 {
840         LASSERT(lsm != NULL);
841         LASSERT(lsm->lsm_is_released);
842         LASSERT(lov->lo_lsm == NULL);
843
844         lov->lo_lsm = lsm_addref(lsm);
845         return 0;
846 }
847
848 static int lov_init_foreign(const struct lu_env *env,
849                             struct lov_device *dev, struct lov_object *lov,
850                             struct lov_stripe_md *lsm,
851                             const struct cl_object_conf *conf,
852                             union lov_layout_state *state)
853 {
854         LASSERT(lsm != NULL);
855         LASSERT(lov->lo_type == LLT_FOREIGN);
856         LASSERT(lov->lo_lsm == NULL);
857
858         lov->lo_lsm = lsm_addref(lsm);
859         return 0;
860 }
861
862 static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
863                             union lov_layout_state *state)
864 {
865         LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED ||
866                 lov->lo_type == LLT_FOREIGN);
867
868         lov_layout_wait(env, lov);
869         return 0;
870 }
871
872 static int lov_delete_composite(const struct lu_env *env,
873                                 struct lov_object *lov,
874                                 union lov_layout_state *state)
875 {
876         struct lov_layout_entry *entry;
877         int rc;
878
879         ENTRY;
880
881         dump_lsm(D_INODE, lov->lo_lsm);
882
883         lov_layout_wait(env, lov);
884         lov_foreach_layout_entry(lov, entry) {
885                 struct lov_stripe_md_entry *lsme = entry->lle_lsme;
886
887                 if (lsme) {
888                         if (lsme_is_foreign(lsme))
889                                 continue;
890                         if (!lov_pattern_supported(lov_pattern(
891                                                         lsme->lsme_pattern)) ||
892                             !lov_supported_comp_magic(lsme->lsme_magic))
893                                 continue;
894                 }
895
896                 rc = lov_delete_raid0(env, lov, entry);
897                 if (rc)
898                         RETURN(rc);
899         }
900
901         RETURN(0);
902 }
903
904 static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov,
905                            union lov_layout_state *state)
906 {
907         LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
908 }
909
910 static void lov_fini_composite(const struct lu_env *env,
911                                struct lov_object *lov,
912                                union lov_layout_state *state)
913 {
914         struct lov_layout_composite *comp = &state->composite;
915         ENTRY;
916
917         if (comp->lo_entries != NULL) {
918                 struct lov_layout_entry *entry;
919
920                 lov_foreach_layout_entry(lov, entry)
921                         if (entry->lle_comp_ops)
922                                 entry->lle_comp_ops->lco_fini(env, entry);
923
924                 OBD_FREE_PTR_ARRAY(comp->lo_entries, comp->lo_entry_count);
925                 comp->lo_entries = NULL;
926         }
927
928         if (comp->lo_mirrors != NULL) {
929                 OBD_FREE_PTR_ARRAY(comp->lo_mirrors, comp->lo_mirror_count);
930                 comp->lo_mirrors = NULL;
931         }
932
933         memset(comp, 0, sizeof(*comp));
934
935         dump_lsm(D_INODE, lov->lo_lsm);
936         lov_free_memmd(&lov->lo_lsm);
937
938         EXIT;
939 }
940
941 static void lov_fini_released(const struct lu_env *env, struct lov_object *lov,
942                                 union lov_layout_state *state)
943 {
944         ENTRY;
945         dump_lsm(D_INODE, lov->lo_lsm);
946         lov_free_memmd(&lov->lo_lsm);
947         EXIT;
948 }
949
950 static int lov_print_empty(const struct lu_env *env, void *cookie,
951                            lu_printer_t p, const struct lu_object *o)
952 {
953         (*p)(env, cookie, "empty %d\n",
954              test_bit(LO_LAYOUT_INVALID, &lu2lov(o)->lo_obj_flags));
955         return 0;
956 }
957
958 static int lov_print_composite(const struct lu_env *env, void *cookie,
959                                lu_printer_t p, const struct lu_object *o)
960 {
961         struct lov_object *lov = lu2lov(o);
962         struct lov_stripe_md *lsm = lov->lo_lsm;
963         int i;
964
965         (*p)(env, cookie, "entries: %d, %s, lsm{%p 0x%08X %d %u}:\n",
966              lsm->lsm_entry_count,
967              test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags) ? "invalid" :
968              "valid", lsm, lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
969              lsm->lsm_layout_gen);
970
971         for (i = 0; i < lsm->lsm_entry_count; i++) {
972                 struct lov_stripe_md_entry *lse = lsm->lsm_entries[i];
973                 struct lov_layout_entry *lle = lov_entry(lov, i);
974
975                 (*p)(env, cookie,
976                      DEXT ": { 0x%08X, %u, %#x, %u, %#x, %u, %u }\n",
977                      PEXT(&lse->lsme_extent), lse->lsme_magic,
978                      lse->lsme_id, lse->lsme_pattern, lse->lsme_layout_gen,
979                      lse->lsme_flags, lse->lsme_stripe_count,
980                      lse->lsme_stripe_size);
981
982                 if (!lsme_is_foreign(lse))
983                         lov_print_raid0(env, cookie, p, lle);
984         }
985
986         return 0;
987 }
988
989 static int lov_print_released(const struct lu_env *env, void *cookie,
990                               lu_printer_t p, const struct lu_object *o)
991 {
992         struct lov_object       *lov = lu2lov(o);
993         struct lov_stripe_md    *lsm = lov->lo_lsm;
994
995         (*p)(env, cookie,
996                 "released: %s, lsm{%p 0x%08X %d %u}:\n",
997                 test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags) ? "invalid" :
998                 "valid", lsm, lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
999                 lsm->lsm_layout_gen);
1000         return 0;
1001 }
1002
1003 static int lov_print_foreign(const struct lu_env *env, void *cookie,
1004                                 lu_printer_t p, const struct lu_object *o)
1005 {
1006         struct lov_object       *lov = lu2lov(o);
1007         struct lov_stripe_md    *lsm = lov->lo_lsm;
1008
1009         (*p)(env, cookie,
1010                 "foreign: %s, lsm{%p 0x%08X %d %u}:\n",
1011                 test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags) ?
1012                 "invalid" : "valid", lsm,
1013                 lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
1014                 lsm->lsm_layout_gen);
1015         (*p)(env, cookie,
1016                 "raw_ea_content '%.*s'\n",
1017                 (int)lsm->lsm_foreign_size, (char *)lsm_foreign(lsm));
1018         return 0;
1019 }
1020
1021 /**
1022  * Implements cl_object_operations::coo_attr_get() method for an object
1023  * without stripes (LLT_EMPTY layout type).
1024  *
1025  * The only attribute this layer is authoritative for in this case is
1026  * cl_attr::cat_blocks, which is 0.
1027  */
1028 static int lov_attr_get_empty(const struct lu_env *env, struct cl_object *obj,
1029                               struct cl_attr *attr)
1030 {
1031         attr->cat_blocks = 0;
1032         return 0;
1033 }
1034
1035 /**
1036  * The MDT returns st_blocks=1 for an HSM-released file (see LU-3864).
1037  * The LOV layer should also return st_blocks=1 for an HSM-released file
1038  * in the call ->coo_attr_get().
1039  * Otherwise the client may report a block count of 0, which causes tools
1040  * like tar to consider the file fully sparse and to archive it as-is
1041  * without attempting to access/restore its content.
1042  */
1043 static int lov_attr_get_released(const struct lu_env *env,
1044                                  struct cl_object *obj, struct cl_attr *attr)
1045 {
1046         if (attr->cat_size == 0)
1047                 attr->cat_blocks = 0;
1048         else
1049                 attr->cat_blocks = 1;
1050
1051         return 0;
1052 }
1053
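/*
 * Merge the attributes of all valid, instantiated components into one set
 * of attributes for the composite object: block counts are summed, size and
 * KMS are maximized, and timestamps take the newest value.
 */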
1054 static int lov_attr_get_composite(const struct lu_env *env,
1055                                   struct cl_object *obj,
1056                                   struct cl_attr *attr)
1057 {
1058         struct lov_object       *lov = cl2lov(obj);
1059         struct lov_layout_entry *entry;
1060         int                      result = 0;
1061
1062         ENTRY;
1063
1064         attr->cat_size = 0;
1065         attr->cat_blocks = 0;
1066         attr->cat_kms = 0;
1067
1068         lov_foreach_layout_entry(lov, entry) {
1069                 struct cl_attr *lov_attr = NULL;
1070                 int index = lov_layout_entry_index(lov, entry);
1071
1072                 if (!entry->lle_valid)
1073                         continue;
1074
1075                 /* PFL: This component has not been init-ed. */
1076                 if (!lsm_entry_inited(lov->lo_lsm, index))
1077                         continue;
1078
1079                 if (lsm_entry_is_foreign(lov->lo_lsm, index))
1080                         continue;
1081
1082                 result = entry->lle_comp_ops->lco_getattr(env, lov, index,
1083                                                           entry, &lov_attr);
1084                 if (result < 0)
1085                         RETURN(result);
1086
1087                 if (lov_attr == NULL)
1088                         continue;
1089
1090                 CDEBUG(D_INODE, "COMP ID #%i: s=%llu m=%llu a=%llu c=%llu "
1091                        "b=%llu\n", index - 1, lov_attr->cat_size,
1092                        lov_attr->cat_mtime, lov_attr->cat_atime,
1093                        lov_attr->cat_ctime, lov_attr->cat_blocks);
1094
1095                 /* merge results */
1096                 if (lov_attr->cat_kms_valid)
1097                         attr->cat_kms_valid = 1;
1098                 attr->cat_blocks += lov_attr->cat_blocks;
1099                 if (attr->cat_size < lov_attr->cat_size)
1100                         attr->cat_size = lov_attr->cat_size;
1101                 if (attr->cat_kms < lov_attr->cat_kms)
1102                         attr->cat_kms = lov_attr->cat_kms;
1103                 if (attr->cat_atime < lov_attr->cat_atime)
1104                         attr->cat_atime = lov_attr->cat_atime;
1105                 if (attr->cat_ctime < lov_attr->cat_ctime)
1106                         attr->cat_ctime = lov_attr->cat_ctime;
1107                 if (attr->cat_mtime < lov_attr->cat_mtime)
1108                         attr->cat_mtime = lov_attr->cat_mtime;
1109         }
1110
1111         RETURN(0);
1112 }
1113
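/*
 * Flush pages covered by a DoM lock. Only the Data-on-MDT component, if
 * any, is flushed; -ENODATA is returned when the layout has no DoM part.
 */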
1114 static int lov_flush_composite(const struct lu_env *env,
1115                                struct cl_object *obj,
1116                                struct ldlm_lock *lock)
1117 {
1118         struct lov_object *lov = cl2lov(obj);
1119         struct lov_layout_entry *lle;
1120         int rc = -ENODATA;
1121
1122         ENTRY;
1123
1124         lov_foreach_layout_entry(lov, lle) {
1125                 if (!lsme_is_dom(lle->lle_lsme))
1126                         continue;
1127                 rc = cl_object_flush(env, lovsub2cl(lle->lle_dom.lo_dom), lock);
1128                 break;
1129         }
1130
1131         RETURN(rc);
1132 }
1133
1134 static int lov_flush_empty(const struct lu_env *env, struct cl_object *obj,
1135                            struct ldlm_lock *lock)
1136 {
1137         return 0;
1138 }
1139
1140 static const struct lov_layout_operations lov_dispatch[] = {
1141         [LLT_EMPTY] = {
1142                 .llo_init      = lov_init_empty,
1143                 .llo_delete    = lov_delete_empty,
1144                 .llo_fini      = lov_fini_empty,
1145                 .llo_print     = lov_print_empty,
1146                 .llo_page_init = lov_page_init_empty,
1147                 .llo_lock_init = lov_lock_init_empty,
1148                 .llo_io_init   = lov_io_init_empty,
1149                 .llo_getattr   = lov_attr_get_empty,
1150                 .llo_flush     = lov_flush_empty,
1151         },
1152         [LLT_RELEASED] = {
1153                 .llo_init      = lov_init_released,
1154                 .llo_delete    = lov_delete_empty,
1155                 .llo_fini      = lov_fini_released,
1156                 .llo_print     = lov_print_released,
1157                 .llo_page_init = lov_page_init_empty,
1158                 .llo_lock_init = lov_lock_init_empty,
1159                 .llo_io_init   = lov_io_init_released,
1160                 .llo_getattr   = lov_attr_get_released,
1161                 .llo_flush     = lov_flush_empty,
1162         },
1163         [LLT_COMP] = {
1164                 .llo_init      = lov_init_composite,
1165                 .llo_delete    = lov_delete_composite,
1166                 .llo_fini      = lov_fini_composite,
1167                 .llo_print     = lov_print_composite,
1168                 .llo_page_init = lov_page_init_composite,
1169                 .llo_lock_init = lov_lock_init_composite,
1170                 .llo_io_init   = lov_io_init_composite,
1171                 .llo_getattr   = lov_attr_get_composite,
1172                 .llo_flush     = lov_flush_composite,
1173         },
1174         [LLT_FOREIGN] = {
1175                 .llo_init      = lov_init_foreign,
1176                 .llo_delete    = lov_delete_empty,
1177                 .llo_fini      = lov_fini_released,
1178                 .llo_print     = lov_print_foreign,
1179                 .llo_page_init = lov_page_init_foreign,
1180                 .llo_lock_init = lov_lock_init_empty,
1181                 .llo_io_init   = lov_io_init_empty,
1182                 .llo_getattr   = lov_attr_get_empty,
1183                 .llo_flush     = lov_flush_empty,
1184         },
1185 };
1186
1187 /**
1188  * Performs a double-dispatch based on the layout type of an object.
1189  */
1190 #define LOV_2DISPATCH_NOLOCK(obj, op, ...)              \
1191 ({                                                      \
1192         struct lov_object *__obj = (obj);               \
1193         enum lov_layout_type __llt;                     \
1194                                                         \
1195         __llt = __obj->lo_type;                         \
1196         LASSERT(__llt < ARRAY_SIZE(lov_dispatch));      \
1197         lov_dispatch[__llt].op(__VA_ARGS__);            \
1198 })
1199
1200 /**
1201  * Return lov_layout_type associated with a given lsm
1202  */
1203 static enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
1204 {
1205         if (lsm == NULL)
1206                 return LLT_EMPTY;
1207
1208         if (lsm->lsm_is_released)
1209                 return LLT_RELEASED;
1210
1211         if (lsm->lsm_magic == LOV_MAGIC_V1 ||
1212             lsm->lsm_magic == LOV_MAGIC_V3 ||
1213             lsm->lsm_magic == LOV_MAGIC_COMP_V1)
1214                 return LLT_COMP;
1215
1216         if (lsm->lsm_magic == LOV_MAGIC_FOREIGN)
1217                 return LLT_FOREIGN;
1218
1219         return LLT_EMPTY;
1220 }
1221
1222 static inline void lov_conf_freeze(struct lov_object *lov)
1223 {
1224         CDEBUG(D_INODE, "To take share lov(%p) owner %p/%p\n",
1225                 lov, lov->lo_owner, current);
1226         if (lov->lo_owner != current)
1227                 down_read(&lov->lo_type_guard);
1228 }
1229
1230 static inline void lov_conf_thaw(struct lov_object *lov)
1231 {
1232         CDEBUG(D_INODE, "To release share lov(%p) owner %p/%p\n",
1233                 lov, lov->lo_owner, current);
1234         if (lov->lo_owner != current)
1235                 up_read(&lov->lo_type_guard);
1236 }
1237
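/*
 * Like LOV_2DISPATCH_NOLOCK(), but optionally takes the layout type lock
 * for reading around the dispatched call. For example, given a
 * struct lov_object *lov:
 *
 *   LOV_2DISPATCH_MAYLOCK(lov, llo_print, 1, env, cookie, p, obj);
 *
 * dispatches to the llo_print method of the current layout type under
 * lov_conf_freeze()/lov_conf_thaw().
 */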
1238 #define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...)                       \
1239 ({                                                                      \
1240         struct lov_object                      *__obj = (obj);          \
1241         int                                     __lock = !!(lock);      \
1242         typeof(lov_dispatch[0].op(__VA_ARGS__)) __result;               \
1243                                                                         \
1244         if (__lock)                                                     \
1245                 lov_conf_freeze(__obj);                                 \
1246         __result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__);          \
1247         if (__lock)                                                     \
1248                 lov_conf_thaw(__obj);                                   \
1249         __result;                                                       \
1250 })
1251
1252 /**
1253  * Performs a locked double-dispatch based on the layout type of an object.
1254  */
1255 #define LOV_2DISPATCH(obj, op, ...)                     \
1256         LOV_2DISPATCH_MAYLOCK(obj, op, 1, __VA_ARGS__)
1257
1258 #define LOV_2DISPATCH_VOID(obj, op, ...)                                \
1259 do {                                                                    \
1260         struct lov_object                      *__obj = (obj);          \
1261         enum lov_layout_type                    __llt;                  \
1262                                                                         \
1263         lov_conf_freeze(__obj);                                         \
1264         __llt = __obj->lo_type;                                         \
1265         LASSERT(__llt < ARRAY_SIZE(lov_dispatch));                      \
1266         lov_dispatch[__llt].op(__VA_ARGS__);                            \
1267         lov_conf_thaw(__obj);                                           \
1268 } while (0)
1269
1270 static void lov_conf_lock(struct lov_object *lov)
1271 {
1272         LASSERT(lov->lo_owner != current);
1273         down_write(&lov->lo_type_guard);
1274         LASSERT(lov->lo_owner == NULL);
1275         lov->lo_owner = current;
1276         CDEBUG(D_INODE, "Took exclusive lov(%p) owner %p\n",
1277                 lov, lov->lo_owner);
1278 }
1279
1280 static void lov_conf_unlock(struct lov_object *lov)
1281 {
1282         CDEBUG(D_INODE, "To release exclusive lov(%p) owner %p\n",
1283                 lov, lov->lo_owner);
1284         lov->lo_owner = NULL;
1285         up_write(&lov->lo_type_guard);
1286 }
1287
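/*
 * Wait until all in-flight IO against this object has drained, i.e. until
 * lo_active_ios drops to zero; a layout can only be torn down afterwards.
 */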
1288 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
1289 {
1290         ENTRY;
1291
1292         while (atomic_read(&lov->lo_active_ios) > 0) {
1293                 CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
1294                         PFID(lu_object_fid(lov2lu(lov))),
1295                         atomic_read(&lov->lo_active_ios));
1296
1297                 wait_event_idle(lov->lo_waitq,
1298                                 atomic_read(&lov->lo_active_ios) == 0);
1299         }
1300         RETURN(0);
1301 }
1302
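/*
 * Switch the object to the layout type implied by the new stripe metadata:
 * prune cached pages, delete and finalize the old layout state, then
 * initialize the new one. If the new layout cannot be applied, the object
 * degrades to LLT_EMPTY.
 */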
1303 static int lov_layout_change(const struct lu_env *unused,
1304                              struct lov_object *lov, struct lov_stripe_md *lsm,
1305                              const struct cl_object_conf *conf)
1306 {
1307         enum lov_layout_type llt = lov_type(lsm);
1308         union lov_layout_state *state = &lov->u;
1309         const struct lov_layout_operations *old_ops;
1310         const struct lov_layout_operations *new_ops;
1311         struct lov_device *lov_dev = lov_object_dev(lov);
1312         struct lu_env *env;
1313         __u16 refcheck;
1314         int rc;
1315         ENTRY;
1316
1317         LASSERT(lov->lo_type < ARRAY_SIZE(lov_dispatch));
1318
1319         env = cl_env_get(&refcheck);
1320         if (IS_ERR(env))
1321                 RETURN(PTR_ERR(env));
1322
1323         LASSERT(llt < ARRAY_SIZE(lov_dispatch));
1324
1325         CDEBUG(D_INODE, DFID" from %s to %s\n",
1326                PFID(lu_object_fid(lov2lu(lov))),
1327                llt2str(lov->lo_type), llt2str(llt));
1328
1329         old_ops = &lov_dispatch[lov->lo_type];
1330         new_ops = &lov_dispatch[llt];
1331
1332         rc = cl_object_prune(env, &lov->lo_cl);
1333         if (rc != 0) {
1334                 if (rc == -EAGAIN)
1335                         set_bit(LO_NEED_INODE_LOCK, &lov->lo_obj_flags);
1336                 GOTO(out, rc);
1337         }
1338
1339         rc = old_ops->llo_delete(env, lov, &lov->u);
1340         if (rc != 0)
1341                 GOTO(out, rc);
1342
1343         old_ops->llo_fini(env, lov, &lov->u);
1344
1345         LASSERT(atomic_read(&lov->lo_active_ios) == 0);
1346
1347         CDEBUG(D_INODE, DFID "Apply new layout lov %p, type %d\n",
1348                PFID(lu_object_fid(lov2lu(lov))), lov, llt);
1349
1350         /* page bufsize fixup */
1351         cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
1352                 lov_page_slice_fixup(lov, NULL);
1353
1354         lov->lo_type = llt;
1355         rc = new_ops->llo_init(env, lov_dev, lov, lsm, conf, state);
1356         if (rc != 0) {
1357                 struct obd_device *obd = lov2obd(lov_dev->ld_lov);
1358
1359                 CERROR("%s: cannot apply new layout on "DFID" : rc = %d\n",
1360                        obd->obd_name, PFID(lu_object_fid(lov2lu(lov))), rc);
1361                 new_ops->llo_delete(env, lov, state);
1362                 new_ops->llo_fini(env, lov, state);
1363                 /* this file becomes an EMPTY file. */
1364                 lov->lo_type = LLT_EMPTY;
1365                 GOTO(out, rc);
1366         }
1367
1368 out:
1369         cl_env_put(env, &refcheck);
1370         RETURN(rc);
1371 }
1372
1373 /**
1374  * Lov object operations.
1375  */
1376 static int lov_object_init(const struct lu_env *env, struct lu_object *obj,
1377                            const struct lu_object_conf *conf)
1378 {
1379         struct lov_object            *lov   = lu2lov(obj);
1380         struct lov_device            *dev   = lov_object_dev(lov);
1381         const struct cl_object_conf  *cconf = lu2cl_conf(conf);
1382         union lov_layout_state       *set   = &lov->u;
1383         const struct lov_layout_operations *ops;
1384         struct lov_stripe_md *lsm = NULL;
1385         int rc;
1386         ENTRY;
1387
1388         init_rwsem(&lov->lo_type_guard);
1389         atomic_set(&lov->lo_active_ios, 0);
1390         init_waitqueue_head(&lov->lo_waitq);
1391         cl_object_page_init(lu2cl(obj), 0);
1392
1393         lov->lo_type = LLT_EMPTY;
1394         if (cconf->u.coc_layout.lb_buf != NULL) {
1395                 lsm = lov_unpackmd(dev->ld_lov,
1396                                    cconf->u.coc_layout.lb_buf,
1397                                    cconf->u.coc_layout.lb_len);
1398                 if (IS_ERR(lsm))
1399                         RETURN(PTR_ERR(lsm));
1400
1401                 dump_lsm(D_INODE, lsm);
1402         }
1403
1404         /* no locking is necessary, as object is being created */
1405         lov->lo_type = lov_type(lsm);
1406         ops = &lov_dispatch[lov->lo_type];
1407         rc = ops->llo_init(env, dev, lov, lsm, cconf, set);
1408         if (rc != 0)
1409                 GOTO(out_lsm, rc);
1410
1411 out_lsm:
1412         lov_lsm_put(lsm);
1413
1414         RETURN(rc);
1415 }
1416
1417 static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
1418                         const struct cl_object_conf *conf)
1419 {
1420         struct lov_stripe_md *lsm = NULL;
1421         struct lov_object *lov = cl2lov(obj);
1422         struct cl_object *top = cl_object_top(obj);
1423         bool lock_inode = false;
1424         bool inode_size_locked = false;
1425         int result = 0;
1426         ENTRY;
1427
1428         if (conf->coc_opc == OBJECT_CONF_SET &&
1429             conf->u.coc_layout.lb_buf != NULL) {
1430                 lsm = lov_unpackmd(lov_object_dev(lov)->ld_lov,
1431                                    conf->u.coc_layout.lb_buf,
1432                                    conf->u.coc_layout.lb_len);
1433                 if (IS_ERR(lsm))
1434                         RETURN(PTR_ERR(lsm));
1435                 dump_lsm(D_INODE, lsm);
1436         }
1437
1438         if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
1439                 set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
1440                 GOTO(out_lsm, result = 0);
1441         }
1442
1443 retry:
1444         lov_conf_lock(lov);
1445         if (conf->coc_opc == OBJECT_CONF_WAIT) {
1446                 if (test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags) &&
1447                     atomic_read(&lov->lo_active_ios) > 0) {
1448                         lov_conf_unlock(lov);
1449                         result = lov_layout_wait(env, lov);
1450                         lov_conf_lock(lov);
1451                 }
1452                 GOTO(out, result);
1453         }
1454
1455         LASSERT(conf->coc_opc == OBJECT_CONF_SET);
1456
1457         /*
1458          * Don't apply an old layout, which can happen if the layout was
1459          * returned without the ldlm lock being held.
1460          * XXX: can we rollback in case of recovery?
1461          */
1462         if (lsm && lov->lo_lsm) {
1463                 u32 oldgen = lov->lo_lsm->lsm_layout_gen & ~LU_LAYOUT_RESYNC;
1464                 u32 newgen = lsm->lsm_layout_gen & ~LU_LAYOUT_RESYNC;
1465
1466                 if (newgen < oldgen) {
1467                         CDEBUG(D_HA, "skip old for "DFID": %d < %d\n",
1468                                PFID(lu_object_fid(lov2lu(lov))),
1469                                (int)newgen, (int)oldgen);
1470                         GOTO(out, result = 0);
1471                 }
1472         }
1473
1474         if ((lsm == NULL && lov->lo_lsm == NULL) ||
1475             ((lsm != NULL && lov->lo_lsm != NULL) &&
1476              (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
1477              (lov->lo_lsm->lsm_flags == lsm->lsm_flags) &&
1478              (lov->lo_lsm->lsm_entries[0]->lsme_pattern ==
1479               lsm->lsm_entries[0]->lsme_pattern))) {
1480                 /* same version of layout */
1481                 clear_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
1482                 GOTO(out, result = 0);
1483         }
1484
1485         /* will change layout - check if there still exists active IO. */
1486         if (atomic_read(&lov->lo_active_ios) > 0) {
1487                 set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
1488                 GOTO(out, result = -EBUSY);
1489         }
1490
1491         if (conf->coc_try) {
1492                 set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
1493                 GOTO(out, result = -ERESTARTSYS);
1494         }
1495
1496         clear_bit(LO_NEED_INODE_LOCK, &lov->lo_obj_flags);
1497         result = lov_layout_change(env, lov, lsm, conf);
1498         if (result) {
1499                 if (result == -EAGAIN &&
1500                     test_bit(LO_NEED_INODE_LOCK, &lov->lo_obj_flags)) {
1501                         /**
1502                          * We need to unlock the lov conf lock and take the inode
1503                          * lock.  It's possible we have already taken the inode's
1504                          * size mutex and/or the layout mutex, so we must keep this
1505                          * lock order, lest a deadlock happens:
1506                          *   inode lock        (ll_inode_lock())
1507                          *   inode size lock   (ll_inode_size_lock())
1508                          *   lov conf lock     (lov_conf_lock())
1509                          *
1510                          * e.g.
1511                          *   vfs_setxattr                inode locked
1512                          *     ll_lov_setstripe_ea_info  inode size locked
1513                          *       ll_prep_inode
1514                          *         cl_file_inode_init
1515                          *           cl_conf_set
1516                          *             lov_conf_set      lov conf locked
1517                          */
1518                         lov_conf_unlock(lov);
1519                         if (cl_object_inode_ops(env, top, COIO_SIZE_UNLOCK,
1520                                                 NULL) == 0)
1521                                 inode_size_locked = true;
1522
1523                         /* take lock in order */
1524                         if (cl_object_inode_ops(
1525                                         env, top, COIO_INODE_LOCK, NULL) == 0)
1526                                 lock_inode = true;
1527                         if (inode_size_locked)
1528                                 cl_object_inode_ops(env, top, COIO_SIZE_LOCK,
1529                                                     NULL);
1530                         goto retry;
1531                 }
1532                 set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
1533         } else {
1534                 clear_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
1535         }
1536         EXIT;
1537
1538 out:
1539         lov_conf_unlock(lov);
1540         if (lock_inode)
1541                 cl_object_inode_ops(env, top, COIO_INODE_UNLOCK, NULL);
1542 out_lsm:
1543         lov_lsm_put(lsm);
1544         CDEBUG(D_INODE, DFID" lo_layout_invalid=%u\n",
1545                PFID(lu_object_fid(lov2lu(lov))),
1546                test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags));
1547         RETURN(result);
1548 }
1549
1550 static void lov_object_delete(const struct lu_env *env, struct lu_object *obj)
1551 {
1552         struct lov_object *lov = lu2lov(obj);
1553
1554         ENTRY;
1555         LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
1556         EXIT;
1557 }
1558
1559 static void lov_object_free(const struct lu_env *env, struct lu_object *obj)
1560 {
1561         struct lov_object *lov = lu2lov(obj);
1562
1563         ENTRY;
1564         LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
1565         lu_object_fini(obj);
1566         OBD_SLAB_FREE_PTR(lov, lov_object_kmem);
1567         EXIT;
1568 }
1569
1570 static int lov_object_print(const struct lu_env *env, void *cookie,
1571                             lu_printer_t p, const struct lu_object *o)
1572 {
1573         return LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
1574 }
1575
1576 static int lov_page_init(const struct lu_env *env, struct cl_object *obj,
1577                          struct cl_page *page, pgoff_t index)
1578 {
1579         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page,
1580                                     index);
1581 }
1582
1583 /**
1584  * Implements cl_object_operations::clo_io_init() method for lov
1585  * layer. Dispatches to the appropriate layout io initialization method.
1586  */
1587 static int lov_io_init(const struct lu_env *env, struct cl_object *obj,
1588                        struct cl_io *io)
1589 {
1590         CL_IO_SLICE_CLEAN(lov_env_io(env), lis_preserved);
1591
1592         CDEBUG(D_INODE, DFID "io %p type %d ignore/verify layout %d/%d\n",
1593                PFID(lu_object_fid(&obj->co_lu)), io, io->ci_type,
1594                io->ci_ignore_layout, io->ci_verify_layout);
1595
1596         /* An IO of type CIT_MISC with ci_ignore_layout set is usually invoked
1597          * from the OSC layer. It shouldn't take the lov layout conf lock in that
1598          * case, because as long as the OSC object exists, the layout can't be
1599          * reconfigured. */
1600         return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_io_init,
1601                         !(io->ci_ignore_layout && io->ci_type == CIT_MISC),
1602                         env, obj, io);
1603 }
1604
1605 /**
1606  * An implementation of cl_object_operations::clo_attr_get() method for lov
1607  * layer. For raid0 layout this collects and merges attributes of all
1608  * sub-objects.
1609  */
1610 static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
1611                         struct cl_attr *attr)
1612 {
1613         /* do not take lock, as this function is called under a
1614          * spin-lock. Layout is protected from changing by ongoing IO. */
1615         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
1616 }
1617
1618 static int lov_attr_update(const struct lu_env *env, struct cl_object *obj,
1619                            const struct cl_attr *attr, unsigned valid)
1620 {
1621         /*
1622          * No dispatch is required here, as no layout implements this.
1623          */
1624         return 0;
1625 }
1626
1627 static int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
1628                          struct cl_lock *lock, const struct cl_io *io)
1629 {
1630         /* No need to lock because we've taken one refcount of layout.  */
1631         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_lock_init, env, obj, lock,
1632                                     io);
1633 }
1634
1635 /**
1636  * We calculate on which OST the mapping will end. If the length of the mapping
1637  * is greater than (stripe_size * stripe_count) then the last_stripe will
1638  * be the one just before start_stripe. Otherwise we check whether the mapping
1639  * intersects each OST and find last_stripe from that.
1640  * This function returns the last_stripe and also sets the stripe_count
1641  * over which the mapping is spread.
1642  *
1643  * \param lsm [in]              striping information for the file
1644  * \param index [in]            stripe component index
1645  * \param ext [in]              logical extent of mapping
1646  * \param start_stripe [in]     starting stripe of the mapping
1647  * \param stripe_count [out]    the number of stripes across which to map is
1648  *                              returned
1649  *
1650  * \retval last_stripe          return the last stripe of the mapping
1651  */
1652 static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm, int index,
1653                                    struct lu_extent *ext,
1654                                    int start_stripe, int *stripe_count)
1655 {
1656         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1657         int init_stripe;
1658         int last_stripe;
1659         int i, j;
1660
1661         init_stripe = lov_stripe_number(lsm, index, ext->e_start);
1662
1663         if (ext->e_end - ext->e_start >
1664             lsme->lsme_stripe_size * lsme->lsme_stripe_count) {
1665                 if (init_stripe == start_stripe) {
1666                         last_stripe = (start_stripe < 1) ?
1667                                 lsme->lsme_stripe_count - 1 : start_stripe - 1;
1668                         *stripe_count = lsme->lsme_stripe_count;
1669                 } else if (init_stripe < start_stripe) {
1670                         last_stripe = (init_stripe < 1) ?
1671                                 lsme->lsme_stripe_count - 1 : init_stripe - 1;
1672                         *stripe_count = lsme->lsme_stripe_count -
1673                                         (start_stripe - init_stripe);
1674                 } else {
1675                         last_stripe = init_stripe - 1;
1676                         *stripe_count = init_stripe - start_stripe;
1677                 }
1678         } else {
1679                 for (j = 0, i = start_stripe; j < lsme->lsme_stripe_count;
1680                      i = (i + 1) % lsme->lsme_stripe_count, j++) {
1681                         if (!lov_stripe_intersects(lsm, index, i, ext, NULL,
1682                                                    NULL))
1683                                 break;
1684                         if ((start_stripe != init_stripe) && (i == init_stripe))
1685                                 break;
1686                 }
1687                 *stripe_count = j;
1688                 last_stripe = (start_stripe + j - 1) % lsme->lsme_stripe_count;
1689         }
1690
1691         return last_stripe;
1692 }
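/*
 * Rough worked example (illustrative only): with lsme_stripe_size = 1 MiB
 * and lsme_stripe_count = 4, a 2 MiB extent starting at offset 0 with
 * start_stripe = 0 intersects only stripes 0 and 1, so the loop above sets
 * *stripe_count = 2 and returns last_stripe = 1.  If the extent were longer
 * than stripe_size * stripe_count (4 MiB), every stripe would be covered and
 * last_stripe would simply be the stripe just before start_stripe.
 */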
1693
1694 /**
1695  * Set fe_device and copy extents from local buffer into main return buffer.
1696  *
1697  * \param fiemap [out]          fiemap to hold all extents
1698  * \param lcl_fm_ext [in]       array of fiemap extents obtained from the OSC layer
1699  * \param ost_index [in]        OST index to be written into the fm_device
1700  *                              field for each extent
1701  * \param ext_count [in]        number of extents to be copied
1702  * \param current_extent [in]   where to start copying in the extent array
1703  */
1704 static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap,
1705                                          struct fiemap_extent *lcl_fm_ext,
1706                                          int ost_index, unsigned int ext_count,
1707                                          int current_extent, int abs_stripeno)
1708 {
1709         char            *to;
1710         unsigned int    ext;
1711
1712         for (ext = 0; ext < ext_count; ext++) {
1713                 set_fe_device_stripenr(&lcl_fm_ext[ext], ost_index,
1714                                        abs_stripeno);
1715                 lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
1716         }
1717
1718         /* Copy the fm_extents from fm_local into the return buffer */
1719         to = (char *)fiemap + fiemap_count_to_size(current_extent);
1720         memcpy(to, lcl_fm_ext, ext_count * sizeof(struct fiemap_extent));
1721 }
1722
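/*
 * Size of the temporary buffer used for each per-stripe FIEMAP call below:
 * it holds a struct fiemap header plus roughly seventy fiemap_extent
 * records (the exact count comes from fiemap_size_to_count()).
 */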
1723 #define FIEMAP_BUFFER_SIZE 4096
1724
1725 /**
1726  * Non-zero fe_logical indicates that this is a continuation FIEMAP
1727  * call. The local end offset and the device are sent in the first
1728  * fm_extent. This function calculates the stripe number from the index
1729  * and determines the stripe from which the mapping is to be restarted.
1730  *
1731  * The return value is fm_end_offset, the in-OST offset at which the
1732  * mapping should be restarted. If fm_end_offset=0 is returned, the caller
1733  * will re-calculate the proper offset in the next stripe.
1734  * Note that the first extent is passed to lov_get_info via the value field.
1735  *
1736  * \param fiemap [in]           fiemap request header
1737  * \param lsm [in]              striping information for the file
1738  * \param index [in]            stripe component index
1739  * \param ext [in]              logical extent of mapping
1740  * \param start_stripe [in,out] starting stripe; updated to the stripe to restart from
1741  */
1742 static u64 fiemap_calc_fm_end_offset(struct fiemap *fiemap,
1743                                      struct lov_stripe_md *lsm,
1744                                      int index, struct lu_extent *ext,
1745                                      int *start_stripe)
1746 {
1747         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1748         u64 local_end;
1749         u64 lun_end;
1750         u64 fm_end_offset;
1751         int stripe_no = -1;
1752
1753         if (fiemap->fm_extent_count == 0 ||
1754             fiemap->fm_extents[0].fe_logical == 0)
1755                 return 0;
1756
1757         local_end = fiemap->fm_extents[0].fe_logical;
1758         stripe_no = *start_stripe;
1759
1760         if (stripe_no == -1)
1761                 return -EINVAL;
1762
1763         /* If we have finished mapping on previous device, shift logical
1764          * offset to start of next device */
1765         if (lov_stripe_intersects(lsm, index, stripe_no, ext, NULL, &lun_end) &&
1766             local_end < lun_end) {
1767                 fm_end_offset = local_end;
1768         } else {
1769                 /* This is a special value to indicate that caller should
1770                  * calculate offset in next stripe. */
1771                 fm_end_offset = 0;
1772                 *start_stripe = (stripe_no + 1) % lsme->lsme_stripe_count;
1773         }
1774
1775         return fm_end_offset;
1776 }
1777
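/*
 * Iteration state shared between lov_object_fiemap() and fiemap_for_stripe()
 * while walking the layout: the extent being mapped for the current
 * component entry, how many extents have been collected so far, and where
 * mapping should stop or resume.
 */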
1778 struct fiemap_state {
1779         struct fiemap           *fs_fm;
1780         struct lu_extent        fs_ext;         /* current entry extent */
1781         u64                     fs_length;
1782         u64                     fs_end_offset;  /* last iteration offset */
1783         int                     fs_cur_extent;  /* collected exts so far */
1784         int                     fs_cnt_need;    /* # of extents buf can hold */
1785         int                     fs_start_stripe;
1786         int                     fs_last_stripe;
1787         bool                    fs_device_done; /* enough for this OST */
1788         bool                    fs_finish_stripe; /* reached fs_last_stripe */
1789         bool                    fs_enough;      /* enough for this call */
1790 };
1791
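/*
 * Find and reference the cl_object backing one (component, stripe) pair of
 * a composite layout; returns ERR_PTR(-EINVAL) if the indices are out of
 * range or the sub-object cannot be looked up.
 */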
1792 static struct cl_object *lov_find_subobj(const struct lu_env *env,
1793                                          struct lov_object *lov,
1794                                          struct lov_stripe_md *lsm,
1795                                          int index)
1796 {
1797         struct lov_device       *dev = lu2lov_dev(lov2lu(lov)->lo_dev);
1798         struct lov_thread_info  *lti = lov_env_info(env);
1799         struct lu_fid           *ofid = &lti->lti_fid;
1800         struct lov_oinfo        *oinfo;
1801         struct cl_device        *subdev;
1802         int                     entry = lov_comp_entry(index);
1803         int                     stripe = lov_comp_stripe(index);
1804         int                     ost_idx;
1805         int                     rc;
1806         struct cl_object        *result;
1807
1808         if (lov->lo_type != LLT_COMP)
1809                 GOTO(out, result = NULL);
1810
1811         if (entry >= lsm->lsm_entry_count ||
1812             stripe >= lsm->lsm_entries[entry]->lsme_stripe_count)
1813                 GOTO(out, result = NULL);
1814
1815         oinfo = lsm->lsm_entries[entry]->lsme_oinfo[stripe];
1816         ost_idx = oinfo->loi_ost_idx;
1817         rc = ostid_to_fid(ofid, &oinfo->loi_oi, ost_idx);
1818         if (rc != 0)
1819                 GOTO(out, result = NULL);
1820
1821         subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
1822         result = lov_sub_find(env, subdev, ofid, NULL);
1823 out:
1824         if (result == NULL)
1825                 result = ERR_PTR(-EINVAL);
1826         return result;
1827 }
1828
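/*
 * Map the portion of the requested extent that lives on a single stripe of
 * component @index: repeatedly call cl_object_fiemap() on the backing
 * sub-object, copy the returned extents into @fiemap, and update @fs with
 * how many extents have been collected and whether this stripe/device is
 * finished.
 */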
1829 static int fiemap_for_stripe(const struct lu_env *env, struct cl_object *obj,
1830                              struct lov_stripe_md *lsm, struct fiemap *fiemap,
1831                              size_t *buflen, struct ll_fiemap_info_key *fmkey,
1832                              int index, int stripe_last, int stripeno,
1833                              struct fiemap_state *fs)
1834 {
1835         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1836         struct cl_object *subobj;
1837         struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov;
1838         struct fiemap_extent *fm_ext = &fs->fs_fm->fm_extents[0];
1839         u64 req_fm_len; /* max requested extent coverage */
1840         u64 len_mapped_single_call;
1841         u64 obd_start;
1842         u64 obd_end;
1843         unsigned int ext_count;
1844         /* EOF for object */
1845         bool ost_eof = false;
1846         /* done with required mapping for this OST? */
1847         bool ost_done = false;
1848         int ost_index;
1849         int rc = 0;
1850
1851         fs->fs_device_done = false;
1852         /* Find out range of mapping on this stripe */
1853         if ((lov_stripe_intersects(lsm, index, stripeno, &fs->fs_ext,
1854                                    &obd_start, &obd_end)) == 0)
1855                 return 0;
1856
1857         if (lov_oinfo_is_dummy(lsme->lsme_oinfo[stripeno]))
1858                 return -EIO;
1859
1860         /* If this is a continuation FIEMAP call and we are on
1861          * starting stripe then obd_start needs to be set to
1862          * end_offset */
1863         if (fs->fs_end_offset != 0 && stripeno == fs->fs_start_stripe)
1864                 obd_start = fs->fs_end_offset;
1865
1866         if (lov_size_to_stripe(lsm, index, fs->fs_ext.e_end, stripeno) ==
1867             obd_start)
1868                 return 0;
1869
1870         req_fm_len = obd_end - obd_start + 1;
1871         fs->fs_fm->fm_length = 0;
1872         len_mapped_single_call = 0;
1873
1874         /* find the lovsub object */
1875         subobj = lov_find_subobj(env, cl2lov(obj), lsm,
1876                                  lov_comp_index(index, stripeno));
1877         if (IS_ERR(subobj))
1878                 return PTR_ERR(subobj);
1879         /* If the output buffer is very large and the objects have many
1880          * extents we may need to loop on a single OST repeatedly */
1881         do {
1882                 if (fiemap->fm_extent_count > 0) {
1883                         /* Don't get too many extents. */
1884                         if (fs->fs_cur_extent + fs->fs_cnt_need >
1885                             fiemap->fm_extent_count)
1886                                 fs->fs_cnt_need = fiemap->fm_extent_count -
1887                                                   fs->fs_cur_extent;
1888                 }
1889
1890                 obd_start += len_mapped_single_call;
1891                 fs->fs_fm->fm_length = req_fm_len - len_mapped_single_call;
1892                 req_fm_len = fs->fs_fm->fm_length;
1893                 /**
1894                  * If we've already collected enough extents, request one more
1895                  * to see whether we coincidentally exhausted all available
1896                  * extents, so that FIEMAP_EXTENT_LAST can be set.
1897                  */
1898                 fs->fs_fm->fm_extent_count = fs->fs_enough ?
1899                                              1 : fs->fs_cnt_need;
1900                 fs->fs_fm->fm_mapped_extents = 0;
1901                 fs->fs_fm->fm_flags = fiemap->fm_flags;
1902
1903                 ost_index = lsme->lsme_oinfo[stripeno]->loi_ost_idx;
1904
1905                 if (ost_index < 0 || ost_index >= lov->desc.ld_tgt_count)
1906                         GOTO(obj_put, rc = -EINVAL);
1907                 /* If OST is inactive, return extent with UNKNOWN flag. */
1908                 if (!lov->lov_tgts[ost_index]->ltd_active) {
1909
1910                         fs->fs_fm->fm_mapped_extents = 1;
1911                         if (fs->fs_fm->fm_extent_count == 0)
1912                                 goto inactive_tgt;
1913
1914                         fm_ext[0].fe_logical = obd_start;
1915                         fm_ext[0].fe_length = obd_end - obd_start + 1;
1916                         fm_ext[0].fe_flags |=
1917                                 FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST;
1918
1919                         goto inactive_tgt;
1920                 }
1921
1922                 fs->fs_fm->fm_start = obd_start;
1923                 fs->fs_fm->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
1924                 memcpy(&fmkey->lfik_fiemap, fs->fs_fm, sizeof(*fs->fs_fm));
1925                 *buflen = fiemap_count_to_size(fs->fs_fm->fm_extent_count);
1926
1927                 rc = cl_object_fiemap(env, subobj, fmkey, fs->fs_fm, buflen);
1928                 if (rc != 0)
1929                         GOTO(obj_put, rc);
1930 inactive_tgt:
1931                 ext_count = fs->fs_fm->fm_mapped_extents;
1932                 if (ext_count == 0) {
1933                         ost_done = true;
1934                         fs->fs_device_done = true;
1935                         /* If the last stripe has a hole at the end,
1936                          * we need to return */
1937                         if (stripeno == fs->fs_last_stripe) {
1938                                 fiemap->fm_mapped_extents = 0;
1939                                 fs->fs_finish_stripe = true;
1940                                 GOTO(obj_put, rc);
1941                         }
1942                         break;
1943                 } else if (fs->fs_enough) {
1944                         /*
1945                          * We've collected enough extents and there are
1946                          * more extents after it.
1947                          */
1948                         GOTO(obj_put, rc);
1949                 }
1950
1951                 /* If we just need the number of extents, go to the next device */
1952                 if (fiemap->fm_extent_count == 0) {
1953                         fs->fs_cur_extent += ext_count;
1954                         break;
1955                 }
1956
1957                 /* prepare to copy the retrieved map extents */
1958                 len_mapped_single_call = fm_ext[ext_count - 1].fe_logical +
1959                                          fm_ext[ext_count - 1].fe_length -
1960                                          obd_start;
1961
1962                 /* Have we finished mapping on this device? */
1963                 if (req_fm_len <= len_mapped_single_call) {
1964                         ost_done = true;
1965                         fs->fs_device_done = true;
1966                 }
1967
1968                 /* Clear the EXTENT_LAST flag which can be present on
1969                  * the last extent */
1970                 if (fm_ext[ext_count - 1].fe_flags & FIEMAP_EXTENT_LAST)
1971                         fm_ext[ext_count - 1].fe_flags &= ~FIEMAP_EXTENT_LAST;
1972                 if (lov_stripe_size(lsm, index,
1973                                     fm_ext[ext_count - 1].fe_logical +
1974                                     fm_ext[ext_count - 1].fe_length,
1975                                     stripeno) >= fmkey->lfik_oa.o_size) {
1976                         ost_eof = true;
1977                         fs->fs_device_done = true;
1978                 }
1979
1980                 fiemap_prepare_and_copy_exts(fiemap, fm_ext, ost_index,
1981                                              ext_count, fs->fs_cur_extent,
1982                                              stripe_last + stripeno);
1983                 fs->fs_cur_extent += ext_count;
1984
1985                 /* Ran out of available extents? */
1986                 if (fs->fs_cur_extent >= fiemap->fm_extent_count)
1987                         fs->fs_enough = true;
1988         } while (!ost_done && !ost_eof);
1989
1990         if (stripeno == fs->fs_last_stripe)
1991                 fs->fs_finish_stripe = true;
1992 obj_put:
1993         cl_object_put(env, subobj);
1994
1995         return rc;
1996 }
1997
1998 /**
1999  * Break down the FIEMAP request and send appropriate calls to individual OSTs.
2000  * This also handles the restarting of FIEMAP calls in case mapping overflows
2001  * the available number of extents in a single call.
2002  *
2003  * \param env [in]              lustre environment
2004  * \param obj [in]              file object
2005  * \param fmkey [in]            fiemap request header and other info
2006  * \param fiemap [out]          fiemap buffer holding the retrieved map extents
2007  * \param buflen [in/out]       max buffer length of @fiemap; while iterating
2008  *                              over each OST it limits the maximum mapping needed
2009  * \retval 0    success
2010  * \retval < 0  error
2011  */
2012 static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj,
2013                              struct ll_fiemap_info_key *fmkey,
2014                              struct fiemap *fiemap, size_t *buflen)
2015 {
2016         struct lov_stripe_md_entry *lsme;
2017         struct lov_stripe_md *lsm;
2018         struct fiemap *fm_local = NULL;
2019         loff_t whole_start;
2020         loff_t whole_end;
2021         int entry;
2022         int start_entry = -1;
2023         int end_entry;
2024         int cur_stripe = 0;
2025         int stripe_count;
2026         unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
2027         int rc = 0;
2028         struct fiemap_state fs = { 0 };
2029         struct lu_extent range;
2030         int cur_ext;
2031         int stripe_last = 0;
2032         int start_stripe = 0;
2033         bool resume = false;
2034         ENTRY;
2035
2036         lsm = lov_lsm_addref(cl2lov(obj));
2037         if (lsm == NULL) {
2038                 /* no extent: there is no object for mapping */
2039                 fiemap->fm_mapped_extents = 0;
2040                 return 0;
2041         }
2042
2043         if (!(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) {
2044                 /**
2045                  * If the entry count > 1 or stripe_count > 1 and the
2046                  * application does not understand DEVICE_ORDER flag,
2047                  * it cannot interpret the extents correctly.
2048                  */
2049                 if (lsm->lsm_entry_count > 1 ||
2050                     (lsm->lsm_entry_count == 1 &&
2051                      lsm->lsm_entries[0]->lsme_stripe_count > 1))
2052                         GOTO(out_lsm, rc = -EOPNOTSUPP);
2053         }
2054
2055         /* No support for DOM layout yet. */
2056         if (lsme_is_dom(lsm->lsm_entries[0]))
2057                 GOTO(out_lsm, rc = -EOPNOTSUPP);
2058
2059         if (lsm->lsm_is_released) {
2060                 if (fiemap->fm_start < fmkey->lfik_oa.o_size) {
2061                         /**
2062                          * Released file: return a minimal FIEMAP if the
2063                          * request falls within the file size.
2064                          */
2065                         fiemap->fm_mapped_extents = 1;
2066                         if (fiemap->fm_extent_count == 0)
2067                                 GOTO(out_lsm, rc = 0);
2068
2069                         fiemap->fm_extents[0].fe_logical = fiemap->fm_start;
2070                         if (fiemap->fm_start + fiemap->fm_length <
2071                             fmkey->lfik_oa.o_size)
2072                                 fiemap->fm_extents[0].fe_length =
2073                                         fiemap->fm_length;
2074                         else
2075                                 fiemap->fm_extents[0].fe_length =
2076                                         fmkey->lfik_oa.o_size -
2077                                         fiemap->fm_start;
2078                         fiemap->fm_extents[0].fe_flags |=
2079                                 FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST;
2080                 }
2081                 GOTO(out_lsm, rc = 0);
2082         }
2083
2084         /* If fm_extent_count extents fit in less than buffer_size, shrink the local buffer. */
2085         if (fiemap_count_to_size(fiemap->fm_extent_count) < buffer_size)
2086                 buffer_size = fiemap_count_to_size(fiemap->fm_extent_count);
2087
2088         OBD_ALLOC_LARGE(fm_local, buffer_size);
2089         if (fm_local == NULL)
2090                 GOTO(out_lsm, rc = -ENOMEM);
2091
2092         /**
2093          * Requested extent count exceeds the fiemap buffer size, shrink our
2094          * ambition.
2095          */
2096         if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen)
2097                 fiemap->fm_extent_count = fiemap_size_to_count(*buflen);
2098
2099         fs.fs_enough = false;
2100         fs.fs_cur_extent = 0;
2101         fs.fs_fm = fm_local;
2102         fs.fs_cnt_need = fiemap_size_to_count(buffer_size);
2103
2104         whole_start = fiemap->fm_start;
2105         /* whole_start is beyond the end of the file */
2106         if (whole_start > fmkey->lfik_oa.o_size)
2107                 GOTO(out_fm_local, rc = -EINVAL);
2108         whole_end = (fiemap->fm_length == OBD_OBJECT_EOF) ?
2109                                         fmkey->lfik_oa.o_size + 1 :
2110                                         whole_start + fiemap->fm_length;
2111         /**
2112          * If fiemap->fm_length != OBD_OBJECT_EOF but whole_end exceeds file
2113          * size
2114          */
2115         if (whole_end > fmkey->lfik_oa.o_size + 1)
2116                 whole_end = fmkey->lfik_oa.o_size + 1;
2117
2118         /**
2119          * the high 16 bits of fe_device record which stripe the last
2120          * call stopped at; we continue from there in this call.
2121          */
2122         if (fiemap->fm_extent_count && fiemap->fm_extents[0].fe_logical) {
2123                 resume = true;
2124                 stripe_last = get_fe_stripenr(&fiemap->fm_extents[0]);
2125         }
2126         /**
2127          * stripe_last records the absolute stripe number we processed up to in
2128          * the last call
2129          */
2130         end_entry = lsm->lsm_entry_count - 1;
2131         cur_stripe = 0;
2132         for (entry = 0; entry <= end_entry; entry++) {
2133                 lsme = lsm->lsm_entries[entry];
2134                 if (cur_stripe + lsme->lsme_stripe_count >= stripe_last) {
2135                         start_entry = entry;
2136                         start_stripe = stripe_last - cur_stripe;
2137                         break;
2138                 }
2139
2140                 cur_stripe += lsme->lsme_stripe_count;
2141         }
2142         if (start_entry == -1) {
2143                 CERROR(DFID": FIEMAP does not init start entry, cur_stripe=%d, "
2144                        "stripe_last=%d\n", PFID(lu_object_fid(&obj->co_lu)),
2145                        cur_stripe, stripe_last);
2146                 GOTO(out_fm_local, rc = -EINVAL);
2147         }
2148         /**
2149          * @start_entry and @start_stripe record the position of fiemap
2150          * resumption; @stripe_last keeps tracking the absolute position
2151          * we are processing. @resume indicates we should honor @start_stripe.
2152          */
2153
2154         range.e_start = whole_start;
2155         range.e_end = whole_end;
2156
2157         for (entry = start_entry; entry <= end_entry; entry++) {
2158                 /* remember to update stripe_last accordingly */
2159                 lsme = lsm->lsm_entries[entry];
2160
2161                 /* FLR could contain component holes between entries */
2162                 if (!lsme_inited(lsme)) {
2163                         stripe_last += lsme->lsme_stripe_count;
2164                         resume = false;
2165                         continue;
2166                 }
2167
2168                 if (!lu_extent_is_overlapped(&range, &lsme->lsme_extent)) {
2169                         stripe_last += lsme->lsme_stripe_count;
2170                         resume = false;
2171                         continue;
2172                 }
2173
2174                 /* prepare for a component entry iteration */
2175                 if (lsme->lsme_extent.e_start > whole_start)
2176                         fs.fs_ext.e_start = lsme->lsme_extent.e_start;
2177                 else
2178                         fs.fs_ext.e_start = whole_start;
2179                 if (lsme->lsme_extent.e_end > whole_end)
2180                         fs.fs_ext.e_end = whole_end;
2181                 else
2182                         fs.fs_ext.e_end = lsme->lsme_extent.e_end;
2183
2184                 /* Calculate start stripe, last stripe and length of mapping */
2185                 if (resume) {
2186                         fs.fs_start_stripe = start_stripe;
2187                         /* put stripe_last to the first stripe of the comp */
2188                         stripe_last -= start_stripe;
2189                         resume = false;
2190                 } else {
2191                         fs.fs_start_stripe = lov_stripe_number(lsm, entry,
2192                                                         fs.fs_ext.e_start);
2193                 }
2194                 fs.fs_last_stripe = fiemap_calc_last_stripe(lsm, entry,
2195                                         &fs.fs_ext, fs.fs_start_stripe,
2196                                         &stripe_count);
2197                 /**
2198                  * If a new mirror component is being processed, reset
2199                  * fs.fs_end_offset so that fiemap_for_stripe() starts from
2200                  * the overlapping extent; otherwise it starts from
2201                  * fs.fs_end_offset.
2202                  */
2203                 if (entry > start_entry && lsme->lsme_extent.e_start == 0) {
2204                         /* new mirror */
2205                         fs.fs_end_offset = 0;
2206                 } else {
2207                         fs.fs_end_offset = fiemap_calc_fm_end_offset(fiemap,
2208                                                 lsm, entry, &fs.fs_ext,
2209                                                 &fs.fs_start_stripe);
2210                 }
2211
2212                 /* Check each stripe */
2213                 for (cur_stripe = fs.fs_start_stripe; stripe_count > 0;
2214                      --stripe_count,
2215                      cur_stripe = (cur_stripe + 1) % lsme->lsme_stripe_count) {
2216                         /* reset fs_finish_stripe */
2217                         fs.fs_finish_stripe = false;
2218                         rc = fiemap_for_stripe(env, obj, lsm, fiemap, buflen,
2219                                                fmkey, entry, stripe_last,
2220                                                cur_stripe, &fs);
2221                         if (rc < 0)
2222                                 GOTO(out_fm_local, rc);
2223                         if (fs.fs_enough) {
2224                                 stripe_last += cur_stripe;
2225                                 GOTO(finish, rc);
2226                         }
2227                         if (fs.fs_finish_stripe)
2228                                 break;
2229                 } /* for each stripe */
2230                 stripe_last += lsme->lsme_stripe_count;
2231         } /* for covering layout component entry */
2232
2233 finish:
2234         if (fs.fs_cur_extent > 0)
2235                 cur_ext = fs.fs_cur_extent - 1;
2236         else
2237                 cur_ext = 0;
2238
2239         /* Indicate that we are returning device offsets unless the file has
2240          * only a single stripe */
2241         if (lsm->lsm_entry_count > 1 ||
2242             (lsm->lsm_entry_count == 1 &&
2243              lsm->lsm_entries[0]->lsme_stripe_count > 1))
2244                 fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
2245
2246         if (fiemap->fm_extent_count == 0)
2247                 goto skip_last_device_calc;
2248
2249         /* done all the processing */
2250         if (entry > end_entry ||
2251             (fs.fs_enough && fs.fs_finish_stripe && entry == end_entry))
2252                 fiemap->fm_extents[cur_ext].fe_flags |= FIEMAP_EXTENT_LAST;
2253
2254 skip_last_device_calc:
2255         fiemap->fm_mapped_extents = fs.fs_cur_extent;
2256 out_fm_local:
2257         OBD_FREE_LARGE(fm_local, buffer_size);
2258
2259 out_lsm:
2260         lov_lsm_put(lsm);
2261         return rc;
2262 }
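/*
 * Continuation note (summarising the behaviour above): when the user buffer
 * fills up before the whole file is mapped, the next FIEMAP call is expected
 * to pass back the first returned extent, whose fe_logical carries the last
 * mapped end offset and whose fe_device high bits carry the absolute stripe
 * number; lov_object_fiemap() then resumes from that (entry, stripe)
 * position instead of restarting at stripe 0.
 */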
2263
2264 static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
2265                                 struct lov_user_md __user *lum, size_t size)
2266 {
2267         struct lov_object       *lov = cl2lov(obj);
2268         struct lov_stripe_md    *lsm;
2269         int                     rc = 0;
2270         ENTRY;
2271
2272         lsm = lov_lsm_addref(lov);
2273         if (lsm == NULL)
2274                 RETURN(-ENODATA);
2275
2276         rc = lov_getstripe(env, cl2lov(obj), lsm, lum, size);
2277         lov_lsm_put(lsm);
2278         RETURN(rc);
2279 }
2280
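/*
 * Report the current layout to the cl_object layer: fill in the layout
 * generation and flags from the LSM and pack it into cl->cl_buf; the return
 * value of lov_lsm_pack() (bytes packed, or a negative errno) is passed
 * through to the caller.
 */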
2281 static int lov_object_layout_get(const struct lu_env *env,
2282                                  struct cl_object *obj,
2283                                  struct cl_layout *cl)
2284 {
2285         struct lov_object *lov = cl2lov(obj);
2286         struct lov_stripe_md *lsm = lov_lsm_addref(lov);
2287         struct lu_buf *buf = &cl->cl_buf;
2288         ssize_t rc;
2289         ENTRY;
2290
2291         if (lsm == NULL) {
2292                 cl->cl_size = 0;
2293                 cl->cl_layout_gen = CL_LAYOUT_GEN_EMPTY;
2294
2295                 RETURN(0);
2296         }
2297
2298         cl->cl_size = lov_comp_md_size(lsm);
2299         cl->cl_layout_gen = lsm->lsm_layout_gen;
2300         cl->cl_is_rdonly = lsm->lsm_is_rdonly;
2301         cl->cl_is_released = lsm->lsm_is_released;
2302         cl->cl_is_composite = lsm_is_composite(lsm->lsm_magic);
2303
2304         rc = lov_lsm_pack(lsm, buf->lb_buf, buf->lb_len);
2305         lov_lsm_put(lsm);
2306
2307         /* return error or number of bytes */
2308         RETURN(rc);
2309 }
2310
2311 static loff_t lov_object_maxbytes(struct cl_object *obj)
2312 {
2313         struct lov_object *lov = cl2lov(obj);
2314         struct lov_stripe_md *lsm = lov_lsm_addref(lov);
2315         loff_t maxbytes;
2316
2317         if (lsm == NULL)
2318                 return LLONG_MAX;
2319
2320         maxbytes = lsm->lsm_maxbytes;
2321
2322         lov_lsm_put(lsm);
2323
2324         return maxbytes;
2325 }
2326
2327 static int lov_object_flush(const struct lu_env *env, struct cl_object *obj,
2328                             struct ldlm_lock *lock)
2329 {
2330         return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_flush, true, env, obj,
2331                                      lock);
2332 }
2333
2334 static const struct cl_object_operations lov_ops = {
2335         .coo_page_init    = lov_page_init,
2336         .coo_lock_init    = lov_lock_init,
2337         .coo_io_init      = lov_io_init,
2338         .coo_attr_get     = lov_attr_get,
2339         .coo_attr_update  = lov_attr_update,
2340         .coo_conf_set     = lov_conf_set,
2341         .coo_getstripe    = lov_object_getstripe,
2342         .coo_layout_get   = lov_object_layout_get,
2343         .coo_maxbytes     = lov_object_maxbytes,
2344         .coo_fiemap       = lov_object_fiemap,
2345         .coo_object_flush = lov_object_flush
2346 };
2347
2348 static const struct lu_object_operations lov_lu_obj_ops = {
2349         .loo_object_init        = lov_object_init,
2350         .loo_object_delete      = lov_object_delete,
2351         .loo_object_release     = NULL,
2352         .loo_object_free        = lov_object_free,
2353         .loo_object_print       = lov_object_print,
2354         .loo_object_invariant   = NULL,
2355 };
2356
2357 struct lu_object *lov_object_alloc(const struct lu_env *env,
2358                                    const struct lu_object_header *unused,
2359                                    struct lu_device *dev)
2360 {
2361         struct lov_object *lov;
2362         struct lu_object  *obj;
2363
2364         ENTRY;
2365         OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, GFP_NOFS);
2366         if (lov != NULL) {
2367                 obj = lov2lu(lov);
2368                 lu_object_init(obj, NULL, dev);
2369                 lov->lo_cl.co_ops = &lov_ops;
2370                 lov->lo_type = -1; /* invalid, to catch uninitialized type */
2371                 /*
2372                  * object io operation vector (cl_object::co_iop) is installed
2373                  * later in lov_object_init(), as different vectors are used
2374                  * for object with different layouts.
2375                  */
2376                 obj->lo_ops = &lov_lu_obj_ops;
2377         } else
2378                 obj = NULL;
2379         RETURN(obj);
2380 }
2381
2382 static struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
2383 {
2384         struct lov_stripe_md *lsm = NULL;
2385
2386         lov_conf_freeze(lov);
2387         if (lov->lo_lsm != NULL) {
2388                 lsm = lsm_addref(lov->lo_lsm);
2389                 CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
2390                         lsm, atomic_read(&lsm->lsm_refc),
2391                         test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags),
2392                         current);
2393         }
2394         lov_conf_thaw(lov);
2395         return lsm;
2396 }
2397
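/*
 * Collect and clear the per-stripe asynchronous write error codes
 * (loi_ar.ar_rc) recorded by lower layers, returning the first non-zero
 * error found; typically used by the llite layer on flush/fsync paths.
 */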
2398 int lov_read_and_clear_async_rc(struct cl_object *clob)
2399 {
2400         struct lu_object *luobj;
2401         int rc = 0;
2402         ENTRY;
2403
2404         luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
2405                                  &lov_device_type);
2406         if (luobj != NULL) {
2407                 struct lov_object *lov = lu2lov(luobj);
2408
2409                 lov_conf_freeze(lov);
2410                 switch (lov->lo_type) {
2411                 case LLT_COMP: {
2412                         struct lov_stripe_md *lsm;
2413                         int i;
2414
2415                         lsm = lov->lo_lsm;
2416                         LASSERT(lsm != NULL);
2417                         for (i = 0; i < lsm->lsm_entry_count; i++) {
2418                                 struct lov_stripe_md_entry *lse =
2419                                                 lsm->lsm_entries[i];
2420                                 int j;
2421
2422                                 if (!lsme_inited(lse) ||
2423                                     !lov_pattern_supported(
2424                                             lov_pattern(lse->lsme_pattern)) ||
2425                                     !lov_supported_comp_magic(lse->lsme_magic))
2426                                         break;
2427
2428                                 if (lsme_is_foreign(lse))
2429                                         break;
2430
2431                                 for (j = 0; j < lse->lsme_stripe_count; j++) {
2432                                         struct lov_oinfo *loi =
2433                                                         lse->lsme_oinfo[j];
2434
2435                                         if (lov_oinfo_is_dummy(loi))
2436                                                 continue;
2437
2438                                         if (loi->loi_ar.ar_rc && !rc)
2439                                                 rc = loi->loi_ar.ar_rc;
2440                                         loi->loi_ar.ar_rc = 0;
2441                                 }
2442                         }
2443                 }
2444                 fallthrough;
2445                 case LLT_RELEASED:
2446                 case LLT_EMPTY:
2447                 case LLT_FOREIGN:
2448                         break;
2449                 default:
2450                         LBUG();
2451                 }
2452                 lov_conf_thaw(lov);
2453         }
2454         RETURN(rc);
2455 }
2456 EXPORT_SYMBOL(lov_read_and_clear_async_rc);
2457
2458 /** @} lov */