1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * Implementation of cl_object for LOV layer.
33  *
34  *   Author: Nikita Danilov <nikita.danilov@sun.com>
35  *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
36  */
37
38 #define DEBUG_SUBSYSTEM S_LOV
39
40 #include <linux/random.h>
41
42 #include "lov_cl_internal.h"
43
44 static inline struct lov_device *lov_object_dev(struct lov_object *obj)
45 {
46         return lu2lov_dev(obj->lo_cl.co_lu.lo_dev);
47 }
48
49 /** \addtogroup lov
50  *  @{
51  */
52
53 /*****************************************************************************
54  *
55  * Layout operations.
56  *
57  */
58
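/*
 * Per-layout-type method table: one instance exists for each layout type
 * (see lov_dispatch[] below, indexed by enum lov_layout_type), and the
 * LOV_2DISPATCH*() macros pick the instance matching the object's current
 * lo_type.
 */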
59 struct lov_layout_operations {
60         int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
61                         struct lov_object *lov, struct lov_stripe_md *lsm,
62                         const struct cl_object_conf *conf,
63                         union lov_layout_state *state);
64         int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
65                            union lov_layout_state *state);
66         void (*llo_fini)(const struct lu_env *env, struct lov_object *lov,
67                          union lov_layout_state *state);
68         int  (*llo_print)(const struct lu_env *env, void *cookie,
69                           lu_printer_t p, const struct lu_object *o);
70         int  (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
71                               struct cl_page *page, pgoff_t index);
72         int  (*llo_lock_init)(const struct lu_env *env,
73                               struct cl_object *obj, struct cl_lock *lock,
74                               const struct cl_io *io);
75         int  (*llo_io_init)(const struct lu_env *env,
76                             struct cl_object *obj, struct cl_io *io);
77         int  (*llo_getattr)(const struct lu_env *env, struct cl_object *obj,
78                             struct cl_attr *attr);
79         int  (*llo_flush)(const struct lu_env *env, struct cl_object *obj,
80                           struct ldlm_lock *lock);
81 };
82
83 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
84
85 static void lov_lsm_put(struct lov_stripe_md *lsm)
86 {
87         if (lsm != NULL)
88                 lov_free_memmd(&lsm);
89 }
90
91 /*****************************************************************************
92  *
93  * Lov object layout operations.
94  *
95  */
96
97 static struct cl_object *lov_sub_find(const struct lu_env *env,
98                                       struct cl_device *dev,
99                                       const struct lu_fid *fid,
100                                       const struct cl_object_conf *conf)
101 {
102         struct lu_object *o;
103
104         ENTRY;
105
106         o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
107         LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
108         RETURN(lu2cl(o));
109 }
110
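/*
 * With a NULL @stripe, return the slack left in the top object's per-page
 * buffer after the lov_page slice; otherwise shift every slice of @stripe
 * by the parent's coh_page_bufsize (so sub-object slices follow the
 * parent's) and return the stripe's own per-page buffer size.
 */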
111 static int lov_page_slice_fixup(struct lov_object *lov,
112                                 struct cl_object *stripe)
113 {
114         struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
115         struct cl_object *o;
116
117         if (stripe == NULL)
118                 return hdr->coh_page_bufsize - lov->lo_cl.co_slice_off -
119                        cfs_size_round(sizeof(struct lov_page));
120
121         cl_object_for_each(o, stripe)
122                 o->co_slice_off += hdr->coh_page_bufsize;
123
124         return cl_object_header(stripe)->coh_page_bufsize;
125 }
126
127 static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
128                         struct cl_object *subobj, struct lov_oinfo *oinfo,
129                         int idx)
130 {
131         struct cl_object_header *hdr;
132         struct cl_object_header *subhdr;
133         struct cl_object_header *parent;
134         int entry = lov_comp_entry(idx);
135         int stripe = lov_comp_stripe(idx);
136         int result;
137
138         if (OBD_FAIL_CHECK(OBD_FAIL_LOV_INIT)) {
139                 /* For sanity:test_206.
140                  * Do not leave the object in cache to avoid accessing
141                  * freed memory. This is because osc_object is referring to
142                  * lov_oinfo of lsm_stripe_data which will be freed due to
143                  * this failure. */
144                 cl_object_kill(env, subobj);
145                 cl_object_put(env, subobj);
146                 return -EIO;
147         }
148
149         hdr = cl_object_header(lov2cl(lov));
150         subhdr = cl_object_header(subobj);
151
152         CDEBUG(D_INODE, DFID"@%p[%d:%d] -> "DFID"@%p: ostid: "DOSTID
153                " ost idx: %d gen: %d\n",
154                PFID(lu_object_fid(&subobj->co_lu)), subhdr, entry, stripe,
155                PFID(lu_object_fid(lov2lu(lov))), hdr, POSTID(&oinfo->loi_oi),
156                oinfo->loi_ost_idx, oinfo->loi_ost_gen);
157
158         /* reuse ->coh_attr_guard to protect coh_parent change */
159         spin_lock(&subhdr->coh_attr_guard);
160         parent = subhdr->coh_parent;
161         if (parent == NULL) {
162                 struct lovsub_object *lso = cl2lovsub(subobj);
163
164                 subhdr->coh_parent = hdr;
165                 spin_unlock(&subhdr->coh_attr_guard);
166                 subhdr->coh_nesting = hdr->coh_nesting + 1;
167                 lu_object_ref_add(&subobj->co_lu, "lov-parent", lov);
168                 lso->lso_super = lov;
169                 lso->lso_index = idx;
170                 result = 0;
171         } else {
172                 struct lu_object  *old_obj;
173                 struct lov_object *old_lov;
174                 unsigned int mask = D_INODE;
175
176                 spin_unlock(&subhdr->coh_attr_guard);
177                 old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
178                 LASSERT(old_obj != NULL);
179                 old_lov = cl2lov(lu2cl(old_obj));
180                 if (old_lov->lo_layout_invalid) {
181                         /* the object's layout has already changed but isn't
182                          * refreshed */
183                         lu_object_unhash(env, &subobj->co_lu);
184                         result = -EAGAIN;
185                 } else {
186                         mask = D_ERROR;
187                         result = -EIO;
188                 }
189
190                 LU_OBJECT_DEBUG(mask, env, &subobj->co_lu,
191                                 "stripe %d is already owned.", idx);
192                 LU_OBJECT_DEBUG(mask, env, old_obj, "owned.");
193                 LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n");
194                 cl_object_put(env, subobj);
195         }
196         return result;
197 }
198
199 static int lov_init_raid0(const struct lu_env *env, struct lov_device *dev,
200                           struct lov_object *lov, unsigned int index,
201                           const struct cl_object_conf *conf,
202                           struct lov_layout_entry *lle)
203 {
204         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
205         struct lov_thread_info *lti = lov_env_info(env);
206         struct cl_object_conf *subconf = &lti->lti_stripe_conf;
207         struct lu_fid *ofid = &lti->lti_fid;
208         struct cl_object *stripe;
209         struct lov_stripe_md_entry *lse  = lov_lse(lov, index);
210         int result;
211         int psz, sz;
212         int i;
213
214         ENTRY;
215
216         spin_lock_init(&r0->lo_sub_lock);
217         r0->lo_nr = lse->lsme_stripe_count;
218
219         OBD_ALLOC_LARGE(r0->lo_sub, r0->lo_nr * sizeof(r0->lo_sub[0]));
220         if (r0->lo_sub == NULL)
221                 GOTO(out, result = -ENOMEM);
222
223         psz = 0;
224         result = 0;
225         memset(subconf, 0, sizeof(*subconf));
226
227         /*
228          * Create stripe cl_objects.
229          */
230         for (i = 0; i < r0->lo_nr; ++i) {
231                 struct cl_device *subdev;
232                 struct lov_oinfo *oinfo = lse->lsme_oinfo[i];
233                 int ost_idx = oinfo->loi_ost_idx;
234
235                 if (lov_oinfo_is_dummy(oinfo))
236                         continue;
237
238                 result = ostid_to_fid(ofid, &oinfo->loi_oi, oinfo->loi_ost_idx);
239                 if (result != 0)
240                         GOTO(out, result);
241
242                 if (dev->ld_target[ost_idx] == NULL) {
243                         CERROR("%s: OST %04x is not initialized\n",
244                                lov2obd(dev->ld_lov)->obd_name, ost_idx);
245                         GOTO(out, result = -EIO);
246                 }
247
248                 subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
249                 subconf->u.coc_oinfo = oinfo;
250                 LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
251                 /* In the function below, .hs_keycmp resolves to
252                  * lu_obj_hop_keycmp() */
253                 /* coverity[overrun-buffer-val] */
254                 stripe = lov_sub_find(env, subdev, ofid, subconf);
255                 if (IS_ERR(stripe))
256                         GOTO(out, result = PTR_ERR(stripe));
257
258                 result = lov_init_sub(env, lov, stripe, oinfo,
259                                       lov_comp_index(index, i));
260                 if (result == -EAGAIN) { /* try again */
261                         --i;
262                         result = 0;
263                         continue;
264                 }
265
266                 if (result == 0) {
267                         r0->lo_sub[i] = cl2lovsub(stripe);
268
269                         sz = lov_page_slice_fixup(lov, stripe);
270                         LASSERT(ergo(psz > 0, psz == sz));
271                         psz = sz;
272                 }
273         }
274         if (result == 0)
275                 result = psz;
276 out:
277         RETURN(result);
278 }
279
280 static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
281                                struct lov_layout_raid0 *r0,
282                                struct lovsub_object *los, int idx)
283 {
284         struct cl_object        *sub;
285         struct lu_site          *site;
286         wait_queue_head_t *wq;
287         wait_queue_entry_t *waiter;
288
289         LASSERT(r0->lo_sub[idx] == los);
290
291         sub = lovsub2cl(los);
292         site = sub->co_lu.lo_dev->ld_site;
293         wq = lu_site_wq_from_fid(site, &sub->co_lu.lo_header->loh_fid);
294
295         cl_object_kill(env, sub);
296         /* release a reference to the sub-object and ... */
297         lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
298         cl_object_put(env, sub);
299
300         /* ... wait until it is actually destroyed---sub-object clears its
301          * ->lo_sub[] slot in lovsub_object_free() */
302         if (r0->lo_sub[idx] == los) {
303                 waiter = &lov_env_info(env)->lti_waiter;
304                 init_waitqueue_entry(waiter, current);
305                 add_wait_queue(wq, waiter);
306                 set_current_state(TASK_UNINTERRUPTIBLE);
307                 while (1) {
308                         /* this wait-queue is signaled at the end of
309                          * lu_object_free(). */
310                         set_current_state(TASK_UNINTERRUPTIBLE);
311                         spin_lock(&r0->lo_sub_lock);
312                         if (r0->lo_sub[idx] == los) {
313                                 spin_unlock(&r0->lo_sub_lock);
314                                 schedule();
315                         } else {
316                                 spin_unlock(&r0->lo_sub_lock);
317                                 set_current_state(TASK_RUNNING);
318                                 break;
319                         }
320                 }
321                 remove_wait_queue(wq, waiter);
322         }
323         LASSERT(r0->lo_sub[idx] == NULL);
324 }
325
326 static void lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
327                              struct lov_layout_entry *lle)
328 {
329         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
330
331         ENTRY;
332
333         if (r0->lo_sub != NULL) {
334                 int i;
335
336                 for (i = 0; i < r0->lo_nr; ++i) {
337                         struct lovsub_object *los = r0->lo_sub[i];
338
339                         if (los != NULL) {
340                                 cl_object_prune(env, &los->lso_cl);
341                                 /*
342                                  * If the top-level object is to be evicted from
343                                  * the cache, so are its sub-objects.
344                                  */
345                                 lov_subobject_kill(env, lov, r0, los, i);
346                         }
347                 }
348         }
349
350         EXIT;
351 }
352
353 static void lov_fini_raid0(const struct lu_env *env,
354                            struct lov_layout_entry *lle)
355 {
356         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
357
358         if (r0->lo_sub != NULL) {
359                 OBD_FREE_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
360                 r0->lo_sub = NULL;
361         }
362 }
363
364 static int lov_print_raid0(const struct lu_env *env, void *cookie,
365                            lu_printer_t p, const struct lov_layout_entry *lle)
366 {
367         const struct lov_layout_raid0 *r0 = &lle->lle_raid0;
368         int i;
369
370         for (i = 0; i < r0->lo_nr; ++i) {
371                 struct lu_object *sub;
372
373                 if (r0->lo_sub[i] != NULL) {
374                         sub = lovsub2lu(r0->lo_sub[i]);
375                         lu_object_print(env, cookie, p, sub);
376                 } else {
377                         (*p)(env, cookie, "sub %d absent\n", i);
378                 }
379         }
380         return 0;
381 }
382
383 static int lov_attr_get_raid0(const struct lu_env *env, struct lov_object *lov,
384                               unsigned int index, struct lov_layout_entry *lle,
385                               struct cl_attr **lov_attr)
386 {
387         struct lov_layout_raid0 *r0 = &lle->lle_raid0;
388         struct lov_stripe_md *lsm = lov->lo_lsm;
389         struct ost_lvb *lvb = &lov_env_info(env)->lti_lvb;
390         struct cl_attr *attr = &r0->lo_attr;
391         __u64 kms = 0;
392         int result = 0;
393
394         if (r0->lo_attr_valid) {
395                 *lov_attr = attr;
396                 return 0;
397         }
398
399         memset(lvb, 0, sizeof(*lvb));
400
401         /* XXX: timestamps can end up negative (seen in sanity:test_39m);
402          * how can that be? */
403         lvb->lvb_atime = LLONG_MIN;
404         lvb->lvb_ctime = LLONG_MIN;
405         lvb->lvb_mtime = LLONG_MIN;
406
407         /*
408          * XXX that should be replaced with a loop over sub-objects,
409          * doing cl_object_attr_get() on them. But for now, let's
410          * reuse old lov code.
411          */
412
413         /*
414          * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
415          * happy. It's not needed, because new code uses
416          * ->coh_attr_guard spin-lock to protect consistency of
417          * sub-object attributes.
418          */
419         lov_stripe_lock(lsm);
420         result = lov_merge_lvb_kms(lsm, index, lvb, &kms);
421         lov_stripe_unlock(lsm);
422         if (result == 0) {
423                 cl_lvb2attr(attr, lvb);
424                 attr->cat_kms = kms;
425                 r0->lo_attr_valid = 1;
426                 *lov_attr = attr;
427         }
428
429         return result;
430 }
431
432 static struct lov_comp_layout_entry_ops raid0_ops = {
433         .lco_init      = lov_init_raid0,
434         .lco_fini      = lov_fini_raid0,
435         .lco_getattr   = lov_attr_get_raid0,
436 };
437
438 static int lov_attr_get_dom(const struct lu_env *env, struct lov_object *lov,
439                             unsigned int index, struct lov_layout_entry *lle,
440                             struct cl_attr **lov_attr)
441 {
442         struct lov_layout_dom *dom = &lle->lle_dom;
443         struct lov_oinfo *loi = dom->lo_loi;
444         struct cl_attr *attr = &dom->lo_dom_r0.lo_attr;
445
446         if (dom->lo_dom_r0.lo_attr_valid) {
447                 *lov_attr = attr;
448                 return 0;
449         }
450
451         if (OST_LVB_IS_ERR(loi->loi_lvb.lvb_blocks))
452                 return OST_LVB_GET_ERR(loi->loi_lvb.lvb_blocks);
453
454         cl_lvb2attr(attr, &loi->loi_lvb);
455
456         /* DoM component size can be bigger than stripe size after
457          * client's setattr RPC, so do not count anything beyond
458          * component end. Alternatively, check that limit on server
459          * and do not allow size overflow there. */
460         if (attr->cat_size > lle->lle_extent->e_end)
461                 attr->cat_size = lle->lle_extent->e_end;
462
463         attr->cat_kms = attr->cat_size;
464
465         dom->lo_dom_r0.lo_attr_valid = 1;
466         *lov_attr = attr;
467
468         return 0;
469 }
470
471 /**
472  * Lookup FLD to get MDS index of the given DOM object FID.
473  *
474  * \param[in]  ld       LOV device
475  * \param[in]  fid      FID to lookup
476  * \param[out] nr       index in the MDC array to return
477  *
478  * \retval              0 and \a nr filled with the MDC array index on success
479  * \retval              negative value on error
480  */
481 static int lov_fld_lookup(struct lov_device *ld, const struct lu_fid *fid,
482                           __u32 *nr)
483 {
484         __u32 mds_idx;
485         int i, rc;
486
487         ENTRY;
488
489         rc = fld_client_lookup(&ld->ld_lmv->u.lmv.lmv_fld, fid_seq(fid),
490                                &mds_idx, LU_SEQ_RANGE_MDT, NULL);
491         if (rc) {
492                 CERROR("%s: error while looking for mds number. Seq %#llx"
493                        ", err = %d\n", lu_dev_name(cl2lu_dev(&ld->ld_cl)),
494                        fid_seq(fid), rc);
495                 RETURN(rc);
496         }
497
498         CDEBUG(D_INODE, "FLD lookup got mds #%x for fid="DFID"\n",
499                mds_idx, PFID(fid));
500
501         /* find proper MDC device in the array */
502         for (i = 0; i < ld->ld_md_tgts_nr; i++) {
503                 if (ld->ld_md_tgts[i].ldm_mdc != NULL &&
504                     ld->ld_md_tgts[i].ldm_idx == mds_idx)
505                         break;
506         }
507
508         if (i == ld->ld_md_tgts_nr) {
509                 CERROR("%s: cannot find corresponding MDC device for mds #%x "
510                        "for fid="DFID"\n", lu_dev_name(cl2lu_dev(&ld->ld_cl)),
511                        mds_idx, PFID(fid));
512                 rc = -EINVAL;
513         } else {
514                 *nr = i;
515         }
516         RETURN(rc);
517 }
518
519 /**
520  * Implementation of lov_comp_layout_entry_ops::lco_init for DOM object.
521  *
522  * Initialize the DOM object for the first time. It also prepares a RAID0
523  * entry so that methods shared with ordinary RAID0 layout entries can be reused.
524  *
525  * \param[in] env       execution environment
526  * \param[in] dev       LOV device
527  * \param[in] lov       LOV object
528  * \param[in] index     Composite layout entry index in LSM
 * \param[in] conf      cl_object configuration
529  * \param[in] lle       Composite LOV layout entry
530  */
531 static int lov_init_dom(const struct lu_env *env, struct lov_device *dev,
532                         struct lov_object *lov, unsigned int index,
533                         const struct cl_object_conf *conf,
534                         struct lov_layout_entry *lle)
535 {
536         struct lov_thread_info *lti = lov_env_info(env);
537         struct lov_stripe_md_entry *lsme = lov_lse(lov, index);
538         struct cl_object *clo;
539         struct lu_object *o = lov2lu(lov);
540         const struct lu_fid *fid = lu_object_fid(o);
541         struct cl_device *mdcdev;
542         struct lov_oinfo *loi = NULL;
543         struct cl_object_conf *sconf = &lti->lti_stripe_conf;
544
545         int rc;
546         __u32 idx = 0;
547
548         ENTRY;
549
550         LASSERT(index == 0);
551
552         /* find proper MDS device */
553         rc = lov_fld_lookup(dev, fid, &idx);
554         if (rc)
555                 RETURN(rc);
556
557         LASSERTF(dev->ld_md_tgts[idx].ldm_mdc != NULL,
558                  "LOV md target[%u] is NULL\n", idx);
559
560         /* check lsm is DOM, more checks are needed */
561         LASSERT(lsme->lsme_stripe_count == 0);
562
563         /*
564          * Create lower cl_objects.
565          */
566         mdcdev = dev->ld_md_tgts[idx].ldm_mdc;
567
568         LASSERTF(mdcdev != NULL, "non-initialized mdc subdev\n");
569
570         /* DoM object has no oinfo in LSM entry, create it exclusively */
571         OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, GFP_NOFS);
572         if (loi == NULL)
573                 RETURN(-ENOMEM);
574
575         fid_to_ostid(lu_object_fid(lov2lu(lov)), &loi->loi_oi);
576
577         sconf->u.coc_oinfo = loi;
578 again:
579         clo = lov_sub_find(env, mdcdev, fid, sconf);
580         if (IS_ERR(clo))
581                 GOTO(out, rc = PTR_ERR(clo));
582
583         rc = lov_init_sub(env, lov, clo, loi, lov_comp_index(index, 0));
584         if (rc == -EAGAIN) /* try again */
585                 goto again;
586         else if (rc != 0)
587                 GOTO(out, rc);
588
589         lle->lle_dom.lo_dom = cl2lovsub(clo);
590         spin_lock_init(&lle->lle_dom.lo_dom_r0.lo_sub_lock);
591         lle->lle_dom.lo_dom_r0.lo_nr = 1;
592         lle->lle_dom.lo_dom_r0.lo_sub = &lle->lle_dom.lo_dom;
593         lle->lle_dom.lo_loi = loi;
594
595         rc = lov_page_slice_fixup(lov, clo);
596         RETURN(rc);
597
598 out:
599         if (loi != NULL)
600                 OBD_SLAB_FREE_PTR(loi, lov_oinfo_slab);
601         return rc;
602 }
603
604 /**
605  * Implementation of lov_comp_layout_entry_ops::lco_fini for DOM object.
606  *
607  * Finish the DOM object and free related memory.
608  *
609  * \param[in] env       execution environment
610  * \param[in] lle       composite LOV layout entry being finalized
612  */
613 static void lov_fini_dom(const struct lu_env *env,
614                          struct lov_layout_entry *lle)
615 {
616         if (lle->lle_dom.lo_dom != NULL)
617                 lle->lle_dom.lo_dom = NULL;
618         if (lle->lle_dom.lo_loi != NULL)
619                 OBD_SLAB_FREE_PTR(lle->lle_dom.lo_loi, lov_oinfo_slab);
620 }
621
622 static struct lov_comp_layout_entry_ops dom_ops = {
623         .lco_init = lov_init_dom,
624         .lco_fini = lov_fini_dom,
625         .lco_getattr = lov_attr_get_dom,
626 };
627
628 static int lov_init_composite(const struct lu_env *env, struct lov_device *dev,
629                               struct lov_object *lov, struct lov_stripe_md *lsm,
630                               const struct cl_object_conf *conf,
631                               union lov_layout_state *state)
632 {
633         struct lov_layout_composite *comp = &state->composite;
634         struct lov_layout_entry *lle;
635         struct lov_mirror_entry *lre;
636         unsigned int entry_count;
637         unsigned int psz = 0;
638         unsigned int mirror_count;
639         int flr_state = lsm->lsm_flags & LCM_FL_FLR_MASK;
640         int result = 0;
641         unsigned int seq;
642         int i, j;
643
644         ENTRY;
645
646         LASSERT(lsm->lsm_entry_count > 0);
647         LASSERT(lov->lo_lsm == NULL);
648         lov->lo_lsm = lsm_addref(lsm);
649         lov->lo_layout_invalid = true;
650
651         dump_lsm(D_INODE, lsm);
652
653         entry_count = lsm->lsm_entry_count;
654
655         spin_lock_init(&comp->lo_write_lock);
656         comp->lo_flags = lsm->lsm_flags;
657         comp->lo_mirror_count = lsm->lsm_mirror_count + 1;
658         comp->lo_entry_count = lsm->lsm_entry_count;
659         comp->lo_preferred_mirror = -1;
660
661         if (equi(flr_state == LCM_FL_NONE, comp->lo_mirror_count > 1))
662                 RETURN(-EINVAL);
663
664         OBD_ALLOC(comp->lo_mirrors,
665                   comp->lo_mirror_count * sizeof(*comp->lo_mirrors));
666         if (comp->lo_mirrors == NULL)
667                 RETURN(-ENOMEM);
668
669         OBD_ALLOC(comp->lo_entries, entry_count * sizeof(*comp->lo_entries));
670         if (comp->lo_entries == NULL)
671                 RETURN(-ENOMEM);
672
673         /* Initiate all entry types and extents data at first */
674         for (i = 0, j = 0, mirror_count = 1; i < entry_count; i++) {
675                 int mirror_id = 0;
676
677                 lle = &comp->lo_entries[i];
678
679                 lle->lle_lsme = lsm->lsm_entries[i];
680                 lle->lle_type = lov_entry_type(lle->lle_lsme);
681                 switch (lle->lle_type) {
682                 case LOV_PATTERN_RAID0:
683                         lle->lle_comp_ops = &raid0_ops;
684                         break;
685                 case LOV_PATTERN_MDT:
686                         lle->lle_comp_ops = &dom_ops;
687                         break;
688                 default:
689                         CERROR("%s: unknown composite layout entry type %i\n",
690                                lov2obd(dev->ld_lov)->obd_name,
691                                lsm->lsm_entries[i]->lsme_pattern);
692                         dump_lsm(D_ERROR, lsm);
693                         RETURN(-EIO);
694                 }
695
696                 lle->lle_extent = &lle->lle_lsme->lsme_extent;
697                 lle->lle_valid = !(lle->lle_lsme->lsme_flags & LCME_FL_STALE);
698
699                 if (flr_state != LCM_FL_NONE)
700                         mirror_id = mirror_id_of(lle->lle_lsme->lsme_id);
701
702                 lre = &comp->lo_mirrors[j];
703                 if (i > 0) {
704                         if (mirror_id == lre->lre_mirror_id) {
705                                 lre->lre_valid |= lle->lle_valid;
706                                 lre->lre_stale |= !lle->lle_valid;
707                                 lre->lre_end = i;
708                                 continue;
709                         }
710
711                         /* new mirror detected, assume that the mirrors
712                          * are sorted in the layout */
713                         ++mirror_count;
714                         ++j;
715                         if (j >= comp->lo_mirror_count)
716                                 break;
717
718                         lre = &comp->lo_mirrors[j];
719                 }
720
721                 /* entries must be sorted by mirrors */
722                 lre->lre_mirror_id = mirror_id;
723                 lre->lre_start = lre->lre_end = i;
724                 lre->lre_preferred = !!(lle->lle_lsme->lsme_flags &
725                                         LCME_FL_PREF_RD);
726                 lre->lre_valid = lle->lle_valid;
727                 lre->lre_stale = !lle->lle_valid;
728         }
729
730         /* sanity check for FLR */
731         if (mirror_count != comp->lo_mirror_count) {
732                 CDEBUG(D_INODE, DFID
733                        " doesn't have the # of mirrors it claims, %u/%u\n",
734                        PFID(lu_object_fid(lov2lu(lov))), mirror_count,
735                        comp->lo_mirror_count + 1);
736
737                 GOTO(out, result = -EINVAL);
738         }
739
740         lov_foreach_layout_entry(lov, lle) {
741                 int index = lov_layout_entry_index(lov, lle);
742
743                 /*
744                  * If the component has not been instantiated on the MDS side
745                  * yet, then for a PFL layout the components beyond this one
746                  * will be instantiated dynamically later, on write/truncate.
747                  */
748                 if (!lsme_inited(lle->lle_lsme))
749                         continue;
750
751                 result = lle->lle_comp_ops->lco_init(env, dev, lov, index,
752                                                      conf, lle);
753                 if (result < 0)
754                         break;
755
756                 LASSERT(ergo(psz > 0, psz == result));
757                 psz = result;
758         }
759
760         if (psz > 0)
761                 cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;
762
763         /* decide the preferred mirror. It uses the hash value of lov_object
764          * so that different clients would use different mirrors for read. */
765         mirror_count = 0;
766         seq = hash_long((unsigned long)lov, 8);
767         for (i = 0; i < comp->lo_mirror_count; i++) {
768                 unsigned int idx = (i + seq) % comp->lo_mirror_count;
769
770                 lre = lov_mirror_entry(lov, idx);
771                 if (lre->lre_stale)
772                         continue;
773
774                 mirror_count++; /* valid mirror */
775
776                 if (lre->lre_preferred || comp->lo_preferred_mirror < 0)
777                         comp->lo_preferred_mirror = idx;
778         }
779         if (!mirror_count) {
780                 CDEBUG(D_INODE, DFID
781                        " doesn't have any valid mirrors\n",
782                        PFID(lu_object_fid(lov2lu(lov))));
783
784                 comp->lo_preferred_mirror = 0;
785         }
786
787         LASSERT(comp->lo_preferred_mirror >= 0);
788
789         EXIT;
790 out:
791         return result > 0 ? 0 : result;
792 }
793
794 static int lov_init_empty(const struct lu_env *env, struct lov_device *dev,
795                           struct lov_object *lov, struct lov_stripe_md *lsm,
796                           const struct cl_object_conf *conf,
797                           union lov_layout_state *state)
798 {
799         return 0;
800 }
801
802 static int lov_init_released(const struct lu_env *env,
803                              struct lov_device *dev, struct lov_object *lov,
804                              struct lov_stripe_md *lsm,
805                              const struct cl_object_conf *conf,
806                              union lov_layout_state *state)
807 {
808         LASSERT(lsm != NULL);
809         LASSERT(lsm->lsm_is_released);
810         LASSERT(lov->lo_lsm == NULL);
811
812         lov->lo_lsm = lsm_addref(lsm);
813         return 0;
814 }
815
816 static int lov_init_foreign(const struct lu_env *env,
817                             struct lov_device *dev, struct lov_object *lov,
818                             struct lov_stripe_md *lsm,
819                             const struct cl_object_conf *conf,
820                             union lov_layout_state *state)
821 {
822         LASSERT(lsm != NULL);
823         LASSERT(lov->lo_type == LLT_FOREIGN);
824         LASSERT(lov->lo_lsm == NULL);
825
826         lov->lo_lsm = lsm_addref(lsm);
827         return 0;
828 }
829
830 static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
831                             union lov_layout_state *state)
832 {
833         LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED ||
834                 lov->lo_type == LLT_FOREIGN);
835
836         lov_layout_wait(env, lov);
837         return 0;
838 }
839
840 static int lov_delete_composite(const struct lu_env *env,
841                                 struct lov_object *lov,
842                                 union lov_layout_state *state)
843 {
844         struct lov_layout_entry *entry;
845         struct lov_layout_composite *comp = &state->composite;
846
847         ENTRY;
848
849         dump_lsm(D_INODE, lov->lo_lsm);
850
851         lov_layout_wait(env, lov);
852         if (comp->lo_entries)
853                 lov_foreach_layout_entry(lov, entry)
854                         lov_delete_raid0(env, lov, entry);
855
856         RETURN(0);
857 }
858
859 static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov,
860                            union lov_layout_state *state)
861 {
862         LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
863 }
864
865 static void lov_fini_composite(const struct lu_env *env,
866                                struct lov_object *lov,
867                                union lov_layout_state *state)
868 {
869         struct lov_layout_composite *comp = &state->composite;
870         ENTRY;
871
872         if (comp->lo_entries != NULL) {
873                 struct lov_layout_entry *entry;
874
875                 lov_foreach_layout_entry(lov, entry)
876                         entry->lle_comp_ops->lco_fini(env, entry);
877
878                 OBD_FREE(comp->lo_entries,
879                          comp->lo_entry_count * sizeof(*comp->lo_entries));
880                 comp->lo_entries = NULL;
881         }
882
883         if (comp->lo_mirrors != NULL) {
884                 OBD_FREE(comp->lo_mirrors,
885                          comp->lo_mirror_count * sizeof(*comp->lo_mirrors));
886                 comp->lo_mirrors = NULL;
887         }
888
889         memset(comp, 0, sizeof(*comp));
890
891         dump_lsm(D_INODE, lov->lo_lsm);
892         lov_free_memmd(&lov->lo_lsm);
893
894         EXIT;
895 }
896
897 static void lov_fini_released(const struct lu_env *env, struct lov_object *lov,
898                                 union lov_layout_state *state)
899 {
900         ENTRY;
901         dump_lsm(D_INODE, lov->lo_lsm);
902         lov_free_memmd(&lov->lo_lsm);
903         EXIT;
904 }
905
906 static int lov_print_empty(const struct lu_env *env, void *cookie,
907                            lu_printer_t p, const struct lu_object *o)
908 {
909         (*p)(env, cookie, "empty %d\n", lu2lov(o)->lo_layout_invalid);
910         return 0;
911 }
912
913 static int lov_print_composite(const struct lu_env *env, void *cookie,
914                                lu_printer_t p, const struct lu_object *o)
915 {
916         struct lov_object *lov = lu2lov(o);
917         struct lov_stripe_md *lsm = lov->lo_lsm;
918         int i;
919
920         (*p)(env, cookie, "entries: %d, %s, lsm{%p 0x%08X %d %u}:\n",
921              lsm->lsm_entry_count,
922              lov->lo_layout_invalid ? "invalid" : "valid", lsm,
923              lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
924              lsm->lsm_layout_gen);
925
926         for (i = 0; i < lsm->lsm_entry_count; i++) {
927                 struct lov_stripe_md_entry *lse = lsm->lsm_entries[i];
928                 struct lov_layout_entry *lle = lov_entry(lov, i);
929
930                 (*p)(env, cookie,
931                      DEXT ": { 0x%08X, %u, %#x, %u, %#x, %u, %u }\n",
932                      PEXT(&lse->lsme_extent), lse->lsme_magic,
933                      lse->lsme_id, lse->lsme_pattern, lse->lsme_layout_gen,
934                      lse->lsme_flags, lse->lsme_stripe_count,
935                      lse->lsme_stripe_size);
936                 lov_print_raid0(env, cookie, p, lle);
937         }
938
939         return 0;
940 }
941
942 static int lov_print_released(const struct lu_env *env, void *cookie,
943                                 lu_printer_t p, const struct lu_object *o)
944 {
945         struct lov_object       *lov = lu2lov(o);
946         struct lov_stripe_md    *lsm = lov->lo_lsm;
947
948         (*p)(env, cookie,
949                 "released: %s, lsm{%p 0x%08X %d %u}:\n",
950                 lov->lo_layout_invalid ? "invalid" : "valid", lsm,
951                 lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
952                 lsm->lsm_layout_gen);
953         return 0;
954 }
955
956 static int lov_print_foreign(const struct lu_env *env, void *cookie,
957                                 lu_printer_t p, const struct lu_object *o)
958 {
959         struct lov_object       *lov = lu2lov(o);
960         struct lov_stripe_md    *lsm = lov->lo_lsm;
961
962         (*p)(env, cookie,
963                 "foreign: %s, lsm{%p 0x%08X %d %u}:\n",
964                 lov->lo_layout_invalid ? "invalid" : "valid", lsm,
965                 lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
966                 lsm->lsm_layout_gen);
967         (*p)(env, cookie,
968                 "raw_ea_content '%.*s'\n",
969                 (int)lsm->lsm_foreign_size, (char *)lsm_foreign(lsm));
970         return 0;
971 }
972
973 /**
974  * Implements cl_object_operations::coo_attr_get() method for an object
975  * without stripes (LLT_EMPTY layout type).
976  *
977  * The only attribute this layer is authoritative for in this case is
978  * cl_attr::cat_blocks, which is 0.
979  */
980 static int lov_attr_get_empty(const struct lu_env *env, struct cl_object *obj,
981                               struct cl_attr *attr)
982 {
983         attr->cat_blocks = 0;
984         return 0;
985 }
986
987 static int lov_attr_get_composite(const struct lu_env *env,
988                                   struct cl_object *obj,
989                                   struct cl_attr *attr)
990 {
991         struct lov_object       *lov = cl2lov(obj);
992         struct lov_layout_entry *entry;
993         int                      result = 0;
994
995         ENTRY;
996
997         attr->cat_size = 0;
998         attr->cat_blocks = 0;
999         lov_foreach_layout_entry(lov, entry) {
1000                 struct cl_attr *lov_attr = NULL;
1001                 int index = lov_layout_entry_index(lov, entry);
1002
1003                 if (!entry->lle_valid)
1004                         continue;
1005
1006                 /* PFL: This component has not been init-ed. */
1007                 if (!lsm_entry_inited(lov->lo_lsm, index))
1008                         continue;
1009
1010                 result = entry->lle_comp_ops->lco_getattr(env, lov, index,
1011                                                           entry, &lov_attr);
1012                 if (result < 0)
1013                         RETURN(result);
1014
1015                 if (lov_attr == NULL)
1016                         continue;
1017
1018                 CDEBUG(D_INODE, "COMP ID #%i: s=%llu m=%llu a=%llu c=%llu "
1019                        "b=%llu\n", index - 1, lov_attr->cat_size,
1020                        lov_attr->cat_mtime, lov_attr->cat_atime,
1021                        lov_attr->cat_ctime, lov_attr->cat_blocks);
1022
1023                 /* merge results */
1024                 attr->cat_blocks += lov_attr->cat_blocks;
1025                 if (attr->cat_size < lov_attr->cat_size)
1026                         attr->cat_size = lov_attr->cat_size;
1027                 if (attr->cat_kms < lov_attr->cat_kms)
1028                         attr->cat_kms = lov_attr->cat_kms;
1029                 if (attr->cat_atime < lov_attr->cat_atime)
1030                         attr->cat_atime = lov_attr->cat_atime;
1031                 if (attr->cat_ctime < lov_attr->cat_ctime)
1032                         attr->cat_ctime = lov_attr->cat_ctime;
1033                 if (attr->cat_mtime < lov_attr->cat_mtime)
1034                         attr->cat_mtime = lov_attr->cat_mtime;
1035         }
1036
1037         RETURN(0);
1038 }
1039
1040 static int lov_flush_composite(const struct lu_env *env,
1041                                struct cl_object *obj,
1042                                struct ldlm_lock *lock)
1043 {
1044         struct lov_object *lov = cl2lov(obj);
1045         struct lovsub_object *lovsub;
1046
1047         ENTRY;
1048
1049         if (!lsme_is_dom(lov->lo_lsm->lsm_entries[0]))
1050                 RETURN(-EINVAL);
1051
1052         lovsub = lov->u.composite.lo_entries[0].lle_dom.lo_dom;
1053         RETURN(cl_object_flush(env, lovsub2cl(lovsub), lock));
1054 }
1055
1056 static const struct lov_layout_operations lov_dispatch[] = {
1057         [LLT_EMPTY] = {
1058                 .llo_init      = lov_init_empty,
1059                 .llo_delete    = lov_delete_empty,
1060                 .llo_fini      = lov_fini_empty,
1061                 .llo_print     = lov_print_empty,
1062                 .llo_page_init = lov_page_init_empty,
1063                 .llo_lock_init = lov_lock_init_empty,
1064                 .llo_io_init   = lov_io_init_empty,
1065                 .llo_getattr   = lov_attr_get_empty,
1066         },
1067         [LLT_RELEASED] = {
1068                 .llo_init      = lov_init_released,
1069                 .llo_delete    = lov_delete_empty,
1070                 .llo_fini      = lov_fini_released,
1071                 .llo_print     = lov_print_released,
1072                 .llo_page_init = lov_page_init_empty,
1073                 .llo_lock_init = lov_lock_init_empty,
1074                 .llo_io_init   = lov_io_init_released,
1075                 .llo_getattr   = lov_attr_get_empty,
1076         },
1077         [LLT_COMP] = {
1078                 .llo_init      = lov_init_composite,
1079                 .llo_delete    = lov_delete_composite,
1080                 .llo_fini      = lov_fini_composite,
1081                 .llo_print     = lov_print_composite,
1082                 .llo_page_init = lov_page_init_composite,
1083                 .llo_lock_init = lov_lock_init_composite,
1084                 .llo_io_init   = lov_io_init_composite,
1085                 .llo_getattr   = lov_attr_get_composite,
1086                 .llo_flush     = lov_flush_composite,
1087         },
1088         [LLT_FOREIGN] = {
1089                 .llo_init      = lov_init_foreign,
1090                 .llo_delete    = lov_delete_empty,
1091                 .llo_fini      = lov_fini_released,
1092                 .llo_print     = lov_print_foreign,
1093                 .llo_page_init = lov_page_init_foreign,
1094                 .llo_lock_init = lov_lock_init_empty,
1095                 .llo_io_init   = lov_io_init_empty,
1096                 .llo_getattr   = lov_attr_get_empty,
1097         },
1098 };
1099
1100 /**
1101  * Performs a double-dispatch based on the layout type of an object.
1102  */
1103 #define LOV_2DISPATCH_NOLOCK(obj, op, ...)              \
1104 ({                                                      \
1105         struct lov_object *__obj = (obj);               \
1106         enum lov_layout_type __llt;                     \
1107                                                         \
1108         __llt = __obj->lo_type;                         \
1109         LASSERT(__llt < ARRAY_SIZE(lov_dispatch));      \
1110         lov_dispatch[__llt].op(__VA_ARGS__);            \
1111 })
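/*
 * Example (illustrative only, mirroring lov_object_print() below):
 * dispatch llo_print on the object's current layout type without taking
 * lo_type_guard:
 *
 *	return LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
 */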
1112
1113 /**
1114  * Return lov_layout_type associated with a given lsm
1115  */
1116 static enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
1117 {
1118         if (lsm == NULL)
1119                 return LLT_EMPTY;
1120
1121         if (lsm->lsm_is_released)
1122                 return LLT_RELEASED;
1123
1124         if (lsm->lsm_magic == LOV_MAGIC_V1 ||
1125             lsm->lsm_magic == LOV_MAGIC_V3 ||
1126             lsm->lsm_magic == LOV_MAGIC_COMP_V1)
1127                 return LLT_COMP;
1128
1129         if (lsm->lsm_magic == LOV_MAGIC_FOREIGN)
1130                 return LLT_FOREIGN;
1131
1132         return LLT_EMPTY;
1133 }
1134
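/*
 * Layout configuration locking: lo_type_guard protects lo_type and the
 * layout state. lov_conf_lock() takes it for write and records the owning
 * task in lo_owner, so the same thread can re-enter read-side paths via
 * lov_conf_freeze()/lov_conf_thaw() without blocking on itself.
 */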
1135 static inline void lov_conf_freeze(struct lov_object *lov)
1136 {
1137         CDEBUG(D_INODE, "To take share lov(%p) owner %p/%p\n",
1138                 lov, lov->lo_owner, current);
1139         if (lov->lo_owner != current)
1140                 down_read(&lov->lo_type_guard);
1141 }
1142
1143 static inline void lov_conf_thaw(struct lov_object *lov)
1144 {
1145         CDEBUG(D_INODE, "To release share lov(%p) owner %p/%p\n",
1146                 lov, lov->lo_owner, current);
1147         if (lov->lo_owner != current)
1148                 up_read(&lov->lo_type_guard);
1149 }
1150
1151 #define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...)                       \
1152 ({                                                                      \
1153         struct lov_object                      *__obj = (obj);          \
1154         int                                     __lock = !!(lock);      \
1155         typeof(lov_dispatch[0].op(__VA_ARGS__)) __result;               \
1156                                                                         \
1157         if (__lock)                                                     \
1158                 lov_conf_freeze(__obj);                                 \
1159         __result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__);          \
1160         if (__lock)                                                     \
1161                 lov_conf_thaw(__obj);                                   \
1162         __result;                                                       \
1163 })
1164
1165 /**
1166  * Performs a locked double-dispatch based on the layout type of an object.
1167  */
1168 #define LOV_2DISPATCH(obj, op, ...)                     \
1169         LOV_2DISPATCH_MAYLOCK(obj, op, 1, __VA_ARGS__)
1170
1171 #define LOV_2DISPATCH_VOID(obj, op, ...)                                \
1172 do {                                                                    \
1173         struct lov_object                      *__obj = (obj);          \
1174         enum lov_layout_type                    __llt;                  \
1175                                                                         \
1176         lov_conf_freeze(__obj);                                         \
1177         __llt = __obj->lo_type;                                         \
1178         LASSERT(__llt < ARRAY_SIZE(lov_dispatch));                      \
1179         lov_dispatch[__llt].op(__VA_ARGS__);                            \
1180         lov_conf_thaw(__obj);                                           \
1181 } while (0)
1182
1183 static void lov_conf_lock(struct lov_object *lov)
1184 {
1185         LASSERT(lov->lo_owner != current);
1186         down_write(&lov->lo_type_guard);
1187         LASSERT(lov->lo_owner == NULL);
1188         lov->lo_owner = current;
1189         CDEBUG(D_INODE, "Took exclusive lov(%p) owner %p\n",
1190                 lov, lov->lo_owner);
1191 }
1192
1193 static void lov_conf_unlock(struct lov_object *lov)
1194 {
1195         CDEBUG(D_INODE, "To release exclusive lov(%p) owner %p\n",
1196                 lov, lov->lo_owner);
1197         lov->lo_owner = NULL;
1198         up_write(&lov->lo_type_guard);
1199 }
1200
1201 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
1202 {
1203         struct l_wait_info lwi = { 0 };
1204         ENTRY;
1205
1206         while (atomic_read(&lov->lo_active_ios) > 0) {
1207                 CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
1208                         PFID(lu_object_fid(lov2lu(lov))),
1209                         atomic_read(&lov->lo_active_ios));
1210
1211                 l_wait_event(lov->lo_waitq,
1212                              atomic_read(&lov->lo_active_ios) == 0, &lwi);
1213         }
1214         RETURN(0);
1215 }
1216
1217 static int lov_layout_change(const struct lu_env *unused,
1218                              struct lov_object *lov, struct lov_stripe_md *lsm,
1219                              const struct cl_object_conf *conf)
1220 {
1221         enum lov_layout_type llt = lov_type(lsm);
1222         union lov_layout_state *state = &lov->u;
1223         const struct lov_layout_operations *old_ops;
1224         const struct lov_layout_operations *new_ops;
1225         struct lov_device *lov_dev = lov_object_dev(lov);
1226         struct lu_env *env;
1227         __u16 refcheck;
1228         int rc;
1229         ENTRY;
1230
1231         LASSERT(lov->lo_type < ARRAY_SIZE(lov_dispatch));
1232
1233         env = cl_env_get(&refcheck);
1234         if (IS_ERR(env))
1235                 RETURN(PTR_ERR(env));
1236
1237         LASSERT(llt < ARRAY_SIZE(lov_dispatch));
1238
1239         CDEBUG(D_INODE, DFID" from %s to %s\n",
1240                PFID(lu_object_fid(lov2lu(lov))),
1241                llt2str(lov->lo_type), llt2str(llt));
1242
1243         old_ops = &lov_dispatch[lov->lo_type];
1244         new_ops = &lov_dispatch[llt];
1245
1246         rc = cl_object_prune(env, &lov->lo_cl);
1247         if (rc != 0)
1248                 GOTO(out, rc);
1249
1250         rc = old_ops->llo_delete(env, lov, &lov->u);
1251         if (rc != 0)
1252                 GOTO(out, rc);
1253
1254         old_ops->llo_fini(env, lov, &lov->u);
1255
1256         LASSERT(atomic_read(&lov->lo_active_ios) == 0);
1257
1258         CDEBUG(D_INODE, DFID "Apply new layout lov %p, type %d\n",
1259                PFID(lu_object_fid(lov2lu(lov))), lov, llt);
1260
1261         /* page bufsize fixup */
1262         cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
1263                 lov_page_slice_fixup(lov, NULL);
1264
1265         lov->lo_type = llt;
1266         rc = new_ops->llo_init(env, lov_dev, lov, lsm, conf, state);
1267         if (rc != 0) {
1268                 struct obd_device *obd = lov2obd(lov_dev->ld_lov);
1269
1270                 CERROR("%s: cannot apply new layout on "DFID" : rc = %d\n",
1271                        obd->obd_name, PFID(lu_object_fid(lov2lu(lov))), rc);
1272                 new_ops->llo_delete(env, lov, state);
1273                 new_ops->llo_fini(env, lov, state);
1274                 /* this file becomes an EMPTY file. */
1275                 lov->lo_type = LLT_EMPTY;
1276                 GOTO(out, rc);
1277         }
1278
1279 out:
1280         cl_env_put(env, &refcheck);
1281         RETURN(rc);
1282 }
1283
1284 /*****************************************************************************
1285  *
1286  * Lov object operations.
1287  *
1288  */
1289 int lov_object_init(const struct lu_env *env, struct lu_object *obj,
1290                     const struct lu_object_conf *conf)
1291 {
1292         struct lov_object            *lov   = lu2lov(obj);
1293         struct lov_device            *dev   = lov_object_dev(lov);
1294         const struct cl_object_conf  *cconf = lu2cl_conf(conf);
1295         union lov_layout_state       *set   = &lov->u;
1296         const struct lov_layout_operations *ops;
1297         struct lov_stripe_md *lsm = NULL;
1298         int rc;
1299         ENTRY;
1300
1301         init_rwsem(&lov->lo_type_guard);
1302         atomic_set(&lov->lo_active_ios, 0);
1303         init_waitqueue_head(&lov->lo_waitq);
1304         cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
1305
1306         lov->lo_type = LLT_EMPTY;
1307         if (cconf->u.coc_layout.lb_buf != NULL) {
1308                 lsm = lov_unpackmd(dev->ld_lov,
1309                                    cconf->u.coc_layout.lb_buf,
1310                                    cconf->u.coc_layout.lb_len);
1311                 if (IS_ERR(lsm))
1312                         RETURN(PTR_ERR(lsm));
1313
1314                 dump_lsm(D_INODE, lsm);
1315         }
1316
1317         /* no locking is necessary, as object is being created */
1318         lov->lo_type = lov_type(lsm);
1319         ops = &lov_dispatch[lov->lo_type];
1320         rc = ops->llo_init(env, dev, lov, lsm, cconf, set);
1321         if (rc != 0)
1322                 GOTO(out_lsm, rc);
1323
1324 out_lsm:
1325         lov_lsm_put(lsm);
1326
1327         RETURN(rc);
1328 }
1329
1330 static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
1331                         const struct cl_object_conf *conf)
1332 {
1333         struct lov_stripe_md    *lsm = NULL;
1334         struct lov_object       *lov = cl2lov(obj);
1335         int                      result = 0;
1336         ENTRY;
1337
1338         if (conf->coc_opc == OBJECT_CONF_SET &&
1339             conf->u.coc_layout.lb_buf != NULL) {
1340                 lsm = lov_unpackmd(lov_object_dev(lov)->ld_lov,
1341                                    conf->u.coc_layout.lb_buf,
1342                                    conf->u.coc_layout.lb_len);
1343                 if (IS_ERR(lsm))
1344                         RETURN(PTR_ERR(lsm));
1345                 dump_lsm(D_INODE, lsm);
1346         }
1347
1348         lov_conf_lock(lov);
1349         if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
1350                 lov->lo_layout_invalid = true;
1351                 GOTO(out, result = 0);
1352         }
1353
1354         if (conf->coc_opc == OBJECT_CONF_WAIT) {
1355                 if (lov->lo_layout_invalid &&
1356                     atomic_read(&lov->lo_active_ios) > 0) {
1357                         lov_conf_unlock(lov);
1358                         result = lov_layout_wait(env, lov);
1359                         lov_conf_lock(lov);
1360                 }
1361                 GOTO(out, result);
1362         }
1363
1364         LASSERT(conf->coc_opc == OBJECT_CONF_SET);
1365
1366         if ((lsm == NULL && lov->lo_lsm == NULL) ||
1367             ((lsm != NULL && lov->lo_lsm != NULL) &&
1368              (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
1369              (lov->lo_lsm->lsm_entries[0]->lsme_pattern ==
1370               lsm->lsm_entries[0]->lsme_pattern))) {
1371                 /* same version of layout */
1372                 lov->lo_layout_invalid = false;
1373                 GOTO(out, result = 0);
1374         }
1375
1376         /* the layout is about to change; check whether active IO still exists */
1377         if (atomic_read(&lov->lo_active_ios) > 0) {
1378                 lov->lo_layout_invalid = true;
1379                 GOTO(out, result = -EBUSY);
1380         }
1381
1382         result = lov_layout_change(env, lov, lsm, conf);
1383         lov->lo_layout_invalid = result != 0;
1384         EXIT;
1385
1386 out:
1387         lov_conf_unlock(lov);
1388         lov_lsm_put(lsm);
1389         CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
1390                PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
1391         RETURN(result);
1392 }
1393
1394 static void lov_object_delete(const struct lu_env *env, struct lu_object *obj)
1395 {
1396         struct lov_object *lov = lu2lov(obj);
1397
1398         ENTRY;
1399         LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
1400         EXIT;
1401 }
1402
1403 static void lov_object_free(const struct lu_env *env, struct lu_object *obj)
1404 {
1405         struct lov_object *lov = lu2lov(obj);
1406
1407         ENTRY;
1408         LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
1409         lu_object_fini(obj);
1410         OBD_SLAB_FREE_PTR(lov, lov_object_kmem);
1411         EXIT;
1412 }
1413
1414 static int lov_object_print(const struct lu_env *env, void *cookie,
1415                             lu_printer_t p, const struct lu_object *o)
1416 {
1417         return LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
1418 }
1419
1420 int lov_page_init(const struct lu_env *env, struct cl_object *obj,
1421                   struct cl_page *page, pgoff_t index)
1422 {
1423         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page,
1424                                     index);
1425 }
1426
1427 /**
1428  * Implements cl_object_operations::clo_io_init() method for lov
1429  * layer. Dispatches to the appropriate layout io initialization method.
1430  */
1431 int lov_io_init(const struct lu_env *env, struct cl_object *obj,
1432                 struct cl_io *io)
1433 {
1434         CL_IO_SLICE_CLEAN(lov_env_io(env), lis_preserved);
1435
1436         CDEBUG(D_INODE, DFID "io %p type %d ignore/verify layout %d/%d\n",
1437                PFID(lu_object_fid(&obj->co_lu)), io, io->ci_type,
1438                io->ci_ignore_layout, io->ci_verify_layout);
1439
1440         /* IO of type CIT_MISC with ci_ignore_layout set is usually invoked
1441          * from the OSC layer. It should not take the lov layout conf lock in
1442          * that case, because as long as the OSC object exists, the layout
1443          * cannot be reconfigured. */
1444         return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_io_init,
1445                         !(io->ci_ignore_layout && io->ci_type == CIT_MISC),
1446                         env, obj, io);
1447 }
1448
1449 /**
1450  * An implementation of cl_object_operations::clo_attr_get() method for lov
1451  * layer. For raid0 layout this collects and merges attributes of all
1452  * sub-objects.
1453  */
1454 static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
1455                         struct cl_attr *attr)
1456 {
1457         /* do not take the lock, as this function is called under a
1458          * spin-lock. The layout is protected from changes by ongoing IO. */
1459         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
1460 }
1461
1462 static int lov_attr_update(const struct lu_env *env, struct cl_object *obj,
1463                            const struct cl_attr *attr, unsigned valid)
1464 {
1465         /*
1466          * No dispatch is required here, as no layout implements this.
1467          */
1468         return 0;
1469 }
1470
1471 int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
1472                   struct cl_lock *lock, const struct cl_io *io)
1473 {
1474         /* No need to lock because we hold a reference on the layout. */
1475         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_lock_init, env, obj, lock,
1476                                     io);
1477 }
1478
1479 /**
1480  * We calculate on which OST the mapping will end. If the length of mapping
1481  * is greater than (stripe_size * stripe_count) then the last_stripe will
1482  * be the one just before start_stripe. Otherwise we check whether the
1483  * mapping intersects each OST and find last_stripe.
1484  * This function returns the last_stripe and also sets the stripe_count
1485  * over which the mapping is spread.
1486  *
1487  * \param lsm [in]              striping information for the file
1488  * \param index [in]            stripe component index
1489  * \param ext [in]              logical extent of mapping
1490  * \param start_stripe [in]     starting stripe of the mapping
1491  * \param stripe_count [out]    the number of stripes across which to map is
1492  *                              returned
1493  *
1494  * \retval last_stripe          return the last stripe of the mapping
1495  */
1496 static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm, int index,
1497                                    struct lu_extent *ext,
1498                                    int start_stripe, int *stripe_count)
1499 {
1500         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1501         int last_stripe;
1502         u64 obd_start;
1503         u64 obd_end;
1504         int i, j;
1505
1506         if (ext->e_end - ext->e_start >
1507             lsme->lsme_stripe_size * lsme->lsme_stripe_count) {
1508                 last_stripe = (start_stripe < 1 ? lsme->lsme_stripe_count - 1 :
1509                                                   start_stripe - 1);
1510                 *stripe_count = lsme->lsme_stripe_count;
1511         } else {
1512                 for (j = 0, i = start_stripe; j < lsme->lsme_stripe_count;
1513                      i = (i + 1) % lsme->lsme_stripe_count, j++) {
1514                         if ((lov_stripe_intersects(lsm, index,  i, ext,
1515                                                    &obd_start, &obd_end)) == 0)
1516                                 break;
1517                 }
1518                 *stripe_count = j;
1519                 last_stripe = (start_stripe + j - 1) % lsme->lsme_stripe_count;
1520         }
1521
1522         return last_stripe;
1523 }
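
/*
 * Illustrative example (hypothetical values, not from the original source):
 * with lsme_stripe_count = 4, lsme_stripe_size = 1 MiB, start_stripe = 1 and
 * a 2 MiB mapping, the mapping is smaller than the full stripe width (4 MiB),
 * so the loop above probes stripes 1, 2, ... in order; if only stripes 1 and
 * 2 intersect the extent, *stripe_count is set to 2 and
 * last_stripe = (1 + 2 - 1) % 4 = 2.
 */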
1524
1525 /**
1526  * Set fe_device and copy extents from local buffer into main return buffer.
1527  *
1528  * \param fiemap [out]          fiemap to hold all extents
1529  * \param lcl_fm_ext [in]       array of fiemap extents obtained from the OSC layer
1530  * \param ost_index [in]        OST index to be written into the fm_device
1531  *                              field for each extent
1532  * \param ext_count [in]        number of extents to be copied
1533  * \param current_extent [in]   where to start copying in the extent array
1534  */
1535 static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap,
1536                                          struct fiemap_extent *lcl_fm_ext,
1537                                          int ost_index, unsigned int ext_count,
1538                                          int current_extent)
1539 {
1540         char            *to;
1541         unsigned int    ext;
1542
1543         for (ext = 0; ext < ext_count; ext++) {
1544                 lcl_fm_ext[ext].fe_device = ost_index;
1545                 lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
1546         }
1547
1548         /* Copy fm_extent's from fm_local to return buffer */
1549         to = (char *)fiemap + fiemap_count_to_size(current_extent);
1550         memcpy(to, lcl_fm_ext, ext_count * sizeof(struct fiemap_extent));
1551 }
1552
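/* Size of the temporary fiemap buffer (fm_local) allocated once per request
 * in lov_object_fiemap() and reused for each per-stripe cl_object_fiemap()
 * call. */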
1553 #define FIEMAP_BUFFER_SIZE 4096
1554
1555  * A non-zero fe_logical indicates that this is a continuation FIEMAP
1556  * call. The local end offset and the device are sent in the first
1557  * fm_extent. From the OST index saved in fe_device this function
1558  * calculates the stripe number at which mapping is to be restarted.
1559  *
1560  * It returns fm_end_offset, which is the in-OST offset at which
1561  * mapping should be restarted. If fm_end_offset = 0 is returned then the
1562  * caller will re-calculate the proper offset in the next stripe.
1563  * will re-calculate proper offset in next stripe.
1564  * Note that the first extent is passed to lov_get_info via the value field.
1565  *
1566  * \param fiemap [in]           fiemap request header
1567  * \param lsm [in]              striping information for the file
1568  * \param index [in]            stripe component index
1569  * \param ext [in]              logical extent of mapping
1570  * \param start_stripe [out]    the starting stripe is returned here
1571  */
1572 static u64 fiemap_calc_fm_end_offset(struct fiemap *fiemap,
1573                                      struct lov_stripe_md *lsm,
1574                                      int index, struct lu_extent *ext,
1575                                      int *start_stripe)
1576 {
1577         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1578         u64 local_end = fiemap->fm_extents[0].fe_logical;
1579         u64 lun_start;
1580         u64 lun_end;
1581         u64 fm_end_offset;
1582         int stripe_no = -1;
1583         int i;
1584
1585         if (fiemap->fm_extent_count == 0 ||
1586             fiemap->fm_extents[0].fe_logical == 0)
1587                 return 0;
1588
1589         /* Find out stripe_no from ost_index saved in the fe_device */
1590         for (i = 0; i < lsme->lsme_stripe_count; i++) {
1591                 struct lov_oinfo *oinfo = lsme->lsme_oinfo[i];
1592
1593                 if (lov_oinfo_is_dummy(oinfo))
1594                         continue;
1595
1596                 if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) {
1597                         stripe_no = i;
1598                         break;
1599                 }
1600         }
1601
1602         if (stripe_no == -1)
1603                 return -EINVAL;
1604
1605         /* If we have finished mapping on previous device, shift logical
1606          * offset to start of next device */
1607         if (lov_stripe_intersects(lsm, index, stripe_no, ext,
1608                                    &lun_start, &lun_end) != 0 &&
1609             local_end < lun_end) {
1610                 fm_end_offset = local_end;
1611                 *start_stripe = stripe_no;
1612         } else {
1613                 /* This is a special value to indicate that caller should
1614                  * calculate offset in next stripe. */
1615                 fm_end_offset = 0;
1616                 *start_stripe = (stripe_no + 1) % lsme->lsme_stripe_count;
1617         }
1618
1619         return fm_end_offset;
1620 }
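
/*
 * Illustrative continuation example (hypothetical values): if a previous
 * FIEMAP call stopped at in-OST offset fe_logical = 3 MiB on the OST whose
 * index was saved in fe_device, and that stripe still intersects the
 * requested extent (lun_end > 3 MiB), mapping resumes at 3 MiB on the same
 * stripe; otherwise 0 is returned and mapping resumes on the next stripe,
 * with the caller recomputing the offset.
 */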
1621
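/*
 * Iteration state shared between lov_object_fiemap() and fiemap_for_stripe():
 * the scratch fiemap buffer, the logical extent being mapped, the current and
 * still-needed extent counts, the start/last stripes of the mapping, and
 * flags recording whether the current device, the last stripe, or the whole
 * request has been finished.
 */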
1622 struct fiemap_state {
1623         struct fiemap           *fs_fm;
1624         struct lu_extent        fs_ext;
1625         u64                     fs_length;
1626         u64                     fs_end_offset;
1627         int                     fs_cur_extent;
1628         int                     fs_cnt_need;
1629         int                     fs_start_stripe;
1630         int                     fs_last_stripe;
1631         bool                    fs_device_done;
1632         bool                    fs_finish_stripe;
1633         bool                    fs_enough;
1634 };
1635
1636 static struct cl_object *lov_find_subobj(const struct lu_env *env,
1637                                          struct lov_object *lov,
1638                                          struct lov_stripe_md *lsm,
1639                                          int index)
1640 {
1641         struct lov_device       *dev = lu2lov_dev(lov2lu(lov)->lo_dev);
1642         struct lov_thread_info  *lti = lov_env_info(env);
1643         struct lu_fid           *ofid = &lti->lti_fid;
1644         struct lov_oinfo        *oinfo;
1645         struct cl_device        *subdev;
1646         int                     entry = lov_comp_entry(index);
1647         int                     stripe = lov_comp_stripe(index);
1648         int                     ost_idx;
1649         int                     rc;
1650         struct cl_object        *result;
1651
1652         if (lov->lo_type != LLT_COMP)
1653                 GOTO(out, result = NULL);
1654
1655         if (entry >= lsm->lsm_entry_count ||
1656             stripe >= lsm->lsm_entries[entry]->lsme_stripe_count)
1657                 GOTO(out, result = NULL);
1658
1659         oinfo = lsm->lsm_entries[entry]->lsme_oinfo[stripe];
1660         ost_idx = oinfo->loi_ost_idx;
1661         rc = ostid_to_fid(ofid, &oinfo->loi_oi, ost_idx);
1662         if (rc != 0)
1663                 GOTO(out, result = NULL);
1664
1665         subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
1666         result = lov_sub_find(env, subdev, ofid, NULL);
1667 out:
1668         if (result == NULL)
1669                 result = ERR_PTR(-EINVAL);
1670         return result;
1671 }
1672
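/**
 * Map one stripe of one layout component, copying the retrieved extents into
 * the main @fiemap buffer (or only counting them if fiemap->fm_extent_count
 * is zero).
 *
 * On return @fs is updated: fs_device_done, fs_finish_stripe and fs_enough
 * tell the caller whether this device, the last stripe, or the whole request
 * has been completed.
 *
 * \retval 0    success (possibly with nothing to map on this stripe)
 * \retval < 0  error
 */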
1673 int fiemap_for_stripe(const struct lu_env *env, struct cl_object *obj,
1674                       struct lov_stripe_md *lsm, struct fiemap *fiemap,
1675                       size_t *buflen, struct ll_fiemap_info_key *fmkey,
1676                       int index, int stripeno, struct fiemap_state *fs)
1677 {
1678         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1679         struct cl_object *subobj;
1680         struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov;
1681         struct fiemap_extent *fm_ext = &fs->fs_fm->fm_extents[0];
1682         u64 req_fm_len; /* Stores length of required mapping */
1683         u64 len_mapped_single_call;
1684         u64 lun_start;
1685         u64 lun_end;
1686         u64 obd_object_end;
1687         unsigned int ext_count;
1688         /* EOF for object */
1689         bool ost_eof = false;
1690         /* done with required mapping for this OST? */
1691         bool ost_done = false;
1692         int ost_index;
1693         int rc = 0;
1694
1695         fs->fs_device_done = false;
1696         /* Find out range of mapping on this stripe */
1697         if ((lov_stripe_intersects(lsm, index, stripeno, &fs->fs_ext,
1698                                    &lun_start, &obd_object_end)) == 0)
1699                 return 0;
1700
1701         if (lov_oinfo_is_dummy(lsme->lsme_oinfo[stripeno]))
1702                 return -EIO;
1703
1704         /* If this is a continuation FIEMAP call and we are on
1705          * starting stripe then lun_start needs to be set to
1706          * end_offset */
1707         if (fs->fs_end_offset != 0 && stripeno == fs->fs_start_stripe)
1708                 lun_start = fs->fs_end_offset;
1709         lun_end = lov_size_to_stripe(lsm, index, fs->fs_ext.e_end, stripeno);
1710         if (lun_start == lun_end)
1711                 return 0;
1712
1713         req_fm_len = obd_object_end - lun_start + 1;
1714         fs->fs_fm->fm_length = 0;
1715         len_mapped_single_call = 0;
1716
1717         /* find lovsub object */
1718         subobj = lov_find_subobj(env, cl2lov(obj), lsm,
1719                                  lov_comp_index(index, stripeno));
1720         if (IS_ERR(subobj))
1721                 return PTR_ERR(subobj);
1722         /* If the output buffer is very large and the objects have many
1723          * extents we may need to loop on a single OST repeatedly */
1724         do {
1725                 if (fiemap->fm_extent_count > 0) {
1726                         /* Don't get too many extents. */
1727                         if (fs->fs_cur_extent + fs->fs_cnt_need >
1728                             fiemap->fm_extent_count)
1729                                 fs->fs_cnt_need = fiemap->fm_extent_count -
1730                                                   fs->fs_cur_extent;
1731                 }
1732
1733                 lun_start += len_mapped_single_call;
1734                 fs->fs_fm->fm_length = req_fm_len - len_mapped_single_call;
1735                 req_fm_len = fs->fs_fm->fm_length;
1736                 /**
1737                  * If we've already collected enough extents, request one more
1738                  * to see whether we coincidentally finished all available
1739                  * extents, so that FIEMAP_EXTENT_LAST would be set.
1740                  */
1741                 fs->fs_fm->fm_extent_count = fs->fs_enough ?
1742                                              1 : fs->fs_cnt_need;
1743                 fs->fs_fm->fm_mapped_extents = 0;
1744                 fs->fs_fm->fm_flags = fiemap->fm_flags;
1745
1746                 ost_index = lsme->lsme_oinfo[stripeno]->loi_ost_idx;
1747
1748                 if (ost_index < 0 || ost_index >= lov->desc.ld_tgt_count)
1749                         GOTO(obj_put, rc = -EINVAL);
1750                 /* If OST is inactive, return extent with UNKNOWN flag. */
1751                 if (!lov->lov_tgts[ost_index]->ltd_active) {
1752                         fs->fs_fm->fm_flags |= FIEMAP_EXTENT_LAST;
1753                         fs->fs_fm->fm_mapped_extents = 1;
1754
1755                         fm_ext[0].fe_logical = lun_start;
1756                         fm_ext[0].fe_length = obd_object_end - lun_start + 1;
1757                         fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
1758
1759                         goto inactive_tgt;
1760                 }
1761
1762                 fs->fs_fm->fm_start = lun_start;
1763                 fs->fs_fm->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
1764                 memcpy(&fmkey->lfik_fiemap, fs->fs_fm, sizeof(*fs->fs_fm));
1765                 *buflen = fiemap_count_to_size(fs->fs_fm->fm_extent_count);
1766
1767                 rc = cl_object_fiemap(env, subobj, fmkey, fs->fs_fm, buflen);
1768                 if (rc != 0)
1769                         GOTO(obj_put, rc);
1770 inactive_tgt:
1771                 ext_count = fs->fs_fm->fm_mapped_extents;
1772                 if (ext_count == 0) {
1773                         ost_done = true;
1774                         fs->fs_device_done = true;
1775                         /* If the last stripe has a hole at the end,
1776                          * we need to return */
1777                         if (stripeno == fs->fs_last_stripe) {
1778                                 fiemap->fm_mapped_extents = 0;
1779                                 fs->fs_finish_stripe = true;
1780                                 GOTO(obj_put, rc);
1781                         }
1782                         break;
1783                 } else if (fs->fs_enough) {
1784                         /*
1785                          * We've collected enough extents and there are
1786                          * more extents after it.
1787                          */
1788                         GOTO(obj_put, rc);
1789                 }
1790
1791                 /* If we just need the number of extents, go to the next device */
1792                 if (fiemap->fm_extent_count == 0) {
1793                         fs->fs_cur_extent += ext_count;
1794                         break;
1795                 }
1796
1797                 /* prepare to copy retrieved map extents */
1798                 len_mapped_single_call = fm_ext[ext_count - 1].fe_logical +
1799                                          fm_ext[ext_count - 1].fe_length -
1800                                          lun_start;
1801
1802                 /* Have we finished mapping on this device? */
1803                 if (req_fm_len <= len_mapped_single_call) {
1804                         ost_done = true;
1805                         fs->fs_device_done = true;
1806                 }
1807
1808                 /* Clear the EXTENT_LAST flag which can be present on
1809                  * the last extent */
1810                 if (fm_ext[ext_count - 1].fe_flags & FIEMAP_EXTENT_LAST)
1811                         fm_ext[ext_count - 1].fe_flags &= ~FIEMAP_EXTENT_LAST;
1812                 if (lov_stripe_size(lsm, index,
1813                                     fm_ext[ext_count - 1].fe_logical +
1814                                     fm_ext[ext_count - 1].fe_length,
1815                                     stripeno) >= fmkey->lfik_oa.o_size) {
1816                         ost_eof = true;
1817                         fs->fs_device_done = true;
1818                 }
1819
1820                 fiemap_prepare_and_copy_exts(fiemap, fm_ext, ost_index,
1821                                              ext_count, fs->fs_cur_extent);
1822                 fs->fs_cur_extent += ext_count;
1823
1824                 /* Ran out of available extents? */
1825                 if (fs->fs_cur_extent >= fiemap->fm_extent_count)
1826                         fs->fs_enough = true;
1827         } while (!ost_done && !ost_eof);
1828
1829         if (stripeno == fs->fs_last_stripe)
1830                 fs->fs_finish_stripe = true;
1831 obj_put:
1832         cl_object_put(env, subobj);
1833
1834         return rc;
1835 }
1836
1837 /**
1838  * Break down the FIEMAP request and send appropriate calls to individual OSTs.
1839  * This also handles the restarting of FIEMAP calls in case the mapping
1840  * overflows the available number of extents in a single call.
1841  *
1842  * \param env [in]              lustre environment
1843  * \param obj [in]              file object
1844  * \param fmkey [in]            fiemap request header and other info
1845  * \param fiemap [out]          fiemap buffer holding retrieved map extents
1846  * \param buflen [in/out]       max buffer length of @fiemap; when iterating
1847  *                              over each OST it is used to limit the mapping needed
1848  * \retval 0    success
1849  * \retval < 0  error
1850  */
1851 static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj,
1852                              struct ll_fiemap_info_key *fmkey,
1853                              struct fiemap *fiemap, size_t *buflen)
1854 {
1855         struct lov_stripe_md_entry *lsme;
1856         struct lov_stripe_md *lsm;
1857         struct fiemap *fm_local = NULL;
1858         loff_t whole_start;
1859         loff_t whole_end;
1860         int entry;
1861         int start_entry;
1862         int end_entry;
1863         int cur_stripe = 0;
1864         int stripe_count;
1865         unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
1866         int rc = 0;
1867         struct fiemap_state fs = { 0 };
1868         ENTRY;
1869
1870         lsm = lov_lsm_addref(cl2lov(obj));
1871         if (lsm == NULL) {
1872                 /* no extent: there is no object for mapping */
1873                 fiemap->fm_mapped_extents = 0;
1874                 return 0;
1875         }
1876
1877         if (!(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) {
1878                 /**
1879                  * If the entry count > 1 or stripe_count > 1 and the
1880                  * application does not understand DEVICE_ORDER flag,
1881                  * it cannot interpret the extents correctly.
1882                  */
1883                 if (lsm->lsm_entry_count > 1 ||
1884                     (lsm->lsm_entry_count == 1 &&
1885                      lsm->lsm_entries[0]->lsme_stripe_count > 1))
1886                         GOTO(out_lsm, rc = -ENOTSUPP);
1887         }
1888
1889         /* No support for DOM layout yet. */
1890         if (lsme_is_dom(lsm->lsm_entries[0]))
1891                 GOTO(out_lsm, rc = -ENOTSUPP);
1892
1893         if (lsm->lsm_is_released) {
1894                 if (fiemap->fm_start < fmkey->lfik_oa.o_size) {
1895                         /**
1896                          * Released file: return a minimal FIEMAP if the
1897                          * request falls within the file size.
1898                          */
1899                         fiemap->fm_mapped_extents = 1;
1900                         fiemap->fm_extents[0].fe_logical = fiemap->fm_start;
1901                         if (fiemap->fm_start + fiemap->fm_length <
1902                             fmkey->lfik_oa.o_size)
1903                                 fiemap->fm_extents[0].fe_length =
1904                                         fiemap->fm_length;
1905                         else
1906                                 fiemap->fm_extents[0].fe_length =
1907                                         fmkey->lfik_oa.o_size -
1908                                         fiemap->fm_start;
1909                         fiemap->fm_extents[0].fe_flags |=
1910                                 FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST;
1911                 }
1912                 GOTO(out_lsm, rc = 0);
1913         }
1914
1915         /* Do not allocate more than is needed to hold fm_extent_count extents. */
1916         if (fiemap_count_to_size(fiemap->fm_extent_count) < buffer_size)
1917                 buffer_size = fiemap_count_to_size(fiemap->fm_extent_count);
1918
1919         OBD_ALLOC_LARGE(fm_local, buffer_size);
1920         if (fm_local == NULL)
1921                 GOTO(out_lsm, rc = -ENOMEM);
1922
1923         /**
1924          * Requested extent count exceeds the fiemap buffer size, shrink our
1925          * ambition.
1926          */
1927         if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen)
1928                 fiemap->fm_extent_count = fiemap_size_to_count(*buflen);
1929         if (fiemap->fm_extent_count == 0)
1930                 fs.fs_cnt_need = 0;
1931
1932         fs.fs_enough = false;
1933         fs.fs_cur_extent = 0;
1934         fs.fs_fm = fm_local;
1935         fs.fs_cnt_need = fiemap_size_to_count(buffer_size);
1936
1937         whole_start = fiemap->fm_start;
1938         /* whole_start is beyond the end of the file */
1939         if (whole_start > fmkey->lfik_oa.o_size)
1940                 GOTO(out_fm_local, rc = -EINVAL);
1941         whole_end = (fiemap->fm_length == OBD_OBJECT_EOF) ?
1942                                         fmkey->lfik_oa.o_size :
1943                                         whole_start + fiemap->fm_length - 1;
1944         /**
1945          * If fiemap->fm_length != OBD_OBJECT_EOF but whole_end exceeds the
1946          * file size, clamp whole_end to the file size.
1947          */
1948         if (whole_end > fmkey->lfik_oa.o_size)
1949                 whole_end = fmkey->lfik_oa.o_size;
1950
1951         start_entry = lov_lsm_entry(lsm, whole_start);
1952         end_entry = lov_lsm_entry(lsm, whole_end);
1953         if (end_entry == -1)
1954                 end_entry = lsm->lsm_entry_count - 1;
1955
1956         if (start_entry == -1 || end_entry == -1)
1957                 GOTO(out_fm_local, rc = -EINVAL);
1958
1959         /* TODO: rewrite it with lov_foreach_io_layout() */
1960         for (entry = start_entry; entry <= end_entry; entry++) {
1961                 lsme = lsm->lsm_entries[entry];
1962
1963                 if (!lsme_inited(lsme))
1964                         break;
1965
1966                 if (entry == start_entry)
1967                         fs.fs_ext.e_start = whole_start;
1968                 else
1969                         fs.fs_ext.e_start = lsme->lsme_extent.e_start;
1970                 if (entry == end_entry)
1971                         fs.fs_ext.e_end = whole_end;
1972                 else
1973                         fs.fs_ext.e_end = lsme->lsme_extent.e_end - 1;
1974                 fs.fs_length = fs.fs_ext.e_end - fs.fs_ext.e_start + 1;
1975
1976                 /* Calculate start stripe, last stripe and length of mapping */
1977                 fs.fs_start_stripe = lov_stripe_number(lsm, entry,
1978                                                        fs.fs_ext.e_start);
1979                 fs.fs_last_stripe = fiemap_calc_last_stripe(lsm, entry,
1980                                         &fs.fs_ext, fs.fs_start_stripe,
1981                                         &stripe_count);
1982                 fs.fs_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, entry,
1983                                         &fs.fs_ext, &fs.fs_start_stripe);
1984                 /* Check each stripe */
1985                 for (cur_stripe = fs.fs_start_stripe; stripe_count > 0;
1986                      --stripe_count,
1987                      cur_stripe = (cur_stripe + 1) % lsme->lsme_stripe_count) {
1988                         rc = fiemap_for_stripe(env, obj, lsm, fiemap, buflen,
1989                                                fmkey, entry, cur_stripe, &fs);
1990                         if (rc < 0)
1991                                 GOTO(out_fm_local, rc);
1992                         if (fs.fs_enough)
1993                                 GOTO(finish, rc);
1994                         if (fs.fs_finish_stripe)
1995                                 break;
1996                 } /* for each stripe */
1997         } /* for covering layout component */
1998         /*
1999          * We've traversed all components; set @entry back to the last
2000          * component entry, which is used for the last-stripe check below.
2001          */
2002         entry--;
2003 finish:
2004         /* Indicate that we are returning device offsets unless the file has
2005          * just a single stripe */
2006         if (lsm->lsm_entry_count > 1 ||
2007             (lsm->lsm_entry_count == 1 &&
2008              lsm->lsm_entries[0]->lsme_stripe_count > 1))
2009                 fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
2010
2011         if (fiemap->fm_extent_count == 0)
2012                 goto skip_last_device_calc;
2013
2014         /* Check if we have reached the last stripe and whether mapping for that
2015          * stripe is done. */
2016         if ((cur_stripe == fs.fs_last_stripe) && fs.fs_device_done)
2017                 fiemap->fm_extents[fs.fs_cur_extent - 1].fe_flags |=
2018                                                              FIEMAP_EXTENT_LAST;
2019 skip_last_device_calc:
2020         fiemap->fm_mapped_extents = fs.fs_cur_extent;
2021 out_fm_local:
2022         OBD_FREE_LARGE(fm_local, buffer_size);
2023
2024 out_lsm:
2025         lov_lsm_put(lsm);
2026         return rc;
2027 }
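
/*
 * Usage sketch (illustrative only, not part of the original source): this
 * function is reached through cl_object_fiemap() when a FIEMAP request is
 * issued against a Lustre file, typically via the standard FIEMAP ioctl,
 * e.g. from userspace:
 *
 *	fm->fm_start = 0;
 *	fm->fm_length = FIEMAP_MAX_OFFSET;
 *	fm->fm_flags = 0;
 *	fm->fm_extent_count = N;
 *	ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * If FIEMAP_EXTENT_LAST is not set on the last returned extent, the caller
 * issues another call starting past that extent.  Note that without
 * FIEMAP_FLAG_DEVICE_ORDER this function rejects layouts with more than one
 * stripe or component (see the -ENOTSUPP check above).
 */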
2028
2029 static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
2030                                 struct lov_user_md __user *lum, size_t size)
2031 {
2032         struct lov_object       *lov = cl2lov(obj);
2033         struct lov_stripe_md    *lsm;
2034         int                     rc = 0;
2035         ENTRY;
2036
2037         lsm = lov_lsm_addref(lov);
2038         if (lsm == NULL)
2039                 RETURN(-ENODATA);
2040
2041         rc = lov_getstripe(env, cl2lov(obj), lsm, lum, size);
2042         lov_lsm_put(lsm);
2043         RETURN(rc);
2044 }
2045
2046 static int lov_object_layout_get(const struct lu_env *env,
2047                                  struct cl_object *obj,
2048                                  struct cl_layout *cl)
2049 {
2050         struct lov_object *lov = cl2lov(obj);
2051         struct lov_stripe_md *lsm = lov_lsm_addref(lov);
2052         struct lu_buf *buf = &cl->cl_buf;
2053         ssize_t rc;
2054         ENTRY;
2055
2056         if (lsm == NULL) {
2057                 cl->cl_size = 0;
2058                 cl->cl_layout_gen = CL_LAYOUT_GEN_EMPTY;
2059
2060                 RETURN(0);
2061         }
2062
2063         cl->cl_size = lov_comp_md_size(lsm);
2064         cl->cl_layout_gen = lsm->lsm_layout_gen;
2065         cl->cl_dom_comp_size = 0;
2066         cl->cl_is_released = lsm->lsm_is_released;
2067         if (lsm_is_composite(lsm->lsm_magic)) {
2068                 struct lov_stripe_md_entry *lsme = lsm->lsm_entries[0];
2069
2070                 cl->cl_is_composite = true;
2071
2072                 if (lsme_is_dom(lsme))
2073                         cl->cl_dom_comp_size = lsme->lsme_extent.e_end;
2074         } else {
2075                 cl->cl_is_composite = false;
2076         }
2077
2078         rc = lov_lsm_pack(lsm, buf->lb_buf, buf->lb_len);
2079         lov_lsm_put(lsm);
2080
2081         RETURN(rc < 0 ? rc : 0);
2082 }
2083
2084 static loff_t lov_object_maxbytes(struct cl_object *obj)
2085 {
2086         struct lov_object *lov = cl2lov(obj);
2087         struct lov_stripe_md *lsm = lov_lsm_addref(lov);
2088         loff_t maxbytes;
2089
2090         if (lsm == NULL)
2091                 return LLONG_MAX;
2092
2093         maxbytes = lsm->lsm_maxbytes;
2094
2095         lov_lsm_put(lsm);
2096
2097         return maxbytes;
2098 }
2099
2100 static int lov_object_flush(const struct lu_env *env, struct cl_object *obj,
2101                             struct ldlm_lock *lock)
2102 {
2103         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_flush, env, obj, lock);
2104 }
2105
2106 static const struct cl_object_operations lov_ops = {
2107         .coo_page_init    = lov_page_init,
2108         .coo_lock_init    = lov_lock_init,
2109         .coo_io_init      = lov_io_init,
2110         .coo_attr_get     = lov_attr_get,
2111         .coo_attr_update  = lov_attr_update,
2112         .coo_conf_set     = lov_conf_set,
2113         .coo_getstripe    = lov_object_getstripe,
2114         .coo_layout_get   = lov_object_layout_get,
2115         .coo_maxbytes     = lov_object_maxbytes,
2116         .coo_fiemap       = lov_object_fiemap,
2117         .coo_object_flush = lov_object_flush
2118 };
2119
2120 static const struct lu_object_operations lov_lu_obj_ops = {
2121         .loo_object_init      = lov_object_init,
2122         .loo_object_delete    = lov_object_delete,
2123         .loo_object_release   = NULL,
2124         .loo_object_free      = lov_object_free,
2125         .loo_object_print     = lov_object_print,
2126         .loo_object_invariant = NULL
2127 };
2128
2129 struct lu_object *lov_object_alloc(const struct lu_env *env,
2130                                    const struct lu_object_header *unused,
2131                                    struct lu_device *dev)
2132 {
2133         struct lov_object *lov;
2134         struct lu_object  *obj;
2135
2136         ENTRY;
2137         OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, GFP_NOFS);
2138         if (lov != NULL) {
2139                 obj = lov2lu(lov);
2140                 lu_object_init(obj, NULL, dev);
2141                 lov->lo_cl.co_ops = &lov_ops;
2142                 lov->lo_type = -1; /* invalid, to catch uninitialized type */
2143                 /*
2144                  * object io operation vector (cl_object::co_iop) is installed
2145                  * later in lov_object_init(), as different vectors are used
2146                  * for object with different layouts.
2147                  */
2148                 obj->lo_ops = &lov_lu_obj_ops;
2149         } else
2150                 obj = NULL;
2151         RETURN(obj);
2152 }
2153
2154 struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
2155 {
2156         struct lov_stripe_md *lsm = NULL;
2157
2158         lov_conf_freeze(lov);
2159         if (lov->lo_lsm != NULL) {
2160                 lsm = lsm_addref(lov->lo_lsm);
2161                 CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
2162                         lsm, atomic_read(&lsm->lsm_refc),
2163                         lov->lo_layout_invalid, current);
2164         }
2165         lov_conf_thaw(lov);
2166         return lsm;
2167 }
2168
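/**
 * Scan every stripe of @clob for a stored asynchronous I/O error code
 * (loi_ar.ar_rc), return the first non-zero code found and clear all the
 * stored codes.
 */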
2169 int lov_read_and_clear_async_rc(struct cl_object *clob)
2170 {
2171         struct lu_object *luobj;
2172         int rc = 0;
2173         ENTRY;
2174
2175         luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
2176                                  &lov_device_type);
2177         if (luobj != NULL) {
2178                 struct lov_object *lov = lu2lov(luobj);
2179
2180                 lov_conf_freeze(lov);
2181                 switch (lov->lo_type) {
2182                 case LLT_COMP: {
2183                         struct lov_stripe_md *lsm;
2184                         int i;
2185
2186                         lsm = lov->lo_lsm;
2187                         LASSERT(lsm != NULL);
2188                         for (i = 0; i < lsm->lsm_entry_count; i++) {
2189                                 struct lov_stripe_md_entry *lse =
2190                                                 lsm->lsm_entries[i];
2191                                 int j;
2192
2193                                 if (!lsme_inited(lse))
2194                                         break;
2195
2196                                 for (j = 0; j < lse->lsme_stripe_count; j++) {
2197                                         struct lov_oinfo *loi =
2198                                                         lse->lsme_oinfo[j];
2199
2200                                         if (lov_oinfo_is_dummy(loi))
2201                                                 continue;
2202
2203                                         if (loi->loi_ar.ar_rc && !rc)
2204                                                 rc = loi->loi_ar.ar_rc;
2205                                         loi->loi_ar.ar_rc = 0;
2206                                 }
2207                         }
2208                 }
2209                 case LLT_RELEASED:
2210                 case LLT_EMPTY:
2211                         /* fall through */
2212                 case LLT_FOREIGN:
2213                         break;
2214                 default:
2215                         LBUG();
2216                 }
2217                 lov_conf_thaw(lov);
2218         }
2219         RETURN(rc);
2220 }
2221 EXPORT_SYMBOL(lov_read_and_clear_async_rc);
2222
2223 /** @} lov */