[fs/lustre-release.git] / lustre / lov / lov_object.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2016, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * Implementation of cl_object for LOV layer.
33  *
34  *   Author: Nikita Danilov <nikita.danilov@sun.com>
35  *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
36  */
37
38 #define DEBUG_SUBSYSTEM S_LOV
39
40 #include "lov_cl_internal.h"
41
42 static inline struct lov_device *lov_object_dev(struct lov_object *obj)
43 {
44         return lu2lov_dev(obj->lo_cl.co_lu.lo_dev);
45 }
46
47 /** \addtogroup lov
48  *  @{
49  */
50
51 /*****************************************************************************
52  *
53  * Layout operations.
54  *
55  */
56
57 struct lov_layout_operations {
58         int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
59                         struct lov_object *lov, struct lov_stripe_md *lsm,
60                         const struct cl_object_conf *conf,
61                         union lov_layout_state *state);
62         int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
63                            union lov_layout_state *state);
64         void (*llo_fini)(const struct lu_env *env, struct lov_object *lov,
65                          union lov_layout_state *state);
66         int  (*llo_print)(const struct lu_env *env, void *cookie,
67                           lu_printer_t p, const struct lu_object *o);
68         int  (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
69                               struct cl_page *page, pgoff_t index);
70         int  (*llo_lock_init)(const struct lu_env *env,
71                               struct cl_object *obj, struct cl_lock *lock,
72                               const struct cl_io *io);
73         int  (*llo_io_init)(const struct lu_env *env,
74                             struct cl_object *obj, struct cl_io *io);
75         int  (*llo_getattr)(const struct lu_env *env, struct cl_object *obj,
76                             struct cl_attr *attr);
77 };
78
79 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
80
81 static void lov_lsm_put(struct lov_stripe_md *lsm)
82 {
83         if (lsm != NULL)
84                 lov_free_memmd(&lsm);
85 }
86
87 /*****************************************************************************
88  *
89  * Lov object layout operations.
90  *
91  */
92 static int lov_init_empty(const struct lu_env *env, struct lov_device *dev,
93                           struct lov_object *lov, struct lov_stripe_md *lsm,
94                           const struct cl_object_conf *conf,
95                           union lov_layout_state *state)
96 {
97         return 0;
98 }
99
100 static struct cl_object *lov_sub_find(const struct lu_env *env,
101                                       struct cl_device *dev,
102                                       const struct lu_fid *fid,
103                                       const struct cl_object_conf *conf)
104 {
105         struct lu_object *o;
106
107         ENTRY;
108         o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
109         LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
110         RETURN(lu2cl(o));
111 }
112
113 static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
114                         struct cl_object *subobj, struct lov_layout_raid0 *r0,
115                         struct lov_oinfo *oinfo, int idx)
116 {
117         struct cl_object_header *hdr;
118         struct cl_object_header *subhdr;
119         struct cl_object_header *parent;
120         int entry = lov_comp_entry(idx);
121         int stripe = lov_comp_stripe(idx);
122         int result;
123
124         if (OBD_FAIL_CHECK(OBD_FAIL_LOV_INIT)) {
125                 /* For sanity:test_206.
126                  * Do not leave the object in cache to avoid accessing
127                  * freed memory. This is because osc_object is referring to
128                  * lov_oinfo of lsm_stripe_data which will be freed due to
129                  * this failure. */
130                 cl_object_kill(env, subobj);
131                 cl_object_put(env, subobj);
132                 return -EIO;
133         }
134
135         hdr    = cl_object_header(lov2cl(lov));
136         subhdr = cl_object_header(subobj);
137
138         CDEBUG(D_INODE, DFID"@%p[%d:%d] -> "DFID"@%p: ostid: "DOSTID
139                " ost idx: %d gen: %d\n",
140                PFID(lu_object_fid(&subobj->co_lu)), subhdr, entry, stripe,
141                PFID(lu_object_fid(lov2lu(lov))), hdr, POSTID(&oinfo->loi_oi),
142                oinfo->loi_ost_idx, oinfo->loi_ost_gen);
143
144         /* reuse ->coh_attr_guard to protect coh_parent change */
145         spin_lock(&subhdr->coh_attr_guard);
146         parent = subhdr->coh_parent;
147         if (parent == NULL) {
148                 subhdr->coh_parent = hdr;
149                 spin_unlock(&subhdr->coh_attr_guard);
150                 subhdr->coh_nesting = hdr->coh_nesting + 1;
151                 lu_object_ref_add(&subobj->co_lu, "lov-parent", lov);
152                 r0->lo_sub[stripe] = cl2lovsub(subobj);
153                 r0->lo_sub[stripe]->lso_super = lov;
154                 r0->lo_sub[stripe]->lso_index = idx;
155                 result = 0;
156         } else {
157                 struct lu_object  *old_obj;
158                 struct lov_object *old_lov;
159                 unsigned int mask = D_INODE;
160
161                 spin_unlock(&subhdr->coh_attr_guard);
162                 old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
163                 LASSERT(old_obj != NULL);
164                 old_lov = cl2lov(lu2cl(old_obj));
165                 if (old_lov->lo_layout_invalid) {
166                         /* the object's layout has already changed but isn't
167                          * refreshed */
168                         lu_object_unhash(env, &subobj->co_lu);
169                         result = -EAGAIN;
170                 } else {
171                         mask = D_ERROR;
172                         result = -EIO;
173                 }
174
175                 LU_OBJECT_DEBUG(mask, env, &subobj->co_lu,
176                                 "stripe %d is already owned.", idx);
177                 LU_OBJECT_DEBUG(mask, env, old_obj, "owned.");
178                 LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n");
179                 cl_object_put(env, subobj);
180         }
181         return result;
182 }
183
184 static int lov_page_slice_fixup(struct lov_object *lov,
185                                 struct cl_object *stripe)
186 {
187         struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
188         struct cl_object *o;
189
190         if (stripe == NULL)
191                 return hdr->coh_page_bufsize - lov->lo_cl.co_slice_off -
192                        cfs_size_round(sizeof(struct lov_page));
193
194         cl_object_for_each(o, stripe)
195                 o->co_slice_off += hdr->coh_page_bufsize;
196
197         return cl_object_header(stripe)->coh_page_bufsize;
198 }
199
200 static int lov_init_raid0(const struct lu_env *env, struct lov_device *dev,
201                           struct lov_object *lov, int index,
202                           struct lov_layout_raid0 *r0)
203 {
204         struct lov_thread_info  *lti     = lov_env_info(env);
205         struct cl_object_conf   *subconf = &lti->lti_stripe_conf;
206         struct lu_fid           *ofid    = &lti->lti_fid;
207         struct cl_object        *stripe;
208         struct lov_stripe_md_entry *lse  = lov_lse(lov, index);
209         int result;
210         int psz;
211         int i;
212
213         ENTRY;
214
215         spin_lock_init(&r0->lo_sub_lock);
216         r0->lo_nr = lse->lsme_stripe_count;
217         LASSERT(r0->lo_nr <= lov_targets_nr(dev));
218
219         OBD_ALLOC_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
220         if (r0->lo_sub == NULL)
221                 GOTO(out, result = -ENOMEM);
222
223         psz = 0;
224         result = 0;
225         memset(subconf, 0, sizeof(*subconf));
226
227         /*
228          * Create stripe cl_objects.
229          */
230         for (i = 0; i < r0->lo_nr; ++i) {
231                 struct cl_device *subdev;
232                 struct lov_oinfo *oinfo = lse->lsme_oinfo[i];
233                 int ost_idx = oinfo->loi_ost_idx;
234
235                 if (lov_oinfo_is_dummy(oinfo))
236                         continue;
237
238                 result = ostid_to_fid(ofid, &oinfo->loi_oi, oinfo->loi_ost_idx);
239                 if (result != 0)
240                         GOTO(out, result);
241
242                 if (dev->ld_target[ost_idx] == NULL) {
243                         CERROR("%s: OST %04x is not initialized\n",
244                                lov2obd(dev->ld_lov)->obd_name, ost_idx);
245                         GOTO(out, result = -EIO);
246                 }
247
248                 subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
249                 subconf->u.coc_oinfo = oinfo;
250                 LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
251                 /* In the function below, .hs_keycmp resolves to
252                  * lu_obj_hop_keycmp() */
253                 /* coverity[overrun-buffer-val] */
254                 stripe = lov_sub_find(env, subdev, ofid, subconf);
255                 if (IS_ERR(stripe))
256                         GOTO(out, result = PTR_ERR(stripe));
257
258                 result = lov_init_sub(env, lov, stripe, r0, oinfo,
259                                       lov_comp_index(index, i));
260                 if (result == -EAGAIN) { /* try again */
261                         --i;
262                         result = 0;
263                         continue;
264                 }
265
266                 if (result == 0) {
267                         int sz = lov_page_slice_fixup(lov, stripe);
268                         LASSERT(ergo(psz > 0, psz == sz));
269                         psz = sz;
270                 }
271         }
272         if (result == 0)
273                 result = psz;
274 out:
275         RETURN(result);
276 }
277
278 static int lov_init_composite(const struct lu_env *env, struct lov_device *dev,
279                               struct lov_object *lov, struct lov_stripe_md *lsm,
280                               const struct cl_object_conf *conf,
281                               union lov_layout_state *state)
282 {
283         struct lov_layout_composite *comp = &state->composite;
284         unsigned int entry_count;
285         unsigned int psz = 0;
286         int result = 0;
287         int i;
288
289         ENTRY;
290
291         LASSERT(lsm->lsm_entry_count > 0);
292         LASSERT(lov->lo_lsm == NULL);
293         lov->lo_lsm = lsm_addref(lsm);
294         lov->lo_layout_invalid = true;
295
296         entry_count = lsm->lsm_entry_count;
297         comp->lo_entry_count = entry_count;
298
299         OBD_ALLOC(comp->lo_entries, entry_count * sizeof(*comp->lo_entries));
300         if (comp->lo_entries == NULL)
301                 RETURN(-ENOMEM);
302
303         for (i = 0; i < entry_count; i++) {
304                 struct lov_layout_entry *le = &comp->lo_entries[i];
305
306                 le->lle_extent = lsm->lsm_entries[i]->lsme_extent;
307                 /**
308                  * For a PFL layout, if this component has not been
309                  * instantiated on the MDS side, the components beyond it
310                  * will be instantiated dynamically later on write/trunc ops.
311                  */
312                 if (!lsm_entry_inited(lsm, i))
313                         continue;
314
315                 result = lov_init_raid0(env, dev, lov, i, &le->lle_raid0);
316                 if (result < 0)
317                         break;
318
319                 LASSERT(ergo(psz > 0, psz == result));
320                 psz = result;
321         }
322         if (psz > 0)
323                 cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;
324
325         return result > 0 ? 0 : result;
326 }
327
328 static int lov_init_released(const struct lu_env *env,
329                              struct lov_device *dev, struct lov_object *lov,
330                              struct lov_stripe_md *lsm,
331                              const struct cl_object_conf *conf,
332                              union lov_layout_state *state)
333 {
334         LASSERT(lsm != NULL);
335         LASSERT(lsm->lsm_is_released);
336         LASSERT(lov->lo_lsm == NULL);
337
338         lov->lo_lsm = lsm_addref(lsm);
339         return 0;
340 }
341
342 static struct cl_object *lov_find_subobj(const struct lu_env *env,
343                                          struct lov_object *lov,
344                                          struct lov_stripe_md *lsm,
345                                          int index)
346 {
347         struct lov_device       *dev = lu2lov_dev(lov2lu(lov)->lo_dev);
348         struct lov_thread_info  *lti = lov_env_info(env);
349         struct lu_fid           *ofid = &lti->lti_fid;
350         struct lov_oinfo        *oinfo;
351         struct cl_device        *subdev;
352         int                     entry = lov_comp_entry(index);
353         int                     stripe = lov_comp_stripe(index);
354         int                     ost_idx;
355         int                     rc;
356         struct cl_object        *result;
357
358         if (lov->lo_type != LLT_COMP)
359                 GOTO(out, result = NULL);
360
361         if (entry >= lsm->lsm_entry_count ||
362             stripe >= lsm->lsm_entries[entry]->lsme_stripe_count)
363                 GOTO(out, result = NULL);
364
365         oinfo = lsm->lsm_entries[entry]->lsme_oinfo[stripe];
366         ost_idx = oinfo->loi_ost_idx;
367         rc = ostid_to_fid(ofid, &oinfo->loi_oi, ost_idx);
368         if (rc != 0)
369                 GOTO(out, result = NULL);
370
371         subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
372         result = lov_sub_find(env, subdev, ofid, NULL);
373 out:
374         if (result == NULL)
375                 result = ERR_PTR(-EINVAL);
376         return result;
377 }
378
379 static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
380                             union lov_layout_state *state)
381 {
382         LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
383
384         lov_layout_wait(env, lov);
385         return 0;
386 }
387
388 static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
389                                struct lov_layout_raid0 *r0,
390                                struct lovsub_object *los, int idx)
391 {
392         struct cl_object        *sub;
393         struct lu_site          *site;
394         struct lu_site_bkt_data *bkt;
395         wait_queue_t          *waiter;
396
397         LASSERT(r0->lo_sub[idx] == los);
398
399         sub  = lovsub2cl(los);
400         site = sub->co_lu.lo_dev->ld_site;
401         bkt  = lu_site_bkt_from_fid(site, &sub->co_lu.lo_header->loh_fid);
402
403         cl_object_kill(env, sub);
404         /* release a reference to the sub-object and ... */
405         lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
406         cl_object_put(env, sub);
407
408         /* ... wait until it is actually destroyed---sub-object clears its
409          * ->lo_sub[] slot in lovsub_object_fini() */
410         if (r0->lo_sub[idx] == los) {
411                 waiter = &lov_env_info(env)->lti_waiter;
412                 init_waitqueue_entry(waiter, current);
413                 add_wait_queue(&bkt->lsb_marche_funebre, waiter);
414                 set_current_state(TASK_UNINTERRUPTIBLE);
415                 while (1) {
416                         /* this wait-queue is signaled at the end of
417                          * lu_object_free(). */
418                         set_current_state(TASK_UNINTERRUPTIBLE);
419                         spin_lock(&r0->lo_sub_lock);
420                         if (r0->lo_sub[idx] == los) {
421                                 spin_unlock(&r0->lo_sub_lock);
422                                 schedule();
423                         } else {
424                                 spin_unlock(&r0->lo_sub_lock);
425                                 set_current_state(TASK_RUNNING);
426                                 break;
427                         }
428                 }
429                 remove_wait_queue(&bkt->lsb_marche_funebre, waiter);
430         }
431         LASSERT(r0->lo_sub[idx] == NULL);
432 }
433
434 static void lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
435                              struct lov_layout_raid0 *r0)
436 {
437         ENTRY;
438
439         if (r0->lo_sub != NULL) {
440                 int i;
441
442                 for (i = 0; i < r0->lo_nr; ++i) {
443                         struct lovsub_object *los = r0->lo_sub[i];
444
445                         if (los != NULL) {
446                                 cl_object_prune(env, &los->lso_cl);
447                                 /*
448                                  * If top-level object is to be evicted from
449                                  * the cache, so are its sub-objects.
450                                  */
451                                 lov_subobject_kill(env, lov, r0, los, i);
452                         }
453                 }
454         }
455
456         EXIT;
457 }
458
459 static int lov_delete_composite(const struct lu_env *env,
460                                 struct lov_object *lov,
461                                 union lov_layout_state *state)
462 {
463         struct lov_layout_entry *entry;
464         struct lov_layout_composite *comp = &state->composite;
465
466         ENTRY;
467
468         dump_lsm(D_INODE, lov->lo_lsm);
469
470         lov_layout_wait(env, lov);
471         if (comp->lo_entries)
472                 lov_foreach_layout_entry(lov, entry)
473                         lov_delete_raid0(env, lov, &entry->lle_raid0);
474
475         RETURN(0);
476 }
477
478 static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov,
479                            union lov_layout_state *state)
480 {
481         LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
482 }
483
484 static void lov_fini_raid0(const struct lu_env *env,
485                            struct lov_layout_raid0 *r0)
486 {
487         if (r0->lo_sub != NULL) {
488                 OBD_FREE_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
489                 r0->lo_sub = NULL;
490         }
491 }
492
493 static void lov_fini_composite(const struct lu_env *env,
494                                struct lov_object *lov,
495                                union lov_layout_state *state)
496 {
497         struct lov_layout_composite *comp = &state->composite;
498         ENTRY;
499
500         if (comp->lo_entries != NULL) {
501                 struct lov_layout_entry *entry;
502
503                 lov_foreach_layout_entry(lov, entry)
504                         lov_fini_raid0(env, &entry->lle_raid0);
505
506                 OBD_FREE(comp->lo_entries,
507                          comp->lo_entry_count * sizeof(*comp->lo_entries));
508                 comp->lo_entries = NULL;
509         }
510
511         dump_lsm(D_INODE, lov->lo_lsm);
512         lov_free_memmd(&lov->lo_lsm);
513
514         EXIT;
515 }
516
517 static void lov_fini_released(const struct lu_env *env, struct lov_object *lov,
518                                 union lov_layout_state *state)
519 {
520         ENTRY;
521         dump_lsm(D_INODE, lov->lo_lsm);
522         lov_free_memmd(&lov->lo_lsm);
523         EXIT;
524 }
525
526 static int lov_print_empty(const struct lu_env *env, void *cookie,
527                            lu_printer_t p, const struct lu_object *o)
528 {
529         (*p)(env, cookie, "empty %d\n", lu2lov(o)->lo_layout_invalid);
530         return 0;
531 }
532
533 static int lov_print_raid0(const struct lu_env *env, void *cookie,
534                            lu_printer_t p, struct lov_layout_raid0 *r0)
535 {
536         int i;
537
538         for (i = 0; i < r0->lo_nr; ++i) {
539                 struct lu_object *sub;
540
541                 if (r0->lo_sub[i] != NULL) {
542                         sub = lovsub2lu(r0->lo_sub[i]);
543                         lu_object_print(env, cookie, p, sub);
544                 } else {
545                         (*p)(env, cookie, "sub %d absent\n", i);
546                 }
547         }
548         return 0;
549 }
550
551 static int lov_print_composite(const struct lu_env *env, void *cookie,
552                                lu_printer_t p, const struct lu_object *o)
553 {
554         struct lov_object *lov = lu2lov(o);
555         struct lov_stripe_md *lsm = lov->lo_lsm;
556         int i;
557
558         (*p)(env, cookie, "entries: %d, %s, lsm{%p 0x%08X %d %u}:\n",
559              lsm->lsm_entry_count,
560              lov->lo_layout_invalid ? "invalid" : "valid", lsm,
561              lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
562              lsm->lsm_layout_gen);
563
564         for (i = 0; i < lsm->lsm_entry_count; i++) {
565                 struct lov_stripe_md_entry *lse = lsm->lsm_entries[i];
566
567                 (*p)(env, cookie, DEXT ": { 0x%08X, %u, %u, %#x, %u, %u }\n",
568                      PEXT(&lse->lsme_extent), lse->lsme_magic,
569                      lse->lsme_id, lse->lsme_layout_gen, lse->lsme_flags,
570                      lse->lsme_stripe_count, lse->lsme_stripe_size);
571                 lov_print_raid0(env, cookie, p, lov_r0(lov, i));
572         }
573
574         return 0;
575 }
576
577 static int lov_print_released(const struct lu_env *env, void *cookie,
578                                 lu_printer_t p, const struct lu_object *o)
579 {
580         struct lov_object       *lov = lu2lov(o);
581         struct lov_stripe_md    *lsm = lov->lo_lsm;
582
583         (*p)(env, cookie,
584                 "released: %s, lsm{%p 0x%08X %d %u}:\n",
585                 lov->lo_layout_invalid ? "invalid" : "valid", lsm,
586                 lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
587                 lsm->lsm_layout_gen);
588         return 0;
589 }
590
591 /**
592  * Implements cl_object_operations::coo_attr_get() method for an object
593  * without stripes (LLT_EMPTY layout type).
594  *
595  * The only attribute this layer is authoritative for in this case is
596  * cl_attr::cat_blocks, which is 0.
597  */
598 static int lov_attr_get_empty(const struct lu_env *env, struct cl_object *obj,
599                               struct cl_attr *attr)
600 {
601         attr->cat_blocks = 0;
602         return 0;
603 }
604
605 static int lov_attr_get_raid0(const struct lu_env *env, struct lov_object *lov,
606                               unsigned int index, struct lov_layout_raid0 *r0)
607
608 {
609         struct lov_stripe_md *lsm = lov->lo_lsm;
610         struct ost_lvb *lvb = &lov_env_info(env)->lti_lvb;
611         struct cl_attr *attr = &r0->lo_attr;
612         __u64 kms = 0;
613         int result = 0;
614
615         if (r0->lo_attr_valid)
616                 return 0;
617
618         memset(lvb, 0, sizeof(*lvb));
619
620         /* XXX: timestamps can be negative, as seen in sanity:test_39m;
621          * how can that be? */
622         lvb->lvb_atime = LLONG_MIN;
623         lvb->lvb_ctime = LLONG_MIN;
624         lvb->lvb_mtime = LLONG_MIN;
625
626         /*
627          * XXX that should be replaced with a loop over sub-objects,
628          * doing cl_object_attr_get() on them. But for now, let's
629          * reuse old lov code.
630          */
631
632         /*
633          * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
634          * happy. It's not needed, because new code uses
635          * ->coh_attr_guard spin-lock to protect consistency of
636          * sub-object attributes.
637          */
638         lov_stripe_lock(lsm);
639         result = lov_merge_lvb_kms(lsm, index, lvb, &kms);
640         lov_stripe_unlock(lsm);
641         if (result == 0) {
642                 cl_lvb2attr(attr, lvb);
643                 attr->cat_kms = kms;
644                 r0->lo_attr_valid = 1;
645         }
646
647         return result;
648 }
649
650 static int lov_attr_get_composite(const struct lu_env *env,
651                                   struct cl_object *obj,
652                                   struct cl_attr *attr)
653 {
654         struct lov_object       *lov = cl2lov(obj);
655         struct lov_layout_entry *entry;
656         int                      result = 0;
657         int                      index = 0;
658
659         ENTRY;
660
661         attr->cat_size = 0;
662         attr->cat_blocks = 0;
663         lov_foreach_layout_entry(lov, entry) {
664                 struct lov_layout_raid0 *r0 = &entry->lle_raid0;
665                 struct cl_attr *lov_attr = &r0->lo_attr;
666
667                 /* PFL: This component has not been init-ed. */
668                 if (!lsm_entry_inited(lov->lo_lsm, index))
669                         break;
670
671                 result = lov_attr_get_raid0(env, lov, index, r0);
672                 if (result != 0)
673                         break;
674
675                 index++;
676
677                 /* merge results */
678                 attr->cat_blocks += lov_attr->cat_blocks;
679                 if (attr->cat_size < lov_attr->cat_size)
680                         attr->cat_size = lov_attr->cat_size;
681                 if (attr->cat_kms < lov_attr->cat_kms)
682                         attr->cat_kms = lov_attr->cat_kms;
683                 if (attr->cat_atime < lov_attr->cat_atime)
684                         attr->cat_atime = lov_attr->cat_atime;
685                 if (attr->cat_ctime < lov_attr->cat_ctime)
686                         attr->cat_ctime = lov_attr->cat_ctime;
687                 if (attr->cat_mtime < lov_attr->cat_mtime)
688                         attr->cat_mtime = lov_attr->cat_mtime;
689         }
690         RETURN(result);
691 }
692
693 static const struct lov_layout_operations lov_dispatch[] = {
694         [LLT_EMPTY] = {
695                 .llo_init      = lov_init_empty,
696                 .llo_delete    = lov_delete_empty,
697                 .llo_fini      = lov_fini_empty,
698                 .llo_print     = lov_print_empty,
699                 .llo_page_init = lov_page_init_empty,
700                 .llo_lock_init = lov_lock_init_empty,
701                 .llo_io_init   = lov_io_init_empty,
702                 .llo_getattr   = lov_attr_get_empty,
703         },
704         [LLT_RELEASED] = {
705                 .llo_init      = lov_init_released,
706                 .llo_delete    = lov_delete_empty,
707                 .llo_fini      = lov_fini_released,
708                 .llo_print     = lov_print_released,
709                 .llo_page_init = lov_page_init_empty,
710                 .llo_lock_init = lov_lock_init_empty,
711                 .llo_io_init   = lov_io_init_released,
712                 .llo_getattr   = lov_attr_get_empty,
713         },
714         [LLT_COMP] = {
715                 .llo_init      = lov_init_composite,
716                 .llo_delete    = lov_delete_composite,
717                 .llo_fini      = lov_fini_composite,
718                 .llo_print     = lov_print_composite,
719                 .llo_page_init = lov_page_init_composite,
720                 .llo_lock_init = lov_lock_init_composite,
721                 .llo_io_init   = lov_io_init_composite,
722                 .llo_getattr   = lov_attr_get_composite,
723         },
724 };
725
726 /**
727  * Performs a double-dispatch based on the layout type of an object.
728  */
729 #define LOV_2DISPATCH_NOLOCK(obj, op, ...)              \
730 ({                                                      \
731         struct lov_object *__obj = (obj);               \
732         enum lov_layout_type __llt;                     \
733                                                         \
734         __llt = __obj->lo_type;                         \
735         LASSERT(__llt < ARRAY_SIZE(lov_dispatch));      \
736         lov_dispatch[__llt].op(__VA_ARGS__);            \
737 })
738
739 /**
740  * Return lov_layout_type associated with a given lsm
741  */
742 static enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
743 {
744         if (lsm == NULL)
745                 return LLT_EMPTY;
746
747         if (lsm->lsm_is_released)
748                 return LLT_RELEASED;
749
750         if (lsm->lsm_magic == LOV_MAGIC_V1 ||
751             lsm->lsm_magic == LOV_MAGIC_V3 ||
752             lsm->lsm_magic == LOV_MAGIC_COMP_V1)
753                 return LLT_COMP;
754
755         return LLT_EMPTY;
756 }
757
758 static inline void lov_conf_freeze(struct lov_object *lov)
759 {
760         CDEBUG(D_INODE, "To take share lov(%p) owner %p/%p\n",
761                 lov, lov->lo_owner, current);
762         if (lov->lo_owner != current)
763                 down_read(&lov->lo_type_guard);
764 }
765
766 static inline void lov_conf_thaw(struct lov_object *lov)
767 {
768         CDEBUG(D_INODE, "To release share lov(%p) owner %p/%p\n",
769                 lov, lov->lo_owner, current);
770         if (lov->lo_owner != current)
771                 up_read(&lov->lo_type_guard);
772 }
773
774 #define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...)                       \
775 ({                                                                      \
776         struct lov_object                      *__obj = (obj);          \
777         int                                     __lock = !!(lock);      \
778         typeof(lov_dispatch[0].op(__VA_ARGS__)) __result;               \
779                                                                         \
780         if (__lock)                                                     \
781                 lov_conf_freeze(__obj);                                 \
782         __result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__);          \
783         if (__lock)                                                     \
784                 lov_conf_thaw(__obj);                                   \
785         __result;                                                       \
786 })
787
788 /**
789  * Performs a locked double-dispatch based on the layout type of an object.
790  */
791 #define LOV_2DISPATCH(obj, op, ...)                     \
792         LOV_2DISPATCH_MAYLOCK(obj, op, 1, __VA_ARGS__)
793
794 #define LOV_2DISPATCH_VOID(obj, op, ...)                                \
795 do {                                                                    \
796         struct lov_object                      *__obj = (obj);          \
797         enum lov_layout_type                    __llt;                  \
798                                                                         \
799         lov_conf_freeze(__obj);                                         \
800         __llt = __obj->lo_type;                                         \
801         LASSERT(__llt < ARRAY_SIZE(lov_dispatch));                      \
802         lov_dispatch[__llt].op(__VA_ARGS__);                            \
803         lov_conf_thaw(__obj);                                           \
804 } while (0)
805
806 static void lov_conf_lock(struct lov_object *lov)
807 {
808         LASSERT(lov->lo_owner != current);
809         down_write(&lov->lo_type_guard);
810         LASSERT(lov->lo_owner == NULL);
811         lov->lo_owner = current;
812         CDEBUG(D_INODE, "Took exclusive lov(%p) owner %p\n",
813                 lov, lov->lo_owner);
814 }
815
816 static void lov_conf_unlock(struct lov_object *lov)
817 {
818         CDEBUG(D_INODE, "To release exclusive lov(%p) owner %p\n",
819                 lov, lov->lo_owner);
820         lov->lo_owner = NULL;
821         up_write(&lov->lo_type_guard);
822 }
823
824 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
825 {
826         struct l_wait_info lwi = { 0 };
827         ENTRY;
828
829         while (atomic_read(&lov->lo_active_ios) > 0) {
830                 CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
831                         PFID(lu_object_fid(lov2lu(lov))),
832                         atomic_read(&lov->lo_active_ios));
833
834                 l_wait_event(lov->lo_waitq,
835                              atomic_read(&lov->lo_active_ios) == 0, &lwi);
836         }
837         RETURN(0);
838 }
839
840 static int lov_layout_change(const struct lu_env *unused,
841                              struct lov_object *lov, struct lov_stripe_md *lsm,
842                              const struct cl_object_conf *conf)
843 {
844         enum lov_layout_type llt = lov_type(lsm);
845         union lov_layout_state *state = &lov->u;
846         const struct lov_layout_operations *old_ops;
847         const struct lov_layout_operations *new_ops;
848         struct lov_device *lov_dev = lov_object_dev(lov);
849         struct lu_env *env;
850         __u16 refcheck;
851         int rc;
852         ENTRY;
853
854         LASSERT(lov->lo_type < ARRAY_SIZE(lov_dispatch));
855
856         env = cl_env_get(&refcheck);
857         if (IS_ERR(env))
858                 RETURN(PTR_ERR(env));
859
860         LASSERT(llt < ARRAY_SIZE(lov_dispatch));
861
862         CDEBUG(D_INODE, DFID" from %s to %s\n",
863                PFID(lu_object_fid(lov2lu(lov))),
864                llt2str(lov->lo_type), llt2str(llt));
865
866         old_ops = &lov_dispatch[lov->lo_type];
867         new_ops = &lov_dispatch[llt];
868
869         rc = cl_object_prune(env, &lov->lo_cl);
870         if (rc != 0)
871                 GOTO(out, rc);
872
873         rc = old_ops->llo_delete(env, lov, &lov->u);
874         if (rc != 0)
875                 GOTO(out, rc);
876
877         old_ops->llo_fini(env, lov, &lov->u);
878
879         LASSERT(atomic_read(&lov->lo_active_ios) == 0);
880
881         CDEBUG(D_INODE, DFID "Apply new layout lov %p, type %d\n",
882                PFID(lu_object_fid(lov2lu(lov))), lov, llt);
883
884         lov->lo_type = LLT_EMPTY;
885
886         /* page bufsize fixup */
887         cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
888                 lov_page_slice_fixup(lov, NULL);
889
890         rc = new_ops->llo_init(env, lov_dev, lov, lsm, conf, state);
891         if (rc != 0) {
892                 struct obd_device *obd = lov2obd(lov_dev->ld_lov);
893
894                 CERROR("%s: cannot apply new layout on "DFID" : rc = %d\n",
895                        obd->obd_name, PFID(lu_object_fid(lov2lu(lov))), rc);
896                 new_ops->llo_delete(env, lov, state);
897                 new_ops->llo_fini(env, lov, state);
898                 /* this file becomes an EMPTY file. */
899                 GOTO(out, rc);
900         }
901
902         lov->lo_type = llt;
903
904 out:
905         cl_env_put(env, &refcheck);
906         RETURN(rc);
907 }
908
909 /*****************************************************************************
910  *
911  * Lov object operations.
912  *
913  */
914 int lov_object_init(const struct lu_env *env, struct lu_object *obj,
915                     const struct lu_object_conf *conf)
916 {
917         struct lov_object            *lov   = lu2lov(obj);
918         struct lov_device            *dev   = lov_object_dev(lov);
919         const struct cl_object_conf  *cconf = lu2cl_conf(conf);
920         union lov_layout_state       *set   = &lov->u;
921         const struct lov_layout_operations *ops;
922         struct lov_stripe_md *lsm = NULL;
923         int rc;
924         ENTRY;
925
926         init_rwsem(&lov->lo_type_guard);
927         atomic_set(&lov->lo_active_ios, 0);
928         init_waitqueue_head(&lov->lo_waitq);
929         cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
930
931         lov->lo_type = LLT_EMPTY;
932         if (cconf->u.coc_layout.lb_buf != NULL) {
933                 lsm = lov_unpackmd(dev->ld_lov,
934                                    cconf->u.coc_layout.lb_buf,
935                                    cconf->u.coc_layout.lb_len);
936                 if (IS_ERR(lsm))
937                         RETURN(PTR_ERR(lsm));
938
939                 dump_lsm(D_INODE, lsm);
940         }
941
942         /* no locking is necessary, as object is being created */
943         lov->lo_type = lov_type(lsm);
944         ops = &lov_dispatch[lov->lo_type];
945         rc = ops->llo_init(env, dev, lov, lsm, cconf, set);
946         if (rc != 0)
947                 GOTO(out_lsm, rc);
948
949 out_lsm:
950         lov_lsm_put(lsm);
951
952         RETURN(rc);
953 }
954
955 static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
956                         const struct cl_object_conf *conf)
957 {
958         struct lov_stripe_md    *lsm = NULL;
959         struct lov_object       *lov = cl2lov(obj);
960         int                      result = 0;
961         ENTRY;
962
963         if (conf->coc_opc == OBJECT_CONF_SET &&
964             conf->u.coc_layout.lb_buf != NULL) {
965                 lsm = lov_unpackmd(lov_object_dev(lov)->ld_lov,
966                                    conf->u.coc_layout.lb_buf,
967                                    conf->u.coc_layout.lb_len);
968                 if (IS_ERR(lsm))
969                         RETURN(PTR_ERR(lsm));
970                 dump_lsm(D_INODE, lsm);
971         }
972
973         lov_conf_lock(lov);
974         if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
975                 lov->lo_layout_invalid = true;
976                 GOTO(out, result = 0);
977         }
978
979         if (conf->coc_opc == OBJECT_CONF_WAIT) {
980                 if (lov->lo_layout_invalid &&
981                     atomic_read(&lov->lo_active_ios) > 0) {
982                         lov_conf_unlock(lov);
983                         result = lov_layout_wait(env, lov);
984                         lov_conf_lock(lov);
985                 }
986                 GOTO(out, result);
987         }
988
989         LASSERT(conf->coc_opc == OBJECT_CONF_SET);
990
991         if ((lsm == NULL && lov->lo_lsm == NULL) ||
992             ((lsm != NULL && lov->lo_lsm != NULL) &&
993              (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
994              (lov->lo_lsm->lsm_entries[0]->lsme_pattern ==
995               lsm->lsm_entries[0]->lsme_pattern))) {
996                 /* same version of layout */
997                 lov->lo_layout_invalid = false;
998                 GOTO(out, result = 0);
999         }
1000
1001         /* will change layout - check if there still exists active IO. */
1002         if (atomic_read(&lov->lo_active_ios) > 0) {
1003                 lov->lo_layout_invalid = true;
1004                 GOTO(out, result = -EBUSY);
1005         }
1006
1007         result = lov_layout_change(env, lov, lsm, conf);
1008         lov->lo_layout_invalid = result != 0;
1009         EXIT;
1010
1011 out:
1012         lov_conf_unlock(lov);
1013         lov_lsm_put(lsm);
1014         CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
1015                PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
1016         RETURN(result);
1017 }
1018
1019 static void lov_object_delete(const struct lu_env *env, struct lu_object *obj)
1020 {
1021         struct lov_object *lov = lu2lov(obj);
1022
1023         ENTRY;
1024         LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
1025         EXIT;
1026 }
1027
1028 static void lov_object_free(const struct lu_env *env, struct lu_object *obj)
1029 {
1030         struct lov_object *lov = lu2lov(obj);
1031
1032         ENTRY;
1033         LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
1034         lu_object_fini(obj);
1035         OBD_SLAB_FREE_PTR(lov, lov_object_kmem);
1036         EXIT;
1037 }
1038
1039 static int lov_object_print(const struct lu_env *env, void *cookie,
1040                             lu_printer_t p, const struct lu_object *o)
1041 {
1042         return LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
1043 }
1044
1045 int lov_page_init(const struct lu_env *env, struct cl_object *obj,
1046                   struct cl_page *page, pgoff_t index)
1047 {
1048         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page,
1049                                     index);
1050 }
1051
1052 /**
1053  * Implements cl_object_operations::clo_io_init() method for lov
1054  * layer. Dispatches to the appropriate layout io initialization method.
1055  */
1056 int lov_io_init(const struct lu_env *env, struct cl_object *obj,
1057                 struct cl_io *io)
1058 {
1059         CL_IO_SLICE_CLEAN(lov_env_io(env), lis_cl);
1060
1061         CDEBUG(D_INODE, DFID "io %p type %d ignore/verify layout %d/%d\n",
1062                PFID(lu_object_fid(&obj->co_lu)), io, io->ci_type,
1063                io->ci_ignore_layout, io->ci_verify_layout);
1064
1065         return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_io_init,
1066                                      !io->ci_ignore_layout, env, obj, io);
1067 }
1068
1069 /**
1070  * An implementation of cl_object_operations::clo_attr_get() method for lov
1071  * layer. For raid0 layout this collects and merges attributes of all
1072  * sub-objects.
1073  */
1074 static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
1075                         struct cl_attr *attr)
1076 {
1077         /* do not take lock, as this function is called under a
1078          * spin-lock. Layout is protected from changing by ongoing IO. */
1079         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
1080 }
1081
1082 static int lov_attr_update(const struct lu_env *env, struct cl_object *obj,
1083                            const struct cl_attr *attr, unsigned valid)
1084 {
1085         /*
1086          * No dispatch is required here, as no layout implements this.
1087          */
1088         return 0;
1089 }
1090
1091 int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
1092                   struct cl_lock *lock, const struct cl_io *io)
1093 {
1094         /* No need to lock because we've taken one refcount of layout.  */
1095         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_lock_init, env, obj, lock,
1096                                     io);
1097 }
1098
1099 /**
1100  * We calculate on which OST the mapping will end. If the length of mapping
1101  * is greater than (stripe_size * stripe_count) then the last_stripe
1102  * will be one just before start_stripe. Otherwise we check whether the
1103  * mapping intersects each OST and find last_stripe.
1104  * This function returns the last_stripe and also sets the stripe_count
1105  * over which the mapping is spread
1106  *
1107  * \param lsm [in]              striping information for the file
1108  * \param index [in]            stripe component index
1109  * \param ext [in]              logical extent of mapping
1110  * \param start_stripe [in]     starting stripe of the mapping
1111  * \param stripe_count [out]    the number of stripes across which to map is
1112  *                              returned
1113  *
1114  * \retval last_stripe          return the last stripe of the mapping
1115  */
1116 static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm, int index,
1117                                    struct lu_extent *ext,
1118                                    int start_stripe, int *stripe_count)
1119 {
1120         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1121         int last_stripe;
1122         u64 obd_start;
1123         u64 obd_end;
1124         int i, j;
1125
1126         if (ext->e_end - ext->e_start >
1127             lsme->lsme_stripe_size * lsme->lsme_stripe_count) {
1128                 last_stripe = (start_stripe < 1 ? lsme->lsme_stripe_count - 1 :
1129                                                   start_stripe - 1);
1130                 *stripe_count = lsme->lsme_stripe_count;
1131         } else {
1132                 for (j = 0, i = start_stripe; j < lsme->lsme_stripe_count;
1133                      i = (i + 1) % lsme->lsme_stripe_count, j++) {
1134                         if ((lov_stripe_intersects(lsm, index,  i, ext,
1135                                                    &obd_start, &obd_end)) == 0)
1136                                 break;
1137                 }
1138                 *stripe_count = j;
1139                 last_stripe = (start_stripe + j - 1) % lsme->lsme_stripe_count;
1140         }
1141
1142         return last_stripe;
1143 }
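/*
 * Illustrative sketch (not from the original file): the plain RAID0 offset
 * arithmetic that helpers such as lov_stripe_intersects() and
 * lov_size_to_stripe() are built on, shown as a tiny self-contained
 * user-space program.  The helper name stripe_for_offset() is hypothetical,
 * not a Lustre function.
 */
#include <stdint.h>
#include <stdio.h>

/* Map a file offset to (stripe index, offset within that stripe object). */
static void stripe_for_offset(uint64_t off, uint64_t stripe_size,
                              unsigned int stripe_count,
                              unsigned int *stripe, uint64_t *obj_off)
{
        uint64_t chunk = off / stripe_size;      /* which stripe-sized chunk */

        *stripe  = chunk % stripe_count;         /* round-robin over objects */
        *obj_off = (chunk / stripe_count) * stripe_size + off % stripe_size;
}

int main(void)
{
        unsigned int stripe;
        uint64_t obj_off;

        /* 1 MiB stripes over 4 objects: offset 5 MiB falls on stripe 1,
         * at offset 1 MiB inside that stripe object. */
        stripe_for_offset(5ULL << 20, 1ULL << 20, 4, &stripe, &obj_off);
        printf("stripe %u, object offset %llu\n", stripe,
               (unsigned long long)obj_off);
        return 0;
}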
1144
1145 /**
1146  * Set fe_device and copy extents from local buffer into main return buffer.
1147  *
1148  * \param fiemap [out]          fiemap to hold all extents
1149  * \param lcl_fm_ext [in]       array of fiemap extents obtained from the OSC layer
1150  * \param ost_index [in]        OST index to be written into the fm_device
1151  *                              field for each extent
1152  * \param ext_count [in]        number of extents to be copied
1153  * \param current_extent [in]   where to start copying in the extent array
1154  */
1155 static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap,
1156                                          struct fiemap_extent *lcl_fm_ext,
1157                                          int ost_index, unsigned int ext_count,
1158                                          int current_extent)
1159 {
1160         char            *to;
1161         unsigned int    ext;
1162
1163         for (ext = 0; ext < ext_count; ext++) {
1164                 lcl_fm_ext[ext].fe_device = ost_index;
1165                 lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
1166         }
1167
1168         /* Copy fm_extent's from fm_local to return buffer */
1169         to = (char *)fiemap + fiemap_count_to_size(current_extent);
1170         memcpy(to, lcl_fm_ext, ext_count * sizeof(struct fiemap_extent));
1171 }
1172
1173 #define FIEMAP_BUFFER_SIZE 4096
1174
1175 /**
1176  * A non-zero fe_logical indicates that this is a continuation FIEMAP
1177  * call. The local end offset and the device are sent in the first
1178  * fm_extent. From these this function calculates the stripe_no on
1179  * which the mapping is to be restarted.
1180  *
1181  * This function returns fm_end_offset which is the in-OST offset at which
1182  * mapping should be restarted. If fm_end_offset=0 is returned then caller
1183  * will re-calculate proper offset in next stripe.
1184  * Note that the first extent is passed to lov_get_info via the value field.
1185  *
1186  * \param fiemap [in]           fiemap request header
1187  * \param lsm [in]              striping information for the file
1188  * \param index [in]            stripe component index
1189  * \param ext [in]              logical extent of mapping
1190  * \param start_stripe [out]    starting stripe will be returned in this
1191  */
1192 static u64 fiemap_calc_fm_end_offset(struct fiemap *fiemap,
1193                                      struct lov_stripe_md *lsm,
1194                                      int index, struct lu_extent *ext,
1195                                      int *start_stripe)
1196 {
1197         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1198         u64 local_end = fiemap->fm_extents[0].fe_logical;
1199         u64 lun_start;
1200         u64 lun_end;
1201         u64 fm_end_offset;
1202         int stripe_no = -1;
1203         int i;
1204
1205         if (fiemap->fm_extent_count == 0 ||
1206             fiemap->fm_extents[0].fe_logical == 0)
1207                 return 0;
1208
1209         /* Find out stripe_no from ost_index saved in the fe_device */
1210         for (i = 0; i < lsme->lsme_stripe_count; i++) {
1211                 struct lov_oinfo *oinfo = lsme->lsme_oinfo[i];
1212
1213                 if (lov_oinfo_is_dummy(oinfo))
1214                         continue;
1215
1216                 if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) {
1217                         stripe_no = i;
1218                         break;
1219                 }
1220         }
1221
1222         if (stripe_no == -1)
1223                 return -EINVAL;
1224
1225         /* If we have finished mapping on previous device, shift logical
1226          * offset to start of next device */
1227         if (lov_stripe_intersects(lsm, index, stripe_no, ext,
1228                                    &lun_start, &lun_end) != 0 &&
1229             local_end < lun_end) {
1230                 fm_end_offset = local_end;
1231                 *start_stripe = stripe_no;
1232         } else {
1233                 /* This is a special value to indicate that caller should
1234                  * calculate offset in next stripe. */
1235                 fm_end_offset = 0;
1236                 *start_stripe = (stripe_no + 1) % lsme->lsme_stripe_count;
1237         }
1238
1239         return fm_end_offset;
1240 }
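/*
 * Illustrative sketch (not from the original file): how a user-space FIEMAP
 * caller typically drives the continuation behaviour described above --
 * re-issue FS_IOC_FIEMAP starting at the end of the last extent returned,
 * until an extent carries FIEMAP_EXTENT_LAST.  A minimal example under that
 * assumption; most error handling is omitted.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
        const unsigned int count = 32;
        struct fiemap *fm;
        __u64 start = 0;
        int last = 0;
        int fd;

        if (argc != 2)
                return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0)
                return 1;

        fm = calloc(1, sizeof(*fm) + count * sizeof(struct fiemap_extent));
        while (fm != NULL && !last) {
                unsigned int i;

                memset(fm, 0, sizeof(*fm));
                fm->fm_start = start;
                fm->fm_length = FIEMAP_MAX_OFFSET - start;
                fm->fm_extent_count = count;

                if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0 ||
                    fm->fm_mapped_extents == 0)
                        break;

                for (i = 0; i < fm->fm_mapped_extents; i++) {
                        struct fiemap_extent *fe = &fm->fm_extents[i];

                        printf("logical %llu..%llu flags %#x\n",
                               (unsigned long long)fe->fe_logical,
                               (unsigned long long)(fe->fe_logical +
                                                    fe->fe_length),
                               fe->fe_flags);
                        /* restart the next call where this extent ended */
                        start = fe->fe_logical + fe->fe_length;
                        last = fe->fe_flags & FIEMAP_EXTENT_LAST;
                }
        }
        free(fm);
        close(fd);
        return 0;
}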
1241
1242 struct fiemap_state {
1243         struct fiemap           *fs_fm;
1244         struct lu_extent        fs_ext;
1245         u64                     fs_length;
1246         u64                     fs_end_offset;
1247         int                     fs_cur_extent;
1248         int                     fs_cnt_need;
1249         int                     fs_start_stripe;
1250         int                     fs_last_stripe;
1251         bool                    fs_device_done;
1252         bool                    fs_finish_stripe;
1253         bool                    fs_enough;
1254 };
1255
1256 int fiemap_for_stripe(const struct lu_env *env, struct cl_object *obj,
1257                       struct lov_stripe_md *lsm, struct fiemap *fiemap,
1258                       size_t *buflen, struct ll_fiemap_info_key *fmkey,
1259                       int index, int stripeno, struct fiemap_state *fs)
1260 {
1261         struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
1262         struct cl_object *subobj;
1263         struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov;
1264         struct fiemap_extent *fm_ext = &fs->fs_fm->fm_extents[0];
1265         u64 req_fm_len; /* Stores length of required mapping */
1266         u64 len_mapped_single_call;
1267         u64 lun_start;
1268         u64 lun_end;
1269         u64 obd_object_end;
1270         unsigned int ext_count;
1271         /* EOF for object */
1272         bool ost_eof = false;
1273         /* done with required mapping for this OST? */
1274         bool ost_done = false;
1275         int ost_index;
1276         int rc = 0;
1277
1278         fs->fs_device_done = false;
1279         /* Find out range of mapping on this stripe */
1280         if ((lov_stripe_intersects(lsm, index, stripeno, &fs->fs_ext,
1281                                    &lun_start, &obd_object_end)) == 0)
1282                 return 0;
1283
1284         if (lov_oinfo_is_dummy(lsme->lsme_oinfo[stripeno]))
1285                 return -EIO;
1286
1287         /* If this is a continuation FIEMAP call and we are on
1288          * starting stripe then lun_start needs to be set to
1289          * end_offset */
1290         if (fs->fs_end_offset != 0 && stripeno == fs->fs_start_stripe)
1291                 lun_start = fs->fs_end_offset;
1292         lun_end = lov_size_to_stripe(lsm, index, fs->fs_ext.e_end, stripeno);
1293         if (lun_start == lun_end)
1294                 return 0;
1295
1296         req_fm_len = obd_object_end - lun_start;
1297         fs->fs_fm->fm_length = 0;
1298         len_mapped_single_call = 0;
1299
1300         /* find lovsub object */
1301         subobj = lov_find_subobj(env, cl2lov(obj), lsm,
1302                                  lov_comp_index(index, stripeno));
1303         if (IS_ERR(subobj))
1304                 return PTR_ERR(subobj);
1305         /* If the output buffer is very large and the objects have many
1306          * extents we may need to loop on a single OST repeatedly */
1307         do {
1308                 if (fiemap->fm_extent_count > 0) {
1309                         /* Don't get too many extents. */
1310                         if (fs->fs_cur_extent + fs->fs_cnt_need >
1311                             fiemap->fm_extent_count)
1312                                 fs->fs_cnt_need = fiemap->fm_extent_count -
1313                                                   fs->fs_cur_extent;
1314                 }
1315
1316                 lun_start += len_mapped_single_call;
1317                 fs->fs_fm->fm_length = req_fm_len - len_mapped_single_call;
1318                 req_fm_len = fs->fs_fm->fm_length;
1319                 /**
1320                  * If we've collected enough extent map, we'd request 1 more,
1321                  * to see whether we coincidentally finished all available
1322                  * extent map, so that FIEMAP_EXTENT_LAST would be set.
1323                  */
1324                 fs->fs_fm->fm_extent_count = fs->fs_enough ?
1325                                              1 : fs->fs_cnt_need;
1326                 fs->fs_fm->fm_mapped_extents = 0;
1327                 fs->fs_fm->fm_flags = fiemap->fm_flags;
1328
1329                 ost_index = lsme->lsme_oinfo[stripeno]->loi_ost_idx;
1330
1331                 if (ost_index < 0 || ost_index >= lov->desc.ld_tgt_count)
1332                         GOTO(obj_put, rc = -EINVAL);
1333                 /* If OST is inactive, return extent with UNKNOWN flag. */
1334                 if (!lov->lov_tgts[ost_index]->ltd_active) {
1335                         fs->fs_fm->fm_flags |= FIEMAP_EXTENT_LAST;
1336                         fs->fs_fm->fm_mapped_extents = 1;
1337
1338                         fm_ext[0].fe_logical = lun_start;
1339                         fm_ext[0].fe_length = obd_object_end - lun_start;
1340                         fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
1341
1342                         goto inactive_tgt;
1343                 }
1344
1345                 fs->fs_fm->fm_start = lun_start;
1346                 fs->fs_fm->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
1347                 memcpy(&fmkey->lfik_fiemap, fs->fs_fm, sizeof(*fs->fs_fm));
1348                 *buflen = fiemap_count_to_size(fs->fs_fm->fm_extent_count);
1349
1350                 rc = cl_object_fiemap(env, subobj, fmkey, fs->fs_fm, buflen);
1351                 if (rc != 0)
1352                         GOTO(obj_put, rc);
1353 inactive_tgt:
1354                 ext_count = fs->fs_fm->fm_mapped_extents;
1355                 if (ext_count == 0) {
1356                         ost_done = true;
1357                         fs->fs_device_done = true;
1358                         /* If last stripe has a hole at the end,
1359                          * we need to return */
1360                         if (stripeno == fs->fs_last_stripe) {
1361                                 fiemap->fm_mapped_extents = 0;
1362                                 fs->fs_finish_stripe = true;
1363                                 GOTO(obj_put, rc);
1364                         }
1365                         break;
1366                 } else if (fs->fs_enough) {
1367                         /*
1368                          * We've collected enough extents and there are
1369                          * more extents after it.
1370                          */
1371                         GOTO(obj_put, rc);
1372                 }
1373
1374                 /* If we only need the number of extents, go to the next device */
1375                 if (fiemap->fm_extent_count == 0) {
1376                         fs->fs_cur_extent += ext_count;
1377                         break;
1378                 }
1379
1380                 /* prepare to copy the retrieved map extents */
1381                 len_mapped_single_call = fm_ext[ext_count - 1].fe_logical +
1382                                          fm_ext[ext_count - 1].fe_length -
1383                                          lun_start;
1384
1385                 /* Have we finished mapping on this device? */
1386                 if (req_fm_len <= len_mapped_single_call) {
1387                         ost_done = true;
1388                         fs->fs_device_done = true;
1389                 }
1390
1391                 /* Clear the EXTENT_LAST flag which can be present on
1392                  * the last extent */
1393                 if (fm_ext[ext_count - 1].fe_flags & FIEMAP_EXTENT_LAST)
1394                         fm_ext[ext_count - 1].fe_flags &= ~FIEMAP_EXTENT_LAST;
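                /* Translate the stripe-local end of the last extent back to a
                 * file offset; reaching the known file size means there is
                 * nothing more to map on this stripe. */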
1395                 if (lov_stripe_size(lsm, index,
1396                                     fm_ext[ext_count - 1].fe_logical +
1397                                     fm_ext[ext_count - 1].fe_length,
1398                                     stripeno) >= fmkey->lfik_oa.o_size) {
1399                         ost_eof = true;
1400                         fs->fs_device_done = true;
1401                 }
1402
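                /* Merge the extents retrieved for this stripe into the
                 * caller-visible fiemap buffer, recording the OST index as the
                 * device of each extent. */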
1403                 fiemap_prepare_and_copy_exts(fiemap, fm_ext, ost_index,
1404                                              ext_count, fs->fs_cur_extent);
1405                 fs->fs_cur_extent += ext_count;
1406
1407                 /* Ran out of available extents? */
1408                 if (fs->fs_cur_extent >= fiemap->fm_extent_count)
1409                         fs->fs_enough = true;
1410         } while (!ost_done && !ost_eof);
1411
1412         if (stripeno == fs->fs_last_stripe)
1413                 fs->fs_finish_stripe = true;
1414 obj_put:
1415         cl_object_put(env, subobj);
1416
1417         return rc;
1418 }
1419
1420 /**
1421  * Break down the FIEMAP request and send appropriate calls to individual OSTs.
1422  * This also handles restarting FIEMAP calls when the mapping overflows the
1423  * number of extents available in a single call (usage sketch after the body).
1424  *
1425  * \param env [in]              Lustre environment
1426  * \param obj [in]              file object
1427  * \param fmkey [in]            FIEMAP request header and other info
1428  * \param fiemap [out]          FIEMAP buffer holding the retrieved extents
1429  * \param buflen [in/out]       maximum buffer length of @fiemap; while iterating
1430  *                              over each OST it limits the maximum mapping needed
1431  * \retval 0    success
1432  * \retval < 0  error
1433  */
1434 static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj,
1435                              struct ll_fiemap_info_key *fmkey,
1436                              struct fiemap *fiemap, size_t *buflen)
1437 {
1438         struct lov_stripe_md_entry *lsme;
1439         struct lov_stripe_md *lsm;
1440         struct fiemap *fm_local = NULL;
1441         loff_t whole_start;
1442         loff_t whole_end;
1443         int entry;
1444         int start_entry;
1445         int end_entry;
1446         int cur_stripe = 0;
1447         int stripe_count;
1448         unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
1449         int rc = 0;
1450         struct fiemap_state fs = { 0 };
1451         ENTRY;
1452
1453         lsm = lov_lsm_addref(cl2lov(obj));
1454         if (lsm == NULL)
1455                 RETURN(-ENODATA);
1456
1457         if (!(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) {
1458                 /**
1459                  * If there is more than one component or more than one stripe
1460                  * and the application does not understand the DEVICE_ORDER
1461                  * flag, it cannot interpret the extents correctly.
1462                  */
1463                 if (lsm->lsm_entry_count > 1 ||
1464                     (lsm->lsm_entry_count == 1 &&
1465                      lsm->lsm_entries[0]->lsme_stripe_count > 1))
1466                         GOTO(out_lsm, rc = -ENOTSUPP);
1467         }
1468
1469         if (lsm->lsm_is_released) {
1470                 if (fiemap->fm_start < fmkey->lfik_oa.o_size) {
1471                         /**
1472                          * Released file: return a minimal FIEMAP if the
1473                          * requested range starts before the file size.
1474                          */
1475                         fiemap->fm_mapped_extents = 1;
1476                         fiemap->fm_extents[0].fe_logical = fiemap->fm_start;
1477                         if (fiemap->fm_start + fiemap->fm_length <
1478                             fmkey->lfik_oa.o_size)
1479                                 fiemap->fm_extents[0].fe_length =
1480                                         fiemap->fm_length;
1481                         else
1482                                 fiemap->fm_extents[0].fe_length =
1483                                         fmkey->lfik_oa.o_size -
1484                                         fiemap->fm_start;
1485                         fiemap->fm_extents[0].fe_flags |=
1486                                 FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST;
1487                 }
1488                 GOTO(out_lsm, rc = 0);
1489         }
1490
1491         /* buffer_size only needs to be big enough for fm_extent_count extents. */
1492         if (fiemap_count_to_size(fiemap->fm_extent_count) < buffer_size)
1493                 buffer_size = fiemap_count_to_size(fiemap->fm_extent_count);
1494
1495         OBD_ALLOC_LARGE(fm_local, buffer_size);
1496         if (fm_local == NULL)
1497                 GOTO(out_lsm, rc = -ENOMEM);
1498
1499         /**
1500          * The requested extent count does not fit in the provided buffer
1501          * length, so shrink our ambition.
1502          */
1503         if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen)
1504                 fiemap->fm_extent_count = fiemap_size_to_count(*buflen);
1505         if (fiemap->fm_extent_count == 0)
1506                 fs.fs_cnt_need = 0;
1507
1508         fs.fs_enough = false;
1509         fs.fs_cur_extent = 0;
1510         fs.fs_fm = fm_local;
1511         fs.fs_cnt_need = fiemap_size_to_count(buffer_size);
1512
1513         whole_start = fiemap->fm_start;
1514         /* whole_start is beyond the end of the file */
1515         if (whole_start > fmkey->lfik_oa.o_size)
1516                 GOTO(out_fm_local, rc = -EINVAL);
1517         whole_end = (fiemap->fm_length == OBD_OBJECT_EOF) ?
1518                                         fmkey->lfik_oa.o_size :
1519                                         whole_start + fiemap->fm_length - 1;
1520         /**
1521          * If fiemap->fm_length != OBD_OBJECT_EOF but whole_end exceeds the
1522          * file size, clamp whole_end to the file size.
1523          */
1524         if (whole_end > fmkey->lfik_oa.o_size)
1525                 whole_end = fmkey->lfik_oa.o_size;
1526
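        /* Locate the first and last layout components covering the requested
         * byte range. */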
1527         start_entry = lov_lsm_entry(lsm, whole_start);
1528         end_entry = lov_lsm_entry(lsm, whole_end);
1529         if (end_entry == -1)
1530                 end_entry = lsm->lsm_entry_count - 1;
1531
1532         if (start_entry == -1 || end_entry == -1)
1533                 GOTO(out_fm_local, rc = -EINVAL);
1534
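        /* Map each overlapping layout component, clamping its extent to the
         * requested range, and walk its stripes round-robin starting from the
         * stripe that holds the start offset. */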
1535         for (entry = start_entry; entry <= end_entry; entry++) {
1536                 lsme = lsm->lsm_entries[entry];
1537
1538                 if (!lsme_inited(lsme))
1539                         break;
1540
1541                 if (entry == start_entry)
1542                         fs.fs_ext.e_start = whole_start;
1543                 else
1544                         fs.fs_ext.e_start = lsme->lsme_extent.e_start;
1545                 if (entry == end_entry)
1546                         fs.fs_ext.e_end = whole_end;
1547                 else
1548                         fs.fs_ext.e_end = lsme->lsme_extent.e_end - 1;
1549                 fs.fs_length = fs.fs_ext.e_end - fs.fs_ext.e_start + 1;
1550
1551                 /* Calculate start stripe, last stripe and length of mapping */
1552                 fs.fs_start_stripe = lov_stripe_number(lsm, entry,
1553                                                        fs.fs_ext.e_start);
1554                 fs.fs_last_stripe = fiemap_calc_last_stripe(lsm, entry,
1555                                         &fs.fs_ext, fs.fs_start_stripe,
1556                                         &stripe_count);
1557                 fs.fs_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, entry,
1558                                         &fs.fs_ext, &fs.fs_start_stripe);
1559                 /* Check each stripe */
1560                 for (cur_stripe = fs.fs_start_stripe; stripe_count > 0;
1561                      --stripe_count,
1562                      cur_stripe = (cur_stripe + 1) % lsme->lsme_stripe_count) {
1563                         rc = fiemap_for_stripe(env, obj, lsm, fiemap, buflen,
1564                                                fmkey, entry, cur_stripe, &fs);
1565                         if (rc < 0)
1566                                 GOTO(out_fm_local, rc);
1567                         if (fs.fs_enough)
1568                                 GOTO(finish, rc);
1569                         if (fs.fs_finish_stripe)
1570                                 break;
1571                 } /* for each stripe */
1572         } /* for covering layout component */
1573         /*
1574          * We have traversed all components; set @entry back to the last
1575          * component entry for the final last-stripe check.
1576          */
1577         entry--;
1578 finish:
1579         /* Indicate that we are returning device offsets unless the file
1580          * has just a single stripe */
1581         if (lsm->lsm_entry_count > 1 ||
1582             (lsm->lsm_entry_count == 1 &&
1583              lsm->lsm_entries[0]->lsme_stripe_count > 1))
1584                 fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
1585
1586         if (fiemap->fm_extent_count == 0)
1587                 goto skip_last_device_calc;
1588
1589         /* Check if we have reached the last stripe and whether mapping for that
1590          * stripe is done. */
1591         if ((cur_stripe == fs.fs_last_stripe) && fs.fs_device_done)
1592                 fiemap->fm_extents[fs.fs_cur_extent - 1].fe_flags |=
1593                                                              FIEMAP_EXTENT_LAST;
1594 skip_last_device_calc:
1595         fiemap->fm_mapped_extents = fs.fs_cur_extent;
1596 out_fm_local:
1597         OBD_FREE_LARGE(fm_local, buffer_size);
1598
1599 out_lsm:
1600         lov_lsm_put(lsm);
1601         return rc;
1602 }
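
/*
 * Hedged usage sketch (editor's illustration, not part of the original file):
 * how a user-space application might drive lov_object_fiemap() on a striped
 * file through the standard FS_IOC_FIEMAP ioctl.  The file path and the
 * extent count of 32 are arbitrary; FIEMAP_FLAG_DEVICE_ORDER is assumed to
 * come from the Lustre user-space fiemap header, and error handling is
 * omitted for brevity.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <linux/fiemap.h>
 *
 *	struct fiemap *fm;
 *	int fd = open("/mnt/lustre/striped_file", O_RDONLY);
 *
 *	fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
 *	fm->fm_start = 0;
 *	fm->fm_length = FIEMAP_MAX_OFFSET;
 *	fm->fm_extent_count = 32;
 *	fm->fm_flags = FIEMAP_FLAG_DEVICE_ORDER;
 *	ioctl(fd, FS_IOC_FIEMAP, fm);
 *	printf("%u extents mapped\n", fm->fm_mapped_extents);
 */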
1603
1604 static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
1605                                 struct lov_user_md __user *lum, size_t size)
1606 {
1607         struct lov_object       *lov = cl2lov(obj);
1608         struct lov_stripe_md    *lsm;
1609         int                     rc = 0;
1610         ENTRY;
1611
1612         lsm = lov_lsm_addref(lov);
1613         if (lsm == NULL)
1614                 RETURN(-ENODATA);
1615
1616         rc = lov_getstripe(env, cl2lov(obj), lsm, lum, size);
1617         lov_lsm_put(lsm);
1618         RETURN(rc);
1619 }
1620
1621 static int lov_object_layout_get(const struct lu_env *env,
1622                                  struct cl_object *obj,
1623                                  struct cl_layout *cl)
1624 {
1625         struct lov_object *lov = cl2lov(obj);
1626         struct lov_stripe_md *lsm = lov_lsm_addref(lov);
1627         struct lu_buf *buf = &cl->cl_buf;
1628         ssize_t rc;
1629         ENTRY;
1630
1631         if (lsm == NULL) {
1632                 cl->cl_size = 0;
1633                 cl->cl_layout_gen = CL_LAYOUT_GEN_EMPTY;
1634
1635                 RETURN(0);
1636         }
1637
1638         cl->cl_size = lov_comp_md_size(lsm);
1639         cl->cl_layout_gen = lsm->lsm_layout_gen;
1640         cl->cl_is_composite = lsm_is_composite(lsm->lsm_magic);
1641
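        /* Serialize the layout into the caller's buffer; lov_lsm_pack()
         * returns the packed size on success or a negative errno (e.g. when
         * the buffer is too small), and only errors are propagated here since
         * cl_size already reports the required size. */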
1642         rc = lov_lsm_pack(lsm, buf->lb_buf, buf->lb_len);
1643         lov_lsm_put(lsm);
1644
1645         RETURN(rc < 0 ? rc : 0);
1646 }
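
/*
 * Hedged usage sketch (editor's illustration, not part of the original file):
 * the two-pass pattern a caller could use with cl_object_layout_get(), which
 * dispatches to lov_object_layout_get() for LOV objects: first probe the
 * required size with an empty buffer, then retry with a buffer of cl_size
 * bytes.  "env" and "obj" are assumed to be already set up by the caller and
 * error handling is omitted.
 *
 *	struct cl_layout cl = { .cl_buf = { .lb_buf = NULL, .lb_len = 0 } };
 *	void *buf;
 *
 *	cl_object_layout_get(env, obj, &cl);
 *	OBD_ALLOC_LARGE(buf, cl.cl_size);
 *	cl.cl_buf.lb_buf = buf;
 *	cl.cl_buf.lb_len = cl.cl_size;
 *	cl_object_layout_get(env, obj, &cl);
 *	...
 *	OBD_FREE_LARGE(buf, cl.cl_size);
 */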
1647
1648 static loff_t lov_object_maxbytes(struct cl_object *obj)
1649 {
1650         struct lov_object *lov = cl2lov(obj);
1651         struct lov_stripe_md *lsm = lov_lsm_addref(lov);
1652         loff_t maxbytes;
1653
1654         if (lsm == NULL)
1655                 return LLONG_MAX;
1656
1657         maxbytes = lsm->lsm_maxbytes;
1658
1659         lov_lsm_put(lsm);
1660
1661         return maxbytes;
1662 }
1663
1664 static const struct cl_object_operations lov_ops = {
1665         .coo_page_init    = lov_page_init,
1666         .coo_lock_init    = lov_lock_init,
1667         .coo_io_init      = lov_io_init,
1668         .coo_attr_get     = lov_attr_get,
1669         .coo_attr_update  = lov_attr_update,
1670         .coo_conf_set     = lov_conf_set,
1671         .coo_getstripe    = lov_object_getstripe,
1672         .coo_layout_get   = lov_object_layout_get,
1673         .coo_maxbytes     = lov_object_maxbytes,
1674         .coo_fiemap       = lov_object_fiemap,
1675 };
1676
1677 static const struct lu_object_operations lov_lu_obj_ops = {
1678         .loo_object_init      = lov_object_init,
1679         .loo_object_delete    = lov_object_delete,
1680         .loo_object_release   = NULL,
1681         .loo_object_free      = lov_object_free,
1682         .loo_object_print     = lov_object_print,
1683         .loo_object_invariant = NULL
1684 };
1685
1686 struct lu_object *lov_object_alloc(const struct lu_env *env,
1687                                    const struct lu_object_header *unused,
1688                                    struct lu_device *dev)
1689 {
1690         struct lov_object *lov;
1691         struct lu_object  *obj;
1692
1693         ENTRY;
1694         OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, GFP_NOFS);
1695         if (lov != NULL) {
1696                 obj = lov2lu(lov);
1697                 lu_object_init(obj, NULL, dev);
1698                 lov->lo_cl.co_ops = &lov_ops;
1699                 lov->lo_type = -1; /* invalid, to catch uninitialized type */
1700                 /*
1701                  * object io operation vector (cl_object::co_iop) is installed
1702                  * later in lov_object_init(), as different vectors are used
1703                  * for object with different layouts.
1704                  */
1705                 obj->lo_ops = &lov_lu_obj_ops;
1706         } else
1707                 obj = NULL;
1708         RETURN(obj);
1709 }
1710
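/**
 * Take a reference on the object's current layout (lsm) while holding the
 * layout configuration lock, so the caller can keep using the lsm after the
 * lock is dropped.  The reference is released with lov_lsm_put(), which frees
 * the lsm when the last reference goes away.  Returns NULL if the object has
 * no layout attached.
 */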
1711 struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
1712 {
1713         struct lov_stripe_md *lsm = NULL;
1714
1715         lov_conf_freeze(lov);
1716         if (lov->lo_lsm != NULL) {
1717                 lsm = lsm_addref(lov->lo_lsm);
1718                 CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
1719                         lsm, atomic_read(&lsm->lsm_refc),
1720                         lov->lo_layout_invalid, current);
1721         }
1722         lov_conf_thaw(lov);
1723         return lsm;
1724 }
1725
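/**
 * Collect the asynchronous write error recorded on each stripe of the object,
 * returning the first non-zero error found and clearing the stored codes so
 * subsequent callers see them only once.
 */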
1726 int lov_read_and_clear_async_rc(struct cl_object *clob)
1727 {
1728         struct lu_object *luobj;
1729         int rc = 0;
1730         ENTRY;
1731
1732         luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
1733                                  &lov_device_type);
1734         if (luobj != NULL) {
1735                 struct lov_object *lov = lu2lov(luobj);
1736
1737                 lov_conf_freeze(lov);
1738                 switch (lov->lo_type) {
1739                 case LLT_COMP: {
1740                         struct lov_stripe_md *lsm;
1741                         int i;
1742
1743                         lsm = lov->lo_lsm;
1744                         LASSERT(lsm != NULL);
1745                         for (i = 0; i < lsm->lsm_entry_count; i++) {
1746                                 struct lov_stripe_md_entry *lse =
1747                                                 lsm->lsm_entries[i];
1748                                 int j;
1749
1750                                 if (!lsme_inited(lse))
1751                                         break;
1752
1753                                 for (j = 0; j < lse->lsme_stripe_count; j++) {
1754                                         struct lov_oinfo *loi =
1755                                                         lse->lsme_oinfo[j];
1756
1757                                         if (lov_oinfo_is_dummy(loi))
1758                                                 continue;
1759
1760                                         if (loi->loi_ar.ar_rc && !rc)
1761                                                 rc = loi->loi_ar.ar_rc;
1762                                         loi->loi_ar.ar_rc = 0;
1763                                 }
1764                         }
1765                 }
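                /* fallthrough */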
1766                 case LLT_RELEASED:
1767                 case LLT_EMPTY:
1768                         break;
1769                 default:
1770                         LBUG();
1771                 }
1772                 lov_conf_thaw(lov);
1773         }
1774         RETURN(rc);
1775 }
1776 EXPORT_SYMBOL(lov_read_and_clear_async_rc);
1777
1778 /** @} lov */