LU-8773 llite: refactor lov_object_fiemap()
[fs/lustre-release.git] / lustre / lov / lov_object.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2016, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * Implementation of cl_object for LOV layer.
33  *
34  *   Author: Nikita Danilov <nikita.danilov@sun.com>
35  *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
36  */
37
38 #define DEBUG_SUBSYSTEM S_LOV
39
40 #include "lov_cl_internal.h"
41
42 static inline struct lov_device *lov_object_dev(struct lov_object *obj)
43 {
44         return lu2lov_dev(obj->lo_cl.co_lu.lo_dev);
45 }
46
47 /** \addtogroup lov
48  *  @{
49  */
50
51 /*****************************************************************************
52  *
53  * Layout operations.
54  *
55  */
56
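/*
 * Per-layout-type method table.  Each layout type (empty, raid0, released)
 * supplies its own implementation of these operations; lov object methods
 * dispatch through lov_dispatch[] below based on lov_object::lo_type.
 */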
57 struct lov_layout_operations {
58         int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
59                         struct lov_object *lov, struct lov_stripe_md *lsm,
60                         const struct cl_object_conf *conf,
61                         union lov_layout_state *state);
62         int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
63                            union lov_layout_state *state);
64         void (*llo_fini)(const struct lu_env *env, struct lov_object *lov,
65                          union lov_layout_state *state);
66         void (*llo_install)(const struct lu_env *env, struct lov_object *lov,
67                             union lov_layout_state *state);
68         int  (*llo_print)(const struct lu_env *env, void *cookie,
69                           lu_printer_t p, const struct lu_object *o);
70         int  (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
71                               struct cl_page *page, pgoff_t index);
72         int  (*llo_lock_init)(const struct lu_env *env,
73                               struct cl_object *obj, struct cl_lock *lock,
74                               const struct cl_io *io);
75         int  (*llo_io_init)(const struct lu_env *env,
76                             struct cl_object *obj, struct cl_io *io);
77         int  (*llo_getattr)(const struct lu_env *env, struct cl_object *obj,
78                             struct cl_attr *attr);
79 };
80
81 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
82
83 static void lov_lsm_put(struct lov_stripe_md *lsm)
84 {
85         if (lsm != NULL)
86                 lov_free_memmd(&lsm);
87 }
88
89 /*****************************************************************************
90  *
91  * Lov object layout operations.
92  *
93  */
94
95 static void lov_install_empty(const struct lu_env *env,
96                               struct lov_object *lov,
97                               union  lov_layout_state *state)
98 {
99         /*
100          * File without objects.
101          */
102 }
103
104 static int lov_init_empty(const struct lu_env *env, struct lov_device *dev,
105                           struct lov_object *lov, struct lov_stripe_md *lsm,
106                           const struct cl_object_conf *conf,
107                           union lov_layout_state *state)
108 {
109         return 0;
110 }
111
112 static void lov_install_raid0(const struct lu_env *env,
113                               struct lov_object *lov,
114                               union  lov_layout_state *state)
115 {
116 }
117
118 static struct cl_object *lov_sub_find(const struct lu_env *env,
119                                       struct cl_device *dev,
120                                       const struct lu_fid *fid,
121                                       const struct cl_object_conf *conf)
122 {
123         struct lu_object *o;
124
125         ENTRY;
126         o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
127         LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
128         RETURN(lu2cl(o));
129 }
130
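/*
 * Attach a stripe sub-object to its parent lov object: record the parent in
 * the sub-object's header and remember the sub-object in r0->lo_sub[idx].
 * Returns -EAGAIN if the stripe is still owned by a stale (invalid) layout
 * so the caller can retry, or -EIO if it is owned by a valid one.
 */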
131 static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
132                         struct cl_object *stripe, struct lov_layout_raid0 *r0,
133                         int idx)
134 {
135         struct cl_object_header *hdr;
136         struct cl_object_header *subhdr;
137         struct cl_object_header *parent;
138         struct lov_oinfo        *oinfo;
139         int result;
140
141         if (OBD_FAIL_CHECK(OBD_FAIL_LOV_INIT)) {
142                 /* For sanity:test_206.
143                  * Do not leave the object in cache to avoid accessing
144                  * freed memory. This is because osc_object is referring to
145                  * lov_oinfo of lsm_stripe_data which will be freed due to
146                  * this failure. */
147                 cl_object_kill(env, stripe);
148                 cl_object_put(env, stripe);
149                 return -EIO;
150         }
151
152         hdr    = cl_object_header(lov2cl(lov));
153         subhdr = cl_object_header(stripe);
154
155         oinfo = lov->lo_lsm->lsm_oinfo[idx];
156         CDEBUG(D_INODE, DFID"@%p[%d] -> "DFID"@%p: ostid: "DOSTID
157                " idx: %d gen: %d\n",
158                PFID(&subhdr->coh_lu.loh_fid), subhdr, idx,
159                PFID(&hdr->coh_lu.loh_fid), hdr, POSTID(&oinfo->loi_oi),
160                oinfo->loi_ost_idx, oinfo->loi_ost_gen);
161
162         /* reuse ->coh_attr_guard to protect coh_parent change */
163         spin_lock(&subhdr->coh_attr_guard);
164         parent = subhdr->coh_parent;
165         if (parent == NULL) {
166                 subhdr->coh_parent = hdr;
167                 spin_unlock(&subhdr->coh_attr_guard);
168                 subhdr->coh_nesting = hdr->coh_nesting + 1;
169                 lu_object_ref_add(&stripe->co_lu, "lov-parent", lov);
170                 r0->lo_sub[idx] = cl2lovsub(stripe);
171                 r0->lo_sub[idx]->lso_super = lov;
172                 r0->lo_sub[idx]->lso_index = idx;
173                 result = 0;
174         } else {
175                 struct lu_object  *old_obj;
176                 struct lov_object *old_lov;
177                 unsigned int mask = D_INODE;
178
179                 spin_unlock(&subhdr->coh_attr_guard);
180                 old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
181                 LASSERT(old_obj != NULL);
182                 old_lov = cl2lov(lu2cl(old_obj));
183                 if (old_lov->lo_layout_invalid) {
184                         /* the object's layout has already changed but isn't
185                          * refreshed */
186                         lu_object_unhash(env, &stripe->co_lu);
187                         result = -EAGAIN;
188                 } else {
189                         mask = D_ERROR;
190                         result = -EIO;
191                 }
192
193                 LU_OBJECT_DEBUG(mask, env, &stripe->co_lu,
194                                 "stripe %d is already owned.", idx);
195                 LU_OBJECT_DEBUG(mask, env, old_obj, "owned.");
196                 LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n");
197                 cl_object_put(env, stripe);
198         }
199         return result;
200 }
201
202 static int lov_page_slice_fixup(struct lov_object *lov,
203                                 struct cl_object *stripe)
204 {
205         struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
206         struct cl_object *o;
207
208         if (stripe == NULL)
209                 return hdr->coh_page_bufsize - lov->lo_cl.co_slice_off -
210                        cfs_size_round(sizeof(struct lov_page));
211
212         cl_object_for_each(o, stripe)
213                 o->co_slice_off += hdr->coh_page_bufsize;
214
215         return cl_object_header(stripe)->coh_page_bufsize;
216 }
217
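/*
 * Instantiate a striped (raid0) layout: take a reference on the lsm,
 * allocate the r0->lo_sub[] array and create a cl_object for every stripe,
 * accumulating the per-stripe page slice size into coh_page_bufsize.
 */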
218 static int lov_init_raid0(const struct lu_env *env, struct lov_device *dev,
219                           struct lov_object *lov, struct lov_stripe_md *lsm,
220                           const struct cl_object_conf *conf,
221                           union lov_layout_state *state)
222 {
223         int result;
224         int i;
225
226         struct cl_object        *stripe;
227         struct lov_thread_info  *lti     = lov_env_info(env);
228         struct cl_object_conf   *subconf = &lti->lti_stripe_conf;
229         struct lu_fid           *ofid    = &lti->lti_fid;
230         struct lov_layout_raid0 *r0      = &state->raid0;
231
232         ENTRY;
233
234         if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
235                 dump_lsm(D_ERROR, lsm);
236                 LASSERTF(0, "magic mismatch, expected %d/%d, actual %d.\n",
237                          LOV_MAGIC_V1, LOV_MAGIC_V3, lsm->lsm_magic);
238         }
239
240         LASSERT(lov->lo_lsm == NULL);
241         lov->lo_lsm = lsm_addref(lsm);
242         r0->lo_nr = lsm->lsm_stripe_count;
243         LASSERT(r0->lo_nr <= lov_targets_nr(dev));
244
245         lov->lo_layout_invalid = true;
246
247         OBD_ALLOC_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
248         if (r0->lo_sub != NULL) {
249                 int psz = 0;
250
251                 result = 0;
252                 subconf->coc_inode = conf->coc_inode;
253                 spin_lock_init(&r0->lo_sub_lock);
254                 /*
255                  * Create stripe cl_objects.
256                  */
257                 for (i = 0; i < r0->lo_nr && result == 0; ++i) {
258                         struct cl_device *subdev;
259                         struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
260                         int ost_idx = oinfo->loi_ost_idx;
261
262                         if (lov_oinfo_is_dummy(oinfo))
263                                 continue;
264
265                         result = ostid_to_fid(ofid, &oinfo->loi_oi,
266                                               oinfo->loi_ost_idx);
267                         if (result != 0)
268                                 GOTO(out, result);
269
270                         if (dev->ld_target[ost_idx] == NULL) {
271                                 CERROR("%s: OST %04x is not initialized\n",
272                                        lov2obd(dev->ld_lov)->obd_name, ost_idx);
273                                 GOTO(out, result = -EIO);
274                         }
275
276                         subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
277                         subconf->u.coc_oinfo = oinfo;
278                         LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
279                         /* In the function below, .hs_keycmp resolves to
280                          * lu_obj_hop_keycmp() */
281                         /* coverity[overrun-buffer-val] */
282                         stripe = lov_sub_find(env, subdev, ofid, subconf);
283                         if (!IS_ERR(stripe)) {
284                                 result = lov_init_sub(env, lov, stripe, r0, i);
285                                 if (result == -EAGAIN) { /* try again */
286                                         --i;
287                                         result = 0;
288                                         continue;
289                                 }
290                         } else {
291                                 result = PTR_ERR(stripe);
292                         }
293
294                         if (result == 0) {
295                                 int sz = lov_page_slice_fixup(lov, stripe);
296                                 LASSERT(ergo(psz > 0, psz == sz));
297                                 psz = sz;
298                         }
299                 }
300                 if (result == 0)
301                         cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;
302         } else
303                 result = -ENOMEM;
304 out:
305         RETURN(result);
306 }
307
308 static int lov_init_released(const struct lu_env *env,
309                              struct lov_device *dev, struct lov_object *lov,
310                              struct lov_stripe_md *lsm,
311                              const struct cl_object_conf *conf,
312                              union lov_layout_state *state)
313 {
314         LASSERT(lsm != NULL);
315         LASSERT(lsm_is_released(lsm));
316         LASSERT(lov->lo_lsm == NULL);
317
318         lov->lo_lsm = lsm_addref(lsm);
319         return 0;
320 }
321
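/*
 * Look up the sub-object backing stripe @stripe_idx of a raid0 file.
 * Returns ERR_PTR(-EINVAL) for non-raid0 layouts or on FID conversion
 * failure.
 */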
322 static struct cl_object *lov_find_subobj(const struct lu_env *env,
323                                          struct lov_object *lov,
324                                          struct lov_stripe_md *lsm,
325                                          int stripe_idx)
326 {
327         struct lov_device       *dev = lu2lov_dev(lov2lu(lov)->lo_dev);
328         struct lov_oinfo        *oinfo = lsm->lsm_oinfo[stripe_idx];
329         struct lov_thread_info  *lti = lov_env_info(env);
330         struct lu_fid           *ofid = &lti->lti_fid;
331         struct cl_device        *subdev;
332         int                     ost_idx;
333         int                     rc;
334         struct cl_object        *result;
335
336         if (lov->lo_type != LLT_RAID0)
337                 GOTO(out, result = NULL);
338
339         ost_idx = oinfo->loi_ost_idx;
340         rc = ostid_to_fid(ofid, &oinfo->loi_oi, ost_idx);
341         if (rc != 0)
342                 GOTO(out, result = NULL);
343
344         subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
345         result = lov_sub_find(env, subdev, ofid, NULL);
346 out:
347         if (result == NULL)
348                 result = ERR_PTR(-EINVAL);
349         return result;
350 }
351
352 static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
353                             union lov_layout_state *state)
354 {
355         LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
356
357         lov_layout_wait(env, lov);
358         return 0;
359 }
360
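/*
 * Kill a stripe sub-object and wait until it has actually been freed, so
 * that r0->lo_sub[idx] no longer points to it (the slot is cleared from
 * lovsub_object_fini()).
 */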
361 static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
362                                struct lovsub_object *los, int idx)
363 {
364         struct cl_object        *sub;
365         struct lov_layout_raid0 *r0;
366         struct lu_site          *site;
367         struct lu_site_bkt_data *bkt;
368         wait_queue_t          *waiter;
369
370         r0  = &lov->u.raid0;
371         LASSERT(r0->lo_sub[idx] == los);
372
373         sub  = lovsub2cl(los);
374         site = sub->co_lu.lo_dev->ld_site;
375         bkt  = lu_site_bkt_from_fid(site, &sub->co_lu.lo_header->loh_fid);
376
377         cl_object_kill(env, sub);
378         /* release a reference to the sub-object and ... */
379         lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
380         cl_object_put(env, sub);
381
382         /* ... wait until it is actually destroyed---sub-object clears its
383          * ->lo_sub[] slot in lovsub_object_fini() */
384         if (r0->lo_sub[idx] == los) {
385                 waiter = &lov_env_info(env)->lti_waiter;
386                 init_waitqueue_entry(waiter, current);
387                 add_wait_queue(&bkt->lsb_marche_funebre, waiter);
388                 set_current_state(TASK_UNINTERRUPTIBLE);
389                 while (1) {
390                         /* this wait-queue is signaled at the end of
391                          * lu_object_free(). */
392                         set_current_state(TASK_UNINTERRUPTIBLE);
393                         spin_lock(&r0->lo_sub_lock);
394                         if (r0->lo_sub[idx] == los) {
395                                 spin_unlock(&r0->lo_sub_lock);
396                                 schedule();
397                         } else {
398                                 spin_unlock(&r0->lo_sub_lock);
399                                 set_current_state(TASK_RUNNING);
400                                 break;
401                         }
402                 }
403                 remove_wait_queue(&bkt->lsb_marche_funebre, waiter);
404         }
405         LASSERT(r0->lo_sub[idx] == NULL);
406 }
407
408 static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
409                             union lov_layout_state *state)
410 {
411         struct lov_layout_raid0 *r0 = &state->raid0;
412         struct lov_stripe_md    *lsm = lov->lo_lsm;
413         int i;
414
415         ENTRY;
416
417         dump_lsm(D_INODE, lsm);
418
419         lov_layout_wait(env, lov);
420         if (r0->lo_sub != NULL) {
421                 for (i = 0; i < r0->lo_nr; ++i) {
422                         struct lovsub_object *los = r0->lo_sub[i];
423
424                         if (los != NULL) {
425                                 cl_object_prune(env, &los->lso_cl);
426                                 /*
427                                  * If top-level object is to be evicted from
428                                  * the cache, so are its sub-objects.
429                                  */
430                                 lov_subobject_kill(env, lov, los, i);
431                         }
432                 }
433         }
434         RETURN(0);
435 }
436
437 static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov,
438                            union lov_layout_state *state)
439 {
440         LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
441 }
442
443 static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov,
444                            union lov_layout_state *state)
445 {
446         struct lov_layout_raid0 *r0 = &state->raid0;
447         ENTRY;
448
449         if (r0->lo_sub != NULL) {
450                 OBD_FREE_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
451                 r0->lo_sub = NULL;
452         }
453
454         dump_lsm(D_INODE, lov->lo_lsm);
455         lov_free_memmd(&lov->lo_lsm);
456
457         EXIT;
458 }
459
460 static void lov_fini_released(const struct lu_env *env, struct lov_object *lov,
461                                 union lov_layout_state *state)
462 {
463         ENTRY;
464         dump_lsm(D_INODE, lov->lo_lsm);
465         lov_free_memmd(&lov->lo_lsm);
466         EXIT;
467 }
468
469 static int lov_print_empty(const struct lu_env *env, void *cookie,
470                            lu_printer_t p, const struct lu_object *o)
471 {
472         (*p)(env, cookie, "empty %d\n", lu2lov(o)->lo_layout_invalid);
473         return 0;
474 }
475
476 static int lov_print_raid0(const struct lu_env *env, void *cookie,
477                            lu_printer_t p, const struct lu_object *o)
478 {
479         struct lov_object       *lov = lu2lov(o);
480         struct lov_layout_raid0 *r0  = lov_r0(lov);
481         struct lov_stripe_md    *lsm = lov->lo_lsm;
482         int                      i;
483
484         (*p)(env, cookie, "stripes: %d, %s, lsm{%p 0x%08X %d %u %u}:\n",
485                 r0->lo_nr, lov->lo_layout_invalid ? "invalid" : "valid", lsm,
486                 lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
487                 lsm->lsm_stripe_count, lsm->lsm_layout_gen);
488         for (i = 0; i < r0->lo_nr; ++i) {
489                 struct lu_object *sub;
490
491                 if (r0->lo_sub[i] != NULL) {
492                         sub = lovsub2lu(r0->lo_sub[i]);
493                         lu_object_print(env, cookie, p, sub);
494                 } else {
495                         (*p)(env, cookie, "sub %d absent\n", i);
496                 }
497         }
498         return 0;
499 }
500
501 static int lov_print_released(const struct lu_env *env, void *cookie,
502                                 lu_printer_t p, const struct lu_object *o)
503 {
504         struct lov_object       *lov = lu2lov(o);
505         struct lov_stripe_md    *lsm = lov->lo_lsm;
506
507         (*p)(env, cookie,
508                 "released: %s, lsm{%p 0x%08X %d %u %u}:\n",
509                 lov->lo_layout_invalid ? "invalid" : "valid", lsm,
510                 lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
511                 lsm->lsm_stripe_count, lsm->lsm_layout_gen);
512         return 0;
513 }
514
515 /**
516  * Implements cl_object_operations::coo_attr_get() method for an object
517  * without stripes (LLT_EMPTY layout type).
518  *
519  * The only attribute for which this layer is authoritative in this case
520  * is cl_attr::cat_blocks, which is always 0.
521  */
522 static int lov_attr_get_empty(const struct lu_env *env, struct cl_object *obj,
523                               struct cl_attr *attr)
524 {
525         attr->cat_blocks = 0;
526         return 0;
527 }
528
529 static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
530                               struct cl_attr *attr)
531 {
532         struct lov_object       *lov = cl2lov(obj);
533         struct lov_layout_raid0 *r0 = lov_r0(lov);
534         struct cl_attr          *lov_attr = &r0->lo_attr;
535         int                      result = 0;
536
537         ENTRY;
538
539         /* This is called without holding the type guard mutex, so it must
540          * be inside an ongoing IO, otherwise the lsm may be replaced.
541          * LU-2117: it turns out there exists one exception. For mmapped
542          * files, a lock on one file may be requested from another file's IO
543          * context; this function is then called from ccc_lock_state() and
544          * would hit this assertion.
545          * Anyway, it is still okay to call attr_get without the type guard,
546          * as the layout cannot change while locks exist. */
547         /* LASSERT(atomic_read(&lsm->lsm_refc) > 1); */
548
549         if (!r0->lo_attr_valid) {
550                 struct lov_stripe_md    *lsm = lov->lo_lsm;
551                 struct ost_lvb          *lvb = &lov_env_info(env)->lti_lvb;
552                 __u64                    kms = 0;
553
554                 memset(lvb, 0, sizeof(*lvb));
555                 /* XXX: timestamps can be negative by sanity:test_39m,
556                  * how can it be? */
557                 lvb->lvb_atime = LLONG_MIN;
558                 lvb->lvb_ctime = LLONG_MIN;
559                 lvb->lvb_mtime = LLONG_MIN;
560
561                 /*
562                  * XXX that should be replaced with a loop over sub-objects,
563                  * doing cl_object_attr_get() on them. But for now, let's
564                  * reuse old lov code.
565                  */
566
567                 /*
568                  * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
569                  * happy. It's not needed, because new code uses
570                  * ->coh_attr_guard spin-lock to protect consistency of
571                  * sub-object attributes.
572                  */
573                 lov_stripe_lock(lsm);
574                 result = lov_merge_lvb_kms(lsm, lvb, &kms);
575                 lov_stripe_unlock(lsm);
576                 if (result == 0) {
577                         cl_lvb2attr(lov_attr, lvb);
578                         lov_attr->cat_kms = kms;
579                         r0->lo_attr_valid = 1;
580                 }
581         }
582         if (result == 0) { /* merge results */
583                 attr->cat_blocks = lov_attr->cat_blocks;
584                 attr->cat_size = lov_attr->cat_size;
585                 attr->cat_kms = lov_attr->cat_kms;
586                 if (attr->cat_atime < lov_attr->cat_atime)
587                         attr->cat_atime = lov_attr->cat_atime;
588                 if (attr->cat_ctime < lov_attr->cat_ctime)
589                         attr->cat_ctime = lov_attr->cat_ctime;
590                 if (attr->cat_mtime < lov_attr->cat_mtime)
591                         attr->cat_mtime = lov_attr->cat_mtime;
592         }
593         RETURN(result);
594 }
595
596 static const struct lov_layout_operations lov_dispatch[] = {
597         [LLT_EMPTY] = {
598                 .llo_init      = lov_init_empty,
599                 .llo_delete    = lov_delete_empty,
600                 .llo_fini      = lov_fini_empty,
601                 .llo_install   = lov_install_empty,
602                 .llo_print     = lov_print_empty,
603                 .llo_page_init = lov_page_init_empty,
604                 .llo_lock_init = lov_lock_init_empty,
605                 .llo_io_init   = lov_io_init_empty,
606                 .llo_getattr   = lov_attr_get_empty,
607         },
608         [LLT_RAID0] = {
609                 .llo_init      = lov_init_raid0,
610                 .llo_delete    = lov_delete_raid0,
611                 .llo_fini      = lov_fini_raid0,
612                 .llo_install   = lov_install_raid0,
613                 .llo_print     = lov_print_raid0,
614                 .llo_page_init = lov_page_init_raid0,
615                 .llo_lock_init = lov_lock_init_raid0,
616                 .llo_io_init   = lov_io_init_raid0,
617                 .llo_getattr   = lov_attr_get_raid0,
618         },
619         [LLT_RELEASED] = {
620                 .llo_init      = lov_init_released,
621                 .llo_delete    = lov_delete_empty,
622                 .llo_fini      = lov_fini_released,
623                 .llo_install   = lov_install_empty,
624                 .llo_print     = lov_print_released,
625                 .llo_page_init = lov_page_init_empty,
626                 .llo_lock_init = lov_lock_init_empty,
627                 .llo_io_init   = lov_io_init_released,
628                 .llo_getattr   = lov_attr_get_empty,
629         }
630 };
631
632 /**
633  * Performs a double-dispatch based on the layout type of an object.
634  */
635 #define LOV_2DISPATCH_NOLOCK(obj, op, ...)              \
636 ({                                                      \
637         struct lov_object *__obj = (obj);               \
638         enum lov_layout_type __llt;                     \
639                                                         \
640         __llt = __obj->lo_type;                         \
641         LASSERT(__llt < ARRAY_SIZE(lov_dispatch));      \
642         lov_dispatch[__llt].op(__VA_ARGS__);            \
643 })
644
645 /**
646  * Return lov_layout_type associated with a given lsm
647  */
648 static enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
649 {
650         if (lsm == NULL)
651                 return LLT_EMPTY;
652         if (lsm_is_released(lsm))
653                 return LLT_RELEASED;
654         return LLT_RAID0;
655 }
656
657 static inline void lov_conf_freeze(struct lov_object *lov)
658 {
659         CDEBUG(D_INODE, "To take share lov(%p) owner %p/%p\n",
660                 lov, lov->lo_owner, current);
661         if (lov->lo_owner != current)
662                 down_read(&lov->lo_type_guard);
663 }
664
665 static inline void lov_conf_thaw(struct lov_object *lov)
666 {
667         CDEBUG(D_INODE, "To release share lov(%p) owner %p/%p\n",
668                 lov, lov->lo_owner, current);
669         if (lov->lo_owner != current)
670                 up_read(&lov->lo_type_guard);
671 }
672
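/*
 * Locked variant of LOV_2DISPATCH_NOLOCK(): when @lock is non-zero the
 * layout type is frozen (lo_type_guard taken for reading) around the call,
 * so the layout cannot change while the method runs.
 */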
673 #define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...)                       \
674 ({                                                                      \
675         struct lov_object                      *__obj = (obj);          \
676         int                                     __lock = !!(lock);      \
677         typeof(lov_dispatch[0].op(__VA_ARGS__)) __result;               \
678                                                                         \
679         if (__lock)                                                     \
680                 lov_conf_freeze(__obj);                                 \
681         __result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__);          \
682         if (__lock)                                                     \
683                 lov_conf_thaw(__obj);                                   \
684         __result;                                                       \
685 })
686
687 /**
688  * Performs a locked double-dispatch based on the layout type of an object.
689  */
690 #define LOV_2DISPATCH(obj, op, ...)                     \
691         LOV_2DISPATCH_MAYLOCK(obj, op, 1, __VA_ARGS__)
692
693 #define LOV_2DISPATCH_VOID(obj, op, ...)                                \
694 do {                                                                    \
695         struct lov_object                      *__obj = (obj);          \
696         enum lov_layout_type                    __llt;                  \
697                                                                         \
698         lov_conf_freeze(__obj);                                         \
699         __llt = __obj->lo_type;                                         \
700         LASSERT(__llt < ARRAY_SIZE(lov_dispatch));                      \
701         lov_dispatch[__llt].op(__VA_ARGS__);                            \
702         lov_conf_thaw(__obj);                                           \
703 } while (0)
704
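/*
 * Take lo_type_guard exclusively for a layout change.  lo_owner records the
 * owning task so that lov_conf_freeze()/lov_conf_thaw() called from the same
 * thread skip the read lock and do not deadlock against the writer.
 */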
705 static void lov_conf_lock(struct lov_object *lov)
706 {
707         LASSERT(lov->lo_owner != current);
708         down_write(&lov->lo_type_guard);
709         LASSERT(lov->lo_owner == NULL);
710         lov->lo_owner = current;
711         CDEBUG(D_INODE, "Took exclusive lov(%p) owner %p\n",
712                 lov, lov->lo_owner);
713 }
714
715 static void lov_conf_unlock(struct lov_object *lov)
716 {
717         CDEBUG(D_INODE, "To release exclusive lov(%p) owner %p\n",
718                 lov, lov->lo_owner);
719         lov->lo_owner = NULL;
720         up_write(&lov->lo_type_guard);
721 }
722
723 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
724 {
725         struct l_wait_info lwi = { 0 };
726         ENTRY;
727
728         while (atomic_read(&lov->lo_active_ios) > 0) {
729                 CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
730                         PFID(lu_object_fid(lov2lu(lov))),
731                         atomic_read(&lov->lo_active_ios));
732
733                 l_wait_event(lov->lo_waitq,
734                              atomic_read(&lov->lo_active_ios) == 0, &lwi);
735         }
736         RETURN(0);
737 }
738
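/*
 * Switch the object to the layout described by @lsm: prune cached pages,
 * delete and finalize the old layout, then initialize and install the new
 * one.  On failure the object is left as an LLT_EMPTY file.
 */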
739 static int lov_layout_change(const struct lu_env *unused,
740                              struct lov_object *lov, struct lov_stripe_md *lsm,
741                              const struct cl_object_conf *conf)
742 {
743         enum lov_layout_type llt = lov_type(lsm);
744         union lov_layout_state *state = &lov->u;
745         const struct lov_layout_operations *old_ops;
746         const struct lov_layout_operations *new_ops;
747         struct lov_device *lov_dev = lov_object_dev(lov);
748         struct lu_env *env;
749         __u16 refcheck;
750         int rc;
751         ENTRY;
752
753         LASSERT(lov->lo_type < ARRAY_SIZE(lov_dispatch));
754
755         env = cl_env_get(&refcheck);
756         if (IS_ERR(env))
757                 RETURN(PTR_ERR(env));
758
759         LASSERT(llt < ARRAY_SIZE(lov_dispatch));
760
761         CDEBUG(D_INODE, DFID" from %s to %s\n",
762                PFID(lu_object_fid(lov2lu(lov))),
763                llt2str(lov->lo_type), llt2str(llt));
764
765         old_ops = &lov_dispatch[lov->lo_type];
766         new_ops = &lov_dispatch[llt];
767
768         rc = cl_object_prune(env, &lov->lo_cl);
769         if (rc != 0)
770                 GOTO(out, rc);
771
772         rc = old_ops->llo_delete(env, lov, &lov->u);
773         if (rc != 0)
774                 GOTO(out, rc);
775
776         old_ops->llo_fini(env, lov, &lov->u);
777
778         LASSERT(atomic_read(&lov->lo_active_ios) == 0);
779
780         CDEBUG(D_INODE, DFID "Apply new layout lov %p, type %d\n",
781                PFID(lu_object_fid(lov2lu(lov))), lov, llt);
782
783         lov->lo_type = LLT_EMPTY;
784
785         /* page bufsize fixup */
786         cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
787                 lov_page_slice_fixup(lov, NULL);
788
789         rc = new_ops->llo_init(env, lov_dev, lov, lsm, conf, state);
790         if (rc != 0) {
791                 struct obd_device *obd = lov2obd(lov_dev->ld_lov);
792
793                 CERROR("%s: cannot apply new layout on "DFID" : rc = %d\n",
794                        obd->obd_name, PFID(lu_object_fid(lov2lu(lov))), rc);
795                 new_ops->llo_delete(env, lov, state);
796                 new_ops->llo_fini(env, lov, state);
797                 /* this file becomes an EMPTY file. */
798                 GOTO(out, rc);
799         }
800
801         new_ops->llo_install(env, lov, state);
802         lov->lo_type = llt;
803
804 out:
805         cl_env_put(env, &refcheck);
806         RETURN(rc);
807 }
808
809 /*****************************************************************************
810  *
811  * Lov object operations.
812  *
813  */
814 int lov_object_init(const struct lu_env *env, struct lu_object *obj,
815                     const struct lu_object_conf *conf)
816 {
817         struct lov_object            *lov   = lu2lov(obj);
818         struct lov_device            *dev   = lov_object_dev(lov);
819         const struct cl_object_conf  *cconf = lu2cl_conf(conf);
820         union lov_layout_state       *set   = &lov->u;
821         const struct lov_layout_operations *ops;
822         struct lov_stripe_md *lsm = NULL;
823         int rc;
824         ENTRY;
825
826         init_rwsem(&lov->lo_type_guard);
827         atomic_set(&lov->lo_active_ios, 0);
828         init_waitqueue_head(&lov->lo_waitq);
829         cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
830
831         lov->lo_type = LLT_EMPTY;
832         if (cconf->u.coc_layout.lb_buf != NULL) {
833                 lsm = lov_unpackmd(dev->ld_lov,
834                                    cconf->u.coc_layout.lb_buf,
835                                    cconf->u.coc_layout.lb_len);
836                 if (IS_ERR(lsm))
837                         RETURN(PTR_ERR(lsm));
838         }
839
840         /* no locking is necessary, as object is being created */
841         lov->lo_type = lov_type(lsm);
842         ops = &lov_dispatch[lov->lo_type];
843         rc = ops->llo_init(env, dev, lov, lsm, cconf, set);
844         if (rc != 0)
845                 GOTO(out_lsm, rc);
846
847         ops->llo_install(env, lov, set);
848
849 out_lsm:
850         lov_lsm_put(lsm);
851
852         RETURN(rc);
853 }
854
855 static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
856                         const struct cl_object_conf *conf)
857 {
858         struct lov_stripe_md    *lsm = NULL;
859         struct lov_object       *lov = cl2lov(obj);
860         int                      result = 0;
861         ENTRY;
862
863         if (conf->coc_opc == OBJECT_CONF_SET &&
864             conf->u.coc_layout.lb_buf != NULL) {
865                 lsm = lov_unpackmd(lov_object_dev(lov)->ld_lov,
866                                    conf->u.coc_layout.lb_buf,
867                                    conf->u.coc_layout.lb_len);
868                 if (IS_ERR(lsm))
869                         RETURN(PTR_ERR(lsm));
870         }
871
872         lov_conf_lock(lov);
873         if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
874                 lov->lo_layout_invalid = true;
875                 GOTO(out, result = 0);
876         }
877
878         if (conf->coc_opc == OBJECT_CONF_WAIT) {
879                 if (lov->lo_layout_invalid &&
880                     atomic_read(&lov->lo_active_ios) > 0) {
881                         lov_conf_unlock(lov);
882                         result = lov_layout_wait(env, lov);
883                         lov_conf_lock(lov);
884                 }
885                 GOTO(out, result);
886         }
887
888         LASSERT(conf->coc_opc == OBJECT_CONF_SET);
889
890         if ((lsm == NULL && lov->lo_lsm == NULL) ||
891             ((lsm != NULL && lov->lo_lsm != NULL) &&
892              (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
893              (lov->lo_lsm->lsm_pattern == lsm->lsm_pattern))) {
894                 /* same version of layout */
895                 lov->lo_layout_invalid = false;
896                 GOTO(out, result = 0);
897         }
898
899         /* will change layout - check if there still exists active IO. */
900         if (atomic_read(&lov->lo_active_ios) > 0) {
901                 lov->lo_layout_invalid = true;
902                 GOTO(out, result = -EBUSY);
903         }
904
905         result = lov_layout_change(env, lov, lsm, conf);
906         lov->lo_layout_invalid = result != 0;
907         EXIT;
908
909 out:
910         lov_conf_unlock(lov);
911         lov_lsm_put(lsm);
912         CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
913                PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
914         RETURN(result);
915 }
916
917 static void lov_object_delete(const struct lu_env *env, struct lu_object *obj)
918 {
919         struct lov_object *lov = lu2lov(obj);
920
921         ENTRY;
922         LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
923         EXIT;
924 }
925
926 static void lov_object_free(const struct lu_env *env, struct lu_object *obj)
927 {
928         struct lov_object *lov = lu2lov(obj);
929
930         ENTRY;
931         LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
932         lu_object_fini(obj);
933         OBD_SLAB_FREE_PTR(lov, lov_object_kmem);
934         EXIT;
935 }
936
937 static int lov_object_print(const struct lu_env *env, void *cookie,
938                             lu_printer_t p, const struct lu_object *o)
939 {
940         return LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
941 }
942
943 int lov_page_init(const struct lu_env *env, struct cl_object *obj,
944                   struct cl_page *page, pgoff_t index)
945 {
946         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page,
947                                     index);
948 }
949
950 /**
951  * Implements cl_object_operations::coo_io_init() method for lov
952  * layer. Dispatches to the appropriate layout io initialization method.
953  */
954 int lov_io_init(const struct lu_env *env, struct cl_object *obj,
955                 struct cl_io *io)
956 {
957         CL_IO_SLICE_CLEAN(lov_env_io(env), lis_cl);
958
959         CDEBUG(D_INODE, DFID "io %p type %d ignore/verify layout %d/%d\n",
960                PFID(lu_object_fid(&obj->co_lu)), io, io->ci_type,
961                io->ci_ignore_layout, io->ci_verify_layout);
962
963         return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_io_init,
964                                      !io->ci_ignore_layout, env, obj, io);
965 }
966
967 /**
968  * An implementation of cl_object_operations::coo_attr_get() method for lov
969  * layer. For raid0 layout this collects and merges attributes of all
970  * sub-objects.
971  */
972 static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
973                         struct cl_attr *attr)
974 {
975         /* do not take lock, as this function is called under a
976          * spin-lock. Layout is protected from changing by ongoing IO. */
977         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
978 }
979
980 static int lov_attr_update(const struct lu_env *env, struct cl_object *obj,
981                            const struct cl_attr *attr, unsigned valid)
982 {
983         /*
984          * No dispatch is required here, as no layout implements this.
985          */
986         return 0;
987 }
988
989 int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
990                   struct cl_lock *lock, const struct cl_io *io)
991 {
992         /* No need to lock because we've taken one refcount of layout.  */
993         return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_lock_init, env, obj, lock,
994                                     io);
995 }
996
997 /**
998  * We calculate on which OST the mapping will end. If the length of mapping
999  * is greater than (stripe_size * stripe_count) then the last_stripe is
1000  * the one just before start_stripe. Otherwise we check whether the mapping
1001  * intersects each OST and find last_stripe.
1002  * This function returns the last_stripe and also sets the stripe_count
1003  * over which the mapping is spread.
1004  *
1005  * \param lsm [in]              striping information for the file
1006  * \param fm_start [in]         logical start of mapping
1007  * \param fm_end [in]           logical end of mapping
1008  * \param start_stripe [in]     starting stripe of the mapping
1009  * \param stripe_count [out]    the number of stripes across which to map is
1010  *                              returned
1011  *
1012  * \retval last_stripe          return the last stripe of the mapping
1013  */
1014 static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm,
1015                                    u64 fm_start, u64 fm_end,
1016                                    int start_stripe, int *stripe_count)
1017 {
1018         int last_stripe;
1019         u64 obd_start;
1020         u64 obd_end;
1021         int i, j;
1022
1023         if (fm_end - fm_start > lsm->lsm_stripe_size * lsm->lsm_stripe_count) {
1024                 last_stripe = (start_stripe < 1 ? lsm->lsm_stripe_count - 1 :
1025                                                               start_stripe - 1);
1026                 *stripe_count = lsm->lsm_stripe_count;
1027         } else {
1028                 for (j = 0, i = start_stripe; j < lsm->lsm_stripe_count;
1029                      i = (i + 1) % lsm->lsm_stripe_count, j++) {
1030                         if ((lov_stripe_intersects(lsm, i, fm_start, fm_end,
1031                                                    &obd_start, &obd_end)) == 0)
1032                                 break;
1033                 }
1034                 *stripe_count = j;
1035                 last_stripe = (start_stripe + j - 1) % lsm->lsm_stripe_count;
1036         }
1037
1038         return last_stripe;
1039 }
1040
1041 /**
1042  * Set fe_device and copy extents from local buffer into main return buffer.
1043  *
1044  * \param fiemap [out]          fiemap to hold all extents
1045  * \param lcl_fm_ext [in]       array of fiemap extents from the OSC layer
1046  * \param ost_index [in]        OST index to be written into the fm_device
1047  *                              field for each extent
1048  * \param ext_count [in]        number of extents to be copied
1049  * \param current_extent [in]   where to start copying in the extent array
1050  */
1051 static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap,
1052                                          struct fiemap_extent *lcl_fm_ext,
1053                                          int ost_index, unsigned int ext_count,
1054                                          int current_extent)
1055 {
1056         char            *to;
1057         unsigned int    ext;
1058
1059         for (ext = 0; ext < ext_count; ext++) {
1060                 lcl_fm_ext[ext].fe_device = ost_index;
1061                 lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
1062         }
1063
1064         /* Copy fm_extent's from fm_local to return buffer */
1065         to = (char *)fiemap + fiemap_count_to_size(current_extent);
1066         memcpy(to, lcl_fm_ext, ext_count * sizeof(struct fiemap_extent));
1067 }
1068
1069 #define FIEMAP_BUFFER_SIZE 4096
1070
1071 /**
1072  * Non-zero fe_logical indicates that this is a continuation FIEMAP
1073  * call. The local end offset and the device are sent in the first
1074  * fm_extent. This function calculates the stripe number from that index
1075  * and returns the stripe_no on which mapping is to be restarted.
1076  *
1077  * This function returns fm_end_offset which is the in-OST offset at which
1078  * mapping should be restarted. If fm_end_offset=0 is returned then caller
1079  * will re-calculate proper offset in next stripe.
1080  * Note that the first extent is passed to lov_get_info via the value field.
1081  *
1082  * \param fiemap [in]           fiemap request header
1083  * \param lsm [in]              striping information for the file
1084  * \param fm_start [in]         logical start of mapping
1085  * \param fm_end [in]           logical end of mapping
1086  * \param start_stripe [out]    starting stripe will be returned in this
1087  */
1088 static u64 fiemap_calc_fm_end_offset(struct fiemap *fiemap,
1089                                      struct lov_stripe_md *lsm,
1090                                      u64 fm_start, u64 fm_end,
1091                                      int *start_stripe)
1092 {
1093         u64 local_end = fiemap->fm_extents[0].fe_logical;
1094         u64 lun_start;
1095         u64 lun_end;
1096         u64 fm_end_offset;
1097         int stripe_no = -1;
1098         int i;
1099
1100         if (fiemap->fm_extent_count == 0 ||
1101             fiemap->fm_extents[0].fe_logical == 0)
1102                 return 0;
1103
1104         /* Find out stripe_no from ost_index saved in the fe_device */
1105         for (i = 0; i < lsm->lsm_stripe_count; i++) {
1106                 struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
1107
1108                 if (lov_oinfo_is_dummy(oinfo))
1109                         continue;
1110
1111                 if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) {
1112                         stripe_no = i;
1113                         break;
1114                 }
1115         }
1116
1117         if (stripe_no == -1)
1118                 return -EINVAL;
1119
1120         /* If we have finished mapping on previous device, shift logical
1121          * offset to start of next device */
1122         if (lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end,
1123                                   &lun_start, &lun_end) != 0 &&
1124             local_end < lun_end) {
1125                 fm_end_offset = local_end;
1126                 *start_stripe = stripe_no;
1127         } else {
1128                 /* This is a special value to indicate that caller should
1129                  * calculate offset in next stripe. */
1130                 fm_end_offset = 0;
1131                 *start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count;
1132         }
1133
1134         return fm_end_offset;
1135 }
1136
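/*
 * State shared across the per-stripe FIEMAP iterations in
 * lov_object_fiemap(): fs_fm is the temporary per-OST fiemap buffer,
 * fs_start/fs_length/fs_end describe the requested file range,
 * fs_end_offset is the in-stripe offset at which a continuation call
 * restarts, fs_cur_extent counts extents copied so far, fs_cnt_need is the
 * remaining extent budget, and the fs_*_stripe/fs_device_done/fs_finish/
 * fs_enough fields track iteration progress.
 */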
1137 struct fiemap_state {
1138         struct fiemap   *fs_fm;
1139         u64             fs_start;
1140         u64             fs_length;
1141         u64             fs_end;
1142         u64             fs_end_offset;
1143         int             fs_cur_extent;
1144         int             fs_cnt_need;
1145         int             fs_start_stripe;
1146         int             fs_last_stripe;
1147         bool            fs_device_done;
1148         bool            fs_finish;
1149         bool            fs_enough;
1150 };
1151
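/*
 * Map the portion of the requested range that falls on stripe @stripeno:
 * repeatedly call cl_object_fiemap() on the stripe sub-object, tag the
 * returned extents with the OST index and copy them into @fiemap, until the
 * stripe is fully mapped, EOF is hit, or the extent budget runs out.
 */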
1152 int fiemap_for_stripe(const struct lu_env *env, struct cl_object *obj,
1153                       struct lov_stripe_md *lsm,
1154                       struct fiemap *fiemap, size_t *buflen,
1155                       struct ll_fiemap_info_key *fmkey, int stripeno,
1156                       struct fiemap_state *fs)
1157 {
1158         struct cl_object *subobj;
1159         struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov;
1160         struct fiemap_extent *fm_ext = &fs->fs_fm->fm_extents[0];
1161         u64 req_fm_len; /* Stores length of required mapping */
1162         u64 len_mapped_single_call;
1163         u64 lun_start;
1164         u64 lun_end;
1165         u64 obd_object_end;
1166         unsigned int ext_count;
1167         /* EOF for object */
1168         bool ost_eof = false;
1169         /* done with required mapping for this OST? */
1170         bool ost_done = false;
1171         int ost_index;
1172         int rc = 0;
1173
1174         fs->fs_device_done = false;
1175         /* Find out range of mapping on this stripe */
1176         if ((lov_stripe_intersects(lsm, stripeno, fs->fs_start, fs->fs_end,
1177                                    &lun_start, &obd_object_end)) == 0)
1178                 return 0;
1179
1180         if (lov_oinfo_is_dummy(lsm->lsm_oinfo[stripeno]))
1181                 return -EIO;
1182
1183         /* If this is a continuation FIEMAP call and we are on the
1184          * starting stripe, then lun_start needs to be set to
1185          * end_offset. */
1186         if (fs->fs_end_offset != 0 && stripeno == fs->fs_start_stripe)
1187                 lun_start = fs->fs_end_offset;
1188
1189         lun_end = fs->fs_length;
1190         if (lun_end != ~0ULL) {
1191                 /* Handle fs->fs_start + fs->fs_length overflow */
1192                 if (fs->fs_start + fs->fs_length < fs->fs_start)
1193                         fs->fs_length = ~0ULL - fs->fs_start;
1194                 lun_end = lov_size_to_stripe(lsm, fs->fs_start + fs->fs_length,
1195                                              stripeno);
1196         }
1197
1198         if (lun_start == lun_end)
1199                 return 0;
1200
1201         req_fm_len = obd_object_end - lun_start;
1202         fs->fs_fm->fm_length = 0;
1203         len_mapped_single_call = 0;
1204
1205         /* find lovsub object */
1206         subobj = lov_find_subobj(env, cl2lov(obj), lsm, stripeno);
1207         if (IS_ERR(subobj))
1208                 return PTR_ERR(subobj);
1209         /* If the output buffer is very large and the objects have many
1210          * extents we may need to loop on a single OST repeatedly */
1211         do {
1212                 if (fiemap->fm_extent_count > 0) {
1213                         /* Don't get too many extents. */
1214                         if (fs->fs_cur_extent + fs->fs_cnt_need >
1215                             fiemap->fm_extent_count)
1216                                 fs->fs_cnt_need = fiemap->fm_extent_count -
1217                                                   fs->fs_cur_extent;
1218                 }
1219
1220                 lun_start += len_mapped_single_call;
1221                 fs->fs_fm->fm_length = req_fm_len - len_mapped_single_call;
1222                 req_fm_len = fs->fs_fm->fm_length;
1223                 fs->fs_fm->fm_extent_count = fs->fs_enough ?
1224                                              1 : fs->fs_cnt_need;
1225                 fs->fs_fm->fm_mapped_extents = 0;
1226                 fs->fs_fm->fm_flags = fiemap->fm_flags;
1227
1228                 ost_index = lsm->lsm_oinfo[stripeno]->loi_ost_idx;
1229
1230                 if (ost_index < 0 || ost_index >= lov->desc.ld_tgt_count)
1231                         GOTO(obj_put, rc = -EINVAL);
1232                 /* If OST is inactive, return extent with UNKNOWN flag. */
1233                 if (!lov->lov_tgts[ost_index]->ltd_active) {
1234                         fs->fs_fm->fm_flags |= FIEMAP_EXTENT_LAST;
1235                         fs->fs_fm->fm_mapped_extents = 1;
1236
1237                         fm_ext[0].fe_logical = lun_start;
1238                         fm_ext[0].fe_length = obd_object_end - lun_start;
1239                         fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
1240
1241                         goto inactive_tgt;
1242                 }
1243
1244                 fs->fs_fm->fm_start = lun_start;
1245                 fs->fs_fm->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
1246                 memcpy(&fmkey->lfik_fiemap, fs->fs_fm, sizeof(*fs->fs_fm));
1247                 *buflen = fiemap_count_to_size(fs->fs_fm->fm_extent_count);
1248
1249                 rc = cl_object_fiemap(env, subobj, fmkey, fs->fs_fm, buflen);
1250                 if (rc != 0)
1251                         GOTO(obj_put, rc);
1252 inactive_tgt:
1253                 ext_count = fs->fs_fm->fm_mapped_extents;
1254                 if (ext_count == 0) {
1255                         ost_done = true;
1256                         fs->fs_device_done = true;
1257                         /* If the last stripe has a hole at the end,
1258                          * we need to return */
1259                         if (stripeno == fs->fs_last_stripe) {
1260                                 fiemap->fm_mapped_extents = 0;
1261                                 fs->fs_finish = true;
1262                                 GOTO(obj_put, rc);
1263                         }
1264                         break;
1265                 } else if (fs->fs_enough) {
1266                         /*
1267                          * We've collected enough extents and there are
1268                          * more extents after it.
1269                          */
1270                         fs->fs_finish = true;
1271                         GOTO(obj_put, rc);
1272                 }
1273
1274                 /* If we just need the number of extents, go to the next device */
1275                 if (fiemap->fm_extent_count == 0) {
1276                         fs->fs_cur_extent += ext_count;
1277                         break;
1278                 }
1279
1280                 /* prepare to copy retrieved map extents */
1281                 len_mapped_single_call = fm_ext[ext_count - 1].fe_logical +
1282                                          fm_ext[ext_count - 1].fe_length -
1283                                          lun_start;
1284
1285                 /* Have we finished mapping on this device? */
1286                 if (req_fm_len <= len_mapped_single_call) {
1287                         ost_done = true;
1288                         fs->fs_device_done = true;
1289                 }
1290
1291                 /* Clear the EXTENT_LAST flag which can be present on
1292                  * the last extent */
1293                 if (fm_ext[ext_count - 1].fe_flags & FIEMAP_EXTENT_LAST)
1294                         fm_ext[ext_count - 1].fe_flags &= ~FIEMAP_EXTENT_LAST;
1295                 if (lov_stripe_size(lsm, fm_ext[ext_count - 1].fe_logical +
1296                                          fm_ext[ext_count - 1].fe_length,
1297                                     stripeno) >= fmkey->lfik_oa.o_size) {
1298                         ost_eof = true;
1299                         fs->fs_device_done = true;
1300                 }
1301
1302                 fiemap_prepare_and_copy_exts(fiemap, fm_ext, ost_index,
1303                                              ext_count, fs->fs_cur_extent);
1304                 fs->fs_cur_extent += ext_count;
1305
1306                 /* Ran out of available extents? */
1307                 if (fs->fs_cur_extent >= fiemap->fm_extent_count)
1308                         fs->fs_enough = true;
1309         } while (!ost_done && !ost_eof);
1310
1311         if (stripeno == fs->fs_last_stripe)
1312                 fs->fs_finish = true;
1313 obj_put:
1314         cl_object_put(env, subobj);
1315
1316         return rc;
1317 }
1318
1319 /**
1320  * Break down the FIEMAP request and send appropriate calls to individual OSTs.
1321  * This also handles the restarting of FIEMAP calls in case mapping overflows
1322  * the available number of extents in single call.
1323  *
1324  * \param env [in]              lustre environment
1325  * \param obj [in]              file object
1326  * \param fmkey [in]            fiemap request header and other info
1327  * \param fiemap [out]          fiemap buffer holding retrieved map extents
1328  * \param buflen [in/out]       max buffer length of @fiemap; when iterating
1329  *                              over each OST it limits the map size requested
1330  * \retval 0    success
1331  * \retval < 0  error
1332  */
1333 static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj,
1334                              struct ll_fiemap_info_key *fmkey,
1335                              struct fiemap *fiemap, size_t *buflen)
1336 {
1337         struct lov_stripe_md    *lsm;
1338         struct fiemap           *fm_local = NULL;
1339         int                     cur_stripe;
1340         int                     stripe_count;
1341         unsigned int            buffer_size = FIEMAP_BUFFER_SIZE;
1342         int                     rc = 0;
1343         struct fiemap_state fs = { 0 };
1344         ENTRY;
1345
1346         lsm = lov_lsm_addref(cl2lov(obj));
1347         if (lsm == NULL)
1348                 RETURN(-ENODATA);
1349
1350         /**
1351          * If the stripe_count > 1 and the application does not understand
1352          * the DEVICE_ORDER flag, it cannot interpret the extents correctly.
1353          */
1354         if (lsm->lsm_stripe_count > 1 && !(fiemap->fm_flags &
1355                                            FIEMAP_FLAG_DEVICE_ORDER))
1356                 GOTO(out_lsm, rc = -ENOTSUPP);
1357
1358         if (lsm_is_released(lsm)) {
1359                 if (fiemap->fm_start < fmkey->lfik_oa.o_size) {
1360                         /**
1361                          * For a released file, return a minimal FIEMAP if
1362                          * the request fits within the file size.
1363                          */
1364                         fiemap->fm_mapped_extents = 1;
1365                         fiemap->fm_extents[0].fe_logical = fiemap->fm_start;
1366                         if (fiemap->fm_start + fiemap->fm_length <
1367                             fmkey->lfik_oa.o_size)
1368                                 fiemap->fm_extents[0].fe_length =
1369                                         fiemap->fm_length;
1370                         else
1371                                 fiemap->fm_extents[0].fe_length =
1372                                         fmkey->lfik_oa.o_size -
1373                                         fiemap->fm_start;
1374                         fiemap->fm_extents[0].fe_flags |=
1375                                 FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST;
1376                 }
1377                 GOTO(out_lsm, rc = 0);
1378         }
1379
1380         if (fiemap_count_to_size(fiemap->fm_extent_count) < buffer_size)
1381                 buffer_size = fiemap_count_to_size(fiemap->fm_extent_count);
1382
1383         OBD_ALLOC_LARGE(fm_local, buffer_size);
1384         if (fm_local == NULL)
1385                 GOTO(out_lsm, rc = -ENOMEM);
1386
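        /* fs_cnt_need is the number of extents that fit into the local
         * buffer used for each per-OST FIEMAP call */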
1387         fs.fs_fm = fm_local;
1388         fs.fs_cnt_need = fiemap_size_to_count(buffer_size);
1389
1390         fs.fs_start = fiemap->fm_start;
1391         /* Reject a request that starts beyond the end of the file */
1392         if (fs.fs_start > fmkey->lfik_oa.o_size)
1393                 GOTO(out_fm_local, rc = -EINVAL);
1394
1395         fs.fs_length = fiemap->fm_length;
1396         /* Calculate start stripe, last stripe and length of mapping */
1397         fs.fs_start_stripe = lov_stripe_number(lsm, fs.fs_start);
1398         fs.fs_end = (fs.fs_length == ~0ULL) ? fmkey->lfik_oa.o_size :
1399                                               fs.fs_start + fs.fs_length - 1;
1400         /* If fs_length != ~0ULL but fs_start+fs_length-1 exceeds file size */
1401         if (fs.fs_end > fmkey->lfik_oa.o_size) {
1402                 fs.fs_end = fmkey->lfik_oa.o_size;
1403                 fs.fs_length = fs.fs_end - fs.fs_start;
1404         }
1405
1406         fs.fs_last_stripe = fiemap_calc_last_stripe(lsm, fs.fs_start, fs.fs_end,
1407                                                     fs.fs_start_stripe,
1408                                                     &stripe_count);
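        /* Work out where a restarted FIEMAP request should resume on the
         * start stripe (see fiemap_calc_fm_end_offset()) */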
1409         fs.fs_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fs.fs_start,
1410                                                      fs.fs_end,
1411                                                      &fs.fs_start_stripe);
1412         if (fs.fs_end_offset == -EINVAL)
1413                 GOTO(out_fm_local, rc = -EINVAL);
1414
1415         /**
1416          * If the requested extent count exceeds the fiemap buffer size, shrink
1417          * our ambition.
1418          */
1419         if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen)
1420                 fiemap->fm_extent_count = fiemap_size_to_count(*buflen);
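        /* An extent count of zero means the caller only wants the number of
         * extents, so no per-OST buffer space is needed */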
1421         if (fiemap->fm_extent_count == 0)
1422                 fs.fs_cnt_need = 0;
1423
1424         fs.fs_finish = false;
1425         fs.fs_enough = false;
1426         fs.fs_cur_extent = 0;
1427
1428         /* Map each stripe in turn, wrapping modulo the stripe count */
1429         for (cur_stripe = fs.fs_start_stripe; stripe_count > 0;
1430              --stripe_count,
1431              cur_stripe = (cur_stripe + 1) % lsm->lsm_stripe_count) {
1432                 rc = fiemap_for_stripe(env, obj, lsm, fiemap, buflen, fmkey,
1433                                        cur_stripe, &fs);
1434                 if (rc < 0)
1435                         GOTO(out_fm_local, rc);
1436                 if (fs.fs_finish)
1437                         break;
1438         } /* for each stripe */
1439
1440         /* Indicate that we are returning device offsets unless the file has
1441          * just a single stripe */
1442         if (lsm->lsm_stripe_count > 1)
1443                 fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
1444
1445         if (fiemap->fm_extent_count == 0)
1446                 goto skip_last_device_calc;
1447
1448         /* Check if we have reached the last stripe and whether mapping for that
1449          * stripe is done. */
1450         if ((cur_stripe == fs.fs_last_stripe) && fs.fs_device_done)
1451                 fiemap->fm_extents[fs.fs_cur_extent - 1].fe_flags |=
1452                                                              FIEMAP_EXTENT_LAST;
1453 skip_last_device_calc:
1454         fiemap->fm_mapped_extents = fs.fs_cur_extent;
1455 out_fm_local:
1456         OBD_FREE_LARGE(fm_local, buffer_size);
1457
1458 out_lsm:
1459         lov_lsm_put(lsm);
1460
1461         return rc;
1462 }
1463
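/* Copy the striping information of the object to the user-space
 * lov_user_md buffer */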
1464 static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
1465                                 struct lov_user_md __user *lum)
1466 {
1467         struct lov_object       *lov = cl2lov(obj);
1468         struct lov_stripe_md    *lsm;
1469         int                     rc = 0;
1470         ENTRY;
1471
1472         lsm = lov_lsm_addref(lov);
1473         if (lsm == NULL)
1474                 RETURN(-ENODATA);
1475
1476         rc = lov_getstripe(cl2lov(obj), lsm, lum);
1477         lov_lsm_put(lsm);
1478         RETURN(rc);
1479 }
1480
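/* Report the layout size and generation, and pack the current LSM into
 * the buffer supplied in cl->cl_buf */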
1481 static int lov_object_layout_get(const struct lu_env *env,
1482                                  struct cl_object *obj,
1483                                  struct cl_layout *cl)
1484 {
1485         struct lov_object *lov = cl2lov(obj);
1486         struct lov_stripe_md *lsm = lov_lsm_addref(lov);
1487         struct lu_buf *buf = &cl->cl_buf;
1488         ssize_t rc;
1489         ENTRY;
1490
1491         if (lsm == NULL) {
1492                 cl->cl_size = 0;
1493                 cl->cl_layout_gen = CL_LAYOUT_GEN_EMPTY;
1494
1495                 RETURN(0);
1496         }
1497
1498         cl->cl_size = lov_mds_md_size(lsm->lsm_stripe_count, lsm->lsm_magic);
1499         cl->cl_layout_gen = lsm->lsm_layout_gen;
1500
1501         rc = lov_lsm_pack(lsm, buf->lb_buf, buf->lb_len);
1502         lov_lsm_put(lsm);
1503
1504         RETURN(rc < 0 ? rc : 0);
1505 }
1506
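/* Maximum file size permitted by the current layout, or LLONG_MAX when
 * no layout is attached */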
1507 static loff_t lov_object_maxbytes(struct cl_object *obj)
1508 {
1509         struct lov_object *lov = cl2lov(obj);
1510         struct lov_stripe_md *lsm = lov_lsm_addref(lov);
1511         loff_t maxbytes;
1512
1513         if (lsm == NULL)
1514                 return LLONG_MAX;
1515
1516         maxbytes = lsm->lsm_maxbytes;
1517
1518         lov_lsm_put(lsm);
1519
1520         return maxbytes;
1521 }
1522
1523 static const struct cl_object_operations lov_ops = {
1524         .coo_page_init    = lov_page_init,
1525         .coo_lock_init    = lov_lock_init,
1526         .coo_io_init      = lov_io_init,
1527         .coo_attr_get     = lov_attr_get,
1528         .coo_attr_update  = lov_attr_update,
1529         .coo_conf_set     = lov_conf_set,
1530         .coo_getstripe    = lov_object_getstripe,
1531         .coo_layout_get   = lov_object_layout_get,
1532         .coo_maxbytes     = lov_object_maxbytes,
1533         .coo_fiemap       = lov_object_fiemap,
1534 };
1535
1536 static const struct lu_object_operations lov_lu_obj_ops = {
1537         .loo_object_init      = lov_object_init,
1538         .loo_object_delete    = lov_object_delete,
1539         .loo_object_release   = NULL,
1540         .loo_object_free      = lov_object_free,
1541         .loo_object_print     = lov_object_print,
1542         .loo_object_invariant = NULL
1543 };
1544
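/* Allocate a new LOV object; layout-specific operations are installed
 * later in lov_object_init() */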
1545 struct lu_object *lov_object_alloc(const struct lu_env *env,
1546                                    const struct lu_object_header *unused,
1547                                    struct lu_device *dev)
1548 {
1549         struct lov_object *lov;
1550         struct lu_object  *obj;
1551
1552         ENTRY;
1553         OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, GFP_NOFS);
1554         if (lov != NULL) {
1555                 obj = lov2lu(lov);
1556                 lu_object_init(obj, NULL, dev);
1557                 lov->lo_cl.co_ops = &lov_ops;
1558                 lov->lo_type = -1; /* invalid, to catch uninitialized type */
1559                 /*
1560                  * object io operation vector (cl_object::co_iop) is installed
1561                  * later in lov_object_init(), as different vectors are used
1562                  * for object with different layouts.
1563                  */
1564                 obj->lo_ops = &lov_lu_obj_ops;
1565         } else
1566                 obj = NULL;
1567         RETURN(obj);
1568 }
1569
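/* Take a reference on the object's current lov_stripe_md under the
 * layout configuration lock; returns NULL if no layout is attached */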
1570 struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
1571 {
1572         struct lov_stripe_md *lsm = NULL;
1573
1574         lov_conf_freeze(lov);
1575         if (lov->lo_lsm != NULL) {
1576                 lsm = lsm_addref(lov->lo_lsm);
1577                 CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
1578                         lsm, atomic_read(&lsm->lsm_refc),
1579                         lov->lo_layout_invalid, current);
1580         }
1581         lov_conf_thaw(lov);
1582         return lsm;
1583 }
1584
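/* Return the first non-zero asynchronous write error recorded on any
 * stripe and clear the stored per-stripe error codes */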
1585 int lov_read_and_clear_async_rc(struct cl_object *clob)
1586 {
1587         struct lu_object *luobj;
1588         int rc = 0;
1589         ENTRY;
1590
1591         luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
1592                                  &lov_device_type);
1593         if (luobj != NULL) {
1594                 struct lov_object *lov = lu2lov(luobj);
1595
1596                 lov_conf_freeze(lov);
1597                 switch (lov->lo_type) {
1598                 case LLT_RAID0: {
1599                         struct lov_stripe_md *lsm;
1600                         int i;
1601
1602                         lsm = lov->lo_lsm;
1603                         LASSERT(lsm != NULL);
1604                         for (i = 0; i < lsm->lsm_stripe_count; i++) {
1605                                 struct lov_oinfo *loi = lsm->lsm_oinfo[i];
1606
1607                                 if (lov_oinfo_is_dummy(loi))
1608                                         continue;
1609
1610                                 if (loi->loi_ar.ar_rc && !rc)
1611                                         rc = loi->loi_ar.ar_rc;
1612                                 loi->loi_ar.ar_rc = 0;
1613                         }
1614                 }
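                /* fallthrough: continue to the common break below */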
1615                 case LLT_RELEASED:
1616                 case LLT_EMPTY:
1617                         break;
1618                 default:
1619                         LBUG();
1620                 }
1621                 lov_conf_thaw(lov);
1622         }
1623         RETURN(rc);
1624 }
1625 EXPORT_SYMBOL(lov_read_and_clear_async_rc);
1626
1627 /** @} lov */