/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_object for LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_LOV

#include <linux/random.h>

#include "lov_cl_internal.h"
static inline struct lov_device *lov_object_dev(struct lov_object *obj)
{
        return lu2lov_dev(obj->lo_cl.co_lu.lo_dev);
}

/*****************************************************************************
 *
 * Layout operations.
 *
 */
struct lov_layout_operations {
        int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
                        struct lov_object *lov, struct lov_stripe_md *lsm,
                        const struct cl_object_conf *conf,
                        union lov_layout_state *state);
        int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
                          union lov_layout_state *state);
        void (*llo_fini)(const struct lu_env *env, struct lov_object *lov,
                         union lov_layout_state *state);
        int (*llo_print)(const struct lu_env *env, void *cookie,
                         lu_printer_t p, const struct lu_object *o);
        int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
                             struct cl_page *page, pgoff_t index);
        int (*llo_lock_init)(const struct lu_env *env,
                             struct cl_object *obj, struct cl_lock *lock,
                             const struct cl_io *io);
        int (*llo_io_init)(const struct lu_env *env,
                           struct cl_object *obj, struct cl_io *io);
        int (*llo_getattr)(const struct lu_env *env, struct cl_object *obj,
                           struct cl_attr *attr);
};
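
/*
 * Note: one lov_layout_operations instance exists per layout type; the
 * lov_dispatch[] table further below selects the vector to use from
 * lov_object::lo_type, so every coo_*/llo_* entry point is a
 * double-dispatch on (operation, layout type).
 */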
static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);

static void lov_lsm_put(struct lov_stripe_md *lsm)
{
        if (lsm != NULL)
                lov_free_memmd(&lsm);
}
/*****************************************************************************
 *
 * Lov object layout operations.
 *
 */
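
/**
 * Find or create the cl_object for one stripe: a thin wrapper over
 * lu_object_find_at() that instantiates the sub-object on the lovsub
 * device of the stripe's target.
 */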
static struct cl_object *lov_sub_find(const struct lu_env *env,
                                      struct cl_device *dev,
                                      const struct lu_fid *fid,
                                      const struct cl_object_conf *conf)
{
        struct lu_object *o;

        ENTRY;

        o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
        LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
        RETURN(lu2cl(o));
}
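
/**
 * With a NULL @stripe, report how much of the per-page slice buffer remains
 * after the lov slice itself; otherwise shift the slice offsets of all layers
 * of @stripe past the top-object's page buffer and return the sub-object's
 * page buffer size.
 */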
static int lov_page_slice_fixup(struct lov_object *lov,
                                struct cl_object *stripe)
{
        struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
        struct cl_object *o;

        if (stripe == NULL)
                return hdr->coh_page_bufsize - lov->lo_cl.co_slice_off -
                       cfs_size_round(sizeof(struct lov_page));

        cl_object_for_each(o, stripe)
                o->co_slice_off += hdr->coh_page_bufsize;

        return cl_object_header(stripe)->coh_page_bufsize;
}
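
/**
 * Attach a just-found stripe sub-object @subobj to the top-level @lov
 * object: parent pointer, nesting level and the lso_super/lso_index
 * back-references are set under ->coh_attr_guard.  Returns -EAGAIN when the
 * stripe is still owned by a top-object whose layout became invalid, so the
 * caller may retry, and -EIO when it is owned by a valid top-object.
 */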
static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
                        struct cl_object *subobj, struct lov_oinfo *oinfo,
                        int idx)
{
        struct cl_object_header *hdr;
        struct cl_object_header *subhdr;
        struct cl_object_header *parent;
        int entry = lov_comp_entry(idx);
        int stripe = lov_comp_stripe(idx);
        int result;

        if (OBD_FAIL_CHECK(OBD_FAIL_LOV_INIT)) {
                /* For sanity:test_206.
                 * Do not leave the object in cache to avoid accessing
                 * freed memory. This is because osc_object is referring to
                 * lov_oinfo of lsm_stripe_data which will be freed due to
                 * this failed OST object. */
                cl_object_kill(env, subobj);
                cl_object_put(env, subobj);
                return -EIO;
        }

        hdr = cl_object_header(lov2cl(lov));
        subhdr = cl_object_header(subobj);

        CDEBUG(D_INODE, DFID"@%p[%d:%d] -> "DFID"@%p: ostid: "DOSTID
               " ost idx: %d gen: %d\n",
               PFID(lu_object_fid(&subobj->co_lu)), subhdr, entry, stripe,
               PFID(lu_object_fid(lov2lu(lov))), hdr, POSTID(&oinfo->loi_oi),
               oinfo->loi_ost_idx, oinfo->loi_ost_gen);

        /* reuse ->coh_attr_guard to protect coh_parent change */
        spin_lock(&subhdr->coh_attr_guard);
        parent = subhdr->coh_parent;
        if (parent == NULL) {
                struct lovsub_object *lso = cl2lovsub(subobj);

                subhdr->coh_parent = hdr;
                spin_unlock(&subhdr->coh_attr_guard);
                subhdr->coh_nesting = hdr->coh_nesting + 1;
                lu_object_ref_add(&subobj->co_lu, "lov-parent", lov);
                lso->lso_super = lov;
                lso->lso_index = idx;
                result = 0;
        } else {
                struct lu_object *old_obj;
                struct lov_object *old_lov;
                unsigned int mask = D_INODE;

                spin_unlock(&subhdr->coh_attr_guard);
                old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
                LASSERT(old_obj != NULL);
                old_lov = cl2lov(lu2cl(old_obj));
                if (old_lov->lo_layout_invalid) {
                        /* the object's layout has already changed but isn't
                         * refreshed */
                        lu_object_unhash(env, &subobj->co_lu);
                        result = -EAGAIN;
                } else {
                        mask = D_ERROR;
                        result = -EIO;
                }

                LU_OBJECT_DEBUG(mask, env, &subobj->co_lu,
                                "stripe %d is already owned.", idx);
                LU_OBJECT_DEBUG(mask, env, old_obj, "owned.");
                LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n");
                cl_object_put(env, subobj);
        }
        return result;
}
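
/**
 * Instantiate all stripe sub-objects of one RAID0 layout entry and record
 * them in lle_raid0::lo_sub[].  On success the common page slice size of the
 * sub-objects is returned (a positive value); a negative errno otherwise.
 */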
static int lov_init_raid0(const struct lu_env *env, struct lov_device *dev,
                          struct lov_object *lov, unsigned int index,
                          const struct cl_object_conf *conf,
                          struct lov_layout_entry *lle)
{
        struct lov_layout_raid0 *r0 = &lle->lle_raid0;
        struct lov_thread_info *lti = lov_env_info(env);
        struct cl_object_conf *subconf = &lti->lti_stripe_conf;
        struct lu_fid *ofid = &lti->lti_fid;
        struct cl_object *stripe;
        struct lov_stripe_md_entry *lse = lov_lse(lov, index);
        int result = 0;
        int psz = 0;
        int sz;
        int i;

        ENTRY;

        spin_lock_init(&r0->lo_sub_lock);
        r0->lo_nr = lse->lsme_stripe_count;
        LASSERT(r0->lo_nr <= lov_targets_nr(dev));

        OBD_ALLOC_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
        if (r0->lo_sub == NULL)
                GOTO(out, result = -ENOMEM);

        memset(subconf, 0, sizeof(*subconf));

        /*
         * Create stripe cl_objects.
         */
        for (i = 0; i < r0->lo_nr; ++i) {
                struct cl_device *subdev;
                struct lov_oinfo *oinfo = lse->lsme_oinfo[i];
                int ost_idx = oinfo->loi_ost_idx;

                if (lov_oinfo_is_dummy(oinfo))
                        continue;

                result = ostid_to_fid(ofid, &oinfo->loi_oi, oinfo->loi_ost_idx);
                if (result != 0)
                        GOTO(out, result);

                if (dev->ld_target[ost_idx] == NULL) {
                        CERROR("%s: OST %04x is not initialized\n",
                               lov2obd(dev->ld_lov)->obd_name, ost_idx);
                        GOTO(out, result = -EIO);
                }

                subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
                subconf->u.coc_oinfo = oinfo;
                LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
                /* In the function below, .hs_keycmp resolves to
                 * lu_obj_hop_keycmp() */
                /* coverity[overrun-buffer-val] */
                stripe = lov_sub_find(env, subdev, ofid, subconf);
                if (IS_ERR(stripe))
                        GOTO(out, result = PTR_ERR(stripe));

                result = lov_init_sub(env, lov, stripe, oinfo,
                                      lov_comp_index(index, i));
                if (result == -EAGAIN) { /* try again */
                        --i;
                        result = 0;
                        continue;
                }

                if (result == 0) {
                        r0->lo_sub[i] = cl2lovsub(stripe);

                        sz = lov_page_slice_fixup(lov, stripe);
                        LASSERT(ergo(psz > 0, psz == sz));
                        psz = sz;
                }
        }
        if (result == 0)
                result = psz;
out:
        RETURN(result);
}
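
/**
 * Kill one stripe sub-object and wait until it is actually freed: the
 * r0->lo_sub[idx] slot is cleared by lovsub_object_free(), which also wakes
 * up the per-site wait-queue this function sleeps on.
 */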
static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
                               struct lov_layout_raid0 *r0,
                               struct lovsub_object *los, int idx)
{
        struct cl_object *sub;
        struct lu_site *site;
        struct lu_site_bkt_data *bkt;
        wait_queue_entry_t *waiter;

        LASSERT(r0->lo_sub[idx] == los);

        sub = lovsub2cl(los);
        site = sub->co_lu.lo_dev->ld_site;
        bkt = lu_site_bkt_from_fid(site, &sub->co_lu.lo_header->loh_fid);

        cl_object_kill(env, sub);
        /* release a reference to the sub-object and ... */
        lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
        cl_object_put(env, sub);

        /* ... wait until it is actually destroyed---sub-object clears its
         * ->lo_sub[] slot in lovsub_object_free() */
        if (r0->lo_sub[idx] == los) {
                waiter = &lov_env_info(env)->lti_waiter;
                init_waitqueue_entry(waiter, current);
                add_wait_queue(&bkt->lsb_marche_funebre, waiter);
                set_current_state(TASK_UNINTERRUPTIBLE);
                while (1) {
                        /* this wait-queue is signaled at the end of
                         * lu_object_free(). */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        spin_lock(&r0->lo_sub_lock);
                        if (r0->lo_sub[idx] == los) {
                                spin_unlock(&r0->lo_sub_lock);
                                schedule();
                        } else {
                                spin_unlock(&r0->lo_sub_lock);
                                set_current_state(TASK_RUNNING);
                                break;
                        }
                }
                remove_wait_queue(&bkt->lsb_marche_funebre, waiter);
        }
        LASSERT(r0->lo_sub[idx] == NULL);
}
static void lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
                             struct lov_layout_entry *lle)
{
        struct lov_layout_raid0 *r0 = &lle->lle_raid0;

        ENTRY;

        if (r0->lo_sub != NULL) {
                int i;

                for (i = 0; i < r0->lo_nr; ++i) {
                        struct lovsub_object *los = r0->lo_sub[i];

                        if (los != NULL) {
                                cl_object_prune(env, &los->lso_cl);
                                /*
                                 * If top-level object is to be evicted from
                                 * the cache, so are its sub-objects.
                                 */
                                lov_subobject_kill(env, lov, r0, los, i);
                        }
                }
        }

        EXIT;
}
static void lov_fini_raid0(const struct lu_env *env,
                           struct lov_layout_entry *lle)
{
        struct lov_layout_raid0 *r0 = &lle->lle_raid0;

        if (r0->lo_sub != NULL) {
                OBD_FREE_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
                r0->lo_sub = NULL;
        }
}
static int lov_print_raid0(const struct lu_env *env, void *cookie,
                           lu_printer_t p, const struct lov_layout_entry *lle)
{
        const struct lov_layout_raid0 *r0 = &lle->lle_raid0;
        int i;

        for (i = 0; i < r0->lo_nr; ++i) {
                struct lu_object *sub;

                if (r0->lo_sub[i] != NULL) {
                        sub = lovsub2lu(r0->lo_sub[i]);
                        lu_object_print(env, cookie, p, sub);
                } else {
                        (*p)(env, cookie, "sub %d absent\n", i);
                }
        }
        return 0;
}
static int lov_attr_get_raid0(const struct lu_env *env, struct lov_object *lov,
                              unsigned int index, struct lov_layout_entry *lle,
                              struct cl_attr **lov_attr)
{
        struct lov_layout_raid0 *r0 = &lle->lle_raid0;
        struct lov_stripe_md *lsm = lov->lo_lsm;
        struct ost_lvb *lvb = &lov_env_info(env)->lti_lvb;
        struct cl_attr *attr = &r0->lo_attr;
        __u64 kms = 0;
        int result = 0;

        if (r0->lo_attr_valid) {
                *lov_attr = attr;
                return 0;
        }

        memset(lvb, 0, sizeof(*lvb));

        /* XXX: timestamps can be negative by sanity:test_39m,
         * how can it be? */
        lvb->lvb_atime = LLONG_MIN;
        lvb->lvb_ctime = LLONG_MIN;
        lvb->lvb_mtime = LLONG_MIN;

        /*
         * XXX that should be replaced with a loop over sub-objects,
         * doing cl_object_attr_get() on them. But for now, let's
         * reuse old lov code.
         */

        /*
         * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
         * happy. It's not needed, because new code uses
         * ->coh_attr_guard spin-lock to protect consistency of
         * sub-object attributes.
         */
        lov_stripe_lock(lsm);
        result = lov_merge_lvb_kms(lsm, index, lvb, &kms);
        lov_stripe_unlock(lsm);
        if (result == 0) {
                cl_lvb2attr(attr, lvb);
                attr->cat_kms = kms;
                r0->lo_attr_valid = 1;
                *lov_attr = attr;
        }

        return result;
}
static struct lov_comp_layout_entry_ops raid0_ops = {
        .lco_init    = lov_init_raid0,
        .lco_fini    = lov_fini_raid0,
        .lco_getattr = lov_attr_get_raid0,
};
static int lov_attr_get_dom(const struct lu_env *env, struct lov_object *lov,
                            unsigned int index, struct lov_layout_entry *lle,
                            struct cl_attr **lov_attr)
{
        struct lov_layout_dom *dom = &lle->lle_dom;
        struct lov_oinfo *loi = dom->lo_loi;
        struct cl_attr *attr = &dom->lo_dom_r0.lo_attr;

        if (dom->lo_dom_r0.lo_attr_valid) {
                *lov_attr = attr;
                return 0;
        }

        if (OST_LVB_IS_ERR(loi->loi_lvb.lvb_blocks))
                return OST_LVB_GET_ERR(loi->loi_lvb.lvb_blocks);

        cl_lvb2attr(attr, &loi->loi_lvb);

        /* DoM component size can be bigger than stripe size after
         * client's setattr RPC, so do not count anything beyond
         * component end. Alternatively, check that limit on server
         * and do not allow size overflow there. */
        if (attr->cat_size > lle->lle_extent->e_end)
                attr->cat_size = lle->lle_extent->e_end;

        attr->cat_kms = attr->cat_size;

        dom->lo_dom_r0.lo_attr_valid = 1;
        *lov_attr = attr;

        return 0;
}
/**
 * Lookup FLD to get MDS index of the given DOM object FID.
 *
 * \param[in]  ld	LOV device
 * \param[in]  fid	FID to lookup
 * \param[out] nr	index in MDC array to return back
 *
 * \retval		0 and \a nr filled with MDC array index if successful
 * \retval		negative value on error
 */
static int lov_fld_lookup(struct lov_device *ld, const struct lu_fid *fid,
                          __u32 *nr)
{
        __u32 mds_idx;
        int i, rc;

        ENTRY;

        rc = fld_client_lookup(&ld->ld_lmv->u.lmv.lmv_fld, fid_seq(fid),
                               &mds_idx, LU_SEQ_RANGE_MDT, NULL);
        if (rc) {
                CERROR("%s: error while looking for mds number. Seq %#llx"
                       ", err = %d\n", lu_dev_name(cl2lu_dev(&ld->ld_cl)),
                       fid_seq(fid), rc);
                RETURN(rc);
        }

        CDEBUG(D_INODE, "FLD lookup got mds #%x for fid="DFID"\n",
               mds_idx, PFID(fid));

        /* find proper MDC device in the array */
        for (i = 0; i < ld->ld_md_tgts_nr; i++) {
                if (ld->ld_md_tgts[i].ldm_mdc != NULL &&
                    ld->ld_md_tgts[i].ldm_idx == mds_idx)
                        break;
        }

        if (i == ld->ld_md_tgts_nr) {
                CERROR("%s: cannot find corresponding MDC device for mds #%x "
                       "for fid="DFID"\n", lu_dev_name(cl2lu_dev(&ld->ld_cl)),
                       mds_idx, PFID(fid));
                rc = -EINVAL;
        } else {
                *nr = i;
        }
        RETURN(rc);
}
/**
 * Implementation of lov_comp_layout_entry_ops::lco_init for DOM object.
 *
 * Init the DOM object for the first time. It prepares also RAID0 entry
 * for it to use in common methods with ordinary RAID0 layout entries.
 *
 * \param[in] env	execution environment
 * \param[in] dev	LOV device
 * \param[in] lov	LOV object
 * \param[in] index	Composite layout entry index in LSM
 * \param[in] lle	Composite LOV layout entry
 */
static int lov_init_dom(const struct lu_env *env, struct lov_device *dev,
                        struct lov_object *lov, unsigned int index,
                        const struct cl_object_conf *conf,
                        struct lov_layout_entry *lle)
{
        struct lov_thread_info *lti = lov_env_info(env);
        struct lov_stripe_md_entry *lsme = lov_lse(lov, index);
        struct cl_object *clo;
        struct lu_object *o = lov2lu(lov);
        const struct lu_fid *fid = lu_object_fid(o);
        struct cl_device *mdcdev;
        struct lov_oinfo *loi = NULL;
        struct cl_object_conf *sconf = &lti->lti_stripe_conf;
        int rc;
        __u32 idx = 0;

        ENTRY;

        /* find proper MDS device */
        rc = lov_fld_lookup(dev, fid, &idx);
        if (rc)
                RETURN(rc);

        LASSERTF(dev->ld_md_tgts[idx].ldm_mdc != NULL,
                 "LOV md target[%u] is NULL\n", idx);

        /* check lsm is DOM, more checks are needed */
        LASSERT(lsme->lsme_stripe_count == 0);

        /*
         * Create lower cl_objects.
         */
        mdcdev = dev->ld_md_tgts[idx].ldm_mdc;

        LASSERTF(mdcdev != NULL, "non-initialized mdc subdev\n");

        /* DoM object has no oinfo in LSM entry, create it exclusively */
        OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, GFP_NOFS);
        if (loi == NULL)
                RETURN(-ENOMEM);

        fid_to_ostid(lu_object_fid(lov2lu(lov)), &loi->loi_oi);

        sconf->u.coc_oinfo = loi;
again:
        clo = lov_sub_find(env, mdcdev, fid, sconf);
        if (IS_ERR(clo))
                GOTO(out, rc = PTR_ERR(clo));

        rc = lov_init_sub(env, lov, clo, loi, lov_comp_index(index, 0));
        if (rc == -EAGAIN) /* try again */
                goto again;
        else if (rc != 0)
                GOTO(out, rc);

        lle->lle_dom.lo_dom = cl2lovsub(clo);
        spin_lock_init(&lle->lle_dom.lo_dom_r0.lo_sub_lock);
        lle->lle_dom.lo_dom_r0.lo_nr = 1;
        lle->lle_dom.lo_dom_r0.lo_sub = &lle->lle_dom.lo_dom;
        lle->lle_dom.lo_loi = loi;

        rc = lov_page_slice_fixup(lov, clo);
        RETURN(rc);

out:
        if (loi != NULL)
                OBD_SLAB_FREE_PTR(loi, lov_oinfo_slab);
        return rc;
}
/**
 * Implementation of lov_comp_layout_entry_ops::lco_fini for DOM object.
 *
 * Finish the DOM object and free related memory.
 *
 * \param[in] env	execution environment
 * \param[in] lle	Composite LOV layout entry
 */
static void lov_fini_dom(const struct lu_env *env,
                         struct lov_layout_entry *lle)
{
        if (lle->lle_dom.lo_dom != NULL)
                lle->lle_dom.lo_dom = NULL;
        if (lle->lle_dom.lo_loi != NULL)
                OBD_SLAB_FREE_PTR(lle->lle_dom.lo_loi, lov_oinfo_slab);
}
static struct lov_comp_layout_entry_ops dom_ops = {
        .lco_init    = lov_init_dom,
        .lco_fini    = lov_fini_dom,
        .lco_getattr = lov_attr_get_dom,
};
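
/**
 * Implementation of lov_layout_operations::llo_init for composite layouts.
 *
 * Walks all LSM entries, binds each one to raid0_ops or dom_ops, groups the
 * entries into mirrors for FLR files, initializes every component that is
 * already init-ed on the MDS, and picks a preferred mirror based on a hash
 * of the object pointer so that reads spread across clients.
 */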
static int lov_init_composite(const struct lu_env *env, struct lov_device *dev,
                              struct lov_object *lov, struct lov_stripe_md *lsm,
                              const struct cl_object_conf *conf,
                              union lov_layout_state *state)
{
        struct lov_layout_composite *comp = &state->composite;
        struct lov_layout_entry *lle;
        struct lov_mirror_entry *lre;
        unsigned int entry_count;
        unsigned int psz = 0;
        unsigned int mirror_count;
        int flr_state = lsm->lsm_flags & LCM_FL_FLR_MASK;
        int result = 0;
        unsigned int seq;
        int i, j;

        ENTRY;

        LASSERT(lsm->lsm_entry_count > 0);
        LASSERT(lov->lo_lsm == NULL);
        lov->lo_lsm = lsm_addref(lsm);
        lov->lo_layout_invalid = true;

        dump_lsm(D_INODE, lsm);

        entry_count = lsm->lsm_entry_count;

        spin_lock_init(&comp->lo_write_lock);
        comp->lo_flags = lsm->lsm_flags;
        comp->lo_mirror_count = lsm->lsm_mirror_count + 1;
        comp->lo_entry_count = lsm->lsm_entry_count;
        comp->lo_preferred_mirror = -1;

        if (equi(flr_state == LCM_FL_NONE, comp->lo_mirror_count > 1))
                RETURN(-EINVAL);

        OBD_ALLOC(comp->lo_mirrors,
                  comp->lo_mirror_count * sizeof(*comp->lo_mirrors));
        if (comp->lo_mirrors == NULL)
                RETURN(-ENOMEM);

        OBD_ALLOC(comp->lo_entries, entry_count * sizeof(*comp->lo_entries));
        if (comp->lo_entries == NULL)
                RETURN(-ENOMEM);

        /* Initiate all entry types and extents data at first */
        for (i = 0, j = 0, mirror_count = 1; i < entry_count; i++) {
                int mirror_id = 0;

                lle = &comp->lo_entries[i];

                lle->lle_lsme = lsm->lsm_entries[i];
                lle->lle_type = lov_entry_type(lle->lle_lsme);
                switch (lle->lle_type) {
                case LOV_PATTERN_RAID0:
                        lle->lle_comp_ops = &raid0_ops;
                        break;
                case LOV_PATTERN_MDT:
                        lle->lle_comp_ops = &dom_ops;
                        break;
                default:
                        CERROR("%s: unknown composite layout entry type %i\n",
                               lov2obd(dev->ld_lov)->obd_name,
                               lsm->lsm_entries[i]->lsme_pattern);
                        dump_lsm(D_ERROR, lsm);
                        RETURN(-EIO);
                }

                lle->lle_extent = &lle->lle_lsme->lsme_extent;
                lle->lle_valid = !(lle->lle_lsme->lsme_flags & LCME_FL_STALE);

                if (flr_state != LCM_FL_NONE)
                        mirror_id = mirror_id_of(lle->lle_lsme->lsme_id);

                lre = &comp->lo_mirrors[j];
                if (i > 0) {
                        if (mirror_id == lre->lre_mirror_id) {
                                lre->lre_valid |= lle->lle_valid;
                                lre->lre_stale |= !lle->lle_valid;
                                lre->lre_end = i;
                                continue;
                        }

                        /* new mirror detected, assume that the mirrors
                         * are sorted in the layout */
                        ++mirror_count;
                        ++j;
                        if (j >= comp->lo_mirror_count)
                                break;

                        lre = &comp->lo_mirrors[j];
                }

                /* entries must be sorted by mirrors */
                lre->lre_mirror_id = mirror_id;
                lre->lre_start = lre->lre_end = i;
                lre->lre_preferred = !!(lle->lle_lsme->lsme_flags &
                                        LCME_FL_PREF_RD);
                lre->lre_valid = lle->lle_valid;
                lre->lre_stale = !lle->lle_valid;
        }

        /* sanity check for FLR */
        if (mirror_count != comp->lo_mirror_count) {
                CDEBUG(D_ERROR, DFID
                       " doesn't have the # of mirrors it claims, %u/%u\n",
                       PFID(lu_object_fid(lov2lu(lov))), mirror_count,
                       comp->lo_mirror_count + 1);

                GOTO(out, result = -EINVAL);
        }

        lov_foreach_layout_entry(lov, lle) {
                int index = lov_layout_entry_index(lov, lle);

                /**
                 * If the component has not been init-ed on MDS side, for
                 * PFL layout, we'd know that the components beyond this one
                 * will be dynamically init-ed later on file write/trunc ops.
                 */
                if (!lsme_inited(lle->lle_lsme))
                        continue;

                result = lle->lle_comp_ops->lco_init(env, dev, lov, index,
                                                     conf, lle);
                if (result < 0)
                        break;

                LASSERT(ergo(psz > 0, psz == result));
                psz = result;
        }

        if (result > 0)
                cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;

        /* decide the preferred mirror. It uses the hash value of lov_object
         * so that different clients would use different mirrors for read. */
        mirror_count = 0;
        seq = hash_long((unsigned long)lov, 8);
        for (i = 0; i < comp->lo_mirror_count; i++) {
                unsigned int idx = (i + seq) % comp->lo_mirror_count;

                lre = lov_mirror_entry(lov, idx);
                if (lre->lre_stale)
                        continue;

                mirror_count++; /* valid mirror */

                if (lre->lre_preferred || comp->lo_preferred_mirror < 0)
                        comp->lo_preferred_mirror = idx;
        }
        if (!mirror_count) {
                CDEBUG(D_ERROR, DFID
                       " doesn't have any valid mirrors\n",
                       PFID(lu_object_fid(lov2lu(lov))));

                comp->lo_preferred_mirror = 0;
        }

        LASSERT(comp->lo_preferred_mirror >= 0);

        EXIT;
out:
        return result > 0 ? 0 : result;
}
static int lov_init_empty(const struct lu_env *env, struct lov_device *dev,
                          struct lov_object *lov, struct lov_stripe_md *lsm,
                          const struct cl_object_conf *conf,
                          union lov_layout_state *state)
{
        return 0;
}
static int lov_init_released(const struct lu_env *env,
                             struct lov_device *dev, struct lov_object *lov,
                             struct lov_stripe_md *lsm,
                             const struct cl_object_conf *conf,
                             union lov_layout_state *state)
{
        LASSERT(lsm != NULL);
        LASSERT(lsm->lsm_is_released);
        LASSERT(lov->lo_lsm == NULL);

        lov->lo_lsm = lsm_addref(lsm);
        return 0;
}
static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
                            union lov_layout_state *state)
{
        LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);

        lov_layout_wait(env, lov);
        return 0;
}
static int lov_delete_composite(const struct lu_env *env,
                                struct lov_object *lov,
                                union lov_layout_state *state)
{
        struct lov_layout_entry *entry;
        struct lov_layout_composite *comp = &state->composite;

        ENTRY;

        dump_lsm(D_INODE, lov->lo_lsm);

        lov_layout_wait(env, lov);
        if (comp->lo_entries)
                lov_foreach_layout_entry(lov, entry)
                        lov_delete_raid0(env, lov, entry);

        RETURN(0);
}
static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov,
                           union lov_layout_state *state)
{
        LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
}
static void lov_fini_composite(const struct lu_env *env,
                               struct lov_object *lov,
                               union lov_layout_state *state)
{
        struct lov_layout_composite *comp = &state->composite;

        ENTRY;

        if (comp->lo_entries != NULL) {
                struct lov_layout_entry *entry;

                lov_foreach_layout_entry(lov, entry)
                        entry->lle_comp_ops->lco_fini(env, entry);

                OBD_FREE(comp->lo_entries,
                         comp->lo_entry_count * sizeof(*comp->lo_entries));
                comp->lo_entries = NULL;
        }

        if (comp->lo_mirrors != NULL) {
                OBD_FREE(comp->lo_mirrors,
                         comp->lo_mirror_count * sizeof(*comp->lo_mirrors));
                comp->lo_mirrors = NULL;
        }

        memset(comp, 0, sizeof(*comp));

        dump_lsm(D_INODE, lov->lo_lsm);
        lov_free_memmd(&lov->lo_lsm);

        EXIT;
}
static void lov_fini_released(const struct lu_env *env, struct lov_object *lov,
                              union lov_layout_state *state)
{
        ENTRY;
        dump_lsm(D_INODE, lov->lo_lsm);
        lov_free_memmd(&lov->lo_lsm);
        EXIT;
}
static int lov_print_empty(const struct lu_env *env, void *cookie,
                           lu_printer_t p, const struct lu_object *o)
{
        (*p)(env, cookie, "empty %d\n", lu2lov(o)->lo_layout_invalid);
        return 0;
}
static int lov_print_composite(const struct lu_env *env, void *cookie,
                               lu_printer_t p, const struct lu_object *o)
{
        struct lov_object *lov = lu2lov(o);
        struct lov_stripe_md *lsm = lov->lo_lsm;
        int i;

        (*p)(env, cookie, "entries: %d, %s, lsm{%p 0x%08X %d %u}:\n",
             lsm->lsm_entry_count,
             lov->lo_layout_invalid ? "invalid" : "valid", lsm,
             lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
             lsm->lsm_layout_gen);

        for (i = 0; i < lsm->lsm_entry_count; i++) {
                struct lov_stripe_md_entry *lse = lsm->lsm_entries[i];
                struct lov_layout_entry *lle = lov_entry(lov, i);

                (*p)(env, cookie,
                     DEXT ": { 0x%08X, %u, %#x, %u, %#x, %u, %u }\n",
                     PEXT(&lse->lsme_extent), lse->lsme_magic,
                     lse->lsme_id, lse->lsme_pattern, lse->lsme_layout_gen,
                     lse->lsme_flags, lse->lsme_stripe_count,
                     lse->lsme_stripe_size);
                lov_print_raid0(env, cookie, p, lle);
        }

        return 0;
}
static int lov_print_released(const struct lu_env *env, void *cookie,
                              lu_printer_t p, const struct lu_object *o)
{
        struct lov_object *lov = lu2lov(o);
        struct lov_stripe_md *lsm = lov->lo_lsm;

        (*p)(env, cookie,
             "released: %s, lsm{%p 0x%08X %d %u}:\n",
             lov->lo_layout_invalid ? "invalid" : "valid", lsm,
             lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
             lsm->lsm_layout_gen);
        return 0;
}
/**
 * Implements cl_object_operations::coo_attr_get() method for an object
 * without stripes (LLT_EMPTY layout type).
 *
 * The only attribute this layer is authoritative for in this case is
 * cl_attr::cat_blocks---it's 0.
 */
static int lov_attr_get_empty(const struct lu_env *env, struct cl_object *obj,
                              struct cl_attr *attr)
{
        attr->cat_blocks = 0;
        return 0;
}
static int lov_attr_get_composite(const struct lu_env *env,
                                  struct cl_object *obj,
                                  struct cl_attr *attr)
{
        struct lov_object *lov = cl2lov(obj);
        struct lov_layout_entry *entry;
        int result = 0;

        ENTRY;

        attr->cat_size = 0;
        attr->cat_blocks = 0;
        lov_foreach_layout_entry(lov, entry) {
                struct cl_attr *lov_attr = NULL;
                int index = lov_layout_entry_index(lov, entry);

                if (!entry->lle_valid)
                        continue;

                /* PFL: This component has not been init-ed. */
                if (!lsm_entry_inited(lov->lo_lsm, index))
                        continue;

                result = entry->lle_comp_ops->lco_getattr(env, lov, index,
                                                          entry, &lov_attr);
                if (result != 0)
                        RETURN(result);

                if (lov_attr == NULL)
                        continue;

                CDEBUG(D_INODE, "COMP ID #%i: s=%llu m=%llu a=%llu c=%llu "
                       "b=%llu\n", index - 1, lov_attr->cat_size,
                       lov_attr->cat_mtime, lov_attr->cat_atime,
                       lov_attr->cat_ctime, lov_attr->cat_blocks);

                /* merge results */
                attr->cat_blocks += lov_attr->cat_blocks;
                if (attr->cat_size < lov_attr->cat_size)
                        attr->cat_size = lov_attr->cat_size;
                if (attr->cat_kms < lov_attr->cat_kms)
                        attr->cat_kms = lov_attr->cat_kms;
                if (attr->cat_atime < lov_attr->cat_atime)
                        attr->cat_atime = lov_attr->cat_atime;
                if (attr->cat_ctime < lov_attr->cat_ctime)
                        attr->cat_ctime = lov_attr->cat_ctime;
                if (attr->cat_mtime < lov_attr->cat_mtime)
                        attr->cat_mtime = lov_attr->cat_mtime;
        }

        RETURN(0);
}
static const struct lov_layout_operations lov_dispatch[] = {
        [LLT_EMPTY] = {
                .llo_init      = lov_init_empty,
                .llo_delete    = lov_delete_empty,
                .llo_fini      = lov_fini_empty,
                .llo_print     = lov_print_empty,
                .llo_page_init = lov_page_init_empty,
                .llo_lock_init = lov_lock_init_empty,
                .llo_io_init   = lov_io_init_empty,
                .llo_getattr   = lov_attr_get_empty,
        },
        [LLT_RELEASED] = {
                .llo_init      = lov_init_released,
                .llo_delete    = lov_delete_empty,
                .llo_fini      = lov_fini_released,
                .llo_print     = lov_print_released,
                .llo_page_init = lov_page_init_empty,
                .llo_lock_init = lov_lock_init_empty,
                .llo_io_init   = lov_io_init_released,
                .llo_getattr   = lov_attr_get_empty,
        },
        [LLT_COMP] = {
                .llo_init      = lov_init_composite,
                .llo_delete    = lov_delete_composite,
                .llo_fini      = lov_fini_composite,
                .llo_print     = lov_print_composite,
                .llo_page_init = lov_page_init_composite,
                .llo_lock_init = lov_lock_init_composite,
                .llo_io_init   = lov_io_init_composite,
                .llo_getattr   = lov_attr_get_composite,
        },
};
/**
 * Performs a double-dispatch based on the layout type of an object.
 */
#define LOV_2DISPATCH_NOLOCK(obj, op, ...)                      \
({                                                              \
        struct lov_object *__obj = (obj);                       \
        enum lov_layout_type __llt;                             \
                                                                \
        __llt = __obj->lo_type;                                 \
        LASSERT(__llt < ARRAY_SIZE(lov_dispatch));              \
        lov_dispatch[__llt].op(__VA_ARGS__);                    \
})
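
/*
 * For example, lov_object_print() later in this file dispatches through
 * this macro as
 *
 *	LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
 *
 * which expands to lov_dispatch[__obj->lo_type].llo_print(env, cookie, p, o).
 */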
/**
 * Return lov_layout_type associated with a given lsm
 */
static enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
{
        if (lsm == NULL)
                return LLT_EMPTY;

        if (lsm->lsm_is_released)
                return LLT_RELEASED;

        if (lsm->lsm_magic == LOV_MAGIC_V1 ||
            lsm->lsm_magic == LOV_MAGIC_V3 ||
            lsm->lsm_magic == LOV_MAGIC_COMP_V1)
                return LLT_COMP;

        return LLT_EMPTY;
}
static inline void lov_conf_freeze(struct lov_object *lov)
{
        CDEBUG(D_INODE, "To take share lov(%p) owner %p/%p\n",
               lov, lov->lo_owner, current);
        if (lov->lo_owner != current)
                down_read(&lov->lo_type_guard);
}

static inline void lov_conf_thaw(struct lov_object *lov)
{
        CDEBUG(D_INODE, "To release share lov(%p) owner %p/%p\n",
               lov, lov->lo_owner, current);
        if (lov->lo_owner != current)
                up_read(&lov->lo_type_guard);
}
#define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...)               \
({                                                              \
        struct lov_object *__obj = (obj);                       \
        int __lock = !!(lock);                                  \
        typeof(lov_dispatch[0].op(__VA_ARGS__)) __result;       \
                                                                \
        if (__lock)                                             \
                lov_conf_freeze(__obj);                         \
        __result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__);  \
        if (__lock)                                             \
                lov_conf_thaw(__obj);                           \
        __result;                                               \
})

/**
 * Performs a locked double-dispatch based on the layout type of an object.
 */
#define LOV_2DISPATCH(obj, op, ...)                     \
        LOV_2DISPATCH_MAYLOCK(obj, op, 1, __VA_ARGS__)
#define LOV_2DISPATCH_VOID(obj, op, ...)                        \
do {                                                            \
        struct lov_object *__obj = (obj);                       \
        enum lov_layout_type __llt;                             \
                                                                \
        lov_conf_freeze(__obj);                                 \
        __llt = __obj->lo_type;                                 \
        LASSERT(__llt < ARRAY_SIZE(lov_dispatch));              \
        lov_dispatch[__llt].op(__VA_ARGS__);                    \
        lov_conf_thaw(__obj);                                   \
} while (0)
static void lov_conf_lock(struct lov_object *lov)
{
        LASSERT(lov->lo_owner != current);
        down_write(&lov->lo_type_guard);
        LASSERT(lov->lo_owner == NULL);
        lov->lo_owner = current;
        CDEBUG(D_INODE, "Took exclusive lov(%p) owner %p\n",
               lov, lov->lo_owner);
}

static void lov_conf_unlock(struct lov_object *lov)
{
        CDEBUG(D_INODE, "To release exclusive lov(%p) owner %p\n",
               lov, lov->lo_owner);
        lov->lo_owner = NULL;
        up_write(&lov->lo_type_guard);
}
static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
{
        struct l_wait_info lwi = { 0 };
        ENTRY;

        while (atomic_read(&lov->lo_active_ios) > 0) {
                CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
                       PFID(lu_object_fid(lov2lu(lov))),
                       atomic_read(&lov->lo_active_ios));

                l_wait_event(lov->lo_waitq,
                             atomic_read(&lov->lo_active_ios) == 0, &lwi);
        }
        RETURN(0);
}
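
/**
 * Switch the object from its current layout type to the one implied by
 * @lsm: prune cached pages, delete and finalize the old layout state, then
 * install and initialize the new one.  If the new layout cannot be applied,
 * the object degrades to LLT_EMPTY.
 */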
static int lov_layout_change(const struct lu_env *unused,
                             struct lov_object *lov, struct lov_stripe_md *lsm,
                             const struct cl_object_conf *conf)
{
        enum lov_layout_type llt = lov_type(lsm);
        union lov_layout_state *state = &lov->u;
        const struct lov_layout_operations *old_ops;
        const struct lov_layout_operations *new_ops;
        struct lov_device *lov_dev = lov_object_dev(lov);
        struct lu_env *env;
        __u16 refcheck;
        int rc;
        ENTRY;

        LASSERT(lov->lo_type < ARRAY_SIZE(lov_dispatch));

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        LASSERT(llt < ARRAY_SIZE(lov_dispatch));

        CDEBUG(D_INODE, DFID" from %s to %s\n",
               PFID(lu_object_fid(lov2lu(lov))),
               llt2str(lov->lo_type), llt2str(llt));

        old_ops = &lov_dispatch[lov->lo_type];
        new_ops = &lov_dispatch[llt];

        rc = cl_object_prune(env, &lov->lo_cl);
        if (rc != 0)
                GOTO(out, rc);

        rc = old_ops->llo_delete(env, lov, &lov->u);
        if (rc != 0)
                GOTO(out, rc);

        old_ops->llo_fini(env, lov, &lov->u);

        LASSERT(atomic_read(&lov->lo_active_ios) == 0);

        CDEBUG(D_INODE, DFID "Apply new layout lov %p, type %d\n",
               PFID(lu_object_fid(lov2lu(lov))), lov, llt);

        /* page bufsize fixup */
        cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
                lov_page_slice_fixup(lov, NULL);

        lov->lo_type = llt;
        rc = new_ops->llo_init(env, lov_dev, lov, lsm, conf, state);
        if (rc != 0) {
                struct obd_device *obd = lov2obd(lov_dev->ld_lov);

                CERROR("%s: cannot apply new layout on "DFID" : rc = %d\n",
                       obd->obd_name, PFID(lu_object_fid(lov2lu(lov))), rc);
                new_ops->llo_delete(env, lov, state);
                new_ops->llo_fini(env, lov, state);
                /* this file becomes an EMPTY file. */
                lov->lo_type = LLT_EMPTY;
                GOTO(out, rc);
        }

out:
        cl_env_put(env, &refcheck);
        RETURN(rc);
}
/*****************************************************************************
 *
 * Lov object operations.
 *
 */
int lov_object_init(const struct lu_env *env, struct lu_object *obj,
                    const struct lu_object_conf *conf)
{
        struct lov_object *lov = lu2lov(obj);
        struct lov_device *dev = lov_object_dev(lov);
        const struct cl_object_conf *cconf = lu2cl_conf(conf);
        union lov_layout_state *set = &lov->u;
        const struct lov_layout_operations *ops;
        struct lov_stripe_md *lsm = NULL;
        int rc;
        ENTRY;

        init_rwsem(&lov->lo_type_guard);
        atomic_set(&lov->lo_active_ios, 0);
        init_waitqueue_head(&lov->lo_waitq);
        cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));

        lov->lo_type = LLT_EMPTY;
        if (cconf->u.coc_layout.lb_buf != NULL) {
                lsm = lov_unpackmd(dev->ld_lov,
                                   cconf->u.coc_layout.lb_buf,
                                   cconf->u.coc_layout.lb_len);
                if (IS_ERR(lsm))
                        RETURN(PTR_ERR(lsm));

                dump_lsm(D_INODE, lsm);
        }

        /* no locking is necessary, as object is being created */
        lov->lo_type = lov_type(lsm);
        ops = &lov_dispatch[lov->lo_type];
        rc = ops->llo_init(env, dev, lov, lsm, cconf, set);

        lov_lsm_put(lsm);

        RETURN(rc);
}
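
/**
 * Implements cl_object_operations::coo_conf_set(): handles layout
 * invalidation (OBJECT_CONF_INVALIDATE), waiting for in-flight IO
 * (OBJECT_CONF_WAIT) and the actual layout change (OBJECT_CONF_SET),
 * all under the exclusive configuration lock.
 */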
static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
                        const struct cl_object_conf *conf)
{
        struct lov_stripe_md *lsm = NULL;
        struct lov_object *lov = cl2lov(obj);
        int result = 0;
        ENTRY;

        if (conf->coc_opc == OBJECT_CONF_SET &&
            conf->u.coc_layout.lb_buf != NULL) {
                lsm = lov_unpackmd(lov_object_dev(lov)->ld_lov,
                                   conf->u.coc_layout.lb_buf,
                                   conf->u.coc_layout.lb_len);
                if (IS_ERR(lsm))
                        RETURN(PTR_ERR(lsm));
                dump_lsm(D_INODE, lsm);
        }

        lov_conf_lock(lov);
        if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
                lov->lo_layout_invalid = true;
                GOTO(out, result = 0);
        }

        if (conf->coc_opc == OBJECT_CONF_WAIT) {
                if (lov->lo_layout_invalid &&
                    atomic_read(&lov->lo_active_ios) > 0) {
                        lov_conf_unlock(lov);
                        result = lov_layout_wait(env, lov);
                        lov_conf_lock(lov);
                }
                GOTO(out, result);
        }

        LASSERT(conf->coc_opc == OBJECT_CONF_SET);

        if ((lsm == NULL && lov->lo_lsm == NULL) ||
            ((lsm != NULL && lov->lo_lsm != NULL) &&
             (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
             (lov->lo_lsm->lsm_entries[0]->lsme_pattern ==
              lsm->lsm_entries[0]->lsme_pattern))) {
                /* same version of layout */
                lov->lo_layout_invalid = false;
                GOTO(out, result = 0);
        }

        /* will change layout - check if there still exists active IO. */
        if (atomic_read(&lov->lo_active_ios) > 0) {
                lov->lo_layout_invalid = true;
                GOTO(out, result = -EBUSY);
        }

        result = lov_layout_change(env, lov, lsm, conf);
        lov->lo_layout_invalid = result != 0;
        EXIT;

out:
        lov_conf_unlock(lov);
        lov_lsm_put(lsm);
        CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
               PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
        RETURN(result);
}
static void lov_object_delete(const struct lu_env *env, struct lu_object *obj)
{
        struct lov_object *lov = lu2lov(obj);

        ENTRY;
        LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
        EXIT;
}

static void lov_object_free(const struct lu_env *env, struct lu_object *obj)
{
        struct lov_object *lov = lu2lov(obj);

        ENTRY;
        LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
        lu_object_fini(obj);
        OBD_SLAB_FREE_PTR(lov, lov_object_kmem);
        EXIT;
}

static int lov_object_print(const struct lu_env *env, void *cookie,
                            lu_printer_t p, const struct lu_object *o)
{
        return LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
}
int lov_page_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_page *page, pgoff_t index)
{
        return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page,
                                    index);
}
/**
 * Implements cl_object_operations::clo_io_init() method for lov
 * layer. Dispatches to the appropriate layout io initialization method.
 */
int lov_io_init(const struct lu_env *env, struct cl_object *obj,
                struct cl_io *io)
{
        CL_IO_SLICE_CLEAN(lov_env_io(env), lis_preserved);

        CDEBUG(D_INODE, DFID "io %p type %d ignore/verify layout %d/%d\n",
               PFID(lu_object_fid(&obj->co_lu)), io, io->ci_type,
               io->ci_ignore_layout, io->ci_verify_layout);

        /* IO of type CIT_MISC with ci_ignore_layout set is usually invoked
         * from the OSC layer. It shouldn't take lov layout conf lock in that
         * case, because as long as the OSC object exists, the layout can't
         * be reconfigured. */
        return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_io_init,
                        !(io->ci_ignore_layout && io->ci_type == CIT_MISC),
                        env, obj, io);
}
/**
 * An implementation of cl_object_operations::clo_attr_get() method for lov
 * layer. For raid0 layout this collects and merges attributes of all
 * sub-objects.
 */
static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
                        struct cl_attr *attr)
{
        /* do not take lock, as this function is called under a
         * spin-lock. Layout is protected from changing by ongoing IO. */
        return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
}
static int lov_attr_update(const struct lu_env *env, struct cl_object *obj,
                           const struct cl_attr *attr, unsigned valid)
{
        /*
         * No dispatch is required here, as no layout implements this.
         */
        return 0;
}
int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_lock *lock, const struct cl_io *io)
{
        /* No need to lock because we've taken one refcount of layout. */
        return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_lock_init, env, obj, lock,
                                    io);
}
/**
 * We calculate on which OST the mapping will end. If the length of mapping
 * is greater than (stripe_size * stripe_count) then the last_stripe will
 * be one just before start_stripe. Else we check if the mapping
 * intersects each OST and find last_stripe.
 * This function returns the last_stripe and also sets the stripe_count
 * over which the mapping is spread
 *
 * \param lsm [in]		striping information for the file
 * \param index [in]		stripe component index
 * \param ext [in]		logical extent of mapping
 * \param start_stripe [in]	starting stripe of the mapping
 * \param stripe_count [out]	the number of stripes across which to map is
 *				returned
 *
 * \retval last_stripe		return the last stripe of the mapping
 */
static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm, int index,
                                   struct lu_extent *ext,
                                   int start_stripe, int *stripe_count)
{
        struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
        int last_stripe;
        u64 obd_start;
        u64 obd_end;
        int i, j;

        if (ext->e_end - ext->e_start >
            lsme->lsme_stripe_size * lsme->lsme_stripe_count) {
                last_stripe = (start_stripe < 1 ? lsme->lsme_stripe_count - 1 :
                                                  start_stripe - 1);
                *stripe_count = lsme->lsme_stripe_count;
        } else {
                for (j = 0, i = start_stripe; j < lsme->lsme_stripe_count;
                     i = (i + 1) % lsme->lsme_stripe_count, j++) {
                        if ((lov_stripe_intersects(lsm, index, i, ext,
                                                   &obd_start, &obd_end)) == 0)
                                break;
                }
                *stripe_count = j;
                last_stripe = (start_stripe + j - 1) % lsme->lsme_stripe_count;
        }

        return last_stripe;
}
/**
 * Set fe_device and copy extents from local buffer into main return buffer.
 *
 * \param fiemap [out]		fiemap to hold all extents
 * \param lcl_fm_ext [in]	array of fiemap extents received from the OSC
 *				layer
 * \param ost_index [in]	OST index to be written into the fm_device
 *				field for each extent
 * \param ext_count [in]	number of extents to be copied
 * \param current_extent [in]	where to start copying in the extent array
 */
static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap,
                                         struct fiemap_extent *lcl_fm_ext,
                                         int ost_index, unsigned int ext_count,
                                         int current_extent)
{
        char *to;
        unsigned int ext;

        for (ext = 0; ext < ext_count; ext++) {
                lcl_fm_ext[ext].fe_device = ost_index;
                lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
        }

        /* Copy fm_extent's from fm_local to return buffer */
        to = (char *)fiemap + fiemap_count_to_size(current_extent);
        memcpy(to, lcl_fm_ext, ext_count * sizeof(struct fiemap_extent));
}
#define FIEMAP_BUFFER_SIZE 4096
/**
 * Non-zero fe_logical indicates that this is a continuation FIEMAP
 * call. The local end offset and the device are sent in the first
 * fm_extent. This function calculates the stripe number from the index.
 * This function returns a stripe_no on which mapping is to be restarted.
 *
 * This function returns fm_end_offset which is the in-OST offset at which
 * mapping should be restarted. If fm_end_offset=0 is returned then caller
 * will re-calculate proper offset in next stripe.
 * Note that the first extent is passed to lov_get_info via the value field.
 *
 * \param fiemap [in]		fiemap request header
 * \param lsm [in]		striping information for the file
 * \param index [in]		stripe component index
 * \param ext [in]		logical extent of mapping
 * \param start_stripe [out]	starting stripe will be returned in this
 */
static u64 fiemap_calc_fm_end_offset(struct fiemap *fiemap,
                                     struct lov_stripe_md *lsm,
                                     int index, struct lu_extent *ext,
                                     int *start_stripe)
{
        struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
        u64 local_end = fiemap->fm_extents[0].fe_logical;
        u64 lun_start;
        u64 lun_end;
        u64 fm_end_offset;
        int stripe_no = -1;
        int i;

        if (fiemap->fm_extent_count == 0 ||
            fiemap->fm_extents[0].fe_logical == 0)
                return 0;

        /* Find out stripe_no from ost_index saved in the fe_device */
        for (i = 0; i < lsme->lsme_stripe_count; i++) {
                struct lov_oinfo *oinfo = lsme->lsme_oinfo[i];

                if (lov_oinfo_is_dummy(oinfo))
                        continue;

                if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) {
                        stripe_no = i;
                        break;
                }
        }

        if (stripe_no == -1)
                return -EINVAL;

        /* If we have finished mapping on previous device, shift logical
         * offset to start of next device */
        if (lov_stripe_intersects(lsm, index, stripe_no, ext,
                                  &lun_start, &lun_end) != 0 &&
            local_end < lun_end) {
                fm_end_offset = local_end;
                *start_stripe = stripe_no;
        } else {
                /* This is a special value to indicate that caller should
                 * calculate offset in next stripe. */
                fm_end_offset = 0;
                *start_stripe = (stripe_no + 1) % lsme->lsme_stripe_count;
        }

        return fm_end_offset;
}
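
/*
 * Bookkeeping carried across per-stripe FIEMAP calls: the current logical
 * extent, the stripe range being walked, how many extents are still needed,
 * and whether the device, the stripe or the whole request is exhausted.
 */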
struct fiemap_state {
        struct fiemap	*fs_fm;
        struct lu_extent fs_ext;
        u64		fs_length;
        u64		fs_end_offset;
        int		fs_cur_extent;
        int		fs_cnt_need;
        int		fs_start_stripe;
        int		fs_last_stripe;
        bool		fs_device_done;
        bool		fs_finish_stripe;
        bool		fs_enough;
};
static struct cl_object *lov_find_subobj(const struct lu_env *env,
                                         struct lov_object *lov,
                                         struct lov_stripe_md *lsm,
                                         int index)
{
        struct lov_device *dev = lu2lov_dev(lov2lu(lov)->lo_dev);
        struct lov_thread_info *lti = lov_env_info(env);
        struct lu_fid *ofid = &lti->lti_fid;
        struct lov_oinfo *oinfo;
        struct cl_device *subdev;
        int entry = lov_comp_entry(index);
        int stripe = lov_comp_stripe(index);
        int ost_idx;
        int rc;
        struct cl_object *result;

        if (lov->lo_type != LLT_COMP)
                GOTO(out, result = NULL);

        if (entry >= lsm->lsm_entry_count ||
            stripe >= lsm->lsm_entries[entry]->lsme_stripe_count)
                GOTO(out, result = NULL);

        oinfo = lsm->lsm_entries[entry]->lsme_oinfo[stripe];
        ost_idx = oinfo->loi_ost_idx;
        rc = ostid_to_fid(ofid, &oinfo->loi_oi, ost_idx);
        if (rc != 0)
                GOTO(out, result = NULL);

        subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
        result = lov_sub_find(env, subdev, ofid, NULL);
        RETURN(result);
out:
        result = ERR_PTR(-EINVAL);
        return result;
}
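
/**
 * Map the part of @fs->fs_ext that lives on stripe @stripeno of component
 * @index: loop over cl_object_fiemap() calls on the stripe sub-object until
 * the stripe is exhausted or enough extents have been collected, copying
 * the results into the user-visible @fiemap buffer.
 */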
int fiemap_for_stripe(const struct lu_env *env, struct cl_object *obj,
                      struct lov_stripe_md *lsm, struct fiemap *fiemap,
                      size_t *buflen, struct ll_fiemap_info_key *fmkey,
                      int index, int stripeno, struct fiemap_state *fs)
{
        struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
        struct cl_object *subobj;
        struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov;
        struct fiemap_extent *fm_ext = &fs->fs_fm->fm_extents[0];
        u64 req_fm_len; /* Stores length of required mapping */
        u64 len_mapped_single_call;
        u64 lun_start;
        u64 lun_end;
        u64 obd_object_end;
        unsigned int ext_count;
        /* EOF for object */
        bool ost_eof = false;
        /* done with required mapping for this OST? */
        bool ost_done = false;
        int ost_index;
        int rc = 0;

        fs->fs_device_done = false;
        /* Find out range of mapping on this stripe */
        if ((lov_stripe_intersects(lsm, index, stripeno, &fs->fs_ext,
                                   &lun_start, &obd_object_end)) == 0)
                return 0;

        if (lov_oinfo_is_dummy(lsme->lsme_oinfo[stripeno]))
                return -EIO;

        /* If this is a continuation FIEMAP call and we are on
         * starting stripe then lun_start needs to be set to
         * end_offset */
        if (fs->fs_end_offset != 0 && stripeno == fs->fs_start_stripe)
                lun_start = fs->fs_end_offset;
        lun_end = lov_size_to_stripe(lsm, index, fs->fs_ext.e_end, stripeno);
        if (lun_start == lun_end)
                return 0;

        req_fm_len = obd_object_end - lun_start;
        fs->fs_fm->fm_length = 0;
        len_mapped_single_call = 0;

        /* find lobsub object */
        subobj = lov_find_subobj(env, cl2lov(obj), lsm,
                                 lov_comp_index(index, stripeno));
        if (IS_ERR(subobj))
                return PTR_ERR(subobj);
        /* If the output buffer is very large and the objects have many
         * extents we may need to loop on a single OST repeatedly */
        do {
                if (fiemap->fm_extent_count > 0) {
                        /* Don't get too many extents. */
                        if (fs->fs_cur_extent + fs->fs_cnt_need >
                            fiemap->fm_extent_count)
                                fs->fs_cnt_need = fiemap->fm_extent_count -
                                                  fs->fs_cur_extent;
                }

                lun_start += len_mapped_single_call;
                fs->fs_fm->fm_length = req_fm_len - len_mapped_single_call;
                req_fm_len = fs->fs_fm->fm_length;
                /**
                 * If we've collected enough extent map, we'd request 1 more,
                 * to see whether we coincidentally finished all available
                 * extent map, so that FIEMAP_EXTENT_LAST would be set.
                 */
                fs->fs_fm->fm_extent_count = fs->fs_enough ?
                                             1 : fs->fs_cnt_need;
                fs->fs_fm->fm_mapped_extents = 0;
                fs->fs_fm->fm_flags = fiemap->fm_flags;

                ost_index = lsme->lsme_oinfo[stripeno]->loi_ost_idx;

                if (ost_index < 0 || ost_index >= lov->desc.ld_tgt_count)
                        GOTO(obj_put, rc = -EINVAL);
                /* If OST is inactive, return extent with UNKNOWN flag. */
                if (!lov->lov_tgts[ost_index]->ltd_active) {
                        fs->fs_fm->fm_flags |= FIEMAP_EXTENT_LAST;
                        fs->fs_fm->fm_mapped_extents = 1;

                        fm_ext[0].fe_logical = lun_start;
                        fm_ext[0].fe_length = obd_object_end - lun_start;
                        fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;

                        goto inactive_tgt;
                }

                fs->fs_fm->fm_start = lun_start;
                fs->fs_fm->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
                memcpy(&fmkey->lfik_fiemap, fs->fs_fm, sizeof(*fs->fs_fm));
                *buflen = fiemap_count_to_size(fs->fs_fm->fm_extent_count);

                rc = cl_object_fiemap(env, subobj, fmkey, fs->fs_fm, buflen);
                if (rc != 0)
                        GOTO(obj_put, rc);
inactive_tgt:
                ext_count = fs->fs_fm->fm_mapped_extents;
                if (ext_count == 0) {
                        ost_done = true;
                        fs->fs_device_done = true;
                        /* If last stripe has hole at the end,
                         * we need to return */
                        if (stripeno == fs->fs_last_stripe) {
                                fiemap->fm_mapped_extents = 0;
                                fs->fs_finish_stripe = true;
                                GOTO(obj_put, rc);
                        }
                        break;
                } else if (fs->fs_enough) {
                        /*
                         * We've collected enough extents and there are
                         * more extents after it.
                         */
                        GOTO(obj_put, rc);
                }

                /* If we just need num of extents, go to next device */
                if (fiemap->fm_extent_count == 0) {
                        fs->fs_cur_extent += ext_count;
                        break;
                }

                /* prepare to copy retrieved map extents */
                len_mapped_single_call = fm_ext[ext_count - 1].fe_logical +
                                         fm_ext[ext_count - 1].fe_length -
                                         lun_start;

                /* Have we finished mapping on this device? */
                if (req_fm_len <= len_mapped_single_call) {
                        ost_done = true;
                        fs->fs_device_done = true;
                }

                /* Clear the EXTENT_LAST flag which can be present on
                 * the last extent */
                if (fm_ext[ext_count - 1].fe_flags & FIEMAP_EXTENT_LAST)
                        fm_ext[ext_count - 1].fe_flags &= ~FIEMAP_EXTENT_LAST;
                if (lov_stripe_size(lsm, index,
                                    fm_ext[ext_count - 1].fe_logical +
                                    fm_ext[ext_count - 1].fe_length,
                                    stripeno) >= fmkey->lfik_oa.o_size) {
                        ost_eof = true;
                        fs->fs_device_done = true;
                }

                fiemap_prepare_and_copy_exts(fiemap, fm_ext, ost_index,
                                             ext_count, fs->fs_cur_extent);
                fs->fs_cur_extent += ext_count;

                /* Ran out of available extents? */
                if (fs->fs_cur_extent >= fiemap->fm_extent_count)
                        fs->fs_enough = true;
        } while (!ost_done && !ost_eof);

        if (stripeno == fs->fs_last_stripe)
                fs->fs_finish_stripe = true;
obj_put:
        cl_object_put(env, subobj);

        return rc;
}
/**
 * Break down the FIEMAP request and send appropriate calls to individual OSTs.
 * This also handles the restarting of FIEMAP calls in case mapping overflows
 * the available number of extents in single call.
 *
 * \param env [in]		lustre environment
 * \param obj [in]		file object
 * \param fmkey [in]		fiemap request header and other info
 * \param fiemap [out]		fiemap buffer holding retrieved map extents
 * \param buflen [in/out]	max buffer length of @fiemap; when iterating
 *				over each OST, it is used to limit the max
 *				map needed
 *
 * \retval 0	success
 * \retval < 0	error
 */
static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj,
                             struct ll_fiemap_info_key *fmkey,
                             struct fiemap *fiemap, size_t *buflen)
{
        struct lov_stripe_md_entry *lsme;
        struct lov_stripe_md *lsm;
        struct fiemap *fm_local = NULL;
        loff_t whole_start;
        loff_t whole_end;
        int entry;
        int start_entry;
        int end_entry;
        int cur_stripe = 0;
        int stripe_count;
        unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
        int rc = 0;
        struct fiemap_state fs = { 0 };
        ENTRY;

        lsm = lov_lsm_addref(cl2lov(obj));
        if (lsm == NULL) {
                /* no extent: there is no object for mapping */
                fiemap->fm_mapped_extents = 0;
                return 0;
        }

        if (!(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) {
                /**
                 * If the entry count > 1 or stripe_count > 1 and the
                 * application does not understand DEVICE_ORDER flag,
                 * it cannot interpret the extents correctly.
                 */
                if (lsm->lsm_entry_count > 1 ||
                    (lsm->lsm_entry_count == 1 &&
                     lsm->lsm_entries[0]->lsme_stripe_count > 1))
                        GOTO(out_lsm, rc = -ENOTSUPP);
        }

        /* No support for DOM layout yet. */
        if (lsme_is_dom(lsm->lsm_entries[0]))
                GOTO(out_lsm, rc = -ENOTSUPP);

        if (lsm->lsm_is_released) {
                if (fiemap->fm_start < fmkey->lfik_oa.o_size) {
                        /**
                         * released file, return a minimal FIEMAP if
                         * request fits in file-size.
                         */
                        fiemap->fm_mapped_extents = 1;
                        fiemap->fm_extents[0].fe_logical = fiemap->fm_start;
                        if (fiemap->fm_start + fiemap->fm_length <
                            fmkey->lfik_oa.o_size)
                                fiemap->fm_extents[0].fe_length =
                                        fiemap->fm_length;
                        else
                                fiemap->fm_extents[0].fe_length =
                                        fmkey->lfik_oa.o_size -
                                        fiemap->fm_start;
                        fiemap->fm_extents[0].fe_flags |=
                                FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST;
                }
                GOTO(out_lsm, rc = 0);
        }

        /* A smaller buffer is enough to hold fm_extent_count extents. */
        if (fiemap_count_to_size(fiemap->fm_extent_count) < buffer_size)
                buffer_size = fiemap_count_to_size(fiemap->fm_extent_count);

        OBD_ALLOC_LARGE(fm_local, buffer_size);
        if (fm_local == NULL)
                GOTO(out_lsm, rc = -ENOMEM);

        /**
         * Requested extent count exceeds the fiemap buffer size, shrink our
         * ambition.
         */
        if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen)
                fiemap->fm_extent_count = fiemap_size_to_count(*buflen);
        if (fiemap->fm_extent_count == 0)
                fs.fs_cnt_need = 0;

        fs.fs_enough = false;
        fs.fs_cur_extent = 0;
        fs.fs_fm = fm_local;
        fs.fs_cnt_need = fiemap_size_to_count(buffer_size);

        whole_start = fiemap->fm_start;
        /* whole_start is beyond the end of the file */
        if (whole_start > fmkey->lfik_oa.o_size)
                GOTO(out_fm_local, rc = -EINVAL);
        whole_end = (fiemap->fm_length == OBD_OBJECT_EOF) ?
                    fmkey->lfik_oa.o_size :
                    whole_start + fiemap->fm_length - 1;
        /**
         * If fiemap->fm_length != OBD_OBJECT_EOF, whole_end can still exceed
         * the file size; clamp it to the file size.
         */
        if (whole_end > fmkey->lfik_oa.o_size)
                whole_end = fmkey->lfik_oa.o_size;

        start_entry = lov_lsm_entry(lsm, whole_start);
        end_entry = lov_lsm_entry(lsm, whole_end);
        if (end_entry == -1)
                end_entry = lsm->lsm_entry_count - 1;

        if (start_entry == -1 || end_entry == -1)
                GOTO(out_fm_local, rc = -EINVAL);

        /* TODO: rewrite it with lov_foreach_io_layout() */
        for (entry = start_entry; entry <= end_entry; entry++) {
                lsme = lsm->lsm_entries[entry];

                if (!lsme_inited(lsme))
                        break;

                if (entry == start_entry)
                        fs.fs_ext.e_start = whole_start;
                else
                        fs.fs_ext.e_start = lsme->lsme_extent.e_start;
                if (entry == end_entry)
                        fs.fs_ext.e_end = whole_end;
                else
                        fs.fs_ext.e_end = lsme->lsme_extent.e_end - 1;
                fs.fs_length = fs.fs_ext.e_end - fs.fs_ext.e_start + 1;

                /* Calculate start stripe, last stripe and length of mapping */
                fs.fs_start_stripe = lov_stripe_number(lsm, entry,
                                                       fs.fs_ext.e_start);
                fs.fs_last_stripe = fiemap_calc_last_stripe(lsm, entry,
                                        &fs.fs_ext, fs.fs_start_stripe,
                                        &stripe_count);
                fs.fs_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, entry,
                                        &fs.fs_ext, &fs.fs_start_stripe);
                /* Check each stripe */
                for (cur_stripe = fs.fs_start_stripe; stripe_count > 0;
                     --stripe_count,
                     cur_stripe = (cur_stripe + 1) % lsme->lsme_stripe_count) {
                        rc = fiemap_for_stripe(env, obj, lsm, fiemap, buflen,
                                               fmkey, entry, cur_stripe, &fs);
                        if (rc < 0)
                                GOTO(out_fm_local, rc);
                        if (fs.fs_enough)
                                GOTO(finish, rc);
                        if (fs.fs_finish_stripe)
                                break;
                } /* for each stripe */
        } /* for covering layout component */
        /*
         * We've traversed all components, set @entry to the last component
         * entry, it's for the last stripe check.
         */
        entry = end_entry;
finish:
        /* Indicate that we are returning device offsets unless file just has
         * single stripe */
        if (lsm->lsm_entry_count > 1 ||
            (lsm->lsm_entry_count == 1 &&
             lsm->lsm_entries[0]->lsme_stripe_count > 1))
                fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;

        if (fiemap->fm_extent_count == 0)
                goto skip_last_device_calc;

        /* Check if we have reached the last stripe and whether mapping for
         * that stripe is done. */
        if ((cur_stripe == fs.fs_last_stripe) && fs.fs_device_done)
                fiemap->fm_extents[fs.fs_cur_extent - 1].fe_flags |=
                                                        FIEMAP_EXTENT_LAST;
skip_last_device_calc:
        fiemap->fm_mapped_extents = fs.fs_cur_extent;
out_fm_local:
        OBD_FREE_LARGE(fm_local, buffer_size);

out_lsm:
        lov_lsm_put(lsm);
        return rc;
}
static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
                                struct lov_user_md __user *lum, size_t size)
{
        struct lov_object *lov = cl2lov(obj);
        struct lov_stripe_md *lsm;
        int rc = 0;
        ENTRY;

        lsm = lov_lsm_addref(lov);
        if (lsm == NULL)
                RETURN(-ENODATA);

        rc = lov_getstripe(env, cl2lov(obj), lsm, lum, size);
        lov_lsm_put(lsm);
        RETURN(rc);
}
static int lov_object_layout_get(const struct lu_env *env,
                                 struct cl_object *obj,
                                 struct cl_layout *cl)
{
        struct lov_object *lov = cl2lov(obj);
        struct lov_stripe_md *lsm = lov_lsm_addref(lov);
        struct lu_buf *buf = &cl->cl_buf;
        ssize_t rc;
        ENTRY;

        if (lsm == NULL) {
                cl->cl_size = 0;
                cl->cl_layout_gen = CL_LAYOUT_GEN_EMPTY;

                RETURN(0);
        }

        cl->cl_size = lov_comp_md_size(lsm);
        cl->cl_layout_gen = lsm->lsm_layout_gen;
        cl->cl_dom_comp_size = 0;
        if (lsm_is_composite(lsm->lsm_magic)) {
                struct lov_stripe_md_entry *lsme = lsm->lsm_entries[0];

                cl->cl_is_composite = true;

                if (lsme_is_dom(lsme))
                        cl->cl_dom_comp_size = lsme->lsme_extent.e_end;
        } else {
                cl->cl_is_composite = false;
        }

        rc = lov_lsm_pack(lsm, buf->lb_buf, buf->lb_len);
        lov_lsm_put(lsm);

        RETURN(rc < 0 ? rc : 0);
}
static loff_t lov_object_maxbytes(struct cl_object *obj)
{
        struct lov_object *lov = cl2lov(obj);
        struct lov_stripe_md *lsm = lov_lsm_addref(lov);
        loff_t maxbytes;

        if (lsm == NULL)
                return LLONG_MAX;

        maxbytes = lsm->lsm_maxbytes;

        lov_lsm_put(lsm);

        return maxbytes;
}
static const struct cl_object_operations lov_ops = {
        .coo_page_init   = lov_page_init,
        .coo_lock_init   = lov_lock_init,
        .coo_io_init     = lov_io_init,
        .coo_attr_get    = lov_attr_get,
        .coo_attr_update = lov_attr_update,
        .coo_conf_set    = lov_conf_set,
        .coo_getstripe   = lov_object_getstripe,
        .coo_layout_get  = lov_object_layout_get,
        .coo_maxbytes    = lov_object_maxbytes,
        .coo_fiemap      = lov_object_fiemap,
};
static const struct lu_object_operations lov_lu_obj_ops = {
        .loo_object_init      = lov_object_init,
        .loo_object_delete    = lov_object_delete,
        .loo_object_release   = NULL,
        .loo_object_free      = lov_object_free,
        .loo_object_print     = lov_object_print,
        .loo_object_invariant = NULL
};
struct lu_object *lov_object_alloc(const struct lu_env *env,
                                   const struct lu_object_header *unused,
                                   struct lu_device *dev)
{
        struct lov_object *lov;
        struct lu_object *obj;
        ENTRY;

        OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, GFP_NOFS);
        if (lov != NULL) {
                obj = lov2lu(lov);
                lu_object_init(obj, NULL, dev);
                lov->lo_cl.co_ops = &lov_ops;
                lov->lo_type = -1; /* invalid, to catch uninitialized type */
                /*
                 * object io operation vector (cl_object::co_iop) is installed
                 * later in lov_object_init(), as different vectors are used
                 * for objects with different layouts.
                 */
                obj->lo_ops = &lov_lu_obj_ops;
        } else {
                obj = NULL;
        }
        RETURN(obj);
}
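
/**
 * Take an extra reference on the object's current striping (if any) under
 * the configuration read-lock, so the layout cannot be torn down while the
 * caller uses it.  Release the reference with lov_lsm_put().
 */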
struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
{
        struct lov_stripe_md *lsm = NULL;

        lov_conf_freeze(lov);
        if (lov->lo_lsm != NULL) {
                lsm = lsm_addref(lov->lo_lsm);
                CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
                       lsm, atomic_read(&lsm->lsm_refc),
                       lov->lo_layout_invalid, current);
        }
        lov_conf_thaw(lov);
        return lsm;
}
int lov_read_and_clear_async_rc(struct cl_object *clob)
{
        struct lu_object *luobj;
        int rc = 0;
        ENTRY;

        luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
                                 &lov_device_type);
        if (luobj != NULL) {
                struct lov_object *lov = lu2lov(luobj);

                lov_conf_freeze(lov);
                switch (lov->lo_type) {
                case LLT_COMP: {
                        struct lov_stripe_md *lsm;
                        int i;

                        lsm = lov->lo_lsm;
                        LASSERT(lsm != NULL);
                        for (i = 0; i < lsm->lsm_entry_count; i++) {
                                struct lov_stripe_md_entry *lse =
                                        lsm->lsm_entries[i];
                                int j;

                                if (!lsme_inited(lse))
                                        break;

                                for (j = 0; j < lse->lsme_stripe_count; j++) {
                                        struct lov_oinfo *loi =
                                                lse->lsme_oinfo[j];

                                        if (lov_oinfo_is_dummy(loi))
                                                continue;

                                        if (loi->loi_ar.ar_rc && !rc)
                                                rc = loi->loi_ar.ar_rc;
                                        loi->loi_ar.ar_rc = 0;
                                }
                        }
                }
                /* fallthrough */
                case LLT_RELEASED:
                case LLT_EMPTY:
                        break;
                default:
                        LBUG();
                }
                lov_conf_thaw(lov);
        }
        RETURN(rc);
}
EXPORT_SYMBOL(lov_read_and_clear_async_rc);