4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2016, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * Implementation of cl_object for LOV layer.
34 * Author: Nikita Danilov <nikita.danilov@sun.com>
35 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
38 #define DEBUG_SUBSYSTEM S_LOV
40 #include "lov_cl_internal.h"
42 static inline struct lov_device *lov_object_dev(struct lov_object *obj)
44 return lu2lov_dev(obj->lo_cl.co_lu.lo_dev);
51 /*****************************************************************************
57 struct lov_layout_operations {
58 int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
59 struct lov_object *lov, struct lov_stripe_md *lsm,
60 const struct cl_object_conf *conf,
61 union lov_layout_state *state);
62 int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
63 union lov_layout_state *state);
64 void (*llo_fini)(const struct lu_env *env, struct lov_object *lov,
65 union lov_layout_state *state);
66 void (*llo_install)(const struct lu_env *env, struct lov_object *lov,
67 union lov_layout_state *state);
68 int (*llo_print)(const struct lu_env *env, void *cookie,
69 lu_printer_t p, const struct lu_object *o);
70 int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
71 struct cl_page *page, pgoff_t index);
72 int (*llo_lock_init)(const struct lu_env *env,
73 struct cl_object *obj, struct cl_lock *lock,
74 const struct cl_io *io);
75 int (*llo_io_init)(const struct lu_env *env,
76 struct cl_object *obj, struct cl_io *io);
77 int (*llo_getattr)(const struct lu_env *env, struct cl_object *obj,
78 struct cl_attr *attr);
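/*
 * Orientation note (drawn from the dispatch code later in this file): one
 * instance of this vtable exists per layout type in the lov_dispatch[]
 * array below, indexed by enum lov_layout_type, so that, for example, a
 * raid0 object's .llo_init is lov_init_raid0() and its .llo_print is
 * lov_print_raid0().  The LOV_2DISPATCH*() macros pick the vtable from
 * lov_object::lo_type at run time.
 */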
81 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
83 static void lov_lsm_put(struct lov_stripe_md *lsm)
89 /*****************************************************************************
91 * Lov object layout operations.
95 static void lov_install_empty(const struct lu_env *env,
96 struct lov_object *lov,
97 union lov_layout_state *state)
100 * File without objects.
104 static int lov_init_empty(const struct lu_env *env, struct lov_device *dev,
105 struct lov_object *lov, struct lov_stripe_md *lsm,
106 const struct cl_object_conf *conf,
107 union lov_layout_state *state)
112 static void lov_install_raid0(const struct lu_env *env,
113 struct lov_object *lov,
114 union lov_layout_state *state)
118 static struct cl_object *lov_sub_find(const struct lu_env *env,
119 struct cl_device *dev,
120 const struct lu_fid *fid,
121 const struct cl_object_conf *conf)
126 o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
127 LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
131 static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
132 struct cl_object *stripe, struct lov_layout_raid0 *r0,
135 struct cl_object_header *hdr;
136 struct cl_object_header *subhdr;
137 struct cl_object_header *parent;
138 struct lov_oinfo *oinfo;
141 if (OBD_FAIL_CHECK(OBD_FAIL_LOV_INIT)) {
142 /* For sanity:test_206.
143  * Do not leave the object in cache to avoid accessing
144  * freed memory. This is because osc_object keeps referring to
145  * the lov_oinfo of lsm_stripe_data, which will be freed due to this failure. */
147 cl_object_kill(env, stripe);
148 cl_object_put(env, stripe);
152 hdr = cl_object_header(lov2cl(lov));
153 subhdr = cl_object_header(stripe);
155 oinfo = lov->lo_lsm->lsm_oinfo[idx];
156 CDEBUG(D_INODE, DFID"@%p[%d] -> "DFID"@%p: ostid: "DOSTID
157 " idx: %d gen: %d\n",
158 PFID(&subhdr->coh_lu.loh_fid), subhdr, idx,
159 PFID(&hdr->coh_lu.loh_fid), hdr, POSTID(&oinfo->loi_oi),
160 oinfo->loi_ost_idx, oinfo->loi_ost_gen);
162 /* reuse ->coh_attr_guard to protect coh_parent change */
163 spin_lock(&subhdr->coh_attr_guard);
164 parent = subhdr->coh_parent;
165 if (parent == NULL) {
166 subhdr->coh_parent = hdr;
167 spin_unlock(&subhdr->coh_attr_guard);
168 subhdr->coh_nesting = hdr->coh_nesting + 1;
169 lu_object_ref_add(&stripe->co_lu, "lov-parent", lov);
170 r0->lo_sub[idx] = cl2lovsub(stripe);
171 r0->lo_sub[idx]->lso_super = lov;
172 r0->lo_sub[idx]->lso_index = idx;
175 struct lu_object *old_obj;
176 struct lov_object *old_lov;
177 unsigned int mask = D_INODE;
179 spin_unlock(&subhdr->coh_attr_guard);
180 old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
181 LASSERT(old_obj != NULL);
182 old_lov = cl2lov(lu2cl(old_obj));
183 if (old_lov->lo_layout_invalid) {
184 /* the object's layout has already changed but isn't refreshed yet */
186 lu_object_unhash(env, &stripe->co_lu);
193 LU_OBJECT_DEBUG(mask, env, &stripe->co_lu,
194 "stripe %d is already owned.", idx);
195 LU_OBJECT_DEBUG(mask, env, old_obj, "owned.");
196 LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n");
197 cl_object_put(env, stripe);
202 static int lov_page_slice_fixup(struct lov_object *lov,
203 struct cl_object *stripe)
205 struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
209 return hdr->coh_page_bufsize - lov->lo_cl.co_slice_off -
210 cfs_size_round(sizeof(struct lov_page));
212 cl_object_for_each(o, stripe)
213 o->co_slice_off += hdr->coh_page_bufsize;
215 return cl_object_header(stripe)->coh_page_bufsize;
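/*
 * Illustrative example (numbers are assumptions, not from the source): if
 * the top object's coh_page_bufsize is 512 and the lov slice starts at
 * co_slice_off == 128, a NULL stripe yields 512 - 128 -
 * cfs_size_round(sizeof(struct lov_page)) spare bytes.  With a real stripe,
 * every slice of the sub-object is shifted up by the parent's
 * coh_page_bufsize so the two objects can share one cl_page buffer, and the
 * sub-object's own coh_page_bufsize is returned (lov_init_raid0() then adds
 * it to the parent's coh_page_bufsize).
 */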
218 static int lov_init_raid0(const struct lu_env *env, struct lov_device *dev,
219 struct lov_object *lov, struct lov_stripe_md *lsm,
220 const struct cl_object_conf *conf,
221 union lov_layout_state *state)
226 struct cl_object *stripe;
227 struct lov_thread_info *lti = lov_env_info(env);
228 struct cl_object_conf *subconf = <i->lti_stripe_conf;
229 struct lu_fid *ofid = <i->lti_fid;
230 struct lov_layout_raid0 *r0 = &state->raid0;
234 if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
235 dump_lsm(D_ERROR, lsm);
236 LASSERTF(0, "magic mismatch, expected %d/%d, actual %d.\n",
237 LOV_MAGIC_V1, LOV_MAGIC_V3, lsm->lsm_magic);
240 LASSERT(lov->lo_lsm == NULL);
241 lov->lo_lsm = lsm_addref(lsm);
242 r0->lo_nr = lsm->lsm_stripe_count;
243 LASSERT(r0->lo_nr <= lov_targets_nr(dev));
245 lov->lo_layout_invalid = true;
247 OBD_ALLOC_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
248 if (r0->lo_sub != NULL) {
252 subconf->coc_inode = conf->coc_inode;
253 spin_lock_init(&r0->lo_sub_lock);
255 * Create stripe cl_objects.
257 for (i = 0; i < r0->lo_nr && result == 0; ++i) {
258 struct cl_device *subdev;
259 struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
260 int ost_idx = oinfo->loi_ost_idx;
262 if (lov_oinfo_is_dummy(oinfo))
265 result = ostid_to_fid(ofid, &oinfo->loi_oi,
270 if (dev->ld_target[ost_idx] == NULL) {
271 CERROR("%s: OST %04x is not initialized\n",
272 lov2obd(dev->ld_lov)->obd_name, ost_idx);
273 GOTO(out, result = -EIO);
276 subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
277 subconf->u.coc_oinfo = oinfo;
278 LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
279 /* In the function below, .hs_keycmp resolves to
280 * lu_obj_hop_keycmp() */
281 /* coverity[overrun-buffer-val] */
282 stripe = lov_sub_find(env, subdev, ofid, subconf);
283 if (!IS_ERR(stripe)) {
284 result = lov_init_sub(env, lov, stripe, r0, i);
285 if (result == -EAGAIN) { /* try again */
291 result = PTR_ERR(stripe);
295 int sz = lov_page_slice_fixup(lov, stripe);
296 LASSERT(ergo(psz > 0, psz == sz));
301 cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;
308 static int lov_init_released(const struct lu_env *env,
309 struct lov_device *dev, struct lov_object *lov,
310 struct lov_stripe_md *lsm,
311 const struct cl_object_conf *conf,
312 union lov_layout_state *state)
314 LASSERT(lsm != NULL);
315 LASSERT(lsm_is_released(lsm));
316 LASSERT(lov->lo_lsm == NULL);
318 lov->lo_lsm = lsm_addref(lsm);
322 static struct cl_object *lov_find_subobj(const struct lu_env *env,
323 struct lov_object *lov,
324 struct lov_stripe_md *lsm,
327 struct lov_device *dev = lu2lov_dev(lov2lu(lov)->lo_dev);
328 struct lov_oinfo *oinfo = lsm->lsm_oinfo[stripe_idx];
329 struct lov_thread_info *lti = lov_env_info(env);
330 struct lu_fid *ofid = <i->lti_fid;
331 struct cl_device *subdev;
334 struct cl_object *result;
336 if (lov->lo_type != LLT_RAID0)
337 GOTO(out, result = NULL);
339 ost_idx = oinfo->loi_ost_idx;
340 rc = ostid_to_fid(ofid, &oinfo->loi_oi, ost_idx);
342 GOTO(out, result = NULL);
344 subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
345 result = lov_sub_find(env, subdev, ofid, NULL);
348 result = ERR_PTR(-EINVAL);
352 static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
353 union lov_layout_state *state)
355 LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
357 lov_layout_wait(env, lov);
361 static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
362 struct lovsub_object *los, int idx)
364 struct cl_object *sub;
365 struct lov_layout_raid0 *r0;
366 struct lu_site *site;
367 struct lu_site_bkt_data *bkt;
368 wait_queue_t *waiter;
371 LASSERT(r0->lo_sub[idx] == los);
373 sub = lovsub2cl(los);
374 site = sub->co_lu.lo_dev->ld_site;
375 bkt = lu_site_bkt_from_fid(site, &sub->co_lu.lo_header->loh_fid);
377 cl_object_kill(env, sub);
378 /* release a reference to the sub-object and ... */
379 lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
380 cl_object_put(env, sub);
382 /* ... wait until it is actually destroyed---sub-object clears its
383 * ->lo_sub[] slot in lovsub_object_fini() */
384 if (r0->lo_sub[idx] == los) {
385 waiter = &lov_env_info(env)->lti_waiter;
386 init_waitqueue_entry(waiter, current);
387 add_wait_queue(&bkt->lsb_marche_funebre, waiter);
388 set_current_state(TASK_UNINTERRUPTIBLE);
390 /* this wait-queue is signaled at the end of
391 * lu_object_free(). */
392 set_current_state(TASK_UNINTERRUPTIBLE);
393 spin_lock(&r0->lo_sub_lock);
394 if (r0->lo_sub[idx] == los) {
395 spin_unlock(&r0->lo_sub_lock);
398 spin_unlock(&r0->lo_sub_lock);
399 set_current_state(TASK_RUNNING);
403 remove_wait_queue(&bkt->lsb_marche_funebre, waiter);
405 LASSERT(r0->lo_sub[idx] == NULL);
408 static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
409 union lov_layout_state *state)
411 struct lov_layout_raid0 *r0 = &state->raid0;
412 struct lov_stripe_md *lsm = lov->lo_lsm;
417 dump_lsm(D_INODE, lsm);
419 lov_layout_wait(env, lov);
420 if (r0->lo_sub != NULL) {
421 for (i = 0; i < r0->lo_nr; ++i) {
422 struct lovsub_object *los = r0->lo_sub[i];
425 cl_object_prune(env, &los->lso_cl);
427 * If top-level object is to be evicted from
428 * the cache, so are its sub-objects.
430 lov_subobject_kill(env, lov, los, i);
437 static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov,
438 union lov_layout_state *state)
440 LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
443 static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov,
444 union lov_layout_state *state)
446 struct lov_layout_raid0 *r0 = &state->raid0;
449 if (r0->lo_sub != NULL) {
450 OBD_FREE_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
454 dump_lsm(D_INODE, lov->lo_lsm);
455 lov_free_memmd(&lov->lo_lsm);
460 static void lov_fini_released(const struct lu_env *env, struct lov_object *lov,
461 union lov_layout_state *state)
464 dump_lsm(D_INODE, lov->lo_lsm);
465 lov_free_memmd(&lov->lo_lsm);
469 static int lov_print_empty(const struct lu_env *env, void *cookie,
470 lu_printer_t p, const struct lu_object *o)
472 (*p)(env, cookie, "empty %d\n", lu2lov(o)->lo_layout_invalid);
476 static int lov_print_raid0(const struct lu_env *env, void *cookie,
477 lu_printer_t p, const struct lu_object *o)
479 struct lov_object *lov = lu2lov(o);
480 struct lov_layout_raid0 *r0 = lov_r0(lov);
481 struct lov_stripe_md *lsm = lov->lo_lsm;
484 (*p)(env, cookie, "stripes: %d, %s, lsm{%p 0x%08X %d %u %u}:\n",
485 r0->lo_nr, lov->lo_layout_invalid ? "invalid" : "valid", lsm,
486 lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
487 lsm->lsm_stripe_count, lsm->lsm_layout_gen);
488 for (i = 0; i < r0->lo_nr; ++i) {
489 struct lu_object *sub;
491 if (r0->lo_sub[i] != NULL) {
492 sub = lovsub2lu(r0->lo_sub[i]);
493 lu_object_print(env, cookie, p, sub);
495 (*p)(env, cookie, "sub %d absent\n", i);
501 static int lov_print_released(const struct lu_env *env, void *cookie,
502 lu_printer_t p, const struct lu_object *o)
504 struct lov_object *lov = lu2lov(o);
505 struct lov_stripe_md *lsm = lov->lo_lsm;
508 "released: %s, lsm{%p 0x%08X %d %u %u}:\n",
509 lov->lo_layout_invalid ? "invalid" : "valid", lsm,
510 lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
511 lsm->lsm_stripe_count, lsm->lsm_layout_gen);
516 * Implements cl_object_operations::coo_attr_get() method for an object
517 * without stripes (LLT_EMPTY layout type).
519  * The only attribute for which this layer is authoritative in this case is
520  * cl_attr::cat_blocks, which is 0.
522 static int lov_attr_get_empty(const struct lu_env *env, struct cl_object *obj,
523 struct cl_attr *attr)
525 attr->cat_blocks = 0;
529 static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
530 struct cl_attr *attr)
532 struct lov_object *lov = cl2lov(obj);
533 struct lov_layout_raid0 *r0 = lov_r0(lov);
534 struct cl_attr *lov_attr = &r0->lo_attr;
539 /* This is called without holding the type guard mutex, so it must be
540  * inside an ongoing IO, otherwise the lsm may be replaced.
541  * LU-2117: it turns out there is one exception. For mmapped files, a
542  * lock on one file may be requested in another file's IO context, and
543  * this function is then called from ccc_lock_state(), hitting this
544  * assertion.
545  * Anyway, it is still okay to call attr_get without the type guard, as
546  * the layout cannot change while locks exist. */
547 /* LASSERT(atomic_read(&lsm->lsm_refc) > 1); */
549 if (!r0->lo_attr_valid) {
550 struct lov_stripe_md *lsm = lov->lo_lsm;
551 struct ost_lvb *lvb = &lov_env_info(env)->lti_lvb;
554 memset(lvb, 0, sizeof(*lvb));
555 /* XXX: timestamps can be negative by sanity:test_39m, so initialize them to LLONG_MIN */
557 lvb->lvb_atime = LLONG_MIN;
558 lvb->lvb_ctime = LLONG_MIN;
559 lvb->lvb_mtime = LLONG_MIN;
562 * XXX that should be replaced with a loop over sub-objects,
563 * doing cl_object_attr_get() on them. But for now, let's
564 * reuse old lov code.
568 * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
569 * happy. It's not needed, because new code uses
570 * ->coh_attr_guard spin-lock to protect consistency of
571 * sub-object attributes.
573 lov_stripe_lock(lsm);
574 result = lov_merge_lvb_kms(lsm, lvb, &kms);
575 lov_stripe_unlock(lsm);
577 cl_lvb2attr(lov_attr, lvb);
578 lov_attr->cat_kms = kms;
579 r0->lo_attr_valid = 1;
582 if (result == 0) { /* merge results */
583 attr->cat_blocks = lov_attr->cat_blocks;
584 attr->cat_size = lov_attr->cat_size;
585 attr->cat_kms = lov_attr->cat_kms;
586 if (attr->cat_atime < lov_attr->cat_atime)
587 attr->cat_atime = lov_attr->cat_atime;
588 if (attr->cat_ctime < lov_attr->cat_ctime)
589 attr->cat_ctime = lov_attr->cat_ctime;
590 if (attr->cat_mtime < lov_attr->cat_mtime)
591 attr->cat_mtime = lov_attr->cat_mtime;
596 static const struct lov_layout_operations lov_dispatch[] = {
598 .llo_init = lov_init_empty,
599 .llo_delete = lov_delete_empty,
600 .llo_fini = lov_fini_empty,
601 .llo_install = lov_install_empty,
602 .llo_print = lov_print_empty,
603 .llo_page_init = lov_page_init_empty,
604 .llo_lock_init = lov_lock_init_empty,
605 .llo_io_init = lov_io_init_empty,
606 .llo_getattr = lov_attr_get_empty,
609 .llo_init = lov_init_raid0,
610 .llo_delete = lov_delete_raid0,
611 .llo_fini = lov_fini_raid0,
612 .llo_install = lov_install_raid0,
613 .llo_print = lov_print_raid0,
614 .llo_page_init = lov_page_init_raid0,
615 .llo_lock_init = lov_lock_init_raid0,
616 .llo_io_init = lov_io_init_raid0,
617 .llo_getattr = lov_attr_get_raid0,
620 .llo_init = lov_init_released,
621 .llo_delete = lov_delete_empty,
622 .llo_fini = lov_fini_released,
623 .llo_install = lov_install_empty,
624 .llo_print = lov_print_released,
625 .llo_page_init = lov_page_init_empty,
626 .llo_lock_init = lov_lock_init_empty,
627 .llo_io_init = lov_io_init_released,
628 .llo_getattr = lov_attr_get_empty,
633 * Performs a double-dispatch based on the layout type of an object.
635 #define LOV_2DISPATCH_NOLOCK(obj, op, ...) \
637 struct lov_object *__obj = (obj); \
638 enum lov_layout_type __llt; \
640 __llt = __obj->lo_type; \
641 LASSERT(__llt < ARRAY_SIZE(lov_dispatch)); \
642 lov_dispatch[__llt].op(__VA_ARGS__); \
646 * Return lov_layout_type associated with a given lsm
648 static enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
652 if (lsm_is_released(lsm))
657 static inline void lov_conf_freeze(struct lov_object *lov)
659 CDEBUG(D_INODE, "To take share lov(%p) owner %p/%p\n",
660 lov, lov->lo_owner, current);
661 if (lov->lo_owner != current)
662 down_read(&lov->lo_type_guard);
665 static inline void lov_conf_thaw(struct lov_object *lov)
667 CDEBUG(D_INODE, "To release share lov(%p) owner %p/%p\n",
668 lov, lov->lo_owner, current);
669 if (lov->lo_owner != current)
670 up_read(&lov->lo_type_guard);
673 #define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...) \
675 struct lov_object *__obj = (obj); \
676 int __lock = !!(lock); \
677 typeof(lov_dispatch[0].op(__VA_ARGS__)) __result; \
680 lov_conf_freeze(__obj); \
681 __result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__); \
683 lov_conf_thaw(__obj); \
688 * Performs a locked double-dispatch based on the layout type of an object.
690 #define LOV_2DISPATCH(obj, op, ...) \
691 LOV_2DISPATCH_MAYLOCK(obj, op, 1, __VA_ARGS__)
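/*
 * Expansion example (illustrative, mirroring callers later in this file):
 * LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o) reads
 * __obj->lo_type and calls lov_dispatch[__llt].llo_print(env, cookie, p, o),
 * i.e. lov_print_raid0() for a raid0 object.  LOV_2DISPATCH() does the same
 * but brackets the call with lov_conf_freeze()/lov_conf_thaw().
 */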
693 #define LOV_2DISPATCH_VOID(obj, op, ...) \
695 struct lov_object *__obj = (obj); \
696 enum lov_layout_type __llt; \
698 lov_conf_freeze(__obj); \
699 __llt = __obj->lo_type; \
700 LASSERT(__llt < ARRAY_SIZE(lov_dispatch)); \
701 lov_dispatch[__llt].op(__VA_ARGS__); \
702 lov_conf_thaw(__obj); \
705 static void lov_conf_lock(struct lov_object *lov)
707 LASSERT(lov->lo_owner != current);
708 down_write(&lov->lo_type_guard);
709 LASSERT(lov->lo_owner == NULL);
710 lov->lo_owner = current;
711 CDEBUG(D_INODE, "Took exclusive lov(%p) owner %p\n",
715 static void lov_conf_unlock(struct lov_object *lov)
717 CDEBUG(D_INODE, "To release exclusive lov(%p) owner %p\n",
719 lov->lo_owner = NULL;
720 up_write(&lov->lo_type_guard);
723 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
725 struct l_wait_info lwi = { 0 };
728 while (atomic_read(&lov->lo_active_ios) > 0) {
729 CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
730 PFID(lu_object_fid(lov2lu(lov))),
731 atomic_read(&lov->lo_active_ios));
733 l_wait_event(lov->lo_waitq,
734 atomic_read(&lov->lo_active_ios) == 0, &lwi);
739 static int lov_layout_change(const struct lu_env *unused,
740 struct lov_object *lov, struct lov_stripe_md *lsm,
741 const struct cl_object_conf *conf)
743 enum lov_layout_type llt = lov_type(lsm);
744 union lov_layout_state *state = &lov->u;
745 const struct lov_layout_operations *old_ops;
746 const struct lov_layout_operations *new_ops;
747 struct lov_device *lov_dev = lov_object_dev(lov);
753 LASSERT(lov->lo_type < ARRAY_SIZE(lov_dispatch));
755 env = cl_env_get(&refcheck);
757 RETURN(PTR_ERR(env));
759 LASSERT(llt < ARRAY_SIZE(lov_dispatch));
761 CDEBUG(D_INODE, DFID" from %s to %s\n",
762 PFID(lu_object_fid(lov2lu(lov))),
763 llt2str(lov->lo_type), llt2str(llt));
765 old_ops = &lov_dispatch[lov->lo_type];
766 new_ops = &lov_dispatch[llt];
768 rc = cl_object_prune(env, &lov->lo_cl);
772 rc = old_ops->llo_delete(env, lov, &lov->u);
776 old_ops->llo_fini(env, lov, &lov->u);
778 LASSERT(atomic_read(&lov->lo_active_ios) == 0);
780 CDEBUG(D_INODE, DFID "Apply new layout lov %p, type %d\n",
781 PFID(lu_object_fid(lov2lu(lov))), lov, llt);
783 lov->lo_type = LLT_EMPTY;
785 /* page bufsize fixup */
786 cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
787 lov_page_slice_fixup(lov, NULL);
789 rc = new_ops->llo_init(env, lov_dev, lov, lsm, conf, state);
791 struct obd_device *obd = lov2obd(lov_dev->ld_lov);
793 CERROR("%s: cannot apply new layout on "DFID" : rc = %d\n",
794 obd->obd_name, PFID(lu_object_fid(lov2lu(lov))), rc);
795 new_ops->llo_delete(env, lov, state);
796 new_ops->llo_fini(env, lov, state);
797 /* this file becomes an EMPTY file. */
801 new_ops->llo_install(env, lov, state);
805 cl_env_put(env, &refcheck);
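/*
 * The sequence above, in short (derived from the code itself): prune cached
 * pages, delete and finalize the old layout, reset the object to LLT_EMPTY
 * and undo its page-buffer-size contribution, then initialize and install
 * the new layout; if the new llo_init() fails the object is torn back down
 * and left as an empty file.
 */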
809 /*****************************************************************************
811 * Lov object operations.
814 int lov_object_init(const struct lu_env *env, struct lu_object *obj,
815 const struct lu_object_conf *conf)
817 struct lov_object *lov = lu2lov(obj);
818 struct lov_device *dev = lov_object_dev(lov);
819 const struct cl_object_conf *cconf = lu2cl_conf(conf);
820 union lov_layout_state *set = &lov->u;
821 const struct lov_layout_operations *ops;
822 struct lov_stripe_md *lsm = NULL;
826 init_rwsem(&lov->lo_type_guard);
827 atomic_set(&lov->lo_active_ios, 0);
828 init_waitqueue_head(&lov->lo_waitq);
829 cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
831 lov->lo_type = LLT_EMPTY;
832 if (cconf->u.coc_layout.lb_buf != NULL) {
833 lsm = lov_unpackmd(dev->ld_lov,
834 cconf->u.coc_layout.lb_buf,
835 cconf->u.coc_layout.lb_len);
837 RETURN(PTR_ERR(lsm));
840 /* no locking is necessary, as object is being created */
841 lov->lo_type = lov_type(lsm);
842 ops = &lov_dispatch[lov->lo_type];
843 rc = ops->llo_init(env, dev, lov, lsm, cconf, set);
847 ops->llo_install(env, lov, set);
855 static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
856 const struct cl_object_conf *conf)
858 struct lov_stripe_md *lsm = NULL;
859 struct lov_object *lov = cl2lov(obj);
863 if (conf->coc_opc == OBJECT_CONF_SET &&
864 conf->u.coc_layout.lb_buf != NULL) {
865 lsm = lov_unpackmd(lov_object_dev(lov)->ld_lov,
866 conf->u.coc_layout.lb_buf,
867 conf->u.coc_layout.lb_len);
869 RETURN(PTR_ERR(lsm));
873 if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
874 lov->lo_layout_invalid = true;
875 GOTO(out, result = 0);
878 if (conf->coc_opc == OBJECT_CONF_WAIT) {
879 if (lov->lo_layout_invalid &&
880 atomic_read(&lov->lo_active_ios) > 0) {
881 lov_conf_unlock(lov);
882 result = lov_layout_wait(env, lov);
888 LASSERT(conf->coc_opc == OBJECT_CONF_SET);
890 if ((lsm == NULL && lov->lo_lsm == NULL) ||
891 ((lsm != NULL && lov->lo_lsm != NULL) &&
892 (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
893 (lov->lo_lsm->lsm_pattern == lsm->lsm_pattern))) {
894 /* same version of layout */
895 lov->lo_layout_invalid = false;
896 GOTO(out, result = 0);
899 /* will change layout; check whether there is still active IO */
900 if (atomic_read(&lov->lo_active_ios) > 0) {
901 lov->lo_layout_invalid = true;
902 GOTO(out, result = -EBUSY);
905 result = lov_layout_change(env, lov, lsm, conf);
906 lov->lo_layout_invalid = result != 0;
910 lov_conf_unlock(lov);
912 CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
913 PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
917 static void lov_object_delete(const struct lu_env *env, struct lu_object *obj)
919 struct lov_object *lov = lu2lov(obj);
922 LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
926 static void lov_object_free(const struct lu_env *env, struct lu_object *obj)
928 struct lov_object *lov = lu2lov(obj);
931 LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
933 OBD_SLAB_FREE_PTR(lov, lov_object_kmem);
937 static int lov_object_print(const struct lu_env *env, void *cookie,
938 lu_printer_t p, const struct lu_object *o)
940 return LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
943 int lov_page_init(const struct lu_env *env, struct cl_object *obj,
944 struct cl_page *page, pgoff_t index)
946 return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page,
951  * Implements cl_object_operations::coo_io_init() method for the lov
952  * layer. Dispatches to the appropriate layout IO initialization method.
954 int lov_io_init(const struct lu_env *env, struct cl_object *obj,
957 CL_IO_SLICE_CLEAN(lov_env_io(env), lis_cl);
959 CDEBUG(D_INODE, DFID "io %p type %d ignore/verify layout %d/%d\n",
960 PFID(lu_object_fid(&obj->co_lu)), io, io->ci_type,
961 io->ci_ignore_layout, io->ci_verify_layout);
963 return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_io_init,
964 !io->ci_ignore_layout, env, obj, io);
968  * An implementation of cl_object_operations::coo_attr_get() method for the
969  * lov layer. For the raid0 layout this collects and merges the attributes of all sub-objects.
972 static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
973 struct cl_attr *attr)
975 /* do not take lock, as this function is called under a
976 * spin-lock. Layout is protected from changing by ongoing IO. */
977 return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
980 static int lov_attr_update(const struct lu_env *env, struct cl_object *obj,
981 const struct cl_attr *attr, unsigned valid)
984 * No dispatch is required here, as no layout implements this.
989 int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
990 struct cl_lock *lock, const struct cl_io *io)
992 /* No need to lock because we've taken one refcount of layout. */
993 return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_lock_init, env, obj, lock,
998  * We calculate on which OST the mapping will end. If the length of the
999  * mapping is greater than (stripe_size * stripe_count) then last_stripe
1000  * will be the one just before start_stripe. Otherwise we check whether
1001  * the mapping intersects each OST and find last_stripe that way.
1002  * This function returns last_stripe and also sets the stripe count
1003  * over which the mapping is spread.
1005 * \param lsm [in] striping information for the file
1006 * \param fm_start [in] logical start of mapping
1007 * \param fm_end [in] logical end of mapping
1008 * \param start_stripe [in] starting stripe of the mapping
1009  * \param stripe_count [out] the number of stripes across which the mapping is spread
1012 * \retval last_stripe return the last stripe of the mapping
1014 static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm,
1015 u64 fm_start, u64 fm_end,
1016 int start_stripe, int *stripe_count)
1023 if (fm_end - fm_start > lsm->lsm_stripe_size * lsm->lsm_stripe_count) {
1024 last_stripe = (start_stripe < 1 ? lsm->lsm_stripe_count - 1 :
1026 *stripe_count = lsm->lsm_stripe_count;
1028 for (j = 0, i = start_stripe; j < lsm->lsm_stripe_count;
1029 i = (i + 1) % lsm->lsm_stripe_count, j++) {
1030 if ((lov_stripe_intersects(lsm, i, fm_start, fm_end,
1031 &obd_start, &obd_end)) == 0)
1035 last_stripe = (start_stripe + j - 1) % lsm->lsm_stripe_count;
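/*
 * Worked example (illustrative numbers only): with stripe_size = 1MB and
 * stripe_count = 4, a 10MB mapping exceeds stripe_size * stripe_count = 4MB,
 * so every stripe is used and last_stripe is the stripe just before
 * start_stripe (start_stripe = 2 gives last_stripe = 1).  A mapping touching
 * only two stripes instead leaves the loop above at j = 2, giving
 * last_stripe = (start_stripe + 2 - 1) % 4.
 */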
1042 * Set fe_device and copy extents from local buffer into main return buffer.
1044 * \param fiemap [out] fiemap to hold all extents
1045 * \param lcl_fm_ext [in] array of fiemap extents get from OSC layer
1046 * \param ost_index [in] OST index to be written into the fm_device
1047 * field for each extent
1048 * \param ext_count [in] number of extents to be copied
1049 * \param current_extent [in] where to start copying in the extent array
1051 static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap,
1052 struct fiemap_extent *lcl_fm_ext,
1053 int ost_index, unsigned int ext_count,
1059 for (ext = 0; ext < ext_count; ext++) {
1060 lcl_fm_ext[ext].fe_device = ost_index;
1061 lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
1064 /* Copy fm_extent's from fm_local to return buffer */
1065 to = (char *)fiemap + fiemap_count_to_size(current_extent);
1066 memcpy(to, lcl_fm_ext, ext_count * sizeof(struct fiemap_extent));
1069 #define FIEMAP_BUFFER_SIZE 4096
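/*
 * Rough sizing note (assumes fiemap_size_to_count() is the inverse of
 * fiemap_count_to_size(), i.e. it strips the struct fiemap header and
 * divides by sizeof(struct fiemap_extent)): with the standard 32-byte
 * fiemap header and 56-byte extents, the 4096-byte local buffer holds
 * (4096 - 32) / 56 = 72 extents per cl_object_fiemap() call.
 */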
1072  * A non-zero fe_logical indicates that this is a continuation FIEMAP
1073  * call. The local end offset and the device are sent in the first
1074  * fm_extent. From the saved device index this function calculates the
1075  * stripe_no on which the mapping is to be restarted.
1077  * It also returns fm_end_offset, which is the in-OST offset at which the
1078  * mapping should be restarted. If fm_end_offset=0 is returned then the
1079  * caller will re-calculate the proper offset in the next stripe.
1080 * Note that the first extent is passed to lov_get_info via the value field.
1082 * \param fiemap [in] fiemap request header
1083 * \param lsm [in] striping information for the file
1084 * \param fm_start [in] logical start of mapping
1085 * \param fm_end [in] logical end of mapping
1086 * \param start_stripe [out] starting stripe will be returned in this
1088 static u64 fiemap_calc_fm_end_offset(struct fiemap *fiemap,
1089 struct lov_stripe_md *lsm,
1090 u64 fm_start, u64 fm_end,
1093 u64 local_end = fiemap->fm_extents[0].fe_logical;
1100 if (fiemap->fm_extent_count == 0 ||
1101 fiemap->fm_extents[0].fe_logical == 0)
1104 /* Find out stripe_no from ost_index saved in the fe_device */
1105 for (i = 0; i < lsm->lsm_stripe_count; i++) {
1106 struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
1108 if (lov_oinfo_is_dummy(oinfo))
1111 if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) {
1117 if (stripe_no == -1)
1120 /* If we have finished mapping on the previous device, shift the logical
1121  * offset to the start of the next device */
1122 if (lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end,
1123 &lun_start, &lun_end) != 0 &&
1124 local_end < lun_end) {
1125 fm_end_offset = local_end;
1126 *start_stripe = stripe_no;
1128 /* This is a special value to indicate that caller should
1129 * calculate offset in next stripe. */
1131 *start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count;
1134 return fm_end_offset;
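/*
 * Continuation example (illustrative): if a previous FIEMAP call stopped on
 * OST index 3 at in-OST offset 6MB, the caller passes that back in
 * fm_extents[0] (fe_device = 3, fe_logical = 6MB).  The loop above maps
 * fe_device back to stripe_no, and because 6MB is still below that stripe's
 * lun_end the mapping restarts there: *start_stripe = stripe_no and
 * fm_end_offset = 6MB.
 */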
1137 struct fiemap_state {
1138 struct fiemap *fs_fm;
1145 int fs_start_stripe;
1147 bool fs_device_done;
1152 int fiemap_for_stripe(const struct lu_env *env, struct cl_object *obj,
1153 struct lov_stripe_md *lsm,
1154 struct fiemap *fiemap, size_t *buflen,
1155 struct ll_fiemap_info_key *fmkey, int stripeno,
1156 struct fiemap_state *fs)
1158 struct cl_object *subobj;
1159 struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov;
1160 struct fiemap_extent *fm_ext = &fs->fs_fm->fm_extents[0];
1161 u64 req_fm_len; /* Stores length of required mapping */
1162 u64 len_mapped_single_call;
1166 unsigned int ext_count;
1167 /* EOF for object */
1168 bool ost_eof = false;
1169 /* done with required mapping for this OST? */
1170 bool ost_done = false;
1174 fs->fs_device_done = false;
1175 /* Find out range of mapping on this stripe */
1176 if ((lov_stripe_intersects(lsm, stripeno, fs->fs_start, fs->fs_end,
1177 &lun_start, &obd_object_end)) == 0)
1180 if (lov_oinfo_is_dummy(lsm->lsm_oinfo[stripeno]))
1183 /* If this is a continuation FIEMAP call and we are on the
1184  * starting stripe then lun_start needs to be set to fs_end_offset */
1186 if (fs->fs_end_offset != 0 && stripeno == fs->fs_start_stripe)
1187 lun_start = fs->fs_end_offset;
1189 lun_end = fs->fs_length;
1190 if (lun_end != ~0ULL) {
1191 /* Handle fs->fs_start + fs->fs_length overflow */
1192 if (fs->fs_start + fs->fs_length < fs->fs_start)
1193 fs->fs_length = ~0ULL - fs->fs_start;
1194 lun_end = lov_size_to_stripe(lsm, fs->fs_start + fs->fs_length,
1198 if (lun_start == lun_end)
1201 req_fm_len = obd_object_end - lun_start;
1202 fs->fs_fm->fm_length = 0;
1203 len_mapped_single_call = 0;
1205 /* find lovsub object */
1206 subobj = lov_find_subobj(env, cl2lov(obj), lsm, stripeno);
1208 return PTR_ERR(subobj);
1209 /* If the output buffer is very large and the objects have many
1210 * extents we may need to loop on a single OST repeatedly */
1212 if (fiemap->fm_extent_count > 0) {
1213 /* Don't get too many extents. */
1214 if (fs->fs_cur_extent + fs->fs_cnt_need >
1215 fiemap->fm_extent_count)
1216 fs->fs_cnt_need = fiemap->fm_extent_count -
1220 lun_start += len_mapped_single_call;
1221 fs->fs_fm->fm_length = req_fm_len - len_mapped_single_call;
1222 req_fm_len = fs->fs_fm->fm_length;
1223 fs->fs_fm->fm_extent_count = fs->fs_enough ?
1224 1 : fs->fs_cnt_need;
1225 fs->fs_fm->fm_mapped_extents = 0;
1226 fs->fs_fm->fm_flags = fiemap->fm_flags;
1228 ost_index = lsm->lsm_oinfo[stripeno]->loi_ost_idx;
1230 if (ost_index < 0 || ost_index >= lov->desc.ld_tgt_count)
1231 GOTO(obj_put, rc = -EINVAL);
1232 /* If OST is inactive, return extent with UNKNOWN flag. */
1233 if (!lov->lov_tgts[ost_index]->ltd_active) {
1234 fs->fs_fm->fm_flags |= FIEMAP_EXTENT_LAST;
1235 fs->fs_fm->fm_mapped_extents = 1;
1237 fm_ext[0].fe_logical = lun_start;
1238 fm_ext[0].fe_length = obd_object_end - lun_start;
1239 fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
1244 fs->fs_fm->fm_start = lun_start;
1245 fs->fs_fm->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
1246 memcpy(&fmkey->lfik_fiemap, fs->fs_fm, sizeof(*fs->fs_fm));
1247 *buflen = fiemap_count_to_size(fs->fs_fm->fm_extent_count);
1249 rc = cl_object_fiemap(env, subobj, fmkey, fs->fs_fm, buflen);
1253 ext_count = fs->fs_fm->fm_mapped_extents;
1254 if (ext_count == 0) {
1256 fs->fs_device_done = true;
1257 /* If the last stripe has a hole at the end,
1258  * we need to return */
1259 if (stripeno == fs->fs_last_stripe) {
1260 fiemap->fm_mapped_extents = 0;
1261 fs->fs_finish = true;
1265 } else if (fs->fs_enough) {
1267 * We've collected enough extents and there are
1268 * more extents after it.
1270 fs->fs_finish = true;
1274 /* If we just need the number of extents, go to the next device */
1275 if (fiemap->fm_extent_count == 0) {
1276 fs->fs_cur_extent += ext_count;
1280 /* prepare to copy retrieved map extents */
1281 len_mapped_single_call = fm_ext[ext_count - 1].fe_logical +
1282 fm_ext[ext_count - 1].fe_length -
1285 /* Have we finished mapping on this device? */
1286 if (req_fm_len <= len_mapped_single_call) {
1288 fs->fs_device_done = true;
1291 /* Clear the EXTENT_LAST flag which can be present on
1292 * the last extent */
1293 if (fm_ext[ext_count - 1].fe_flags & FIEMAP_EXTENT_LAST)
1294 fm_ext[ext_count - 1].fe_flags &= ~FIEMAP_EXTENT_LAST;
1295 if (lov_stripe_size(lsm, fm_ext[ext_count - 1].fe_logical +
1296 fm_ext[ext_count - 1].fe_length,
1297 stripeno) >= fmkey->lfik_oa.o_size) {
1299 fs->fs_device_done = true;
1302 fiemap_prepare_and_copy_exts(fiemap, fm_ext, ost_index,
1303 ext_count, fs->fs_cur_extent);
1304 fs->fs_cur_extent += ext_count;
1306 /* Ran out of available extents? */
1307 if (fs->fs_cur_extent >= fiemap->fm_extent_count)
1308 fs->fs_enough = true;
1309 } while (!ost_done && !ost_eof);
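/*
 * The do/while loop above keeps calling cl_object_fiemap() on the same
 * sub-object until either the requested range on this stripe is fully
 * mapped (ost_done), the object's end is reached (ost_eof), or enough
 * extents have been collected overall (fs->fs_enough), copying at most
 * fs->fs_cnt_need extents into the caller's buffer per iteration.
 */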
1311 if (stripeno == fs->fs_last_stripe)
1312 fs->fs_finish = true;
1314 cl_object_put(env, subobj);
1320 * Break down the FIEMAP request and send appropriate calls to individual OSTs.
1321 * This also handles the restarting of FIEMAP calls in case mapping overflows
1322 * the available number of extents in single call.
1324 * \param env [in] lustre environment
1325 * \param obj [in] file object
1326 * \param fmkey [in] fiemap request header and other info
1327  * \param fiemap [out] fiemap buffer holding retrieved map extents
1328  * \param buflen [in/out] max buffer length of @fiemap; when iterating
1329  * over each OST it is used to limit the max mapping size needed
1333 static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj,
1334 struct ll_fiemap_info_key *fmkey,
1335 struct fiemap *fiemap, size_t *buflen)
1337 struct lov_stripe_md *lsm;
1338 struct fiemap *fm_local = NULL;
1341 unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
1343 struct fiemap_state fs = { 0 };
1346 lsm = lov_lsm_addref(cl2lov(obj));
1351 * If the stripe_count > 1 and the application does not understand
1352 * DEVICE_ORDER flag, it cannot interpret the extents correctly.
1354 if (lsm->lsm_stripe_count > 1 && !(fiemap->fm_flags &
1355 FIEMAP_FLAG_DEVICE_ORDER))
1356 GOTO(out_lsm, rc = -ENOTSUPP);
1358 if (lsm_is_released(lsm)) {
1359 if (fiemap->fm_start < fmkey->lfik_oa.o_size) {
1361 * released file, return a minimal FIEMAP if
1362 * request fits in file-size.
1364 fiemap->fm_mapped_extents = 1;
1365 fiemap->fm_extents[0].fe_logical = fiemap->fm_start;
1366 if (fiemap->fm_start + fiemap->fm_length <
1367 fmkey->lfik_oa.o_size)
1368 fiemap->fm_extents[0].fe_length =
1371 fiemap->fm_extents[0].fe_length =
1372 fmkey->lfik_oa.o_size -
1374 fiemap->fm_extents[0].fe_flags |=
1375 FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST;
1377 GOTO(out_lsm, rc = 0);
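/*
 * Example for the released-file branch above (illustrative): a FIEMAP
 * request at fm_start = 0 covering the whole of a 1GB released file returns
 * a single extent of length o_size - fm_start = 1GB, flagged
 * FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST, since there is no OST data
 * to map.
 */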
1380 if (fiemap_count_to_size(fiemap->fm_extent_count) < buffer_size)
1381 buffer_size = fiemap_count_to_size(fiemap->fm_extent_count);
1383 OBD_ALLOC_LARGE(fm_local, buffer_size);
1384 if (fm_local == NULL)
1385 GOTO(out_lsm, rc = -ENOMEM);
1387 fs.fs_fm = fm_local;
1388 fs.fs_cnt_need = fiemap_size_to_count(buffer_size);
1390 fs.fs_start = fiemap->fm_start;
1391 /* fs.fs_start is beyond the end of the file */
1392 if (fs.fs_start > fmkey->lfik_oa.o_size)
1393 GOTO(out_fm_local, rc = -EINVAL);
1395 fs.fs_length = fiemap->fm_length;
1396 /* Calculate start stripe, last stripe and length of mapping */
1397 fs.fs_start_stripe = lov_stripe_number(lsm, fs.fs_start);
1398 fs.fs_end = (fs.fs_length == ~0ULL) ? fmkey->lfik_oa.o_size :
1399 fs.fs_start + fs.fs_length - 1;
1400 /* If fs_length != ~0ULL but fs_start+fs_length-1 exceeds file size */
1401 if (fs.fs_end > fmkey->lfik_oa.o_size) {
1402 fs.fs_end = fmkey->lfik_oa.o_size;
1403 fs.fs_length = fs.fs_end - fs.fs_start;
1406 fs.fs_last_stripe = fiemap_calc_last_stripe(lsm, fs.fs_start, fs.fs_end,
1409 fs.fs_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fs.fs_start,
1411 &fs.fs_start_stripe);
1412 if (fs.fs_end_offset == -EINVAL)
1413 GOTO(out_fm_local, rc = -EINVAL);
1416  * Requested extent count exceeds the fiemap buffer size, shrink our extent count to what the buffer can hold.
1419 if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen)
1420 fiemap->fm_extent_count = fiemap_size_to_count(*buflen);
1421 if (fiemap->fm_extent_count == 0)
1424 fs.fs_finish = false;
1425 fs.fs_enough = false;
1426 fs.fs_cur_extent = 0;
1428 /* Check each stripe */
1429 for (cur_stripe = fs.fs_start_stripe; stripe_count > 0;
1431 cur_stripe = (cur_stripe + 1) % lsm->lsm_stripe_count) {
1432 rc = fiemap_for_stripe(env, obj, lsm, fiemap, buflen, fmkey,
1435 GOTO(out_fm_local, rc);
1438 } /* for each stripe */
1440 /* Indicate that we are returning device offsets unless the file has only a single stripe */
1442 if (lsm->lsm_stripe_count > 1)
1443 fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
1445 if (fiemap->fm_extent_count == 0)
1446 goto skip_last_device_calc;
1448 /* Check if we have reached the last stripe and whether mapping for that
1449 * stripe is done. */
1450 if ((cur_stripe == fs.fs_last_stripe) && fs.fs_device_done)
1451 fiemap->fm_extents[fs.fs_cur_extent - 1].fe_flags |=
1453 skip_last_device_calc:
1454 fiemap->fm_mapped_extents = fs.fs_cur_extent;
1456 OBD_FREE_LARGE(fm_local, buffer_size);
1464 static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
1465 struct lov_user_md __user *lum)
1467 struct lov_object *lov = cl2lov(obj);
1468 struct lov_stripe_md *lsm;
1472 lsm = lov_lsm_addref(lov);
1476 rc = lov_getstripe(cl2lov(obj), lsm, lum);
1481 static int lov_object_layout_get(const struct lu_env *env,
1482 struct cl_object *obj,
1483 struct cl_layout *cl)
1485 struct lov_object *lov = cl2lov(obj);
1486 struct lov_stripe_md *lsm = lov_lsm_addref(lov);
1487 struct lu_buf *buf = &cl->cl_buf;
1493 cl->cl_layout_gen = CL_LAYOUT_GEN_EMPTY;
1498 cl->cl_size = lov_mds_md_size(lsm->lsm_stripe_count, lsm->lsm_magic);
1499 cl->cl_layout_gen = lsm->lsm_layout_gen;
1501 rc = lov_lsm_pack(lsm, buf->lb_buf, buf->lb_len);
1504 RETURN(rc < 0 ? rc : 0);
1507 static loff_t lov_object_maxbytes(struct cl_object *obj)
1509 struct lov_object *lov = cl2lov(obj);
1510 struct lov_stripe_md *lsm = lov_lsm_addref(lov);
1516 maxbytes = lsm->lsm_maxbytes;
1523 static const struct cl_object_operations lov_ops = {
1524 .coo_page_init = lov_page_init,
1525 .coo_lock_init = lov_lock_init,
1526 .coo_io_init = lov_io_init,
1527 .coo_attr_get = lov_attr_get,
1528 .coo_attr_update = lov_attr_update,
1529 .coo_conf_set = lov_conf_set,
1530 .coo_getstripe = lov_object_getstripe,
1531 .coo_layout_get = lov_object_layout_get,
1532 .coo_maxbytes = lov_object_maxbytes,
1533 .coo_fiemap = lov_object_fiemap,
1536 static const struct lu_object_operations lov_lu_obj_ops = {
1537 .loo_object_init = lov_object_init,
1538 .loo_object_delete = lov_object_delete,
1539 .loo_object_release = NULL,
1540 .loo_object_free = lov_object_free,
1541 .loo_object_print = lov_object_print,
1542 .loo_object_invariant = NULL
1545 struct lu_object *lov_object_alloc(const struct lu_env *env,
1546 const struct lu_object_header *unused,
1547 struct lu_device *dev)
1549 struct lov_object *lov;
1550 struct lu_object *obj;
1553 OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, GFP_NOFS);
1556 lu_object_init(obj, NULL, dev);
1557 lov->lo_cl.co_ops = &lov_ops;
1558 lov->lo_type = -1; /* invalid, to catch uninitialized type */
1560 * object io operation vector (cl_object::co_iop) is installed
1561 * later in lov_object_init(), as different vectors are used
1562  * for objects with different layouts.
1564 obj->lo_ops = &lov_lu_obj_ops;
1570 struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
1572 struct lov_stripe_md *lsm = NULL;
1574 lov_conf_freeze(lov);
1575 if (lov->lo_lsm != NULL) {
1576 lsm = lsm_addref(lov->lo_lsm);
1577 CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
1578 lsm, atomic_read(&lsm->lsm_refc),
1579 lov->lo_layout_invalid, current);
1585 int lov_read_and_clear_async_rc(struct cl_object *clob)
1587 struct lu_object *luobj;
1591 luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
1593 if (luobj != NULL) {
1594 struct lov_object *lov = lu2lov(luobj);
1596 lov_conf_freeze(lov);
1597 switch (lov->lo_type) {
1599 struct lov_stripe_md *lsm;
1603 LASSERT(lsm != NULL);
1604 for (i = 0; i < lsm->lsm_stripe_count; i++) {
1605 struct lov_oinfo *loi = lsm->lsm_oinfo[i];
1607 if (lov_oinfo_is_dummy(loi))
1610 if (loi->loi_ar.ar_rc && !rc)
1611 rc = loi->loi_ar.ar_rc;
1612 loi->loi_ar.ar_rc = 0;
1625 EXPORT_SYMBOL(lov_read_and_clear_async_rc);