4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, Whamcloud, Inc.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * Implementation of cl_object for LOV layer.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
39 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
42 #define DEBUG_SUBSYSTEM S_LOV
44 #include "lov_cl_internal.h"
45 #include <lustre_debug.h>
51 /*****************************************************************************
/*
 * Per-layout-type operation vector for a lov_object.  One instance exists
 * per layout type (see lov_dispatch[] below: LLT_EMPTY and RAID0); the
 * LOV_2DISPATCH*() macros select the vector via lov->lo_type.
 *
 * NOTE(review): this extract is missing lines (embedded original numbering
 * jumps 71 -> 74), including the closing "};" -- verify against full source.
 */
57 struct lov_layout_operations {
        /* build layout state for a newly configured object */
58 int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
59 struct lov_object *lov,
60 const struct cl_object_conf *conf,
61 union lov_layout_state *state);
        /* detach/tear down sub-objects before layout change or destruction */
62 int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
63 union lov_layout_state *state);
        /* release resources held by the layout state */
64 void (*llo_fini)(const struct lu_env *env, struct lov_object *lov,
65 union lov_layout_state *state);
        /* install the prepared state into the object */
66 void (*llo_install)(const struct lu_env *env, struct lov_object *lov,
67 union lov_layout_state *state);
        /* debugging printout; dispatched from lov_object_print() */
68 int (*llo_print)(const struct lu_env *env, void *cookie,
69 lu_printer_t p, const struct lu_object *o);
        /* per-layout cl_page initialization */
70 struct cl_page *(*llo_page_init)(const struct lu_env *env,
71 struct cl_object *obj,
        /* per-layout cl_lock initialization (NULL for the empty layout,
         * see lov_dispatch[] below) */
74 int (*llo_lock_init)(const struct lu_env *env,
75 struct cl_object *obj, struct cl_lock *lock,
76 const struct cl_io *io);
        /* per-layout io initialization */
77 int (*llo_io_init)(const struct lu_env *env,
78 struct cl_object *obj, struct cl_io *io);
        /* per-layout attribute retrieval */
79 int (*llo_getattr)(const struct lu_env *env, struct cl_object *obj,
80 struct cl_attr *attr);
83 /*****************************************************************************
85 * Lov object layout operations.
/*
 * llo_install for LLT_EMPTY.
 * NOTE(review): function bodies for the three stubs below are missing from
 * this extract (original lines 92-112 absent) -- restore from full source.
 */
89 static void lov_install_empty(const struct lu_env *env,
90 struct lov_object *lov,
91 union lov_layout_state *state)
/*
 * llo_init for LLT_EMPTY:
 */
94 * File without objects.
98 static int lov_init_empty(const struct lu_env *env,
99 struct lov_device *dev, struct lov_object *lov,
100 const struct cl_object_conf *conf,
101 union lov_layout_state *state)
/* llo_install for RAID0 */
106 static void lov_install_raid0(const struct lu_env *env,
107 struct lov_object *lov,
108 union lov_layout_state *state)
/*
 * Look up (creating if necessary) the cl_object for a single stripe,
 * identified by @fid, on sub-device @dev, using @conf for setup of a
 * newly allocated object.
 *
 * NOTE(review): declaration of local 'o' and the return statement are
 * missing from this extract.
 */
113 static struct cl_object *lov_sub_find(const struct lu_env *env,
114 struct cl_device *dev,
115 const struct lu_fid *fid,
116 const struct cl_object_conf *conf)
121 o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
        /* a successfully found object must belong to the lovsub device type */
122 LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
/*
 * Adopt stripe @stripe as sub-object number @idx of @lov: set up the
 * parent/child relation in the cl_object_header and record the stripe in
 * r0->lo_sub[idx].  If the stripe already has a parent, another file owns
 * it -- report the error and drop the reference.
 *
 * NOTE(review): extract is missing lines (e.g. the "} else {" between
 * original lines 154 and 157, and the trailing return/RETURN).
 */
126 static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
127 struct cl_object *stripe,
128 struct lov_layout_raid0 *r0, int idx)
130 struct cl_object_header *hdr;
131 struct cl_object_header *subhdr;
132 struct cl_object_header *parent;
133 struct lov_oinfo *oinfo;
136 hdr = cl_object_header(lov2cl(lov));
137 subhdr = cl_object_header(stripe);
        /* non-NULL parent means the stripe is already claimed by some file */
138 parent = subhdr->coh_parent;
140 oinfo = lov->lo_lsm->lsm_oinfo[idx];
141 CDEBUG(D_INODE, DFID"@%p[%d] -> "DFID"@%p: id: "LPU64" seq: "LPU64
142 " idx: %d gen: %d\n",
143 PFID(&subhdr->coh_lu.loh_fid), subhdr, idx,
144 PFID(&hdr->coh_lu.loh_fid), hdr,
145 oinfo->loi_id, oinfo->loi_seq,
146 oinfo->loi_ost_idx, oinfo->loi_ost_gen);
148 if (parent == NULL) {
                /* claim the stripe: link it under this lov object */
149 subhdr->coh_parent = hdr;
150 subhdr->coh_nesting = hdr->coh_nesting + 1;
151 lu_object_ref_add(&stripe->co_lu, "lov-parent", lov);
152 r0->lo_sub[idx] = cl2lovsub(stripe);
153 r0->lo_sub[idx]->lso_super = lov;
154 r0->lo_sub[idx]->lso_index = idx;
        /* error path: stripe is shared with another file -- dump both owners */
157 CERROR("Stripe is already owned by other file (%d).\n", idx);
158 LU_OBJECT_DEBUG(D_ERROR, env, &stripe->co_lu, "\n");
159 LU_OBJECT_DEBUG(D_ERROR, env, lu_object_top(&parent->coh_lu),
161 LU_OBJECT_HEADER(D_ERROR, env, lov2lu(lov), "new\n");
        /* drop the reference taken by lov_sub_find() */
162 cl_object_put(env, stripe);
/*
 * llo_init for the RAID0 layout: take a reference on the stripe metadata
 * (lsm), allocate the r0->lo_sub[] stripe array, and create/adopt one
 * lovsub cl_object per stripe via lov_sub_find()/lov_init_sub().
 *
 * NOTE(review): "<i->" on the subconf/ofid initializers below is extraction
 * damage -- almost certainly "&lti->" with the "&lt" collapsed as an HTML
 * entity.  Several lines (ENTRY/RETURN, error handling, closing braces)
 * are also missing from this extract; do not compile as-is.
 */
168 static int lov_init_raid0(const struct lu_env *env,
169 struct lov_device *dev, struct lov_object *lov,
170 const struct cl_object_conf *conf,
171 union lov_layout_state *state)
176 struct cl_object *stripe;
177 struct lov_thread_info *lti = lov_env_info(env);
178 struct cl_object_conf *subconf = <i->lti_stripe_conf;
179 struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
180 struct lu_fid *ofid = <i->lti_fid;
181 struct lov_layout_raid0 *r0 = &state->raid0;
        /* only V1/V3 striping magics are supported here */
185 if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
186 dump_lsm(D_ERROR, lsm);
187 LASSERTF(0, "magic mismatch, expected %d/%d, actual %d.\n",
188 LOV_MAGIC_V1, LOV_MAGIC_V3, lsm->lsm_magic);
        /* object must not already have an lsm attached */
191 LASSERT(lov->lo_lsm == NULL);
192 lov->lo_lsm = lsm_addref(lsm);
193 r0->lo_nr = lsm->lsm_stripe_count;
194 LASSERT(r0->lo_nr <= lov_targets_nr(dev));
196 OBD_ALLOC_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
197 if (r0->lo_sub != NULL) {
199 subconf->coc_inode = conf->coc_inode;
200 cfs_spin_lock_init(&r0->lo_sub_lock);
        /*
202 * Create stripe cl_objects.
         */
204 for (i = 0; i < r0->lo_nr && result == 0; ++i) {
205 struct cl_device *subdev;
206 struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
207 int ost_idx = oinfo->loi_ost_idx;
                        /* build the OST-object fid for this stripe */
209 fid_ostid_unpack(ofid, &oinfo->loi_oi,
211 subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
212 subconf->u.coc_oinfo = oinfo;
213 LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
214 stripe = lov_sub_find(env, subdev, ofid, subconf);
216 result = lov_init_sub(env, lov, stripe, r0, i);
                        /* NOTE(review): the IS_ERR(stripe) branch header is
                         * missing from the extract above this line */
218 result = PTR_ERR(stripe);
/*
 * llo_delete for LLT_EMPTY: nothing to detach; just sanity-check the type.
 * NOTE(review): body/return missing from this extract.
 */
225 static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
226 union lov_layout_state *state)
228 LASSERT(lov->lo_type == LLT_EMPTY);
/*
 * Kill sub-object @los (stripe @idx of @lov): mark it dying, drop the
 * parent's reference, then wait on the site bucket's wait-queue until
 * lovsub_object_fini() actually clears r0->lo_sub[idx].
 *
 * NOTE(review): extract is missing lines (e.g. the r0 assignment before
 * original line 243, and the wait-loop structure around lines 259-270 --
 * the doubled cfs_set_current_state(CFS_TASK_UNINT) suggests a "while"
 * loop body was dropped).  Verify against full source before changing.
 */
232 static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
233 struct lovsub_object *los, int idx)
235 struct cl_object *sub;
236 struct lov_layout_raid0 *r0;
237 struct lu_site *site;
238 struct lu_site_bkt_data *bkt;
239 cfs_waitlink_t *waiter;
242 LASSERT(r0->lo_sub[idx] == los);
244 sub = lovsub2cl(los);
245 site = sub->co_lu.lo_dev->ld_site;
        /* the bucket whose wait-queue is signalled when the object is freed */
246 bkt = lu_site_bkt_from_fid(site, &sub->co_lu.lo_header->loh_fid);
248 cl_object_kill(env, sub);
249 /* release a reference to the sub-object and ... */
250 lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
251 cl_object_put(env, sub);
253 /* ... wait until it is actually destroyed---sub-object clears its
254 * ->lo_sub[] slot in lovsub_object_fini() */
255 if (r0->lo_sub[idx] == los) {
256 waiter = &lov_env_info(env)->lti_waiter;
257 cfs_waitlink_init(waiter);
258 cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
259 cfs_set_current_state(CFS_TASK_UNINT);
261 /* this wait-queue is signaled at the end of
262 * lu_object_free(). */
263 cfs_set_current_state(CFS_TASK_UNINT);
                /* re-check the slot under the lock to avoid missing a wakeup */
264 cfs_spin_lock(&r0->lo_sub_lock);
265 if (r0->lo_sub[idx] == los) {
266 cfs_spin_unlock(&r0->lo_sub_lock);
267 cfs_waitq_wait(waiter, CFS_TASK_UNINT);
269 cfs_spin_unlock(&r0->lo_sub_lock);
270 cfs_set_current_state(CFS_TASK_RUNNING);
274 cfs_waitq_del(&bkt->lsb_marche_funebre, waiter);
        /* by now the fini callback must have cleared the slot */
276 LASSERT(r0->lo_sub[idx] == NULL);
/*
 * llo_delete for RAID0: if this object holds the last lsm reference,
 * kill every stripe sub-object.
 *
 * NOTE(review): extract is missing lines -- the action taken when
 * lsm_refc > 1 (original line 290) and the NULL check on each los
 * (around line 296) are absent.
 */
279 static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
280 union lov_layout_state *state)
282 struct lov_layout_raid0 *r0 = &state->raid0;
283 struct lov_stripe_md *lsm = lov->lo_lsm;
288 dump_lsm(D_INODE, lsm);
        /* someone else still holds the lsm; presumably bail out here --
         * the branch body is missing from this extract */
289 if (cfs_atomic_read(&lsm->lsm_refc) > 1)
292 if (r0->lo_sub != NULL) {
293 for (i = 0; i < r0->lo_nr; ++i) {
294 struct lovsub_object *los = r0->lo_sub[i];
                        /*
298 * If top-level object is to be evicted from
299 * the cache, so are its sub-objects.
                         */
301 lov_subobject_kill(env, lov, los, i);
/* llo_fini for LLT_EMPTY: nothing allocated, only a type sanity check. */
307 static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov,
308 union lov_layout_state *state)
310 LASSERT(lov->lo_type == LLT_EMPTY);
/*
 * llo_fini for RAID0: free the stripe array and drop the object's lsm
 * reference (which must be the last one, per the LASSERTF).
 */
313 static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov,
314 union lov_layout_state *state)
316 struct lov_layout_raid0 *r0 = &state->raid0;
320 if (r0->lo_sub != NULL) {
321 OBD_FREE_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
325 LASSERTF(cfs_atomic_read(&lov->lo_lsm->lsm_refc) == 1,
326 "actual %d proc %p.\n",
327 cfs_atomic_read(&lov->lo_lsm->lsm_refc), cfs_current());
328 lov_free_memmd(&lov->lo_lsm);
/* llo_print for LLT_EMPTY. */
334 static int lov_print_empty(const struct lu_env *env, void *cookie,
335 lu_printer_t p, const struct lu_object *o)
337 (*p)(env, cookie, "empty\n");
/*
 * llo_print for RAID0: print stripe count, then recursively print every
 * present stripe sub-object (or note its absence).
 * NOTE(review): else-branch header before original line 356 and return
 * are missing from this extract.
 */
341 static int lov_print_raid0(const struct lu_env *env, void *cookie,
342 lu_printer_t p, const struct lu_object *o)
344 struct lov_object *lov = lu2lov(o);
345 struct lov_layout_raid0 *r0 = lov_r0(lov);
348 (*p)(env, cookie, "stripes: %d:\n", r0->lo_nr);
349 for (i = 0; i < r0->lo_nr; ++i) {
350 struct lu_object *sub;
352 if (r0->lo_sub[i] != NULL) {
353 sub = lovsub2lu(r0->lo_sub[i]);
354 lu_object_print(env, cookie, p, sub);
356 (*p)(env, cookie, "sub %d absent\n", i);
/*
362 * Implements cl_object_operations::coo_attr_get() method for an object
363 * without stripes (LLT_EMPTY layout type).
 *
365 * The only attributes this layer is authoritative in this case is
366 * cl_attr::cat_blocks---it's 0.
 */
368 static int lov_attr_get_empty(const struct lu_env *env, struct cl_object *obj,
369 struct cl_attr *attr)
371 attr->cat_blocks = 0;
/*
 * llo_getattr for RAID0: lazily merge per-stripe attributes into a cached
 * LVB via lov_merge_lvb_kms(), then convert back into @attr.  The caller
 * must hold an lsm reference (asserted below), since this runs without
 * the layout type guard.
 *
 * NOTE(review): several lines are missing from this extract (declarations
 * of kms/result, the result check before cl_lvb2attr, RETURN).
 */
375 static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
376 struct cl_attr *attr)
378 struct lov_object *lov = cl2lov(obj);
379 struct lov_layout_raid0 *r0 = lov_r0(lov);
380 struct lov_stripe_md *lsm = lov->lo_lsm;
381 struct ost_lvb *lvb = &lov_env_info(env)->lti_lvb;
387 /* this is called w/o holding type guard mutex, so it must be inside
388 * an on going IO otherwise lsm may be replaced. */
389 LASSERT(cfs_atomic_read(&lsm->lsm_refc) > 1);
        /* merge only once; afterwards the cached attributes are reused */
391 if (!r0->lo_attr_valid) {
                /*
393 * Fill LVB with attributes already initialized by the upper
                 * layer.
                 */
396 cl_attr2lvb(lvb, attr);
                /*
400 * XXX that should be replaced with a loop over sub-objects,
401 * doing cl_object_attr_get() on them. But for now, let's
402 * reuse old lov code.
                 */
                /*
406 * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
407 * happy. It's not needed, because new code uses
408 * ->coh_attr_guard spin-lock to protect consistency of
409 * sub-object attributes.
                 */
411 lov_stripe_lock(lsm);
412 result = lov_merge_lvb_kms(lsm, lvb, &kms);
413 lov_stripe_unlock(lsm);
415 cl_lvb2attr(attr, lvb);
417 r0->lo_attr_valid = 1;
/*
 * Layout dispatch table, indexed by enum lov_layout_type (LLT_EMPTY,
 * then RAID0 -- see lov_object_init()).
 *
 * NOTE(review): "const static" is legal but the conventional order is
 * "static const".  The per-entry "[LLT_*] = {" designators and closing
 * braces are missing from this extract.
 */
425 const static struct lov_layout_operations lov_dispatch[] = {
        /* LLT_EMPTY: file without objects */
427 .llo_init = lov_init_empty,
428 .llo_delete = lov_delete_empty,
429 .llo_fini = lov_fini_empty,
430 .llo_install = lov_install_empty,
431 .llo_print = lov_print_empty,
432 .llo_page_init = lov_page_init_empty,
433 .llo_lock_init = NULL,
434 .llo_io_init = lov_io_init_empty,
435 .llo_getattr = lov_attr_get_empty
        /* RAID0: striped file */
438 .llo_init = lov_init_raid0,
439 .llo_delete = lov_delete_raid0,
440 .llo_fini = lov_fini_raid0,
441 .llo_install = lov_install_raid0,
442 .llo_print = lov_print_raid0,
443 .llo_page_init = lov_page_init_raid0,
444 .llo_lock_init = lov_lock_init_raid0,
445 .llo_io_init = lov_io_init_raid0,
446 .llo_getattr = lov_attr_get_raid0
/*
452 * Performs a double-dispatch based on the layout type of an object.
 *
 * Unlocked variant: calls lov_dispatch[obj->lo_type].op(...) without
 * taking the layout type guard; callers must otherwise guarantee the
 * layout cannot change (see lov_attr_get()).
 *
 * NOTE(review): macro continuation lines (e.g. the opening "({") are
 * missing from this extract; keep comments outside the macro body.
 */
454 #define LOV_2DISPATCH_NOLOCK(obj, op, ...) \
456 struct lov_object *__obj = (obj); \
457 enum lov_layout_type __llt; \
459 __llt = __obj->lo_type; \
460 LASSERT(0 <= __llt && __llt < ARRAY_SIZE(lov_dispatch)); \
461 lov_dispatch[__llt].op(__VA_ARGS__); \
/*
 * Take the layout type guard for reading, unless the current thread is
 * the one that owns it for writing (i.e. is inside lov_conf_set()),
 * in which case taking the read lock would self-deadlock.
 */
464 static inline void lov_conf_freeze(struct lov_object *lov)
466 if (lov->lo_owner != cfs_current())
467 cfs_down_read(&lov->lo_type_guard);
/* Counterpart of lov_conf_freeze(): drop the read lock if we took it. */
470 static inline void lov_conf_thaw(struct lov_object *lov)
472 if (lov->lo_owner != cfs_current())
473 cfs_up_read(&lov->lo_type_guard);
/*
 * Dispatch with optional locking: freezes the layout configuration around
 * the call when @lock is true, then forwards to LOV_2DISPATCH_NOLOCK().
 * Result type is deduced from the op's return type via typeof.
 *
 * NOTE(review): continuation lines (the "({", the __lock conditionals and
 * the closing "})") are missing from this extract.
 */
476 #define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...) \
478 struct lov_object *__obj = (obj); \
479 int __lock = !!(lock); \
480 typeof(lov_dispatch[0].op(__VA_ARGS__)) __result; \
483 lov_conf_freeze(__obj); \
484 __result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__); \
486 lov_conf_thaw(__obj); \
/*
491 * Performs a locked double-dispatch based on the layout type of an object.
 */
493 #define LOV_2DISPATCH(obj, op, ...) \
494 LOV_2DISPATCH_MAYLOCK(obj, op, 1, __VA_ARGS__)
/*
 * Locked dispatch for ops returning void: freeze configuration, invoke
 * lov_dispatch[lo_type].op, thaw.
 *
 * NOTE(review): "do {" / "} while (0)" continuation lines appear to be
 * missing from this extract.
 */
496 #define LOV_2DISPATCH_VOID(obj, op, ...) \
498 struct lov_object *__obj = (obj); \
499 enum lov_layout_type __llt; \
501 lov_conf_freeze(__obj); \
502 __llt = __obj->lo_type; \
503 LASSERT(0 <= __llt && __llt < ARRAY_SIZE(lov_dispatch)); \
504 lov_dispatch[__llt].op(__VA_ARGS__); \
505 lov_conf_thaw(__obj); \
/*
 * Block until all outstanding users of an invalidated layout drop their
 * lsm references (lsm_refc falls to 1); returns immediately when the
 * layout is valid or there is no lsm.  Woken by lov_lsm_decref().
 * NOTE(review): return statements are missing from this extract.
 */
508 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
510 struct l_wait_info lwi = { 0 };
511 struct lov_stripe_md *lsm = lov->lo_lsm;
514 if (!lov->lo_lsm_invalid || lsm == NULL)
517 l_wait_event(lov->lo_waitq, cfs_atomic_read(&lsm->lsm_refc) == 1, &lwi);
/*
 * Switch @lov from its current layout type to @llt: prune cached pages
 * and locks (in a nested environment to avoid re-entrancy issues), run
 * the old layout's delete/fini hooks, verify the object header is empty,
 * then init/install the new layout.  Called with lo_type_guard held for
 * write by lov_conf_set().
 *
 * NOTE(review): extract is missing lines -- declarations of result/cookie/
 * refcheck, the IS_ERR(nested) branch structure around lines 541-546, the
 * error handling after llo_delete/llo_init, and the final RETURN.
 */
521 static int lov_layout_change(const struct lu_env *env,
522 struct lov_object *lov, enum lov_layout_type llt,
523 const struct cl_object_conf *conf)
526 union lov_layout_state *state = &lov_env_info(env)->lti_state;
527 const struct lov_layout_operations *old_ops;
528 const struct lov_layout_operations *new_ops;
530 struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
532 struct lu_env *nested;
535 LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));
536 LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));
        /* prune in a nested env: this function may already be inside a
         * cl_env, so re-enter to get a fresh one */
539 cookie = cl_env_reenter();
540 nested = cl_env_get(&refcheck);
542 cl_object_prune(nested, &lov->lo_cl);
544 result = PTR_ERR(nested);
545 cl_env_put(nested, &refcheck);
546 cl_env_reexit(cookie);
548 old_ops = &lov_dispatch[lov->lo_type];
549 new_ops = &lov_dispatch[llt];
551 result = old_ops->llo_delete(env, lov, &lov->u);
553 old_ops->llo_fini(env, lov, &lov->u);
        /* after fini the header must hold no locks, pages, or radix nodes */
554 LASSERT(cfs_list_empty(&hdr->coh_locks));
555 LASSERT(hdr->coh_tree.rnode == NULL);
556 LASSERT(hdr->coh_pages == 0);
558 result = new_ops->llo_init(env,
559 lu2lov_dev(lov->lo_cl.co_lu.lo_dev),
562 new_ops->llo_install(env, lov, state);
569 /*****************************************************************************
571 * Lov object operations.
/*
 * lu_object_operations::loo_object_init() for lov: choose the layout type
 * from the configuration (RAID0 when an lsm is present, EMPTY otherwise),
 * then run that layout's init and install hooks.  No locking: the object
 * is still being created and is not yet visible to others.
 *
 * NOTE(review): the "if (result == 0) ... else" structure around lines
 * 593-596 and the RETURN are missing from this extract.
 */
575 int lov_object_init(const struct lu_env *env, struct lu_object *obj,
576 const struct lu_object_conf *conf)
578 struct lov_device *dev = lu2lov_dev(obj->lo_dev);
579 struct lov_object *lov = lu2lov(obj);
580 const struct cl_object_conf *cconf = lu2cl_conf(conf);
581 union lov_layout_state *set = &lov_env_info(env)->lti_state;
582 const struct lov_layout_operations *ops;
586 cfs_init_rwsem(&lov->lo_type_guard);
587 cfs_waitq_init(&lov->lo_waitq);
589 /* no locking is necessary, as object is being created */
590 lov->lo_type = cconf->u.coc_md->lsm != NULL ? LLT_RAID0 : LLT_EMPTY;
591 ops = &lov_dispatch[lov->lo_type];
592 result = ops->llo_init(env, dev, lov, cconf, set);
594 ops->llo_install(env, lov, set);
        /* presumably the failure path -- branch header missing in extract */
596 ops->llo_fini(env, lov, set);
/*
 * cl_object_operations::coo_conf_set() for lov: apply a new layout
 * configuration under lo_type_guard held for write (lo_owner marks the
 * writer so nested dispatches skip the read lock -- see lov_conf_freeze()).
 * Handles invalidate-only and validate-only requests, then performs an
 * LLT_EMPTY <-> LLT_RAID0 transition via lov_layout_change().
 *
 * NOTE(review): extract is missing lines -- the case labels of the switch
 * on lo_type (LLT_EMPTY/LLT_RAID0), the "if (lsm == NULL)" guard before
 * line 636, the "out:" label, and the RETURN.
 */
600 static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
601 const struct cl_object_conf *conf)
603 struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
604 struct lov_object *lov = cl2lov(obj);
        /*
609 * Only LLT_EMPTY <-> LLT_RAID0 transitions are supported.
         */
        /* must not recurse: this thread cannot already own the guard */
611 LASSERT(lov->lo_owner != cfs_current());
612 cfs_down_write(&lov->lo_type_guard);
613 LASSERT(lov->lo_owner == NULL);
614 lov->lo_owner = cfs_current();
        /* request to invalidate the current layout only */
616 if (conf->coc_invalidate) {
617 lov->lo_lsm_invalid = 1;
618 GOTO(out, result = 0);
        /* request to (re)validate: wait out users of the stale layout */
621 if (conf->coc_validate_only) {
622 if (!lov->lo_lsm_invalid)
623 GOTO(out, result = 0);
625 lov_layout_wait(env, lov);
626 /* fall through to set up new layout */
629 switch (lov->lo_type) {
632 result = lov_layout_change(env, lov, LLT_RAID0, conf);
636 result = lov_layout_change(env, lov, LLT_EMPTY, conf);
        /* replacing one striped layout with a different one is unsupported */
637 else if (lov_stripe_md_cmp(lov->lo_lsm, lsm))
638 result = -EOPNOTSUPP;
643 lov->lo_lsm_invalid = result != 0;
647 lov->lo_owner = NULL;
648 cfs_up_write(&lov->lo_type_guard);
/* loo_object_delete: dispatch to the layout's llo_delete hook. */
652 static void lov_object_delete(const struct lu_env *env, struct lu_object *obj)
654 struct lov_object *lov = lu2lov(obj);
657 LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
/* loo_object_free: run the layout's llo_fini, then free the slab object. */
661 static void lov_object_free(const struct lu_env *env, struct lu_object *obj)
663 struct lov_object *lov = lu2lov(obj);
666 LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
668 OBD_SLAB_FREE_PTR(lov, lov_object_kmem);
/* loo_object_print: dispatch to the layout's llo_print hook. */
672 static int lov_object_print(const struct lu_env *env, void *cookie,
673 lu_printer_t p, const struct lu_object *o)
675 return LOV_2DISPATCH(lu2lov(o), llo_print, env, cookie, p, o);
/* coo_page_init: locked dispatch to the layout's llo_page_init hook. */
678 struct cl_page *lov_page_init(const struct lu_env *env, struct cl_object *obj,
679 struct cl_page *page, cfs_page_t *vmpage)
681 return LOV_2DISPATCH(cl2lov(obj),
682 llo_page_init, env, obj, page, vmpage);
/*
686 * Implements cl_object_operations::clo_io_init() method for lov
687 * layer. Dispatches to the appropriate layout io initialization method.
 *
 * Holds an lsm reference in lio->lis_lsm for the duration of the io.
 * NOTE(review): the "struct cl_io *io" parameter line appears to be
 * missing from this extract (numbering jumps 689 -> 692).
 */
689 int lov_io_init(const struct lu_env *env, struct cl_object *obj,
692 struct lov_io *lio = lov_env_io(env);
694 CL_IO_SLICE_CLEAN(lov_env_io(env), lis_cl);
696 /* hold lsm before initializing because io relies on it */
697 lio->lis_lsm = lov_lsm_addref(cl2lov(obj));
        /*
700 * Do not take lock in case of CIT_MISC io, because
         *
702 * - if this is an io for a glimpse, then we don't care;
         *
704 * - if this not a glimpse (writepage or lock cancellation), then
705 * layout change cannot happen because a page or a lock
         *   is already held;
         *
708 * - lock ordering (lock mutex nests within layout rw-semaphore)
709 * is obeyed in case of lock cancellation.
         */
711 return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_io_init,
712 io->ci_type != CIT_MISC, env, obj, io);
/*
716 * An implementation of cl_object_operations::clo_attr_get() method for lov
717 * layer. For raid0 layout this collects and merges attributes of all
 * sub-objects (see lov_attr_get_raid0()).
 */
720 static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
721 struct cl_attr *attr)
723 /* do not take lock, as this function is called under a
724 * spin-lock. Layout is protected from changing by ongoing IO. */
725 return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
/* coo_attr_set: intentionally a no-op at this layer. */
728 static int lov_attr_set(const struct lu_env *env, struct cl_object *obj,
729 const struct cl_attr *attr, unsigned valid)
        /*
732 * No dispatch is required here, as no layout implements this.
         */
/* coo_lock_init: locked dispatch to the layout's llo_lock_init hook. */
737 int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
738 struct cl_lock *lock, const struct cl_io *io)
740 return LOV_2DISPATCH(cl2lov(obj), llo_lock_init, env, obj, lock, io);
/* cl_object operation vector for the lov layer. */
743 static const struct cl_object_operations lov_ops = {
744 .coo_page_init = lov_page_init,
745 .coo_lock_init = lov_lock_init,
746 .coo_io_init = lov_io_init,
747 .coo_attr_get = lov_attr_get,
748 .coo_attr_set = lov_attr_set,
749 .coo_conf_set = lov_conf_set
/* lu_object operation vector for the lov layer. */
752 static const struct lu_object_operations lov_lu_obj_ops = {
753 .loo_object_init = lov_object_init,
754 .loo_object_delete = lov_object_delete,
755 .loo_object_release = NULL,
756 .loo_object_free = lov_object_free,
757 .loo_object_print = lov_object_print,
758 .loo_object_invariant = NULL
/*
 * Allocate and minimally initialize a lov_object from its slab cache;
 * full per-layout setup is deferred to lov_object_init().
 *
 * NOTE(review): the "if (lov != NULL)" guard, obj assignment, and the
 * trailing RETURN(obj) are missing from this extract.
 */
761 struct lu_object *lov_object_alloc(const struct lu_env *env,
762 const struct lu_object_header *unused,
763 struct lu_device *dev)
765 struct lov_object *lov;
766 struct lu_object *obj;
769 OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, CFS_ALLOC_IO);
772 lu_object_init(obj, NULL, dev);
773 lov->lo_cl.co_ops = &lov_ops;
774 lov->lo_type = -1; /* invalid, to catch uninitialized type */
        /*
776 * object io operation vector (cl_object::co_iop) is installed
777 * later in lov_object_init(), as different vectors are used
778 * for object with different layouts.
         */
780 obj->lo_ops = &lov_lu_obj_ops;
/*
 * Take a reference on the object's lsm under the layout guard; returns
 * NULL when there is no lsm or the layout has been invalidated.
 * NOTE(review): lov_conf_thaw()/return lines are missing from this extract.
 */
786 struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
788 struct lov_stripe_md *lsm = NULL;
790 lov_conf_freeze(lov);
791 if (!lov->lo_lsm_invalid && lov->lo_lsm != NULL) {
792 lsm = lsm_addref(lov->lo_lsm);
793 CDEBUG(D_INODE, "lsm %p addref %d by %p.\n",
794 lsm, cfs_atomic_read(&lsm->lsm_refc), cfs_current());
/*
 * Drop an lsm reference taken by lov_lsm_addref(); wake waiters in
 * lov_layout_wait() when an invalidated layout loses its last user.
 */
800 void lov_lsm_decref(struct lov_object *lov, struct lov_stripe_md *lsm)
805 CDEBUG(D_INODE, "lsm %p decref %d by %p.\n",
806 lsm, cfs_atomic_read(&lsm->lsm_refc), cfs_current());
808 if (lov_free_memmd(&lsm) <= 1 && lov->lo_lsm_invalid)
809 cfs_waitq_signal(&lov->lo_waitq);
/*
 * Public accessor: locate the lov slice of @clobj and return a referenced
 * lsm (NULL if absent/invalid).  Pair with lov_lsm_put().
 * NOTE(review): the device-type argument to lu_object_locate(), NULL
 * checks, and returns are missing from this extract.
 */
812 struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj)
814 struct lu_object *luobj;
815 struct lov_stripe_md *lsm = NULL;
820 luobj = lu_object_locate(&cl_object_header(clobj)->coh_lu,
823 lsm = lov_lsm_addref(lu2lov(luobj));
826 EXPORT_SYMBOL(lov_lsm_get);
/* Public counterpart of lov_lsm_get(): drop the lsm reference. */
828 void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm)
830 struct lu_object *luobj;
832 if (clobj == NULL || lsm == NULL)
835 luobj = lu_object_locate(&cl_object_header(clobj)->coh_lu,
837 LASSERT(luobj != NULL);
839 lov_lsm_decref(lu2lov(luobj), lsm);
841 EXPORT_SYMBOL(lov_lsm_put);
/*
 * Collect the first recorded per-stripe async write error (loi_ar.ar_rc)
 * for @clob, clearing each as it is read.  Runs under the frozen layout
 * configuration.
 *
 * NOTE(review): extract is missing lines -- the rc declaration, the switch
 * case labels, the lsm assignment before line 861, lov_conf_thaw(), and
 * the final RETURN(rc).
 */
843 int lov_read_and_clear_async_rc(struct cl_object *clob)
845 struct lu_object *luobj;
849 luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
852 struct lov_object *lov = lu2lov(luobj);
854 lov_conf_freeze(lov);
855 switch (lov->lo_type) {
857 struct lov_stripe_md *lsm;
861 LASSERT(lsm != NULL);
862 for (i = 0; i < lsm->lsm_stripe_count; i++) {
863 struct lov_oinfo *loi = lsm->lsm_oinfo[i];
                        /* keep the first non-zero error; always clear */
864 if (loi->loi_ar.ar_rc && !rc)
865 rc = loi->loi_ar.ar_rc;
866 loi->loi_ar.ar_rc = 0;
878 EXPORT_SYMBOL(lov_read_and_clear_async_rc);