4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, Whamcloud, Inc.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * Implementation of cl_object for LOV layer.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
41 #define DEBUG_SUBSYSTEM S_LOV
43 #include "lov_cl_internal.h"
49 /*****************************************************************************
/*
 * Per-layout-type method table.  A lov_object dispatches its cl_object
 * operations through one of these, selected by the object's current
 * layout type (see lov_dispatch[] below).
 */
55 struct lov_layout_operations {
/* build layout state from the supplied object configuration */
56 int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
57 struct lov_object *lov,
58 const struct cl_object_conf *conf,
59 union lov_layout_state *state);
/* called when the top-level object is deleted/evicted from the cache */
60 void (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
61 union lov_layout_state *state);
/* release layout-state resources (e.g. raid0 frees its lo_sub[] array) */
62 void (*llo_fini)(const struct lu_env *env, struct lov_object *lov,
63 union lov_layout_state *state);
/* install initialized state into the lov_object proper */
64 void (*llo_install)(const struct lu_env *env, struct lov_object *lov,
65 union lov_layout_state *state);
/* debugging printout of the object */
66 int (*llo_print)(const struct lu_env *env, void *cookie,
67 lu_printer_t p, const struct lu_object *o);
/* layout-specific cl_page initialization */
68 struct cl_page *(*llo_page_init)(const struct lu_env *env,
69 struct cl_object *obj,
/* layout-specific cl_lock initialization (NULL for the empty layout) */
72 int (*llo_lock_init)(const struct lu_env *env,
73 struct cl_object *obj, struct cl_lock *lock,
74 const struct cl_io *io);
/* layout-specific cl_io initialization */
75 int (*llo_io_init)(const struct lu_env *env,
76 struct cl_object *obj, struct cl_io *io);
/* collect object attributes for this layout */
77 int (*llo_getattr)(const struct lu_env *env, struct cl_object *obj,
78 struct cl_attr *attr);
81 /*****************************************************************************
83 * Lov object layout operations.
/* ->llo_install() method for the LLT_EMPTY layout (file without stripes). */
87 static void lov_install_empty(const struct lu_env *env,
88 struct lov_object *lov,
89 union lov_layout_state *state)
92 * File without objects.
/* ->llo_init() method for a file without objects (LLT_EMPTY layout). */
96 static int lov_init_empty(const struct lu_env *env,
97 struct lov_device *dev, struct lov_object *lov,
98 const struct cl_object_conf *conf,
99 union lov_layout_state *state)
/* ->llo_install() method for the RAID0 (striped) layout. */
104 static void lov_install_raid0(const struct lu_env *env,
105 struct lov_object *lov,
106 union lov_layout_state *state)
/*
 * Find or create the cl_object for a single stripe, identified by @fid,
 * on the (lovsub) device @dev.  When the lookup succeeds the resulting
 * object is asserted to belong to the lovsub device type.
 */
111 static struct cl_object *lov_sub_find(const struct lu_env *env,
112 struct cl_device *dev,
113 const struct lu_fid *fid,
114 const struct cl_object_conf *conf)
119 o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
120 LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
/*
 * Attach stripe sub-object @stripe at stripe index @idx to its parent
 * @lov: record the parent and nesting level in the stripe's
 * cl_object_header and store the stripe in r0->lo_sub[idx].  If the
 * stripe already has a different parent (i.e. is referenced by another
 * file), the reference obtained by lov_sub_find() is dropped and the
 * situation is reported as an error.
 */
124 static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
125 struct cl_object *stripe,
126 struct lov_layout_raid0 *r0, int idx)
128 struct cl_object_header *hdr;
129 struct cl_object_header *subhdr;
130 struct cl_object_header *parent;
131 struct lov_oinfo *oinfo;
134 hdr = cl_object_header(lov2cl(lov));
135 subhdr = cl_object_header(stripe);
136 parent = subhdr->coh_parent;
138 oinfo = r0->lo_lsm->lsm_oinfo[idx];
139 CDEBUG(D_INODE, DFID"@%p[%d] -> "DFID"@%p: id: "LPU64" seq: "LPU64
140 " idx: %d gen: %d\n",
141 PFID(&subhdr->coh_lu.loh_fid), subhdr, idx,
142 PFID(&hdr->coh_lu.loh_fid), hdr,
143 oinfo->loi_id, oinfo->loi_seq,
144 oinfo->loi_ost_idx, oinfo->loi_ost_gen);
/* stripe has no parent yet: adopt it */
146 if (parent == NULL) {
147 subhdr->coh_parent = hdr;
148 subhdr->coh_nesting = hdr->coh_nesting + 1;
149 lu_object_ref_add(&stripe->co_lu, "lov-parent", lov);
150 r0->lo_sub[idx] = cl2lovsub(stripe);
151 r0->lo_sub[idx]->lso_super = lov;
152 r0->lo_sub[idx]->lso_index = idx;
/* stripe already owned by another top-level object: complain and drop
 * the reference returned by lov_sub_find() */
155 CERROR("Stripe is already owned by other file (%d).\n", idx);
156 LU_OBJECT_DEBUG(D_ERROR, env, &stripe->co_lu, "\n");
157 LU_OBJECT_DEBUG(D_ERROR, env, lu_object_top(&parent->coh_lu),
159 LU_OBJECT_HEADER(D_ERROR, env, lov2lu(lov), "new\n");
160 cl_object_put(env, stripe);
/*
 * ->llo_init() method for the RAID0 (striped) layout: allocate the
 * r0->lo_sub[] array and create a stripe cl_object for every stripe of
 * the layout, attaching each one to the parent via lov_init_sub().
 *
 * Fix: "&lti->..." had been mangled into "<i->..." by HTML-entity
 * corruption ("&lt" decoded to "<"); restored on the subconf and ofid
 * initializers below.
 */
166 static int lov_init_raid0(const struct lu_env *env,
167 struct lov_device *dev, struct lov_object *lov,
168 const struct cl_object_conf *conf,
169 union lov_layout_state *state)
174 struct cl_object *stripe;
175 struct lov_thread_info *lti = lov_env_info(env);
176 struct cl_object_conf *subconf = &lti->lti_stripe_conf;
177 struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
178 struct lu_fid *ofid = &lti->lti_fid;
179 struct lov_layout_raid0 *r0 = &state->raid0;
182 r0->lo_nr = conf->u.coc_md->lsm->lsm_stripe_count;
183 r0->lo_lsm = conf->u.coc_md->lsm;
184 LASSERT(r0->lo_nr <= lov_targets_nr(dev));
186 OBD_ALLOC_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
187 if (r0->lo_sub != NULL) {
189 subconf->coc_inode = conf->coc_inode;
190 cfs_spin_lock_init(&r0->lo_sub_lock);
192 * Create stripe cl_objects.
194 for (i = 0; i < r0->lo_nr && result == 0; ++i) {
195 struct cl_device *subdev;
196 struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
197 int ost_idx = oinfo->loi_ost_idx;
/* build the stripe object's FID from its ostid */
199 fid_ostid_unpack(ofid, &oinfo->loi_oi,
201 subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
202 subconf->u.coc_oinfo = oinfo;
203 LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
204 stripe = lov_sub_find(env, subdev, ofid, subconf);
206 result = lov_init_sub(env, lov, stripe, r0, i);
208 result = PTR_ERR(stripe);
/* ->llo_delete() method for the LLT_EMPTY layout. */
215 static void lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
216 union lov_layout_state *state)
218 LASSERT(lov->lo_type == LLT_EMPTY);
/*
 * Tear down a single stripe sub-object and wait until it is actually
 * freed.  lovsub_object_fini() clears the parent's ->lo_sub[] slot, and
 * lu_object_free() wakes up the site bucket's lsb_marche_funebre
 * wait-queue -- that is what the wait loop below synchronizes on.
 */
221 static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
222 struct lovsub_object *los, int idx)
224 struct cl_object *sub;
225 struct lov_layout_raid0 *r0;
226 struct lu_site *site;
227 struct lu_site_bkt_data *bkt;
228 cfs_waitlink_t *waiter;
231 LASSERT(r0->lo_sub[idx] == los);
233 sub = lovsub2cl(los);
234 site = sub->co_lu.lo_dev->ld_site;
/* wait-queue bucket that lu_object_free() will signal */
235 bkt = lu_site_bkt_from_fid(site, &sub->co_lu.lo_header->loh_fid);
237 cl_object_kill(env, sub);
238 /* release a reference to the sub-object and ... */
239 lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
240 cl_object_put(env, sub);
242 /* ... wait until it is actually destroyed---sub-object clears its
243 * ->lo_sub[] slot in lovsub_object_fini() */
244 if (r0->lo_sub[idx] == los) {
245 waiter = &lov_env_info(env)->lti_waiter;
246 cfs_waitlink_init(waiter);
247 cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
248 cfs_set_current_state(CFS_TASK_UNINT);
250 /* this wait-queue is signaled at the end of
251 * lu_object_free(). */
252 cfs_set_current_state(CFS_TASK_UNINT);
/* re-check the slot under lo_sub_lock; sleep only while still set */
253 cfs_spin_lock(&r0->lo_sub_lock);
254 if (r0->lo_sub[idx] == los) {
255 cfs_spin_unlock(&r0->lo_sub_lock);
256 cfs_waitq_wait(waiter, CFS_TASK_UNINT);
258 cfs_spin_unlock(&r0->lo_sub_lock);
259 cfs_set_current_state(CFS_TASK_RUNNING);
263 cfs_waitq_del(&bkt->lsb_marche_funebre, waiter);
265 LASSERT(r0->lo_sub[idx] == NULL);
/*
 * ->llo_delete() method for RAID0: the top-level object is being
 * evicted from the cache, so evict every still-present stripe
 * sub-object as well.
 */
268 static void lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
269 union lov_layout_state *state)
271 struct lov_layout_raid0 *r0 = &state->raid0;
275 if (r0->lo_sub != NULL) {
276 for (i = 0; i < r0->lo_nr; ++i) {
277 struct lovsub_object *los = r0->lo_sub[i];
281 * If top-level object is to be evicted from
282 * the cache, so are its sub-objects.
284 lov_subobject_kill(env, lov, los, i);
/* ->llo_fini() method for the LLT_EMPTY layout: nothing allocated. */
290 static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov,
291 union lov_layout_state *state)
293 LASSERT(lov->lo_type == LLT_EMPTY);
/* ->llo_fini() method for RAID0: release the lo_sub[] stripe array. */
296 static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov,
297 union lov_layout_state *state)
299 struct lov_layout_raid0 *r0 = &state->raid0;
302 if (r0->lo_sub != NULL) {
303 OBD_FREE_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
/* ->llo_print() method for LLT_EMPTY: there is nothing but the label. */
309 static int lov_print_empty(const struct lu_env *env, void *cookie,
310 lu_printer_t p, const struct lu_object *o)
312 (*p)(env, cookie, "empty\n");
/*
 * ->llo_print() method for RAID0: print the stripe count and recurse
 * into every present stripe sub-object.
 */
316 static int lov_print_raid0(const struct lu_env *env, void *cookie,
317 lu_printer_t p, const struct lu_object *o)
319 struct lov_object *lov = lu2lov(o);
320 struct lov_layout_raid0 *r0 = lov_r0(lov);
323 (*p)(env, cookie, "stripes: %d:\n", r0->lo_nr);
324 for (i = 0; i < r0->lo_nr; ++i) {
325 struct lu_object *sub;
327 if (r0->lo_sub[i] != NULL) {
328 sub = lovsub2lu(r0->lo_sub[i]);
329 lu_object_print(env, cookie, p, sub);
/* stripe slot empty (e.g. init failed or already destroyed) */
331 (*p)(env, cookie, "sub %d absent\n", i);
337 * Implements cl_object_operations::coo_attr_get() method for an object
338 * without stripes (LLT_EMPTY layout type).
340 * The only attributes this layer is authoritative in this case is
341 * cl_attr::cat_blocks---it's 0.
343 static int lov_attr_get_empty(const struct lu_env *env, struct cl_object *obj,
344 struct cl_attr *attr)
/* a file without stripe objects occupies no blocks */
346 attr->cat_blocks = 0;
/*
 * ->llo_getattr() method for RAID0: merge per-stripe LVB attributes
 * into @attr via the legacy lov_merge_lvb_kms() code; the merged result
 * is cached, guarded by r0->lo_attr_valid.
 */
350 static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
351 struct cl_attr *attr)
353 struct lov_object *lov = cl2lov(obj);
354 struct lov_layout_raid0 *r0 = lov_r0(lov);
355 struct lov_stripe_md *lsm = lov->u.raid0.lo_lsm;
356 struct ost_lvb *lvb = &lov_env_info(env)->lti_lvb;
361 if (!r0->lo_attr_valid) {
363 * Fill LVB with attributes already initialized by the upper
366 cl_attr2lvb(lvb, attr);
370 * XXX that should be replaced with a loop over sub-objects,
371 * doing cl_object_attr_get() on them. But for now, let's
372 * reuse old lov code.
376 * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
377 * happy. It's not needed, because new code uses
378 * ->coh_attr_guard spin-lock to protect consistency of
379 * sub-object attributes.
381 lov_stripe_lock(lsm);
382 result = lov_merge_lvb_kms(lsm, lvb, &kms);
383 lov_stripe_unlock(lsm);
/* copy the merged LVB back into the generic attribute structure */
385 cl_lvb2attr(attr, lvb);
387 r0->lo_attr_valid = 1;
/*
 * Layout-type dispatch table, indexed by enum lov_layout_type
 * (LLT_EMPTY / LLT_RAID0).
 * NOTE(review): "const static" is legal C but the conventional
 * spelling is "static const".
 */
395 const static struct lov_layout_operations lov_dispatch[] = {
/* methods for a file without stripe objects */
397 .llo_init = lov_init_empty,
398 .llo_delete = lov_delete_empty,
399 .llo_fini = lov_fini_empty,
400 .llo_install = lov_install_empty,
401 .llo_print = lov_print_empty,
402 .llo_page_init = lov_page_init_empty,
403 .llo_lock_init = NULL,
404 .llo_io_init = lov_io_init_empty,
405 .llo_getattr = lov_attr_get_empty
/* methods for the RAID0 (striped) layout */
408 .llo_init = lov_init_raid0,
409 .llo_delete = lov_delete_raid0,
410 .llo_fini = lov_fini_raid0,
411 .llo_install = lov_install_raid0,
412 .llo_print = lov_print_raid0,
413 .llo_page_init = lov_page_init_raid0,
414 .llo_lock_init = lov_lock_init_raid0,
415 .llo_io_init = lov_io_init_raid0,
416 .llo_getattr = lov_attr_get_raid0
422 * Performs a double-dispatch based on the layout type of an object.
/*
 * Dispatch "op" through lov_dispatch[] by the object's current layout
 * type WITHOUT taking ->lo_type_guard: the caller must guarantee that
 * the layout cannot change concurrently.
 */
424 #define LOV_2DISPATCH_NOLOCK(obj, op, ...) \
426 struct lov_object *__obj = (obj); \
427 enum lov_layout_type __llt; \
429 __llt = __obj->lo_type; \
430 LASSERT(0 <= __llt && __llt < ARRAY_SIZE(lov_dispatch)); \
431 lov_dispatch[__llt].op(__VA_ARGS__); \
/*
 * Dispatch "op", taking ->lo_type_guard for reading when @lock is true
 * and the current thread does not already own the object (->lo_owner,
 * set by lov_conf_set() during a layout change).
 */
434 #define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...) \
436 struct lov_object *__obj = (obj); \
437 int __lock = !!(lock); \
438 typeof(lov_dispatch[0].op(__VA_ARGS__)) __result; \
440 __lock &= __obj->lo_owner != cfs_current(); \
442 cfs_down_read(&__obj->lo_type_guard); \
443 __result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__); \
445 cfs_up_read(&__obj->lo_type_guard); \
450 * Performs a locked double-dispatch based on the layout type of an object.
/* Always-locked variant of LOV_2DISPATCH_MAYLOCK (lock = 1). */
452 #define LOV_2DISPATCH(obj, op, ...) \
453 LOV_2DISPATCH_MAYLOCK(obj, op, 1, __VA_ARGS__)
/*
 * Locked dispatch for operations that return void: takes
 * ->lo_type_guard for reading unless the current thread already owns
 * the object.
 */
455 #define LOV_2DISPATCH_VOID(obj, op, ...) \
457 struct lov_object *__obj = (obj); \
458 enum lov_layout_type __llt; \
460 if (__obj->lo_owner != cfs_current()) \
461 cfs_down_read(&__obj->lo_type_guard); \
462 __llt = __obj->lo_type; \
463 LASSERT(0 <= __llt && __llt < ARRAY_SIZE(lov_dispatch)); \
464 lov_dispatch[__llt].op(__VA_ARGS__); \
465 if (__obj->lo_owner != cfs_current()) \
466 cfs_up_read(&__obj->lo_type_guard); \
/*
 * Switch @obj from its current layout type to @llt (in practice only
 * LLT_EMPTY -> LLT_RAID0; see lov_conf_set()): initialize the new
 * layout state, prune the object's cached pages and locks inside a
 * nested environment, finalize the old state and install the new one.
 * On failure the freshly initialized new state is finalized instead.
 */
469 static int lov_layout_change(const struct lu_env *env,
470 struct lov_object *obj, enum lov_layout_type llt,
471 const struct cl_object_conf *conf)
474 union lov_layout_state *state = &lov_env_info(env)->lti_state;
475 const struct lov_layout_operations *old_ops;
476 const struct lov_layout_operations *new_ops;
478 LASSERT(0 <= obj->lo_type && obj->lo_type < ARRAY_SIZE(lov_dispatch));
479 LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));
482 old_ops = &lov_dispatch[obj->lo_type];
483 new_ops = &lov_dispatch[llt];
485 result = new_ops->llo_init(env, lu2lov_dev(obj->lo_cl.co_lu.lo_dev),
488 struct cl_object_header *hdr = cl_object_header(&obj->lo_cl);
490 struct lu_env *nested;
/* use a nested environment to flush the object's cached pages/locks */
493 cookie = cl_env_reenter();
494 nested = cl_env_get(&refcheck);
496 cl_object_prune(nested, &obj->lo_cl);
498 result = PTR_ERR(nested);
499 cl_env_put(nested, &refcheck);
500 cl_env_reexit(cookie);
/* cache must now be empty before the layout is swapped */
502 old_ops->llo_fini(env, obj, &obj->u);
503 LASSERT(cfs_list_empty(&hdr->coh_locks));
504 LASSERT(hdr->coh_tree.rnode == NULL);
505 LASSERT(hdr->coh_pages == 0);
507 new_ops->llo_install(env, obj, state);
/* failure path: undo the partially initialized new state */
510 new_ops->llo_fini(env, obj, state);
514 /*****************************************************************************
516 * Lov object operations.
/*
 * lu_object_operations::loo_object_init() for lov: select the layout
 * type from the configuration (RAID0 when an lsm is present, EMPTY
 * otherwise) and run the corresponding ->llo_init()/->llo_install().
 * No locking is needed -- the object is still being created.
 */
520 int lov_object_init(const struct lu_env *env, struct lu_object *obj,
521 const struct lu_object_conf *conf)
523 struct lov_device *dev = lu2lov_dev(obj->lo_dev);
524 struct lov_object *lov = lu2lov(obj);
525 const struct cl_object_conf *cconf = lu2cl_conf(conf);
526 union lov_layout_state *set = &lov_env_info(env)->lti_state;
527 const struct lov_layout_operations *ops;
531 cfs_init_rwsem(&lov->lo_type_guard);
533 /* no locking is necessary, as object is being created */
534 lov->lo_type = cconf->u.coc_md->lsm != NULL ? LLT_RAID0 : LLT_EMPTY;
535 ops = &lov_dispatch[lov->lo_type];
536 result = ops->llo_init(env, dev, lov, cconf, set);
538 ops->llo_install(env, lov, set);
/* init failed: release whatever state llo_init() built */
540 ops->llo_fini(env, lov, set);
/*
 * cl_object_operations::coo_conf_set() for lov.  Only the
 * LLT_EMPTY -> LLT_RAID0 transition is supported; anything else yields
 * -EOPNOTSUPP.  ->lo_type_guard is held for writing across the change
 * and ->lo_owner marks the current thread so that nested dispatches
 * (see LOV_2DISPATCH_*) skip re-taking the guard.
 */
544 static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
545 const struct cl_object_conf *conf)
547 struct lov_object *lov = cl2lov(obj);
552 * Currently only LLT_EMPTY -> LLT_RAID0 transition is supported.
554 LASSERT(lov->lo_owner != cfs_current());
555 cfs_down_write(&lov->lo_type_guard);
556 LASSERT(lov->lo_owner == NULL);
557 lov->lo_owner = cfs_current();
558 if (lov->lo_type == LLT_EMPTY && conf->u.coc_md->lsm != NULL)
559 result = lov_layout_change(env, lov, LLT_RAID0, conf);
561 result = -EOPNOTSUPP;
562 lov->lo_owner = NULL;
563 cfs_up_write(&lov->lo_type_guard);
/* loo_object_delete(): dispatch ->llo_delete() by layout type. */
567 static void lov_object_delete(const struct lu_env *env, struct lu_object *obj)
569 struct lov_object *lov = lu2lov(obj);
572 LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
/* loo_object_free(): finalize layout state, then free the object. */
576 static void lov_object_free(const struct lu_env *env, struct lu_object *obj)
578 struct lov_object *lov = lu2lov(obj);
581 LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
583 OBD_SLAB_FREE_PTR(lov, lov_object_kmem);
/* loo_object_print(): locked dispatch to the layout's ->llo_print(). */
587 static int lov_object_print(const struct lu_env *env, void *cookie,
588 lu_printer_t p, const struct lu_object *o)
590 return LOV_2DISPATCH(lu2lov(o), llo_print, env, cookie, p, o);
/* coo_page_init(): locked dispatch to the layout's ->llo_page_init(). */
593 struct cl_page *lov_page_init(const struct lu_env *env, struct cl_object *obj,
594 struct cl_page *page, cfs_page_t *vmpage)
596 return LOV_2DISPATCH(cl2lov(obj),
597 llo_page_init, env, obj, page, vmpage);
601 * Implements cl_object_operations::clo_io_init() method for lov
602 * layer. Dispatches to the appropriate layout io initialization method.
604 int lov_io_init(const struct lu_env *env, struct cl_object *obj,
607 CL_IO_SLICE_CLEAN(lov_env_io(env), lis_cl);
609 * Do not take lock in case of CIT_MISC io, because
611 * - if this is an io for a glimpse, then we don't care;
613 * - if this not a glimpse (writepage or lock cancellation), then
614 * layout change cannot happen because a page or a lock
617 * - lock ordering (lock mutex nests within layout rw-semaphore)
618 * is obeyed in case of lock cancellation.
/* dispatch to the layout's ->llo_io_init(); lock only for non-MISC io */
620 return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_io_init,
621 io->ci_type != CIT_MISC, env, obj, io);
625 * An implementation of cl_object_operations::clo_attr_get() method for lov
626 * layer. For raid0 layout this collects and merges attributes of all
629 static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
630 struct cl_attr *attr)
632 /* do not take lock, as this function is called under a
633 * spin-lock. Layout is protected from changing by ongoing IO. */
/* lock-free dispatch to the layout's ->llo_getattr() */
634 return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
/* coo_attr_set(): no layout implements this, so no dispatch is done. */
637 static int lov_attr_set(const struct lu_env *env, struct cl_object *obj,
638 const struct cl_attr *attr, unsigned valid)
641 * No dispatch is required here, as no layout implements this.
/* coo_lock_init(): locked dispatch to the layout's ->llo_lock_init(). */
646 int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
647 struct cl_lock *lock, const struct cl_io *io)
649 return LOV_2DISPATCH(cl2lov(obj), llo_lock_init, env, obj, lock, io);
/* cl_object operations vector for lov objects. */
652 static const struct cl_object_operations lov_ops = {
653 .coo_page_init = lov_page_init,
654 .coo_lock_init = lov_lock_init,
655 .coo_io_init = lov_io_init,
656 .coo_attr_get = lov_attr_get,
657 .coo_attr_set = lov_attr_set,
658 .coo_conf_set = lov_conf_set
/* lu_object operations vector for lov objects. */
661 static const struct lu_object_operations lov_lu_obj_ops = {
662 .loo_object_init = lov_object_init,
663 .loo_object_delete = lov_object_delete,
664 .loo_object_release = NULL,
665 .loo_object_free = lov_object_free,
666 .loo_object_print = lov_object_print,
667 .loo_object_invariant = NULL
670 struct lu_object *lov_object_alloc(const struct lu_env *env,
671 const struct lu_object_header *unused,
672 struct lu_device *dev)
674 struct lov_object *lov;
675 struct lu_object *obj;
678 OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, CFS_ALLOC_IO);
681 lu_object_init(obj, NULL, dev);
682 lov->lo_cl.co_ops = &lov_ops;
683 lov->lo_type = -1; /* invalid, to catch uninitialized type */
685 * object io operation vector (cl_object::co_iop) is installed
686 * later in lov_object_init(), as different vectors are used
687 * for object with different layouts.
689 obj->lo_ops = &lov_lu_obj_ops;