 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Internal interfaces of LOV layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#ifndef LOV_CL_INTERNAL_H
#define LOV_CL_INTERNAL_H

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else
# include <liblustre.h>
#endif

#include <cl_object.h>
#include "lov_internal.h"
 * Logical object volume layer. This layer implements data striping (raid0).
 *
 * At the lov layer a top-entity (object, page, lock, io) is connected to one
 * or more sub-entities: a top-object, representing a file, is connected to a
 * set of sub-objects, each representing a stripe; a file-level top-lock is
 * connected to a set of per-stripe sub-locks; a top-page is connected to a
 * (single) sub-page; and a top-level IO is connected to a set of (potentially
 * concurrent) sub-IOs.
 *
 * Sub-object, sub-page, and sub-io have well-defined top-object, top-page,
 * and top-io respectively, while a single sub-lock can be part of multiple
 * top-locks.
 *
 * Reference counting models are different for different types of entities:
 *
 *     - a top-object keeps a reference to its sub-objects, and destroys them
 *       when it is destroyed.
 *
 *     - a top-page keeps a reference to its sub-page, and destroys it when
 *       the top-page itself is destroyed.
 *
 *     - a sub-lock keeps a reference to its top-locks. A top-lock keeps a
 *       reference (and a hold, see cl_lock_hold()) on its sub-locks while it
 *       is actively using them (that is, in the cl_lock_state::CLS_QUEUING,
 *       cl_lock_state::CLS_ENQUEUED, and cl_lock_state::CLS_HELD states).
 *       When moving into the cl_lock_state::CLS_CACHED state, the top-lock
 *       releases the hold. From this moment the top-lock has only a 'weak'
 *       reference to its sub-locks. This reference is protected by the
 *       top-lock cl_lock::cll_guard, and will be automatically cleared by the
 *       sub-lock when the latter is destroyed. When a sub-lock is canceled,
 *       a reference to it is removed from the top-lock array, and the
 *       top-lock is moved into the CLS_NEW state. It is guaranteed that all
 *       sub-locks exist while their top-lock is in the CLS_HELD or CLS_CACHED
 *       states.
 *
 *     - IOs are not reference counted.
 *
 * To implement a connection between top and sub entities, the lov layer is
 * split into two pieces: lov ("upper half") and lovsub ("bottom half"), both
 * implementing the full set of cl-interfaces. For example, a top-object has
 * vvp and lov layers, and its sub-objects have lovsub and osc layers. The
 * lovsub layer is used to track the child-parent relationship.
 */
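
/*
 * Illustrative sketch of the child-parent linkage described above (not code
 * from this module): a raid0 top-object reaches its sub-objects through the
 * lo_sub[] array, and every sub-object points back to its parent through
 * lso_super (both declared later in this header); "stripe" is a hypothetical
 * stripe index.
 *
 *      struct lov_object    *top = ...;
 *      struct lovsub_object *sub = lov_r0(top)->lo_sub[stripe];
 *
 *      if (sub != NULL)
 *              LASSERT(sub->lso_super == top);
 */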
struct lovsub_device;
struct lovsub_object;

enum lov_device_flags {
        LOV_DEV_INITIALIZED = 1 << 0
};
 * Resources that are used in the memory-cleaning path, and whose allocation
 * cannot fail even when memory is tight. They are preallocated in sufficient
 * quantities in lov_device::ld_emrg[], and access to them is serialized by
 * lov_device::ld_mutex.
struct lov_device_emerg {
 * Page list used to submit IO when memory is under pressure.
        struct cl_page_list emrg_page_list;
 * Sub-ios shared by all threads accessing this device when memory is
 * too low to allocate sub-ios dynamically.
        struct cl_io        emrg_subio;
 * Environments used by the sub-ios in
 * lov_device_emerg::emrg_subio.
        struct lu_env      *emrg_env;
 * Refchecks for lov_device_emerg::emrg_env.
 * XXX Locking of lov-private data is missing.
        struct cl_device          ld_cl;
        struct lov_obd           *ld_lov;
        /** size of lov_device::ld_target[] array */
        struct lovsub_device    **ld_target;
        /** Emergency resources used in memory-cleansing paths. */
        struct lov_device_emerg **ld_emrg;
 * Serializes access to lov_device::ld_emrg in low-memory conditions.
        cfs_mutex_t               ld_mutex;
enum lov_layout_type {
        /** empty file without body */
 * lov-specific file state.
 *
 * A lov object has a particular layout type, determining how the top-object
 * is built on top of sub-objects. The layout type can change dynamically.
 * When this happens, the lov_object::lo_type_guard semaphore is taken in
 * exclusive mode, all state pertaining to the old layout type is destroyed,
 * and new state is constructed. All object methods take said semaphore in
 * the shared mode, providing serialization against transitions between
 * layout types.
 *
 * To avoid multiple `if' or `switch' statements selecting behavior for the
 * current layout type, object methods perform a double dispatch, invoking
 * the function corresponding to the current layout type (see the
 * illustrative sketch following the lov_object fields below).
        struct cl_object       lo_cl;
 * Serializes object operations with transitions between layout types.
 *
 * This semaphore is taken in shared mode by all object methods, and
 * is taken in exclusive mode when the object type is changed.
 *
 * \see lov_object::lo_type
        cfs_rw_semaphore_t     lo_type_guard;
 * Type of an object. Protected by lov_object::lo_type_guard.
        enum lov_layout_type   lo_type;
 * True if the layout is valid. This bit is cleared when the layout lock
 * is lost.
        unsigned               lo_lsm_invalid:1;
        struct lov_stripe_md  *lo_lsm;
 * Waitq used to wait until no one else is using lo_lsm.
        cfs_waitq_t            lo_waitq;
        union lov_layout_state {
                struct lov_layout_raid0 {
 * When this is true, lov_object::lo_attr contains valid, up-to-date
 * attributes for the top-level object. This field is reset to 0 when
 * attributes of any sub-object change.

 * Array of sub-objects. Allocated when the top-object is created
 * (lov_init_raid0()).
 *
 * The top-object is a strict master of its sub-objects: it is created
 * before them, and outlives its children (the latter is necessary so
 * that basic functions like cl_object_top() always work). The top-object
 * keeps a reference on every sub-object.
 *
 * When the top-object is destroyed (lov_delete_raid0()) it releases its
 * reference to a sub-object and waits until the latter is finally
 * destroyed.
                        struct lovsub_object **lo_sub;
                        cfs_spinlock_t         lo_sub_lock;
 * Cached object attribute, built from sub-object attributes.
                        struct cl_attr         lo_attr;
                struct lov_layout_state_empty {
 * Thread that acquired lov_object::lo_type_guard in an exclusive mode.
        cfs_task_t            *lo_owner;
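
/*
 * Illustrative sketch of the double dispatch mentioned in the comment on the
 * lov-specific file state above. The names lov_layout_operations,
 * lov_dispatch and llo_delete are hypothetical, shown only to make the
 * pattern concrete; the layout-type constants stand for the enumerators of
 * enum lov_layout_type, and only lo_type and the lov_layout_state union come
 * from this header.
 *
 *      struct lov_layout_operations {
 *              void (*llo_delete)(const struct lu_env *env,
 *                                 struct lov_object *lov,
 *                                 union lov_layout_state *state);
 *              ...
 *      };
 *
 *      static const struct lov_layout_operations lov_dispatch[] = {
 *              [LLT_EMPTY] = { ... },
 *              [LLT_RAID0] = { ... }
 *      };
 *
 *      lov_dispatch[lov->lo_type].llo_delete(env, lov, &lov->u);
 */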
 * Flags that a top-lock can set on each of its sub-locks.
/** Top-lock acquired a hold (cl_lock_hold()) on a sub-lock. */
 * State lov_lock keeps for each sub-lock.
struct lov_lock_sub {
        /** sub-lock itself */
        struct lovsub_lock  *sub_lock;
        /** An array of per-sub-lock flags, taken from enum lov_sub_flags */
        struct cl_lock_descr sub_descr;
        struct cl_lock_descr sub_got;
 * lov-specific lock state.
        struct cl_lock_slice   lls_cl;
        /** Number of sub-locks in this lock */
 * Number of existing sub-locks.
        unsigned               lls_nr_filled;
 * Set when a sub-lock was canceled, while the top-lock was being unlocked.
        int                    lls_cancel_race:1;
 * An array of sub-locks.
 *
 * There are two issues with managing sub-locks:
 *
 *      - sub-locks are concurrently canceled, and
 *
 *      - sub-locks are shared with other top-locks.
 *
 * To manage cancellation, the top-lock acquires a hold on a sub-lock
 * (lov_sublock_adopt()) when the latter is inserted into
 * lov_lock::lls_sub[]. This hold is released (lov_sublock_release())
 * when the top-lock is going into the CLS_CACHED state or is destroyed.
 * The hold prevents the sub-lock from being cancelled.
 *
 * Sub-lock sharing means, among other things, that a top-lock that is
 * in the process of creation (i.e., not yet inserted into the lock
 * list) is already accessible to other threads once at least one of its
 * sub-locks is created, see lov_lock_sub_init().
 *
 * A sub-lock can be in one of the following states:
 *
 *      - doesn't exist: lov_lock::lls_sub[]::sub_lock == NULL. Such a
 *        sub-lock was either never created (the top-lock is in the
 *        CLS_NEW state), or it was created, then canceled, then
 *        destroyed (lov_lock_unlink() cleared the sub-lock pointer in
 *        the top-lock).
 *
 *      - the sub-lock exists and is on hold
 *        (lov_lock::lls_sub[]::sub_flags & LSF_HELD). This is the
 *        normal state of a sub-lock while the top-lock is in the
 *        CLS_HELD or CLS_CACHED states.
 *
 *      - the sub-lock exists, but is not held by the top-lock. This
 *        happens after the top-lock released its hold on the sub-locks
 *        before going into cache (lov_lock_unuse()).
 *
 * \todo To support wide-striping, the array has to be replaced with a
 * set of queues to avoid scanning.
        struct lov_lock_sub   *lls_sub;
 * Original description with which the lock was enqueued.
        struct cl_lock_descr   lls_orig;
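
/*
 * Illustrative sketch of inspecting one lov_lock::lls_sub[] slot according
 * to the three states listed above (a sketch only; "lck" is a hypothetical
 * struct lov_lock pointer, and sub_flags is the per-slot flags word referred
 * to by the comment, holding bits from enum lov_sub_flags such as LSF_HELD):
 *
 *      struct lov_lock_sub *lls = &lck->lls_sub[i];
 *
 *      if (lls->sub_lock == NULL)
 *              ... never created, or canceled and then destroyed ...
 *      else if (lls->sub_flags & LSF_HELD)
 *              ... held by this top-lock (top-lock CLS_HELD/CLS_CACHED) ...
 *      else
 *              ... exists, but the hold was released (lov_lock_unuse()) ...
 */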
        struct cl_page_slice lps_cl;

struct lovsub_device {
        struct cl_device        acid_cl;
        struct lov_device      *acid_super;
        struct cl_device       *acid_next;

struct lovsub_object {
        struct cl_object_header lso_header;
        struct cl_object        lso_cl;
        struct lov_object      *lso_super;
 * A link between a top-lock and a sub-lock. A separate data structure is
 * necessary because top-locks and sub-locks are in an M:N relationship.
 *
 * \todo This can be optimized for the (by far) most frequent case of a
 * single top-lock per sub-lock.
struct lov_lock_link {
        struct lov_lock *lll_super;
        /** An index within the parent lock. */
 * A linkage into the per-sub-lock list of all corresponding top-locks,
 * hanging off lovsub_lock::lss_parents.
 * Lock state at the lovsub layer.
        struct cl_lock_slice  lss_cl;
 * List of top-locks that have the given sub-lock as their part. Protected
 * by the cl_lock::cll_guard mutex.
        cfs_list_t            lss_parents;
 * Top-lock that initiated the current operation on this sub-lock. This is
 * only set during top-to-bottom lock operations like enqueue, and is used
 * to optimize state change notification. Protected by the
 * cl_lock::cll_guard mutex.
 *
 * \see lovsub_lock_state_one().
        struct cl_lock       *lss_active;
 * Describes the environment settings for sub-locks.
struct lov_sublock_env {
        const struct lu_env *lse_env;
        struct cl_io        *lse_io;
        struct lov_io_sub   *lse_sub;

        struct cl_page_slice lsb_cl;
struct lov_thread_info {
        struct cl_object_conf   lti_stripe_conf;
        struct lu_fid           lti_fid;
        struct cl_lock_descr    lti_ldescr;
        struct ost_lvb          lti_lvb;
        struct cl_2queue        lti_cl2q;
        struct cl_lock_closure  lti_closure;
        cfs_waitlink_t          lti_waiter;
 * State that lov_io maintains for every sub-io.

 * Sub-io for a stripe. Ideally sub-ios can be stopped and resumed
 * independently, with lov acting as a scheduler to maximize overall
 * throughput.
        struct cl_io      *sub_io;
 * Linkage into a list (hanging off lov_io::lis_active) of all
 * sub-ios active for the current IO iteration.
        cfs_list_t         sub_linkage;
 * True iff cl_io_init() was successfully executed against
 * lov_io_sub::sub_io.
        int                sub_io_initialized;
 * True iff lov_io_sub::sub_io and lov_io_sub::sub_env weren't
 * allocated, but borrowed from a per-device emergency pool.

 * Environment in which the sub-io executes.
        struct lu_env     *sub_env;
 * The environment's refcheck.
 * IO state private for LOV.
        struct cl_io_slice     lis_cl;
 * Pointer to the object slice. This is a duplicate of
 * lov_io::lis_cl::cis_object.
        struct lov_object     *lis_object;
 * Lov stripe - this determines how this IO fans out.
 * Holds a refcount on the lsm so it cannot go away during the IO.
        struct lov_stripe_md  *lis_lsm;
 * Original end-of-IO position for this IO, set by the upper layer as
 * cl_io::u::ci_rw::pos + cl_io::u::ci_rw::count. lov remembers this,
 * changes pos and count to fit the IO into a single stripe, and uses the
 * saved value to determine when IO iterations have to stop.
 *
 * This is used only for CIT_READ and CIT_WRITE IOs.
        loff_t                 lis_io_endpos;
 * Starting position within the file, for the current IO loop iteration
 * (stripe); used by ci_io_loop().

 * End position within the file, for the current stripe IO. This is
 * exclusive (i.e., the next offset after the last byte affected by the IO).
        int                    lis_stripe_count;
        int                    lis_active_subios;
 * The index of lis_single_subio in the lis_subs array.
        int                    lis_single_subio_index;
        struct cl_io           lis_single_subio;
 * Size of the lis_subs array; in fact, the highest stripe #.
        struct lov_io_sub     *lis_subs;
 * List of active sub-ios.
        cfs_list_t             lis_active;
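
/*
 * Illustrative sketch of how lis_io_endpos is meant to drive the IO
 * iterations described above (a sketch, not the actual iterator code; the
 * crw_pos/crw_count spellings of the cl_io::u::ci_rw fields are assumed
 * here):
 *
 *      lio->lis_io_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
 *
 *      ... each iteration then clips crw_pos/crw_count to a single stripe,
 *          and the IO loop stops once crw_pos reaches lis_io_endpos ...
 */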
        struct lov_sublock_env  ls_subenv;

 * State of transfer for lov.
        struct cl_req_slice lr_cl;

 * State of transfer for lovsub.
        struct cl_req_slice lsrq_cl;
extern struct lu_device_type lov_device_type;
extern struct lu_device_type lovsub_device_type;

extern struct lu_context_key lov_key;
extern struct lu_context_key lov_session_key;

extern cfs_mem_cache_t *lov_page_kmem;
extern cfs_mem_cache_t *lov_lock_kmem;
extern cfs_mem_cache_t *lov_object_kmem;
extern cfs_mem_cache_t *lov_thread_kmem;
extern cfs_mem_cache_t *lov_session_kmem;
extern cfs_mem_cache_t *lov_req_kmem;

extern cfs_mem_cache_t *lovsub_page_kmem;
extern cfs_mem_cache_t *lovsub_lock_kmem;
extern cfs_mem_cache_t *lovsub_object_kmem;
extern cfs_mem_cache_t *lovsub_req_kmem;

extern cfs_mem_cache_t *lov_lock_link_kmem;
int lov_object_init      (const struct lu_env *env, struct lu_object *obj,
                          const struct lu_object_conf *conf);
int lovsub_object_init   (const struct lu_env *env, struct lu_object *obj,
                          const struct lu_object_conf *conf);
int lov_lock_init        (const struct lu_env *env, struct cl_object *obj,
                          struct cl_lock *lock, const struct cl_io *io);
int lov_io_init          (const struct lu_env *env, struct cl_object *obj,
                          struct cl_io *io);
int lovsub_lock_init     (const struct lu_env *env, struct cl_object *obj,
                          struct cl_lock *lock, const struct cl_io *io);

int lov_lock_init_raid0  (const struct lu_env *env, struct cl_object *obj,
                          struct cl_lock *lock, const struct cl_io *io);
int lov_io_init_raid0    (const struct lu_env *env, struct cl_object *obj,
                          struct cl_io *io);
int lov_io_init_empty    (const struct lu_env *env, struct cl_object *obj,
                          struct cl_io *io);
void lov_lock_unlink     (const struct lu_env *env, struct lov_lock_link *link,
                          struct lovsub_lock *sub);

struct lov_io_sub *lov_sub_get(const struct lu_env *env, struct lov_io *lio,
                               int stripe);
void lov_sub_put         (struct lov_io_sub *sub);
int lov_sublock_modify   (const struct lu_env *env, struct lov_lock *lov,
                          struct lovsub_lock *sublock,
                          const struct cl_lock_descr *d, int idx);

struct cl_page *lov_page_init   (const struct lu_env *env, struct cl_object *ob,
                                 struct cl_page *page, cfs_page_t *vmpage);
struct cl_page *lovsub_page_init(const struct lu_env *env, struct cl_object *ob,
                                 struct cl_page *page, cfs_page_t *vmpage);

struct cl_page *lov_page_init_empty(const struct lu_env *env,
                                    struct cl_object *obj,
                                    struct cl_page *page, cfs_page_t *vmpage);
struct cl_page *lov_page_init_raid0(const struct lu_env *env,
                                    struct cl_object *obj,
                                    struct cl_page *page, cfs_page_t *vmpage);
struct lu_object *lov_object_alloc   (const struct lu_env *env,
                                      const struct lu_object_header *hdr,
                                      struct lu_device *dev);
struct lu_object *lovsub_object_alloc(const struct lu_env *env,
                                      const struct lu_object_header *hdr,
                                      struct lu_device *dev);

struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
                                         struct lov_lock *lck,
                                         struct lovsub_lock *sub);
struct lov_io_sub *lov_page_subio(const struct lu_env *env,
                                  struct lov_io *lio,
                                  const struct cl_page_slice *slice);

void lov_lsm_decref(struct lov_object *lov, struct lov_stripe_md *lsm);
struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov);
#define lov_foreach_target(lov, var)                    \
        for (var = 0; var < lov_targets_nr(lov); ++var)
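
/*
 * Illustrative use of lov_foreach_target() (a usage sketch, not code from
 * this module): iterate over all configured targets of a lov device, using
 * the ld_target[] array declared above; "ld" is a hypothetical lov_device
 * pointer.
 *
 *      struct lov_device *ld = ...;
 *      int i;
 *
 *      lov_foreach_target(ld, i) {
 *              struct lovsub_device *sub = ld->ld_target[i];
 *
 *              if (sub != NULL)
 *                      ... operate on sub ...
 *      }
 */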
/*****************************************************************************
 *
 * Type conversions.
 *
 * Accessors.
 *
 */
static inline struct lov_session *lov_env_session(const struct lu_env *env)
{
        struct lov_session *ses;

        ses = lu_context_key_get(env->le_ses, &lov_session_key);
        LASSERT(ses != NULL);
        return ses;
}

static inline struct lov_io *lov_env_io(const struct lu_env *env)
{
        return &lov_env_session(env)->ls_io;
}
static inline int lov_is_object(const struct lu_object *obj)
{
        return obj->lo_dev->ld_type == &lov_device_type;
}

static inline int lovsub_is_object(const struct lu_object *obj)
{
        return obj->lo_dev->ld_type == &lovsub_device_type;
}

static inline struct lu_device *lov2lu_dev(struct lov_device *lov)
{
        return &lov->ld_cl.cd_lu_dev;
}

static inline struct lov_device *lu2lov_dev(const struct lu_device *d)
{
        LINVRNT(d->ld_type == &lov_device_type);
        return container_of0(d, struct lov_device, ld_cl.cd_lu_dev);
}

static inline struct cl_device *lovsub2cl_dev(struct lovsub_device *lovsub)
{
        return &lovsub->acid_cl;
}

static inline struct lu_device *lovsub2lu_dev(struct lovsub_device *lovsub)
{
        return &lovsub2cl_dev(lovsub)->cd_lu_dev;
}

static inline struct lovsub_device *lu2lovsub_dev(const struct lu_device *d)
{
        LINVRNT(d->ld_type == &lovsub_device_type);
        return container_of0(d, struct lovsub_device, acid_cl.cd_lu_dev);
}

static inline struct lovsub_device *cl2lovsub_dev(const struct cl_device *d)
{
        LINVRNT(d->cd_lu_dev.ld_type == &lovsub_device_type);
        return container_of0(d, struct lovsub_device, acid_cl);
}

static inline struct lu_object *lov2lu(struct lov_object *lov)
{
        return &lov->lo_cl.co_lu;
}
static inline struct cl_object *lov2cl(struct lov_object *lov)
{
        return &lov->lo_cl;
}
static inline struct lov_object *lu2lov(const struct lu_object *obj)
{
        LINVRNT(lov_is_object(obj));
        return container_of0(obj, struct lov_object, lo_cl.co_lu);
}

static inline struct lov_object *cl2lov(const struct cl_object *obj)
{
        LINVRNT(lov_is_object(&obj->co_lu));
        return container_of0(obj, struct lov_object, lo_cl);
}

static inline struct lu_object *lovsub2lu(struct lovsub_object *los)
{
        return &los->lso_cl.co_lu;
}
static inline struct cl_object *lovsub2cl(struct lovsub_object *los)
{
        return &los->lso_cl;
}
static inline struct lovsub_object *cl2lovsub(const struct cl_object *obj)
{
        LINVRNT(lovsub_is_object(&obj->co_lu));
        return container_of0(obj, struct lovsub_object, lso_cl);
}

static inline struct lovsub_object *lu2lovsub(const struct lu_object *obj)
{
        LINVRNT(lovsub_is_object(obj));
        return container_of0(obj, struct lovsub_object, lso_cl.co_lu);
}

static inline struct lovsub_lock *
cl2lovsub_lock(const struct cl_lock_slice *slice)
{
        LINVRNT(lovsub_is_object(&slice->cls_obj->co_lu));
        return container_of(slice, struct lovsub_lock, lss_cl);
}

static inline struct lovsub_lock *cl2sub_lock(const struct cl_lock *lock)
{
        const struct cl_lock_slice *slice;

        slice = cl_lock_at(lock, &lovsub_device_type);
        LASSERT(slice != NULL);
        return cl2lovsub_lock(slice);
}

static inline struct lov_lock *cl2lov_lock(const struct cl_lock_slice *slice)
{
        LINVRNT(lov_is_object(&slice->cls_obj->co_lu));
        return container_of(slice, struct lov_lock, lls_cl);
}

static inline struct lov_page *cl2lov_page(const struct cl_page_slice *slice)
{
        LINVRNT(lov_is_object(&slice->cpl_obj->co_lu));
        return container_of0(slice, struct lov_page, lps_cl);
}

static inline struct lov_req *cl2lov_req(const struct cl_req_slice *slice)
{
        return container_of0(slice, struct lov_req, lr_cl);
}

static inline struct lovsub_page *
cl2lovsub_page(const struct cl_page_slice *slice)
{
        LINVRNT(lovsub_is_object(&slice->cpl_obj->co_lu));
        return container_of0(slice, struct lovsub_page, lsb_cl);
}

static inline struct lovsub_req *cl2lovsub_req(const struct cl_req_slice *slice)
{
        return container_of0(slice, struct lovsub_req, lsrq_cl);
}

static inline struct cl_page *lov_sub_page(const struct cl_page_slice *slice)
{
        return slice->cpl_page->cp_child;
}
static inline struct lov_io *cl2lov_io(const struct lu_env *env,
                                       const struct cl_io_slice *ios)
{
        struct lov_io *lio;

        lio = container_of(ios, struct lov_io, lis_cl);
        LASSERT(lio == lov_env_io(env));
        return lio;
}

static inline int lov_targets_nr(const struct lov_device *lov)
{
        return lov->ld_lov->desc.ld_tgt_count;
}
static inline struct lov_thread_info *lov_env_info(const struct lu_env *env)
{
        struct lov_thread_info *info;

        info = lu_context_key_get(&env->le_ctx, &lov_key);
        LASSERT(info != NULL);
        return info;
}
static inline struct lov_layout_raid0 *lov_r0(struct lov_object *lov)
{
        LASSERT(lov->lo_type == LLT_RAID0);
        LASSERT(lov->lo_lsm->lsm_wire.lw_magic == LOV_MAGIC ||
                lov->lo_lsm->lsm_wire.lw_magic == LOV_MAGIC_V3);
        return &lov->u.raid0;
}
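
/*
 * Illustrative sketch of a lov_r0() caller (not code from this module;
 * cfs_down_read()/cfs_up_read() are assumed to be the read-lock primitives
 * for cfs_rw_semaphore_t): object methods hold lov_object::lo_type_guard in
 * shared mode, and only dereference the raid0 state when the layout type is
 * LLT_RAID0.
 *
 *      cfs_down_read(&lov->lo_type_guard);
 *      if (lov->lo_type == LLT_RAID0) {
 *              struct lov_layout_raid0 *r0 = lov_r0(lov);
 *
 *              ... use r0->lo_sub[] ...
 *      }
 *      cfs_up_read(&lov->lo_type_guard);
 */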