/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Internal interfaces of LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#ifndef LOV_CL_INTERNAL_H
#define LOV_CL_INTERNAL_H

# include <libcfs/libcfs.h>
# include <liblustre.h>

#include <cl_object.h>
#include "lov_internal.h"
/**
 * Logical object volume layer. This layer implements data striping (raid0).
 *
 * At the lov layer the top-entity (object, page, lock, io) is connected to
 * one or more sub-entities: a top-object, representing a file, is connected
 * to a set of sub-objects, each representing a stripe; a file-level top-lock
 * is connected to a set of per-stripe sub-locks; a top-page is connected to
 * a (single) sub-page; and a top-level IO is connected to a set of
 * (potentially concurrent) sub-IO's.
 *
 * Sub-object, sub-page, and sub-io have well-defined top-object, top-page,
 * and top-io respectively, while a single sub-lock can be part of multiple
 * top-locks.
 *
 * Reference counting models are different for different types of entities:
 *
 *     - a top-object keeps a reference to its sub-objects, and destroys them
 *       when it is destroyed.
 *
 *     - a top-page keeps a reference to its sub-page, and destroys it when
 *       it is destroyed.
 *
 *     - a sub-lock keeps a reference to its top-locks. A top-lock keeps a
 *       reference (and a hold, see cl_lock_hold()) on its sub-locks while it
 *       is actively using them (that is, in the cl_lock_state::CLS_QUEUING,
 *       cl_lock_state::CLS_ENQUEUED, and cl_lock_state::CLS_HELD states).
 *       When moving into the cl_lock_state::CLS_CACHED state, the top-lock
 *       releases its hold. From this moment the top-lock has only a 'weak'
 *       reference to its sub-locks; this reference is protected by the
 *       top-lock's cl_lock::cll_guard, and is automatically cleared by the
 *       sub-lock when the latter is destroyed. When a sub-lock is canceled,
 *       the reference to it is removed from the top-lock array, and the
 *       top-lock is moved into the CLS_NEW state. It is guaranteed that all
 *       sub-locks exist while their top-lock is in the CLS_HELD or
 *       CLS_CACHED states.
 *
 *     - IO's are not reference counted.
 *
 * To implement the connection between top and sub entities, the lov layer is
 * split into two pieces: lov ("upper half") and lovsub ("bottom half"), both
 * implementing the full set of cl-interfaces. For example, a top-object has
 * the vvp and lov layers, and its sub-object has the lovsub and osc layers.
 * The lovsub layer is used to track the child-parent relationship.
 */

struct lovsub_device;
struct lovsub_object;

enum lov_device_flags {
	LOV_DEV_INITIALIZED = 1 << 0
};

/**
 * Resources that are used in the memory-cleaning path, and whose allocation
 * cannot fail even when memory is tight. They are preallocated in sufficient
 * quantities in lov_device::ld_emrg[], and access to them is serialized by
 * lov_device::ld_mutex.
 */
struct lov_device_emerg {
	/**
	 * Page list used to submit IO when memory is under pressure.
	 */
	struct cl_page_list emrg_page_list;
	/**
	 * Sub-io shared by all threads accessing this device when memory is
	 * too low to allocate sub-io's dynamically.
	 */
	struct cl_io emrg_subio;
	/**
	 * Environment used by the sub-io in
	 * lov_device_emerg::emrg_subio.
	 */
	struct lu_env *emrg_env;
	/**
	 * Refcheck for lov_device_emerg::emrg_env.
	 */
};

/**
 * XXX Locking of lov-private data is missing.
 */
	struct cl_device ld_cl;
	struct lov_obd *ld_lov;
	/** size of lov_device::ld_target[] array */
	struct lovsub_device **ld_target;

	/** Emergency resources used in memory-cleansing paths. */
	struct lov_device_emerg **ld_emrg;
	/**
	 * Serializes access to lov_device::ld_emrg in low-memory
	 * conditions.
	 */
	struct mutex ld_mutex;
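
/*
 * Illustrative sketch of how the emergency resources above are meant to be
 * used: access to the preallocated lov_device::ld_emrg[] slots is serialized
 * by lov_device::ld_mutex. "ld", "stripe" and emergency_submit() are
 * assumptions for this example, not names from the LOV code.
 *
 *	struct lov_device_emerg *em;
 *
 *	mutex_lock(&ld->ld_mutex);
 *	em = ld->ld_emrg[stripe];
 *	emergency_submit(em->emrg_env, &em->emrg_subio, &em->emrg_page_list);
 *	mutex_unlock(&ld->ld_mutex);
 */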

enum lov_layout_type {
	/** empty file without body */

/**
 * lov-specific file state.
 *
 * A lov object has a particular layout type, determining how the top-object
 * is built on top of sub-objects. The layout type can change dynamically.
 * When this happens, the lov_object::lo_type_guard semaphore is taken in
 * exclusive mode, all state pertaining to the old layout type is destroyed,
 * and new state is constructed. All object methods take said semaphore in
 * shared mode, providing serialization against transitions between layout
 * types.
 *
 * To avoid multiple `if' or `switch' statements selecting behavior for the
 * current layout type, object methods perform double-dispatch, invoking the
 * function corresponding to the current layout type.
 */
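
/*
 * A minimal sketch of the double-dispatch described above (illustrative
 * only; lov_layout_operations, llo_delete and lov_dispatch[] are assumptions
 * for this example, not declarations from this header):
 *
 *	struct lov_layout_operations {
 *		void (*llo_delete)(const struct lu_env *env,
 *				   struct lov_object *lov,
 *				   union lov_layout_state *state);
 *	};
 *
 *	(one table entry per enum lov_layout_type value)
 *	static const struct lov_layout_operations *lov_dispatch[] = { ... };
 *
 *	static void lov_layout_delete(const struct lu_env *env,
 *				      struct lov_object *lov)
 *	{
 *		down_read(&lov->lo_type_guard);
 *		lov_dispatch[lov->lo_type]->llo_delete(env, lov, &lov->u);
 *		up_read(&lov->lo_type_guard);
 *	}
 */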
	struct cl_object lo_cl;
	/**
	 * Serializes object operations with transitions between layout types.
	 *
	 * This semaphore is taken in shared mode by all object methods, and
	 * is taken in exclusive mode when object type is changed.
	 *
	 * \see lov_object::lo_type
	 */
	struct rw_semaphore lo_type_guard;
	/**
	 * Type of an object. Protected by lov_object::lo_type_guard.
	 */
	enum lov_layout_type lo_type;
	/**
	 * True if the layout is invalid. This bit is cleared when the layout
	 * lock is granted.
	 */
	unsigned lo_lsm_invalid:1;
	struct lov_stripe_md *lo_lsm;
	/**
	 * Waitq - wait until no one else is using lo_lsm.
	 */
	cfs_waitq_t lo_waitq;
	union lov_layout_state {
		struct lov_layout_raid0 {
			/**
			 * When this is true, lov_object::lo_attr contains
			 * valid, up-to-date attributes for the top-level
			 * object. This field is reset to 0 when attributes
			 * of any sub-object change.
			 */
			/**
			 * Array of sub-objects. Allocated when the top-object
			 * is created (lov_init_raid0()).
			 *
			 * The top-object is a strict master of its
			 * sub-objects: it is created before them, and
			 * outlives its children (the latter is necessary so
			 * that basic functions like cl_object_top() always
			 * work). The top-object keeps a reference on every
			 * sub-object.
			 *
			 * When the top-object is destroyed
			 * (lov_delete_raid0()) it releases its reference to
			 * a sub-object and waits until the latter is finally
			 * destroyed.
			 */
			struct lovsub_object **lo_sub;
			spinlock_t lo_sub_lock;
			/**
			 * Cached object attribute, built from sub-object
			 * attributes.
			 */
			struct cl_attr lo_attr;
		} raid0;
		struct lov_layout_state_empty {
		} empty;
	} u;
	/**
	 * Thread that acquired lov_object::lo_type_guard in exclusive mode.
	 */
	cfs_task_t *lo_owner;

/**
 * Flags that top-lock can set on each of its sub-locks.
 */
	/** Top-lock acquired a hold (cl_lock_hold()) on a sub-lock. */

/**
 * State lov_lock keeps for each sub-lock.
 */
struct lov_lock_sub {
	/** sub-lock itself */
	struct lovsub_lock *sub_lock;
	/** An array of per-sub-lock flags, taken from enum lov_sub_flags */
	struct cl_lock_descr sub_descr;
	struct cl_lock_descr sub_got;
};

/**
 * lov-specific lock state.
 */
	struct cl_lock_slice lls_cl;
	/** Number of sub-locks in this lock */
	/**
	 * Number of existing sub-locks.
	 */
	unsigned lls_nr_filled;
	/**
	 * Set when a sub-lock was canceled, while the top-lock was being
	 * unlocked.
	 */
	int lls_cancel_race:1;
	/**
	 * An array of sub-locks.
	 *
	 * There are two issues with managing sub-locks:
	 *
	 *     - sub-locks are concurrently canceled, and
	 *
	 *     - sub-locks are shared with other top-locks.
	 *
	 * To manage cancellation, the top-lock acquires a hold on a sub-lock
	 * (lov_sublock_adopt()) when the latter is inserted into
	 * lov_lock::lls_sub[]. This hold is released (lov_sublock_release())
	 * when the top-lock is going into the CLS_CACHED state or is
	 * destroyed. The hold prevents the sub-lock from being cancelled.
	 *
	 * Sub-lock sharing means, among other things, that a top-lock that is
	 * in the process of creation (i.e., not yet inserted into the lock
	 * list) is already accessible to other threads once at least one of
	 * its sub-locks is created, see lov_lock_sub_init().
	 *
	 * A sub-lock can be in one of the following states:
	 *
	 *     - doesn't exist: lov_lock::lls_sub[]::sub_lock == NULL. Such a
	 *       sub-lock was either never created (the top-lock is in the
	 *       CLS_NEW state), or it was created, then canceled, then
	 *       destroyed (lov_lock_unlink() cleared the sub-lock pointer in
	 *       the top-lock).
	 *
	 *     - the sub-lock exists and is on hold
	 *       (lov_lock::lls_sub[]::sub_flags & LSF_HELD). This is the
	 *       normal state of a sub-lock in the CLS_HELD and CLS_CACHED
	 *       states of the top-lock.
	 *
	 *     - the sub-lock exists, but is not held by the top-lock. This
	 *       happens after the top-lock released its hold on sub-locks
	 *       before going into cache (lov_lock_unuse()).
	 *
	 * \todo To support wide-striping, the array has to be replaced with
	 * a set of queues to avoid scanning.
	 */
	struct lov_lock_sub *lls_sub;
	/**
	 * Original description with which the lock was enqueued.
	 */
	struct cl_lock_descr lls_orig;

	struct cl_page_slice lps_cl;

struct lovsub_device {
	struct cl_device acid_cl;
	struct lov_device *acid_super;
	struct cl_device *acid_next;
};

struct lovsub_object {
	struct cl_object_header lso_header;
	struct cl_object lso_cl;
	struct lov_object *lso_super;
};

/**
 * A link between a top-lock and a sub-lock. A separate data structure is
 * necessary, because top-locks and sub-locks are in an M:N relationship.
 *
 * \todo This can be optimized for the (by far) most frequent case of a
 * single top-lock per sub-lock.
 */
struct lov_lock_link {
	struct lov_lock *lll_super;
	/** An index within the parent lock. */
	/**
	 * A linkage into a per sub-lock list of all corresponding top-locks,
	 * hanging off lovsub_lock::lss_parents.
	 */
};

/**
 * Lock state at the lovsub layer.
 */
	struct cl_lock_slice lss_cl;
	/**
	 * List of top-locks that have the given sub-lock as their part.
	 * Protected by the cl_lock::cll_guard mutex.
	 */
	cfs_list_t lss_parents;
	/**
	 * Top-lock that initiated the current operation on this sub-lock.
	 * This is only set during top-to-bottom lock operations like enqueue,
	 * and is used to optimize state change notification. Protected by
	 * the cl_lock::cll_guard mutex.
	 *
	 * \see lovsub_lock_state_one().
	 */
	struct cl_lock *lss_active;

/**
 * Describes the environment settings for sub-locks.
 */
struct lov_sublock_env {
	const struct lu_env *lse_env;
	struct cl_io *lse_io;
	struct lov_io_sub *lse_sub;
};

	struct cl_page_slice lsb_cl;

struct lov_thread_info {
	struct cl_object_conf lti_stripe_conf;
	struct lu_fid lti_fid;
	struct cl_lock_descr lti_ldescr;
	struct ost_lvb lti_lvb;
	struct cl_2queue lti_cl2q;
	struct cl_lock_closure lti_closure;
	cfs_waitlink_t lti_waiter;
};

/**
 * State that lov_io maintains for every sub-io.
 */
	/**
	 * Sub-io for a stripe. Ideally sub-io's can be stopped and resumed
	 * independently, with lov acting as a scheduler to maximize overall
	 * throughput.
	 */
	struct cl_io *sub_io;
	/**
	 * Linkage into a list (hanging off lov_io::lis_active) of all
	 * sub-io's active for the current IO iteration.
	 */
	cfs_list_t sub_linkage;
	/**
	 * True iff cl_io_init() was successfully executed against
	 * lov_io_sub::sub_io.
	 */
	int sub_io_initialized;
	/**
	 * True iff lov_io_sub::sub_io and lov_io_sub::sub_env weren't
	 * allocated, but borrowed from the per-device emergency pool.
	 */
	/**
	 * Environment in which the sub-io executes.
	 */
	struct lu_env *sub_env;
	/**
	 * The environment's refcheck.
	 */

/**
 * IO state private for LOV.
 */
	struct cl_io_slice lis_cl;
	/**
	 * Pointer to the object slice. This is a duplicate of
	 * lov_io::lis_cl::cis_object.
	 */
	struct lov_object *lis_object;
	/**
	 * Lov stripe - this determines how this io fans out.
	 * Hold a refcount on the lsm so it can't go away during the IO.
	 */
	struct lov_stripe_md *lis_lsm;
	/**
	 * Original end-of-io position for this IO, set by the upper layer as
	 * cl_io::u::ci_rw::pos + cl_io::u::ci_rw::count. lov remembers this,
	 * changes pos and count to fit the IO into a single stripe, and uses
	 * the saved value to determine when IO iterations have to stop.
	 *
	 * This is used only for CIT_READ and CIT_WRITE io's.
	 */
	loff_t lis_io_endpos;
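
	/*
	 * For example (illustrative numbers only): with a 1 MB stripe size
	 * and at least two stripes, a 1.5 MB write starting at file offset
	 * 0.5 MB has lis_io_endpos == 2 MB. The first iteration clips the io
	 * to [0.5 MB, 1 MB) on stripe 0, the second to [1 MB, 2 MB) on
	 * stripe 1, and iteration stops once the clipped end reaches
	 * lis_io_endpos.
	 */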
	/**
	 * Starting position within the file, for the current io loop
	 * iteration (stripe), used by cl_io_loop().
	 */
	/**
	 * End position within the file, for the current stripe io. This is
	 * exclusive (i.e., next offset after the last byte affected by io).
	 */
	int lis_stripe_count;
	int lis_active_subios;
	/**
	 * The index of lis_single_subio in the lis_subs array.
	 */
	int lis_single_subio_index;
	struct cl_io lis_single_subio;
	/**
	 * Size of the lis_subs array; actually the highest stripe #.
	 */
	struct lov_io_sub *lis_subs;
	/**
	 * List of active sub-io's.
	 */
	cfs_list_t lis_active;
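
/*
 * Illustrative sketch: walking the sub-io's that are active in the current
 * IO iteration, e.g. to start each of them ("lio" is assumed to be a valid
 * struct lov_io):
 *
 *	struct lov_io_sub *sub;
 *	int rc = 0;
 *
 *	cfs_list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
 *		rc = cl_io_start(sub->sub_env, sub->sub_io);
 *		if (rc != 0)
 *			break;
 *	}
 */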

	struct lov_sublock_env ls_subenv;

/**
 * State of transfer for lov.
 */
	struct cl_req_slice lr_cl;

/**
 * State of transfer for lovsub.
 */
	struct cl_req_slice lsrq_cl;

extern struct lu_device_type lov_device_type;
extern struct lu_device_type lovsub_device_type;

extern struct lu_context_key lov_key;
extern struct lu_context_key lov_session_key;

extern cfs_mem_cache_t *lov_page_kmem;
extern cfs_mem_cache_t *lov_lock_kmem;
extern cfs_mem_cache_t *lov_object_kmem;
extern cfs_mem_cache_t *lov_thread_kmem;
extern cfs_mem_cache_t *lov_session_kmem;
extern cfs_mem_cache_t *lov_req_kmem;

extern cfs_mem_cache_t *lovsub_page_kmem;
extern cfs_mem_cache_t *lovsub_lock_kmem;
extern cfs_mem_cache_t *lovsub_object_kmem;
extern cfs_mem_cache_t *lovsub_req_kmem;

extern cfs_mem_cache_t *lov_lock_link_kmem;

int lov_object_init(const struct lu_env *env, struct lu_object *obj,
		    const struct lu_object_conf *conf);
int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
		       const struct lu_object_conf *conf);
int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_lock *lock, const struct cl_io *io);
int lov_io_init(const struct lu_env *env, struct cl_object *obj,
		struct cl_io *io);
int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
		     struct cl_lock *lock, const struct cl_io *io);

int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
			struct cl_lock *lock, const struct cl_io *io);
int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
			struct cl_lock *lock, const struct cl_io *io);
int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
		      struct cl_io *io);
int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
		      struct cl_io *io);
void lov_lock_unlink(const struct lu_env *env, struct lov_lock_link *link,
		     struct lovsub_lock *sub);

struct lov_io_sub *lov_sub_get(const struct lu_env *env, struct lov_io *lio,
			       int stripe);
void lov_sub_put(struct lov_io_sub *sub);
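
/*
 * Typical usage pattern for lov_sub_get()/lov_sub_put() (an illustrative
 * sketch only; "lio" and "stripe" are assumed to be a valid struct lov_io
 * and a stripe index, and handle_sub_io() is a hypothetical helper):
 *
 *	struct lov_io_sub *sub;
 *
 *	sub = lov_sub_get(env, lio, stripe);
 *	if (!IS_ERR(sub)) {
 *		handle_sub_io(sub->sub_env, sub->sub_io);
 *		lov_sub_put(sub);
 *	}
 */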
int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
		       struct lovsub_lock *sublock,
		       const struct cl_lock_descr *d, int idx);

struct cl_page *lov_page_init(const struct lu_env *env, struct cl_object *ob,
			      struct cl_page *page, cfs_page_t *vmpage);
struct cl_page *lovsub_page_init(const struct lu_env *env, struct cl_object *ob,
				 struct cl_page *page, cfs_page_t *vmpage);
struct cl_page *lov_page_init_empty(const struct lu_env *env,
				    struct cl_object *obj,
				    struct cl_page *page, cfs_page_t *vmpage);
struct cl_page *lov_page_init_raid0(const struct lu_env *env,
				    struct cl_object *obj,
				    struct cl_page *page, cfs_page_t *vmpage);
struct lu_object *lov_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *hdr,
				   struct lu_device *dev);
struct lu_object *lovsub_object_alloc(const struct lu_env *env,
				      const struct lu_object_header *hdr,
				      struct lu_device *dev);

struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
					 struct lov_lock *lck,
					 struct lovsub_lock *sub);
struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
				  const struct cl_page_slice *slice);

void lov_lsm_decref(struct lov_object *lov, struct lov_stripe_md *lsm);
struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov);

#define lov_foreach_target(lov, var)			\
	for (var = 0; var < lov_targets_nr(lov); ++var)
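
/*
 * Example use of lov_foreach_target() (an illustrative sketch; "ld" is
 * assumed to be a valid struct lov_device and process_target() is a
 * hypothetical helper):
 *
 *	int i;
 *
 *	lov_foreach_target(ld, i) {
 *		struct lovsub_device *lsd = ld->ld_target[i];
 *
 *		if (lsd != NULL)
 *			process_target(lsd);
 *	}
 */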

/*****************************************************************************
 *
 * Accessors.
 *
 */

static inline struct lov_session *lov_env_session(const struct lu_env *env)
{
	struct lov_session *ses;

	ses = lu_context_key_get(env->le_ses, &lov_session_key);
	LASSERT(ses != NULL);
	return ses;
}

static inline struct lov_io *lov_env_io(const struct lu_env *env)
{
	return &lov_env_session(env)->ls_io;
}
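
/*
 * Per-thread lov state is reached through the environment; e.g. (an
 * illustrative sketch, assuming "env" is the current thread's lu_env):
 *
 *	struct lov_thread_info *info = lov_env_info(env);
 *	struct lov_io          *lio  = lov_env_io(env);
 *
 *	lio->lis_active_subios = 0;
 *	cl_2queue_init(&info->lti_cl2q);
 */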

static inline int lov_is_object(const struct lu_object *obj)
{
	return obj->lo_dev->ld_type == &lov_device_type;
}

static inline int lovsub_is_object(const struct lu_object *obj)
{
	return obj->lo_dev->ld_type == &lovsub_device_type;
}

static inline struct lu_device *lov2lu_dev(struct lov_device *lov)
{
	return &lov->ld_cl.cd_lu_dev;
}

static inline struct lov_device *lu2lov_dev(const struct lu_device *d)
{
	LINVRNT(d->ld_type == &lov_device_type);
	return container_of0(d, struct lov_device, ld_cl.cd_lu_dev);
}

static inline struct cl_device *lovsub2cl_dev(struct lovsub_device *lovsub)
{
	return &lovsub->acid_cl;
}

static inline struct lu_device *lovsub2lu_dev(struct lovsub_device *lovsub)
{
	return &lovsub2cl_dev(lovsub)->cd_lu_dev;
}

static inline struct lovsub_device *lu2lovsub_dev(const struct lu_device *d)
{
	LINVRNT(d->ld_type == &lovsub_device_type);
	return container_of0(d, struct lovsub_device, acid_cl.cd_lu_dev);
}

static inline struct lovsub_device *cl2lovsub_dev(const struct cl_device *d)
{
	LINVRNT(d->cd_lu_dev.ld_type == &lovsub_device_type);
	return container_of0(d, struct lovsub_device, acid_cl);
}

static inline struct lu_object *lov2lu(struct lov_object *lov)
{
	return &lov->lo_cl.co_lu;
}

static inline struct cl_object *lov2cl(struct lov_object *lov)
{
	return &lov->lo_cl;
}

static inline struct lov_object *lu2lov(const struct lu_object *obj)
{
	LINVRNT(lov_is_object(obj));
	return container_of0(obj, struct lov_object, lo_cl.co_lu);
}

static inline struct lov_object *cl2lov(const struct cl_object *obj)
{
	LINVRNT(lov_is_object(&obj->co_lu));
	return container_of0(obj, struct lov_object, lo_cl);
}
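
/*
 * These converters are typically used when a cl_object method receives the
 * generic object and needs the lov-private state, e.g. (an illustrative
 * sketch, assuming "obj" is a struct cl_object that sits on a lov device):
 *
 *	struct lov_object *lov = cl2lov(obj);
 *	struct lu_object  *lu  = lov2lu(lov);
 *
 *	LASSERT(lu->lo_dev->ld_type == &lov_device_type);
 */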

static inline struct lu_object *lovsub2lu(struct lovsub_object *los)
{
	return &los->lso_cl.co_lu;
}

static inline struct cl_object *lovsub2cl(struct lovsub_object *los)
{
	return &los->lso_cl;
}

static inline struct lovsub_object *cl2lovsub(const struct cl_object *obj)
{
	LINVRNT(lovsub_is_object(&obj->co_lu));
	return container_of0(obj, struct lovsub_object, lso_cl);
}

static inline struct lovsub_object *lu2lovsub(const struct lu_object *obj)
{
	LINVRNT(lovsub_is_object(obj));
	return container_of0(obj, struct lovsub_object, lso_cl.co_lu);
}

static inline struct lovsub_lock *
cl2lovsub_lock(const struct cl_lock_slice *slice)
{
	LINVRNT(lovsub_is_object(&slice->cls_obj->co_lu));
	return container_of(slice, struct lovsub_lock, lss_cl);
}

static inline struct lovsub_lock *cl2sub_lock(const struct cl_lock *lock)
{
	const struct cl_lock_slice *slice;

	slice = cl_lock_at(lock, &lovsub_device_type);
	LASSERT(slice != NULL);
	return cl2lovsub_lock(slice);
}

static inline struct lov_lock *cl2lov_lock(const struct cl_lock_slice *slice)
{
	LINVRNT(lov_is_object(&slice->cls_obj->co_lu));
	return container_of(slice, struct lov_lock, lls_cl);
}

static inline struct lov_page *cl2lov_page(const struct cl_page_slice *slice)
{
	LINVRNT(lov_is_object(&slice->cpl_obj->co_lu));
	return container_of0(slice, struct lov_page, lps_cl);
}

static inline struct lov_req *cl2lov_req(const struct cl_req_slice *slice)
{
	return container_of0(slice, struct lov_req, lr_cl);
}

static inline struct lovsub_page *
cl2lovsub_page(const struct cl_page_slice *slice)
{
	LINVRNT(lovsub_is_object(&slice->cpl_obj->co_lu));
	return container_of0(slice, struct lovsub_page, lsb_cl);
}

static inline struct lovsub_req *cl2lovsub_req(const struct cl_req_slice *slice)
{
	return container_of0(slice, struct lovsub_req, lsrq_cl);
}

static inline struct cl_page *lov_sub_page(const struct cl_page_slice *slice)
{
	return slice->cpl_page->cp_child;
}

static inline struct lov_io *cl2lov_io(const struct lu_env *env,
				       const struct cl_io_slice *ios)
{
	struct lov_io *lio;

	lio = container_of(ios, struct lov_io, lis_cl);
	LASSERT(lio == lov_env_io(env));
	return lio;
}

static inline int lov_targets_nr(const struct lov_device *lov)
{
	return lov->ld_lov->desc.ld_tgt_count;
}

static inline struct lov_thread_info *lov_env_info(const struct lu_env *env)
{
	struct lov_thread_info *info;

	info = lu_context_key_get(&env->le_ctx, &lov_key);
	LASSERT(info != NULL);
	return info;
}

static inline struct lov_layout_raid0 *lov_r0(struct lov_object *lov)
{
	LASSERT(lov->lo_type == LLT_RAID0);
	LASSERT(lov->lo_lsm->lsm_wire.lw_magic == LOV_MAGIC ||
		lov->lo_lsm->lsm_wire.lw_magic == LOV_MAGIC_V3);
	return &lov->u.raid0;
}
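
/*
 * Example of reaching a stripe sub-object through the raid0 state (an
 * illustrative sketch; "lov" is assumed to be a raid0-striped lov_object and
 * "stripe" a valid stripe index):
 *
 *	struct lov_layout_raid0 *r0  = lov_r0(lov);
 *	struct lovsub_object    *los = r0->lo_sub[stripe];
 *
 *	if (los != NULL)
 *		... use lovsub2cl(los) to get the stripe's cl_object ...
 */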