4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2013, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 #ifndef _LUSTRE_CL_OBJECT_H
37 #define _LUSTRE_CL_OBJECT_H
39 /** \defgroup clio clio
41 * Client objects implement io operations and cache pages.
43 * Examples: lov and osc are implementations of cl interface.
45 * Big Theory Statement.
49 * Client implementation is based on the following data-types:
55 * - cl_lock represents an extent lock on an object.
57 *    - cl_io      represents a high-level i/o activity such as a whole read/write
58 *             system call, or write-out of pages from under the lock being
59 *             canceled. cl_io has sub-ios that can be stopped and resumed
60 *             independently, thus achieving a high degree of transfer
61 *             parallelism. A single cl_io can be advanced forward by
62 *             multiple threads (although in the most usual case of a
63 *             read/write system call it is associated with the single user
64 *             thread that issued the system call).
66 * - cl_req represents a collection of pages for a transfer. cl_req is
67 *             constructed by the req-forming engine that tries to saturate the
68 *             transport with large and continuous transfers.
72 * - to avoid confusion, a high-level I/O operation like a read or write system
73 *   call is referred to as "an io", whereas a low-level I/O operation, like an
74 *   RPC, is referred to as "a transfer"
76 * - "generic code" means generic (not file system specific) code in the
77 * hosting environment. "cl-code" means code (mostly in cl_*.c files) that
78 * is not layer specific.
84 * - cl_object_header::coh_page_guard
85 * - cl_object_header::coh_lock_guard
88 * See the top comment in cl_object.c for the description of overall locking and
89 * reference-counting design.
91 * See comments below for the description of i/o, page, and dlm-locking
98 * super-class definitions.
100 #include <lu_object.h>
102 # include <linux/mutex.h>
103 # include <linux/radix-tree.h>
109 struct cl_device_operations;
112 struct cl_object_page_operations;
113 struct cl_object_lock_operations;
116 struct cl_page_slice;
118 struct cl_lock_slice;
120 struct cl_lock_operations;
121 struct cl_page_operations;
130 * Operations for each data device in the client stack.
132 * \see vvp_cl_ops, lov_cl_ops, lovsub_cl_ops, osc_cl_ops
134 struct cl_device_operations {
136 * Initialize cl_req. This method is called top-to-bottom on all
137 * devices in the stack to give them a chance to allocate layer-private
138 * data, and to attach them to the cl_req by calling
139 * cl_req_slice_add().
141 * \see osc_req_init(), lov_req_init(), lovsub_req_init()
142 * \see ccc_req_init()
144 int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev,
149 * Device in the client stack.
151 * \see ccc_device, lov_device, lovsub_device, osc_device
155 struct lu_device cd_lu_dev;
156 /** Per-layer operation vector. */
157 const struct cl_device_operations *cd_ops;
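/**
 * Illustrative sketch for cl_device_operations::cdo_req_init() above: a
 * hypothetical layer allocates its per-request slice and attaches it with
 * cl_req_slice_add(). struct foo_req, foo_req_ops and the exact
 * cl_req_slice_add() signature are assumptions made for this example only.
 *
 * \code
 * static int foo_req_init(const struct lu_env *env, struct cl_device *dev,
 *                         struct cl_req *req)
 * {
 *         struct foo_req *fr = kzalloc(sizeof(*fr), GFP_NOFS);
 *
 *         if (fr == NULL)
 *                 return -ENOMEM;
 *         // assumed signature: (req, slice, dev, ops)
 *         cl_req_slice_add(req, &fr->fr_cl, dev, &foo_req_ops);
 *         return 0;
 * }
 * \endcode
 */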
160 /** \addtogroup cl_object cl_object
163 * "Data attributes" of cl_object. Data attributes can be updated
164 * independently for a sub-object, and the top-object's attributes are calculated
165 * from those of its sub-objects.
168 /** Object size, in bytes */
171 * Known minimal size, in bytes.
173 * This is only valid when at least one DLM lock is held.
176 /** Modification time. Measured in seconds since epoch. */
178 /** Access time. Measured in seconds since epoch. */
180 /** Change time. Measured in seconds since epoch. */
183 * Blocks allocated to this cl_object on the server file system.
185 * \todo XXX An interface for block size is needed.
189 * User identifier for quota purposes.
193 * Group identifier for quota purposes.
199 * Fields in cl_attr that are being set.
213 * Sub-class of lu_object with methods common for objects on the client
216 * cl_object: represents a regular file system object, both a file and a
217 * stripe. cl_object is based on lu_object: it is identified by a fid,
218 * layered, cached, hashed, and lrued. An important distinction from the server
219 * side, where md_object and dt_object are used, is that cl_object "fans out"
220 * at the lov/sns level: depending on the file layout, a single file is
221 * represented as a set of "sub-objects" (stripes). At the implementation
222 * level, struct lov_object contains an array of cl_objects. Each sub-object
223 * is a full-fledged cl_object, having its fid, living in the lru and hash
226 * This leads to the next important difference with the server side: on the
227 * client, it's quite usual to have objects with different sequences of
228 * layers. For example, a typical top-object is composed of the following
234 * whereas its sub-objects are composed of
239 * layers. Here "lovsub" is a mostly dummy layer, whose purpose is to keep
240 * track of the object-subobject relationship.
242 * Sub-objects are not cached independently: when the top-object is about to
243 * be discarded from memory, all its sub-objects are torn down and
246 * \see ccc_object, lov_object, lovsub_object, osc_object
250 struct lu_object co_lu;
251 /** per-object-layer operations */
252 const struct cl_object_operations *co_ops;
253 /** offset of page slice in cl_page buffer */
258 * Description of the client object configuration. This is used for the
259 * creation of a new client object that is identified by more state than
262 struct cl_object_conf {
264 struct lu_object_conf coc_lu;
267 * Object layout. This is consumed by lov.
269 struct lustre_md *coc_md;
271 * Description of particular stripe location in the
272 * cluster. This is consumed by osc.
274 struct lov_oinfo *coc_oinfo;
277 * VFS inode. This is consumed by vvp.
279 struct inode *coc_inode;
281 * Layout lock handle.
283 struct ldlm_lock *coc_lock;
285 * Operation to handle layout, OBJECT_CONF_XYZ.
291 /** configure layout, set up a new stripe, must be called while
292 * holding layout lock. */
294 /** invalidate the current stripe configuration due to losing
296 OBJECT_CONF_INVALIDATE = 1,
297 /** wait for old layout to go away so that new layout can be
303 * Operations implemented for each cl object layer.
305 * \see vvp_ops, lov_ops, lovsub_ops, osc_ops
307 struct cl_object_operations {
309 * Initialize page slice for this layer. Called top-to-bottom through
310 * every object layer when a new cl_page is instantiated. A layer
311 * keeping private per-page data, or requiring its own page operations
312 * vector, should allocate this data here and attach it to the page
313 * by calling cl_page_slice_add(). \a vmpage is locked (in the VM
316 * \retval NULL success.
318 * \retval ERR_PTR(errno) failure code.
320 * \retval valid-pointer pointer to already existing referenced page
321 * to be used instead of newly created.
323 int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
324 struct cl_page *page, struct page *vmpage);
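/**
 * Illustrative sketch (assumptions only, not code from a real layer): a
 * possible ->coo_page_init() that attaches its page slice with
 * cl_page_slice_add(). struct foo_page, the cl_object_page_slice() helper
 * and the exact cl_page_slice_add() signature are assumed for the example.
 *
 * \code
 * static int foo_page_init(const struct lu_env *env, struct cl_object *obj,
 *                          struct cl_page *page, struct page *vmpage)
 * {
 *         struct foo_page *fp = cl_object_page_slice(obj, page); // assumed helper
 *
 *         fp->fp_vmpage = vmpage;                                // assumed field
 *         cl_page_slice_add(page, &fp->fp_cl, obj, &foo_page_ops);
 *         return 0;
 * }
 * \endcode
 */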
326 * Initialize lock slice for this layer. Called top-to-bottom through
327 * every object layer when a new cl_lock is instantiated. A layer
328 * keeping private per-lock data, or requiring its own lock operations
329 * vector, should allocate this data here and attach it to the lock
330 * by calling cl_lock_slice_add(). Mandatory.
332 int (*coo_lock_init)(const struct lu_env *env,
333 struct cl_object *obj, struct cl_lock *lock,
334 const struct cl_io *io);
336 * Initialize io state for a given layer.
338 * Called top-to-bottom once per io existence to initialize io
339 * state. If a layer wants to keep some state for this type of io, it
340 * has to embed struct cl_io_slice in lu_env::le_ses, and register the
341 * slice with cl_io_slice_add(). It is guaranteed that all threads
342 * participating in this io share the same session.
344 int (*coo_io_init)(const struct lu_env *env,
345 struct cl_object *obj, struct cl_io *io);
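/**
 * Illustrative sketch: a possible ->coo_io_init() keeping its io state in
 * the session (as described above) and registering it with
 * cl_io_slice_add(). The foo_env_io() accessor, struct foo_io and the exact
 * cl_io_slice_add() signature are assumptions made for this example.
 *
 * \code
 * static int foo_io_init(const struct lu_env *env,
 *                        struct cl_object *obj, struct cl_io *io)
 * {
 *         struct foo_io *fio = foo_env_io(env); // assumed: slice lives in the session
 *
 *         // assumed signature: (io, slice, obj, ops)
 *         cl_io_slice_add(io, &fio->fi_cl, obj, &foo_io_ops);
 *         return 0;
 * }
 * \endcode
 */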
347 * Fill portion of \a attr that this layer controls. This method is
348 * called top-to-bottom through all object layers.
350 * \pre cl_object_header::coh_attr_guard of the top-object is locked.
352 * \return 0: to continue
353 * \return +ve: to stop iterating through layers (but 0 is returned
354 * from enclosing cl_object_attr_get())
355 * \return -ve: to signal error
357 int (*coo_attr_get)(const struct lu_env *env, struct cl_object *obj,
358 struct cl_attr *attr);
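/**
 * Illustrative sketch of a ->coo_attr_get() that fills the part of \a attr it
 * controls and lets iteration continue (returns 0). The cat_* field names and
 * the foo_object() conversion helper are assumptions made for the example.
 *
 * \code
 * static int foo_attr_get(const struct lu_env *env, struct cl_object *obj,
 *                         struct cl_attr *attr)
 * {
 *         struct foo_object *fo = foo_object(obj); // assumed conversion helper
 *
 *         attr->cat_size   = fo->fo_size;          // assumed cl_attr field names
 *         attr->cat_blocks = fo->fo_blocks;
 *         return 0; // 0: continue with the next (lower) layer
 * }
 * \endcode
 */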
362 * \a valid is a bitmask composed of values from enum #cl_attr_valid,
363 * indicating which attributes are to be set.
365 * \pre cl_object_header::coh_attr_guard of the top-object is locked.
367 * \return the same convention as for
368 * cl_object_operations::coo_attr_get() is used.
370 int (*coo_attr_set)(const struct lu_env *env, struct cl_object *obj,
371 const struct cl_attr *attr, unsigned valid);
373 * Update object configuration. Called top-to-bottom to modify object
376 * XXX error conditions and handling.
378 int (*coo_conf_set)(const struct lu_env *env, struct cl_object *obj,
379 const struct cl_object_conf *conf);
381 * Glimpse AST. Executed when a glimpse AST arrives for a lock on this
382 * object. Layers are supposed to fill parts of \a lvb that will be
383 * shipped to the glimpse originator as a glimpse result.
385 * \see ccc_object_glimpse(), lovsub_object_glimpse(),
386 * \see osc_object_glimpse()
388 int (*coo_glimpse)(const struct lu_env *env,
389 const struct cl_object *obj, struct ost_lvb *lvb);
391 * Object prune method. Called when the layout is going to change on
392 * this object, therefore each layer has to clean up its cache,
393 * mainly pages and locks.
395 int (*coo_prune)(const struct lu_env *env, struct cl_object *obj);
399 * Extended header for client object.
401 struct cl_object_header {
402 /** Standard lu_object_header. cl_object::co_lu::lo_header points
404 struct lu_object_header coh_lu;
406 * \todo XXX move locks below to the separate cache-lines, they are
407 * mostly useless otherwise.
410 /** Lock protecting lock list. */
411 spinlock_t coh_lock_guard;
413 /** List of cl_lock's granted for this object. */
414 cfs_list_t coh_locks;
417 * Parent object. It is assumed that an object has a well-defined
418 * parent, but not a well-defined child (there may be multiple
419 * sub-objects, for the same top-object). cl_object_header::coh_parent
420 * field allows certain code to be written generically, without
421 * limiting possible cl_object layouts unduly.
423 struct cl_object_header *coh_parent;
425 * Protects consistency between the cl_attr of the parent object and the
426 * attributes of sub-objects, from which the former is calculated ("merged")
429 * \todo XXX this can be read/write lock if needed.
431 spinlock_t coh_attr_guard;
433 * Size of cl_page + page slices
435 unsigned short coh_page_bufsize;
437 * Number of objects above this one: 0 for a top-object, 1 for its
440 unsigned char coh_nesting;
444 * Helper macro: iterate over all layers of the object \a obj, assigning every
445 * layer top-to-bottom to \a slice.
447 #define cl_object_for_each(slice, obj) \
448 cfs_list_for_each_entry((slice), \
449 &(obj)->co_lu.lo_header->loh_layers, \
452 * Helper macro: iterate over all layers of the object \a obj, assigning every
453 * layer bottom-to-top to \a slice.
455 #define cl_object_for_each_reverse(slice, obj) \
456 cfs_list_for_each_entry_reverse((slice), \
457 &(obj)->co_lu.lo_header->loh_layers, \
462 #define pgoff_t unsigned long
465 #define CL_PAGE_EOF ((pgoff_t)~0ull)
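/**
 * Usage sketch for the layer-iteration macros above: this is roughly how
 * generic cl-code dispatches a per-layer method top-to-bottom, using
 * cl_object_operations::coo_glimpse() as an example. A simplified
 * illustration, not a verbatim copy of cl_object.c.
 *
 * \code
 * int example_object_glimpse(const struct lu_env *env,
 *                            struct cl_object *obj, struct ost_lvb *lvb)
 * {
 *         struct cl_object *slice;
 *         int result = 0;
 *
 *         cl_object_for_each(slice, obj) {
 *                 if (slice->co_ops->coo_glimpse != NULL)
 *                         result = slice->co_ops->coo_glimpse(env, slice, lvb);
 *                 if (result != 0)
 *                         break;
 *         }
 *         return result;
 * }
 * \endcode
 */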
467 /** \addtogroup cl_page cl_page
471 * Layered client page.
473 * cl_page: represents a portion of a file, cached in the memory. All pages
474 * of the given file are of the same size, and are kept in the radix tree
475 * hanging off the cl_object. cl_page doesn't fan out, but as sub-objects
476 * of the top-level file object are first class cl_objects, they have their
477 * own radix trees of pages and hence a page is implemented as a sequence of
478 * struct cl_page's, linked into a doubly-linked list through
479 * cl_page::cp_parent and cl_page::cp_child pointers, each residing in the
480 * corresponding radix tree at the corresponding logical offset.
482 * cl_page is associated with a VM page of the hosting environment (struct
483 * page in the Linux kernel, for example). It is assumed that this
484 * association is implemented by one of the cl_page layers (the top layer in the
485 * current design) that
487 * - intercepts per-VM-page call-backs made by the environment (e.g.,
490 * - translates state (page flag bits) and locking between lustre and
493 * The association between cl_page and struct page is immutable and
494 * established when cl_page is created.
496 * cl_page can be "owned" by a particular cl_io (see below), guaranteeing
497 * this io exclusive access to this page w.r.t. other io attempts and
498 * various events changing page state (such as transfer completion, or
499 * eviction of the page from memory). Note that in general cl_io
500 * cannot be identified with a particular thread, and page ownership is not
501 * exactly equal to the current thread holding a lock on the page. The layer
502 * implementing the association between cl_page and struct page has to implement
503 * ownership on top of available synchronization mechanisms.
505 * While the lustre client maintains the notion of page ownership by io, the
506 * hosting MM/VM usually has its own page concurrency control
507 * mechanisms. For example, in Linux, page access is synchronized by the
508 * per-page PG_locked bit-lock, and generic kernel code (generic_file_*())
509 * takes care to acquire and release such locks as necessary around the
510 * calls to the file system methods (->readpage(), ->prepare_write(),
511 * ->commit_write(), etc.). This leads to a situation where there are two
512 * different ways to own a page in the client:
514 *     - client code explicitly and voluntarily owns the page (cl_page_own());
516 *     - the VM locks a page and then calls the client, which has to "assume"
517 *       the ownership from the VM (cl_page_assume()).
519 * Dual methods to release ownership are cl_page_disown() and
520 * cl_page_unassume().
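 *
 * To illustrate the two paths (a schematic sketch only; the exact prototypes
 * of cl_page_own(), cl_page_assume(), cl_page_disown() and
 * cl_page_unassume() are assumptions here and should be checked against
 * their declarations):
 *
 * \code
 * // Path 1: client code takes ownership voluntarily.
 * if (cl_page_own(env, io, page) == 0) {
 *         // ... operate on the page on behalf of io ...
 *         cl_page_disown(env, io, page);
 * }
 *
 * // Path 2: the VM already locked the vmpage and called into the client.
 * cl_page_assume(env, io, page);
 * // ... operate on the page ...
 * cl_page_unassume(env, io, page);
 * \endcode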
522 * cl_page is reference counted (cl_page::cp_ref). When reference counter
523 * drops to 0, the page is returned to the cache, unless it is in
524 * cl_page_state::CPS_FREEING state, in which case it is immediately
527 * The general logic guaranteeing the absence of "existential races" for
528 * pages is the following:
530 * - there are fixed known ways for a thread to obtain a new reference
533 * - by doing a lookup in the cl_object radix tree, protected by the
536 * - by starting from VM-locked struct page and following some
537 * hosting environment method (e.g., following ->private pointer in
538 * the case of Linux kernel), see cl_vmpage_page();
540 * - when the page enters cl_page_state::CPS_FREEING state, all these
541 * ways are severed with the proper synchronization
542 * (cl_page_delete());
544 * - entry into cl_page_state::CPS_FREEING is serialized by the VM page
547 * - no new references to the page in cl_page_state::CPS_FREEING state
548 * are allowed (checked in cl_page_get()).
550 * Together this guarantees that when last reference to a
551 * cl_page_state::CPS_FREEING page is released, it is safe to destroy the
552 * page, as neither references to it can be acquired at that point, nor
555 * cl_page is a state machine. States are enumerated in enum
556 * cl_page_state. Possible state transitions are enumerated in
557 * cl_page_state_set(). State transition process (i.e., actual changing of
558 * cl_page::cp_state field) is protected by the lock on the underlying VM
561 * Linux Kernel implementation.
563 * Binding between cl_page and struct page is implemented in the vvp
564 * layer. cl_page is attached to the
565 * ->private pointer of the struct page, together with the setting of the
566 * PG_private bit in page->flags, and acquiring an additional reference on the
567 * struct page (much like struct buffer_head, or any similar file system
568 * private data structures).
570 * PG_locked lock is used to implement both ownership and transfer
571 * synchronization, that is, page is VM-locked in CPS_{OWNED,PAGE{IN,OUT}}
572 * states. No additional references are acquired for the duration of the
575 * \warning *THIS IS NOT* the behavior expected by the Linux kernel, where
576 * write-out is "protected" by the special PG_writeback bit.
580 * States of cl_page. cl_page.c assumes particular order here.
582 * The page state machine is rather crude, as it doesn't recognize finer page
583 * states like "dirty" or "up to date". This is because such states are not
584 * always well defined for the whole stack (see, for example, the
585 * implementation of read-ahead, which hides page up-to-dateness to track
586 * cache hits accurately). Such sub-states are maintained by the layers that
587 * are interested in them.
591 * Page is in the cache, un-owned. Page leaves cached state in the
594 * - [cl_page_state::CPS_OWNED] io comes across the page and
597 * - [cl_page_state::CPS_PAGEOUT] page is dirty, the
598 * req-formation engine decides that it wants to include this page
599 *       into a cl_req being constructed, and yanks it from the cache;
601 * - [cl_page_state::CPS_FREEING] VM callback is executed to
602 *       evict the page from memory;
604 * \invariant cl_page::cp_owner == NULL && cl_page::cp_req == NULL
608 * Page is exclusively owned by some cl_io. Page may end up in this
609 * state as a result of
611 * - io creating new page and immediately owning it;
613 * - [cl_page_state::CPS_CACHED] io finding existing cached page
616 * - [cl_page_state::CPS_OWNED] io finding existing owned page
617 * and waiting for owner to release the page;
619 * Page leaves owned state in the following cases:
621 * - [cl_page_state::CPS_CACHED] io decides to leave the page in
622 * the cache, doing nothing;
624 * - [cl_page_state::CPS_PAGEIN] io starts read transfer for
627 * - [cl_page_state::CPS_PAGEOUT] io starts immediate write
628 * transfer for this page;
630 * - [cl_page_state::CPS_FREEING] io decides to destroy this
631 * page (e.g., as part of truncate or extent lock cancellation).
633 * \invariant cl_page::cp_owner != NULL && cl_page::cp_req == NULL
637 * Page is being written out, as a part of a transfer. This state is
638 * entered when req-formation logic decides that it wants this page to
639 * be sent through the wire _now_. Specifically, it means that once
640 * this state is achieved, transfer completion handler (with either
641 * success or failure indication) is guaranteed to be executed against
642 * this page independently of any locks and any scheduling decisions
643 * made by the hosting environment (that effectively means that the
644 * page is never put into cl_page_state::CPS_PAGEOUT state "in
645 * advance". This property is mentioned, because it is important when
646 * reasoning about possible dead-locks in the system). The page can
647 * enter this state as a result of
649 * - [cl_page_state::CPS_OWNED] an io requesting an immediate
650 * write-out of this page, or
652 * - [cl_page_state::CPS_CACHED] req-forming engine deciding
653 * that it has enough dirty pages cached to issue a "good"
656 * The page leaves cl_page_state::CPS_PAGEOUT state when the transfer
657 * is completed---it is moved into cl_page_state::CPS_CACHED state.
659 * Underlying VM page is locked for the duration of transfer.
661 * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req != NULL
665 * Page is being read in, as a part of a transfer. This is quite
666 * similar to the cl_page_state::CPS_PAGEOUT state, except that
667 * read-in is always "immediate"---there is no such thing as a sudden
668 * construction of read cl_req from cached, presumably not up to date,
671 * Underlying VM page is locked for the duration of transfer.
673 * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req != NULL
677 * Page is being destroyed. This state is entered when the client decides
678 * that the page has to be deleted from its host object, as, e.g., a part
681 * Once this state is reached, there is no way to escape it.
683 * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req == NULL
690 /** Host page, the page is from the host inode which the cl_page
694 /** Transient page, the transient cl_page is used to bind a cl_page
695 * to a vmpage which does not belong to the same object as the cl_page.
696 * It is used in DirectIO, lockless IO and liblustre. */
701 * Flags maintained for every cl_page.
705 * Set when pagein completes. Used for debugging (read completes at
706 * most once for a page).
708 CPF_READ_COMPLETED = 1 << 0
712 * Fields are protected by the lock on struct page, except for atomics and
715 * \invariant Data type invariants are in cl_page_invariant(). Basically:
716 * cl_page::cp_parent and cl_page::cp_child are a well-formed double-linked
717 * list, consistent with the parent/child pointers in the cl_page::cp_obj and
718 * cl_page::cp_owner (when set).
721 /** Reference counter. */
723 /** An object this page is a part of. Immutable after creation. */
724 struct cl_object *cp_obj;
725 /** Logical page index within the object. Immutable after creation. */
727 /** List of slices. Immutable after creation. */
728 cfs_list_t cp_layers;
729 /** Parent page, NULL for top-level page. Immutable after creation. */
730 struct cl_page *cp_parent;
731 /** Lower-layer page. NULL for bottommost page. Immutable after
733 struct cl_page *cp_child;
735 * Page state. This field is const to avoid accidental update, it is
736 * modified only internally within cl_page.c. Protected by a VM lock.
738 const enum cl_page_state cp_state;
739 /** Linkage of pages within group. Protected by cl_page::cp_mutex. */
741 /** Mutex serializing membership of a page in a batch. */
742 struct mutex cp_mutex;
743 /** Linkage of pages within cl_req. */
744 cfs_list_t cp_flight;
745 /** Transfer error. */
749 * Page type. Only CPT_TRANSIENT is used so far. Immutable after
752 enum cl_page_type cp_type;
755 * Owning IO in cl_page_state::CPS_OWNED state. Sub-page can be owned
756 * by sub-io. Protected by a VM lock.
758 struct cl_io *cp_owner;
760 * Debug information: the task owning the page.
762 struct task_struct *cp_task;
764 * Owning IO request in cl_page_state::CPS_PAGEOUT and
765 * cl_page_state::CPS_PAGEIN states. This field is maintained only in
766 * the top-level pages. Protected by a VM lock.
768 struct cl_req *cp_req;
769 /** List of references to this page, for debugging. */
770 struct lu_ref cp_reference;
771 /** Link to an object, for debugging. */
772 struct lu_ref_link cp_obj_ref;
773 /** Link to a queue, for debugging. */
774 struct lu_ref_link cp_queue_ref;
775 /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */
777 /** Assigned if doing a sync_io */
778 struct cl_sync_io *cp_sync_io;
782 * Per-layer part of cl_page.
784 * \see ccc_page, lov_page, osc_page
786 struct cl_page_slice {
787 struct cl_page *cpl_page;
789 * Object slice corresponding to this page slice. Immutable after
792 struct cl_object *cpl_obj;
793 const struct cl_page_operations *cpl_ops;
794 /** Linkage into cl_page::cp_layers. Immutable after creation. */
795 cfs_list_t cpl_linkage;
799 * Lock mode. For the client extent locks.
801 * \warning: cl_lock_mode_match() assumes particular ordering here.
806 * Mode of a lock that protects no data, and exists only as a
807 * placeholder. This is used for `glimpse' requests. A phantom lock
808 * might get promoted to real lock at some point.
817 * Requested transfer type.
827 * Per-layer page operations.
829 * Methods taking an \a io argument are for the activity happening in the
830 * context of given \a io. Page is assumed to be owned by that io, except for
831 * the obvious cases (like cl_page_operations::cpo_own()).
833 * \see vvp_page_ops, lov_page_ops, osc_page_ops
835 struct cl_page_operations {
837 * cl_page<->struct page methods. Only one layer in the stack has to
838 * implement these. Current code assumes that this functionality is
839 * provided by the topmost layer, see cl_page_disown0() as an example.
843 * \return the underlying VM page. Optional.
845 struct page *(*cpo_vmpage)(const struct lu_env *env,
846 const struct cl_page_slice *slice);
848 * Called when \a io acquires this page into the exclusive
849 * ownership. When this method returns, it is guaranteed that the page is
850 * not owned by another io, and no transfer is going on against
854 * \see vvp_page_own(), lov_page_own()
856 int (*cpo_own)(const struct lu_env *env,
857 const struct cl_page_slice *slice,
858 struct cl_io *io, int nonblock);
859 /** Called when ownership is yielded. Optional.
861 * \see cl_page_disown()
862 * \see vvp_page_disown()
864 void (*cpo_disown)(const struct lu_env *env,
865 const struct cl_page_slice *slice, struct cl_io *io);
867 * Called for a page that is already "owned" by \a io from VM point of
870 * \see cl_page_assume()
871 * \see vvp_page_assume(), lov_page_assume()
873 void (*cpo_assume)(const struct lu_env *env,
874 const struct cl_page_slice *slice, struct cl_io *io);
875 /** Dual to cl_page_operations::cpo_assume(). Optional. Called
876 * bottom-to-top when IO releases a page without actually unlocking
879 * \see cl_page_unassume()
880 * \see vvp_page_unassume()
882 void (*cpo_unassume)(const struct lu_env *env,
883 const struct cl_page_slice *slice,
886 * Announces whether the page contains valid data or not by \a uptodate.
888 * \see cl_page_export()
889 * \see vvp_page_export()
891 void (*cpo_export)(const struct lu_env *env,
892 const struct cl_page_slice *slice, int uptodate);
894 * Checks whether underlying VM page is locked (in the suitable
895 * sense). Used for assertions.
897 * \retval -EBUSY: page is protected by a lock of a given mode;
898 * \retval -ENODATA: page is not protected by a lock;
899 * \retval 0: this layer cannot decide. (Should never happen.)
901 int (*cpo_is_vmlocked)(const struct lu_env *env,
902 const struct cl_page_slice *slice);
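/**
 * Illustrative sketch of a possible ->cpo_is_vmlocked() for the layer that
 * owns the vmpage association (vvp-like). The way the vmpage is reached from
 * the slice is an assumption; only the -EBUSY/-ENODATA convention above is
 * taken from this header.
 *
 * \code
 * static int foo_page_is_vmlocked(const struct lu_env *env,
 *                                 const struct cl_page_slice *slice)
 * {
 *         struct page *vmpage = foo_page_vmpage(slice); // assumed accessor
 *
 *         return PageLocked(vmpage) ? -EBUSY : -ENODATA;
 * }
 * \endcode
 */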
908 * Called when page is truncated from the object. Optional.
910 * \see cl_page_discard()
911 * \see vvp_page_discard(), osc_page_discard()
913 void (*cpo_discard)(const struct lu_env *env,
914 const struct cl_page_slice *slice,
917 * Called when the page is removed from the cache, and is about to be
918 * destroyed. Optional.
920 * \see cl_page_delete()
921 * \see vvp_page_delete(), osc_page_delete()
923 void (*cpo_delete)(const struct lu_env *env,
924 const struct cl_page_slice *slice);
925 /** Destructor. Frees resources and slice itself. */
926 void (*cpo_fini)(const struct lu_env *env,
927 struct cl_page_slice *slice);
930 * Checks whether the page is protected by a cl_lock. This is a
931 * per-layer method, because certain layers have ways to check for the
932 * lock much more efficiently than through the generic locks scan, or
933 * implement locking mechanisms separate from cl_lock, e.g.,
934 * LL_FILE_GROUP_LOCKED in vvp. If \a pending is true, check for locks
935 * being canceled, or scheduled for cancellation as soon as the last
936 * user goes away, too.
938 * \retval -EBUSY: page is protected by a lock of a given mode;
939 * \retval -ENODATA: page is not protected by a lock;
940 * \retval 0: this layer cannot decide.
942 * \see cl_page_is_under_lock()
944 int (*cpo_is_under_lock)(const struct lu_env *env,
945 const struct cl_page_slice *slice,
949 * Optional debugging helper. Prints given page slice.
951 * \see cl_page_print()
953 int (*cpo_print)(const struct lu_env *env,
954 const struct cl_page_slice *slice,
955 void *cookie, lu_printer_t p);
959 * Transfer methods. See comment on cl_req for a description of
960 * transfer formation and life-cycle.
965 * Request type dependent vector of operations.
967 * Transfer operations depend on transfer mode (cl_req_type). To avoid
968 * passing the transfer mode to each and every one of these methods, and to
969 * avoid branching on request type inside the methods, separate
970 * methods for cl_req_type::CRT_READ and cl_req_type::CRT_WRITE are
971 * provided. That is, method invocation usually looks like
973 * slice->cp_ops.io[req->crq_type].cpo_method(env, slice, ...);
977 * Called when a page is submitted for a transfer as a part of
980 * \return 0 : page is eligible for submission;
981 * \return -EALREADY : skip this page;
982 * \return -ve : error.
984 * \see cl_page_prep()
986 int (*cpo_prep)(const struct lu_env *env,
987 const struct cl_page_slice *slice,
990 * Completion handler. This is guaranteed to be eventually
991 * fired after cl_page_operations::cpo_prep() or
992 * cl_page_operations::cpo_make_ready() call.
994 * This method can be called in a non-blocking context. It is
995 * guaranteed however, that the page involved and its object
996 * are pinned in memory (and, hence, calling cl_page_put() is
999 * \see cl_page_completion()
1001 void (*cpo_completion)(const struct lu_env *env,
1002 const struct cl_page_slice *slice,
1005 * Called when cached page is about to be added to the
1006 * cl_req as a part of req formation.
1008 * \return 0 : proceed with this page;
1009 * \return -EAGAIN : skip this page;
1010 * \return -ve : error.
1012 * \see cl_page_make_ready()
1014 int (*cpo_make_ready)(const struct lu_env *env,
1015 const struct cl_page_slice *slice);
1017 * Announce that this page is to be written out
1018 * opportunistically, that is, page is dirty, it is not
1019 * necessary to start write-out transfer right now, but
1020 * eventually page has to be written out.
1022 * Main caller of this is the write path (see
1023 * vvp_io_commit_write()), using this method to build a
1024 * "transfer cache" from which large transfers are then
1025 * constructed by the req-formation engine.
1027 * \todo XXX it would make sense to add page-age tracking
1028 * semantics here, and to oblige the req-formation engine to
1029 * send the page out before it becomes too old.
1031 * \see cl_page_cache_add()
1033 int (*cpo_cache_add)(const struct lu_env *env,
1034 const struct cl_page_slice *slice,
1038 * Tell the transfer engine that only the [from, to] part of a page should be
1041 * This is used for immediate transfers.
1043 * \todo XXX this is not very good interface. It would be much better
1044 * if all transfer parameters were supplied as arguments to
1045 * cl_io_operations::cio_submit() call, but it is not clear how to do
1046 * this for page queues.
1048 * \see cl_page_clip()
1050 void (*cpo_clip)(const struct lu_env *env,
1051 const struct cl_page_slice *slice,
1054 * \pre the page was queued for transferring.
1055 * \post the page is removed from the client's pending list, or -EBUSY
1056 * is returned if it is already being transferred.
1058 * This is one of the few page operations which is:
1059 * 0. called from the top level;
1060 * 1. called without the vmpage locked;
1061 * 2. every layer should synchronize execution of its ->cpo_cancel()
1062 *    with completion handlers. Osc uses the client obd lock for this
1063 *    purpose. Given that there is no vvp_page_cancel() and no
1064 *    lov_page_cancel(), cpo_cancel is de facto protected by the client lock.
1066 * \see osc_page_cancel().
1068 int (*cpo_cancel)(const struct lu_env *env,
1069 const struct cl_page_slice *slice);
1071 * Write out a page by kernel. This is only called by ll_writepage
1074 * \see cl_page_flush()
1076 int (*cpo_flush)(const struct lu_env *env,
1077 const struct cl_page_slice *slice,
1083 * Helper macro, dumping detailed information about \a page into a log.
1085 #define CL_PAGE_DEBUG(mask, env, page, format, ...) \
1087 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
1089 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
1090 cl_page_print(env, &msgdata, lu_cdebug_printer, page); \
1091 CDEBUG(mask, format , ## __VA_ARGS__); \
1096 * Helper macro, dumping shorter information about \a page into a log.
1098 #define CL_PAGE_HEADER(mask, env, page, format, ...) \
1100 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
1102 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
1103 cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \
1104 CDEBUG(mask, format , ## __VA_ARGS__); \
1108 static inline int __page_in_use(const struct cl_page *page, int refc)
1110 if (page->cp_type == CPT_CACHEABLE)
1112 LASSERT(cfs_atomic_read(&page->cp_ref) > 0);
1113 return (cfs_atomic_read(&page->cp_ref) > refc);
1115 #define cl_page_in_use(pg) __page_in_use(pg, 1)
1116 #define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
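/**
 * Usage sketch for the helpers above (illustrative only; assumes a pg and
 * env are in scope):
 *
 * \code
 * if (cl_page_in_use(pg))
 *         CL_PAGE_HEADER(D_PAGE, env, pg, "still in use, skipping\n");
 * \endcode
 */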
1120 /** \addtogroup cl_lock cl_lock
1124 * Extent locking on the client.
1128 * The locking model of the new client code is built around
1132 * data-type representing an extent lock on a regular file. cl_lock is a
1133 * layered object (much like cl_object and cl_page), it consists of a header
1134 * (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to
1135 * cl_lock::cll_layers list through cl_lock_slice::cls_linkage.
1137 * All locks for a given object are linked into cl_object_header::coh_locks
1138 * list (protected by cl_object_header::coh_lock_guard spin-lock) through
1139 * cl_lock::cll_linkage. Currently this list is not sorted in any way. We can
1140 * sort it by starting lock offset, or use an altogether different data structure
1143 * Typical cl_lock consists of the two layers:
1145 * - vvp_lock (vvp specific data), and
1146 * - lov_lock (lov specific data).
1148 * lov_lock contains an array of sub-locks. Each of these sub-locks is a
1149 * normal cl_lock: it has a header (struct cl_lock) and a list of layers:
1151 * - lovsub_lock, and
1154 * Each sub-lock is associated with a cl_object (representing stripe
1155 * sub-object or the file to which the top-level cl_lock is associated), and is
1156 * linked into that cl_object::coh_locks. In this respect cl_lock is similar to
1157 * cl_object (which at the lov layer also fans out into multiple sub-objects), and
1158 * is different from cl_page, which doesn't fan out (there is usually exactly
1159 * one osc_page for every vvp_page). We shall call the vvp-lov portion of the lock
1160 * a "top-lock" and its lovsub-osc portion a "sub-lock".
1164 * cl_lock is reference counted. When the reference counter drops to 0, the lock is
1165 * placed in the cache, except when the lock is in CLS_FREEING state. A CLS_FREEING
1166 * lock is destroyed when the last reference is released. Referencing between
1167 * top-lock and its sub-locks is described in the lov documentation module.
1171 * Also, cl_lock is a state machine. This requires some clarification. One of
1172 * the goals of the client IO re-write was to make the IO path non-blocking, or at
1173 * least to make it easier to make it non-blocking in the future. Here
1174 * `non-blocking' means that when a system call (read, write, truncate)
1175 * reaches a situation where it has to wait for a communication with the
1176 * server, it should --instead of waiting-- remember its current state and
1177 * switch to some other work. E.g., instead of waiting for a lock enqueue, the
1178 * client should proceed doing IO on the next stripe, etc. Obviously this is a
1179 * rather radical redesign, and it is not planned to be fully implemented at
1180 * this time; instead we are putting some infrastructure in place that would
1181 * make it easier to do asynchronous non-blocking IO in the
1182 * future. Specifically, where the old locking code goes to sleep (waiting for
1183 * enqueue, for example), new code returns cl_lock_transition::CLO_WAIT. When
1184 * enqueue reply comes, its completion handler signals that lock state-machine
1185 * is ready to transit to the next state. There is some generic code in
1186 * cl_lock.c that sleeps, waiting for these signals. As a result, for users of
1187 * this cl_lock.c code, it looks like locking is done in normal blocking
1188 * fashion, and at the same time it is possible to switch to the non-blocking
1189 * locking (simply by returning cl_lock_transition::CLO_WAIT from cl_lock.c
1192 * For a description of state machine states and transitions see enum
1195 * There are two ways to restrict the set of states which a lock might move to:
1197 * - placing a "hold" on a lock guarantees that lock will not be moved
1198 * into cl_lock_state::CLS_FREEING state until hold is released. Hold
1199 * can be only acquired on a lock that is not in
1200 * cl_lock_state::CLS_FREEING. All holds on a lock are counted in
1201 * cl_lock::cll_holds. Hold protects lock from cancellation and
1202 * destruction. Requests to cancel and destroy a lock on hold will be
1203 * recorded, but only honored when last hold on a lock is released;
1205 * - placing a "user" on a lock guarantees that lock will not leave
1206 * cl_lock_state::CLS_NEW, cl_lock_state::CLS_QUEUING,
1207 * cl_lock_state::CLS_ENQUEUED and cl_lock_state::CLS_HELD set of
1208 * states, once it enters this set. That is, if a user is added onto a
1209 *       lock in a state not from this set, it doesn't immediately force the
1210 *       lock to move to this set, but once the lock enters this set it will
1211 * remain there until all users are removed. Lock users are counted in
1212 * cl_lock::cll_users.
1214 *       A user is used to ensure that a lock is not canceled or destroyed while
1215 * it is being enqueued, or actively used by some IO.
1217 * Currently, a user always comes with a hold (cl_lock_invariant()
1218 * checks that a number of holds is not less than a number of users).
1222 * This is how lock state-machine operates. struct cl_lock contains a mutex
1223 * cl_lock::cll_guard that protects struct fields.
1225 * - mutex is taken, and cl_lock::cll_state is examined.
1227 * - for every state there are possible target states where lock can move
1228 * into. They are tried in order. Attempts to move into next state are
1229 * done by _try() functions in cl_lock.c:cl_{enqueue,unlock,wait}_try().
1231 * - if the transition can be performed immediately, state is changed,
1232 * and mutex is released.
1234 * - if the transition requires blocking, _try() function returns
1235 * cl_lock_transition::CLO_WAIT. Caller unlocks mutex and goes to
1236 * sleep, waiting for possibility of lock state change. It is woken
1237 * up when some event occurs, that makes lock state change possible
1238 * (e.g., the reception of the reply from the server), and repeats
1241 * Top-lock and sub-lock have separate mutexes and the latter has to be taken
1242 * first to avoid dead-lock.
1244 * To see an example of interaction of all these issues, take a look at the
1245 * lov_cl.c:lov_lock_enqueue() function. It is called as a part of
1246 * cl_enqueue_try(), and tries to advance top-lock to ENQUEUED state, by
1247 * advancing state-machines of its sub-locks (lov_lock_enqueue_one()). Note
1248 * also that it uses trylock to grab the sub-lock mutex to avoid dead-lock. It
1249 * also has to handle CEF_ASYNC enqueue, when sub-lock enqueues have to be
1250 * done in parallel, rather than one after another (this is used for glimpse
1251 * locks, that cannot dead-lock).
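 *
 * In schematic form (argument lists simplified and helper names assumed;
 * this is not a verbatim copy of cl_lock.c), the generic retry loop around a
 * _try() function looks roughly like:
 *
 * \code
 * // called with cl_lock::cll_guard held
 * do {
 *         result = cl_enqueue_try(env, lock, io, enqflags);
 *         if (result == CLO_WAIT) {
 *                 // drops cll_guard, sleeps on cll_wq, re-takes the mutex
 *                 result = cl_lock_state_wait(env, lock);
 *                 if (result == 0)
 *                         continue;       // event arrived, retry the transition
 *         }
 *         break;
 * } while (1);
 * \endcode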
1253 * INTERFACE AND USAGE
1255 * struct cl_lock_operations provides a number of call-backs that are invoked
1256 * when events of interest occur. Layers can intercept and handle glimpse,
1257 * blocking, cancel ASTs and a reception of the reply from the server.
1259 * One important difference from the old client locking model is that the new
1260 * client has a representation for the top-lock, whereas in the old code only
1261 * sub-locks existed as real data structures and file-level locks were
1262 * represented by "request sets" that were created and destroyed on each and
1263 * every lock creation.
1265 * Top-locks are cached, and can be found in the cache by the system calls. It
1266 * is possible that a top-lock is in the cache, but some of its sub-locks were
1267 * canceled and destroyed. In that case the top-lock has to be enqueued again
1268 * before it can be used.
1270 * The overall process of locking during an IO operation is as follows:
1272 * - once parameters for IO are set up in cl_io, cl_io_operations::cio_lock()
1273 *   is called on each layer. The responsibility of this method is to add the locks
1274 *   needed by a given layer into cl_io.ci_lockset.
1276 * - once locks for all layers have been collected, they are sorted to avoid
1277 * dead-locks (cl_io_locks_sort()), and enqueued.
1279 * - when all locks are acquired, IO is performed;
1281 * - locks are released into cache.
1283 * Striping introduces major additional complexity into locking. The
1284 * fundamental problem is that it is generally unsafe to actively use (hold)
1285 * two locks on the different OST servers at the same time, as this introduces
1286 * inter-server dependency and can lead to cascading evictions.
1288 * The basic solution is to sub-divide large read/write IOs into smaller pieces so
1289 * that no multi-stripe locks are taken (note that this design abandons POSIX
1290 * read/write semantics). Such pieces ideally can be executed concurrently. At
1291 * the same time, certain types of IO cannot be sub-divided without
1292 * sacrificing correctness. This includes:
1294 * - O_APPEND write, where [0, EOF] lock has to be taken, to guarantee
1297 * - ftruncate(fd, offset), where [offset, EOF] lock has to be taken.
1299 * Also, in the case of read(fd, buf, count) or write(fd, buf, count), where
1300 * buf is a part of memory mapped Lustre file, a lock or locks protecting buf
1301 * have to be held together with the usual lock on [offset, offset + count].
1303 * As multi-stripe locks have to be allowed, it makes sense to cache them, so
1304 * that, for example, a sequence of O_APPEND writes can proceed quickly
1305 * without going down to the individual stripes to do lock matching. On the
1306 * other hand, multi-stripe locks shouldn't be used by normal read/write
1307 * calls. To achieve this, every layer can implement the ->clo_fits_into() method,
1308 * which is called by the lock matching code (cl_lock_lookup()), and which can be
1309 * used to selectively disable matching of certain locks for certain IOs. For
1310 * example, the lov layer implements lov_lock_fits_into(), which allows multi-stripe
1311 * locks to be matched only for truncates and O_APPEND writes.
1313 * Interaction with DLM
1315 * In the expected setup, cl_lock is ultimately backed up by a collection of
1316 * DLM locks (struct ldlm_lock). Association between cl_lock and DLM lock is
1317 * implemented in osc layer, that also matches DLM events (ASTs, cancellation,
1318 * etc.) into cl_lock_operation calls. See struct osc_lock for a more detailed
1319 * description of interaction with DLM.
1325 struct cl_lock_descr {
1326 /** Object this lock is granted for. */
1327 struct cl_object *cld_obj;
1328 /** Index of the first page protected by this lock. */
1330 /** Index of the last page (inclusive) protected by this lock. */
1332 /** Group ID, for group lock */
1335 enum cl_lock_mode cld_mode;
1337 * flags to enqueue lock. A combination of bit-flags from
1338 * enum cl_enq_flags.
1340 __u32 cld_enq_flags;
1343 #define DDESCR "%s(%d):[%lu, %lu]"
1344 #define PDESCR(descr) \
1345 cl_lock_mode_name((descr)->cld_mode), (descr)->cld_mode, \
1346 (descr)->cld_start, (descr)->cld_end
1348 const char *cl_lock_mode_name(const enum cl_lock_mode mode);
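/**
 * Usage sketch for the format helpers above (illustrative only; assumes a
 * struct cl_lock *lock is in scope):
 *
 * \code
 * CDEBUG(D_DLMTRACE, "matched lock: " DDESCR "\n", PDESCR(&lock->cll_descr));
 * \endcode
 */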
1351 * Lock state-machine states.
1356 * Possible state transitions:
1358 * +------------------>NEW
1360 * | | cl_enqueue_try()
1362 * | cl_unuse_try() V
1363 * | +--------------QUEUING (*)
1365 * | | | cl_enqueue_try()
1367 * | | cl_unuse_try() V
1368 * sub-lock | +-------------ENQUEUED (*)
1370 * | | | cl_wait_try()
1375 * | | HELD<---------+
1377 * | | | | cl_use_try()
1378 * | | cl_unuse_try() | |
1381 * | +------------>INTRANSIT (D) <--+
1383 * | cl_unuse_try() | | cached lock found
1384 * | | | cl_use_try()
1387 * +------------------CACHED---------+
1396 * In states marked with (*) transition to the same state (i.e., a loop
1397 * in the diagram) is possible.
1399 * (R) is the point where Receive call-back is invoked: it allows layers
1400 * to handle arrival of lock reply.
1402 * (C) is the point where Cancellation call-back is invoked.
1404 * (D) is the transit state which means the lock is changing.
1406 * Transition to FREEING state is possible from any other state in the
1407 * diagram in case of unrecoverable error.
1411 * These states are for an individual cl_lock object. A top-lock and its sub-locks
1412 * can be in different states. Another way to say this is that we have
1413 * nested state-machines.
1415 * Separate QUEUING and ENQUEUED states are needed to support non-blocking
1416 * operation for locks with multiple sub-locks. Imagine lock on a file F, that
1417 * intersects 3 stripes S0, S1, and S2. To enqueue F the client has to send
1418 * an enqueue to S0, wait for its completion, then send an enqueue for S1, wait for
1419 * its completion and at last enqueue a lock for S2, and wait for its
1420 * completion. In that case, top-lock is in QUEUING state while S0, S1 are
1421 * handled, and is in ENQUEUED state after enqueue to S2 has been sent (note
1422 * that in this case, sub-locks move from state to state, and top-lock remains
1423 * in the same state).
1425 enum cl_lock_state {
1427 * Lock that wasn't yet enqueued
1431 * Enqueue is in progress, blocking for some intermediate interaction
1432 * with the other side.
1436 * Lock is fully enqueued, waiting for server to reply when it is
1441 * Lock granted, actively used by some IO.
1445 * This state is used to mark that the lock is being used, or unused.
1446 * We need this state because the lock may have several sublocks,
1447 * so it's impossible to have an atomic way to bring all sublocks
1448 * into the CLS_HELD state when the lock is used, or all sublocks into CLS_CACHED
1450 * If a thread is referring to a lock, and it sees the lock is in this
1451 * state, it must wait for the lock.
1452 * See state diagram for details.
1456 * Lock granted, not used.
1460 * Lock is being destroyed.
1466 enum cl_lock_flags {
1468 * lock has been cancelled. This flag is never cleared once set (by
1469 * cl_lock_cancel0()).
1471 CLF_CANCELLED = 1 << 0,
1472 /** cancellation is pending for this lock. */
1473 CLF_CANCELPEND = 1 << 1,
1474 /** destruction is pending for this lock. */
1475 CLF_DOOMED = 1 << 2,
1476 /** from enqueue RPC reply upcall. */
1477 CLF_FROM_UPCALL= 1 << 3,
1483 * Lock closure is a collection of locks (both top-locks and sub-locks) that
1484 * might be updated as a result of an operation on a certain lock (which lock
1485 * this is a closure of).
1487 * Closures are needed to guarantee dead-lock freedom in the presence of
1489 * - nested state-machines (top-lock state-machine composed of sub-lock
1490 * state-machines), and
1492 * - shared sub-locks.
1494 * Specifically, many operations, such as lock enqueue, wait, unlock,
1495 * etc. start from a top-lock, and then operate on sub-locks of this
1496 * top-lock, holding a top-lock mutex. When sub-lock state changes as a result
1497 * of such operation, this change has to be propagated to all top-locks that
1498 * share this sub-lock. Obviously, no natural lock ordering (e.g.,
1499 * top-to-bottom or bottom-to-top) captures this scenario, so try-locking has
1500 * to be used. Lock closure systematizes this try-and-repeat logic.
1502 struct cl_lock_closure {
1504 * Lock that is mutexed when closure construction is started. When
1505 * the closure is in `wait' mode (cl_lock_closure::clc_wait), the mutex on the
1506 * origin is released before waiting.
1508 struct cl_lock *clc_origin;
1510 * List of enclosed locks, so far. Locks are linked here through
1511 * cl_lock::cll_inclosure.
1513 cfs_list_t clc_list;
1515 * True iff closure is in a `wait' mode. This determines what
1516 * cl_lock_enclosure() does when a lock L to be added to the closure
1517 * is currently mutexed by some other thread.
1519 * If cl_lock_closure::clc_wait is not set, then closure construction
1520 * fails with CLO_REPEAT immediately.
1522 * In wait mode, cl_lock_enclosure() waits until the next attempt to build
1523 * a closure might succeed. To this end it releases the origin mutex
1524 * (cl_lock_closure::clc_origin), which has to be the only lock mutex
1525 * owned by the current thread, and then waits on the L mutex (by grabbing
1526 * it and immediately releasing it), before returning CLO_REPEAT to the
1530 /** Number of locks in the closure. */
1535 * Layered client lock.
1538 /** Reference counter. */
1539 cfs_atomic_t cll_ref;
1540 /** List of slices. Immutable after creation. */
1541 cfs_list_t cll_layers;
1543 * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
1544 * by cl_lock::cll_descr::cld_obj::coh_lock_guard.
1546 cfs_list_t cll_linkage;
1548 * Parameters of this lock. Protected by
1549 * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
1550 * cl_lock::cll_guard. Modified only on lock creation and in
1553 struct cl_lock_descr cll_descr;
1554 /** Protected by cl_lock::cll_guard. */
1555 enum cl_lock_state cll_state;
1556 /** signals state changes. */
1557 wait_queue_head_t cll_wq;
1559 * Recursive lock, most fields in cl_lock{} are protected by this.
1561 * Locking rules: this mutex is never held across network
1562 * communication, except when lock is being canceled.
1564 * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
1565 * on a top-lock. The other direction is implemented through a
1566 * try-lock-repeat loop. Mutexes of unrelated locks can be taken only
1569 * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
1571 struct mutex cll_guard;
1572 struct task_struct *cll_guarder;
1576 * the owner for INTRANSIT state
1578 struct task_struct *cll_intransit_owner;
1581 * Number of holds on a lock. A hold prevents a lock from being
1582 * canceled and destroyed. Protected by cl_lock::cll_guard.
1584 * \see cl_lock_hold(), cl_lock_unhold(), cl_lock_release()
1588 * Number of lock users. Valid in cl_lock_state::CLS_HELD state
1589 * only. Lock user pins lock in CLS_HELD state. Protected by
1590 * cl_lock::cll_guard.
1592 * \see cl_wait(), cl_unuse().
1596 * Flag bit-mask. Values from enum cl_lock_flags. Updates are
1597 * protected by cl_lock::cll_guard.
1599 unsigned long cll_flags;
1601 * A linkage into a list of locks in a closure.
1603 * \see cl_lock_closure
1605 cfs_list_t cll_inclosure;
1607 * Conflict lock at queuing time.
1609 struct cl_lock *cll_conflict;
1611 * A list of references to this lock, for debugging.
1613 struct lu_ref cll_reference;
1615 * A list of holds on this lock, for debugging.
1617 struct lu_ref cll_holders;
1619 * A reference for cl_lock::cll_descr::cld_obj. For debugging.
1621 struct lu_ref_link cll_obj_ref;
1622 #ifdef CONFIG_LOCKDEP
1623 /* "dep_map" name is assumed by lockdep.h macros. */
1624 struct lockdep_map dep_map;
1629 * Per-layer part of cl_lock
1631 * \see ccc_lock, lov_lock, lovsub_lock, osc_lock
1633 struct cl_lock_slice {
1634 struct cl_lock *cls_lock;
1635 /** Object slice corresponding to this lock slice. Immutable after
1637 struct cl_object *cls_obj;
1638 const struct cl_lock_operations *cls_ops;
1639 /** Linkage into cl_lock::cll_layers. Immutable after creation. */
1640 cfs_list_t cls_linkage;
1644 * Possible (non-error) return values of ->clo_{enqueue,wait,unlock}().
1646 * NOTE: lov_subresult() depends on ordering here.
1648 enum cl_lock_transition {
1649 /** operation cannot be completed immediately. Wait for state change. */
1651 /** operation had to release lock mutex, restart. */
1653 /** lower layer re-enqueued. */
1659 * \see vvp_lock_ops, lov_lock_ops, lovsub_lock_ops, osc_lock_ops
1661 struct cl_lock_operations {
1663 * \name statemachine
1665 * State machine transitions. These 3 methods are called to transfer
1666 * lock from one state to another, as described in the commentary
1667 * above enum #cl_lock_state.
1669 * \retval 0          this layer has nothing more to do before the
1670 *                    transition to the target state happens;
1672 * \retval CLO_REPEAT method had to release and re-acquire cl_lock
1673 * mutex, repeat invocation of transition method
1674 * across all layers;
1676 * \retval CLO_WAIT this layer cannot move to the target state
1677 * immediately, as it has to wait for certain event
1678 * (e.g., the communication with the server). It
1679 * is guaranteed, that when the state transfer
1680 * becomes possible, cl_lock::cll_wq wait-queue
1681 * is signaled. Caller can wait for this event by
1682 * calling cl_lock_state_wait();
1684 * \retval -ve failure, abort state transition, move the lock
1685 * into cl_lock_state::CLS_FREEING state, and set
1686 * cl_lock::cll_error.
1688 * Once all layers have voted to agree to the transition (by returning 0), the lock
1689 * is moved into the corresponding target state. All state transition
1690 * methods are optional.
1694 * Attempts to enqueue the lock. Called top-to-bottom.
1696 * \see ccc_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
1697 * \see osc_lock_enqueue()
1699 int (*clo_enqueue)(const struct lu_env *env,
1700 const struct cl_lock_slice *slice,
1701 struct cl_io *io, __u32 enqflags);
1703 * Attempts to wait for enqueue result. Called top-to-bottom.
1705 * \see ccc_lock_wait(), lov_lock_wait(), osc_lock_wait()
1707 int (*clo_wait)(const struct lu_env *env,
1708 const struct cl_lock_slice *slice);
1710 * Attempts to unlock the lock. Called bottom-to-top. In addition to
1711 * usual return values of lock state-machine methods, this can return
1712 * -ESTALE to indicate that lock cannot be returned to the cache, and
1713 * has to be re-initialized.
1714 * unuse is a one-shot operation, so it must NOT return CLO_WAIT.
1716 * \see ccc_lock_unuse(), lov_lock_unuse(), osc_lock_unuse()
1718 int (*clo_unuse)(const struct lu_env *env,
1719 const struct cl_lock_slice *slice);
1721 * Notifies a layer that a cached lock has started being used.
1723 * \pre lock->cll_state == CLS_CACHED
1725 * \see lov_lock_use(), osc_lock_use()
1727 int (*clo_use)(const struct lu_env *env,
1728 const struct cl_lock_slice *slice);
1729 /** @} statemachine */
1731 * A method invoked when lock state is changed (as a result of state
1732 * transition). This is used, for example, to track when the state of
1733 * a sub-lock changes, to propagate this change to the corresponding
1734 * top-lock. Optional
1736 * \see lovsub_lock_state()
1738 void (*clo_state)(const struct lu_env *env,
1739 const struct cl_lock_slice *slice,
1740 enum cl_lock_state st);
1742 * Returns true iff the given lock is suitable for the given io, the idea
1743 * being that there are certain "unsafe" locks, e.g., ones acquired
1744 * for O_APPEND writes, that we don't want to re-use for a normal
1745 * write, to avoid the danger of cascading evictions. Optional. Runs
1746 * under cl_object_header::coh_lock_guard.
1748 * XXX this should take more information about lock needed by
1749 * io. Probably lock description or something similar.
1751 * \see lov_fits_into()
1753 int (*clo_fits_into)(const struct lu_env *env,
1754 const struct cl_lock_slice *slice,
1755 const struct cl_lock_descr *need,
1756 const struct cl_io *io);
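/**
 * Illustrative sketch of a ->clo_fits_into() in the spirit of
 * lov_lock_fits_into(): only let a whole-file lock match when the io really
 * asked for a whole-file extent. A simplified, assumption-laden example, not
 * the actual lov logic.
 *
 * \code
 * static int foo_lock_fits_into(const struct lu_env *env,
 *                               const struct cl_lock_slice *slice,
 *                               const struct cl_lock_descr *need,
 *                               const struct cl_io *io)
 * {
 *         const struct cl_lock_descr *has = &slice->cls_lock->cll_descr;
 *
 *         if (has->cld_start == 0 && has->cld_end == CL_PAGE_EOF)
 *                 // a [0, EOF] lock is matched only for ios that asked for it
 *                 return need->cld_start == 0 && need->cld_end == CL_PAGE_EOF;
 *         return 1; // everything else matches normally
 * }
 * \endcode
 */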
1759 * Asynchronous System Traps. All of them are optional, all are
1760 * executed bottom-to-top.
1765 * Cancellation callback. Cancel a lock voluntarily, or at
1766 * the request of the server.
1768 void (*clo_cancel)(const struct lu_env *env,
1769 const struct cl_lock_slice *slice);
1771 * Lock weighting ast. Executed to estimate how precious this lock
1772 * is. The sum of results across all layers is used to determine
1773 * whether the lock is worth keeping in cache given present memory usage.
1775 * \see osc_lock_weigh(), vvp_lock_weigh(), lovsub_lock_weigh().
1777 unsigned long (*clo_weigh)(const struct lu_env *env,
1778 const struct cl_lock_slice *slice);
1782 * \see lovsub_lock_closure()
1784 int (*clo_closure)(const struct lu_env *env,
1785 const struct cl_lock_slice *slice,
1786 struct cl_lock_closure *closure);
1788 * Executed bottom-to-top when the lock description changes (e.g., as a
1789 * result of the server granting a more generous lock than was requested).
1791 * \see lovsub_lock_modify()
1793 int (*clo_modify)(const struct lu_env *env,
1794 const struct cl_lock_slice *slice,
1795 const struct cl_lock_descr *updated);
1797 * Notifies layers (bottom-to-top) that lock is going to be
1798 * destroyed. It is the responsibility of layers to prevent new references
1799 * to this lock from being acquired once this method returns.
1801 * This can be called multiple times due to races.
1803 * \see cl_lock_delete()
1804 * \see osc_lock_delete(), lovsub_lock_delete()
1806 void (*clo_delete)(const struct lu_env *env,
1807 const struct cl_lock_slice *slice);
1809 * Destructor. Frees resources and the slice.
1811 * \see ccc_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
1812 * \see osc_lock_fini()
1814 void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice);
1816 * Optional debugging helper. Prints given lock slice.
1818 int (*clo_print)(const struct lu_env *env,
1819 void *cookie, lu_printer_t p,
1820 const struct cl_lock_slice *slice);
1823 #define CL_LOCK_DEBUG(mask, env, lock, format, ...) \
1824 do { \
1825 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
1827 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
1828 cl_lock_print(env, &msgdata, lu_cdebug_printer, lock); \
1829 CDEBUG(mask, format , ## __VA_ARGS__); \
1830 } \
1831 } while (0)
1833 #define CL_LOCK_ASSERT(expr, env, lock) do { \
1834 if (likely(expr)) \
1835 break; \
1837 CL_LOCK_DEBUG(D_ERROR, env, lock, "failed at %s.\n", #expr); \
1838 LBUG(); \
1839 } while (0)
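/*
 * Usage illustration only (the message and the state check below are
 * hypothetical, as are the env/lock/state variables; D_ERROR and CLS_HELD are
 * taken from this header):
 *
 * \code
 *	CL_LOCK_DEBUG(D_ERROR, env, lock, "unexpected state %d\n", state);
 *	CL_LOCK_ASSERT(lock->cll_state == CLS_HELD, env, lock);
 * \endcode
 */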
1843 /** \addtogroup cl_page_list cl_page_list
1844 * Page list used to perform collective operations on a group of pages.
1846 * Pages are added to the list one by one. cl_page_list acquires a reference
1847 * for every page in it. A page list is used to perform collective operations on pages:
1850 * - submit pages for an immediate transfer,
1852 * - own pages on behalf of certain io (waiting for each page in turn),
1856 * When list is finalized, it releases references on all pages it still has.
1858 * \todo XXX concurrency control.
1862 struct cl_page_list {
1864 cfs_list_t pl_pages;
1865 struct task_struct *pl_owner;
1869 * A 2-queue of pages. A convenience data-type for the common use case: a
1870 * 2-queue contains an incoming page list and an outgoing page list.
1873 struct cl_page_list c2_qin;
1874 struct cl_page_list c2_qout;
1877 /** @} cl_page_list */
1879 /** \addtogroup cl_io cl_io
1884 * cl_io represents a high level I/O activity like
1885 * read(2)/write(2)/truncate(2) system call, or cancellation of an extent lock.
1888 * cl_io is a layered object, much like cl_{object,page,lock} but with one
1889 * important distinction. We want to minimize number of calls to the allocator
1890 * in the fast path, e.g., in the case of read(2) when everything is cached:
1891 * client already owns the lock over region being read, and data are cached
1892 * due to read-ahead. To avoid allocation of cl_io layers in such situations,
1893 * per-layer io state is stored in the session, associated with the io, see
1894 * struct {vvp,lov,osc}_io for example. Sessions allocation is amortized
1895 * by using free-lists, see cl_env_get().
1897 * There is a small predefined number of possible io types, enumerated in enum cl_io_type.
1900 * cl_io is a state machine, that can be advanced concurrently by the multiple
1901 * threads. It is up to these threads to control the concurrency and,
1902 * specifically, to detect when io is done, and its state can be safely released.
1905 * For read/write io overall execution plan is as following:
1907 * (0) initialize io state through all layers;
1909 * (1) loop: prepare chunk of work to do
1911 * (2) call all layers to collect locks they need to process current chunk
1913 * (3) sort all locks to avoid dead-locks, and acquire them
1915 * (4) process the chunk: call per-page methods
1916 * (cl_io_operations::cio_read_page() for read,
1917 * cl_io_operations::cio_prepare_write(),
1918 * cl_io_operations::cio_commit_write() for write)
1924 * To implement the "parallel IO mode", lov layer creates sub-io's (lazily to
1925 * address allocation efficiency issues mentioned above), and returns with the
1926 * special error condition from per-page method when current sub-io has to
1927 * block. This causes io loop to be repeated, and lov switches to the next
1928 * sub-io in its cl_io_operations::cio_iter_init() implementation.
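 *
 * As an illustration only, the loop above roughly corresponds to the following
 * sequence of calls to the generic interfaces declared later in this header
 * (error handling omitted; this is a sketch of what cl_io_loop() conceptually
 * does, not a verbatim copy of its implementation):
 *
 * \code
 *	cl_io_rw_init(env, io, CIT_WRITE, pos, count);
 *	do {
 *		cl_io_iter_init(env, io);
 *		cl_io_lock(env, io);
 *		cl_io_start(env, io);
 *		cl_io_end(env, io);
 *		cl_io_unlock(env, io);
 *		cl_io_iter_fini(env, io);
 *	} while (io->ci_continue);
 *	cl_io_fini(env, io);
 * \endcode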
1933 /** read system call */
1935 /** write system call */
1937 /** truncate, utime system calls */
1940 * page fault handling
1944 * fsync system call handling
1945 * To write out a range of file
1949 * Miscellaneous io. This is used for occasional io activity that
1950 * doesn't fit into other types. Currently this is used for:
1952 * - cancellation of an extent lock. This io exists as a context
1953 * to write dirty pages from under the lock being canceled back
1956 * - VM induced page write-out. An io context for writing page out
1957 * for memory cleansing;
1959 * - glimpse. An io context to acquire glimpse lock.
1961 * - grouplock. An io context to acquire group lock.
1963 * CIT_MISC io is used simply as a context in which locks and pages
1964 * are manipulated. Such io has no internal "process", that is,
1965 * cl_io_loop() is never called for it.
1972 * States of cl_io state machine
1975 /** Not initialized. */
1979 /** IO iteration started. */
1983 /** Actual IO is in progress. */
1985 /** IO for the current iteration finished. */
1987 /** Locks released. */
1989 /** Iteration completed. */
1991 /** cl_io finalized. */
1996 * IO state private for a layer.
1998 * This is usually embedded into layer session data, rather than allocated dynamically.
2001 * \see vvp_io, lov_io, osc_io, ccc_io
2003 struct cl_io_slice {
2004 struct cl_io *cis_io;
2005 /** corresponding object slice. Immutable after creation. */
2006 struct cl_object *cis_obj;
2007 /** io operations. Immutable after creation. */
2008 const struct cl_io_operations *cis_iop;
2010 * linkage into a list of all slices for a given cl_io, hanging off
2011 * cl_io::ci_layers. Immutable after creation.
2013 cfs_list_t cis_linkage;
2018 * Per-layer io operations.
2019 * \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
2021 struct cl_io_operations {
2023 * Vector of io state transition methods for every io type.
2025 * \see cl_page_operations::io
2029 * Prepare io iteration at a given layer.
2031 * Called top-to-bottom at the beginning of each iteration of
2032 * "io loop" (if it makes sense for this type of io). Here
2033 * layer selects what work it will do during this iteration.
2035 * \see cl_io_operations::cio_iter_fini()
2037 int (*cio_iter_init) (const struct lu_env *env,
2038 const struct cl_io_slice *slice);
2040 * Finalize io iteration.
2042 * Called bottom-to-top at the end of each iteration of "io
2043 * loop". Here layers can decide whether IO has to be
2046 * \see cl_io_operations::cio_iter_init()
2048 void (*cio_iter_fini) (const struct lu_env *env,
2049 const struct cl_io_slice *slice);
2051 * Collect locks for the current iteration of io.
2053 * Called top-to-bottom to collect all locks necessary for
2054 * this iteration. This method shouldn't actually enqueue
2055 * anything, instead it should post a lock through
2056 * cl_io_lock_add(). Once all locks are collected, they are
2057 * sorted and enqueued in the proper order.
2059 int (*cio_lock) (const struct lu_env *env,
2060 const struct cl_io_slice *slice);
2062 * Finalize unlocking.
2064 * Called bottom-to-top to finish layer specific unlocking
2065 * functionality, after generic code released all locks
2066 * acquired by cl_io_operations::cio_lock().
2068 void (*cio_unlock)(const struct lu_env *env,
2069 const struct cl_io_slice *slice);
2071 * Start io iteration.
2073 * Once all locks are acquired, called top-to-bottom to
2074 * commence actual IO. In the current implementation,
2075 * top-level vvp_io_{read,write}_start() does all the work
2076 * synchronously by calling generic_file_*(), so other layers
2077 * are called when everything is done.
2079 int (*cio_start)(const struct lu_env *env,
2080 const struct cl_io_slice *slice);
2082 * Called top-to-bottom at the end of io loop. Here layer
2083 * might wait for an unfinished asynchronous io.
2085 void (*cio_end) (const struct lu_env *env,
2086 const struct cl_io_slice *slice);
2088 * Called bottom-to-top to notify layers that read/write IO
2089 * iteration finished, with \a nob bytes transferred.
2091 void (*cio_advance)(const struct lu_env *env,
2092 const struct cl_io_slice *slice,
2095 * Called once per io, bottom-to-top to release io resources.
2097 void (*cio_fini) (const struct lu_env *env,
2098 const struct cl_io_slice *slice);
2102 * Submit pages from \a queue->c2_qin for IO, and move
2103 * successfully submitted pages into \a queue->c2_qout. Return
2104 * non-zero if it failed to submit even a single page. If
2105 * submission failed after some pages were moved into \a
2106 * queue->c2_qout, the completion callback is invoked on them with a non-zero ioret.
2109 int (*cio_submit)(const struct lu_env *env,
2110 const struct cl_io_slice *slice,
2111 enum cl_req_type crt,
2112 struct cl_2queue *queue);
2115 * Read missing page.
2117 * Called by a top-level cl_io_operations::op[CIT_READ]::cio_start()
2118 * method, when it hits a not-up-to-date page in the range. Optional.
2120 * \pre io->ci_type == CIT_READ
2122 int (*cio_read_page)(const struct lu_env *env,
2123 const struct cl_io_slice *slice,
2124 const struct cl_page_slice *page);
2126 * Prepare write of a \a page. Called bottom-to-top by a top-level
2127 * cl_io_operations::op[CIT_WRITE]::cio_start() to prepare the page to
2128 * receive data from the user-level buffer.
2130 * \pre io->ci_type == CIT_WRITE
2132 * \see vvp_io_prepare_write(), lov_io_prepare_write(),
2133 * osc_io_prepare_write().
2135 int (*cio_prepare_write)(const struct lu_env *env,
2136 const struct cl_io_slice *slice,
2137 const struct cl_page_slice *page,
2138 unsigned from, unsigned to);
2141 * \pre io->ci_type == CIT_WRITE
2143 * \see vvp_io_commit_write(), lov_io_commit_write(),
2144 * osc_io_commit_write().
2146 int (*cio_commit_write)(const struct lu_env *env,
2147 const struct cl_io_slice *slice,
2148 const struct cl_page_slice *page,
2149 unsigned from, unsigned to);
2151 * Optional debugging helper. Print given io slice.
2153 int (*cio_print)(const struct lu_env *env, void *cookie,
2154 lu_printer_t p, const struct cl_io_slice *slice);
2158 * Flags to lock enqueue procedure.
2163 * instruct the server not to block if a conflicting lock is found;
2164 * instead, -EWOULDBLOCK is returned immediately.
2166 CEF_NONBLOCK = 0x00000001,
2168 * take lock asynchronously (out of order), as it cannot
2169 * deadlock. This is for LDLM_FL_HAS_INTENT locks used for glimpsing.
2171 CEF_ASYNC = 0x00000002,
2173 * tell the server to instruct (through a flag in the blocking ast) an
2174 * owner of the conflicting lock, that it can drop dirty pages
2175 * protected by this lock, without sending them to the server.
2177 CEF_DISCARD_DATA = 0x00000004,
2179 * tell the sub layers that it must be a `real' lock. This is used for
2180 * mmapped-buffer locks and glimpse locks that must never be converted
2181 * into lockless mode.
2183 * \see vvp_mmap_locks(), cl_glimpse_lock().
2185 CEF_MUST = 0x00000008,
2187 * tell the sub layers to never request a `real' lock. This flag is
2188 * not used currently.
2190 * cl_io::ci_lockreq and CEF_{MUST,NEVER} flags specify lockless
2191 * conversion policy: ci_lockreq describes generic information of lock
2192 * requirement for this IO, especially for locks which belong to the
2193 * object doing IO; however, lock itself may have precise requirements
2194 * that are described by the enqueue flags.
2196 CEF_NEVER = 0x00000010,
2198 * for async glimpse lock.
2200 CEF_AGL = 0x00000020,
2202 * mask of enq_flags.
2204 CEF_MASK = 0x0000003f,
2208 * Link between lock and io. Intermediate structure is needed, because the
2209 * same lock can be part of multiple io's simultaneously.
2211 struct cl_io_lock_link {
2212 /** linkage into one of cl_lockset lists. */
2213 cfs_list_t cill_linkage;
2214 struct cl_lock_descr cill_descr;
2215 struct cl_lock *cill_lock;
2216 /** optional destructor */
2217 void (*cill_fini)(const struct lu_env *env,
2218 struct cl_io_lock_link *link);
2222 * Lock-set represents a collection of locks, that io needs at a
2223 * time. Generally speaking, client tries to avoid holding multiple locks when possible, because:
2226 * - holding extent locks over multiple ost's introduces the danger of
2227 * "cascading timeouts";
2229 * - holding multiple locks over the same ost is still dead-lock prone,
2230 * see comment in osc_lock_enqueue(),
2232 * but there are certain situations where this is unavoidable:
2234 * - O_APPEND writes have to take [0, EOF] lock for correctness;
2236 * - truncate has to take [new-size, EOF] lock for correctness;
2238 * - SNS has to take locks across full stripe for correctness;
2240 * - in the case when a user level buffer, supplied to {read,write}(file0),
2241 * is a part of a memory mapped lustre file, the client has to take dlm
2242 * locks on file0, and on all files that back up the buffer (or the part
2243 * of the buffer that is being processed in the current chunk); in any
2244 * case, there are situations where at least 2 locks are necessary.
2246 * In such cases we at least try to take locks in the same consistent
2247 * order. To this end, all locks are first collected, then sorted, and then enqueued.
2251 /** locks to be acquired. */
2252 cfs_list_t cls_todo;
2253 /** locks currently being processed. */
2254 cfs_list_t cls_curr;
2255 /** locks acquired. */
2256 cfs_list_t cls_done;
2260 * Lock requirements (demand) for IO. It should be cl_io_lock_req,
2261 * but 'req' is always thought of as 'request' :-)
2263 enum cl_io_lock_dmd {
2264 /** Always lock data (e.g., O_APPEND). */
2266 /** Layers are free to decide between local and global locking. */
2268 /** Never lock: there is no cache (e.g., liblustre). */
2272 enum cl_fsync_mode {
2273 /** start writeback, do not wait for them to finish */
2275 /** start writeback and wait for them to finish */
2277 /** discard all of dirty pages in a specific file range */
2278 CL_FSYNC_DISCARD = 2,
2279 /** start writeback and make sure the data has reached storage before
2280 * returning. An OST_SYNC RPC must be issued and finished */
2284 struct cl_io_rw_common {
2294 * cl_io is shared by all threads participating in this IO (in the current
2295 * implementation only one thread advances IO, but parallel IO design and
2296 * concurrent copy_*_user() require multiple threads acting on the same IO).
2297 * It is up to these threads to serialize their activities, including updates
2298 * to mutable cl_io fields.
2301 /** type of this IO. Immutable after creation. */
2302 enum cl_io_type ci_type;
2303 /** current state of cl_io state machine. */
2304 enum cl_io_state ci_state;
2305 /** main object this io is against. Immutable after creation. */
2306 struct cl_object *ci_obj;
2308 * Upper layer io, of which this io is a part. Immutable after creation.
2311 struct cl_io *ci_parent;
2312 /** List of slices. Immutable after creation. */
2313 cfs_list_t ci_layers;
2314 /** list of locks (to be) acquired by this io. */
2315 struct cl_lockset ci_lockset;
2316 /** lock requirements, this is just a help info for sublayers. */
2317 enum cl_io_lock_dmd ci_lockreq;
2320 struct cl_io_rw_common rd;
2323 struct cl_io_rw_common wr;
2327 struct cl_io_rw_common ci_rw;
2328 struct cl_setattr_io {
2329 struct ost_lvb sa_attr;
2330 unsigned int sa_valid;
2331 struct obd_capa *sa_capa;
2333 struct cl_fault_io {
2334 /** page index within file. */
2336 /** number of valid bytes on the faulted page. */
2338 /** writable page? for nopage() only */
2340 /** page of an executable? */
2342 /** page_mkwrite() */
2344 /** resulting page */
2345 struct cl_page *ft_page;
2347 struct cl_fsync_io {
2350 struct obd_capa *fi_capa;
2351 /** file system level fid */
2352 struct lu_fid *fi_fid;
2353 enum cl_fsync_mode fi_mode;
2354 /* how many pages were written/discarded */
2355 unsigned int fi_nr_written;
2358 struct cl_2queue ci_queue;
2361 unsigned int ci_continue:1,
2363 * This io holds a grouplock, to inform sublayers that they must
2364 * not do lockless i/o.
2368 * The whole IO needs to be restarted because the layout has been changed
2372 * to not refresh the layout - the IO issuer knows that the layout won't
2373 * change (page operations; a layout change causes all pages to be
2374 * discarded), or it doesn't matter if it changes (sync).
2378 * Check if layout changed after the IO finishes. Mainly for HSM
2379 * requirement. If IO occurs to open files, it doesn't need to
2380 * verify layout because HSM won't release open files.
2381 * Right now, only two operations need to verify layout: glimpse
2386 * file is released, restore has to be triggered by vvp layer
2388 ci_restore_needed:1,
2394 * Number of pages owned by this IO. For invariant checking.
2396 unsigned ci_owned_nr;
2401 /** \addtogroup cl_req cl_req
2406 * There are two possible modes of transfer initiation on the client:
2408 * - immediate transfer: this is started when a high level io wants a page
2409 * or a collection of pages to be transferred right away. Examples:
2410 * read-ahead, synchronous read in the case of non-page aligned write,
2411 * page write-out as a part of extent lock cancellation, page write-out
2412 * as a part of memory cleansing. Immediate transfer can be both
2413 * cl_req_type::CRT_READ and cl_req_type::CRT_WRITE;
2415 * - opportunistic transfer (cl_req_type::CRT_WRITE only), that happens
2416 * when io wants to transfer a page to the server some time later, when
2417 * it can be done efficiently. Example: pages dirtied by the write(2)
2420 * In any case, transfer takes place in the form of a cl_req, which is a
2421 * representation for a network RPC.
2423 * Pages queued for an opportunistic transfer are cached until it is decided
2424 * that efficient RPC can be composed of them. This decision is made by "a
2425 * req-formation engine", currently implemented as a part of osc
2426 * layer. Req-formation depends on many factors: the size of the resulting
2427 * RPC, whether or not multi-object RPCs are supported by the server,
2428 * max-rpc-in-flight limitations, size of the dirty cache, etc.
2430 * For the immediate transfer io submits a cl_page_list, that req-formation
2431 * engine slices into cl_req's, possibly adding cached pages to some of
2432 * the resulting req's.
2434 * Whenever a page from cl_page_list is added to a newly constructed req, its
2435 * cl_page_operations::cpo_prep() layer methods are called. At that moment,
2436 * page state is atomically changed from cl_page_state::CPS_OWNED to
2437 * cl_page_state::CPS_PAGEOUT or cl_page_state::CPS_PAGEIN, cl_page::cp_owner
2438 * is zeroed, and cl_page::cp_req is set to the
2439 * req. cl_page_operations::cpo_prep() method at the particular layer might
2440 * return -EALREADY to indicate that it does not need to submit this page
2441 * at all. This is possible, for example, if page, submitted for read,
2442 * became up-to-date in the meantime; and for write, the page does not
2443 * have the dirty bit set. \see cl_io_submit_rw()
2445 * Whenever a cached page is added to a newly constructed req, its
2446 * cl_page_operations::cpo_make_ready() layer methods are called. At that
2447 * moment, page state is atomically changed from cl_page_state::CPS_CACHED to
2448 * cl_page_state::CPS_PAGEOUT, and cl_page::cp_req is set to
2449 * req. cl_page_operations::cpo_make_ready() method at the particular layer
2450 * might return -EAGAIN to indicate that this page is not eligible for the
2451 * transfer right now.
2455 * The plan is to divide transfers into "priority bands" (indicated when
2456 * submitting cl_page_list, and queuing a page for the opportunistic transfer)
2457 * and allow gluing of cached pages to immediate transfers only within a single
2458 * band. This would make high priority transfers (like lock cancellation or
2459 * memory pressure induced write-out) really high priority.
2464 * Per-transfer attributes.
2466 struct cl_req_attr {
2467 /** Generic attributes for the server consumption. */
2468 struct obdo *cra_oa;
2470 struct obd_capa *cra_capa;
2472 char cra_jobid[JOBSTATS_JOBID_SIZE];
2476 * Transfer request operations definable at every layer.
2478 * Concurrency: transfer formation engine synchronizes calls to all transfer
2481 struct cl_req_operations {
2483 * Invoked top-to-bottom by cl_req_prep() when transfer formation is
2484 * complete (all pages are added).
2486 * \see osc_req_prep()
2488 int (*cro_prep)(const struct lu_env *env,
2489 const struct cl_req_slice *slice);
2491 * Called top-to-bottom to fill in \a oa fields. This is called twice
2492 * with different flags, see bug 10150 and osc_build_req().
2494 * \param obj an object from cl_req whose attributes are to be set in
2497 * \param oa struct obdo where attributes are placed
2499 * \param flags \a oa fields to be filled.
2501 void (*cro_attr_set)(const struct lu_env *env,
2502 const struct cl_req_slice *slice,
2503 const struct cl_object *obj,
2504 struct cl_req_attr *attr, obd_valid flags);
2506 * Called top-to-bottom from cl_req_completion() to notify layers that
2507 * transfer completed. Has to free all state allocated by
2508 * cl_device_operations::cdo_req_init().
2510 void (*cro_completion)(const struct lu_env *env,
2511 const struct cl_req_slice *slice, int ioret);
2515 * A per-object state that (potentially multi-object) transfer request keeps.
2518 /** object itself */
2519 struct cl_object *ro_obj;
2520 /** reference to cl_req_obj::ro_obj. For debugging. */
2521 struct lu_ref_link ro_obj_ref;
2522 /* something else? Number of pages for a given object? */
2528 * Transfer requests are not reference counted, because IO sub-system owns
2529 * them exclusively and knows when to free them.
2533 * cl_req is created by cl_req_alloc() that calls
2534 * cl_device_operations::cdo_req_init() device methods to allocate per-req
2535 * state in every layer.
2537 * Then pages are added (cl_req_page_add()), req keeps track of all objects it
2538 * contains pages for.
2540 * Once all pages were collected, cl_page_operations::cpo_prep() method is
2541 * called top-to-bottom. At that point layers can modify req, let it pass, or
2542 * deny it completely. This is to support things like SNS that have transfer
2543 * ordering requirements invisible to the individual req-formation engine.
2545 * On transfer completion (or transfer timeout, or failure to initiate the
2546 * transfer of an allocated req), cl_req_operations::cro_completion() method
2547 * is called, after execution of cl_page_operations::cpo_completion() of all pages in the req.
2551 enum cl_req_type crq_type;
2552 /** A list of pages being transferred */
2553 cfs_list_t crq_pages;
2554 /** Number of pages in cl_req::crq_pages */
2555 unsigned crq_nrpages;
2556 /** An array of objects whose pages are in ->crq_pages */
2557 struct cl_req_obj *crq_o;
2558 /** Number of elements in cl_req::crq_objs[] */
2559 unsigned crq_nrobjs;
2560 cfs_list_t crq_layers;
2564 * Per-layer state for request.
2566 struct cl_req_slice {
2567 struct cl_req *crs_req;
2568 struct cl_device *crs_dev;
2569 cfs_list_t crs_linkage;
2570 const struct cl_req_operations *crs_ops;
2575 enum cache_stats_item {
2576 /** how many cache lookups were performed */
2578 /** how many times cache lookup resulted in a hit */
2580 /** how many entities are in the cache right now */
2582 /** how many entities in the cache are actively used (and cannot be
2583 * evicted) right now */
2585 /** how many entities were created at all */
2590 #define CS_NAMES { "lookup", "hit", "total", "busy", "create" }
2593 * Stats for a generic cache (similar to inode, lu_object, etc. caches).
2595 struct cache_stats {
2596 const char *cs_name;
2597 cfs_atomic_t cs_stats[CS_NR];
2600 /** These are not exported so far */
2601 void cache_stats_init (struct cache_stats *cs, const char *name);
2602 int cache_stats_print(const struct cache_stats *cs,
2603 char *page, int count, int header);
2606 * Client-side site. This represents a particular client stack. "Global"
2607 * variables should (directly or indirectly) be added here to allow multiple
2608 * clients to co-exist in a single address space.
2611 struct lu_site cs_lu;
2613 * Statistical counters. Atomics do not scale, something better like
2614 * per-cpu counters is needed.
2616 * These are exported as /proc/fs/lustre/llite/.../site
2618 * When interpreting keep in mind that both sub-locks (and sub-pages)
2619 * and top-locks (and top-pages) are accounted here.
2621 struct cache_stats cs_pages;
2622 struct cache_stats cs_locks;
2623 cfs_atomic_t cs_pages_state[CPS_NR];
2624 cfs_atomic_t cs_locks_state[CLS_NR];
2627 int cl_site_init (struct cl_site *s, struct cl_device *top);
2628 void cl_site_fini (struct cl_site *s);
2629 void cl_stack_fini(const struct lu_env *env, struct cl_device *cl);
2632 * Output client site statistical counters into a buffer. Suitable for
2633 * ll_rd_*()-style functions.
2635 int cl_site_stats_print(const struct cl_site *s, char *page, int count);
2640 * Type conversion and accessor functions.
2644 static inline struct cl_site *lu2cl_site(const struct lu_site *site)
2646 return container_of(site, struct cl_site, cs_lu);
2649 static inline int lu_device_is_cl(const struct lu_device *d)
2651 return d->ld_type->ldt_tags & LU_DEVICE_CL;
2654 static inline struct cl_device *lu2cl_dev(const struct lu_device *d)
2656 LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d));
2657 return container_of0(d, struct cl_device, cd_lu_dev);
2660 static inline struct lu_device *cl2lu_dev(struct cl_device *d)
2662 return &d->cd_lu_dev;
2665 static inline struct cl_object *lu2cl(const struct lu_object *o)
2667 LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
2668 return container_of0(o, struct cl_object, co_lu);
2671 static inline const struct cl_object_conf *
2672 lu2cl_conf(const struct lu_object_conf *conf)
2674 return container_of0(conf, struct cl_object_conf, coc_lu);
2677 static inline struct cl_object *cl_object_next(const struct cl_object *obj)
2679 return obj ? lu2cl(lu_object_next(&obj->co_lu)) : NULL;
2682 static inline struct cl_device *cl_object_device(const struct cl_object *o)
2684 LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev));
2685 return container_of0(o->co_lu.lo_dev, struct cl_device, cd_lu_dev);
2688 static inline struct cl_object_header *luh2coh(const struct lu_object_header *h)
2690 return container_of0(h, struct cl_object_header, coh_lu);
2693 static inline struct cl_site *cl_object_site(const struct cl_object *obj)
2695 return lu2cl_site(obj->co_lu.lo_dev->ld_site);
2699 struct cl_object_header *cl_object_header(const struct cl_object *obj)
2701 return luh2coh(obj->co_lu.lo_header);
2704 static inline int cl_device_init(struct cl_device *d, struct lu_device_type *t)
2706 return lu_device_init(&d->cd_lu_dev, t);
2709 static inline void cl_device_fini(struct cl_device *d)
2711 lu_device_fini(&d->cd_lu_dev);
2714 void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
2715 struct cl_object *obj,
2716 const struct cl_page_operations *ops);
2717 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
2718 struct cl_object *obj,
2719 const struct cl_lock_operations *ops);
2720 void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
2721 struct cl_object *obj, const struct cl_io_operations *ops);
2722 void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
2723 struct cl_device *dev,
2724 const struct cl_req_operations *ops);
2727 /** \defgroup cl_object cl_object
2729 struct cl_object *cl_object_top (struct cl_object *o);
2730 struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd,
2731 const struct lu_fid *fid,
2732 const struct cl_object_conf *c);
2734 int cl_object_header_init(struct cl_object_header *h);
2735 void cl_object_header_fini(struct cl_object_header *h);
2736 void cl_object_put (const struct lu_env *env, struct cl_object *o);
2737 void cl_object_get (struct cl_object *o);
2738 void cl_object_attr_lock (struct cl_object *o);
2739 void cl_object_attr_unlock(struct cl_object *o);
2740 int cl_object_attr_get (const struct lu_env *env, struct cl_object *obj,
2741 struct cl_attr *attr);
2742 int cl_object_attr_set (const struct lu_env *env, struct cl_object *obj,
2743 const struct cl_attr *attr, unsigned valid);
2744 int cl_object_glimpse (const struct lu_env *env, struct cl_object *obj,
2745 struct ost_lvb *lvb);
2746 int cl_conf_set (const struct lu_env *env, struct cl_object *obj,
2747 const struct cl_object_conf *conf);
2748 void cl_object_prune (const struct lu_env *env, struct cl_object *obj);
2749 void cl_object_kill (const struct lu_env *env, struct cl_object *obj);
2750 int cl_object_has_locks (struct cl_object *obj);
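/*
 * Illustration only: a minimal sketch of reading object attributes under the
 * attribute lock using the helpers above (example_attr_get() is a hypothetical
 * wrapper, not part of this interface):
 *
 * \code
 *	static int example_attr_get(const struct lu_env *env,
 *	                            struct cl_object *obj, struct cl_attr *attr)
 *	{
 *		int result;
 *
 *		cl_object_attr_lock(obj);
 *		result = cl_object_attr_get(env, obj, attr);
 *		cl_object_attr_unlock(obj);
 *		return result;
 *	}
 * \endcode
 */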
2753 * Returns true, iff \a o0 and \a o1 are slices of the same object.
2755 static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1)
2757 return cl_object_header(o0) == cl_object_header(o1);
2760 static inline void cl_object_page_init(struct cl_object *clob, int size)
2762 clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
2763 cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8);
2766 static inline void *cl_object_page_slice(struct cl_object *clob,
2767 struct cl_page *page)
2769 return (void *)((char *)page + clob->co_slice_off);
2773 * Return refcount of cl_object.
2775 static inline int cl_object_refc(struct cl_object *clob)
2777 struct lu_object_header *header = clob->co_lu.lo_header;
2778 return cfs_atomic_read(&header->loh_ref);
2783 /** \defgroup cl_page cl_page
2791 /* callback of cl_page_gang_lookup() */
2793 struct cl_page *cl_page_find (const struct lu_env *env,
2794 struct cl_object *obj,
2795 pgoff_t idx, struct page *vmpage,
2796 enum cl_page_type type);
2797 struct cl_page *cl_page_alloc (const struct lu_env *env,
2798 struct cl_object *o, pgoff_t ind,
2799 struct page *vmpage,
2800 enum cl_page_type type);
2801 void cl_page_get (struct cl_page *page);
2802 void cl_page_put (const struct lu_env *env,
2803 struct cl_page *page);
2804 void cl_page_print (const struct lu_env *env, void *cookie,
2805 lu_printer_t printer,
2806 const struct cl_page *pg);
2807 void cl_page_header_print(const struct lu_env *env, void *cookie,
2808 lu_printer_t printer,
2809 const struct cl_page *pg);
2810 struct page *cl_page_vmpage (const struct lu_env *env,
2811 struct cl_page *page);
2812 struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj);
2813 struct cl_page *cl_page_top (struct cl_page *page);
2815 const struct cl_page_slice *cl_page_at(const struct cl_page *page,
2816 const struct lu_device_type *dtype);
2821 * Functions dealing with the ownership of page by io.
2825 int cl_page_own (const struct lu_env *env,
2826 struct cl_io *io, struct cl_page *page);
2827 int cl_page_own_try (const struct lu_env *env,
2828 struct cl_io *io, struct cl_page *page);
2829 void cl_page_assume (const struct lu_env *env,
2830 struct cl_io *io, struct cl_page *page);
2831 void cl_page_unassume (const struct lu_env *env,
2832 struct cl_io *io, struct cl_page *pg);
2833 void cl_page_disown (const struct lu_env *env,
2834 struct cl_io *io, struct cl_page *page);
2835 int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io);
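/*
 * Illustration only: a sketch of the usual own/disown round-trip for a single
 * page (example_with_owned_page() is a hypothetical helper; real callers do
 * actual work on the page while it is owned):
 *
 * \code
 *	static void example_with_owned_page(const struct lu_env *env,
 *	                                    struct cl_io *io,
 *	                                    struct cl_page *page)
 *	{
 *		if (cl_page_own(env, io, page) == 0) {
 *			LASSERT(cl_page_is_owned(page, io));
 *			cl_page_disown(env, io, page);
 *		}
 *	}
 * \endcode
 */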
2842 * Functions dealing with the preparation of a page for a transfer, and
2843 * tracking transfer state.
2846 int cl_page_prep (const struct lu_env *env, struct cl_io *io,
2847 struct cl_page *pg, enum cl_req_type crt);
2848 void cl_page_completion (const struct lu_env *env,
2849 struct cl_page *pg, enum cl_req_type crt, int ioret);
2850 int cl_page_make_ready (const struct lu_env *env, struct cl_page *pg,
2851 enum cl_req_type crt);
2852 int cl_page_cache_add (const struct lu_env *env, struct cl_io *io,
2853 struct cl_page *pg, enum cl_req_type crt);
2854 void cl_page_clip (const struct lu_env *env, struct cl_page *pg,
2856 int cl_page_cancel (const struct lu_env *env, struct cl_page *page);
2857 int cl_page_flush (const struct lu_env *env, struct cl_io *io,
2858 struct cl_page *pg);
2864 * \name helper routines
2865 * Functions to discard, delete and export a cl_page.
2868 void cl_page_discard (const struct lu_env *env, struct cl_io *io,
2869 struct cl_page *pg);
2870 void cl_page_delete (const struct lu_env *env, struct cl_page *pg);
2871 int cl_page_is_vmlocked (const struct lu_env *env,
2872 const struct cl_page *pg);
2873 void cl_page_export (const struct lu_env *env,
2874 struct cl_page *pg, int uptodate);
2875 int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
2876 struct cl_page *page);
2877 loff_t cl_offset (const struct cl_object *obj, pgoff_t idx);
2878 pgoff_t cl_index (const struct cl_object *obj, loff_t offset);
2879 int cl_page_size (const struct cl_object *obj);
2880 int cl_pages_prune (const struct lu_env *env, struct cl_object *obj);
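/*
 * Illustration only: converting between file offsets and page indices with the
 * helpers above, assuming the usual page-size based implementation (so that
 * cl_index() inverts cl_offset() for offsets within the same page):
 *
 * \code
 *	loff_t  start = cl_offset(obj, idx);
 *	pgoff_t again = cl_index(obj, start + cl_page_size(obj) - 1);
 * \endcode
 */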
2882 void cl_lock_print (const struct lu_env *env, void *cookie,
2883 lu_printer_t printer, const struct cl_lock *lock);
2884 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2885 lu_printer_t printer,
2886 const struct cl_lock_descr *descr);
2891 /** \defgroup cl_lock cl_lock
2894 struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
2895 const struct cl_lock_descr *need,
2896 const char *scope, const void *source);
2897 struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
2898 const struct cl_lock_descr *need,
2899 const char *scope, const void *source);
2900 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
2901 const struct cl_lock_descr *need,
2902 const char *scope, const void *source);
2903 struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
2904 struct cl_object *obj, pgoff_t index,
2905 struct cl_lock *except, int pending,
2907 static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env,
2908 struct cl_object *obj,
2909 struct cl_page *page,
2910 struct cl_lock *except,
2911 int pending, int canceld)
2913 LASSERT(cl_object_header(obj) == cl_object_header(page->cp_obj));
2914 return cl_lock_at_pgoff(env, obj, page->cp_index, except,
2918 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
2919 const struct lu_device_type *dtype);
2921 void cl_lock_get (struct cl_lock *lock);
2922 void cl_lock_get_trust (struct cl_lock *lock);
2923 void cl_lock_put (const struct lu_env *env, struct cl_lock *lock);
2924 void cl_lock_hold_add (const struct lu_env *env, struct cl_lock *lock,
2925 const char *scope, const void *source);
2926 void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
2927 const char *scope, const void *source);
2928 void cl_lock_unhold (const struct lu_env *env, struct cl_lock *lock,
2929 const char *scope, const void *source);
2930 void cl_lock_release (const struct lu_env *env, struct cl_lock *lock,
2931 const char *scope, const void *source);
2932 void cl_lock_user_add (const struct lu_env *env, struct cl_lock *lock);
2933 void cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock);
2935 enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
2936 struct cl_lock *lock);
2937 void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
2938 enum cl_lock_state state);
2939 int cl_lock_is_intransit(struct cl_lock *lock);
2941 int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock,
2944 /** \name statemachine statemachine
2945 * Interface to lock state machine consists of 3 parts:
2947 * - "try" functions that attempt to effect a state transition. If state
2948 * transition is not possible right now (e.g., if it has to wait for some
2949 * asynchronous event to occur), these functions return
2950 * cl_lock_transition::CLO_WAIT.
2952 * - "non-try" functions that implement synchronous blocking interface on
2953 * top of non-blocking "try" functions. These functions repeatedly call
2954 * corresponding "try" versions, and if state transition is not possible
2955 * immediately, wait for lock state change.
2957 * - methods from cl_lock_operations, called by "try" functions. Lock can
2958 * be advanced to the target state only when all layers voted that they
2959 * are ready for this transition. "Try" functions call methods under lock
2960 * mutex. If a layer had to release a mutex, it re-acquires it and returns
2961 * cl_lock_transition::CLO_REPEAT, causing "try" function to call all
2964 * TRY NON-TRY METHOD FINAL STATE
2966 * cl_enqueue_try() cl_enqueue() cl_lock_operations::clo_enqueue() CLS_ENQUEUED
2968 * cl_wait_try() cl_wait() cl_lock_operations::clo_wait() CLS_HELD
2970 * cl_unuse_try() cl_unuse() cl_lock_operations::clo_unuse() CLS_CACHED
2972 * cl_use_try() NONE cl_lock_operations::clo_use() CLS_HELD
2976 int cl_enqueue (const struct lu_env *env, struct cl_lock *lock,
2977 struct cl_io *io, __u32 flags);
2978 int cl_wait (const struct lu_env *env, struct cl_lock *lock);
2979 void cl_unuse (const struct lu_env *env, struct cl_lock *lock);
2980 int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
2981 struct cl_io *io, __u32 flags);
2982 int cl_unuse_try (const struct lu_env *env, struct cl_lock *lock);
2983 int cl_wait_try (const struct lu_env *env, struct cl_lock *lock);
2984 int cl_use_try (const struct lu_env *env, struct cl_lock *lock, int atomic);
2986 /** @} statemachine */
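/*
 * Illustration only: driving the lock state machine with the blocking
 * ("non-try") interface, following the table above. This is a sketch, not a
 * copy of a real call site; the "example" scope string, the enqflags value and
 * the descriptor \a need are assumed to be supplied by the caller:
 *
 * \code
 *	lock = cl_lock_hold(env, io, need, "example", io);
 *	if (!IS_ERR(lock)) {
 *		rc = cl_enqueue(env, lock, io, enqflags);
 *		if (rc == 0)
 *			rc = cl_wait(env, lock);
 *		if (rc == 0)
 *			cl_unuse(env, lock);
 *		cl_lock_release(env, lock, "example", io);
 *	}
 * \endcode
 */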
2988 void cl_lock_signal (const struct lu_env *env, struct cl_lock *lock);
2989 int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock);
2990 void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock,
2991 enum cl_lock_state state);
2992 int cl_queue_match (const cfs_list_t *queue,
2993 const struct cl_lock_descr *need);
2995 void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock);
2996 int cl_lock_mutex_try (const struct lu_env *env, struct cl_lock *lock);
2997 void cl_lock_mutex_put (const struct lu_env *env, struct cl_lock *lock);
2998 int cl_lock_is_mutexed (struct cl_lock *lock);
2999 int cl_lock_nr_mutexed (const struct lu_env *env);
3000 int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock);
3001 int cl_lock_ext_match (const struct cl_lock_descr *has,
3002 const struct cl_lock_descr *need);
3003 int cl_lock_descr_match(const struct cl_lock_descr *has,
3004 const struct cl_lock_descr *need);
3005 int cl_lock_mode_match (enum cl_lock_mode has, enum cl_lock_mode need);
3006 int cl_lock_modify (const struct lu_env *env, struct cl_lock *lock,
3007 const struct cl_lock_descr *desc);
3009 void cl_lock_closure_init (const struct lu_env *env,
3010 struct cl_lock_closure *closure,
3011 struct cl_lock *origin, int wait);
3012 void cl_lock_closure_fini (struct cl_lock_closure *closure);
3013 int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
3014 struct cl_lock_closure *closure);
3015 void cl_lock_disclosure (const struct lu_env *env,
3016 struct cl_lock_closure *closure);
3017 int cl_lock_enclosure (const struct lu_env *env, struct cl_lock *lock,
3018 struct cl_lock_closure *closure);
3020 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock);
3021 void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock);
3022 void cl_lock_error (const struct lu_env *env, struct cl_lock *lock, int error);
3023 void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait);
3025 unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock);
3029 /** \defgroup cl_io cl_io
3032 int cl_io_init (const struct lu_env *env, struct cl_io *io,
3033 enum cl_io_type iot, struct cl_object *obj);
3034 int cl_io_sub_init (const struct lu_env *env, struct cl_io *io,
3035 enum cl_io_type iot, struct cl_object *obj);
3036 int cl_io_rw_init (const struct lu_env *env, struct cl_io *io,
3037 enum cl_io_type iot, loff_t pos, size_t count);
3038 int cl_io_loop (const struct lu_env *env, struct cl_io *io);
3040 void cl_io_fini (const struct lu_env *env, struct cl_io *io);
3041 int cl_io_iter_init (const struct lu_env *env, struct cl_io *io);
3042 void cl_io_iter_fini (const struct lu_env *env, struct cl_io *io);
3043 int cl_io_lock (const struct lu_env *env, struct cl_io *io);
3044 void cl_io_unlock (const struct lu_env *env, struct cl_io *io);
3045 int cl_io_start (const struct lu_env *env, struct cl_io *io);
3046 void cl_io_end (const struct lu_env *env, struct cl_io *io);
3047 int cl_io_lock_add (const struct lu_env *env, struct cl_io *io,
3048 struct cl_io_lock_link *link);
3049 int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
3050 struct cl_lock_descr *descr);
3051 int cl_io_read_page (const struct lu_env *env, struct cl_io *io,
3052 struct cl_page *page);
3053 int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
3054 struct cl_page *page, unsigned from, unsigned to);
3055 int cl_io_commit_write (const struct lu_env *env, struct cl_io *io,
3056 struct cl_page *page, unsigned from, unsigned to);
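/*
 * Illustration only: the per-page write protocol implied by the two functions
 * above, as driven by generic code. example_copy_user_data() stands in for
 * copying bytes [from, to) of the user buffer into the page and is not part of
 * this interface:
 *
 * \code
 *	result = cl_io_prepare_write(env, io, page, from, to);
 *	if (result == 0)
 *		result = example_copy_user_data(page, from, to);
 *	if (result == 0)
 *		result = cl_io_commit_write(env, io, page, from, to);
 * \endcode
 */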
3057 int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io,
3058 enum cl_req_type iot, struct cl_2queue *queue);
3059 int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io,
3060 enum cl_req_type iot, struct cl_2queue *queue,
3062 void cl_io_rw_advance (const struct lu_env *env, struct cl_io *io,
3064 int cl_io_cancel (const struct lu_env *env, struct cl_io *io,
3065 struct cl_page_list *queue);
3066 int cl_io_is_going (const struct lu_env *env);
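/*
 * Illustration only: a minimal top-level driver for a read. The origin of
 * \a io, \a obj, \a pos and \a count is not shown (in real code the io
 * typically lives in the per-thread session); error handling is reduced to
 * the bare minimum:
 *
 * \code
 *	io->ci_obj = cl_object_top(obj);
 *	result = cl_io_rw_init(env, io, CIT_READ, pos, count);
 *	if (result == 0)
 *		result = cl_io_loop(env, io);
 *	cl_io_fini(env, io);
 * \endcode
 */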
3069 * True, iff \a io is an O_APPEND write(2).
3071 static inline int cl_io_is_append(const struct cl_io *io)
3073 return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append;
3076 static inline int cl_io_is_sync_write(const struct cl_io *io)
3078 return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_sync;
3081 static inline int cl_io_is_mkwrite(const struct cl_io *io)
3083 return io->ci_type == CIT_FAULT && io->u.ci_fault.ft_mkwrite;
3087 * True, iff \a io is a truncate(2).
3089 static inline int cl_io_is_trunc(const struct cl_io *io)
3091 return io->ci_type == CIT_SETATTR &&
3092 (io->u.ci_setattr.sa_valid & ATTR_SIZE);
3095 struct cl_io *cl_io_top(struct cl_io *io);
3097 void cl_io_print(const struct lu_env *env, void *cookie,
3098 lu_printer_t printer, const struct cl_io *io);
3100 #define CL_IO_SLICE_CLEAN(foo_io, base) \
3102 typeof(foo_io) __foo_io = (foo_io); \
3104 CLASSERT(offsetof(typeof(*__foo_io), base) == 0); \
3105 memset(&__foo_io->base + 1, 0, \
3106 (sizeof *__foo_io) - sizeof __foo_io->base); \
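/*
 * Usage illustration only. CL_IO_SLICE_CLEAN() zeroes everything in a layer io
 * structure that follows its embedded cl_io_slice, which therefore must be the
 * first member. "struct example_io" below is a hypothetical layer type, not
 * part of this header:
 *
 * \code
 *	struct example_io {
 *		struct cl_io_slice ei_cl;
 *		int                ei_state;
 *	};
 *
 *	static void example_io_reset(struct example_io *eio)
 *	{
 *		CL_IO_SLICE_CLEAN(eio, ei_cl);
 *	}
 * \endcode
 */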
3111 /** \defgroup cl_page_list cl_page_list
3115 * Last page in the page list.
3117 static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
3119 LASSERT(plist->pl_nr > 0);
3120 return cfs_list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
3124 * Iterate over pages in a page list.
3126 #define cl_page_list_for_each(page, list) \
3127 cfs_list_for_each_entry((page), &(list)->pl_pages, cp_batch)
3130 * Iterate over pages in a page list, taking possible removals into account.
3132 #define cl_page_list_for_each_safe(page, temp, list) \
3133 cfs_list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
3135 void cl_page_list_init (struct cl_page_list *plist);
3136 void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page);
3137 void cl_page_list_move (struct cl_page_list *dst, struct cl_page_list *src,
3138 struct cl_page *page);
3139 void cl_page_list_splice (struct cl_page_list *list,
3140 struct cl_page_list *head);
3141 void cl_page_list_del (const struct lu_env *env,
3142 struct cl_page_list *plist, struct cl_page *page);
3143 void cl_page_list_disown (const struct lu_env *env,
3144 struct cl_io *io, struct cl_page_list *plist);
3145 int cl_page_list_own (const struct lu_env *env,
3146 struct cl_io *io, struct cl_page_list *plist);
3147 void cl_page_list_assume (const struct lu_env *env,
3148 struct cl_io *io, struct cl_page_list *plist);
3149 void cl_page_list_discard(const struct lu_env *env,
3150 struct cl_io *io, struct cl_page_list *plist);
3151 void cl_page_list_fini (const struct lu_env *env, struct cl_page_list *plist);
3153 void cl_2queue_init (struct cl_2queue *queue);
3154 void cl_2queue_add (struct cl_2queue *queue, struct cl_page *page);
3155 void cl_2queue_disown (const struct lu_env *env,
3156 struct cl_io *io, struct cl_2queue *queue);
3157 void cl_2queue_assume (const struct lu_env *env,
3158 struct cl_io *io, struct cl_2queue *queue);
3159 void cl_2queue_discard (const struct lu_env *env,
3160 struct cl_io *io, struct cl_2queue *queue);
3161 void cl_2queue_fini (const struct lu_env *env, struct cl_2queue *queue);
3162 void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
3164 /** @} cl_page_list */
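/*
 * Illustration only: submitting one page for read through the io's 2-queue.
 * The page is assumed to be already owned by \a io (see cl_page_own()); the
 * failure path shown (disowning whatever remains on the incoming queue) is
 * one plausible cleanup, not a prescribed one:
 *
 * \code
 *	struct cl_2queue *queue = &io->ci_queue;
 *
 *	cl_2queue_init_page(queue, page);
 *	result = cl_io_submit_rw(env, io, CRT_READ, queue);
 *	if (result != 0)
 *		cl_page_list_disown(env, io, &queue->c2_qin);
 *	cl_2queue_fini(env, queue);
 * \endcode
 */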
3166 /** \defgroup cl_req cl_req
3168 struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
3169 enum cl_req_type crt, int nr_objects);
3171 void cl_req_page_add (const struct lu_env *env, struct cl_req *req,
3172 struct cl_page *page);
3173 void cl_req_page_done (const struct lu_env *env, struct cl_page *page);
3174 int cl_req_prep (const struct lu_env *env, struct cl_req *req);
3175 void cl_req_attr_set (const struct lu_env *env, struct cl_req *req,
3176 struct cl_req_attr *attr, obd_valid flags);
3177 void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret);
3179 /** \defgroup cl_sync_io cl_sync_io
3183 * Anchor for synchronous transfer. This is allocated on the stack by the
3184 * thread doing a synchronous transfer, and a pointer to this structure is
3185 * set up in every page submitted for transfer. The transfer completion
3186 * routine updates the anchor and wakes up the waiting thread when the transfer is complete.
3189 /** number of pages yet to be transferred. */
3190 cfs_atomic_t csi_sync_nr;
3193 /** barrier for destroying this structure */
3194 cfs_atomic_t csi_barrier;
3195 /** completion to be signaled when transfer is complete. */
3196 wait_queue_head_t csi_waitq;
3199 void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
3200 int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
3201 struct cl_page_list *queue, struct cl_sync_io *anchor,
3203 void cl_sync_io_note(struct cl_sync_io *anchor, int ioret);
3205 /** @} cl_sync_io */
3209 /** \defgroup cl_env cl_env
3211 * lu_env handling for a client.
3213 * lu_env is an environment within which lustre code executes. Its major part
3214 * is lu_context---a fast memory allocation mechanism that is used to conserve
3215 * precious kernel stack space. Originally lu_env was designed for a server, where:
3218 * - there is a (mostly) fixed number of threads, and
3220 * - call chains have no non-lustre portions inserted between lustre code.
3222 * On a client both these assumptions fail, because every user thread can
3223 * potentially execute lustre code as part of a system call, and lustre calls
3224 * into VFS or MM that call back into lustre.
3226 * To deal with that, cl_env wrapper functions implement the following optimizations:
3229 * - allocation and destruction of environment is amortized by caching no
3230 * longer used environments instead of destroying them;
3232 * - there is a notion of "current" environment, attached to the kernel
3233 * data structure representing the current thread. Top-level lustre code
3234 * allocates an environment and makes it current, then calls into
3235 * non-lustre code, that in turn calls lustre back. Low-level lustre
3236 * code thus called can fetch environment created by the top-level code
3237 * and reuse it, avoiding additional environment allocation.
3238 * Right now, three interfaces can attach the cl_env to a running thread:
3241 * - cl_env_reexit() (cl_env_reenter() must have been called beforehand)
3243 * \see lu_env, lu_context, lu_context_key
3246 struct cl_env_nest {
3251 struct lu_env *cl_env_peek (int *refcheck);
3252 struct lu_env *cl_env_get (int *refcheck);
3253 struct lu_env *cl_env_alloc (int *refcheck, __u32 tags);
3254 struct lu_env *cl_env_nested_get (struct cl_env_nest *nest);
3255 void cl_env_put (struct lu_env *env, int *refcheck);
3256 void cl_env_nested_put (struct cl_env_nest *nest, struct lu_env *env);
3257 void *cl_env_reenter (void);
3258 void cl_env_reexit (void *cookie);
3259 void cl_env_implant (struct lu_env *env, int *refcheck);
3260 void cl_env_unplant (struct lu_env *env, int *refcheck);
3261 unsigned cl_env_cache_purge(unsigned nr);
3262 struct lu_env *cl_env_percpu_get (void);
3263 void cl_env_percpu_put (struct lu_env *env);
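/*
 * Illustration only: the common get/put pattern for obtaining an environment
 * around a call into cl-code (example_do_clio_work() is a hypothetical callee;
 * the point is the pairing of cl_env_get() and cl_env_put() with the same
 * refcheck):
 *
 * \code
 *	struct lu_env *env;
 *	int            refcheck;
 *
 *	env = cl_env_get(&refcheck);
 *	if (!IS_ERR(env)) {
 *		result = example_do_clio_work(env);
 *		cl_env_put(env, &refcheck);
 *	}
 * \endcode
 */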
3270 void cl_attr2lvb(struct ost_lvb *lvb, const struct cl_attr *attr);
3271 void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb);
3273 struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
3274 struct lu_device_type *ldt,
3275 struct lu_device *next);
3278 int cl_global_init(void);
3279 void cl_global_fini(void);
3281 #endif /* _LINUX_CL_OBJECT_H */