4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2013, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 #ifndef _LUSTRE_CL_OBJECT_H
37 #define _LUSTRE_CL_OBJECT_H
39 /** \defgroup clio clio
41 * Client objects implement io operations and cache pages.
43 * Examples: lov and osc are implementations of cl interface.
45 * Big Theory Statement.
49 * Client implementation is based on the following data-types:
55 * - cl_lock represents an extent lock on an object.
57 * - cl_io represents high-level i/o activity such as whole read/write
58 * system call, or write-out of pages from under the lock being
59 * canceled. cl_io has sub-ios that can be stopped and resumed
60 * independently, thus achieving high degree of transfer
61 * parallelism. A single cl_io can be advanced forward by
62 * multiple threads (although in the most usual case of a
63 * read/write system call it is associated with the single user
64 * thread that issued the system call).
66 * - cl_req represents a collection of pages for a transfer. cl_req is
67 * constructed by req-forming engine that tries to saturate
68 * transport with large and continuous transfers.
72 * - to avoid confusion, a high-level I/O operation like a read or write system
73 * call is referred to as "an io", whereas a low-level I/O operation, like
74 * an RPC, is referred to as "a transfer";
76 * - "generic code" means generic (not file system specific) code in the
77 * hosting environment. "cl-code" means code (mostly in cl_*.c files) that
78 * is not layer specific.
84 * - cl_object_header::coh_page_guard
87 * See the top comment in cl_object.c for the description of overall locking and
88 * reference-counting design.
90 * See comments below for the description of i/o, page, and dlm-locking
97 * super-class definitions.
99 #include <libcfs/libcfs.h>
100 #include <lu_object.h>
101 #include <linux/mutex.h>
102 #include <linux/radix-tree.h>
107 struct cl_device_operations;
110 struct cl_object_page_operations;
111 struct cl_object_lock_operations;
114 struct cl_page_slice;
116 struct cl_lock_slice;
118 struct cl_lock_operations;
119 struct cl_page_operations;
128 * Operations for each data device in the client stack.
130 * \see vvp_cl_ops, lov_cl_ops, lovsub_cl_ops, osc_cl_ops
132 struct cl_device_operations {
134 * Initialize cl_req. This method is called top-to-bottom on all
135 * devices in the stack to give them a chance to allocate layer-private
136 * data, and to attach them to the cl_req by calling
137 * cl_req_slice_add().
139 * \see osc_req_init(), lov_req_init(), lovsub_req_init()
140 * \see ccc_req_init()
142 int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev, struct cl_req *req);
147 * Device in the client stack.
149 * \see ccc_device, lov_device, lovsub_device, osc_device
153 struct lu_device cd_lu_dev;
154 /** Per-layer operation vector. */
155 const struct cl_device_operations *cd_ops;
158 /** \addtogroup cl_object cl_object
161 * "Data attributes" of cl_object. Data attributes can be updated
162 * independently for a sub-object, and top-object's attributes are calculated
163 * from sub-objects' ones.
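 *
 * A hedged usage sketch (illustrative only): reading the merged attributes
 * of an object under cl_object_header::coh_attr_guard, assuming the
 * cl_object_attr_lock(), cl_object_attr_get() and cl_object_attr_unlock()
 * helpers are available; error handling is elided.
 * \code
 *	struct cl_attr attr;
 *	int rc;
 *
 *	cl_object_attr_lock(obj);
 *	rc = cl_object_attr_get(env, obj, &attr);
 *	cl_object_attr_unlock(obj);
 *	if (rc == 0)
 *		CDEBUG(D_INODE, "size: %lld\n", (long long)attr.cat_size);
 * \endcode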
166 /** Object size, in bytes */
169 * Known minimal size, in bytes.
171 * This is only valid when at least one DLM lock is held.
174 /** Modification time. Measured in seconds since epoch. */
176 /** Access time. Measured in seconds since epoch. */
178 /** Change time. Measured in seconds since epoch. */
181 * Blocks allocated to this cl_object on the server file system.
183 * \todo XXX An interface for block size is needed.
187 * User identifier for quota purposes.
191 * Group identifier for quota purposes.
195 /* nlink of the directory */
200 * Fields in cl_attr that are being set.
214 * Sub-class of lu_object with methods common for objects on the client
217 * cl_object: represents a regular file system object, both a file and a
218 * stripe. cl_object is based on lu_object: it is identified by a fid,
219 * layered, cached, hashed, and lrued. Important distinction with the server
220 * side, where md_object and dt_object are used, is that cl_object "fans out"
221 * at the lov/sns level: depending on the file layout, a single file is
222 * represented as a set of "sub-objects" (stripes). At the implementation
223 * level, struct lov_object contains an array of cl_objects. Each sub-object
224 * is a full-fledged cl_object, having its fid, living in the lru and hash
227 * This leads to the next important difference with the server side: on the
228 * client, it's quite usual to have objects with different sequences of
229 * layers. For example, typical top-object is composed of the following
235 * whereas its sub-objects are composed of
240 * layers. Here "lovsub" is a mostly dummy layer, whose purpose is to keep
241 * track of the object-subobject relationship.
243 * Sub-objects are not cached independently: when top-object is about to
244 * be discarded from the memory, all its sub-objects are torn-down and destroyed too.
247 * \see ccc_object, lov_object, lovsub_object, osc_object
251 struct lu_object co_lu;
252 /** per-object-layer operations */
253 const struct cl_object_operations *co_ops;
254 /** offset of page slice in cl_page buffer */
259 * Description of the client object configuration. This is used for the
260 * creation of a new client object that is identified by more state than a fid.
263 struct cl_object_conf {
265 struct lu_object_conf coc_lu;
268 * Object layout. This is consumed by lov.
270 struct lustre_md *coc_md;
272 * Description of particular stripe location in the
273 * cluster. This is consumed by osc.
275 struct lov_oinfo *coc_oinfo;
278 * VFS inode. This is consumed by vvp.
280 struct inode *coc_inode;
282 * Layout lock handle.
284 struct ldlm_lock *coc_lock;
286 * Operation to handle layout, OBJECT_CONF_XYZ.
292 /** configure layout, set up a new stripe, must be called while
293 * holding layout lock. */
295 /** invalidate the current stripe configuration due to losing layout lock. */
297 OBJECT_CONF_INVALIDATE = 1,
298 /** wait for old layout to go away so that new layout can be set up. */
304 * Operations implemented for each cl object layer.
306 * \see vvp_ops, lov_ops, lovsub_ops, osc_ops
308 struct cl_object_operations {
310 * Initialize page slice for this layer. Called top-to-bottom through
311 * every object layer when a new cl_page is instantiated. Layer
312 * keeping private per-page data, or requiring its own page operations
313 * vector should allocate these data here, and attach them to the page
314 * by calling cl_page_slice_add(). \a vmpage is locked (in the VM
317 * \retval 0 success.
319 * \retval -ve failure code.
324 int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
325 struct cl_page *page, pgoff_t index);
327 * Initialize lock slice for this layer. Called top-to-bottom through
328 * every object layer when a new cl_lock is instantiated. Layer
329 * keeping private per-lock data, or requiring its own lock operations
330 * vector should allocate these data here, and attach them to the lock
331 * by calling cl_lock_slice_add(). Mandatory.
333 int (*coo_lock_init)(const struct lu_env *env,
334 struct cl_object *obj, struct cl_lock *lock,
335 const struct cl_io *io);
337 * Initialize io state for a given layer.
339 * called top-to-bottom once per io existence to initialize io
340 * state. If layer wants to keep some state for this type of io, it
341 * has to embed struct cl_io_slice in lu_env::le_ses, and register
342 * slice with cl_io_slice_add(). It is guaranteed that all threads
343 * participating in this io share the same session.
345 int (*coo_io_init)(const struct lu_env *env,
346 struct cl_object *obj, struct cl_io *io);
348 * Fill portion of \a attr that this layer controls. This method is
349 * called top-to-bottom through all object layers.
351 * \pre cl_object_header::coh_attr_guard of the top-object is locked.
353 * \return 0: to continue
354 * \return +ve: to stop iterating through layers (but 0 is returned
355 * from enclosing cl_object_attr_get())
356 * \return -ve: to signal error
358 int (*coo_attr_get)(const struct lu_env *env, struct cl_object *obj,
359 struct cl_attr *attr);
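/**
 * Hedged illustration of the coo_attr_get() return convention above for a
 * hypothetical layer "foo" (all foo_* names are made up, not part of the
 * real interface):
 * \code
 *	static int foo_attr_get(const struct lu_env *env,
 *				struct cl_object *obj,
 *				struct cl_attr *attr)
 *	{
 *		struct foo_object *foo = foo_obj(obj);
 *
 *		if (foo->fo_no_attrs)
 *			return +1;	// stop iteration; caller still sees 0
 *		attr->cat_size = foo->fo_size;
 *		return 0;		// continue with lower layers
 *	}
 * \endcode
 */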
363 * \a valid is a bitmask composed from enum #cl_attr_valid, and
364 * indicating what attributes are to be set.
366 * \pre cl_object_header::coh_attr_guard of the top-object is locked.
368 * \return the same convention as for
369 * cl_object_operations::coo_attr_get() is used.
371 int (*coo_attr_set)(const struct lu_env *env, struct cl_object *obj,
372 const struct cl_attr *attr, unsigned valid);
374 * Update object configuration. Called top-to-bottom to modify object
377 * XXX error conditions and handling.
379 int (*coo_conf_set)(const struct lu_env *env, struct cl_object *obj,
380 const struct cl_object_conf *conf);
382 * Glimpse ast. Executed when glimpse ast arrives for a lock on this
383 * object. Layers are supposed to fill parts of \a lvb that will be
384 * shipped to the glimpse originator as a glimpse result.
386 * \see ccc_object_glimpse(), lovsub_object_glimpse(),
387 * \see osc_object_glimpse()
389 int (*coo_glimpse)(const struct lu_env *env,
390 const struct cl_object *obj, struct ost_lvb *lvb);
392 * Object prune method. Called when the layout is going to change on
393 * this object, therefore each layer has to clean up their cache,
394 * mainly pages and locks.
396 int (*coo_prune)(const struct lu_env *env, struct cl_object *obj);
398 * Object getstripe method.
400 int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj,
401 struct lov_user_md __user *lum);
405 * Extended header for client object.
407 struct cl_object_header {
408 /** Standard lu_object_header. cl_object::co_lu::lo_header points
410 struct lu_object_header coh_lu;
413 * Parent object. It is assumed that an object has a well-defined
414 * parent, but not a well-defined child (there may be multiple
415 * sub-objects, for the same top-object). cl_object_header::coh_parent
416 * field allows certain code to be written generically, without
417 * limiting possible cl_object layouts unduly.
419 struct cl_object_header *coh_parent;
421 * Protects consistency between cl_attr of parent object and
422 * attributes of sub-objects, from which the former is calculated ("merged").
425 * \todo XXX this can be read/write lock if needed.
427 spinlock_t coh_attr_guard;
429 * Size of cl_page + page slices
431 unsigned short coh_page_bufsize;
433 * Number of objects above this one: 0 for a top-object, 1 for its sub-object, etc.
436 unsigned char coh_nesting;
440 * Helper macro: iterate over all layers of the object \a obj, assigning every
441 * layer top-to-bottom to \a slice.
443 #define cl_object_for_each(slice, obj) \
444 list_for_each_entry((slice), \
445 &(obj)->co_lu.lo_header->loh_layers, co_lu.lo_linkage)
449 * Helper macro: iterate over all layers of the object \a obj, assigning every
450 * layer bottom-to-top to \a slice.
452 #define cl_object_for_each_reverse(slice, obj) \
453 list_for_each_entry_reverse((slice), \
454 &(obj)->co_lu.lo_header->loh_layers, co_lu.lo_linkage)
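/**
 * A minimal usage sketch (illustrative only, assuming an available \a env
 * and \a obj; return values ignored for brevity): walking the layers of an
 * object top-to-bottom and invoking an optional per-layer method.
 * \code
 *	struct cl_object *slice;
 *
 *	cl_object_for_each(slice, obj) {
 *		if (slice->co_ops->coo_prune != NULL)
 *			slice->co_ops->coo_prune(env, slice);
 *	}
 * \endcode
 */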
459 #define CL_PAGE_EOF ((pgoff_t)~0ull)
461 /** \addtogroup cl_page cl_page
465 * Layered client page.
467 * cl_page: represents a portion of a file, cached in the memory. All pages
468 * of the given file are of the same size, and are kept in the radix tree
469 * hanging off the cl_object. cl_page doesn't fan out, but as sub-objects
470 * of the top-level file object are first class cl_objects, they have their
471 * own radix trees of pages and hence page is implemented as a sequence of
472 * struct cl_page's, linked into a double-linked list through
473 * cl_page::cp_parent and cl_page::cp_child pointers, each residing in the
474 * corresponding radix tree at the corresponding logical offset.
476 * cl_page is associated with VM page of the hosting environment (struct
477 * page in the Linux kernel, for example). It is assumed that this
478 * association is implemented by one of cl_page layers (top layer in the
479 * current design) that
481 * - intercepts per-VM-page call-backs made by the environment (e.g.,
484 * - translates state (page flag bits) and locking between lustre and
487 * The association between cl_page and struct page is immutable and
488 * established when cl_page is created.
490 * cl_page can be "owned" by a particular cl_io (see below), guaranteeing
491 * this io an exclusive access to this page w.r.t. other io attempts and
492 * various events changing page state (such as transfer completion, or
493 * eviction of the page from the memory). Note, that in general cl_io
494 * cannot be identified with a particular thread, and page ownership is not
495 * exactly equal to the current thread holding a lock on the page. Layer
496 * implementing association between cl_page and struct page has to implement
497 * ownership on top of available synchronization mechanisms.
499 * While lustre client maintains the notion of page ownership by io,
500 * hosting MM/VM usually has its own page concurrency control
501 * mechanisms. For example, in Linux, page access is synchronized by the
502 * per-page PG_locked bit-lock, and generic kernel code (generic_file_*())
503 * takes care to acquire and release such locks as necessary around the
504 * calls to the file system methods (->readpage(), ->prepare_write(),
505 * ->commit_write(), etc.). This leads to the situation when there are two
506 * different ways to own a page in the client:
508 * - client code explicitly and voluntarily owns the page (cl_page_own());
510 * - VM locks a page and then calls the client, which has to "assume"
511 * the ownership from the VM (cl_page_assume()).
513 * Dual methods to release ownership are cl_page_disown() and
514 * cl_page_unassume().
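 *
 * A hedged sketch of the two ownership paths described above (assuming the
 * cl_page_own(), cl_page_assume(), cl_page_disown() and cl_page_unassume()
 * entry points; illustrative only):
 * \code
 *	// io-initiated path: acquire and later yield exclusive ownership
 *	if (cl_page_own(env, io, page) == 0) {
 *		// ... use the page on behalf of io ...
 *		cl_page_disown(env, io, page);
 *	}
 *
 *	// VM-initiated path: the vmpage is already locked by the VM
 *	cl_page_assume(env, io, page);
 *	// ... use the page ...
 *	cl_page_unassume(env, io, page);
 * \endcode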
516 * cl_page is reference counted (cl_page::cp_ref). When reference counter
517 * drops to 0, the page is returned to the cache, unless it is in
518 * cl_page_state::CPS_FREEING state, in which case it is immediately
521 * The general logic guaranteeing the absence of "existential races" for
522 * pages is the following:
524 * - there are fixed known ways for a thread to obtain a new reference to a page:
527 * - by doing a lookup in the cl_object radix tree, protected by the
530 * - by starting from VM-locked struct page and following some
531 * hosting environment method (e.g., following ->private pointer in
532 * the case of Linux kernel), see cl_vmpage_page();
534 * - when the page enters cl_page_state::CPS_FREEING state, all these
535 * ways are severed with the proper synchronization
536 * (cl_page_delete());
538 * - entry into cl_page_state::CPS_FREEING is serialized by the VM page
541 * - no new references to the page in cl_page_state::CPS_FREEING state
542 * are allowed (checked in cl_page_get()).
544 * Together this guarantees that when last reference to a
545 * cl_page_state::CPS_FREEING page is released, it is safe to destroy the
546 * page, as no new references to it can be acquired at that point, nor do any exist.
549 * cl_page is a state machine. States are enumerated in enum
550 * cl_page_state. Possible state transitions are enumerated in
551 * cl_page_state_set(). State transition process (i.e., actual changing of
552 * cl_page::cp_state field) is protected by the lock on the underlying VM page.
555 * Linux Kernel implementation.
557 * Binding between cl_page and struct page
558 * is implemented in the vvp layer. cl_page is attached to the
559 * ->private pointer of the struct page, together with the setting of
560 * PG_private bit in page->flags, and acquiring additional reference on the
561 * struct page (much like struct buffer_head, or any similar file system
562 * private data structures).
564 * PG_locked lock is used to implement both ownership and transfer
565 * synchronization, that is, page is VM-locked in CPS_{OWNED,PAGE{IN,OUT}}
566 * states. No additional references are acquired for the duration of the transfer.
569 * \warning *THIS IS NOT* the behavior expected by the Linux kernel, where
570 * write-out is "protected" by the special PG_writeback bit.
574 * States of cl_page. cl_page.c assumes particular order here.
576 * The page state machine is rather crude, as it doesn't recognize finer page
577 * states like "dirty" or "up to date". This is because such states are not
578 * always well defined for the whole stack (see, for example, the
579 * implementation of the read-ahead, that hides page up-to-dateness to track
580 * cache hits accurately). Such sub-states are maintained by the layers that
581 * are interested in them.
585 * Page is in the cache, un-owned. Page leaves cached state in the following cases:
588 * - [cl_page_state::CPS_OWNED] io comes across the page and
591 * - [cl_page_state::CPS_PAGEOUT] page is dirty, the
592 * req-formation engine decides that it wants to include this page
593 * into a cl_req being constructed, and yanks it from the cache;
595 * - [cl_page_state::CPS_FREEING] VM callback is executed to
596 * evict the page from the memory;
598 * \invariant cl_page::cp_owner == NULL && cl_page::cp_req == NULL
602 * Page is exclusively owned by some cl_io. Page may end up in this
603 * state as a result of
605 * - io creating new page and immediately owning it;
607 * - [cl_page_state::CPS_CACHED] io finding existing cached page
610 * - [cl_page_state::CPS_OWNED] io finding existing owned page
611 * and waiting for owner to release the page;
613 * Page leaves owned state in the following cases:
615 * - [cl_page_state::CPS_CACHED] io decides to leave the page in
616 * the cache, doing nothing;
618 * - [cl_page_state::CPS_PAGEIN] io starts read transfer for this page;
621 * - [cl_page_state::CPS_PAGEOUT] io starts immediate write
622 * transfer for this page;
624 * - [cl_page_state::CPS_FREEING] io decides to destroy this
625 * page (e.g., as part of truncate or extent lock cancellation).
627 * \invariant cl_page::cp_owner != NULL && cl_page::cp_req == NULL
631 * Page is being written out, as a part of a transfer. This state is
632 * entered when req-formation logic decided that it wants this page to
633 * be sent through the wire _now_. Specifically, it means that once
634 * this state is achieved, transfer completion handler (with either
635 * success or failure indication) is guaranteed to be executed against
636 * this page independently of any locks and any scheduling decisions
637 * made by the hosting environment (that effectively means that the
638 * page is never put into cl_page_state::CPS_PAGEOUT state "in
639 * advance". This property is mentioned, because it is important when
640 * reasoning about possible dead-locks in the system). The page can
641 * enter this state as a result of
643 * - [cl_page_state::CPS_OWNED] an io requesting an immediate
644 * write-out of this page, or
646 * - [cl_page_state::CPS_CACHED] req-forming engine deciding
647 * that it has enough dirty pages cached to issue a "good" transfer;
650 * The page leaves cl_page_state::CPS_PAGEOUT state when the transfer
651 * is completed---it is moved into cl_page_state::CPS_CACHED state.
653 * Underlying VM page is locked for the duration of transfer.
655 * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req != NULL
659 * Page is being read in, as a part of a transfer. This is quite
660 * similar to the cl_page_state::CPS_PAGEOUT state, except that
661 * read-in is always "immediate"---there is no such thing as a sudden
662 * construction of read cl_req from cached, presumably not up to date, pages.
665 * Underlying VM page is locked for the duration of transfer.
667 * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req != NULL
671 * Page is being destroyed. This state is entered when client decides
672 * that page has to be deleted from its host object, as, e.g., a part of truncate.
675 * Once this state is reached, there is no way to escape it.
677 * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req == NULL
684 /** Host page, the page is from the host inode which the cl_page belongs to. */
688 /** Transient page, the transient cl_page is used to bind a cl_page
689 * to a vmpage which does not belong to the same object as the cl_page.
690 * It is used in DirectIO, lockless IO and liblustre. */
695 * Fields are protected by the lock on struct page, except for atomics and
698 * \invariant Data type invariants are in cl_page_invariant(). Basically:
699 * cl_page::cp_parent and cl_page::cp_child are a well-formed double-linked
700 * list, consistent with the parent/child pointers in the cl_page::cp_obj and
701 * cl_page::cp_owner (when set).
704 /** Reference counter. */
706 /** Transfer error. */
708 /** An object this page is a part of. Immutable after creation. */
709 struct cl_object *cp_obj;
711 struct page *cp_vmpage;
712 /** Linkage of pages within group. Pages must be owned */
713 struct list_head cp_batch;
714 /** List of slices. Immutable after creation. */
715 struct list_head cp_layers;
716 /** Linkage of pages within cl_req. */
717 struct list_head cp_flight;
719 * Page state. This field is const to avoid accidental update, it is
720 * modified only internally within cl_page.c. Protected by a VM lock.
722 const enum cl_page_state cp_state;
724 * Page type. Only CPT_TRANSIENT is used so far. Immutable after
727 enum cl_page_type cp_type;
730 * Owning IO in cl_page_state::CPS_OWNED state. Sub-page can be owned
731 * by sub-io. Protected by a VM lock.
733 struct cl_io *cp_owner;
735 * Owning IO request in cl_page_state::CPS_PAGEOUT and
736 * cl_page_state::CPS_PAGEIN states. This field is maintained only in
737 * the top-level pages. Protected by a VM lock.
739 struct cl_req *cp_req;
740 /** List of references to this page, for debugging. */
741 struct lu_ref cp_reference;
742 /** Link to an object, for debugging. */
743 struct lu_ref_link cp_obj_ref;
744 /** Link to a queue, for debugging. */
745 struct lu_ref_link cp_queue_ref;
746 /** Assigned if doing a sync_io */
747 struct cl_sync_io *cp_sync_io;
751 * Per-layer part of cl_page.
753 * \see ccc_page, lov_page, osc_page
755 struct cl_page_slice {
756 struct cl_page *cpl_page;
759 * Object slice corresponding to this page slice. Immutable after
762 struct cl_object *cpl_obj;
763 const struct cl_page_operations *cpl_ops;
764 /** Linkage into cl_page::cp_layers. Immutable after creation. */
765 struct list_head cpl_linkage;
769 * Lock mode. For the client extent locks.
781 * Requested transfer type.
791 * Per-layer page operations.
793 * Methods taking an \a io argument are for the activity happening in the
794 * context of given \a io. Page is assumed to be owned by that io, except for
795 * the obvious cases (like cl_page_operations::cpo_own()).
797 * \see vvp_page_ops, lov_page_ops, osc_page_ops
799 struct cl_page_operations {
801 * cl_page<->struct page methods. Only one layer in the stack has to
802 * implement these. Current code assumes that this functionality is
803 * provided by the topmost layer, see cl_page_disown0() as an example.
807 * Called when \a io acquires this page into the exclusive
808 * ownership. When this method returns, it is guaranteed that the page is
809 * not owned by another io, and no transfer is going on against it. Optional.
813 * \see vvp_page_own(), lov_page_own()
815 int (*cpo_own)(const struct lu_env *env,
816 const struct cl_page_slice *slice,
817 struct cl_io *io, int nonblock);
818 /** Called when ownership is yielded. Optional.
820 * \see cl_page_disown()
821 * \see vvp_page_disown()
823 void (*cpo_disown)(const struct lu_env *env,
824 const struct cl_page_slice *slice, struct cl_io *io);
826 * Called for a page that is already "owned" by \a io from VM point of view. Optional.
829 * \see cl_page_assume()
830 * \see vvp_page_assume(), lov_page_assume()
832 void (*cpo_assume)(const struct lu_env *env,
833 const struct cl_page_slice *slice, struct cl_io *io);
834 /** Dual to cl_page_operations::cpo_assume(). Optional. Called
835 * bottom-to-top when IO releases a page without actually unlocking
838 * \see cl_page_unassume()
839 * \see vvp_page_unassume()
841 void (*cpo_unassume)(const struct lu_env *env,
842 const struct cl_page_slice *slice, struct cl_io *io);
845 * Announces whether the page contains valid data or not, as indicated by \a uptodate.
847 * \see cl_page_export()
848 * \see vvp_page_export()
850 void (*cpo_export)(const struct lu_env *env,
851 const struct cl_page_slice *slice, int uptodate);
853 * Checks whether underlying VM page is locked (in the suitable
854 * sense). Used for assertions.
856 * \retval -EBUSY: page is protected by a lock of a given mode;
857 * \retval -ENODATA: page is not protected by a lock;
858 * \retval 0: this layer cannot decide. (Should never happen.)
860 int (*cpo_is_vmlocked)(const struct lu_env *env,
861 const struct cl_page_slice *slice);
867 * Called when page is truncated from the object. Optional.
869 * \see cl_page_discard()
870 * \see vvp_page_discard(), osc_page_discard()
872 void (*cpo_discard)(const struct lu_env *env,
873 const struct cl_page_slice *slice, struct cl_io *io);
876 * Called when page is removed from the cache, and is about to be
877 * destroyed. Optional.
879 * \see cl_page_delete()
880 * \see vvp_page_delete(), osc_page_delete()
882 void (*cpo_delete)(const struct lu_env *env,
883 const struct cl_page_slice *slice);
884 /** Destructor. Frees resources and slice itself. */
885 void (*cpo_fini)(const struct lu_env *env,
886 struct cl_page_slice *slice);
889 * Checks whether the page is protected by a cl_lock. This is a
890 * per-layer method, because certain layers have ways to check for the
891 * lock much more efficiently than through the generic locks scan, or
892 * implement locking mechanisms separate from cl_lock, e.g.,
893 * LL_FILE_GROUP_LOCKED in vvp. If \a pending is true, check for locks
894 * being canceled, or scheduled for cancellation as soon as the last
895 * user goes away, too.
897 * \retval -EBUSY: page is protected by a lock of a given mode;
898 * \retval -ENODATA: page is not protected by a lock;
899 * \retval 0: this layer cannot decide.
901 * \see cl_page_is_under_lock()
903 int (*cpo_is_under_lock)(const struct lu_env *env,
904 const struct cl_page_slice *slice,
905 struct cl_io *io, pgoff_t *max);
908 * Optional debugging helper. Prints given page slice.
910 * \see cl_page_print()
912 int (*cpo_print)(const struct lu_env *env,
913 const struct cl_page_slice *slice,
914 void *cookie, lu_printer_t p);
918 * Transfer methods. See comment on cl_req for a description of
919 * transfer formation and life-cycle.
924 * Request type dependent vector of operations.
926 * Transfer operations depend on transfer mode (cl_req_type). To avoid
927 * passing transfer mode to each and every of these methods, and to
928 * avoid branching on request type inside of the methods, separate
929 * methods for cl_req_type:CRT_READ and cl_req_type:CRT_WRITE are
930 * provided. That is, method invocation usually looks like
932 * slice->cp_ops.io[req->crq_type].cpo_method(env, slice, ...);
936 * Called when a page is submitted for a transfer as a part of
939 * \return 0 : page is eligible for submission;
940 * \return -EALREADY : skip this page;
941 * \return -ve : error.
943 * \see cl_page_prep()
945 int (*cpo_prep)(const struct lu_env *env,
946 const struct cl_page_slice *slice, struct cl_io *io);
949 * Completion handler. This is guaranteed to be eventually
950 * fired after cl_page_operations::cpo_prep() or
951 * cl_page_operations::cpo_make_ready() call.
953 * This method can be called in a non-blocking context. It is
954 * guaranteed however, that the page involved and its object
955 * are pinned in memory (and, hence, calling cl_page_put() is safe).
958 * \see cl_page_completion()
960 void (*cpo_completion)(const struct lu_env *env,
961 const struct cl_page_slice *slice, int ioret);
964 * Called when cached page is about to be added to the
965 * cl_req as a part of req formation.
967 * \return 0 : proceed with this page;
968 * \return -EAGAIN : skip this page;
969 * \return -ve : error.
971 * \see cl_page_make_ready()
973 int (*cpo_make_ready)(const struct lu_env *env,
974 const struct cl_page_slice *slice);
977 * Tell transfer engine that only the [from, to] part of a page should be transmitted.
980 * This is used for immediate transfers.
982 * \todo XXX this is not very good interface. It would be much better
983 * if all transfer parameters were supplied as arguments to
984 * cl_io_operations::cio_submit() call, but it is not clear how to do
985 * this for page queues.
987 * \see cl_page_clip()
989 void (*cpo_clip)(const struct lu_env *env,
990 const struct cl_page_slice *slice, int from, int to);
993 * \pre the page was queued for transferring.
994 * \post page is removed from client's pending list, or -EBUSY
995 * is returned if it is already being transferred.
997 * This is one of the few page operations which is:
998 * 0. called from the top level;
999 * 1. called without the vmpage locked;
1000 * 2. required, at every layer, to synchronize execution of its ->cpo_cancel()
1001 * with completion handlers. Osc uses the client obd lock for this
1002 * purpose. Since there is neither vvp_page_cancel() nor
1003 * lov_page_cancel(), cpo_cancel is de facto protected by the client lock.
1005 * \see osc_page_cancel().
1007 int (*cpo_cancel)(const struct lu_env *env,
1008 const struct cl_page_slice *slice);
1010 * Write out a page by kernel. This is only called by ll_writepage right now.
1013 * \see cl_page_flush()
1015 int (*cpo_flush)(const struct lu_env *env,
1016 const struct cl_page_slice *slice, struct cl_io *io);
1022 * Helper macro, dumping detailed information about \a page into a log.
1024 #define CL_PAGE_DEBUG(mask, env, page, format, ...) \
1026 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
1027 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
1028 cl_page_print(env, &msgdata, lu_cdebug_printer, page); \
1029 CDEBUG(mask, format , ## __VA_ARGS__); \
1034 * Helper macro, dumping shorter information about \a page into a log.
1036 #define CL_PAGE_HEADER(mask, env, page, format, ...) \
1038 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
1039 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
1040 cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \
1041 CDEBUG(mask, format , ## __VA_ARGS__); \
1045 static inline struct page *cl_page_vmpage(const struct cl_page *page)
1047 LASSERT(page->cp_vmpage != NULL);
1048 return page->cp_vmpage;
1052 * Check if a cl_page is in use.
1054 * Client cache holds a refcount, this refcount will be dropped when
1055 * the page is taken out of cache, see vvp_page_delete().
1057 static inline bool __page_in_use(const struct cl_page *page, int refc)
1059 return (atomic_read(&page->cp_ref) > refc + 1);
1063 * Caller itself holds a refcount of cl_page.
1065 #define cl_page_in_use(pg) __page_in_use(pg, 1)
1067 * Caller doesn't hold a refcount.
1069 #define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
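/**
 * Hedged usage sketch (illustrative only) tying the helpers above together;
 * assumes the caller holds a reference on \a page:
 * \code
 *	struct page *vmpage = cl_page_vmpage(page);
 *
 *	if (cl_page_in_use(page))
 *		CL_PAGE_DEBUG(D_PAGE, env, page,
 *			      "still in use, vmpage %p\n", vmpage);
 * \endcode
 */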
1073 /** \addtogroup cl_lock cl_lock
1077 * Extent locking on the client.
1081 * The locking model of the new client code is built around struct cl_lock, a
1085 * data-type representing an extent lock on a regular file. cl_lock is a
1086 * layered object (much like cl_object and cl_page), it consists of a header
1087 * (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to
1088 * cl_lock::cll_layers list through cl_lock_slice::cls_linkage.
1090 * Typical cl_lock consists of the two layers:
1092 * - vvp_lock (vvp specific data), and
1093 * - lov_lock (lov specific data).
1095 * lov_lock contains an array of sub-locks. Each of these sub-locks is a
1096 * normal cl_lock: it has a header (struct cl_lock) and a list of layers:
1098 * - lovsub_lock, and
1101 * Each sub-lock is associated with a cl_object (representing stripe
1102 * sub-object or the file to which top-level cl_lock is associated), and is
1103 * linked into that cl_object::coh_locks. In this respect cl_lock is similar to
1104 * cl_object (that at lov layer also fans out into multiple sub-objects), and
1105 * is different from cl_page, that doesn't fan out (there is usually exactly
1106 * one osc_page for every vvp_page). We shall call vvp-lov portion of the lock
1107 * a "top-lock" and its lovsub-osc portion a "sub-lock".
1111 * cl_lock is reference counted. When reference counter drops to 0, lock is
1112 * placed in the cache, except when lock is in CLS_FREEING state. CLS_FREEING
1113 * lock is destroyed when last reference is released. Referencing between
1114 * top-lock and its sub-locks is described in the lov documentation module.
1118 * Also, cl_lock is a state machine. This requires some clarification. One of
1119 * the goals of client IO re-write was to make IO path non-blocking, or at
1120 * least to make it easier to make it non-blocking in the future. Here
1121 * `non-blocking' means that when a system call (read, write, truncate)
1122 * reaches a situation where it has to wait for a communication with the
1123 * server, it should --instead of waiting-- remember its current state and
1124 * switch to some other work. E.g., instead of waiting for a lock enqueue,
1125 * client should proceed doing IO on the next stripe, etc. Obviously this is
1126 * rather radical redesign, and it is not planned to be fully implemented at
1127 * this time, instead we are putting some infrastructure in place, that would
1128 * make it easier to do asynchronous non-blocking IO in the
1129 * future. Specifically, where old locking code goes to sleep (waiting for
1130 * enqueue, for example), new code returns cl_lock_transition::CLO_WAIT. When
1131 * enqueue reply comes, its completion handler signals that lock state-machine
1132 * is ready to transit to the next state. There is some generic code in
1133 * cl_lock.c that sleeps, waiting for these signals. As a result, for users of
1134 * this cl_lock.c code, it looks like locking is done in normal blocking
1135 * fashion, and at the same time it is possible to switch to the non-blocking
1136 * locking (simply by returning cl_lock_transition::CLO_WAIT from cl_lock.c functions).
1139 * For a description of state machine states and transitions see enum cl_lock_state.
1142 * There are two ways to restrict a set of states which lock might move to:
1144 * - placing a "hold" on a lock guarantees that lock will not be moved
1145 * into cl_lock_state::CLS_FREEING state until hold is released. Hold
1146 * can be only acquired on a lock that is not in
1147 * cl_lock_state::CLS_FREEING. All holds on a lock are counted in
1148 * cl_lock::cll_holds. Hold protects lock from cancellation and
1149 * destruction. Requests to cancel and destroy a lock on hold will be
1150 * recorded, but only honored when last hold on a lock is released;
1152 * - placing a "user" on a lock guarantees that lock will not leave
1153 * cl_lock_state::CLS_NEW, cl_lock_state::CLS_QUEUING,
1154 * cl_lock_state::CLS_ENQUEUED and cl_lock_state::CLS_HELD set of
1155 * states, once it enters this set. That is, if a user is added onto a
1156 * lock in a state not from this set, it doesn't immediately enforce
1157 * lock to move to this set, but once lock enters this set it will
1158 * remain there until all users are removed. Lock users are counted in
1159 * cl_lock::cll_users.
1161 * User is used to assure that lock is not canceled or destroyed while
1162 * it is being enqueued, or actively used by some IO.
1164 * Currently, a user always comes with a hold (cl_lock_invariant()
1165 * checks that a number of holds is not less than a number of users).
1169 * This is how lock state-machine operates. struct cl_lock contains a mutex
1170 * cl_lock::cll_guard that protects struct fields.
1172 * - mutex is taken, and cl_lock::cll_state is examined.
1174 * - for every state there are possible target states where lock can move
1175 * into. They are tried in order. Attempts to move into next state are
1176 * done by _try() functions in cl_lock.c:cl_{enqueue,unlock,wait}_try().
1178 * - if the transition can be performed immediately, state is changed,
1179 * and mutex is released.
1181 * - if the transition requires blocking, _try() function returns
1182 * cl_lock_transition::CLO_WAIT. Caller unlocks mutex and goes to
1183 * sleep, waiting for possibility of lock state change. It is woken
1184 * up when some event occurs, that makes lock state change possible
1185 * (e.g., the reception of the reply from the server), and repeats the loop.
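 *
 * A conceptual, hedged sketch of the blocking wrapper described above
 * (pseudo-code, not the literal cl_lock.c implementation; the _try()
 * function name and the wait primitive are placeholders taken from the
 * description):
 * \code
 *	mutex_lock(&lock->cll_guard);
 *	while ((result = cl_enqueue_try(env, lock, io)) == CLO_WAIT) {
 *		mutex_unlock(&lock->cll_guard);
 *		wait_for_lock_event(lock);	// woken by a completion handler
 *		mutex_lock(&lock->cll_guard);
 *	}
 *	mutex_unlock(&lock->cll_guard);
 * \endcode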
1188 * Top-lock and sub-lock have separate mutexes and the latter has to be taken
1189 * first to avoid dead-lock.
1191 * To see an example of interaction of all these issues, take a look at the
1192 * lov_cl.c:lov_lock_enqueue() function. It is called as a part of
1193 * cl_enqueue_try(), and tries to advance top-lock to ENQUEUED state, by
1194 * advancing state-machines of its sub-locks (lov_lock_enqueue_one()). Note
1195 * also, that it uses trylock to grab sub-lock mutex to avoid dead-lock. It
1196 * also has to handle CEF_ASYNC enqueue, when sub-locks enqueues have to be
1197 * done in parallel, rather than one after another (this is used for glimpse
1198 * locks, that cannot dead-lock).
1200 * INTERFACE AND USAGE
1202 * struct cl_lock_operations provide a number of call-backs that are invoked
1203 * when events of interest occur. Layers can intercept and handle glimpse,
1204 * blocking, cancel ASTs and a reception of the reply from the server.
1206 * One important difference with the old client locking model is that new
1207 * client has a representation for the top-lock, whereas in the old code only
1208 * sub-locks existed as real data structures and file-level locks were
1209 * represented by "request sets" that are created and destroyed on each and
1210 * every lock creation.
1212 * Top-locks are cached, and can be found in the cache by the system calls. It
1213 * is possible that top-lock is in cache, but some of its sub-locks were
1214 * canceled and destroyed. In that case top-lock has to be enqueued again
1215 * before it can be used.
1217 * Overall process of the locking during IO operation is as follows:
1219 * - once parameters for IO are setup in cl_io, cl_io_operations::cio_lock()
1220 * is called on each layer. Responsibility of this method is to add locks,
1221 * needed by a given layer into cl_io.ci_lockset.
1223 * - once locks for all layers were collected, they are sorted to avoid
1224 * dead-locks (cl_io_locks_sort()), and enqueued.
1226 * - when all locks are acquired, IO is performed;
1228 * - locks are released into cache.
1230 * Striping introduces major additional complexity into locking. The
1231 * fundamental problem is that it is generally unsafe to actively use (hold)
1232 * two locks on the different OST servers at the same time, as this introduces
1233 * inter-server dependency and can lead to cascading evictions.
1235 * Basic solution is to sub-divide large read/write IOs into smaller pieces so
1236 * that no multi-stripe locks are taken (note that this design abandons POSIX
1237 * read/write semantics). Such pieces ideally can be executed concurrently. At
1238 * the same time, certain types of IO cannot be sub-divided, without
1239 * sacrificing correctness. This includes:
1241 * - O_APPEND write, where [0, EOF] lock has to be taken, to guarantee
1244 * - ftruncate(fd, offset), where [offset, EOF] lock has to be taken.
1246 * Also, in the case of read(fd, buf, count) or write(fd, buf, count), where
1247 * buf is a part of memory mapped Lustre file, a lock or locks protecting buf
1248 * has to be held together with the usual lock on [offset, offset + count].
1250 * As multi-stripe locks have to be allowed, it makes sense to cache them, so
1251 * that, for example, a sequence of O_APPEND writes can proceed quickly
1252 * without going down to the individual stripes to do lock matching. On the
1253 * other hand, multi-stripe locks shouldn't be used by normal read/write
1254 * calls. To achieve this, every layer can implement ->clo_fits_into() method,
1255 * that is called by lock matching code (cl_lock_lookup()), and that can be
1256 * used to selectively disable matching of certain locks for certain IOs. For
1257 * example, lov layer implements lov_lock_fits_into() that allows multi-stripe
1258 * locks to be matched only for truncates and O_APPEND writes.
1260 * Interaction with DLM
1262 * In the expected setup, cl_lock is ultimately backed up by a collection of
1263 * DLM locks (struct ldlm_lock). Association between cl_lock and DLM lock is
1264 * implemented in osc layer, that also matches DLM events (ASTs, cancellation,
1265 * etc.) into cl_lock_operation calls. See struct osc_lock for a more detailed
1266 * description of interaction with DLM.
1272 struct cl_lock_descr {
1273 /** Object this lock is granted for. */
1274 struct cl_object *cld_obj;
1275 /** Index of the first page protected by this lock. */
1277 /** Index of the last page (inclusive) protected by this lock. */
1279 /** Group ID, for group lock */
1282 enum cl_lock_mode cld_mode;
1284 * flags to enqueue lock. A combination of bit-flags from
1285 * enum cl_enq_flags.
1287 __u32 cld_enq_flags;
1290 #define DDESCR "%s(%d):[%lu, %lu]:%x"
1291 #define PDESCR(descr) \
1292 cl_lock_mode_name((descr)->cld_mode), (descr)->cld_mode, \
1293 (descr)->cld_start, (descr)->cld_end, (descr)->cld_enq_flags
1295 const char *cl_lock_mode_name(const enum cl_lock_mode mode);
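/**
 * Hedged usage sketch of the descriptor printing helpers above (assumes a
 * populated \a cll_descr; illustrative only):
 * \code
 *	CDEBUG(D_DLMTRACE, "lock: " DDESCR "\n", PDESCR(&lock->cll_descr));
 * \endcode
 */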
1298 * Layered client lock.
1301 /** List of slices. Immutable after creation. */
1302 struct list_head cll_layers;
1303 /** lock attribute, extent, cl_object, etc. */
1304 struct cl_lock_descr cll_descr;
1308 * Per-layer part of cl_lock
1310 * \see ccc_lock, lov_lock, lovsub_lock, osc_lock
1312 struct cl_lock_slice {
1313 struct cl_lock *cls_lock;
1314 /** Object slice corresponding to this lock slice. Immutable after
1316 struct cl_object *cls_obj;
1317 const struct cl_lock_operations *cls_ops;
1318 /** Linkage into cl_lock::cll_layers. Immutable after creation. */
1319 struct list_head cls_linkage;
1324 * \see vvp_lock_ops, lov_lock_ops, lovsub_lock_ops, osc_lock_ops
1326 struct cl_lock_operations {
1329 * Attempts to enqueue the lock. Called top-to-bottom.
1331 * \retval 0 this layer has enqueued the lock successfully
1332 * \retval >0 this layer has enqueued the lock, but needs to wait on
1333 * @anchor for resources
1334 * \retval -ve failure
1336 * \see ccc_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
1337 * \see osc_lock_enqueue()
1339 int (*clo_enqueue)(const struct lu_env *env,
1340 const struct cl_lock_slice *slice,
1341 struct cl_io *io, struct cl_sync_io *anchor);
1343 * Cancel a lock and release its DLM lock ref, but do not cancel the DLM lock itself.
1346 void (*clo_cancel)(const struct lu_env *env,
1347 const struct cl_lock_slice *slice);
1350 * Destructor. Frees resources and the slice.
1352 * \see ccc_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
1353 * \see osc_lock_fini()
1355 void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice);
1357 * Optional debugging helper. Prints given lock slice.
1359 int (*clo_print)(const struct lu_env *env,
1360 void *cookie, lu_printer_t p,
1361 const struct cl_lock_slice *slice);
1364 #define CL_LOCK_DEBUG(mask, env, lock, format, ...) \
1366 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
1367 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
1368 cl_lock_print(env, &msgdata, lu_cdebug_printer, lock); \
1369 CDEBUG(mask, format , ## __VA_ARGS__); \
1373 #define CL_LOCK_ASSERT(expr, env, lock) do { \
1377 CL_LOCK_DEBUG(D_ERROR, env, lock, "failed at %s.\n", #expr); \
1383 /** \addtogroup cl_page_list cl_page_list
1384 * Page list used to perform collective operations on a group of pages.
1386 * Pages are added to the list one by one. cl_page_list acquires a reference
1387 * for every page in it. Page list is used to perform collective operations on pages:
1390 * - submit pages for an immediate transfer,
1392 * - own pages on behalf of certain io (waiting for each page in turn),
1396 * When list is finalized, it releases references on all pages it still has.
1398 * \todo XXX concurrency control.
1402 struct cl_page_list {
1404 struct list_head pl_pages;
1405 struct task_struct *pl_owner;
1409 * A 2-queue of pages. A convenience data-type for common use case, 2-queue
1410 * contains an incoming page list and an outgoing page list.
1413 struct cl_page_list c2_qin;
1414 struct cl_page_list c2_qout;
1417 /** @} cl_page_list */
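/**
 * A hedged sketch of typical 2-queue use for an immediate transfer,
 * assuming the cl_2queue_init(), cl_page_list_add(), cl_io_submit_rw() and
 * cl_2queue_fini() helpers; error handling and page disowning are elided:
 * \code
 *	struct cl_2queue *queue = &io->ci_queue;
 *	int rc;
 *
 *	cl_2queue_init(queue);
 *	cl_page_list_add(&queue->c2_qin, page);
 *	rc = cl_io_submit_rw(env, io, CRT_READ, queue);
 *	// ... wait for or handle completion ...
 *	cl_2queue_fini(env, queue);
 * \endcode
 */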
1419 /** \addtogroup cl_io cl_io
1424 * cl_io represents a high level I/O activity like
1425 * read(2)/write(2)/truncate(2) system call, or cancellation of an extent
1428 * cl_io is a layered object, much like cl_{object,page,lock} but with one
1429 * important distinction. We want to minimize number of calls to the allocator
1430 * in the fast path, e.g., in the case of read(2) when everything is cached:
1431 * client already owns the lock over region being read, and data are cached
1432 * due to read-ahead. To avoid allocation of cl_io layers in such situations,
1433 * per-layer io state is stored in the session, associated with the io, see
1434 * struct {vvp,lov,osc}_io for example. Sessions allocation is amortized
1435 * by using free-lists, see cl_env_get().
1437 * There is a small predefined number of possible io types, enumerated in enum
1440 * cl_io is a state machine, that can be advanced concurrently by the multiple
1441 * threads. It is up to these threads to control the concurrency and,
1442 * specifically, to detect when io is done, and its state can be safely
1445 * For read/write io the overall execution plan is as follows (a simplified sketch appears after this description):
1447 * (0) initialize io state through all layers;
1449 * (1) loop: prepare chunk of work to do
1451 * (2) call all layers to collect locks they need to process current chunk
1453 * (3) sort all locks to avoid dead-locks, and acquire them
1455 * (4) process the chunk: call per-page methods
1456 * (cl_io_operations::cio_read_page() for read,
1457 * cl_io_operations::cio_prepare_write(),
1458 * cl_io_operations::cio_commit_write() for write)
1464 * To implement the "parallel IO mode", lov layer creates sub-io's (lazily to
1465 * address allocation efficiency issues mentioned above), and returns with the
1466 * special error condition from per-page method when current sub-io has to
1467 * block. This causes io loop to be repeated, and lov switches to the next
1468 * sub-io in its cl_io_operations::cio_iter_init() implementation.
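 *
 * A hedged, simplified sketch of the plan described above (conceptual only;
 * the authoritative driver is cl_io_loop()):
 * \code
 *	result = cl_io_init(env, io, CIT_READ, obj);
 *	if (result == 0) {
 *		do {
 *			result = cl_io_iter_init(env, io);
 *			if (result == 0)
 *				result = cl_io_lock(env, io);
 *			if (result == 0)
 *				result = cl_io_start(env, io);
 *			cl_io_end(env, io);
 *			cl_io_unlock(env, io);
 *			cl_io_iter_fini(env, io);
 *		} while (result == 0 && io->ci_continue);
 *	}
 *	cl_io_fini(env, io);
 * \endcode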
1473 /** read system call */
1475 /** write system call */
1477 /** truncate, utime system calls */
1480 * page fault handling
1484 * fsync system call handling
1485 * To write out a range of file
1489 * Miscellaneous io. This is used for occasional io activity that
1490 * doesn't fit into other types. Currently this is used for:
1492 * - cancellation of an extent lock. This io exists as a context
1493 * to write dirty pages from under the lock being canceled back
1496 * - VM induced page write-out. An io context for writing page out
1497 * for memory cleansing;
1499 * - glimpse. An io context to acquire glimpse lock.
1501 * - grouplock. An io context to acquire group lock.
1503 * CIT_MISC io is used simply as a context in which locks and pages
1504 * are manipulated. Such io has no internal "process", that is,
1505 * cl_io_loop() is never called for it.
1512 * States of cl_io state machine
1515 /** Not initialized. */
1519 /** IO iteration started. */
1523 /** Actual IO is in progress. */
1525 /** IO for the current iteration finished. */
1527 /** Locks released. */
1529 /** Iteration completed. */
1531 /** cl_io finalized. */
1536 * IO state private for a layer.
1538 * This is usually embedded into layer session data, rather than allocated
1541 * \see vvp_io, lov_io, osc_io, ccc_io
1543 struct cl_io_slice {
1544 struct cl_io *cis_io;
1545 /** corresponding object slice. Immutable after creation. */
1546 struct cl_object *cis_obj;
1547 /** io operations. Immutable after creation. */
1548 const struct cl_io_operations *cis_iop;
1550 * linkage into a list of all slices for a given cl_io, hanging off
1551 * cl_io::ci_layers. Immutable after creation.
1553 struct list_head cis_linkage;
1556 typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
1560 * Per-layer io operations.
1561 * \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
1563 struct cl_io_operations {
1565 * Vector of io state transition methods for every io type.
1567 * \see cl_page_operations::io
1571 * Prepare io iteration at a given layer.
1573 * Called top-to-bottom at the beginning of each iteration of
1574 * "io loop" (if it makes sense for this type of io). Here
1575 * layer selects what work it will do during this iteration.
1577 * \see cl_io_operations::cio_iter_fini()
1579 int (*cio_iter_init) (const struct lu_env *env,
1580 const struct cl_io_slice *slice);
1582 * Finalize io iteration.
1584 * Called bottom-to-top at the end of each iteration of "io
1585 * loop". Here layers can decide whether IO has to be
1588 * \see cl_io_operations::cio_iter_init()
1590 void (*cio_iter_fini) (const struct lu_env *env,
1591 const struct cl_io_slice *slice);
1593 * Collect locks for the current iteration of io.
1595 * Called top-to-bottom to collect all locks necessary for
1596 * this iteration. This method shouldn't actually enqueue
1597 * anything, instead it should post a lock through
1598 * cl_io_lock_add(). Once all locks are collected, they are
1599 * sorted and enqueued in the proper order.
1601 int (*cio_lock) (const struct lu_env *env,
1602 const struct cl_io_slice *slice);
1604 * Finalize unlocking.
1606 * Called bottom-to-top to finish layer specific unlocking
1607 * functionality, after generic code released all locks
1608 * acquired by cl_io_operations::cio_lock().
1610 void (*cio_unlock)(const struct lu_env *env,
1611 const struct cl_io_slice *slice);
1613 * Start io iteration.
1615 * Once all locks are acquired, called top-to-bottom to
1616 * commence actual IO. In the current implementation,
1617 * top-level vvp_io_{read,write}_start() does all the work
1618 * synchronously by calling generic_file_*(), so other layers
1619 * are called when everything is done.
1621 int (*cio_start)(const struct lu_env *env,
1622 const struct cl_io_slice *slice);
1624 * Called top-to-bottom at the end of io loop. Here layer
1625 * might wait for an unfinished asynchronous io.
1627 void (*cio_end) (const struct lu_env *env,
1628 const struct cl_io_slice *slice);
1630 * Called bottom-to-top to notify layers that read/write IO
1631 * iteration finished, with \a nob bytes transferred.
1633 void (*cio_advance)(const struct lu_env *env,
1634 const struct cl_io_slice *slice, size_t nob);
1637 * Called once per io, bottom-to-top to release io resources.
1639 void (*cio_fini) (const struct lu_env *env,
1640 const struct cl_io_slice *slice);
1644 * Submit pages from \a queue->c2_qin for IO, and move
1645 * successfully submitted pages into \a queue->c2_qout. Return
1646 * non-zero if failed to submit even a single page. If
1647 * submission failed after some pages were moved into \a
1648 * queue->c2_qout, completion callback with non-zero ioret is guaranteed.
1651 int (*cio_submit)(const struct lu_env *env,
1652 const struct cl_io_slice *slice,
1653 enum cl_req_type crt,
1654 struct cl_2queue *queue);
1656 * Queue async page for write.
1657 * The difference between cio_submit and cio_queue is that
1658 * cio_submit is for urgent requests.
1660 int (*cio_commit_async)(const struct lu_env *env,
1661 const struct cl_io_slice *slice,
1662 struct cl_page_list *queue, int from, int to, cl_commit_cbt cb);
1665 * Read missing page.
1667 * Called by a top-level cl_io_operations::op[CIT_READ]::cio_start()
1668 * method, when it hits a not-up-to-date page in the range. Optional.
1670 * \pre io->ci_type == CIT_READ
1672 int (*cio_read_page)(const struct lu_env *env,
1673 const struct cl_io_slice *slice,
1674 const struct cl_page_slice *page);
1676 * Optional debugging helper. Print given io slice.
1678 int (*cio_print)(const struct lu_env *env, void *cookie,
1679 lu_printer_t p, const struct cl_io_slice *slice);
1683 * Flags to lock enqueue procedure.
1688 * instruct the server not to block if a conflicting lock is found; instead
1689 * -EWOULDBLOCK is returned immediately.
1691 CEF_NONBLOCK = 0x00000001,
1693 * take lock asynchronously (out of order), as it cannot
1694 * deadlock. This is for LDLM_FL_HAS_INTENT locks used for glimpsing.
1696 CEF_ASYNC = 0x00000002,
1698 * tell the server to instruct (through a flag in the blocking ast) an
1699 * owner of the conflicting lock, that it can drop dirty pages
1700 * protected by this lock, without sending them to the server.
1702 CEF_DISCARD_DATA = 0x00000004,
1704 * tell the sub layers that it must be a `real' lock. This is used for
1705 * mmapped-buffer locks and glimpse locks that must never be converted
1706 * into lockless mode.
1708 * \see vvp_mmap_locks(), cl_glimpse_lock().
1710 CEF_MUST = 0x00000008,
1712 * tell the sub layers to never request a `real' lock. This flag is
1713 * not used currently.
1715 * cl_io::ci_lockreq and CEF_{MUST,NEVER} flags specify lockless
1716 * conversion policy: ci_lockreq describes generic information of lock
1717 * requirement for this IO, especially for locks which belong to the
1718 * object doing IO; however, lock itself may have precise requirements
1719 * that are described by the enqueue flags.
1721 CEF_NEVER = 0x00000010,
1723 * for async glimpse lock.
1725 CEF_AGL = 0x00000020,
1727 * enqueue a lock to test DLM lock existence.
1729 CEF_PEEK = 0x00000040,
1731 * mask of enq_flags.
1733 CEF_MASK = 0x0000007f,
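/**
 * Hedged illustration (not normative): requesting a non-blocking, "real"
 * write lock through the flags above, on an already-initialized descriptor:
 * \code
 *	descr->cld_mode      = CLM_WRITE;
 *	descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK;
 * \endcode
 */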
1737 * Link between lock and io. Intermediate structure is needed, because the
1738 * same lock can be part of multiple io's simultaneously.
1740 struct cl_io_lock_link {
1741 /** linkage into one of cl_lockset lists. */
1742 struct list_head cill_linkage;
1743 struct cl_lock cill_lock;
1744 /** optional destructor */
1745 void (*cill_fini)(const struct lu_env *env,
1746 struct cl_io_lock_link *link);
1748 #define cill_descr cill_lock.cll_descr
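/**
 * A hedged sketch of how a layer's cio_lock() method might post a lock
 * requirement, assuming cl_io_lock_add() and a \a link embedded in
 * layer-private io state (illustrative only):
 * \code
 *	link->cill_descr.cld_obj       = obj;
 *	link->cill_descr.cld_mode      = CLM_READ;
 *	link->cill_descr.cld_start     = start;
 *	link->cill_descr.cld_end       = end;
 *	link->cill_descr.cld_enq_flags = 0;
 *	rc = cl_io_lock_add(env, io, link);
 * \endcode
 */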
1751 * Lock-set represents a collection of locks, that io needs at a
1752 * time. Generally speaking, client tries to avoid holding multiple locks when possible, because:
1755 * - holding extent locks over multiple ost's introduces the danger of
1756 * "cascading timeouts";
1758 * - holding multiple locks over the same ost is still dead-lock prone,
1759 * see comment in osc_lock_enqueue(),
1761 * but there are certain situations where this is unavoidable:
1763 * - O_APPEND writes have to take [0, EOF] lock for correctness;
1765 * - truncate has to take [new-size, EOF] lock for correctness;
1767 * - SNS has to take locks across full stripe for correctness;
1769 * - in the case when user level buffer, supplied to {read,write}(file0),
1770 * is a part of a memory mapped lustre file, client has to take dlm
1771 * locks on file0, and on all files that back up the buffer (or the part of
1772 * the buffer that is being processed in the current chunk); in any
1773 * case, there are situations where at least 2 locks are necessary.
1775 * In such cases we at least try to take locks in the same consistent
1776 * order. To this end, all locks are first collected, then sorted, and then enqueued.
1780 /** locks to be acquired. */
1781 struct list_head cls_todo;
1782 /** locks acquired. */
1783 struct list_head cls_done;
1787 * Lock requirements (demand) for IO. It should be cl_io_lock_req,
1788 * but 'req' is always to be thought of as 'request' :-)
1790 enum cl_io_lock_dmd {
1791 /** Always lock data (e.g., O_APPEND). */
1793 /** Layers are free to decide between local and global locking. */
1795 /** Never lock: there is no cache (e.g., liblustre). */
1799 enum cl_fsync_mode {
1800 /** start writeback, do not wait for them to finish */
1802 /** start writeback and wait for them to finish */
1804 /** discard all of dirty pages in a specific file range */
1805 CL_FSYNC_DISCARD = 2,
1806 /** start writeback and make sure they have reached storage before
1807 * return. OST_SYNC RPC must be issued and finished */
1811 struct cl_io_rw_common {
1821 * cl_io is shared by all threads participating in this IO (in the current
1822 * implementation only one thread advances an IO, but parallel IO design and
1823 * concurrent copy_*_user() require multiple threads acting on the same IO). It
1824 * is up to these threads to serialize their activities, including updates to
1825 * mutable cl_io fields.
1828 /** type of this IO. Immutable after creation. */
1829 enum cl_io_type ci_type;
1830 /** current state of cl_io state machine. */
1831 enum cl_io_state ci_state;
1832 /** main object this io is against. Immutable after creation. */
1833 struct cl_object *ci_obj;
1835 * Upper layer io, of which this io is a part. Immutable after
1838 struct cl_io *ci_parent;
1839 /** List of slices. Immutable after creation. */
1840 struct list_head ci_layers;
1841 /** list of locks (to be) acquired by this io. */
1842 struct cl_lockset ci_lockset;
1843 /** lock requirements; this is just a hint for the sublayers. */
1844 enum cl_io_lock_dmd ci_lockreq;
1847 struct cl_io_rw_common rd;
1850 struct cl_io_rw_common wr;
1854 struct cl_io_rw_common ci_rw;
1855 struct cl_setattr_io {
1856 struct ost_lvb sa_attr;
1857 unsigned int sa_valid;
1858 struct obd_capa *sa_capa;
1860 struct cl_fault_io {
1861 /** page index within file. */
1863 /** number of valid bytes on a faulted page. */
1865 /** writable page? for nopage() only */
1867 /** page of an executable? */
1869 /** page_mkwrite() */
1871 /** resulting page */
1872 struct cl_page *ft_page;
1874 struct cl_fsync_io {
1877 struct obd_capa *fi_capa;
1878 /** file system level fid */
1879 struct lu_fid *fi_fid;
1880 enum cl_fsync_mode fi_mode;
1881 /* how many pages were written/discarded */
1882 unsigned int fi_nr_written;
1885 struct cl_2queue ci_queue;
1888 unsigned int ci_continue:1,
1890 * This io holds a grouplock, to inform sublayers that they must not
1891 * do lockless i/o.
1895 * The whole IO needs to be restarted because the layout has changed
1899 * to not refresh the layout - the IO issuer knows that the layout won't
1900 * change (page operations; a layout change causes all pages to be
1901 * discarded), or it doesn't matter if it changes (sync).
1905 * Check if the layout changed after the IO finishes. Mainly for HSM
1906 * requirement. If IO occurs to open files, it doesn't need to verify
1907 * the layout because HSM won't release open files.
1908 * Right now, only two operations need to verify layout: glimpse
1913 * file is released, restore has to be triggered by the vvp layer
1915 ci_restore_needed:1,
1921 * Number of pages owned by this IO. For invariant checking.
1923 unsigned ci_owned_nr;
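/*
 * Illustrative sketch of the usual top-level driver pattern for a cl_io. The
 * caller is responsible for serialization (see the note above); cl_io_loop()
 * internally runs the iterate/lock/start/end/unlock cycle declared further
 * below. CIT_READ is assumed from the cl_io_type enum defined earlier in
 * this header, and the cl_io itself is assumed to come from per-thread
 * storage.
 *
 * \code
 * static int example_drive_read(const struct lu_env *env, struct cl_io *io,
 *                               struct cl_object *obj, loff_t pos, size_t cnt)
 * {
 *         int rc;
 *
 *         io->ci_obj = obj;
 *         rc = cl_io_rw_init(env, io, CIT_READ, pos, cnt);
 *         if (rc == 0)
 *                 rc = cl_io_loop(env, io);  // advance the io state machine
 *         cl_io_fini(env, io);
 *         return rc;
 * }
 * \endcode
 */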
1928 /** \addtogroup cl_req cl_req
1933 * There are two possible modes of transfer initiation on the client:
1935 * - immediate transfer: this is started when a high level io wants a page
1936 * or a collection of pages to be transferred right away. Examples:
1937 * read-ahead, synchronous read in the case of non-page aligned write,
1938 * page write-out as a part of extent lock cancellation, page write-out
1939 * as a part of memory cleansing. Immediate transfer can be both
1940 * cl_req_type::CRT_READ and cl_req_type::CRT_WRITE;
1942 * - opportunistic transfer (cl_req_type::CRT_WRITE only), that happens
1943 * when io wants to transfer a page to the server some time later, when
1944 * it can be done efficiently. Example: pages dirtied by the write(2)
1947 * In any case, transfer takes place in the form of a cl_req, which is a
1948 * representation for a network RPC.
1950 * Pages queued for an opportunistic transfer are cached until it is decided
1951 * that an efficient RPC can be composed of them. This decision is made by "a
1952 * req-formation engine", currently implemented as a part of osc
1953 * layer. Req-formation depends on many factors: the size of the resulting
1954 * RPC, whether or not multi-object RPCs are supported by the server,
1955 * max-rpc-in-flight limitations, size of the dirty cache, etc.
1957 * For an immediate transfer the io submits a cl_page_list, which the
1958 * req-formation engine slices into cl_req's, possibly adding cached pages
1959 * to some of the resulting req's.
1961 * Whenever a page from cl_page_list is added to a newly constructed req, its
1962 * cl_page_operations::cpo_prep() layer methods are called. At that moment,
1963 * page state is atomically changed from cl_page_state::CPS_OWNED to
1964 * cl_page_state::CPS_PAGEOUT or cl_page_state::CPS_PAGEIN, cl_page::cp_owner
1965 * is zeroed, and cl_page::cp_req is set to the
1966 * req. cl_page_operations::cpo_prep() method at the particular layer might
1967 * return -EALREADY to indicate that it does not need to submit this page
1968 * at all. This is possible, for example, if a page submitted for read
1969 * became up-to-date in the meantime; and for write, if the page does not
1970 * have the dirty bit set. \see cl_io_submit_rw()
1972 * Whenever a cached page is added to a newly constructed req, its
1973 * cl_page_operations::cpo_make_ready() layer methods are called. At that
1974 * moment, page state is atomically changed from cl_page_state::CPS_CACHED to
1975 * cl_page_state::CPS_PAGEOUT, and cl_page::cp_req is set to
1976 * req. cl_page_operations::cpo_make_ready() method at the particular layer
1977 * might return -EAGAIN to indicate that this page is not eligible for the
1978 * transfer right now.
1982 * The plan is to divide transfers into "priority bands" (indicated when
1983 * submitting cl_page_list, and queuing a page for the opportunistic transfer)
1984 * and allow gluing of cached pages to immediate transfers only within a single
1985 * band. This would make high priority transfers (like lock cancellation or
1986 * memory pressure induced write-out) really high priority.
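/*
 * Illustrative sketch of the two initiation modes described above. For an
 * immediate transfer the io hands a queue of owned pages to the req-formation
 * engine with cl_io_submit_rw(); for an opportunistic write a dirty page is
 * merely cached with cl_page_cache_add() and sent later, when an efficient
 * RPC can be formed. Error handling and page state transitions are omitted.
 *
 * \code
 * // Immediate read of already-owned pages collected in 'queue'.
 * static int example_submit_read(const struct lu_env *env, struct cl_io *io,
 *                                struct cl_2queue *queue)
 * {
 *         return cl_io_submit_rw(env, io, CRT_READ, queue);
 * }
 *
 * // Opportunistic write-back: queue one dirty page for a later transfer.
 * static int example_cache_dirty_page(const struct lu_env *env,
 *                                     struct cl_io *io, struct cl_page *pg)
 * {
 *         return cl_page_cache_add(env, io, pg, CRT_WRITE);
 * }
 * \endcode
 */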
1991 * Per-transfer attributes.
1993 struct cl_req_attr {
1994 /** Generic attributes for the server consumption. */
1995 struct obdo *cra_oa;
1997 struct obd_capa *cra_capa;
1999 char cra_jobid[LUSTRE_JOBID_SIZE];
2003 * Transfer request operations definable at every layer.
2005 * Concurrency: transfer formation engine synchronizes calls to all transfer
2008 struct cl_req_operations {
2010 * Invoked top-to-bottom by cl_req_prep() when transfer formation is
2011 * complete (all pages are added).
2013 * \see osc_req_prep()
2015 int (*cro_prep)(const struct lu_env *env,
2016 const struct cl_req_slice *slice);
2018 * Called top-to-bottom to fill in \a oa fields. This is called twice
2019 * with different flags, see bug 10150 and osc_build_req().
2021 * \param obj an object from cl_req whose attributes are to be set in
2024 * \param oa struct obdo where attributes are placed
2026 * \param flags \a oa fields to be filled.
2028 void (*cro_attr_set)(const struct lu_env *env,
2029 const struct cl_req_slice *slice,
2030 const struct cl_object *obj,
2031 struct cl_req_attr *attr, obd_valid flags);
2033 * Called top-to-bottom from cl_req_completion() to notify layers that
2034 * transfer completed. Has to free all state allocated by
2035 * cl_device_operations::cdo_req_init().
2037 void (*cro_completion)(const struct lu_env *env,
2038 const struct cl_req_slice *slice, int ioret);
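/*
 * Illustrative sketch of a layer hooking into transfer requests: the layer's
 * cl_device_operations::cdo_req_init() allocates a per-req slice and
 * registers it with cl_req_slice_add() (declared further below); the
 * completion callback then frees it. The layer name, its slice type, and the
 * use of kfree() are hypothetical stand-ins.
 *
 * \code
 * struct example_req {
 *         struct cl_req_slice erq_cl;    // embedded generic slice
 * };
 *
 * static void example_req_completion(const struct lu_env *env,
 *                                    const struct cl_req_slice *slice,
 *                                    int ioret)
 * {
 *         struct example_req *er;
 *
 *         er = container_of(slice, struct example_req, erq_cl);
 *         // Release whatever ->cdo_req_init() allocated for this transfer.
 *         kfree(er);
 * }
 *
 * static const struct cl_req_operations example_req_ops = {
 *         .cro_completion = example_req_completion
 * };
 *
 * // From the layer's ->cdo_req_init() implementation:
 * //         cl_req_slice_add(req, &er->erq_cl, dev, &example_req_ops);
 * \endcode
 */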
2042 * Per-object state that a (potentially multi-object) transfer request keeps.
2045 /** object itself */
2046 struct cl_object *ro_obj;
2047 /** reference to cl_req_obj::ro_obj. For debugging. */
2048 struct lu_ref_link ro_obj_ref;
2049 /* something else? Number of pages for a given object? */
2055 * Transfer requests are not reference counted, because the IO sub-system owns
2056 * them exclusively and knows when to free them.
2060 * cl_req is created by cl_req_alloc() that calls
2061 * cl_device_operations::cdo_req_init() device methods to allocate per-req
2062 * state in every layer.
2064 * Then pages are added (cl_req_page_add()); the req keeps track of all objects it
2065 * contains pages for.
2067 * Once all pages have been collected, the cl_req_operations::cro_prep() method
2068 * is called top-to-bottom. At that point layers can modify the req, let it pass, or
2069 * deny it completely. This is to support things like SNS that have transfer
2070 * ordering requirements invisible to the individual req-formation engine.
2072 * On transfer completion (or transfer timeout, or failure to initiate the
2073 * transfer of an allocated req), cl_req_operations::cro_completion() method
2074 * is called, after execution of cl_page_operations::cpo_completion() of all
2078 enum cl_req_type crq_type;
2080 /** A list of pages being transferred */
2080 struct list_head crq_pages;
2081 /** Number of pages in cl_req::crq_pages */
2082 unsigned crq_nrpages;
2084 /** An array of objects whose pages are in ->crq_pages */
2084 struct cl_req_obj *crq_o;
2086 /** Number of elements in cl_req::crq_o[] */
2086 unsigned crq_nrobjs;
2087 struct list_head crq_layers;
2091 * Per-layer state for request.
2093 struct cl_req_slice {
2094 struct cl_req *crs_req;
2095 struct cl_device *crs_dev;
2096 struct list_head crs_linkage;
2097 const struct cl_req_operations *crs_ops;
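/*
 * Illustrative sketch of the cl_req life-cycle described above, as driven by
 * a req-formation engine: allocate the req around the first page, add every
 * page (including the first), prepare the req, perform the transfer, and
 * complete it. The transfer itself is layer specific (an RPC in osc) and is
 * elided; a single-object transfer is assumed.
 *
 * \code
 * static int example_form_and_complete(const struct lu_env *env,
 *                                      struct cl_page **pages, int nr)
 * {
 *         struct cl_req *req;
 *         int i;
 *         int rc;
 *
 *         // One object assumed for the whole transfer.
 *         req = cl_req_alloc(env, pages[0], CRT_WRITE, 1);
 *         if (IS_ERR(req))
 *                 return PTR_ERR(req);
 *
 *         for (i = 0; i < nr; i++)
 *                 cl_req_page_add(env, req, pages[i]);
 *
 *         rc = cl_req_prep(env, req);        // cro_prep() top-to-bottom
 *         // ... hand the req to the transport here ...
 *         cl_req_completion(env, req, rc);   // cro_completion(), req is done
 *         return rc;
 * }
 * \endcode
 */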
2102 enum cache_stats_item {
2103 /** how many cache lookups were performed */
2105 /** how many times cache lookup resulted in a hit */
2107 /** how many entities are in the cache right now */
2109 /** how many entities in the cache are actively used (and cannot be
2110 * evicted) right now */
2112 /** how many entities were created at all */
2117 #define CS_NAMES { "lookup", "hit", "total", "busy", "create" }
2120 * Stats for a generic cache (similar to inode, lu_object, etc. caches).
2122 struct cache_stats {
2123 const char *cs_name;
2124 atomic_t cs_stats[CS_NR];
2127 /** These are not exported so far */
2128 void cache_stats_init (struct cache_stats *cs, const char *name);
2131 * Client-side site. This represents a particular client stack. "Global"
2132 * variables should (directly or indirectly) be added here to allow multiple
2133 * clients to co-exist in a single address space.
2136 struct lu_site cs_lu;
2138 * Statistical counters. Atomics do not scale, something better like
2139 * per-cpu counters is needed.
2141 * These are exported as /proc/fs/lustre/llite/.../site
2143 * When interpreting these, keep in mind that both sub-locks (and sub-pages)
2144 * and top-locks (and top-pages) are accounted here.
2146 struct cache_stats cs_pages;
2147 atomic_t cs_pages_state[CPS_NR];
2150 int cl_site_init(struct cl_site *s, struct cl_device *top);
2151 void cl_site_fini(struct cl_site *s);
2152 void cl_stack_fini(const struct lu_env *env, struct cl_device *cl);
2155 * Output client site statistical counters into a buffer. Suitable for
2156 * ll_rd_*()-style functions.
2158 int cl_site_stats_print(const struct cl_site *site, struct seq_file *m);
2163 * Type conversion and accessory functions.
2167 static inline struct cl_site *lu2cl_site(const struct lu_site *site)
2169 return container_of(site, struct cl_site, cs_lu);
2172 static inline int lu_device_is_cl(const struct lu_device *d)
2174 return d->ld_type->ldt_tags & LU_DEVICE_CL;
2177 static inline struct cl_device *lu2cl_dev(const struct lu_device *d)
2179 LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d));
2180 return container_of0(d, struct cl_device, cd_lu_dev);
2183 static inline struct lu_device *cl2lu_dev(struct cl_device *d)
2185 return &d->cd_lu_dev;
2188 static inline struct cl_object *lu2cl(const struct lu_object *o)
2190 LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
2191 return container_of0(o, struct cl_object, co_lu);
2194 static inline const struct cl_object_conf *
2195 lu2cl_conf(const struct lu_object_conf *conf)
2197 return container_of0(conf, struct cl_object_conf, coc_lu);
2200 static inline struct cl_object *cl_object_next(const struct cl_object *obj)
2202 return obj ? lu2cl(lu_object_next(&obj->co_lu)) : NULL;
2205 static inline struct cl_object_header *luh2coh(const struct lu_object_header *h)
2207 return container_of0(h, struct cl_object_header, coh_lu);
2210 static inline struct cl_site *cl_object_site(const struct cl_object *obj)
2212 return lu2cl_site(obj->co_lu.lo_dev->ld_site);
2216 struct cl_object_header *cl_object_header(const struct cl_object *obj)
2218 return luh2coh(obj->co_lu.lo_header);
2221 static inline int cl_device_init(struct cl_device *d, struct lu_device_type *t)
2223 return lu_device_init(&d->cd_lu_dev, t);
2226 static inline void cl_device_fini(struct cl_device *d)
2228 lu_device_fini(&d->cd_lu_dev);
2231 void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
2232 struct cl_object *obj, pgoff_t index,
2233 const struct cl_page_operations *ops);
2234 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
2235 struct cl_object *obj,
2236 const struct cl_lock_operations *ops);
2237 void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
2238 struct cl_object *obj, const struct cl_io_operations *ops);
2239 void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
2240 struct cl_device *dev,
2241 const struct cl_req_operations *ops);
2244 /** \defgroup cl_object cl_object
2246 struct cl_object *cl_object_top (struct cl_object *o);
2247 struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd,
2248 const struct lu_fid *fid,
2249 const struct cl_object_conf *c);
2251 int cl_object_header_init(struct cl_object_header *h);
2252 void cl_object_header_fini(struct cl_object_header *h);
2253 void cl_object_put (const struct lu_env *env, struct cl_object *o);
2254 void cl_object_get (struct cl_object *o);
2255 void cl_object_attr_lock (struct cl_object *o);
2256 void cl_object_attr_unlock(struct cl_object *o);
2257 int cl_object_attr_get (const struct lu_env *env, struct cl_object *obj,
2258 struct cl_attr *attr);
2259 int cl_object_attr_set (const struct lu_env *env, struct cl_object *obj,
2260 const struct cl_attr *attr, unsigned valid);
2261 int cl_object_glimpse (const struct lu_env *env, struct cl_object *obj,
2262 struct ost_lvb *lvb);
2263 int cl_conf_set (const struct lu_env *env, struct cl_object *obj,
2264 const struct cl_object_conf *conf);
2265 int cl_object_prune (const struct lu_env *env, struct cl_object *obj);
2266 void cl_object_kill (const struct lu_env *env, struct cl_object *obj);
2267 int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
2268 struct lov_user_md __user *lum);
2271 * Returns true, iff \a o0 and \a o1 are slices of the same object.
2273 static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1)
2275 return cl_object_header(o0) == cl_object_header(o1);
2278 static inline void cl_object_page_init(struct cl_object *clob, int size)
2280 clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
2281 cl_object_header(clob)->coh_page_bufsize += cfs_size_round(size);
2282 WARN_ON(cl_object_header(clob)->coh_page_bufsize > 512);
2285 static inline void *cl_object_page_slice(struct cl_object *clob,
2286 struct cl_page *page)
2288 return (void *)((char *)page + clob->co_slice_off);
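/*
 * Illustrative sketch of how a layer uses the two helpers above: at object
 * initialization it reserves room for its page slice in the per-page buffer
 * with cl_object_page_init(), and at page initialization it locates that
 * slice with cl_object_page_slice() and registers it with cl_page_slice_add()
 * (declared above). The layer and its types are hypothetical.
 *
 * \code
 * static const struct cl_page_operations example_page_ops = { };
 *
 * struct example_page {
 *         struct cl_page_slice ep_cl;
 * };
 *
 * // Object initialization: cl_object_page_init(clob, sizeof(struct example_page));
 *
 * static int example_page_init(const struct lu_env *env,
 *                              struct cl_object *obj,
 *                              struct cl_page *page, pgoff_t index)
 * {
 *         struct example_page *ep = cl_object_page_slice(obj, page);
 *
 *         cl_page_slice_add(page, &ep->ep_cl, obj, index, &example_page_ops);
 *         return 0;
 * }
 * \endcode
 */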
2292 * Return refcount of cl_object.
2294 static inline int cl_object_refc(struct cl_object *clob)
2296 struct lu_object_header *header = clob->co_lu.lo_header;
2297 return atomic_read(&header->loh_ref);
2302 /** \defgroup cl_page cl_page
2310 /* callback of cl_page_gang_lookup() */
2312 struct cl_page *cl_page_find (const struct lu_env *env,
2313 struct cl_object *obj,
2314 pgoff_t idx, struct page *vmpage,
2315 enum cl_page_type type);
2316 struct cl_page *cl_page_alloc (const struct lu_env *env,
2317 struct cl_object *o, pgoff_t ind,
2318 struct page *vmpage,
2319 enum cl_page_type type);
2320 void cl_page_get (struct cl_page *page);
2321 void cl_page_put (const struct lu_env *env,
2322 struct cl_page *page);
2323 void cl_page_print (const struct lu_env *env, void *cookie,
2324 lu_printer_t printer,
2325 const struct cl_page *pg);
2326 void cl_page_header_print(const struct lu_env *env, void *cookie,
2327 lu_printer_t printer,
2328 const struct cl_page *pg);
2329 struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj);
2330 struct cl_page *cl_page_top (struct cl_page *page);
2332 const struct cl_page_slice *cl_page_at(const struct cl_page *page,
2333 const struct lu_device_type *dtype);
2338 * Functions dealing with the ownership of a page by an io.
2342 int cl_page_own (const struct lu_env *env,
2343 struct cl_io *io, struct cl_page *page);
2344 int cl_page_own_try (const struct lu_env *env,
2345 struct cl_io *io, struct cl_page *page);
2346 void cl_page_assume (const struct lu_env *env,
2347 struct cl_io *io, struct cl_page *page);
2348 void cl_page_unassume (const struct lu_env *env,
2349 struct cl_io *io, struct cl_page *pg);
2350 void cl_page_disown (const struct lu_env *env,
2351 struct cl_io *io, struct cl_page *page);
2352 int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io);
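/*
 * Illustrative sketch of page ownership by an io, using the functions above:
 * a page must be owned before the io may act on it or queue it for transfer,
 * and is disowned afterwards (or released implicitly by transfer completion).
 *
 * \code
 * static int example_own_and_release(const struct lu_env *env,
 *                                    struct cl_io *io, struct cl_page *page)
 * {
 *         int rc;
 *
 *         rc = cl_page_own(env, io, page);
 *         if (rc != 0)
 *                 return rc;              // e.g. the page is being freed
 *
 *         LASSERT(cl_page_is_owned(page, io));
 *         // ... act on the page on behalf of this io ...
 *         cl_page_disown(env, io, page);
 *         return 0;
 * }
 * \endcode
 */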
2359 * Functions dealing with the preparation of a page for a transfer, and
2360 * tracking transfer state.
2363 int cl_page_prep (const struct lu_env *env, struct cl_io *io,
2364 struct cl_page *pg, enum cl_req_type crt);
2365 void cl_page_completion (const struct lu_env *env,
2366 struct cl_page *pg, enum cl_req_type crt, int ioret);
2367 int cl_page_make_ready (const struct lu_env *env, struct cl_page *pg,
2368 enum cl_req_type crt);
2369 int cl_page_cache_add (const struct lu_env *env, struct cl_io *io,
2370 struct cl_page *pg, enum cl_req_type crt);
2371 void cl_page_clip (const struct lu_env *env, struct cl_page *pg,
2373 int cl_page_cancel (const struct lu_env *env, struct cl_page *page);
2374 int cl_page_flush (const struct lu_env *env, struct cl_io *io,
2375 struct cl_page *pg);
2381 * \name helper routines
2382 * Functions to discard, delete and export a cl_page.
2385 void cl_page_discard(const struct lu_env *env, struct cl_io *io,
2386 struct cl_page *pg);
2387 void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
2388 int cl_page_is_vmlocked(const struct lu_env *env,
2389 const struct cl_page *pg);
2390 void cl_page_export(const struct lu_env *env,
2391 struct cl_page *pg, int uptodate);
2392 int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
2393 struct cl_page *page, pgoff_t *max_index);
2394 loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
2395 pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
2396 size_t cl_page_size(const struct cl_object *obj);
2398 void cl_lock_print(const struct lu_env *env, void *cookie,
2399 lu_printer_t printer, const struct cl_lock *lock);
2400 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2401 lu_printer_t printer,
2402 const struct cl_lock_descr *descr);
2407 /** \defgroup cl_lock cl_lock
2409 int cl_lock_request(const struct lu_env *env, struct cl_io *io,
2410 struct cl_lock *lock);
2411 int cl_lock_init(const struct lu_env *env, struct cl_lock *lock,
2412 const struct cl_io *io);
2413 void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock);
2414 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
2415 const struct lu_device_type *dtype);
2416 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock);
2418 int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io,
2419 struct cl_lock *lock, struct cl_sync_io *anchor);
2420 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock);
2424 /** \defgroup cl_io cl_io
2427 int cl_io_init (const struct lu_env *env, struct cl_io *io,
2428 enum cl_io_type iot, struct cl_object *obj);
2429 int cl_io_sub_init (const struct lu_env *env, struct cl_io *io,
2430 enum cl_io_type iot, struct cl_object *obj);
2431 int cl_io_rw_init (const struct lu_env *env, struct cl_io *io,
2432 enum cl_io_type iot, loff_t pos, size_t count);
2433 int cl_io_loop (const struct lu_env *env, struct cl_io *io);
2435 void cl_io_fini (const struct lu_env *env, struct cl_io *io);
2436 int cl_io_iter_init (const struct lu_env *env, struct cl_io *io);
2437 void cl_io_iter_fini (const struct lu_env *env, struct cl_io *io);
2438 int cl_io_lock (const struct lu_env *env, struct cl_io *io);
2439 void cl_io_unlock (const struct lu_env *env, struct cl_io *io);
2440 int cl_io_start (const struct lu_env *env, struct cl_io *io);
2441 void cl_io_end (const struct lu_env *env, struct cl_io *io);
2442 int cl_io_lock_add (const struct lu_env *env, struct cl_io *io,
2443 struct cl_io_lock_link *link);
2444 int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
2445 struct cl_lock_descr *descr);
2446 int cl_io_read_page (const struct lu_env *env, struct cl_io *io,
2447 struct cl_page *page);
2448 int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io,
2449 enum cl_req_type iot, struct cl_2queue *queue);
2450 int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io,
2451 enum cl_req_type iot, struct cl_2queue *queue,
2453 int cl_io_commit_async (const struct lu_env *env, struct cl_io *io,
2454 struct cl_page_list *queue, int from, int to,
2456 void cl_io_rw_advance (const struct lu_env *env, struct cl_io *io,
2458 int cl_io_cancel (const struct lu_env *env, struct cl_io *io,
2459 struct cl_page_list *queue);
2460 int cl_io_is_going (const struct lu_env *env);
2463 * True, iff \a io is an O_APPEND write(2).
2465 static inline int cl_io_is_append(const struct cl_io *io)
2467 return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append;
2470 static inline int cl_io_is_sync_write(const struct cl_io *io)
2472 return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_sync;
2475 static inline int cl_io_is_mkwrite(const struct cl_io *io)
2477 return io->ci_type == CIT_FAULT && io->u.ci_fault.ft_mkwrite;
2481 * True, iff \a io is a truncate(2).
2483 static inline int cl_io_is_trunc(const struct cl_io *io)
2485 return io->ci_type == CIT_SETATTR &&
2486 (io->u.ci_setattr.sa_valid & ATTR_SIZE);
2489 struct cl_io *cl_io_top(struct cl_io *io);
2491 void cl_io_print(const struct lu_env *env, void *cookie,
2492 lu_printer_t printer, const struct cl_io *io);
2494 #define CL_IO_SLICE_CLEAN(foo_io, base) \
2496 typeof(foo_io) __foo_io = (foo_io); \
2498 CLASSERT(offsetof(typeof(*__foo_io), base) == 0); \
2499 memset(&__foo_io->base + 1, 0, \
2500 (sizeof *__foo_io) - sizeof __foo_io->base); \
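/*
 * Illustrative example of CL_IO_SLICE_CLEAN(): a layer's io slice embeds
 * struct cl_io_slice as its first member (enforced by the CLASSERT above);
 * the macro then zeroes everything that follows the embedded slice, leaving
 * the slice itself intact. The layer type below is hypothetical.
 *
 * \code
 * struct example_io {
 *         struct cl_io_slice eio_cl;      // must be the first member
 *         int                eio_private_state;
 * };
 *
 * static void example_io_reset(struct example_io *eio)
 * {
 *         CL_IO_SLICE_CLEAN(eio, eio_cl); // clears eio_private_state only
 * }
 * \endcode
 */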
2505 /** \defgroup cl_page_list cl_page_list
2509 * Last page in the page list.
2511 static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
2513 LASSERT(plist->pl_nr > 0);
2514 return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
2517 static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist)
2519 LASSERT(plist->pl_nr > 0);
2520 return list_entry(plist->pl_pages.next, struct cl_page, cp_batch);
2524 * Iterate over pages in a page list.
2526 #define cl_page_list_for_each(page, list) \
2527 list_for_each_entry((page), &(list)->pl_pages, cp_batch)
2530 * Iterate over pages in a page list, taking possible removals into account.
2532 #define cl_page_list_for_each_safe(page, temp, list) \
2533 list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
2535 void cl_page_list_init (struct cl_page_list *plist);
2536 void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page);
2537 void cl_page_list_move (struct cl_page_list *dst, struct cl_page_list *src,
2538 struct cl_page *page);
2539 void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
2540 struct cl_page *page);
2541 void cl_page_list_splice (struct cl_page_list *list,
2542 struct cl_page_list *head);
2543 void cl_page_list_del (const struct lu_env *env,
2544 struct cl_page_list *plist, struct cl_page *page);
2545 void cl_page_list_disown (const struct lu_env *env,
2546 struct cl_io *io, struct cl_page_list *plist);
2547 int cl_page_list_own (const struct lu_env *env,
2548 struct cl_io *io, struct cl_page_list *plist);
2549 void cl_page_list_assume (const struct lu_env *env,
2550 struct cl_io *io, struct cl_page_list *plist);
2551 void cl_page_list_discard(const struct lu_env *env,
2552 struct cl_io *io, struct cl_page_list *plist);
2553 void cl_page_list_fini (const struct lu_env *env, struct cl_page_list *plist);
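/*
 * Illustrative sketch of basic cl_page_list usage: initialize the list, add
 * pages, iterate with cl_page_list_for_each(), and release the list. The
 * ownership helpers above (cl_page_list_own()/cl_page_list_disown()) follow
 * the same pattern when the io must own the pages, e.g. before a transfer.
 *
 * \code
 * static void example_collect_pages(const struct lu_env *env,
 *                                   struct cl_page **pages, int nr)
 * {
 *         struct cl_page_list plist;
 *         struct cl_page *page;
 *         unsigned int seen = 0;
 *         int i;
 *
 *         cl_page_list_init(&plist);
 *         for (i = 0; i < nr; i++)
 *                 cl_page_list_add(&plist, pages[i]);
 *
 *         cl_page_list_for_each(page, &plist)
 *                 seen++;
 *         LASSERT(seen == plist.pl_nr);
 *
 *         cl_page_list_fini(env, &plist);
 * }
 * \endcode
 */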
2555 void cl_2queue_init (struct cl_2queue *queue);
2556 void cl_2queue_add (struct cl_2queue *queue, struct cl_page *page);
2557 void cl_2queue_disown (const struct lu_env *env,
2558 struct cl_io *io, struct cl_2queue *queue);
2559 void cl_2queue_assume (const struct lu_env *env,
2560 struct cl_io *io, struct cl_2queue *queue);
2561 void cl_2queue_discard (const struct lu_env *env,
2562 struct cl_io *io, struct cl_2queue *queue);
2563 void cl_2queue_fini (const struct lu_env *env, struct cl_2queue *queue);
2564 void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
2566 /** @} cl_page_list */
2568 /** \defgroup cl_req cl_req
2570 struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
2571 enum cl_req_type crt, int nr_objects);
2573 void cl_req_page_add (const struct lu_env *env, struct cl_req *req,
2574 struct cl_page *page);
2575 void cl_req_page_done (const struct lu_env *env, struct cl_page *page);
2576 int cl_req_prep (const struct lu_env *env, struct cl_req *req);
2577 void cl_req_attr_set (const struct lu_env *env, struct cl_req *req,
2578 struct cl_req_attr *attr, obd_valid flags);
2579 void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret);
2581 /** \defgroup cl_sync_io cl_sync_io
2585 * Anchor for synchronous transfer. This is allocated on the stack by the thread
2586 * doing a synchronous transfer, and a pointer to this structure is set up in
2587 * every page submitted for transfer. The transfer completion routine updates the
2588 * anchor and wakes up the waiting thread when the transfer is complete.
2591 /** number of pages yet to be transferred. */
2592 atomic_t csi_sync_nr;
2595 /** barrier for destroying this structure */
2596 atomic_t csi_barrier;
2597 /** completion to be signaled when transfer is complete. */
2598 wait_queue_head_t csi_waitq;
2599 /** callback to invoke when this IO is finished */
2600 void (*csi_end_io)(const struct lu_env *,
2601 struct cl_sync_io *);
2604 void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
2605 void (*end)(const struct lu_env *, struct cl_sync_io *));
2606 int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
2608 void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
2610 void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
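/*
 * Illustrative sketch of the synchronous transfer pattern described above,
 * roughly the sequence a helper like cl_io_submit_sync() is expected to
 * follow: an anchor on the submitter's stack accounts for the pages in
 * flight, the transfer completion path calls cl_sync_io_note() for each
 * page, and the submitter waits. The trailing timeout/ioret arguments of
 * cl_sync_io_wait()/cl_sync_io_note() are not visible in this excerpt and
 * are assumed here; so is the step that points each page at the anchor.
 *
 * \code
 * static int example_sync_read(const struct lu_env *env, struct cl_io *io,
 *                              struct cl_2queue *queue, int nr_pages)
 * {
 *         struct cl_sync_io anchor;
 *         int rc;
 *
 *         cl_sync_io_init(&anchor, nr_pages, cl_sync_io_end);
 *         // ... point every page in 'queue' at &anchor (field not shown here) ...
 *         rc = cl_io_submit_rw(env, io, CRT_READ, queue);
 *         if (rc == 0)
 *                 rc = cl_sync_io_wait(env, &anchor, 0);  // assumed timeout arg
 *         return rc;
 * }
 * \endcode
 */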
2612 /** @} cl_sync_io */
2616 /** \defgroup cl_env cl_env
2618 * lu_env handling for a client.
2620 * lu_env is an environment within which lustre code executes. Its major part
2621 * is lu_context---a fast memory allocation mechanism that is used to conserve
2622 * precious kernel stack space. Originally lu_env was designed for a server,
2625 * - there is a (mostly) fixed number of threads, and
2627 * - call chains have no non-lustre portions inserted between pieces of lustre code.
2629 * On a client both these assumptions fail, because every user thread can
2630 * potentially execute lustre code as part of a system call, and lustre calls
2631 * into the VFS or MM that call back into lustre.
2633 * To deal with that, cl_env wrapper functions implement the following
2636 * - allocation and destruction of environments are amortized by caching
2637 * no-longer-used environments instead of destroying them;
2639 * - there is a notion of "current" environment, attached to the kernel
2640 * data structure representing the current thread. Top-level lustre code
2641 * allocates an environment and makes it current, then calls into
2642 * non-lustre code, which in turn calls back into lustre. Low-level lustre
2643 * code thus called can fetch the environment created by the top-level code
2644 * and reuse it, avoiding an additional environment allocation.
2645 * Right now, three interfaces can attach the cl_env to a running thread:
2648 * - cl_env_reexit() (cl_env_reenter() must have been called beforehand)
2650 * \see lu_env, lu_context, lu_context_key
2653 struct cl_env_nest {
2658 struct lu_env *cl_env_peek (int *refcheck);
2659 struct lu_env *cl_env_get (int *refcheck);
2660 struct lu_env *cl_env_alloc (int *refcheck, __u32 tags);
2661 struct lu_env *cl_env_nested_get (struct cl_env_nest *nest);
2662 void cl_env_put (struct lu_env *env, int *refcheck);
2663 void cl_env_nested_put (struct cl_env_nest *nest, struct lu_env *env);
2664 void *cl_env_reenter (void);
2665 void cl_env_reexit (void *cookie);
2666 void cl_env_implant (struct lu_env *env, int *refcheck);
2667 void cl_env_unplant (struct lu_env *env, int *refcheck);
2668 unsigned cl_env_cache_purge(unsigned nr);
2669 struct lu_env *cl_env_percpu_get (void);
2670 void cl_env_percpu_put (struct lu_env *env);
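/*
 * Illustrative sketch of the common cl_env usage pattern on a client thread:
 * obtain a (possibly cached, possibly already "current") environment with
 * cl_env_get(), use it for cl_* calls, and return it with cl_env_put() using
 * the same refcheck cookie. The cl_object_prune() call simply stands in for
 * any interface that needs an environment.
 *
 * \code
 * static int example_with_env(struct cl_object *obj)
 * {
 *         struct lu_env *env;
 *         int refcheck;
 *         int rc;
 *
 *         env = cl_env_get(&refcheck);
 *         if (IS_ERR(env))
 *                 return PTR_ERR(env);
 *
 *         rc = cl_object_prune(env, obj);
 *
 *         cl_env_put(env, &refcheck);
 *         return rc;
 * }
 * \endcode
 */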
2677 void cl_attr2lvb(struct ost_lvb *lvb, const struct cl_attr *attr);
2678 void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb);
2680 struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
2681 struct lu_device_type *ldt,
2682 struct lu_device *next);
2685 int cl_global_init(void);
2686 void cl_global_fini(void);
2688 #endif /* _LUSTRE_CL_OBJECT_H */