4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2016, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 #ifndef _LUSTRE_CL_OBJECT_H
33 #define _LUSTRE_CL_OBJECT_H
35 /** \defgroup clio clio
37 * Client objects implement io operations and cache pages.
39 * Examples: lov and osc are implementations of the cl interface.
41 * Big Theory Statement.
45 * Client implementation is based on the following data-types:
51 * - cl_lock represents an extent lock on an object.
53 * - cl_io represents high-level i/o activity such as whole read/write
54 * system call, or write-out of pages from under the lock being
55 * canceled. cl_io has sub-ios that can be stopped and resumed
56 * independently, thus achieving high degree of transfer
57 * parallelism. A single cl_io can be advanced forward by
58 * multiple threads (although in the most usual case of a
59 * read/write system call it is associated with the single user
60 * thread that issued the system call).
64 * - to avoid confusion, a high-level I/O operation, like a read or write system
65 * call, is referred to as "an io", whereas a low-level I/O operation, like an
66 * RPC, is referred to as "a transfer"
68 * - "generic code" means generic (not file system specific) code in the
69 * hosting environment. "cl-code" means code (mostly in cl_*.c files) that
70 * is not layer specific.
76 * - cl_object_header::coh_page_guard
79 * See the top comment in cl_object.c for the description of overall locking and
80 * reference-counting design.
82 * See comments below for the description of i/o, page, and dlm-locking
89 * super-class definitions.
91 #include <libcfs/libcfs.h>
92 #include <libcfs/libcfs_ptask.h>
93 #include <lu_object.h>
94 #include <linux/atomic.h>
95 #include <linux/mutex.h>
96 #include <linux/radix-tree.h>
97 #include <linux/spinlock.h>
98 #include <linux/wait.h>
99 #include <lustre_dlm.h>
109 struct cl_page_slice;
111 struct cl_lock_slice;
113 struct cl_lock_operations;
114 struct cl_page_operations;
121 extern struct cfs_ptask_engine *cl_io_engine;
124 * Device in the client stack.
126 * \see vvp_device, lov_device, lovsub_device, osc_device
130 struct lu_device cd_lu_dev;
133 /** \addtogroup cl_object cl_object
136 * "Data attributes" of cl_object. Data attributes can be updated
137 * independently for a sub-object, and the top-object's attributes are calculated
138 * from those of its sub-objects.
141 /** Object size, in bytes */
144 * Known minimal size, in bytes.
146 * This is only valid when at least one DLM lock is held.
149 /** Modification time. Measured in seconds since epoch. */
151 /** Access time. Measured in seconds since epoch. */
153 /** Change time. Measured in seconds since epoch. */
156 * Blocks allocated to this cl_object on the server file system.
158 * \todo XXX An interface for block size is needed.
162 * User identifier for quota purposes.
166 * Group identifier for quota purposes.
170 /* nlink of the directory */
173 /* Project identifier for quota purpose. */
178 * Fields in cl_attr that are being set.
193 * Sub-class of lu_object with methods common for objects on the client
196 * cl_object: represents a regular file system object, both a file and a
197 * stripe. cl_object is based on lu_object: it is identified by a fid,
198 * layered, cached, hashed, and lrued. An important distinction from the server
199 * side, where md_object and dt_object are used, is that cl_object "fans out"
200 * at the lov/sns level: depending on the file layout, a single file is
201 * represented as a set of "sub-objects" (stripes). At the implementation
202 * level, struct lov_object contains an array of cl_objects. Each sub-object
203 * is a full-fledged cl_object, having its fid, living in the lru and hash
206 * This leads to the next important difference from the server side: on the
207 * client, it's quite usual to have objects with different sequences of
208 * layers. For example, a typical top-object is composed of the following
214 * whereas its sub-objects are composed of
219 * layers. Here "lovsub" is a mostly dummy layer, whose purpose is to keep
220 * track of the object-subobject relationship.
222 * Sub-objects are not cached independently: when top-object is about to
223 * be discarded from the memory, all its sub-objects are torn-down and
226 * \see vvp_object, lov_object, lovsub_object, osc_object
230 struct lu_object co_lu;
231 /** per-object-layer operations */
232 const struct cl_object_operations *co_ops;
233 /** offset of page slice in cl_page buffer */
238 * Description of the client object configuration. This is used for the
239 * creation of a new client object that is identified by more state than
242 struct cl_object_conf {
244 struct lu_object_conf coc_lu;
247 * Object layout. This is consumed by lov.
249 struct lu_buf coc_layout;
251 * Description of particular stripe location in the
252 * cluster. This is consumed by osc.
254 struct lov_oinfo *coc_oinfo;
257 * VFS inode. This is consumed by vvp.
259 struct inode *coc_inode;
261 * Layout lock handle.
263 struct ldlm_lock *coc_lock;
265 * Operation to handle layout, OBJECT_CONF_XYZ.
271 /** configure layout, set up a new stripe, must be called while
272 * holding layout lock. */
274 /** invalidate the current stripe configuration due to losing
276 OBJECT_CONF_INVALIDATE = 1,
277 /** wait for old layout to go away so that new layout can be
283 CL_LAYOUT_GEN_NONE = (u32)-2, /* layout lock was cancelled */
284 CL_LAYOUT_GEN_EMPTY = (u32)-1, /* for empty layout */
288 /** the buffer to return the layout in lov_mds_md format. */
289 struct lu_buf cl_buf;
290 /** size of layout in lov_mds_md format. */
292 /** Layout generation. */
294 /** whether layout is a composite one */
295 bool cl_is_composite;
299 * Operations implemented for each cl object layer.
301 * \see vvp_ops, lov_ops, lovsub_ops, osc_ops
303 struct cl_object_operations {
305 * Initialize page slice for this layer. Called top-to-bottom through
306 * every object layer when a new cl_page is instantiated. A layer
307 * keeping private per-page data, or requiring its own page operations
308 * vector, should allocate this data here and attach it to the page
309 * by calling cl_page_slice_add(). \a vmpage is locked (in the VM
312 * \retval NULL success.
314 * \retval ERR_PTR(errno) failure code.
316 * \retval valid-pointer pointer to already existing referenced page
317 * to be used instead of newly created.
319 int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
320 struct cl_page *page, pgoff_t index);
322 * Initialize lock slice for this layer. Called top-to-bottom through
323 * every object layer when a new cl_lock is instantiated. A layer
324 * keeping private per-lock data, or requiring its own lock operations
325 * vector, should allocate this data here and attach it to the lock
326 * by calling cl_lock_slice_add(). Mandatory.
328 int (*coo_lock_init)(const struct lu_env *env,
329 struct cl_object *obj, struct cl_lock *lock,
330 const struct cl_io *io);
332 * Initialize io state for a given layer.
334 * Called top-to-bottom once per io existence to initialize io
335 * state. If a layer wants to keep some state for this type of io, it
336 * has to embed struct cl_io_slice in lu_env::le_ses, and register
337 * slice with cl_io_slice_add(). It is guaranteed that all threads
338 * participating in this io share the same session.
340 int (*coo_io_init)(const struct lu_env *env,
341 struct cl_object *obj, struct cl_io *io);
343 * Fill portion of \a attr that this layer controls. This method is
344 * called top-to-bottom through all object layers.
346 * \pre cl_object_header::coh_attr_guard of the top-object is locked.
348 * \return 0: to continue
349 * \return +ve: to stop iterating through layers (but 0 is returned
350 * from enclosing cl_object_attr_get())
351 * \return -ve: to signal error
353 int (*coo_attr_get)(const struct lu_env *env, struct cl_object *obj,
354 struct cl_attr *attr);
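	/*
	 * Illustrative sketch (not part of the original interface): a
	 * hypothetical layer that only knows the object size could implement
	 * coo_attr_get() as below, letting iteration continue to the lower
	 * layers. The cat_size field name and the foo_object_size() helper
	 * are assumptions for the example.
	 *
	 *   static int foo_attr_get(const struct lu_env *env,
	 *                           struct cl_object *obj,
	 *                           struct cl_attr *attr)
	 *   {
	 *           attr->cat_size = foo_object_size(obj);
	 *           return 0; // 0: continue with lower layers
	 *   }
	 */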
358 * \a valid is a bitmask composed from enum #cl_attr_valid, and
359 * indicating what attributes are to be set.
361 * \pre cl_object_header::coh_attr_guard of the top-object is locked.
363 * \return the same convention as for
364 * cl_object_operations::coo_attr_get() is used.
366 int (*coo_attr_update)(const struct lu_env *env, struct cl_object *obj,
367 const struct cl_attr *attr, unsigned valid);
369 * Update object configuration. Called top-to-bottom to modify object
372 * XXX error conditions and handling.
374 int (*coo_conf_set)(const struct lu_env *env, struct cl_object *obj,
375 const struct cl_object_conf *conf);
377 * Glimpse ast. Executed when glimpse ast arrives for a lock on this
378 * object. Layers are supposed to fill parts of \a lvb that will be
379 * shipped to the glimpse originator as a glimpse result.
381 * \see vvp_object_glimpse(), lovsub_object_glimpse(),
382 * \see osc_object_glimpse()
384 int (*coo_glimpse)(const struct lu_env *env,
385 const struct cl_object *obj, struct ost_lvb *lvb);
387 * Object prune method. Called when the layout is going to change on
388 * this object; therefore each layer has to clean up its cache,
389 * mainly pages and locks.
391 int (*coo_prune)(const struct lu_env *env, struct cl_object *obj);
393 * Object getstripe method.
395 int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj,
396 struct lov_user_md __user *lum, size_t size);
398 * Get FIEMAP mapping from the object.
400 int (*coo_fiemap)(const struct lu_env *env, struct cl_object *obj,
401 struct ll_fiemap_info_key *fmkey,
402 struct fiemap *fiemap, size_t *buflen);
404 * Get layout and generation of the object.
406 int (*coo_layout_get)(const struct lu_env *env, struct cl_object *obj,
407 struct cl_layout *layout);
409 * Get maximum size of the object.
411 loff_t (*coo_maxbytes)(struct cl_object *obj);
413 * Set request attributes.
415 void (*coo_req_attr_set)(const struct lu_env *env,
416 struct cl_object *obj,
417 struct cl_req_attr *attr);
421 * Extended header for client object.
423 struct cl_object_header {
424 /** Standard lu_object_header. cl_object::co_lu::lo_header points
426 struct lu_object_header coh_lu;
429 * Parent object. It is assumed that an object has a well-defined
430 * parent, but not a well-defined child (there may be multiple
431 * sub-objects for the same top-object). cl_object_header::coh_parent
432 * field allows certain code to be written generically, without
433 * limiting possible cl_object layouts unduly.
435 struct cl_object_header *coh_parent;
437 * Protects consistency between cl_attr of parent object and
438 * attributes of sub-objects, in that the former is calculated ("merged")
441 * \todo XXX this can be read/write lock if needed.
443 spinlock_t coh_attr_guard;
445 * Size of cl_page + page slices
447 unsigned short coh_page_bufsize;
449 * Number of objects above this one: 0 for a top-object, 1 for its
452 unsigned char coh_nesting;
456 * Helper macro: iterate over all layers of the object \a obj, assigning every
457 * layer top-to-bottom to \a slice.
459 #define cl_object_for_each(slice, obj) \
460 list_for_each_entry((slice), \
461 &(obj)->co_lu.lo_header->loh_layers,\
465 * Helper macro: iterate over all layers of the object \a obj, assigning every
466 * layer bottom-to-top to \a slice.
468 #define cl_object_for_each_reverse(slice, obj) \
469 list_for_each_entry_reverse((slice), \
470 &(obj)->co_lu.lo_header->loh_layers,\
475 #define CL_PAGE_EOF ((pgoff_t)~0ull)
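/*
 * Illustrative sketch (not the actual implementation in cl_object.c): a
 * top-level method can fan a call out over all layers with the iteration
 * macros above. The walking direction (top-to-bottom vs. bottom-to-top)
 * depends on the particular method; coo_glimpse() is used here only as an
 * example.
 *
 *   static int example_glimpse(const struct lu_env *env,
 *                              struct cl_object *top, struct ost_lvb *lvb)
 *   {
 *           struct cl_object *slice;
 *           int result = 0;
 *
 *           cl_object_for_each(slice, top) {
 *                   if (slice->co_ops->coo_glimpse != NULL) {
 *                           result = slice->co_ops->coo_glimpse(env, slice, lvb);
 *                           if (result != 0)
 *                                   break; // stop at the first failing layer
 *                   }
 *           }
 *           return result;
 *   }
 */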
477 /** \addtogroup cl_page cl_page
481 * Layered client page.
483 * cl_page: represents a portion of a file, cached in the memory. All pages
484 * of the given file are of the same size, and are kept in the radix tree
485 * hanging off the cl_object. cl_page doesn't fan out, but as sub-objects
486 * of the top-level file object are first class cl_objects, they have their
487 * own radix trees of pages and hence a page is implemented as a sequence of
488 * struct cl_page's, linked into a double-linked list through
489 * cl_page::cp_parent and cl_page::cp_child pointers, each residing in the
490 * corresponding radix tree at the corresponding logical offset.
492 * cl_page is associated with a VM page of the hosting environment (struct
493 * page in the Linux kernel, for example). It is assumed that this
494 * association is implemented by one of the cl_page layers (the top layer in the
495 * current design) that
497 * - intercepts per-VM-page call-backs made by the environment (e.g.,
500 * - translates state (page flag bits) and locking between lustre and
503 * The association between cl_page and struct page is immutable and
504 * established when cl_page is created.
506 * cl_page can be "owned" by a particular cl_io (see below), guaranteeing
507 * this io exclusive access to this page w.r.t. other io attempts and
508 * various events changing page state (such as transfer completion, or
509 * eviction of the page from the memory). Note, that in general cl_io
510 * cannot be identified with a particular thread, and page ownership is not
511 * exactly equal to the current thread holding a lock on the page. Layer
512 * implementing association between cl_page and struct page has to implement
513 * ownership on top of available synchronization mechanisms.
515 * While the lustre client maintains the notion of page ownership by io,
516 * hosting MM/VM usually has its own page concurrency control
517 * mechanisms. For example, in Linux, page access is synchronized by the
518 * per-page PG_locked bit-lock, and generic kernel code (generic_file_*())
519 * takes care to acquire and release such locks as necessary around the
520 * calls to the file system methods (->readpage(), ->prepare_write(),
521 * ->commit_write(), etc.). This leads to the situation where there are two
522 * different ways to own a page in the client:
524 * - client code explicitly and voluntarily owns the page (cl_page_own());
526 * - VM locks a page and then calls the client, which has "to assume"
527 * the ownership from the VM (cl_page_assume()).
529 * Dual methods to release ownership are cl_page_disown() and
530 * cl_page_unassume().
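 *
 * A hedged usage sketch of the two ownership paths (assuming the
 * cl_page_own(), cl_page_assume(), cl_page_disown() and cl_page_unassume()
 * prototypes declared elsewhere in this header, all taking (env, io, page)):
 *
 *   // path 1: io voluntarily takes exclusive ownership
 *   if (cl_page_own(env, io, page) == 0) {
 *           // ... operate on the page on behalf of io ...
 *           cl_page_disown(env, io, page);
 *   }
 *
 *   // path 2: the VM has already locked the page and called into the client
 *   cl_page_assume(env, io, page);
 *   // ... operate on the page ...
 *   cl_page_unassume(env, io, page);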
532 * cl_page is reference counted (cl_page::cp_ref). When reference counter
533 * drops to 0, the page is returned to the cache, unless it is in
534 * cl_page_state::CPS_FREEING state, in which case it is immediately
537 * The general logic guaranteeing the absence of "existential races" for
538 * pages is the following:
540 * - there are fixed known ways for a thread to obtain a new reference
543 * - by doing a lookup in the cl_object radix tree, protected by the
546 * - by starting from VM-locked struct page and following some
547 * hosting environment method (e.g., following ->private pointer in
548 * the case of Linux kernel), see cl_vmpage_page();
550 * - when the page enters cl_page_state::CPS_FREEING state, all these
551 * ways are severed with the proper synchronization
552 * (cl_page_delete());
554 * - entry into cl_page_state::CPS_FREEING is serialized by the VM page
557 * - no new references to the page in cl_page_state::CPS_FREEING state
558 * are allowed (checked in cl_page_get()).
560 * Together this guarantees that when last reference to a
561 * cl_page_state::CPS_FREEING page is released, it is safe to destroy the
562 * page, as neither references to it can be acquired at that point, nor
565 * cl_page is a state machine. States are enumerated in enum
566 * cl_page_state. Possible state transitions are enumerated in
567 * cl_page_state_set(). State transition process (i.e., actual changing of
568 * cl_page::cp_state field) is protected by the lock on the underlying VM
571 * Linux Kernel implementation.
573 * Binding between cl_page and struct page is implemented in the
574 * vvp layer. cl_page is attached to the
575 * ->private pointer of the struct page, together with the setting of
576 * PG_private bit in page->flags, and acquiring additional reference on the
577 * struct page (much like struct buffer_head, or any similar file system
578 * private data structures).
580 * PG_locked lock is used to implement both ownership and transfer
581 * synchronization, that is, page is VM-locked in CPS_{OWNED,PAGE{IN,OUT}}
582 * states. No additional references are acquired for the duration of the
585 * \warning *THIS IS NOT* the behavior expected by the Linux kernel, where
586 * write-out is "protected" by the special PG_writeback bit.
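 *
 * Illustrative sketch of moving between the two representations (assuming
 * the cl_vmpage_page() prototype declared elsewhere in this header; see
 * also cl_page_vmpage() below):
 *
 *   // from a VM callback: vmpage is PG_locked by the kernel
 *   struct cl_page *page = cl_vmpage_page(vmpage, clob);
 *   if (page != NULL) {
 *           // a new reference on the cl_page was obtained
 *           LASSERT(cl_page_vmpage(page) == vmpage);
 *           // ... use the page, then drop the reference with cl_page_put() ...
 *   }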
590 * States of cl_page. cl_page.c assumes particular order here.
592 * The page state machine is rather crude, as it doesn't recognize finer page
593 * states like "dirty" or "up to date". This is because such states are not
594 * always well defined for the whole stack (see, for example, the
595 * implementation of the read-ahead, that hides page up-to-dateness to track
596 * cache hits accurately). Such sub-states are maintained by the layers that
597 * are interested in them.
601 * Page is in the cache, un-owned. Page leaves cached state in the
604 * - [cl_page_state::CPS_OWNED] io comes across the page and
607 * - [cl_page_state::CPS_PAGEOUT] page is dirty, the
608 * req-formation engine decides that it wants to include this page
609 * into an RPC being constructed, and yanks it from the cache;
611 * - [cl_page_state::CPS_FREEING] VM callback is executed to
612 * evict the page from the memory;
614 * \invariant cl_page::cp_owner == NULL && cl_page::cp_req == NULL
618 * Page is exclusively owned by some cl_io. Page may end up in this
619 * state as a result of
621 * - io creating new page and immediately owning it;
623 * - [cl_page_state::CPS_CACHED] io finding existing cached page
626 * - [cl_page_state::CPS_OWNED] io finding existing owned page
627 * and waiting for owner to release the page;
629 * Page leaves owned state in the following cases:
631 * - [cl_page_state::CPS_CACHED] io decides to leave the page in
632 * the cache, doing nothing;
634 * - [cl_page_state::CPS_PAGEIN] io starts read transfer for
637 * - [cl_page_state::CPS_PAGEOUT] io starts immediate write
638 * transfer for this page;
640 * - [cl_page_state::CPS_FREEING] io decides to destroy this
641 * page (e.g., as part of truncate or extent lock cancellation).
643 * \invariant cl_page::cp_owner != NULL && cl_page::cp_req == NULL
647 * Page is being written out, as a part of a transfer. This state is
648 * entered when req-formation logic decided that it wants this page to
649 * be sent through the wire _now_. Specifically, it means that once
650 * this state is achieved, transfer completion handler (with either
651 * success or failure indication) is guaranteed to be executed against
652 * this page independently of any locks and any scheduling decisions
653 * made by the hosting environment (that effectively means that the
654 * page is never put into cl_page_state::CPS_PAGEOUT state "in
655 * advance". This property is mentioned, because it is important when
656 * reasoning about possible dead-locks in the system). The page can
657 * enter this state as a result of
659 * - [cl_page_state::CPS_OWNED] an io requesting an immediate
660 * write-out of this page, or
662 * - [cl_page_state::CPS_CACHED] req-forming engine deciding
663 * that it has enough dirty pages cached to issue a "good"
666 * The page leaves cl_page_state::CPS_PAGEOUT state when the transfer
667 * is completed---it is moved into cl_page_state::CPS_CACHED state.
669 * Underlying VM page is locked for the duration of transfer.
671 * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req != NULL
675 * Page is being read in, as a part of a transfer. This is quite
676 * similar to the cl_page_state::CPS_PAGEOUT state, except that
677 * read-in is always "immediate"---there is no such thing as a sudden
678 * construction of read request from cached, presumably not up to date,
681 * Underlying VM page is locked for the duration of transfer.
683 * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req != NULL
687 * Page is being destroyed. This state is entered when client decides
688 * that page has to be deleted from its host object, as, e.g., a part
691 * Once this state is reached, there is no way to escape it.
693 * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req == NULL
700 /** Host page, the page is from the host inode which the cl_page
704 /** Transient page, the transient cl_page is used to bind a cl_page
705 * to a vmpage which does not belong to the same object as the cl_page.
706 * It is used in DirectIO, lockless IO and liblustre. */
711 * Fields are protected by the lock on struct page, except for atomics and
714 * \invariant Data type invariants are in cl_page_invariant(). Basically:
715 * cl_page::cp_parent and cl_page::cp_child are a well-formed double-linked
716 * list, consistent with the parent/child pointers in the cl_page::cp_obj and
717 * cl_page::cp_owner (when set).
720 /** Reference counter. */
722 /** An object this page is a part of. Immutable after creation. */
723 struct cl_object *cp_obj;
725 struct page *cp_vmpage;
726 /** Linkage of pages within group. Pages must be owned */
727 struct list_head cp_batch;
728 /** List of slices. Immutable after creation. */
729 struct list_head cp_layers;
731 * Page state. This field is const to avoid accidental update; it is
732 * modified only internally within cl_page.c. Protected by a VM lock.
734 const enum cl_page_state cp_state;
736 * Page type. Only CPT_TRANSIENT is used so far. Immutable after
739 enum cl_page_type cp_type;
742 * Owning IO in cl_page_state::CPS_OWNED state. Sub-page can be owned
743 * by sub-io. Protected by a VM lock.
745 struct cl_io *cp_owner;
746 /** List of references to this page, for debugging. */
747 struct lu_ref cp_reference;
748 /** Link to an object, for debugging. */
749 struct lu_ref_link cp_obj_ref;
750 /** Link to a queue, for debugging. */
751 struct lu_ref_link cp_queue_ref;
752 /** Assigned if doing a sync_io */
753 struct cl_sync_io *cp_sync_io;
757 * Per-layer part of cl_page.
759 * \see vvp_page, lov_page, osc_page
761 struct cl_page_slice {
762 struct cl_page *cpl_page;
765 * Object slice corresponding to this page slice. Immutable after
768 struct cl_object *cpl_obj;
769 const struct cl_page_operations *cpl_ops;
770 /** Linkage into cl_page::cp_layers. Immutable after creation. */
771 struct list_head cpl_linkage;
775 * Lock mode. For the client extent locks.
787 * Requested transfer type.
796 * Per-layer page operations.
798 * Methods taking an \a io argument are for the activity happening in the
799 * context of given \a io. Page is assumed to be owned by that io, except for
800 * the obvious cases (like cl_page_operations::cpo_own()).
802 * \see vvp_page_ops, lov_page_ops, osc_page_ops
804 struct cl_page_operations {
806 * cl_page<->struct page methods. Only one layer in the stack has to
807 * implement these. Current code assumes that this functionality is
808 * provided by the topmost layer, see cl_page_disown0() as an example.
812 * Called when \a io acquires this page into the exclusive
813 * ownership. When this method returns, it is guaranteed that the page is
814 * not owned by another io, and no transfer is going on against
818 * \see vvp_page_own(), lov_page_own()
820 int (*cpo_own)(const struct lu_env *env,
821 const struct cl_page_slice *slice,
822 struct cl_io *io, int nonblock);
823 /** Called when ownership is yielded. Optional.
825 * \see cl_page_disown()
826 * \see vvp_page_disown()
828 void (*cpo_disown)(const struct lu_env *env,
829 const struct cl_page_slice *slice, struct cl_io *io);
831 * Called for a page that is already "owned" by \a io from VM point of
834 * \see cl_page_assume()
835 * \see vvp_page_assume(), lov_page_assume()
837 void (*cpo_assume)(const struct lu_env *env,
838 const struct cl_page_slice *slice, struct cl_io *io);
839 /** Dual to cl_page_operations::cpo_assume(). Optional. Called
840 * bottom-to-top when IO releases a page without actually unlocking
843 * \see cl_page_unassume()
844 * \see vvp_page_unassume()
846 void (*cpo_unassume)(const struct lu_env *env,
847 const struct cl_page_slice *slice,
850 * Announces, via \a uptodate, whether the page contains valid data.
852 * \see cl_page_export()
853 * \see vvp_page_export()
855 void (*cpo_export)(const struct lu_env *env,
856 const struct cl_page_slice *slice, int uptodate);
858 * Checks whether underlying VM page is locked (in the suitable
859 * sense). Used for assertions.
861 * \retval -EBUSY: page is protected by a lock of a given mode;
862 * \retval -ENODATA: page is not protected by a lock;
863 * \retval 0: this layer cannot decide. (Should never happen.)
865 int (*cpo_is_vmlocked)(const struct lu_env *env,
866 const struct cl_page_slice *slice);
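	/*
	 * Illustrative sketch (not the actual vvp implementation): a layer
	 * bound to the VM page could implement the method above as
	 *
	 *   static int foo_page_is_vmlocked(const struct lu_env *env,
	 *                                   const struct cl_page_slice *slice)
	 *   {
	 *           return PageLocked(slice->cpl_page->cp_vmpage) ?
	 *                  -EBUSY : -ENODATA;
	 *   }
	 *
	 * following the return value convention documented above.
	 */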
872 * Called when page is truncated from the object. Optional.
874 * \see cl_page_discard()
875 * \see vvp_page_discard(), osc_page_discard()
877 void (*cpo_discard)(const struct lu_env *env,
878 const struct cl_page_slice *slice,
881 * Called when page is removed from the cache, and is about to be
882 * destroyed. Optional.
884 * \see cl_page_delete()
885 * \see vvp_page_delete(), osc_page_delete()
887 void (*cpo_delete)(const struct lu_env *env,
888 const struct cl_page_slice *slice);
889 /** Destructor. Frees resources and slice itself. */
890 void (*cpo_fini)(const struct lu_env *env,
891 struct cl_page_slice *slice);
893 * Optional debugging helper. Prints given page slice.
895 * \see cl_page_print()
897 int (*cpo_print)(const struct lu_env *env,
898 const struct cl_page_slice *slice,
899 void *cookie, lu_printer_t p);
908 * Request type dependent vector of operations.
910 * Transfer operations depend on transfer mode (cl_req_type). To avoid
911 * passing transfer mode to each and every of these methods, and to
912 * avoid branching on request type inside of the methods, separate
913 * methods for cl_req_type:CRT_READ and cl_req_type:CRT_WRITE are
914 * provided. That is, method invocation usually looks like
916 * slice->cp_ops.io[req->crq_type].cpo_method(env, slice, ...);
920 * Called when a page is submitted for a transfer as a part of
923 * \return 0 : page is eligible for submission;
924 * \return -EALREADY : skip this page;
925 * \return -ve : error.
927 * \see cl_page_prep()
929 int (*cpo_prep)(const struct lu_env *env,
930 const struct cl_page_slice *slice,
933 * Completion handler. This is guaranteed to be eventually
934 * fired after cl_page_operations::cpo_prep() or
935 * cl_page_operations::cpo_make_ready() call.
937 * This method can be called in a non-blocking context. It is
938 * guaranteed however, that the page involved and its object
939 * are pinned in memory (and, hence, calling cl_page_put() is
942 * \see cl_page_completion()
944 void (*cpo_completion)(const struct lu_env *env,
945 const struct cl_page_slice *slice,
948 * Called when cached page is about to be added to the
949 * ptlrpc request as a part of req formation.
951 * \return 0 : proceed with this page;
952 * \return -EAGAIN : skip this page;
953 * \return -ve : error.
955 * \see cl_page_make_ready()
957 int (*cpo_make_ready)(const struct lu_env *env,
958 const struct cl_page_slice *slice);
961 * Tell transfer engine that only the [from, to] part of a page should be
964 * This is used for immediate transfers.
966 * \todo XXX this is not a very good interface. It would be much better
967 * if all transfer parameters were supplied as arguments to
968 * cl_io_operations::cio_submit() call, but it is not clear how to do
969 * this for page queues.
971 * \see cl_page_clip()
973 void (*cpo_clip)(const struct lu_env *env,
974 const struct cl_page_slice *slice,
977 * \pre the page was queued for transferring.
978 * \post page is removed from the client's pending list, or -EBUSY
979 * is returned if it is already being transferred.
981 * This is one of the few page operations which:
982 * 0. is called from the top level;
983 * 1. does not have the vmpage locked;
984 * 2. requires every layer to synchronize execution of its ->cpo_cancel()
985 * with completion handlers. Osc uses the client obd lock for this
986 * purpose. Since there is neither vvp_page_cancel() nor
987 * lov_page_cancel(), cpo_cancel() is de facto protected by the client lock.
989 * \see osc_page_cancel().
991 int (*cpo_cancel)(const struct lu_env *env,
992 const struct cl_page_slice *slice);
994 * Write out a page by the kernel. This is only called by ll_writepage
997 * \see cl_page_flush()
999 int (*cpo_flush)(const struct lu_env *env,
1000 const struct cl_page_slice *slice,
1006 * Helper macro, dumping detailed information about \a page into a log.
1008 #define CL_PAGE_DEBUG(mask, env, page, format, ...) \
1010 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
1011 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
1012 cl_page_print(env, &msgdata, lu_cdebug_printer, page); \
1013 CDEBUG(mask, format , ## __VA_ARGS__); \
1018 * Helper macro, dumping shorter information about \a page into a log.
1020 #define CL_PAGE_HEADER(mask, env, page, format, ...) \
1022 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
1023 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
1024 cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \
1025 CDEBUG(mask, format , ## __VA_ARGS__); \
1029 static inline struct page *cl_page_vmpage(const struct cl_page *page)
1031 LASSERT(page->cp_vmpage != NULL);
1032 return page->cp_vmpage;
1036 * Check if a cl_page is in use.
1038 * Client cache holds a refcount; this refcount will be dropped when
1039 * the page is taken out of the cache, see vvp_page_delete().
1041 static inline bool __page_in_use(const struct cl_page *page, int refc)
1043 return (atomic_read(&page->cp_ref) > refc + 1);
1047 * Caller itself holds a refcount of cl_page.
1049 #define cl_page_in_use(pg) __page_in_use(pg, 1)
1051 * Caller doesn't hold a refcount.
1053 #define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
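/*
 * Example (illustrative): the helpers above combine naturally with the
 * debugging macros defined earlier in this header, e.g.
 *
 *   if (cl_page_in_use(page))
 *           CL_PAGE_HEADER(D_INFO, env, page, "page is still in use\n");
 */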
1057 /** \addtogroup cl_lock cl_lock
1061 * Extent locking on the client.
1065 * The locking model of the new client code is built around
1069 * data-type representing an extent lock on a regular file. cl_lock is a
1070 * layered object (much like cl_object and cl_page), it consists of a header
1071 * (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to
1072 * cl_lock::cll_layers list through cl_lock_slice::cls_linkage.
1074 * Typical cl_lock consists of the two layers:
1076 * - vvp_lock (vvp specific data), and
1077 * - lov_lock (lov specific data).
1079 * lov_lock contains an array of sub-locks. Each of these sub-locks is a
1080 * normal cl_lock: it has a header (struct cl_lock) and a list of layers:
1082 * - lovsub_lock, and
1085 * Each sub-lock is associated with a cl_object (representing stripe
1086 * sub-object or the file with which the top-level cl_lock is associated), and is
1087 * linked into that cl_object::coh_locks. In this respect cl_lock is similar to
1088 * cl_object (that at lov layer also fans out into multiple sub-objects), and
1089 * is different from cl_page, that doesn't fan out (there is usually exactly
1090 * one osc_page for every vvp_page). We shall call vvp-lov portion of the lock
1091 * a "top-lock" and its lovsub-osc portion a "sub-lock".
1095 * cl_lock is a cacheless data container for the requirements of locks to
1096 * complete the IO. cl_lock is created before I/O starts and destroyed when the
1099 * cl_lock depends on LDLM lock to fulfill lock semantics. LDLM lock is attached
1100 * to cl_lock at OSC layer. LDLM lock is still cacheable.
1102 * INTERFACE AND USAGE
1104 * Two major methods are supported for cl_lock: clo_enqueue and clo_cancel. A
1105 * cl_lock is enqueued by cl_lock_request(), which will call clo_enqueue()
1106 * methods for each layer to enqueue the lock. At the LOV layer, if a cl_lock
1107 * consists of multiple sub cl_locks, each sub-lock will be enqueued
1108 * correspondingly. At OSC layer, the lock enqueue request will tend to reuse
1109 * cached LDLM lock; otherwise a new LDLM lock will have to be requested from
1112 * cl_lock_cancel() must be called to release a cl_lock after use. clo_cancel()
1113 * method will be called for each layer to release the resource held by this
1114 * lock. At OSC layer, the reference count of LDLM lock, which is held at
1115 * clo_enqueue time, is released.
1117 * LDLM lock can only be canceled if there is no cl_lock using it.
1119 * The overall process of locking during an IO operation is as follows:
1121 * - once parameters for IO are setup in cl_io, cl_io_operations::cio_lock()
1122 * is called on each layer. The responsibility of this method is to add the
1123 * locks needed by a given layer into cl_io.ci_lockset.
1125 * - once locks for all layers have been collected, they are sorted to avoid
1126 * dead-locks (cl_io_locks_sort()), and enqueued.
1128 * - when all locks are acquired, IO is performed;
1130 * - locks are released after IO is complete.
1132 * Striping introduces major additional complexity into locking. The
1133 * fundamental problem is that it is generally unsafe to actively use (hold)
1134 * two locks on the different OST servers at the same time, as this introduces
1135 * inter-server dependency and can lead to cascading evictions.
1137 * Basic solution is to sub-divide large read/write IOs into smaller pieces so
1138 * that no multi-stripe locks are taken (note that this design abandons POSIX
1139 * read/write semantics). Such pieces ideally can be executed concurrently. At
1140 * the same time, certain types of IO cannot be sub-divided without
1141 * sacrificing correctness. This includes:
1143 * - O_APPEND write, where [0, EOF] lock has to be taken, to guarantee
1146 * - ftruncate(fd, offset), where [offset, EOF] lock has to be taken.
1148 * Also, in the case of read(fd, buf, count) or write(fd, buf, count), where
1149 * buf is a part of memory mapped Lustre file, a lock or locks protecting buf
1150 * has to be held together with the usual lock on [offset, offset + count].
1152 * Interaction with DLM
1154 * In the expected setup, cl_lock is ultimately backed up by a collection of
1155 * DLM locks (struct ldlm_lock). Association between cl_lock and DLM lock is
1156 * implemented in the osc layer, which also matches DLM events (ASTs, cancellation,
1157 * etc.) into cl_lock_operation calls. See struct osc_lock for a more detailed
1158 * description of interaction with DLM.
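 *
 * USAGE SKETCH
 *
 * A hedged example of the lifecycle described above (assuming the
 * cl_lock_request() and cl_lock_cancel() prototypes declared elsewhere in
 * this header, and the CLM_READ lock mode):
 *
 *   struct cl_lock *lock = ...; // typically embedded in a layer's io slice
 *   int rc;
 *
 *   lock->cll_descr.cld_obj       = obj;
 *   lock->cll_descr.cld_start     = start; // first page index covered
 *   lock->cll_descr.cld_end       = end;   // last page index, inclusive
 *   lock->cll_descr.cld_mode      = CLM_READ;
 *   lock->cll_descr.cld_enq_flags = 0;
 *
 *   rc = cl_lock_request(env, io, lock);   // enqueue through all layers
 *   if (rc == 0) {
 *           // ... perform io under the lock ...
 *           cl_lock_cancel(env, lock);     // release, as described above
 *   }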
1164 struct cl_lock_descr {
1165 /** Object this lock is granted for. */
1166 struct cl_object *cld_obj;
1167 /** Index of the first page protected by this lock. */
1169 /** Index of the last page (inclusive) protected by this lock. */
1171 /** Group ID, for group lock */
1174 enum cl_lock_mode cld_mode;
1176 * flags to enqueue lock. A combination of bit-flags from
1177 * enum cl_enq_flags.
1179 __u32 cld_enq_flags;
1182 #define DDESCR "%s(%d):[%lu, %lu]:%x"
1183 #define PDESCR(descr) \
1184 cl_lock_mode_name((descr)->cld_mode), (descr)->cld_mode, \
1185 (descr)->cld_start, (descr)->cld_end, (descr)->cld_enq_flags
1187 const char *cl_lock_mode_name(const enum cl_lock_mode mode);
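/*
 * Example (illustrative): DDESCR/PDESCR are meant for debug prints of a
 * lock descriptor, e.g.
 *
 *   CDEBUG(D_DLMTRACE, "conflicting lock: " DDESCR "\n",
 *          PDESCR(&lock->cll_descr));
 */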
1190 * Layered client lock.
1193 /** List of slices. Immutable after creation. */
1194 struct list_head cll_layers;
1195 /** lock attribute, extent, cl_object, etc. */
1196 struct cl_lock_descr cll_descr;
1200 * Per-layer part of cl_lock
1202 * \see vvp_lock, lov_lock, lovsub_lock, osc_lock
1204 struct cl_lock_slice {
1205 struct cl_lock *cls_lock;
1206 /** Object slice corresponding to this lock slice. Immutable after
1208 struct cl_object *cls_obj;
1209 const struct cl_lock_operations *cls_ops;
1210 /** Linkage into cl_lock::cll_layers. Immutable after creation. */
1211 struct list_head cls_linkage;
1216 * \see vvp_lock_ops, lov_lock_ops, lovsub_lock_ops, osc_lock_ops
1218 struct cl_lock_operations {
1221 * Attempts to enqueue the lock. Called top-to-bottom.
1223 * \retval 0 this layer has enqueued the lock successfully
1224 * \retval >0 this layer has enqueued the lock, but needs to wait on
1225 * @anchor for resources
1226 * \retval -ve failure
1228 * \see vvp_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
1229 * \see osc_lock_enqueue()
1231 int (*clo_enqueue)(const struct lu_env *env,
1232 const struct cl_lock_slice *slice,
1233 struct cl_io *io, struct cl_sync_io *anchor);
1235 * Cancel a lock and release its DLM lock ref, but do not cancel the
1238 void (*clo_cancel)(const struct lu_env *env,
1239 const struct cl_lock_slice *slice);
1242 * Destructor. Frees resources and the slice.
1244 * \see vvp_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
1245 * \see osc_lock_fini()
1247 void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice);
1249 * Optional debugging helper. Prints given lock slice.
1251 int (*clo_print)(const struct lu_env *env,
1252 void *cookie, lu_printer_t p,
1253 const struct cl_lock_slice *slice);
1256 #define CL_LOCK_DEBUG(mask, env, lock, format, ...) \
1258 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
1259 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
1260 cl_lock_print(env, &msgdata, lu_cdebug_printer, lock); \
1261 CDEBUG(mask, format , ## __VA_ARGS__); \
1265 #define CL_LOCK_ASSERT(expr, env, lock) do { \
1269 CL_LOCK_DEBUG(D_ERROR, env, lock, "failed at %s.\n", #expr); \
1275 /** \addtogroup cl_page_list cl_page_list
1276 * Page list used to perform collective operations on a group of pages.
1278 * Pages are added to the list one by one. cl_page_list acquires a reference
1279 * for every page in it. Page list is used to perform collective operations on
1282 * - submit pages for an immediate transfer,
1284 * - own pages on behalf of certain io (waiting for each page in turn),
1288 * When list is finalized, it releases references on all pages it still has.
1290 * \todo XXX concurrency control.
1294 struct cl_page_list {
1296 struct list_head pl_pages;
1297 struct task_struct *pl_owner;
1301 * A 2-queue of pages. A convenience data-type for the common use case: a 2-queue
1302 * contains an incoming page list and an outgoing page list.
1305 struct cl_page_list c2_qin;
1306 struct cl_page_list c2_qout;
1309 /** @} cl_page_list */
1311 /** \addtogroup cl_io cl_io
1316 * cl_io represents a high level I/O activity like
1317 * read(2)/write(2)/truncate(2) system call, or cancellation of an extent
1320 * cl_io is a layered object, much like cl_{object,page,lock} but with one
1321 * important distinction. We want to minimize the number of calls to the allocator
1322 * in the fast path, e.g., in the case of read(2) when everything is cached:
1323 * client already owns the lock over region being read, and data are cached
1324 * due to read-ahead. To avoid allocation of cl_io layers in such situations,
1325 * per-layer io state is stored in the session, associated with the io, see
1326 * struct {vvp,lov,osc}_io for example. Sessions allocation is amortized
1327 * by using free-lists, see cl_env_get().
1329 * There is a small predefined number of possible io types, enumerated in enum
1332 * cl_io is a state machine that can be advanced concurrently by multiple
1333 * threads. It is up to these threads to control the concurrency and,
1334 * specifically, to detect when io is done, and its state can be safely
1337 * For read/write io the overall execution plan is as follows:
1339 * (0) initialize io state through all layers;
1341 * (1) loop: prepare a chunk of work to do
1343 * (2) call all layers to collect locks they need to process the current chunk
1345 * (3) sort all locks to avoid dead-locks, and acquire them
1347 * (4) process the chunk: call per-page methods
1348 * cl_io_operations::cio_prepare_write(),
1349 * cl_io_operations::cio_commit_write() for write)
1355 * To implement the "parallel IO mode", the lov layer creates sub-io's (lazily,
1356 * to address the allocation efficiency issues mentioned above), and returns the
1357 * special error condition from a per-page method when the current sub-io has to
1358 * block. This causes the io loop to be repeated, and lov switches to the next
1359 * sub-io in its cl_io_operations::cio_iter_init() implementation; see the sketch below.
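 *
 * A hedged sketch of how a caller drives this plan (assuming the
 * cl_io_init(), cl_io_loop() and cl_io_fini() prototypes declared elsewhere
 * in this header):
 *
 *   rc = cl_io_init(env, io, CIT_READ, obj); // step (0): all layers
 *   if (rc == 0)
 *           rc = cl_io_loop(env, io);        // the io loop, steps (1) onward
 *   cl_io_fini(env, io);                     // release io resources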
1364 /** read system call */
1366 /** write system call */
1368 /** truncate, utime system calls */
1370 /** get data version */
1373 * page fault handling
1377 * fsync system call handling
1378 * To write out a range of file
1382 * Miscellaneous io. This is used for occasional io activity that
1383 * doesn't fit into other types. Currently this is used for:
1385 * - cancellation of an extent lock. This io exists as a context
1386 * to write dirty pages from under the lock being canceled back
1389 * - VM induced page write-out. An io context for writing page out
1390 * for memory cleansing;
1392 * - glimpse. An io context to acquire glimpse lock.
1394 * - grouplock. An io context to acquire group lock.
1396 * CIT_MISC io is used simply as a context in which locks and pages
1397 * are manipulated. Such io has no internal "process", that is,
1398 * cl_io_loop() is never called for it.
1403 * To give advice about access of a file
1410 * States of cl_io state machine
1413 /** Not initialized. */
1417 /** IO iteration started. */
1421 /** Actual IO is in progress. */
1423 /** IO for the current iteration finished. */
1425 /** Locks released. */
1427 /** Iteration completed. */
1429 /** cl_io finalized. */
1434 * IO state private for a layer.
1436 * This is usually embedded into layer session data, rather than allocated
1439 * \see vvp_io, lov_io, osc_io
1441 struct cl_io_slice {
1442 struct cl_io *cis_io;
1443 /** corresponding object slice. Immutable after creation. */
1444 struct cl_object *cis_obj;
1445 /** io operations. Immutable after creation. */
1446 const struct cl_io_operations *cis_iop;
1448 * linkage into a list of all slices for a given cl_io, hanging off
1449 * cl_io::ci_layers. Immutable after creation.
1451 struct list_head cis_linkage;
1454 typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
1457 struct cl_read_ahead {
1458 /* Maximum page index at which the readahead window will end.
1459 * This is determined by DLM lock coverage, RPC and stripe boundaries.
1460 * cra_end is included. */
1462 /* optimal RPC size for this read, by pages */
1463 unsigned long cra_rpc_size;
1464 /* Release callback. If readahead holds resources underneath, this
1465 * function should be called to release them. */
1466 void (*cra_release)(const struct lu_env *env, void *cbdata);
1467 /* Callback data for cra_release routine */
1471 static inline void cl_read_ahead_release(const struct lu_env *env,
1472 struct cl_read_ahead *ra)
1474 if (ra->cra_release != NULL)
1475 ra->cra_release(env, ra->cra_cbdata);
1476 memset(ra, 0, sizeof(*ra));
1481 * Per-layer io operations.
1482 * \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
1484 struct cl_io_operations {
1486 * Vector of io state transition methods for every io type.
1488 * \see cl_page_operations::io
1492 * Prepare io iteration at a given layer.
1494 * Called top-to-bottom at the beginning of each iteration of
1495 * "io loop" (if it makes sense for this type of io). Here
1496 * the layer selects what work it will do during this iteration.
1498 * \see cl_io_operations::cio_iter_fini()
1500 int (*cio_iter_init) (const struct lu_env *env,
1501 const struct cl_io_slice *slice);
1503 * Finalize io iteration.
1505 * Called bottom-to-top at the end of each iteration of "io
1506 * loop". Here layers can decide whether IO has to be
1509 * \see cl_io_operations::cio_iter_init()
1511 void (*cio_iter_fini) (const struct lu_env *env,
1512 const struct cl_io_slice *slice);
1514 * Collect locks for the current iteration of io.
1516 * Called top-to-bottom to collect all locks necessary for
1517 * this iteration. This method shouldn't actually enqueue
1518 * anything, instead it should post a lock through
1519 * cl_io_lock_add(). Once all locks are collected, they are
1520 * sorted and enqueued in the proper order.
1522 int (*cio_lock) (const struct lu_env *env,
1523 const struct cl_io_slice *slice);
1525 * Finalize unlocking.
1527 * Called bottom-to-top to finish layer specific unlocking
1528 * functionality, after generic code released all locks
1529 * acquired by cl_io_operations::cio_lock().
1531 void (*cio_unlock)(const struct lu_env *env,
1532 const struct cl_io_slice *slice);
1534 * Start io iteration.
1536 * Once all locks are acquired, called top-to-bottom to
1537 * commence actual IO. In the current implementation,
1538 * top-level vvp_io_{read,write}_start() does all the work
1539 * synchronously by calling generic_file_*(), so other layers
1540 * are called when everything is done.
1542 int (*cio_start)(const struct lu_env *env,
1543 const struct cl_io_slice *slice);
1545 * Called top-to-bottom at the end of the io loop. Here a layer
1546 * might wait for an unfinished asynchronous io.
1548 void (*cio_end) (const struct lu_env *env,
1549 const struct cl_io_slice *slice);
1551 * Called bottom-to-top to notify layers that read/write IO
1552 * iteration finished, with \a nob bytes transferred.
1554 void (*cio_advance)(const struct lu_env *env,
1555 const struct cl_io_slice *slice,
1558 * Called once per io, bottom-to-top to release io resources.
1560 void (*cio_fini) (const struct lu_env *env,
1561 const struct cl_io_slice *slice);
1565 * Submit pages from \a queue->c2_qin for IO, and move
1566 * successfully submitted pages into \a queue->c2_qout. Return
1567 * non-zero if it failed to submit even a single page. If
1568 * submission failed after some pages were moved into \a
1569 * queue->c2_qout, completion callback with non-zero ioret is
1572 int (*cio_submit)(const struct lu_env *env,
1573 const struct cl_io_slice *slice,
1574 enum cl_req_type crt,
1575 struct cl_2queue *queue);
1577 * Queue async page for write.
1578 * The difference between cio_submit and cio_queue is that
1579 * cio_submit is for urgent requests.
1581 int (*cio_commit_async)(const struct lu_env *env,
1582 const struct cl_io_slice *slice,
1583 struct cl_page_list *queue, int from, int to,
1586 * Decide maximum read ahead extent
1588 * \pre io->ci_type == CIT_READ
1590 int (*cio_read_ahead)(const struct lu_env *env,
1591 const struct cl_io_slice *slice,
1592 pgoff_t start, struct cl_read_ahead *ra);
1594 * Optional debugging helper. Print given io slice.
1596 int (*cio_print)(const struct lu_env *env, void *cookie,
1597 lu_printer_t p, const struct cl_io_slice *slice);
1601 * Flags to lock enqueue procedure.
1606 * instruct the server not to block if a conflicting lock is found; instead,
1607 * -EWOULDBLOCK is returned immediately.
1609 CEF_NONBLOCK = 0x00000001,
1611 * take lock asynchronously (out of order), as it cannot
1612 * deadlock. This is for LDLM_FL_HAS_INTENT locks used for glimpsing.
1614 CEF_ASYNC = 0x00000002,
1616 * tell the server to instruct (through a flag in the blocking ast) the
1617 * owner of the conflicting lock that it can drop dirty pages
1618 * protected by this lock, without sending them to the server.
1620 CEF_DISCARD_DATA = 0x00000004,
1622 * tell the sub layers that it must be a `real' lock. This is used for
1623 * mmapped-buffer locks and glimpse locks that must never be converted
1624 * into lockless mode.
1626 * \see vvp_mmap_locks(), cl_glimpse_lock().
1628 CEF_MUST = 0x00000008,
1630 * tell the sub layers to never request a `real' lock. This flag is
1631 * not currently used.
1633 * cl_io::ci_lockreq and CEF_{MUST,NEVER} flags specify lockless
1634 * conversion policy: ci_lockreq describes generic information of lock
1635 * requirement for this IO, especially for locks which belong to the
1636 * object doing IO; however, lock itself may have precise requirements
1637 * that are described by the enqueue flags.
1639 CEF_NEVER = 0x00000010,
1641 * for async glimpse lock.
1643 CEF_AGL = 0x00000020,
1645 * enqueue a lock to test DLM lock existence.
1647 CEF_PEEK = 0x00000040,
1649 * Lock match only. Used by group lock in I/O as group lock
1650 * is known to exist.
1652 CEF_LOCK_MATCH = 0x00000080,
1654 * mask of enq_flags.
1656 CEF_MASK = 0x000000ff,
1660 * Link between lock and io. An intermediate structure is needed because the
1661 * same lock can be part of multiple io's simultaneously.
1663 struct cl_io_lock_link {
1664 /** linkage into one of cl_lockset lists. */
1665 struct list_head cill_linkage;
1666 struct cl_lock cill_lock;
1667 /** optional destructor */
1668 void (*cill_fini)(const struct lu_env *env,
1669 struct cl_io_lock_link *link);
1671 #define cill_descr cill_lock.cll_descr
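/*
 * Example (illustrative): a layer's cio_lock() method typically fills a
 * cl_io_lock_link and posts it with cl_io_lock_add() (declared elsewhere in
 * this header); the generic code then sorts and enqueues the collected
 * locks. The foo_env_link() helper and the CLM_WRITE mode name are
 * assumptions for the example.
 *
 *   struct cl_io_lock_link *link = foo_env_link(env);
 *
 *   link->cill_descr.cld_obj       = io->ci_obj;
 *   link->cill_descr.cld_mode      = CLM_WRITE;
 *   link->cill_descr.cld_start     = start;
 *   link->cill_descr.cld_end       = end;
 *   link->cill_descr.cld_enq_flags = CEF_MUST; // require a `real' lock
 *   rc = cl_io_lock_add(env, io, link);
 */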
1674 * Lock-set represents a collection of locks that an io needs at a
1675 * time. Generally speaking, the client tries to avoid holding multiple locks when
1678 * - holding extent locks over multiple ost's introduces the danger of
1679 * "cascading timeouts";
1681 * - holding multiple locks over the same ost is still dead-lock prone,
1682 * see comment in osc_lock_enqueue(),
1684 * but there are certain situations where this is unavoidable:
1686 * - O_APPEND writes have to take [0, EOF] lock for correctness;
1688 * - truncate has to take [new-size, EOF] lock for correctness;
1690 * - SNS has to take locks across full stripe for correctness;
1692 * - in the case when a user level buffer, supplied to {read,write}(file0),
1693 * is a part of a memory mapped lustre file, the client has to take dlm
1694 * locks on file0, and on all files that back up the buffer (or the part of
1695 * the buffer that is being processed in the current chunk; in any
1696 * case, there are situations where at least 2 locks are necessary).
1698 * In such cases we at least try to take locks in the same consistent
1699 * order. To this end, all locks are first collected, then sorted, and then
1703 /** locks to be acquired. */
1704 struct list_head cls_todo;
1705 /** locks acquired. */
1706 struct list_head cls_done;
1710 * Lock requirements (demand) for IO. It should be cl_io_lock_req,
1711 * but 'req' always tends to be read as 'request' :-)
1713 enum cl_io_lock_dmd {
1714 /** Always lock data (e.g., O_APPEND). */
1716 /** Layers are free to decide between local and global locking. */
1718 /** Never lock: there is no cache (e.g., liblustre). */
1722 enum cl_fsync_mode {
1723 /** start writeback, do not wait for them to finish */
1725 /** start writeback and wait for them to finish */
1727 /** discard all of dirty pages in a specific file range */
1728 CL_FSYNC_DISCARD = 2,
1729 /** start writeback and make sure they have reached storage before
1730 * returning. An OST_SYNC RPC must be issued and completed */
1734 struct cl_io_range {
1740 struct cl_io_pt *cip_next;
1741 struct cfs_ptask cip_task;
1742 struct kiocb cip_iocb;
1743 struct iov_iter cip_iter;
1744 struct file *cip_file;
1745 enum cl_io_type cip_iot;
1754 * cl_io is shared by all threads participating in this IO (in the current
1755 * implementation only one thread advances the IO, but the parallel IO design and
1756 * concurrent copy_*_user() require multiple threads acting on the same IO). It
1757 * is up to these threads to serialize their activities, including updates to
1758 * mutable cl_io fields.
1761 /** type of this IO. Immutable after creation. */
1762 enum cl_io_type ci_type;
1763 /** current state of cl_io state machine. */
1764 enum cl_io_state ci_state;
1765 /** main object this io is against. Immutable after creation. */
1766 struct cl_object *ci_obj;
1768 * Upper layer io, of which this io is a part of. Immutable after
1771 struct cl_io *ci_parent;
1772 /** List of slices. Immutable after creation. */
1773 struct list_head ci_layers;
1774 /** list of locks (to be) acquired by this io. */
1775 struct cl_lockset ci_lockset;
1776 /** lock requirements, this is just a help info for sublayers. */
1777 enum cl_io_lock_dmd ci_lockreq;
1780 struct iov_iter rw_iter;
1781 struct kiocb rw_iocb;
1782 struct cl_io_range rw_range;
1783 struct file *rw_file;
1784 unsigned int rw_nonblock:1,
1787 int (*rw_ptask)(struct cfs_ptask *ptask);
1789 struct cl_setattr_io {
1790 struct ost_lvb sa_attr;
1791 unsigned int sa_attr_flags;
1792 unsigned int sa_valid;
1793 int sa_stripe_index;
1794 struct ost_layout sa_layout;
1795 const struct lu_fid *sa_parent_fid;
1797 struct cl_data_version_io {
1798 u64 dv_data_version;
1801 struct cl_fault_io {
1802 /** page index within file. */
1804 /** number of valid bytes on a faulted page. */
1806 /** writable page? for nopage() only */
1808 /** page of an executable? */
1810 /** page_mkwrite() */
1812 /** resulting page */
1813 struct cl_page *ft_page;
1815 struct cl_fsync_io {
1818 /** file system level fid */
1819 struct lu_fid *fi_fid;
1820 enum cl_fsync_mode fi_mode;
1821 /* how many pages were written/discarded */
1822 unsigned int fi_nr_written;
1824 struct cl_ladvise_io {
1827 /** file system level fid */
1828 struct lu_fid *li_fid;
1829 enum lu_ladvise_type li_advice;
1833 struct cl_2queue ci_queue;
1836 unsigned int ci_continue:1,
1838 * This io holds a grouplock, to inform sublayers that they should
1839 * not do lockless i/o.
1843 * The whole IO needs to be restarted because the layout has been changed
1847 * to not refresh layout - the IO issuer knows that the layout won't
1848 * change (page operations; a layout change causes all pages to be
1849 * discarded), or it doesn't matter if it changes (sync).
1853 * Need MDS intervention to complete a write. This usually means the
1854 * corresponding component is not initialized for the writing extent.
1856 ci_need_write_intent:1,
1858 * Check if the layout changed after the IO finishes. Mainly for HSM
1859 * requirement. If IO occurs on open files, it doesn't need to
1860 * verify layout because HSM won't release open files.
1861 * Right now, only two operations need to verify layout: glimpse
1866 * file is released, restore has to be triggered by the vvp layer
1868 ci_restore_needed:1,
1873 /** Set to 1 if parallel execution is allowed for the current I/O. */
1876 * Number of pages owned by this IO. For invariant checking.
1878 unsigned ci_owned_nr;
1884 * Per-transfer attributes.
1886 struct cl_req_attr {
1887 enum cl_req_type cra_type;
1889 struct cl_page *cra_page;
1890 /** Generic attributes for the server consumption. */
1891 struct obdo *cra_oa;
1893 char cra_jobid[LUSTRE_JOBID_SIZE];
1896 enum cache_stats_item {
1897 /** how many cache lookups were performed */
1899 /** how many times cache lookup resulted in a hit */
1901 /** how many entities are in the cache right now */
1903 /** how many entities in the cache are actively used (and cannot be
1904 * evicted) right now */
1906 /** how many entities were created at all */
1911 #define CS_NAMES { "lookup", "hit", "total", "busy", "create" }
1914 * Stats for a generic cache (similar to inode, lu_object, etc. caches).
1916 struct cache_stats {
1917 const char *cs_name;
1918 atomic_t cs_stats[CS_NR];
1921 /** These are not exported so far */
1922 void cache_stats_init (struct cache_stats *cs, const char *name);
1925 * Client-side site. This represents a particular client stack. "Global"
1926 * variables should (directly or indirectly) be added here to allow multiple
1927 * clients to co-exist in a single address space.
1930 struct lu_site cs_lu;
1932 * Statistical counters. Atomics do not scale; something better, like
1933 * per-cpu counters, is needed.
1935 * These are exported as /proc/fs/lustre/llite/.../site
1937 * When interpreting these, keep in mind that both sub-locks (and sub-pages)
1938 * and top-locks (and top-pages) are accounted here.
1940 struct cache_stats cs_pages;
1941 atomic_t cs_pages_state[CPS_NR];
1944 int cl_site_init(struct cl_site *s, struct cl_device *top);
1945 void cl_site_fini(struct cl_site *s);
1946 void cl_stack_fini(const struct lu_env *env, struct cl_device *cl);
1949 * Output client site statistical counters into a buffer. Suitable for
1950 * ll_rd_*()-style functions.
1952 int cl_site_stats_print(const struct cl_site *site, struct seq_file *m);
1957 * Type conversion and accessor functions.
1961 static inline struct cl_site *lu2cl_site(const struct lu_site *site)
1963 return container_of(site, struct cl_site, cs_lu);
1966 static inline struct cl_device *lu2cl_dev(const struct lu_device *d)
1968 LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d));
1969 return container_of0(d, struct cl_device, cd_lu_dev);
1972 static inline struct lu_device *cl2lu_dev(struct cl_device *d)
1974 return &d->cd_lu_dev;
1977 static inline struct cl_object *lu2cl(const struct lu_object *o)
1979 LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
1980 return container_of0(o, struct cl_object, co_lu);
1983 static inline const struct cl_object_conf *
1984 lu2cl_conf(const struct lu_object_conf *conf)
1986 return container_of0(conf, struct cl_object_conf, coc_lu);
1989 static inline struct cl_object *cl_object_next(const struct cl_object *obj)
1991 return obj ? lu2cl(lu_object_next(&obj->co_lu)) : NULL;
1994 static inline struct cl_object_header *luh2coh(const struct lu_object_header *h)
1996 return container_of0(h, struct cl_object_header, coh_lu);
1999 static inline struct cl_site *cl_object_site(const struct cl_object *obj)
2001 return lu2cl_site(obj->co_lu.lo_dev->ld_site);
2005 struct cl_object_header *cl_object_header(const struct cl_object *obj)
2007 return luh2coh(obj->co_lu.lo_header);
2010 static inline int cl_device_init(struct cl_device *d, struct lu_device_type *t)
2012 return lu_device_init(&d->cd_lu_dev, t);
2015 static inline void cl_device_fini(struct cl_device *d)
2017 lu_device_fini(&d->cd_lu_dev);
2020 void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
2021 struct cl_object *obj, pgoff_t index,
2022 const struct cl_page_operations *ops);
2023 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
2024 struct cl_object *obj,
2025 const struct cl_lock_operations *ops);
2026 void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
2027 struct cl_object *obj, const struct cl_io_operations *ops);
2030 /** \defgroup cl_object cl_object
2032 struct cl_object *cl_object_top (struct cl_object *o);
2033 struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd,
2034 const struct lu_fid *fid,
2035 const struct cl_object_conf *c);
2037 int cl_object_header_init(struct cl_object_header *h);
2038 void cl_object_header_fini(struct cl_object_header *h);
2039 void cl_object_put (const struct lu_env *env, struct cl_object *o);
2040 void cl_object_get (struct cl_object *o);
2041 void cl_object_attr_lock (struct cl_object *o);
2042 void cl_object_attr_unlock(struct cl_object *o);
2043 int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
2044 struct cl_attr *attr);
2045 int cl_object_attr_update(const struct lu_env *env, struct cl_object *obj,
2046 const struct cl_attr *attr, unsigned valid);
2047 int cl_object_glimpse (const struct lu_env *env, struct cl_object *obj,
2048 struct ost_lvb *lvb);
2049 int cl_conf_set (const struct lu_env *env, struct cl_object *obj,
2050 const struct cl_object_conf *conf);
2051 int cl_object_prune (const struct lu_env *env, struct cl_object *obj);
2052 void cl_object_kill (const struct lu_env *env, struct cl_object *obj);
2053 int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
2054 struct lov_user_md __user *lum, size_t size);
2055 int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj,
2056 struct ll_fiemap_info_key *fmkey, struct fiemap *fiemap,
2058 int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj,
2059 struct cl_layout *cl);
2060 loff_t cl_object_maxbytes(struct cl_object *obj);
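/**
 * Illustrative sketch (a minimal example, not the canonical recipe): object
 * attributes are read under the per-object attribute lock, so the usual
 * pattern is lock, get, unlock. The cat_size member of struct cl_attr is
 * assumed here to be the size attribute.
 *
 * \code
 * static int example_object_size(const struct lu_env *env,
 *                                struct cl_object *obj, loff_t *size)
 * {
 *         struct cl_object *top = cl_object_top(obj);
 *         struct cl_attr attr = { 0 };
 *         int rc;
 *
 *         cl_object_attr_lock(top);
 *         rc = cl_object_attr_get(env, top, &attr);
 *         cl_object_attr_unlock(top);
 *         if (rc == 0)
 *                 *size = attr.cat_size;   // assumed field name
 *         return rc;
 * }
 * \endcode
 */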
2063 * Returns true, iff \a o0 and \a o1 are slices of the same object.
2065 static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1)
2067 return cl_object_header(o0) == cl_object_header(o1);
2070 static inline void cl_object_page_init(struct cl_object *clob, int size)
2072 clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
2073 cl_object_header(clob)->coh_page_bufsize += cfs_size_round(size);
2074 WARN_ON(cl_object_header(clob)->coh_page_bufsize > 512);
2077 static inline void *cl_object_page_slice(struct cl_object *clob,
2078 struct cl_page *page)
2080 return (void *)((char *)page + clob->co_slice_off);
2084 * Return refcount of cl_object.
2086 static inline int cl_object_refc(struct cl_object *clob)
2088 struct lu_object_header *header = clob->co_lu.lo_header;
2089 return atomic_read(&header->loh_ref);
2094 /** \defgroup cl_page cl_page
2102 /* callback of cl_page_gang_lookup() */
2104 struct cl_page *cl_page_find (const struct lu_env *env,
2105 struct cl_object *obj,
2106 pgoff_t idx, struct page *vmpage,
2107 enum cl_page_type type);
2108 struct cl_page *cl_page_alloc (const struct lu_env *env,
2109 struct cl_object *o, pgoff_t ind,
2110 struct page *vmpage,
2111 enum cl_page_type type);
2112 void cl_page_get (struct cl_page *page);
2113 void cl_page_put (const struct lu_env *env,
2114 struct cl_page *page);
2115 void cl_page_print (const struct lu_env *env, void *cookie,
2116 lu_printer_t printer,
2117 const struct cl_page *pg);
2118 void cl_page_header_print(const struct lu_env *env, void *cookie,
2119 lu_printer_t printer,
2120 const struct cl_page *pg);
2121 struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj);
2122 struct cl_page *cl_page_top (struct cl_page *page);
2124 const struct cl_page_slice *cl_page_at(const struct cl_page *page,
2125 const struct lu_device_type *dtype);
* Functions dealing with the ownership of a page by an io.
2134 int cl_page_own (const struct lu_env *env,
2135 struct cl_io *io, struct cl_page *page);
2136 int cl_page_own_try (const struct lu_env *env,
2137 struct cl_io *io, struct cl_page *page);
2138 void cl_page_assume (const struct lu_env *env,
2139 struct cl_io *io, struct cl_page *page);
2140 void cl_page_unassume (const struct lu_env *env,
2141 struct cl_io *io, struct cl_page *pg);
2142 void cl_page_disown (const struct lu_env *env,
2143 struct cl_io *io, struct cl_page *page);
2144 int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io);
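/**
 * Illustrative sketch of the ownership protocol for a single page: an io
 * must own a page before operating on it and must release ownership when it
 * is done. Error handling is abbreviated and the helper name is
 * hypothetical.
 *
 * \code
 * static int example_with_owned_page(const struct lu_env *env,
 *                                    struct cl_io *io, struct cl_page *page)
 * {
 *         int rc;
 *
 *         rc = cl_page_own(env, io, page);
 *         if (rc != 0)
 *                 return rc;   // owned by somebody else, or being destroyed
 *
 *         LASSERT(cl_page_is_owned(page, io));
 *         // ... operate on the page, e.g. cl_page_clip() or cl_page_flush()
 *
 *         cl_page_disown(env, io, page);
 *         return 0;
 * }
 * \endcode
 */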
2151 * Functions dealing with the preparation of a page for a transfer, and
2152 * tracking transfer state.
2155 int cl_page_prep (const struct lu_env *env, struct cl_io *io,
2156 struct cl_page *pg, enum cl_req_type crt);
2157 void cl_page_completion (const struct lu_env *env,
2158 struct cl_page *pg, enum cl_req_type crt, int ioret);
2159 int cl_page_make_ready (const struct lu_env *env, struct cl_page *pg,
2160 enum cl_req_type crt);
2161 int cl_page_cache_add (const struct lu_env *env, struct cl_io *io,
2162 struct cl_page *pg, enum cl_req_type crt);
2163 void cl_page_clip (const struct lu_env *env, struct cl_page *pg,
2165 int cl_page_cancel (const struct lu_env *env, struct cl_page *page);
2166 int cl_page_flush (const struct lu_env *env, struct cl_io *io,
2167 struct cl_page *pg);
2173 * \name helper routines
2174 * Functions to discard, delete and export a cl_page.
2177 void cl_page_discard(const struct lu_env *env, struct cl_io *io,
2178 struct cl_page *pg);
2179 void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
2180 int cl_page_is_vmlocked(const struct lu_env *env,
2181 const struct cl_page *pg);
2182 void cl_page_export(const struct lu_env *env,
2183 struct cl_page *pg, int uptodate);
2184 loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
2185 pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
2186 size_t cl_page_size(const struct cl_object *obj);
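/**
 * Illustrative sketch: cl_offset() and cl_index() convert between byte
 * offsets and page indices within an object, and cl_page_size() returns the
 * page size used for the conversion. The helper below only asserts the
 * expected relation between the three.
 *
 * \code
 * static void example_offset_index(const struct cl_object *obj)
 * {
 *         loff_t  pos   = 3 * (loff_t)cl_page_size(obj) + 1;
 *         pgoff_t index = cl_index(obj, pos);     // expected: 3
 *         loff_t  start = cl_offset(obj, index);  // start of that page
 *
 *         LASSERT(start <= pos &&
 *                 pos < start + (loff_t)cl_page_size(obj));
 * }
 * \endcode
 */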
2188 void cl_lock_print(const struct lu_env *env, void *cookie,
2189 lu_printer_t printer, const struct cl_lock *lock);
2190 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2191 lu_printer_t printer,
2192 const struct cl_lock_descr *descr);
* Data structure managing a client's cached pages. It maintains a count of
* "unstable" pages and an LRU of clean pages; "unstable" pages are pages
* pinned by the ptlrpc layer for recovery purposes.
2201 struct cl_client_cache {
* Reference count on the client cache:
* # of users (OSCs) + 2 (held by llite and lov)
* # of threads doing shrinking
2210 unsigned int ccc_lru_shrinkers;
2212 * # of LRU entries available
2214 atomic_long_t ccc_lru_left;
* List of entities (OSCs) for this LRU cache
2218 struct list_head ccc_lru;
2220 * Max # of LRU entries
2222 unsigned long ccc_lru_max;
2224 * Lock to protect ccc_lru list
2226 spinlock_t ccc_lru_lock;
2228 * Set if unstable check is enabled
2230 unsigned int ccc_unstable_check:1;
2232 * # of unstable pages for this mount point
2234 atomic_long_t ccc_unstable_nr;
* Waitq for waiting for unstable pages to reach zero.
* Used at umount time and signaled on BRW commit
2239 wait_queue_head_t ccc_unstable_waitq;
2242 * cl_cache functions
2244 struct cl_client_cache *cl_cache_init(unsigned long lru_page_max);
2245 void cl_cache_incref(struct cl_client_cache *cache);
2246 void cl_cache_decref(struct cl_client_cache *cache);
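/**
 * Illustrative sketch of the cl_client_cache lifetime (assuming
 * cl_cache_init() returns a cache holding one reference for its creator):
 * each additional user takes a reference with cl_cache_incref() and drops
 * it with cl_cache_decref() when done; the cache is freed when the last
 * reference goes away.
 *
 * \code
 * static struct cl_client_cache *example_cache_create(unsigned long lru_max)
 * {
 *         struct cl_client_cache *cache;
 *
 *         cache = cl_cache_init(lru_max);
 *         if (cache != NULL)
 *                 cl_cache_incref(cache);   // reference for a second user
 *         return cache;
 * }
 *
 * static void example_cache_release(struct cl_client_cache *cache)
 * {
 *         cl_cache_decref(cache);   // second user's reference
 *         cl_cache_decref(cache);   // creator's reference
 * }
 * \endcode
 */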
2250 /** \defgroup cl_lock cl_lock
2252 int cl_lock_request(const struct lu_env *env, struct cl_io *io,
2253 struct cl_lock *lock);
2254 int cl_lock_init(const struct lu_env *env, struct cl_lock *lock,
2255 const struct cl_io *io);
2256 void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock);
2257 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
2258 const struct lu_device_type *dtype);
2259 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock);
2261 int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io,
2262 struct cl_lock *lock, struct cl_sync_io *anchor);
2263 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock);
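/**
 * Illustrative sketch of the extent-lock cycle (the cll_descr member name is
 * assumed): the caller describes the extent in the lock's descriptor,
 * obtains the lock with cl_lock_request() and releases it with
 * cl_lock_release() once the covered data has been accessed.
 *
 * \code
 * static int example_with_lock(const struct lu_env *env, struct cl_io *io,
 *                              struct cl_lock *lock)
 * {
 *         int rc;
 *
 *         // lock->cll_descr (object, extent, mode) is assumed to be
 *         // filled in by the caller before this point
 *         rc = cl_lock_request(env, io, lock);
 *         if (rc != 0)
 *                 return rc;
 *
 *         // ... access the extent covered by the lock ...
 *
 *         cl_lock_release(env, lock);
 *         return 0;
 * }
 * \endcode
 */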
2267 /** \defgroup cl_io cl_io
2270 int cl_io_init (const struct lu_env *env, struct cl_io *io,
2271 enum cl_io_type iot, struct cl_object *obj);
2272 int cl_io_sub_init (const struct lu_env *env, struct cl_io *io,
2273 enum cl_io_type iot, struct cl_object *obj);
2274 int cl_io_rw_init (const struct lu_env *env, struct cl_io *io,
2275 enum cl_io_type iot, loff_t pos, size_t count);
2276 int cl_io_loop (const struct lu_env *env, struct cl_io *io);
2278 void cl_io_fini (const struct lu_env *env, struct cl_io *io);
2279 int cl_io_iter_init (const struct lu_env *env, struct cl_io *io);
2280 void cl_io_iter_fini (const struct lu_env *env, struct cl_io *io);
2281 int cl_io_lock (const struct lu_env *env, struct cl_io *io);
2282 void cl_io_unlock (const struct lu_env *env, struct cl_io *io);
2283 int cl_io_start (const struct lu_env *env, struct cl_io *io);
2284 void cl_io_end (const struct lu_env *env, struct cl_io *io);
2285 int cl_io_lock_add (const struct lu_env *env, struct cl_io *io,
2286 struct cl_io_lock_link *link);
2287 int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
2288 struct cl_lock_descr *descr);
2289 int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io,
2290 enum cl_req_type iot, struct cl_2queue *queue);
2291 int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io,
2292 enum cl_req_type iot, struct cl_2queue *queue,
2294 int cl_io_commit_async (const struct lu_env *env, struct cl_io *io,
2295 struct cl_page_list *queue, int from, int to,
2297 int cl_io_read_ahead (const struct lu_env *env, struct cl_io *io,
2298 pgoff_t start, struct cl_read_ahead *ra);
2299 void cl_io_rw_advance (const struct lu_env *env, struct cl_io *io,
2301 int cl_io_cancel (const struct lu_env *env, struct cl_io *io,
2302 struct cl_page_list *queue);
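/**
 * Illustrative sketch of driving a top-level io (a simplified version of
 * what an io issuer does; error conventions are abbreviated): the io is
 * initialized against an object, advanced to completion by cl_io_loop(),
 * which internally iterates, takes locks, starts and ends the io, and is
 * always finalized with cl_io_fini().
 *
 * \code
 * static int example_drive_io(const struct lu_env *env, struct cl_io *io,
 *                             enum cl_io_type iot, struct cl_object *obj)
 * {
 *         int rc;
 *
 *         rc = cl_io_init(env, io, iot, obj);
 *         if (rc == 0)
 *                 rc = cl_io_loop(env, io);
 *         cl_io_fini(env, io);
 *         return rc;
 * }
 * \endcode
 */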
2305 * True, iff \a io is an O_APPEND write(2).
2307 static inline int cl_io_is_append(const struct cl_io *io)
2309 return io->ci_type == CIT_WRITE && io->u.ci_rw.rw_append;
2312 static inline int cl_io_is_sync_write(const struct cl_io *io)
2314 return io->ci_type == CIT_WRITE && io->u.ci_rw.rw_sync;
2317 static inline int cl_io_is_mkwrite(const struct cl_io *io)
2319 return io->ci_type == CIT_FAULT && io->u.ci_fault.ft_mkwrite;
2323 * True, iff \a io is a truncate(2).
2325 static inline int cl_io_is_trunc(const struct cl_io *io)
2327 return io->ci_type == CIT_SETATTR &&
2328 (io->u.ci_setattr.sa_valid & ATTR_SIZE);
2331 struct cl_io *cl_io_top(struct cl_io *io);
2333 void cl_io_print(const struct lu_env *env, void *cookie,
2334 lu_printer_t printer, const struct cl_io *io);
2336 #define CL_IO_SLICE_CLEAN(foo_io, base) \
2338 typeof(foo_io) __foo_io = (foo_io); \
2340 CLASSERT(offsetof(typeof(*__foo_io), base) == 0); \
2341 memset(&__foo_io->base + 1, 0, \
2342 (sizeof *__foo_io) - sizeof __foo_io->base); \
2347 /** \defgroup cl_page_list cl_page_list
2351 * Last page in the page list.
2353 static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
2355 LASSERT(plist->pl_nr > 0);
2356 return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
2359 static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist)
2361 LASSERT(plist->pl_nr > 0);
2362 return list_entry(plist->pl_pages.next, struct cl_page, cp_batch);
2366 * Iterate over pages in a page list.
2368 #define cl_page_list_for_each(page, list) \
2369 list_for_each_entry((page), &(list)->pl_pages, cp_batch)
2372 * Iterate over pages in a page list, taking possible removals into account.
2374 #define cl_page_list_for_each_safe(page, temp, list) \
2375 list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
2377 void cl_page_list_init (struct cl_page_list *plist);
2378 void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page);
2379 void cl_page_list_move (struct cl_page_list *dst, struct cl_page_list *src,
2380 struct cl_page *page);
2381 void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
2382 struct cl_page *page);
2383 void cl_page_list_splice (struct cl_page_list *list,
2384 struct cl_page_list *head);
2385 void cl_page_list_del (const struct lu_env *env,
2386 struct cl_page_list *plist, struct cl_page *page);
2387 void cl_page_list_disown (const struct lu_env *env,
2388 struct cl_io *io, struct cl_page_list *plist);
2389 void cl_page_list_assume (const struct lu_env *env,
2390 struct cl_io *io, struct cl_page_list *plist);
2391 void cl_page_list_discard(const struct lu_env *env,
2392 struct cl_io *io, struct cl_page_list *plist);
2393 void cl_page_list_fini (const struct lu_env *env, struct cl_page_list *plist);
2395 void cl_2queue_init (struct cl_2queue *queue);
2396 void cl_2queue_add (struct cl_2queue *queue, struct cl_page *page);
2397 void cl_2queue_disown (const struct lu_env *env,
2398 struct cl_io *io, struct cl_2queue *queue);
2399 void cl_2queue_assume (const struct lu_env *env,
2400 struct cl_io *io, struct cl_2queue *queue);
2401 void cl_2queue_discard (const struct lu_env *env,
2402 struct cl_io *io, struct cl_2queue *queue);
2403 void cl_2queue_fini (const struct lu_env *env, struct cl_2queue *queue);
2404 void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
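/**
 * Illustrative sketch: gathering owned pages into a cl_page_list, walking
 * the list with cl_page_list_for_each(), and finalizing it. The page is
 * assumed to be owned by the current io; the list takes its own reference,
 * which cl_page_list_fini() drops.
 *
 * \code
 * static void example_queue_page(const struct lu_env *env,
 *                                struct cl_page_list *plist,
 *                                struct cl_page *page)
 * {
 *         struct cl_page *scan;
 *         unsigned int nr = 0;
 *
 *         cl_page_list_init(plist);
 *         cl_page_list_add(plist, page);
 *
 *         cl_page_list_for_each(scan, plist)
 *                 nr++;
 *         LASSERT(nr == 1);
 *
 *         cl_page_list_fini(env, plist);
 * }
 * \endcode
 */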
2406 /** @} cl_page_list */
2408 void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
2409 struct cl_req_attr *attr);
2411 /** \defgroup cl_sync_io cl_sync_io
* Anchor for synchronous transfer. This is allocated on the stack by a
* thread doing a synchronous transfer, and a pointer to this structure is
* set up in every page submitted for transfer. The transfer completion
* routine updates the anchor and wakes up the waiting thread when the
* transfer is complete.
2421 /** number of pages yet to be transferred. */
2422 atomic_t csi_sync_nr;
/** barrier protecting the destruction of this structure */
2426 atomic_t csi_barrier;
2427 /** completion to be signaled when transfer is complete. */
2428 wait_queue_head_t csi_waitq;
2429 /** callback to invoke when this IO is finished */
2430 void (*csi_end_io)(const struct lu_env *,
2431 struct cl_sync_io *);
2434 void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
2435 void (*end)(const struct lu_env *, struct cl_sync_io *));
2436 int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
2438 void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
2440 void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
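/**
 * Illustrative sketch of a synchronous transfer (the trailing arguments of
 * cl_sync_io_wait() and cl_sync_io_note(), assumed here to be a timeout and
 * a transfer result respectively, are not shown in full above): the
 * submitter initializes the anchor for the number of pages it is about to
 * send, every completion calls cl_sync_io_note(), and the submitter blocks
 * in cl_sync_io_wait() until the counter drains to zero.
 *
 * \code
 * static int example_sync_transfer(const struct lu_env *env, int nr_pages)
 * {
 *         struct cl_sync_io anchor;
 *
 *         cl_sync_io_init(&anchor, nr_pages, cl_sync_io_end);
 *
 *         // ... submit nr_pages pages referencing &anchor; each completion
 *         //     is expected to call cl_sync_io_note(env, &anchor, ioret)
 *
 *         return cl_sync_io_wait(env, &anchor, 0);  // 0: assumed "no timeout"
 * }
 * \endcode
 */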
2442 /** @} cl_sync_io */
2444 /** \defgroup cl_env cl_env
2446 * lu_env handling for a client.
2448 * lu_env is an environment within which lustre code executes. Its major part
2449 * is lu_context---a fast memory allocation mechanism that is used to conserve
2450 * precious kernel stack space. Originally lu_env was designed for a server,
2453 * - there is a (mostly) fixed number of threads, and
2455 * - call chains have no non-lustre portions inserted between lustre code.
* On a client both of these assumptions fail, because every user thread can
* potentially execute lustre code as part of a system call, and lustre calls
* into the VFS or MM, which can call back into lustre.
2461 * To deal with that, cl_env wrapper functions implement the following
* - allocation and destruction of environments is amortized by caching
*   no-longer-used environments instead of destroying them;
2467 * \see lu_env, lu_context, lu_context_key
2470 struct lu_env *cl_env_get(__u16 *refcheck);
2471 struct lu_env *cl_env_alloc(__u16 *refcheck, __u32 tags);
2472 void cl_env_put(struct lu_env *env, __u16 *refcheck);
2473 unsigned cl_env_cache_purge(unsigned nr);
2474 struct lu_env *cl_env_percpu_get(void);
2475 void cl_env_percpu_put(struct lu_env *env);
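/**
 * Illustrative sketch of the cl_env pattern: an entry point into the client
 * stack obtains an environment with cl_env_get() and returns it with
 * cl_env_put() using the same refcheck cookie, which lets the cache detect
 * unbalanced usage. cl_object_prune() stands in for any clio call that
 * needs an environment.
 *
 * \code
 * static int example_entry_point(struct cl_object *obj)
 * {
 *         struct lu_env *env;
 *         __u16 refcheck;
 *         int rc;
 *
 *         env = cl_env_get(&refcheck);
 *         if (IS_ERR(env))
 *                 return PTR_ERR(env);
 *
 *         rc = cl_object_prune(env, obj);
 *
 *         cl_env_put(env, &refcheck);
 *         return rc;
 * }
 * \endcode
 */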
2482 void cl_attr2lvb(struct ost_lvb *lvb, const struct cl_attr *attr);
2483 void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb);
2485 struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
2486 struct lu_device_type *ldt,
2487 struct lu_device *next);
2490 int cl_global_init(void);
2491 void cl_global_fini(void);
#endif /* _LUSTRE_CL_OBJECT_H */