/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 */
#ifndef _LUSTRE_CL_OBJECT_H
#define _LUSTRE_CL_OBJECT_H
/** \defgroup clio clio
 *
 * Client objects implement io operations and cache pages.
 *
 * Examples: lov and osc are implementations of the cl interface.
 *
 * Big Theory Statement.
 *
 * The client implementation is based on the following data-types:
 *
 * - cl_lock represents an extent lock on an object.
 *
 * - cl_io represents a high-level i/o activity, such as a whole read/write
 *   system call, or a write-out of pages from under a lock being
 *   canceled. cl_io has sub-ios that can be stopped and resumed
 *   independently, thus achieving a high degree of transfer
 *   parallelism. A single cl_io can be advanced forward by
 *   multiple threads (although in the most usual case of a
 *   read/write system call it is associated with the single user
 *   thread that issued the system call).
 *
 * Terminology
 *
 * - to avoid confusion, a high-level I/O operation like a read or write
 *   system call is referred to as "an io", whereas a low-level I/O
 *   operation, like an RPC, is referred to as "a transfer";
 *
 * - "generic code" means generic (not file system specific) code in the
 *   hosting environment. "cl-code" means code (mostly in cl_*.c files) that
 *   is not layer specific.
 *
 * Locking
 *
 * - cl_object_header::coh_page_guard
 *
 * See the top comment in cl_object.c for the description of overall locking
 * and reference-counting design.
 *
 * See comments below for the description of i/o, page, and dlm-locking
 * design.
 */

/*
 * super-class definitions.
 */
#include <linux/aio.h>

#include <libcfs/libcfs.h>
#include <lu_object.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/pagevec.h>
#include <libcfs/linux/linux-misc.h>
#include <lustre_dlm.h>
#include <lustre_compat.h>
struct cl_page_slice;
struct cl_lock_slice;

struct cl_lock_operations;
struct cl_page_operations;
/**
 * Device in the client stack.
 *
 * \see vvp_device, lov_device, lovsub_device, osc_device
 */
struct cl_device {
	/** Super-class. */
	struct lu_device cd_lu_dev;
};
136 * "Data attributes" of cl_object. Data attributes can be updated
137 * independently for a sub-object, and top-object's attributes are calculated
138 * from sub-objects' ones.
141 /** Object size, in bytes */
144 unsigned int cat_kms_valid:1;
146 * Known minimal size, in bytes.
148 * This is only valid when at least one DLM lock is held.
151 /** Modification time. Measured in seconds since epoch. */
153 /** Access time. Measured in seconds since epoch. */
155 /** Change time. Measured in seconds since epoch. */
158 * Blocks allocated to this cl_object on the server file system.
160 * \todo XXX An interface for block size is needed.
164 * User identifier for quota purposes.
168 * Group identifier for quota purposes.
172 /* nlink of the directory */
175 /* Project identifier for quota purpose. */
/**
 * Fields in cl_attr that are being set.
 */
enum cl_attr_valid {
	CAT_SIZE	= BIT(0),
	CAT_KMS		= BIT(1),
	CAT_MTIME	= BIT(3),
	CAT_ATIME	= BIT(4),
	CAT_CTIME	= BIT(5),
	CAT_BLOCKS	= BIT(6),
	CAT_UID		= BIT(7),
	CAT_GID		= BIT(8),
	CAT_PROJID	= BIT(9),
};
/**
 * Sub-class of lu_object with methods common for objects on the client
 * stacks.
 *
 * cl_object: represents a regular file system object, both a file and a
 * stripe. cl_object is based on lu_object: it is identified by a fid,
 * layered, cached, hashed, and lrued. An important distinction from the
 * server side, where md_object and dt_object are used, is that cl_object
 * "fans out" at the lov/sns level: depending on the file layout, a single
 * file is represented as a set of "sub-objects" (stripes). At the
 * implementation level, struct lov_object contains an array of cl_objects.
 * Each sub-object is a full-fledged cl_object, having its fid and living
 * in the lru and hash table.
 *
 * This leads to the next important difference with the server side: on the
 * client, it's quite usual to have objects with different sequences of
 * layers. For example, a typical top-object is composed of the following
 * layers:
 *
 *     - vvp
 *     - lov
 *
 * whereas its sub-objects are composed of
 *
 *     - lovsub
 *     - osc
 *
 * layers. Here "lovsub" is a mostly dummy layer, whose purpose is to keep
 * track of the object-subobject relationship.
 *
 * Sub-objects are not cached independently: when a top-object is about to
 * be discarded from the memory, all its sub-objects are torn down and
 * destroyed too.
 *
 * \see vvp_object, lov_object, lovsub_object, osc_object
 */
struct cl_object {
	/** super class */
	struct lu_object co_lu;
	/** per-object-layer operations */
	const struct cl_object_operations *co_ops;
	/** offset of page slice in cl_page buffer */
	int co_slice_off;
};
/**
 * Description of the client object configuration. This is used for the
 * creation of a new client object that is identified by more state than
 * just a fid.
 */
struct cl_object_conf {
	/** Super-class. */
	struct lu_object_conf coc_lu;
	union {
		/**
		 * Object layout. This is consumed by lov.
		 */
		struct lu_buf coc_layout;
		/**
		 * Description of particular stripe location in the
		 * cluster. This is consumed by osc.
		 */
		struct lov_oinfo *coc_oinfo;
	} u;
	/**
	 * VFS inode. This is consumed by vvp.
	 */
	struct inode *coc_inode;
	/**
	 * Layout lock handle.
	 */
	struct ldlm_lock *coc_lock;
	/**
	 * Operation to handle layout, OBJECT_CONF_XYZ.
	 */
	int coc_opc;
};

enum {
	/** configure layout, new stripe; must be holding layout lock. */
	OBJECT_CONF_SET = 0,
	/** invalidate the current stripe config when losing layout lock. */
	OBJECT_CONF_INVALIDATE = 1,
	/** wait for old layout to go away so that new layout can be set up. */
	OBJECT_CONF_WAIT = 2
};
enum {
	CL_LAYOUT_GEN_NONE	= (u32)-2,	/* layout lock was cancelled */
	CL_LAYOUT_GEN_EMPTY	= (u32)-1,	/* for empty layout */
};

struct cl_layout {
	/** the buffer to return the layout in lov_mds_md format. */
	struct lu_buf	cl_buf;
	/** size of layout in lov_mds_md format. */
	size_t		cl_size;
	/** Layout generation. */
	u32		cl_layout_gen;
	/** whether layout is a composite one */
	bool		cl_is_composite;
	/** Whether layout is a HSM released one */
	bool		cl_is_released;
	/** Whether layout is a readonly one */
	bool		cl_is_rdonly;
};
/**
 * Operations implemented for each cl object layer.
 *
 * \see vvp_ops, lov_ops, lovsub_ops, osc_ops
 */
struct cl_object_operations {
	/**
	 * Initialize page slice for this layer. Called top-to-bottom through
	 * every object layer when a new cl_page is instantiated. A layer
	 * keeping private per-page data, or requiring its own page operations
	 * vector, should allocate these data here and attach them to the page
	 * by calling cl_page_slice_add(). \a vmpage is locked (in the VM
	 * sense).
	 *
	 * \retval NULL success.
	 *
	 * \retval ERR_PTR(errno) failure code.
	 *
	 * \retval valid-pointer pointer to an already existing referenced
	 * page to be used instead of the newly created one.
	 */
	int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
			     struct cl_page *page, pgoff_t index);
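	/*
	 * Illustrative sketch (not part of the API): a layer's
	 * coo_page_init() typically locates the slice it reserved in the
	 * cl_page buffer and registers it with cl_page_slice_add(). The
	 * "foo" layer and its foo_page/foo_page_ops below are hypothetical;
	 * cl_object_page_slice() is the helper that maps co_slice_off to
	 * the per-layer slice:
	 *
	 *	static int foo_page_init(const struct lu_env *env,
	 *				 struct cl_object *obj,
	 *				 struct cl_page *page, pgoff_t index)
	 *	{
	 *		struct foo_page *fp = cl_object_page_slice(obj, page);
	 *
	 *		cl_page_slice_add(page, &fp->fp_cl, obj,
	 *				  &foo_page_ops);
	 *		return 0;
	 *	}
	 */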
	/**
	 * Initialize lock slice for this layer. Called top-to-bottom through
	 * every object layer when a new cl_lock is instantiated. A layer
	 * keeping private per-lock data, or requiring its own lock operations
	 * vector, should allocate these data here and attach them to the lock
	 * by calling cl_lock_slice_add(). Mandatory.
	 */
	int (*coo_lock_init)(const struct lu_env *env,
			     struct cl_object *obj, struct cl_lock *lock,
			     const struct cl_io *io);
	/**
	 * Initialize io state for a given layer.
	 *
	 * Called top-to-bottom once per io existence to initialize io
	 * state. If a layer wants to keep some state for this type of io, it
	 * has to embed struct cl_io_slice in lu_env::le_ses, and register
	 * the slice with cl_io_slice_add(). It is guaranteed that all threads
	 * participating in this io share the same session.
	 */
	int (*coo_io_init)(const struct lu_env *env,
			   struct cl_object *obj, struct cl_io *io);
	/**
	 * Fill portion of \a attr that this layer controls. This method is
	 * called top-to-bottom through all object layers.
	 *
	 * \pre cl_object_header::coh_attr_guard of the top-object is locked.
	 *
	 * \return 0: to continue
	 * \return +ve: to stop iterating through layers (but 0 is returned
	 *              from enclosing cl_object_attr_get())
	 * \return -ve: to signal error
	 */
	int (*coo_attr_get)(const struct lu_env *env, struct cl_object *obj,
			    struct cl_attr *attr);
	/**
	 * Update attributes.
	 *
	 * \a valid is a bitmask composed from enum #cl_attr_valid, and
	 * indicating what attributes are to be set.
	 *
	 * \pre cl_object_header::coh_attr_guard of the top-object is locked.
	 *
	 * \return the same convention as for
	 * cl_object_operations::coo_attr_get() is used.
	 */
	int (*coo_attr_update)(const struct lu_env *env, struct cl_object *obj,
			       const struct cl_attr *attr, unsigned int valid);
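	/*
	 * A minimal sketch of how an enclosing walker such as
	 * cl_object_attr_get() honours the 0/+ve/-ve convention above,
	 * iterating layers top-to-bottom (locking and other details
	 * elided):
	 *
	 *	int result = 0;
	 *	struct cl_object *obj;
	 *
	 *	cl_object_for_each(obj, top) {
	 *		if (obj->co_ops->coo_attr_get != NULL) {
	 *			result = obj->co_ops->coo_attr_get(env, obj,
	 *							   attr);
	 *			if (result != 0) {
	 *				if (result > 0)
	 *					result = 0;
	 *				break;
	 *			}
	 *		}
	 *	}
	 *	return result;
	 */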
	/**
	 * Mark the inode dirty. This adds the inode to the writeback list
	 * of the corresponding @bdi_writeback, which then defers writing
	 * the dirty pages out to OSTs to the kernel writeback machinery.
	 */
	void (*coo_dirty_for_sync)(const struct lu_env *env,
				   struct cl_object *obj);
	/**
	 * Update object configuration. Called top-to-bottom to modify object
	 * configuration.
	 *
	 * XXX error conditions and handling.
	 */
	int (*coo_conf_set)(const struct lu_env *env, struct cl_object *obj,
			    const struct cl_object_conf *conf);
	/**
	 * Glimpse ast. Executed when glimpse ast arrives for a lock on this
	 * object. Layers are supposed to fill parts of \a lvb that will be
	 * shipped to the glimpse originator as a glimpse result.
	 *
	 * \see vvp_object_glimpse(), lovsub_object_glimpse(),
	 * \see osc_object_glimpse()
	 */
	int (*coo_glimpse)(const struct lu_env *env,
			   const struct cl_object *obj, struct ost_lvb *lvb);
	/**
	 * Object prune method. Called when the layout is going to change on
	 * this object, therefore each layer has to clean up its cache,
	 * mainly pages and locks.
	 */
	int (*coo_prune)(const struct lu_env *env, struct cl_object *obj);
	/**
	 * Object getstripe method.
	 */
	int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj,
			     struct lov_user_md __user *lum, size_t size);
	/**
	 * Get FIEMAP mapping from the object.
	 */
	int (*coo_fiemap)(const struct lu_env *env, struct cl_object *obj,
			  struct ll_fiemap_info_key *fmkey,
			  struct fiemap *fiemap, size_t *buflen);
	/**
	 * Get layout and generation of the object.
	 */
	int (*coo_layout_get)(const struct lu_env *env, struct cl_object *obj,
			      struct cl_layout *layout);
	/**
	 * Get maximum size of the object.
	 */
	loff_t (*coo_maxbytes)(struct cl_object *obj);
	/**
	 * Set request attributes.
	 */
	void (*coo_req_attr_set)(const struct lu_env *env,
				 struct cl_object *obj,
				 struct cl_req_attr *attr);
	/**
	 * Flush \a obj data corresponding to \a lock. Used for DoM
	 * locks in llite's cancelling blocking ast callback.
	 */
	int (*coo_object_flush)(const struct lu_env *env,
				struct cl_object *obj,
				struct ldlm_lock *lock);
	/**
	 * Operate upon inode. Used in LOV to lock/unlock inode from vvp
	 * layer.
	 */
	int (*coo_inode_ops)(const struct lu_env *env, struct cl_object *obj,
			     enum coo_inode_opc opc, void *data);
};
/**
 * Extended header for client object.
 */
struct cl_object_header {
	/* Standard lu_object_header. cl_object::co_lu::lo_header points
	 * here. */
	struct lu_object_header coh_lu;

	/**
	 * Parent object. It is assumed that an object has a well-defined
	 * parent, but not a well-defined child (there may be multiple
	 * sub-objects, for the same top-object). cl_object_header::coh_parent
	 * field allows certain code to be written generically, without
	 * limiting possible cl_object layouts unduly.
	 */
	struct cl_object_header *coh_parent;
	/**
	 * Protects consistency between cl_attr of parent object and
	 * attributes of sub-objects, that the former is calculated ("merged")
	 * from.
	 *
	 * \todo XXX this can be read/write lock if needed.
	 */
	spinlock_t coh_attr_guard;
	/**
	 * Size of cl_page + page slices
	 */
	unsigned short coh_page_bufsize;
	/**
	 * Number of objects above this one: 0 for a top-object, 1 for its
	 * sub-object, etc.
	 */
	unsigned char coh_nesting;
};
/**
 * Helper macro: iterate over all layers of the object \a obj, assigning every
 * layer top-to-bottom to \a slice.
 */
#define cl_object_for_each(slice, obj)					\
	list_for_each_entry((slice),					\
			    &(obj)->co_lu.lo_header->loh_layers,	\
			    co_lu.lo_linkage)

/**
 * Helper macro: iterate over all layers of the object \a obj, assigning every
 * layer bottom-to-top to \a slice.
 */
#define cl_object_for_each_reverse(slice, obj)				\
	list_for_each_entry_reverse((slice),				\
				    &(obj)->co_lu.lo_header->loh_layers,\
				    co_lu.lo_linkage)

#define CL_PAGE_EOF ((pgoff_t)~0ull)
/**
 * Layered client page.
 *
 * cl_page: represents a portion of a file, cached in the memory. All pages
 * of the given file are of the same size, and are kept in the radix tree
 * hanging off the cl_object. cl_page doesn't fan out, but as sub-objects
 * of the top-level file object are first class cl_objects, they have their
 * own radix trees of pages and hence a page is implemented as a sequence of
 * struct cl_page's, linked into a double-linked list through
 * cl_page::cp_parent and cl_page::cp_child pointers, each residing in the
 * corresponding radix tree at the corresponding logical offset.
 *
 * cl_page is associated with a VM page of the hosting environment (struct
 * page in the Linux kernel, for example). It is assumed that this
 * association is implemented by one of cl_page layers (top layer in the
 * current design) that
 *
 * - intercepts per-VM-page call-backs made by the environment (e.g.,
 *   memory pressure),
 *
 * - translates state (page flag bits) and locking between lustre and
 *   the environment.
 *
 * The association between cl_page and struct page is immutable and
 * established when cl_page is created.
 *
 * cl_page can be "owned" by a particular cl_io (see below), guaranteeing
 * this io an exclusive access to this page w.r.t. other io attempts and
 * various events changing page state (such as transfer completion, or
 * eviction of the page from the memory). Note that in general cl_io
 * cannot be identified with a particular thread, and page ownership is not
 * exactly equal to the current thread holding a lock on the page. The layer
 * implementing the association between cl_page and struct page has to
 * implement ownership on top of available synchronization mechanisms.
 *
 * While the Lustre client maintains the notion of page ownership by io,
 * the hosting MM/VM usually has its own page concurrency control
 * mechanisms. For example, in Linux, page access is synchronized by the
 * per-page PG_locked bit-lock, and generic kernel code (generic_file_*())
 * takes care to acquire and release such locks as necessary around the
 * calls to the file system methods (->readpage(), ->prepare_write(),
 * ->commit_write(), etc.). This leads to the situation when there are two
 * different ways to own a page in the client:
 *
 * - client code explicitly and voluntarily owns the page
 *   (cl_page_own());
 *
 * - VM locks a page and then calls the client, which has "to assume"
 *   ownership from the VM (cl_page_assume()).
 *
 * Dual methods to release ownership are cl_page_disown() and
 * cl_page_unassume().
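 *
 * As an illustrative sketch of the two pairings (error handling elided;
 * cl_page_own() can fail if the page is owned by another io or is being
 * freed):
 *
 *	if (cl_page_own(env, io, page) == 0) {
 *		... exclusive work on the page ...
 *		cl_page_disown(env, io, page);
 *	}
 *
 * whereas a VM-initiated path that enters the client with the vmpage
 * already locked pairs cl_page_assume(env, io, page) with
 * cl_page_unassume(env, io, page) instead.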
 *
 * cl_page is reference counted (cl_page::cp_ref). When the reference counter
 * drops to 0, the page is returned to the cache, unless it is in
 * cl_page_state::CPS_FREEING state, in which case it is immediately
 * destroyed.
 *
 * The general logic guaranteeing the absence of "existential races" for
 * pages is the following:
 *
 * - there are fixed known ways for a thread to obtain a new reference
 *   to a page:
 *
 *     - by doing a lookup in the cl_object radix tree, protected by the
 *       corresponding spin-lock;
 *
 *     - by starting from VM-locked struct page and following some
 *       hosting environment method (e.g., following ->private pointer in
 *       the case of Linux kernel), see cl_vmpage_page();
 *
 * - when the page enters cl_page_state::CPS_FREEING state, all these
 *   ways are severed with the proper synchronization
 *   (cl_page_delete());
 *
 * - entry into cl_page_state::CPS_FREEING is serialized by the VM page
 *   lock;
 *
 * - no new references to the page in cl_page_state::CPS_FREEING state
 *   are allowed (checked in cl_page_get()).
 *
 * Together this guarantees that when the last reference to a
 * cl_page_state::CPS_FREEING page is released, it is safe to destroy the
 * page, as neither references to it can be acquired at that point, nor
 * ones exist.
 *
 * cl_page is a state machine. States are enumerated in enum
 * cl_page_state. Possible state transitions are enumerated in
 * cl_page_state_set(). State transition process (i.e., actual changing of
 * cl_page::cp_state field) is protected by the lock on the underlying VM
 * page.
 *
 * Linux Kernel implementation.
 *
 * Binding between cl_page and struct page is implemented in the vvp layer.
 * cl_page is attached to the ->private pointer of the struct page, together
 * with the setting of PG_private bit in page->flags, and acquiring an
 * additional reference on the struct page (much like struct buffer_head, or
 * any similar file system private data structures).
 *
 * PG_locked lock is used to implement both ownership and transfer
 * synchronization, that is, the page is VM-locked in CPS_{OWNED,PAGE{IN,OUT}}
 * states. No additional references are acquired for the duration of the
 * transfer.
 *
 * \warning *THIS IS NOT* the behavior expected by the Linux kernel, where
 *	     write-out is "protected" by the special PG_writeback bit.
 */
/**
 * States of cl_page. cl_page.c assumes particular order here.
 *
 * The page state machine is rather crude, as it doesn't recognize finer page
 * states like "dirty" or "up to date". This is because such states are not
 * always well defined for the whole stack (see, for example, the
 * implementation of the read-ahead, that hides page up-to-dateness to track
 * cache hits accurately). Such sub-states are maintained by the layers that
 * are interested in them.
 */
enum cl_page_state {
	/**
	 * Page is in the cache, un-owned. Page leaves cached state in the
	 * following cases:
	 *
	 *     - [cl_page_state::CPS_OWNED] io comes across the page and
	 *       owns it;
	 *
	 *     - [cl_page_state::CPS_PAGEOUT] page is dirty, the
	 *       req-formation engine decides that it wants to include this
	 *       page into an RPC being constructed, and yanks it from the
	 *       cache;
	 *
	 *     - [cl_page_state::CPS_FREEING] VM callback is executed to
	 *       evict the page from the memory;
	 *
	 * \invariant cl_page::cp_owner == NULL && cl_page::cp_req == NULL
	 */
	CPS_CACHED = 1,
	/**
	 * Page is exclusively owned by some cl_io. Page may end up in this
	 * state as a result of
	 *
	 *     - io creating new page and immediately owning it;
	 *
	 *     - [cl_page_state::CPS_CACHED] io finding existing cached page
	 *       and owning it;
	 *
	 *     - [cl_page_state::CPS_OWNED] io finding existing owned page
	 *       and waiting for owner to release the page;
	 *
	 * Page leaves owned state in the following cases:
	 *
	 *     - [cl_page_state::CPS_CACHED] io decides to leave the page in
	 *       the cache, doing nothing;
	 *
	 *     - [cl_page_state::CPS_PAGEIN] io starts read transfer for
	 *       this page;
	 *
	 *     - [cl_page_state::CPS_PAGEOUT] io starts immediate write
	 *       transfer for this page;
	 *
	 *     - [cl_page_state::CPS_FREEING] io decides to destroy this
	 *       page (e.g., as part of truncate or extent lock cancellation).
	 *
	 * \invariant cl_page::cp_owner != NULL && cl_page::cp_req == NULL
	 */
	CPS_OWNED,
	/**
	 * Page is being written out, as a part of a transfer. This state is
	 * entered when req-formation logic decided that it wants this page to
	 * be sent through the wire _now_. Specifically, it means that once
	 * this state is achieved, transfer completion handler (with either
	 * success or failure indication) is guaranteed to be executed against
	 * this page independently of any locks and any scheduling decisions
	 * made by the hosting environment (that effectively means that the
	 * page is never put into cl_page_state::CPS_PAGEOUT state "in
	 * advance". This property is mentioned, because it is important when
	 * reasoning about possible dead-locks in the system). The page can
	 * enter this state as a result of
	 *
	 *     - [cl_page_state::CPS_OWNED] an io requesting an immediate
	 *       write-out of this page, or
	 *
	 *     - [cl_page_state::CPS_CACHED] req-forming engine deciding
	 *       that it has enough dirty pages cached to issue a "good"
	 *       transfer.
	 *
	 * The page leaves cl_page_state::CPS_PAGEOUT state when the transfer
	 * is completed---it is moved into cl_page_state::CPS_CACHED state.
	 *
	 * Underlying VM page is locked for the duration of transfer.
	 *
	 * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req != NULL
	 */
	CPS_PAGEOUT,
	/**
	 * Page is being read in, as a part of a transfer. This is quite
	 * similar to the cl_page_state::CPS_PAGEOUT state, except that
	 * read-in is always "immediate"---there is no such thing as a sudden
	 * construction of a read request from cached, presumably not up to
	 * date, pages.
	 *
	 * Underlying VM page is locked for the duration of transfer.
	 *
	 * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req != NULL
	 */
	CPS_PAGEIN,
	/**
	 * Page is being destroyed. This state is entered when client decides
	 * that page has to be deleted from its host object, as, e.g., a part
	 * of truncate.
	 *
	 * Once this state is reached, there is no way to escape it.
	 *
	 * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req == NULL
	 */
	CPS_FREEING,
	CPS_NR
};
enum cl_page_type {
	/** Host page, the page is from the host inode which the cl_page
	 * belongs to.
	 */
	CPT_CACHEABLE = 1,

	/** Transient page, the transient cl_page is used to bind a cl_page
	 * to a vmpage which does not belong to the same object as the
	 * cl_page. It is used in DirectIO and lockless IO.
	 */
	CPT_TRANSIENT,
	CPT_NR
};

#define CP_STATE_BITS	4
#define CP_TYPE_BITS	2
#define CP_MAX_LAYER	2
/**
 * Fields are protected by the lock on struct page, except for atomics and
 * immutables.
 *
 * \invariant Data type invariants are in cl_page_invariant(). Basically:
 * cl_page::cp_parent and cl_page::cp_child are a well-formed double-linked
 * list, consistent with the parent/child pointers in the cl_page::cp_obj and
 * cl_page::cp_owner (when set).
 */
struct cl_page {
	/** Reference counter. */
	refcount_t cp_ref;
	/** layout_entry + stripe index, composed using lov_comp_index() */
	unsigned int cp_lov_index;
	/** page->index of the page within the whole file */
	pgoff_t cp_page_index;
	/** An object this page is a part of. Immutable after creation. */
	struct cl_object *cp_obj;
	/** vmpage */
	struct page *cp_vmpage;
	/**
	 * Assigned if doing direct IO, because in this case cp_vmpage is not
	 * a valid page cache page, hence the inode cannot be inferred from
	 * cp_vmpage->mapping->host.
	 */
	struct inode *cp_inode;
	/** Linkage of pages within group. Pages must be owned */
	struct list_head cp_batch;
	/** array of slices offset. Immutable after creation. */
	unsigned char cp_layer_offset[CP_MAX_LAYER];
	/** current slice index */
	unsigned char cp_layer_count:2;
	/**
	 * Page state. This field is const to avoid accidental update, it is
	 * modified only internally within cl_page.c. Protected by a VM lock.
	 */
	enum cl_page_state cp_state:CP_STATE_BITS;
	/**
	 * Page type. Only CPT_TRANSIENT is used so far. Immutable after
	 * creation.
	 */
	enum cl_page_type cp_type:CP_TYPE_BITS;
	unsigned cp_defer_uptodate:1,
		 cp_ra_updated:1,
		 cp_ra_used:1;
	/* which slab kmem index this memory allocated from */
	short int cp_kmem_index;
	/**
	 * Owning IO in cl_page_state::CPS_OWNED state. Sub-page can be owned
	 * by a sub-io. Protected by a VM lock.
	 */
	struct cl_io *cp_owner;
	/** Assigned if doing a sync_io */
	struct cl_sync_io *cp_sync_io;
};
/**
 * Per-layer part of cl_page.
 *
 * \see vvp_page, lov_page, osc_page
 */
struct cl_page_slice {
	struct cl_page *cpl_page;
	const struct cl_page_operations *cpl_ops;
};

/**
 * Lock mode. For the client extent locks.
 */
enum cl_lock_mode {
	CLM_READ,
	CLM_WRITE,
	CLM_GROUP,
	CLM_MAX,
};

/**
 * Requested transfer type.
 */
enum cl_req_type {
	CRT_READ,
	CRT_WRITE,
	CRT_NR
};
/**
 * Per-layer page operations.
 *
 * Methods taking an \a io argument are for the activity happening in the
 * context of given \a io. Page is assumed to be owned by that io, except for
 * the obvious cases.
 *
 * \see vvp_page_ops, lov_page_ops, osc_page_ops
 */
struct cl_page_operations {
	/**
	 * cl_page<->struct page methods. Only one layer in the stack has to
	 * implement these. Current code assumes that this functionality is
	 * provided by the topmost layer, see __cl_page_disown() as an
	 * example.
	 */

	/**
	 * Update file attributes when all we have is this page. Used for
	 * tiny writes to update attributes when we don't have a full cl_io.
	 */
	void (*cpo_page_touch)(const struct lu_env *env,
			       const struct cl_page_slice *slice, size_t to);
	/**
	 * Called when page is truncated from the object. Optional.
	 *
	 * \see cl_page_discard()
	 * \see vvp_page_discard(), osc_page_discard()
	 */
	void (*cpo_discard)(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *io);
	/**
	 * Called when page is removed from the cache, and is about to be
	 * destroyed. Optional.
	 *
	 * \see cl_page_delete()
	 * \see vvp_page_delete(), osc_page_delete()
	 */
	void (*cpo_delete)(const struct lu_env *env,
			   const struct cl_page_slice *slice);
	/**
	 * Optional debugging helper. Prints given page slice.
	 *
	 * \see cl_page_print()
	 */
	int (*cpo_print)(const struct lu_env *env,
			 const struct cl_page_slice *slice,
			 void *cookie, lu_printer_t p);
	/**
	 * Request type dependent vector of operations.
	 *
	 * Transfer operations depend on transfer mode (cl_req_type). To avoid
	 * passing transfer mode to each and every of these methods, and to
	 * avoid branching on request type inside of the methods, separate
	 * methods for cl_req_type:CRT_READ and cl_req_type:CRT_WRITE are
	 * provided. That is, method invocation usually looks like
	 *
	 *	slice->cp_ops.io[req->crq_type].cpo_method(env, slice, ...);
	 */
	struct {
		/**
		 * Completion handler. This is guaranteed to be eventually
		 * fired after cl_page_prep() or cl_page_make_ready() call.
		 *
		 * This method can be called in a non-blocking context. It is
		 * guaranteed however, that the page involved and its object
		 * are pinned in memory (and, hence, calling cl_page_put() is
		 * safe).
		 *
		 * \see cl_page_completion()
		 */
		void (*cpo_completion)(const struct lu_env *env,
				       const struct cl_page_slice *slice,
				       int ioret);
	} io[CRT_NR];
	/**
	 * Tell transfer engine that only [from, to] part of a page should be
	 * transmitted.
	 *
	 * This is used for immediate transfers.
	 *
	 * \todo XXX this is not a very good interface. It would be much
	 * better if all transfer parameters were supplied as arguments to
	 * cl_io_operations::cio_submit() call, but it is not clear how to do
	 * this for page queues.
	 *
	 * \see cl_page_clip()
	 */
	void (*cpo_clip)(const struct lu_env *env,
			 const struct cl_page_slice *slice, int from, int to);
	/**
	 * Write out a page by the kernel. This is only called by ll_writepage
	 * right now.
	 *
	 * \see cl_page_flush()
	 */
	int (*cpo_flush)(const struct lu_env *env,
			 const struct cl_page_slice *slice,
			 struct cl_io *io);
};
/**
 * Helper macro, dumping detailed information about \a page into a log.
 */
#define CL_PAGE_DEBUG(mask, env, page, format, ...)			\
do {									\
	if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {			\
		LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);	\
		cl_page_print(env, &msgdata, lu_cdebug_printer, page);	\
		CDEBUG(mask, format, ## __VA_ARGS__);			\
	}								\
} while (0)

/**
 * Helper macro, dumping shorter information about \a page into a log.
 */
#define CL_PAGE_HEADER(mask, env, page, format, ...)			\
do {									\
	if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {			\
		LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);	\
		cl_page_header_print(env, &msgdata, lu_cdebug_printer,	\
				     page);				\
		CDEBUG(mask, format, ## __VA_ARGS__);			\
	}								\
} while (0)
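/*
 * Example use (illustrative): emit a detailed page dump when debugging a
 * state problem; D_PAGE is the usual debug mask for page tracing:
 *
 *	CL_PAGE_DEBUG(D_PAGE, env, page, "unexpected state %d\n",
 *		      (int)page->cp_state);
 */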
static inline struct page *cl_page_vmpage(const struct cl_page *page)
{
	LASSERT(page->cp_vmpage != NULL);
	return page->cp_vmpage;
}

static inline pgoff_t cl_page_index(const struct cl_page *cp)
{
	return cl_page_vmpage(cp)->index;
}

/**
 * Check if a cl_page is in use.
 *
 * Client cache holds a refcount, this refcount will be dropped when
 * the page is taken out of cache, see vvp_page_delete().
 */
static inline bool __page_in_use(const struct cl_page *page, int refc)
{
	return (refcount_read(&page->cp_ref) > refc + 1);
}

/**
 * Caller itself holds a refcount of cl_page.
 */
#define cl_page_in_use(pg)	 __page_in_use(pg, 1)
/**
 * Caller doesn't hold a refcount.
 */
#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
/**
 * Extent locking on the client.
 *
 * LAYERING
 *
 * The locking model of the new client code is built around struct cl_lock,
 * a data-type representing an extent lock on a regular file. cl_lock is a
 * layered object (much like cl_object and cl_page), it consists of a header
 * (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to
 * cl_lock::cll_layers list through cl_lock_slice::cls_linkage.
 *
 * Typical cl_lock consists of one layer:
 *
 *     - lov_lock (lov specific data).
 *
 * lov_lock contains an array of sub-locks. Each of these sub-locks is a
 * normal cl_lock: it has a header (struct cl_lock) and a list of layers:
 *
 *     - osc_lock
 *
 * Each sub-lock is associated with a cl_object (representing a stripe
 * sub-object or the file to which the top-level cl_lock is associated),
 * and is linked into that cl_object::coh_locks. In this respect cl_lock is
 * similar to cl_object (that at the lov layer also fans out into multiple
 * sub-objects), and is different from cl_page, that doesn't fan out (there
 * is usually exactly one osc_page for every vvp_page). We shall call the
 * vvp-lov portion of the lock a "top-lock" and its lovsub-osc portion a
 * "sub-lock".
 *
 * LIFE CYCLE
 *
 * cl_lock is a cacheless data container for the requirements of locks to
 * complete the IO. cl_lock is created before I/O starts and destroyed when
 * the I/O is complete.
 *
 * cl_lock depends on LDLM locks to fulfill lock semantics. An LDLM lock is
 * attached to cl_lock at the OSC layer. The LDLM lock is still cacheable.
 *
 * INTERFACE AND USAGE
 *
 * Two major methods are supported for cl_lock: clo_enqueue and clo_cancel.
 * A cl_lock is enqueued by cl_lock_request(), which will call clo_enqueue()
 * methods for each layer to enqueue the lock. At the LOV layer, if a
 * cl_lock consists of multiple sub cl_locks, each sub lock will be enqueued
 * correspondingly. At the OSC layer, the lock enqueue request will tend to
 * reuse a cached LDLM lock; otherwise a new LDLM lock will have to be
 * requested from the server.
 *
 * cl_lock_cancel() must be called to release a cl_lock after use. The
 * clo_cancel() method will be called for each layer to release the resource
 * held by this lock. At the OSC layer, the reference count of the LDLM
 * lock, which is held at clo_enqueue time, is released.
 *
 * An LDLM lock can only be canceled when there is no cl_lock using it.
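 *
 * A minimal usage sketch, based on the description above (lock allocation
 * and error handling elided; \a lock is a cl_lock whose descriptor is
 * filled from the io being executed):
 *
 *	lock->cll_descr.cld_obj   = obj;
 *	lock->cll_descr.cld_start = start_idx;
 *	lock->cll_descr.cld_end   = end_idx;
 *	lock->cll_descr.cld_mode  = CLM_READ;
 *	if (cl_lock_request(env, io, lock) == 0) {
 *		... perform IO under the lock ...
 *		cl_lock_cancel(env, lock);
 *	}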
 *
 * Overall process of the locking during IO operation is as following:
 *
 *     - once parameters for IO are set up in cl_io,
 *       cl_io_operations::cio_lock() is called on each layer. The
 *       responsibility of this method is to add locks, needed by a given
 *       layer, into cl_io.ci_lockset.
 *
 *     - once locks for all layers were collected, they are sorted to avoid
 *       dead-locks (cl_io_locks_sort()), and enqueued.
 *
 *     - when all locks are acquired, IO is performed;
 *
 *     - locks are released after IO is complete.
 *
 * Striping introduces major additional complexity into locking. The
 * fundamental problem is that it is generally unsafe to actively use (hold)
 * two locks on the different OST servers at the same time, as this introduces
 * inter-server dependency and can lead to cascading evictions.
 *
 * Basic solution is to sub-divide large read/write IOs into smaller pieces so
 * that no multi-stripe locks are taken (note that this design abandons POSIX
 * read/write semantics). Such pieces ideally can be executed concurrently. At
 * the same time, certain types of IO cannot be sub-divided, without
 * sacrificing correctness. This includes:
 *
 *  - O_APPEND write, where [0, EOF] lock has to be taken, to guarantee
 *    atomicity;
 *
 *  - ftruncate(fd, offset), where [offset, EOF] lock has to be taken.
 *
 * Also, in the case of read(fd, buf, count) or write(fd, buf, count), where
 * buf is a part of memory mapped Lustre file, a lock or locks protecting buf
 * has to be held together with the usual lock on [offset, offset + count].
 *
 * Interaction with DLM
 *
 * In the expected setup, cl_lock is ultimately backed up by a collection of
 * DLM locks (struct ldlm_lock). Association between cl_lock and DLM lock is
 * implemented in the osc layer, that also matches DLM events (ASTs,
 * cancellation, etc.) into cl_lock_operation calls. See struct osc_lock for
 * a more detailed description of interaction with DLM.
 */
/**
 * Lock description.
 */
struct cl_lock_descr {
	/** Object this lock is granted for. */
	struct cl_object *cld_obj;
	/** Index of the first page protected by this lock. */
	pgoff_t cld_start;
	/** Index of the last page (inclusive) protected by this lock. */
	pgoff_t cld_end;
	/** Group ID, for group lock */
	__u64 cld_gid;
	/** Lock mode. */
	enum cl_lock_mode cld_mode;
	/**
	 * flags to enqueue lock. A combination of bit-flags from
	 * enum cl_enq_flags.
	 */
	__u32 cld_enq_flags;
};

#define DDESCR "%s(%d):[%lu, %lu]:%x"
#define PDESCR(descr)							\
	cl_lock_mode_name((descr)->cld_mode), (descr)->cld_mode,	\
	(descr)->cld_start, (descr)->cld_end, (descr)->cld_enq_flags

const char *cl_lock_mode_name(const enum cl_lock_mode mode);
/**
 * Layered client lock.
 */
struct cl_lock {
	/** List of slices. Immutable after creation. */
	struct list_head cll_layers;
	/** lock attribute, extent, cl_object, etc. */
	struct cl_lock_descr cll_descr;
};

/**
 * Per-layer part of cl_lock
 *
 * \see lov_lock, osc_lock
 */
struct cl_lock_slice {
	struct cl_lock *cls_lock;
	/** Object slice corresponding to this lock slice. Immutable after
	 * creation.
	 */
	struct cl_object *cls_obj;
	const struct cl_lock_operations *cls_ops;
	/** Linkage into cl_lock::cll_layers. Immutable after creation. */
	struct list_head cls_linkage;
};
/**
 * \see lov_lock_ops, osc_lock_ops
 */
struct cl_lock_operations {
	/**
	 * Attempts to enqueue the lock. Called top-to-bottom.
	 *
	 * \retval 0	this layer has enqueued the lock successfully
	 * \retval >0	this layer has enqueued the lock, but needs to wait on
	 *		@anchor for resources
	 * \retval -ve	failure
	 *
	 * \see lov_lock_enqueue(), osc_lock_enqueue()
	 */
	int (*clo_enqueue)(const struct lu_env *env,
			   const struct cl_lock_slice *slice,
			   struct cl_io *io, struct cl_sync_io *anchor);
	/**
	 * Cancel a lock and release its DLM lock ref, but do not cancel the
	 * DLM lock itself.
	 */
	void (*clo_cancel)(const struct lu_env *env,
			   const struct cl_lock_slice *slice);
	/**
	 * Destructor. Frees resources and the slice.
	 *
	 * \see lov_lock_fini(), osc_lock_fini()
	 */
	void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice);
	/**
	 * Optional debugging helper. Prints given lock slice.
	 */
	int (*clo_print)(const struct lu_env *env, void *cookie,
			 lu_printer_t p, const struct cl_lock_slice *slice);
};

#define CL_LOCK_DEBUG(mask, env, lock, format, ...)			\
do {									\
	if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {			\
		LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);	\
		cl_lock_print(env, &msgdata, lu_cdebug_printer, lock);	\
		CDEBUG(mask, format, ## __VA_ARGS__);			\
	}								\
} while (0)

#define CL_LOCK_ASSERT(expr, env, lock) do {				\
	if (likely(expr))						\
		break;							\
									\
	CL_LOCK_DEBUG(D_ERROR, env, lock, "failed at %s.\n", #expr);	\
	LBUG();								\
} while (0)
/** \addtogroup cl_page_list cl_page_list
 * Page list used to perform collective operations on a group of pages.
 *
 * Pages are added to the list one by one. cl_page_list acquires a reference
 * for every page in it. Page list is used to perform collective operations
 * on pages:
 *
 *     - submit pages for an immediate transfer,
 *
 *     - own pages on behalf of certain io (waiting for each page in turn),
 *
 *     - discard pages.
 *
 * When a list is finalized, it releases references on all pages it still has.
 *
 * \todo XXX concurrency control.
 *
 * @{
 */
struct cl_page_list {
	unsigned int		pl_nr;
	struct list_head	pl_pages;
};

/**
 * A 2-queue of pages. A convenience data-type for common use case, 2-queue
 * contains an incoming page list and an outgoing page list.
 */
struct cl_2queue {
	struct cl_page_list c2_qin;
	struct cl_page_list c2_qout;
};
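/*
 * Illustrative sketch of the common 2-queue pattern (assumes helpers
 * cl_2queue_init(), cl_page_list_add() and cl_io_submit_rw() declared
 * elsewhere in the client library; error handling elided). Pages are
 * staged on c2_qin, and the transfer moves successfully submitted pages
 * to c2_qout:
 *
 *	struct cl_2queue *queue = &io->ci_queue;
 *
 *	cl_2queue_init(queue);
 *	cl_page_list_add(&queue->c2_qin, page, true);
 *	rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
 *	(on success, pages to wait for are on &queue->c2_qout)
 */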
/**
 * cl_io represents a high level I/O activity like
 * read(2)/write(2)/truncate(2) system call, or cancellation of an extent
 * lock.
 *
 * cl_io is a layered object, much like cl_{object,page,lock} but with one
 * important distinction. We want to minimize the number of calls to the
 * allocator in the fast path, e.g., in the case of read(2) when everything is
 * cached: the client already owns the lock over the region being read, and
 * data are cached due to read-ahead. To avoid allocation of cl_io layers in
 * such situations, per-layer io state is stored in the session, associated
 * with the io, see struct {vvp,lov,osc}_io for example. Sessions allocation
 * is amortized by using free-lists, see cl_env_get().
 *
 * There is a small predefined number of possible io types, enumerated in enum
 * cl_io_type.
 *
 * cl_io is a state machine, that can be advanced concurrently by multiple
 * threads. It is up to these threads to control the concurrency and,
 * specifically, to detect when io is done, and its state can be safely
 * released.
 *
 * For read/write io the overall execution plan is as following:
 *
 *     (0) initialize io state through all layers;
 *
 *     (1) loop: prepare chunk of work to do
 *
 *     (2) call all layers to collect locks they need to process current chunk
 *
 *     (3) sort all locks to avoid dead-locks, and acquire them
 *
 *     (4) process the chunk: call per-page methods
 *         (cl_io_operations::cio_prepare_write(),
 *         cl_io_operations::cio_commit_write() for write)
 *
 *     (5) release locks
 *
 *     (6) repeat loop.
 *
 * To implement the "parallel IO mode", the lov layer creates sub-io's (lazily
 * to address the allocation efficiency issues mentioned above), and returns
 * with a special error condition from the per-page method when the current
 * sub-io has to block. This causes the io loop to be repeated, and lov
 * switches to the next sub-io in its cl_io_operations::cio_iter_init()
 * implementation.
 */
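/*
 * A condensed sketch of the plan above, as the top level drives it
 * (simplified from the generic cl_io_loop(); error handling elided):
 *
 *	rc = cl_io_init(env, io, CIT_WRITE, obj);
 *	if (rc == 0) {
 *		do {
 *			cl_io_iter_init(env, io);	(1)
 *			cl_io_lock(env, io);		(2)-(3)
 *			cl_io_start(env, io);		(4)
 *			cl_io_end(env, io);
 *			cl_io_unlock(env, io);		(5)
 *			rc = cl_io_iter_fini(env, io);
 *		} while (rc == 0 && io->ci_continue);	(6)
 *	}
 *	cl_io_fini(env, io);
 */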
enum cl_io_type {
	/** read system call */
	CIT_READ = 1,
	/** write system call */
	CIT_WRITE,
	/** truncate, utime system calls */
	CIT_SETATTR,
	/** get data version */
	CIT_DATA_VERSION,
	/**
	 * page fault handling
	 */
	CIT_FAULT,
	/**
	 * fsync system call handling
	 * To write out a range of file
	 */
	CIT_FSYNC,
	/**
	 * glimpse. An io context to acquire glimpse lock.
	 */
	CIT_GLIMPSE,
	/**
	 * Miscellaneous io. This is used for occasional io activity that
	 * doesn't fit into other types. Currently this is used for:
	 *
	 *     - cancellation of an extent lock. This io exists as a context
	 *       to write dirty pages from under the lock being canceled back
	 *       to the server;
	 *
	 *     - VM induced page write-out. An io context for writing a page
	 *       out for memory cleansing;
	 *
	 *     - grouplock. An io context to acquire group lock.
	 *
	 * CIT_MISC io is used simply as a context in which locks and pages
	 * are manipulated. Such io has no internal "process", that is,
	 * cl_io_loop() is never called for it.
	 */
	CIT_MISC,
	/**
	 * ladvise handling
	 * To give advice about access of a file
	 */
	CIT_LADVISE,
	/**
	 * SEEK_HOLE/SEEK_DATA handling to search holes or data
	 * across all file objects
	 */
	CIT_LSEEK,
	CIT_OP_NR
};

/**
 * States of cl_io state machine
 */
enum cl_io_state {
	/** Not initialized. */
	CIS_ZERO,
	/** Initialized. */
	CIS_INIT,
	/** IO iteration started. */
	CIS_IT_STARTED,
	/** Locks taken. */
	CIS_LOCKED,
	/** Actual IO is in progress. */
	CIS_IO_GOING,
	/** IO for the current iteration finished. */
	CIS_IO_FINISHED,
	/** Locks released. */
	CIS_UNLOCKED,
	/** Iteration completed. */
	CIS_IT_ENDED,
	/** cl_io finalized. */
	CIS_FINI
};
/**
 * IO state private for a layer.
 *
 * This is usually embedded into layer session data, rather than allocated
 * dynamically.
 *
 * \see vvp_io, lov_io, osc_io
 */
struct cl_io_slice {
	struct cl_io *cis_io;
	/** corresponding object slice. Immutable after creation. */
	struct cl_object *cis_obj;
	/** io operations. Immutable after creation. */
	const struct cl_io_operations *cis_iop;
	/**
	 * linkage into a list of all slices for a given cl_io, hanging off
	 * cl_io::ci_layers. Immutable after creation.
	 */
	struct list_head cis_linkage;
};

typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
			      struct folio_batch *);
struct cl_read_ahead {
	/* Maximum page index where the readahead window ends.
	 * This is determined by DLM lock coverage, RPC and stripe boundary.
	 * cra_end_idx is included.
	 */
	pgoff_t cra_end_idx;
	/* optimal RPC size for this read, by pages */
	unsigned long cra_rpc_pages;
	/* Release callback. If readahead holds resources underneath, this
	 * function should be called to release them.
	 */
	void (*cra_release)(const struct lu_env *env,
			    struct cl_read_ahead *ra);
	/* Callback data for cra_release routine */
	void *cra_dlmlock;
	void *cra_oio;
	/* whether lock is in contention */
	bool cra_contention;
};

static inline void cl_read_ahead_release(const struct lu_env *env,
					 struct cl_read_ahead *ra)
{
	if (ra->cra_release != NULL)
		ra->cra_release(env, ra);
	memset(ra, 0, sizeof(*ra));
}
/**
 * Per-layer io operations.
 * \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
 */
struct cl_io_operations {
	/**
	 * Vector of io state transition methods for every io type.
	 *
	 * \see cl_page_operations::io
	 */
	struct {
		/**
		 * Prepare io iteration at a given layer.
		 *
		 * Called top-to-bottom at the beginning of each iteration of
		 * "io loop" (if it makes sense for this type of io). Here
		 * the layer selects what work it will do during this
		 * iteration.
		 *
		 * \see cl_io_operations::cio_iter_fini()
		 */
		int (*cio_iter_init)(const struct lu_env *env,
				     const struct cl_io_slice *slice);
		/**
		 * Finalize io iteration.
		 *
		 * Called bottom-to-top at the end of each iteration of "io
		 * loop". Here layers can decide whether IO has to be
		 * continued.
		 *
		 * \see cl_io_operations::cio_iter_init()
		 */
		void (*cio_iter_fini)(const struct lu_env *env,
				      const struct cl_io_slice *slice);
		/**
		 * Collect locks for the current iteration of io.
		 *
		 * Called top-to-bottom to collect all locks necessary for
		 * this iteration. This method shouldn't actually enqueue
		 * anything, instead it should post a lock through
		 * cl_io_lock_add(). Once all locks are collected, they are
		 * sorted and enqueued in the proper order.
		 */
		int (*cio_lock)(const struct lu_env *env,
				const struct cl_io_slice *slice);
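		/*
		 * Illustrative sketch of a layer's cio_lock(): build a
		 * descriptor for the byte range of the current iteration
		 * and post it via cl_io_lock_add(). The "foo" layer, the
		 * link allocation and the index computation are
		 * hypothetical simplifications:
		 *
		 *	static int foo_io_lock(const struct lu_env *env,
		 *			       const struct cl_io_slice *ios)
		 *	{
		 *		struct cl_io_lock_link *link = ...;
		 *
		 *		link->cill_descr.cld_obj   = ios->cis_obj;
		 *		link->cill_descr.cld_mode  = CLM_WRITE;
		 *		link->cill_descr.cld_start = start_index;
		 *		link->cill_descr.cld_end   = end_index;
		 *		return cl_io_lock_add(env, ios->cis_io, link);
		 *	}
		 */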
		/**
		 * Finalize unlocking.
		 *
		 * Called bottom-to-top to finish layer specific unlocking
		 * functionality, after generic code released all locks
		 * acquired by cl_io_operations::cio_lock().
		 */
		void (*cio_unlock)(const struct lu_env *env,
				   const struct cl_io_slice *slice);
		/**
		 * Start io iteration.
		 *
		 * Once all locks are acquired, called top-to-bottom to
		 * commence actual IO. In the current implementation,
		 * top-level vvp_io_{read,write}_start() does all the work
		 * synchronously by calling generic_file_*(), so other layers
		 * are called when everything is done.
		 */
		int (*cio_start)(const struct lu_env *env,
				 const struct cl_io_slice *slice);
		/**
		 * Called top-to-bottom at the end of io loop. Here a layer
		 * might wait for an unfinished asynchronous io.
		 */
		void (*cio_end)(const struct lu_env *env,
				const struct cl_io_slice *slice);
		/**
		 * Called bottom-to-top to notify layers that read/write IO
		 * iteration finished, with \a nob bytes transferred.
		 */
		void (*cio_advance)(const struct lu_env *env,
				    const struct cl_io_slice *slice,
				    size_t nob);
		/**
		 * Called once per io, bottom-to-top to release io resources.
		 */
		void (*cio_fini)(const struct lu_env *env,
				 const struct cl_io_slice *slice);
	} op[CIT_OP_NR];
	/**
	 * Submit pages from \a queue->c2_qin for IO, and move
	 * successfully submitted pages into \a queue->c2_qout. Return
	 * non-zero if failed to submit even a single page. If
	 * submission failed after some pages were moved into \a
	 * queue->c2_qout, the completion callback with non-zero ioret is
	 * executed on them.
	 */
	int (*cio_submit)(const struct lu_env *env,
			  struct cl_io *io,
			  const struct cl_io_slice *slice,
			  enum cl_req_type crt, struct cl_2queue *queue);
	/**
	 * Queue async page for write.
	 * The difference between cio_submit and cio_queue is that
	 * cio_submit is for urgent requests.
	 */
	int (*cio_commit_async)(const struct lu_env *env,
				const struct cl_io_slice *slice,
				struct cl_page_list *queue, int from, int to,
				cl_commit_cbt cb);
	/**
	 * Release active extent.
	 */
	void (*cio_extent_release)(const struct lu_env *env,
				   const struct cl_io_slice *slice);
	/**
	 * Decide maximum read ahead extent
	 *
	 * \pre io->ci_type == CIT_READ
	 */
	int (*cio_read_ahead)(const struct lu_env *env,
			      const struct cl_io_slice *slice,
			      pgoff_t start, struct cl_read_ahead *ra);
	/**
	 * Reserve LRU slots before IO.
	 */
	int (*cio_lru_reserve)(const struct lu_env *env,
			       const struct cl_io_slice *slice,
			       loff_t pos, size_t bytes);
	/**
	 * Optional debugging helper. Print given io slice.
	 */
	int (*cio_print)(const struct lu_env *env, void *cookie,
			 lu_printer_t p, const struct cl_io_slice *slice);
};
/**
 * Flags to lock enqueue procedure.
 */
enum cl_enq_flags {
	/**
	 * instruct server to not block, if conflicting lock is found.
	 * Instead -EAGAIN is returned immediately.
	 */
	CEF_NONBLOCK		= 0x00000001,
	/**
	 * Tell lower layers this is a glimpse request, translated to
	 * LDLM_FL_HAS_INTENT at LDLM layer.
	 *
	 * Also, because glimpse locks never block other locks, we count this
	 * as automatically compatible with other osc locks.
	 * (see osc_lock_compatible)
	 */
	CEF_GLIMPSE		= 0x00000002,
	/**
	 * tell the server to instruct (though a flag in the blocking ast) an
	 * owner of the conflicting lock, that it can drop dirty pages
	 * protected by this lock, without sending them to the server.
	 */
	CEF_DISCARD_DATA	= 0x00000004,
	/**
	 * tell the sub layers that it must be a `real' lock. This is used
	 * for mmapped-buffer locks, glimpse locks, manually requested locks
	 * (LU_LADVISE_LOCKAHEAD) that must never be converted into lockless
	 * mode.
	 *
	 * \see vvp_mmap_locks(), cl_glimpse_lock, cl_request_lock().
	 */
	CEF_MUST		= 0x00000008,
	/**
	 * tell the sub layers to never request a `real' lock. This flag is
	 * not used currently.
	 *
	 * cl_io::ci_lockreq and CEF_{MUST,NEVER} flags specify lockless
	 * conversion policy: ci_lockreq describes generic information of lock
	 * requirement for this IO, especially for locks which belong to the
	 * object doing IO; however, the lock itself may have precise
	 * requirements that are described by the enqueue flags.
	 */
	CEF_NEVER		= 0x00000010,
	/**
	 * tell the dlm layer this is a speculative lock request
	 * speculative lock requests are locks which are not requested as part
	 * of an I/O operation. Instead, they are requested because we expect
	 * to use them in the future. They are requested asynchronously at the
	 * ptlrpc layer.
	 *
	 * Currently used for asynchronous glimpse locks and manually
	 * requested locks (LU_LADVISE_LOCKAHEAD).
	 */
	CEF_SPECULATIVE		= 0x00000020,
	/**
	 * enqueue a lock to test DLM lock existence.
	 */
	CEF_PEEK		= 0x00000040,
	/**
	 * Lock match only. Used by group lock in I/O as group lock
	 * is known to exist.
	 */
	CEF_LOCK_MATCH		= 0x00000080,
	/**
	 * tell the DLM layer to lock only the requested range
	 */
	CEF_LOCK_NO_EXPAND	= 0x00000100,
	/**
	 * mask of enq_flags.
	 */
	CEF_MASK		= 0x000001ff,
};
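/*
 * Purely illustrative combination: a glimpse-style enqueue that must not
 * block on a conflict and must remain a "real" DLM lock would set up its
 * descriptor as
 *
 *	descr->cld_mode      = CLM_READ;
 *	descr->cld_enq_flags = CEF_GLIMPSE | CEF_NONBLOCK | CEF_MUST;
 */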
/**
 * Link between lock and io. Intermediate structure is needed, because the
 * same lock can be part of multiple io's simultaneously.
 */
struct cl_io_lock_link {
	/** linkage into one of cl_lockset lists. */
	struct list_head cill_linkage;
	struct cl_lock cill_lock;
	/** optional destructor */
	void (*cill_fini)(const struct lu_env *env,
			  struct cl_io_lock_link *link);
};
#define cill_descr	cill_lock.cll_descr

/**
 * Lock-set represents a collection of locks, that io needs at a
 * time. Generally speaking, client tries to avoid holding multiple locks
 * when possible, because
 *
 *      - holding extent locks over multiple ost's introduces the danger of
 *        "cascading timeouts";
 *
 *      - holding multiple locks over the same ost is still dead-lock prone,
 *        see comment in osc_lock_enqueue(),
 *
 * but there are certain situations where this is unavoidable:
 *
 *      - O_APPEND writes have to take [0, EOF] lock for correctness;
 *
 *      - truncate has to take [new-size, EOF] lock for correctness;
 *
 *      - SNS has to take locks across full stripe for correctness;
 *
 *      - in the case when user level buffer, supplied to {read,write}(file0),
 *        is a part of a memory mapped lustre file, client has to take a dlm
 *        lock on file0, and all files that back up the buffer (or a part of
 *        the buffer, that is being processed in the current chunk, in any
 *        case, there are situations where at least 2 locks are necessary).
 *
 * In such cases we at least try to take locks in the same consistent
 * order. To this end, all locks are first collected, then sorted, and then
 * enqueued.
 */
struct cl_lockset {
	/** locks to be acquired. */
	struct list_head cls_todo;
	/** locks acquired. */
	struct list_head cls_done;
};
/**
 * Lock requirements (demand) for IO. It should be cl_io_lock_req,
 * but 'req' is always to be thought as 'request' :-)
 */
enum cl_io_lock_dmd {
	/** Always lock data (e.g., O_APPEND). */
	CILR_MANDATORY = 0,
	/** Layers are free to decide between local and global locking. */
	CILR_MAYBE,
	/** Never lock: there is no cache (e.g., liblustre). */
	CILR_NEVER
};

enum cl_fsync_mode {
	/** start writeback, do not wait for them to finish */
	CL_FSYNC_NONE		= 0,
	/** start writeback and wait for them to finish */
	CL_FSYNC_LOCAL		= 1,
	/** discard all of dirty pages in a specific file range */
	CL_FSYNC_DISCARD	= 2,
	/** start writeback and make sure they have reached storage before
	 * return. OST_SYNC RPC must be issued and finished
	 */
	CL_FSYNC_ALL		= 3,
	/** start writeback, thus the kernel can reclaim some memory */
	CL_FSYNC_RECLAIM	= 4,
};
struct cl_io_rw_common {
	loff_t	crw_pos;
	size_t	crw_bytes;
	int	crw_nonblock;
};

enum cl_setattr_subtype {
	/** regular setattr **/
	CL_SETATTR_REG = 1,
	/** truncate(2) **/
	CL_SETATTR_TRUNC,
	/** fallocate(2) - mode preallocate **/
	CL_SETATTR_FALLOCATE
};

struct cl_io_range {
	loff_t	cir_pos;
	size_t	cir_count;
};

struct cl_io_pt {
	struct cl_io_pt *cip_next;
	struct kiocb cip_iocb;
	struct iov_iter cip_iter;
	struct file *cip_file;
	enum cl_io_type cip_iot;
	unsigned int cip_need_restart:1;
	loff_t cip_pos;
	size_t cip_count;
	ssize_t cip_result;
};
/**
 * State for io.
 *
 * cl_io is shared by all threads participating in this IO (in current
 * implementation only one thread advances IO, but parallel IO design and
 * concurrent copy_*_user() require multiple threads acting on the same IO. It
 * is up to these threads to serialize their activities, including updates to
 * mutable cl_io fields.
 */
struct cl_io {
	/** type of this IO. Immutable after creation. */
	enum cl_io_type ci_type;
	/** current state of cl_io state machine. */
	enum cl_io_state ci_state;
	/** main object this io is against. Immutable after creation. */
	struct cl_object *ci_obj;
	/** top level dio_aio */
	struct cl_dio_aio *ci_dio_aio;
	/**
	 * Upper layer io, of which this io is a part of. Immutable after
	 * creation.
	 */
	struct cl_io *ci_parent;
	/** List of slices. Immutable after creation. */
	struct list_head ci_layers;
	/** list of locks (to be) acquired by this io. */
	struct cl_lockset ci_lockset;
	/** lock requirements, this is just a help info for sublayers. */
	enum cl_io_lock_dmd ci_lockreq;
	/** layout version when this IO occurs */
	__u32 ci_layout_version;
	union {
		struct cl_rd_io {
			struct cl_io_rw_common rd;
		} ci_rd;
		struct cl_wr_io {
			struct cl_io_rw_common wr;
			int wr_append;
		} ci_wr;
		struct cl_io_rw_common ci_rw;
		struct cl_setattr_io {
			struct ost_lvb sa_attr;
			unsigned int sa_attr_flags;
			unsigned int sa_avalid; /* ATTR_* */
			unsigned int sa_xvalid; /* OP_XVALID */
			int sa_stripe_index;
			struct ost_layout sa_layout;
			const struct lu_fid *sa_parent_fid;
			/* SETATTR interface is used for regular setattr, */
			/* truncate(2) and fallocate(2) subtypes */
			enum cl_setattr_subtype sa_subtype;
			/* The following are used for fallocate(2) */
			int sa_falloc_mode;
			loff_t sa_falloc_offset;
			loff_t sa_falloc_end;
			uid_t sa_falloc_uid;
			gid_t sa_falloc_gid;
			__u32 sa_falloc_projid;
		} ci_setattr;
		struct cl_data_version_io {
			u64 dv_data_version;
			u32 dv_layout_version;
			int dv_flags;
		} ci_data_version;
		struct cl_fault_io {
			/** page index within file. */
			pgoff_t ft_index;
			/** valid bytes on the faulted page. */
			size_t ft_bytes;
			/** writable page? for nopage() only */
			int ft_writable;
			/** page of an executable? */
			int ft_executable;
			/** page_mkwrite() */
			int ft_mkwrite;
			/** resulting page */
			struct cl_page *ft_page;
		} ci_fault;
		struct cl_fsync_io {
			loff_t fi_start;
			loff_t fi_end;
			/** file system level fid */
			struct lu_fid *fi_fid;
			enum cl_fsync_mode fi_mode;
			/* how many pages were written/discarded */
			unsigned int fi_nr_written;
		} ci_fsync;
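		/*
		 * Illustrative sketch (hypothetical caller, error handling
		 * elided): an fsync over a whole file that must reach stable
		 * storage would fill this member as
		 *
		 *	io->u.ci_fsync.fi_start = 0;
		 *	io->u.ci_fsync.fi_end   = OBD_OBJECT_EOF;
		 *	io->u.ci_fsync.fi_mode  = CL_FSYNC_ALL;
		 *	io->u.ci_fsync.fi_nr_written = 0;
		 *
		 * before initializing and looping the CIT_FSYNC io.
		 */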
		struct cl_ladvise_io {
			__u64 lio_start;
			__u64 lio_end;
			/** file system level fid */
			struct lu_fid *lio_fid;
			enum lu_ladvise_type lio_advice;
			__u64 lio_flags;
		} ci_ladvise;
		struct cl_lseek_io {
			loff_t ls_start;
			loff_t ls_result;
			int ls_whence;
		} ci_lseek;
		struct cl_misc_io {
			time64_t lm_next_rpc_time;
		} ci_misc;
	} u;
	struct cl_2queue ci_queue;
	size_t ci_bytes;
	int ci_result;
	unsigned int ci_continue:1,
	/**
	 * This io has held grouplock, to inform sublayers that
	 * don't do lockless i/o.
	 */
		     ci_no_srvlock:1,
	/**
	 * The whole IO needs to be restarted because the layout has changed
	 */
		     ci_need_restart:1,
	/**
	 * to not refresh layout - the IO issuer knows that the layout won't
	 * change (page operations, layout change causes all pages to be
	 * discarded), or it doesn't matter if it changes (sync).
	 */
		     ci_ignore_layout:1,
	/**
	 * Need MDS intervention to complete a write.
	 * Write intent is required for the following cases:
	 * 1. component being written is not initialized, or
	 * 2. the mirrored files are NOT in WRITE_PENDING state.
	 */
		     ci_need_write_intent:1,
	/**
	 * File is in PCC-RO state, need MDS intervention to complete
	 * a data modifying operation.
	 */
		     ci_need_pccro_clear:1,
	/**
	 * Check if layout changed after the IO finishes. Mainly for HSM
	 * requirement. If IO occurs to open files, it doesn't need to
	 * verify layout because HSM won't release open files.
	 * Right now, only two operations need to verify layout: glimpse
	 * and setattr.
	 */
		     ci_verify_layout:1,
	/**
	 * file is released, restore has to be triggered by vvp layer
	 */
		     ci_restore_needed:1,
	/**
	 * O_noatime
	 */
		     ci_noatime:1,
	/* Tell sublayers not to expand LDLM locks requested for this IO */
		     ci_lock_no_expand:1,
	/**
	 * Set if non-delay RPC should be used for this IO.
	 *
	 * If this file has multiple mirrors, and if the OSTs of the current
	 * mirror are inaccessible, non-delay RPC would error out quickly so
	 * that the upper layer can try to access the next mirror.
	 */
		     ci_ndelay:1,
	/**
	 * Set if IO is triggered by async workqueue readahead.
	 */
		     ci_async_readahead:1,
	/**
	 * Ignore lockless and do normal locking for this io.
	 */
		     ci_dio_lock:1,
	/**
	 * Set if we've tried all mirrors for this read IO; if it's not set,
	 * the read IO will check to-be-read OSCs' status, and fast-switch to
	 * another mirror if some of the OSTs are not healthy.
	 */
		     ci_tried_all_mirrors:1,
	/**
	 * Random read hints, readahead will be disabled.
	 */
		     ci_rand_read:1,
	/**
	 * Sequential read hints.
	 */
		     ci_seq_read:1,
	/**
	 * Do parallel (async) submission of DIO RPCs. Note DIO is still sync
	 * to userspace, only the RPCs are submitted async, then waited for at
	 * the llite layer before returning.
	 */
		     ci_parallel_dio:1,
	/**
	 * this DIO is at least partly unaligned, and so the unaligned DIO
	 * path is being used for this entire IO
	 */
		     ci_unaligned_dio:1,
	/**
	 * there is a compat issue with unupgraded ZFS targets which means we
	 * must refuse to do unaligned DIO to these targets, so this is used
	 * to annotate that in the IO (since we learn if there is a problematic
	 * OST/MDT target as we build the IO)
	 */
		     ci_refuse_unaligned_dio:1,
	/**
	 * there is an interop issue with unpatched clients/servers that
	 * exceed 4k read/write offsets with I/O exceeding LNET_MTU.
	 * This flag is cleared if a target is not patched.
	 */
		     ci_allow_unaligned_dio:1,
	/**
	 * Bypass quota check
	 */
		     ci_noquota:1,
	/**
	 * io_uring direct IO with flags IOCB_NOWAIT.
	 */
		     ci_iocb_nowait:1,
	/**
	 * The filesystem must exclusively acquire invalidate_lock before
	 * invalidating page cache in truncate / hole punch / DLM extent
	 * lock blocking AST path (and thus calling into ->invalidatepage)
	 * to block races between page cache invalidation and page cache
	 * filling functions (fault, read, ...)
	 */
		     ci_invalidate_page_cache:1,
	/* was this IO switched from BIO to DIO for hybrid IO? */
		     ci_hybrid_switched:1;

	/**
	 * How many times the read has retried before this one.
	 * Set by the top level and consumed by the LOV.
	 */
	unsigned int ci_ndelay_tried;
	/**
	 * Designated mirror index for this I/O.
	 */
	unsigned int ci_designated_mirror;
	/**
	 * Number of pages owned by this IO. For invariant checking.
	 */
	unsigned int ci_owned_nr;
	/**
	 * Range of write intent. Valid if ci_need_write_intent is set.
	 */
	struct lu_extent ci_write_intent;
};
1982 * Per-transfer attributes.
1984 struct cl_req_attr {
1985 enum cl_req_type cra_type;
1987 struct cl_page *cra_page;
1988 /** Generic attributes for the server consumption. */
1989 struct obdo *cra_oa;
1990 /** process jobid/uid/gid performing the io */
struct job_info cra_jobinfo;
};
enum cache_stats_item {
        /** how many cache lookups were performed */
        CS_lookup = 0,
        /** how many times cache lookup resulted in a hit */
        CS_hit,
        /** how many entities are in the cache right now */
        CS_total,
        /** how many entities in the cache are actively used (and cannot be
         * evicted) right now
         */
        CS_busy,
        /** how many entities were created at all */
        CS_create,
        CS_NR
};
2010 #define CS_NAMES { "lookup", "hit", "total", "busy", "create" }
2013 * Stats for a generic cache (similar to inode, lu_object, etc. caches).
2015 struct cache_stats {
2016 const char *cs_name;
2017 atomic_t cs_stats[CS_NR];
2020 /** These are not exported so far */
2021 void cache_stats_init(struct cache_stats *cs, const char *name);
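/*
 * Illustrative sketch (not part of the exported API): how a cache_stats
 * block could be dumped through a seq_file, using the CS_NAMES/CS_NR
 * definitions above. The function name is hypothetical.
 */
static inline void cache_stats_example_print(struct cache_stats *cs,
                                             struct seq_file *m)
{
        static const char * const names[] = CS_NAMES;
        int i;

        for (i = 0; i < CS_NR; i++)
                seq_printf(m, "%s.%s: %d\n", cs->cs_name, names[i],
                           atomic_read(&cs->cs_stats[i]));
}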
* Client-side site. This represents a particular client stack. "Global"
* variables should (directly or indirectly) be added here to allow multiple
* clients to co-exist in a single address space.
struct cl_site {
2029 struct lu_site cs_lu;
* Statistical counters. Atomics do not scale; something better, like
* per-cpu counters, is needed.
2034 * These are exported as /proc/fs/lustre/llite/.../site
2036 * When interpreting keep in mind that both sub-locks (and sub-pages)
2037 * and top-locks (and top-pages) are accounted here.
2039 struct cache_stats cs_pages;
atomic_t cs_pages_state[CPS_NR];
};
2043 int cl_site_init(struct cl_site *s, struct cl_device *top);
2044 void cl_site_fini(struct cl_site *s);
2045 void cl_stack_fini(const struct lu_env *env, struct cl_device *cl);
2048 * Output client site statistical counters into a buffer. Suitable for
2049 * ll_rd_*()-style functions.
2051 int cl_site_stats_print(const struct cl_site *site, struct seq_file *m);
* Type conversion and accessor functions.
2059 static inline struct cl_site *lu2cl_site(const struct lu_site *site)
2061 return container_of(site, struct cl_site, cs_lu);
2064 static inline struct cl_device *lu2cl_dev(const struct lu_device *d)
2066 LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d));
2067 return container_of_safe(d, struct cl_device, cd_lu_dev);
2070 static inline struct lu_device *cl2lu_dev(struct cl_device *d)
2072 return &d->cd_lu_dev;
2075 static inline struct cl_object *lu2cl(const struct lu_object *o)
2077 LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
2078 return container_of_safe(o, struct cl_object, co_lu);
2081 static inline const struct cl_object_conf *
2082 lu2cl_conf(const struct lu_object_conf *conf)
2084 return container_of_safe(conf, struct cl_object_conf, coc_lu);
2087 static inline struct cl_object *cl_object_next(const struct cl_object *obj)
2089 return obj ? lu2cl(lu_object_next(&obj->co_lu)) : NULL;
2092 static inline struct cl_object_header *luh2coh(const struct lu_object_header *h)
2094 return container_of_safe(h, struct cl_object_header, coh_lu);
2097 static inline struct cl_site *cl_object_site(const struct cl_object *obj)
2099 return lu2cl_site(obj->co_lu.lo_dev->ld_site);
static inline
struct cl_object_header *cl_object_header(const struct cl_object *obj)
2105 return luh2coh(obj->co_lu.lo_header);
2108 static inline int cl_device_init(struct cl_device *d, struct lu_device_type *t)
2110 return lu_device_init(&d->cd_lu_dev, t);
2113 static inline void cl_device_fini(struct cl_device *d)
2115 lu_device_fini(&d->cd_lu_dev);
2118 void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
2119 struct cl_object *obj,
2120 const struct cl_page_operations *ops);
2121 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
2122 struct cl_object *obj,
2123 const struct cl_lock_operations *ops);
2124 void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
2125 struct cl_object *obj, const struct cl_io_operations *ops);
2127 struct cl_object *cl_object_top(struct cl_object *o);
2128 struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd,
2129 const struct lu_fid *fid,
2130 const struct cl_object_conf *c);
2132 int cl_object_header_init(struct cl_object_header *h);
2133 void cl_object_header_fini(struct cl_object_header *h);
2134 void cl_object_put(const struct lu_env *env, struct cl_object *o);
2135 void cl_object_get(struct cl_object *o);
2136 void cl_object_attr_lock(struct cl_object *o);
2137 void cl_object_attr_unlock(struct cl_object *o);
2138 int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
2139 struct cl_attr *attr);
2140 int cl_object_attr_update(const struct lu_env *env, struct cl_object *obj,
2141 const struct cl_attr *attr, unsigned int valid);
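/*
 * Illustrative sketch: reading an object's data attributes. cl_attr reads
 * and updates must happen under cl_object_attr_lock(); the helper name and
 * the use of the cat_size member are examples only.
 */
static inline int cl_object_size_example(const struct lu_env *env,
                                         struct cl_object *obj, loff_t *size)
{
        struct cl_attr attr = { 0 };
        int rc;

        cl_object_attr_lock(obj);
        rc = cl_object_attr_get(env, obj, &attr);
        cl_object_attr_unlock(obj);
        if (rc == 0)
                *size = attr.cat_size;
        return rc;
}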
2142 void cl_object_dirty_for_sync(const struct lu_env *env, struct cl_object *obj);
2143 int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
2144 struct ost_lvb *lvb);
2145 int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
2146 const struct cl_object_conf *conf);
2147 int cl_object_prune(const struct lu_env *env, struct cl_object *obj);
2148 void cl_object_kill(const struct lu_env *env, struct cl_object *obj);
2149 int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
2150 struct lov_user_md __user *lum, size_t size);
int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj,
                     struct ll_fiemap_info_key *fmkey, struct fiemap *fiemap,
                     size_t *buflen);
2154 int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj,
2155 struct cl_layout *cl);
2156 loff_t cl_object_maxbytes(struct cl_object *obj);
2157 int cl_object_flush(const struct lu_env *env, struct cl_object *obj,
2158 struct ldlm_lock *lock);
2159 int cl_object_inode_ops(const struct lu_env *env, struct cl_object *obj,
2160 enum coo_inode_opc opc, void *data);
2164 * Returns true, iff \a o0 and \a o1 are slices of the same object.
2166 static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1)
2168 return cl_object_header(o0) == cl_object_header(o1);
2171 static inline void cl_object_page_init(struct cl_object *clob, int size)
2173 clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
2174 cl_object_header(clob)->coh_page_bufsize += round_up(size, 8);
2175 WARN_ON(cl_object_header(clob)->coh_page_bufsize > 512);
static inline void *cl_object_page_slice(struct cl_object *clob,
                                         struct cl_page *page)
{
        return (void *)((char *)page + clob->co_slice_off);
}
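/*
 * Illustrative sketch: how a layer finds its per-page state. At object
 * initialization the layer reserves space for its slice with
 * cl_object_page_init(clob, sizeof(struct example_page_slice)), and
 * cl_object_page_slice() later maps a cl_page to that reservation.
 * "example_page_slice" is a hypothetical stand-in for a real layer type.
 */
struct example_page_slice {
        struct cl_page_slice eps_cl;
};

static inline struct example_page_slice *
cl2example_page_slice(struct cl_object *clob, struct cl_page *page)
{
        return cl_object_page_slice(clob, page);
}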
2185 * Return refcount of cl_object.
2187 static inline int cl_object_refc(struct cl_object *clob)
2189 struct lu_object_header *header = clob->co_lu.lo_header;
2191 return atomic_read(&header->loh_ref);
2195 struct cl_page *cl_page_find(const struct lu_env *env,
2196 struct cl_object *obj,
2197 pgoff_t idx, struct page *vmpage,
2198 enum cl_page_type type);
2199 struct cl_page *cl_page_alloc(const struct lu_env *env,
2200 struct cl_object *o, pgoff_t ind,
2201 struct page *vmpage,
2202 enum cl_page_type type);
2203 void cl_page_get(struct cl_page *page);
2204 void cl_page_put(const struct lu_env *env,
2205 struct cl_page *page);
2206 void cl_batch_put(const struct lu_env *env, struct cl_page *page,
2207 struct folio_batch *fbatch);
2208 void cl_page_print(const struct lu_env *env, void *cookie,
2209 lu_printer_t printer, const struct cl_page *pg);
2210 void cl_page_header_print(const struct lu_env *env, void *cookie,
2211 lu_printer_t printer, const struct cl_page *pg);
2212 struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj);
2217 * Functions dealing with the ownership of page by io.
2220 int cl_page_own(const struct lu_env *env, struct cl_io *io,
2221 struct cl_page *page);
2222 int cl_page_own_try(const struct lu_env *env,
2223 struct cl_io *io, struct cl_page *page);
2224 void cl_page_assume(const struct lu_env *env,
2225 struct cl_io *io, struct cl_page *page);
2226 void cl_page_unassume(const struct lu_env *env,
2227 struct cl_io *io, struct cl_page *pg);
2228 void cl_page_disown(const struct lu_env *env, struct cl_io *io,
2229 struct cl_page *page);
2230 int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io);
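/*
 * Illustrative sketch: the ownership protocol an io follows before touching
 * a page. cl_page_own() may block waiting for the page state; the helper
 * name is hypothetical and error handling is minimal.
 */
static inline int cl_page_own_example(const struct lu_env *env,
                                      struct cl_io *io, struct cl_page *page)
{
        int rc;

        rc = cl_page_own(env, io, page);
        if (rc != 0)
                return rc;      /* e.g. the page is being destroyed */

        LASSERT(cl_page_is_owned(page, io));
        /* ... operate on the page while owning it exclusively ... */
        cl_page_disown(env, io, page);
        return 0;
}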
2235 * Functions dealing with the preparation of a page for a transfer, and
2236 * tracking transfer state.
2238 int cl_page_prep(const struct lu_env *env, struct cl_io *io,
2239 struct cl_page *pg, enum cl_req_type crt);
2240 void cl_page_completion(const struct lu_env *env, struct cl_page *pg,
2241 enum cl_req_type crt, int ioret);
2242 int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
2243 enum cl_req_type crt);
2244 int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
2245 struct cl_page *pg, enum cl_req_type crt);
void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
                  int from, int to);
2248 int cl_page_flush(const struct lu_env *env, struct cl_io *io,
2249 struct cl_page *pg);
2252 * \name helper routines
2253 * Functions to discard, delete and export a cl_page.
2255 void cl_page_discard(const struct lu_env *env, struct cl_io *io,
2256 struct cl_page *pg);
2257 void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
void cl_page_touch(const struct lu_env *env, const struct cl_page *pg,
                   size_t to);
2260 void cl_lock_print(const struct lu_env *env, void *cookie,
2261 lu_printer_t printer, const struct cl_lock *lock);
2262 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2263 lu_printer_t printer,
2264 const struct cl_lock_descr *descr);
* Data structure managing a client's cached pages. A count of
* "unstable" pages and an LRU of clean pages are maintained;
* "unstable" pages are pages pinned by the ptlrpc
* layer for recovery purposes.
2272 struct cl_client_cache {
* Reference count of the client cache:
* # of users (OSCs) + 2 (held by llite and lov)
2277 refcount_t ccc_users;
* # of threads doing shrinking
2281 unsigned int ccc_lru_shrinkers;
2283 * # of LRU entries available
2285 atomic_long_t ccc_lru_left;
* List of entities (OSCs) for this LRU cache
2289 struct list_head ccc_lru;
2291 * Max # of LRU entries
2293 unsigned long ccc_lru_max;
2295 * Lock to protect ccc_lru list
2297 spinlock_t ccc_lru_lock;
2299 * Set if unstable check is enabled
2301 unsigned int ccc_unstable_check:1;
2303 * # of unstable pages for this mount point
2305 atomic_long_t ccc_unstable_nr;
2307 * Serialize max_cache_mb write operation
struct mutex ccc_max_cache_mb_lock;
};
2312 * cl_cache functions
2314 struct cl_client_cache *cl_cache_init(unsigned long lru_page_max);
2315 void cl_cache_incref(struct cl_client_cache *cache);
2316 void cl_cache_decref(struct cl_client_cache *cache);
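/*
 * Illustrative sketch of the cl_client_cache lifetime: the cache is created
 * once at mount time, every sharer takes a reference with cl_cache_incref(),
 * and cl_cache_decref() frees it when the last reference drops. The LRU
 * capacity below is an arbitrary example and the helper is hypothetical.
 */
static inline struct cl_client_cache *cl_cache_example_setup(void)
{
        /* capacity of 32768 LRU pages == 128MB with 4KB pages */
        struct cl_client_cache *cache = cl_cache_init(32768);

        if (cache != NULL)
                cl_cache_incref(cache);         /* e.g. on behalf of lov */
        return cache;
}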
2319 int cl_lock_request(const struct lu_env *env, struct cl_io *io,
2320 struct cl_lock *lock);
2321 int cl_lock_init(const struct lu_env *env, struct cl_lock *lock,
2322 const struct cl_io *io);
2323 void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock);
2324 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
2325 const struct lu_device_type *dtype);
2326 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock);
2328 int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io,
2329 struct cl_lock *lock, struct cl_sync_io *anchor);
2330 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock);
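/*
 * Illustrative sketch: the request/release cycle for an extent lock. Real
 * callers fill in the lock descriptor (what extent, what mode) before
 * cl_lock_request(); that setup is elided here and the helper name is
 * hypothetical.
 */
static inline int cl_lock_example(const struct lu_env *env, struct cl_io *io,
                                  struct cl_lock *lock)
{
        int rc;

        rc = cl_lock_init(env, lock, io);       /* build the layered lock */
        if (rc != 0)
                return rc;

        rc = cl_lock_request(env, io, lock);    /* enqueue on all layers */
        if (rc == 0)
                cl_lock_release(env, lock);     /* cancel and finalize */
        else
                cl_lock_fini(env, lock);
        return rc;
}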
2333 int cl_io_init(const struct lu_env *env, struct cl_io *io,
2334 enum cl_io_type iot, struct cl_object *obj);
2335 int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
2336 enum cl_io_type iot, struct cl_object *obj);
2337 int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
2338 enum cl_io_type iot, loff_t pos, size_t bytes);
2339 int cl_io_loop(const struct lu_env *env, struct cl_io *io);
2341 void cl_io_fini(const struct lu_env *env, struct cl_io *io);
2342 int cl_io_iter_init(const struct lu_env *env, struct cl_io *io);
2343 void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io);
2344 int cl_io_lock(const struct lu_env *env, struct cl_io *io);
2345 void cl_io_unlock(const struct lu_env *env, struct cl_io *io);
2346 int cl_io_start(const struct lu_env *env, struct cl_io *io);
2347 void cl_io_end(const struct lu_env *env, struct cl_io *io);
2348 int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
2349 struct cl_io_lock_link *link);
2350 int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
2351 struct cl_lock_descr *descr);
2352 int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
2353 enum cl_req_type iot, struct cl_2queue *queue);
int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
                      enum cl_req_type iot, struct cl_2queue *queue,
                      long timeout);
int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
                       struct cl_page_list *queue, int from, int to,
                       cl_commit_cbt cb);
2360 void cl_io_extent_release(const struct lu_env *env, struct cl_io *io);
2361 int cl_io_lru_reserve(const struct lu_env *env, struct cl_io *io,
2362 loff_t pos, size_t bytes);
2363 int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
2364 pgoff_t start, struct cl_read_ahead *ra);
void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io,
                      size_t bytes);
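/*
 * Illustrative sketch: the top-level read/write pattern, roughly what llite
 * does around these entry points. Assumes the ci_obj, ci_result and
 * ci_need_restart members of struct cl_io described earlier; the helper
 * name is hypothetical.
 */
static inline int cl_io_rw_example(const struct lu_env *env, struct cl_io *io,
                                   enum cl_io_type iot, struct cl_object *obj,
                                   loff_t pos, size_t bytes)
{
        int rc;

        do {
                io->ci_obj = obj;
                rc = cl_io_rw_init(env, io, iot, pos, bytes);
                if (rc == 0)
                        rc = cl_io_loop(env, io); /* iterate/lock/start/end */
                else
                        rc = io->ci_result;
                cl_io_fini(env, io);
        } while (unlikely(io->ci_need_restart)); /* e.g. layout changed */

        return rc;
}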
2369 * True, iff \a io is an O_APPEND write(2).
2371 static inline int cl_io_is_append(const struct cl_io *io)
2373 return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append;
2376 static inline int cl_io_is_sync_write(const struct cl_io *io)
2378 return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_sync;
2381 static inline int cl_io_is_mkwrite(const struct cl_io *io)
2383 return io->ci_type == CIT_FAULT && io->u.ci_fault.ft_mkwrite;
2387 * True, iff \a io is a truncate(2).
2389 static inline int cl_io_is_trunc(const struct cl_io *io)
2391 return io->ci_type == CIT_SETATTR &&
2392 (io->u.ci_setattr.sa_avalid & ATTR_SIZE) &&
2393 (io->u.ci_setattr.sa_subtype != CL_SETATTR_FALLOCATE);
2396 static inline int cl_io_is_fallocate(const struct cl_io *io)
2398 return (io->ci_type == CIT_SETATTR) &&
2399 (io->u.ci_setattr.sa_subtype == CL_SETATTR_FALLOCATE);
2402 struct cl_io *cl_io_top(struct cl_io *io);
2404 #define CL_IO_SLICE_CLEAN(obj, base) memset_startat(obj, 0, base)
2408 * Last page in the page list.
2410 static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
2412 LASSERT(plist->pl_nr > 0);
2413 return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
2416 static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist)
2418 LASSERT(plist->pl_nr > 0);
2419 return list_first_entry(&plist->pl_pages, struct cl_page, cp_batch);
2423 * Iterate over pages in a page list.
2425 #define cl_page_list_for_each(page, list) \
2426 list_for_each_entry((page), &(list)->pl_pages, cp_batch)
2429 * Iterate over pages in a page list, taking possible removals into account.
2431 #define cl_page_list_for_each_safe(page, temp, list) \
2432 list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
2434 void cl_page_list_init(struct cl_page_list *plist);
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page,
                      bool getref);
2437 void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
2438 struct cl_page *page);
2439 void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
2440 struct cl_page *page);
2441 void cl_page_list_splice(struct cl_page_list *list,
2442 struct cl_page_list *head);
void cl_page_list_del(const struct lu_env *env,
                      struct cl_page_list *plist, struct cl_page *page,
                      bool putref);
2446 void cl_page_list_disown(const struct lu_env *env,
2447 struct cl_page_list *plist);
2448 void cl_page_list_assume(const struct lu_env *env,
2449 struct cl_io *io, struct cl_page_list *plist);
2450 void cl_page_list_discard(const struct lu_env *env,
2451 struct cl_io *io, struct cl_page_list *plist);
2452 void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist);
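/*
 * Illustrative sketch: using the _safe iterator when the loop body removes
 * pages from the list; here every page is moved to another list. The
 * helper name is hypothetical.
 */
static inline void cl_page_list_move_all_example(struct cl_page_list *dst,
                                                 struct cl_page_list *src)
{
        struct cl_page *page;
        struct cl_page *temp;

        cl_page_list_for_each_safe(page, temp, src)
                cl_page_list_move(dst, src, page);
}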
2454 void cl_2queue_init(struct cl_2queue *queue);
2455 void cl_2queue_disown(const struct lu_env *env, struct cl_2queue *queue);
2456 void cl_2queue_assume(const struct lu_env *env, struct cl_io *io,
2457 struct cl_2queue *queue);
2458 void cl_2queue_discard(const struct lu_env *env, struct cl_io *io,
2459 struct cl_2queue *queue);
2460 void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue);
2461 void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
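/*
 * Illustrative sketch: pushing a single owned page through the transfer
 * machinery with a 2-queue. Assumes the c2_qin/c2_qout members of struct
 * cl_2queue described earlier and the CRT_WRITE transfer type; pages that
 * enter the transfer move from the input to the output queue. Error
 * handling is elided and the helper name is hypothetical.
 */
static inline int cl_2queue_submit_example(const struct lu_env *env,
                                           struct cl_io *io,
                                           struct cl_page *page)
{
        struct cl_2queue queue;
        int rc;

        cl_2queue_init_page(&queue, page);      /* page lands in c2_qin */
        rc = cl_io_submit_rw(env, io, CRT_WRITE, &queue);
        cl_2queue_fini(env, &queue);
        return rc;
}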
2463 void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
2464 struct cl_req_attr *attr);
2471 typedef void (cl_sync_io_end_t)(const struct lu_env *, struct cl_sync_io *);
2473 void cl_sync_io_init_notify(struct cl_sync_io *anchor, int nr, void *dio_aio,
2474 cl_sync_io_end_t *end);
int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
                    long timeout);
void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
                     int ioret);
2480 int cl_sync_io_wait_recycle(const struct lu_env *env, struct cl_sync_io *anchor,
2481 long timeout, int ioret);
struct cl_dio_aio *cl_dio_aio_alloc(struct kiocb *iocb, struct cl_object *obj,
                                    bool is_aio);
2484 struct cl_sub_dio *cl_sub_dio_alloc(struct cl_dio_aio *ll_aio,
2485 struct iov_iter *iter, bool write,
2486 bool unaligned, bool sync);
2487 void cl_dio_aio_free(const struct lu_env *env, struct cl_dio_aio *aio);
2488 void cl_sub_dio_free(struct cl_sub_dio *sdio);
2489 static inline void cl_sync_io_init(struct cl_sync_io *anchor, int nr)
2491 cl_sync_io_init_notify(anchor, nr, NULL, NULL);
* Anchor for synchronous transfer. This is allocated on the stack by a
* thread doing synchronous transfer, and a pointer to this structure is
* set up in every page submitted for transfer. The transfer completion
* routine updates the anchor and wakes up the waiting thread when the
* transfer is complete.
struct cl_sync_io {
2501 /** number of pages yet to be transferred. */
2502 atomic_t csi_sync_nr;
2503 /** has this i/o completed? */
2504 atomic_t csi_complete;
2507 /** completion to be signaled when transfer is complete. */
2508 wait_queue_head_t csi_waitq;
2509 /** callback to invoke when this IO is finished */
2510 cl_sync_io_end_t *csi_end_io;
/* private pointer for an associated DIO/AIO */
void *csi_dio_aio;
};
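/*
 * Illustrative sketch: the synchronous-transfer pattern cl_sync_io supports.
 * The submitter initializes the anchor for a number of pages, each
 * completion calls cl_sync_io_note() against it, and the submitter blocks
 * in cl_sync_io_wait() until all pages are accounted for. The timeout of 0
 * is assumed here to mean "no timeout"; the helper name is hypothetical.
 */
static inline int cl_sync_io_example(const struct lu_env *env, int nr_pages)
{
        struct cl_sync_io anchor;

        cl_sync_io_init(&anchor, nr_pages);
        /* ... submit nr_pages pages, each pointing at &anchor ... */
        return cl_sync_io_wait(env, &anchor, 0);
}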
2515 /** direct IO pages */
2516 struct ll_dio_pages {
* page array for RDMA - for aligned i/o, these are the user-provided
* pages; for unaligned i/o, this is the internal buffer
2521 struct page **ldp_pages;
/** # of pages in the array. */
size_t ldp_count;
2524 /* the file offset of the first page. */
loff_t ldp_file_offset;
};
/* Top level struct used for AIO and DIO */
struct cl_dio_aio {
2530 struct cl_sync_io cda_sync;
2531 struct cl_object *cda_obj;
2532 struct kiocb *cda_iocb;
2534 struct mm_struct *cda_mm;
unsigned cda_no_aio_complete:1,
         cda_creator_free:1;
};
2539 struct cl_iter_dup {
2540 void *id_vec; /* dup'd vec (iov/bvec/kvec) */
size_t id_vec_size; /* bytes allocated for id_vec */
};
/* Sub-dio used for splitting DIO (and AIO, because AIO is DIO) according to
 * the layout/striping, so we can do parallel submission of DIO RPCs
 */
struct cl_sub_dio {
2548 struct cl_sync_io csd_sync;
2549 struct cl_page_list csd_pages;
2551 struct cl_dio_aio *csd_ll_aio;
2552 struct ll_dio_pages csd_dio_pages;
2553 struct iov_iter csd_iter;
2554 struct cl_iter_dup csd_dup;
2555 spinlock_t csd_lock;
2556 unsigned csd_creator_free:1,
static inline u64 cl_io_nob_aligned(u64 off, u32 nob, u32 pgsz)
{
        return (((nob / pgsz) - 1) * pgsz) + (pgsz - (off & (pgsz - 1)));
}
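/*
 * Worked example (illustrative): with pgsz = 4096, off = 1024 and
 * nob = 8192, cl_io_nob_aligned() returns
 *      ((8192 / 4096) - 1) * 4096 + (4096 - 1024) = 4096 + 3072 = 7168,
 * i.e. the byte count is trimmed so that a transfer starting at the
 * unaligned offset ends exactly on a page boundary.
 */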
2567 void ll_release_user_pages(struct page **pages, int npages);
2568 int ll_allocate_dio_buffer(struct ll_dio_pages *pvec, size_t io_size);
2569 void ll_free_dio_buffer(struct ll_dio_pages *pvec);
2570 ssize_t ll_dio_user_copy(struct cl_sub_dio *sdio);
2572 #ifndef HAVE_KTHREAD_USE_MM
2573 #define kthread_use_mm(mm) use_mm(mm)
#define kthread_unuse_mm(mm) unuse_mm(mm)
#endif
2577 /** \defgroup cl_env cl_env
2579 * lu_env handling for a client.
2581 * lu_env is an environment within which lustre code executes. Its major part
2582 * is lu_context---a fast memory allocation mechanism that is used to conserve
* precious kernel stack space. Originally lu_env was designed for a server,
* where
2586 * - there is a (mostly) fixed number of threads, and
2588 * - call chains have no non-lustre portions inserted between lustre code.
* On a client both of these assumptions fail, because every user thread can
* potentially execute lustre code as part of a system call, and lustre calls
* into VFS or MM that call back into lustre.
* To deal with that, cl_env wrapper functions implement the following:
* - allocation and destruction of environments are amortized by caching
*   no-longer-used environments instead of destroying them;
2600 * \see lu_env, lu_context, lu_context_key
2604 struct lu_env *cl_env_get(__u16 *refcheck);
2605 struct lu_env *cl_env_alloc(__u16 *refcheck, __u32 tags);
2606 void cl_env_put(struct lu_env *env, __u16 *refcheck);
2607 unsigned int cl_env_cache_purge(unsigned int nr);
2608 struct lu_env *cl_env_percpu_get(void);
2609 void cl_env_percpu_put(struct lu_env *env);
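/*
 * Illustrative sketch: the canonical cl_env usage pattern. A thread borrows
 * a (possibly cached) environment, runs client code under it, and returns
 * it; refcheck catches unbalanced get/put pairs. The helper name is
 * hypothetical.
 */
static inline int cl_env_example(void)
{
        struct lu_env *env;
        __u16 refcheck;
        int rc = 0;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                return PTR_ERR(env);

        /* ... execute lustre client code within this environment ... */

        cl_env_put(env, &refcheck);
        return rc;
}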
2615 void cl_attr2lvb(struct ost_lvb *lvb, const struct cl_attr *attr);
2616 void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb);
2618 struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
2619 struct lu_device_type *ldt,
2620 struct lu_device *next);
2622 int cl_global_init(void);
2623 void cl_global_fini(void);
2625 int lov_read_and_clear_async_rc(struct cl_object *clob);
#endif /* _LUSTRE_CL_OBJECT_H */