X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Fcl_object.h;h=abd341daa5b08802c3dd408617f96a1a47071455;hp=85b267d9c0b30b17df54067714404216b4a524f3;hb=200d4423787524eb8115b4fa4588a248065bd2be;hpb=3f09c2b10e0aef9b20df18ca3cf2e4638159d3db diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h index 85b267d..abd341d 100644 --- a/lustre/include/cl_object.h +++ b/lustre/include/cl_object.h @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -27,7 +23,7 @@ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2014, Intel Corporation. + * Copyright (c) 2011, 2017, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -63,10 +59,6 @@ * read/write system call it is associated with the single user * thread, that issued the system call). * - * - cl_req represents a collection of pages for a transfer. cl_req is - * constructed by req-forming engine that tries to saturate - * transport with large and continuous transfers. - * * Terminology * * - to avoid confusion high-level I/O operation like read or write system @@ -96,6 +88,9 @@ /* * super-class definitions. */ +#include +#include + #include #include #include @@ -103,15 +98,15 @@ #include #include #include +#include +#include +struct obd_info; struct inode; struct cl_device; -struct cl_device_operations; struct cl_object; -struct cl_object_page_operations; -struct cl_object_lock_operations; struct cl_page; struct cl_page_slice; @@ -124,27 +119,7 @@ struct cl_page_operations; struct cl_io; struct cl_io_slice; -struct cl_req; -struct cl_req_slice; - -/** - * Operations for each data device in the client stack. - * - * \see vvp_cl_ops, lov_cl_ops, lovsub_cl_ops, osc_cl_ops - */ -struct cl_device_operations { - /** - * Initialize cl_req. This method is called top-to-bottom on all - * devices in the stack to get them a chance to allocate layer-private - * data, and to attach them to the cl_req by calling - * cl_req_slice_add(). - * - * \see osc_req_init(), lov_req_init(), lovsub_req_init() - * \see ccc_req_init() - */ - int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev, - struct cl_req *req); -}; +struct cl_req_attr; /** * Device in the client stack. @@ -154,8 +129,6 @@ struct cl_device_operations { struct cl_device { /** Super-class. */ struct lu_device cd_lu_dev; - /** Per-layer operation vector. */ - const struct cl_device_operations *cd_ops; }; /** \addtogroup cl_object cl_object @@ -175,11 +148,11 @@ struct cl_attr { */ loff_t cat_kms; /** Modification time. Measured in seconds since epoch. */ - time_t cat_mtime; + time64_t cat_mtime; /** Access time. Measured in seconds since epoch. */ - time_t cat_atime; + time64_t cat_atime; /** Change time. Measured in seconds since epoch. */ - time_t cat_ctime; + time64_t cat_ctime; /** * Blocks allocated to this cl_object on the server file system. * @@ -197,20 +170,24 @@ struct cl_attr { /* nlink of the directory */ __u64 cat_nlink; + + /* Project identifier for quota purpose. 
*/ + __u32 cat_projid; }; /** * Fields in cl_attr that are being set. */ enum cl_attr_valid { - CAT_SIZE = 1 << 0, - CAT_KMS = 1 << 1, - CAT_MTIME = 1 << 3, - CAT_ATIME = 1 << 4, - CAT_CTIME = 1 << 5, - CAT_BLOCKS = 1 << 6, - CAT_UID = 1 << 7, - CAT_GID = 1 << 8 + CAT_SIZE = BIT(0), + CAT_KMS = BIT(1), + CAT_MTIME = BIT(3), + CAT_ATIME = BIT(4), + CAT_CTIME = BIT(5), + CAT_BLOCKS = BIT(6), + CAT_UID = BIT(7), + CAT_GID = BIT(8), + CAT_PROJID = BIT(9), }; /** @@ -265,12 +242,12 @@ struct cl_object { */ struct cl_object_conf { /** Super-class. */ - struct lu_object_conf coc_lu; - union { - /** - * Object layout. This is consumed by lov. - */ - struct lustre_md *coc_md; + struct lu_object_conf coc_lu; + union { + /** + * Object layout. This is consumed by lov. + */ + struct lu_buf coc_layout; /** * Description of particular stripe location in the * cluster. This is consumed by osc. @@ -303,6 +280,24 @@ enum { OBJECT_CONF_WAIT = 2 }; +enum { + CL_LAYOUT_GEN_NONE = (u32)-2, /* layout lock was cancelled */ + CL_LAYOUT_GEN_EMPTY = (u32)-1, /* for empty layout */ +}; + +struct cl_layout { + /** the buffer to return the layout in lov_mds_md format. */ + struct lu_buf cl_buf; + /** size of layout in lov_mds_md format. */ + size_t cl_size; + /** Layout generation. */ + u32 cl_layout_gen; + /** whether layout is a composite one */ + bool cl_is_composite; + /** Whether layout is a HSM released one */ + bool cl_is_released; +}; + /** * Operations implemented for each cl object layer. * @@ -347,19 +342,19 @@ struct cl_object_operations { */ int (*coo_io_init)(const struct lu_env *env, struct cl_object *obj, struct cl_io *io); - /** - * Fill portion of \a attr that this layer controls. This method is - * called top-to-bottom through all object layers. - * - * \pre cl_object_header::coh_attr_guard of the top-object is locked. - * - * \return 0: to continue - * \return +ve: to stop iterating through layers (but 0 is returned - * from enclosing cl_object_attr_get()) - * \return -ve: to signal error - */ - int (*coo_attr_get)(const struct lu_env *env, struct cl_object *obj, - struct cl_attr *attr); + /** + * Fill portion of \a attr that this layer controls. This method is + * called top-to-bottom through all object layers. + * + * \pre cl_object_header::coh_attr_guard of the top-object is locked. + * + * \return 0: to continue + * \return +ve: to stop iterating through layers (but 0 is returned + * from enclosing cl_object_attr_get()) + * \return -ve: to signal error + */ + int (*coo_attr_get)(const struct lu_env *env, struct cl_object *obj, + struct cl_attr *attr); /** * Update attributes. * @@ -401,7 +396,35 @@ struct cl_object_operations { * Object getstripe method. */ int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj, - struct lov_user_md __user *lum); + struct lov_user_md __user *lum, size_t size); + /** + * Get FIEMAP mapping from the object. + */ + int (*coo_fiemap)(const struct lu_env *env, struct cl_object *obj, + struct ll_fiemap_info_key *fmkey, + struct fiemap *fiemap, size_t *buflen); + /** + * Get layout and generation of the object. + */ + int (*coo_layout_get)(const struct lu_env *env, struct cl_object *obj, + struct cl_layout *layout); + /** + * Get maximum size of the object. + */ + loff_t (*coo_maxbytes)(struct cl_object *obj); + /** + * Set request attributes. + */ + void (*coo_req_attr_set)(const struct lu_env *env, + struct cl_object *obj, + struct cl_req_attr *attr); + /** + * Flush \a obj data corresponding to \a lock. 
Used for DoM + * locks in llite's cancelling blocking ast callback. + */ + int (*coo_object_flush)(const struct lu_env *env, + struct cl_object *obj, + struct ldlm_lock *lock); }; /** @@ -593,7 +616,7 @@ enum cl_page_state { * * - [cl_page_state::CPS_PAGEOUT] page is dirty, the * req-formation engine decides that it wants to include this page - * into an cl_req being constructed, and yanks it from the cache; + * into an RPC being constructed, and yanks it from the cache; * * - [cl_page_state::CPS_FREEING] VM callback is executed to * evict the page form the memory; @@ -662,7 +685,7 @@ enum cl_page_state { * Page is being read in, as a part of a transfer. This is quite * similar to the cl_page_state::CPS_PAGEOUT state, except that * read-in is always "immediate"---there is no such thing a sudden - * construction of read cl_req from cached, presumably not up to date, + * construction of read request from cached, presumably not up to date, * pages. * * Underlying VM page is locked for the duration of transfer. @@ -690,7 +713,7 @@ enum cl_page_type { /** Transient page, the transient cl_page is used to bind a cl_page * to vmpage which is not belonging to the same object of cl_page. - * it is used in DirectIO, lockless IO and liblustre. */ + * it is used in DirectIO and lockless IO. */ CPT_TRANSIENT, }; @@ -706,8 +729,8 @@ enum cl_page_type { struct cl_page { /** Reference counter. */ atomic_t cp_ref; - /** Transfer error. */ - int cp_error; + /* which slab kmem index this memory allocated from */ + int cp_kmem_index; /** An object this page is a part of. Immutable after creation. */ struct cl_object *cp_obj; /** vmpage */ @@ -716,8 +739,6 @@ struct cl_page { struct list_head cp_batch; /** List of slices. Immutable after creation. */ struct list_head cp_layers; - /** Linkage of pages within cl_req. */ - struct list_head cp_flight; /** * Page state. This field is const to avoid accidental update, it is * modified only internally within cl_page.c. Protected by a VM lock. @@ -734,12 +755,6 @@ struct cl_page { * by sub-io. Protected by a VM lock. */ struct cl_io *cp_owner; - /** - * Owning IO request in cl_page_state::CPS_PAGEOUT and - * cl_page_state::CPS_PAGEIN states. This field is maintained only in - * the top-level pages. Protected by a VM lock. - */ - struct cl_req *cp_req; /** List of references to this page, for debugging. */ struct lu_ref cp_reference; /** Link to an object, for debugging. */ @@ -748,6 +763,9 @@ struct cl_page { struct lu_ref_link cp_queue_ref; /** Assigned if doing a sync_io */ struct cl_sync_io *cp_sync_io; + /** layout_entry + stripe index, composed using lov_comp_index() */ + unsigned int cp_lov_index; + pgoff_t cp_osc_index; }; /** @@ -757,7 +775,6 @@ struct cl_page { */ struct cl_page_slice { struct cl_page *cpl_page; - pgoff_t cpl_index; /** * Object slice corresponding to this page slice. Immutable after * creation. @@ -782,7 +799,6 @@ enum cl_lock_mode { /** * Requested transfer type. - * \ingroup cl_req */ enum cl_req_type { CRT_READ, @@ -862,6 +878,13 @@ struct cl_page_operations { */ int (*cpo_is_vmlocked)(const struct lu_env *env, const struct cl_page_slice *slice); + + /** + * Update file attributes when all we have is this page. Used for tiny + * writes to update attributes when we don't have a full cl_io. + */ + void (*cpo_page_touch)(const struct lu_env *env, + const struct cl_page_slice *slice, size_t to); /** * Page destruction. */ @@ -886,27 +909,8 @@ struct cl_page_operations { const struct cl_page_slice *slice); /** Destructor. 
Frees resources and slice itself. */ void (*cpo_fini)(const struct lu_env *env, - struct cl_page_slice *slice); - - /** - * Checks whether the page is protected by a cl_lock. This is a - * per-layer method, because certain layers have ways to check for the - * lock much more efficiently than through the generic locks scan, or - * implement locking mechanisms separate from cl_lock, e.g., - * LL_FILE_GROUP_LOCKED in vvp. If \a pending is true, check for locks - * being canceled, or scheduled for cancellation as soon as the last - * user goes away, too. - * - * \retval -EBUSY: page is protected by a lock of a given mode; - * \retval -ENODATA: page is not protected by a lock; - * \retval 0: this layer cannot decide. - * - * \see cl_page_is_under_lock() - */ - int (*cpo_is_under_lock)(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *io, pgoff_t *max); - + struct cl_page_slice *slice, + struct pagevec *pvec); /** * Optional debugging helper. Prints given page slice. * @@ -918,8 +922,7 @@ struct cl_page_operations { /** * \name transfer * - * Transfer methods. See comment on cl_req for a description of - * transfer formation and life-cycle. + * Transfer methods. * * @{ */ @@ -965,7 +968,7 @@ struct cl_page_operations { int ioret); /** * Called when cached page is about to be added to the - * cl_req as a part of req formation. + * ptlrpc request as a part of req formation. * * \return 0 : proceed with this page; * \return -EAGAIN : skip this page; @@ -992,23 +995,6 @@ struct cl_page_operations { void (*cpo_clip)(const struct lu_env *env, const struct cl_page_slice *slice, int from, int to); - /** - * \pre the page was queued for transferring. - * \post page is removed from client's pending list, or -EBUSY - * is returned if it has already been in transferring. - * - * This is one of seldom page operation which is: - * 0. called from top level; - * 1. don't have vmpage locked; - * 2. every layer should synchronize execution of its ->cpo_cancel() - * with completion handlers. Osc uses client obd lock for this - * purpose. Based on there is no vvp_page_cancel and - * lov_page_cancel(), cpo_cancel is defacto protected by client lock. - * - * \see osc_page_cancel(). - */ - int (*cpo_cancel)(const struct lu_env *env, - const struct cl_page_slice *slice); /** * Write out a page by kernel. This is only called by ll_writepage * right now. @@ -1090,15 +1076,13 @@ static inline bool __page_in_use(const struct cl_page *page, int refc) * (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to * cl_lock::cll_layers list through cl_lock_slice::cls_linkage. * - * Typical cl_lock consists of the two layers: + * Typical cl_lock consists of one layer: * - * - vvp_lock (vvp specific data), and * - lov_lock (lov specific data). * * lov_lock contains an array of sub-locks. Each of these sub-locks is a * normal cl_lock: it has a header (struct cl_lock) and a list of layers: * - * - lovsub_lock, and * - osc_lock * * Each sub-lock is associated with a cl_object (representing stripe @@ -1111,111 +1095,29 @@ static inline bool __page_in_use(const struct cl_page *page, int refc) * * LIFE CYCLE * - * cl_lock is reference counted. When reference counter drops to 0, lock is - * placed in the cache, except when lock is in CLS_FREEING state. CLS_FREEING - * lock is destroyed when last reference is released. Referencing between - * top-lock and its sub-locks is described in the lov documentation module. - * - * STATE MACHINE - * - * Also, cl_lock is a state machine. 
This requires some clarification. One of - * the goals of client IO re-write was to make IO path non-blocking, or at - * least to make it easier to make it non-blocking in the future. Here - * `non-blocking' means that when a system call (read, write, truncate) - * reaches a situation where it has to wait for a communication with the - * server, it should --instead of waiting-- remember its current state and - * switch to some other work. E.g,. instead of waiting for a lock enqueue, - * client should proceed doing IO on the next stripe, etc. Obviously this is - * rather radical redesign, and it is not planned to be fully implemented at - * this time, instead we are putting some infrastructure in place, that would - * make it easier to do asynchronous non-blocking IO easier in the - * future. Specifically, where old locking code goes to sleep (waiting for - * enqueue, for example), new code returns cl_lock_transition::CLO_WAIT. When - * enqueue reply comes, its completion handler signals that lock state-machine - * is ready to transit to the next state. There is some generic code in - * cl_lock.c that sleeps, waiting for these signals. As a result, for users of - * this cl_lock.c code, it looks like locking is done in normal blocking - * fashion, and it the same time it is possible to switch to the non-blocking - * locking (simply by returning cl_lock_transition::CLO_WAIT from cl_lock.c - * functions). - * - * For a description of state machine states and transitions see enum - * cl_lock_state. - * - * There are two ways to restrict a set of states which lock might move to: - * - * - placing a "hold" on a lock guarantees that lock will not be moved - * into cl_lock_state::CLS_FREEING state until hold is released. Hold - * can be only acquired on a lock that is not in - * cl_lock_state::CLS_FREEING. All holds on a lock are counted in - * cl_lock::cll_holds. Hold protects lock from cancellation and - * destruction. Requests to cancel and destroy a lock on hold will be - * recorded, but only honored when last hold on a lock is released; - * - * - placing a "user" on a lock guarantees that lock will not leave - * cl_lock_state::CLS_NEW, cl_lock_state::CLS_QUEUING, - * cl_lock_state::CLS_ENQUEUED and cl_lock_state::CLS_HELD set of - * states, once it enters this set. That is, if a user is added onto a - * lock in a state not from this set, it doesn't immediately enforce - * lock to move to this set, but once lock enters this set it will - * remain there until all users are removed. Lock users are counted in - * cl_lock::cll_users. - * - * User is used to assure that lock is not canceled or destroyed while - * it is being enqueued, or actively used by some IO. - * - * Currently, a user always comes with a hold (cl_lock_invariant() - * checks that a number of holds is not less than a number of users). - * - * CONCURRENCY - * - * This is how lock state-machine operates. struct cl_lock contains a mutex - * cl_lock::cll_guard that protects struct fields. - * - * - mutex is taken, and cl_lock::cll_state is examined. - * - * - for every state there are possible target states where lock can move - * into. They are tried in order. Attempts to move into next state are - * done by _try() functions in cl_lock.c:cl_{enqueue,unlock,wait}_try(). - * - * - if the transition can be performed immediately, state is changed, - * and mutex is released. - * - * - if the transition requires blocking, _try() function returns - * cl_lock_transition::CLO_WAIT. 
Caller unlocks mutex and goes to - * sleep, waiting for possibility of lock state change. It is woken - * up when some event occurs, that makes lock state change possible - * (e.g., the reception of the reply from the server), and repeats - * the loop. - * - * Top-lock and sub-lock has separate mutexes and the latter has to be taken - * first to avoid dead-lock. - * - * To see an example of interaction of all these issues, take a look at the - * lov_cl.c:lov_lock_enqueue() function. It is called as a part of - * cl_enqueue_try(), and tries to advance top-lock to ENQUEUED state, by - * advancing state-machines of its sub-locks (lov_lock_enqueue_one()). Note - * also, that it uses trylock to grab sub-lock mutex to avoid dead-lock. It - * also has to handle CEF_ASYNC enqueue, when sub-locks enqueues have to be - * done in parallel, rather than one after another (this is used for glimpse - * locks, that cannot dead-lock). + * cl_lock is a cacheless data container for the requirements of locks to + * complete the IO. cl_lock is created before I/O starts and destroyed when the + * I/O is complete. + * + * cl_lock depends on LDLM lock to fulfill lock semantics. LDLM lock is attached + * to cl_lock at OSC layer. LDLM lock is still cacheable. * * INTERFACE AND USAGE * - * struct cl_lock_operations provide a number of call-backs that are invoked - * when events of interest occurs. Layers can intercept and handle glimpse, - * blocking, cancel ASTs and a reception of the reply from the server. + * Two major methods are supported for cl_lock: clo_enqueue and clo_cancel. A + * cl_lock is enqueued by cl_lock_request(), which will call clo_enqueue() + * methods for each layer to enqueue the lock. At the LOV layer, if a cl_lock + * consists of multiple sub cl_locks, each sub locks will be enqueued + * correspondingly. At OSC layer, the lock enqueue request will tend to reuse + * cached LDLM lock; otherwise a new LDLM lock will have to be requested from + * OST side. * - * One important difference with the old client locking model is that new - * client has a representation for the top-lock, whereas in the old code only - * sub-locks existed as real data structures and file-level locks are - * represented by "request sets" that are created and destroyed on each and - * every lock creation. + * cl_lock_cancel() must be called to release a cl_lock after use. clo_cancel() + * method will be called for each layer to release the resource held by this + * lock. At OSC layer, the reference count of LDLM lock, which is held at + * clo_enqueue time, is released. * - * Top-locks are cached, and can be found in the cache by the system calls. It - * is possible that top-lock is in cache, but some of its sub-locks were - * canceled and destroyed. In that case top-lock has to be enqueued again - * before it can be used. + * LDLM lock can only be canceled if there is no cl_lock using it. * * Overall process of the locking during IO operation is as following: * @@ -1228,7 +1130,7 @@ static inline bool __page_in_use(const struct cl_page *page, int refc) * * - when all locks are acquired, IO is performed; * - * - locks are released into cache. + * - locks are released after IO is complete. * * Striping introduces major additional complexity into locking. 
The * fundamental problem is that it is generally unsafe to actively use (hold) @@ -1250,16 +1152,6 @@ static inline bool __page_in_use(const struct cl_page *page, int refc) * buf is a part of memory mapped Lustre file, a lock or locks protecting buf * has to be held together with the usual lock on [offset, offset + count]. * - * As multi-stripe locks have to be allowed, it makes sense to cache them, so - * that, for example, a sequence of O_APPEND writes can proceed quickly - * without going down to the individual stripes to do lock matching. On the - * other hand, multi-stripe locks shouldn't be used by normal read/write - * calls. To achieve this, every layer can implement ->clo_fits_into() method, - * that is called by lock matching code (cl_lock_lookup()), and that can be - * used to selectively disable matching of certain locks for certain IOs. For - * exmaple, lov layer implements lov_lock_fits_into() that allow multi-stripe - * locks to be matched only for truncates and O_APPEND writes. - * * Interaction with DLM * * In the expected setup, cl_lock is ultimately backed up by a collection of @@ -1310,7 +1202,7 @@ struct cl_lock { /** * Per-layer part of cl_lock * - * \see vvp_lock, lov_lock, lovsub_lock, osc_lock + * \see lov_lock, osc_lock */ struct cl_lock_slice { struct cl_lock *cls_lock; @@ -1324,7 +1216,7 @@ struct cl_lock_slice { /** * - * \see vvp_lock_ops, lov_lock_ops, lovsub_lock_ops, osc_lock_ops + * \see lov_lock_ops, osc_lock_ops */ struct cl_lock_operations { /** @{ */ @@ -1336,8 +1228,7 @@ struct cl_lock_operations { * @anchor for resources * \retval -ve failure * - * \see vvp_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(), - * \see osc_lock_enqueue() + * \see lov_lock_enqueue(), osc_lock_enqueue() */ int (*clo_enqueue)(const struct lu_env *env, const struct cl_lock_slice *slice, @@ -1352,8 +1243,7 @@ struct cl_lock_operations { /** * Destructor. Frees resources and the slice. * - * \see vvp_lock_fini(), lov_lock_fini(), lovsub_lock_fini(), - * \see osc_lock_fini() + * \see lov_lock_fini(), osc_lock_fini() */ void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice); /** @@ -1405,10 +1295,9 @@ do { \ struct cl_page_list { unsigned pl_nr; struct list_head pl_pages; - struct task_struct *pl_owner; }; -/** +/** * A 2-queue of pages. A convenience data-type for common use case, 2-queue * contains an incoming page list and an outgoing page list. */ @@ -1456,7 +1345,6 @@ struct cl_2queue { * (3) sort all locks to avoid dead-locks, and acquire them * * (4) process the chunk: call per-page methods - * (cl_io_operations::cio_read_page() for read, * cl_io_operations::cio_prepare_write(), * cl_io_operations::cio_commit_write() for write) * @@ -1474,11 +1362,13 @@ struct cl_2queue { /** IO types */ enum cl_io_type { /** read system call */ - CIT_READ, + CIT_READ = 1, /** write system call */ CIT_WRITE, /** truncate, utime system calls */ CIT_SETATTR, + /** get data version */ + CIT_DATA_VERSION, /** * page fault handling */ @@ -1489,6 +1379,10 @@ enum cl_io_type { */ CIT_FSYNC, /** + * glimpse. An io context to acquire glimpse lock. + */ + CIT_GLIMPSE, + /** * Miscellaneous io. This is used for occasional io activity that * doesn't fit into other types. Currently this is used for: * @@ -1499,8 +1393,6 @@ enum cl_io_type { * - VM induced page write-out. An io context for writing page out * for memory cleansing; * - * - glimpse. An io context to acquire glimpse lock. - * * - grouplock. An io context to acquire group lock. 
* * CIT_MISC io is used simply as a context in which locks and pages @@ -1508,6 +1400,11 @@ enum cl_io_type { * cl_io_loop() is never called for it. */ CIT_MISC, + /** + * ladvise handling + * To give advice about access of a file + */ + CIT_LADVISE, CIT_OP_NR }; @@ -1557,7 +1454,32 @@ struct cl_io_slice { }; typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *, - struct cl_page *); + struct pagevec *); + +struct cl_read_ahead { + /* Maximum page index the readahead window will end. + * This is determined DLM lock coverage, RPC and stripe boundary. + * cra_end is included. */ + pgoff_t cra_end_idx; + /* optimal RPC size for this read, by pages */ + unsigned long cra_rpc_pages; + /* Release callback. If readahead holds resources underneath, this + * function should be called to release it. */ + void (*cra_release)(const struct lu_env *env, void *cbdata); + /* Callback data for cra_release routine */ + void *cra_cbdata; + /* whether lock is in contention */ + bool cra_contention; +}; + +static inline void cl_read_ahead_release(const struct lu_env *env, + struct cl_read_ahead *ra) +{ + if (ra->cra_release != NULL) + ra->cra_release(env, ra->cra_cbdata); + memset(ra, 0, sizeof(*ra)); +} + /** * Per-layer io operations. @@ -1664,17 +1586,14 @@ struct cl_io_operations { const struct cl_io_slice *slice, struct cl_page_list *queue, int from, int to, cl_commit_cbt cb); - /** - * Read missing page. - * - * Called by a top-level cl_io_operations::op[CIT_READ]::cio_start() - * method, when it hits not-up-to-date page in the range. Optional. - * - * \pre io->ci_type == CIT_READ - */ - int (*cio_read_page)(const struct lu_env *env, - const struct cl_io_slice *slice, - const struct cl_page_slice *page); + /** + * Decide maximum read ahead extent + * + * \pre io->ci_type == CIT_READ + */ + int (*cio_read_ahead)(const struct lu_env *env, + const struct cl_io_slice *slice, + pgoff_t start, struct cl_read_ahead *ra); /** * Optional debugging helper. Print given io slice. */ @@ -1692,25 +1611,30 @@ enum cl_enq_flags { * -EWOULDBLOCK is returned immediately. */ CEF_NONBLOCK = 0x00000001, - /** - * take lock asynchronously (out of order), as it cannot - * deadlock. This is for LDLM_FL_HAS_INTENT locks used for glimpsing. - */ - CEF_ASYNC = 0x00000002, + /** + * Tell lower layers this is a glimpse request, translated to + * LDLM_FL_HAS_INTENT at LDLM layer. + * + * Also, because glimpse locks never block other locks, we count this + * as automatically compatible with other osc locks. + * (see osc_lock_compatible) + */ + CEF_GLIMPSE = 0x00000002, /** * tell the server to instruct (though a flag in the blocking ast) an * owner of the conflicting lock, that it can drop dirty pages * protected by this lock, without sending them to the server. */ CEF_DISCARD_DATA = 0x00000004, - /** - * tell the sub layers that it must be a `real' lock. This is used for - * mmapped-buffer locks and glimpse locks that must be never converted - * into lockless mode. - * - * \see vvp_mmap_locks(), cl_glimpse_lock(). - */ - CEF_MUST = 0x00000008, + /** + * tell the sub layers that it must be a `real' lock. This is used for + * mmapped-buffer locks, glimpse locks, manually requested locks + * (LU_LADVISE_LOCKAHEAD) that must never be converted into lockless + * mode. + * + * \see vvp_mmap_locks(), cl_glimpse_lock, cl_request_lock(). + */ + CEF_MUST = 0x00000008, /** * tell the sub layers that never request a `real' lock. This flag is * not used currently. 
@@ -1723,17 +1647,33 @@ enum cl_enq_flags { */ CEF_NEVER = 0x00000010, /** - * for async glimpse lock. + * tell the dlm layer this is a speculative lock request + * speculative lock requests are locks which are not requested as part + * of an I/O operation. Instead, they are requested because we expect + * to use them in the future. They are requested asynchronously at the + * ptlrpc layer. + * + * Currently used for asynchronous glimpse locks and manually requested + * locks (LU_LADVISE_LOCKAHEAD). */ - CEF_AGL = 0x00000020, + CEF_SPECULATIVE = 0x00000020, /** * enqueue a lock to test DLM lock existence. */ CEF_PEEK = 0x00000040, /** + * Lock match only. Used by group lock in I/O as group lock + * is known to exist. + */ + CEF_LOCK_MATCH = 0x00000080, + /** + * tell the DLM layer to lock only the requested range + */ + CEF_LOCK_NO_EXPAND = 0x00000100, + /** * mask of enq_flags. */ - CEF_MASK = 0x0000007f, + CEF_MASK = 0x000001ff, }; /** @@ -1812,11 +1752,35 @@ enum cl_fsync_mode { }; struct cl_io_rw_common { - loff_t crw_pos; - size_t crw_count; - int crw_nonblock; + loff_t crw_pos; + size_t crw_count; + int crw_nonblock; +}; +enum cl_setattr_subtype { + /** regular setattr **/ + CL_SETATTR_REG = 1, + /** truncate(2) **/ + CL_SETATTR_TRUNC, + /** fallocate(2) - mode preallocate **/ + CL_SETATTR_FALLOCATE +}; + +struct cl_io_range { + loff_t cir_pos; + size_t cir_count; }; +struct cl_io_pt { + struct cl_io_pt *cip_next; + struct kiocb cip_iocb; + struct iov_iter cip_iter; + struct file *cip_file; + enum cl_io_type cip_iot; + unsigned int cip_need_restart:1; + loff_t cip_pos; + size_t cip_count; + ssize_t cip_result; +}; /** * State for io. @@ -1845,10 +1809,12 @@ struct cl_io { struct cl_lockset ci_lockset; /** lock requirements, this is just a help info for sublayers. */ enum cl_io_lock_dmd ci_lockreq; - union { - struct cl_rd_io { - struct cl_io_rw_common rd; - } ci_rd; + /** layout version when this IO occurs */ + __u32 ci_layout_version; + union { + struct cl_rd_io { + struct cl_io_rw_common rd; + } ci_rd; struct cl_wr_io { struct cl_io_rw_common wr; int wr_append; @@ -1856,12 +1822,27 @@ struct cl_io { } ci_wr; struct cl_io_rw_common ci_rw; struct cl_setattr_io { - struct ost_lvb sa_attr; - unsigned int sa_valid; - int sa_stripe_index; - struct lu_fid *sa_parent_fid; - struct obd_capa *sa_capa; + struct ost_lvb sa_attr; + unsigned int sa_attr_flags; + unsigned int sa_avalid; /* ATTR_* */ + unsigned int sa_xvalid; /* OP_XVALID */ + int sa_stripe_index; + struct ost_layout sa_layout; + const struct lu_fid *sa_parent_fid; + /* SETATTR interface is used for regular setattr, */ + /* truncate(2) and fallocate(2) subtypes */ + enum cl_setattr_subtype sa_subtype; + /* The following are used for fallocate(2) */ + int sa_falloc_mode; + loff_t sa_falloc_offset; + loff_t sa_falloc_len; + loff_t sa_falloc_end; } ci_setattr; + struct cl_data_version_io { + u64 dv_data_version; + u32 dv_layout_version; + int dv_flags; + } ci_data_version; struct cl_fault_io { /** page index within file. 
*/ pgoff_t ft_index; @@ -1879,13 +1860,20 @@ struct cl_io { struct cl_fsync_io { loff_t fi_start; loff_t fi_end; - struct obd_capa *fi_capa; /** file system level fid */ struct lu_fid *fi_fid; enum cl_fsync_mode fi_mode; /* how many pages were written/discarded */ unsigned int fi_nr_written; } ci_fsync; + struct cl_ladvise_io { + __u64 li_start; + __u64 li_end; + /** file system level fid */ + struct lu_fid *li_fid; + enum lu_ladvise_type li_advice; + __u64 li_flags; + } ci_ladvise; } u; struct cl_2queue ci_queue; size_t ci_nob; @@ -1907,6 +1895,13 @@ struct cl_io { */ ci_ignore_layout:1, /** + * Need MDS intervention to complete a write. + * Write intent is required for the following cases: + * 1. component being written is not initialized, or + * 2. the mirrored files are NOT in WRITE_PENDING state. + */ + ci_need_write_intent:1, + /** * Check if layout changed after the IO finishes. Mainly for HSM * requirement. If IO occurs to openning files, it doesn't need to * verify layout because HSM won't release openning files. @@ -1921,189 +1916,69 @@ struct cl_io { /** * O_NOATIME */ - ci_noatime:1; + ci_noatime:1, + /* Tell sublayers not to expand LDLM locks requested for this IO */ + ci_lock_no_expand:1, + /** + * Set if non-delay RPC should be used for this IO. + * + * If this file has multiple mirrors, and if the OSTs of the current + * mirror is inaccessible, non-delay RPC would error out quickly so + * that the upper layer can try to access the next mirror. + */ + ci_ndelay:1, + /** + * Set if IO is triggered by async workqueue readahead. + */ + ci_async_readahead:1, + /** + * Ignore lockless and do normal locking for this io. + */ + ci_ignore_lockless:1, + /** + * Set if we've tried all mirrors for this read IO, if it's not set, + * the read IO will check to-be-read OSCs' status, and make fast-switch + * another mirror if some of the OSTs are not healthy. + */ + ci_tried_all_mirrors:1; + /** + * Bypass quota check + */ + unsigned ci_noquota:1; + /** + * How many times the read has retried before this one. + * Set by the top level and consumed by the LOV. + */ + unsigned ci_ndelay_tried; + /** + * Designated mirror index for this I/O. + */ + unsigned ci_designated_mirror; /** * Number of pages owned by this IO. For invariant checking. */ unsigned ci_owned_nr; + /** + * Range of write intent. Valid if ci_need_write_intent is set. + */ + struct lu_extent ci_write_intent; }; /** @} cl_io */ -/** \addtogroup cl_req cl_req - * @{ */ -/** \struct cl_req - * Transfer. - * - * There are two possible modes of transfer initiation on the client: - * - * - immediate transfer: this is started when a high level io wants a page - * or a collection of pages to be transferred right away. Examples: - * read-ahead, synchronous read in the case of non-page aligned write, - * page write-out as a part of extent lock cancellation, page write-out - * as a part of memory cleansing. Immediate transfer can be both - * cl_req_type::CRT_READ and cl_req_type::CRT_WRITE; - * - * - opportunistic transfer (cl_req_type::CRT_WRITE only), that happens - * when io wants to transfer a page to the server some time later, when - * it can be done efficiently. Example: pages dirtied by the write(2) - * path. - * - * In any case, transfer takes place in the form of a cl_req, which is a - * representation for a network RPC. - * - * Pages queued for an opportunistic transfer are cached until it is decided - * that efficient RPC can be composed of them. 
This decision is made by "a - * req-formation engine", currently implemented as a part of osc - * layer. Req-formation depends on many factors: the size of the resulting - * RPC, whether or not multi-object RPCs are supported by the server, - * max-rpc-in-flight limitations, size of the dirty cache, etc. - * - * For the immediate transfer io submits a cl_page_list, that req-formation - * engine slices into cl_req's, possibly adding cached pages to some of - * the resulting req's. - * - * Whenever a page from cl_page_list is added to a newly constructed req, its - * cl_page_operations::cpo_prep() layer methods are called. At that moment, - * page state is atomically changed from cl_page_state::CPS_OWNED to - * cl_page_state::CPS_PAGEOUT or cl_page_state::CPS_PAGEIN, cl_page::cp_owner - * is zeroed, and cl_page::cp_req is set to the - * req. cl_page_operations::cpo_prep() method at the particular layer might - * return -EALREADY to indicate that it does not need to submit this page - * at all. This is possible, for example, if page, submitted for read, - * became up-to-date in the meantime; and for write, the page don't have - * dirty bit marked. \see cl_io_submit_rw() - * - * Whenever a cached page is added to a newly constructed req, its - * cl_page_operations::cpo_make_ready() layer methods are called. At that - * moment, page state is atomically changed from cl_page_state::CPS_CACHED to - * cl_page_state::CPS_PAGEOUT, and cl_page::cp_req is set to - * req. cl_page_operations::cpo_make_ready() method at the particular layer - * might return -EAGAIN to indicate that this page is not eligible for the - * transfer right now. - * - * FUTURE - * - * Plan is to divide transfers into "priority bands" (indicated when - * submitting cl_page_list, and queuing a page for the opportunistic transfer) - * and allow glueing of cached pages to immediate transfers only within single - * band. This would make high priority transfers (like lock cancellation or - * memory pressure induced write-out) really high priority. - * - */ - /** * Per-transfer attributes. */ struct cl_req_attr { + enum cl_req_type cra_type; + u64 cra_flags; + struct cl_page *cra_page; /** Generic attributes for the server consumption. */ struct obdo *cra_oa; - /** Capability. */ - struct obd_capa *cra_capa; /** Jobid */ char cra_jobid[LUSTRE_JOBID_SIZE]; }; -/** - * Transfer request operations definable at every layer. - * - * Concurrency: transfer formation engine synchronizes calls to all transfer - * methods. - */ -struct cl_req_operations { - /** - * Invoked top-to-bottom by cl_req_prep() when transfer formation is - * complete (all pages are added). - * - * \see osc_req_prep() - */ - int (*cro_prep)(const struct lu_env *env, - const struct cl_req_slice *slice); - /** - * Called top-to-bottom to fill in \a oa fields. This is called twice - * with different flags, see bug 10150 and osc_build_req(). - * - * \param obj an object from cl_req which attributes are to be set in - * \a oa. - * - * \param oa struct obdo where attributes are placed - * - * \param flags \a oa fields to be filled. - */ - void (*cro_attr_set)(const struct lu_env *env, - const struct cl_req_slice *slice, - const struct cl_object *obj, - struct cl_req_attr *attr, u64 flags); - /** - * Called top-to-bottom from cl_req_completion() to notify layers that - * transfer completed. Has to free all state allocated by - * cl_device_operations::cdo_req_init(). 
- */ - void (*cro_completion)(const struct lu_env *env, - const struct cl_req_slice *slice, int ioret); -}; - -/** - * A per-object state that (potentially multi-object) transfer request keeps. - */ -struct cl_req_obj { - /** object itself */ - struct cl_object *ro_obj; - /** reference to cl_req_obj::ro_obj. For debugging. */ - struct lu_ref_link ro_obj_ref; - /* something else? Number of pages for a given object? */ -}; - -/** - * Transfer request. - * - * Transfer requests are not reference counted, because IO sub-system owns - * them exclusively and knows when to free them. - * - * Life cycle. - * - * cl_req is created by cl_req_alloc() that calls - * cl_device_operations::cdo_req_init() device methods to allocate per-req - * state in every layer. - * - * Then pages are added (cl_req_page_add()), req keeps track of all objects it - * contains pages for. - * - * Once all pages were collected, cl_page_operations::cpo_prep() method is - * called top-to-bottom. At that point layers can modify req, let it pass, or - * deny it completely. This is to support things like SNS that have transfer - * ordering requirements invisible to the individual req-formation engine. - * - * On transfer completion (or transfer timeout, or failure to initiate the - * transfer of an allocated req), cl_req_operations::cro_completion() method - * is called, after execution of cl_page_operations::cpo_completion() of all - * req's pages. - */ -struct cl_req { - enum cl_req_type crq_type; - /** A list of pages being transfered */ - struct list_head crq_pages; - /** Number of pages in cl_req::crq_pages */ - unsigned crq_nrpages; - /** An array of objects which pages are in ->crq_pages */ - struct cl_req_obj *crq_o; - /** Number of elements in cl_req::crq_objs[] */ - unsigned crq_nrobjs; - struct list_head crq_layers; -}; - -/** - * Per-layer state for request. - */ -struct cl_req_slice { - struct cl_req *crs_req; - struct cl_device *crs_dev; - struct list_head crs_linkage; - const struct cl_req_operations *crs_ops; -}; - -/* @} cl_req */ - enum cache_stats_item { /** how many cache lookups were performed */ CS_lookup = 0, @@ -2174,15 +2049,10 @@ static inline struct cl_site *lu2cl_site(const struct lu_site *site) return container_of(site, struct cl_site, cs_lu); } -static inline int lu_device_is_cl(const struct lu_device *d) -{ - return d->ld_type->ldt_tags & LU_DEVICE_CL; -} - static inline struct cl_device *lu2cl_dev(const struct lu_device *d) { - LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d)); - return container_of0(d, struct cl_device, cd_lu_dev); + LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d)); + return container_of_safe(d, struct cl_device, cd_lu_dev); } static inline struct lu_device *cl2lu_dev(struct cl_device *d) @@ -2192,58 +2062,55 @@ static inline struct lu_device *cl2lu_dev(struct cl_device *d) static inline struct cl_object *lu2cl(const struct lu_object *o) { - LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev)); - return container_of0(o, struct cl_object, co_lu); + LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev)); + return container_of_safe(o, struct cl_object, co_lu); } static inline const struct cl_object_conf * lu2cl_conf(const struct lu_object_conf *conf) { - return container_of0(conf, struct cl_object_conf, coc_lu); + return container_of_safe(conf, struct cl_object_conf, coc_lu); } static inline struct cl_object *cl_object_next(const struct cl_object *obj) { - return obj ? lu2cl(lu_object_next(&obj->co_lu)) : NULL; + return obj ? 
lu2cl(lu_object_next(&obj->co_lu)) : NULL; } static inline struct cl_object_header *luh2coh(const struct lu_object_header *h) { - return container_of0(h, struct cl_object_header, coh_lu); + return container_of_safe(h, struct cl_object_header, coh_lu); } static inline struct cl_site *cl_object_site(const struct cl_object *obj) { - return lu2cl_site(obj->co_lu.lo_dev->ld_site); + return lu2cl_site(obj->co_lu.lo_dev->ld_site); } static inline struct cl_object_header *cl_object_header(const struct cl_object *obj) { - return luh2coh(obj->co_lu.lo_header); + return luh2coh(obj->co_lu.lo_header); } static inline int cl_device_init(struct cl_device *d, struct lu_device_type *t) { - return lu_device_init(&d->cd_lu_dev, t); + return lu_device_init(&d->cd_lu_dev, t); } static inline void cl_device_fini(struct cl_device *d) { - lu_device_fini(&d->cd_lu_dev); + lu_device_fini(&d->cd_lu_dev); } void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice, - struct cl_object *obj, pgoff_t index, + struct cl_object *obj, const struct cl_page_operations *ops); void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice, struct cl_object *obj, const struct cl_lock_operations *ops); void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice, struct cl_object *obj, const struct cl_io_operations *ops); -void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice, - struct cl_device *dev, - const struct cl_req_operations *ops); /** @} helpers */ /** \defgroup cl_object cl_object @@ -2259,8 +2126,8 @@ void cl_object_put (const struct lu_env *env, struct cl_object *o); void cl_object_get (struct cl_object *o); void cl_object_attr_lock (struct cl_object *o); void cl_object_attr_unlock(struct cl_object *o); -int cl_object_attr_get (const struct lu_env *env, struct cl_object *obj, - struct cl_attr *attr); +int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj, + struct cl_attr *attr); int cl_object_attr_update(const struct lu_env *env, struct cl_object *obj, const struct cl_attr *attr, unsigned valid); int cl_object_glimpse (const struct lu_env *env, struct cl_object *obj, @@ -2269,8 +2136,17 @@ int cl_conf_set (const struct lu_env *env, struct cl_object *obj, const struct cl_object_conf *conf); int cl_object_prune (const struct lu_env *env, struct cl_object *obj); void cl_object_kill (const struct lu_env *env, struct cl_object *obj); -int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj, - struct lov_user_md __user *lum); +int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj, + struct lov_user_md __user *lum, size_t size); +int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj, + struct ll_fiemap_info_key *fmkey, struct fiemap *fiemap, + size_t *buflen); +int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj, + struct cl_layout *cl); +loff_t cl_object_maxbytes(struct cl_object *obj); +int cl_object_flush(const struct lu_env *env, struct cl_object *obj, + struct ldlm_lock *lock); + /** * Returns true, iff \a o0 and \a o1 are slices of the same object. 
@@ -2306,14 +2182,6 @@ static inline int cl_object_refc(struct cl_object *clob) /** \defgroup cl_page cl_page * @{ */ -enum { - CLP_GANG_OKAY = 0, - CLP_GANG_RESCHED, - CLP_GANG_AGAIN, - CLP_GANG_ABORT -}; -/* callback of cl_page_gang_lookup() */ - struct cl_page *cl_page_find (const struct lu_env *env, struct cl_object *obj, pgoff_t idx, struct page *vmpage, @@ -2325,6 +2193,9 @@ struct cl_page *cl_page_alloc (const struct lu_env *env, void cl_page_get (struct cl_page *page); void cl_page_put (const struct lu_env *env, struct cl_page *page); +void cl_pagevec_put (const struct lu_env *env, + struct cl_page *page, + struct pagevec *pvec); void cl_page_print (const struct lu_env *env, void *cookie, lu_printer_t printer, const struct cl_page *pg); @@ -2375,7 +2246,6 @@ int cl_page_cache_add (const struct lu_env *env, struct cl_io *io, struct cl_page *pg, enum cl_req_type crt); void cl_page_clip (const struct lu_env *env, struct cl_page *pg, int from, int to); -int cl_page_cancel (const struct lu_env *env, struct cl_page *page); int cl_page_flush (const struct lu_env *env, struct cl_io *io, struct cl_page *pg); @@ -2392,10 +2262,10 @@ void cl_page_discard(const struct lu_env *env, struct cl_io *io, void cl_page_delete(const struct lu_env *env, struct cl_page *pg); int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg); +void cl_page_touch(const struct lu_env *env, const struct cl_page *pg, + size_t to); void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate); -int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io, - struct cl_page *page, pgoff_t *max_index); loff_t cl_offset(const struct cl_object *obj, pgoff_t idx); pgoff_t cl_index(const struct cl_object *obj, loff_t offset); size_t cl_page_size(const struct cl_object *obj); @@ -2415,7 +2285,8 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie, */ struct cl_client_cache { /** - * # of users (OSCs) + * # of client cache refcount + * # of users (OSCs) + 2 (held by llite and lov) */ atomic_t ccc_users; /** @@ -2451,7 +2322,17 @@ struct cl_client_cache { * Used at umounting time and signaled on BRW commit */ wait_queue_head_t ccc_unstable_waitq; + /** + * Serialize max_cache_mb write operation + */ + struct mutex ccc_max_cache_mb_lock; }; +/** + * cl_cache functions + */ +struct cl_client_cache *cl_cache_init(unsigned long lru_page_max); +void cl_cache_incref(struct cl_client_cache *cache); +void cl_cache_decref(struct cl_client_cache *cache); /** @} cl_page */ @@ -2494,8 +2375,6 @@ int cl_io_lock_add (const struct lu_env *env, struct cl_io *io, struct cl_io_lock_link *link); int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, struct cl_lock_descr *descr); -int cl_io_read_page (const struct lu_env *env, struct cl_io *io, - struct cl_page *page); int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io, enum cl_req_type iot, struct cl_2queue *queue); int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io, @@ -2504,18 +2383,17 @@ int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io, int cl_io_commit_async (const struct lu_env *env, struct cl_io *io, struct cl_page_list *queue, int from, int to, cl_commit_cbt cb); +int cl_io_read_ahead (const struct lu_env *env, struct cl_io *io, + pgoff_t start, struct cl_read_ahead *ra); void cl_io_rw_advance (const struct lu_env *env, struct cl_io *io, size_t nob); -int cl_io_cancel (const struct lu_env *env, struct cl_io *io, - struct cl_page_list *queue); -int cl_io_is_going (const struct 
lu_env *env); /** * True, iff \a io is an O_APPEND write(2). */ static inline int cl_io_is_append(const struct cl_io *io) { - return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append; + return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append; } static inline int cl_io_is_sync_write(const struct cl_io *io) @@ -2533,8 +2411,15 @@ static inline int cl_io_is_mkwrite(const struct cl_io *io) */ static inline int cl_io_is_trunc(const struct cl_io *io) { - return io->ci_type == CIT_SETATTR && - (io->u.ci_setattr.sa_valid & ATTR_SIZE); + return io->ci_type == CIT_SETATTR && + (io->u.ci_setattr.sa_avalid & ATTR_SIZE) && + (io->u.ci_setattr.sa_subtype != CL_SETATTR_FALLOCATE); +} + +static inline int cl_io_is_fallocate(const struct cl_io *io) +{ + return (io->ci_type == CIT_SETATTR) && + (io->u.ci_setattr.sa_subtype == CL_SETATTR_FALLOCATE); } struct cl_io *cl_io_top(struct cl_io *io); @@ -2542,13 +2427,12 @@ struct cl_io *cl_io_top(struct cl_io *io); void cl_io_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct cl_io *io); -#define CL_IO_SLICE_CLEAN(foo_io, base) \ -do { \ - typeof(foo_io) __foo_io = (foo_io); \ - \ - CLASSERT(offsetof(typeof(*__foo_io), base) == 0); \ - memset(&__foo_io->base + 1, 0, \ - (sizeof *__foo_io) - sizeof __foo_io->base); \ +#define CL_IO_SLICE_CLEAN(foo_io, base) \ +do { \ + typeof(foo_io) __foo_io = (foo_io); \ + \ + memset(&__foo_io->base, 0, \ + sizeof(*__foo_io) - offsetof(typeof(*__foo_io), base)); \ } while (0) /** @} cl_io */ @@ -2595,8 +2479,6 @@ void cl_page_list_del (const struct lu_env *env, struct cl_page_list *plist, struct cl_page *page); void cl_page_list_disown (const struct lu_env *env, struct cl_io *io, struct cl_page_list *plist); -int cl_page_list_own (const struct lu_env *env, - struct cl_io *io, struct cl_page_list *plist); void cl_page_list_assume (const struct lu_env *env, struct cl_io *io, struct cl_page_list *plist); void cl_page_list_discard(const struct lu_env *env, @@ -2616,22 +2498,30 @@ void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page); /** @} cl_page_list */ -/** \defgroup cl_req cl_req - * @{ */ -struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page, - enum cl_req_type crt, int nr_objects); - -void cl_req_page_add (const struct lu_env *env, struct cl_req *req, - struct cl_page *page); -void cl_req_page_done (const struct lu_env *env, struct cl_page *page); -int cl_req_prep (const struct lu_env *env, struct cl_req *req); -void cl_req_attr_set(const struct lu_env *env, struct cl_req *req, - struct cl_req_attr *attr, u64 flags); -void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret); +void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj, + struct cl_req_attr *attr); /** \defgroup cl_sync_io cl_sync_io * @{ */ +struct cl_sync_io; +struct cl_dio_aio; + +typedef void (cl_sync_io_end_t)(const struct lu_env *, struct cl_sync_io *); + +void cl_sync_io_init_notify(struct cl_sync_io *anchor, int nr, + struct cl_dio_aio *aio, cl_sync_io_end_t *end); + +int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor, + long timeout); +void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor, + int ioret); +struct cl_dio_aio *cl_aio_alloc(struct kiocb *iocb); +static inline void cl_sync_io_init(struct cl_sync_io *anchor, int nr) +{ + cl_sync_io_init_notify(anchor, nr, NULL, NULL); +} + /** * Anchor for synchronous transfer. 
This is allocated on a stack by thread * doing synchronous transfer, and a pointer to this structure is set up in @@ -2643,27 +2533,24 @@ struct cl_sync_io { atomic_t csi_sync_nr; /** error code. */ int csi_sync_rc; - /** barrier of destroy this structure */ - atomic_t csi_barrier; /** completion to be signaled when transfer is complete. */ wait_queue_head_t csi_waitq; /** callback to invoke when this IO is finished */ - void (*csi_end_io)(const struct lu_env *, - struct cl_sync_io *); + cl_sync_io_end_t *csi_end_io; + /** aio private data */ + struct cl_dio_aio *csi_aio; }; -void cl_sync_io_init(struct cl_sync_io *anchor, int nr, - void (*end)(const struct lu_env *, struct cl_sync_io *)); -int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor, - long timeout); -void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor, - int ioret); -void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor); +/** To support Direct AIO */ +struct cl_dio_aio { + struct cl_sync_io cda_sync; + struct cl_page_list cda_pages; + struct kiocb *cda_iocb; + ssize_t cda_bytes; +}; /** @} cl_sync_io */ -/** @} cl_req */ - /** \defgroup cl_env cl_env * * lu_env handling for a client. @@ -2687,38 +2574,15 @@ void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor); * - allocation and destruction of environment is amortized by caching no * longer used environments instead of destroying them; * - * - there is a notion of "current" environment, attached to the kernel - * data structure representing current thread Top-level lustre code - * allocates an environment and makes it current, then calls into - * non-lustre code, that in turn calls lustre back. Low-level lustre - * code thus called can fetch environment created by the top-level code - * and reuse it, avoiding additional environment allocation. - * Right now, three interfaces can attach the cl_env to running thread: - * - cl_env_get - * - cl_env_implant - * - cl_env_reexit(cl_env_reenter had to be called priorly) - * * \see lu_env, lu_context, lu_context_key * @{ */ -struct cl_env_nest { - int cen_refcheck; - void *cen_cookie; -}; - -struct lu_env *cl_env_peek (int *refcheck); -struct lu_env *cl_env_get (int *refcheck); -struct lu_env *cl_env_alloc (int *refcheck, __u32 tags); -struct lu_env *cl_env_nested_get (struct cl_env_nest *nest); -void cl_env_put (struct lu_env *env, int *refcheck); -void cl_env_nested_put (struct cl_env_nest *nest, struct lu_env *env); -void *cl_env_reenter (void); -void cl_env_reexit (void *cookie); -void cl_env_implant (struct lu_env *env, int *refcheck); -void cl_env_unplant (struct lu_env *env, int *refcheck); -unsigned cl_env_cache_purge(unsigned nr); -struct lu_env *cl_env_percpu_get (void); -void cl_env_percpu_put (struct lu_env *env); +struct lu_env *cl_env_get(__u16 *refcheck); +struct lu_env *cl_env_alloc(__u16 *refcheck, __u32 tags); +void cl_env_put(struct lu_env *env, __u16 *refcheck); +unsigned cl_env_cache_purge(unsigned nr); +struct lu_env *cl_env_percpu_get(void); +void cl_env_percpu_put(struct lu_env *env); /** @} cl_env */
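
Editor's note (not part of the patch): the simplified cl_env interface above keeps only the reference-counted get/put pair. A minimal usage sketch, assuming nothing beyond the declarations in this header; example_attr_get(), obj and attr are hypothetical placeholders, and the snippet also shows the coh_attr_guard locking that coo_attr_get() documents as a precondition:

	static int example_attr_get(struct cl_object *obj, struct cl_attr *attr)
	{
		struct lu_env *env;
		__u16 refcheck;
		int rc;

		env = cl_env_get(&refcheck);	/* cached, reference-counted env */
		if (IS_ERR(env))
			return PTR_ERR(env);

		/* cl_object_header::coh_attr_guard must be held across attr_get */
		cl_object_attr_lock(obj);
		rc = cl_object_attr_get(env, obj, attr);
		cl_object_attr_unlock(obj);

		cl_env_put(env, &refcheck);	/* return the env to the cache */
		return rc;
	}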
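
Editor's note (not part of the patch): a sketch of the on-stack cl_sync_io pattern described above. The page-submission step is elided, and example_sync_transfer(), nr_pages and timeout are placeholders; only the anchor calls come from this header:

	static int example_sync_transfer(const struct lu_env *env, int nr_pages,
					 long timeout)
	{
		struct cl_sync_io anchor;

		/* expect nr_pages completions before the waiter may proceed */
		cl_sync_io_init(&anchor, nr_pages);

		/*
		 * ... submit the pages; every transfer completion eventually
		 * calls cl_sync_io_note(env, &anchor, ioret) ...
		 */

		/* blocks until all expected cl_sync_io_note() calls have
		 * arrived or the caller-chosen timeout expires */
		return cl_sync_io_wait(env, &anchor, timeout);
	}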
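
Editor's note (not part of the patch): the new cl_object_layout_get() entry point returns the layout in lov_mds_md format through a caller-supplied lu_buf. A minimal sketch, assuming (as the cl_layout field comments suggest) that the layers fill in cl_size and cl_layout_gen; example_layout_get(), buf and buflen are placeholders:

	static int example_layout_get(const struct lu_env *env,
				      struct cl_object *obj,
				      void *buf, size_t buflen)
	{
		struct cl_layout cl = {
			.cl_buf.lb_buf	= buf,		/* lov_mds_md written here */
			.cl_buf.lb_len	= buflen,
		};
		int rc;

		rc = cl_object_layout_get(env, obj, &cl);
		if (rc < 0)
			return rc;

		/* cl.cl_size and cl.cl_layout_gen now describe the layout */
		return 0;
	}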