X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Fcl_object.h;h=7979b1475cc921a0daac3ab319cf7918e081789e;hp=286ecfa69755bd5c8a242d38d35287bda184d8b0;hb=d1dded6e28473d889a9b24b47cbc804f90dd2956;hpb=001b8dbfacb747f1649a2eb047a5f118ce32fdc7 diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h index 286ecfa..7979b14 100644 --- a/lustre/include/cl_object.h +++ b/lustre/include/cl_object.h @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -27,7 +23,7 @@ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2013, Intel Corporation. + * Copyright (c) 2011, 2017, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -63,10 +59,6 @@ * read/write system call it is associated with the single user * thread, that issued the system call). * - * - cl_req represents a collection of pages for a transfer. cl_req is - * constructed by req-forming engine that tries to saturate - * transport with large and continuous transfers. - * * Terminology * * - to avoid confusion high-level I/O operation like read or write system @@ -82,7 +74,6 @@ * - i_mutex * - PG_locked * - cl_object_header::coh_page_guard - * - cl_object_header::coh_lock_guard * - lu_site::ls_guard * * See the top comment in cl_object.c for the description of overall locking and @@ -97,24 +88,25 @@ /* * super-class definitions. */ +#include +#include + #include #include - -#ifdef __KERNEL__ -# include -# include -#else -# include -#endif - +#include +#include +#include +#include +#include +#include +#include + +struct obd_info; struct inode; struct cl_device; -struct cl_device_operations; struct cl_object; -struct cl_object_page_operations; -struct cl_object_lock_operations; struct cl_page; struct cl_page_slice; @@ -127,38 +119,16 @@ struct cl_page_operations; struct cl_io; struct cl_io_slice; -struct cl_req; -struct cl_req_slice; - -/** - * Operations for each data device in the client stack. - * - * \see vvp_cl_ops, lov_cl_ops, lovsub_cl_ops, osc_cl_ops - */ -struct cl_device_operations { - /** - * Initialize cl_req. This method is called top-to-bottom on all - * devices in the stack to get them a chance to allocate layer-private - * data, and to attach them to the cl_req by calling - * cl_req_slice_add(). - * - * \see osc_req_init(), lov_req_init(), lovsub_req_init() - * \see ccc_req_init() - */ - int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev, - struct cl_req *req); -}; +struct cl_req_attr; /** * Device in the client stack. * - * \see ccc_device, lov_device, lovsub_device, osc_device + * \see vvp_device, lov_device, lovsub_device, osc_device */ struct cl_device { /** Super-class. */ struct lu_device cd_lu_dev; - /** Per-layer operation vector. */ - const struct cl_device_operations *cd_ops; }; /** \addtogroup cl_object cl_object @@ -178,11 +148,11 @@ struct cl_attr { */ loff_t cat_kms; /** Modification time. Measured in seconds since epoch. */ - time_t cat_mtime; + time64_t cat_mtime; /** Access time. Measured in seconds since epoch. 
*/ - time_t cat_atime; + time64_t cat_atime; /** Change time. Measured in seconds since epoch. */ - time_t cat_ctime; + time64_t cat_ctime; /** * Blocks allocated to this cl_object on the server file system. * @@ -200,20 +170,24 @@ struct cl_attr { /* nlink of the directory */ __u64 cat_nlink; + + /* Project identifier for quota purpose. */ + __u32 cat_projid; }; /** * Fields in cl_attr that are being set. */ enum cl_attr_valid { - CAT_SIZE = 1 << 0, - CAT_KMS = 1 << 1, - CAT_MTIME = 1 << 3, - CAT_ATIME = 1 << 4, - CAT_CTIME = 1 << 5, - CAT_BLOCKS = 1 << 6, - CAT_UID = 1 << 7, - CAT_GID = 1 << 8 + CAT_SIZE = 1 << 0, + CAT_KMS = 1 << 1, + CAT_MTIME = 1 << 3, + CAT_ATIME = 1 << 4, + CAT_CTIME = 1 << 5, + CAT_BLOCKS = 1 << 6, + CAT_UID = 1 << 7, + CAT_GID = 1 << 8, + CAT_PROJID = 1 << 9 }; /** @@ -250,7 +224,7 @@ enum cl_attr_valid { * be discarded from the memory, all its sub-objects are torn-down and * destroyed too. * - * \see ccc_object, lov_object, lovsub_object, osc_object + * \see vvp_object, lov_object, lovsub_object, osc_object */ struct cl_object { /** super class */ @@ -268,12 +242,12 @@ struct cl_object { */ struct cl_object_conf { /** Super-class. */ - struct lu_object_conf coc_lu; - union { - /** - * Object layout. This is consumed by lov. - */ - struct lustre_md *coc_md; + struct lu_object_conf coc_lu; + union { + /** + * Object layout. This is consumed by lov. + */ + struct lu_buf coc_layout; /** * Description of particular stripe location in the * cluster. This is consumed by osc. @@ -306,6 +280,24 @@ enum { OBJECT_CONF_WAIT = 2 }; +enum { + CL_LAYOUT_GEN_NONE = (u32)-2, /* layout lock was cancelled */ + CL_LAYOUT_GEN_EMPTY = (u32)-1, /* for empty layout */ +}; + +struct cl_layout { + /** the buffer to return the layout in lov_mds_md format. */ + struct lu_buf cl_buf; + /** size of layout in lov_mds_md format. */ + size_t cl_size; + /** Layout generation. */ + u32 cl_layout_gen; + /** whether layout is a composite one */ + bool cl_is_composite; + /** Whether layout is a HSM released one */ + bool cl_is_released; +}; + /** * Operations implemented for each cl object layer. * @@ -350,19 +342,19 @@ struct cl_object_operations { */ int (*coo_io_init)(const struct lu_env *env, struct cl_object *obj, struct cl_io *io); - /** - * Fill portion of \a attr that this layer controls. This method is - * called top-to-bottom through all object layers. - * - * \pre cl_object_header::coh_attr_guard of the top-object is locked. - * - * \return 0: to continue - * \return +ve: to stop iterating through layers (but 0 is returned - * from enclosing cl_object_attr_get()) - * \return -ve: to signal error - */ - int (*coo_attr_get)(const struct lu_env *env, struct cl_object *obj, - struct cl_attr *attr); + /** + * Fill portion of \a attr that this layer controls. This method is + * called top-to-bottom through all object layers. + * + * \pre cl_object_header::coh_attr_guard of the top-object is locked. + * + * \return 0: to continue + * \return +ve: to stop iterating through layers (but 0 is returned + * from enclosing cl_object_attr_get()) + * \return -ve: to signal error + */ + int (*coo_attr_get)(const struct lu_env *env, struct cl_object *obj, + struct cl_attr *attr); /** * Update attributes. * @@ -374,8 +366,8 @@ struct cl_object_operations { * \return the same convention as for * cl_object_operations::coo_attr_get() is used. 
*/ - int (*coo_attr_set)(const struct lu_env *env, struct cl_object *obj, - const struct cl_attr *attr, unsigned valid); + int (*coo_attr_update)(const struct lu_env *env, struct cl_object *obj, + const struct cl_attr *attr, unsigned valid); /** * Update object configuration. Called top-to-bottom to modify object * configuration. @@ -384,14 +376,14 @@ struct cl_object_operations { */ int (*coo_conf_set)(const struct lu_env *env, struct cl_object *obj, const struct cl_object_conf *conf); - /** - * Glimpse ast. Executed when glimpse ast arrives for a lock on this - * object. Layers are supposed to fill parts of \a lvb that will be - * shipped to the glimpse originator as a glimpse result. - * - * \see ccc_object_glimpse(), lovsub_object_glimpse(), - * \see osc_object_glimpse() - */ + /** + * Glimpse ast. Executed when glimpse ast arrives for a lock on this + * object. Layers are supposed to fill parts of \a lvb that will be + * shipped to the glimpse originator as a glimpse result. + * + * \see vvp_object_glimpse(), lovsub_object_glimpse(), + * \see osc_object_glimpse() + */ int (*coo_glimpse)(const struct lu_env *env, const struct cl_object *obj, struct ost_lvb *lvb); /** @@ -400,6 +392,39 @@ struct cl_object_operations { * mainly pages and locks. */ int (*coo_prune)(const struct lu_env *env, struct cl_object *obj); + /** + * Object getstripe method. + */ + int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj, + struct lov_user_md __user *lum, size_t size); + /** + * Get FIEMAP mapping from the object. + */ + int (*coo_fiemap)(const struct lu_env *env, struct cl_object *obj, + struct ll_fiemap_info_key *fmkey, + struct fiemap *fiemap, size_t *buflen); + /** + * Get layout and generation of the object. + */ + int (*coo_layout_get)(const struct lu_env *env, struct cl_object *obj, + struct cl_layout *layout); + /** + * Get maximum size of the object. + */ + loff_t (*coo_maxbytes)(struct cl_object *obj); + /** + * Set request attributes. + */ + void (*coo_req_attr_set)(const struct lu_env *env, + struct cl_object *obj, + struct cl_req_attr *attr); + /** + * Flush \a obj data corresponding to \a lock. Used for DoM + * locks in llite's cancelling blocking ast callback. + */ + int (*coo_object_flush)(const struct lu_env *env, + struct cl_object *obj, + struct ldlm_lock *lock); }; /** @@ -408,17 +433,7 @@ struct cl_object_operations { struct cl_object_header { /** Standard lu_object_header. cl_object::co_lu::lo_header points * here. */ - struct lu_object_header coh_lu; - /** \name locks - * \todo XXX move locks below to the separate cache-lines, they are - * mostly useless otherwise. - */ - /** @{ */ - /** Lock protecting lock list. */ - spinlock_t coh_lock_guard; - /** @} locks */ - /** List of cl_lock's granted for this object. */ - cfs_list_t coh_locks; + struct lu_object_header coh_lu; /** * Parent object. It is assumed that an object has a well-defined @@ -451,18 +466,20 @@ struct cl_object_header { * Helper macro: iterate over all layers of the object \a obj, assigning every * layer top-to-bottom to \a slice. */ -#define cl_object_for_each(slice, obj) \ - cfs_list_for_each_entry((slice), \ - &(obj)->co_lu.lo_header->loh_layers, \ - co_lu.lo_linkage) +#define cl_object_for_each(slice, obj) \ + list_for_each_entry((slice), \ + &(obj)->co_lu.lo_header->loh_layers,\ + co_lu.lo_linkage) + /** * Helper macro: iterate over all layers of the object \a obj, assigning every * layer bottom-to-top to \a slice. 
*/ -#define cl_object_for_each_reverse(slice, obj) \ - cfs_list_for_each_entry_reverse((slice), \ - &(obj)->co_lu.lo_header->loh_layers, \ - co_lu.lo_linkage) +#define cl_object_for_each_reverse(slice, obj) \ + list_for_each_entry_reverse((slice), \ + &(obj)->co_lu.lo_header->loh_layers,\ + co_lu.lo_linkage) + /** @} cl_object */ #define CL_PAGE_EOF ((pgoff_t)~0ull) @@ -599,7 +616,7 @@ enum cl_page_state { * * - [cl_page_state::CPS_PAGEOUT] page is dirty, the * req-formation engine decides that it wants to include this page - * into an cl_req being constructed, and yanks it from the cache; + * into an RPC being constructed, and yanks it from the cache; * * - [cl_page_state::CPS_FREEING] VM callback is executed to * evict the page form the memory; @@ -668,7 +685,7 @@ enum cl_page_state { * Page is being read in, as a part of a transfer. This is quite * similar to the cl_page_state::CPS_PAGEOUT state, except that * read-in is always "immediate"---there is no such thing a sudden - * construction of read cl_req from cached, presumably not up to date, + * construction of read request from cached, presumably not up to date, * pages. * * Underlying VM page is locked for the duration of transfer. @@ -696,22 +713,11 @@ enum cl_page_type { /** Transient page, the transient cl_page is used to bind a cl_page * to vmpage which is not belonging to the same object of cl_page. - * it is used in DirectIO, lockless IO and liblustre. */ + * it is used in DirectIO and lockless IO. */ CPT_TRANSIENT, }; /** - * Flags maintained for every cl_page. - */ -enum cl_page_flags { - /** - * Set when pagein completes. Used for debugging (read completes at - * most once for a page). - */ - CPF_READ_COMPLETED = 1 << 0 -}; - -/** * Fields are protected by the lock on struct page, except for atomics and * immutables. * @@ -722,26 +728,20 @@ enum cl_page_flags { */ struct cl_page { /** Reference counter. */ - atomic_t cp_ref; + atomic_t cp_ref; /** An object this page is a part of. Immutable after creation. */ struct cl_object *cp_obj; - /** List of slices. Immutable after creation. */ - cfs_list_t cp_layers; + /** vmpage */ struct page *cp_vmpage; + /** Linkage of pages within group. Pages must be owned */ + struct list_head cp_batch; + /** List of slices. Immutable after creation. */ + struct list_head cp_layers; /** * Page state. This field is const to avoid accidental update, it is * modified only internally within cl_page.c. Protected by a VM lock. */ const enum cl_page_state cp_state; - /** Linkage of pages within group. Protected by cl_page::cp_mutex. */ - cfs_list_t cp_batch; - /** Mutex serializing membership of a page in a batch. */ - struct mutex cp_mutex; - /** Linkage of pages within cl_req. */ - cfs_list_t cp_flight; - /** Transfer error. */ - int cp_error; - /** * Page type. Only CPT_TRANSIENT is used so far. Immutable after * creation. @@ -753,32 +753,22 @@ struct cl_page { * by sub-io. Protected by a VM lock. */ struct cl_io *cp_owner; - /** - * Debug information, the task is owning the page. - */ - struct task_struct *cp_task; - /** - * Owning IO request in cl_page_state::CPS_PAGEOUT and - * cl_page_state::CPS_PAGEIN states. This field is maintained only in - * the top-level pages. Protected by a VM lock. - */ - struct cl_req *cp_req; /** List of references to this page, for debugging. */ struct lu_ref cp_reference; /** Link to an object, for debugging. */ struct lu_ref_link cp_obj_ref; /** Link to a queue, for debugging. */ struct lu_ref_link cp_queue_ref; - /** Per-page flags from enum cl_page_flags. 
Protected by a VM lock. */ - unsigned cp_flags; /** Assigned if doing a sync_io */ struct cl_sync_io *cp_sync_io; + /** layout_entry + stripe index, composed using lov_comp_index() */ + unsigned int cp_lov_index; }; /** * Per-layer part of cl_page. * - * \see ccc_page, lov_page, osc_page + * \see vvp_page, lov_page, osc_page */ struct cl_page_slice { struct cl_page *cpl_page; @@ -790,31 +780,23 @@ struct cl_page_slice { struct cl_object *cpl_obj; const struct cl_page_operations *cpl_ops; /** Linkage into cl_page::cp_layers. Immutable after creation. */ - cfs_list_t cpl_linkage; + struct list_head cpl_linkage; }; /** * Lock mode. For the client extent locks. * - * \warning: cl_lock_mode_match() assumes particular ordering here. * \ingroup cl_lock */ enum cl_lock_mode { - /** - * Mode of a lock that protects no data, and exists only as a - * placeholder. This is used for `glimpse' requests. A phantom lock - * might get promoted to real lock at some point. - */ - CLM_PHANTOM, - CLM_READ, - CLM_WRITE, + CLM_READ, + CLM_WRITE, CLM_GROUP, CLM_MAX, }; /** * Requested transfer type. - * \ingroup cl_req */ enum cl_req_type { CRT_READ, @@ -894,6 +876,13 @@ struct cl_page_operations { */ int (*cpo_is_vmlocked)(const struct lu_env *env, const struct cl_page_slice *slice); + + /** + * Update file attributes when all we have is this page. Used for tiny + * writes to update attributes when we don't have a full cl_io. + */ + void (*cpo_page_touch)(const struct lu_env *env, + const struct cl_page_slice *slice, size_t to); /** * Page destruction. */ @@ -918,27 +907,8 @@ struct cl_page_operations { const struct cl_page_slice *slice); /** Destructor. Frees resources and slice itself. */ void (*cpo_fini)(const struct lu_env *env, - struct cl_page_slice *slice); - - /** - * Checks whether the page is protected by a cl_lock. This is a - * per-layer method, because certain layers have ways to check for the - * lock much more efficiently than through the generic locks scan, or - * implement locking mechanisms separate from cl_lock, e.g., - * LL_FILE_GROUP_LOCKED in vvp. If \a pending is true, check for locks - * being canceled, or scheduled for cancellation as soon as the last - * user goes away, too. - * - * \retval -EBUSY: page is protected by a lock of a given mode; - * \retval -ENODATA: page is not protected by a lock; - * \retval 0: this layer cannot decide. - * - * \see cl_page_is_under_lock() - */ - int (*cpo_is_under_lock)(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *io, pgoff_t *max); - + struct cl_page_slice *slice, + struct pagevec *pvec); /** * Optional debugging helper. Prints given page slice. * @@ -950,8 +920,7 @@ struct cl_page_operations { /** * \name transfer * - * Transfer methods. See comment on cl_req for a description of - * transfer formation and life-cycle. + * Transfer methods. * * @{ */ @@ -997,7 +966,7 @@ struct cl_page_operations { int ioret); /** * Called when cached page is about to be added to the - * cl_req as a part of req formation. + * ptlrpc request as a part of req formation. 
* * \return 0 : proceed with this page; * \return -EAGAIN : skip this page; @@ -1077,22 +1046,32 @@ do { \ } \ } while (0) -static inline int __page_in_use(const struct cl_page *page, int refc) +static inline struct page *cl_page_vmpage(const struct cl_page *page) { - if (page->cp_type == CPT_CACHEABLE) - ++refc; - LASSERT(atomic_read(&page->cp_ref) > 0); - return (atomic_read(&page->cp_ref) > refc); + LASSERT(page->cp_vmpage != NULL); + return page->cp_vmpage; } -#define cl_page_in_use(pg) __page_in_use(pg, 1) -#define cl_page_in_use_noref(pg) __page_in_use(pg, 0) -static inline struct page *cl_page_vmpage(struct cl_page *page) +/** + * Check if a cl_page is in use. + * + * Client cache holds a refcount, this refcount will be dropped when + * the page is taken out of cache, see vvp_page_delete(). + */ +static inline bool __page_in_use(const struct cl_page *page, int refc) { - LASSERT(page->cp_vmpage != NULL); - return page->cp_vmpage; + return (atomic_read(&page->cp_ref) > refc + 1); } +/** + * Caller itself holds a refcount of cl_page. + */ +#define cl_page_in_use(pg) __page_in_use(pg, 1) +/** + * Caller doesn't hold a refcount. + */ +#define cl_page_in_use_noref(pg) __page_in_use(pg, 0) + /** @} cl_page */ /** \addtogroup cl_lock cl_lock @@ -1112,21 +1091,13 @@ static inline struct page *cl_page_vmpage(struct cl_page *page) * (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to * cl_lock::cll_layers list through cl_lock_slice::cls_linkage. * - * All locks for a given object are linked into cl_object_header::coh_locks - * list (protected by cl_object_header::coh_lock_guard spin-lock) through - * cl_lock::cll_linkage. Currently this list is not sorted in any way. We can - * sort it in starting lock offset, or use altogether different data structure - * like a tree. + * Typical cl_lock consists of one layer: * - * Typical cl_lock consists of the two layers: - * - * - vvp_lock (vvp specific data), and * - lov_lock (lov specific data). * * lov_lock contains an array of sub-locks. Each of these sub-locks is a * normal cl_lock: it has a header (struct cl_lock) and a list of layers: * - * - lovsub_lock, and * - osc_lock * * Each sub-lock is associated with a cl_object (representing stripe @@ -1139,111 +1110,29 @@ static inline struct page *cl_page_vmpage(struct cl_page *page) * * LIFE CYCLE * - * cl_lock is reference counted. When reference counter drops to 0, lock is - * placed in the cache, except when lock is in CLS_FREEING state. CLS_FREEING - * lock is destroyed when last reference is released. Referencing between - * top-lock and its sub-locks is described in the lov documentation module. - * - * STATE MACHINE - * - * Also, cl_lock is a state machine. This requires some clarification. One of - * the goals of client IO re-write was to make IO path non-blocking, or at - * least to make it easier to make it non-blocking in the future. Here - * `non-blocking' means that when a system call (read, write, truncate) - * reaches a situation where it has to wait for a communication with the - * server, it should --instead of waiting-- remember its current state and - * switch to some other work. E.g,. instead of waiting for a lock enqueue, - * client should proceed doing IO on the next stripe, etc. Obviously this is - * rather radical redesign, and it is not planned to be fully implemented at - * this time, instead we are putting some infrastructure in place, that would - * make it easier to do asynchronous non-blocking IO easier in the - * future. 
Specifically, where old locking code goes to sleep (waiting for - * enqueue, for example), new code returns cl_lock_transition::CLO_WAIT. When - * enqueue reply comes, its completion handler signals that lock state-machine - * is ready to transit to the next state. There is some generic code in - * cl_lock.c that sleeps, waiting for these signals. As a result, for users of - * this cl_lock.c code, it looks like locking is done in normal blocking - * fashion, and it the same time it is possible to switch to the non-blocking - * locking (simply by returning cl_lock_transition::CLO_WAIT from cl_lock.c - * functions). - * - * For a description of state machine states and transitions see enum - * cl_lock_state. - * - * There are two ways to restrict a set of states which lock might move to: - * - * - placing a "hold" on a lock guarantees that lock will not be moved - * into cl_lock_state::CLS_FREEING state until hold is released. Hold - * can be only acquired on a lock that is not in - * cl_lock_state::CLS_FREEING. All holds on a lock are counted in - * cl_lock::cll_holds. Hold protects lock from cancellation and - * destruction. Requests to cancel and destroy a lock on hold will be - * recorded, but only honored when last hold on a lock is released; - * - * - placing a "user" on a lock guarantees that lock will not leave - * cl_lock_state::CLS_NEW, cl_lock_state::CLS_QUEUING, - * cl_lock_state::CLS_ENQUEUED and cl_lock_state::CLS_HELD set of - * states, once it enters this set. That is, if a user is added onto a - * lock in a state not from this set, it doesn't immediately enforce - * lock to move to this set, but once lock enters this set it will - * remain there until all users are removed. Lock users are counted in - * cl_lock::cll_users. - * - * User is used to assure that lock is not canceled or destroyed while - * it is being enqueued, or actively used by some IO. - * - * Currently, a user always comes with a hold (cl_lock_invariant() - * checks that a number of holds is not less than a number of users). - * - * CONCURRENCY - * - * This is how lock state-machine operates. struct cl_lock contains a mutex - * cl_lock::cll_guard that protects struct fields. - * - * - mutex is taken, and cl_lock::cll_state is examined. - * - * - for every state there are possible target states where lock can move - * into. They are tried in order. Attempts to move into next state are - * done by _try() functions in cl_lock.c:cl_{enqueue,unlock,wait}_try(). - * - * - if the transition can be performed immediately, state is changed, - * and mutex is released. - * - * - if the transition requires blocking, _try() function returns - * cl_lock_transition::CLO_WAIT. Caller unlocks mutex and goes to - * sleep, waiting for possibility of lock state change. It is woken - * up when some event occurs, that makes lock state change possible - * (e.g., the reception of the reply from the server), and repeats - * the loop. - * - * Top-lock and sub-lock has separate mutexes and the latter has to be taken - * first to avoid dead-lock. - * - * To see an example of interaction of all these issues, take a look at the - * lov_cl.c:lov_lock_enqueue() function. It is called as a part of - * cl_enqueue_try(), and tries to advance top-lock to ENQUEUED state, by - * advancing state-machines of its sub-locks (lov_lock_enqueue_one()). Note - * also, that it uses trylock to grab sub-lock mutex to avoid dead-lock. 
It - * also has to handle CEF_ASYNC enqueue, when sub-locks enqueues have to be - * done in parallel, rather than one after another (this is used for glimpse - * locks, that cannot dead-lock). + * cl_lock is a cacheless data container for the requirements of locks to + * complete the IO. cl_lock is created before I/O starts and destroyed when the + * I/O is complete. + * + * cl_lock depends on LDLM lock to fulfill lock semantics. LDLM lock is attached + * to cl_lock at OSC layer. LDLM lock is still cacheable. * * INTERFACE AND USAGE * - * struct cl_lock_operations provide a number of call-backs that are invoked - * when events of interest occurs. Layers can intercept and handle glimpse, - * blocking, cancel ASTs and a reception of the reply from the server. + * Two major methods are supported for cl_lock: clo_enqueue and clo_cancel. A + * cl_lock is enqueued by cl_lock_request(), which will call clo_enqueue() + * methods for each layer to enqueue the lock. At the LOV layer, if a cl_lock + * consists of multiple sub cl_locks, each sub locks will be enqueued + * correspondingly. At OSC layer, the lock enqueue request will tend to reuse + * cached LDLM lock; otherwise a new LDLM lock will have to be requested from + * OST side. * - * One important difference with the old client locking model is that new - * client has a representation for the top-lock, whereas in the old code only - * sub-locks existed as real data structures and file-level locks are - * represented by "request sets" that are created and destroyed on each and - * every lock creation. + * cl_lock_cancel() must be called to release a cl_lock after use. clo_cancel() + * method will be called for each layer to release the resource held by this + * lock. At OSC layer, the reference count of LDLM lock, which is held at + * clo_enqueue time, is released. * - * Top-locks are cached, and can be found in the cache by the system calls. It - * is possible that top-lock is in cache, but some of its sub-locks were - * canceled and destroyed. In that case top-lock has to be enqueued again - * before it can be used. + * LDLM lock can only be canceled if there is no cl_lock using it. * * Overall process of the locking during IO operation is as following: * @@ -1256,7 +1145,7 @@ static inline struct page *cl_page_vmpage(struct cl_page *page) * * - when all locks are acquired, IO is performed; * - * - locks are released into cache. + * - locks are released after IO is complete. * * Striping introduces major additional complexity into locking. The * fundamental problem is that it is generally unsafe to actively use (hold) @@ -1278,16 +1167,6 @@ static inline struct page *cl_page_vmpage(struct cl_page *page) * buf is a part of memory mapped Lustre file, a lock or locks protecting buf * has to be held together with the usual lock on [offset, offset + count]. * - * As multi-stripe locks have to be allowed, it makes sense to cache them, so - * that, for example, a sequence of O_APPEND writes can proceed quickly - * without going down to the individual stripes to do lock matching. On the - * other hand, multi-stripe locks shouldn't be used by normal read/write - * calls. To achieve this, every layer can implement ->clo_fits_into() method, - * that is called by lock matching code (cl_lock_lookup()), and that can be - * used to selectively disable matching of certain locks for certain IOs. For - * exmaple, lov layer implements lov_lock_fits_into() that allow multi-stripe - * locks to be matched only for truncates and O_APPEND writes. 
- * * Interaction with DLM * * In the expected setup, cl_lock is ultimately backed up by a collection of @@ -1318,295 +1197,27 @@ struct cl_lock_descr { __u32 cld_enq_flags; }; -#define DDESCR "%s(%d):[%lu, %lu]" -#define PDESCR(descr) \ - cl_lock_mode_name((descr)->cld_mode), (descr)->cld_mode, \ - (descr)->cld_start, (descr)->cld_end +#define DDESCR "%s(%d):[%lu, %lu]:%x" +#define PDESCR(descr) \ + cl_lock_mode_name((descr)->cld_mode), (descr)->cld_mode, \ + (descr)->cld_start, (descr)->cld_end, (descr)->cld_enq_flags const char *cl_lock_mode_name(const enum cl_lock_mode mode); /** - * Lock state-machine states. - * - * \htmlonly - *
- *
- * Possible state transitions:
- *
- *              +------------------>NEW
- *              |                    |
- *              |                    | cl_enqueue_try()
- *              |                    |
- *              |    cl_unuse_try()  V
- *              |  +--------------QUEUING (*)
- *              |  |                 |
- *              |  |                 | cl_enqueue_try()
- *              |  |                 |
- *              |  | cl_unuse_try()  V
- *    sub-lock  |  +-------------ENQUEUED (*)
- *    canceled  |  |                 |
- *              |  |                 | cl_wait_try()
- *              |  |                 |
- *              |  |                (R)
- *              |  |                 |
- *              |  |                 V
- *              |  |                HELD<---------+
- *              |  |                 |            |
- *              |  |                 |            | cl_use_try()
- *              |  |  cl_unuse_try() |            |
- *              |  |                 |            |
- *              |  |                 V         ---+
- *              |  +------------>INTRANSIT (D) <--+
- *              |                    |            |
- *              |     cl_unuse_try() |            | cached lock found
- *              |                    |            | cl_use_try()
- *              |                    |            |
- *              |                    V            |
- *              +------------------CACHED---------+
- *                                   |
- *                                  (C)
- *                                   |
- *                                   V
- *                                FREEING
- *
- * Legend:
- *
- *         In states marked with (*) transition to the same state (i.e., a loop
- *         in the diagram) is possible.
- *
- *         (R) is the point where Receive call-back is invoked: it allows layers
- *         to handle arrival of lock reply.
- *
- *         (C) is the point where Cancellation call-back is invoked.
- *
- *         (D) is the transit state which means the lock is changing.
- *
- *         Transition to FREEING state is possible from any other state in the
- *         diagram in case of unrecoverable error.
- *
- * \endhtmlonly - * - * These states are for individual cl_lock object. Top-lock and its sub-locks - * can be in the different states. Another way to say this is that we have - * nested state-machines. - * - * Separate QUEUING and ENQUEUED states are needed to support non-blocking - * operation for locks with multiple sub-locks. Imagine lock on a file F, that - * intersects 3 stripes S0, S1, and S2. To enqueue F client has to send - * enqueue to S0, wait for its completion, then send enqueue for S1, wait for - * its completion and at last enqueue lock for S2, and wait for its - * completion. In that case, top-lock is in QUEUING state while S0, S1 are - * handled, and is in ENQUEUED state after enqueue to S2 has been sent (note - * that in this case, sub-locks move from state to state, and top-lock remains - * in the same state). - */ -enum cl_lock_state { - /** - * Lock that wasn't yet enqueued - */ - CLS_NEW, - /** - * Enqueue is in progress, blocking for some intermediate interaction - * with the other side. - */ - CLS_QUEUING, - /** - * Lock is fully enqueued, waiting for server to reply when it is - * granted. - */ - CLS_ENQUEUED, - /** - * Lock granted, actively used by some IO. - */ - CLS_HELD, - /** - * This state is used to mark the lock is being used, or unused. - * We need this state because the lock may have several sublocks, - * so it's impossible to have an atomic way to bring all sublocks - * into CLS_HELD state at use case, or all sublocks to CLS_CACHED - * at unuse case. - * If a thread is referring to a lock, and it sees the lock is in this - * state, it must wait for the lock. - * See state diagram for details. - */ - CLS_INTRANSIT, - /** - * Lock granted, not used. - */ - CLS_CACHED, - /** - * Lock is being destroyed. - */ - CLS_FREEING, - CLS_NR -}; - -enum cl_lock_flags { - /** - * lock has been cancelled. This flag is never cleared once set (by - * cl_lock_cancel0()). - */ - CLF_CANCELLED = 1 << 0, - /** cancellation is pending for this lock. */ - CLF_CANCELPEND = 1 << 1, - /** destruction is pending for this lock. */ - CLF_DOOMED = 1 << 2, - /** from enqueue RPC reply upcall. */ - CLF_FROM_UPCALL= 1 << 3, -}; - -/** - * Lock closure. - * - * Lock closure is a collection of locks (both top-locks and sub-locks) that - * might be updated in a result of an operation on a certain lock (which lock - * this is a closure of). - * - * Closures are needed to guarantee dead-lock freedom in the presence of - * - * - nested state-machines (top-lock state-machine composed of sub-lock - * state-machines), and - * - * - shared sub-locks. - * - * Specifically, many operations, such as lock enqueue, wait, unlock, - * etc. start from a top-lock, and then operate on a sub-locks of this - * top-lock, holding a top-lock mutex. When sub-lock state changes as a result - * of such operation, this change has to be propagated to all top-locks that - * share this sub-lock. Obviously, no natural lock ordering (e.g., - * top-to-bottom or bottom-to-top) captures this scenario, so try-locking has - * to be used. Lock closure systematizes this try-and-repeat logic. - */ -struct cl_lock_closure { - /** - * Lock that is mutexed when closure construction is started. When - * closure in is `wait' mode (cl_lock_closure::clc_wait), mutex on - * origin is released before waiting. - */ - struct cl_lock *clc_origin; - /** - * List of enclosed locks, so far. Locks are linked here through - * cl_lock::cll_inclosure. - */ - cfs_list_t clc_list; - /** - * True iff closure is in a `wait' mode. 
This determines what - * cl_lock_enclosure() does when a lock L to be added to the closure - * is currently mutexed by some other thread. - * - * If cl_lock_closure::clc_wait is not set, then closure construction - * fails with CLO_REPEAT immediately. - * - * In wait mode, cl_lock_enclosure() waits until next attempt to build - * a closure might succeed. To this end it releases an origin mutex - * (cl_lock_closure::clc_origin), that has to be the only lock mutex - * owned by the current thread, and then waits on L mutex (by grabbing - * it and immediately releasing), before returning CLO_REPEAT to the - * caller. - */ - int clc_wait; - /** Number of locks in the closure. */ - int clc_nr; -}; - -/** * Layered client lock. */ struct cl_lock { - /** Reference counter. */ - atomic_t cll_ref; /** List of slices. Immutable after creation. */ - cfs_list_t cll_layers; - /** - * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected - * by cl_lock::cll_descr::cld_obj::coh_lock_guard. - */ - cfs_list_t cll_linkage; - /** - * Parameters of this lock. Protected by - * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within - * cl_lock::cll_guard. Modified only on lock creation and in - * cl_lock_modify(). - */ - struct cl_lock_descr cll_descr; - /** Protected by cl_lock::cll_guard. */ - enum cl_lock_state cll_state; - /** signals state changes. */ - wait_queue_head_t cll_wq; - /** - * Recursive lock, most fields in cl_lock{} are protected by this. - * - * Locking rules: this mutex is never held across network - * communication, except when lock is being canceled. - * - * Lock ordering: a mutex of a sub-lock is taken first, then a mutex - * on a top-lock. Other direction is implemented through a - * try-lock-repeat loop. Mutices of unrelated locks can be taken only - * by try-locking. - * - * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait(). - */ - struct mutex cll_guard; - struct task_struct *cll_guarder; - int cll_depth; - - /** - * the owner for INTRANSIT state - */ - struct task_struct *cll_intransit_owner; - int cll_error; - /** - * Number of holds on a lock. A hold prevents a lock from being - * canceled and destroyed. Protected by cl_lock::cll_guard. - * - * \see cl_lock_hold(), cl_lock_unhold(), cl_lock_release() - */ - int cll_holds; - /** - * Number of lock users. Valid in cl_lock_state::CLS_HELD state - * only. Lock user pins lock in CLS_HELD state. Protected by - * cl_lock::cll_guard. - * - * \see cl_wait(), cl_unuse(). - */ - int cll_users; - /** - * Flag bit-mask. Values from enum cl_lock_flags. Updates are - * protected by cl_lock::cll_guard. - */ - unsigned long cll_flags; - /** - * A linkage into a list of locks in a closure. - * - * \see cl_lock_closure - */ - cfs_list_t cll_inclosure; - /** - * Confict lock at queuing time. - */ - struct cl_lock *cll_conflict; - /** - * A list of references to this lock, for debugging. - */ - struct lu_ref cll_reference; - /** - * A list of holds on this lock, for debugging. - */ - struct lu_ref cll_holders; - /** - * A reference for cl_lock::cll_descr::cld_obj. For debugging. - */ - struct lu_ref_link cll_obj_ref; -#ifdef CONFIG_LOCKDEP - /* "dep_map" name is assumed by lockdep.h macros. */ - struct lockdep_map dep_map; -#endif + struct list_head cll_layers; + /** lock attribute, extent, cl_object, etc. 
*/ + struct cl_lock_descr cll_descr; }; /** * Per-layer part of cl_lock * - * \see ccc_lock, lov_lock, lovsub_lock, osc_lock + * \see lov_lock, osc_lock */ struct cl_lock_slice { struct cl_lock *cls_lock; @@ -1615,180 +1226,40 @@ struct cl_lock_slice { struct cl_object *cls_obj; const struct cl_lock_operations *cls_ops; /** Linkage into cl_lock::cll_layers. Immutable after creation. */ - cfs_list_t cls_linkage; -}; - -/** - * Possible (non-error) return values of ->clo_{enqueue,wait,unlock}(). - * - * NOTE: lov_subresult() depends on ordering here. - */ -enum cl_lock_transition { - /** operation cannot be completed immediately. Wait for state change. */ - CLO_WAIT = 1, - /** operation had to release lock mutex, restart. */ - CLO_REPEAT = 2, - /** lower layer re-enqueued. */ - CLO_REENQUEUED = 3, + struct list_head cls_linkage; }; /** * - * \see vvp_lock_ops, lov_lock_ops, lovsub_lock_ops, osc_lock_ops + * \see lov_lock_ops, osc_lock_ops */ struct cl_lock_operations { - /** - * \name statemachine - * - * State machine transitions. These 3 methods are called to transfer - * lock from one state to another, as described in the commentary - * above enum #cl_lock_state. - * - * \retval 0 this layer has nothing more to do to before - * transition to the target state happens; - * - * \retval CLO_REPEAT method had to release and re-acquire cl_lock - * mutex, repeat invocation of transition method - * across all layers; - * - * \retval CLO_WAIT this layer cannot move to the target state - * immediately, as it has to wait for certain event - * (e.g., the communication with the server). It - * is guaranteed, that when the state transfer - * becomes possible, cl_lock::cll_wq wait-queue - * is signaled. Caller can wait for this event by - * calling cl_lock_state_wait(); - * - * \retval -ve failure, abort state transition, move the lock - * into cl_lock_state::CLS_FREEING state, and set - * cl_lock::cll_error. - * - * Once all layers voted to agree to transition (by returning 0), lock - * is moved into corresponding target state. All state transition - * methods are optional. - */ - /** @{ */ - /** - * Attempts to enqueue the lock. Called top-to-bottom. - * - * \see ccc_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(), - * \see osc_lock_enqueue() - */ - int (*clo_enqueue)(const struct lu_env *env, - const struct cl_lock_slice *slice, - struct cl_io *io, __u32 enqflags); - /** - * Attempts to wait for enqueue result. Called top-to-bottom. - * - * \see ccc_lock_wait(), lov_lock_wait(), osc_lock_wait() - */ - int (*clo_wait)(const struct lu_env *env, - const struct cl_lock_slice *slice); - /** - * Attempts to unlock the lock. Called bottom-to-top. In addition to - * usual return values of lock state-machine methods, this can return - * -ESTALE to indicate that lock cannot be returned to the cache, and - * has to be re-initialized. - * unuse is a one-shot operation, so it must NOT return CLO_WAIT. - * - * \see ccc_lock_unuse(), lov_lock_unuse(), osc_lock_unuse() - */ - int (*clo_unuse)(const struct lu_env *env, - const struct cl_lock_slice *slice); - /** - * Notifies layer that cached lock is started being used. - * - * \pre lock->cll_state == CLS_CACHED - * - * \see lov_lock_use(), osc_lock_use() - */ - int (*clo_use)(const struct lu_env *env, - const struct cl_lock_slice *slice); - /** @} statemachine */ - /** - * A method invoked when lock state is changed (as a result of state - * transition). 
This is used, for example, to track when the state of - * a sub-lock changes, to propagate this change to the corresponding - * top-lock. Optional - * - * \see lovsub_lock_state() - */ - void (*clo_state)(const struct lu_env *env, - const struct cl_lock_slice *slice, - enum cl_lock_state st); - /** - * Returns true, iff given lock is suitable for the given io, idea - * being, that there are certain "unsafe" locks, e.g., ones acquired - * for O_APPEND writes, that we don't want to re-use for a normal - * write, to avoid the danger of cascading evictions. Optional. Runs - * under cl_object_header::coh_lock_guard. - * - * XXX this should take more information about lock needed by - * io. Probably lock description or something similar. - * - * \see lov_fits_into() - */ - int (*clo_fits_into)(const struct lu_env *env, - const struct cl_lock_slice *slice, - const struct cl_lock_descr *need, - const struct cl_io *io); - /** - * \name ast - * Asynchronous System Traps. All of then are optional, all are - * executed bottom-to-top. - */ - /** @{ */ - - /** - * Cancellation callback. Cancel a lock voluntarily, or under - * the request of server. - */ - void (*clo_cancel)(const struct lu_env *env, - const struct cl_lock_slice *slice); - /** - * Lock weighting ast. Executed to estimate how precious this lock - * is. The sum of results across all layers is used to determine - * whether lock worth keeping in cache given present memory usage. - * - * \see osc_lock_weigh(), vvp_lock_weigh(), lovsub_lock_weigh(). - */ - unsigned long (*clo_weigh)(const struct lu_env *env, - const struct cl_lock_slice *slice); - /** @} ast */ - - /** - * \see lovsub_lock_closure() - */ - int (*clo_closure)(const struct lu_env *env, - const struct cl_lock_slice *slice, - struct cl_lock_closure *closure); - /** - * Executed bottom-to-top when lock description changes (e.g., as a - * result of server granting more generous lock than was requested). - * - * \see lovsub_lock_modify() - */ - int (*clo_modify)(const struct lu_env *env, - const struct cl_lock_slice *slice, - const struct cl_lock_descr *updated); - /** - * Notifies layers (bottom-to-top) that lock is going to be - * destroyed. Responsibility of layers is to prevent new references on - * this lock from being acquired once this method returns. - * - * This can be called multiple times due to the races. - * - * \see cl_lock_delete() - * \see osc_lock_delete(), lovsub_lock_delete() - */ - void (*clo_delete)(const struct lu_env *env, - const struct cl_lock_slice *slice); - /** - * Destructor. Frees resources and the slice. - * - * \see ccc_lock_fini(), lov_lock_fini(), lovsub_lock_fini(), - * \see osc_lock_fini() - */ + /** @{ */ + /** + * Attempts to enqueue the lock. Called top-to-bottom. + * + * \retval 0 this layer has enqueued the lock successfully + * \retval >0 this layer has enqueued the lock, but need to wait on + * @anchor for resources + * \retval -ve failure + * + * \see lov_lock_enqueue(), osc_lock_enqueue() + */ + int (*clo_enqueue)(const struct lu_env *env, + const struct cl_lock_slice *slice, + struct cl_io *io, struct cl_sync_io *anchor); + /** + * Cancel a lock, release its DLM lock ref, while does not cancel the + * DLM lock + */ + void (*clo_cancel)(const struct lu_env *env, + const struct cl_lock_slice *slice); + /** @} */ + /** + * Destructor. Frees resources and the slice. + * + * \see lov_lock_fini(), osc_lock_fini() + */ void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice); /** * Optional debugging helper. 
Prints given lock slice. @@ -1837,12 +1308,12 @@ do { \ * @{ */ struct cl_page_list { - unsigned pl_nr; - cfs_list_t pl_pages; - struct task_struct *pl_owner; + unsigned pl_nr; + struct list_head pl_pages; + struct task_struct *pl_owner; }; -/** +/** * A 2-queue of pages. A convenience data-type for common use case, 2-queue * contains an incoming page list and an outgoing page list. */ @@ -1890,7 +1361,6 @@ struct cl_2queue { * (3) sort all locks to avoid dead-locks, and acquire them * * (4) process the chunk: call per-page methods - * (cl_io_operations::cio_read_page() for read, * cl_io_operations::cio_prepare_write(), * cl_io_operations::cio_commit_write() for write) * @@ -1908,11 +1378,13 @@ struct cl_2queue { /** IO types */ enum cl_io_type { /** read system call */ - CIT_READ, + CIT_READ = 1, /** write system call */ CIT_WRITE, /** truncate, utime system calls */ CIT_SETATTR, + /** get data version */ + CIT_DATA_VERSION, /** * page fault handling */ @@ -1923,6 +1395,10 @@ enum cl_io_type { */ CIT_FSYNC, /** + * glimpse. An io context to acquire glimpse lock. + */ + CIT_GLIMPSE, + /** * Miscellaneous io. This is used for occasional io activity that * doesn't fit into other types. Currently this is used for: * @@ -1933,8 +1409,6 @@ enum cl_io_type { * - VM induced page write-out. An io context for writing page out * for memory cleansing; * - * - glimpse. An io context to acquire glimpse lock. - * * - grouplock. An io context to acquire group lock. * * CIT_MISC io is used simply as a context in which locks and pages @@ -1942,6 +1416,11 @@ enum cl_io_type { * cl_io_loop() is never called for it. */ CIT_MISC, + /** + * ladvise handling + * To give advice about access of a file + */ + CIT_LADVISE, CIT_OP_NR }; @@ -1975,23 +1454,48 @@ enum cl_io_state { * This is usually embedded into layer session data, rather than allocated * dynamically. * - * \see vvp_io, lov_io, osc_io, ccc_io + * \see vvp_io, lov_io, osc_io */ struct cl_io_slice { - struct cl_io *cis_io; - /** corresponding object slice. Immutable after creation. */ - struct cl_object *cis_obj; - /** io operations. Immutable after creation. */ - const struct cl_io_operations *cis_iop; - /** - * linkage into a list of all slices for a given cl_io, hanging off - * cl_io::ci_layers. Immutable after creation. - */ - cfs_list_t cis_linkage; + struct cl_io *cis_io; + /** corresponding object slice. Immutable after creation. */ + struct cl_object *cis_obj; + /** io operations. Immutable after creation. */ + const struct cl_io_operations *cis_iop; + /** + * linkage into a list of all slices for a given cl_io, hanging off + * cl_io::ci_layers. Immutable after creation. + */ + struct list_head cis_linkage; }; typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *, - struct cl_page *); + struct pagevec *); + +struct cl_read_ahead { + /* Maximum page index the readahead window will end. + * This is determined DLM lock coverage, RPC and stripe boundary. + * cra_end is included. */ + pgoff_t cra_end_idx; + /* optimal RPC size for this read, by pages */ + unsigned long cra_rpc_pages; + /* Release callback. If readahead holds resources underneath, this + * function should be called to release it. 
*/ + void (*cra_release)(const struct lu_env *env, void *cbdata); + /* Callback data for cra_release routine */ + void *cra_cbdata; + /* whether lock is in contention */ + bool cra_contention; +}; + +static inline void cl_read_ahead_release(const struct lu_env *env, + struct cl_read_ahead *ra) +{ + if (ra->cra_release != NULL) + ra->cra_release(env, ra->cra_cbdata); + memset(ra, 0, sizeof(*ra)); +} + /** * Per-layer io operations. @@ -2098,17 +1602,14 @@ struct cl_io_operations { const struct cl_io_slice *slice, struct cl_page_list *queue, int from, int to, cl_commit_cbt cb); - /** - * Read missing page. - * - * Called by a top-level cl_io_operations::op[CIT_READ]::cio_start() - * method, when it hits not-up-to-date page in the range. Optional. - * - * \pre io->ci_type == CIT_READ - */ - int (*cio_read_page)(const struct lu_env *env, - const struct cl_io_slice *slice, - const struct cl_page_slice *page); + /** + * Decide maximum read ahead extent + * + * \pre io->ci_type == CIT_READ + */ + int (*cio_read_ahead)(const struct lu_env *env, + const struct cl_io_slice *slice, + pgoff_t start, struct cl_read_ahead *ra); /** * Optional debugging helper. Print given io slice. */ @@ -2126,25 +1627,30 @@ enum cl_enq_flags { * -EWOULDBLOCK is returned immediately. */ CEF_NONBLOCK = 0x00000001, - /** - * take lock asynchronously (out of order), as it cannot - * deadlock. This is for LDLM_FL_HAS_INTENT locks used for glimpsing. - */ - CEF_ASYNC = 0x00000002, + /** + * Tell lower layers this is a glimpse request, translated to + * LDLM_FL_HAS_INTENT at LDLM layer. + * + * Also, because glimpse locks never block other locks, we count this + * as automatically compatible with other osc locks. + * (see osc_lock_compatible) + */ + CEF_GLIMPSE = 0x00000002, /** * tell the server to instruct (though a flag in the blocking ast) an * owner of the conflicting lock, that it can drop dirty pages * protected by this lock, without sending them to the server. */ CEF_DISCARD_DATA = 0x00000004, - /** - * tell the sub layers that it must be a `real' lock. This is used for - * mmapped-buffer locks and glimpse locks that must be never converted - * into lockless mode. - * - * \see vvp_mmap_locks(), cl_glimpse_lock(). - */ - CEF_MUST = 0x00000008, + /** + * tell the sub layers that it must be a `real' lock. This is used for + * mmapped-buffer locks, glimpse locks, manually requested locks + * (LU_LADVISE_LOCKAHEAD) that must never be converted into lockless + * mode. + * + * \see vvp_mmap_locks(), cl_glimpse_lock, cl_request_lock(). + */ + CEF_MUST = 0x00000008, /** * tell the sub layers that never request a `real' lock. This flag is * not used currently. @@ -2157,13 +1663,33 @@ enum cl_enq_flags { */ CEF_NEVER = 0x00000010, /** - * for async glimpse lock. - */ - CEF_AGL = 0x00000020, - /** - * mask of enq_flags. + * tell the dlm layer this is a speculative lock request + * speculative lock requests are locks which are not requested as part + * of an I/O operation. Instead, they are requested because we expect + * to use them in the future. They are requested asynchronously at the + * ptlrpc layer. + * + * Currently used for asynchronous glimpse locks and manually requested + * locks (LU_LADVISE_LOCKAHEAD). */ - CEF_MASK = 0x0000003f, + CEF_SPECULATIVE = 0x00000020, + /** + * enqueue a lock to test DLM lock existence. + */ + CEF_PEEK = 0x00000040, + /** + * Lock match only. Used by group lock in I/O as group lock + * is known to exist. 
+ */ + CEF_LOCK_MATCH = 0x00000080, + /** + * tell the DLM layer to lock only the requested range + */ + CEF_LOCK_NO_EXPAND = 0x00000100, + /** + * mask of enq_flags. + */ + CEF_MASK = 0x000001ff, }; /** @@ -2171,14 +1697,14 @@ enum cl_enq_flags { * same lock can be part of multiple io's simultaneously. */ struct cl_io_lock_link { - /** linkage into one of cl_lockset lists. */ - cfs_list_t cill_linkage; - struct cl_lock_descr cill_descr; - struct cl_lock *cill_lock; - /** optional destructor */ - void (*cill_fini)(const struct lu_env *env, - struct cl_io_lock_link *link); + /** linkage into one of cl_lockset lists. */ + struct list_head cill_linkage; + struct cl_lock cill_lock; + /** optional destructor */ + void (*cill_fini)(const struct lu_env *env, + struct cl_io_lock_link *link); }; +#define cill_descr cill_lock.cll_descr /** * Lock-set represents a collection of locks, that io needs at a @@ -2210,12 +1736,10 @@ struct cl_io_lock_link { * enqueued. */ struct cl_lockset { - /** locks to be acquired. */ - cfs_list_t cls_todo; - /** locks currently being processed. */ - cfs_list_t cls_curr; - /** locks acquired. */ - cfs_list_t cls_done; + /** locks to be acquired. */ + struct list_head cls_todo; + /** locks acquired. */ + struct list_head cls_done; }; /** @@ -2244,12 +1768,11 @@ enum cl_fsync_mode { }; struct cl_io_rw_common { - loff_t crw_pos; - size_t crw_count; - int crw_nonblock; + loff_t crw_pos; + size_t crw_count; + int crw_nonblock; }; - /** * State for io. * @@ -2272,31 +1795,42 @@ struct cl_io { */ struct cl_io *ci_parent; /** List of slices. Immutable after creation. */ - cfs_list_t ci_layers; + struct list_head ci_layers; /** list of locks (to be) acquired by this io. */ struct cl_lockset ci_lockset; /** lock requirements, this is just a help info for sublayers. */ enum cl_io_lock_dmd ci_lockreq; - union { - struct cl_rd_io { - struct cl_io_rw_common rd; - } ci_rd; - struct cl_wr_io { - struct cl_io_rw_common wr; - int wr_append; + /** layout version when this IO occurs */ + __u32 ci_layout_version; + union { + struct cl_rd_io { + struct cl_io_rw_common rd; + } ci_rd; + struct cl_wr_io { + struct cl_io_rw_common wr; + int wr_append; int wr_sync; - } ci_wr; - struct cl_io_rw_common ci_rw; - struct cl_setattr_io { - struct ost_lvb sa_attr; - unsigned int sa_valid; - struct obd_capa *sa_capa; - } ci_setattr; + } ci_wr; + struct cl_io_rw_common ci_rw; + struct cl_setattr_io { + struct ost_lvb sa_attr; + unsigned int sa_attr_flags; + unsigned int sa_avalid; /* ATTR_* */ + unsigned int sa_xvalid; /* OP_XVALID */ + int sa_stripe_index; + struct ost_layout sa_layout; + const struct lu_fid *sa_parent_fid; + } ci_setattr; + struct cl_data_version_io { + u64 dv_data_version; + u32 dv_layout_version; + int dv_flags; + } ci_data_version; struct cl_fault_io { /** page index within file. */ pgoff_t ft_index; /** bytes valid byte on a faulted page. */ - int ft_nob; + size_t ft_nob; /** writable page? for nopage() only */ int ft_writable; /** page of an executable? 
*/ @@ -2309,13 +1843,20 @@ struct cl_io { struct cl_fsync_io { loff_t fi_start; loff_t fi_end; - struct obd_capa *fi_capa; /** file system level fid */ struct lu_fid *fi_fid; enum cl_fsync_mode fi_mode; /* how many pages were written/discarded */ unsigned int fi_nr_written; } ci_fsync; + struct cl_ladvise_io { + __u64 li_start; + __u64 li_end; + /** file system level fid */ + struct lu_fid *li_fid; + enum lu_ladvise_type li_advice; + __u64 li_flags; + } ci_ladvise; } u; struct cl_2queue ci_queue; size_t ci_nob; @@ -2337,6 +1878,13 @@ struct cl_io { */ ci_ignore_layout:1, /** + * Need MDS intervention to complete a write. + * Write intent is required for the following cases: + * 1. component being written is not initialized, or + * 2. the mirrored files are NOT in WRITE_PENDING state. + */ + ci_need_write_intent:1, + /** * Check if layout changed after the IO finishes. Mainly for HSM * requirement. If IO occurs to openning files, it doesn't need to * verify layout because HSM won't release openning files. @@ -2351,189 +1899,65 @@ struct cl_io { /** * O_NOATIME */ - ci_noatime:1; + ci_noatime:1, + /* Tell sublayers not to expand LDLM locks requested for this IO */ + ci_lock_no_expand:1, + /** + * Set if non-delay RPC should be used for this IO. + * + * If this file has multiple mirrors, and if the OSTs of the current + * mirror is inaccessible, non-delay RPC would error out quickly so + * that the upper layer can try to access the next mirror. + */ + ci_ndelay:1, + /** + * Set if IO is triggered by async workqueue readahead. + */ + ci_async_readahead:1, + /** + * Ignore lockless and do normal locking for this io. + */ + ci_ignore_lockless:1, + /** + * Set if we've tried all mirrors for this read IO, if it's not set, + * the read IO will check to-be-read OSCs' status, and make fast-switch + * another mirror if some of the OSTs are not healthy. + */ + ci_tried_all_mirrors:1; + /** + * How many times the read has retried before this one. + * Set by the top level and consumed by the LOV. + */ + unsigned ci_ndelay_tried; + /** + * Designated mirror index for this I/O. + */ + unsigned ci_designated_mirror; /** * Number of pages owned by this IO. For invariant checking. */ unsigned ci_owned_nr; + /** + * Range of write intent. Valid if ci_need_write_intent is set. + */ + struct lu_extent ci_write_intent; }; /** @} cl_io */ -/** \addtogroup cl_req cl_req - * @{ */ -/** \struct cl_req - * Transfer. - * - * There are two possible modes of transfer initiation on the client: - * - * - immediate transfer: this is started when a high level io wants a page - * or a collection of pages to be transferred right away. Examples: - * read-ahead, synchronous read in the case of non-page aligned write, - * page write-out as a part of extent lock cancellation, page write-out - * as a part of memory cleansing. Immediate transfer can be both - * cl_req_type::CRT_READ and cl_req_type::CRT_WRITE; - * - * - opportunistic transfer (cl_req_type::CRT_WRITE only), that happens - * when io wants to transfer a page to the server some time later, when - * it can be done efficiently. Example: pages dirtied by the write(2) - * path. - * - * In any case, transfer takes place in the form of a cl_req, which is a - * representation for a network RPC. - * - * Pages queued for an opportunistic transfer are cached until it is decided - * that efficient RPC can be composed of them. This decision is made by "a - * req-formation engine", currently implemented as a part of osc - * layer. 
Req-formation depends on many factors: the size of the resulting - * RPC, whether or not multi-object RPCs are supported by the server, - * max-rpc-in-flight limitations, size of the dirty cache, etc. - * - * For the immediate transfer io submits a cl_page_list, that req-formation - * engine slices into cl_req's, possibly adding cached pages to some of - * the resulting req's. - * - * Whenever a page from cl_page_list is added to a newly constructed req, its - * cl_page_operations::cpo_prep() layer methods are called. At that moment, - * page state is atomically changed from cl_page_state::CPS_OWNED to - * cl_page_state::CPS_PAGEOUT or cl_page_state::CPS_PAGEIN, cl_page::cp_owner - * is zeroed, and cl_page::cp_req is set to the - * req. cl_page_operations::cpo_prep() method at the particular layer might - * return -EALREADY to indicate that it does not need to submit this page - * at all. This is possible, for example, if page, submitted for read, - * became up-to-date in the meantime; and for write, the page don't have - * dirty bit marked. \see cl_io_submit_rw() - * - * Whenever a cached page is added to a newly constructed req, its - * cl_page_operations::cpo_make_ready() layer methods are called. At that - * moment, page state is atomically changed from cl_page_state::CPS_CACHED to - * cl_page_state::CPS_PAGEOUT, and cl_page::cp_req is set to - * req. cl_page_operations::cpo_make_ready() method at the particular layer - * might return -EAGAIN to indicate that this page is not eligible for the - * transfer right now. - * - * FUTURE - * - * Plan is to divide transfers into "priority bands" (indicated when - * submitting cl_page_list, and queuing a page for the opportunistic transfer) - * and allow glueing of cached pages to immediate transfers only within single - * band. This would make high priority transfers (like lock cancellation or - * memory pressure induced write-out) really high priority. - * - */ - /** * Per-transfer attributes. */ struct cl_req_attr { + enum cl_req_type cra_type; + u64 cra_flags; + struct cl_page *cra_page; /** Generic attributes for the server consumption. */ struct obdo *cra_oa; - /** Capability. */ - struct obd_capa *cra_capa; /** Jobid */ - char cra_jobid[JOBSTATS_JOBID_SIZE]; + char cra_jobid[LUSTRE_JOBID_SIZE]; }; -/** - * Transfer request operations definable at every layer. - * - * Concurrency: transfer formation engine synchronizes calls to all transfer - * methods. - */ -struct cl_req_operations { - /** - * Invoked top-to-bottom by cl_req_prep() when transfer formation is - * complete (all pages are added). - * - * \see osc_req_prep() - */ - int (*cro_prep)(const struct lu_env *env, - const struct cl_req_slice *slice); - /** - * Called top-to-bottom to fill in \a oa fields. This is called twice - * with different flags, see bug 10150 and osc_build_req(). - * - * \param obj an object from cl_req which attributes are to be set in - * \a oa. - * - * \param oa struct obdo where attributes are placed - * - * \param flags \a oa fields to be filled. - */ - void (*cro_attr_set)(const struct lu_env *env, - const struct cl_req_slice *slice, - const struct cl_object *obj, - struct cl_req_attr *attr, obd_valid flags); - /** - * Called top-to-bottom from cl_req_completion() to notify layers that - * transfer completed. Has to free all state allocated by - * cl_device_operations::cdo_req_init(). 
- */ - void (*cro_completion)(const struct lu_env *env, - const struct cl_req_slice *slice, int ioret); -}; - -/** - * A per-object state that (potentially multi-object) transfer request keeps. - */ -struct cl_req_obj { - /** object itself */ - struct cl_object *ro_obj; - /** reference to cl_req_obj::ro_obj. For debugging. */ - struct lu_ref_link ro_obj_ref; - /* something else? Number of pages for a given object? */ -}; - -/** - * Transfer request. - * - * Transfer requests are not reference counted, because IO sub-system owns - * them exclusively and knows when to free them. - * - * Life cycle. - * - * cl_req is created by cl_req_alloc() that calls - * cl_device_operations::cdo_req_init() device methods to allocate per-req - * state in every layer. - * - * Then pages are added (cl_req_page_add()), req keeps track of all objects it - * contains pages for. - * - * Once all pages were collected, cl_page_operations::cpo_prep() method is - * called top-to-bottom. At that point layers can modify req, let it pass, or - * deny it completely. This is to support things like SNS that have transfer - * ordering requirements invisible to the individual req-formation engine. - * - * On transfer completion (or transfer timeout, or failure to initiate the - * transfer of an allocated req), cl_req_operations::cro_completion() method - * is called, after execution of cl_page_operations::cpo_completion() of all - * req's pages. - */ -struct cl_req { - enum cl_req_type crq_type; - /** A list of pages being transfered */ - cfs_list_t crq_pages; - /** Number of pages in cl_req::crq_pages */ - unsigned crq_nrpages; - /** An array of objects which pages are in ->crq_pages */ - struct cl_req_obj *crq_o; - /** Number of elements in cl_req::crq_objs[] */ - unsigned crq_nrobjs; - cfs_list_t crq_layers; -}; - -/** - * Per-layer state for request. - */ -struct cl_req_slice { - struct cl_req *crs_req; - struct cl_device *crs_dev; - cfs_list_t crs_linkage; - const struct cl_req_operations *crs_ops; -}; - -/* @} cl_req */ - enum cache_stats_item { /** how many cache lookups were performed */ CS_lookup = 0, @@ -2579,9 +2003,7 @@ struct cl_site { * and top-locks (and top-pages) are accounted here. 
*/ struct cache_stats cs_pages; - struct cache_stats cs_locks; atomic_t cs_pages_state[CPS_NR]; - atomic_t cs_locks_state[CLS_NR]; }; int cl_site_init(struct cl_site *s, struct cl_device *top); @@ -2606,11 +2028,6 @@ static inline struct cl_site *lu2cl_site(const struct lu_site *site) return container_of(site, struct cl_site, cs_lu); } -static inline int lu_device_is_cl(const struct lu_device *d) -{ - return d->ld_type->ldt_tags & LU_DEVICE_CL; -} - static inline struct cl_device *lu2cl_dev(const struct lu_device *d) { LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d)); @@ -2673,9 +2090,6 @@ void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice, const struct cl_lock_operations *ops); void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice, struct cl_object *obj, const struct cl_io_operations *ops); -void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice, - struct cl_device *dev, - const struct cl_req_operations *ops); /** @} helpers */ /** \defgroup cl_object cl_object @@ -2691,17 +2105,27 @@ void cl_object_put (const struct lu_env *env, struct cl_object *o); void cl_object_get (struct cl_object *o); void cl_object_attr_lock (struct cl_object *o); void cl_object_attr_unlock(struct cl_object *o); -int cl_object_attr_get (const struct lu_env *env, struct cl_object *obj, - struct cl_attr *attr); -int cl_object_attr_set (const struct lu_env *env, struct cl_object *obj, +int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj, + struct cl_attr *attr); +int cl_object_attr_update(const struct lu_env *env, struct cl_object *obj, const struct cl_attr *attr, unsigned valid); int cl_object_glimpse (const struct lu_env *env, struct cl_object *obj, struct ost_lvb *lvb); int cl_conf_set (const struct lu_env *env, struct cl_object *obj, const struct cl_object_conf *conf); -void cl_object_prune (const struct lu_env *env, struct cl_object *obj); +int cl_object_prune (const struct lu_env *env, struct cl_object *obj); void cl_object_kill (const struct lu_env *env, struct cl_object *obj); -int cl_object_has_locks (struct cl_object *obj); +int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj, + struct lov_user_md __user *lum, size_t size); +int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj, + struct ll_fiemap_info_key *fmkey, struct fiemap *fiemap, + size_t *buflen); +int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj, + struct cl_layout *cl); +loff_t cl_object_maxbytes(struct cl_object *obj); +int cl_object_flush(const struct lu_env *env, struct cl_object *obj, + struct ldlm_lock *lock); + /** * Returns true, iff \a o0 and \a o1 are slices of the same object. 
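A usage note on the attribute helpers declared in the hunk above: the intended calling convention is lock, read, modify, update, unlock on the top-object. The sketch below is illustrative only; the helper name is hypothetical and it assumes the cat_size field of struct cl_attr and the CAT_SIZE validity flag defined earlier in this header.

	static int example_extend_size(const struct lu_env *env,
				       struct cl_object *obj, loff_t new_size)
	{
		struct cl_attr attr = { 0 };
		int rc;

		/* hold the attr guard of the top-object across get/update */
		cl_object_attr_lock(obj);
		rc = cl_object_attr_get(env, obj, &attr);
		if (rc == 0 && attr.cat_size < new_size) {
			attr.cat_size = new_size;
			rc = cl_object_attr_update(env, obj, &attr, CAT_SIZE);
		}
		cl_object_attr_unlock(obj);

		return rc;
	}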
@@ -2715,6 +2139,7 @@ static inline void cl_object_page_init(struct cl_object *clob, int size) { clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize; cl_object_header(clob)->coh_page_bufsize += cfs_size_round(size); + WARN_ON(cl_object_header(clob)->coh_page_bufsize > 512); } static inline void *cl_object_page_slice(struct cl_object *clob, @@ -2755,6 +2180,9 @@ struct cl_page *cl_page_alloc (const struct lu_env *env, void cl_page_get (struct cl_page *page); void cl_page_put (const struct lu_env *env, struct cl_page *page); +void cl_pagevec_put (const struct lu_env *env, + struct cl_page *page, + struct pagevec *pvec); void cl_page_print (const struct lu_env *env, void *cookie, lu_printer_t printer, const struct cl_page *pg); @@ -2817,153 +2245,95 @@ int cl_page_flush (const struct lu_env *env, struct cl_io *io, * Functions to discard, delete and export a cl_page. */ /** @{ */ -void cl_page_discard (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg); -void cl_page_delete (const struct lu_env *env, struct cl_page *pg); -int cl_page_is_vmlocked (const struct lu_env *env, - const struct cl_page *pg); -void cl_page_export (const struct lu_env *env, - struct cl_page *pg, int uptodate); -int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io, - struct cl_page *page, pgoff_t *max_index); -loff_t cl_offset (const struct cl_object *obj, pgoff_t idx); -pgoff_t cl_index (const struct cl_object *obj, loff_t offset); -int cl_page_size (const struct cl_object *obj); -int cl_pages_prune (const struct lu_env *env, struct cl_object *obj); - -void cl_lock_print (const struct lu_env *env, void *cookie, - lu_printer_t printer, const struct cl_lock *lock); +void cl_page_discard(const struct lu_env *env, struct cl_io *io, + struct cl_page *pg); +void cl_page_delete(const struct lu_env *env, struct cl_page *pg); +int cl_page_is_vmlocked(const struct lu_env *env, + const struct cl_page *pg); +void cl_page_touch(const struct lu_env *env, const struct cl_page *pg, + size_t to); +void cl_page_export(const struct lu_env *env, + struct cl_page *pg, int uptodate); +loff_t cl_offset(const struct cl_object *obj, pgoff_t idx); +pgoff_t cl_index(const struct cl_object *obj, loff_t offset); +size_t cl_page_size(const struct cl_object *obj); + +void cl_lock_print(const struct lu_env *env, void *cookie, + lu_printer_t printer, const struct cl_lock *lock); void cl_lock_descr_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, - const struct cl_lock_descr *descr); + lu_printer_t printer, + const struct cl_lock_descr *descr); /* @} helper */ +/** + * Data structure managing a client's cached pages. A count of + * "unstable" pages is maintained, and an LRU of clean pages is + * maintained. "unstable" pages are pages pinned by the ptlrpc + * layer for recovery purposes. 
+ */ +struct cl_client_cache { + /** + * # of client cache refcount + * # of users (OSCs) + 2 (held by llite and lov) + */ + atomic_t ccc_users; + /** + * # of threads are doing shrinking + */ + unsigned int ccc_lru_shrinkers; + /** + * # of LRU entries available + */ + atomic_long_t ccc_lru_left; + /** + * List of entities(OSCs) for this LRU cache + */ + struct list_head ccc_lru; + /** + * Max # of LRU entries + */ + unsigned long ccc_lru_max; + /** + * Lock to protect ccc_lru list + */ + spinlock_t ccc_lru_lock; + /** + * Set if unstable check is enabled + */ + unsigned int ccc_unstable_check:1; + /** + * # of unstable pages for this mount point + */ + atomic_long_t ccc_unstable_nr; + /** + * Waitq for awaiting unstable pages to reach zero. + * Used at umounting time and signaled on BRW commit + */ + wait_queue_head_t ccc_unstable_waitq; +}; +/** + * cl_cache functions + */ +struct cl_client_cache *cl_cache_init(unsigned long lru_page_max); +void cl_cache_incref(struct cl_client_cache *cache); +void cl_cache_decref(struct cl_client_cache *cache); + /** @} cl_page */ /** \defgroup cl_lock cl_lock * @{ */ - -struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io, - const struct cl_lock_descr *need, - const char *scope, const void *source); -struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io, - const struct cl_lock_descr *need, - const char *scope, const void *source); -struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io, - const struct cl_lock_descr *need, - const char *scope, const void *source); -struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env, - struct cl_object *obj, pgoff_t index, - struct cl_lock *except, int pending, - int canceld); +int cl_lock_request(const struct lu_env *env, struct cl_io *io, + struct cl_lock *lock); +int cl_lock_init(const struct lu_env *env, struct cl_lock *lock, + const struct cl_io *io); +void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock); const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock, - const struct lu_device_type *dtype); - -void cl_lock_get (struct cl_lock *lock); -void cl_lock_get_trust (struct cl_lock *lock); -void cl_lock_put (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_hold_add (const struct lu_env *env, struct cl_lock *lock, - const char *scope, const void *source); -void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock, - const char *scope, const void *source); -void cl_lock_unhold (const struct lu_env *env, struct cl_lock *lock, - const char *scope, const void *source); -void cl_lock_release (const struct lu_env *env, struct cl_lock *lock, - const char *scope, const void *source); -void cl_lock_user_add (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock); - -enum cl_lock_state cl_lock_intransit(const struct lu_env *env, - struct cl_lock *lock); -void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock, - enum cl_lock_state state); -int cl_lock_is_intransit(struct cl_lock *lock); - -int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock, - int keep_mutex); - -/** \name statemachine statemachine - * Interface to lock state machine consists of 3 parts: - * - * - "try" functions that attempt to effect a state transition. If state - * transition is not possible right now (e.g., if it has to wait for some - * asynchronous event to occur), these functions return - * cl_lock_transition::CLO_WAIT. 
- * - * - "non-try" functions that implement synchronous blocking interface on - * top of non-blocking "try" functions. These functions repeatedly call - * corresponding "try" versions, and if state transition is not possible - * immediately, wait for lock state change. - * - * - methods from cl_lock_operations, called by "try" functions. Lock can - * be advanced to the target state only when all layers voted that they - * are ready for this transition. "Try" functions call methods under lock - * mutex. If a layer had to release a mutex, it re-acquires it and returns - * cl_lock_transition::CLO_REPEAT, causing "try" function to call all - * layers again. - * - * TRY NON-TRY METHOD FINAL STATE - * - * cl_enqueue_try() cl_enqueue() cl_lock_operations::clo_enqueue() CLS_ENQUEUED - * - * cl_wait_try() cl_wait() cl_lock_operations::clo_wait() CLS_HELD - * - * cl_unuse_try() cl_unuse() cl_lock_operations::clo_unuse() CLS_CACHED - * - * cl_use_try() NONE cl_lock_operations::clo_use() CLS_HELD - * - * @{ */ - -int cl_enqueue (const struct lu_env *env, struct cl_lock *lock, - struct cl_io *io, __u32 flags); -int cl_wait (const struct lu_env *env, struct cl_lock *lock); -void cl_unuse (const struct lu_env *env, struct cl_lock *lock); -int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock, - struct cl_io *io, __u32 flags); -int cl_unuse_try (const struct lu_env *env, struct cl_lock *lock); -int cl_wait_try (const struct lu_env *env, struct cl_lock *lock); -int cl_use_try (const struct lu_env *env, struct cl_lock *lock, int atomic); - -/** @} statemachine */ - -void cl_lock_signal (const struct lu_env *env, struct cl_lock *lock); -int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock, - enum cl_lock_state state); -int cl_queue_match (const cfs_list_t *queue, - const struct cl_lock_descr *need); - -void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock); -int cl_lock_mutex_try (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_mutex_put (const struct lu_env *env, struct cl_lock *lock); -int cl_lock_is_mutexed (struct cl_lock *lock); -int cl_lock_nr_mutexed (const struct lu_env *env); -int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock); -int cl_lock_ext_match (const struct cl_lock_descr *has, - const struct cl_lock_descr *need); -int cl_lock_descr_match(const struct cl_lock_descr *has, - const struct cl_lock_descr *need); -int cl_lock_mode_match (enum cl_lock_mode has, enum cl_lock_mode need); -int cl_lock_modify (const struct lu_env *env, struct cl_lock *lock, - const struct cl_lock_descr *desc); - -void cl_lock_closure_init (const struct lu_env *env, - struct cl_lock_closure *closure, - struct cl_lock *origin, int wait); -void cl_lock_closure_fini (struct cl_lock_closure *closure); -int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock, - struct cl_lock_closure *closure); -void cl_lock_disclosure (const struct lu_env *env, - struct cl_lock_closure *closure); -int cl_lock_enclosure (const struct lu_env *env, struct cl_lock *lock, - struct cl_lock_closure *closure); + const struct lu_device_type *dtype); +void cl_lock_release(const struct lu_env *env, struct cl_lock *lock); +int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io, + struct cl_lock *lock, struct cl_sync_io *anchor); void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock); -void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock); -void 
cl_lock_error (const struct lu_env *env, struct cl_lock *lock, int error); -void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait); - -unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock); /** @} cl_lock */ @@ -2989,8 +2359,6 @@ int cl_io_lock_add (const struct lu_env *env, struct cl_io *io, struct cl_io_lock_link *link); int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, struct cl_lock_descr *descr); -int cl_io_read_page (const struct lu_env *env, struct cl_io *io, - struct cl_page *page); int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io, enum cl_req_type iot, struct cl_2queue *queue); int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io, @@ -2999,18 +2367,19 @@ int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io, int cl_io_commit_async (const struct lu_env *env, struct cl_io *io, struct cl_page_list *queue, int from, int to, cl_commit_cbt cb); +int cl_io_read_ahead (const struct lu_env *env, struct cl_io *io, + pgoff_t start, struct cl_read_ahead *ra); void cl_io_rw_advance (const struct lu_env *env, struct cl_io *io, size_t nob); int cl_io_cancel (const struct lu_env *env, struct cl_io *io, struct cl_page_list *queue); -int cl_io_is_going (const struct lu_env *env); /** * True, iff \a io is an O_APPEND write(2). */ static inline int cl_io_is_append(const struct cl_io *io) { - return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append; + return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append; } static inline int cl_io_is_sync_write(const struct cl_io *io) @@ -3028,8 +2397,8 @@ static inline int cl_io_is_mkwrite(const struct cl_io *io) */ static inline int cl_io_is_trunc(const struct cl_io *io) { - return io->ci_type == CIT_SETATTR && - (io->u.ci_setattr.sa_valid & ATTR_SIZE); + return io->ci_type == CIT_SETATTR && + (io->u.ci_setattr.sa_avalid & ATTR_SIZE); } struct cl_io *cl_io_top(struct cl_io *io); @@ -3037,13 +2406,12 @@ struct cl_io *cl_io_top(struct cl_io *io); void cl_io_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct cl_io *io); -#define CL_IO_SLICE_CLEAN(foo_io, base) \ -do { \ - typeof(foo_io) __foo_io = (foo_io); \ - \ - CLASSERT(offsetof(typeof(*__foo_io), base) == 0); \ - memset(&__foo_io->base + 1, 0, \ - (sizeof *__foo_io) - sizeof __foo_io->base); \ +#define CL_IO_SLICE_CLEAN(foo_io, base) \ +do { \ + typeof(foo_io) __foo_io = (foo_io); \ + \ + memset(&__foo_io->base, 0, \ + sizeof(*__foo_io) - offsetof(typeof(*__foo_io), base)); \ } while (0) /** @} cl_io */ @@ -3056,27 +2424,27 @@ do { \ */ static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist) { - LASSERT(plist->pl_nr > 0); - return cfs_list_entry(plist->pl_pages.prev, struct cl_page, cp_batch); + LASSERT(plist->pl_nr > 0); + return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch); } static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist) { LASSERT(plist->pl_nr > 0); - return cfs_list_entry(plist->pl_pages.next, struct cl_page, cp_batch); + return list_entry(plist->pl_pages.next, struct cl_page, cp_batch); } /** * Iterate over pages in a page list. */ #define cl_page_list_for_each(page, list) \ - cfs_list_for_each_entry((page), &(list)->pl_pages, cp_batch) + list_for_each_entry((page), &(list)->pl_pages, cp_batch) /** * Iterate over pages in a page list, taking possible removals into account. 
*/ #define cl_page_list_for_each_safe(page, temp, list) \ - cfs_list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch) + list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch) void cl_page_list_init (struct cl_page_list *plist); void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page); @@ -3090,8 +2458,6 @@ void cl_page_list_del (const struct lu_env *env, struct cl_page_list *plist, struct cl_page *page); void cl_page_list_disown (const struct lu_env *env, struct cl_io *io, struct cl_page_list *plist); -int cl_page_list_own (const struct lu_env *env, - struct cl_io *io, struct cl_page_list *plist); void cl_page_list_assume (const struct lu_env *env, struct cl_io *io, struct cl_page_list *plist); void cl_page_list_discard(const struct lu_env *env, @@ -3111,22 +2477,29 @@ void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page); /** @} cl_page_list */ -/** \defgroup cl_req cl_req - * @{ */ -struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page, - enum cl_req_type crt, int nr_objects); - -void cl_req_page_add (const struct lu_env *env, struct cl_req *req, - struct cl_page *page); -void cl_req_page_done (const struct lu_env *env, struct cl_page *page); -int cl_req_prep (const struct lu_env *env, struct cl_req *req); -void cl_req_attr_set (const struct lu_env *env, struct cl_req *req, - struct cl_req_attr *attr, obd_valid flags); -void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret); +void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj, + struct cl_req_attr *attr); /** \defgroup cl_sync_io cl_sync_io * @{ */ +struct cl_sync_io; +struct cl_dio_aio; + +typedef void (cl_sync_io_end_t)(const struct lu_env *, struct cl_sync_io *); + +void cl_sync_io_init_notify(struct cl_sync_io *anchor, int nr, + struct cl_dio_aio *aio, cl_sync_io_end_t *end); + +int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor, + long timeout); +void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor, + int ioret); +static inline void cl_sync_io_init(struct cl_sync_io *anchor, int nr) +{ + cl_sync_io_init_notify(anchor, nr, NULL, NULL); +} + /** * Anchor for synchronous transfer. This is allocated on a stack by thread * doing synchronous transfer, and a pointer to this structure is set up in @@ -3138,22 +2511,24 @@ struct cl_sync_io { atomic_t csi_sync_nr; /** error code. */ int csi_sync_rc; - /** barrier of destroy this structure */ - atomic_t csi_barrier; /** completion to be signaled when transfer is complete. */ wait_queue_head_t csi_waitq; + /** callback to invoke when this IO is finished */ + cl_sync_io_end_t *csi_end_io; + /** aio private data */ + struct cl_dio_aio *csi_aio; }; -void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages); -int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io, - struct cl_page_list *queue, struct cl_sync_io *anchor, - long timeout); -void cl_sync_io_note(struct cl_sync_io *anchor, int ioret); +/** To support Direct AIO */ +struct cl_dio_aio { + struct cl_sync_io cda_sync; + struct cl_page_list cda_pages; + struct kiocb *cda_iocb; + ssize_t cda_bytes; +}; /** @} cl_sync_io */ -/** @} cl_req */ - /** \defgroup cl_env cl_env * * lu_env handling for a client. 
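The cl_sync_io declarations above imply a simple submit-and-wait pattern: the anchor lives on the submitter's stack, is initialized for the number of in-flight pages, and each completion calls cl_sync_io_note() until the count drops to zero. A minimal sketch follows; the function name is hypothetical, the submission step is elided, and the timeout value is purely illustrative.

	static int example_sync_transfer(const struct lu_env *env, int nr_pages)
	{
		struct cl_sync_io anchor;

		/* one counter slot per page expected to complete */
		cl_sync_io_init(&anchor, nr_pages);

		/* ... queue nr_pages pages for transfer; every page that
		 * completes calls cl_sync_io_note(env, &anchor, ioret) ... */

		/* block until all completions have been noted */
		return cl_sync_io_wait(env, &anchor, 0 /* illustrative */);
	}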
@@ -3177,38 +2552,15 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret); * - allocation and destruction of environment is amortized by caching no * longer used environments instead of destroying them; * - * - there is a notion of "current" environment, attached to the kernel - * data structure representing current thread Top-level lustre code - * allocates an environment and makes it current, then calls into - * non-lustre code, that in turn calls lustre back. Low-level lustre - * code thus called can fetch environment created by the top-level code - * and reuse it, avoiding additional environment allocation. - * Right now, three interfaces can attach the cl_env to running thread: - * - cl_env_get - * - cl_env_implant - * - cl_env_reexit(cl_env_reenter had to be called priorly) - * * \see lu_env, lu_context, lu_context_key * @{ */ -struct cl_env_nest { - int cen_refcheck; - void *cen_cookie; -}; - -struct lu_env *cl_env_peek (int *refcheck); -struct lu_env *cl_env_get (int *refcheck); -struct lu_env *cl_env_alloc (int *refcheck, __u32 tags); -struct lu_env *cl_env_nested_get (struct cl_env_nest *nest); -void cl_env_put (struct lu_env *env, int *refcheck); -void cl_env_nested_put (struct cl_env_nest *nest, struct lu_env *env); -void *cl_env_reenter (void); -void cl_env_reexit (void *cookie); -void cl_env_implant (struct lu_env *env, int *refcheck); -void cl_env_unplant (struct lu_env *env, int *refcheck); -unsigned cl_env_cache_purge(unsigned nr); -struct lu_env *cl_env_percpu_get (void); -void cl_env_percpu_put (struct lu_env *env); +struct lu_env *cl_env_get(__u16 *refcheck); +struct lu_env *cl_env_alloc(__u16 *refcheck, __u32 tags); +void cl_env_put(struct lu_env *env, __u16 *refcheck); +unsigned cl_env_cache_purge(unsigned nr); +struct lu_env *cl_env_percpu_get(void); +void cl_env_percpu_put(struct lu_env *env); /** @} cl_env */
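A minimal sketch of the borrow/return discipline for the cl_env interface declared above: the refcheck cookie filled in by cl_env_get() must be handed back to cl_env_put(). The helper name below is hypothetical.

	static int example_with_client_env(void)
	{
		struct lu_env *env;
		__u16 refcheck;

		/* borrow a (possibly cached) environment for this thread */
		env = cl_env_get(&refcheck);
		if (IS_ERR(env))
			return PTR_ERR(env);

		/* ... issue cl_object_*() / cl_io_*() calls using env ... */

		/* return it with the same cookie */
		cl_env_put(env, &refcheck);

		return 0;
	}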