X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Fcl_object.h;h=63be0fce67117a3446ac689472c534b2ef4f9264;hp=eb0fb23ed60e326477ad5da1113414a405dbc194;hb=f625f670afbe954030ff81f0f8522137d6cdd335;hpb=65e067d5d90270d4237a7271008561a4b432b94d diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h index eb0fb23..63be0fc 100644 --- a/lustre/include/cl_object.h +++ b/lustre/include/cl_object.h @@ -103,7 +103,9 @@ #include #include #include +#include +struct obd_info; struct inode; struct cl_device; @@ -133,17 +135,17 @@ struct cl_req_slice; * \see vvp_cl_ops, lov_cl_ops, lovsub_cl_ops, osc_cl_ops */ struct cl_device_operations { - /** - * Initialize cl_req. This method is called top-to-bottom on all - * devices in the stack to get them a chance to allocate layer-private - * data, and to attach them to the cl_req by calling - * cl_req_slice_add(). - * - * \see osc_req_init(), lov_req_init(), lovsub_req_init() - * \see ccc_req_init() - */ - int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev, - struct cl_req *req); + /** + * Initialize cl_req. This method is called top-to-bottom on all + * devices in the stack to get them a chance to allocate layer-private + * data, and to attach them to the cl_req by calling + * cl_req_slice_add(). + * + * \see osc_req_init(), lov_req_init(), lovsub_req_init() + * \see vvp_req_init() + */ + int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev, + struct cl_req *req); }; /** @@ -347,19 +349,19 @@ struct cl_object_operations { */ int (*coo_io_init)(const struct lu_env *env, struct cl_object *obj, struct cl_io *io); - /** - * Fill portion of \a attr that this layer controls. This method is - * called top-to-bottom through all object layers. - * - * \pre cl_object_header::coh_attr_guard of the top-object is locked. - * - * \return 0: to continue - * \return +ve: to stop iterating through layers (but 0 is returned - * from enclosing cl_object_attr_get()) - * \return -ve: to signal error - */ - int (*coo_attr_get)(const struct lu_env *env, struct cl_object *obj, - struct cl_attr *attr); + /** + * Fill portion of \a attr that this layer controls. This method is + * called top-to-bottom through all object layers. + * + * \pre cl_object_header::coh_attr_guard of the top-object is locked. + * + * \return 0: to continue + * \return +ve: to stop iterating through layers (but 0 is returned + * from enclosing cl_object_attr_get()) + * \return -ve: to signal error + */ + int (*coo_attr_get)(const struct lu_env *env, struct cl_object *obj, + struct cl_attr *attr); /** * Update attributes. * @@ -402,6 +404,29 @@ struct cl_object_operations { */ int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj, struct lov_user_md __user *lum); + /** + * Find whether there is any callback data (ldlm lock) attached upon + * the object. + */ + int (*coo_find_cbdata)(const struct lu_env *env, struct cl_object *obj, + ldlm_iterator_t iter, void *data); + /** + * Get FIEMAP mapping from the object. + */ + int (*coo_fiemap)(const struct lu_env *env, struct cl_object *obj, + struct ll_fiemap_info_key *fmkey, + struct fiemap *fiemap, size_t *buflen); + /** + * Get attributes of the object from server. (top->bottom) + */ + int (*coo_obd_info_get)(const struct lu_env *env, struct cl_object *obj, + struct obd_info *oinfo, + struct ptlrpc_request_set *set); + /** + * Get data version of the object. 
(top->bottom) + */ + int (*coo_data_version)(const struct lu_env *env, struct cl_object *obj, + __u64 *version, int flags); }; /** @@ -887,26 +912,6 @@ struct cl_page_operations { /** Destructor. Frees resources and slice itself. */ void (*cpo_fini)(const struct lu_env *env, struct cl_page_slice *slice); - - /** - * Checks whether the page is protected by a cl_lock. This is a - * per-layer method, because certain layers have ways to check for the - * lock much more efficiently than through the generic locks scan, or - * implement locking mechanisms separate from cl_lock, e.g., - * LL_FILE_GROUP_LOCKED in vvp. If \a pending is true, check for locks - * being canceled, or scheduled for cancellation as soon as the last - * user goes away, too. - * - * \retval -EBUSY: page is protected by a lock of a given mode; - * \retval -ENODATA: page is not protected by a lock; - * \retval 0: this layer cannot decide. - * - * \see cl_page_is_under_lock() - */ - int (*cpo_is_under_lock)(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *io, pgoff_t *max); - /** * Optional debugging helper. Prints given page slice. * @@ -1111,111 +1116,29 @@ static inline bool __page_in_use(const struct cl_page *page, int refc) * * LIFE CYCLE * - * cl_lock is reference counted. When reference counter drops to 0, lock is - * placed in the cache, except when lock is in CLS_FREEING state. CLS_FREEING - * lock is destroyed when last reference is released. Referencing between - * top-lock and its sub-locks is described in the lov documentation module. - * - * STATE MACHINE - * - * Also, cl_lock is a state machine. This requires some clarification. One of - * the goals of client IO re-write was to make IO path non-blocking, or at - * least to make it easier to make it non-blocking in the future. Here - * `non-blocking' means that when a system call (read, write, truncate) - * reaches a situation where it has to wait for a communication with the - * server, it should --instead of waiting-- remember its current state and - * switch to some other work. E.g,. instead of waiting for a lock enqueue, - * client should proceed doing IO on the next stripe, etc. Obviously this is - * rather radical redesign, and it is not planned to be fully implemented at - * this time, instead we are putting some infrastructure in place, that would - * make it easier to do asynchronous non-blocking IO easier in the - * future. Specifically, where old locking code goes to sleep (waiting for - * enqueue, for example), new code returns cl_lock_transition::CLO_WAIT. When - * enqueue reply comes, its completion handler signals that lock state-machine - * is ready to transit to the next state. There is some generic code in - * cl_lock.c that sleeps, waiting for these signals. As a result, for users of - * this cl_lock.c code, it looks like locking is done in normal blocking - * fashion, and it the same time it is possible to switch to the non-blocking - * locking (simply by returning cl_lock_transition::CLO_WAIT from cl_lock.c - * functions). - * - * For a description of state machine states and transitions see enum - * cl_lock_state. - * - * There are two ways to restrict a set of states which lock might move to: - * - * - placing a "hold" on a lock guarantees that lock will not be moved - * into cl_lock_state::CLS_FREEING state until hold is released. Hold - * can be only acquired on a lock that is not in - * cl_lock_state::CLS_FREEING. All holds on a lock are counted in - * cl_lock::cll_holds. 
Hold protects lock from cancellation and - * destruction. Requests to cancel and destroy a lock on hold will be - * recorded, but only honored when last hold on a lock is released; - * - * - placing a "user" on a lock guarantees that lock will not leave - * cl_lock_state::CLS_NEW, cl_lock_state::CLS_QUEUING, - * cl_lock_state::CLS_ENQUEUED and cl_lock_state::CLS_HELD set of - * states, once it enters this set. That is, if a user is added onto a - * lock in a state not from this set, it doesn't immediately enforce - * lock to move to this set, but once lock enters this set it will - * remain there until all users are removed. Lock users are counted in - * cl_lock::cll_users. - * - * User is used to assure that lock is not canceled or destroyed while - * it is being enqueued, or actively used by some IO. - * - * Currently, a user always comes with a hold (cl_lock_invariant() - * checks that a number of holds is not less than a number of users). - * - * CONCURRENCY - * - * This is how lock state-machine operates. struct cl_lock contains a mutex - * cl_lock::cll_guard that protects struct fields. - * - * - mutex is taken, and cl_lock::cll_state is examined. - * - * - for every state there are possible target states where lock can move - * into. They are tried in order. Attempts to move into next state are - * done by _try() functions in cl_lock.c:cl_{enqueue,unlock,wait}_try(). - * - * - if the transition can be performed immediately, state is changed, - * and mutex is released. - * - * - if the transition requires blocking, _try() function returns - * cl_lock_transition::CLO_WAIT. Caller unlocks mutex and goes to - * sleep, waiting for possibility of lock state change. It is woken - * up when some event occurs, that makes lock state change possible - * (e.g., the reception of the reply from the server), and repeats - * the loop. - * - * Top-lock and sub-lock has separate mutexes and the latter has to be taken - * first to avoid dead-lock. - * - * To see an example of interaction of all these issues, take a look at the - * lov_cl.c:lov_lock_enqueue() function. It is called as a part of - * cl_enqueue_try(), and tries to advance top-lock to ENQUEUED state, by - * advancing state-machines of its sub-locks (lov_lock_enqueue_one()). Note - * also, that it uses trylock to grab sub-lock mutex to avoid dead-lock. It - * also has to handle CEF_ASYNC enqueue, when sub-locks enqueues have to be - * done in parallel, rather than one after another (this is used for glimpse - * locks, that cannot dead-lock). + * cl_lock is a cacheless data container for the requirements of locks to + * complete the IO. cl_lock is created before I/O starts and destroyed when the + * I/O is complete. + * + * cl_lock depends on LDLM lock to fulfill lock semantics. LDLM lock is attached + * to cl_lock at OSC layer. LDLM lock is still cacheable. * * INTERFACE AND USAGE * - * struct cl_lock_operations provide a number of call-backs that are invoked - * when events of interest occurs. Layers can intercept and handle glimpse, - * blocking, cancel ASTs and a reception of the reply from the server. + * Two major methods are supported for cl_lock: clo_enqueue and clo_cancel. A + * cl_lock is enqueued by cl_lock_request(), which will call clo_enqueue() + * methods for each layer to enqueue the lock. At the LOV layer, if a cl_lock + * consists of multiple sub cl_locks, each sub locks will be enqueued + * correspondingly. 
At OSC layer, the lock enqueue request will tend to reuse + * cached LDLM lock; otherwise a new LDLM lock will have to be requested from + * OST side. * - * One important difference with the old client locking model is that new - * client has a representation for the top-lock, whereas in the old code only - * sub-locks existed as real data structures and file-level locks are - * represented by "request sets" that are created and destroyed on each and - * every lock creation. + * cl_lock_cancel() must be called to release a cl_lock after use. clo_cancel() + * method will be called for each layer to release the resource held by this + * lock. At OSC layer, the reference count of LDLM lock, which is held at + * clo_enqueue time, is released. * - * Top-locks are cached, and can be found in the cache by the system calls. It - * is possible that top-lock is in cache, but some of its sub-locks were - * canceled and destroyed. In that case top-lock has to be enqueued again - * before it can be used. + * LDLM lock can only be canceled if there is no cl_lock using it. * * Overall process of the locking during IO operation is as following: * @@ -1228,7 +1151,7 @@ static inline bool __page_in_use(const struct cl_page *page, int refc) * * - when all locks are acquired, IO is performed; * - * - locks are released into cache. + * - locks are released after IO is complete. * * Striping introduces major additional complexity into locking. The * fundamental problem is that it is generally unsafe to actively use (hold) @@ -1250,16 +1173,6 @@ static inline bool __page_in_use(const struct cl_page *page, int refc) * buf is a part of memory mapped Lustre file, a lock or locks protecting buf * has to be held together with the usual lock on [offset, offset + count]. * - * As multi-stripe locks have to be allowed, it makes sense to cache them, so - * that, for example, a sequence of O_APPEND writes can proceed quickly - * without going down to the individual stripes to do lock matching. On the - * other hand, multi-stripe locks shouldn't be used by normal read/write - * calls. To achieve this, every layer can implement ->clo_fits_into() method, - * that is called by lock matching code (cl_lock_lookup()), and that can be - * used to selectively disable matching of certain locks for certain IOs. For - * exmaple, lov layer implements lov_lock_fits_into() that allow multi-stripe - * locks to be matched only for truncates and O_APPEND writes. - * * Interaction with DLM * * In the expected setup, cl_lock is ultimately backed up by a collection of @@ -1456,7 +1369,6 @@ struct cl_2queue { * (3) sort all locks to avoid dead-locks, and acquire them * * (4) process the chunk: call per-page methods - * (cl_io_operations::cio_read_page() for read, * cl_io_operations::cio_prepare_write(), * cl_io_operations::cio_commit_write() for write) * @@ -1557,7 +1469,28 @@ struct cl_io_slice { }; typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *, - struct cl_page *); + struct cl_page *); + +struct cl_read_ahead { + /* Maximum page index the readahead window will end. + * This is determined DLM lock coverage, RPC and stripe boundary. + * cra_end is included. */ + pgoff_t cra_end; + /* Release routine. If readahead holds resources underneath, this + * function should be called to release it. 
*/ + void (*cra_release)(const struct lu_env *env, void *cbdata); + /* Callback data for cra_release routine */ + void *cra_cbdata; +}; + +static inline void cl_read_ahead_release(const struct lu_env *env, + struct cl_read_ahead *ra) +{ + if (ra->cra_release != NULL) + ra->cra_release(env, ra->cra_cbdata); + memset(ra, 0, sizeof(*ra)); +} + /** * Per-layer io operations. @@ -1664,17 +1597,14 @@ struct cl_io_operations { const struct cl_io_slice *slice, struct cl_page_list *queue, int from, int to, cl_commit_cbt cb); - /** - * Read missing page. - * - * Called by a top-level cl_io_operations::op[CIT_READ]::cio_start() - * method, when it hits not-up-to-date page in the range. Optional. - * - * \pre io->ci_type == CIT_READ - */ - int (*cio_read_page)(const struct lu_env *env, - const struct cl_io_slice *slice, - const struct cl_page_slice *page); + /** + * Decide maximum read ahead extent + * + * \pre io->ci_type == CIT_READ + */ + int (*cio_read_ahead)(const struct lu_env *env, + const struct cl_io_slice *slice, + pgoff_t start, struct cl_read_ahead *ra); /** * Optional debugging helper. Print given io slice. */ @@ -1849,17 +1779,20 @@ struct cl_io { struct cl_rd_io { struct cl_io_rw_common rd; } ci_rd; - struct cl_wr_io { - struct cl_io_rw_common wr; - int wr_append; + struct cl_wr_io { + struct cl_io_rw_common wr; + int wr_append; int wr_sync; - } ci_wr; - struct cl_io_rw_common ci_rw; - struct cl_setattr_io { - struct ost_lvb sa_attr; - unsigned int sa_valid; - struct obd_capa *sa_capa; - } ci_setattr; + } ci_wr; + struct cl_io_rw_common ci_rw; + struct cl_setattr_io { + struct ost_lvb sa_attr; + unsigned int sa_attr_flags; + unsigned int sa_valid; + int sa_stripe_index; + const struct lu_fid *sa_parent_fid; + struct obd_capa *sa_capa; + } ci_setattr; struct cl_fault_io { /** page index within file. 
*/ pgoff_t ft_index; @@ -2079,7 +2012,7 @@ struct cl_req_obj { */ struct cl_req { enum cl_req_type crq_type; - /** A list of pages being transfered */ + /** A list of pages being transferred */ struct list_head crq_pages; /** Number of pages in cl_req::crq_pages */ unsigned crq_nrpages; @@ -2172,11 +2105,6 @@ static inline struct cl_site *lu2cl_site(const struct lu_site *site) return container_of(site, struct cl_site, cs_lu); } -static inline int lu_device_is_cl(const struct lu_device *d) -{ - return d->ld_type->ldt_tags & LU_DEVICE_CL; -} - static inline struct cl_device *lu2cl_dev(const struct lu_device *d) { LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d)); @@ -2257,8 +2185,8 @@ void cl_object_put (const struct lu_env *env, struct cl_object *o); void cl_object_get (struct cl_object *o); void cl_object_attr_lock (struct cl_object *o); void cl_object_attr_unlock(struct cl_object *o); -int cl_object_attr_get (const struct lu_env *env, struct cl_object *obj, - struct cl_attr *attr); +int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj, + struct cl_attr *attr); int cl_object_attr_update(const struct lu_env *env, struct cl_object *obj, const struct cl_attr *attr, unsigned valid); int cl_object_glimpse (const struct lu_env *env, struct cl_object *obj, @@ -2267,8 +2195,18 @@ int cl_conf_set (const struct lu_env *env, struct cl_object *obj, const struct cl_object_conf *conf); int cl_object_prune (const struct lu_env *env, struct cl_object *obj); void cl_object_kill (const struct lu_env *env, struct cl_object *obj); -int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj, - struct lov_user_md __user *lum); +int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj, + struct lov_user_md __user *lum); +int cl_object_find_cbdata(const struct lu_env *env, struct cl_object *obj, + ldlm_iterator_t iter, void *data); +int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj, + struct ll_fiemap_info_key *fmkey, struct fiemap *fiemap, + size_t *buflen); +int cl_object_obd_info_get(const struct lu_env *env, struct cl_object *obj, + struct obd_info *oinfo, + struct ptlrpc_request_set *set); +int cl_object_data_version(const struct lu_env *env, struct cl_object *obj, + __u64 *version, int flags); /** * Returns true, iff \a o0 and \a o1 are slices of the same object. 
@@ -2392,8 +2330,6 @@ int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg); void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate); -int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io, - struct cl_page *page, pgoff_t *max_index); loff_t cl_offset(const struct cl_object *obj, pgoff_t idx); pgoff_t cl_index(const struct cl_object *obj, loff_t offset); size_t cl_page_size(const struct cl_object *obj); @@ -2413,7 +2349,8 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie, */ struct cl_client_cache { /** - * # of users (OSCs) + * # of client cache refcount + * # of users (OSCs) + 2 (held by llite and lov) */ atomic_t ccc_users; /** @@ -2450,6 +2387,12 @@ struct cl_client_cache { */ wait_queue_head_t ccc_unstable_waitq; }; +/** + * cl_cache functions + */ +struct cl_client_cache *cl_cache_init(unsigned long lru_page_max); +void cl_cache_incref(struct cl_client_cache *cache); +void cl_cache_decref(struct cl_client_cache *cache); /** @} cl_page */ @@ -2492,8 +2435,6 @@ int cl_io_lock_add (const struct lu_env *env, struct cl_io *io, struct cl_io_lock_link *link); int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, struct cl_lock_descr *descr); -int cl_io_read_page (const struct lu_env *env, struct cl_io *io, - struct cl_page *page); int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io, enum cl_req_type iot, struct cl_2queue *queue); int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io, @@ -2502,6 +2443,8 @@ int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io, int cl_io_commit_async (const struct lu_env *env, struct cl_io *io, struct cl_page_list *queue, int from, int to, cl_commit_cbt cb); +int cl_io_read_ahead (const struct lu_env *env, struct cl_io *io, + pgoff_t start, struct cl_read_ahead *ra); void cl_io_rw_advance (const struct lu_env *env, struct cl_io *io, size_t nob); int cl_io_cancel (const struct lu_env *env, struct cl_io *io,
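
The following is an illustrative sketch, not part of the patch, showing how a read-ahead consumer is expected to use the new cl_io_read_ahead()/cl_read_ahead_release() pair together with struct cl_read_ahead introduced above. The helper name, the clamping policy and the include lines are assumptions; only the three cl_* interfaces come from this header.

#include <linux/kernel.h>       /* min() */
#include <cl_object.h>          /* assumed include path for this header */

static pgoff_t example_ra_window_end(const struct lu_env *env,
                                     struct cl_io *io,
                                     pgoff_t start, pgoff_t wanted_end)
{
        struct cl_read_ahead ra = { 0 };
        pgoff_t end = start;
        int rc;

        /* Ask the layers how far read-ahead may extend from 'start';
         * cra_end is an inclusive page index bounded by DLM lock coverage,
         * RPC size and stripe boundaries. */
        rc = cl_io_read_ahead(env, io, start, &ra);
        if (rc == 0) {
                end = min(wanted_end, ra.cra_end);
                /* Drop whatever the layers pinned while answering (e.g. an
                 * LDLM lock reference); this also re-zeroes 'ra'. */
                cl_read_ahead_release(env, &ra);
        }
        return end;
}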
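
A complementary sketch of the provider side: how a layer might implement the new cl_io_operations::cio_read_ahead hook that replaces cio_read_page(). The fixed 256-page chunk is purely hypothetical; the real vvp/lov/osc implementations derive the limit from DLM lock coverage, RPC size and stripe boundaries, and fill cra_release/cra_cbdata when they pin a resource that must be dropped afterwards.

static int example_io_read_ahead(const struct lu_env *env,
                                 const struct cl_io_slice *ios,
                                 pgoff_t start, struct cl_read_ahead *ra)
{
        /* Hypothetical policy: end read-ahead at the last page of the
         * 256-page chunk containing 'start' (1MiB with 4KiB pages). */
        const pgoff_t chunk = 256;

        ra->cra_end = (start / chunk + 1) * chunk - 1;
        return 0;
}

Such a method would be wired in through the layer's cl_io_operations vector, e.g. .cio_read_ahead = example_io_read_ahead.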
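
A minimal sketch of the locking contract spelled out for coo_attr_get(): cl_object_header::coh_attr_guard of the top object must be held, via cl_object_attr_lock()/cl_object_attr_unlock(), around cl_object_attr_get(). The helper name is made up, and the cat_size field is assumed from struct cl_attr defined elsewhere in this header.

static int example_object_size(const struct lu_env *env,
                               struct cl_object *top, loff_t *size)
{
        struct cl_attr attr = { 0 };
        int rc;

        /* Layers are consulted top-to-bottom; a +ve return from one layer
         * stops the iteration while cl_object_attr_get() still returns 0. */
        cl_object_attr_lock(top);
        rc = cl_object_attr_get(env, top, &attr);
        cl_object_attr_unlock(top);

        if (rc == 0)
                *size = attr.cat_size;
        return rc;
}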
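
The patch also adds coo_data_version() and its cl_object_data_version() wrapper. A trivial caller could look like the sketch below; the helper name is made up and the flags value 0 is purely illustrative (the flags are passed through to the layers unchanged).

static int example_data_version(const struct lu_env *env,
                                struct cl_object *top, __u64 *dv)
{
        /* Walks the layers top-to-bottom; the OSC layer fills in the data
         * version reported by the OSTs. */
        return cl_object_data_version(env, top, dv, 0);
}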
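
Finally, ccc_users now counts real references and the patch exports cl_cache_init()/cl_cache_incref()/cl_cache_decref(). The sketch below shows the intended lifetime management; the surrounding function names are hypothetical, and it assumes (per the updated ccc_users comment) that cl_cache_init() hands back the cache with one reference held for its creator and returns NULL on allocation failure.

static struct cl_client_cache *example_cache;

/* Client mount (llite in the real code): create the shared cache with an
 * upper bound on LRU pages. */
static int example_cache_setup(unsigned long lru_page_max)
{
        example_cache = cl_cache_init(lru_page_max);

        return example_cache != NULL ? 0 : -ENOMEM;
}

/* Every additional user (the lov/osc side) takes its own reference rather
 * than borrowing the creator's. */
static void example_cache_share(struct cl_client_cache *cache)
{
        cl_cache_incref(cache);
}

/* Teardown: each holder drops its reference; the last put frees the cache. */
static void example_cache_teardown(void)
{
        cl_cache_decref(example_cache);
        example_cache = NULL;
}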