X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Fcl_object.h;h=e180f68a80f140f919f38357c9e7e0fd09bceaa8;hp=05c89ebd342c74711781c137f325c16461f95563;hb=1e4d10af3909452b0eee1f99010d80aeb01d42a7;hpb=7542820698696ed5853ded30c9bf7fd5a78f0937

diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h
index 05c89eb..e180f68 100644
--- a/lustre/include/cl_object.h
+++ b/lustre/include/cl_object.h
@@ -27,7 +27,6 @@
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
  */
 #ifndef _LUSTRE_CL_OBJECT_H
 #define _LUSTRE_CL_OBJECT_H
@@ -1482,9 +1481,13 @@ struct cl_read_ahead {
         unsigned long cra_rpc_pages;
         /* Release callback. If readahead holds resources underneath, this
          * function should be called to release it. */
-        void (*cra_release)(const struct lu_env *env, void *cbdata);
+        void (*cra_release)(const struct lu_env *env,
+                            struct cl_read_ahead *ra);
+
         /* Callback data for cra_release routine */
-        void *cra_cbdata;
+        void *cra_dlmlock;
+        void *cra_oio;
+
         /* whether lock is in contention */
         bool cra_contention;
 };
@@ -1493,7 +1496,7 @@ static inline void cl_read_ahead_release(const struct lu_env *env,
                                          struct cl_read_ahead *ra)
 {
         if (ra->cra_release != NULL)
-                ra->cra_release(env, ra->cra_cbdata);
+                ra->cra_release(env, ra);
         memset(ra, 0, sizeof(*ra));
 }
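The cra_release() callback now receives the whole struct cl_read_ahead instead of an opaque cbdata pointer, and the old cra_cbdata field is split into cra_dlmlock and cra_oio. Below is a minimal sketch of a release callback written against the new contract; my_put_lock() and my_put_oio() are hypothetical placeholders for whatever teardown a layer actually needs, not part of this patch.

#include <cl_object.h>  /* declarations shown in this patch */

/* Hypothetical helpers standing in for the layer's own teardown. */
static void my_put_lock(const struct lu_env *env, void *dlmlock)
{
        /* e.g. drop the DLM lock reference stashed for readahead */
}

static void my_put_oio(const struct lu_env *env, void *oio)
{
        /* e.g. drop the per-IO reference stashed for readahead */
}

/* Release callback using the new signature: it reaches its state through
 * the descriptor, so both references can be dropped independently. */
static void my_ra_release(const struct lu_env *env, struct cl_read_ahead *ra)
{
        if (ra->cra_dlmlock != NULL)
                my_put_lock(env, ra->cra_dlmlock);
        if (ra->cra_oio != NULL)
                my_put_oio(env, ra->cra_oio);
}

A cio_read_ahead() implementation would set ra->cra_release to such a callback and fill in whichever of cra_dlmlock/cra_oio it holds; cl_read_ahead_release() above then invokes the callback and zeroes the descriptor.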
@@ -1604,6 +1607,11 @@ struct cl_io_operations {
                                  struct cl_page_list *queue, int from, int to,
                                  cl_commit_cbt cb);
         /**
+         * Release active extent.
+         */
+        void (*cio_extent_release)(const struct lu_env *env,
+                                   const struct cl_io_slice *slice);
+        /**
          * Decide maximum read ahead extent
          *
          * \pre io->ci_type == CIT_READ
          */
@@ -1611,6 +1619,13 @@ struct cl_io_operations {
         int (*cio_read_ahead)(const struct lu_env *env,
                               const struct cl_io_slice *slice,
                               pgoff_t start, struct cl_read_ahead *ra);
+        /**
+         *
+         * Reserve LRU slots before IO.
+         */
+        int (*cio_lru_reserve) (const struct lu_env *env,
+                                const struct cl_io_slice *slice,
+                                loff_t pos, size_t bytes);
         /**
          * Optional debugging helper. Print given io slice.
          */
@@ -1623,11 +1638,11 @@
  * \ingroup cl_lock
  */
 enum cl_enq_flags {
-        /**
-         * instruct server to not block, if conflicting lock is found. Instead
-         * -EWOULDBLOCK is returned immediately.
-         */
-        CEF_NONBLOCK     = 0x00000001,
+        /**
+         * instruct server to not block, if conflicting lock is found. Instead
+         * -EAGAIN is returned immediately.
+         */
+        CEF_NONBLOCK     = 0x00000001,
         /**
          * Tell lower layers this is a glimpse request, translated to
          * LDLM_FL_HAS_INTENT at LDLM layer.
@@ -1854,7 +1869,6 @@ struct cl_io {
                         /* The following are used for fallocate(2) */
                         int              sa_falloc_mode;
                         loff_t           sa_falloc_offset;
-                        loff_t           sa_falloc_len;
                         loff_t           sa_falloc_end;
                 } ci_setattr;
                 struct cl_data_version_io {
@@ -1898,6 +1912,9 @@ struct cl_io {
                         loff_t ls_result;
                         int ls_whence;
                 } ci_lseek;
+                struct cl_misc_io {
+                        time64_t lm_next_rpc_time;
+                } ci_misc;
         } u;
         struct cl_2queue ci_queue;
         size_t           ci_nob;
@@ -1972,7 +1989,13 @@ struct cl_io {
         /**
          * Sequential read hints.
          */
-        ci_seq_read:1;
+        ci_seq_read:1,
+        /**
+         * Do parallel (async) submission of DIO RPCs. Note DIO is still sync
+         * to userspace, only the RPCs are submitted async, then waited for at
+         * the llite layer before returning.
+         */
+        ci_parallel_dio:1;
         /**
          * Bypass quota check
          */
@@ -2415,6 +2438,9 @@ int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io,
 int   cl_io_commit_async (const struct lu_env *env, struct cl_io *io,
                           struct cl_page_list *queue, int from, int to,
                           cl_commit_cbt cb);
+void  cl_io_extent_release (const struct lu_env *env, struct cl_io *io);
+int   cl_io_lru_reserve(const struct lu_env *env, struct cl_io *io,
+                        loff_t pos, size_t bytes);
 int   cl_io_read_ahead   (const struct lu_env *env, struct cl_io *io,
                           pgoff_t start, struct cl_read_ahead *ra);
 void  cl_io_rw_advance   (const struct lu_env *env, struct cl_io *io,
@@ -2438,6 +2464,11 @@ static inline int cl_io_is_mkwrite(const struct cl_io *io)
         return io->ci_type == CIT_FAULT && io->u.ci_fault.ft_mkwrite;
 }
 
+static inline int cl_io_is_fault_writable(const struct cl_io *io)
+{
+        return io->ci_type == CIT_FAULT && io->u.ci_fault.ft_writable;
+}
+
 /**
  * True, iff \a io is a truncate(2).
  */
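The matching top-level wrappers, cl_io_lru_reserve() and cl_io_extent_release(), are declared in the hunk above. A rough sketch of the intended calling order, reserving LRU slots for the byte range before generating pages and releasing the active extent afterwards; my_generate_pages() is a hypothetical placeholder and the io/env setup is assumed to exist elsewhere.

#include <cl_object.h>  /* declarations shown in this patch */

/* Hypothetical page generation step; stands in for the real IO loop. */
static int my_generate_pages(const struct lu_env *env, struct cl_io *io,
                             loff_t pos, size_t bytes);

static int my_do_rw(const struct lu_env *env, struct cl_io *io,
                    loff_t pos, size_t bytes)
{
        int rc;

        /* walks the layers through the new cio_lru_reserve() method */
        rc = cl_io_lru_reserve(env, io, pos, bytes);
        if (rc != 0)
                return rc;

        rc = my_generate_pages(env, io, pos, bytes);

        /* walks the layers through the new cio_extent_release() method */
        cl_io_extent_release(env, io);
        return rc;
}

For direct IO, a caller can additionally set io->ci_parallel_dio = 1: per the new bit-field comment, the RPCs are then submitted asynchronously while the system call still waits for completion at the llite layer before returning.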
@@ -2499,33 +2530,35 @@ static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist)
 #define cl_page_list_for_each_safe(page, temp, list) \
         list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
 
-void cl_page_list_init   (struct cl_page_list *plist);
-void cl_page_list_add    (struct cl_page_list *plist, struct cl_page *page);
-void cl_page_list_move   (struct cl_page_list *dst, struct cl_page_list *src,
-                          struct cl_page *page);
+void cl_page_list_init(struct cl_page_list *plist);
+void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page,
+                      bool get_ref);
+void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
+                       struct cl_page *page);
 void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
-                          struct cl_page *page);
-void cl_page_list_splice (struct cl_page_list *list,
-                          struct cl_page_list *head);
-void cl_page_list_del    (const struct lu_env *env,
-                          struct cl_page_list *plist, struct cl_page *page);
-void cl_page_list_disown (const struct lu_env *env,
-                          struct cl_io *io, struct cl_page_list *plist);
-void cl_page_list_assume (const struct lu_env *env,
-                          struct cl_io *io, struct cl_page_list *plist);
+                            struct cl_page *page);
+void cl_page_list_splice(struct cl_page_list *list,
+                         struct cl_page_list *head);
+void cl_page_list_del(const struct lu_env *env,
+                      struct cl_page_list *plist, struct cl_page *page);
+void cl_page_list_disown(const struct lu_env *env,
+                         struct cl_io *io, struct cl_page_list *plist);
+void cl_page_list_assume(const struct lu_env *env,
+                         struct cl_io *io, struct cl_page_list *plist);
 void cl_page_list_discard(const struct lu_env *env,
-                          struct cl_io *io, struct cl_page_list *plist);
-void cl_page_list_fini   (const struct lu_env *env, struct cl_page_list *plist);
-
-void cl_2queue_init     (struct cl_2queue *queue);
-void cl_2queue_add      (struct cl_2queue *queue, struct cl_page *page);
-void cl_2queue_disown   (const struct lu_env *env,
-                         struct cl_io *io, struct cl_2queue *queue);
-void cl_2queue_assume   (const struct lu_env *env,
-                         struct cl_io *io, struct cl_2queue *queue);
-void cl_2queue_discard  (const struct lu_env *env,
-                         struct cl_io *io, struct cl_2queue *queue);
-void cl_2queue_fini     (const struct lu_env *env, struct cl_2queue *queue);
+                          struct cl_io *io, struct cl_page_list *plist);
+void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist);
+
+void cl_2queue_init(struct cl_2queue *queue);
+void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page,
+                   bool get_ref);
+void cl_2queue_disown(const struct lu_env *env, struct cl_io *io,
+                      struct cl_2queue *queue);
+void cl_2queue_assume(const struct lu_env *env, struct cl_io *io,
+                      struct cl_2queue *queue);
+void cl_2queue_discard(const struct lu_env *env, struct cl_io *io,
+                       struct cl_2queue *queue);
+void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue);
 void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
 
 /** @} cl_page_list */
@@ -2544,12 +2577,14 @@ typedef void (cl_sync_io_end_t)(const struct lu_env *, struct cl_sync_io *);
 void cl_sync_io_init_notify(struct cl_sync_io *anchor, int nr,
                             struct cl_dio_aio *aio, cl_sync_io_end_t *end);
-int  cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
-                     long timeout);
+int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
+                    long timeout);
 void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
                      int ioret);
-struct cl_dio_aio *cl_aio_alloc(struct kiocb *iocb);
-void cl_aio_free(struct cl_dio_aio *aio);
+int cl_sync_io_wait_recycle(const struct lu_env *env, struct cl_sync_io *anchor,
+                            long timeout, int ioret);
+struct cl_dio_aio *cl_aio_alloc(struct kiocb *iocb, struct cl_object *obj);
+void cl_aio_free(const struct lu_env *env, struct cl_dio_aio *aio);
 static inline void cl_sync_io_init(struct cl_sync_io *anchor, int nr)
 {
         cl_sync_io_init_notify(anchor, nr, NULL, NULL);
 }
@@ -2578,6 +2613,7 @@ struct cl_sync_io {
 struct cl_dio_aio {
         struct cl_sync_io   cda_sync;
         struct cl_page_list cda_pages;
+        struct cl_object   *cda_obj;
         struct kiocb       *cda_iocb;
         ssize_t             cda_bytes;
         unsigned            cda_no_aio_complete:1;
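A minimal sketch of the updated DIO/AIO descriptor lifecycle implied by the prototypes above: cl_aio_alloc() now records the target object (kept in cda_obj) and cl_aio_free() takes the environment, while pages are queued with the new get_ref argument. Submission and waiting on cda_sync are elided; the function below is illustrative only, not part of this patch.

#include <cl_object.h>  /* declarations shown in this patch */

static int my_dio_setup(const struct lu_env *env, struct kiocb *iocb,
                        struct cl_object *obj, struct cl_page *page)
{
        struct cl_dio_aio *aio;

        aio = cl_aio_alloc(iocb, obj);
        if (aio == NULL)
                return -ENOMEM;

        /* true: hold a page reference while it sits on the list */
        cl_page_list_add(&aio->cda_pages, page, true);

        /* ... submit the queued pages and wait on &aio->cda_sync ... */

        cl_aio_free(env, aio);
        return 0;
}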