X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Finclude%2Fcl_object.h;h=33cb20e8ce362a0e60d4adb9a5f50eead48d3f66;hb=490fd542a9bc8e8dafd22a8f4ca7b186f87ab21c;hp=5b922bdc44a3050625a966f46fbf43a72eec750c;hpb=45fe02fec6fa388951d165854f223d74e1b6457c;p=fs%2Flustre-release.git

diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h
index 5b922bd..33cb20e 100644
--- a/lustre/include/cl_object.h
+++ b/lustre/include/cl_object.h
@@ -736,6 +736,10 @@ struct cl_page {
          */
         struct cl_io            *cp_owner;
         /**
+         * Debug information: the task owning the page.
+         */
+        cfs_task_t              *cp_task;
+        /**
          * Owning IO request in cl_page_state::CPS_PAGEOUT and
          * cl_page_state::CPS_PAGEIN states. This field is maintained only in
          * the top-level pages. Protected by a VM lock.
@@ -749,6 +753,8 @@ struct cl_page {
         struct lu_ref_link      *cp_queue_ref;
         /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */
         unsigned                 cp_flags;
+        /** Assigned if doing a sync_io */
+        struct cl_sync_io       *cp_sync_io;
 };
 
 /**
@@ -782,7 +788,8 @@ enum cl_lock_mode {
          */
         CLM_PHANTOM,
         CLM_READ,
-        CLM_WRITE
+        CLM_WRITE,
+        CLM_GROUP
 };
 
 /**
@@ -854,15 +861,13 @@ struct cl_page_operations {
                            const struct cl_page_slice *slice,
                            struct cl_io *io);
         /**
-         * Announces that page contains valid data and user space can look and
-         * them without client's involvement from now on. Effectively marks
-         * the page up-to-date. Optional.
+         * Announces whether the page contains valid data, as indicated by @uptodate.
          *
          * \see cl_page_export()
         * \see vvp_page_export()
          */
         void (*cpo_export)(const struct lu_env *env,
-                           const struct cl_page_slice *slice);
+                           const struct cl_page_slice *slice, int uptodate);
         /**
          * Unmaps page from the user space (if it is mapped).
          *
@@ -1291,6 +1296,8 @@ struct cl_lock_descr {
         pgoff_t           cld_start;
         /** Index of the last page (inclusive) protected by this lock. */
         pgoff_t           cld_end;
+        /** Group ID, for group lock */
+        __u64             cld_gid;
         /** Lock mode. */
         enum cl_lock_mode cld_mode;
 };
@@ -1884,6 +1891,8 @@ enum cl_io_type {
  *
  * - glimpse. An io context to acquire glimpse lock.
  *
+ * - grouplock. An io context to acquire group lock.
+ *
  * CIT_MISC io is used simply as a context in which locks and pages
  * are manipulated. Such io has no internal "process", that is,
  * cl_io_loop() is never called for it.
@@ -1916,6 +1925,11 @@ enum cl_io_state {
         CIS_FINI
 };
 
+enum cl_req_priority {
+        CRP_NORMAL,
+        CRP_CANCEL
+};
+
 /**
  * IO state private for a layer.
  *
@@ -2033,7 +2047,8 @@ struct cl_io_operations {
                 int  (*cio_submit)(const struct lu_env *env,
                                    const struct cl_io_slice *slice,
                                    enum cl_req_type crt,
-                                   struct cl_2queue *queue);
+                                   struct cl_2queue *queue,
+                                   enum cl_req_priority priority);
         } req_op[CRT_NR];
         /**
          * Read missing page.
@@ -2227,6 +2242,11 @@ struct cl_io {
         struct cl_lockset      ci_lockset;
         /** lock requirements, this is just a help info for sublayers. */
         enum cl_io_lock_dmd    ci_lockreq;
+        /**
+         * Set if this io holds a group lock, to tell sublayers
+         * not to do lockless i/o.
+         */
+        int                    ci_no_srvlock;
         union {
                 struct cl_rd_io {
                         struct cl_io_rw_common rd;
@@ -2702,7 +2722,8 @@ int cl_page_unmap (const struct lu_env *env,
                               struct cl_io *io, struct cl_page *pg);
 int     cl_page_is_vmlocked (const struct lu_env *env,
                              const struct cl_page *pg);
-void    cl_page_export      (const struct lu_env *env, struct cl_page *pg);
+void    cl_page_export      (const struct lu_env *env,
+                             struct cl_page *pg, int uptodate);
 int     cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
                               struct cl_page *page);
 loff_t  cl_offset           (const struct cl_object *obj, pgoff_t idx);
@@ -2868,7 +2889,11 @@ int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
 int   cl_io_commit_write (const struct lu_env *env, struct cl_io *io,
                           struct cl_page *page, unsigned from, unsigned to);
 int   cl_io_submit_rw    (const struct lu_env *env, struct cl_io *io,
-                          enum cl_req_type iot, struct cl_2queue *queue);
+                          enum cl_req_type iot, struct cl_2queue *queue,
+                          enum cl_req_priority priority);
+int   cl_io_submit_sync  (const struct lu_env *env, struct cl_io *io,
+                          enum cl_req_type iot, struct cl_2queue *queue,
+                          enum cl_req_priority priority, long timeout);
 void  cl_io_rw_advance   (const struct lu_env *env, struct cl_io *io,
                           size_t nob);
 int   cl_io_cancel       (const struct lu_env *env, struct cl_io *io,
@@ -2975,14 +3000,15 @@ struct cl_sync_io {
         /** number of pages yet to be transferred. */
         atomic_t                csi_sync_nr;
         /** completion to be signaled when transfer is complete. */
-        struct completion       csi_sync_completion;
+        cfs_waitq_t             csi_waitq;
         /** error code. */
         int                     csi_sync_rc;
 };
 
 void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
 int  cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
-                     struct cl_page_list *queue, struct cl_sync_io *anchor);
+                     struct cl_page_list *queue, struct cl_sync_io *anchor,
+                     long timeout);
 void cl_sync_io_note(struct cl_sync_io *anchor, int ioret);
 
 /** @} cl_sync_io */
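Note: the group-lock hunks above add several cooperating pieces at once (the CLM_GROUP lock mode, the cld_gid descriptor field, the grouplock io context, and the ci_no_srvlock flag). The following is a minimal caller-side sketch of how they fit together, not code from this patch; the helper name sketch_grouplock_descr is hypothetical, and using CL_PAGE_EOF for a whole-file extent is an assumption.

/* Sketch: request a group lock over a whole object, and flag the io so
 * sublayers avoid lockless i/o while the group lock is held. */
static void sketch_grouplock_descr(struct cl_object *obj, struct cl_io *io,
                                   struct cl_lock_descr *descr, __u64 gid)
{
        descr->cld_obj   = obj;
        descr->cld_start = 0;
        descr->cld_end   = CL_PAGE_EOF;  /* assumed whole-file extent */
        descr->cld_gid   = gid;          /* new field added above */
        descr->cld_mode  = CLM_GROUP;    /* new lock mode added above */

        io->ci_no_srvlock = 1;           /* new flag: no lockless i/o */
}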
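Note: the sync-io hunks (the cp_sync_io back-pointer in cl_page, the cfs_waitq_t replacing the completion in cl_sync_io, and the timeout argument to cl_sync_io_wait()) combine to support the new cl_io_submit_sync() entry point. Below is a minimal sketch of how such a synchronous submit could be composed from the primitives declared in this header, assuming the pl_nr page count and cl_page_list_for_each() iterator from cl_object.h; it is not the patch's actual implementation.

/* Sketch: submit a 2queue for transfer and wait, with a timeout, until
 * every queued page has completed. */
static int sketch_submit_sync(const struct lu_env *env, struct cl_io *io,
                              enum cl_req_type iot, struct cl_2queue *queue,
                              enum cl_req_priority prio, long timeout)
{
        struct cl_sync_io anchor;
        struct cl_page   *pg;
        int               rc;

        /* one countdown slot per queued page */
        cl_sync_io_init(&anchor, queue->c2_qin.pl_nr);

        /* record the anchor in each page (new cp_sync_io field) so the
         * page's completion handler can call cl_sync_io_note() on it */
        cl_page_list_for_each(pg, &queue->c2_qin)
                pg->cp_sync_io = &anchor;

        rc = cl_io_submit_rw(env, io, iot, queue, prio);
        if (rc == 0)
                /* sleeps on csi_waitq until csi_sync_nr drops to zero,
                 * or gives up once the timeout expires */
                rc = cl_sync_io_wait(env, io, &queue->c2_qout, &anchor,
                                     timeout);
        return rc;
}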