X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Fcl_object.h;h=563edd1ac2d73ab3528e7038478e00319e8f4e57;hp=fa8c61387bacca08760d5d27c3b6f862ffb80920;hb=1072318b4d89cddba00e9adeb939249f485f2d13;hpb=15385c3b934b511a1452327c701fbb6adad71416

diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h
index fa8c613..563edd1 100644
--- a/lustre/include/cl_object.h
+++ b/lustre/include/cl_object.h
@@ -736,6 +736,10 @@ struct cl_page {
          */
         struct cl_io            *cp_owner;
         /**
+         * Debug information, the task is owning the page.
+         */
+        cfs_task_t              *cp_task;
+        /**
          * Owning IO request in cl_page_state::CPS_PAGEOUT and
          * cl_page_state::CPS_PAGEIN states. This field is maintained only in
          * the top-level pages. Protected by a VM lock.
@@ -749,6 +753,8 @@ struct cl_page {
         struct lu_ref_link      *cp_queue_ref;
         /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */
         unsigned                 cp_flags;
+        /** Assigned if doing a sync_io */
+        struct cl_sync_io       *cp_sync_io;
 };
 
 /**
@@ -826,8 +832,9 @@ struct cl_page_operations {
          * \see cl_page_own()
          * \see vvp_page_own(), lov_page_own()
          */
-        void (*cpo_own)(const struct lu_env *env,
-                        const struct cl_page_slice *slice, struct cl_io *io);
+        int  (*cpo_own)(const struct lu_env *env,
+                        const struct cl_page_slice *slice,
+                        struct cl_io *io, int nonblock);
         /** Called when ownership it yielded. Optional.
          *
          * \see cl_page_disown()
@@ -855,15 +862,13 @@ struct cl_page_operations {
                               const struct cl_page_slice *slice,
                               struct cl_io *io);
         /**
-         * Announces that page contains valid data and user space can look and
-         * them without client's involvement from now on. Effectively marks
-         * the page up-to-date. Optional.
+         * Announces whether the page contains valid data or not by \a uptodate.
          *
          * \see cl_page_export()
         * \see vvp_page_export()
          */
         void (*cpo_export)(const struct lu_env *env,
-                           const struct cl_page_slice *slice);
+                           const struct cl_page_slice *slice, int uptodate);
         /**
          * Unmaps page from the user space (if it is mapped).
          *
@@ -1296,6 +1301,11 @@ struct cl_lock_descr {
         __u64             cld_gid;
         /** Lock mode. */
         enum cl_lock_mode cld_mode;
+        /**
+         * flags to enqueue lock. A combination of bit-flags from
+         * enum cl_enq_flags.
+         */
+        __u32             cld_enq_flags;
 };
 
 #define DDESCR "%s(%d):[%lu, %lu]"
@@ -1332,15 +1342,15 @@ const char *cl_lock_mode_name(const enum cl_lock_mode mode);
  *          |  |                 V
  *          |  |                HELD<---------+
  *          |  |                 |            |
- *          |  |                 |            |
+ *          |  |                 |            | cl_use_try()
  *          |  |  cl_unuse_try() |            |
  *          |  |                 |            |
- *          |  |                 V            | cached
- *          |  +------------>UNLOCKING (*)    | lock found
- *          |                    |            |
- *          |     cl_unuse_try() |            |
+ *          |  |                 V         ---+
+ *          |  +------------>INTRANSIT (D) <--+
  *          |                    |            |
+ *          |     cl_unuse_try() |            | cached lock found
  *          |                    |            | cl_use_try()
+ *          |                    |            |
  *          |                    V            |
  *          +------------------CACHED---------+
  *                               |
@@ -1359,6 +1369,8 @@ const char *cl_lock_mode_name(const enum cl_lock_mode mode);
  *
  * (C) is the point where Cancellation call-back is invoked.
  *
+ * (D) is the transit state which means the lock is changing.
+ *
  * Transition to FREEING state is possible from any other state in the
  * diagram in case of unrecoverable error.
  *
@@ -1377,9 +1389,6 @@ const char *cl_lock_mode_name(const enum cl_lock_mode mode);
  * handled, and is in ENQUEUED state after enqueue to S2 has been sent (note
  * that in this case, sub-locks move from state to state, and top-lock remains
  * in the same state).
- *
- * Separate UNLOCKING state is needed to maintain an invariant that in HELD
- * state lock is immediately ready for use.
  */
 enum cl_lock_state {
         /**
@@ -1401,10 +1410,16 @@ enum cl_lock_state {
          */
         CLS_HELD,
         /**
-         * Lock is in the transition from CLS_HELD to CLS_CACHED. Lock is in
-         * this state only while cl_unuse() is executing against it.
+         * This state is used to mark the lock is being used, or unused.
+         * We need this state because the lock may have several sublocks,
+         * so it's impossible to have an atomic way to bring all sublocks
+         * into CLS_HELD state at use case, or all sublocks to CLS_CACHED
+         * at unuse case.
+         * If a thread is referring to a lock, and it sees the lock is in this
+         * state, it must wait for the lock.
+         * See state diagram for details.
          */
-        CLS_UNLOCKING,
+        CLS_INTRANSIT,
         /**
          * Lock granted, not used.
          */
@@ -1425,9 +1440,7 @@ enum cl_lock_flags {
         /** cancellation is pending for this lock. */
         CLF_CANCELPEND = 1 << 1,
         /** destruction is pending for this lock. */
-        CLF_DOOMED     = 1 << 2,
-        /** State update is pending. */
-        CLF_STATE      = 1 << 3
+        CLF_DOOMED     = 1 << 2
 };
 
 /**
@@ -1525,6 +1538,10 @@ struct cl_lock {
         cfs_task_t           *cll_guarder;
         int                   cll_depth;
 
+        /**
+         * the owner for INTRANSIT state
+         */
+        cfs_task_t           *cll_intransit_owner;
         int                   cll_error;
         /**
          * Number of holds on a lock. A hold prevents a lock from being
@@ -1654,8 +1671,9 @@ struct cl_lock_operations {
          * usual return values of lock state-machine methods, this can return
          * -ESTALE to indicate that lock cannot be returned to the cache, and
          * has to be re-initialized.
+         * unuse is a one-shot operation, so it must NOT return CLO_WAIT.
          *
-         * \see ccc_lock_unlock(), lov_lock_unlock(), osc_lock_unlock()
+         * \see ccc_lock_unuse(), lov_lock_unuse(), osc_lock_unuse()
          */
         int  (*clo_unuse)(const struct lu_env *env,
                           const struct cl_lock_slice *slice);
         /**
@@ -1727,7 +1745,7 @@ struct cl_lock_operations {
                            const struct cl_lock_slice *slice,
                            struct cl_lock_closure *closure);
         /**
-         * Executed top-to-bottom when lock description changes (e.g., as a
+         * Executed bottom-to-top when lock description changes (e.g., as a
          * result of server granting more generous lock than was requested).
          *
          * \see lovsub_lock_modify()
@@ -1799,7 +1817,7 @@ struct cl_page_list {
         cfs_task_t      *pl_owner;
 };
 
-/** \addtogroup cl_page_list cl_page_list
+/**
  * A 2-queue of pages. A convenience data-type for common use case, 2-queue
  * contains an incoming page list and an outgoing page list.
  */
@@ -2144,11 +2162,6 @@ struct cl_io_lock_link {
         struct list_head     cill_linkage;
         struct cl_lock_descr cill_descr;
         struct cl_lock      *cill_lock;
-        /**
-         * flags to enqueue lock for this IO. A combination of bit-flags from
-         * enum cl_enq_flags.
-         */
-        __u32                cill_enq_flags;
         /** optional destructor */
         void               (*cill_fini)(const struct lu_env *env,
                                         struct cl_io_lock_link *link);
@@ -2211,6 +2224,7 @@ struct cl_io_rw_common {
         int  crw_nonblock;
 };
 
+
 /**
  * State for io.
  *
@@ -2246,7 +2260,6 @@ struct cl_io {
         union {
                 struct cl_rd_io {
                         struct cl_io_rw_common rd;
-                        int                    rd_is_sendfile;
                 } ci_rd;
                 struct cl_wr_io {
                         struct cl_io_rw_common wr;
@@ -2623,6 +2636,7 @@ int cl_conf_set (const struct lu_env *env, struct cl_object *obj,
                          const struct cl_object_conf *conf);
 void cl_object_prune    (const struct lu_env *env, struct cl_object *obj);
 void cl_object_kill     (const struct lu_env *env, struct cl_object *obj);
+int  cl_object_has_locks (struct cl_object *obj);
 
 /**
  * Returns true, iff \a o0 and \a o1 are slices of the same object.
@@ -2642,7 +2656,8 @@ void cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
                          struct cl_io *io, pgoff_t start, pgoff_t end,
-                         struct cl_page_list *plist);
+                         struct cl_page_list *plist,
+                         int nonblock);
 struct cl_page *cl_page_find        (const struct lu_env *env,
                                      struct cl_object *obj,
                                      pgoff_t idx, struct page *vmpage,
@@ -2674,6 +2689,8 @@ const struct cl_page_slice *cl_page_at(const struct cl_page *page,
 int  cl_page_own        (const struct lu_env *env,
                          struct cl_io *io, struct cl_page *page);
+int  cl_page_own_try    (const struct lu_env *env,
+                         struct cl_io *io, struct cl_page *page);
 void cl_page_assume     (const struct lu_env *env,
                          struct cl_io *io, struct cl_page *page);
 void cl_page_unassume   (const struct lu_env *env,
@@ -2718,7 +2735,8 @@ int cl_page_unmap (const struct lu_env *env, struct cl_io *io,
                          struct cl_page *pg);
 int  cl_page_is_vmlocked (const struct lu_env *env, const struct cl_page *pg);
-void cl_page_export      (const struct lu_env *env, struct cl_page *pg);
+void cl_page_export      (const struct lu_env *env,
+                          struct cl_page *pg, int uptodate);
 int  cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
                            struct cl_page *page);
 loff_t cl_offset         (const struct cl_object *obj, pgoff_t idx);
@@ -2746,7 +2764,6 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
                              const char *scope, const void *source);
 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
                                 const struct cl_lock_descr *need,
-                                __u32 enqflags,
                                 const char *scope, const void *source);
 struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj,
                                 struct cl_page *page, struct cl_lock *except,
@@ -2769,6 +2786,14 @@ int cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock);
 int  cl_lock_compatible(const struct cl_lock *lock1,
                         const struct cl_lock *lock2);
 
+enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
+                                     struct cl_lock *lock);
+
+void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
+                       enum cl_lock_state state);
+
+int cl_lock_is_intransit(struct cl_lock *lock);
+
 /** \name statemachine statemachine
  * Interface to lock state machine consists of 3 parts:
  *
@@ -2809,7 +2834,7 @@ int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
                    struct cl_io *io, __u32 flags);
 int  cl_unuse_try  (const struct lu_env *env, struct cl_lock *lock);
 int  cl_wait_try   (const struct lu_env *env, struct cl_lock *lock);
-int  cl_use_try    (const struct lu_env *env, struct cl_lock *lock);
+int  cl_use_try    (const struct lu_env *env, struct cl_lock *lock, int atomic);
 
 /** @} statemachine */
 void cl_lock_signal     (const struct lu_env *env, struct cl_lock *lock);
@@ -2876,7 +2901,7 @@ void cl_io_end (const struct lu_env *env, struct cl_io *io);
 int   cl_io_lock_add     (const struct lu_env *env, struct cl_io *io,
                           struct cl_io_lock_link *link);
 int   cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
-                           struct cl_lock_descr *descr, int enqflags);
+                           struct cl_lock_descr *descr);
 int   cl_io_read_page    (const struct lu_env *env, struct cl_io *io,
                           struct cl_page *page);
 int   cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
@@ -2886,6 +2911,9 @@ int cl_io_commit_write (const struct lu_env *env, struct cl_io *io,
 int   cl_io_submit_rw    (const struct lu_env *env, struct cl_io *io,
                           enum cl_req_type iot, struct cl_2queue *queue,
                           enum cl_req_priority priority);
+int   cl_io_submit_sync  (const struct lu_env *env, struct cl_io *io,
+                          enum cl_req_type iot, struct cl_2queue *queue,
+                          enum cl_req_priority priority, long timeout);
 void  cl_io_rw_advance   (const struct lu_env *env, struct cl_io *io,
                           size_t nob);
 int   cl_io_cancel       (const struct lu_env *env, struct cl_io *io,
@@ -2900,8 +2928,6 @@ static inline int cl_io_is_append(const struct cl_io *io)
         return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append;
 }
 
-int cl_io_is_sendfile(const struct cl_io *io);
-
 struct cl_io *cl_io_top(struct cl_io *io);
 
 void cl_io_print(const struct lu_env *env, void *cookie,
@@ -2992,14 +3018,15 @@ struct cl_sync_io {
         /** number of pages yet to be transferred. */
         atomic_t             csi_sync_nr;
         /** completion to be signaled when transfer is complete. */
-        struct completion    csi_sync_completion;
+        cfs_waitq_t          csi_waitq;
         /** error code. */
         int                  csi_sync_rc;
 };
 
 void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
 int  cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
-                     struct cl_page_list *queue, struct cl_sync_io *anchor);
+                     struct cl_page_list *queue, struct cl_sync_io *anchor,
+                     long timeout);
 void cl_sync_io_note(struct cl_sync_io *anchor, int ioret);
 
 /** @} cl_sync_io */
@@ -3030,12 +3057,15 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret);
  *     longer used environments instead of destroying them;
  *
  * - there is a notion of "current" environment, attached to the kernel
- *   data structure representing current thread (current->journal_info in
- *   Linux kernel). Top-level lustre code allocates an environment and makes
- *   it current, then calls into non-lustre code, that in turn calls lustre
- *   back. Low-level lustre code thus called can fetch environment created
- *   by the top-level code and reuse it, avoiding additional environment
- *   allocation.
+ *   data structure representing current thread Top-level lustre code
+ *   allocates an environment and makes it current, then calls into
+ *   non-lustre code, that in turn calls lustre back. Low-level lustre
+ *   code thus called can fetch environment created by the top-level code
+ *   and reuse it, avoiding additional environment allocation.
+ *   Right now, three interfaces can attach the cl_env to running thread:
+ *    - cl_env_get
+ *    - cl_env_implant
+ *    - cl_env_reexit(cl_env_reenter had to be called priorly)
  *
  * \see lu_env, lu_context, lu_context_key
  * @{ */
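
The cpo_own()/cl_page_own_try() additions above give callers a non-blocking way to take page ownership. A minimal sketch of how a caller might use it, assuming (the header does not spell this out) that cl_page_own_try() returns 0 when ownership was taken and a negative value when the page is currently owned by another IO:

/* Sketch only: the "negative value on busy page" return convention is an
 * assumption; the authoritative behaviour lives in cl_page.c. */
static int try_own_or_defer(const struct lu_env *env, struct cl_io *io,
                            struct cl_page *page)
{
        int rc;

        rc = cl_page_own_try(env, io, page);    /* non-blocking attempt */
        if (rc != 0)
                /* page is owned elsewhere: skip it for now, or fall back
                 * to the blocking cl_page_own() */
                return rc;

        /* ... operate on the owned page ... */
        cl_page_disown(env, io, page);
        return 0;
}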
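
The new CLS_INTRANSIT state and the cl_lock_intransit()/cl_lock_extransit()/cl_lock_is_intransit() helpers declared above form an ownership protocol for multi-step state changes that cannot be done atomically across all sublocks. The sketch below only illustrates the shape of that protocol; the real users are cl_use_try() and cl_unuse_try() in cl_lock.c, and do_sublock_work() is a hypothetical stand-in:

static int do_sublock_work(const struct lu_env *env, struct cl_lock *lock);

/* Illustration of the INTRANSIT protocol; assumes the lock mutex is held. */
static int change_lock_state(const struct lu_env *env, struct cl_lock *lock)
{
        enum cl_lock_state saved;
        int rc;

        if (cl_lock_is_intransit(lock))
                return CLO_WAIT;   /* another thread owns the transition */

        /* become the transition owner; the previous state is returned so
         * it can be restored if the multi-step change fails */
        saved = cl_lock_intransit(env, lock);

        rc = do_sublock_work(env, lock);        /* hypothetical sub-lock work */

        /* leave CLS_INTRANSIT: final state on success, old state on error */
        cl_lock_extransit(env, lock, rc == 0 ? CLS_HELD : saved);
        return rc;
}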
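
cl_io_submit_sync() bundles cl_io_submit_rw() with the wait for completion that callers previously open-coded. A sketch of a synchronous read of an already-populated 2-queue; CRT_READ and CRP_NORMAL come from enum cl_req_type and enum cl_req_priority elsewhere in this header, and treating a timeout of 0 as "no timeout" is an assumption, not something this declaration states:

static int read_pages_sync(const struct lu_env *env, struct cl_io *io,
                           struct cl_2queue *queue)
{
        /* submit the queue for read and block until every page completes
         * (or the timeout, here "none", expires) */
        return cl_io_submit_sync(env, io, CRT_READ, queue, CRP_NORMAL, 0);
}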
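
cl_sync_io now sleeps on a cfs_waitq_t with an optional timeout instead of a completion, and each in-flight page can point at the anchor through the new cp_sync_io field. The declarations suggest the following pattern; the use of the 2-queue's c2_qin/c2_qout lists, the cl_page_list_for_each() iterator, and the 600-second timeout are assumptions made for the sake of the sketch, not requirements stated here:

static int transfer_and_wait(const struct lu_env *env, struct cl_io *io,
                             struct cl_2queue *queue, int nrpages)
{
        struct cl_sync_io  anchor;
        struct cl_page    *pg;
        int                rc;

        /* csi_sync_nr = nrpages; csi_waitq is woken when it reaches zero */
        cl_sync_io_init(&anchor, nrpages);

        /* let the completion path find the anchor through cp_sync_io */
        cl_page_list_for_each(pg, &queue->c2_qin)
                pg->cp_sync_io = &anchor;

        rc = cl_io_submit_rw(env, io, CRT_WRITE, queue, CRP_NORMAL);
        if (rc == 0)
                /* each completed page calls cl_sync_io_note(&anchor, ioret);
                 * wait here until all of them have, or 600 seconds pass */
                rc = cl_sync_io_wait(env, io, &queue->c2_qout, &anchor, 600);
        return rc;
}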