X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Flclient.h;h=9b2293294fbccb9c0ca3b8b2105361163cdeff46;hp=e48379864c0f4c4d4b9d08d36abe3146af3d2860;hb=b7caa793443674f65122d3e3ea23de569ae8510f;hpb=59f0a47800762b7833af50bff6acaa21ab08e481

diff --git a/lustre/include/lclient.h b/lustre/include/lclient.h
index e483798..9b22932 100644
--- a/lustre/include/lclient.h
+++ b/lustre/include/lclient.h
@@ -121,10 +121,6 @@ struct ccc_io {
                 } write;
         } u;
         /**
-         * True iff io is processing glimpse right now.
-         */
-        int cui_glimpse;
-        /**
          * Layout version when this IO is initialized
          */
         __u32 cui_layout_gen;
@@ -145,9 +141,10 @@ extern struct lu_context_key ccc_key;
 extern struct lu_context_key ccc_session_key;
 
 struct ccc_thread_info {
-        struct cl_lock_descr cti_descr;
-        struct cl_io         cti_io;
-        struct cl_attr       cti_attr;
+        struct cl_lock       cti_lock;
+        struct cl_lock_descr cti_descr;
+        struct cl_io         cti_io;
+        struct cl_attr       cti_attr;
 };
 
 static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env)
@@ -159,6 +156,13 @@ static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env)
         return info;
 }
 
+static inline struct cl_lock *ccc_env_lock(const struct lu_env *env)
+{
+        struct cl_lock *lock = &ccc_env_info(env)->cti_lock;
+        memset(lock, 0, sizeof *lock);
+        return lock;
+}
+
 static inline struct cl_attr *ccc_env_thread_attr(const struct lu_env *env)
 {
         struct cl_attr *attr = &ccc_env_info(env)->cti_attr;
@@ -205,7 +209,7 @@ struct ccc_object {
          *
          * \see ccc_page::cpg_pending_linkage
          */
-        cfs_list_t cob_pending_list;
+        struct list_head cob_pending_list;
 
         /**
          * Access this counter is protected by inode->i_sem. Now that
@@ -237,20 +241,20 @@ struct ccc_object {
  * ccc-private page state.
  */
 struct ccc_page {
-        struct cl_page_slice cpg_cl;
-        int cpg_defer_uptodate;
-        int cpg_ra_used;
-        int cpg_write_queued;
-        /**
-         * Non-empty iff this page is already counted in
-         * ccc_object::cob_pending_list. Protected by
-         * ccc_object::cob_pending_guard. This list is only used as a flag,
-         * that is, never iterated through, only checked for list_empty(), but
-         * having a list is useful for debugging.
-         */
-        cfs_list_t cpg_pending_linkage;
-        /** VM page */
-        struct page *cpg_page;
+        struct cl_page_slice cpg_cl;
+        unsigned cpg_defer_uptodate:1,
+                 cpg_ra_used:1,
+                 cpg_write_queued:1;
+        /**
+         * Non-empty iff this page is already counted in
+         * ccc_object::cob_pending_list. Protected by
+         * ccc_object::cob_pending_guard. This list is only used as a flag,
+         * that is, never iterated through, only checked for list_empty(), but
+         * having a list is useful for debugging.
+         */
+        struct list_head cpg_pending_linkage;
+        /** VM page */
+        struct page *cpg_page;
 };
 
 static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
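The ccc_env_lock() helper added above hands out the per-environment scratch cl_lock that this patch adds to ccc_thread_info, zeroing it on every call so no stale descriptor state leaks from one use to the next; the same group of hunks also packs ccc_page's three int flags into single-bit bitfields. Below is a minimal userspace sketch of the scratch-slot pattern, assuming nothing about the real Lustre types: scratch_lock, thread_info, env_info() and env_lock() are hypothetical stand-ins for cl_lock, ccc_thread_info, ccc_env_info() and ccc_env_lock().

#include <stdio.h>
#include <string.h>

/* Stand-in for cl_lock; the real structure carries much more state. */
struct scratch_lock {
        unsigned long sl_start;
        unsigned long sl_end;
        int           sl_mode;
};

/* Per-thread bundle of preallocated scratch objects (cf. ccc_thread_info). */
struct thread_info {
        struct scratch_lock ti_lock;
};

/* Thread-local storage stands in for the per-env lookup ccc_env_info() does. */
static struct thread_info *env_info(void)
{
        static __thread struct thread_info info;
        return &info;
}

/* cf. ccc_env_lock(): return the scratch lock, cleared of any stale state. */
static struct scratch_lock *env_lock(void)
{
        struct scratch_lock *lock = &env_info()->ti_lock;
        memset(lock, 0, sizeof *lock);
        return lock;
}

int main(void)
{
        struct scratch_lock *lock = env_lock();

        lock->sl_start = 0;
        lock->sl_end   = 4096;
        lock->sl_mode  = 1;
        printf("lock [%lu, %lu) mode %d\n",
               lock->sl_start, lock->sl_end, lock->sl_mode);

        /* A second caller gets the same storage, wiped clean. */
        lock = env_lock();
        printf("reused lock end after reset: %lu\n", lock->sl_end);
        return 0;
}

The unconditional memset makes the helper cheap and allocation-free, but it also means a caller must be finished with the previous lock before asking for the slot again; since a lu_env is used by one thread at a time, there is no cross-thread sharing to guard against.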
@@ -349,17 +353,7 @@ void ccc_lock_delete(const struct lu_env *env,
                      const struct cl_lock_slice *slice);
 void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
 int ccc_lock_enqueue(const struct lu_env *env, const struct cl_lock_slice *slice,
-                     struct cl_io *io, __u32 enqflags);
-int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice);
-int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice);
-int ccc_lock_fits_into(const struct lu_env *env,
-                       const struct cl_lock_slice *slice,
-                       const struct cl_lock_descr *need,
-                       const struct cl_io *io);
-void ccc_lock_state(const struct lu_env *env,
-                    const struct cl_lock_slice *slice,
-                    enum cl_lock_state state);
-
+                     struct cl_io *io, struct cl_sync_io *anchor);
 void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios);
 int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
                           __u32 enqflags, enum cl_lock_mode mode,
@@ -457,14 +451,43 @@ void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
  * layer for recovery purposes.
  */
 struct cl_client_cache {
-        atomic_t          ccc_users;          /* # of users (OSCs) */
-        cfs_list_t        ccc_lru;            /* LRU of cached clean pages */
-        spinlock_t        ccc_lru_lock;       /* lock for list */
-        atomic_t          ccc_lru_left;       /* # of LRU entries available */
-        unsigned long     ccc_lru_max;        /* Max # of LRU entries */
-        unsigned int      ccc_lru_shrinkers;  /* # of threads shrinking */
-        atomic_t          ccc_unstable_nr;    /* # of pages pinned */
-        wait_queue_head_t ccc_unstable_waitq; /* Signaled on BRW commit */
+        /**
+         * # of users (OSCs)
+         */
+        atomic_t                ccc_users;
+        /**
+         * # of threads doing shrinking
+         */
+        unsigned int            ccc_lru_shrinkers;
+        /**
+         * # of LRU entries available
+         */
+        atomic_long_t           ccc_lru_left;
+        /**
+         * List of entities (OSCs) for this LRU cache
+         */
+        struct list_head        ccc_lru;
+        /**
+         * Max # of LRU entries
+         */
+        unsigned long           ccc_lru_max;
+        /**
+         * Lock to protect the ccc_lru list
+         */
+        spinlock_t              ccc_lru_lock;
+        /**
+         * Set if unstable check is enabled
+         */
+        unsigned int            ccc_unstable_check:1;
+        /**
+         * # of unstable pages for this mount point
+         */
+        atomic_long_t           ccc_unstable_nr;
+        /**
+         * Waitq for awaiting unstable pages to reach zero.
+         * Used at umount time and signaled on BRW commit
+         */
+        wait_queue_head_t       ccc_unstable_waitq;
 };
 
 enum {
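The reworked cl_client_cache gives each field its own documented line and widens the page counters to atomic_long_t; the ccc_unstable_nr / ccc_unstable_waitq pair lets the umount path sleep until every unstable (written but not yet committed) page has been retired by a BRW commit. Here is a hedged userspace sketch of that drain-and-wait shape, using a pthread mutex and condition variable in place of the kernel waitqueue; client_cache, cache_put_unstable() and commit_thread() are illustrative names, not Lustre API.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Userspace stand-in for the counter + waitq pair in cl_client_cache. */
struct client_cache {
        long            cc_unstable_nr;   /* cf. ccc_unstable_nr */
        pthread_mutex_t cc_lock;
        pthread_cond_t  cc_unstable_cond; /* cf. ccc_unstable_waitq */
};

static struct client_cache cache = {
        .cc_unstable_nr   = 3,
        .cc_lock          = PTHREAD_MUTEX_INITIALIZER,
        .cc_unstable_cond = PTHREAD_COND_INITIALIZER,
};

/* Called when a commit releases one unstable page; wake waiters at zero. */
static void cache_put_unstable(struct client_cache *c)
{
        pthread_mutex_lock(&c->cc_lock);
        if (--c->cc_unstable_nr == 0)
                pthread_cond_broadcast(&c->cc_unstable_cond);
        pthread_mutex_unlock(&c->cc_lock);
}

/* Commit thread: retire the pinned pages one by one. */
static void *commit_thread(void *arg)
{
        struct client_cache *c = arg;
        int i;

        for (i = 0; i < 3; i++) {
                usleep(1000);
                cache_put_unstable(c);
        }
        return NULL;
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, commit_thread, &cache);

        /* Umount path: block until every unstable page has been committed. */
        pthread_mutex_lock(&cache.cc_lock);
        while (cache.cc_unstable_nr > 0)
                pthread_cond_wait(&cache.cc_unstable_cond, &cache.cc_lock);
        pthread_mutex_unlock(&cache.cc_lock);

        printf("all unstable pages committed, safe to unmount\n");
        pthread_join(tid, NULL);
        return 0;
}

In the kernel the commit side would wake ccc_unstable_waitq instead of a condvar, but the shape is the same: decrement under a lock, broadcast on reaching zero, and have the teardown path loop on the counter rather than trusting a single wakeup.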