*
* \see ccc_page::cpg_pending_linkage
*/
- cfs_list_t cob_pending_list;
+ struct list_head cob_pending_list;
/**
 * ccc-private page state.
 */
struct ccc_page {
- struct cl_page_slice cpg_cl;
- int cpg_defer_uptodate;
- int cpg_ra_used;
- int cpg_write_queued;
- /**
- * Non-empty iff this page is already counted in
- * ccc_object::cob_pending_list. Protected by
- * ccc_object::cob_pending_guard. This list is only used as a flag,
- * that is, never iterated through, only checked for list_empty(), but
- * having a list is useful for debugging.
- */
- cfs_list_t cpg_pending_linkage;
- /** VM page */
- struct page *cpg_page;
+ struct cl_page_slice cpg_cl;
+ unsigned cpg_defer_uptodate:1,
+ cpg_ra_used:1,
+ cpg_write_queued:1;
+ /**
+ * Non-empty iff this page is already counted in
+ * ccc_object::cob_pending_list. Protected by
+ * ccc_object::cob_pending_guard. This list is only used as a flag,
+ * that is, never iterated through, only checked for list_empty(), but
+ * having a list is useful for debugging.
+ */
+ struct list_head cpg_pending_linkage;
+ /** VM page */
+ struct page *cpg_page;
};
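/*
 * Illustrative sketch (not part of this header): how cpg_pending_linkage
 * serves as a membership flag, per the comment above. The helper names
 * below are hypothetical; real callers must hold
 * ccc_object::cob_pending_guard around both operations.
 */
static inline void ccc_page_set_pending(struct ccc_object *obj,
					struct ccc_page *cp)
{
	/* An empty linkage means the page is not yet on cob_pending_list. */
	if (list_empty(&cp->cpg_pending_linkage))
		list_add(&cp->cpg_pending_linkage, &obj->cob_pending_list);
}

static inline void ccc_page_clear_pending(struct ccc_page *cp)
{
	/*
	 * list_del_init() re-initializes the node, so list_empty() keeps
	 * working as the "not pending" test afterwards.
	 */
	if (!list_empty(&cp->cpg_pending_linkage))
		list_del_init(&cp->cpg_pending_linkage);
}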
static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
{
	/* Recover the ccc_page that embeds this cl_page_slice (cpg_cl). */
	return container_of(slice, struct ccc_page, cpg_cl);
}
void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
int ccc_lock_enqueue(const struct lu_env *env, const struct cl_lock_slice *slice,
		     struct cl_io *io, __u32 enqflags);
+int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_fits_into(const struct lu_env *env,
		       const struct cl_lock_slice *slice,
		       const struct cl_lock_descr *need,
		       const struct cl_io *io);
* layer for recovery purposes.
*/
struct cl_client_cache {
- atomic_t ccc_users; /* # of users (OSCs) */
- cfs_list_t ccc_lru; /* LRU of cached clean pages */
- spinlock_t ccc_lru_lock; /* lock for list */
- atomic_t ccc_lru_left; /* # of LRU entries available */
- unsigned long ccc_lru_max; /* Max # of LRU entries */
- unsigned int ccc_lru_shrinkers; /* # of threads shrinking */
- atomic_t ccc_unstable_nr; /* # of pages pinned */
- wait_queue_head_t ccc_unstable_waitq; /* Signaled on BRW commit */
+ /**
+ * # of users (OSCs)
+ */
+ atomic_t ccc_users;
+ /**
+ * # of LRU entries available
+ */
+ atomic_t ccc_lru_left;
+ /**
+ * List of entities (OSCs) for this LRU cache
+ */
+ struct list_head ccc_lru;
+ /**
+ * Max # of LRU entries
+ */
+ unsigned long ccc_lru_max;
+ /**
+ * Lock to protect ccc_lru list
+ */
+ spinlock_t ccc_lru_lock;
+ /**
+ * # of threads currently shrinking
+ */
+ unsigned int ccc_lru_shrinkers;
+ /**
+ * Set if unstable check is enabled
+ */
+ unsigned int ccc_unstable_check:1;
+ /**
+ * Waitq used to wait for unstable pages to reach zero.
+ * Waited on at unmount time and signaled on BRW commit.
+ */
+ wait_queue_head_t ccc_unstable_waitq;
+ /**
+ * # of unstable pages for this mount point
+ */
+ atomic_t ccc_unstable_nr;
};
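/*
 * Illustrative sketch (not part of the original patch): how the unstable
 * page accounting above is typically consumed. ccc_wait_unstable_pages()
 * is a hypothetical helper; an unmount path would block here until BRW
 * commits have signaled ccc_unstable_waitq and the counter has drained.
 */
static inline void ccc_wait_unstable_pages(struct cl_client_cache *cache)
{
	if (cache->ccc_unstable_check)
		wait_event(cache->ccc_unstable_waitq,
			   atomic_read(&cache->ccc_unstable_nr) == 0);
}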
enum {
CLI_HASH64 = 1 << 2,
CLI_API32 = 1 << 3,
CLI_MIGRATE = 1 << 4,
- CLI_NEXT_ENTRY = 1 << 5,
};
#endif /* LCLIENT_H */