*
* \see ccc_page::cpg_pending_linkage
*/
- cfs_list_t cob_pending_list;
+ struct list_head cob_pending_list;
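/*
 * Illustration only, not part of the patch: once cob_pending_list is a
 * standard struct list_head, it is manipulated with the regular Linux
 * list API from <linux/list.h>. The helper name below is hypothetical;
 * cpg_pending_linkage is the page-side linkage referenced above.
 */
static void ccc_pending_list_sketch(struct list_head *cob_pending_list,
                                    struct list_head *cpg_pending_linkage)
{
	INIT_LIST_HEAD(cob_pending_list);	/* start with an empty list */
	/* queue a page on the object's pending list */
	list_add_tail(cpg_pending_linkage, cob_pending_list);
	/* unlink it again, leaving the entry reinitialized for reuse */
	list_del_init(cpg_pending_linkage);
}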
/**
 * Access to this counter is protected by inode->i_sem. Now that
void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
int ccc_lock_enqueue(const struct lu_env *env, const struct cl_lock_slice *slice,
                     struct cl_io *io, __u32 enqflags);
+int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_fits_into(const struct lu_env *env,
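/*
 * Sketch, not part of the patch: these ccc_lock_* helpers are normally
 * plugged into a layer's cl_lock_operations vector (e.g. by the vvp
 * layer). The table below is an assumed illustration; the .clo_* field
 * names follow struct cl_lock_operations, and the new ccc_lock_use()
 * slots in alongside the existing callbacks.
 */
static const struct cl_lock_operations example_lock_ops = {
	.clo_fini      = ccc_lock_fini,
	.clo_enqueue   = ccc_lock_enqueue,
	.clo_use       = ccc_lock_use,		/* added by this patch */
	.clo_unuse     = ccc_lock_unuse,
	.clo_wait      = ccc_lock_wait,
	.clo_fits_into = ccc_lock_fits_into,
};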
* layer for recovery purposes.
*/
struct cl_client_cache {
- atomic_t ccc_users; /* # of users (OSCs) */
- cfs_list_t ccc_lru; /* LRU of cached clean pages */
- spinlock_t ccc_lru_lock; /* lock for list */
- atomic_t ccc_lru_left; /* # of LRU entries available */
- unsigned long ccc_lru_max; /* Max # of LRU entries */
- unsigned int ccc_lru_shrinkers; /* # of threads shrinking */
- atomic_t ccc_unstable_nr; /* # of pages pinned */
- wait_queue_head_t ccc_unstable_waitq; /* Signaled on BRW commit */
+ /**
+ * # of users (OSCs)
+ */
+ atomic_t ccc_users;
+ /**
+ * # of LRU entries available
+ */
+ atomic_t ccc_lru_left;
+ /**
+ * List of entities (OSCs) for this LRU cache
+ */
+ struct list_head ccc_lru;
+ /**
+ * Max # of LRU entries
+ */
+ unsigned long ccc_lru_max;
+ /**
+ * Lock to protect ccc_lru list
+ */
+ spinlock_t ccc_lru_lock;
+ /**
+ * # of threads currently shrinking this cache
+ */
+ unsigned int ccc_lru_shrinkers;
+ /**
+ * Set if unstable check is enabled
+ */
+ unsigned int ccc_unstable_check:1;
+ /**
+ * Wait queue for waiting until the unstable page count reaches zero.
+ * Waited on at umount time and signaled on BRW commit.
+ */
+ wait_queue_head_t ccc_unstable_waitq;
+ /**
+ * # of unstable pages for this mount point
+ */
+ atomic_t ccc_unstable_nr;
};
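/*
 * Sketch, not part of the patch: the intended interplay of the
 * unstable-page fields above, per their comments. A BRW commit drops
 * ccc_unstable_nr and signals the waitq; umount blocks until the count
 * drains. Both function names here are hypothetical.
 */
static void example_brw_commit(struct cl_client_cache *cache, int nr_pages)
{
	/* last unstable pages committed: wake anyone draining the cache */
	if (atomic_sub_return(nr_pages, &cache->ccc_unstable_nr) == 0)
		wake_up_all(&cache->ccc_unstable_waitq);
}

static void example_umount_drain(struct cl_client_cache *cache)
{
	/* block umount until every unstable page has been committed */
	if (cache->ccc_unstable_check)
		wait_event(cache->ccc_unstable_waitq,
			   atomic_read(&cache->ccc_unstable_nr) == 0);
}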
enum {
CLI_HASH64 = 1 << 2,
CLI_API32 = 1 << 3,
CLI_MIGRATE = 1 << 4,
- CLI_NEXT_ENTRY = 1 << 5,
};
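/*
 * Sketch, not part of the patch: CLI_* values are single-bit flags held
 * in a per-client flags word and tested with bitwise AND. The
 * client_flags parameter is an assumed stand-in for that word.
 */
static inline bool example_client_uses_32bit_api(unsigned int client_flags)
{
	return (client_flags & CLI_API32) != 0;
}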
#endif /* LCLIENT_H */