*
* \see ccc_page::cpg_pending_linkage
*/
- cfs_list_t cob_pending_list;
+ struct list_head cob_pending_list;
/**
 * Access to this counter is protected by inode->i_sem. Now that
void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
int ccc_lock_enqueue(const struct lu_env *env, const struct cl_lock_slice *slice,
struct cl_io *io, __u32 enqflags);
+int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_fits_into(const struct lu_env *env,
*/
atomic_t ccc_users;
/**
+ * # of threads doing shrinking
+ */
+ unsigned int ccc_lru_shrinkers;
+ /**
* # of LRU entries available
*/
- atomic_t ccc_lru_left;
+ atomic_long_t ccc_lru_left;
/**
 * List of entities (OSCs) for this LRU cache
*/
- cfs_list_t ccc_lru;
+ struct list_head ccc_lru;
/**
* Max # of LRU entries
*/
*/
spinlock_t ccc_lru_lock;
/**
- * # of threads are doing shrinking
- */
- unsigned int ccc_lru_shrinkers;
- /**
* Set if unstable check is enabled
*/
unsigned int ccc_unstable_check:1;
/**
+ * # of unstable pages for this mount point
+ */
+ atomic_long_t ccc_unstable_nr;
+ /**
 * Waitq used to wait for the unstable page count to reach zero.
* Used at umounting time and signaled on BRW commit
*/
wait_queue_head_t ccc_unstable_waitq;
- /**
- * # of unstable pages for this mount point
- */
- atomic_t ccc_unstable_nr;
};
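
The LRU fields above act as a shared budget: ccc_lru_left counts the slots still available across all OSCs linked on ccc_lru, and the switch to atomic_long_t keeps the counter from wrapping on hosts whose page counts exceed the 32-bit range of atomic_t. A minimal sketch of how a caller might reserve and release one slot, assuming the enclosing struct is cl_client_cache and the demo_* helper names are hypothetical:

    #include <linux/atomic.h>
    #include <linux/errno.h>

    /* Hypothetical helper: try to take one LRU slot from the budget. */
    static int demo_reserve_lru_slot(struct cl_client_cache *cache)
    {
    	if (atomic_long_dec_return(&cache->ccc_lru_left) >= 0)
    		return 0;		/* slot reserved */
    	/* Budget overdrawn: restore the slot and report pressure. */
    	atomic_long_inc(&cache->ccc_lru_left);
    	return -ENOMEM;
    }

    /* Hypothetical helper: return one slot when a page leaves the LRU. */
    static void demo_release_lru_slot(struct cl_client_cache *cache)
    {
    	atomic_long_inc(&cache->ccc_lru_left);
    }
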
enum {
CLI_HASH64 = 1 << 2,
CLI_API32 = 1 << 3,
CLI_MIGRATE = 1 << 4,
- CLI_NEXT_ENTRY = 1 << 5,
};
#endif /* LCLIENT_H */
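
The unstable-page fields follow the pattern named in their comments: a BRW write bumps ccc_unstable_nr when it makes a page unstable, the commit path decrements the count and signals ccc_unstable_waitq when it reaches zero, and umount blocks until the count drains. A minimal sketch under the same assumptions (cl_client_cache as the enclosing struct, demo_* names hypothetical):

    #include <linux/atomic.h>
    #include <linux/wait.h>

    /* A BRW write has made one more page unstable (sent, not committed). */
    static void demo_add_unstable(struct cl_client_cache *cache)
    {
    	atomic_long_inc(&cache->ccc_unstable_nr);
    }

    /* BRW commit: drop the count and wake umount waiters at zero. */
    static void demo_commit_unstable(struct cl_client_cache *cache)
    {
    	if (atomic_long_dec_return(&cache->ccc_unstable_nr) == 0)
    		wake_up_all(&cache->ccc_unstable_waitq);
    }

    /* Umount path: wait until every unstable page has been committed. */
    static void demo_umount_wait(struct cl_client_cache *cache)
    {
    	wait_event(cache->ccc_unstable_waitq,
    		   atomic_long_read(&cache->ccc_unstable_nr) == 0);
    }

Here too the atomic_long_t conversion matters: these counters scale with client memory size, so on very large nodes they can exceed the 32-bit range that atomic_t provides.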