* super-class definitions.
*/
#include <lu_object.h>
-#include <lvfs.h>
#ifdef __KERNEL__
# include <linux/mutex.h>
# include <linux/radix-tree.h>
* to be used instead of newly created.
*/
int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage);
+ struct cl_page *page, struct page *vmpage);
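As an illustration of how a layer would implement this hook (not part of this patch; all "foo" names are hypothetical), a coo_page_init() typically allocates its per-layer page slice and attaches it with cl_page_slice_add(), the helper real layers use for this purpose:

/* Illustrative sketch only, not part of this patch.  The foo_page
 * slice, foo_page_ops vector and cl2foo_page() are invented names. */
struct foo_page {
        struct cl_page_slice fp_cl;
        struct page         *fp_vmpage;
};

static const struct cl_page_operations foo_page_ops;

static inline struct foo_page *cl2foo_page(const struct cl_page_slice *slice)
{
        return container_of(slice, struct foo_page, fp_cl);
}

static int foo_page_init(const struct lu_env *env, struct cl_object *obj,
                         struct cl_page *page, struct page *vmpage)
{
        struct foo_page *fp;

        OBD_ALLOC_PTR(fp);
        if (fp == NULL)
                return -ENOMEM;
        fp->fp_vmpage = vmpage;
        cl_page_slice_add(page, &fp->fp_cl, obj, &foo_page_ops);
        return 0;
}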
/**
* Initialize lock slice for this layer. Called top-to-bottom through
* every object layer when a new cl_lock is instantiated. Layer
* corresponding radix tree at the corresponding logical offset.
*
* cl_page is associated with a VM page of the hosting environment (struct
- * page in Linux kernel, for example), cfs_page_t. It is assumed, that this
+ * page in the Linux kernel, for example). It is assumed that this
* association is implemented by one of cl_page layers (top layer in the
* current design) that
*
* - translates state (page flag bits) and locking between lustre and
* environment.
*
- * The association between cl_page and cfs_page_t is immutable and
+ * The association between cl_page and struct page is immutable and
* established when cl_page is created.
*
* cl_page can be "owned" by a particular cl_io (see below), guaranteeing
* eviction of the page from the memory). Note, that in general cl_io
* cannot be identified with a particular thread, and page ownership is not
* exactly equal to the current thread holding a lock on the page. Layer
- * implementing association between cl_page and cfs_page_t has to implement
+ * implementing the association between cl_page and struct page has to implement
* ownership on top of available synchronization mechanisms.
*
* While lustre client maintains the notion of page ownership by io,
* - by doing a lookup in the cl_object radix tree, protected by the
* spin-lock;
*
- * - by starting from VM-locked cfs_page_t and following some
+ * - by starting from VM-locked struct page and following some
* hosting environment method (e.g., following ->private pointer in
* the case of Linux kernel), see cl_vmpage_page();
*
*
* Linux Kernel implementation.
*
- * Binding between cl_page and cfs_page_t (which is a typedef for
- * struct page) is implemented in the vvp layer. cl_page is attached to the
+ * Binding between cl_page and struct page is implemented in the vvp
+ * layer. cl_page is attached to the
* ->private pointer of the struct page, together with the setting of
* PG_private bit in page->flags, and acquiring additional reference on the
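A hedged sketch of the binding just described (not part of this patch): the attach helper below is invented, while cl_vmpage_page() is the real reverse lookup declared later in this header and usable once the VM page is locked:

/* Sketch only: attaching a cl_page to its struct page as described
 * above, and recovering it later.  example_attach() is hypothetical. */
static void example_attach(struct cl_page *clp, struct page *vmpage)
{
        get_page(vmpage);        /* extra reference held by the binding */
        SetPagePrivate(vmpage);  /* PG_private: ->private is meaningful */
        vmpage->private = (unsigned long)clp;
}

static struct cl_page *example_lookup(struct page *vmpage,
                                      struct cl_object *obj)
{
        /* caller must hold the VM page lock, see cl_vmpage_page() */
        return cl_vmpage_page(vmpage, obj);
}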
};
/**
- * Fields are protected by the lock on cfs_page_t, except for atomics and
+ * Fields are protected by the lock on struct page, except for atomics and
* immutables.
*
* \invariant Data type invariants are in cl_page_invariant(). Basically:
/**
* Debug information, the task is owning the page.
*/
- cfs_task_t *cp_task;
+ struct task_struct *cp_task;
/**
* Owning IO request in cl_page_state::CPS_PAGEOUT and
* cl_page_state::CPS_PAGEIN states. This field is maintained only in
*/
struct cl_page_operations {
/**
- * cl_page<->cfs_page_t methods. Only one layer in the stack has to
+ * cl_page<->struct page methods. Only one layer in the stack has to
* implement these. Current code assumes that this functionality is
* provided by the topmost layer, see cl_page_disown0() as an example.
*/
/**
* \return the underlying VM page. Optional.
*/
- cfs_page_t *(*cpo_vmpage)(const struct lu_env *env,
+ struct page *(*cpo_vmpage)(const struct lu_env *env,
const struct cl_page_slice *slice);
/**
* Called when \a io acquires this page into the exclusive
*/
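For the ->cpo_vmpage() method above, the topmost layer would simply return the struct page cached in its slice; a minimal sketch, reusing the hypothetical foo_page slice from the coo_page_init() sketch earlier:

/* Minimal sketch, not part of this patch: a topmost layer's
 * ->cpo_vmpage() returning the vmpage recorded at page init time. */
static struct page *foo_page_vmpage(const struct lu_env *env,
                                    const struct cl_page_slice *slice)
{
        return cl2foo_page(slice)->fp_vmpage;
}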
struct cl_lock_descr cll_descr;
/** Protected by cl_lock::cll_guard. */
- enum cl_lock_state cll_state;
- /** signals state changes. */
- cfs_waitq_t cll_wq;
- /**
- * Recursive lock, most fields in cl_lock{} are protected by this.
- *
- * Locking rules: this mutex is never held across network
- * communication, except when lock is being canceled.
- *
- * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
- * on a top-lock. Other direction is implemented through a
- * try-lock-repeat loop. Mutices of unrelated locks can be taken only
- * by try-locking.
- *
- * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
- */
+ enum cl_lock_state cll_state;
+ /** signals state changes. */
+ wait_queue_head_t cll_wq;
+ /**
+ * Recursive lock, most fields in cl_lock{} are protected by this.
+ *
+ * Locking rules: this mutex is never held across network
+ * communication, except when lock is being canceled.
+ *
+ * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
+ * on a top-lock. The other direction is implemented through a
+ * try-lock-repeat loop, sketched below. Mutexes of unrelated locks
+ * can be taken only by try-locking.
+ *
+ * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
+ */
struct mutex cll_guard;
- cfs_task_t *cll_guarder;
+ struct task_struct *cll_guarder;
int cll_depth;
/**
* the owner for INTRANSIT state
*/
- cfs_task_t *cll_intransit_owner;
+ struct task_struct *cll_intransit_owner;
int cll_error;
/**
* Number of holds on a lock. A hold prevents a lock from being
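The try-lock-repeat loop referred to in the cll_guard comment above can be illustrated as follows. This is a sketch only: top_then_sub() is invented for the example and builds on the cl_lock mutex helpers (cl_lock_mutex_get(), cl_lock_mutex_try(), cl_lock_mutex_put()); the real users are the \see references above.

/* Sketch only, not part of this patch: taking a sub-lock mutex while
 * a top-lock mutex is held, i.e. the direction the lock ordering
 * above forbids taking directly.  On a failed try-lock both mutexes
 * are dropped and the sequence restarts. */
static void top_then_sub(const struct lu_env *env,
                         struct cl_lock *top, struct cl_lock *sub)
{
        for (;;) {
                cl_lock_mutex_get(env, top);
                /* assumption: cl_lock_mutex_try() returns 0 on success */
                if (cl_lock_mutex_try(env, sub) == 0)
                        break;                  /* both mutexes now held */
                cl_lock_mutex_put(env, top);    /* back off and retry */
        }
}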
* @{
*/
struct cl_page_list {
- unsigned pl_nr;
- cfs_list_t pl_pages;
- cfs_task_t *pl_owner;
+ unsigned pl_nr;
+ cfs_list_t pl_pages;
+ struct task_struct *pl_owner;
};
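These fields carry an implicit invariant: pl_nr counts the pages linked on pl_pages, and only pl_owner may manipulate the list. A hypothetical checker (not part of this patch; example_plist_ok() is invented, and cfs_list_t is assumed to be the usual list_head-style list):

/* Hypothetical invariant check for cl_page_list. */
static int example_plist_ok(struct cl_page_list *plist)
{
        cfs_list_t *scan;
        unsigned    nr = 0;

        cfs_list_for_each(scan, &plist->pl_pages)
                nr++;
        return nr == plist->pl_nr && plist->pl_owner == current;
}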
/**
* Right now, only two operations need to verify layout: glimpse
* and setattr.
*/
- ci_verify_layout:1;
- /**
- * Number of pages owned by this IO. For invariant checking.
- */
- unsigned ci_owned_nr;
+ ci_verify_layout:1,
+ /**
+ * file is released, restore has to be triggered by vvp layer
+ */
+ ci_restore_needed:1,
+ /**
+ * O_NOATIME
+ */
+ ci_noatime:1;
+ /**
+ * Number of pages owned by this IO. For invariant checking.
+ */
+ unsigned ci_owned_nr;
};
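As an illustration of how the new ci_noatime bit would be driven (hypothetical, not part of this patch; example_io_setup() is invented, with the real wiring living in the vvp layer per the comment above):

/* Hypothetical example: propagating O_NOATIME from the struct file
 * into the new ci_noatime bit at io setup time. */
static void example_io_setup(struct cl_io *io, struct file *file)
{
        /* io paths should then skip atime updates for this file */
        io->ci_noatime = (file->f_flags & O_NOATIME) != 0;
}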
/** @} cl_io */
void cl_page_header_print(const struct lu_env *env, void *cookie,
lu_printer_t printer,
const struct cl_page *pg);
-cfs_page_t *cl_page_vmpage (const struct lu_env *env,
+struct page *cl_page_vmpage (const struct lu_env *env,
struct cl_page *page);
-struct cl_page *cl_vmpage_page (cfs_page_t *vmpage, struct cl_object *obj);
+struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj);
struct cl_page *cl_page_top (struct cl_page *page);
const struct cl_page_slice *cl_page_at(const struct cl_page *page,
/** barrier of destroy this structure */
cfs_atomic_t csi_barrier;
/** completion to be signaled when transfer is complete. */
- cfs_waitq_t csi_waitq;
+ wait_queue_head_t csi_waitq;
};
void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
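A hedged sketch of the intended use of this anchor: initialize it for the number of pages in flight, submit the transfer, then sleep on csi_waitq until every page has signalled completion. submit_pages() below is a hypothetical stand-in for the real transfer entry point, cl_sync_io_wait() is this API's companion wait routine, and the timeout semantics are an assumption:

/* Sketch only, not part of this patch. */
static int example_sync_transfer(const struct lu_env *env, struct cl_io *io,
                                 struct cl_page_list *queue, int nrpages)
{
        struct cl_sync_io anchor;
        int rc;

        cl_sync_io_init(&anchor, nrpages);  /* expect nrpages completions */
        rc = submit_pages(env, io, queue, &anchor);     /* hypothetical */
        if (rc == 0)
                /* sleeps on csi_waitq until all pages are accounted for;
                 * timeout of 0 assumed to mean "wait indefinitely" */
                rc = cl_sync_io_wait(env, io, queue, &anchor, 0);
        return rc;
}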
void cl_env_reexit (void *cookie);
void cl_env_implant (struct lu_env *env, int *refcheck);
void cl_env_unplant (struct lu_env *env, int *refcheck);
+unsigned cl_env_cache_purge(unsigned nr);
/** @} cl_env */
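The newly exported cl_env_cache_purge() trims up to nr environments from the cl_env cache; a hedged usage example, matching the common shutdown-time call:

/* Example: drop every cached environment, e.g. during client
 * shutdown; ~0 requests "as many as exist". */
cl_env_cache_purge(~0);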