/*
* super-class definitions.
*/
+#include <libcfs/libcfs.h>
#include <lu_object.h>
+
#ifdef __KERNEL__
-# include <linux/mutex.h>
-# include <linux/radix-tree.h>
+# include <linux/mutex.h>
+# include <linux/radix-tree.h>
+#else
+# include <liblustre.h>
#endif
struct inode;
* Group identifier for quota purposes.
*/
gid_t cat_gid;
+
+ /* nlink of the directory */
+ __u64 cat_nlink;
};
/**
* to be used instead of newly created.
*/
int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
/**
* Initialize lock slice for this layer. Called top-to-bottom through
* every object layer when a new cl_lock is instantiated. Layer
*/
int (*coo_glimpse)(const struct lu_env *env,
const struct cl_object *obj, struct ost_lvb *lvb);
+ /**
+ * Object prune method. Called when the layout is going to change on
+	 * this object, therefore each layer has to clean up its cache,
+ * mainly pages and locks.
+ */
+ int (*coo_prune)(const struct lu_env *env, struct cl_object *obj);
};
/**
struct cl_object_header {
/** Standard lu_object_header. cl_object::co_lu::lo_header points
* here. */
- struct lu_object_header coh_lu;
+ struct lu_object_header coh_lu;
/** \name locks
* \todo XXX move locks below to the separate cache-lines, they are
* mostly useless otherwise.
*/
/** @{ */
- /** Lock protecting page tree. */
- spinlock_t coh_page_guard;
/** Lock protecting lock list. */
- spinlock_t coh_lock_guard;
- /** @} locks */
- /** Radix tree of cl_page's, cached for this object. */
- struct radix_tree_root coh_tree;
- /** # of pages in radix tree. */
- unsigned long coh_pages;
- /** List of cl_lock's granted for this object. */
- cfs_list_t coh_locks;
+ spinlock_t coh_lock_guard;
+ /** @} locks */
+ /** List of cl_lock's granted for this object. */
+ struct list_head coh_locks;
/**
* Parent object. It is assumed that an object has a well-defined
* Helper macro: iterate over all layers of the object \a obj, assigning every
* layer top-to-bottom to \a slice.
*/
-#define cl_object_for_each(slice, obj) \
- cfs_list_for_each_entry((slice), \
- &(obj)->co_lu.lo_header->loh_layers, \
- co_lu.lo_linkage)
+#define cl_object_for_each(slice, obj) \
+ list_for_each_entry((slice), \
+ &(obj)->co_lu.lo_header->loh_layers,\
+ co_lu.lo_linkage)
+
/**
* Helper macro: iterate over all layers of the object \a obj, assigning every
* layer bottom-to-top to \a slice.
*/
-#define cl_object_for_each_reverse(slice, obj) \
- cfs_list_for_each_entry_reverse((slice), \
- &(obj)->co_lu.lo_header->loh_layers, \
- co_lu.lo_linkage)
+#define cl_object_for_each_reverse(slice, obj) \
+ list_for_each_entry_reverse((slice), \
+ &(obj)->co_lu.lo_header->loh_layers,\
+ co_lu.lo_linkage)
/** @} cl_object */
-#ifndef pgoff_t
-#define pgoff_t unsigned long
-#endif
-
#define CL_PAGE_EOF ((pgoff_t)~0ull)
/** \addtogroup cl_page cl_page
};
/**
- * Flags maintained for every cl_page.
- */
-enum cl_page_flags {
- /**
- * Set when pagein completes. Used for debugging (read completes at
- * most once for a page).
- */
- CPF_READ_COMPLETED = 1 << 0
-};
-
-/**
* Fields are protected by the lock on struct page, except for atomics and
* immutables.
*
* cl_page::cp_owner (when set).
*/
struct cl_page {
- /** Reference counter. */
- cfs_atomic_t cp_ref;
- /** An object this page is a part of. Immutable after creation. */
- struct cl_object *cp_obj;
- /** Logical page index within the object. Immutable after creation. */
- pgoff_t cp_index;
- /** List of slices. Immutable after creation. */
- cfs_list_t cp_layers;
- /** Parent page, NULL for top-level page. Immutable after creation. */
- struct cl_page *cp_parent;
- /** Lower-layer page. NULL for bottommost page. Immutable after
- * creation. */
- struct cl_page *cp_child;
- /**
- * Page state. This field is const to avoid accidental update, it is
- * modified only internally within cl_page.c. Protected by a VM lock.
- */
- const enum cl_page_state cp_state;
- /** Linkage of pages within group. Protected by cl_page::cp_mutex. */
- cfs_list_t cp_batch;
- /** Mutex serializing membership of a page in a batch. */
- struct mutex cp_mutex;
- /** Linkage of pages within cl_req. */
- cfs_list_t cp_flight;
- /** Transfer error. */
- int cp_error;
-
+ /** Reference counter. */
+ atomic_t cp_ref;
+ /** Transfer error. */
+ int cp_error;
+ /** An object this page is a part of. Immutable after creation. */
+ struct cl_object *cp_obj;
+ /** vmpage */
+ struct page *cp_vmpage;
+ /** Linkage of pages within group. Pages must be owned */
+ struct list_head cp_batch;
+ /** List of slices. Immutable after creation. */
+ struct list_head cp_layers;
+ /** Linkage of pages within cl_req. */
+ struct list_head cp_flight;
+ /**
+ * Page state. This field is const to avoid accidental update, it is
+ * modified only internally within cl_page.c. Protected by a VM lock.
+ */
+ const enum cl_page_state cp_state;
/**
* Page type. Only CPT_TRANSIENT is used so far. Immutable after
* creation.
*/
struct cl_io *cp_owner;
/**
- * Debug information, the task is owning the page.
- */
- struct task_struct *cp_task;
- /**
* Owning IO request in cl_page_state::CPS_PAGEOUT and
* cl_page_state::CPS_PAGEIN states. This field is maintained only in
* the top-level pages. Protected by a VM lock.
struct lu_ref_link cp_obj_ref;
/** Link to a queue, for debugging. */
struct lu_ref_link cp_queue_ref;
- /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */
- unsigned cp_flags;
/** Assigned if doing a sync_io */
struct cl_sync_io *cp_sync_io;
};
*/
struct cl_page_slice {
struct cl_page *cpl_page;
+ pgoff_t cpl_index;
/**
* Object slice corresponding to this page slice. Immutable after
* creation.
struct cl_object *cpl_obj;
const struct cl_page_operations *cpl_ops;
/** Linkage into cl_page::cp_layers. Immutable after creation. */
- cfs_list_t cpl_linkage;
+ struct list_head cpl_linkage;
};
/**
CLM_PHANTOM,
CLM_READ,
CLM_WRITE,
- CLM_GROUP
+ CLM_GROUP,
+ CLM_MAX,
};
/**
*/
/**
- * \return the underlying VM page. Optional.
- */
- struct page *(*cpo_vmpage)(const struct lu_env *env,
- const struct cl_page_slice *slice);
- /**
* Called when \a io acquires this page into the exclusive
	 * ownership. When this method returns, it is guaranteed that the page is
* not owned by other io, and no transfer is going on against
void (*cpo_export)(const struct lu_env *env,
const struct cl_page_slice *slice, int uptodate);
/**
- * Unmaps page from the user space (if it is mapped).
- *
- * \see cl_page_unmap()
- * \see vvp_page_unmap()
- */
- int (*cpo_unmap)(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
- /**
* Checks whether underlying VM page is locked (in the suitable
* sense). Used for assertions.
*
* \see cl_page_is_under_lock()
*/
int (*cpo_is_under_lock)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
+ const struct cl_page_slice *slice,
+ struct cl_io *io, pgoff_t *max);
/**
* Optional debugging helper. Prints given page slice.
*/
int (*cpo_make_ready)(const struct lu_env *env,
const struct cl_page_slice *slice);
- /**
- * Announce that this page is to be written out
- * opportunistically, that is, page is dirty, it is not
- * necessary to start write-out transfer right now, but
- * eventually page has to be written out.
- *
- * Main caller of this is the write path (see
- * vvp_io_commit_write()), using this method to build a
- * "transfer cache" from which large transfers are then
- * constructed by the req-formation engine.
- *
- * \todo XXX it would make sense to add page-age tracking
- * semantics here, and to oblige the req-formation engine to
- * send the page out not later than it is too old.
- *
- * \see cl_page_cache_add()
- */
- int (*cpo_cache_add)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
} io[CRT_NR];
/**
* Tell transfer engine that only [to, from] part of a page should be
*/
#define CL_PAGE_DEBUG(mask, env, page, format, ...) \
do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
cl_page_print(env, &msgdata, lu_cdebug_printer, page); \
CDEBUG(mask, format , ## __VA_ARGS__); \
} \
*/
#define CL_PAGE_HEADER(mask, env, page, format, ...) \
do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \
CDEBUG(mask, format , ## __VA_ARGS__); \
} \
} while (0)
-static inline int __page_in_use(const struct cl_page *page, int refc)
+static inline struct page *cl_page_vmpage(const struct cl_page *page)
+{
+ LASSERT(page->cp_vmpage != NULL);
+ return page->cp_vmpage;
+}
+
+/**
+ * Check if a cl_page is in use.
+ *
+ * Client cache holds a refcount, this refcount will be dropped when
+ * the page is taken out of cache, see vvp_page_delete().
+ */
+static inline bool __page_in_use(const struct cl_page *page, int refc)
{
- if (page->cp_type == CPT_CACHEABLE)
- ++refc;
- LASSERT(cfs_atomic_read(&page->cp_ref) > 0);
- return (cfs_atomic_read(&page->cp_ref) > refc);
+ return (atomic_read(&page->cp_ref) > refc + 1);
}
+
+/**
+ * Caller itself holds a refcount of cl_page.
+ */
#define cl_page_in_use(pg) __page_in_use(pg, 1)
+/**
+ * Caller doesn't hold a refcount.
+ */
#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
/** @} cl_page */
* List of enclosed locks, so far. Locks are linked here through
* cl_lock::cll_inclosure.
*/
- cfs_list_t clc_list;
+ struct list_head clc_list;
/**
* True iff closure is in a `wait' mode. This determines what
* cl_lock_enclosure() does when a lock L to be added to the closure
* Layered client lock.
*/
struct cl_lock {
- /** Reference counter. */
- cfs_atomic_t cll_ref;
- /** List of slices. Immutable after creation. */
- cfs_list_t cll_layers;
- /**
- * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
- * by cl_lock::cll_descr::cld_obj::coh_lock_guard.
- */
- cfs_list_t cll_linkage;
- /**
- * Parameters of this lock. Protected by
- * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
- * cl_lock::cll_guard. Modified only on lock creation and in
- * cl_lock_modify().
- */
+ /** Reference counter. */
+ atomic_t cll_ref;
+ /** List of slices. Immutable after creation. */
+ struct list_head cll_layers;
+ /**
+ * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
+ * by cl_lock::cll_descr::cld_obj::coh_lock_guard.
+ */
+ struct list_head cll_linkage;
+ /**
+ * Parameters of this lock. Protected by
+ * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
+ * cl_lock::cll_guard. Modified only on lock creation and in
+ * cl_lock_modify().
+ */
struct cl_lock_descr cll_descr;
/** Protected by cl_lock::cll_guard. */
enum cl_lock_state cll_state;
*
* \see cl_lock_closure
*/
- cfs_list_t cll_inclosure;
+ struct list_head cll_inclosure;
/**
	 * Conflict lock at queuing time.
*/
struct cl_object *cls_obj;
const struct cl_lock_operations *cls_ops;
/** Linkage into cl_lock::cll_layers. Immutable after creation. */
- cfs_list_t cls_linkage;
+ struct list_head cls_linkage;
};
/**
#define CL_LOCK_DEBUG(mask, env, lock, format, ...) \
do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
cl_lock_print(env, &msgdata, lu_cdebug_printer, lock); \
CDEBUG(mask, format , ## __VA_ARGS__); \
} \
* @{
*/
struct cl_page_list {
- unsigned pl_nr;
- cfs_list_t pl_pages;
- struct task_struct *pl_owner;
+ unsigned pl_nr;
+ struct list_head pl_pages;
+ struct task_struct *pl_owner;
};
/**
* \see vvp_io, lov_io, osc_io, ccc_io
*/
struct cl_io_slice {
- struct cl_io *cis_io;
- /** corresponding object slice. Immutable after creation. */
- struct cl_object *cis_obj;
- /** io operations. Immutable after creation. */
- const struct cl_io_operations *cis_iop;
- /**
- * linkage into a list of all slices for a given cl_io, hanging off
- * cl_io::ci_layers. Immutable after creation.
- */
- cfs_list_t cis_linkage;
+ struct cl_io *cis_io;
+ /** corresponding object slice. Immutable after creation. */
+ struct cl_object *cis_obj;
+ /** io operations. Immutable after creation. */
+ const struct cl_io_operations *cis_iop;
+ /**
+ * linkage into a list of all slices for a given cl_io, hanging off
+ * cl_io::ci_layers. Immutable after creation.
+ */
+ struct list_head cis_linkage;
};
+typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
+ struct cl_page *);
/**
* Per-layer io operations.
void (*cio_fini) (const struct lu_env *env,
const struct cl_io_slice *slice);
} op[CIT_OP_NR];
- struct {
- /**
- * Submit pages from \a queue->c2_qin for IO, and move
- * successfully submitted pages into \a queue->c2_qout. Return
- * non-zero if failed to submit even the single page. If
- * submission failed after some pages were moved into \a
- * queue->c2_qout, completion callback with non-zero ioret is
- * executed on them.
- */
- int (*cio_submit)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- enum cl_req_type crt,
- struct cl_2queue *queue);
- } req_op[CRT_NR];
+
+ /**
+ * Submit pages from \a queue->c2_qin for IO, and move
+ * successfully submitted pages into \a queue->c2_qout. Return
+ * non-zero if failed to submit even the single page. If
+ * submission failed after some pages were moved into \a
+ * queue->c2_qout, completion callback with non-zero ioret is
+ * executed on them.
+ */
+ int (*cio_submit)(const struct lu_env *env,
+ const struct cl_io_slice *slice,
+ enum cl_req_type crt,
+ struct cl_2queue *queue);
+ /**
+ * Queue async page for write.
+	 * The difference between cio_submit and cio_commit_async is that
+ * cio_submit is for urgent request.
+ */
+ int (*cio_commit_async)(const struct lu_env *env,
+ const struct cl_io_slice *slice,
+ struct cl_page_list *queue, int from, int to,
+ cl_commit_cbt cb);
/**
* Read missing page.
*
const struct cl_io_slice *slice,
const struct cl_page_slice *page);
/**
- * Prepare write of a \a page. Called bottom-to-top by a top-level
- * cl_io_operations::op[CIT_WRITE]::cio_start() to prepare page for
- * get data from user-level buffer.
- *
- * \pre io->ci_type == CIT_WRITE
- *
- * \see vvp_io_prepare_write(), lov_io_prepare_write(),
- * osc_io_prepare_write().
- */
- int (*cio_prepare_write)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- const struct cl_page_slice *page,
- unsigned from, unsigned to);
- /**
- *
- * \pre io->ci_type == CIT_WRITE
- *
- * \see vvp_io_commit_write(), lov_io_commit_write(),
- * osc_io_commit_write().
- */
- int (*cio_commit_write)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- const struct cl_page_slice *page,
- unsigned from, unsigned to);
- /**
* Optional debugging helper. Print given io slice.
*/
int (*cio_print)(const struct lu_env *env, void *cookie,
* same lock can be part of multiple io's simultaneously.
*/
struct cl_io_lock_link {
- /** linkage into one of cl_lockset lists. */
- cfs_list_t cill_linkage;
- struct cl_lock_descr cill_descr;
- struct cl_lock *cill_lock;
- /** optional destructor */
- void (*cill_fini)(const struct lu_env *env,
- struct cl_io_lock_link *link);
+ /** linkage into one of cl_lockset lists. */
+ struct list_head cill_linkage;
+ struct cl_lock_descr cill_descr;
+ struct cl_lock *cill_lock;
+ /** optional destructor */
+ void (*cill_fini)(const struct lu_env *env,
+ struct cl_io_lock_link *link);
};
/**
* enqueued.
*/
struct cl_lockset {
- /** locks to be acquired. */
- cfs_list_t cls_todo;
- /** locks currently being processed. */
- cfs_list_t cls_curr;
- /** locks acquired. */
- cfs_list_t cls_done;
+ /** locks to be acquired. */
+ struct list_head cls_todo;
+ /** locks currently being processed. */
+ struct list_head cls_curr;
+ /** locks acquired. */
+ struct list_head cls_done;
};
/**
*/
struct cl_io *ci_parent;
/** List of slices. Immutable after creation. */
- cfs_list_t ci_layers;
+ struct list_head ci_layers;
/** list of locks (to be) acquired by this io. */
struct cl_lockset ci_lockset;
/** lock requirements, this is just a help info for sublayers. */
* req's pages.
*/
struct cl_req {
- enum cl_req_type crq_type;
- /** A list of pages being transfered */
- cfs_list_t crq_pages;
- /** Number of pages in cl_req::crq_pages */
- unsigned crq_nrpages;
- /** An array of objects which pages are in ->crq_pages */
- struct cl_req_obj *crq_o;
- /** Number of elements in cl_req::crq_objs[] */
- unsigned crq_nrobjs;
- cfs_list_t crq_layers;
+ enum cl_req_type crq_type;
+	/** A list of pages being transferred */
+ struct list_head crq_pages;
+ /** Number of pages in cl_req::crq_pages */
+ unsigned crq_nrpages;
+ /** An array of objects which pages are in ->crq_pages */
+ struct cl_req_obj *crq_o;
+ /** Number of elements in cl_req::crq_objs[] */
+ unsigned crq_nrobjs;
+ struct list_head crq_layers;
};
/**
* Per-layer state for request.
*/
struct cl_req_slice {
- struct cl_req *crs_req;
- struct cl_device *crs_dev;
- cfs_list_t crs_linkage;
- const struct cl_req_operations *crs_ops;
+ struct cl_req *crs_req;
+ struct cl_device *crs_dev;
+ struct list_head crs_linkage;
+ const struct cl_req_operations *crs_ops;
};
/* @} cl_req */
* Stats for a generic cache (similar to inode, lu_object, etc. caches).
*/
struct cache_stats {
- const char *cs_name;
- cfs_atomic_t cs_stats[CS_NR];
+ const char *cs_name;
+ atomic_t cs_stats[CS_NR];
};
/** These are not exported so far */
void cache_stats_init (struct cache_stats *cs, const char *name);
-int cache_stats_print(const struct cache_stats *cs,
- char *page, int count, int header);
/**
* Client-side site. This represents particular client stack. "Global"
* clients to co-exist in the single address space.
*/
struct cl_site {
- struct lu_site cs_lu;
- /**
- * Statistical counters. Atomics do not scale, something better like
- * per-cpu counters is needed.
- *
- * These are exported as /proc/fs/lustre/llite/.../site
- *
- * When interpreting keep in mind that both sub-locks (and sub-pages)
- * and top-locks (and top-pages) are accounted here.
- */
- struct cache_stats cs_pages;
- struct cache_stats cs_locks;
- cfs_atomic_t cs_pages_state[CPS_NR];
- cfs_atomic_t cs_locks_state[CLS_NR];
+ struct lu_site cs_lu;
+ /**
+ * Statistical counters. Atomics do not scale, something better like
+ * per-cpu counters is needed.
+ *
+ * These are exported as /proc/fs/lustre/llite/.../site
+ *
+ * When interpreting keep in mind that both sub-locks (and sub-pages)
+ * and top-locks (and top-pages) are accounted here.
+ */
+ struct cache_stats cs_pages;
+ struct cache_stats cs_locks;
+ atomic_t cs_pages_state[CPS_NR];
+ atomic_t cs_locks_state[CLS_NR];
};
-int cl_site_init (struct cl_site *s, struct cl_device *top);
-void cl_site_fini (struct cl_site *s);
+int cl_site_init(struct cl_site *s, struct cl_device *top);
+void cl_site_fini(struct cl_site *s);
void cl_stack_fini(const struct lu_env *env, struct cl_device *cl);
/**
* Output client site statistical counters into a buffer. Suitable for
* ll_rd_*()-style functions.
*/
-int cl_site_stats_print(const struct cl_site *s, char *page, int count);
+int cl_site_stats_print(const struct cl_site *site, struct seq_file *m);
/**
* \name helpers
return obj ? lu2cl(lu_object_next(&obj->co_lu)) : NULL;
}
-static inline struct cl_device *cl_object_device(const struct cl_object *o)
-{
- LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev));
- return container_of0(o->co_lu.lo_dev, struct cl_device, cd_lu_dev);
-}
-
static inline struct cl_object_header *luh2coh(const struct lu_object_header *h)
{
return container_of0(h, struct cl_object_header, coh_lu);
}
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj,
- const struct cl_page_operations *ops);
+ struct cl_object *obj, pgoff_t index,
+ const struct cl_page_operations *ops);
void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
struct cl_object *obj,
const struct cl_lock_operations *ops);
static inline void cl_object_page_init(struct cl_object *clob, int size)
{
clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
- cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8);
+ cl_object_header(clob)->coh_page_bufsize += cfs_size_round(size);
+ WARN_ON(cl_object_header(clob)->coh_page_bufsize > 512);
}
static inline void *cl_object_page_slice(struct cl_object *clob,
return (void *)((char *)page + clob->co_slice_off);
}
+/**
+ * Return refcount of cl_object.
+ */
+static inline int cl_object_refc(struct cl_object *clob)
+{
+ struct lu_object_header *header = clob->co_lu.lo_header;
+ return atomic_read(&header->loh_ref);
+}
+
/** @} cl_object */
/** \defgroup cl_page cl_page
CLP_GANG_AGAIN,
CLP_GANG_ABORT
};
-
/* callback of cl_page_gang_lookup() */
-typedef int (*cl_page_gang_cb_t) (const struct lu_env *, struct cl_io *,
- struct cl_page *, void *);
-int cl_page_gang_lookup (const struct lu_env *env,
- struct cl_object *obj,
- struct cl_io *io,
- pgoff_t start, pgoff_t end,
- cl_page_gang_cb_t cb, void *cbdata);
-struct cl_page *cl_page_lookup (struct cl_object_header *hdr,
- pgoff_t index);
+
struct cl_page *cl_page_find (const struct lu_env *env,
struct cl_object *obj,
pgoff_t idx, struct page *vmpage,
enum cl_page_type type);
-struct cl_page *cl_page_find_sub (const struct lu_env *env,
- struct cl_object *obj,
- pgoff_t idx, struct page *vmpage,
- struct cl_page *parent);
+struct cl_page *cl_page_alloc (const struct lu_env *env,
+ struct cl_object *o, pgoff_t ind,
+ struct page *vmpage,
+ enum cl_page_type type);
void cl_page_get (struct cl_page *page);
void cl_page_put (const struct lu_env *env,
struct cl_page *page);
void cl_page_header_print(const struct lu_env *env, void *cookie,
lu_printer_t printer,
const struct cl_page *pg);
-struct page *cl_page_vmpage (const struct lu_env *env,
- struct cl_page *page);
struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj);
struct cl_page *cl_page_top (struct cl_page *page);
void cl_page_discard (const struct lu_env *env, struct cl_io *io,
struct cl_page *pg);
void cl_page_delete (const struct lu_env *env, struct cl_page *pg);
-int cl_page_unmap (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg);
int cl_page_is_vmlocked (const struct lu_env *env,
const struct cl_page *pg);
void cl_page_export (const struct lu_env *env,
struct cl_page *pg, int uptodate);
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page);
+ struct cl_page *page, pgoff_t *max_index);
loff_t cl_offset (const struct cl_object *obj, pgoff_t idx);
pgoff_t cl_index (const struct cl_object *obj, loff_t offset);
int cl_page_size (const struct cl_object *obj);
struct cl_object *obj, pgoff_t index,
struct cl_lock *except, int pending,
int canceld);
-static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page,
- struct cl_lock *except,
- int pending, int canceld)
-{
- LASSERT(cl_object_header(obj) == cl_object_header(page->cp_obj));
- return cl_lock_at_pgoff(env, obj, page->cp_index, except,
- pending, canceld);
-}
-
const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
const struct lu_device_type *dtype);
int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock);
void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock,
enum cl_lock_state state);
-int cl_queue_match (const cfs_list_t *queue,
- const struct cl_lock_descr *need);
+int cl_queue_match(const struct list_head *queue,
+ const struct cl_lock_descr *need);
void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock);
int cl_lock_mutex_try (const struct lu_env *env, struct cl_lock *lock);
struct cl_lock_descr *descr);
int cl_io_read_page (const struct lu_env *env, struct cl_io *io,
struct cl_page *page);
-int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to);
-int cl_io_commit_write (const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to);
int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io,
enum cl_req_type iot, struct cl_2queue *queue);
int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io,
enum cl_req_type iot, struct cl_2queue *queue,
long timeout);
+int cl_io_commit_async (const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue, int from, int to,
+ cl_commit_cbt cb);
void cl_io_rw_advance (const struct lu_env *env, struct cl_io *io,
size_t nob);
int cl_io_cancel (const struct lu_env *env, struct cl_io *io,
*/
static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
{
- LASSERT(plist->pl_nr > 0);
- return cfs_list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
+ LASSERT(plist->pl_nr > 0);
+ return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
+}
+
+static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist)
+{
+ LASSERT(plist->pl_nr > 0);
+ return list_entry(plist->pl_pages.next, struct cl_page, cp_batch);
}
/**
* Iterate over pages in a page list.
*/
#define cl_page_list_for_each(page, list) \
- cfs_list_for_each_entry((page), &(list)->pl_pages, cp_batch)
+ list_for_each_entry((page), &(list)->pl_pages, cp_batch)
/**
* Iterate over pages in a page list, taking possible removals into account.
*/
#define cl_page_list_for_each_safe(page, temp, list) \
- cfs_list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
+ list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
void cl_page_list_init (struct cl_page_list *plist);
void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page);
void cl_page_list_move (struct cl_page_list *dst, struct cl_page_list *src,
struct cl_page *page);
+void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
+ struct cl_page *page);
void cl_page_list_splice (struct cl_page_list *list,
struct cl_page_list *head);
void cl_page_list_del (const struct lu_env *env,
struct cl_io *io, struct cl_page_list *plist);
void cl_page_list_discard(const struct lu_env *env,
struct cl_io *io, struct cl_page_list *plist);
-int cl_page_list_unmap (const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist);
void cl_page_list_fini (const struct lu_env *env, struct cl_page_list *plist);
void cl_2queue_init (struct cl_2queue *queue);
*/
struct cl_sync_io {
/** number of pages yet to be transferred. */
- cfs_atomic_t csi_sync_nr;
+ atomic_t csi_sync_nr;
/** error code. */
int csi_sync_rc;
/** barrier of destroy this structure */
- cfs_atomic_t csi_barrier;
+ atomic_t csi_barrier;
/** completion to be signaled when transfer is complete. */
wait_queue_head_t csi_waitq;
};
void cl_env_reexit (void *cookie);
void cl_env_implant (struct lu_env *env, int *refcheck);
void cl_env_unplant (struct lu_env *env, int *refcheck);
+unsigned cl_env_cache_purge(unsigned nr);
+struct lu_env *cl_env_percpu_get (void);
+void cl_env_percpu_put (struct lu_env *env);
/** @} cl_env */