* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
/*
* super-class definitions.
*/
+#include <libcfs/libcfs.h>
#include <lu_object.h>
-#include <lvfs.h>
+
#ifdef __KERNEL__
-# include <linux/mutex.h>
-# include <linux/radix-tree.h>
+# include <linux/mutex.h>
+# include <linux/radix-tree.h>
+#else
+# include <liblustre.h>
#endif
struct inode;
* Group identifier for quota purposes.
*/
gid_t cat_gid;
+
+ /* nlink of the directory */
+ __u64 cat_nlink;
};
/**
struct lu_object co_lu;
/** per-object-layer operations */
const struct cl_object_operations *co_ops;
+ /** offset of page slice in cl_page buffer */
+ int co_slice_off;
};
/**
*/
struct inode *coc_inode;
/**
- * Validate object conf. If object is using an invalid conf,
- * then invalidate it and set the new layout.
+ * Layout lock handle.
*/
- bool coc_validate_only;
+ struct ldlm_lock *coc_lock;
/**
- * Invalidate the current stripe configuration due to losing
- * layout lock.
+ * Operation to handle layout, OBJECT_CONF_XYZ.
*/
- bool coc_invalidate;
+ int coc_opc;
+};
+
+enum {
+ /** configure layout: set up a new stripe; must be called while
+ * holding the layout lock. */
+ OBJECT_CONF_SET = 0,
+ /** invalidate the current stripe configuration due to losing
+ * layout lock. */
+ OBJECT_CONF_INVALIDATE = 1,
+ /** wait for old layout to go away so that new layout can be
+ * set up. */
+ OBJECT_CONF_WAIT = 2
};
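
A minimal usage sketch (the wrapper below is illustrative; cl_conf_set() is
assumed to be the entry point that hands the conf to every layer):

static int example_layout_invalidate(const struct lu_env *env,
				     struct cl_object *obj)
{
	struct cl_object_conf conf = {
		.coc_opc = OBJECT_CONF_INVALIDATE,
	};

	/* each layer drops state tied to the now-stale layout */
	return cl_conf_set(env, obj, &conf);
}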
/**
* \retval 0 success.
* \retval negative errno on failure.
*/
- struct cl_page *(*coo_page_init)(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page,
- cfs_page_t *vmpage);
+ int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, pgoff_t index);
/**
* Initialize lock slice for this layer. Called top-to-bottom through
* every object layer when a new cl_lock is instantiated. Layer
*/
int (*coo_glimpse)(const struct lu_env *env,
const struct cl_object *obj, struct ost_lvb *lvb);
+ /**
+ * Object prune method. Called when the layout is going to change on
+ * this object; therefore each layer has to clean up its cache,
+ * mainly pages and locks.
+ */
+ int (*coo_prune)(const struct lu_env *env, struct cl_object *obj);
};
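
A short sketch of how coo_prune is meant to be driven before a layout
change; cl_object_prune() is assumed to be the top-level wrapper that walks
the layers top-to-bottom:

static int example_prepare_layout_change(const struct lu_env *env,
					 struct cl_object *obj)
{
	/* flush cached pages and locks on every layer first */
	int rc = cl_object_prune(env, obj);

	if (rc != 0)
		return rc;
	/* ... the new layout can now be installed via cl_conf_set() ... */
	return 0;
}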
/**
struct cl_object_header {
/** Standard lu_object_header. cl_object::co_lu::lo_header points
* here. */
- struct lu_object_header coh_lu;
+ struct lu_object_header coh_lu;
/** \name locks
* \todo XXX move locks below to the separate cache-lines, they are
* mostly useless otherwise.
*/
/** @{ */
- /** Lock protecting page tree. */
- cfs_spinlock_t coh_page_guard;
- /** Lock protecting lock list. */
- cfs_spinlock_t coh_lock_guard;
- /** @} locks */
- /** Radix tree of cl_page's, cached for this object. */
- struct radix_tree_root coh_tree;
- /** # of pages in radix tree. */
- unsigned long coh_pages;
- /** List of cl_lock's granted for this object. */
- cfs_list_t coh_locks;
+ /** Lock protecting lock list. */
+ spinlock_t coh_lock_guard;
+ /** @} locks */
+ /** List of cl_lock's granted for this object. */
+ struct list_head coh_locks;
/**
* Parent object. It is assumed that an object has a well-defined
*
* \todo XXX this can be read/write lock if needed.
*/
- cfs_spinlock_t coh_attr_guard;
- /**
- * Number of objects above this one: 0 for a top-object, 1 for its
- * sub-object, etc.
- */
- unsigned coh_nesting;
+ spinlock_t coh_attr_guard;
+ /**
+ * Size of cl_page + page slices
+ */
+ unsigned short coh_page_bufsize;
+ /**
+ * Number of objects above this one: 0 for a top-object, 1 for its
+ * sub-object, etc.
+ */
+ unsigned char coh_nesting;
};
/**
* Helper macro: iterate over all layers of the object \a obj, assigning every
* layer top-to-bottom to \a slice.
*/
-#define cl_object_for_each(slice, obj) \
- cfs_list_for_each_entry((slice), \
- &(obj)->co_lu.lo_header->loh_layers, \
- co_lu.lo_linkage)
+#define cl_object_for_each(slice, obj) \
+ list_for_each_entry((slice), \
+ &(obj)->co_lu.lo_header->loh_layers,\
+ co_lu.lo_linkage)
+
/**
* Helper macro: iterate over all layers of the object \a obj, assigning every
* layer bottom-to-top to \a slice.
*/
-#define cl_object_for_each_reverse(slice, obj) \
- cfs_list_for_each_entry_reverse((slice), \
- &(obj)->co_lu.lo_header->loh_layers, \
- co_lu.lo_linkage)
+#define cl_object_for_each_reverse(slice, obj) \
+ list_for_each_entry_reverse((slice), \
+ &(obj)->co_lu.lo_header->loh_layers,\
+ co_lu.lo_linkage)
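
For illustration, a hedged sketch of the dispatch pattern these macros
support (coo_glimpse is the real method; the wrapper body is illustrative
only):

static int example_glimpse(const struct lu_env *env, struct cl_object *obj,
			   struct ost_lvb *lvb)
{
	struct cl_object *slice;
	int result = 0;

	/* top-to-bottom: stop at the first layer that reports a result */
	cl_object_for_each(slice, obj) {
		if (slice->co_ops->coo_glimpse != NULL) {
			result = slice->co_ops->coo_glimpse(env, slice, lvb);
			if (result != 0)
				break;
		}
	}
	return result;
}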
/** @} cl_object */
-#ifndef pgoff_t
-#define pgoff_t unsigned long
-#endif
-
#define CL_PAGE_EOF ((pgoff_t)~0ull)
/** \addtogroup cl_page cl_page
* corresponding radix tree at the corresponding logical offset.
*
* cl_page is associated with VM page of the hosting environment (struct
- * page in Linux kernel, for example), cfs_page_t. It is assumed, that this
+ * page in the Linux kernel, for example). It is assumed that this
* association is implemented by one of cl_page layers (top layer in the
* current design) that
*
* - translates state (page flag bits) and locking between lustre and
* environment.
*
- * The association between cl_page and cfs_page_t is immutable and
+ * The association between cl_page and struct page is immutable and
* established when cl_page is created.
*
* cl_page can be "owned" by a particular cl_io (see below), guaranteeing
* eviction of the page from memory). Note that, in general, cl_io
* cannot be identified with a particular thread, and page ownership is not
* exactly equal to the current thread holding a lock on the page. Layer
- * implementing association between cl_page and cfs_page_t has to implement
+ * implementing association between cl_page and struct page has to implement
* ownership on top of available synchronization mechanisms.
*
* While the lustre client maintains the notion of page ownership by io,
* - by doing a lookup in the cl_object radix tree, protected by the
* spin-lock;
*
- * - by starting from VM-locked cfs_page_t and following some
+ * - by starting from VM-locked struct page and following some
* hosting environment method (e.g., following ->private pointer in
* the case of Linux kernel), see cl_vmpage_page();
*
*
* Linux Kernel implementation.
*
- * Binding between cl_page and cfs_page_t (which is a typedef for
- * struct page) is implemented in the vvp layer. cl_page is attached to the
+ * Binding between cl_page and struct page is implemented in the vvp
+ * layer. cl_page is attached to the
* ->private pointer of the struct page, together with the setting of
* PG_private bit in page->flags, and acquiring additional reference on the
};
/**
- * Flags maintained for every cl_page.
- */
-enum cl_page_flags {
- /**
- * Set when pagein completes. Used for debugging (read completes at
- * most once for a page).
- */
- CPF_READ_COMPLETED = 1 << 0
-};
-
-/**
- * Fields are protected by the lock on cfs_page_t, except for atomics and
+ * Fields are protected by the lock on struct page, except for atomics and
* immutables.
*
* \invariant Data type invariants are in cl_page_invariant(). Basically:
* cl_page::cp_owner (when set).
*/
struct cl_page {
- /** Reference counter. */
- cfs_atomic_t cp_ref;
- /** An object this page is a part of. Immutable after creation. */
- struct cl_object *cp_obj;
- /** Logical page index within the object. Immutable after creation. */
- pgoff_t cp_index;
- /** List of slices. Immutable after creation. */
- cfs_list_t cp_layers;
- /** Parent page, NULL for top-level page. Immutable after creation. */
- struct cl_page *cp_parent;
- /** Lower-layer page. NULL for bottommost page. Immutable after
- * creation. */
- struct cl_page *cp_child;
- /**
- * Page state. This field is const to avoid accidental update, it is
- * modified only internally within cl_page.c. Protected by a VM lock.
- */
- const enum cl_page_state cp_state;
- /**
- * Linkage of pages within some group. Protected by
- * cl_page::cp_mutex. */
- cfs_list_t cp_batch;
- /** Mutex serializing membership of a page in a batch. */
- cfs_mutex_t cp_mutex;
- /** Linkage of pages within cl_req. */
- cfs_list_t cp_flight;
- /** Transfer error. */
- int cp_error;
-
+ /** Reference counter. */
+ atomic_t cp_ref;
+ /** Transfer error. */
+ int cp_error;
+ /** An object this page is a part of. Immutable after creation. */
+ struct cl_object *cp_obj;
+ /** vmpage */
+ struct page *cp_vmpage;
+ /** Linkage of pages within a group. Pages must be owned. */
+ struct list_head cp_batch;
+ /** List of slices. Immutable after creation. */
+ struct list_head cp_layers;
+ /** Linkage of pages within cl_req. */
+ struct list_head cp_flight;
+ /**
+ * Page state. This field is const to avoid accidental update, it is
+ * modified only internally within cl_page.c. Protected by a VM lock.
+ */
+ const enum cl_page_state cp_state;
/**
* Page type. Only CPT_TRANSIENT is used so far. Immutable after
* creation.
*/
struct cl_io *cp_owner;
/**
- * Debug information, the task is owning the page.
- */
- cfs_task_t *cp_task;
- /**
* Owning IO request in cl_page_state::CPS_PAGEOUT and
* cl_page_state::CPS_PAGEIN states. This field is maintained only in
* the top-level pages. Protected by a VM lock.
struct cl_req *cp_req;
/** List of references to this page, for debugging. */
struct lu_ref cp_reference;
- /** Link to an object, for debugging. */
- struct lu_ref_link *cp_obj_ref;
- /** Link to a queue, for debugging. */
- struct lu_ref_link *cp_queue_ref;
- /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */
- unsigned cp_flags;
- /** Assigned if doing a sync_io */
- struct cl_sync_io *cp_sync_io;
+ /** Link to an object, for debugging. */
+ struct lu_ref_link cp_obj_ref;
+ /** Link to a queue, for debugging. */
+ struct lu_ref_link cp_queue_ref;
+ /** Assigned if doing a sync_io */
+ struct cl_sync_io *cp_sync_io;
};
/**
*/
struct cl_page_slice {
struct cl_page *cpl_page;
+ pgoff_t cpl_index;
/**
* Object slice corresponding to this page slice. Immutable after
* creation.
struct cl_object *cpl_obj;
const struct cl_page_operations *cpl_ops;
/** Linkage into cl_page::cp_layers. Immutable after creation. */
- cfs_list_t cpl_linkage;
+ struct list_head cpl_linkage;
};
/**
CLM_PHANTOM,
CLM_READ,
CLM_WRITE,
- CLM_GROUP
+ CLM_GROUP,
+ CLM_MAX,
};
/**
*/
struct cl_page_operations {
/**
- * cl_page<->cfs_page_t methods. Only one layer in the stack has to
+ * cl_page<->struct page methods. Only one layer in the stack has to
* implement these. Current code assumes that this functionality is
* provided by the topmost layer, see cl_page_disown0() as an example.
*/
/**
- * \return the underlying VM page. Optional.
- */
- cfs_page_t *(*cpo_vmpage)(const struct lu_env *env,
- const struct cl_page_slice *slice);
- /**
* Called when \a io acquires this page into the exclusive
* ownership. When this method returns, it is guaranteed that the page
* is not owned by other io, and no transfer is going on against
void (*cpo_export)(const struct lu_env *env,
const struct cl_page_slice *slice, int uptodate);
/**
- * Unmaps page from the user space (if it is mapped).
- *
- * \see cl_page_unmap()
- * \see vvp_page_unmap()
- */
- int (*cpo_unmap)(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
- /**
* Checks whether underlying VM page is locked (in the suitable
* sense). Used for assertions.
*
* \see cl_page_is_under_lock()
*/
int (*cpo_is_under_lock)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
+ const struct cl_page_slice *slice,
+ struct cl_io *io, pgoff_t *max);
/**
* Optional debugging helper. Prints given page slice.
*/
int (*cpo_make_ready)(const struct lu_env *env,
const struct cl_page_slice *slice);
- /**
- * Announce that this page is to be written out
- * opportunistically, that is, page is dirty, it is not
- * necessary to start write-out transfer right now, but
- * eventually page has to be written out.
- *
- * Main caller of this is the write path (see
- * vvp_io_commit_write()), using this method to build a
- * "transfer cache" from which large transfers are then
- * constructed by the req-formation engine.
- *
- * \todo XXX it would make sense to add page-age tracking
- * semantics here, and to oblige the req-formation engine to
- * send the page out not later than it is too old.
- *
- * \see cl_page_cache_add()
- */
- int (*cpo_cache_add)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
} io[CRT_NR];
/**
* Tell transfer engine that only [to, from] part of a page should be
*/
#define CL_PAGE_DEBUG(mask, env, page, format, ...) \
do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
cl_page_print(env, &msgdata, lu_cdebug_printer, page); \
CDEBUG(mask, format , ## __VA_ARGS__); \
} \
*/
#define CL_PAGE_HEADER(mask, env, page, format, ...) \
do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \
CDEBUG(mask, format , ## __VA_ARGS__); \
} \
} while (0)
+static inline struct page *cl_page_vmpage(const struct cl_page *page)
+{
+ LASSERT(page->cp_vmpage != NULL);
+ return page->cp_vmpage;
+}
+
+/**
+ * Check if a cl_page is in use.
+ *
+ * The client cache holds a refcount; this refcount is dropped when
+ * the page is taken out of the cache, see vvp_page_delete().
+ */
+static inline bool __page_in_use(const struct cl_page *page, int refc)
+{
+ return (atomic_read(&page->cp_ref) > refc + 1);
+}
+
+/**
+ * Caller itself holds a refcount of cl_page.
+ */
+#define cl_page_in_use(pg) __page_in_use(pg, 1)
+/**
+ * Caller doesn't hold a refcount.
+ */
+#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
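
A hedged example of the intended use (the eviction context is hypothetical):

static bool example_page_evictable(const struct cl_page *page)
{
	/* the caller holds its own reference, so any reference beyond
	 * that one means the page is still busy */
	return !cl_page_in_use(page);
}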
+
/** @} cl_page */
/** \addtogroup cl_lock cl_lock
* List of enclosed locks, so far. Locks are linked here through
* cl_lock::cll_inclosure.
*/
- cfs_list_t clc_list;
+ struct list_head clc_list;
/**
* True iff closure is in a `wait' mode. This determines what
* cl_lock_enclosure() does when a lock L to be added to the closure
* Layered client lock.
*/
struct cl_lock {
- /** Reference counter. */
- cfs_atomic_t cll_ref;
- /** List of slices. Immutable after creation. */
- cfs_list_t cll_layers;
- /**
- * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
- * by cl_lock::cll_descr::cld_obj::coh_lock_guard.
- */
- cfs_list_t cll_linkage;
- /**
- * Parameters of this lock. Protected by
- * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
- * cl_lock::cll_guard. Modified only on lock creation and in
- * cl_lock_modify().
- */
+ /** Reference counter. */
+ atomic_t cll_ref;
+ /** List of slices. Immutable after creation. */
+ struct list_head cll_layers;
+ /**
+ * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
+ * by cl_lock::cll_descr::cld_obj::coh_lock_guard.
+ */
+ struct list_head cll_linkage;
+ /**
+ * Parameters of this lock. Protected by
+ * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
+ * cl_lock::cll_guard. Modified only on lock creation and in
+ * cl_lock_modify().
+ */
struct cl_lock_descr cll_descr;
/** Protected by cl_lock::cll_guard. */
- enum cl_lock_state cll_state;
- /** signals state changes. */
- cfs_waitq_t cll_wq;
- /**
- * Recursive lock, most fields in cl_lock{} are protected by this.
- *
- * Locking rules: this mutex is never held across network
- * communication, except when lock is being canceled.
- *
- * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
- * on a top-lock. Other direction is implemented through a
- * try-lock-repeat loop. Mutices of unrelated locks can be taken only
- * by try-locking.
- *
- * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
- */
- cfs_mutex_t cll_guard;
- cfs_task_t *cll_guarder;
+ enum cl_lock_state cll_state;
+ /** signals state changes. */
+ wait_queue_head_t cll_wq;
+ /**
+ * Recursive lock, most fields in cl_lock{} are protected by this.
+ *
+ * Locking rules: this mutex is never held across network
+ * communication, except when lock is being canceled.
+ *
+ * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
+ * on a top-lock. Other direction is implemented through a
+ * try-lock-repeat loop. Mutices of unrelated locks can be taken only
+ * by try-locking.
+ *
+ * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
+ */
+ struct mutex cll_guard;
+ struct task_struct *cll_guarder;
int cll_depth;
/**
* the owner for INTRANSIT state
*/
- cfs_task_t *cll_intransit_owner;
+ struct task_struct *cll_intransit_owner;
int cll_error;
/**
* Number of holds on a lock. A hold prevents a lock from being
*
* \see cl_lock_closure
*/
- cfs_list_t cll_inclosure;
+ struct list_head cll_inclosure;
/**
* Conflict lock at queuing time.
*/
* A list of holds on this lock, for debugging.
*/
struct lu_ref cll_holders;
- /**
- * A reference for cl_lock::cll_descr::cld_obj. For debugging.
- */
- struct lu_ref_link *cll_obj_ref;
+ /**
+ * A reference for cl_lock::cll_descr::cld_obj. For debugging.
+ */
+ struct lu_ref_link cll_obj_ref;
#ifdef CONFIG_LOCKDEP
- /* "dep_map" name is assumed by lockdep.h macros. */
- struct lockdep_map dep_map;
+ /* "dep_map" name is assumed by lockdep.h macros. */
+ struct lockdep_map dep_map;
#endif
};
struct cl_object *cls_obj;
const struct cl_lock_operations *cls_ops;
/** Linkage into cl_lock::cll_layers. Immutable after creation. */
- cfs_list_t cls_linkage;
+ struct list_head cls_linkage;
};
/**
#define CL_LOCK_DEBUG(mask, env, lock, format, ...) \
do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
cl_lock_print(env, &msgdata, lu_cdebug_printer, lock); \
CDEBUG(mask, format , ## __VA_ARGS__); \
} \
* @{
*/
struct cl_page_list {
- unsigned pl_nr;
- cfs_list_t pl_pages;
- cfs_task_t *pl_owner;
+ unsigned pl_nr;
+ struct list_head pl_pages;
+ struct task_struct *pl_owner;
};
/**
* \see vvp_io, lov_io, osc_io, ccc_io
*/
struct cl_io_slice {
- struct cl_io *cis_io;
- /** corresponding object slice. Immutable after creation. */
- struct cl_object *cis_obj;
- /** io operations. Immutable after creation. */
- const struct cl_io_operations *cis_iop;
- /**
- * linkage into a list of all slices for a given cl_io, hanging off
- * cl_io::ci_layers. Immutable after creation.
- */
- cfs_list_t cis_linkage;
+ struct cl_io *cis_io;
+ /** corresponding object slice. Immutable after creation. */
+ struct cl_object *cis_obj;
+ /** io operations. Immutable after creation. */
+ const struct cl_io_operations *cis_iop;
+ /**
+ * linkage into a list of all slices for a given cl_io, hanging off
+ * cl_io::ci_layers. Immutable after creation.
+ */
+ struct list_head cis_linkage;
};
+typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
+ struct cl_page *);
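
A sketch of a callback matching cl_commit_cbt; the body is illustrative
(a real user, e.g. the vvp write path, would mark the vmpage dirty and
release page ownership here):

static void example_commit_cb(const struct lu_env *env, struct cl_io *io,
			      struct cl_page *page)
{
	/* the page has been queued for write-out by cio_commit_async() */
	cl_page_export(env, page, 1);	/* expose it to the VM as uptodate */
}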
/**
* Per-layer io operations.
void (*cio_fini) (const struct lu_env *env,
const struct cl_io_slice *slice);
} op[CIT_OP_NR];
- struct {
- /**
- * Submit pages from \a queue->c2_qin for IO, and move
- * successfully submitted pages into \a queue->c2_qout. Return
- * non-zero if failed to submit even the single page. If
- * submission failed after some pages were moved into \a
- * queue->c2_qout, completion callback with non-zero ioret is
- * executed on them.
- */
- int (*cio_submit)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- enum cl_req_type crt,
- struct cl_2queue *queue);
- } req_op[CRT_NR];
+
+ /**
+ * Submit pages from \a queue->c2_qin for IO, and move
+ * successfully submitted pages into \a queue->c2_qout. Return
+ * non-zero if failed to submit even the single page. If
+ * submission failed after some pages were moved into \a
+ * queue->c2_qout, completion callback with non-zero ioret is
+ * executed on them.
+ */
+ int (*cio_submit)(const struct lu_env *env,
+ const struct cl_io_slice *slice,
+ enum cl_req_type crt,
+ struct cl_2queue *queue);
+ /**
+ * Queue async page for write.
+ * The difference between cio_submit and cio_commit_async is
+ * that cio_submit is for urgent requests.
+ */
+ int (*cio_commit_async)(const struct lu_env *env,
+ const struct cl_io_slice *slice,
+ struct cl_page_list *queue, int from, int to,
+ cl_commit_cbt cb);
/**
* Read missing page.
*
const struct cl_io_slice *slice,
const struct cl_page_slice *page);
/**
- * Prepare write of a \a page. Called bottom-to-top by a top-level
- * cl_io_operations::op[CIT_WRITE]::cio_start() to prepare page for
- * get data from user-level buffer.
- *
- * \pre io->ci_type == CIT_WRITE
- *
- * \see vvp_io_prepare_write(), lov_io_prepare_write(),
- * osc_io_prepare_write().
- */
- int (*cio_prepare_write)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- const struct cl_page_slice *page,
- unsigned from, unsigned to);
- /**
- *
- * \pre io->ci_type == CIT_WRITE
- *
- * \see vvp_io_commit_write(), lov_io_commit_write(),
- * osc_io_commit_write().
- */
- int (*cio_commit_write)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- const struct cl_page_slice *page,
- unsigned from, unsigned to);
- /**
* Optional debugging helper. Print given io slice.
*/
int (*cio_print)(const struct lu_env *env, void *cookie,
* same lock can be part of multiple io's simultaneously.
*/
struct cl_io_lock_link {
- /** linkage into one of cl_lockset lists. */
- cfs_list_t cill_linkage;
- struct cl_lock_descr cill_descr;
- struct cl_lock *cill_lock;
- /** optional destructor */
- void (*cill_fini)(const struct lu_env *env,
- struct cl_io_lock_link *link);
+ /** linkage into one of cl_lockset lists. */
+ struct list_head cill_linkage;
+ struct cl_lock_descr cill_descr;
+ struct cl_lock *cill_lock;
+ /** optional destructor */
+ void (*cill_fini)(const struct lu_env *env,
+ struct cl_io_lock_link *link);
};
/**
* enqueued.
*/
struct cl_lockset {
- /** locks to be acquired. */
- cfs_list_t cls_todo;
- /** locks currently being processed. */
- cfs_list_t cls_curr;
- /** locks acquired. */
- cfs_list_t cls_done;
+ /** locks to be acquired. */
+ struct list_head cls_todo;
+ /** locks currently being processed. */
+ struct list_head cls_curr;
+ /** locks acquired. */
+ struct list_head cls_done;
};
/**
/** Layers are free to decide between local and global locking. */
CILR_MAYBE,
/** Never lock: there is no cache (e.g., liblustre). */
- CILR_NEVER,
- /** Peek lock: use existing locks, don't queue new ones */
- CILR_PEEK
+ CILR_NEVER
};
enum cl_fsync_mode {
*/
struct cl_io *ci_parent;
/** List of slices. Immutable after creation. */
- cfs_list_t ci_layers;
+ struct list_head ci_layers;
/** list of locks (to be) acquired by this io. */
struct cl_lockset ci_lockset;
/** lock requirements, this is just a help info for sublayers. */
enum cl_io_lock_dmd ci_lockreq;
- /**
- * This io has held grouplock, to inform sublayers that
- * don't do lockless i/o.
- */
- int ci_no_srvlock;
union {
struct cl_rd_io {
struct cl_io_rw_common rd;
struct cl_2queue ci_queue;
size_t ci_nob;
int ci_result;
- int ci_continue;
- /**
- * Number of pages owned by this IO. For invariant checking.
- */
- unsigned ci_owned_nr;
+ unsigned int ci_continue:1,
+ /**
+ * This io holds a grouplock, to inform sublayers that they
+ * must not do lockless i/o.
+ */
+ ci_no_srvlock:1,
+ /**
+ * The whole IO needs to be restarted because the layout has changed
+ */
+ ci_need_restart:1,
+ /**
+ * do not refresh layout - the IO issuer knows that the layout won't
+ * change (page operations; a layout change causes all pages to be
+ * discarded), or it doesn't matter if it changes (sync).
+ */
+ ci_ignore_layout:1,
+ /**
+ * Check if the layout changed after the IO finishes. Mainly for the
+ * HSM requirement. If IO occurs on open files, it doesn't need to
+ * verify layout because HSM won't release open files.
+ * Right now, only two operations need to verify layout: glimpse
+ * and setattr.
+ */
+ ci_verify_layout:1,
+ /**
+ * file is released; restore has to be triggered by the vvp layer
+ */
+ ci_restore_needed:1,
+ /**
+ * O_NOATIME
+ */
+ ci_noatime:1;
+ /**
+ * Number of pages owned by this IO. For invariant checking.
+ */
+ unsigned ci_owned_nr;
};
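
A minimal sketch of how an IO issuer uses these bits, assuming the standard
cl_io_init() entry point and a caller that already owns env, io and obj:

static int example_page_op_io(const struct lu_env *env, struct cl_io *io,
			      struct cl_object *obj)
{
	/* page operations discard all pages on a layout change, so this
	 * IO does not need to refresh the layout */
	io->ci_ignore_layout = 1;
	return cl_io_init(env, io, CIT_MISC, obj);
}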
/** @} cl_io */
* A per-object state that (potentially multi-object) transfer request keeps.
*/
struct cl_req_obj {
- /** object itself */
- struct cl_object *ro_obj;
- /** reference to cl_req_obj::ro_obj. For debugging. */
- struct lu_ref_link *ro_obj_ref;
- /* something else? Number of pages for a given object? */
+ /** object itself */
+ struct cl_object *ro_obj;
+ /** reference to cl_req_obj::ro_obj. For debugging. */
+ struct lu_ref_link ro_obj_ref;
+ /* something else? Number of pages for a given object? */
};
/**
* req's pages.
*/
struct cl_req {
- enum cl_req_type crq_type;
- /** A list of pages being transfered */
- cfs_list_t crq_pages;
- /** Number of pages in cl_req::crq_pages */
- unsigned crq_nrpages;
- /** An array of objects which pages are in ->crq_pages */
- struct cl_req_obj *crq_o;
- /** Number of elements in cl_req::crq_objs[] */
- unsigned crq_nrobjs;
- cfs_list_t crq_layers;
+ enum cl_req_type crq_type;
+ /** A list of pages being transferred */
+ struct list_head crq_pages;
+ /** Number of pages in cl_req::crq_pages */
+ unsigned crq_nrpages;
+ /** An array of objects which pages are in ->crq_pages */
+ struct cl_req_obj *crq_o;
+ /** Number of elements in cl_req::crq_objs[] */
+ unsigned crq_nrobjs;
+ struct list_head crq_layers;
};
/**
* Per-layer state for request.
*/
struct cl_req_slice {
- struct cl_req *crs_req;
- struct cl_device *crs_dev;
- cfs_list_t crs_linkage;
- const struct cl_req_operations *crs_ops;
+ struct cl_req *crs_req;
+ struct cl_device *crs_dev;
+ struct list_head crs_linkage;
+ const struct cl_req_operations *crs_ops;
};
/* @} cl_req */
+enum cache_stats_item {
+ /** how many cache lookups were performed */
+ CS_lookup = 0,
+ /** how many times cache lookup resulted in a hit */
+ CS_hit,
+ /** how many entities are in the cache right now */
+ CS_total,
+ /** how many entities in the cache are actively used (and cannot be
+ * evicted) right now */
+ CS_busy,
+ /** how many entities were created at all */
+ CS_create,
+ CS_NR
+};
+
+#define CS_NAMES { "lookup", "hit", "total", "busy", "create" }
+
/**
* Stats for a generic cache (similar to inode, lu_object, etc. caches).
*/
struct cache_stats {
- const char *cs_name;
- /** how many entities were created at all */
- cfs_atomic_t cs_created;
- /** how many cache lookups were performed */
- cfs_atomic_t cs_lookup;
- /** how many times cache lookup resulted in a hit */
- cfs_atomic_t cs_hit;
- /** how many entities are in the cache right now */
- cfs_atomic_t cs_total;
- /** how many entities in the cache are actively used (and cannot be
- * evicted) right now */
- cfs_atomic_t cs_busy;
+ const char *cs_name;
+ atomic_t cs_stats[CS_NR];
};
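
With the counters folded into a single array, one helper can replace the
old per-field accessors; a minimal sketch (the helper name is an
assumption):

static inline void cache_stats_inc(struct cache_stats *cs,
				   enum cache_stats_item item)
{
	atomic_inc(&cs->cs_stats[item]);
}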
/** These are not exported so far */
void cache_stats_init (struct cache_stats *cs, const char *name);
-int cache_stats_print(const struct cache_stats *cs,
- char *page, int count, int header);
/**
* Client-side site. This represents particular client stack. "Global"
* clients to co-exist in the single address space.
*/
struct cl_site {
- struct lu_site cs_lu;
- /**
- * Statistical counters. Atomics do not scale, something better like
- * per-cpu counters is needed.
- *
- * These are exported as /proc/fs/lustre/llite/.../site
- *
- * When interpreting keep in mind that both sub-locks (and sub-pages)
- * and top-locks (and top-pages) are accounted here.
- */
- struct cache_stats cs_pages;
- struct cache_stats cs_locks;
- cfs_atomic_t cs_pages_state[CPS_NR];
- cfs_atomic_t cs_locks_state[CLS_NR];
+ struct lu_site cs_lu;
+ /**
+ * Statistical counters. Atomics do not scale, something better like
+ * per-cpu counters is needed.
+ *
+ * These are exported as /proc/fs/lustre/llite/.../site
+ *
+ * When interpreting keep in mind that both sub-locks (and sub-pages)
+ * and top-locks (and top-pages) are accounted here.
+ */
+ struct cache_stats cs_pages;
+ struct cache_stats cs_locks;
+ atomic_t cs_pages_state[CPS_NR];
+ atomic_t cs_locks_state[CLS_NR];
};
-int cl_site_init (struct cl_site *s, struct cl_device *top);
-void cl_site_fini (struct cl_site *s);
+int cl_site_init(struct cl_site *s, struct cl_device *top);
+void cl_site_fini(struct cl_site *s);
void cl_stack_fini(const struct lu_env *env, struct cl_device *cl);
/**
* Output client site statistical counters into a buffer. Suitable for
* ll_rd_*()-style functions.
*/
-int cl_site_stats_print(const struct cl_site *s, char *page, int count);
+int cl_site_stats_print(const struct cl_site *site, struct seq_file *m);
/**
* \name helpers
return obj ? lu2cl(lu_object_next(&obj->co_lu)) : NULL;
}
-static inline struct cl_device *cl_object_device(const struct cl_object *o)
-{
- LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev));
- return container_of0(o->co_lu.lo_dev, struct cl_device, cd_lu_dev);
-}
-
static inline struct cl_object_header *luh2coh(const struct lu_object_header *h)
{
return container_of0(h, struct cl_object_header, coh_lu);
}
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj,
- const struct cl_page_operations *ops);
+ struct cl_object *obj, pgoff_t index,
+ const struct cl_page_operations *ops);
void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
struct cl_object *obj,
const struct cl_lock_operations *ops);
return cl_object_header(o0) == cl_object_header(o1);
}
+static inline void cl_object_page_init(struct cl_object *clob, int size)
+{
+ clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
+ cl_object_header(clob)->coh_page_bufsize += cfs_size_round(size);
+ WARN_ON(cl_object_header(clob)->coh_page_bufsize > 512);
+}
+
+static inline void *cl_object_page_slice(struct cl_object *clob,
+ struct cl_page *page)
+{
+ return (void *)((char *)page + clob->co_slice_off);
+}
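
Taken together, a hedged sketch of how a layer reserves and later locates
its page slice (struct my_page and my_page_ops are hypothetical names):

struct my_page {
	struct cl_page_slice mp_cl;
};

static const struct cl_page_operations my_page_ops;

static int my_object_init(struct cl_object *clob)
{
	/* at object creation: reserve room in every future cl_page buffer */
	cl_object_page_init(clob, sizeof(struct my_page));
	return 0;
}

static int my_page_init(const struct lu_env *env, struct cl_object *obj,
			struct cl_page *page, pgoff_t index)
{
	/* at page creation: locate this layer's share of the buffer */
	struct my_page *mp = cl_object_page_slice(obj, page);

	cl_page_slice_add(page, &mp->mp_cl, obj, index, &my_page_ops);
	return 0;
}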
+
+/**
+ * Return refcount of cl_object.
+ */
+static inline int cl_object_refc(struct cl_object *clob)
+{
+ struct lu_object_header *header = clob->co_lu.lo_header;
+ return atomic_read(&header->loh_ref);
+}
+
/** @} cl_object */
/** \defgroup cl_page cl_page
CLP_GANG_AGAIN,
CLP_GANG_ABORT
};
-
/* callback of cl_page_gang_lookup() */
-typedef int (*cl_page_gang_cb_t) (const struct lu_env *, struct cl_io *,
- struct cl_page *, void *);
-int cl_page_gang_lookup (const struct lu_env *env,
- struct cl_object *obj,
- struct cl_io *io,
- pgoff_t start, pgoff_t end,
- cl_page_gang_cb_t cb, void *cbdata);
-struct cl_page *cl_page_lookup (struct cl_object_header *hdr,
- pgoff_t index);
+
struct cl_page *cl_page_find (const struct lu_env *env,
struct cl_object *obj,
pgoff_t idx, struct page *vmpage,
enum cl_page_type type);
-struct cl_page *cl_page_find_sub (const struct lu_env *env,
- struct cl_object *obj,
- pgoff_t idx, struct page *vmpage,
- struct cl_page *parent);
+struct cl_page *cl_page_alloc (const struct lu_env *env,
+ struct cl_object *o, pgoff_t ind,
+ struct page *vmpage,
+ enum cl_page_type type);
void cl_page_get (struct cl_page *page);
void cl_page_put (const struct lu_env *env,
struct cl_page *page);
void cl_page_header_print(const struct lu_env *env, void *cookie,
lu_printer_t printer,
const struct cl_page *pg);
-cfs_page_t *cl_page_vmpage (const struct lu_env *env,
- struct cl_page *page);
-struct cl_page *cl_vmpage_page (cfs_page_t *vmpage, struct cl_object *obj);
+struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj);
struct cl_page *cl_page_top (struct cl_page *page);
const struct cl_page_slice *cl_page_at(const struct cl_page *page,
void cl_page_discard (const struct lu_env *env, struct cl_io *io,
struct cl_page *pg);
void cl_page_delete (const struct lu_env *env, struct cl_page *pg);
-int cl_page_unmap (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg);
int cl_page_is_vmlocked (const struct lu_env *env,
const struct cl_page *pg);
void cl_page_export (const struct lu_env *env,
struct cl_page *pg, int uptodate);
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page);
+ struct cl_page *page, pgoff_t *max_index);
loff_t cl_offset (const struct cl_object *obj, pgoff_t idx);
pgoff_t cl_index (const struct cl_object *obj, loff_t offset);
int cl_page_size (const struct cl_object *obj);
struct cl_object *obj, pgoff_t index,
struct cl_lock *except, int pending,
int canceld);
-static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page,
- struct cl_lock *except,
- int pending, int canceld)
-{
- return cl_lock_at_pgoff(env, obj, page->cp_index, except,
- pending, canceld);
-}
-
const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
const struct lu_device_type *dtype);
void cl_lock_put (const struct lu_env *env, struct cl_lock *lock);
void cl_lock_hold_add (const struct lu_env *env, struct cl_lock *lock,
const char *scope, const void *source);
+void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source);
void cl_lock_unhold (const struct lu_env *env, struct cl_lock *lock,
const char *scope, const void *source);
void cl_lock_release (const struct lu_env *env, struct cl_lock *lock,
int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock);
void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock,
enum cl_lock_state state);
-int cl_queue_match (const cfs_list_t *queue,
- const struct cl_lock_descr *need);
+int cl_queue_match(const struct list_head *queue,
+ const struct cl_lock_descr *need);
void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock);
int cl_lock_mutex_try (const struct lu_env *env, struct cl_lock *lock);
struct cl_lock_descr *descr);
int cl_io_read_page (const struct lu_env *env, struct cl_io *io,
struct cl_page *page);
-int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to);
-int cl_io_commit_write (const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to);
int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io,
enum cl_req_type iot, struct cl_2queue *queue);
int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io,
enum cl_req_type iot, struct cl_2queue *queue,
long timeout);
+int cl_io_commit_async (const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue, int from, int to,
+ cl_commit_cbt cb);
void cl_io_rw_advance (const struct lu_env *env, struct cl_io *io,
size_t nob);
int cl_io_cancel (const struct lu_env *env, struct cl_io *io,
*/
static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
{
- LASSERT(plist->pl_nr > 0);
- return cfs_list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
+ LASSERT(plist->pl_nr > 0);
+ return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
+}
+
+static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist)
+{
+ LASSERT(plist->pl_nr > 0);
+ return list_entry(plist->pl_pages.next, struct cl_page, cp_batch);
}
/**
* Iterate over pages in a page list.
*/
#define cl_page_list_for_each(page, list) \
- cfs_list_for_each_entry((page), &(list)->pl_pages, cp_batch)
+ list_for_each_entry((page), &(list)->pl_pages, cp_batch)
/**
* Iterate over pages in a page list, taking possible removals into account.
*/
#define cl_page_list_for_each_safe(page, temp, list) \
- cfs_list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
+ list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
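
A small usage sketch (assuming the caller owns the pages on the list):

static void example_page_list_drain(const struct lu_env *env,
				    struct cl_page_list *plist)
{
	struct cl_page *page, *temp;

	/* removal-safe walk: each entry is unlinked as we go */
	cl_page_list_for_each_safe(page, temp, plist)
		cl_page_list_del(env, plist, page);
}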
void cl_page_list_init (struct cl_page_list *plist);
void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page);
void cl_page_list_move (struct cl_page_list *dst, struct cl_page_list *src,
struct cl_page *page);
+void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
+ struct cl_page *page);
void cl_page_list_splice (struct cl_page_list *list,
struct cl_page_list *head);
void cl_page_list_del (const struct lu_env *env,
struct cl_io *io, struct cl_page_list *plist);
void cl_page_list_discard(const struct lu_env *env,
struct cl_io *io, struct cl_page_list *plist);
-int cl_page_list_unmap (const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist);
void cl_page_list_fini (const struct lu_env *env, struct cl_page_list *plist);
void cl_2queue_init (struct cl_2queue *queue);
* anchor and wakes up waiting thread when transfer is complete.
*/
struct cl_sync_io {
- /** number of pages yet to be transferred. */
- cfs_atomic_t csi_sync_nr;
- /** completion to be signaled when transfer is complete. */
- cfs_waitq_t csi_waitq;
- /** error code. */
- int csi_sync_rc;
+ /** number of pages yet to be transferred. */
+ atomic_t csi_sync_nr;
+ /** error code. */
+ int csi_sync_rc;
+ /** barrier for destroying this structure */
+ atomic_t csi_barrier;
+ /** completion to be signaled when transfer is complete. */
+ wait_queue_head_t csi_waitq;
};
void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
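
The usual transfer pattern, sketched; the exact cl_sync_io_wait() signature
in this tree is assumed:

static int example_sync_transfer(const struct lu_env *env, struct cl_io *io,
				 struct cl_2queue *queue, int nrpages)
{
	struct cl_sync_io anchor;

	cl_sync_io_init(&anchor, nrpages);
	/* ... submit queue->c2_qin; each page's completion handler calls
	 * cl_sync_io_note() on the anchor ... */
	return cl_sync_io_wait(env, io, &queue->c2_qout, &anchor, 0);
}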
void cl_env_implant (struct lu_env *env, int *refcheck);
void cl_env_unplant (struct lu_env *env, int *refcheck);
unsigned cl_env_cache_purge(unsigned nr);
+struct lu_env *cl_env_percpu_get (void);
+void cl_env_percpu_put (struct lu_env *env);
/** @} cl_env */
struct lu_device *next);
/** @} clio */
+int cl_global_init(void);
+void cl_global_fini(void);
+
#endif /* _LINUX_CL_OBJECT_H */