-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* super-class definitions.
*/
#include <lu_object.h>
-#include <lvfs.h>
#ifdef __KERNEL__
# include <linux/mutex.h>
# include <linux/radix-tree.h>
struct lu_object co_lu;
/** per-object-layer operations */
const struct cl_object_operations *co_ops;
+ /** offset of page slice in cl_page buffer */
+ int co_slice_off;
};
/**
* VFS inode. This is consumed by vvp.
*/
struct inode *coc_inode;
+ /**
+ * Layout lock handle.
+ */
+ struct ldlm_lock *coc_lock;
+ /**
+ * Operation to handle layout, OBJECT_CONF_XYZ.
+ */
+ int coc_opc;
+};
+
+enum {
+ /** configure layout, set up a new stripe, must be called while
+ * holding layout lock. */
+ OBJECT_CONF_SET = 0,
+ /** invalidate the current stripe configuration due to losing
+ * layout lock. */
+ OBJECT_CONF_INVALIDATE = 1,
+ /** wait for old layout to go away so that new layout can be
+ * set up. */
+ OBJECT_CONF_WAIT = 2
};
/**
* \retval valid-pointer pointer to already existing referenced page
* to be used instead of newly created.
*/
- struct cl_page *(*coo_page_init)(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page,
- cfs_page_t *vmpage);
+ int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, struct page *vmpage);
/**
* Initialize lock slice for this layer. Called top-to-bottom through
* every object layer when a new cl_lock is instantiated. Layer
*/
/** @{ */
/** Lock protecting page tree. */
- spinlock_t coh_page_guard;
- /** Lock protecting lock list. */
- spinlock_t coh_lock_guard;
+ spinlock_t coh_page_guard;
+ /** Lock protecting lock list. */
+ spinlock_t coh_lock_guard;
/** @} locks */
/** Radix tree of cl_page's, cached for this object. */
struct radix_tree_root coh_tree;
/** # of pages in radix tree. */
unsigned long coh_pages;
/** List of cl_lock's granted for this object. */
- struct list_head coh_locks;
+ cfs_list_t coh_locks;
/**
* Parent object. It is assumed that an object has a well-defined
*
* \todo XXX this can be read/write lock if needed.
*/
- spinlock_t coh_attr_guard;
- /**
- * Number of objects above this one: 0 for a top-object, 1 for its
- * sub-object, etc.
- */
- unsigned coh_nesting;
+ spinlock_t coh_attr_guard;
+ /**
+ * Size of cl_page + page slices
+ */
+ unsigned short coh_page_bufsize;
+ /**
+ * Number of objects above this one: 0 for a top-object, 1 for its
+ * sub-object, etc.
+ */
+ unsigned char coh_nesting;
};
/**
* Helper macro: iterate over all layers of the object \a obj, assigning every
* layer top-to-bottom to \a slice.
*/
-#define cl_object_for_each(slice, obj) \
- list_for_each_entry((slice), \
- &(obj)->co_lu.lo_header->loh_layers, \
- co_lu.lo_linkage)
+#define cl_object_for_each(slice, obj) \
+ cfs_list_for_each_entry((slice), \
+ &(obj)->co_lu.lo_header->loh_layers, \
+ co_lu.lo_linkage)
/**
* Helper macro: iterate over all layers of the object \a obj, assigning every
* layer bottom-to-top to \a slice.
*/
-#define cl_object_for_each_reverse(slice, obj) \
- list_for_each_entry_reverse((slice), \
- &(obj)->co_lu.lo_header->loh_layers, \
- co_lu.lo_linkage)
+#define cl_object_for_each_reverse(slice, obj) \
+ cfs_list_for_each_entry_reverse((slice), \
+ &(obj)->co_lu.lo_header->loh_layers, \
+ co_lu.lo_linkage)
/** @} cl_object */
#ifndef pgoff_t
* corresponding radix tree at the corresponding logical offset.
*
* cl_page is associated with VM page of the hosting environment (struct
- * page in Linux kernel, for example), cfs_page_t. It is assumed, that this
+ * page in Linux kernel, for example), struct page. It is assumed, that this
* association is implemented by one of cl_page layers (top layer in the
* current design) that
*
* - translates state (page flag bits) and locking between lustre and
* environment.
*
- * The association between cl_page and cfs_page_t is immutable and
+ * The association between cl_page and struct page is immutable and
* established when cl_page is created.
*
* cl_page can be "owned" by a particular cl_io (see below), guaranteeing
* eviction of the page from the memory). Note, that in general cl_io
* cannot be identified with a particular thread, and page ownership is not
* exactly equal to the current thread holding a lock on the page. Layer
- * implementing association between cl_page and cfs_page_t has to implement
+ * implementing association between cl_page and struct page has to implement
* ownership on top of available synchronization mechanisms.
*
* While lustre client maintains the notion of an page ownership by io,
* - by doing a lookup in the cl_object radix tree, protected by the
* spin-lock;
*
- * - by starting from VM-locked cfs_page_t and following some
+ * - by starting from VM-locked struct page and following some
* hosting environment method (e.g., following ->private pointer in
* the case of Linux kernel), see cl_vmpage_page();
*
*
* Linux Kernel implementation.
*
- * Binding between cl_page and cfs_page_t (which is a typedef for
+ * Binding between cl_page and struct page (which is a typedef for
* struct page) is implemented in the vvp layer. cl_page is attached to the
* ->private pointer of the struct page, together with the setting of
* PG_private bit in page->flags, and acquiring additional reference on the
};
/**
- * Fields are protected by the lock on cfs_page_t, except for atomics and
+ * Fields are protected by the lock on struct page, except for atomics and
* immutables.
*
* \invariant Data type invariants are in cl_page_invariant(). Basically:
*/
struct cl_page {
/** Reference counter. */
- atomic_t cp_ref;
+ cfs_atomic_t cp_ref;
/** An object this page is a part of. Immutable after creation. */
struct cl_object *cp_obj;
/** Logical page index within the object. Immutable after creation. */
pgoff_t cp_index;
/** List of slices. Immutable after creation. */
- struct list_head cp_layers;
+ cfs_list_t cp_layers;
/** Parent page, NULL for top-level page. Immutable after creation. */
struct cl_page *cp_parent;
/** Lower-layer page. NULL for bottommost page. Immutable after
* modified only internally within cl_page.c. Protected by a VM lock.
*/
const enum cl_page_state cp_state;
- /**
- * Linkage of pages within some group. Protected by
- * cl_page::cp_mutex. */
- struct list_head cp_batch;
- /** Mutex serializing membership of a page in a batch. */
- struct mutex cp_mutex;
+ /** Linkage of pages within group. Protected by cl_page::cp_mutex. */
+ cfs_list_t cp_batch;
+ /** Mutex serializing membership of a page in a batch. */
+ struct mutex cp_mutex;
/** Linkage of pages within cl_req. */
- struct list_head cp_flight;
+ cfs_list_t cp_flight;
/** Transfer error. */
int cp_error;
*/
struct cl_io *cp_owner;
/**
+ * Debug information: the task that owns the page.
+ */
+ struct task_struct *cp_task;
+ /**
* Owning IO request in cl_page_state::CPS_PAGEOUT and
* cl_page_state::CPS_PAGEIN states. This field is maintained only in
* the top-level pages. Protected by a VM lock.
struct cl_req *cp_req;
/** List of references to this page, for debugging. */
struct lu_ref cp_reference;
- /** Link to an object, for debugging. */
- struct lu_ref_link *cp_obj_ref;
- /** Link to a queue, for debugging. */
- struct lu_ref_link *cp_queue_ref;
- /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */
- unsigned cp_flags;
+ /** Link to an object, for debugging. */
+ struct lu_ref_link cp_obj_ref;
+ /** Link to a queue, for debugging. */
+ struct lu_ref_link cp_queue_ref;
+ /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */
+ unsigned cp_flags;
+ /** Assigned if doing a sync_io */
+ struct cl_sync_io *cp_sync_io;
};
/**
struct cl_object *cpl_obj;
const struct cl_page_operations *cpl_ops;
/** Linkage into cl_page::cp_layers. Immutable after creation. */
- struct list_head cpl_linkage;
+ cfs_list_t cpl_linkage;
};
/**
*/
CLM_PHANTOM,
CLM_READ,
- CLM_WRITE
+ CLM_WRITE,
+ CLM_GROUP
};
/**
*/
struct cl_page_operations {
/**
- * cl_page<->cfs_page_t methods. Only one layer in the stack has to
+ * cl_page<->struct page methods. Only one layer in the stack has to
* implement these. Current code assumes that this functionality is
* provided by the topmost layer, see cl_page_disown0() as an example.
*/
/**
* \return the underlying VM page. Optional.
*/
- cfs_page_t *(*cpo_vmpage)(const struct lu_env *env,
+ struct page *(*cpo_vmpage)(const struct lu_env *env,
const struct cl_page_slice *slice);
/**
* Called when \a io acquires this page into the exclusive
* \see cl_page_own()
* \see vvp_page_own(), lov_page_own()
*/
- void (*cpo_own)(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
+ int (*cpo_own)(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io, int nonblock);
/** Called when ownership it yielded. Optional.
*
* \see cl_page_disown()
const struct cl_page_slice *slice,
struct cl_io *io);
/**
- * Announces that page contains valid data and user space can look and
- * them without client's involvement from now on. Effectively marks
- * the page up-to-date. Optional.
+ * Announces whether the page contains valid data or not by \a uptodate.
*
* \see cl_page_export()
* \see vvp_page_export()
*/
void (*cpo_export)(const struct lu_env *env,
- const struct cl_page_slice *slice);
+ const struct cl_page_slice *slice, int uptodate);
/**
* Unmaps page from the user space (if it is mapped).
*
*/
int (*cpo_cancel)(const struct lu_env *env,
const struct cl_page_slice *slice);
+ /**
+ * Write out a page by kernel. This is only called by ll_writepage
+ * right now.
+ *
+ * \see cl_page_flush()
+ */
+ int (*cpo_flush)(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io);
/** @} transfer */
};
*/
#define CL_PAGE_DEBUG(mask, env, page, format, ...) \
do { \
- static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
\
- if (cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- cl_page_print(env, &__info, lu_cdebug_printer, page); \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ cl_page_print(env, &msgdata, lu_cdebug_printer, page); \
CDEBUG(mask, format , ## __VA_ARGS__); \
} \
} while (0)
/**
* Helper macro, dumping shorter information about \a page into a log.
*/
-#define CL_PAGE_HEADER(mask, env, page, format, ...) \
-do { \
- static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \
- \
- if (cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- cl_page_header_print(env, &__info, lu_cdebug_printer, page); \
- CDEBUG(mask, format , ## __VA_ARGS__); \
- } \
+#define CL_PAGE_HEADER(mask, env, page, format, ...) \
+do { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
+ \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \
+ CDEBUG(mask, format , ## __VA_ARGS__); \
+ } \
} while (0)
+static inline int __page_in_use(const struct cl_page *page, int refc)
+{
+ if (page->cp_type == CPT_CACHEABLE)
+ ++refc;
+ LASSERT(cfs_atomic_read(&page->cp_ref) > 0);
+ return (cfs_atomic_read(&page->cp_ref) > refc);
+}
+#define cl_page_in_use(pg) __page_in_use(pg, 1)
+#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
+
/** @} cl_page */
/** \addtogroup cl_lock cl_lock
pgoff_t cld_start;
/** Index of the last page (inclusive) protected by this lock. */
pgoff_t cld_end;
+ /** Group ID, for group lock */
+ __u64 cld_gid;
/** Lock mode. */
enum cl_lock_mode cld_mode;
+ /**
+ * flags to enqueue lock. A combination of bit-flags from
+ * enum cl_enq_flags.
+ */
+ __u32 cld_enq_flags;
};
#define DDESCR "%s(%d):[%lu, %lu]"
* | | V
* | | HELD<---------+
* | | | |
- * | | | |
+ * | | | | cl_use_try()
* | | cl_unuse_try() | |
* | | | |
- * | | V | cached
- * | +------------>UNLOCKING (*) | lock found
- * | | |
- * | cl_unuse_try() | |
+ * | | V ---+
+ * | +------------>INTRANSIT (D) <--+
* | | |
+ * | cl_unuse_try() | | cached lock found
* | | | cl_use_try()
+ * | | |
* | V |
* +------------------CACHED---------+
* |
*
* (C) is the point where Cancellation call-back is invoked.
*
+ * (D) is the transit state which means the lock is changing.
+ *
* Transition to FREEING state is possible from any other state in the
* diagram in case of unrecoverable error.
* </pre>
* handled, and is in ENQUEUED state after enqueue to S2 has been sent (note
* that in this case, sub-locks move from state to state, and top-lock remains
* in the same state).
- *
- * Separate UNLOCKING state is needed to maintain an invariant that in HELD
- * state lock is immediately ready for use.
*/
enum cl_lock_state {
/**
*/
CLS_HELD,
/**
- * Lock is in the transition from CLS_HELD to CLS_CACHED. Lock is in
- * this state only while cl_unuse() is executing against it.
+ * This state is used to mark the lock is being used, or unused.
+ * We need this state because the lock may have several sublocks,
+ * so it's impossible to have an atomic way to bring all sublocks
+ * into CLS_HELD state at use case, or all sublocks to CLS_CACHED
+ * at unuse case.
+ * If a thread is referring to a lock, and it sees the lock is in this
+ * state, it must wait for the lock.
+ * See state diagram for details.
*/
- CLS_UNLOCKING,
+ CLS_INTRANSIT,
/**
* Lock granted, not used.
*/
CLF_CANCELPEND = 1 << 1,
/** destruction is pending for this lock. */
CLF_DOOMED = 1 << 2,
- /** State update is pending. */
- CLF_STATE = 1 << 3
+ /** from enqueue RPC reply upcall. */
+ CLF_FROM_UPCALL= 1 << 3,
};
/**
* List of enclosed locks, so far. Locks are linked here through
* cl_lock::cll_inclosure.
*/
- struct list_head clc_list;
+ cfs_list_t clc_list;
/**
* True iff closure is in a `wait' mode. This determines what
* cl_lock_enclosure() does when a lock L to be added to the closure
*/
struct cl_lock {
/** Reference counter. */
- atomic_t cll_ref;
+ cfs_atomic_t cll_ref;
/** List of slices. Immutable after creation. */
- struct list_head cll_layers;
+ cfs_list_t cll_layers;
/**
* Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
* by cl_lock::cll_descr::cld_obj::coh_lock_guard.
*/
- struct list_head cll_linkage;
+ cfs_list_t cll_linkage;
/**
* Parameters of this lock. Protected by
* cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
*/
struct cl_lock_descr cll_descr;
/** Protected by cl_lock::cll_guard. */
- enum cl_lock_state cll_state;
- /** signals state changes. */
- cfs_waitq_t cll_wq;
- /**
- * Recursive lock, most fields in cl_lock{} are protected by this.
- *
- * Locking rules: this mutex is never held across network
- * communication, except when lock is being canceled.
- *
- * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
- * on a top-lock. Other direction is implemented through a
- * try-lock-repeat loop. Mutices of unrelated locks can be taken only
- * by try-locking.
- *
- * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
- */
- struct mutex cll_guard;
- cfs_task_t *cll_guarder;
+ enum cl_lock_state cll_state;
+ /** signals state changes. */
+ wait_queue_head_t cll_wq;
+ /**
+ * Recursive lock, most fields in cl_lock{} are protected by this.
+ *
+ * Locking rules: this mutex is never held across network
+ * communication, except when lock is being canceled.
+ *
+ * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
+ * on a top-lock. Other direction is implemented through a
+ * try-lock-repeat loop. Mutices of unrelated locks can be taken only
+ * by try-locking.
+ *
+ * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
+ */
+ struct mutex cll_guard;
+ struct task_struct *cll_guarder;
int cll_depth;
+ /**
+ * the owner for INTRANSIT state
+ */
+ struct task_struct *cll_intransit_owner;
int cll_error;
/**
* Number of holds on a lock. A hold prevents a lock from being
*
* \see cl_lock_closure
*/
- struct list_head cll_inclosure;
+ cfs_list_t cll_inclosure;
+ /**
+ * Conflict lock at queuing time.
+ */
+ struct cl_lock *cll_conflict;
/**
* A list of references to this lock, for debugging.
*/
* A list of holds on this lock, for debugging.
*/
struct lu_ref cll_holders;
- /**
- * A reference for cl_lock::cll_descr::cld_obj. For debugging.
- */
- struct lu_ref_link *cll_obj_ref;
+ /**
+ * A reference for cl_lock::cll_descr::cld_obj. For debugging.
+ */
+ struct lu_ref_link cll_obj_ref;
#ifdef CONFIG_LOCKDEP
- /* "dep_map" name is assumed by lockdep.h macros. */
- struct lockdep_map dep_map;
+ /* "dep_map" name is assumed by lockdep.h macros. */
+ struct lockdep_map dep_map;
#endif
};
struct cl_object *cls_obj;
const struct cl_lock_operations *cls_ops;
/** Linkage into cl_lock::cll_layers. Immutable after creation. */
- struct list_head cls_linkage;
+ cfs_list_t cls_linkage;
};
/**
*/
enum cl_lock_transition {
/** operation cannot be completed immediately. Wait for state change. */
- CLO_WAIT = 1,
+ CLO_WAIT = 1,
/** operation had to release lock mutex, restart. */
- CLO_REPEAT = 2
+ CLO_REPEAT = 2,
+ /** lower layer re-enqueued. */
+ CLO_REENQUEUED = 3,
};
/**
* usual return values of lock state-machine methods, this can return
* -ESTALE to indicate that lock cannot be returned to the cache, and
* has to be re-initialized.
+ * unuse is a one-shot operation, so it must NOT return CLO_WAIT.
*
- * \see ccc_lock_unlock(), lov_lock_unlock(), osc_lock_unlock()
+ * \see ccc_lock_unuse(), lov_lock_unuse(), osc_lock_unuse()
*/
int (*clo_unuse)(const struct lu_env *env,
const struct cl_lock_slice *slice);
const struct cl_lock_slice *slice,
struct cl_lock_closure *closure);
/**
- * Executed top-to-bottom when lock description changes (e.g., as a
+ * Executed bottom-to-top when lock description changes (e.g., as a
* result of server granting more generous lock than was requested).
*
* \see lovsub_lock_modify()
#define CL_LOCK_DEBUG(mask, env, lock, format, ...) \
do { \
- static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
\
- if (cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- cl_lock_print(env, &__info, lu_cdebug_printer, lock); \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ cl_lock_print(env, &msgdata, lu_cdebug_printer, lock); \
CDEBUG(mask, format , ## __VA_ARGS__); \
} \
} while (0)
+#define CL_LOCK_ASSERT(expr, env, lock) do { \
+ if (likely(expr)) \
+ break; \
+ \
+ CL_LOCK_DEBUG(D_ERROR, env, lock, "failed at %s.\n", #expr); \
+ LBUG(); \
+} while (0)
+
/** @} cl_lock */
/** \addtogroup cl_page_list cl_page_list
* @{
*/
struct cl_page_list {
- unsigned pl_nr;
- struct list_head pl_pages;
- cfs_task_t *pl_owner;
+ unsigned pl_nr;
+ cfs_list_t pl_pages;
+ struct task_struct *pl_owner;
};
-/** \addtogroup cl_page_list cl_page_list
+/**
* A 2-queue of pages. A convenience data-type for common use case, 2-queue
* contains an incoming page list and an outgoing page list.
*/
CIT_READ,
/** write system call */
CIT_WRITE,
- /** truncate system call */
- CIT_TRUNC,
+ /** truncate, utime system calls */
+ CIT_SETATTR,
/**
* page fault handling
*/
CIT_FAULT,
/**
+ * fsync system call handling
+ * To write out a range of file
+ */
+ CIT_FSYNC,
+ /**
* Miscellaneous io. This is used for occasional io activity that
* doesn't fit into other types. Currently this is used for:
*
*
* - glimpse. An io context to acquire glimpse lock.
*
+ * - grouplock. An io context to acquire group lock.
+ *
* CIT_MISC io is used simply as a context in which locks and pages
* are manipulated. Such io has no internal "process", that is,
* cl_io_loop() is never called for it.
* linkage into a list of all slices for a given cl_io, hanging off
* cl_io::ci_layers. Immutable after creation.
*/
- struct list_head cis_linkage;
+ cfs_list_t cis_linkage;
};
int (*cio_submit)(const struct lu_env *env,
const struct cl_io_slice *slice,
enum cl_req_type crt,
- struct cl_2queue *queue);
+ struct cl_2queue *queue);
} req_op[CRT_NR];
/**
* Read missing page.
*/
CEF_DISCARD_DATA = 0x00000004,
/**
- * tell the sub layers that it must be a `real' lock.
+ * tell the sub layers that it must be a `real' lock. This is used for
+ * mmapped-buffer locks and glimpse locks that must be never converted
+ * into lockless mode.
+ *
+ * \see vvp_mmap_locks(), cl_glimpse_lock().
*/
CEF_MUST = 0x00000008,
/**
- * tell the sub layers that never request a `real' lock.
- * currently, the CEF_MUST & CEF_NEVER are only used for mmap locks.
- * cl_io::ci_lockreq and these two flags: ci_lockreq just describes
- * generic information of lock requirement for this IO, especially for
- * locks which belong to the object doing IO; however, lock itself may
- * have precise requirements, this is described by the latter.
+ * tell the sub layers that never request a `real' lock. This flag is
+ * not used currently.
+ *
+ * cl_io::ci_lockreq and CEF_{MUST,NEVER} flags specify lockless
+ * conversion policy: ci_lockreq describes generic information of lock
+ * requirement for this IO, especially for locks which belong to the
+ * object doing IO; however, lock itself may have precise requirements
+ * that are described by the enqueue flags.
*/
CEF_NEVER = 0x00000010,
/**
+ * for async glimpse lock.
+ */
+ CEF_AGL = 0x00000020,
+ /**
* mask of enq_flags.
*/
- CEF_MASK = 0x0000001f
+ CEF_MASK = 0x0000003f,
};
/**
*/
struct cl_io_lock_link {
/** linkage into one of cl_lockset lists. */
- struct list_head cill_linkage;
+ cfs_list_t cill_linkage;
struct cl_lock_descr cill_descr;
struct cl_lock *cill_lock;
- /**
- * flags to enqueue lock for this IO. A combination of bit-flags from
- * enum cl_enq_flags.
- */
- __u32 cill_enq_flags;
/** optional destructor */
void (*cill_fini)(const struct lu_env *env,
struct cl_io_lock_link *link);
*/
struct cl_lockset {
/** locks to be acquired. */
- struct list_head cls_todo;
+ cfs_list_t cls_todo;
/** locks currently being processed. */
- struct list_head cls_curr;
+ cfs_list_t cls_curr;
/** locks acquired. */
- struct list_head cls_done;
+ cfs_list_t cls_done;
};
/**
CILR_NEVER
};
+enum cl_fsync_mode {
+ /** start writeback, do not wait for them to finish */
+ CL_FSYNC_NONE = 0,
+ /** start writeback and wait for them to finish */
+ CL_FSYNC_LOCAL = 1,
+ /** discard all of dirty pages in a specific file range */
+ CL_FSYNC_DISCARD = 2,
+ /** start writeback and make sure they have reached storage before
+ * return. OST_SYNC RPC must be issued and finished */
+ CL_FSYNC_ALL = 3
+};
+
struct cl_io_rw_common {
loff_t crw_pos;
size_t crw_count;
int crw_nonblock;
};
+
/**
* State for io.
*
*/
struct cl_io *ci_parent;
/** List of slices. Immutable after creation. */
- struct list_head ci_layers;
+ cfs_list_t ci_layers;
/** list of locks (to be) acquired by this io. */
struct cl_lockset ci_lockset;
/** lock requirements, this is just a help info for sublayers. */
union {
struct cl_rd_io {
struct cl_io_rw_common rd;
- int rd_is_sendfile;
} ci_rd;
struct cl_wr_io {
struct cl_io_rw_common wr;
int wr_append;
+ int wr_sync;
} ci_wr;
struct cl_io_rw_common ci_rw;
- struct cl_truncate_io {
- /** new size to which file is truncated */
- size_t tr_size;
- struct obd_capa *tr_capa;
- } ci_truncate;
+ struct cl_setattr_io {
+ struct ost_lvb sa_attr;
+ unsigned int sa_valid;
+ struct obd_capa *sa_capa;
+ } ci_setattr;
struct cl_fault_io {
/** page index within file. */
pgoff_t ft_index;
/** bytes valid byte on a faulted page. */
int ft_nob;
- /** writable page? */
+ /** writable page? for nopage() only */
int ft_writable;
/** page of an executable? */
int ft_executable;
+ /** page_mkwrite() */
+ int ft_mkwrite;
/** resulting page */
struct cl_page *ft_page;
} ci_fault;
+ struct cl_fsync_io {
+ loff_t fi_start;
+ loff_t fi_end;
+ struct obd_capa *fi_capa;
+ /** file system level fid */
+ struct lu_fid *fi_fid;
+ enum cl_fsync_mode fi_mode;
+ /* how many pages were written/discarded */
+ unsigned int fi_nr_written;
+ } ci_fsync;
} u;
struct cl_2queue ci_queue;
size_t ci_nob;
int ci_result;
- int ci_continue;
- /**
- * Number of pages owned by this IO. For invariant checking.
- */
- unsigned ci_owned_nr;
+ unsigned int ci_continue:1,
+ /**
+ * This io has held grouplock, to inform sublayers that
+ * don't do lockless i/o.
+ */
+ ci_no_srvlock:1,
+ /**
+ * The whole IO need to be restarted because layout has been changed
+ */
+ ci_need_restart:1,
+ /**
+ * to not refresh layout - the IO issuer knows that the layout won't
+ * change(page operations, layout change causes all page to be
+ * discarded), or it doesn't matter if it changes(sync).
+ */
+ ci_ignore_layout:1,
+ /**
+ * Check if layout changed after the IO finishes. Mainly for HSM
+ * requirement. If IO occurs to opening files, it doesn't need to
+ * verify layout because HSM won't release opening files.
+ * Right now, only two operations need to verify layout: glimpse
+ * and setattr.
+ */
+ ci_verify_layout:1,
+ /**
+ * file is released, restore has to be triggered by vvp layer
+ */
+ ci_restore_needed:1,
+ /**
+ * O_NOATIME
+ */
+ ci_noatime:1;
+ /**
+ * Number of pages owned by this IO. For invariant checking.
+ */
+ unsigned ci_owned_nr;
};
/** @} cl_io */
* Per-transfer attributes.
*/
struct cl_req_attr {
- /** Generic attributes for the server consumption. */
- struct obdo *cra_oa;
- /** Capability. */
- struct obd_capa *cra_capa;
+ /** Generic attributes for the server consumption. */
+ struct obdo *cra_oa;
+ /** Capability. */
+ struct obd_capa *cra_capa;
+ /** Jobid */
+ char cra_jobid[JOBSTATS_JOBID_SIZE];
};
/**
* A per-object state that (potentially multi-object) transfer request keeps.
*/
struct cl_req_obj {
- /** object itself */
- struct cl_object *ro_obj;
- /** reference to cl_req_obj::ro_obj. For debugging. */
- struct lu_ref_link *ro_obj_ref;
- /* something else? Number of pages for a given object? */
+ /** object itself */
+ struct cl_object *ro_obj;
+ /** reference to cl_req_obj::ro_obj. For debugging. */
+ struct lu_ref_link ro_obj_ref;
+ /* something else? Number of pages for a given object? */
};
/**
* req's pages.
*/
struct cl_req {
- enum cl_req_type crq_type;
+ enum cl_req_type crq_type;
/** A list of pages being transfered */
- struct list_head crq_pages;
+ cfs_list_t crq_pages;
/** Number of pages in cl_req::crq_pages */
- unsigned crq_nrpages;
+ unsigned crq_nrpages;
/** An array of objects which pages are in ->crq_pages */
- struct cl_req_obj *crq_o;
+ struct cl_req_obj *crq_o;
/** Number of elements in cl_req::crq_objs[] */
- unsigned crq_nrobjs;
- struct list_head crq_layers;
+ unsigned crq_nrobjs;
+ cfs_list_t crq_layers;
};
/**
struct cl_req_slice {
struct cl_req *crs_req;
struct cl_device *crs_dev;
- struct list_head crs_linkage;
+ cfs_list_t crs_linkage;
const struct cl_req_operations *crs_ops;
};
/* @} cl_req */
+enum cache_stats_item {
+ /** how many cache lookups were performed */
+ CS_lookup = 0,
+ /** how many times cache lookup resulted in a hit */
+ CS_hit,
+ /** how many entities are in the cache right now */
+ CS_total,
+ /** how many entities in the cache are actively used (and cannot be
+ * evicted) right now */
+ CS_busy,
+ /** how many entities were created at all */
+ CS_create,
+ CS_NR
+};
+
+#define CS_NAMES { "lookup", "hit", "total", "busy", "create" }
+
/**
* Stats for a generic cache (similar to inode, lu_object, etc. caches).
*/
struct cache_stats {
const char *cs_name;
- /** how many entities were created at all */
- atomic_t cs_created;
- /** how many cache lookups were performed */
- atomic_t cs_lookup;
- /** how many times cache lookup resulted in a hit */
- atomic_t cs_hit;
- /** how many entities are in the cache right now */
- atomic_t cs_total;
- /** how many entities in the cache are actively used (and cannot be
- * evicted) right now */
- atomic_t cs_busy;
+ cfs_atomic_t cs_stats[CS_NR];
};
/** These are not exported so far */
*/
struct cache_stats cs_pages;
struct cache_stats cs_locks;
- atomic_t cs_pages_state[CPS_NR];
- atomic_t cs_locks_state[CLS_NR];
+ cfs_atomic_t cs_pages_state[CPS_NR];
+ cfs_atomic_t cs_locks_state[CLS_NR];
};
int cl_site_init (struct cl_site *s, struct cl_device *top);
const struct cl_object_conf *conf);
void cl_object_prune (const struct lu_env *env, struct cl_object *obj);
void cl_object_kill (const struct lu_env *env, struct cl_object *obj);
+int cl_object_has_locks (struct cl_object *obj);
/**
* Returns true, iff \a o0 and \a o1 are slices of the same object.
return cl_object_header(o0) == cl_object_header(o1);
}
+static inline void cl_object_page_init(struct cl_object *clob, int size)
+{
+ clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
+ cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8);
+}
+
+static inline void *cl_object_page_slice(struct cl_object *clob,
+ struct cl_page *page)
+{
+ return (void *)((char *)page + clob->co_slice_off);
+}
+
/** @} cl_object */
/** \defgroup cl_page cl_page
* @{ */
-struct cl_page *cl_page_lookup(struct cl_object_header *hdr,
+enum {
+ CLP_GANG_OKAY = 0,
+ CLP_GANG_RESCHED,
+ CLP_GANG_AGAIN,
+ CLP_GANG_ABORT
+};
+
+/* callback of cl_page_gang_lookup() */
+typedef int (*cl_page_gang_cb_t) (const struct lu_env *, struct cl_io *,
+ struct cl_page *, void *);
+int cl_page_gang_lookup (const struct lu_env *env,
+ struct cl_object *obj,
+ struct cl_io *io,
+ pgoff_t start, pgoff_t end,
+ cl_page_gang_cb_t cb, void *cbdata);
+struct cl_page *cl_page_lookup (struct cl_object_header *hdr,
pgoff_t index);
-void cl_page_gang_lookup(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_io *io,
- pgoff_t start, pgoff_t end,
- struct cl_page_list *plist);
struct cl_page *cl_page_find (const struct lu_env *env,
struct cl_object *obj,
pgoff_t idx, struct page *vmpage,
enum cl_page_type type);
+struct cl_page *cl_page_find_sub (const struct lu_env *env,
+ struct cl_object *obj,
+ pgoff_t idx, struct page *vmpage,
+ struct cl_page *parent);
void cl_page_get (struct cl_page *page);
void cl_page_put (const struct lu_env *env,
struct cl_page *page);
void cl_page_header_print(const struct lu_env *env, void *cookie,
lu_printer_t printer,
const struct cl_page *pg);
-cfs_page_t *cl_page_vmpage (const struct lu_env *env,
+struct page *cl_page_vmpage (const struct lu_env *env,
struct cl_page *page);
-struct cl_page *cl_vmpage_page (cfs_page_t *vmpage, struct cl_object *obj);
+struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj);
struct cl_page *cl_page_top (struct cl_page *page);
-int cl_is_page (const void *addr);
const struct cl_page_slice *cl_page_at(const struct cl_page *page,
const struct lu_device_type *dtype);
int cl_page_own (const struct lu_env *env,
struct cl_io *io, struct cl_page *page);
+int cl_page_own_try (const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page);
void cl_page_assume (const struct lu_env *env,
struct cl_io *io, struct cl_page *page);
void cl_page_unassume (const struct lu_env *env,
void cl_page_clip (const struct lu_env *env, struct cl_page *pg,
int from, int to);
int cl_page_cancel (const struct lu_env *env, struct cl_page *page);
+int cl_page_flush (const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg);
/** @} transfer */
struct cl_page *pg);
int cl_page_is_vmlocked (const struct lu_env *env,
const struct cl_page *pg);
-void cl_page_export (const struct lu_env *env, struct cl_page *pg);
+void cl_page_export (const struct lu_env *env,
+ struct cl_page *pg, int uptodate);
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
struct cl_page *page);
loff_t cl_offset (const struct cl_object *obj, pgoff_t idx);
const char *scope, const void *source);
struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
const struct cl_lock_descr *need,
- __u32 enqflags,
const char *scope, const void *source);
-struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct cl_lock *except,
- int pending, int canceld);
+struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
+ struct cl_object *obj, pgoff_t index,
+ struct cl_lock *except, int pending,
+ int canceld);
+static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env,
+ struct cl_object *obj,
+ struct cl_page *page,
+ struct cl_lock *except,
+ int pending, int canceld)
+{
+ LASSERT(cl_object_header(obj) == cl_object_header(page->cp_obj));
+ return cl_lock_at_pgoff(env, obj, page->cp_index, except,
+ pending, canceld);
+}
const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
const struct lu_device_type *dtype);
void cl_lock_put (const struct lu_env *env, struct cl_lock *lock);
void cl_lock_hold_add (const struct lu_env *env, struct cl_lock *lock,
const char *scope, const void *source);
+void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source);
void cl_lock_unhold (const struct lu_env *env, struct cl_lock *lock,
const char *scope, const void *source);
void cl_lock_release (const struct lu_env *env, struct cl_lock *lock,
const char *scope, const void *source);
void cl_lock_user_add (const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_compatible(const struct cl_lock *lock1,
- const struct cl_lock *lock2);
+void cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock);
+
+enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
+ struct cl_lock *lock);
+void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
+ enum cl_lock_state state);
+int cl_lock_is_intransit(struct cl_lock *lock);
+
+int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock,
+ int keep_mutex);
/** \name statemachine
* Interface to lock state machine consists of 3 parts:
struct cl_io *io, __u32 flags);
int cl_unuse_try (const struct lu_env *env, struct cl_lock *lock);
int cl_wait_try (const struct lu_env *env, struct cl_lock *lock);
-int cl_use_try (const struct lu_env *env, struct cl_lock *lock);
+int cl_use_try (const struct lu_env *env, struct cl_lock *lock, int atomic);
+
/** @} statemachine */
void cl_lock_signal (const struct lu_env *env, struct cl_lock *lock);
int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock);
void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock,
enum cl_lock_state state);
-int cl_queue_match (const struct list_head *queue,
+int cl_queue_match (const cfs_list_t *queue,
const struct cl_lock_descr *need);
void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock);
void cl_lock_mutex_put (const struct lu_env *env, struct cl_lock *lock);
int cl_lock_is_mutexed (struct cl_lock *lock);
int cl_lock_nr_mutexed (const struct lu_env *env);
-int cl_lock_page_out (const struct lu_env *env, struct cl_lock *lock,
- int discard);
+int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock);
int cl_lock_ext_match (const struct cl_lock_descr *has,
const struct cl_lock_descr *need);
int cl_lock_descr_match(const struct cl_lock_descr *has,
void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock);
void cl_lock_error (const struct lu_env *env, struct cl_lock *lock, int error);
void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait);
-int cl_is_lock (const void *addr);
unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock);
int cl_io_lock_add (const struct lu_env *env, struct cl_io *io,
struct cl_io_lock_link *link);
int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
- struct cl_lock_descr *descr, int enqflags);
+ struct cl_lock_descr *descr);
int cl_io_read_page (const struct lu_env *env, struct cl_io *io,
struct cl_page *page);
int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
int cl_io_commit_write (const struct lu_env *env, struct cl_io *io,
struct cl_page *page, unsigned from, unsigned to);
int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io,
- enum cl_req_type iot, struct cl_2queue *queue);
+ enum cl_req_type iot, struct cl_2queue *queue);
+int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io,
+ enum cl_req_type iot, struct cl_2queue *queue,
+ long timeout);
void cl_io_rw_advance (const struct lu_env *env, struct cl_io *io,
size_t nob);
int cl_io_cancel (const struct lu_env *env, struct cl_io *io,
return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append;
}
-int cl_io_is_sendfile(const struct cl_io *io);
+static inline int cl_io_is_sync_write(const struct cl_io *io)
+{
+ return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_sync;
+}
+
+static inline int cl_io_is_mkwrite(const struct cl_io *io)
+{
+ return io->ci_type == CIT_FAULT && io->u.ci_fault.ft_mkwrite;
+}
+
+/**
+ * True, iff \a io is a truncate(2).
+ */
+static inline int cl_io_is_trunc(const struct cl_io *io)
+{
+ return io->ci_type == CIT_SETATTR &&
+ (io->u.ci_setattr.sa_valid & ATTR_SIZE);
+}
struct cl_io *cl_io_top(struct cl_io *io);
* @{ */
/**
+ * Last page in the page list.
+ */
+static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
+{
+ LASSERT(plist->pl_nr > 0);
+ return cfs_list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
+}
+
+/**
* Iterate over pages in a page list.
*/
#define cl_page_list_for_each(page, list) \
- list_for_each_entry((page), &(list)->pl_pages, cp_batch)
+ cfs_list_for_each_entry((page), &(list)->pl_pages, cp_batch)
/**
* Iterate over pages in a page list, taking possible removals into account.
*/
#define cl_page_list_for_each_safe(page, temp, list) \
- list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
+ cfs_list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
void cl_page_list_init (struct cl_page_list *plist);
void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page);
* anchor and wakes up waiting thread when transfer is complete.
*/
struct cl_sync_io {
- /** number of pages yet to be transferred. */
- atomic_t csi_sync_nr;
- /** completion to be signaled when transfer is complete. */
- struct completion csi_sync_completion;
- /** error code. */
- int csi_sync_rc;
+ /** number of pages yet to be transferred. */
+ cfs_atomic_t csi_sync_nr;
+ /** error code. */
+ int csi_sync_rc;
+ /** barrier before destroying this structure */
+ cfs_atomic_t csi_barrier;
+ /** completion to be signaled when transfer is complete. */
+ wait_queue_head_t csi_waitq;
};
void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct cl_sync_io *anchor);
+ struct cl_page_list *queue, struct cl_sync_io *anchor,
+ long timeout);
void cl_sync_io_note(struct cl_sync_io *anchor, int ioret);
/** @} cl_sync_io */
* longer used environments instead of destroying them;
*
* - there is a notion of "current" environment, attached to the kernel
- * data structure representing current thread (current->journal_info in
- * Linux kernel). Top-level lustre code allocates an environment and makes
- * it current, then calls into non-lustre code, that in turn calls lustre
- * back. Low-level lustre code thus called can fetch environment created
- * by the top-level code and reuse it, avoiding additional environment
- * allocation.
+ * data structure representing current thread. Top-level lustre code
+ * allocates an environment and makes it current, then calls into
+ * non-lustre code, that in turn calls lustre back. Low-level lustre
+ * code thus called can fetch environment created by the top-level code
+ * and reuse it, avoiding additional environment allocation.
+ * Right now, three interfaces can attach the cl_env to running thread:
+ * - cl_env_get
+ * - cl_env_implant
+ * - cl_env_reexit (cl_env_reenter must have been called beforehand)
*
* \see lu_env, lu_context, lu_context_key
* @{ */
struct lu_device *next);
/** @} clio */
+int cl_global_init(void);
+void cl_global_fini(void);
+
#endif /* _LINUX_CL_OBJECT_H */