#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
+#include <lustre_dlm.h>
struct inode;
* \see vvp_cl_ops, lov_cl_ops, lovsub_cl_ops, osc_cl_ops
*/
struct cl_device_operations {
- /**
- * Initialize cl_req. This method is called top-to-bottom on all
- * devices in the stack to get them a chance to allocate layer-private
- * data, and to attach them to the cl_req by calling
- * cl_req_slice_add().
- *
- * \see osc_req_init(), lov_req_init(), lovsub_req_init()
- * \see ccc_req_init()
- */
- int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req);
+ /**
+ * Initialize cl_req. This method is called top-to-bottom on all
+ * devices in the stack to get them a chance to allocate layer-private
+ * data, and to attach them to the cl_req by calling
+ * cl_req_slice_add().
+ *
+ * \see osc_req_init(), lov_req_init(), lovsub_req_init()
+ * \see vvp_req_init()
+ */
+ int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev,
+ struct cl_req *req);
};
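For orientation, a layer's cdo_req_init() usually just allocates its private slice and attaches it with cl_req_slice_add(). Below is a minimal sketch only; the foo_* names, the slab cache and the ops table are hypothetical placeholders, not part of this patch:

static int foo_req_init(const struct lu_env *env, struct cl_device *dev,
                        struct cl_req *req)
{
        struct foo_req *fr;

        /* Allocate the layer-private part of the cl_req (hypothetical cache). */
        fr = kmem_cache_zalloc(foo_req_kmem, GFP_NOFS);
        if (!fr)
                return -ENOMEM;

        /* Attach the slice so this layer's cl_req_operations get called back. */
        cl_req_slice_add(req, &fr->fr_cl, dev, &foo_req_ops);
        return 0;
}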
/**
* \return the same convention as for
* cl_object_operations::coo_attr_get() is used.
*/
- int (*coo_attr_set)(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid);
+ int (*coo_attr_update)(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned valid);
/**
* Update object configuration. Called top-to-bottom to modify object
* configuration.
*/
int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj,
struct lov_user_md __user *lum);
+ /**
+ * Find whether there is any callback data (an LDLM lock) attached to
+ * the object.
+ */
+ int (*coo_find_cbdata)(const struct lu_env *env, struct cl_object *obj,
+ ldlm_iterator_t iter, void *data);
};
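The new coo_find_cbdata() method is driven through cl_object_find_cbdata(), declared later in this header. A rough usage sketch, under assumed semantics and with made-up foo_* names: the caller supplies an ldlm_iterator_t that records a hit and returns LDLM_ITER_STOP to end the walk early.

/* Hypothetical iterator: note whether at least one LDLM lock is attached. */
static int foo_lock_found_cb(struct ldlm_lock *lock, void *data)
{
        *(bool *)data = true;
        return LDLM_ITER_STOP;  /* the first match is enough */
}

static bool foo_object_has_cbdata(const struct lu_env *env,
                                  struct cl_object *obj)
{
        bool found = false;

        cl_object_find_cbdata(env, obj, foo_lock_found_cb, &found);
        return found;
}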
/**
/**
* Per-layer part of cl_page.
*
- * \see ccc_page, lov_page, osc_page
+ * \see vvp_page, lov_page, osc_page
*/
struct cl_page_slice {
struct cl_page *cpl_page;
*
* LIFE CYCLE
*
- * cl_lock is reference counted. When reference counter drops to 0, lock is
- * placed in the cache, except when lock is in CLS_FREEING state. CLS_FREEING
- * lock is destroyed when last reference is released. Referencing between
- * top-lock and its sub-locks is described in the lov documentation module.
- *
- * STATE MACHINE
- *
- * Also, cl_lock is a state machine. This requires some clarification. One of
- * the goals of client IO re-write was to make IO path non-blocking, or at
- * least to make it easier to make it non-blocking in the future. Here
- * `non-blocking' means that when a system call (read, write, truncate)
- * reaches a situation where it has to wait for a communication with the
- * server, it should --instead of waiting-- remember its current state and
- * switch to some other work. E.g,. instead of waiting for a lock enqueue,
- * client should proceed doing IO on the next stripe, etc. Obviously this is
- * rather radical redesign, and it is not planned to be fully implemented at
- * this time, instead we are putting some infrastructure in place, that would
- * make it easier to do asynchronous non-blocking IO easier in the
- * future. Specifically, where old locking code goes to sleep (waiting for
- * enqueue, for example), new code returns cl_lock_transition::CLO_WAIT. When
- * enqueue reply comes, its completion handler signals that lock state-machine
- * is ready to transit to the next state. There is some generic code in
- * cl_lock.c that sleeps, waiting for these signals. As a result, for users of
- * this cl_lock.c code, it looks like locking is done in normal blocking
- * fashion, and it the same time it is possible to switch to the non-blocking
- * locking (simply by returning cl_lock_transition::CLO_WAIT from cl_lock.c
- * functions).
- *
- * For a description of state machine states and transitions see enum
- * cl_lock_state.
- *
- * There are two ways to restrict a set of states which lock might move to:
- *
- * - placing a "hold" on a lock guarantees that lock will not be moved
- * into cl_lock_state::CLS_FREEING state until hold is released. Hold
- * can be only acquired on a lock that is not in
- * cl_lock_state::CLS_FREEING. All holds on a lock are counted in
- * cl_lock::cll_holds. Hold protects lock from cancellation and
- * destruction. Requests to cancel and destroy a lock on hold will be
- * recorded, but only honored when last hold on a lock is released;
- *
- * - placing a "user" on a lock guarantees that lock will not leave
- * cl_lock_state::CLS_NEW, cl_lock_state::CLS_QUEUING,
- * cl_lock_state::CLS_ENQUEUED and cl_lock_state::CLS_HELD set of
- * states, once it enters this set. That is, if a user is added onto a
- * lock in a state not from this set, it doesn't immediately enforce
- * lock to move to this set, but once lock enters this set it will
- * remain there until all users are removed. Lock users are counted in
- * cl_lock::cll_users.
- *
- * User is used to assure that lock is not canceled or destroyed while
- * it is being enqueued, or actively used by some IO.
- *
- * Currently, a user always comes with a hold (cl_lock_invariant()
- * checks that a number of holds is not less than a number of users).
- *
- * CONCURRENCY
- *
- * This is how lock state-machine operates. struct cl_lock contains a mutex
- * cl_lock::cll_guard that protects struct fields.
- *
- * - mutex is taken, and cl_lock::cll_state is examined.
- *
- * - for every state there are possible target states where lock can move
- * into. They are tried in order. Attempts to move into next state are
- * done by _try() functions in cl_lock.c:cl_{enqueue,unlock,wait}_try().
- *
- * - if the transition can be performed immediately, state is changed,
- * and mutex is released.
- *
- * - if the transition requires blocking, _try() function returns
- * cl_lock_transition::CLO_WAIT. Caller unlocks mutex and goes to
- * sleep, waiting for possibility of lock state change. It is woken
- * up when some event occurs, that makes lock state change possible
- * (e.g., the reception of the reply from the server), and repeats
- * the loop.
- *
- * Top-lock and sub-lock has separate mutexes and the latter has to be taken
- * first to avoid dead-lock.
- *
- * To see an example of interaction of all these issues, take a look at the
- * lov_cl.c:lov_lock_enqueue() function. It is called as a part of
- * cl_enqueue_try(), and tries to advance top-lock to ENQUEUED state, by
- * advancing state-machines of its sub-locks (lov_lock_enqueue_one()). Note
- * also, that it uses trylock to grab sub-lock mutex to avoid dead-lock. It
- * also has to handle CEF_ASYNC enqueue, when sub-locks enqueues have to be
- * done in parallel, rather than one after another (this is used for glimpse
- * locks, that cannot dead-lock).
+ * cl_lock is a cacheless data container that describes the lock requirements
+ * of an I/O. A cl_lock is created before the I/O starts and is destroyed when
+ * the I/O completes.
+ *
+ * cl_lock relies on an LDLM lock to provide the actual lock semantics; the
+ * LDLM lock is attached to the cl_lock at the OSC layer and, unlike the
+ * cl_lock itself, is still cacheable.
*
* INTERFACE AND USAGE
*
- * struct cl_lock_operations provide a number of call-backs that are invoked
- * when events of interest occurs. Layers can intercept and handle glimpse,
- * blocking, cancel ASTs and a reception of the reply from the server.
+ * Two major methods are supported for cl_lock: clo_enqueue and clo_cancel. A
+ * cl_lock is enqueued by cl_lock_request(), which calls the clo_enqueue()
+ * method of each layer to enqueue the lock. At the LOV layer, if a cl_lock
+ * consists of multiple sub cl_locks, each sub-lock is enqueued in turn. At
+ * the OSC layer, the enqueue request first tries to reuse a cached LDLM lock;
+ * otherwise a new LDLM lock has to be requested from the OST side.
*
- * One important difference with the old client locking model is that new
- * client has a representation for the top-lock, whereas in the old code only
- * sub-locks existed as real data structures and file-level locks are
- * represented by "request sets" that are created and destroyed on each and
- * every lock creation.
+ * cl_lock_cancel() must be called to release a cl_lock after use. The
+ * clo_cancel() method is called for each layer to release the resources held
+ * by this lock. At the OSC layer, the reference on the LDLM lock, taken at
+ * clo_enqueue time, is released.
*
- * Top-locks are cached, and can be found in the cache by the system calls. It
- * is possible that top-lock is in cache, but some of its sub-locks were
- * canceled and destroyed. In that case top-lock has to be enqueued again
- * before it can be used.
+ * An LDLM lock can only be canceled when no cl_lock is using it.
*
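+ * As an illustration only (a sketch based on the description above, not the
+ * authoritative prototypes, which are declared elsewhere in this header):
+ *
+ *     rc = cl_lock_request(env, io, lock);  <- clo_enqueue() on each layer
+ *     if (rc == 0) {
+ *             ... perform the IO covered by this lock ...
+ *             cl_lock_cancel(env, lock);    <- clo_cancel() on each layer;
+ *                                              OSC drops its LDLM reference
+ *     }
+ *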
* Overall process of the locking during IO operation is as following:
*
*
* - when all locks are acquired, IO is performed;
*
- * - locks are released into cache.
+ * - locks are released after IO is complete.
*
* Striping introduces major additional complexity into locking. The
* fundamental problem is that it is generally unsafe to actively use (hold)
* buf is a part of memory mapped Lustre file, a lock or locks protecting buf
* has to be held together with the usual lock on [offset, offset + count].
*
- * As multi-stripe locks have to be allowed, it makes sense to cache them, so
- * that, for example, a sequence of O_APPEND writes can proceed quickly
- * without going down to the individual stripes to do lock matching. On the
- * other hand, multi-stripe locks shouldn't be used by normal read/write
- * calls. To achieve this, every layer can implement ->clo_fits_into() method,
- * that is called by lock matching code (cl_lock_lookup()), and that can be
- * used to selectively disable matching of certain locks for certain IOs. For
- * exmaple, lov layer implements lov_lock_fits_into() that allow multi-stripe
- * locks to be matched only for truncates and O_APPEND writes.
- *
* Interaction with DLM
*
* In the expected setup, cl_lock is ultimately backed up by a collection of
/**
* Per-layer part of cl_lock
*
- * \see ccc_lock, lov_lock, lovsub_lock, osc_lock
+ * \see vvp_lock, lov_lock, lovsub_lock, osc_lock
*/
struct cl_lock_slice {
struct cl_lock *cls_lock;
* @anchor for resources
* \retval -ve failure
*
- * \see ccc_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
+ * \see vvp_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
* \see osc_lock_enqueue()
*/
int (*clo_enqueue)(const struct lu_env *env,
void (*clo_cancel)(const struct lu_env *env,
const struct cl_lock_slice *slice);
/** @} */
- /**
- * Destructor. Frees resources and the slice.
- *
- * \see ccc_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
- * \see osc_lock_fini()
- */
+ /**
+ * Destructor. Frees resources and the slice.
+ *
+ * \see vvp_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
+ * \see osc_lock_fini()
+ */
void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice);
/**
* Optional debugging helper. Prints given lock slice.
* This is usually embedded into layer session data, rather than allocated
* dynamically.
*
- * \see vvp_io, lov_io, osc_io, ccc_io
+ * \see vvp_io, lov_io, osc_io
*/
struct cl_io_slice {
struct cl_io *cis_io;
struct cl_rd_io {
struct cl_io_rw_common rd;
} ci_rd;
- struct cl_wr_io {
- struct cl_io_rw_common wr;
- int wr_append;
+ struct cl_wr_io {
+ struct cl_io_rw_common wr;
+ int wr_append;
int wr_sync;
- } ci_wr;
- struct cl_io_rw_common ci_rw;
- struct cl_setattr_io {
- struct ost_lvb sa_attr;
- unsigned int sa_valid;
- struct obd_capa *sa_capa;
- } ci_setattr;
+ } ci_wr;
+ struct cl_io_rw_common ci_rw;
+ struct cl_setattr_io {
+ struct ost_lvb sa_attr;
+ unsigned int sa_valid;
+ int sa_stripe_index;
+ struct lu_fid *sa_parent_fid;
+ struct obd_capa *sa_capa;
+ } ci_setattr;
struct cl_fault_io {
/** page index within file. */
pgoff_t ft_index;
return container_of(site, struct cl_site, cs_lu);
}
-static inline int lu_device_is_cl(const struct lu_device *d)
-{
- return d->ld_type->ldt_tags & LU_DEVICE_CL;
-}
-
static inline struct cl_device *lu2cl_dev(const struct lu_device *d)
{
LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d));
void cl_object_attr_unlock(struct cl_object *o);
int cl_object_attr_get (const struct lu_env *env, struct cl_object *obj,
struct cl_attr *attr);
-int cl_object_attr_set (const struct lu_env *env, struct cl_object *obj,
+int cl_object_attr_update(const struct lu_env *env, struct cl_object *obj,
const struct cl_attr *attr, unsigned valid);
int cl_object_glimpse (const struct lu_env *env, struct cl_object *obj,
struct ost_lvb *lvb);
const struct cl_object_conf *conf);
int cl_object_prune (const struct lu_env *env, struct cl_object *obj);
void cl_object_kill (const struct lu_env *env, struct cl_object *obj);
-int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
- struct lov_user_md __user *lum);
+int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
+ struct lov_user_md __user *lum);
+int cl_object_find_cbdata(const struct lu_env *env, struct cl_object *obj,
+ ldlm_iterator_t iter, void *data);
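As a minimal usage sketch for the renamed cl_object_attr_update() (assuming the CAT_* valid bits and the attr-lock helpers declared above; the wrapper function itself is hypothetical):

static int foo_update_size(const struct lu_env *env, struct cl_object *obj,
                           loff_t new_size)
{
        struct cl_attr attr = { 0 };
        int rc;

        attr.cat_size = new_size;

        /* Attribute changes must be made under the object's attribute lock. */
        cl_object_attr_lock(obj);
        rc = cl_object_attr_update(env, obj, &attr, CAT_SIZE);
        cl_object_attr_unlock(obj);

        return rc;
}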
/**
* Returns true, iff \a o0 and \a o1 are slices of the same object.