__u64 cld_gid;
/** Lock mode. */
enum cl_lock_mode cld_mode;
+ /**
+ * Flags to enqueue the lock with. A combination of bit-flags from
+ * enum cl_enq_flags.
+ */
+ __u32 cld_enq_flags;
};
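+
+/*
+ * Enqueue flags now travel in the lock description itself instead of in the
+ * per-IO lock link (see cl_io_lock_link below).  A minimal usage sketch,
+ * assuming "descr" already describes the object and extent; the mode, flag
+ * and scope/source arguments shown are illustrative only:
+ *
+ *	descr->cld_mode      = CLM_READ;
+ *	descr->cld_enq_flags = CEF_NONBLOCK;
+ *	lock = cl_lock_request(env, io, descr, "sketch", current);
+ */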
#define DDESCR "%s(%d):[%lu, %lu]"
* | | V
* | | HELD<---------+
* | | | |
- * | | | |
+ * | | | | cl_use_try()
* | | cl_unuse_try() | |
* | | | |
- * | | V | cached
- * | +------------>UNLOCKING (*) | lock found
- * | | |
- * | cl_unuse_try() | |
+ * | | V ---+
+ * | +------------>INTRANSIT (D) <--+
* | | |
+ * | cl_unuse_try() | | cached lock found
* | | | cl_use_try()
+ * | | |
* | V |
* +------------------CACHED---------+
* |
*
* (C) is the point where Cancellation call-back is invoked.
*
+ * (D) is the transit state: the lock is in the middle of changing its state.
+ *
* Transition to FREEING state is possible from any other state in the
* diagram in case of unrecoverable error.
* </pre>
* handled, and is in ENQUEUED state after enqueue to S2 has been sent (note
* that in this case, sub-locks move from state to state, and top-lock remains
* in the same state).
- *
- * Separate UNLOCKING state is needed to maintain an invariant that in HELD
- * state lock is immediately ready for use.
*/
enum cl_lock_state {
/**
*/
CLS_HELD,
/**
- * Lock is in the transition from CLS_HELD to CLS_CACHED. Lock is in
- * this state only while cl_unuse() is executing against it.
+ * This state marks a lock that is in the middle of being used or
+ * unused. It is needed because a lock may consist of several
+ * sublocks, so there is no atomic way to bring all sublocks into
+ * CLS_HELD on use, or all sublocks into CLS_CACHED on unuse.
+ * A thread that finds a lock in this state must wait until the
+ * lock leaves it.
+ * See the state diagram above, and the usage sketch next to
+ * cl_lock_intransit() below, for details.
*/
- CLS_UNLOCKING,
+ CLS_INTRANSIT,
/**
* Lock granted, not used.
*/
/** cancellation is pending for this lock. */
CLF_CANCELPEND = 1 << 1,
/** destruction is pending for this lock. */
- CLF_DOOMED = 1 << 2,
- /** State update is pending. */
- CLF_STATE = 1 << 3
+ CLF_DOOMED = 1 << 2
};
/**
cfs_task_t *cll_guarder;
int cll_depth;
+ /**
+ * The thread that owns the lock while it is in the CLS_INTRANSIT
+ * state, i.e. the thread that put it there.
+ */
+ cfs_task_t *cll_intransit_owner;
int cll_error;
/**
* Number of holds on a lock. A hold prevents a lock from being
* usual return values of lock state-machine methods, this can return
* -ESTALE to indicate that lock cannot be returned to the cache, and
* has to be re-initialized.
+ * unuse is a one-shot operation, so it must NOT return CLO_WAIT.
*
- * \see ccc_lock_unlock(), lov_lock_unlock(), osc_lock_unlock()
+ * \see ccc_lock_unuse(), lov_lock_unuse(), osc_lock_unuse()
*/
int (*clo_unuse)(const struct lu_env *env,
const struct cl_lock_slice *slice);
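+	/*
+	 * A minimal sketch of the clo_unuse() contract described above;
+	 * the method name and the "lock_was_cancelled" predicate are
+	 * illustrative assumptions only:
+	 *
+	 *	static int foo_lock_unuse(const struct lu_env *env,
+	 *				  const struct cl_lock_slice *slice)
+	 *	{
+	 *		if (lock_was_cancelled(slice))
+	 *			return -ESTALE;  (lock cannot be cached)
+	 *		return 0;                (done; never CLO_WAIT)
+	 *	}
+	 */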
const struct cl_lock_slice *slice,
struct cl_lock_closure *closure);
/**
- * Executed top-to-bottom when lock description changes (e.g., as a
+ * Executed bottom-to-top when lock description changes (e.g., as a
* result of server granting more generous lock than was requested).
*
* \see lovsub_lock_modify()
struct list_head cill_linkage;
struct cl_lock_descr cill_descr;
struct cl_lock *cill_lock;
- /**
- * flags to enqueue lock for this IO. A combination of bit-flags from
- * enum cl_enq_flags.
- */
- __u32 cill_enq_flags;
/** optional destructor */
void (*cill_fini)(const struct lu_env *env,
struct cl_io_lock_link *link);
int crw_nonblock;
};
+
/**
* State for io.
*
union {
struct cl_rd_io {
struct cl_io_rw_common rd;
- int rd_is_sendfile;
} ci_rd;
struct cl_wr_io {
struct cl_io_rw_common wr;
const struct cl_object_conf *conf);
void cl_object_prune (const struct lu_env *env, struct cl_object *obj);
void cl_object_kill (const struct lu_env *env, struct cl_object *obj);
+int cl_object_has_locks (struct cl_object *obj);
/**
* Returns true, iff \a o0 and \a o1 are slices of the same object.
const char *scope, const void *source);
struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
const struct cl_lock_descr *need,
- __u32 enqflags,
const char *scope, const void *source);
struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj,
struct cl_page *page, struct cl_lock *except,
int cl_lock_compatible(const struct cl_lock *lock1,
const struct cl_lock *lock2);
+enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
+ struct cl_lock *lock);
+
+void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
+ enum cl_lock_state state);
+
+int cl_lock_is_intransit(struct cl_lock *lock);
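+
+/*
+ * A minimal usage sketch of the INTRANSIT interface above, assuming that
+ * cl_lock_intransit() marks the lock CLS_INTRANSIT and returns its previous
+ * state, and that cl_lock_extransit() installs whatever state the caller
+ * passes in.  The "result" variable and the elided sublock work are purely
+ * illustrative:
+ *
+ *	enum cl_lock_state old;
+ *
+ *	old = cl_lock_intransit(env, lock);
+ *	result = ...bring every sublock into the target state...;
+ *	cl_lock_extransit(env, lock, result == 0 ? CLS_HELD : old);
+ *
+ * Any other thread that finds cl_lock_is_intransit(lock) true must wait
+ * for the owner to move the lock out of CLS_INTRANSIT before using it.
+ */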
+
/** \name statemachine statemachine
* Interface to lock state machine consists of 3 parts:
*
struct cl_io *io, __u32 flags);
int cl_unuse_try (const struct lu_env *env, struct cl_lock *lock);
int cl_wait_try (const struct lu_env *env, struct cl_lock *lock);
-int cl_use_try (const struct lu_env *env, struct cl_lock *lock);
+int cl_use_try (const struct lu_env *env, struct cl_lock *lock, int atomic);
/** @} statemachine */
void cl_lock_signal (const struct lu_env *env, struct cl_lock *lock);
int cl_io_lock_add (const struct lu_env *env, struct cl_io *io,
struct cl_io_lock_link *link);
int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
- struct cl_lock_descr *descr, int enqflags);
+ struct cl_lock_descr *descr);
int cl_io_read_page (const struct lu_env *env, struct cl_io *io,
struct cl_page *page);
int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append;
}
-int cl_io_is_sendfile(const struct cl_io *io);
-
struct cl_io *cl_io_top(struct cl_io *io);
void cl_io_print(const struct lu_env *env, void *cookie,