*/
CLM_PHANTOM,
CLM_READ,
- CLM_WRITE
+ CLM_WRITE,
+ CLM_GROUP
};
/**
pgoff_t cld_start;
/** Index of the last page (inclusive) protected by this lock. */
pgoff_t cld_end;
+ /** Group ID, for group lock */
+ __u64 cld_gid;
/** Lock mode. */
enum cl_lock_mode cld_mode;
};
*
* - glimpse. An io context to acquire glimpse lock.
*
+ * - grouplock. An io context to acquire group lock.
+ *
* CIT_MISC io is used simply as a context in which locks and pages
* are manipulated. Such io has no internal "process", that is,
* cl_io_loop() is never called for it.
CIS_FINI
};
+enum cl_req_priority { /* submission priority, passed to cio_submit() and cl_io_submit_rw() */
+ CRP_NORMAL, /* default priority for ordinary transfers */
+ CRP_CANCEL /* urgent; presumably pages flushed on lock cancellation -- TODO confirm against callers */
+};
+
/**
* IO state private for a layer.
*
int (*cio_submit)(const struct lu_env *env,
const struct cl_io_slice *slice,
enum cl_req_type crt,
- struct cl_2queue *queue);
+ struct cl_2queue *queue,
+ enum cl_req_priority priority);
} req_op[CRT_NR];
/**
* Read missing page.
*/
CEF_DISCARD_DATA = 0x00000004,
/**
- * tell the sub layers that it must be a `real' lock.
+ * tell the sub layers that it must be a `real' lock. This is used for
+ * mmapped-buffer locks and glimpse locks that must never be converted
+ * into lockless mode.
+ *
+ * \see vvp_mmap_locks(), cl_glimpse_lock().
*/
CEF_MUST = 0x00000008,
/**
- * tell the sub layers that never request a `real' lock.
- * currently, the CEF_MUST & CEF_NEVER are only used for mmap locks.
- * cl_io::ci_lockreq and these two flags: ci_lockreq just describes
- * generic information of lock requirement for this IO, especially for
- * locks which belong to the object doing IO; however, lock itself may
- * have precise requirements, this is described by the latter.
+ * tell the sub layers never to request a `real' lock. This flag is
+ * currently unused.
+ *
+ * cl_io::ci_lockreq and CEF_{MUST,NEVER} flags specify lockless
+ * conversion policy: ci_lockreq describes generic information of lock
+ * requirement for this IO, especially for locks which belong to the
+ * object doing IO; however, lock itself may have precise requirements
+ * that are described by the enqueue flags.
*/
CEF_NEVER = 0x00000010,
/**
struct cl_lockset ci_lockset;
/** lock requirements, this is just a help info for sublayers. */
enum cl_io_lock_dmd ci_lockreq;
+ /**
+ * This io holds a group lock; it informs sublayers that
+ * they must not do lockless i/o.
+ */
+ int ci_no_srvlock;
union {
struct cl_rd_io {
struct cl_io_rw_common rd;
int cl_io_commit_write (const struct lu_env *env, struct cl_io *io,
struct cl_page *page, unsigned from, unsigned to);
int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io,
- enum cl_req_type iot, struct cl_2queue *queue);
+ enum cl_req_type iot, struct cl_2queue *queue,
+ enum cl_req_priority priority);
void cl_io_rw_advance (const struct lu_env *env, struct cl_io *io,
size_t nob);
int cl_io_cancel (const struct lu_env *env, struct cl_io *io,