*/
CLM_PHANTOM,
CLM_READ,
- CLM_WRITE
+ CLM_WRITE,
+ CLM_GROUP
};
/**
pgoff_t cld_start;
/** Index of the last page (inclusive) protected by this lock. */
pgoff_t cld_end;
+ /** Group ID, for group lock */
+ __u64 cld_gid;
/** Lock mode. */
enum cl_lock_mode cld_mode;
};
*
* - glimpse. An io context to acquire glimpse lock.
*
+ * - grouplock. An io context to acquire group lock.
+ *
* CIT_MISC io is used simply as a context in which locks and pages
* are manipulated. Such io has no internal "process", that is,
* cl_io_loop() is never called for it.
CIS_FINI
};
+enum cl_req_priority {
+ CRP_NORMAL,
+ CRP_CANCEL
+};
+
/**
* IO state private for a layer.
*
int (*cio_submit)(const struct lu_env *env,
const struct cl_io_slice *slice,
enum cl_req_type crt,
- struct cl_2queue *queue);
+ struct cl_2queue *queue,
+ enum cl_req_priority priority);
} req_op[CRT_NR];
/**
* Read missing page.
* owner of the conflicting lock, that it can drop dirty pages
* protected by this lock, without sending them to the server.
*/
- CEF_DISCARD_DATA = 0x00000004
+ CEF_DISCARD_DATA = 0x00000004,
+ /**
+ * tell the sub layers that it must be a `real' lock. This is used for
+ * mmapped-buffer locks and glimpse locks that must never be converted
+ * into lockless mode.
+ *
+ * \see vvp_mmap_locks(), cl_glimpse_lock().
+ */
+ CEF_MUST = 0x00000008,
+ /**
+ * tell the sub layers never to request a `real' lock. This flag is
+ * not used currently.
+ *
+ * cl_io::ci_lockreq and CEF_{MUST,NEVER} flags specify lockless
+ * conversion policy: ci_lockreq describes generic information of lock
+ * requirement for this IO, especially for locks which belong to the
+ * object doing IO; however, lock itself may have precise requirements
+ * that are described by the enqueue flags.
+ */
+ CEF_NEVER = 0x00000010,
+ /**
+ * mask of enq_flags.
+ */
+ CEF_MASK = 0x0000001f
};
/**
struct cl_lockset ci_lockset;
/** lock requirements, this is just a help info for sublayers. */
enum cl_io_lock_dmd ci_lockreq;
+ /**
+ * This io has held grouplock, to inform sublayers that
+ * they should not do lockless i/o.
+ */
+ int ci_no_srvlock;
union {
struct cl_rd_io {
struct cl_io_rw_common rd;
int cl_io_lock_add (const struct lu_env *env, struct cl_io *io,
struct cl_io_lock_link *link);
int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
- struct cl_lock_descr *descr);
+ struct cl_lock_descr *descr, int enqflags);
int cl_io_read_page (const struct lu_env *env, struct cl_io *io,
struct cl_page *page);
int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
int cl_io_commit_write (const struct lu_env *env, struct cl_io *io,
struct cl_page *page, unsigned from, unsigned to);
int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io,
- enum cl_req_type iot, struct cl_2queue *queue);
+ enum cl_req_type iot, struct cl_2queue *queue,
+ enum cl_req_priority priority);
void cl_io_rw_advance (const struct lu_env *env, struct cl_io *io,
size_t nob);
int cl_io_cancel (const struct lu_env *env, struct cl_io *io,