/* Maximum page index at which the readahead window will end.
* This is determined by DLM lock coverage, RPC and stripe boundary.
* cra_end_idx is inclusive. */
- pgoff_t cra_end;
+ pgoff_t cra_end_idx;
/* optimal RPC size for this read, by pages */
- unsigned long cra_rpc_size;
+ unsigned long cra_rpc_pages;
/* Release callback. If readahead holds resources underneath, this
* function should be called to release it. */
- void (*cra_release)(const struct lu_env *env, void *cbdata);
+ void (*cra_release)(const struct lu_env *env, void *cbdata);
/* Callback data for cra_release routine */
- void *cra_cbdata;
+ void *cra_cbdata;
/* whether lock is in contention */
- bool cra_contention;
+ bool cra_contention;
};
static inline void cl_read_ahead_release(const struct lu_env *env,
*/
ci_async_readahead:1,
/**
+ * Ignore lockless and do normal locking for this io.
+ */
+ ci_ignore_lockless:1,
+ /**
* Set if we've tried all mirrors for this read IO; if it's not set,
* the read IO will check the to-be-read OSCs' status and fast-switch
* to another mirror if some of the OSTs are not healthy.
* @{ */
struct cl_sync_io;
+struct cl_dio_aio;
typedef void (cl_sync_io_end_t)(const struct lu_env *, struct cl_sync_io *);
void cl_sync_io_init_notify(struct cl_sync_io *anchor, int nr,
- cl_sync_io_end_t *end);
+ struct cl_dio_aio *aio, cl_sync_io_end_t *end);
int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
long timeout);
int ioret);
static inline void cl_sync_io_init(struct cl_sync_io *anchor, int nr)
{
- cl_sync_io_init_notify(anchor, nr, NULL);
+ /* plain synchronous IO: no AIO context and no end-io callback */
+ cl_sync_io_init_notify(anchor, nr, NULL, NULL);
}
/**
wait_queue_head_t csi_waitq;
/** callback to invoke when this IO is finished */
cl_sync_io_end_t *csi_end_io;
+ /** aio private data */
+ struct cl_dio_aio *csi_aio;
+};
+
+/** To support Direct AIO */
+struct cl_dio_aio {
+ /* sync-io anchor embedded in the AIO; passed to
+  * cl_sync_io_init_notify() so completion can find this context */
+ struct cl_sync_io cda_sync;
+ /* pages belonging to this AIO request — NOTE(review): presumably
+  * released when the AIO completes; confirm against the .c side */
+ struct cl_page_list cda_pages;
+ /* kernel AIO control block the request was issued from */
+ struct kiocb *cda_iocb;
+ /* byte count for this AIO — NOTE(review): confirm whether this is
+  * the requested or the transferred size */
+ ssize_t cda_bytes;
};
/** @} cl_sync_io */