struct lu_buf cl_buf;
/** size of layout in lov_mds_md format. */
size_t cl_size;
- /** size of DoM component if exists or zero otherwise */
- u64 cl_dom_comp_size;
/** Layout generation. */
u32 cl_layout_gen;
/** whether layout is a composite one */
struct lu_ref_link cp_queue_ref;
/** Assigned if doing a sync_io */
struct cl_sync_io *cp_sync_io;
+ /** layout_entry + stripe index, composed using lov_comp_index() */
+ unsigned int cp_lov_index;
};
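For reference, a minimal sketch of how cp_lov_index could pack and unpack the layout entry and stripe index. The 16/16-bit split and the helper names below are assumptions for illustration, not taken from this header:

	/* Hypothetical packing helpers, assuming entry in the high 16 bits
	 * and stripe in the low 16 bits. */
	static inline unsigned int lov_comp_index(int entry, int stripe)
	{
		return entry << 16 | stripe;
	}

	static inline int lov_comp_entry(unsigned int index)
	{
		return index >> 16;	/* recover the layout entry */
	}

	static inline int lov_comp_stripe(unsigned int index)
	{
		return index & 0xffff;	/* recover the stripe index */
	}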
/**
};
typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
- struct cl_page *);
+ struct pagevec *);
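To illustrate the new signature, here is a hedged sketch of a commit callback that now receives a batch of pages in a pagevec instead of being invoked once per cl_page. The callback name and the per-page handling are illustrative only:

	#include <linux/pagevec.h>

	/* Hypothetical cl_commit_cbt implementation: walk the batched
	 * VM pages rather than handling one cl_page per call. */
	static void demo_commit_cb(const struct lu_env *env, struct cl_io *io,
				   struct pagevec *pvec)
	{
		int i;

		for (i = 0; i < pagevec_count(pvec); i++) {
			struct page *vmpage = pvec->pages[i];

			SetPageUptodate(vmpage);	/* data is in cache */
			unlock_page(vmpage);		/* hand the page back */
		}
	}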
struct cl_read_ahead {
- /* Maximum page index the readahead window will end.
- * This is determined DLM lock coverage, RPC and stripe boundary.
- * cra_end is included. */
+ /* Maximum page index at which the readahead window will end.
+ * This is determined by DLM lock coverage, RPC and stripe
+ * boundaries. cra_end_idx is inclusive. */
- pgoff_t cra_end;
+ pgoff_t cra_end_idx;
/* optimal RPC size for this read, by pages */
- unsigned long cra_rpc_size;
+ unsigned long cra_rpc_pages;
/* Release callback. If readahead holds resources underneath, this
 * function should be called to release them. */
- void (*cra_release)(const struct lu_env *env, void *cbdata);
+ void (*cra_release)(const struct lu_env *env, void *cbdata);
/* Callback data for cra_release routine */
- void *cra_cbdata;
+ void *cra_cbdata;
+ /* whether lock is in contention */
+ bool cra_contention;
};
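As a usage illustration, a layer's readahead callback might fill the structure along these lines (a sketch; the function and parameter names are assumptions):

	/* Hypothetical helper showing how the renamed fields fit together. */
	static void demo_fill_read_ahead(struct cl_read_ahead *ra,
					 pgoff_t last_covered_idx,
					 unsigned long rpc_pages,
					 bool contended)
	{
		ra->cra_end_idx = last_covered_idx;	/* inclusive window end */
		ra->cra_rpc_pages = rpc_pages;		/* optimal RPC size, in pages */
		ra->cra_contention = contended;		/* lock contention hint */
		ra->cra_release = NULL;			/* no resources held here */
		ra->cra_cbdata = NULL;
	}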
static inline void cl_read_ahead_release(const struct lu_env *env,
/**
* Set if IO is triggered by async workqueue readahead.
*/
- ci_async_readahead:1;
+ ci_async_readahead:1,
+ /**
+ * Ignore lockless and do normal locking for this io.
+ */
+ ci_ignore_lockless:1,
+ /**
+ * Set if we've tried all mirrors for this read IO. If it's not set,
+ * the read IO will check the status of the OSCs to be read and
+ * fast-switch to another mirror if some of the OSTs are not healthy.
+ */
+ ci_tried_all_mirrors:1;
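The flag is consumed in a retry loop along these lines (a sketch; demo_mirror_read() is an assumed helper that performs one read attempt and lets LOV set ci_tried_all_mirrors once every mirror has been tried):

	/* Hypothetical fast-switch loop for mirrored reads. */
	static ssize_t demo_read_with_mirrors(const struct lu_env *env,
					      struct cl_io *io)
	{
		ssize_t rc;

		do {
			rc = demo_mirror_read(env, io);	/* one attempt */
			/* on failure, retry another mirror until all tried */
		} while (rc < 0 && !io->ci_tried_all_mirrors);

		return rc;
	}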
/**
* How many times the read has been retried before this one.
* Set by the top level and consumed by the LOV.
/** \defgroup cl_sync_io cl_sync_io
* @{ */
+struct cl_sync_io;
+struct cl_dio_aio;
+
+typedef void (cl_sync_io_end_t)(const struct lu_env *, struct cl_sync_io *);
+
+void cl_sync_io_init_notify(struct cl_sync_io *anchor, int nr,
+ struct cl_dio_aio *aio, cl_sync_io_end_t *end);
+
+int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
+ long timeout);
+void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
+ int ioret);
+static inline void cl_sync_io_init(struct cl_sync_io *anchor, int nr)
+{
+ cl_sync_io_init_notify(anchor, nr, NULL, NULL);
+}
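A typical synchronous caller pairs these as follows (a minimal sketch; the submit step stands in for whatever transfer eventually calls cl_sync_io_note()):

	/* Hypothetical synchronous wait pattern. */
	static int demo_sync_transfer(const struct lu_env *env)
	{
		struct cl_sync_io anchor;
		int rc;

		cl_sync_io_init(&anchor, 1);	/* expect one completion */

		/* ... submit the transfer; its completion path is expected
		 * to call cl_sync_io_note(env, &anchor, ioret) ... */

		/* timeout of 0 is assumed to mean wait indefinitely */
		rc = cl_sync_io_wait(env, &anchor, 0);
		return rc;
	}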
+
/**
* Anchor for synchronous transfer. This is allocated on the stack by the
* thread doing a synchronous transfer, and a pointer to this structure is set up in
/** completion to be signaled when transfer is complete. */
wait_queue_head_t csi_waitq;
/** callback to invoke when this IO is finished */
- void (*csi_end_io)(const struct lu_env *,
- struct cl_sync_io *);
+ cl_sync_io_end_t *csi_end_io;
+ /** aio private data */
+ struct cl_dio_aio *csi_aio;
};
-void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
- void (*end)(const struct lu_env *, struct cl_sync_io *));
-int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
- long timeout);
-void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
- int ioret);
-void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
+/** Per-request state to support direct AIO */
+struct cl_dio_aio {
+ struct cl_sync_io cda_sync;
+ struct cl_page_list cda_pages;
+ struct kiocb *cda_iocb;
+ ssize_t cda_bytes;
+};
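Putting the pieces together, the AIO path can register an end callback via cl_sync_io_init_notify() and complete the original kiocb from it. This is a sketch only: cl_aio_end() is an illustrative name, and it assumes the three-argument ki_complete() of contemporary kernels:

	/* Hypothetical completion hook for direct AIO (success path shown). */
	static void cl_aio_end(const struct lu_env *env,
			       struct cl_sync_io *anchor)
	{
		struct cl_dio_aio *aio = container_of(anchor,
						      struct cl_dio_aio,
						      cda_sync);

		/* complete the original request with the byte count */
		aio->cda_iocb->ki_complete(aio->cda_iocb, aio->cda_bytes, 0);
	}

Registration would then look like cl_sync_io_init_notify(&aio->cda_sync, nr, aio, cl_aio_end), with one cl_sync_io_note() expected per outstanding transfer.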
/** @} cl_sync_io */