else
per_bytes = min(max_io_pages << PAGE_SHIFT, count);
partial_io = per_bytes < count;
+ /* this clears the cl_io entirely (i.e., memset to 0) */
io = vvp_env_thread_io(env);
ll_io_init(io, file, iot, args);
+ /* restore the cl_io members that carry over from the previous IO attempt */
io->ci_dio_aio = ci_dio_aio;
io->ci_dio_lock = dio_lock;
io->ci_ndelay_tried = retried;
RETURN(0);
}
+static struct lov_stripe_md_entry *lov_io_lsme_at(const struct lov_io *lio,
+ const pgoff_t page_index);
static int lov_io_slice_init(struct lov_io *lio,
struct lov_object *obj, struct cl_io *io)
{
- int index;
int result = 0;
bool rdonly;
+ int index;
ENTRY;
io->ci_result = 0;
EXIT;
out:
+ /* we only do this when there is no need to instantiate the layout,
+ * i.e., when result == 0
+ */
+ if (result == 0 &&
+ (io->ci_type == CIT_WRITE || io->ci_type == CIT_READ ||
+ io->ci_type == CIT_FAULT)) {
+ struct lov_stripe_md_entry *lsme;
+ loff_t io_end = lio->lis_endpos - 1;
+ loff_t io_start = lio->lis_pos;
+ loff_t first_chunk_start_bytes = io_start;
+ loff_t last_chunk_end_bytes = io_end;
+ pgoff_t first_chunk_start_idx = 0;
+ pgoff_t last_chunk_end_idx = 0;
+ int chunk_size;
+
+ /* first, check whether the start of the IO lands in a
+ * compressed extent
+ */
+ lsme = lov_io_lsme_at(lio, io_start >> PAGE_SHIFT);
+
+ /* if so, determine the boundary of the leading chunk
+ *
+ * NB: if the IO is past the end of the layout, there is no lsme.
+ */
+ if (lsme && (lsme->lsme_pattern & LOV_PATTERN_COMPRESS)) {
+ int chunk_pages;
+
+ io->ci_compressed_file = true;
+ chunk_size =
+ (1 << (lsme->lsme_compr_chunk_log_bits + 16));
+ chunk_pages = chunk_size >> PAGE_SHIFT;
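+ /* worked example (illustrative values only, not from this
+ * patch): lsme_compr_chunk_log_bits == 2 gives
+ * chunk_size = 64KiB << 2 = 256KiB, so chunk_pages == 64
+ * assuming 4KiB pages
+ */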
+
+ first_chunk_start_bytes = io_start & ~(chunk_size - 1);
+ first_chunk_start_idx =
+ first_chunk_start_bytes >> PAGE_SHIFT;
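+ /* e.g. (illustrative values): with a 64KiB chunk,
+ * io_start == 70000 rounds down to
+ * first_chunk_start_bytes == 65536, i.e. page index 16
+ * assuming 4KiB pages
+ */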
+
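+ /* the leading chunk extent is only recorded when the IO
+ * start is not chunk aligned
+ */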
+ if (first_chunk_start_bytes != io_start) {
+ io->ci_cmpc_leading.e_start =
+ first_chunk_start_idx;
+ io->ci_cmpc_leading.e_end =
+ first_chunk_start_idx + chunk_pages - 1;
+ }
+ }
+
+ /* now we repeat this for the end of the IO */
+ lsme = lov_io_lsme_at(lio, io_end >> PAGE_SHIFT);
+
+ if (lsme && (lsme->lsme_pattern & LOV_PATTERN_COMPRESS)) {
+ int chunk_pages;
+
+ io->ci_compressed_file = true;
+ chunk_size =
+ (1 << (lsme->lsme_compr_chunk_log_bits + 16));
+ chunk_pages = chunk_size >> PAGE_SHIFT;
+
+ last_chunk_end_bytes = io_end | (chunk_size - 1);
+ last_chunk_end_idx = last_chunk_end_bytes >> PAGE_SHIFT;
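+ /* e.g. (illustrative values): with a 64KiB chunk,
+ * io_end == 70000 rounds up to
+ * last_chunk_end_bytes == 131071, the last byte of the
+ * second chunk
+ */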
+
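+ /* likewise, the trailing chunk extent is only recorded
+ * when the IO end is not chunk aligned
+ */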
+ if (last_chunk_end_bytes != io_end) {
+ io->ci_cmpc_trailing.e_start =
+ last_chunk_end_idx - chunk_pages + 1;
+ io->ci_cmpc_trailing.e_end = last_chunk_end_idx;
+ }
+ }
+
+ if (io->ci_compressed_file) {
+ CDEBUG(D_VFSTRACE,
+ "Compressed file, leading chunk and trailing chunk (-1 means aligned): [%lld, %lld] and [%lld, %lld]\n",
+ io->ci_cmpc_leading.e_start,
+ io->ci_cmpc_leading.e_end,
+ io->ci_cmpc_trailing.e_start,
+ io->ci_cmpc_trailing.e_end);
+ }
+ }
+
return result;
}
INIT_LIST_HEAD(&io->ci_lockset.cls_done);
INIT_LIST_HEAD(&io->ci_layers);
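+ /* -1 marks the leading/trailing chunk extents as unset, i.e. the IO
+ * is chunk aligned (see the debug message in lov_io_slice_init())
+ */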
+ io->ci_cmpc_leading.e_start = -1;
+ io->ci_cmpc_leading.e_end = -1;
+ io->ci_cmpc_trailing.e_start = -1;
+ io->ci_cmpc_trailing.e_end = -1;
+
result = 0;
cl_object_for_each(scan, obj) {
if (scan->co_ops->coo_io_init != NULL) {
{
LASSERT(iot == CIT_READ || iot == CIT_WRITE);
LASSERT(io->ci_obj != NULL);
- ENTRY;
+ ENTRY;
LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
"io range: %u [%llu, %llu) %u %u\n",
iot, (__u64)pos, (__u64)pos + count,