struct range_lock *range = &ofd_info(env)->fti_write_range;
struct dt_object *dt_obj = NULL;
struct ofd_object *fo;
- enum dt_bufs_type dbt = DT_BUFS_TYPE_WRITE;
int chunk_size = chunk_bits ? 1 << chunk_bits : 0;
+ enum dt_bufs_type dbt = DT_BUFS_TYPE_WRITE;
+ bool compr_unaligned_write = false; /* needs read-modify-write */
int maxlnb = *nr_local;
__u64 prev_buf_end = 0;
int tot_bytes = 0;
continue;
}
+ /* rounding changed the range, so this write is chunk-unaligned */
if (buf_start != orig_start || buf_end != orig_end) {
/* get attr only once for each IO */
if (!dt_obj) {
if (buf_start >= la->la_size) {
buf_start = orig_start;
buf_end = orig_end;
+ } else {
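+ /* the rounded range reaches below the current size, so the
+ * chunks this write touches must be read in first to preserve
+ * the existing data (read-modify-write)
+ */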
+ compr_unaligned_write = true;
}
}
prev_buf_end = buf_end;
GOTO(err, rc);
}
+ if (compr_unaligned_write) {
+ /* For now, the read does not hold the pages locked, since it is
+ * not doing decompression; this will change shortly.
+ */
+ rc = dt_read_prep(env, ofd_object_child(fo), lnb, *nr_local,
+ false);
+ if (unlikely(rc != 0))
+ GOTO(err, rc);
+ /* dt_read_prep() sets lnb_rc to the bytes read, or to an error,
+ * but the write path expects lnb_rc to be zero on success, so
+ * clear the positive values here and leave any error intact
+ */
+ for (i = 0; i < *nr_local; i++) {
+ if (lnb[i].lnb_rc > 0)
+ lnb[i].lnb_rc = 0;
+ }
+ }
+
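+ /* any pre-existing chunk data is now in the pages; set up the
+ * buffers for the write itself
+ */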
rc = dt_write_prep(env, ofd_object_child(fo), lnb, *nr_local);
if (unlikely(rc != 0))
GOTO(err, rc);
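
For context, the part of the hunk elided above rounds each I/O range out to
compression chunk boundaries; that is what makes the
buf_start != orig_start || buf_end != orig_end test meaningful. Below is a
minimal sketch of such rounding, not taken from the patch, assuming only that
chunk_bits is the log2 of the chunk size as the chunk_size declaration above
implies; the helper name chunk_round is hypothetical.

#include <stdio.h>

typedef unsigned long long u64;	/* stand-in for the kernel's __u64 */

/* Hypothetical helper: widen [orig_start, orig_end) to chunk
 * boundaries when compression is enabled (chunk_bits != 0).
 */
static void chunk_round(u64 orig_start, u64 orig_end, int chunk_bits,
			u64 *buf_start, u64 *buf_end)
{
	u64 mask;

	if (chunk_bits == 0) {
		*buf_start = orig_start;
		*buf_end = orig_end;
		return;
	}
	mask = ((u64)1 << chunk_bits) - 1;
	*buf_start = orig_start & ~mask;	/* round down to chunk start */
	*buf_end = (orig_end + mask) & ~mask;	/* round up to chunk end */
}

int main(void)
{
	u64 start, end;

	/* a write of [6144, 10240) with 64KiB chunks (chunk_bits = 16)
	 * expands to the whole first chunk [0, 65536)
	 */
	chunk_round(6144, 10240, 16, &start, &end);
	printf("[%llu, %llu)\n", start, end);
	return 0;
}

When the rounded range equals the original one, the write is already
chunk-aligned and the read-modify-write path above is skipped entirely.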