client_cksum, server_cksum);
}
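+/* Value placed in the LUSTRE_RES_ID_WAS_VER_OFF slot of the resource ID
+ * below, so the chunk-rounding PW lock is taken on its own lock resource,
+ * separate from the regular extent locks on the object.
+ */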
+#define COMPR_LOCK_ID 0xc5dc
int tgt_brw_write(struct tgt_session_info *tsi)
{
struct ptlrpc_request *req = tgt_ses_req(tsi);
struct niobuf_remote chunk_lock_rnb;
struct ptlrpc_bulk_desc *desc = NULL;
struct lustre_handle lockh = {0};
+ struct lustre_handle compr_lockh = {0};
struct niobuf_local *local_write_nb;
struct niobuf_local *local_read_nb;
struct niobuf_local *local_tx_nb;
int chunk_size;
__u64 io_start;
__u64 io_end;
+ struct ldlm_res_id resid;
chunk_log_bits = olc->ol_compr_chunk_log_bits;
chunk_bits = chunk_log_bits + COMPR_CHUNK_MIN_BITS;
chunk_lock_rnb.rnb_flags = remote_nb[0].rnb_flags;
compr_rounded_write_lock = true;
}
+
+ /* The tgt_brw_lock() below doesn't work in the case when there
+ * are two requests from the same client, where the first one
+ * writes data up to the middle of a chunk and the second one
+ * writes data from the middle of the same chunk.
+ * Take a dedicated lock to serialize this case.
+ */
+ resid = tsi->tsi_resid;
+ resid.name[LUSTRE_RES_ID_WAS_VER_OFF] = COMPR_LOCK_ID;
+ rc = tgt_data_lock(tsi->tsi_env, exp, &resid, chunk_start,
+ chunk_end, &compr_lockh, LCK_PW);
+ if (rc != 0)
+ GOTO(out, rc);
}
rc = tgt_brw_lock(tsi->tsi_env, exp, &tsi->tsi_resid,
tgt_brw_unlock(exp, ioo,
compr_rounded_write_lock ? &chunk_lock_rnb : remote_nb,
&lockh, LCK_PW);
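+ /* Release the per-chunk lock taken for compression-rounded writes, if any */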
+ if (lustre_handle_is_used(&compr_lockh))
+ tgt_data_unlock(&compr_lockh, LCK_PW);
if (desc)
ptlrpc_free_bulk(desc);
out: