From: Alex Zhuravlev
Date: Wed, 24 Apr 2024 11:53:17 +0000 (+0300)
Subject: EX-9192 ofd: take local chunk-aligned lock
X-Git-Url: https://git.whamcloud.com/gitweb?a=commitdiff_plain;h=bc31bc100518f938a69b941eb78684e3e12d18d8;p=fs%2Flustre-release.git

EX-9192 ofd: take local chunk-aligned lock

Take a local chunk-aligned lock on the OST side to prevent a racing
read-modify-write against the same compressed chunk from the same
client.

Signed-off-by: Alex Zhuravlev
Signed-off-by: Artem Blagodarenko
Change-Id: Iffaf2d2856e276cb2f9becce2506154314217e3c
Reviewed-on: https://review.whamcloud.com/c/ex/lustre-release/+/54890
Reviewed-by: Artem Blagodarenko
Reviewed-by: Andreas Dilger
Tested-by: jenkins
Tested-by: Maloo
---

diff --git a/lustre/target/tgt_handler.c b/lustre/target/tgt_handler.c
index 54f87b7..d6c82c2 100644
--- a/lustre/target/tgt_handler.c
+++ b/lustre/target/tgt_handler.c
@@ -2836,6 +2836,7 @@ static void tgt_warn_on_cksum(struct ptlrpc_request *req,
 			       client_cksum, server_cksum);
 }
 
+#define COMPR_LOCK_ID 0xc5dc
 int tgt_brw_write(struct tgt_session_info *tsi)
 {
 	struct ptlrpc_request *req = tgt_ses_req(tsi);
@@ -2844,6 +2845,7 @@ int tgt_brw_write(struct tgt_session_info *tsi)
 	struct niobuf_remote chunk_lock_rnb;
 	struct ptlrpc_bulk_desc *desc = NULL;
 	struct lustre_handle lockh = {0};
+	struct lustre_handle compr_lockh = {0};
 	struct niobuf_local *local_write_nb;
 	struct niobuf_local *local_read_nb;
 	struct niobuf_local *local_tx_nb;
@@ -2995,6 +2997,7 @@ int tgt_brw_write(struct tgt_session_info *tsi)
 		int chunk_size;
 		__u64 io_start;
 		__u64 io_end;
+		struct ldlm_res_id resid;
 
 		chunk_log_bits = olc->ol_compr_chunk_log_bits;
 		chunk_bits = chunk_log_bits + COMPR_CHUNK_MIN_BITS;
@@ -3022,6 +3025,19 @@
 			chunk_lock_rnb.rnb_flags = remote_nb[0].rnb_flags;
 			compr_rounded_write_lock = true;
 		}
+
+		/* The tgt_brw_lock() below doesn't help when there
+		 * are two requests from the same client, where the
+		 * first writes data until the middle of a chunk and
+		 * the second one writes data from the middle of the
+		 * chunk.  Let's add a special lock for this case.
+		 */
+		resid = tsi->tsi_resid;
+		resid.name[LUSTRE_RES_ID_WAS_VER_OFF] = COMPR_LOCK_ID;
+		rc = tgt_data_lock(tsi->tsi_env, exp, &resid, chunk_start,
+				   chunk_end, &compr_lockh, LCK_PW);
+		if (rc != 0)
+			GOTO(out, rc);
 	}
 
 	rc = tgt_brw_lock(tsi->tsi_env, exp, &tsi->tsi_resid,
@@ -3241,6 +3257,8 @@ out_lock:
 		tgt_brw_unlock(exp, ioo, compr_rounded_write_lock ?
 			       &chunk_lock_rnb : remote_nb,
 			       &lockh, LCK_PW);
+	if (lustre_handle_is_used(&compr_lockh))
+		tgt_data_unlock(&compr_lockh, LCK_PW);
 	if (desc)
 		ptlrpc_free_bulk(desc);
 out:
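
The [chunk_start, chunk_end] extent passed to tgt_data_lock() above is
the write range rounded outward to compressed-chunk boundaries (the
rounding itself happens in context elided from this hunk). A minimal
sketch of that power-of-two rounding, assuming chunk_bits as computed
above; the helper names here are hypothetical, not taken from the
Lustre tree:

    #include <stdint.h>
    #include <stdio.h>

    /* Round an offset down to the first byte of its chunk. */
    static uint64_t chunk_start_of(uint64_t off, unsigned int chunk_bits)
    {
            return off & ~(((uint64_t)1 << chunk_bits) - 1);
    }

    /* Round an (inclusive) offset up to the last byte of its chunk. */
    static uint64_t chunk_end_of(uint64_t off, unsigned int chunk_bits)
    {
            return off | (((uint64_t)1 << chunk_bits) - 1);
    }

    int main(void)
    {
            unsigned int chunk_bits = 16;   /* 64 KiB chunks */
            uint64_t io_start = 100000;     /* write covers [io_start, io_end) */
            uint64_t io_end = 150000;

            /* The lock extent covers every chunk the write touches. */
            printf("lock [%llu, %llu]\n",
                   (unsigned long long)chunk_start_of(io_start, chunk_bits),
                   (unsigned long long)chunk_end_of(io_end - 1, chunk_bits));
            return 0;
    }

With 64 KiB chunks this prints "lock [65536, 196607]": the chunks
containing bytes 100000 and 149999 both fall under one PW lock, so a
second write into either chunk must wait for the first to finish its
read-modify-write.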
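
The resid manipulation above keeps this server-local lock out of the
way of ordinary extent locks: the object's resource id is copied and
one slot is stamped with COMPR_LOCK_ID, so the lock lives in its own
LDLM resource. A simplified stand-in for illustration only; the struct,
the slot offset, and the helper below are invented here and differ from
the real struct ldlm_res_id and LUSTRE_RES_ID_WAS_VER_OFF:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define RES_NAME_SIZE 4
    #define WAS_VER_OFF   2      /* stand-in for LUSTRE_RES_ID_WAS_VER_OFF */
    #define COMPR_LOCK_ID 0xc5dc

    struct res_id { uint64_t name[RES_NAME_SIZE]; };

    /* Derive the compression-lock resource from the object resource:
     * same object identity, distinct lock namespace. */
    static struct res_id compr_resid(struct res_id obj)
    {
            obj.name[WAS_VER_OFF] = COMPR_LOCK_ID;
            return obj;
    }

    int main(void)
    {
            struct res_id obj = { .name = { 0x1234, 0x5678, 0, 0 } };
            struct res_id a = compr_resid(obj);
            struct res_id b = compr_resid(obj);

            /* Two writes to the same object contend on the same derived
             * resource, which never equals the object resource itself. */
            printf("a == b: %d, a == obj: %d\n",
                   memcmp(&a, &b, sizeof(a)) == 0,
                   memcmp(&a, &obj, sizeof(a)) == 0);
            return 0;
    }

Because the derived id differs from the plain object resource, clients
holding extent locks on the object are unaffected; only concurrent
tgt_brw_write() calls taking the same compression lock serialize, which
is exactly the same-client race the commit message describes.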