EX-7601 tgt: round write lock to chunk
author Patrick Farrell <pfarrell@whamcloud.com>
Thu, 2 Nov 2023 21:31:41 +0000 (17:31 -0400)
committer Andreas Dilger <adilger@whamcloud.com>
Fri, 29 Dec 2023 11:00:47 +0000 (11:00 +0000)
For unaligned writes, we need to round the write locking to
cover any leading or trailing chunks.  We do this by creating
a local 'remote niobuf' to describe the rounded range and
doing the locking against that niobuf.
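
As a rough illustration of the rounding described above, here is a
standalone sketch with stand-in types and a hypothetical helper name
(build_chunk_lock_rnb); in the actual patch the chunk_start/chunk_end
values are already computed earlier in tgt_brw_write() and the rounded
niobuf is filled in inline:

#include <stdint.h>

/* Stand-in for the kernel structure; the field names mirror the ones
 * used in the patch (rnb_offset, rnb_len, rnb_flags).
 */
struct niobuf_remote {
        uint64_t rnb_offset;
        uint32_t rnb_len;
        uint32_t rnb_flags;
};

/* Round the write range [io_start, io_end) out to chunk boundaries and
 * describe the rounded range with a single niobuf used only for extent
 * locking.  chunk_size is assumed to be a power of two.
 */
static void build_chunk_lock_rnb(struct niobuf_remote *lock_rnb,
                                 const struct niobuf_remote *remote_nb,
                                 int niocount, uint64_t chunk_size)
{
        uint64_t io_start = remote_nb[0].rnb_offset;
        uint64_t io_end = remote_nb[niocount - 1].rnb_offset +
                          remote_nb[niocount - 1].rnb_len;
        uint64_t chunk_start = io_start & ~(chunk_size - 1);
        uint64_t chunk_end = (io_end + chunk_size - 1) & ~(chunk_size - 1);

        lock_rnb->rnb_offset = chunk_start;
        lock_rnb->rnb_len = chunk_end - chunk_start;
        lock_rnb->rnb_flags = remote_nb[0].rnb_flags;
}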

Signed-off-by: Patrick Farrell <pfarrell@whamcloud.com>
Change-Id: I2bdea620386ad229375647a0e2cc6180c9bd7aa6
Reviewed-on: https://review.whamcloud.com/c/ex/lustre-release/+/52961
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Artem Blagodarenko <ablagodarenko@ddn.com>
lustre/target/tgt_handler.c

index 01787c3..1963c1d 100644
@@ -2804,6 +2804,7 @@ int tgt_brw_write(struct tgt_session_info *tsi)
        struct ptlrpc_request   *req = tgt_ses_req(tsi);
        struct tgt_thread_big_cache *tbc = req->rq_svc_thread->t_data;
        struct obd_export       *exp = req->rq_export;
+       struct niobuf_remote     chunk_lock_rnb;
        struct ptlrpc_bulk_desc *desc = NULL;
        struct lustre_handle     lockh = {0};
        struct niobuf_remote    *remote_nb;
@@ -2814,6 +2815,7 @@ int tgt_brw_write(struct tgt_session_info *tsi)
        struct ost_layout_compr *olc;
        const char *obd_name = exp->exp_obd->obd_name;
        enum cksum_types cksum_type = OBD_CKSUM_CRC32;
+       bool compr_rounded_write_lock = false;
        /* '1' for consistency with code that checks !mpflag to restore */
        unsigned int mpflags = 1;
        enum ll_compr_type type;
@@ -2952,15 +2954,26 @@ int tgt_brw_write(struct tgt_session_info *tsi)
                       io_start, io_end, chunk_start, chunk_end);
 
                /* the start or end of this IO is unaligned */
-               if (io_start != chunk_start || io_end != chunk_end)
-                       CDEBUG(D_SEC, "unaligned write\n");
+               if (io_start != chunk_start || io_end != chunk_end) {
+                       chunk_lock_rnb.rnb_offset = chunk_start;
+                       chunk_lock_rnb.rnb_len = chunk_end - chunk_start;
+                       chunk_lock_rnb.rnb_flags = remote_nb[0].rnb_flags;
+                       compr_rounded_write_lock = true;
+               }
        }
 
-       rc = tgt_brw_lock(tsi->tsi_env, exp, &tsi->tsi_resid, remote_nb, &lockh,
-                         LCK_PW, niocount);
+       rc = tgt_brw_lock(tsi->tsi_env, exp, &tsi->tsi_resid,
+                         compr_rounded_write_lock ? &chunk_lock_rnb : remote_nb,
+                         &lockh, LCK_PW,
+                         compr_rounded_write_lock ? 1 : niocount);
        if (rc != 0)
                GOTO(out, rc);
 
+       /* NB/FIXME/to-be-removed: we can't do the 'skip unaligned io and
+        * return EINVAL' trick like we do for reads, because unaligned writes
+        * at EOF are supported
+        */
+
        /*
         * If getting the lock took more time than
         * client was willing to wait, drop it. b=11330
@@ -3136,7 +3149,9 @@ out_commitrw:
                ptlrpc_lprocfs_brw(req, nob);
        }
 out_lock:
-       tgt_brw_unlock(exp, ioo, remote_nb, &lockh, LCK_PW);
+       tgt_brw_unlock(exp, ioo,
+                      compr_rounded_write_lock ? &chunk_lock_rnb : remote_nb,
+                      &lockh, LCK_PW);
        if (desc)
                ptlrpc_free_bulk(desc);
 out:
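
For example, with an assumed 64 KiB chunk size, a single write niobuf
covering bytes [100000, 200000) would be locked over the rounded range
[65536, 262144), and the same rounded niobuf is what tgt_brw_unlock()
sees in the out_lock path above.  Usage of the hypothetical helper from
the sketch in the commit message:

        struct niobuf_remote nb = { .rnb_offset = 100000, .rnb_len = 100000 };
        struct niobuf_remote lock_rnb;

        build_chunk_lock_rnb(&lock_rnb, &nb, 1, 65536);
        /* lock_rnb.rnb_offset == 65536, lock_rnb.rnb_len == 196608: the
         * PW lock covers the whole leading and trailing chunks.
         */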