From 2f8f38effac3a95199cdcdbd4854f958cdb0c72c Mon Sep 17 00:00:00 2001
From: Qian Yingjin
Date: Fri, 20 Jan 2023 12:30:27 -0500
Subject: [PATCH] LU-16412 llite: check read page past requested

Due to a kernel bug introduced in 5.12 in commit
cbd59c48ae2bcadc4a7599c29cf32fd3f9b78251 ("mm/filemap: use head pages
in generic_file_buffered_read"), if the page immediately after the
current read is in cache, the kernel will try to read it.  This
attempts to read a page past the end of the read requested from
userspace, so that page has not been safely locked by Lustre.

For a page after the end of the current read, check whether it is
under the protection of a DLM lock.  If so, we take a reference on
the DLM lock until the page read has finished and then release the
reference.  If the page is not covered by a DLM lock, then we are
racing with the page being removed from Lustre.  In that case, we
return AOP_TRUNCATED_PAGE, which makes the kernel release its
reference on the page and retry the page read.  This allows the page
to be removed from cache, so the kernel will not find it and
incorrectly attempt to read it again.

NB: Earlier versions of this description refer to stripe boundaries,
but the locking issue can occur whether or not the page is on a
stripe boundary, because DLM locks can cover part of a stripe.
(This is rare, but is allowed.)

Change-Id: Ib93bd0624fda0ed1c2b89f609d15208c86e21c29
Signed-off-by: Qian Yingjin
Signed-off-by: Patrick Farrell
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/49723
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: Zhenyu Xu
Reviewed-by: Oleg Drokin
---
 lustre/llite/llite_internal.h |  2 ++
 lustre/llite/rw.c             | 57 ++++++++++++++++++++++++++++++++++++++++---
 lustre/llite/vvp_io.c         |  9 +++++++
 3 files changed, 65 insertions(+), 3 deletions(-)

diff --git a/lustre/llite/llite_internal.h b/lustre/llite/llite_internal.h
index 781ea86..60ac650 100644
--- a/lustre/llite/llite_internal.h
+++ b/lustre/llite/llite_internal.h
@@ -1391,6 +1391,8 @@ struct ll_cl_context {
 	struct cl_io *lcc_io;
 	struct cl_page *lcc_page;
 	enum lcc_type lcc_type;
+	struct kiocb *lcc_iocb;
+	struct iov_iter *lcc_iter;
 };
 
 struct ll_thread_info {
diff --git a/lustre/llite/rw.c b/lustre/llite/rw.c
index aaf7433..49f1688 100644
--- a/lustre/llite/rw.c
+++ b/lustre/llite/rw.c
@@ -1857,11 +1857,14 @@ int ll_readpage(struct file *file, struct page *vmpage)
 {
 	struct inode *inode = file_inode(file);
 	struct cl_object *clob = ll_i2info(inode)->lli_clob;
-	struct ll_cl_context *lcc;
+	struct ll_sb_info *sbi = ll_i2sbi(inode);
 	const struct lu_env *env = NULL;
+	struct cl_read_ahead ra = { 0 };
+	struct ll_cl_context *lcc;
 	struct cl_io *io = NULL;
+	struct iov_iter *iter;
 	struct cl_page *page;
-	struct ll_sb_info *sbi = ll_i2sbi(inode);
+	struct kiocb *iocb;
 	int result;
 	ENTRY;
 
@@ -1912,6 +1915,8 @@ int ll_readpage(struct file *file, struct page *vmpage)
 		struct ll_readahead_state *ras = &fd->fd_ras;
 		struct lu_env *local_env = NULL;
 
+		CDEBUG(D_VFSTRACE, "fast read pgno: %ld\n", vmpage->index);
+
 		result = -ENODATA;
 
 		/* TODO: need to verify the layout version to make sure
@@ -1964,6 +1969,47 @@ int ll_readpage(struct file *file, struct page *vmpage)
 		RETURN(result);
 	}
 
+	if (lcc && lcc->lcc_type != LCC_MMAP) {
+		iocb = lcc->lcc_iocb;
+		iter = lcc->lcc_iter;
+
+		CDEBUG(D_VFSTRACE, "pgno:%ld, cnt:%ld, pos:%lld\n",
+		       vmpage->index, iter->count, iocb->ki_pos);
+
+		/*
+		 * This handles a kernel bug introduced in kernel 5.12:
+		 * commit cbd59c48ae2bcadc4a7599c29cf32fd3f9b78251
+		 * ("mm/filemap: use head pages in generic_file_buffered_read")
+		 *
+		 * See above in this function for a full description of the
+		 * bug.  Briefly, the kernel will try to read 1 more page than
+		 * was actually requested *if that page is already in cache*.
+		 *
+		 * Because this page is beyond the boundary of the requested
+		 * read, Lustre does not lock it as part of the read.  This
+		 * means we must check if there is a valid dlmlock on this
+		 * page and reference it before we attempt to read in the
+		 * page.  If there is not a valid dlmlock, then we are racing
+		 * with dlmlock cancellation and the page is being removed
+		 * from the cache.
+		 *
+		 * That means we should return AOP_TRUNCATED_PAGE, which will
+		 * cause the kernel to retry the read, which should allow the
+		 * page to be removed from cache as the lock is cancelled.
+		 *
+		 * This should never occur except in kernels with the bug
+		 * mentioned above.
+		 */
+		if (cl_offset(clob, vmpage->index) >= iter->count + iocb->ki_pos) {
+			result = cl_io_read_ahead(env, io, vmpage->index, &ra);
+			if (result < 0 || vmpage->index > ra.cra_end_idx) {
+				cl_read_ahead_release(env, &ra);
+				unlock_page(vmpage);
+				RETURN(AOP_TRUNCATED_PAGE);
+			}
+		}
+	}
+
 	/**
 	 * Direct read can fall back to buffered read, but DIO is done
 	 * with lockless i/o, and buffered requires LDLM locking, so in
@@ -1975,7 +2021,7 @@ int ll_readpage(struct file *file, struct page *vmpage)
 		unlock_page(vmpage);
 		io->ci_dio_lock = 1;
 		io->ci_need_restart = 1;
-		RETURN(-ENOLCK);
+		GOTO(out, result = -ENOLCK);
 	}
 
 	LASSERT(io->ci_state == CIS_IO_GOING);
@@ -1996,5 +2042,10 @@ int ll_readpage(struct file *file, struct page *vmpage)
 		unlock_page(vmpage);
 		result = PTR_ERR(page);
 	}
+
+out:
+	if (ra.cra_release != NULL)
+		cl_read_ahead_release(env, &ra);
+
 	RETURN(result);
 }
diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c
index f1cb18d..1af0940 100644
--- a/lustre/llite/vvp_io.c
+++ b/lustre/llite/vvp_io.c
@@ -821,6 +821,7 @@ static int vvp_io_read_start(const struct lu_env *env,
 	loff_t pos = io->u.ci_rd.rd.crw_pos;
 	size_t cnt = io->u.ci_rd.rd.crw_count;
 	size_t tot = vio->vui_tot_count;
+	struct ll_cl_context *lcc;
 	int exceed = 0;
 	int result;
 	struct iov_iter iter;
@@ -886,7 +887,15 @@ static int vvp_io_read_start(const struct lu_env *env,
 	file_accessed(file);
 	LASSERT(vio->vui_iocb->ki_pos == pos);
 	iter = *vio->vui_iter;
+
+	lcc = ll_cl_find(inode);
+	lcc->lcc_iter = &iter;
+	lcc->lcc_iocb = vio->vui_iocb;
+	CDEBUG(D_VFSTRACE, "cnt:%ld,iocb pos:%lld\n", lcc->lcc_iter->count,
+	       lcc->lcc_iocb->ki_pos);
+
 	result = generic_file_read_iter(vio->vui_iocb, &iter);
+
 out:
 	if (result >= 0) {
 		if (result < cnt)
-- 
1.8.3.1