From b651089f859e8269af7272b91f5e60aa25f24226 Mon Sep 17 00:00:00 2001 From: Alexander Zarochentsev Date: Sat, 21 Dec 2019 02:19:44 +0300 Subject: [PATCH] LU-13228 clio: mmap write when overquota Flagging the client with the overquota flag should not cause an mmap write access to SIGBUS the application. Cray-bug-id: LUS-8221 Signed-off-by: Alexander Zarochentsev Change-Id: I29d5901fa5078b5cfca40391a02531cf27efce93 Reviewed-on: https://review.whamcloud.com/37495 Tested-by: jenkins Tested-by: Maloo Reviewed-by: Andrew Perepechko Reviewed-by: Andriy Skulysh Reviewed-by: Oleg Drokin --- lustre/include/cl_object.h | 4 ++++ lustre/llite/vvp_internal.h | 1 + lustre/llite/vvp_io.c | 28 +++++++++++++++++++++++++--- lustre/lov/lov_io.c | 2 ++ lustre/obdclass/cl_io.c | 3 ++- lustre/osc/osc_cache.c | 2 +- lustre/tests/sanity-quota.sh | 5 +++++ 7 files changed, 40 insertions(+), 5 deletions(-) diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h index 3e31227..202bec2 100644 --- a/lustre/include/cl_object.h +++ b/lustre/include/cl_object.h @@ -1925,6 +1925,10 @@ struct cl_io { */ ci_tried_all_mirrors:1; /** + * Bypass quota check + */ + unsigned ci_noquota:1; + /** * How many times the read has retried before this one. * Set by the top level and consumed by the LOV.
*/ diff --git a/lustre/llite/vvp_internal.h b/lustre/llite/vvp_internal.h index 4f6eb0a..5384e0e 100644 --- a/lustre/llite/vvp_internal.h +++ b/lustre/llite/vvp_internal.h @@ -88,6 +88,7 @@ struct vvp_io { * check that flags are from filemap_fault */ bool ft_flags_valid; + struct cl_page_list ft_queue; } fault; struct { struct pipe_inode_info *vui_pipe; diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c index 007a8b3..fe49b59 100644 --- a/lustre/llite/vvp_io.c +++ b/lustre/llite/vvp_io.c @@ -1457,7 +1457,7 @@ static int vvp_io_fault_start(const struct lu_env *env, if (fio->ft_mkwrite) { wait_on_page_writeback(vmpage); if (!PageDirty(vmpage)) { - struct cl_page_list *plist = &io->ci_queue.c2_qin; + struct cl_page_list *plist = &vio->u.fault.ft_queue; struct vvp_page *vpg = cl_object_page_slice(obj, page); int to = PAGE_SIZE; @@ -1469,13 +1469,34 @@ static int vvp_io_fault_start(const struct lu_env *env, /* size fixup */ if (last_index == vvp_index(vpg)) - to = size & ~PAGE_MASK; + to = ((size - 1) & ~PAGE_MASK) + 1; /* Do not set Dirty bit here so that in case IO is * started before the page is really made dirty, we * still have chance to detect it. 
*/ result = cl_io_commit_async(env, io, plist, 0, to, mkwrite_commit_callback); + /* Have overquota flag, trying sync write to check + * whether indeed out of quota */ + if (result == -EDQUOT) { + cl_page_get(page); + result = vvp_io_commit_sync(env, io, + plist, 0, to); + if (result >= 0) { + io->ci_noquota = 1; + cl_page_own(env, io, page); + cl_page_list_add(plist, page); + lu_ref_add(&page->cp_reference, + "cl_io", io); + result = cl_io_commit_async(env, io, + plist, 0, to, + mkwrite_commit_callback); + io->ci_noquota = 0; + } else { + cl_page_put(env, page); + } + } + LASSERT(cl_page_is_owned(page, io)); cl_page_list_fini(env, plist); @@ -1490,8 +1511,9 @@ static int vvp_io_fault_start(const struct lu_env *env, if (result == -EDQUOT) result = -ENOSPC; GOTO(out, result); - } else + } else { cl_page_disown(env, io, page); + } } } diff --git a/lustre/lov/lov_io.c b/lustre/lov/lov_io.c index 92c84d9..9f81d9b 100644 --- a/lustre/lov/lov_io.c +++ b/lustre/lov/lov_io.c @@ -184,6 +184,8 @@ struct lov_io_sub *lov_sub_get(const struct lu_env *env, out: if (rc < 0) sub = ERR_PTR(rc); + else + sub->sub_io.ci_noquota = lio->lis_cl.cis_io->ci_noquota; RETURN(sub); } diff --git a/lustre/obdclass/cl_io.c b/lustre/obdclass/cl_io.c index 45d9307..bc6dba9 100644 --- a/lustre/obdclass/cl_io.c +++ b/lustre/obdclass/cl_io.c @@ -650,6 +650,7 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor; struct cl_page *pg; int rc; + ENTRY; cl_page_list_for_each(pg, &queue->c2_qin) { LASSERT(pg->cp_sync_io == NULL); @@ -678,7 +679,7 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, cl_page_list_for_each(pg, &queue->c2_qin) pg->cp_sync_io = NULL; } - return rc; + RETURN(rc); } EXPORT_SYMBOL(cl_io_submit_sync); diff --git a/lustre/osc/osc_cache.c b/lustre/osc/osc_cache.c index 9e345c1..51b9905 100644 --- a/lustre/osc/osc_cache.c +++ b/lustre/osc/osc_cache.c @@ -2374,7 +2374,7 @@ int 
osc_queue_async_io(const struct lu_env *env, struct cl_io *io, /* Set the OBD_BRW_SRVLOCK before the page is queued. */ brw_flags |= ops->ops_srvlock ? OBD_BRW_SRVLOCK : 0; - if (oio->oi_cap_sys_resource) { + if (oio->oi_cap_sys_resource || io->ci_noquota) { brw_flags |= OBD_BRW_NOQUOTA; cmd |= OBD_BRW_NOQUOTA; } diff --git a/lustre/tests/sanity-quota.sh b/lustre/tests/sanity-quota.sh index 037ff4b..5b08e95 100755 --- a/lustre/tests/sanity-quota.sh +++ b/lustre/tests/sanity-quota.sh @@ -755,6 +755,11 @@ test_block_soft() { OFFSET=$((OFFSET + 1024)) # make sure we don't write to same block cancel_lru_locks osc + echo "mmap write when over soft limit" + $RUNAS $MULTIOP $TESTFILE.mmap OT40960SMW || + quota_error a $TSTUSR "mmap write failure, but expect success" + cancel_lru_locks osc + $SHOW_QUOTA_USER $SHOW_QUOTA_GROUP $SHOW_QUOTA_PROJID -- 1.8.3.1