From c9739496bb089f82b7b7d0952a3b2c1c2bcc616d Mon Sep 17 00:00:00 2001
From: Jinshan Xiong
Date: Thu, 19 Jun 2014 16:21:09 -0700
Subject: [PATCH] LU-5062 llite: Solve a race to access lli_has_smd in read case

vvp_io_read_lock() used to decide whether to take a read lock by
checking lli_has_smd. Accessing lli_has_smd is racy when an empty file
is turned into raid0; as a result, read requests may be issued without
the corresponding lock.

Signed-off-by: Jinshan Xiong
Change-Id: I70cb1888b42e0e9928f8346359e2b1f554fe4e8f
Reviewed-on: http://review.whamcloud.com/10760
Tested-by: Jenkins
Reviewed-by: Patrick Farrell
Tested-by: Maloo
Reviewed-by: Bobi Jam
Reviewed-by: Oleg Drokin
---
 lustre/include/lclient.h    |  1 +
 lustre/lclient/lcommon_cl.c |  6 ++++++
 lustre/llite/vvp_io.c       | 20 +++++++-------------
 lustre/llite/vvp_lock.c     |  1 +
 4 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/lustre/include/lclient.h b/lustre/include/lclient.h
index 62a79d7..dd784f3 100644
--- a/lustre/include/lclient.h
+++ b/lustre/include/lclient.h
@@ -350,6 +350,7 @@ void ccc_lock_delete(const struct lu_env *env,
 void ccc_lock_fini(const struct lu_env *env,struct cl_lock_slice *slice);
 int ccc_lock_enqueue(const struct lu_env *env,const struct cl_lock_slice *slice,
		      struct cl_io *io, __u32 enqflags);
+int ccc_lock_use(const struct lu_env *env,const struct cl_lock_slice *slice);
 int ccc_lock_unuse(const struct lu_env *env,const struct cl_lock_slice *slice);
 int ccc_lock_wait(const struct lu_env *env,const struct cl_lock_slice *slice);
 int ccc_lock_fits_into(const struct lu_env *env,
diff --git a/lustre/lclient/lcommon_cl.c b/lustre/lclient/lcommon_cl.c
index 0b9e773..afda860 100644
--- a/lustre/lclient/lcommon_cl.c
+++ b/lustre/lclient/lcommon_cl.c
@@ -572,6 +572,12 @@ int ccc_lock_enqueue(const struct lu_env *env,
 	return 0;
 }
 
+int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice)
+{
+	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
+	return 0;
+}
+
 int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice)
 {
 	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c
index 8186e41..798329e 100644
--- a/lustre/llite/vvp_io.c
+++ b/lustre/llite/vvp_io.c
@@ -335,20 +335,14 @@ static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
 static int vvp_io_read_lock(const struct lu_env *env,
			     const struct cl_io_slice *ios)
 {
-	struct cl_io         *io  = ios->cis_io;
-	struct ll_inode_info *lli = ll_i2info(ccc_object_inode(io->ci_obj));
-	int result;
+	struct cl_io            *io = ios->cis_io;
+	struct cl_io_rw_common  *rd = &io->u.ci_rd.rd;
+	int result;
 
-	ENTRY;
-	/* XXX: Layer violation, we shouldn't see lsm at llite level. */
-	if (lli->lli_has_smd) /* lsm-less file doesn't need to lock */
-		result = vvp_io_rw_lock(env, io, CLM_READ,
-					io->u.ci_rd.rd.crw_pos,
-					io->u.ci_rd.rd.crw_pos +
-					io->u.ci_rd.rd.crw_count - 1);
-	else
-		result = 0;
-	RETURN(result);
+	ENTRY;
+	result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos,
+				rd->crw_pos + rd->crw_count - 1);
+	RETURN(result);
 }
 
 static int vvp_io_fault_lock(const struct lu_env *env,
diff --git a/lustre/llite/vvp_lock.c b/lustre/llite/vvp_lock.c
index 54d99f0..94f85d4 100644
--- a/lustre/llite/vvp_lock.c
+++ b/lustre/llite/vvp_lock.c
@@ -75,6 +75,7 @@ static const struct cl_lock_operations vvp_lock_ops = {
 	.clo_fini      = ccc_lock_fini,
 	.clo_enqueue   = ccc_lock_enqueue,
 	.clo_wait      = ccc_lock_wait,
+	.clo_use       = ccc_lock_use,
 	.clo_unuse     = ccc_lock_unuse,
 	.clo_fits_into = ccc_lock_fits_into,
 	.clo_state     = ccc_lock_state,
-- 
1.8.3.1