From: John L. Hammond Date: Mon, 12 Jan 2015 18:55:36 +0000 (-0600) Subject: LU-5971 llite: merge ccc_io and vvp_io X-Git-Tag: 2.6.93~45 X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=commitdiff_plain;h=bb6dbca9c2c9bdcd33663d6449b27a671fcaf902 LU-5971 llite: merge ccc_io and vvp_io Move the contents of struct vvp_io into struct ccc_io, delete the former, and rename the latter to struct vvp_io. Rename various ccc_io related functions to use vvp rather than ccc. Signed-off-by: John L. Hammond Change-Id: I6200f1a5ff4d639d547bd45f91e146dbcc80fe6d Reviewed-on: http://review.whamcloud.com/13351 Tested-by: Jenkins Tested-by: Maloo Reviewed-by: Lai Siyao Reviewed-by: Jinshan Xiong Reviewed-by: Oleg Drokin --- diff --git a/lustre/doc/clio.txt b/lustre/doc/clio.txt index ee56eb7..65a1bbc 100644 --- a/lustre/doc/clio.txt +++ b/lustre/doc/clio.txt @@ -1125,11 +1125,10 @@ layers. Important properties so described include: Each layer keeps IO state in its `IO slice', described below, with all slices chained to the list hanging off of struct cl_io: -- vvp_io, ccc_io: these two slices are used by the top-most layer of the Linux - kernel client. ccc_io is a state common between kernel client and liblustre, - and vvp_io is a state private to the kernel client. +- vvp_io is used by the top-most layer of the Linux kernel + client. - The most important state in ccc_io is an array of struct iovec, describing + The most important state in vvp_io is an array of struct iovec, describing user space buffers from or to which IO is taking place. Note that other layers in the IO stack have no idea that data actually came from user space. diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h index 945bf5a..aadffa6 100644 --- a/lustre/include/cl_object.h +++ b/lustre/include/cl_object.h @@ -1541,7 +1541,7 @@ enum cl_io_state { * This is usually embedded into layer session data, rather than allocated * dynamically. 
* - * \see vvp_io, lov_io, osc_io, ccc_io + * \see vvp_io, lov_io, osc_io */ struct cl_io_slice { struct cl_io *cis_io; diff --git a/lustre/llite/file.c b/lustre/llite/file.c index 9dd6906..e6d32d6 100644 --- a/lustre/llite/file.c +++ b/lustre/llite/file.c @@ -1152,18 +1152,18 @@ restart: ll_io_init(io, file, iot == CIT_WRITE); if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) { - struct vvp_io *vio = vvp_env_io(env); - struct ccc_io *cio = ccc_env_io(env); + struct vvp_io *cio = vvp_env_io(env); bool range_locked = false; if (file->f_flags & O_APPEND) range_lock_init(&range, 0, LUSTRE_EOF); else range_lock_init(&range, *ppos, *ppos + count - 1); + cio->cui_fd = LUSTRE_FPRIVATE(file); - vio->cui_io_subtype = args->via_io_subtype; + cio->cui_io_subtype = args->via_io_subtype; - switch (vio->cui_io_subtype) { + switch (cio->cui_io_subtype) { case IO_NORMAL: cio->cui_iov = args->u.normal.via_iov; cio->cui_nrsegs = args->u.normal.via_nrsegs; @@ -1182,14 +1182,14 @@ restart: } down_read(&lli->lli_trunc_sem); break; - case IO_SPLICE: - vio->u.splice.cui_pipe = args->u.splice.via_pipe; - vio->u.splice.cui_flags = args->u.splice.via_flags; - break; - default: - CERROR("Unknow IO type - %u\n", vio->cui_io_subtype); - LBUG(); - } + case IO_SPLICE: + cio->u.splice.cui_pipe = args->u.splice.via_pipe; + cio->u.splice.cui_flags = args->u.splice.via_flags; + break; + default: + CERROR("unknown IO subtype %u\n", cio->cui_io_subtype); + LBUG(); + } ll_cl_add(file, env, io); result = cl_io_loop(env, io); diff --git a/lustre/llite/lcommon_cl.c b/lustre/llite/lcommon_cl.c index f7a47ed..94254e3 100644 --- a/lustre/llite/lcommon_cl.c +++ b/lustre/llite/lcommon_cl.c @@ -67,7 +67,6 @@ static const struct cl_req_operations ccc_req_ops; */ static struct kmem_cache *ccc_thread_kmem; -static struct kmem_cache *ccc_session_kmem; static struct kmem_cache *ccc_req_kmem; static struct lu_kmem_descr ccc_caches[] = { @@ -77,11 +76,6 @@ static struct lu_kmem_descr ccc_caches[] = { .ckd_size = sizeof (struct ccc_thread_info), }, { - .ckd_cache = &ccc_session_kmem, - .ckd_name = "ccc_session_kmem", - .ckd_size = sizeof (struct ccc_session) - }, - { .ckd_cache = &ccc_req_kmem, .ckd_name = "ccc_req_kmem", .ckd_size = sizeof (struct ccc_req) @@ -114,36 +108,12 @@ void ccc_key_fini(const struct lu_context *ctx, OBD_SLAB_FREE_PTR(info, ccc_thread_kmem); } -void *ccc_session_key_init(const struct lu_context *ctx, - struct lu_context_key *key) -{ - struct ccc_session *session; - - OBD_SLAB_ALLOC_PTR_GFP(session, ccc_session_kmem, GFP_NOFS); - if (session == NULL) - session = ERR_PTR(-ENOMEM); - return session; -} - -void ccc_session_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data) -{ - struct ccc_session *session = data; - OBD_SLAB_FREE_PTR(session, ccc_session_kmem); -} - struct lu_context_key ccc_key = { .lct_tags = LCT_CL_THREAD, .lct_init = ccc_key_init, .lct_fini = ccc_key_fini }; -struct lu_context_key ccc_session_key = { - .lct_tags = LCT_SESSION, - .lct_init = ccc_session_key_init, - .lct_fini = ccc_session_key_fini -}; - int ccc_req_init(const struct lu_env *env, struct cl_device *dev, struct cl_req *req) { @@ -233,18 +203,11 @@ static void vvp_object_size_unlock(struct cl_object *obj) * */ -void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios) -{ - struct cl_io *io = ios->cis_io; - - CLOBINVRNT(env, io->ci_obj, vvp_object_invariant(io->ci_obj)); -} - -int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io, - __u32 enqflags, enum cl_lock_mode mode, - 
pgoff_t start, pgoff_t end) +int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io, + __u32 enqflags, enum cl_lock_mode mode, + pgoff_t start, pgoff_t end) { - struct ccc_io *cio = ccc_env_io(env); + struct vvp_io *cio = vvp_env_io(env); struct cl_lock_descr *descr = &cio->cui_link.cill_descr; struct cl_object *obj = io->ci_obj; @@ -270,8 +233,8 @@ int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io, RETURN(0); } -void ccc_io_update_iov(const struct lu_env *env, - struct ccc_io *cio, struct cl_io *io) +void vvp_io_update_iov(const struct lu_env *env, + struct vvp_io *cio, struct cl_io *io) { int i; size_t size = io->u.ci_rw.crw_count; @@ -300,26 +263,27 @@ void ccc_io_update_iov(const struct lu_env *env, cio->cui_tot_nrsegs, cio->cui_nrsegs); } -int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io, - __u32 enqflags, enum cl_lock_mode mode, - loff_t start, loff_t end) +int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io, + __u32 enqflags, enum cl_lock_mode mode, + loff_t start, loff_t end) { - struct cl_object *obj = io->ci_obj; - return ccc_io_one_lock_index(env, io, enqflags, mode, - cl_index(obj, start), cl_index(obj, end)); + struct cl_object *obj = io->ci_obj; + + return vvp_io_one_lock_index(env, io, enqflags, mode, + cl_index(obj, start), cl_index(obj, end)); } -void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios) +void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios) { CLOBINVRNT(env, ios->cis_io->ci_obj, vvp_object_invariant(ios->cis_io->ci_obj)); } -void ccc_io_advance(const struct lu_env *env, - const struct cl_io_slice *ios, - size_t nob) +void vvp_io_advance(const struct lu_env *env, + const struct cl_io_slice *ios, + size_t nob) { - struct ccc_io *cio = cl2ccc_io(env, ios); + struct vvp_io *cio = cl2vvp_io(env, ios); struct cl_io *io = ios->cis_io; struct cl_object *obj = ios->cis_io->ci_obj; @@ -552,7 +516,7 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr, again: if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) { - struct ccc_io *cio = ccc_env_io(env); + struct vvp_io *cio = vvp_env_io(env); if (attr->ia_valid & ATTR_FILE) /* populate the file descriptor for ftruncate to honor @@ -582,14 +546,15 @@ again: * */ -struct ccc_io *cl2ccc_io(const struct lu_env *env, - const struct cl_io_slice *slice) +struct vvp_io *cl2vvp_io(const struct lu_env *env, + const struct cl_io_slice *slice) { - struct ccc_io *cio; + struct vvp_io *cio; + + cio = container_of(slice, struct vvp_io, cui_cl); + LASSERT(cio == vvp_env_io(env)); - cio = container_of(slice, struct ccc_io, cui_cl); - LASSERT(cio == ccc_env_io(env)); - return cio; + return cio; } struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice) diff --git a/lustre/llite/llite_internal.h b/lustre/llite/llite_internal.h index 4ebbee7..2d83eca 100644 --- a/lustre/llite/llite_internal.h +++ b/lustre/llite/llite_internal.h @@ -974,59 +974,6 @@ struct ll_close_queue { void vvp_write_pending(struct vvp_object *club, struct vvp_page *page); void vvp_write_complete(struct vvp_object *club, struct vvp_page *page); -/* specific achitecture can implement only part of this list */ -enum vvp_io_subtype { - /** normal IO */ - IO_NORMAL, - /** io started from splice_{read|write} */ - IO_SPLICE -}; - -/* IO subtypes */ -struct vvp_io { - /** io subtype */ - enum vvp_io_subtype cui_io_subtype; - - union { - struct { - struct pipe_inode_info *cui_pipe; - unsigned int cui_flags; - } splice; - struct vvp_fault_io { - /** - * Inode 
modification time that is checked across DLM - * lock request. - */ - time_t ft_mtime; - struct vm_area_struct *ft_vma; - /** - * locked page returned from vvp_io - */ - struct page *ft_vmpage; - struct vm_fault_api { - /** - * kernel fault info - */ - struct vm_fault *ft_vmf; - /** - * fault API used bitflags for return code. - */ - unsigned int ft_flags; - /** - * check that flags are from filemap_fault - */ - bool ft_flags_valid; - } fault; - } fault; - } u; - - /* Readahead state. */ - pgoff_t cui_ra_start; - pgoff_t cui_ra_count; - /* Set when cui_ra_{start,count} have been initialized. */ - bool cui_ra_valid; -}; - /** * IO arguments for various VFS I/O interfaces. */ @@ -1084,26 +1031,6 @@ static inline struct vvp_io_args *vvp_env_args(const struct lu_env *env, return ret; } -struct vvp_session { - struct vvp_io vs_ios; -}; - -extern struct lu_context_key vvp_session_key; - -static inline struct vvp_session *vvp_env_session(const struct lu_env *env) -{ - struct vvp_session *ses; - - ses = lu_context_key_get(env->le_ses, &vvp_session_key); - LASSERT(ses != NULL); - return ses; -} - -static inline struct vvp_io *vvp_env_io(const struct lu_env *env) -{ - return &vvp_env_session(env)->vs_ios; -} - int vvp_global_init(void); void vvp_global_fini(void); diff --git a/lustre/llite/llite_mmap.c b/lustre/llite/llite_mmap.c index 7ead998..6836802 100644 --- a/lustre/llite/llite_mmap.c +++ b/lustre/llite/llite_mmap.c @@ -151,7 +151,7 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj); if (rc == 0) { - struct ccc_io *cio = ccc_env_io(env); + struct vvp_io *cio = vvp_env_io(env); struct ll_file_data *fd = LUSTRE_FPRIVATE(file); LASSERT(cio->cui_cl.cis_io == io); @@ -315,9 +315,9 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf) vio = vvp_env_io(env); vio->u.fault.ft_vma = vma; vio->u.fault.ft_vmpage = NULL; - vio->u.fault.fault.ft_vmf = vmf; - vio->u.fault.fault.ft_flags = 0; - vio->u.fault.fault.ft_flags_valid = 0; + vio->u.fault.ft_vmf = vmf; + vio->u.fault.ft_flags = 0; + vio->u.fault.ft_flags_valid = 0; /* May call ll_readpage() */ ll_cl_add(vma->vm_file, env, io); @@ -328,8 +328,8 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf) /* ft_flags are only valid if we reached * the call to filemap_fault */ - if (vio->u.fault.fault.ft_flags_valid) - fault_ret = vio->u.fault.fault.ft_flags; + if (vio->u.fault.ft_flags_valid) + fault_ret = vio->u.fault.ft_flags; vmpage = vio->u.fault.ft_vmpage; if (result != 0 && vmpage != NULL) { diff --git a/lustre/llite/rw26.c b/lustre/llite/rw26.c index 40b68ca..be8f02f 100644 --- a/lustre/llite/rw26.c +++ b/lustre/llite/rw26.c @@ -406,7 +406,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb, env = cl_env_get(&refcheck); LASSERT(!IS_ERR(env)); - io = ccc_env_io(env)->cui_cl.cis_io; + io = vvp_env_io(env)->cui_cl.cis_io; LASSERT(io != NULL); for (seg = 0; seg < nr_segs; seg++) { @@ -469,7 +469,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb, } out: if (tot_bytes > 0) { - struct ccc_io *cio = ccc_env_io(env); + struct vvp_io *cio = vvp_env_io(env); /* no commit async for direct IO */ cio->u.write.cui_written += tot_bytes; @@ -544,7 +544,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping, if (unlikely(vmpage == NULL || PageDirty(vmpage) || PageWriteback(vmpage))) { - struct ccc_io *cio = ccc_env_io(env); + struct vvp_io *cio = vvp_env_io(env); struct cl_page_list *plist = 
&cio->u.write.cui_queue; /* if the page is already in dirty cache, we have to commit @@ -619,7 +619,7 @@ static int ll_write_end(struct file *file, struct address_space *mapping, struct ll_cl_context *lcc = fsdata; const struct lu_env *env; struct cl_io *io; - struct ccc_io *cio; + struct vvp_io *cio; struct cl_page *page; unsigned from = pos & (PAGE_CACHE_SIZE - 1); bool unplug = false; @@ -632,7 +632,7 @@ static int ll_write_end(struct file *file, struct address_space *mapping, env = lcc->lcc_env; page = lcc->lcc_page; io = lcc->lcc_io; - cio = ccc_env_io(env); + cio = vvp_env_io(env); LASSERT(cl_page_is_owned(page, io)); if (copied > 0) { diff --git a/lustre/llite/vvp_dev.c b/lustre/llite/vvp_dev.c index 49b4129..c76e222 100644 --- a/lustre/llite/vvp_dev.c +++ b/lustre/llite/vvp_dev.c @@ -137,7 +137,7 @@ struct lu_context_key vvp_session_key = { }; /* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */ -LU_TYPE_INIT_FINI(vvp, &ccc_key, &ccc_session_key, &vvp_key, &vvp_session_key); +LU_TYPE_INIT_FINI(vvp, &ccc_key, &vvp_key, &vvp_session_key); static const struct lu_device_operations vvp_lu_ops = { .ldo_object_alloc = vvp_object_alloc diff --git a/lustre/llite/vvp_internal.h b/lustre/llite/vvp_internal.h index d6cc854..17c7dea 100644 --- a/lustre/llite/vvp_internal.h +++ b/lustre/llite/vvp_internal.h @@ -81,11 +81,17 @@ enum ccc_setattr_lock_type { SETATTR_MATCH_LOCK }; +enum vvp_io_subtype { + /** normal IO */ + IO_NORMAL, + /** io started from splice_{read|write} */ + IO_SPLICE, +}; /** - * IO state private to vvp or slp layers. + * IO state private to VVP layer. */ -struct ccc_io { +struct vvp_io { /** super class */ struct cl_io_slice cui_cl; struct cl_io_lock_link cui_link; @@ -108,16 +114,47 @@ struct ccc_io { size_t cui_tot_count; union { + struct vvp_fault_io { + /** + * Inode modification time that is checked across DLM + * lock request. + */ + time_t ft_mtime; + struct vm_area_struct *ft_vma; + /** + * locked page returned from vvp_io + */ + struct page *ft_vmpage; + /** + * kernel fault info + */ + struct vm_fault *ft_vmf; + /** + * fault API used bitflags for return code. + */ + unsigned int ft_flags; + /** + * check that flags are from filemap_fault + */ + bool ft_flags_valid; + } fault; struct { enum ccc_setattr_lock_type cui_local_lock; } setattr; struct { + struct pipe_inode_info *cui_pipe; + unsigned int cui_flags; + } splice; + struct { struct cl_page_list cui_queue; unsigned long cui_written; int cui_from; int cui_to; } write; } u; + + enum vvp_io_subtype cui_io_subtype; + /** * Layout version when this IO is initialized */ @@ -127,6 +164,12 @@ struct ccc_io { */ struct ll_file_data *cui_fd; struct kiocb *cui_iocb; + + /* Readahead state. */ + pgoff_t cui_ra_start; + pgoff_t cui_ra_count; + /* Set when cui_ra_{start,count} have been initialized. 
*/ + bool cui_ra_valid; }; /** @@ -136,7 +179,7 @@ struct ccc_io { int cl_is_normalio(const struct lu_env *env, const struct cl_io *io); extern struct lu_context_key ccc_key; -extern struct lu_context_key ccc_session_key; +extern struct lu_context_key vvp_session_key; extern struct kmem_cache *vvp_lock_kmem; extern struct kmem_cache *vvp_object_kmem; @@ -185,23 +228,23 @@ static inline struct cl_io *ccc_env_thread_io(const struct lu_env *env) return io; } -struct ccc_session { - struct ccc_io cs_ios; +struct vvp_session { + struct vvp_io cs_ios; }; -static inline struct ccc_session *ccc_env_session(const struct lu_env *env) +static inline struct vvp_session *vvp_env_session(const struct lu_env *env) { - struct ccc_session *ses; + struct vvp_session *ses; - ses = lu_context_key_get(env->le_ses, &ccc_session_key); + ses = lu_context_key_get(env->le_ses, &vvp_session_key); LASSERT(ses != NULL); return ses; } -static inline struct ccc_io *ccc_env_io(const struct lu_env *env) +static inline struct vvp_io *vvp_env_io(const struct lu_env *env) { - return &ccc_env_session(env)->cs_ios; + return &vvp_env_session(env)->cs_ios; } /** @@ -292,27 +335,23 @@ struct ccc_req { void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key); void ccc_key_fini(const struct lu_context *ctx, struct lu_context_key *key, void *data); -void *ccc_session_key_init(const struct lu_context *ctx, - struct lu_context_key *key); -void ccc_session_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data); int ccc_req_init(const struct lu_env *env, struct cl_device *dev, struct cl_req *req); void ccc_umount(const struct lu_env *env, struct cl_device *dev); int ccc_global_init(struct lu_device_type *device_type); void ccc_global_fini(struct lu_device_type *device_type); -void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios); -int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io, + +int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io, __u32 enqflags, enum cl_lock_mode mode, pgoff_t start, pgoff_t end); -int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io, +int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io, __u32 enqflags, enum cl_lock_mode mode, loff_t start, loff_t end); -void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios); -void ccc_io_advance(const struct lu_env *env, const struct cl_io_slice *ios, +void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios); +void vvp_io_advance(const struct lu_env *env, const struct cl_io_slice *ios, size_t nob); -void ccc_io_update_iov(const struct lu_env *env, struct ccc_io *cio, +void vvp_io_update_iov(const struct lu_env *env, struct vvp_io *cio, struct cl_io *io); int ccc_prep_size(const struct lu_env *env, struct cl_object *obj, struct cl_io *io, loff_t start, size_t count, int *exceed); @@ -366,7 +405,7 @@ static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice) return container_of(slice, struct vvp_lock, vlk_cl); } -struct ccc_io *cl2ccc_io(const struct lu_env *env, +struct vvp_io *cl2vvp_io(const struct lu_env *env, const struct cl_io_slice *slice); struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice); diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c index dc4e294..b0523fa 100644 --- a/lustre/llite/vvp_io.c +++ b/lustre/llite/vvp_io.c @@ -46,9 +46,6 @@ #include "llite_internal.h" #include "vvp_internal.h" -static struct vvp_io *cl2vvp_io(const struct lu_env *env, - const struct cl_io_slice 
*slice); - /** * True, if \a io is a normal io, False for splice_{read,write} */ @@ -71,7 +68,7 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io, struct inode *inode) { struct ll_inode_info *lli = ll_i2info(inode); - struct ccc_io *cio = ccc_env_io(env); + struct vvp_io *cio = vvp_env_io(env); bool rc = true; switch (io->ci_type) { @@ -103,7 +100,7 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io, static int vvp_io_write_iter_init(const struct lu_env *env, const struct cl_io_slice *ios) { - struct ccc_io *cio = cl2ccc_io(env, ios); + struct vvp_io *cio = cl2vvp_io(env, ios); cl_page_list_init(&cio->u.write.cui_queue); cio->u.write.cui_written = 0; @@ -116,7 +113,7 @@ static int vvp_io_write_iter_init(const struct lu_env *env, static void vvp_io_write_iter_fini(const struct lu_env *env, const struct cl_io_slice *ios) { - struct ccc_io *cio = cl2ccc_io(env, ios); + struct vvp_io *cio = cl2vvp_io(env, ios); LASSERT(cio->u.write.cui_queue.pl_nr == 0); } @@ -124,20 +121,20 @@ static void vvp_io_write_iter_fini(const struct lu_env *env, static int vvp_io_fault_iter_init(const struct lu_env *env, const struct cl_io_slice *ios) { - struct vvp_io *vio = cl2vvp_io(env, ios); + struct vvp_io *vio = cl2vvp_io(env, ios); struct inode *inode = vvp_object_inode(ios->cis_obj); - LASSERT(inode == - cl2ccc_io(env, ios)->cui_fd->fd_file->f_dentry->d_inode); - vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime); - return 0; + LASSERT(inode == vio->cui_fd->fd_file->f_dentry->d_inode); + vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime); + + return 0; } static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios) { struct cl_io *io = ios->cis_io; struct cl_object *obj = io->ci_obj; - struct ccc_io *cio = cl2ccc_io(env, ios); + struct vvp_io *cio = cl2vvp_io(env, ios); struct inode *inode = vvp_object_inode(obj); CLOBINVRNT(env, obj, vvp_object_invariant(obj)); @@ -224,7 +221,7 @@ static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma) } static int vvp_mmap_locks(const struct lu_env *env, - struct ccc_io *vio, struct cl_io *io) + struct vvp_io *vio, struct cl_io *io) { struct ccc_thread_info *cti = ccc_env_info(env); struct mm_struct *mm = current->mm; @@ -311,20 +308,22 @@ static int vvp_mmap_locks(const struct lu_env *env, static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io, enum cl_lock_mode mode, loff_t start, loff_t end) { - struct ccc_io *cio = ccc_env_io(env); - int result; - int ast_flags = 0; + struct vvp_io *cio = vvp_env_io(env); + int result; + int ast_flags = 0; - LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE); - ENTRY; + LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE); + ENTRY; + + vvp_io_update_iov(env, cio, io); - ccc_io_update_iov(env, cio, io); + if (io->u.ci_rw.crw_nonblock) + ast_flags |= CEF_NONBLOCK; + + result = vvp_mmap_locks(env, cio, io); + if (result == 0) + result = vvp_io_one_lock(env, io, ast_flags, mode, start, end); - if (io->u.ci_rw.crw_nonblock) - ast_flags |= CEF_NONBLOCK; - result = vvp_mmap_locks(env, cio, io); - if (result == 0) - result = ccc_io_one_lock(env, io, ast_flags, mode, start, end); RETURN(result); } @@ -349,9 +348,11 @@ static int vvp_io_fault_lock(const struct lu_env *env, /* * XXX LDLM_FL_CBPENDING */ - return ccc_io_one_lock_index - (env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma), - io->u.ci_fault.ft_index, io->u.ci_fault.ft_index); + return vvp_io_one_lock_index(env, + io, 0, + vvp_mode_from_vma(vio->u.fault.ft_vma), + 
io->u.ci_fault.ft_index, + io->u.ci_fault.ft_index); } static int vvp_io_write_lock(const struct lu_env *env, @@ -385,7 +386,7 @@ static int vvp_io_setattr_iter_init(const struct lu_env *env, static int vvp_io_setattr_lock(const struct lu_env *env, const struct cl_io_slice *ios) { - struct ccc_io *cio = ccc_env_io(env); + struct vvp_io *cio = vvp_env_io(env); struct cl_io *io = ios->cis_io; __u64 new_size; __u32 enqflags = 0; @@ -403,8 +404,9 @@ static int vvp_io_setattr_lock(const struct lu_env *env, new_size = 0; } cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK; - return ccc_io_one_lock(env, io, enqflags, CLM_WRITE, - new_size, OBD_OBJECT_EOF); + + return vvp_io_one_lock(env, io, enqflags, CLM_WRITE, + new_size, OBD_OBJECT_EOF); } static int vvp_do_vmtruncate(struct inode *inode, size_t size) @@ -498,19 +500,18 @@ static void vvp_io_setattr_fini(const struct lu_env *env, } static int vvp_io_read_start(const struct lu_env *env, - const struct cl_io_slice *ios) + const struct cl_io_slice *ios) { - struct vvp_io *vio = cl2vvp_io(env, ios); - struct ccc_io *cio = cl2ccc_io(env, ios); - struct cl_io *io = ios->cis_io; - struct cl_object *obj = io->ci_obj; + struct vvp_io *vio = cl2vvp_io(env, ios); + struct cl_io *io = ios->cis_io; + struct cl_object *obj = io->ci_obj; struct inode *inode = vvp_object_inode(obj); - struct file *file = cio->cui_fd->fd_file; + struct file *file = vio->cui_fd->fd_file; - int result; - loff_t pos = io->u.ci_rd.rd.crw_pos; - long cnt = io->u.ci_rd.rd.crw_count; - long tot = cio->cui_tot_count; + int result; + loff_t pos = io->u.ci_rd.rd.crw_pos; + long cnt = io->u.ci_rd.rd.crw_count; + long tot = vio->cui_tot_count; int exceed = 0; CLOBINVRNT(env, obj, vvp_object_invariant(obj)); @@ -530,8 +531,8 @@ static int vvp_io_read_start(const struct lu_env *env, "Read ino %lu, %lu bytes, offset %lld, size %llu\n", inode->i_ino, cnt, pos, i_size_read(inode)); - /* turn off the kernel's read-ahead */ - cio->cui_fd->fd_file->f_ra.ra_pages = 0; + /* turn off the kernel's read-ahead */ + vio->cui_fd->fd_file->f_ra.ra_pages = 0; /* initialize read-ahead window once per syscall */ if (!vio->cui_ra_valid) { @@ -545,31 +546,31 @@ static int vvp_io_read_start(const struct lu_env *env, file_accessed(file); switch (vio->cui_io_subtype) { case IO_NORMAL: - LASSERT(cio->cui_iocb->ki_pos == pos); - result = generic_file_aio_read(cio->cui_iocb, - cio->cui_iov, cio->cui_nrsegs, - cio->cui_iocb->ki_pos); + LASSERT(vio->cui_iocb->ki_pos == pos); + result = generic_file_aio_read(vio->cui_iocb, + vio->cui_iov, vio->cui_nrsegs, + vio->cui_iocb->ki_pos); break; - case IO_SPLICE: - result = generic_file_splice_read(file, &pos, - vio->u.splice.cui_pipe, cnt, - vio->u.splice.cui_flags); - /* LU-1109: do splice read stripe by stripe otherwise if it - * may make nfsd stuck if this read occupied all internal pipe - * buffers. */ - io->ci_continue = 0; - break; - default: - CERROR("Wrong IO type %u\n", vio->cui_io_subtype); - LBUG(); - } + case IO_SPLICE: + result = generic_file_splice_read(file, &pos, + vio->u.splice.cui_pipe, cnt, + vio->u.splice.cui_flags); + /* LU-1109: do splice read stripe by stripe otherwise if it + * may make nfsd stuck if this read occupied all internal pipe + * buffers. 
*/ + io->ci_continue = 0; + break; + default: + CERROR("Wrong IO type %u\n", vio->cui_io_subtype); + LBUG(); + } out: if (result >= 0) { if (result < cnt) io->ci_continue = 0; io->ci_nob += result; - ll_rw_stats_tally(ll_i2sbi(inode), current->pid, cio->cui_fd, + ll_rw_stats_tally(ll_i2sbi(inode), current->pid, vio->cui_fd, pos, result, READ); result = 0; } @@ -684,7 +685,7 @@ int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io) { struct cl_object *obj = io->ci_obj; struct inode *inode = vvp_object_inode(obj); - struct ccc_io *cio = ccc_env_io(env); + struct vvp_io *cio = vvp_env_io(env); struct cl_page_list *queue = &cio->u.write.cui_queue; struct cl_page *page; int rc = 0; @@ -763,7 +764,7 @@ int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io) static int vvp_io_write_start(const struct lu_env *env, const struct cl_io_slice *ios) { - struct ccc_io *cio = cl2ccc_io(env, ios); + struct vvp_io *cio = cl2vvp_io(env, ios); struct cl_io *io = ios->cis_io; struct cl_object *obj = io->ci_obj; struct inode *inode = vvp_object_inode(obj); @@ -844,38 +845,40 @@ static int vvp_io_write_start(const struct lu_env *env, static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) { - struct vm_fault *vmf = cfio->fault.ft_vmf; + struct vm_fault *vmf = cfio->ft_vmf; - cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf); - cfio->fault.ft_flags_valid = 1; + cfio->ft_flags = filemap_fault(cfio->ft_vma, vmf); + cfio->ft_flags_valid = 1; - if (vmf->page) { - LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n", - vmf->virtual_address); - if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) { - lock_page(vmf->page); - cfio->fault.ft_flags |= VM_FAULT_LOCKED; - } + if (vmf->page) { + LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n", + vmf->virtual_address); + if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) { + lock_page(vmf->page); + cfio->ft_flags |= VM_FAULT_LOCKED; + } - cfio->ft_vmpage = vmf->page; - return 0; - } + cfio->ft_vmpage = vmf->page; - if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) { - CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address); - return -EFAULT; - } + return 0; + } - if (cfio->fault.ft_flags & VM_FAULT_OOM) { - CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address); - return -ENOMEM; - } + if (cfio->ft_flags & VM_FAULT_SIGBUS) { + CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address); + return -EFAULT; + } + + if (cfio->ft_flags & VM_FAULT_OOM) { + CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address); + return -ENOMEM; + } - if (cfio->fault.ft_flags & VM_FAULT_RETRY) - return -EAGAIN; + if (cfio->ft_flags & VM_FAULT_RETRY) + return -EAGAIN; - CERROR("unknow error in page fault %d!\n", cfio->fault.ft_flags); - return -EINVAL; + CERROR("unknown error in page fault %d\n", cfio->ft_flags); + + return -EINVAL; } static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io, @@ -1047,7 +1050,9 @@ out: /* return unlocked vmpage to avoid deadlocking */ if (vmpage != NULL) unlock_page(vmpage); - cfio->fault.ft_flags &= ~VM_FAULT_LOCKED; + + cfio->ft_flags &= ~VM_FAULT_LOCKED; + return result; } @@ -1069,7 +1074,7 @@ static int vvp_io_read_page(const struct lu_env *env, struct cl_page *page = slice->cpl_page; struct inode *inode = vvp_object_inode(slice->cpl_obj); struct ll_sb_info *sbi = ll_i2sbi(inode); - struct ll_file_data *fd = cl2ccc_io(env, ios)->cui_fd; + struct ll_file_data *fd = cl2vvp_io(env, ios)->cui_fd; struct ll_readahead_state *ras = &fd->fd_ras; struct cl_2queue *queue = 
&io->ci_queue; @@ -1104,7 +1109,7 @@ static const struct cl_io_operations vvp_io_ops = { .cio_fini = vvp_io_fini, .cio_lock = vvp_io_read_lock, .cio_start = vvp_io_read_start, - .cio_advance = ccc_io_advance, + .cio_advance = vvp_io_advance, }, [CIT_WRITE] = { .cio_fini = vvp_io_fini, @@ -1112,7 +1117,7 @@ static const struct cl_io_operations vvp_io_ops = { .cio_iter_fini = vvp_io_write_iter_fini, .cio_lock = vvp_io_write_lock, .cio_start = vvp_io_write_start, - .cio_advance = ccc_io_advance + .cio_advance = vvp_io_advance, }, [CIT_SETATTR] = { .cio_fini = vvp_io_setattr_fini, @@ -1126,7 +1131,7 @@ static const struct cl_io_operations vvp_io_ops = { .cio_iter_init = vvp_io_fault_iter_init, .cio_lock = vvp_io_fault_lock, .cio_start = vvp_io_fault_start, - .cio_end = ccc_io_end + .cio_end = vvp_io_end, }, [CIT_FSYNC] = { .cio_start = vvp_io_fsync_start, @@ -1143,21 +1148,20 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj, struct cl_io *io) { struct vvp_io *vio = vvp_env_io(env); - struct ccc_io *cio = ccc_env_io(env); struct inode *inode = vvp_object_inode(obj); - int result; + int result; CLOBINVRNT(env, obj, vvp_object_invariant(obj)); - ENTRY; + ENTRY; CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d " - "restore needed %d\n", + "restore needed %d\n", PFID(lu_object_fid(&obj->co_lu)), io->ci_ignore_layout, io->ci_verify_layout, - cio->cui_layout_gen, io->ci_restore_needed); + vio->cui_layout_gen, io->ci_restore_needed); - CL_IO_SLICE_CLEAN(cio, cui_cl); - cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops); + CL_IO_SLICE_CLEAN(vio, cui_cl); + cl_io_slice_add(io, &vio->cui_cl, obj, &vvp_io_ops); vio->cui_ra_valid = false; result = 0; if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) { @@ -1169,10 +1173,10 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj, * results." -- Single Unix Spec */ if (count == 0) result = 1; - else { - cio->cui_tot_count = count; - cio->cui_tot_nrsegs = 0; - } + else { + vio->cui_tot_count = count; + vio->cui_tot_nrsegs = 0; + } /* for read/write, we store the jobid in the inode, and * it'll be fetched by osc when building RPC. @@ -1196,7 +1200,7 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj, * even for operations requiring to open file, such as read and write, * because it might not grant layout lock in IT_OPEN. */ if (result == 0 && !io->ci_ignore_layout) { - result = ll_layout_refresh(inode, &cio->cui_layout_gen); + result = ll_layout_refresh(inode, &vio->cui_layout_gen); if (result == -ENOENT) /* If the inode on MDS has been removed, but the objects * on OSTs haven't been destroyed (async unlink), layout @@ -1211,11 +1215,3 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj, RETURN(result); } - -static struct vvp_io *cl2vvp_io(const struct lu_env *env, - const struct cl_io_slice *slice) -{ - /* Caling just for assertion */ - cl2ccc_io(env, slice); - return vvp_env_io(env); -} diff --git a/lustre/llite/vvp_page.c b/lustre/llite/vvp_page.c index d8ebd14..f6ff571 100644 --- a/lustre/llite/vvp_page.c +++ b/lustre/llite/vvp_page.c @@ -372,7 +372,7 @@ static int vvp_page_is_under_lock(const struct lu_env *env, if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE || io->ci_type == CIT_FAULT) { - struct ccc_io *cio = ccc_env_io(env); + struct vvp_io *cio = vvp_env_io(env); if (unlikely(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) *max_index = CL_PAGE_EOF;
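
For readers following the refactoring rather than the patch mechanics, the short standalone sketch below (not Lustre source) illustrates the two lookups the merged struct vvp_io serves after this change: obtaining the per-thread IO state from the environment (what vvp_env_session()/vvp_env_io() do) and recovering the enclosing vvp_io from an embedded cl_io_slice (what cl2vvp_io() does with container_of(), including its LASSERT that the result matches vvp_env_io()). All types here are simplified stand-ins; in particular, the real code looks the session up with lu_context_key_get(env->le_ses, &vvp_session_key), whereas this sketch embeds the session directly in a toy lu_env so it compiles and runs on its own.

/*
 * Standalone sketch (NOT Lustre source) of the vvp_io lookups after the
 * ccc_io/vvp_io merge.  All structures are simplified stand-ins.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct cl_io_slice {
	const void *cis_io;		/* back-pointer to the cl_io (unused here) */
};

/* Simplified merged vvp_io: the slice is embedded as its first member. */
struct vvp_io {
	struct cl_io_slice cui_cl;	/* "super class" slice */
	unsigned int cui_io_subtype;	/* IO_NORMAL / IO_SPLICE in the real code */
};

/* Per-session container, as in the patched struct vvp_session. */
struct vvp_session {
	struct vvp_io cs_ios;
};

/* Toy stand-in for lu_env: holds the session state directly instead of
 * going through lu_context_key_get() as the real vvp_env_session() does. */
struct lu_env {
	struct vvp_session le_ses;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Analogue of vvp_env_session()/vvp_env_io(): env -> per-thread vvp_io. */
static struct vvp_io *vvp_env_io(struct lu_env *env)
{
	return &env->le_ses.cs_ios;
}

/* Analogue of cl2vvp_io(): slice pointer -> enclosing vvp_io. */
static struct vvp_io *cl2vvp_io(struct lu_env *env, struct cl_io_slice *slice)
{
	struct vvp_io *cio = container_of(slice, struct vvp_io, cui_cl);

	/* Mirrors the LASSERT(cio == vvp_env_io(env)) in the patch. */
	assert(cio == vvp_env_io(env));
	return cio;
}

int main(void)
{
	struct lu_env env = { 0 };
	struct vvp_io *vio = vvp_env_io(&env);

	/* A layer holding only the slice can recover its private IO state. */
	struct vvp_io *recovered = cl2vvp_io(&env, &vio->cui_cl);

	printf("slice %p -> vvp_io %p\n", (void *)&vio->cui_cl,
	       (void *)recovered);
	return 0;
}

Because the slice is embedded in vvp_io and vvp_io lives in the per-thread session, each layer can recover its private state without a separate allocation; that is what allows the patch to drop ccc_session/ccc_session_key entirely and keep only vvp_session/vvp_session_key.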