X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fllite%2Fvvp_io.c;h=9de5f9b40cf20014c2b74326a866e131e3c09f01;hp=a076e5a447a198b9c2ee5518cd860daf738c82af;hb=a1fc8dffef216b71cb4a29a5a8faa2aa7919d2ae;hpb=d62f58c32074bc54055801ed0f919bfd5c277f6d diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c index a076e5a..9de5f9b 100644 --- a/lustre/llite/vvp_io.c +++ b/lustre/llite/vvp_io.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -27,7 +23,7 @@ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2015, Intel Corporation. + * Copyright (c) 2011, 2016, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -179,10 +175,10 @@ static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj, * --bug 17336 */ loff_t size = i_size_read(inode); unsigned long cur_index = start >> - PAGE_CACHE_SHIFT; + PAGE_SHIFT; if ((size == 0 && cur_index != 0) || - (((size - 1) >> PAGE_CACHE_SHIFT) < + (((size - 1) >> PAGE_SHIFT) < cur_index)) *exceed = 1; } @@ -202,7 +198,7 @@ static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj, if (i_size_read(inode) < kms) { i_size_write(inode, kms); CDEBUG(D_VFSTRACE, - DFID" updating i_size "LPU64"\n", + DFID" updating i_size %llu\n", PFID(lu_object_fid(&obj->co_lu)), (__u64)i_size_read(inode)); } @@ -290,8 +286,8 @@ static int vvp_io_fault_iter_init(const struct lu_env *env, struct vvp_io *vio = cl2vvp_io(env, ios); struct inode *inode = vvp_object_inode(ios->cis_obj); - LASSERT(inode == vio->vui_fd->fd_file->f_path.dentry->d_inode); - vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime); + LASSERT(inode == file_inode(vio->vui_fd->fd_file)); + vio->u.fault.ft_mtime = inode->i_mtime.tv_sec; return 0; } @@ -302,18 +298,18 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios) struct cl_object *obj = io->ci_obj; struct vvp_io *vio = cl2vvp_io(env, ios); struct inode *inode = vvp_object_inode(obj); + int rc; CLOBINVRNT(env, obj, vvp_object_invariant(obj)); CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d " - "restore needed %d\n", + "need write layout %d, restore needed %d\n", PFID(lu_object_fid(&obj->co_lu)), io->ci_ignore_layout, io->ci_verify_layout, - vio->vui_layout_gen, io->ci_restore_needed); + vio->vui_layout_gen, io->ci_need_write_intent, + io->ci_restore_needed); if (io->ci_restore_needed) { - int rc; - /* file was detected release, we need to restore it * before finishing the io */ @@ -338,6 +334,42 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios) } } + /** + * dynamic layout change needed, send layout intent + * RPC. 
+ */ + if (io->ci_need_write_intent) { + loff_t start = 0; + loff_t end = OBD_OBJECT_EOF; + + io->ci_need_write_intent = 0; + + LASSERT(io->ci_type == CIT_WRITE || + cl_io_is_trunc(io) || cl_io_is_mkwrite(io)); + + if (io->ci_type == CIT_WRITE) { + if (!cl_io_is_append(io)) { + start = io->u.ci_rw.rw_range.cir_pos; + end = start + io->u.ci_rw.rw_range.cir_count; + } + } else if (cl_io_is_trunc(io)) { + end = io->u.ci_setattr.sa_attr.lvb_size; + } else { /* mkwrite */ + pgoff_t index = io->u.ci_fault.ft_index; + + start = cl_offset(io->ci_obj, index); + end = cl_offset(io->ci_obj, index + 1); + } + + CDEBUG(D_VFSTRACE, DFID" write layout, type %u [%llu, %llu)\n", + PFID(lu_object_fid(&obj->co_lu)), io->ci_type, + start, end); + rc = ll_layout_write_intent(inode, start, end); + io->ci_result = rc; + if (!rc) + io->ci_need_restart = 1; + } + if (!io->ci_ignore_layout && io->ci_verify_layout) { __u32 gen = 0; @@ -386,20 +418,15 @@ static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma) return CLM_READ; } -static int vvp_mmap_locks(const struct lu_env *env, - struct vvp_io *vio, struct cl_io *io) +static int vvp_mmap_locks(const struct lu_env *env, struct cl_io *io) { struct vvp_thread_info *vti = vvp_env_info(env); struct mm_struct *mm = current->mm; struct vm_area_struct *vma; struct cl_lock_descr *descr = &vti->vti_descr; union ldlm_policy_data policy; -#ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER struct iovec iov; struct iov_iter i; -#else - unsigned long seg; -#endif int result = 0; ENTRY; @@ -408,20 +435,11 @@ static int vvp_mmap_locks(const struct lu_env *env, if (!cl_is_normalio(env, io)) RETURN(0); - /* nfs or loop back device write */ - if (vio->vui_iter == NULL) - RETURN(0); - /* No MM (e.g. NFS)? No vmas too. */ if (mm == NULL) RETURN(0); -#ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER - iov_for_each(iov, i, *(vio->vui_iter)) { -#else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */ - for (seg = 0; seg < vio->vui_iter->nr_segs; seg++) { - const struct iovec iov = vio->vui_iter->iov[seg]; -#endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */ + iov_for_each(iov, i, io->u.ci_rw.rw_iter) { unsigned long addr = (unsigned long)iov.iov_base; size_t count = iov.iov_len; @@ -433,7 +451,7 @@ static int vvp_mmap_locks(const struct lu_env *env, down_read(&mm->mmap_sem); while((vma = our_vma(mm, addr, count)) != NULL) { - struct dentry *de = vma->vm_file->f_path.dentry; + struct dentry *de = file_dentry(vma->vm_file); struct inode *inode = de->d_inode; int flags = CEF_MUST; @@ -487,126 +505,61 @@ static void vvp_io_advance(const struct lu_env *env, struct vvp_io *vio = cl2vvp_io(env, ios); struct cl_io *io = ios->cis_io; struct cl_object *obj = ios->cis_io->ci_obj; -#ifndef HAVE_FILE_OPERATIONS_READ_WRITE_ITER - struct iovec *iov; -#endif + CLOBINVRNT(env, obj, vvp_object_invariant(obj)); if (!cl_is_normalio(env, io)) return; -#ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER - vio->vui_tot_count -= nob; - iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count); -#else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */ - LASSERT(vio->vui_tot_nrsegs >= vio->vui_iter->nr_segs); - LASSERT(vio->vui_tot_count >= nob); - - /* Restore the iov changed in vvp_io_update_iov() */ - if (vio->vui_iov_olen > 0) { - unsigned long idx = vio->vui_iter->nr_segs - 1; - - /* In the latest kernels iov is const so that - * changes are done using iter helpers. In older - * kernels those helpers don't exist we lustre - * has to do some of the management of the iter - * itself. 
*/ - iov = (struct iovec *)&vio->vui_iter->iov[idx]; - iov->iov_len = vio->vui_iov_olen; - vio->vui_iov_olen = 0; - } - - /* In the latest kernels special helpers exist to help - * advance the iov but we don't have that in older kernels - * so we need to do the book keeping ourselves. */ - iov = (struct iovec *)vio->vui_iter->iov; - while (nob > 0) { - if (iov->iov_len > nob) { - iov->iov_len -= nob; - iov->iov_base += nob; - break; - } - - nob -= iov->iov_len; - iov++; - vio->vui_tot_nrsegs--; - } - - vio->vui_iter->iov = iov; vio->vui_tot_count -= nob; -#endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */ -} - -static void vvp_io_update_iov(const struct lu_env *env, - struct vvp_io *vio, struct cl_io *io) -{ - size_t size = io->u.ci_rw.crw_count; -#ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER - if (!cl_is_normalio(env, io) || vio->vui_iter == NULL) - return; - - iov_iter_truncate(vio->vui_iter, size); -#else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */ - unsigned long i; - - vio->vui_iov_olen = 0; - if (!cl_is_normalio(env, io) || vio->vui_tot_nrsegs == 0) - return; - - for (i = 0; i < vio->vui_tot_nrsegs; i++) { - struct iovec *iv = (struct iovec *) &vio->vui_iter->iov[i]; - - if (iv->iov_len < size) { - size -= iv->iov_len; - } else { - if (iv->iov_len > size) { - vio->vui_iov_olen = iv->iov_len; - iv->iov_len = size; - } - break; - } + if (io->ci_pio) { + iov_iter_advance(&io->u.ci_rw.rw_iter, nob); + io->u.ci_rw.rw_iocb.ki_pos = io->u.ci_rw.rw_range.cir_pos; +#ifdef HAVE_KIOCB_KI_LEFT + io->u.ci_rw.rw_iocb.ki_left = vio->vui_tot_count; +#elif defined(HAVE_KI_NBYTES) + io->u.ci_rw.rw_iocb.ki_nbytes = vio->vui_tot_count; +#endif + } else { + /* It was truncated to stripe size in vvp_io_rw_lock() */ + iov_iter_reexpand(&io->u.ci_rw.rw_iter, vio->vui_tot_count); } - - vio->vui_iter->nr_segs = i + 1; - LASSERTF(vio->vui_tot_nrsegs >= vio->vui_iter->nr_segs, - "tot_nrsegs: %lu, nrsegs: %lu\n", - vio->vui_tot_nrsegs, vio->vui_iter->nr_segs); -#endif /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */ } static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io, enum cl_lock_mode mode, loff_t start, loff_t end) { - struct vvp_io *vio = vvp_env_io(env); int result; int ast_flags = 0; LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE); ENTRY; - vvp_io_update_iov(env, vio, io); + if (cl_is_normalio(env, io)) + iov_iter_truncate(&io->u.ci_rw.rw_iter, + io->u.ci_rw.rw_range.cir_count); - if (io->u.ci_rw.crw_nonblock) + if (io->u.ci_rw.rw_nonblock) ast_flags |= CEF_NONBLOCK; - result = vvp_mmap_locks(env, vio, io); + result = vvp_mmap_locks(env, io); if (result == 0) result = vvp_io_one_lock(env, io, ast_flags, mode, start, end); - RETURN(result); + RETURN(result); } static int vvp_io_read_lock(const struct lu_env *env, const struct cl_io_slice *ios) { - struct cl_io *io = ios->cis_io; - struct cl_io_rw_common *rd = &io->u.ci_rd.rd; - int result; + struct cl_io *io = ios->cis_io; + struct cl_io_range *range = &io->u.ci_rw.rw_range; + int rc; ENTRY; - result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos, - rd->crw_pos + rd->crw_count - 1); - RETURN(result); + rc = vvp_io_rw_lock(env, io, CLM_READ, range->cir_pos, + range->cir_pos + range->cir_count - 1); + RETURN(rc); } static int vvp_io_fault_lock(const struct lu_env *env, @@ -627,18 +580,21 @@ static int vvp_io_fault_lock(const struct lu_env *env, static int vvp_io_write_lock(const struct lu_env *env, const struct cl_io_slice *ios) { - struct cl_io *io = ios->cis_io; - loff_t start; - loff_t end; + struct cl_io *io = ios->cis_io; + loff_t 
start; + loff_t end; + int rc; - if (io->u.ci_wr.wr_append) { - start = 0; - end = OBD_OBJECT_EOF; - } else { - start = io->u.ci_wr.wr.crw_pos; - end = start + io->u.ci_wr.wr.crw_count - 1; - } - return vvp_io_rw_lock(env, io, CLM_WRITE, start, end); + ENTRY; + if (io->u.ci_rw.rw_append) { + start = 0; + end = OBD_OBJECT_EOF; + } else { + start = io->u.ci_rw.rw_range.cir_pos; + end = start + io->u.ci_rw.rw_range.cir_count - 1; + } + rc = vvp_io_rw_lock(env, io, CLM_WRITE, start, end); + RETURN(rc); } static int vvp_io_setattr_iter_init(const struct lu_env *env, @@ -736,10 +692,12 @@ static int vvp_io_setattr_start(const struct lu_env *env, struct inode *inode = vvp_object_inode(io->ci_obj); struct ll_inode_info *lli = ll_i2info(inode); - mutex_lock(&inode->i_mutex); if (cl_io_is_trunc(io)) { down_write(&lli->lli_trunc_sem); + inode_lock(inode); inode_dio_wait(inode); + } else { + inode_lock(inode); } if (io->u.ci_setattr.sa_valid & TIMES_SET_FLAGS) @@ -760,9 +718,11 @@ static void vvp_io_setattr_end(const struct lu_env *env, * because osc has already notified to destroy osc_extents. */ vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size); inode_dio_write_done(inode); + inode_unlock(inode); up_write(&lli->lli_trunc_sem); + } else { + inode_unlock(inode); } - mutex_unlock(&inode->i_mutex); } static void vvp_io_setattr_fini(const struct lu_env *env, @@ -788,16 +748,17 @@ static int vvp_io_read_start(const struct lu_env *env, struct inode *inode = vvp_object_inode(obj); struct ll_inode_info *lli = ll_i2info(inode); struct file *file = vio->vui_fd->fd_file; - - int result; - loff_t pos = io->u.ci_rd.rd.crw_pos; - long cnt = io->u.ci_rd.rd.crw_count; - long tot = vio->vui_tot_count; - int exceed = 0; + struct cl_io_range *range = &io->u.ci_rw.rw_range; + loff_t pos = range->cir_pos; /* for generic_file_splice_read() only */ + size_t tot = vio->vui_tot_count; + int exceed = 0; + int result; CLOBINVRNT(env, obj, vvp_object_invariant(obj)); - CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt); + CDEBUG(D_VFSTRACE, "%s: read [%llu, %llu)\n", + file_dentry(file)->d_name.name, + range->cir_pos, range->cir_pos + range->cir_count); if (vio->vui_io_subtype == IO_NORMAL) down_read(&lli->lli_trunc_sem); @@ -805,15 +766,16 @@ static int vvp_io_read_start(const struct lu_env *env, if (!can_populate_pages(env, io, inode)) return 0; - result = vvp_prep_size(env, obj, io, pos, tot, &exceed); + result = vvp_prep_size(env, obj, io, range->cir_pos, tot, &exceed); if (result != 0) return result; else if (exceed != 0) goto out; LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, - "Read ino %lu, %lu bytes, offset %lld, size %llu\n", - inode->i_ino, cnt, pos, i_size_read(inode)); + "Read ino %lu, %lu bytes, offset %lld, size %llu\n", + inode->i_ino, range->cir_count, range->cir_pos, + i_size_read(inode)); /* turn off the kernel's read-ahead */ vio->vui_fd->fd_file->f_ra.ra_pages = 0; @@ -821,8 +783,8 @@ static int vvp_io_read_start(const struct lu_env *env, /* initialize read-ahead window once per syscall */ if (!vio->vui_ra_valid) { vio->vui_ra_valid = true; - vio->vui_ra_start = cl_index(obj, pos); - vio->vui_ra_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1); + vio->vui_ra_start = cl_index(obj, range->cir_pos); + vio->vui_ra_count = cl_index(obj, tot + PAGE_SIZE - 1); ll_ras_enter(file); } @@ -830,19 +792,17 @@ static int vvp_io_read_start(const struct lu_env *env, file_accessed(file); switch (vio->vui_io_subtype) { case IO_NORMAL: - LASSERT(vio->vui_iocb->ki_pos == pos); -#ifdef 
HAVE_FILE_OPERATIONS_READ_WRITE_ITER - result = generic_file_read_iter(vio->vui_iocb, vio->vui_iter); -#else - result = generic_file_aio_read(vio->vui_iocb, - vio->vui_iter->iov, - vio->vui_iter->nr_segs, - vio->vui_iocb->ki_pos); -#endif + LASSERTF(io->u.ci_rw.rw_iocb.ki_pos == range->cir_pos, + "ki_pos %lld [%lld, %lld)\n", + io->u.ci_rw.rw_iocb.ki_pos, + range->cir_pos, range->cir_pos + range->cir_count); + result = generic_file_read_iter(&io->u.ci_rw.rw_iocb, + &io->u.ci_rw.rw_iter); break; case IO_SPLICE: result = generic_file_splice_read(file, &pos, - vio->u.splice.vui_pipe, cnt, + vio->u.splice.vui_pipe, + range->cir_count, vio->u.splice.vui_flags); /* LU-1109: do splice read stripe by stripe otherwise if it * may make nfsd stuck if this read occupied all internal pipe @@ -856,11 +816,11 @@ static int vvp_io_read_start(const struct lu_env *env, out: if (result >= 0) { - if (result < cnt) + if (result < range->cir_count) io->ci_continue = 0; io->ci_nob += result; ll_rw_stats_tally(ll_i2sbi(inode), current->pid, vio->vui_fd, - pos, result, READ); + range->cir_pos, result, READ); result = 0; } @@ -916,7 +876,6 @@ static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io, SetPageUptodate(cl_page_vmpage(page)); cl_page_disown(env, io, page); - /* held in ll_cl_init() */ lu_ref_del(&page->cp_reference, "cl_io", io); cl_page_put(env, page); } @@ -935,7 +894,6 @@ static void write_commit_callback(const struct lu_env *env, struct cl_io *io, cl_page_disown(env, io, page); - /* held in ll_cl_init() */ lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io)); cl_page_put(env, page); } @@ -1036,7 +994,6 @@ int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io) cl_page_disown(env, io, page); - /* held in ll_cl_init() */ lu_ref_del(&page->cp_reference, "cl_io", io); cl_page_put(env, page); } @@ -1053,10 +1010,11 @@ static int vvp_io_write_start(const struct lu_env *env, struct cl_object *obj = io->ci_obj; struct inode *inode = vvp_object_inode(obj); struct ll_inode_info *lli = ll_i2info(inode); + struct file *file = vio->vui_fd->fd_file; + struct cl_io_range *range = &io->u.ci_rw.rw_range; + bool lock_inode = !lli->lli_inode_locked && + !IS_NOSEC(inode); ssize_t result = 0; - loff_t pos = io->u.ci_wr.wr.crw_pos; - size_t cnt = io->u.ci_wr.wr.crw_count; - ENTRY; if (vio->vui_io_subtype == IO_NORMAL) @@ -1071,73 +1029,86 @@ static int vvp_io_write_start(const struct lu_env *env, * out-of-order writes. */ ll_merge_attr(env, inode); - pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode); - vio->vui_iocb->ki_pos = pos; + range->cir_pos = i_size_read(inode); + io->u.ci_rw.rw_iocb.ki_pos = range->cir_pos; } else { - LASSERT(vio->vui_iocb->ki_pos == pos); + LASSERTF(io->u.ci_rw.rw_iocb.ki_pos == range->cir_pos, + "ki_pos %lld [%lld, %lld)\n", + io->u.ci_rw.rw_iocb.ki_pos, + range->cir_pos, range->cir_pos + range->cir_count); } - CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt); + CDEBUG(D_VFSTRACE, "%s: write [%llu, %llu)\n", + file_dentry(file)->d_name.name, + range->cir_pos, range->cir_pos + range->cir_count); /* The maximum Lustre file size is variable, based on the OST maximum * object size and number of stripes. This needs another check in * addition to the VFS checks earlier. 
*/ - if (pos + cnt > ll_file_maxbytes(inode)) { + if (range->cir_pos + range->cir_count > ll_file_maxbytes(inode)) { CDEBUG(D_INODE, - "%s: file "DFID" offset %llu > maxbytes "LPU64"\n", + "%s: file %s ("DFID") offset %llu > maxbytes %llu\n", ll_get_fsname(inode->i_sb, NULL, 0), - PFID(ll_inode2fid(inode)), pos + cnt, + file_dentry(file)->d_name.name, + PFID(ll_inode2fid(inode)), + range->cir_pos + range->cir_count, ll_file_maxbytes(inode)); RETURN(-EFBIG); } - if (vio->vui_iter == NULL) { - /* from a temp io in ll_cl_init(). */ - result = 0; - } else { - /* - * When using the locked AIO function (generic_file_aio_write()) - * testing has shown the inode mutex to be a limiting factor - * with multi-threaded single shared file performance. To get - * around this, we now use the lockless version. To maintain - * consistency, proper locking to protect against writes, - * trucates, etc. is handled in the higher layers of lustre. - */ -#ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER - result = generic_file_write_iter(vio->vui_iocb, vio->vui_iter); -#else - result = __generic_file_aio_write(vio->vui_iocb, - vio->vui_iter->iov, - vio->vui_iter->nr_segs, - &vio->vui_iocb->ki_pos); -#endif - if (result > 0 || result == -EIOCBQUEUED) { - ssize_t err; + /* Tests to verify we take the i_mutex correctly */ + if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_IMUTEX_SEC) && !lock_inode) + RETURN(-EINVAL); - err = generic_write_sync(vio->vui_iocb->ki_filp, - pos, result); - if (err < 0 && result > 0) - result = err; - } + if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_IMUTEX_NOSEC) && lock_inode) + RETURN(-EINVAL); + /* + * When using the locked AIO function (generic_file_aio_write()) + * testing has shown the inode mutex to be a limiting factor + * with multi-threaded single shared file performance. To get + * around this, we now use the lockless version. To maintain + * consistency, proper locking to protect against writes, + * trucates, etc. is handled in the higher layers of lustre. 
+ */ + if (lock_inode) + inode_lock(inode); + result = __generic_file_write_iter(&io->u.ci_rw.rw_iocb, + &io->u.ci_rw.rw_iter); + if (lock_inode) + inode_unlock(inode); + + if (result > 0 || result == -EIOCBQUEUED) +#ifdef HAVE_GENERIC_WRITE_SYNC_2ARGS + result = generic_write_sync(&io->u.ci_rw.rw_iocb, result); +#else + { + ssize_t err; + + err = generic_write_sync(io->u.ci_rw.rw_iocb.ki_filp, + range->cir_pos, result); + if (err < 0 && result > 0) + result = err; } +#endif + if (result > 0) { result = vvp_io_write_commit(env, io); if (vio->u.write.vui_written > 0) { result = vio->u.write.vui_written; - io->ci_nob += result; - - CDEBUG(D_VFSTRACE, "write: nob %zd, result: %zd\n", + CDEBUG(D_VFSTRACE, "%s: write nob %zd, result: %zd\n", + file_dentry(file)->d_name.name, io->ci_nob, result); + io->ci_nob += result; } } if (result > 0) { ll_file_set_flag(ll_i2info(inode), LLIF_DATA_MODIFIED); - if (result < cnt) + if (result < range->cir_count) io->ci_continue = 0; ll_rw_stats_tally(ll_i2sbi(inode), current->pid, - vio->vui_fd, pos, result, WRITE); + vio->vui_fd, range->cir_pos, result, WRITE); result = 0; } @@ -1159,12 +1130,12 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) { struct vm_fault *vmf = cfio->ft_vmf; - cfio->ft_flags = filemap_fault(cfio->ft_vma, vmf); + cfio->ft_flags = ll_filemap_fault(cfio->ft_vma, vmf); cfio->ft_flags_valid = 1; if (vmf->page) { LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n", - vmf->virtual_address); + get_vmf_address(vmf)); if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) { lock_page(vmf->page); cfio->ft_flags |= VM_FAULT_LOCKED; @@ -1176,12 +1147,12 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) } if (cfio->ft_flags & VM_FAULT_SIGBUS) { - CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address); + CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", get_vmf_address(vmf)); return -EFAULT; } if (cfio->ft_flags & VM_FAULT_OOM) { - CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address); + CDEBUG(D_PAGE, "got addr %p - OOM\n", get_vmf_address(vmf)); return -ENOMEM; } @@ -1466,20 +1437,13 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj, vio->vui_ra_valid = false; result = 0; if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) { - size_t count; struct ll_inode_info *lli = ll_i2info(inode); - count = io->u.ci_rw.crw_count; - /* "If nbyte is 0, read() will return 0 and have no other - * results." -- Single Unix Spec */ - if (count == 0) - result = 1; - else { - vio->vui_tot_count = count; -#ifndef HAVE_FILE_OPERATIONS_READ_WRITE_ITER - vio->vui_tot_nrsegs = 0; -#endif - } + vio->vui_tot_count = io->u.ci_rw.rw_range.cir_count; + /* "If nbyte is 0, read() will return 0 and have no other + * results." -- Single Unix Spec */ + if (vio->vui_tot_count == 0) + result = 1; /* for read/write, we store the jobid in the inode, and * it'll be fetched by osc when building RPC. @@ -1493,12 +1457,6 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj, io->ci_lockreq = CILR_MANDATORY; } - /* ignore layout change for generic CIT_MISC but not for glimpse. - * io context for glimpse must set ci_verify_layout to true, - * see cl_glimpse_size0() for details. */ - if (io->ci_type == CIT_MISC && !io->ci_verify_layout) - io->ci_ignore_layout = 1; - /* Enqueue layout lock and get layout version. We need to do this * even for operations requiring to open file, such as read and write, * because it might not grant layout lock in IT_OPEN. */
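
The core of the read/write path change above is the replacement of the hand-rolled iovec bookkeeping (the removed vvp_io_update_iov() and the manual segment walking in vvp_io_advance(), with vui_iov_olen and vui_tot_nrsegs) by the kernel's iov_iter helpers: iov_iter_truncate(), iov_iter_advance() and iov_iter_reexpand() from <linux/uio.h>. Below is a minimal userspace sketch of that truncate/advance/reexpand pattern; struct sketch_iter and its helpers are simplified illustrations local to this sketch, not the kernel implementation.

/*
 * Sketch of the bookkeeping performed by iov_iter_truncate(),
 * iov_iter_advance() and iov_iter_reexpand().  Simplified local types,
 * not the <linux/uio.h> structures.
 */
#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>

struct sketch_iter {
	const struct iovec *iov;	/* current segment */
	unsigned long	    nr_segs;	/* segments left, including current */
	size_t		    iov_offset;	/* bytes already consumed in *iov */
	size_t		    count;	/* total bytes still visible */
};

/* Hide everything beyond 'count' bytes (cf. the stripe-sized truncate
 * in vvp_io_rw_lock()). */
static void sketch_truncate(struct sketch_iter *i, size_t count)
{
	if (i->count > count)
		i->count = count;
}

/* Make the remaining bytes visible again once this chunk is done
 * (cf. iov_iter_reexpand() in vvp_io_advance()). */
static void sketch_reexpand(struct sketch_iter *i, size_t count)
{
	i->count = count;
}

/* Consume 'bytes', stepping over fully used segments. */
static void sketch_advance(struct sketch_iter *i, size_t bytes)
{
	while (bytes > 0 && i->nr_segs > 0) {
		size_t left = i->iov->iov_len - i->iov_offset;

		if (bytes < left) {
			i->iov_offset += bytes;
			i->count -= bytes;
			return;
		}
		bytes -= left;
		i->count -= left;
		i->iov++;
		i->nr_segs--;
		i->iov_offset = 0;
	}
}

int main(void)
{
	char a[64], b[64];
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	struct sketch_iter it = {
		.iov = iov, .nr_segs = 2, .iov_offset = 0, .count = 128,
	};
	size_t tot = it.count;

	sketch_truncate(&it, 100);	/* limit one pass to 100 bytes */
	sketch_advance(&it, 100);	/* 100 bytes were transferred */
	tot -= 100;
	sketch_reexpand(&it, tot);	/* expose the remaining 28 bytes */
	printf("remaining %zu bytes, %lu segs left\n", it.count, it.nr_segs);
	return 0;
}

With the real helpers the iovec array itself stays const; only the iterator's count and cursor move, which is why the patch can drop vui_iov_olen, vui_tot_nrsegs and the casts that stripped const from vui_iter->iov in the old vvp_io_advance() path.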