-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011 Whamcloud, Inc.
- *
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Implementation of cl_io for VVP layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
*/
#define DEBUG_SUBSYSTEM S_LLITE
return vio->cui_io_subtype == IO_NORMAL;
}
+/**
+ * For swapping layouts. The file's layout may have changed while this
+ * io was in flight, so to avoid populating pages into the wrong stripe,
+ * we have to verify that the layout is still current. This works
+ * because any process swapping layouts has to hold the group lock.
+ */
+static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
+ struct inode *inode)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ccc_io *cio = ccc_env_io(env);
+ bool rc = true;
+
+ switch (io->ci_type) {
+ case CIT_READ:
+ case CIT_WRITE:
+		/* no lock is needed to check lli_layout_gen here: we hold
+		 * the extent lock, and the GROUP lock must be held to swap
+		 * a layout */
+ if (lli->lli_layout_gen != cio->cui_layout_gen) {
+ io->ci_need_restart = 1;
+			/* this will cause the application to get a short
+			 * read/write */
+ io->ci_continue = 0;
+ rc = false;
+ }
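+		/* deliberate fallthrough */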
+ case CIT_FAULT:
+		/* fault is okay because we already have a page. */
+ default:
+ break;
+ }
+
+ return rc;
+}
+
/*****************************************************************************
*
* io operations.
static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = io->ci_obj;
+ struct ccc_io *cio = cl2ccc_io(env, ios);
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- if (io->ci_type == CIT_READ) {
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct ccc_io *cio = cl2ccc_io(env, ios);
- if (vio->cui_ra_window_set)
- ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);
- }
+ CDEBUG(D_VFSTRACE, "ignore/verify layout %d/%d, layout version %d.\n",
+ io->ci_ignore_layout, io->ci_verify_layout, cio->cui_layout_gen);
+ if (!io->ci_ignore_layout && io->ci_verify_layout) {
+ __u32 gen = 0;
+
+ /* check layout version */
+ ll_layout_refresh(ccc_object_inode(obj), &gen);
+ io->ci_need_restart = cio->cui_layout_gen != gen;
+ if (io->ci_need_restart)
+ CDEBUG(D_VFSTRACE, "layout changed from %d to %d.\n",
+ cio->cui_layout_gen, gen);
+ }
}
static void vvp_io_fault_fini(const struct lu_env *env,
struct ccc_io *vio, struct cl_io *io)
{
struct ccc_thread_info *cti = ccc_env_info(env);
+ struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct cl_lock_descr *descr = &cti->cti_descr;
ldlm_policy_data_t policy;
if (!cl_is_normalio(env, io))
RETURN(0);
+	if (vio->cui_iov == NULL) /* NFS or loopback device write */
+ RETURN(0);
+
+	/* No mm (e.g. NFS)? Then there are no VMAs either. */
+ if (mm == NULL)
+ RETURN(0);
+
for (seg = 0; seg < vio->cui_nrsegs; seg++) {
const struct iovec *iv = &vio->cui_iov[seg];
count += addr & (~CFS_PAGE_MASK);
addr &= CFS_PAGE_MASK;
- while((vma = our_vma(addr, count)) != NULL) {
+
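+		/* our_vma() walks the process VMA list, so mmap_sem must be
+		 * held for read across the lookup loop below */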
+ down_read(&mm->mmap_sem);
+		while ((vma = our_vma(mm, addr, count)) != NULL) {
struct inode *inode = vma->vm_file->f_dentry->d_inode;
int flags = CEF_MUST;
if (ll_file_nolock(vma->vm_file)) {
- /*
+ /*
* For no lock case, a lockless lock will be
* generated.
*/
count -= vma->vm_end - addr;
addr = vma->vm_end;
}
+ up_read(&mm->mmap_sem);
}
RETURN(0);
}
ENTRY;
/* XXX: Layer violation, we shouldn't see lsm at llite level. */
- if (lli->lli_smd != NULL) /* lsm-less file, don't need to lock */
+ if (lli->lli_has_smd) /* lsm-less file doesn't need to lock */
result = vvp_io_rw_lock(env, io, CLM_READ,
io->u.ci_rd.rd.crw_pos,
io->u.ci_rd.rd.crw_pos +
}
static int vvp_io_setattr_iter_init(const struct lu_env *env,
- const struct cl_io_slice *ios)
+ const struct cl_io_slice *ios)
{
- struct ccc_io *cio = ccc_env_io(env);
- struct inode *inode = ccc_object_inode(ios->cis_obj);
-
- /*
- * We really need to get our PW lock before we change inode->i_size.
- * If we don't we can race with other i_size updaters on our node,
- * like ll_file_read. We can also race with i_size propogation to
- * other nodes through dirtying and writeback of final cached pages.
- * This last one is especially bad for racing o_append users on other
- * nodes.
- */
- UNLOCK_INODE_MUTEX(inode);
- if (cl_io_is_trunc(ios->cis_io))
- UP_WRITE_I_ALLOC_SEM(inode);
- cio->u.setattr.cui_locks_released = 1;
- return 0;
+ return 0;
}
/**
static int vvp_io_setattr_lock(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct ccc_io *cio = ccc_env_io(env);
- struct cl_io *io = ios->cis_io;
- size_t new_size;
- __u32 enqflags = 0;
+ struct ccc_io *cio = ccc_env_io(env);
+ struct cl_io *io = ios->cis_io;
+ __u64 new_size;
+ __u32 enqflags = 0;
if (cl_io_is_trunc(io)) {
new_size = io->u.ci_setattr.sa_attr.lvb_size;
static int vvp_do_vmtruncate(struct inode *inode, size_t size)
{
- int result;
- /*
- * Only ll_inode_size_lock is taken at this level. lov_stripe_lock()
- * is grabbed by ll_truncate() only over call to obd_adjust_kms(). If
- * vmtruncate returns 0, then ll_truncate dropped ll_inode_size_lock()
- */
- ll_inode_size_lock(inode, 0);
- result = vmtruncate(inode, size);
- if (result != 0)
- ll_inode_size_unlock(inode, 0);
-
- return result;
+ int result;
+ /*
+ * Only ll_inode_size_lock is taken at this level.
+ */
+ ll_inode_size_lock(inode);
+ result = vmtruncate(inode, size);
+ ll_inode_size_unlock(inode);
+
+ return result;
}
static int vvp_io_setattr_trunc(const struct lu_env *env,
const struct cl_io_slice *ios,
struct inode *inode, loff_t size)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = ios->cis_obj;
- pgoff_t start = cl_index(obj, size);
- int result;
-
- DOWN_WRITE_I_ALLOC_SEM(inode);
-
- result = vvp_do_vmtruncate(inode, size);
-
- /*
- * If a page is partially truncated, keep it owned across truncate to
- * prevent... races.
- *
- * XXX this properly belongs to osc, because races in question are OST
- * specific.
- */
- if (cl_offset(obj, start) != size) {
- struct cl_object_header *hdr;
-
- hdr = cl_object_header(obj);
- cfs_spin_lock(&hdr->coh_page_guard);
- vio->cui_partpage = cl_page_lookup(hdr, start);
- cfs_spin_unlock(&hdr->coh_page_guard);
-
- if (vio->cui_partpage != NULL)
- /*
- * Wait for the transfer completion for a partially
- * truncated page to avoid dead-locking an OST with
- * the concurrent page-wise overlapping WRITE and
- * PUNCH requests. BUG:17397.
- *
- * Partial page is disowned in vvp_io_trunc_end().
- */
- cl_page_own(env, io, vio->cui_partpage);
- } else
- vio->cui_partpage = NULL;
- return result;
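+	/* wait for any outstanding direct IO against this inode to
+	 * complete before the truncate proceeds */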
+ inode_dio_wait(inode);
+ return 0;
}
static int vvp_io_setattr_time(const struct lu_env *env,
}
static int vvp_io_setattr_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
+ const struct cl_io_slice *ios)
{
- struct ccc_io *cio = cl2ccc_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(io->ci_obj);
-
- LASSERT(cio->u.setattr.cui_locks_released);
-
- LOCK_INODE_MUTEX(inode);
- cio->u.setattr.cui_locks_released = 0;
-
- if (cl_io_is_trunc(io))
- return vvp_io_setattr_trunc(env, ios, inode,
- io->u.ci_setattr.sa_attr.lvb_size);
- else
- return vvp_io_setattr_time(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct inode *inode = ccc_object_inode(io->ci_obj);
+
+ mutex_lock(&inode->i_mutex);
+ if (cl_io_is_trunc(io))
+ return vvp_io_setattr_trunc(env, ios, inode,
+ io->u.ci_setattr.sa_attr.lvb_size);
+ else
+ return vvp_io_setattr_time(env, ios);
}
static void vvp_io_setattr_end(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(io->ci_obj);
-
- if (!cl_io_is_trunc(io))
- return;
- if (vio->cui_partpage != NULL) {
- cl_page_disown(env, ios->cis_io, vio->cui_partpage);
- cl_page_put(env, vio->cui_partpage);
- vio->cui_partpage = NULL;
- }
-
- /*
- * Do vmtruncate again, to remove possible stale pages populated by
- * competing read threads. bz20645.
- */
- vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
+ struct cl_io *io = ios->cis_io;
+ struct inode *inode = ccc_object_inode(io->ci_obj);
+
+ if (cl_io_is_trunc(io)) {
+		/* Truncate the in-memory pages; they must be clean
+		 * because osc has already been notified to destroy the
+		 * osc_extents. */
+ vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
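+		/* balances the inode_dio_wait() call made in
+		 * vvp_io_setattr_trunc() */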
+ inode_dio_write_done(inode);
+ }
+ mutex_unlock(&inode->i_mutex);
}
static void vvp_io_setattr_fini(const struct lu_env *env,
- const struct cl_io_slice *ios)
+ const struct cl_io_slice *ios)
{
- struct ccc_io *cio = ccc_env_io(env);
- struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(ios->cis_io->ci_obj);
-
- if (cio->u.setattr.cui_locks_released) {
- LOCK_INODE_MUTEX(inode);
- if (cl_io_is_trunc(io))
- DOWN_WRITE_I_ALLOC_SEM(inode);
- cio->u.setattr.cui_locks_released = 0;
- }
- vvp_io_fini(env, ios);
+ vvp_io_fini(env, ios);
}
#ifdef HAVE_FILE_READV
CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
- result = ccc_prep_size(env, obj, io, pos, tot, 1, &exceed);
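+	/* the layout may have been swapped while this io was queued; if so,
+	 * give up and let cl_io_loop() restart the io under the new layout */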
+ if (!can_populate_pages(env, io, inode))
+ return 0;
+
+ result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
if (result != 0)
return result;
else if (exceed != 0)
result = generic_file_splice_read(file, &pos,
vio->u.splice.cui_pipe, cnt,
vio->u.splice.cui_flags);
+		/* LU-1109: do the splice read stripe by stripe, otherwise
+		 * it may get nfsd stuck if this read occupies all of the
+		 * internal pipe buffers. */
+ io->ci_continue = 0;
break;
#endif
default:
}
out:
- if (result >= 0) {
- if (result < cnt)
- io->ci_continue = 0;
- io->ci_nob += result;
- ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
- cio->cui_fd, pos, result, 0);
- result = 0;
- }
- return result;
+ if (result >= 0) {
+ if (result < cnt)
+ io->ci_continue = 0;
+ io->ci_nob += result;
+ ll_rw_stats_tally(ll_i2sbi(inode), current->pid, cio->cui_fd,
+ pos, result, READ);
+ result = 0;
+ }
+
+ return result;
+}
+
+static void vvp_io_read_fini(const struct lu_env *env,
+			     const struct cl_io_slice *ios)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct ccc_io *cio = cl2ccc_io(env, ios);
+
+ if (vio->cui_ra_window_set)
+ ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);
+
+ vvp_io_fini(env, ios);
}
static int vvp_io_write_start(const struct lu_env *env,
ENTRY;
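+	/* bail out early if the layout has changed; cl_io_loop() will
+	 * restart the write under the new layout (see can_populate_pages()) */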
+ if (!can_populate_pages(env, io, inode))
+ return 0;
+
if (cl_io_is_append(io)) {
/*
* PARALLEL IO This has to be changed for parallel IO doing
else
result = lustre_generic_file_write(file, cio, &pos);
- if (result > 0) {
- if (result < cnt)
- io->ci_continue = 0;
- io->ci_nob += result;
- ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
- cio->cui_fd, pos, result, 0);
- result = 0;
- }
- RETURN(result);
+ if (result > 0) {
+ if (result < cnt)
+ io->ci_continue = 0;
+ io->ci_nob += result;
+ ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
+ cio->cui_fd, pos, result, WRITE);
+ result = 0;
+ }
+
+ RETURN(result);
}
#ifndef HAVE_VM_OP_FAULT
cfio->nopage.ft_address, (long)cfio->nopage.ft_type);
cfio->ft_vmpage = vmpage;
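+	/* callers now expect the vmpage to be returned locked; it is
+	 * unlocked again at the end of vvp_io_fault_start() */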
+ lock_page(vmpage);
return 0;
}
#else
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
- cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, cfio->fault.ft_vmf);
-
- if (cfio->fault.ft_vmf->page) {
- LL_CDEBUG_PAGE(D_PAGE, cfio->fault.ft_vmf->page,
- "got addr %p type NOPAGE\n",
- cfio->fault.ft_vmf->virtual_address);
- /*XXX workaround to bug in CLIO - he deadlocked with
- lock cancel if page locked */
- if (likely(cfio->fault.ft_flags & VM_FAULT_LOCKED)) {
- unlock_page(cfio->fault.ft_vmf->page);
- cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
+ struct vm_fault *vmf = cfio->fault.ft_vmf;
+
+ cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
+
+ if (vmf->page) {
+ LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
+ vmf->virtual_address);
+ if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
+ lock_page(vmf->page);
+			cfio->fault.ft_flags |= VM_FAULT_LOCKED;
}
- cfio->ft_vmpage = cfio->fault.ft_vmf->page;
+ cfio->ft_vmpage = vmf->page;
return 0;
}
- if (unlikely (cfio->fault.ft_flags & VM_FAULT_ERROR)) {
- CDEBUG(D_PAGE, "got addr %p - SIGBUS\n",
- cfio->fault.ft_vmf->virtual_address);
+ if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
+ CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
return -EFAULT;
}
- if (unlikely (cfio->fault.ft_flags & VM_FAULT_NOPAGE)) {
- CDEBUG(D_PAGE, "got addr %p - OOM\n",
- cfio->fault.ft_vmf->virtual_address);
+ if (cfio->fault.ft_flags & VM_FAULT_OOM) {
+ CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
return -ENOMEM;
}
- if (unlikely(cfio->fault.ft_flags & VM_FAULT_RETRY))
+ if (cfio->fault.ft_flags & VM_FAULT_RETRY)
return -EAGAIN;
- CERROR("unknow error in page fault!\n");
+ CERROR("unknow error in page fault %d!\n", cfio->fault.ft_flags);
return -EINVAL;
}
static int vvp_io_fault_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
- struct cl_fault_io *fio = &io->u.ci_fault;
- struct vvp_fault_io *cfio = &vio->u.fault;
- loff_t offset;
- int kernel_result = 0;
- int result = 0;
- struct cl_page *page;
- loff_t size;
- pgoff_t last; /* last page in a file data region */
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = io->ci_obj;
+ struct inode *inode = ccc_object_inode(obj);
+ struct cl_fault_io *fio = &io->u.ci_fault;
+ struct vvp_fault_io *cfio = &vio->u.fault;
+ loff_t offset;
+ int result = 0;
+ cfs_page_t *vmpage = NULL;
+ struct cl_page *page;
+ loff_t size;
+ pgoff_t last; /* last page in a file data region */
if (fio->ft_executable &&
LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
/* offset of the last byte on the page */
offset = cl_offset(obj, fio->ft_index + 1) - 1;
LASSERT(cl_index(obj, offset) == fio->ft_index);
- result = ccc_prep_size(env, obj, io, 0, offset + 1, 0, NULL);
+ result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
if (result != 0)
return result;
- /* must return unlocked page */
- kernel_result = vvp_io_kernel_fault(cfio);
- if (kernel_result != 0)
- return kernel_result;
+ /* must return locked page */
+ if (fio->ft_mkwrite) {
+ LASSERT(cfio->ft_vmpage != NULL);
+ lock_page(cfio->ft_vmpage);
+ } else {
+ result = vvp_io_kernel_fault(cfio);
+ if (result != 0)
+ return result;
+ }
- if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE)) {
- truncate_inode_pages_range(inode->i_mapping,
- cl_offset(obj, fio->ft_index), offset);
- }
+ vmpage = cfio->ft_vmpage;
+ LASSERT(PageLocked(vmpage));
- /* Temporarily lock vmpage to keep cl_page_find() happy. */
- lock_page(cfio->ft_vmpage);
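+	/* fault injection: simulate a truncate racing with this fault by
+	 * invalidating the page */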
+ if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
+ ll_invalidate_page(vmpage);
+ size = i_size_read(inode);
/* Though we have already held a cl_lock upon this page, but
* it still can be truncated locally. */
- if (unlikely(cfio->ft_vmpage->mapping == NULL)) {
- unlock_page(cfio->ft_vmpage);
-
+ if (unlikely((vmpage->mapping != inode->i_mapping) ||
+ (page_offset(vmpage) > size))) {
CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");
/* return +1 to stop cl_io_loop() and ll_fault() will catch
* and retry. */
- return +1;
+ GOTO(out, result = +1);
}
- page = cl_page_find(env, obj, fio->ft_index, cfio->ft_vmpage,
- CPT_CACHEABLE);
- unlock_page(cfio->ft_vmpage);
- if (IS_ERR(page)) {
- page_cache_release(cfio->ft_vmpage);
- cfio->ft_vmpage = NULL;
- return PTR_ERR(page);
- }
- size = i_size_read(inode);
- last = cl_index(obj, size - 1);
+	if (fio->ft_mkwrite) {
+ pgoff_t last_index;
+		/*
+		 * Capture the size while holding the lli_trunc_sem from
+		 * above; we want to complete the whole mkwrite action while
+		 * holding this lock, and we must make sure that we are not
+		 * past the end of the file.
+		 */
+ last_index = cl_index(obj, size - 1);
+ if (last_index < fio->ft_index) {
+ CDEBUG(D_PAGE,
+ "llite: mkwrite and truncate race happened: "
+ "%p: 0x%lx 0x%lx\n",
+			       vmpage->mapping, fio->ft_index, last_index);
+			/*
+			 * We need to return an error if we are past the end
+			 * of the file. This propagates up the call stack to
+			 * ll_page_mkwrite, where we return VM_FAULT_NOPAGE.
+			 * Any non-negative value returned here is silently
+			 * converted to 0. If vmpage->mapping is NULL, the
+			 * error code is converted back to ENODATA in
+			 * ll_page_mkwrite0. Thus we return -ENODATA to
+			 * handle both cases.
+			 */
+ GOTO(out, result = -ENODATA);
+ }
+ }
+
+ page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
+ if (IS_ERR(page))
+ GOTO(out, result = PTR_ERR(page));
+
+	/* if the page is going to be written, we should add this page into
+	 * the cache early */
+ if (fio->ft_mkwrite) {
+ wait_on_page_writeback(vmpage);
+ if (set_page_dirty(vmpage)) {
+ struct ccc_page *cp;
+
+ /* vvp_page_assume() calls wait_on_page_writeback(). */
+ cl_page_assume(env, io, page);
+
+ cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
+ vvp_write_pending(cl2ccc(obj), cp);
+
+			/* Do not set the Dirty bit here, so that if IO is
+			 * started before the page is really made dirty, we
+			 * still have a chance to detect it. */
+ result = cl_page_cache_add(env, io, page, CRT_WRITE);
+ LASSERT(cl_page_is_owned(page, io));
+
+ vmpage = NULL;
+ if (result < 0) {
+ cl_page_unmap(env, io, page);
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
+
+ cl_page_put(env, page);
+
+ /* we're in big trouble, what can we do now? */
+ if (result == -EDQUOT)
+ result = -ENOSPC;
+ GOTO(out, result);
+ } else
+ cl_page_disown(env, io, page);
+ }
+ }
+
+ last = cl_index(obj, size - 1);
+	/*
+	 * For a mkwrite action, ft_index must not be beyond the end of the
+	 * file: that case was already caught above, so check that our
+	 * assertion still holds.
+	 */
+ LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
if (fio->ft_index == last)
/*
* Last page is mapped partially.
*/
fio->ft_nob = size - cl_offset(obj, fio->ft_index);
- else
+ else
fio->ft_nob = cl_page_size(obj);
- lu_ref_add(&page->cp_reference, "fault", io);
- fio->ft_page = page;
- /*
- * Certain 2.6 kernels return not-NULL from
- * filemap_nopage() when page is beyond the file size,
- * on the grounds that "An external ptracer can access
- * pages that normally aren't accessible.." Don't
- * propagate such page fault to the lower layers to
- * avoid side-effects like KMS updates.
- */
- if (fio->ft_index > last)
- result = +1;
+ lu_ref_add(&page->cp_reference, "fault", io);
+ fio->ft_page = page;
+ EXIT;
- return result;
+out:
+ /* return unlocked vmpage to avoid deadlocking */
+ if (vmpage != NULL)
+ unlock_page(vmpage);
+#ifdef HAVE_VM_OP_FAULT
+ cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
+#endif
+ return result;
+}
+
+static int vvp_io_fsync_start(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+	/* we should set the TOWRITE tag on each dirty page in the radix
+	 * tree to verify that the pages get written out, but this is
+	 * difficult because of races. */
+ return 0;
}
static int vvp_io_read_page(const struct lu_env *env,
queue = &io->ci_queue;
cl_2queue_init_page(queue, page);
- result = cl_io_submit_sync(env, io, crt, queue, CRP_NORMAL, 0);
+ result = cl_io_submit_sync(env, io, crt, queue, 0);
LASSERT(cl_page_is_owned(page, io));
if (crt == CRT_READ)
* purposes here we can treat it like i_size.
*/
if (attr->cat_kms <= offset) {
- char *kaddr = kmap_atomic(cp->cpg_page, KM_USER0);
+ char *kaddr = ll_kmap_atomic(cp->cpg_page, KM_USER0);
memset(kaddr, 0, cl_page_size(obj));
- kunmap_atomic(kaddr, KM_USER0);
+ ll_kunmap_atomic(kaddr, KM_USER0);
} else if (cp->cpg_defer_uptodate)
cp->cpg_ra_used = 1;
else
struct cl_page *pg = slice->cpl_page;
struct inode *inode = ccc_object_inode(obj);
struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
cfs_page_t *vmpage = cp->cpg_page;
int result;
*/
if (!PageDirty(vmpage)) {
tallyop = LPROC_LL_DIRTY_MISSES;
- vvp_write_pending(cl2ccc(obj), cp);
- set_page_dirty(vmpage);
- /* ll_set_page_dirty() does the same for now, but
- * it will not soon. */
- vvp_write_pending(cl2ccc(obj), cp);
result = cl_page_cache_add(env, io, pg, CRT_WRITE);
- if (result == -EDQUOT) {
+ if (result == 0) {
+ /* page was added into cache successfully. */
+ set_page_dirty(vmpage);
+ vvp_write_pending(cl2ccc(obj), cp);
+ } else if (result == -EDQUOT) {
pgoff_t last_index = i_size_read(inode) >> CFS_PAGE_SHIFT;
bool need_clip = true;
}
ll_stats_ops_tally(sbi, tallyop, 1);
+	/* The inode should be marked DIRTY even if no new page was marked
+	 * DIRTY, because the page might not have been flushed between two
+	 * modifications. It is important that the file be marked DIRTY as
+	 * soon as the I/O is done: when the cache is flushed, the file may
+	 * already be closed and then it is too late to warn the MDT.
+	 * It is acceptable for the file to be marked DIRTY even if the I/O
+	 * is dropped for some reason before being flushed to the OST.
+	 */
+ if (result == 0) {
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags |= LLIF_DATA_MODIFIED;
+ spin_unlock(&lli->lli_lock);
+ }
+
size = cl_offset(obj, pg->cp_index) + to;
- ll_inode_size_lock(inode, 0);
+ ll_inode_size_lock(inode);
if (result == 0) {
if (size > i_size_read(inode)) {
cl_isize_write_nolock(inode, size);
if (size > i_size_read(inode))
cl_page_discard(env, io, pg);
}
- ll_inode_size_unlock(inode, 0);
- RETURN(result);
+ ll_inode_size_unlock(inode);
+ RETURN(result);
}
static const struct cl_io_operations vvp_io_ops = {
.op = {
[CIT_READ] = {
- .cio_fini = vvp_io_fini,
+ .cio_fini = vvp_io_read_fini,
.cio_lock = vvp_io_read_lock,
.cio_start = vvp_io_read_start,
.cio_advance = ccc_io_advance
.cio_start = vvp_io_fault_start,
.cio_end = ccc_io_end
},
+ [CIT_FSYNC] = {
+ .cio_start = vvp_io_fsync_start,
+ .cio_fini = vvp_io_fini
+ },
[CIT_MISC] = {
.cio_fini = vvp_io_fini
}
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
struct cl_io *io)
{
- struct vvp_io *vio = vvp_env_io(env);
- struct ccc_io *cio = ccc_env_io(env);
- struct inode *inode = ccc_object_inode(obj);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct vvp_io *vio = vvp_env_io(env);
+ struct ccc_io *cio = ccc_env_io(env);
+ struct inode *inode = ccc_object_inode(obj);
int result;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
CL_IO_SLICE_CLEAN(cio, cui_cl);
cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
vio->cui_ra_window_set = 0;
- result = 0;
- if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
- size_t count;
+ result = 0;
+ if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
+ size_t count;
+ struct ll_inode_info *lli = ll_i2info(inode);
count = io->u.ci_rw.crw_count;
/* "If nbyte is 0, read() will return 0 and have no other
cio->cui_tot_count = count;
cio->cui_tot_nrsegs = 0;
}
- } else if (io->ci_type == CIT_SETATTR) {
- if (cl_io_is_trunc(io))
- /* lockless truncate? */
- ll_stats_ops_tally(sbi, LPROC_LL_TRUNC, 1);
- else
- io->ci_lockreq = CILR_MANDATORY;
- }
- RETURN(result);
+		/* for read/write, we store the jobid in the inode, and
+		 * it'll be fetched by osc when building the RPC.
+		 *
+		 * this is not accurate if the file is shared by different
+		 * jobs.
+		 */
+ lustre_get_jobid(lli->lli_jobid);
+ } else if (io->ci_type == CIT_SETATTR) {
+ if (!cl_io_is_trunc(io))
+ io->ci_lockreq = CILR_MANDATORY;
+ }
+
+	/* ignore layout changes for generic CIT_MISC operations, but not
+	 * for glimpse: the io context for glimpse must set ci_verify_layout
+	 * to true, see cl_glimpse_size0() for details. */
+ if (io->ci_type == CIT_MISC && !io->ci_verify_layout)
+ io->ci_ignore_layout = 1;
+
+ /* Enqueue layout lock and get layout version. We need to do this
+ * even for operations requiring to open file, such as read and write,
+ * because it might not grant layout lock in IT_OPEN. */
+ if (result == 0 && !io->ci_ignore_layout) {
+ result = ll_layout_refresh(inode, &cio->cui_layout_gen);
+ if (result == -ENOENT)
+			/* If the inode on the MDS has been removed, but the
+			 * objects on the OSTs haven't been destroyed yet
+			 * (async unlink), the layout fetch will return
+			 * -ENOENT; ignore this error and continue with the
+			 * dirty flush. LU-3230. */
+ result = 0;
+ if (result < 0)
+ CERROR("%s: refresh file layout " DFID " error %d.\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(lu_object_fid(&obj->co_lu)), result);
+ }
+
+ RETURN(result);
}
static struct vvp_io *cl2vvp_io(const struct lu_env *env,