#define DEBUG_SUBSYSTEM S_LLITE
-#ifndef __KERNEL__
-# error This file is kernel only.
-#endif
#include <obd.h>
-#include <lustre_lite.h>
-
#include "vvp_internal.h"
static struct vvp_io *cl2vvp_io(const struct lu_env *env,
unsigned long addr;
unsigned long seg;
ssize_t count;
- int result;
+ int result = 0;
ENTRY;
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
struct inode *inode = vma->vm_file->f_dentry->d_inode;
int flags = CEF_MUST;
- if (ll_file_nolock(vma->vm_file)) {
- /*
- * For no lock case, a lockless lock will be
- * generated.
- */
- flags = CEF_NEVER;
- }
+ if (ll_file_nolock(vma->vm_file)) {
+ /*
+ * The no-lock case is not allowed for mmap, so fail with -EINVAL
+ */
+ result = -EINVAL;
+ break;
+ }
/*
* XXX: Required lock mode can be weakened: CIT_WRITE
descr->cld_mode, descr->cld_start,
descr->cld_end);
- if (result < 0)
- RETURN(result);
+ if (result < 0)
+ break;
- if (vma->vm_end - addr >= count)
- break;
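+ /* the remainder of the range fits in this VMA; stop walking */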
+ if (vma->vm_end - addr >= count)
+ break;
- count -= vma->vm_end - addr;
- addr = vma->vm_end;
- }
- up_read(&mm->mmap_sem);
- }
- RETURN(0);
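+ /* this VMA covers only part of the range; advance past it */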
+ count -= vma->vm_end - addr;
+ addr = vma->vm_end;
+ }
+ up_read(&mm->mmap_sem);
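+ /* bail out on error now that mmap_sem has been dropped */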
+ if (result < 0)
+ break;
+ }
+ RETURN(result);
}
static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
static int vvp_io_read_lock(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct cl_io *io = ios->cis_io;
- struct ll_inode_info *lli = ll_i2info(ccc_object_inode(io->ci_obj));
- int result;
+ struct cl_io *io = ios->cis_io;
+ struct cl_io_rw_common *rd = &io->u.ci_rd.rd;
+ int result;
- ENTRY;
- /* XXX: Layer violation, we shouldn't see lsm at llite level. */
- if (lli->lli_has_smd) /* lsm-less file doesn't need to lock */
- result = vvp_io_rw_lock(env, io, CLM_READ,
- io->u.ci_rd.rd.crw_pos,
- io->u.ci_rd.rd.crw_pos +
- io->u.ci_rd.rd.crw_count - 1);
- else
- result = 0;
- RETURN(result);
+ ENTRY;
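+ /* take a CLM_READ extent lock covering the whole
+ * [pos, pos + count - 1] range of the read */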
+ result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos,
+ rd->crw_pos + rd->crw_count - 1);
+ RETURN(result);
}
static int vvp_io_fault_lock(const struct lu_env *env,
page = cl_page_list_first(plist);
if (plist->pl_nr == 1) {
cl_page_clip(env, page, from, to);
- } else if (from > 0) {
- cl_page_clip(env, page, from, PAGE_SIZE);
} else {
- page = cl_page_list_last(plist);
- cl_page_clip(env, page, 0, to);
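+ /* more than one page: only the first and last pages can be partial */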
+ if (from > 0)
+ cl_page_clip(env, page, from, PAGE_SIZE);
+ if (to != PAGE_SIZE) {
+ page = cl_page_list_last(plist);
+ cl_page_clip(env, page, 0, to);
+ }
}
}
cl_page_disown(env, io, page);
/* held in ll_cl_init() */
- lu_ref_del(&page->cp_reference, "cl_io", io);
+ lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
cl_page_put(env, page);
}
}
/* update inode size */
- ll_merge_lvb(env, inode);
+ ll_merge_attr(env, inode);
/* Any pages still in the queue failed to commit; discard them
 * unless they were dirtied before. */
ENTRY;
if (!can_populate_pages(env, io, inode))
- return 0;
+ RETURN(0);
if (cl_io_is_append(io)) {
/*
* PARALLEL IO: This has to be changed for parallel IO doing
* out-of-order writes.
*/
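+ /* refresh the inode attributes so that i_size_read()
+ * below returns the current file size */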
+ ll_merge_attr(env, inode);
pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
cio->cui_iocb->ki_pos = pos;
} else {
LASSERT(cio->cui_iocb->ki_pos == pos);
}
- CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
+ CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
- if (cio->cui_iov == NULL) /* from a temp io in ll_cl_init(). */
- result = 0;
- else
- result = generic_file_aio_write(cio->cui_iocb,
- cio->cui_iov, cio->cui_nrsegs,
- cio->cui_iocb->ki_pos);
+ if (cio->cui_iov == NULL) {
+ /* from a temp io in ll_cl_init(). */
+ result = 0;
+ } else {
+ /*
+ * When using the locked AIO function (generic_file_aio_write())
+ * testing has shown the inode mutex to be a limiting factor
+ * with multi-threaded single shared file performance. To get
+ * around this, we now use the lockless version. To maintain
+ * consistency, proper locking to protect against writes,
+ * truncates, etc. is handled in the higher layers of Lustre.
+ */
+ result = __generic_file_aio_write(cio->cui_iocb,
+ cio->cui_iov, cio->cui_nrsegs,
+ &cio->cui_iocb->ki_pos);
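+ /*
+ * __generic_file_aio_write() skips the generic_write_sync()
+ * step that generic_file_aio_write() would have done, so
+ * apply it here to preserve O_SYNC/IS_SYNC semantics.
+ */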
+ if (result > 0 || result == -EIOCBQUEUED) {
+ ssize_t err;
+
+ err = generic_write_sync(cio->cui_iocb->ki_filp,
+ pos, result);
+ if (err < 0 && result > 0)
+ result = err;
+ }
+ }
if (result > 0) {
result = vvp_io_write_commit(env, io);
if (cio->u.write.cui_written > 0) {
struct vm_fault *vmf = cfio->fault.ft_vmf;
cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
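+ /* record that ft_flags now holds the result of filemap_fault() */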
+ cfio->fault.ft_flags_valid = 1;
if (vmf->page) {
LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
vmf->virtual_address);
if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
lock_page(vmf->page);
- cfio->fault.ft_flags &= VM_FAULT_LOCKED;
+ cfio->fault.ft_flags |= VM_FAULT_LOCKED;
}
cfio->ft_vmpage = vmf->page;