* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
-#ifndef AUTOCONF_INCLUDED
-#include <linux/config.h>
-#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
-#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/fs.h>
-#include <asm/uaccess.h>
-#include <linux/mm.h>
#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
#define DEBUG_SUBSYSTEM S_LLITE
static void policy_from_vma(ldlm_policy_data_t *policy,
                            struct vm_area_struct *vma, unsigned long addr,
                            size_t count)
{
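        /* Map the user range [addr, addr + count) to the file byte extent
         * it backs: vm_pgoff is the file page at which this vma starts. */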
policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
- (vma->vm_pgoff << CFS_PAGE_SHIFT);
+ (vma->vm_pgoff << PAGE_CACHE_SHIFT);
policy->l_extent.end = (policy->l_extent.start + count - 1) |
~CFS_PAGE_MASK;
}
-struct vm_area_struct * our_vma(unsigned long addr, size_t count)
+struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
+ size_t count)
{
- struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *ret = NULL;
ENTRY;
- /* No MM (e.g. NFS)? No vmas too. */
- if (!mm)
- RETURN(NULL);
+ /* mmap_sem must have been held by caller. */
+ LASSERT(!down_write_trylock(&mm->mmap_sem));
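+       /* down_write_trylock() fails while mmap_sem is held in either
+        * read or write mode, so a successful trylock means the caller
+        * broke the locking contract and the assertion fires. */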
- spin_lock(&mm->page_table_lock);
        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
- spin_unlock(&mm->page_table_lock);
RETURN(ret);
}
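
/* A typical caller holds mmap_sem across the lookup and any use of the
 * returned vma, for example:
 *
 *      down_read(&mm->mmap_sem);
 *      vma = our_vma(mm, addr, count);
 *      if (vma != NULL)
 *              policy_from_vma(&policy, vma, addr, count);
 *      up_read(&mm->mmap_sem);
 */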
/**
 * API-independent part of page fault initialization.
 *
 * \param vma      virtual memory area where the fault occurred
 * \param env_ret  lu_env set up for this fault, returned to the caller
 * \param nest     nesting context used to obtain the env
 * \param index    page index corresponding to the fault
 * \param ra_flags saved vma readahead flags, returned to the caller
 *
 * \return allocated and initialized io for the fault operation
 * \return other error codes from cl_io_init.
 */
struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
- struct lu_env **env_ret,
- struct cl_env_nest *nest,
- pgoff_t index, unsigned long *ra_flags)
+ struct lu_env **env_ret,
+ struct cl_env_nest *nest,
+ pgoff_t index, unsigned long *ra_flags)
{
- struct file *file = vma->vm_file;
- struct inode *inode = file->f_dentry->d_inode;
- struct cl_io *io;
- struct cl_fault_io *fio;
- struct lu_env *env;
- ENTRY;
+ struct file *file = vma->vm_file;
+ struct inode *inode = file->f_dentry->d_inode;
+ struct cl_io *io;
+ struct cl_fault_io *fio;
+ struct lu_env *env;
+ int rc;
+ ENTRY;
*env_ret = NULL;
        if (ll_file_nolock(file))
                RETURN(ERR_PTR(-EOPNOTSUPP));
CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
fio->ft_index, fio->ft_executable);
- if (cl_io_init(env, io, CIT_FAULT, io->ci_obj) == 0) {
- struct ccc_io *cio = ccc_env_io(env);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
- LASSERT(cio->cui_cl.cis_io == io);
-
- /* mmap lock must be MANDATORY
- * it has to cache pages. */
- io->ci_lockreq = CILR_MANDATORY;
-
- cio->cui_fd = fd;
- }
-
- return io;
+ rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
+ if (rc == 0) {
+ struct ccc_io *cio = ccc_env_io(env);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+
+ LASSERT(cio->cui_cl.cis_io == io);
+
+               /* mmap lock must be MANDATORY because
+                * it has to cache pages. */
+ io->ci_lockreq = CILR_MANDATORY;
+ cio->cui_fd = fd;
+ } else {
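+               /* Initialization failed: release the io and env here so
+                * that callers getting ERR_PTR() have nothing of their
+                * own to clean up. */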
+ LASSERT(rc < 0);
+ cl_io_fini(env, io);
+ cl_env_nested_put(nest, env);
+ io = ERR_PTR(rc);
+ }
+
+ return io;
}
/* Code shared by the page_mkwrite implementations for RHEL5 and RHEL6 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
bool *retry)
{
- struct lu_env *env;
- struct cl_io *io;
- struct vvp_io *vio;
- struct cl_env_nest nest;
- int result;
- ENTRY;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct vvp_io *vio;
+ struct cl_env_nest nest;
+ int result;
+ cfs_sigset_t set;
+ struct inode *inode;
+ struct ll_inode_info *lli;
+ ENTRY;
- LASSERT(vmpage != NULL);
+ LASSERT(vmpage != NULL);
- io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
- if (IS_ERR(io))
- GOTO(out, result = PTR_ERR(io));
+ io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
+ if (IS_ERR(io))
+ GOTO(out, result = PTR_ERR(io));
- result = io->ci_result;
- if (result < 0)
- GOTO(out, result);
+ result = io->ci_result;
+ if (result < 0)
+ GOTO(out_io, result);
- /* Don't enqueue new locks for page_mkwrite().
- * If the lock has been cancelled then page must have been
- * truncated, in that case, kernel will handle it.
- */
- io->ci_lockreq = CILR_PEEK;
- io->u.ci_fault.ft_mkwrite = 1;
- io->u.ci_fault.ft_writable = 1;
+ io->u.ci_fault.ft_mkwrite = 1;
+ io->u.ci_fault.ft_writable = 1;
- vio = vvp_env_io(env);
- vio->u.fault.ft_vma = vma;
- vio->u.fault.ft_vmpage = vmpage;
+ vio = vvp_env_io(env);
+ vio->u.fault.ft_vma = vma;
+ vio->u.fault.ft_vmpage = vmpage;
+
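+       /* Allow only SIGKILL and SIGTERM while the mkwrite IO runs, so a
+        * stuck task can still be killed by the administrator. */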
+ set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
- result = cl_io_loop(env, io);
+       /* Grab lli_trunc_sem to exclude a concurrent truncate; otherwise
+        * we could add dirty pages into the osc cache while truncate is
+        * in progress. */
+ inode = ccc_object_inode(io->ci_obj);
+ lli = ll_i2info(inode);
+ down_read(&lli->lli_trunc_sem);
- if (result == -ENODATA) /* peek failed, no lock caching. */
- CDEBUG(D_MMAP, "race on page_mkwrite: %lx (%lu %p)\n",
- vma->vm_flags, io->u.ci_fault.ft_index, vmpage);
+ result = cl_io_loop(env, io);
+
+ up_read(&lli->lli_trunc_sem);
+
+ cfs_restore_sigs(set);
+
+       if (result == 0) {
- if (result == 0 || result == -ENODATA) {
lock_page(vmpage);
if (vmpage->mapping == NULL) {
unlock_page(vmpage);
                        /* page was truncated and the lock was cancelled;
                         * return ENODATA so that VM_FAULT_NOPAGE is returned
                         * to handle_mm_fault(). */
if (result == 0)
result = -ENODATA;
- } else if (result == -ENODATA) {
- /* Invalidate it if the cl_lock is being revoked.
- * This piece of code is definitely needed for RHEL5,
- * otherwise, SIGBUS will be wrongly returned to
- * applications. */
- write_one_page(vmpage, 1);
- lock_page(vmpage);
- if (vmpage->mapping != NULL) {
- ll_invalidate_page(vmpage);
- LASSERT(vmpage->mapping == NULL);
- }
- unlock_page(vmpage);
} else if (!PageDirty(vmpage)) {
                        /* race: the page has been cleaned by ptlrpcd after
                         * it was unlocked, so it has to be added into the
                         * dirty cache again, otherwise this soon-to-be-dirty
                         * page will not consume any grants. */
*retry = true;
result = -EAGAIN;
}
+
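+               /* Record in lli_flags that cached file data was modified;
+                * code that checks LLIF_DATA_MODIFIED uses this to report
+                * the change to the MDS. */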
+ if (result == 0) {
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags |= LLIF_DATA_MODIFIED;
+ spin_unlock(&lli->lli_lock);
+ }
}
EXIT;
+out_io:
+ cl_io_fini(env, io);
+ cl_env_nested_put(&nest, env);
out:
- cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
+ CDEBUG(D_MMAP, "%s mkwrite with %d\n", cfs_current()->comm, result);
+ LASSERT(ergo(result == 0, PageLocked(vmpage)));
- CDEBUG(D_MMAP, "%s mkwrite with %d\n", cfs_current()->comm, result);
-
- LASSERT(ergo(result == 0, PageLocked(vmpage)));
- return(result);
+ return result;
}
pgoff_t pg_offset;
int result;
const unsigned long writable = VM_SHARED|VM_WRITE;
+ cfs_sigset_t set;
ENTRY;
pg_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
vio->u.fault.nopage.ft_type = type;
vio->u.fault.ft_vmpage = NULL;
- result = cl_io_loop(env, io);
- page = vio->u.fault.ft_vmpage;
- if (result != 0 && page != NULL)
- page_cache_release(page);
+       set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
+ result = cl_io_loop(env, io);
+ cfs_restore_sigs(set);
+
+ page = vio->u.fault.ft_vmpage;
+ if (result != 0 && page != NULL) {
+ page_cache_release(page);
+ page = NOPAGE_SIGBUS;
+ }
out_err:
        if (result == -ENOMEM)
                page = NOPAGE_OOM;
}
#else
+
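+/* Translate a cl_io result into the VM_FAULT_* code expected by the
+ * kernel fault handler; 0 means the page was returned locked. */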
+static inline int to_fault_error(int result)
+{
+       switch (result) {
+ case 0:
+ result = VM_FAULT_LOCKED;
+ break;
+ case -EFAULT:
+ result = VM_FAULT_NOPAGE;
+ break;
+ case -ENOMEM:
+ result = VM_FAULT_OOM;
+ break;
+ default:
+ result = VM_FAULT_SIGBUS;
+ break;
+ }
+ return result;
+}
+
/**
 * Lustre implementation of the vm_operations_struct::fault() method, called
 * by the VM to serve a page fault (in both kernel and user space).
io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
if (IS_ERR(io))
- RETURN(VM_FAULT_ERROR);
+ RETURN(to_fault_error(PTR_ERR(io)));
result = io->ci_result;
- if (result < 0)
- goto out_err;
-
- vio = vvp_env_io(env);
- vio->u.fault.ft_vma = vma;
- vio->u.fault.ft_vmpage = NULL;
- vio->u.fault.fault.ft_vmf = vmf;
-
- result = cl_io_loop(env, io);
-
- vmpage = vio->u.fault.ft_vmpage;
- if (result != 0 && vmpage != NULL) {
- page_cache_release(vmpage);
- vmf->page = NULL;
+ if (result == 0) {
+ vio = vvp_env_io(env);
+ vio->u.fault.ft_vma = vma;
+ vio->u.fault.ft_vmpage = NULL;
+ vio->u.fault.fault.ft_vmf = vmf;
+
+ result = cl_io_loop(env, io);
+
+ fault_ret = vio->u.fault.fault.ft_flags;
+ vmpage = vio->u.fault.ft_vmpage;
+ if (result != 0 && vmpage != NULL) {
+ page_cache_release(vmpage);
+ vmf->page = NULL;
+ }
}
-
- fault_ret = vio->u.fault.fault.ft_flags;
-
-out_err:
- if (result != 0 && fault_ret == 0)
- fault_ret = VM_FAULT_ERROR;
-
- vma->vm_flags |= ra_flags;
-
cl_io_fini(env, io);
cl_env_nested_put(&nest, env);
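+       /* Restore the vma readahead flags that ll_fault_io_init() saved
+        * into ra_flags for the duration of the fault. */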
+ vma->vm_flags |= ra_flags;
+ if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
+ fault_ret |= to_fault_error(result);
+
CDEBUG(D_MMAP, "%s fault %d/%d\n",
cfs_current()->comm, fault_ret, result);
RETURN(fault_ret);
static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- int count = 0;
- bool printed = false;
- int result;
+ int count = 0;
+ bool printed = false;
+ int result;
+ cfs_sigset_t set;
+
+       /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite,
+        * so that the process can be killed by the admin but stray
+        * signals do not cause a spurious segfault. */
+ set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
restart:
        result = ll_fault0(vma, vmf);
        if (result == 0) {
                /* ... (retry via "goto restart" when the page was truncated
                 * while unlocked is elided here) ... */
                result |= VM_FAULT_LOCKED;
        }
+ cfs_restore_sigs(set);
return result;
}
#endif
/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
{
- return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
+ return vma->vm_start +
+ (byte - ((__u64)vma->vm_pgoff << PAGE_CACHE_SHIFT));
}
LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
if (mapping_mapped(mapping)) {
rc = 0;
- unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
+ unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
last - first + 1, 0);
}
static struct vm_operations_struct ll_file_vm_ops = {
#ifndef HAVE_VM_OP_FAULT
- .nopage = ll_nopage,
- .populate = ll_populate,
-
+ .nopage = ll_nopage,
+ .populate = ll_populate,
+#else
+ .fault = ll_fault,
+#endif
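+/* Some vendor kernels wrap page_mkwrite in a union for kABI reasons;
+ * HAVE_PGMKWRITE_COMPACT selects the matching initializer below. */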
+#ifndef HAVE_PGMKWRITE_COMPACT
+ .page_mkwrite = ll_page_mkwrite,
#else
- .fault = ll_fault,
+ ._pmkw.page_mkwrite = ll_page_mkwrite,
#endif
- .page_mkwrite = ll_page_mkwrite,
- .open = ll_vm_open,
- .close = ll_vm_close,
+ .open = ll_vm_open,
+ .close = ll_vm_close,
};
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)