* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/version.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/fs.h>
size_t count)
{
policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
- (vma->vm_pgoff << CFS_PAGE_SHIFT);
+ (vma->vm_pgoff << PAGE_CACHE_SHIFT);
policy->l_extent.end = (policy->l_extent.start + count - 1) |
~CFS_PAGE_MASK;
}
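
A quick userspace sketch (illustrative only, not part of the patch) of the
extent arithmetic above, assuming 4 KiB pages; the EX_* names are stand-ins
for CFS_PAGE_MASK and PAGE_CACHE_SHIFT:

#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SHIFT 12                        /* assume 4 KiB pages */
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)
#define EX_PAGE_MASK  (~(EX_PAGE_SIZE - 1))     /* stand-in for CFS_PAGE_MASK */

int main(void)
{
        unsigned long vm_start = 0x7f0000001000UL;      /* vma->vm_start */
        unsigned long vm_pgoff = 3;         /* vma begins at file page 3 */
        unsigned long addr     = 0x7f0000003234UL;      /* faulting address */
        size_t count = 1;

        /* Page-align the vma-relative offset, then add the file offset
         * of the vma itself -- the same two steps as above. */
        uint64_t start = ((addr - vm_start) & EX_PAGE_MASK) +
                         ((uint64_t)vm_pgoff << EX_PAGE_SHIFT);
        /* Widen the end to the last byte of its page. */
        uint64_t end = (start + count - 1) | ~(uint64_t)EX_PAGE_MASK;

        /* Prints: extent [0x5000, 0x5fff] */
        printf("extent [0x%llx, 0x%llx]\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}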
* \return other error codes from cl_io_init.
*/
struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
- struct lu_env **env_ret,
- struct cl_env_nest *nest,
- pgoff_t index, unsigned long *ra_flags)
+ struct lu_env **env_ret,
+ struct cl_env_nest *nest,
+ pgoff_t index, unsigned long *ra_flags)
{
- struct file *file = vma->vm_file;
- struct inode *inode = file->f_dentry->d_inode;
- struct cl_io *io;
- struct cl_fault_io *fio;
- struct lu_env *env;
- ENTRY;
+ struct file *file = vma->vm_file;
+ struct inode *inode = file->f_dentry->d_inode;
+ struct cl_io *io;
+ struct cl_fault_io *fio;
+ struct lu_env *env;
+ int rc;
+ ENTRY;
*env_ret = NULL;
if (ll_file_nolock(file))
CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
fio->ft_index, fio->ft_executable);
- if (cl_io_init(env, io, CIT_FAULT, io->ci_obj) == 0) {
- struct ccc_io *cio = ccc_env_io(env);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
- LASSERT(cio->cui_cl.cis_io == io);
-
- /* mmap lock must be MANDATORY
- * it has to cache pages. */
- io->ci_lockreq = CILR_MANDATORY;
-
- cio->cui_fd = fd;
- }
+ rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
+ if (rc == 0) {
+ struct ccc_io *cio = ccc_env_io(env);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+
+ LASSERT(cio->cui_cl.cis_io == io);
+
+ /* mmap lock must be MANDATORY because it has to
+ * cache pages. */
+ io->ci_lockreq = CILR_MANDATORY;
+ cio->cui_fd = fd;
+ } else {
+ LASSERT(rc < 0);
+ cl_io_fini(env, io);
+ cl_env_nested_put(nest, env);
+ io = ERR_PTR(rc);
+ }
- return io;
+ return io;
}
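
The reworked error path follows the kernel's ERR_PTR convention: undo the
partial setup, encode the negative errno in the returned pointer, and let
the caller test it with IS_ERR(). A minimal userspace sketch of that
convention; ERR_PTR/IS_ERR/PTR_ERR are re-implemented here for illustration
(in the kernel they come from <linux/err.h>) and io_init() is a made-up
stand-in for ll_fault_io_init():

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Userspace stand-ins for the kernel's <linux/err.h> helpers. */
static inline void *ERR_PTR(long error)   { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-4095;
}

struct io { int initialized; };

static struct io *io_init(int fail)
{
        struct io *io = malloc(sizeof(*io));

        if (io == NULL)
                return ERR_PTR(-ENOMEM);
        if (fail) {
                /* Tear down everything acquired so far, then hand the
                 * errno back inside the pointer, as ll_fault_io_init()
                 * now does with cl_io_fini()/cl_env_nested_put(). */
                free(io);
                return ERR_PTR(-EINVAL);
        }
        io->initialized = 1;
        return io;
}

int main(void)
{
        struct io *io = io_init(1);

        if (IS_ERR(io))
                printf("io_init failed: %ld\n", PTR_ERR(io));
        else
                free(io);
        return 0;
}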
/* Code shared by the page_mkwrite implementations for RHEL5 and RHEL6 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
bool *retry)
{
- struct lu_env *env;
- struct cl_io *io;
- struct vvp_io *vio;
- struct cl_env_nest nest;
- int result;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct vvp_io *vio;
+ struct cl_env_nest nest;
+ int result;
cfs_sigset_t set;
- ENTRY;
+ struct inode *inode;
+ struct ll_inode_info *lli;
+ ENTRY;
- LASSERT(vmpage != NULL);
+ LASSERT(vmpage != NULL);
- io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
- if (IS_ERR(io))
- GOTO(out, result = PTR_ERR(io));
+ io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
+ if (IS_ERR(io))
+ GOTO(out, result = PTR_ERR(io));
- result = io->ci_result;
- if (result < 0)
- GOTO(out, result);
+ result = io->ci_result;
+ if (result < 0)
+ GOTO(out_io, result);
- /* Don't enqueue new locks for page_mkwrite().
- * If the lock has been cancelled then page must have been
- * truncated, in that case, kernel will handle it.
- */
- io->ci_lockreq = CILR_PEEK;
- io->u.ci_fault.ft_mkwrite = 1;
- io->u.ci_fault.ft_writable = 1;
+ io->u.ci_fault.ft_mkwrite = 1;
+ io->u.ci_fault.ft_writable = 1;
- vio = vvp_env_io(env);
- vio->u.fault.ft_vma = vma;
- vio->u.fault.ft_vmpage = vmpage;
+ vio = vvp_env_io(env);
+ vio->u.fault.ft_vma = vma;
+ vio->u.fault.ft_vmpage = vmpage;
set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
+
+ /* We grab lli_trunc_sem to exclude the truncate case;
+ * otherwise we could add dirty pages into the osc cache
+ * while a truncate is in progress. */
+ inode = ccc_object_inode(io->ci_obj);
+ lli = ll_i2info(inode);
+ down_read(&lli->lli_trunc_sem);
+
result = cl_io_loop(env, io);
+
+ up_read(&lli->lli_trunc_sem);
+
cfs_restore_sigs(set);
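
A hedged pthread sketch of the lli_trunc_sem pattern above: fault handlers
take the semaphore as readers and may run concurrently, while truncate takes
it as a writer and excludes them all. The pthread_rwlock_t mapping is
illustrative only; the kernel side is a struct rw_semaphore:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t trunc_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Analogue of the mkwrite path: dirty pages only while no truncate runs. */
static void *mkwrite_thread(void *arg)
{
        pthread_rwlock_rdlock(&trunc_sem);      /* down_read() */
        puts("mkwrite: adding dirty pages to the cache");
        pthread_rwlock_unlock(&trunc_sem);      /* up_read() */
        return NULL;
}

/* Analogue of truncate: waits until every reader has drained. */
static void *truncate_thread(void *arg)
{
        pthread_rwlock_wrlock(&trunc_sem);      /* down_write() */
        puts("truncate: no mkwrite can add dirty pages now");
        pthread_rwlock_unlock(&trunc_sem);      /* up_write() */
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, mkwrite_thread, NULL);
        pthread_create(&t2, NULL, truncate_thread, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}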
- if (result == -ENODATA) /* peek failed, no lock caching. */
- CDEBUG(D_MMAP, "race on page_mkwrite: %lx (%lu %p)\n",
- vma->vm_flags, io->u.ci_fault.ft_index, vmpage);
+ if (result == 0) {
- if (result == 0 || result == -ENODATA) {
lock_page(vmpage);
if (vmpage->mapping == NULL) {
unlock_page(vmpage);
* to handle_mm_fault(). */
if (result == 0)
result = -ENODATA;
- } else if (result == -ENODATA) {
- /* Invalidate it if the cl_lock is being revoked.
- * This piece of code is definitely needed for RHEL5,
- * otherwise, SIGBUS will be wrongly returned to
- * applications. */
- write_one_page(vmpage, 1);
- lock_page(vmpage);
- if (vmpage->mapping != NULL) {
- ll_invalidate_page(vmpage);
- LASSERT(vmpage->mapping == NULL);
- }
- unlock_page(vmpage);
} else if (!PageDirty(vmpage)) {
/* race, the page has been cleaned by ptlrpcd after
* it was unlocked, it has to be added into dirty
*retry = true;
result = -EAGAIN;
}
+
+ if (result == 0) {
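+ /* The write fault succeeded: record that file data was modified. */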
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags |= LLIF_DATA_MODIFIED;
+ spin_unlock(&lli->lli_lock);
+ }
}
EXIT;
+out_io:
+ cl_io_fini(env, io);
+ cl_env_nested_put(&nest, env);
out:
- cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
-
- CDEBUG(D_MMAP, "%s mkwrite with %d\n", cfs_current()->comm, result);
+ CDEBUG(D_MMAP, "%s mkwrite with %d\n", cfs_current()->comm, result);
+ LASSERT(ergo(result == 0, PageLocked(vmpage)));
- LASSERT(ergo(result == 0, PageLocked(vmpage)));
- return(result);
+ return result;
}
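
On the lost race above the function reports -EAGAIN through *retry and the
(elided) page_mkwrite wrapper is expected to call it again. A self-contained
sketch of that retry contract, with stub types standing in for the kernel
ones and mkwrite0_stub() as a made-up stand-in for ll_page_mkwrite0():

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

/* Stand-ins for the kernel types, just so the sketch compiles. */
struct vm_area_struct { int dummy; };
struct page { int dummy; };

static int attempts;

/* Same contract as ll_page_mkwrite0(): on a lost race it sets *retry
 * and reports -EAGAIN. */
static int mkwrite0_stub(struct vm_area_struct *vma, struct page *vmpage,
                         bool *retry)
{
        if (attempts++ == 0) {
                *retry = true;
                return -EAGAIN; /* page was cleaned under us; try again */
        }
        return 0;
}

int main(void)
{
        struct vm_area_struct vma = { 0 };
        struct page pg = { 0 };
        bool retry;
        int rc;

        do {
                retry = false;
                rc = mkwrite0_stub(&vma, &pg, &retry);
        } while (retry);

        printf("mkwrite result: %d after %d attempts\n", rc, attempts);
        return 0;
}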
/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
{
- return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
+ return vma->vm_start +
+ (byte - ((__u64)vma->vm_pgoff << PAGE_CACHE_SHIFT));
}
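
file_to_user() is the inverse of the extent arithmetic shown earlier:
subtract the vma's file offset, add vm_start. A quick check reusing the
illustrative numbers from the first sketch (4 KiB pages, i.e. an assumed
PAGE_CACHE_SHIFT of 12):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned long vm_start = 0x7f0000001000UL;
        unsigned long vm_pgoff = 3;     /* vma covers file pages 3.. */
        uint64_t byte = 0x5234;         /* file offset inside the vma */

        /* Same expression as file_to_user(). */
        unsigned long user = vm_start + (byte - ((uint64_t)vm_pgoff << 12));

        /* Prints 0x7f0000003234, matching the faulting address in the
         * earlier sketch. */
        printf("file byte 0x%llx -> user address 0x%lx\n",
               (unsigned long long)byte, user);
        return 0;
}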
LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
if (mapping_mapped(mapping)) {
rc = 0;
- unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
+ unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
last - first + 1, 0);
}
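
Why first + PAGE_CACHE_SIZE - 1: unmap_mapping_range() rounds its holebegin
argument down to a page boundary, so biasing first by a page minus one
effectively rounds it up, and a page only partially covered by [first, last]
stays mapped. A small worked example, assuming 4 KiB pages:

#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)

int main(void)
{
        uint64_t first = 0x1800;        /* not page aligned */
        uint64_t last  = 0x4fff;

        /* holebegin = 0x27ff; the kernel rounds it down to 0x2000,
         * so page 1 (which contains first) is left mapped. */
        uint64_t holebegin = first + EX_PAGE_SIZE - 1;

        printf("first unmapped page: %llu, holelen: 0x%llx\n",
               (unsigned long long)(holebegin >> EX_PAGE_SHIFT),
               (unsigned long long)(last - first + 1));
        return 0;
}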