* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2015, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_LLITE
-#include <lustre_lite.h>
#include "llite_internal.h"
-#include <linux/lustre_compat25.h>
+#include <lustre_compat.h>
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
-		       int *type);
-
-static struct vm_operations_struct ll_file_vm_ops;
+static const struct vm_operations_struct ll_file_vm_ops;
-void policy_from_vma(ldlm_policy_data_t *policy,
- struct vm_area_struct *vma, unsigned long addr,
- size_t count)
+void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
+ unsigned long addr, size_t count)
{
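+	/* Map [addr, addr + count) within the vma onto the byte extent
+	 * of the backing file, rounded outward to page boundaries. */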
- policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
+ policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
(vma->vm_pgoff << PAGE_CACHE_SHIFT);
- policy->l_extent.end = (policy->l_extent.start + count - 1) |
- ~CFS_PAGE_MASK;
+ policy->l_extent.end = (policy->l_extent.start + count - 1) |
+ ~PAGE_MASK;
}
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
* \retval EINVAL if the env can't be allocated
* \return other error codes from cl_io_init.
*/
-struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
- struct lu_env **env_ret,
- struct cl_env_nest *nest,
- pgoff_t index, unsigned long *ra_flags)
+static struct cl_io *
+ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
+ struct cl_env_nest *nest, pgoff_t index,
+ unsigned long *ra_flags)
{
struct file *file = vma->vm_file;
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file->f_path.dentry->d_inode;
struct cl_io *io;
struct cl_fault_io *fio;
struct lu_env *env;
*env_ret = env;
- io = ccc_env_thread_io(env);
+restart:
+ io = vvp_env_thread_io(env);
io->ci_obj = ll_i2info(inode)->lli_clob;
LASSERT(io->ci_obj != NULL);
rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
if (rc == 0) {
- struct ccc_io *cio = ccc_env_io(env);
+ struct vvp_io *vio = vvp_env_io(env);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- LASSERT(cio->cui_cl.cis_io == io);
+ LASSERT(vio->vui_cl.cis_io == io);
/* mmap lock must be MANDATORY because it has to cache
* pages. */
io->ci_lockreq = CILR_MANDATORY;
- cio->cui_fd = fd;
+ vio->vui_fd = fd;
} else {
LASSERT(rc < 0);
cl_io_fini(env, io);
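+		/* cl_io_init() may fail with ci_need_restart set, e.g. if
+		 * the file's layout changed while the IO was being set up;
+		 * in that case initialize the IO again from scratch. */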
+ if (io->ci_need_restart)
+ goto restart;
+
cl_env_nested_put(nest, env);
io = ERR_PTR(rc);
}
- return io;
+ RETURN(io);
}
/* page_mkwrite code shared between rhel5 and rhel6 */
struct vvp_io *vio;
struct cl_env_nest nest;
int result;
- cfs_sigset_t set;
+ sigset_t set;
struct inode *inode;
struct ll_inode_info *lli;
ENTRY;
/* We grab lli_trunc_sem to exclude the truncate case.
* Otherwise, we could add dirty pages into the osc cache
* while a truncate is in progress. */
- inode = ccc_object_inode(io->ci_obj);
+ inode = vvp_object_inode(io->ci_obj);
lli = ll_i2info(inode);
down_read(&lli->lli_trunc_sem);
cfs_restore_sigs(set);
if (result == 0) {
- struct inode *inode = vma->vm_file->f_dentry->d_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
-
lock_page(vmpage);
if (vmpage->mapping == NULL) {
unlock_page(vmpage);
result = -EAGAIN;
}
- if (result == 0) {
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
- }
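+		/* Record that the file data was modified, so the MDT can
+		 * be told about it on close (used, e.g., to mark an HSM
+		 * archived copy dirty). */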
+ if (result == 0)
+ ll_file_set_flag(lli, LLIF_DATA_MODIFIED);
}
EXIT;
cl_io_fini(env, io);
cl_env_nested_put(&nest, env);
out:
- CDEBUG(D_MMAP, "%s mkwrite with %d\n", cfs_current()->comm, result);
+ CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
LASSERT(ergo(result == 0, PageLocked(vmpage)));
return result;
vio = vvp_env_io(env);
vio->u.fault.ft_vma = vma;
vio->u.fault.ft_vmpage = NULL;
- vio->u.fault.fault.ft_vmf = vmf;
+ vio->u.fault.ft_vmf = vmf;
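+	/* ft_flags is filled in by the lower layers only if the fault
+	 * reaches filemap_fault(); ft_flags_valid records whether that
+	 * happened. */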
+ vio->u.fault.ft_flags = 0;
+ vio->u.fault.ft_flags_valid = 0;
+
+	/* The fault may call ll_readpage(), which looks up the env/io
+	 * pair registered here by ll_cl_add(). */
+ ll_cl_add(vma->vm_file, env, io);
result = cl_io_loop(env, io);
- fault_ret = vio->u.fault.fault.ft_flags;
+ ll_cl_remove(vma->vm_file, env);
+
+	/* ft_flags is only valid if we reached the call to
+	 * filemap_fault() */
+ if (vio->u.fault.ft_flags_valid)
+ fault_ret = vio->u.fault.ft_flags;
+
vmpage = vio->u.fault.ft_vmpage;
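+		/* On error, drop the reference on the page obtained by the
+		 * lower layers and make sure it is not handed to the VM. */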
if (result != 0 && vmpage != NULL) {
page_cache_release(vmpage);
vmf->page = NULL;
}
}
- cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
+ cl_io_fini(env, io);
+ cl_env_nested_put(&nest, env);
vma->vm_flags |= ra_flags;
if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
fault_ret |= to_fault_error(result);
- CDEBUG(D_MMAP, "%s fault %d/%d\n",
- cfs_current()->comm, fault_ret, result);
- RETURN(fault_ret);
+ CDEBUG(D_MMAP, "%s fault %d/%d\n",
+ current->comm, fault_ret, result);
+ RETURN(fault_ret);
}
static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
int count = 0;
bool printed = false;
int result;
- cfs_sigset_t set;
+ sigset_t set;
/* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
* so that it can be killed by admin but not cause segfault by
bool retry;
int result;
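+	/* Update the file times before dirtying the page, as other
+	 * ->page_mkwrite implementations do. */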
+ file_update_time(vma->vm_file);
do {
retry = false;
result = ll_page_mkwrite0(vma, vmf->page, &retry);
if (!printed && ++count > 16) {
- CWARN("app(%s): the page %lu of file %lu is under heavy"
- " contention.\n",
- current->comm, vmf->pgoff,
- vma->vm_file->f_dentry->d_inode->i_ino);
+ const struct dentry *de = vma->vm_file->f_path.dentry;
+
+ CWARN("app(%s): the page %lu of file "DFID" is under"
+ " heavy contention\n",
+ current->comm, vmf->pgoff,
+ PFID(ll_inode2fid(de->d_inode)));
printed = true;
}
} while (retry);
/**
* To avoid cancelling the locks covering a mmapped region under lock cache pressure,
- * we track the mapped vma count in ccc_object::cob_mmap_cnt.
+ * we track the mapped vma count in vvp_object::vob_mmap_cnt.
*/
static void ll_vm_open(struct vm_area_struct * vma)
{
- struct inode *inode = vma->vm_file->f_dentry->d_inode;
- struct ccc_object *vob = cl_inode2ccc(inode);
+ struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ struct vvp_object *vob = cl_inode2vvp(inode);
- ENTRY;
- LASSERT(vma->vm_file);
- LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0);
- cfs_atomic_inc(&vob->cob_mmap_cnt);
- EXIT;
+ ENTRY;
+ LASSERT(vma->vm_file);
+ LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
+ atomic_inc(&vob->vob_mmap_cnt);
+ EXIT;
}
/**
*/
static void ll_vm_close(struct vm_area_struct *vma)
{
- struct inode *inode = vma->vm_file->f_dentry->d_inode;
- struct ccc_object *vob = cl_inode2ccc(inode);
-
- ENTRY;
- LASSERT(vma->vm_file);
- cfs_atomic_dec(&vob->cob_mmap_cnt);
- LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0);
- EXIT;
-}
-
-/* return the user space pointer that maps to a file offset via a vma */
-static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
-{
- return vma->vm_start +
- (byte - ((__u64)vma->vm_pgoff << PAGE_CACHE_SHIFT));
+ struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ struct vvp_object *vob = cl_inode2vvp(inode);
+ ENTRY;
+ LASSERT(vma->vm_file);
+ atomic_dec(&vob->vob_mmap_cnt);
+ LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
+ EXIT;
}
/* XXX put nice comment here. talk about __free_pte -> dirty pages and
RETURN(rc);
}
-static struct vm_operations_struct ll_file_vm_ops = {
+static const struct vm_operations_struct ll_file_vm_ops = {
.fault = ll_fault,
.page_mkwrite = ll_page_mkwrite,
.open = ll_vm_open,
int ll_file_mmap(struct file *file, struct vm_area_struct * vma)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file->f_path.dentry->d_inode;
int rc;
ENTRY;