*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/stat.h>
#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/version.h>
-#include <asm/uaccess.h>
-
-#include <linux/fs.h>
-#include <linux/stat.h>
-#include <asm/uaccess.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/pagemap.h>
#define DEBUG_SUBSYSTEM S_LLITE
unsigned long addr, size_t count)
{
policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
- (vma->vm_pgoff << PAGE_CACHE_SHIFT);
+ (vma->vm_pgoff << PAGE_SHIFT);
policy->l_extent.end = (policy->l_extent.start + count - 1) |
~PAGE_MASK;
}
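
Note: policy_from_vma() above converts a faulting user address into a byte extent in the file: the offset within the mapping is page-aligned, then shifted by the mapping's file offset (vm_pgoff, in pages), and the end is rounded up to the last byte of its page. A standalone userspace sketch of the same arithmetic, assuming 4 KiB pages (all names and values here are illustrative, not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define SK_PAGE_SHIFT 12
    #define SK_PAGE_SIZE  (1UL << SK_PAGE_SHIFT)
    #define SK_PAGE_MASK  (~(SK_PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long vm_start = 0x7f0000000000UL; /* mapping base */
        unsigned long vm_pgoff = 4;     /* file offset of mapping, in pages */
        unsigned long addr = vm_start + 0x2345;    /* faulting address */
        unsigned long count = 1;                   /* bytes accessed */

        /* page-aligned offset into the mapping + file offset of mapping */
        uint64_t start = ((addr - vm_start) & SK_PAGE_MASK) +
                         ((uint64_t)vm_pgoff << SK_PAGE_SHIFT);
        /* extend the extent to the end of the containing page */
        uint64_t end = (start + count - 1) | ~SK_PAGE_MASK;

        /* prints: extent [0x6000, 0x6fff] */
        printf("extent [%#llx, %#llx]\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
    }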
/**
* API independent part for page fault initialization.
- * \param vma - virtual memory area addressed to page fault
 * \param env - corresponding lu_env for processing
- * \param nest - nested level
+ * \param vma - virtual memory area addressed to page fault
 * \param index - page index corresponding to the fault.
- * \parm ra_flags - vma readahead flags.
*
- * \return allocated and initialized env for fault operation.
- * \retval EINVAL if env can't allocated
- * \return other error codes from cl_io_init.
+ * \return error codes from cl_io_init.
*/
static struct cl_io *
-ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
- struct cl_env_nest *nest, pgoff_t index,
- unsigned long *ra_flags)
+ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma, pgoff_t index)
{
struct file *file = vma->vm_file;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct cl_io *io;
struct cl_fault_io *fio;
- struct lu_env *env;
int rc;
ENTRY;
- *env_ret = NULL;
- if (ll_file_nolock(file))
- RETURN(ERR_PTR(-EOPNOTSUPP));
+ if (ll_file_nolock(file))
+ RETURN(ERR_PTR(-EOPNOTSUPP));
- /*
- * page fault can be called when lustre IO is
- * already active for the current thread, e.g., when doing read/write
- * against user level buffer mapped from Lustre buffer. To avoid
- * stomping on existing context, optionally force an allocation of a new
- * one.
- */
- env = cl_env_nested_get(nest);
- if (IS_ERR(env))
- RETURN(ERR_PTR(-EINVAL));
+restart:
+ io = vvp_env_thread_io(env);
+ io->ci_obj = ll_i2info(inode)->lli_clob;
+ LASSERT(io->ci_obj != NULL);
- *env_ret = env;
+ fio = &io->u.ci_fault;
+ fio->ft_index = index;
+ fio->ft_executable = vma->vm_flags & VM_EXEC;
- io = vvp_env_thread_io(env);
- io->ci_obj = ll_i2info(inode)->lli_clob;
- LASSERT(io->ci_obj != NULL);
-
- fio = &io->u.ci_fault;
- fio->ft_index = index;
- fio->ft_executable = vma->vm_flags&VM_EXEC;
-
- /*
- * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
- * the kernel will not read other pages not covered by ldlm in
- * filemap_nopage. we do our readahead in ll_readpage.
- */
- if (ra_flags != NULL)
- *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
- vma->vm_flags &= ~VM_SEQ_READ;
- vma->vm_flags |= VM_RAND_READ;
-
- CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
- fio->ft_index, fio->ft_executable);
+ CDEBUG(D_MMAP,
+ DFID": vma=%p start=%#lx end=%#lx vm_flags=%#lx idx=%lu\n",
+ PFID(&ll_i2info(inode)->lli_fid), vma, vma->vm_start,
+ vma->vm_end, vma->vm_flags, fio->ft_index);
+
+ if (vma->vm_flags & VM_SEQ_READ)
+ io->ci_seq_read = 1;
+ else if (vma->vm_flags & VM_RAND_READ)
+ io->ci_rand_read = 1;
rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
if (rc == 0) {
struct vvp_io *vio = vvp_env_io(env);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
LASSERT(vio->vui_cl.cis_io == io);
} else {
LASSERT(rc < 0);
cl_io_fini(env, io);
- cl_env_nested_put(nest, env);
+ if (io->ci_need_restart)
+ goto restart;
+
io = ERR_PTR(rc);
}
- return io;
+ RETURN(io);
}
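
Note: with the cl_env_nest machinery gone, the caller now owns the environment and passes it in; the lifetime is paired through a refcheck cookie. A minimal sketch of the calling pattern the fault paths below follow (taken from the patch's own cl_env_get()/cl_env_put() usage):

    	__u16 refcheck;
    	struct lu_env *env;
    	struct cl_io *io;

    	env = cl_env_get(&refcheck);		/* take a per-thread cl env */
    	if (IS_ERR(env))
    		RETURN(PTR_ERR(env));

    	io = ll_fault_io_init(env, vma, index);	/* env supplied by caller */
    	if (IS_ERR(io))
    		GOTO(out, result = PTR_ERR(io));
    	/* ... cl_io_loop(env, io) and cl_io_fini(env, io) ... */
    out:
    	cl_env_put(env, &refcheck);	/* refcheck catches unbalanced puts */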
/* Shared code for the page_mkwrite method on rhel5 and rhel6 */
struct lu_env *env;
struct cl_io *io;
struct vvp_io *vio;
- struct cl_env_nest nest;
int result;
- sigset_t set;
- struct inode *inode;
+ __u16 refcheck;
+ sigset_t old, new;
+ struct inode *inode = NULL;
struct ll_inode_info *lli;
ENTRY;
LASSERT(vmpage != NULL);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
+ io = ll_fault_io_init(env, vma, vmpage->index);
if (IS_ERR(io))
GOTO(out, result = PTR_ERR(io));
vio->u.fault.ft_vma = vma;
vio->u.fault.ft_vmpage = vmpage;
- set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
+ siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
+ sigprocmask(SIG_BLOCK, &new, &old);
- /* we grab lli_trunc_sem to exclude truncate case.
- * Otherwise, we could add dirty pages into osc cache
- * while truncate is on-going. */
inode = vvp_object_inode(io->ci_obj);
lli = ll_i2info(inode);
- down_read(&lli->lli_trunc_sem);
result = cl_io_loop(env, io);
- up_read(&lli->lli_trunc_sem);
-
- cfs_restore_sigs(set);
+ sigprocmask(SIG_SETMASK, &old, NULL);
if (result == 0) {
lock_page(vmpage);
out_io:
cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
out:
+ cl_env_put(env, &refcheck);
CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
LASSERT(ergo(result == 0, PageLocked(vmpage)));
+	/* If the page has been unmapped, presumably due to lock reclaim
+	 * for concurrent usage, add a short delay before retrying to
+	 * avoid entering a live-lock with competing threads.
+	 */
+	if (result == -ENODATA && inode != NULL) {
+		CDEBUG(D_MMAP,
+		       "delaying new page-fault for inode %p to prevent live-lock\n",
+		       inode);
+		msleep(10);
+	}
+
return result;
}
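
Note: cfs_block_sigsinv()/cfs_restore_sigs() are replaced here by open-coded signal masking: everything except the fatal signals is blocked for the duration of the fault, then the caller's mask is restored. The pattern in isolation (a sketch, kernel context assumed; mirrors the patch's own calls):

    	sigset_t new, old;

    	/* block all signals except SIGKILL and SIGTERM */
    	siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
    	sigprocmask(SIG_BLOCK, &new, &old);

    	/* ... fault work that may only be interrupted by fatal signals ... */

    	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore caller's mask */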
case 0:
result = VM_FAULT_LOCKED;
break;
- case -EFAULT:
- result = VM_FAULT_NOPAGE;
- break;
case -ENOMEM:
result = VM_FAULT_OOM;
break;
* \retval VM_FAULT_ERROR on general error
 * \retval NOPAGE_OOM no memory available to allocate a new page
*/
-static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
+static vm_fault_t ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- struct lu_env *env;
- struct cl_io *io;
- struct vvp_io *vio = NULL;
- struct page *vmpage;
- unsigned long ra_flags;
- struct cl_env_nest nest;
- int result;
- int fault_ret = 0;
- ENTRY;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct vvp_io *vio = NULL;
+ struct page *vmpage;
+ int result = 0;
+ int fault_ret = 0;
+ __u16 refcheck;
+ ENTRY;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ if (ll_sbi_has_fast_read(ll_i2sbi(file_inode(vma->vm_file)))) {
+ /* do fast fault */
+ bool has_retry = vmf->flags & FAULT_FLAG_RETRY_NOWAIT;
+
+ /* To avoid loops, instruct downstream to not drop mmap_sem */
+ vmf->flags |= FAULT_FLAG_RETRY_NOWAIT;
+ ll_cl_add(vma->vm_file, env, NULL, LCC_MMAP);
+ fault_ret = ll_filemap_fault(vma, vmf);
+ ll_cl_remove(vma->vm_file, env);
+ if (!has_retry)
+ vmf->flags &= ~FAULT_FLAG_RETRY_NOWAIT;
+
+	/* - If there is no error, the page was found in cache and is
+	 *   uptodate;
+	 * - If VM_FAULT_RETRY is set, the page existed but could not be
+	 *   locked; take the slow path to avoid loops;
+	 * - Otherwise, try a normal fault under DLM lock.
+	 */
+ if (!(fault_ret & VM_FAULT_RETRY) &&
+ !(fault_ret & VM_FAULT_ERROR))
+ GOTO(out, result = 0);
+
+ fault_ret = 0;
+ }
- io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
- if (IS_ERR(io))
- RETURN(to_fault_error(PTR_ERR(io)));
+ io = ll_fault_io_init(env, vma, vmf->pgoff);
+ if (IS_ERR(io))
+ GOTO(out, result = PTR_ERR(io));
- result = io->ci_result;
+ result = io->ci_result;
if (result == 0) {
vio = vvp_env_io(env);
vio->u.fault.ft_vma = vma;
vio->u.fault.ft_flags_valid = 0;
/* May call ll_readpage() */
- ll_cl_add(vma->vm_file, env, io);
+ ll_cl_add(vma->vm_file, env, io, LCC_MMAP);
result = cl_io_loop(env, io);
vmpage = vio->u.fault.ft_vmpage;
if (result != 0 && vmpage != NULL) {
- page_cache_release(vmpage);
+ put_page(vmpage);
vmf->page = NULL;
}
}
cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
- vma->vm_flags |= ra_flags;
+out:
+ cl_env_put(env, &refcheck);
if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
fault_ret |= to_fault_error(result);
- CDEBUG(D_MMAP, "%s fault %d/%d\n",
- current->comm, fault_ret, result);
+ CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
RETURN(fault_ret);
}
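
Note: the fast-read branch above tries a plain page-cache fault before taking any DLM lock, and only falls back when that first attempt is not final. The fall-through rule reduces to this predicate (helper name is illustrative, not from the patch):

    /* true when the lockless fast-fault result can be returned as-is;
     * false when the slow path under DLM lock must run:
     * - VM_FAULT_RETRY: page existed but could not be locked while
     *   holding mmap_sem (FAULT_FLAG_RETRY_NOWAIT was forced on);
     * - VM_FAULT_ERROR: page missing from cache or not uptodate. */
    static bool fast_fault_is_final(vm_fault_t fault_ret)
    {
    	return !(fault_ret & (VM_FAULT_RETRY | VM_FAULT_ERROR));
    }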
-static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
+static vm_fault_t ll_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
+#else
+static vm_fault_t ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+#endif
int count = 0;
bool printed = false;
- int result;
- sigset_t set;
+ bool cached;
+ vm_fault_t result;
+ ktime_t kstart = ktime_get();
+ sigset_t old, new;
+
+ result = pcc_fault(vma, vmf, &cached);
+ if (cached)
+ goto out;
+
+ CDEBUG(D_MMAP, DFID": vma=%p start=%#lx end=%#lx vm_flags=%#lx\n",
+ PFID(&ll_i2info(file_inode(vma->vm_file))->lli_fid),
+ vma, vma->vm_start, vma->vm_end, vma->vm_flags);
+	/* make sure offset is not a negative number */
+	if (vmf->pgoff > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
+		return VM_FAULT_SIGBUS;
+
	/* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
	 * so that it can be killed by admin but not cause segfault by
-	 * other signals. */
-	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
+	 * other signals.
+	 */
+	siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
+	sigprocmask(SIG_BLOCK, &new, &old);
restart:
- result = ll_fault0(vma, vmf);
- LASSERT(!(result & VM_FAULT_LOCKED));
- if (result == 0) {
- struct page *vmpage = vmf->page;
+ result = ll_fault0(vma, vmf);
+ if (vmf->page &&
+ !(result & (VM_FAULT_RETRY | VM_FAULT_ERROR | VM_FAULT_LOCKED))) {
+ struct page *vmpage = vmf->page;
+
+ /* check if this page has been truncated */
+ lock_page(vmpage);
+ if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
+ unlock_page(vmpage);
+ put_page(vmpage);
+ vmf->page = NULL;
- /* check if this page has been truncated */
- lock_page(vmpage);
- if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
- unlock_page(vmpage);
- page_cache_release(vmpage);
- vmf->page = NULL;
+ if (!printed && ++count > 16) {
+ CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n",
+ current->comm);
+ printed = true;
+ }
- if (!printed && ++count > 16) {
- CWARN("the page is under heavy contention,"
- "maybe your app(%s) needs revising :-)\n",
- current->comm);
- printed = true;
- }
+ goto restart;
+ }
- goto restart;
- }
+ result |= VM_FAULT_LOCKED;
+ }
+ sigprocmask(SIG_SETMASK, &old, NULL);
- result |= VM_FAULT_LOCKED;
- }
- cfs_restore_sigs(set);
- return result;
+out:
+ if (vmf->page && result == VM_FAULT_LOCKED) {
+ ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
+ current->pid, vma->vm_file->private_data,
+ cl_offset(NULL, vmf->page->index), PAGE_SIZE,
+ READ);
+ ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
+ LPROC_LL_FAULT,
+ ktime_us_delta(ktime_get(), kstart));
+ }
+
+ return result;
}
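
Note: HAVE_VM_OPS_USE_VM_FAULT_ONLY tracks the Linux 4.11 change that dropped the explicit vma argument from the vm_operations fault handlers (the vma is reached through vmf->vma instead). Reduced to its skeleton, the compat shape shared by ll_fault() and ll_page_mkwrite() above is (my_handler is a placeholder name):

    #ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
    /* kernels >= 4.11: handler receives only the vm_fault */
    static vm_fault_t my_handler(struct vm_fault *vmf)
    {
    	struct vm_area_struct *vma = vmf->vma;
    #else
    /* older kernels pass the vma explicitly */
    static vm_fault_t my_handler(struct vm_area_struct *vma,
    			     struct vm_fault *vmf)
    {
    #endif
    	/* common body uses vma and vmf the same way on both kernels */
    	return VM_FAULT_NOPAGE;
    }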
-static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
+static vm_fault_t ll_page_mkwrite(struct vm_fault *vmf)
{
- int count = 0;
- bool printed = false;
- bool retry;
- int result;
+ struct vm_area_struct *vma = vmf->vma;
+#else
+static vm_fault_t ll_page_mkwrite(struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+#endif
+ int count = 0;
+ bool printed = false;
+ bool retry;
+ bool cached;
+ ktime_t kstart = ktime_get();
+ vm_fault_t result;
- do {
- retry = false;
- result = ll_page_mkwrite0(vma, vmf->page, &retry);
+ result = pcc_page_mkwrite(vma, vmf, &cached);
+ if (cached)
+ goto out;
- if (!printed && ++count > 16) {
- const struct dentry *de = vma->vm_file->f_path.dentry;
+ file_update_time(vma->vm_file);
+ do {
+ retry = false;
+ result = ll_page_mkwrite0(vma, vmf->page, &retry);
- CWARN("app(%s): the page %lu of file "DFID" is under"
- " heavy contention\n",
+ if (!printed && ++count > 16) {
+ const struct dentry *de = file_dentry(vma->vm_file);
+
+ CWARN("app(%s): the page %lu of file "DFID" is under heavy contention\n",
current->comm, vmf->pgoff,
PFID(ll_inode2fid(de->d_inode)));
- printed = true;
- }
- } while (retry);
-
- switch(result) {
- case 0:
- LASSERT(PageLocked(vmf->page));
- result = VM_FAULT_LOCKED;
- break;
- case -ENODATA:
- case -EFAULT:
- result = VM_FAULT_NOPAGE;
- break;
- case -ENOMEM:
- result = VM_FAULT_OOM;
- break;
- case -EAGAIN:
- result = VM_FAULT_RETRY;
- break;
- default:
- result = VM_FAULT_SIGBUS;
- break;
- }
+ printed = true;
+ }
+ } while (retry);
- return result;
+ switch (result) {
+ case 0:
+ LASSERT(PageLocked(vmf->page));
+ result = VM_FAULT_LOCKED;
+ break;
+ case -ENODATA:
+ case -EFAULT:
+ result = VM_FAULT_NOPAGE;
+ break;
+ case -ENOMEM:
+ result = VM_FAULT_OOM;
+ break;
+ case -EAGAIN:
+ result = VM_FAULT_RETRY;
+ break;
+ default:
+ result = VM_FAULT_SIGBUS;
+ break;
+ }
+
+out:
+ if (result == VM_FAULT_LOCKED) {
+ ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
+ current->pid, vma->vm_file->private_data,
+ cl_offset(NULL, vmf->page->index), PAGE_SIZE,
+ WRITE);
+ ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
+ LPROC_LL_MKWRITE,
+ ktime_us_delta(ktime_get(), kstart));
+ }
+
+ return result;
}
/**
*/
static void ll_vm_open(struct vm_area_struct *vma)
{
- struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(vma->vm_file);
struct vvp_object *vob = cl_inode2vvp(inode);
ENTRY;
- LASSERT(vma->vm_file);
LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
atomic_inc(&vob->vob_mmap_cnt);
+ pcc_vm_open(vma);
EXIT;
}
*/
static void ll_vm_close(struct vm_area_struct *vma)
{
- struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(vma->vm_file);
struct vvp_object *vob = cl_inode2vvp(inode);
ENTRY;
- LASSERT(vma->vm_file);
atomic_dec(&vob->vob_mmap_cnt);
LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
+ pcc_vm_close(vma);
EXIT;
}
int rc = -ENOENT;
ENTRY;
- LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
+ LASSERTF(last > first, "last %llu first %llu\n", last, first);
if (mapping_mapped(mapping)) {
rc = 0;
- unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
- last - first + 1, 0);
+ unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
+ last - first + 1, 0);
}
RETURN(rc);
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct inode *inode = file->f_path.dentry->d_inode;
- int rc;
- ENTRY;
+ struct inode *inode = file_inode(file);
+ ktime_t kstart = ktime_get();
+ bool cached;
+ int rc;
- if (ll_file_nolock(file))
- RETURN(-EOPNOTSUPP);
+ ENTRY;
+ CDEBUG(D_VFSTRACE | D_MMAP,
+ "VFS_Op: fid="DFID" vma=%p start=%#lx end=%#lx vm_flags=%#lx\n",
+ PFID(&ll_i2info(inode)->lli_fid),
+ vma, vma->vm_start, vma->vm_end, vma->vm_flags);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
- rc = generic_file_mmap(file, vma);
- if (rc == 0) {
- vma->vm_ops = &ll_file_vm_ops;
- vma->vm_ops->open(vma);
- /* update the inode's size and mtime */
- rc = ll_glimpse_size(inode);
- }
+ if (ll_file_nolock(file))
+ RETURN(-EOPNOTSUPP);
- RETURN(rc);
+ rc = pcc_file_mmap(file, vma, &cached);
+ if (cached && rc != 0)
+ RETURN(rc);
+
+ rc = generic_file_mmap(file, vma);
+ if (rc == 0) {
+ vma->vm_ops = &ll_file_vm_ops;
+ vma->vm_ops->open(vma);
+ /* update the inode's size and mtime */
+ if (!cached)
+ rc = ll_glimpse_size(inode);
+ }
+
+ if (!rc)
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MMAP,
+ ktime_us_delta(ktime_get(), kstart));
+
+ RETURN(rc);
}
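
Note: for context, the handlers patched above reach the VM through the vma->vm_ops assignment in ll_file_mmap(); modulo kernel-version guards, the ops table defined elsewhere in this file has this shape:

    static const struct vm_operations_struct ll_file_vm_ops = {
    	.fault		= ll_fault,
    	.page_mkwrite	= ll_page_mkwrite,
    	.open		= ll_vm_open,
    	.close		= ll_vm_close,
    };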