* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011 Whamcloud, Inc.
+ *
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_LLITE
-//#include <lustre_mdc.h>
#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>
* \retval EINVAL if env can't be allocated
* \return other error codes from cl_io_init.
*/
-int ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
- struct cl_env_nest *nest, pgoff_t index, unsigned long *ra_flags)
+/* API change: errors are now reported through the returned pointer
+ * (ERR_PTR) instead of an int return code, so callers get the
+ * initialized cl_io back directly; *env_ret is only valid on success. */
+struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
+ struct lu_env **env_ret,
+ struct cl_env_nest *nest,
+ pgoff_t index, unsigned long *ra_flags)
{
struct file *file = vma->vm_file;
struct inode *inode = file->f_dentry->d_inode;
struct lu_env *env;
ENTRY;
+ /* cleared up front so every early error exit leaves *env_ret NULL */
+ *env_ret = NULL;
if (ll_file_nolock(file))
- RETURN(-EOPNOTSUPP);
+ RETURN(ERR_PTR(-EOPNOTSUPP));
/*
* page fault can be called when lustre IO is
* one.
*/
env = cl_env_nested_get(nest);
- if (IS_ERR(env)) {
- *env_ret = NULL;
- RETURN(-EINVAL);
- }
+ if (IS_ERR(env))
+ RETURN(ERR_PTR(-EINVAL));
*env_ret = env;
- io = &ccc_env_info(env)->cti_io;
+ /* use the ccc_env_thread_io() helper to obtain a (re)initialized
+ * per-thread cl_io rather than reaching into cti_io directly */
+ io = ccc_env_thread_io(env);
io->ci_obj = ll_i2info(inode)->lli_clob;
LASSERT(io->ci_obj != NULL);
fio = &io->u.ci_fault;
- fio->ft_index = vma->vm_pgoff + index;
+ /* callers now pass the absolute file page index (they fold in
+ * vma->vm_pgoff themselves), so it must not be added again here */
+ fio->ft_index = index;
fio->ft_writable = (vma->vm_flags&writable) == writable;
fio->ft_executable = vma->vm_flags&VM_EXEC;
vma->vm_flags &= ~VM_SEQ_READ;
vma->vm_flags |= VM_RAND_READ;
- CDEBUG(D_INFO, "vm_flags: %lx (%lu %i %i)\n", vma->vm_flags,
+ CDEBUG(D_INFO, "vm_flags: %lx (%lu %d %d)\n", vma->vm_flags,
fio->ft_index, fio->ft_writable, fio->ft_executable);
if (cl_io_init(env, io, CIT_FAULT, io->ci_obj) == 0) {
cio->cui_fd = fd;
}
- return io->ci_result;
+ /* return the io itself; callers read io->ci_result for init errors */
+ return io;
}
#ifndef HAVE_VM_OP_FAULT
int result;
ENTRY;
- pg_offset = (address - vma->vm_start) >> PAGE_SHIFT;
- result = ll_fault_io_init(vma, &env, &nest, pg_offset, &ra_flags);
- if (env == NULL)
+ pg_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ io = ll_fault_io_init(vma, &env, &nest, pg_offset, &ra_flags);
+ if (IS_ERR(io))
return NOPAGE_SIGBUS;
- io = &ccc_env_info(env)->cti_io;
+ result = io->ci_result;
if (result < 0)
goto out_err;
vio = vvp_env_io(env);
-
vio->u.fault.ft_vma = vma;
vio->u.fault.nopage.ft_address = address;
vio->u.fault.nopage.ft_type = type;
result = cl_io_loop(env, io);
out_err:
- if (result == 0) {
- LASSERT(io->u.ci_fault.ft_page != NULL);
+ if (result == 0)
page = vio->u.fault.ft_vmpage;
- } else {
- if (result == -ENOMEM)
- page = NOPAGE_OOM;
- }
+ else if (result == -ENOMEM)
+ page = NOPAGE_OOM;
vma->vm_flags &= ~VM_RAND_READ;
vma->vm_flags |= ra_flags;
* \retval VM_FAULT_ERROR on general error
* \retval NOPAGE_OOM if there is no memory to allocate a new page
*/
-int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+/* Renamed: this is now the single-attempt fault handler; the ll_fault()
+ * entry point below wraps it in a retry loop for truncated pages. */
+int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct lu_env *env;
struct cl_io *io;
int fault_ret = 0;
ENTRY;
- result = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
- if (env == NULL)
+ /* ll_fault_io_init() now returns the cl_io, or ERR_PTR on failure */
+ io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
+ if (IS_ERR(io))
RETURN(VM_FAULT_ERROR);
- io = &ccc_env_info(env)->cti_io;
+ result = io->ci_result;
if (result < 0)
goto out_err;
vio = vvp_env_io(env);
-
vio->u.fault.ft_vma = vma;
+ /* clear any stale vmpage pointer left over from a previous use of
+ * the per-thread io structure */
+ vio->u.fault.ft_vmpage = NULL;
vio->u.fault.fault.ft_vmf = vmf;
result = cl_io_loop(env, io);
fault_ret = vio->u.fault.fault.ft_flags;
out_err:
- if (result != 0)
+ /* VM_FAULT_RETRY means the mm layer will re-issue the fault; don't
+ * also mark it as an error in that case */
+ if ((result != 0) && !(fault_ret & VM_FAULT_RETRY))
fault_ret |= VM_FAULT_ERROR;
vma->vm_flags |= ra_flags;
RETURN(fault_ret);
}
+/* New ->fault() entry point: calls ll_fault0() and, if the page it got
+ * back was truncated (page->mapping cleared) before we could lock it,
+ * drops the reference and retries.  On success the page is returned
+ * locked with VM_FAULT_LOCKED set in the result. */
+int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ int count = 0;
+ bool printed = false;
+ int result;
+
+restart:
+ result = ll_fault0(vma, vmf);
+ /* ll_fault0() never returns the page locked itself */
+ LASSERT(!(result & VM_FAULT_LOCKED));
+ if (result == 0) {
+ struct page *vmpage = vmf->page;
+
+ /* check if this page has been truncated */
+ lock_page(vmpage);
+ if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
+ unlock_page(vmpage);
+ page_cache_release(vmpage);
+ vmf->page = NULL;
+
+ /* warn only once, after 16 consecutive truncation races */
+ if (!printed && ++count > 16) {
+ /* NOTE(review): the adjacent string literals concatenate
+ * with no separating space -> "contention,maybe";
+ * consider adding one */
+ CWARN("the page is under heavy contention,"
+ "maybe your app(%s) needs revising :-)\n",
+ current->comm);
+ printed = true;
+ }
+
+ goto restart;
+ }
+
+ result |= VM_FAULT_LOCKED;
+ }
+ return result;
+}
#endif
/**