#include "llite_internal.h"
#include <linux/lustre_compat25.h>
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
- int *type);
-
-static struct vm_operations_struct ll_file_vm_ops;
+static const struct vm_operations_struct ll_file_vm_ops;
void policy_from_vma(ldlm_policy_data_t *policy,
struct vm_area_struct *vma, unsigned long addr,
size_t count)
{
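+ /* Map the faulting range to a byte extent in the file: round
+  * the start down to a page boundary and add the mapping's file
+  * offset (vm_pgoff is in pages), then round the end up to the
+  * last byte of its page. */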
policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
- (vma->vm_pgoff << CFS_PAGE_SHIFT);
+ (vma->vm_pgoff << PAGE_CACHE_SHIFT);
policy->l_extent.end = (policy->l_extent.start + count - 1) |
~CFS_PAGE_MASK;
}
* \retval EINVAL if the env can't be allocated
* \return other error codes from cl_io_init.
*/
-struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
- struct lu_env **env_ret,
- struct cl_env_nest *nest,
- pgoff_t index, unsigned long *ra_flags)
+static struct cl_io *
+ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
+ struct cl_env_nest *nest, pgoff_t index,
+ unsigned long *ra_flags)
{
- struct file *file = vma->vm_file;
- struct inode *inode = file->f_dentry->d_inode;
- struct cl_io *io;
- struct cl_fault_io *fio;
- struct lu_env *env;
- ENTRY;
+ struct file *file = vma->vm_file;
+ struct inode *inode = file->f_dentry->d_inode;
+ struct cl_io *io;
+ struct cl_fault_io *fio;
+ struct lu_env *env;
+ int rc;
+ ENTRY;
*env_ret = NULL;
if (ll_file_nolock(file))
CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
fio->ft_index, fio->ft_executable);
- if (cl_io_init(env, io, CIT_FAULT, io->ci_obj) == 0) {
- struct ccc_io *cio = ccc_env_io(env);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
- LASSERT(cio->cui_cl.cis_io == io);
-
- /* mmap lock must be MANDATORY
- * it has to cache pages. */
- io->ci_lockreq = CILR_MANDATORY;
-
- cio->cui_fd = fd;
- }
+ rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
+ if (rc == 0) {
+ struct ccc_io *cio = ccc_env_io(env);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+
+ LASSERT(cio->cui_cl.cis_io == io);
+
+ /* mmap lock must be MANDATORY because it has to
+ * cache pages. */
+ io->ci_lockreq = CILR_MANDATORY;
+ cio->cui_fd = fd;
+ } else {
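+ /* The caller only sees an ERR_PTR on failure, so the env
+ * and the half-initialized io must be released here. */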
+ LASSERT(rc < 0);
+ cl_io_fini(env, io);
+ cl_env_nested_put(nest, env);
+ io = ERR_PTR(rc);
+ }
- return io;
+ return io;
}
/* Code shared by the page_mkwrite methods for RHEL5 and RHEL6 */
struct vvp_io *vio;
struct cl_env_nest nest;
int result;
- cfs_sigset_t set;
+ sigset_t set;
struct inode *inode;
struct ll_inode_info *lli;
ENTRY;
result = io->ci_result;
if (result < 0)
- GOTO(out, result);
+ GOTO(out_io, result);
- /* Don't enqueue new locks for page_mkwrite().
- * If the lock has been cancelled then page must have been
- * truncated, in that case, kernel will handle it.
- */
- io->ci_lockreq = CILR_PEEK;
io->u.ci_fault.ft_mkwrite = 1;
io->u.ci_fault.ft_writable = 1;
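+ /* A mkwrite fault means the page already exists and is about
+ * to become writable, so mark the fault writable up front. */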
cfs_restore_sigs(set);
- if (result == -ENODATA) /* peek failed, no lock caching. */
- CDEBUG(D_MMAP, "race on page_mkwrite: %lx (%lu %p)\n",
- vma->vm_flags, io->u.ci_fault.ft_index, vmpage);
-
- if (result == 0 || result == -ENODATA) {
+ if (result == 0) {
struct inode *inode = vma->vm_file->f_dentry->d_inode;
struct ll_inode_info *lli = ll_i2info(inode);
* to handle_mm_fault(). */
if (result == 0)
result = -ENODATA;
- } else if (result == -ENODATA) {
- /* Invalidate it if the cl_lock is being revoked.
- * This piece of code is definitely needed for RHEL5,
- * otherwise, SIGBUS will be wrongly returned to
- * applications. */
- write_one_page(vmpage, 1);
- lock_page(vmpage);
- if (vmpage->mapping != NULL) {
- ll_invalidate_page(vmpage);
- LASSERT(vmpage->mapping == NULL);
- }
- unlock_page(vmpage);
} else if (!PageDirty(vmpage)) {
/* race: the page has been cleaned by ptlrpcd after
* it was unlocked; it has to be added back into the dirty
}
EXIT;
+out_io:
+ cl_io_fini(env, io);
+ cl_env_nested_put(&nest, env);
out:
- cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
-
- CDEBUG(D_MMAP, "%s mkwrite with %d\n", cfs_current()->comm, result);
-
- LASSERT(ergo(result == 0, PageLocked(vmpage)));
- return(result);
-}
-
-
-#ifndef HAVE_VM_OP_FAULT
-/**
- * Lustre implementation of a vm_operations_struct::nopage() method, called by
- * VM to server page fault (both in kernel and user space).
- *
- * This function sets up CIT_FAULT cl_io that does the job.
- *
- * \param vma - is virtiual area struct related to page fault
- * \param address - address when hit fault
- * \param type - of fault
- *
- * \return allocated and filled _unlocked_ page for address
- * \retval NOPAGE_SIGBUS if page not exist on this address
- * \retval NOPAGE_OOM not have memory for allocate new page
- */
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
- int *type)
-{
- struct lu_env *env;
- struct cl_env_nest nest;
- struct cl_io *io;
- struct page *page = NOPAGE_SIGBUS;
- struct vvp_io *vio = NULL;
- unsigned long ra_flags;
- pgoff_t pg_offset;
- int result;
- const unsigned long writable = VM_SHARED|VM_WRITE;
- cfs_sigset_t set;
- ENTRY;
-
- pg_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- io = ll_fault_io_init(vma, &env, &nest, pg_offset, &ra_flags);
- if (IS_ERR(io))
- return NOPAGE_SIGBUS;
-
- result = io->ci_result;
- if (result < 0)
- goto out_err;
-
- io->u.ci_fault.ft_writable = (vma->vm_flags&writable) == writable;
-
- vio = vvp_env_io(env);
- vio->u.fault.ft_vma = vma;
- vio->u.fault.nopage.ft_address = address;
- vio->u.fault.nopage.ft_type = type;
- vio->u.fault.ft_vmpage = NULL;
-
- set = cfs_block_sigsinv(sigmask(SIGKILL)|sigmask(SIGTERM));
- result = cl_io_loop(env, io);
- cfs_restore_sigs(set);
-
- page = vio->u.fault.ft_vmpage;
- if (result != 0 && page != NULL) {
- page_cache_release(page);
- page = NOPAGE_SIGBUS;
- }
+ CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
+ LASSERT(ergo(result == 0, PageLocked(vmpage)));
-out_err:
- if (result == -ENOMEM)
- page = NOPAGE_OOM;
-
- vma->vm_flags &= ~VM_RAND_READ;
- vma->vm_flags |= ra_flags;
-
- cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
-
- RETURN(page);
+ return result;
}
-#else
-
static inline int to_fault_error(int result)
{
switch (result) {
vio->u.fault.ft_vma = vma;
vio->u.fault.ft_vmpage = NULL;
vio->u.fault.fault.ft_vmf = vmf;
+ vio->u.fault.fault.ft_flags = 0;
+ vio->u.fault.fault.ft_flags_valid = 0;
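+ /* Pre-clear both fields so stale flags are never reported
+ * if the io bails out early. */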
+
+ /* The io loop may call ll_readpage(), which needs this env
+ * registered against the file. */
+ ll_cl_add(vma->vm_file, env, io);
result = cl_io_loop(env, io);
- fault_ret = vio->u.fault.fault.ft_flags;
+ ll_cl_remove(vma->vm_file, env);
+
+ /* ft_flags are only valid if we reached
+ * the call to filemap_fault */
+ if (vio->u.fault.fault.ft_flags_valid)
+ fault_ret = vio->u.fault.fault.ft_flags;
+
vmpage = vio->u.fault.ft_vmpage;
if (result != 0 && vmpage != NULL) {
page_cache_release(vmpage);
vmf->page = NULL;
}
}
- cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
+ cl_io_fini(env, io);
+ cl_env_nested_put(&nest, env);
vma->vm_flags |= ra_flags;
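+ /* Translate a cl_io error into a VM_FAULT code, unless the
+ * kernel has already been asked to retry the fault. */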
if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
fault_ret |= to_fault_error(result);
- CDEBUG(D_MMAP, "%s fault %d/%d\n",
- cfs_current()->comm, fault_ret, result);
- RETURN(fault_ret);
+ CDEBUG(D_MMAP, "%s fault %d/%d\n",
+ current->comm, fault_ret, result);
+ RETURN(fault_ret);
}
static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
int count = 0;
bool printed = false;
int result;
- cfs_sigset_t set;
+ sigset_t set;
/* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
* so that it can be killed by admin but not cause segfault by
cfs_restore_sigs(set);
return result;
}
-#endif
-#ifndef HAVE_PGMKWRITE_USE_VMFAULT
-static int ll_page_mkwrite(struct vm_area_struct *vma, struct page *vmpage)
-{
- int count = 0;
- bool printed = false;
- bool retry;
- int result;
-
- do {
- retry = false;
- result = ll_page_mkwrite0(vma, vmpage, &retry);
-
- if (!printed && ++count > 16) {
- CWARN("app(%s): the page %lu of file %lu is under heavy"
- " contention.\n",
- current->comm, page_index(vmpage),
- vma->vm_file->f_dentry->d_inode->i_ino);
- printed = true;
- }
- } while (retry);
-
- if (result == 0)
- unlock_page(vmpage);
- else if (result == -ENODATA)
- result = 0; /* kernel will know truncate has happened and
- * retry */
-
- return result;
-}
-#else
static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
int count = 0;
result = ll_page_mkwrite0(vma, vmf->page, &retry);
if (!printed && ++count > 16) {
- CWARN("app(%s): the page %lu of file %lu is under heavy"
- " contention.\n",
- current->comm, vmf->pgoff,
- vma->vm_file->f_dentry->d_inode->i_ino);
+ CWARN("app(%s): the page %lu of file "DFID" is under"
+ " heavy contention\n",
+ current->comm, vmf->pgoff,
+ PFID(ll_inode2fid(vma->vm_file->f_dentry->d_inode)));
printed = true;
}
} while (retry);
return result;
}
-#endif
/**
* To avoid cancelling the locks covering the mmapped region under lock cache pressure,
*/
static void ll_vm_open(struct vm_area_struct * vma)
{
- struct inode *inode = vma->vm_file->f_dentry->d_inode;
- struct ccc_object *vob = cl_inode2ccc(inode);
+ struct inode *inode = vma->vm_file->f_dentry->d_inode;
+ struct ccc_object *vob = cl_inode2ccc(inode);
- ENTRY;
- LASSERT(vma->vm_file);
- LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0);
- cfs_atomic_inc(&vob->cob_mmap_cnt);
- EXIT;
+ ENTRY;
+ LASSERT(vma->vm_file);
+ LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
+ atomic_inc(&vob->cob_mmap_cnt);
+ EXIT;
}
/**
*/
static void ll_vm_close(struct vm_area_struct *vma)
{
- struct inode *inode = vma->vm_file->f_dentry->d_inode;
- struct ccc_object *vob = cl_inode2ccc(inode);
-
- ENTRY;
- LASSERT(vma->vm_file);
- cfs_atomic_dec(&vob->cob_mmap_cnt);
- LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0);
- EXIT;
-}
-
-#ifndef HAVE_VM_OP_FAULT
-#ifndef HAVE_FILEMAP_POPULATE
-static int (*filemap_populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
-#endif
-static int ll_populate(struct vm_area_struct *area, unsigned long address,
- unsigned long len, pgprot_t prot, unsigned long pgoff,
- int nonblock)
-{
- int rc = 0;
- ENTRY;
-
- /* always set nonblock as true to avoid page read ahead */
- rc = filemap_populate(area, address, len, prot, pgoff, 1);
- RETURN(rc);
-}
-#endif
-
-/* return the user space pointer that maps to a file offset via a vma */
-static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
-{
- return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
+ struct inode *inode = vma->vm_file->f_dentry->d_inode;
+ struct ccc_object *vob = cl_inode2ccc(inode);
+ ENTRY;
+ LASSERT(vma->vm_file);
+ atomic_dec(&vob->cob_mmap_cnt);
+ LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
+ EXIT;
}
/* XXX put nice comment here. talk about __free_pte -> dirty pages and
LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
if (mapping_mapped(mapping)) {
rc = 0;
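+ /* unmap_mapping_range() takes byte offsets; even_cows is 0,
+ * so private COW copies of the pages are left in place. */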
- unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
+ unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
last - first + 1, 0);
}
RETURN(rc);
}
-static struct vm_operations_struct ll_file_vm_ops = {
-#ifndef HAVE_VM_OP_FAULT
- .nopage = ll_nopage,
- .populate = ll_populate,
-#else
+static const struct vm_operations_struct ll_file_vm_ops = {
.fault = ll_fault,
-#endif
-#ifndef HAVE_PGMKWRITE_COMPACT
.page_mkwrite = ll_page_mkwrite,
-#else
- ._pmkw.page_mkwrite = ll_page_mkwrite,
-#endif
.open = ll_vm_open,
.close = ll_vm_close,
};
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
rc = generic_file_mmap(file, vma);
if (rc == 0) {
-#if !defined(HAVE_FILEMAP_POPULATE) && !defined(HAVE_VM_OP_FAULT)
- if (!filemap_populate)
- filemap_populate = vma->vm_ops->populate;
-#endif
vma->vm_ops = &ll_file_vm_ops;
vma->vm_ops->open(vma);
/* update the inode's size and mtime */