diff --git a/lustre/llite/llite_mmap.c b/lustre/llite/llite_mmap.c
index 5fbf6f0..febad1d 100644
--- a/lustre/llite/llite_mmap.c
+++ b/lustre/llite/llite_mmap.c
@@ -1,584 +1,639 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
  *
- * Copyright (c) 2001-2003 Cluster File Systems, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- * This file is part of Lustre, http://www.lustre.org.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  *
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
  */
-#include
 #include
 #include
 #include
 #include
 #include
-#include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
-#include
 #include
 #include
-#include
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-#include
-#endif
-
-#include
 #define DEBUG_SUBSYSTEM S_LLITE
-#include
-#include
-#include
+#include
 #include "llite_internal.h"
 #include

-__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
-int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
-                      unsigned long addr, size_t count);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
 struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                        int *type);
-#else
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
-                       int unused);
-#endif

-struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
-                                              __u64 end, ldlm_mode_t mode)
+static struct vm_operations_struct ll_file_vm_ops;
+
+void policy_from_vma(ldlm_policy_data_t *policy,
+                     struct vm_area_struct *vma, unsigned long addr,
+                     size_t count)
 {
-        struct ll_lock_tree_node *node;
+        policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
+                                 (vma->vm_pgoff << CFS_PAGE_SHIFT);
+        policy->l_extent.end = (policy->l_extent.start + count - 1) |
+                               ~CFS_PAGE_MASK;
+}

-        OBD_ALLOC(node, sizeof(*node));
-        if (node == NULL)
-                RETURN(ERR_PTR(-ENOMEM));
+struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
+                               size_t count)
+{
+        struct vm_area_struct *vma, *ret = NULL;
+        ENTRY;

-        node->lt_oid = ll_i2info(inode)->lli_smd->lsm_object_id;
-        node->lt_policy.l_extent.start = start;
-        node->lt_policy.l_extent.end = end;
-        memset(&node->lt_lockh, 0, sizeof(node->lt_lockh));
-        INIT_LIST_HEAD(&node->lt_locked_item);
-        node->lt_mode = mode;
+        /* mmap_sem must have been held by caller. */
+        LASSERT(!down_write_trylock(&mm->mmap_sem));

-        return node;
+        for(vma = find_vma(mm, addr);
+            vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
+                if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
+                    vma->vm_flags & VM_SHARED) {
+                        ret = vma;
+                        break;
+                }
+        }
+        RETURN(ret);
 }

-int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two)
+/**
+ * API independent part for page fault initialization.
+ * \param vma - virtual memory area addressed to page fault
+ * \param env - corresponding lu_env for processing
+ * \param nest - nested level
+ * \param index - page index corresponding to the fault.
+ * \param ra_flags - vma readahead flags.
+ *
+ * \return allocated and initialized env for fault operation.
+ * \retval EINVAL if the env can't be allocated
+ * \return other error codes from cl_io_init.
+ */
+struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
+                               struct lu_env **env_ret,
+                               struct cl_env_nest *nest,
+                               pgoff_t index, unsigned long *ra_flags)
 {
-        /* XXX remove this assert when we really want to use this function
-         * to compare different file's region */
-        LASSERT(one->lt_oid == two->lt_oid);
+        struct file *file = vma->vm_file;
+        struct inode *inode = file->f_dentry->d_inode;
+        struct cl_io *io;
+        struct cl_fault_io *fio;
+        struct lu_env *env;
+        ENTRY;

-        if ( one->lt_oid < two->lt_oid)
-                return -1;
-        if ( one->lt_oid > two->lt_oid)
-                return 1;
+        *env_ret = NULL;
+        if (ll_file_nolock(file))
+                RETURN(ERR_PTR(-EOPNOTSUPP));

-        if ( one->lt_policy.l_extent.end < two->lt_policy.l_extent.start )
-                return -1;
-        if ( one->lt_policy.l_extent.start > two->lt_policy.l_extent.end )
-                return 1;
+        /*
+         * page fault can be called when lustre IO is
+         * already active for the current thread, e.g., when doing read/write
+         * against user level buffer mapped from Lustre buffer. To avoid
+         * stomping on existing context, optionally force an allocation of a new
+         * one.
+         */
+        env = cl_env_nested_get(nest);
+        if (IS_ERR(env))
+                RETURN(ERR_PTR(-EINVAL));

-        return 0; /* they are the same object and overlap */
-}
+        *env_ret = env;

-static void lt_merge(struct ll_lock_tree_node *dst,
-                     struct ll_lock_tree_node *src)
-{
-        dst->lt_policy.l_extent.start = min(dst->lt_policy.l_extent.start,
-                                            src->lt_policy.l_extent.start);
-        dst->lt_policy.l_extent.end = max(dst->lt_policy.l_extent.end,
-                                          src->lt_policy.l_extent.end);
-
-        /* XXX could be a real call to the dlm to find superset modes */
-        if (src->lt_mode == LCK_PW && dst->lt_mode != LCK_PW)
-                dst->lt_mode = LCK_PW;
-}
+        io = ccc_env_thread_io(env);
+        io->ci_obj = ll_i2info(inode)->lli_clob;
+        LASSERT(io->ci_obj != NULL);

-static void lt_insert(struct ll_lock_tree *tree,
-                      struct ll_lock_tree_node *node)
-{
-        struct ll_lock_tree_node *walk;
-        rb_node_t **p, *parent;
-        ENTRY;
+        fio = &io->u.ci_fault;
+        fio->ft_index = index;
+        fio->ft_executable = vma->vm_flags&VM_EXEC;

-restart:
-        p = &tree->lt_root.rb_node;
-        parent = NULL;
-        while (*p) {
-                parent = *p;
-                walk = rb_entry(parent, struct ll_lock_tree_node, lt_node);
-                switch (lt_compare(node, walk)) {
-                case -1:
-                        p = &(*p)->rb_left;
-                        break;
-                case 1:
-                        p = &(*p)->rb_right;
-                        break;
-                case 0:
-                        lt_merge(node, walk);
-                        rb_erase(&walk->lt_node, &tree->lt_root);
-                        OBD_FREE(walk, sizeof(*walk));
-                        goto restart;
-                        break;
-                default:
-                        LBUG();
-                        break;
-                }
+        /*
+         * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
+         * the kernel will not read other pages not covered by ldlm in
+         * filemap_nopage. we do our readahead in ll_readpage.
+         */
+        if (ra_flags != NULL)
+                *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
+        vma->vm_flags &= ~VM_SEQ_READ;
+        vma->vm_flags |= VM_RAND_READ;
+
+        CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
+               fio->ft_index, fio->ft_executable);
+
+        if (cl_io_init(env, io, CIT_FAULT, io->ci_obj) == 0) {
+                struct ccc_io *cio = ccc_env_io(env);
+                struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+
+                LASSERT(cio->cui_cl.cis_io == io);
+
+                /* mmap lock must be MANDATORY
+                 * it has to cache pages. */
+                io->ci_lockreq = CILR_MANDATORY;
+
+                cio->cui_fd = fd;
         }
-        rb_link_node(&node->lt_node, parent, p);
-        rb_insert_color(&node->lt_node, &tree->lt_root);
-        EXIT;
+
+        return io;
 }

-static struct ll_lock_tree_node *lt_least_node(struct ll_lock_tree *tree)
+/* Code shared by the page_mkwrite method for rhel5 and rhel6 */
+static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
+                            bool *retry)
 {
-        rb_node_t *rbnode;
-        struct ll_lock_tree_node *node = NULL;
-
-        for ( rbnode = tree->lt_root.rb_node; rbnode != NULL;
-              rbnode = rbnode->rb_left) {
-                if (rbnode->rb_left == NULL) {
-                        node = rb_entry(rbnode, struct ll_lock_tree_node,
-                                        lt_node);
-                        break;
+        struct lu_env *env;
+        struct cl_io *io;
+        struct vvp_io *vio;
+        struct cl_env_nest nest;
+        int result;
+        cfs_sigset_t set;
+        struct inode *inode;
+        struct ll_inode_info *lli;
+        ENTRY;
+
+        LASSERT(vmpage != NULL);
+
+        io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
+        if (IS_ERR(io))
+                GOTO(out, result = PTR_ERR(io));
+
+        result = io->ci_result;
+        if (result < 0)
+                GOTO(out, result);
+
+        io->u.ci_fault.ft_mkwrite = 1;
+        io->u.ci_fault.ft_writable = 1;
+
+        vio = vvp_env_io(env);
+        vio->u.fault.ft_vma = vma;
+        vio->u.fault.ft_vmpage = vmpage;
+
+        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
+
+        /* we grab lli_trunc_sem to exclude truncate case.
+         * Otherwise, we could add dirty pages into osc cache
+         * while truncate is on-going. */
+        inode = ccc_object_inode(io->ci_obj);
+        lli = ll_i2info(inode);
+        down_read(&lli->lli_trunc_sem);
+
+        result = cl_io_loop(env, io);
+
+        up_read(&lli->lli_trunc_sem);
+
+        cfs_restore_sigs(set);
+
+        if (result == 0) {
+                struct inode *inode = vma->vm_file->f_dentry->d_inode;
+                struct ll_inode_info *lli = ll_i2info(inode);
+
+                lock_page(vmpage);
+                if (vmpage->mapping == NULL) {
+                        unlock_page(vmpage);
+
+                        /* page was truncated and lock was cancelled, return
+                         * ENODATA so that VM_FAULT_NOPAGE will be returned
+                         * to handle_mm_fault(). */
+                        if (result == 0)
+                                result = -ENODATA;
+                } else if (!PageDirty(vmpage)) {
+                        /* race, the page has been cleaned by ptlrpcd after
+                         * it was unlocked, it has to be added into dirty
+                         * cache again otherwise this soon-to-dirty page won't
+                         * consume any grants, even worse if this page is being
+                         * transferred because it will break RPC checksum.
+                         */
+                        unlock_page(vmpage);
+
+                        CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has "
+                               "been written out, retry.\n",
+                               vmpage, vmpage->index);
+
+                        *retry = true;
+                        result = -EAGAIN;
                 }
+
+                if (result == 0) {
+                        spin_lock(&lli->lli_lock);
+                        lli->lli_flags |= LLIF_DATA_MODIFIED;
+                        spin_unlock(&lli->lli_lock);
+                }
         }
-        RETURN(node);
+        EXIT;
+
+out:
+        cl_io_fini(env, io);
+        cl_env_nested_put(&nest, env);
+
+        CDEBUG(D_MMAP, "%s mkwrite with %d\n", cfs_current()->comm, result);
+
+        LASSERT(ergo(result == 0, PageLocked(vmpage)));
+        return(result);
 }
-int ll_tree_unlock(struct ll_lock_tree *tree, struct inode *inode)
+
+#ifndef HAVE_VM_OP_FAULT
+/**
+ * Lustre implementation of a vm_operations_struct::nopage() method, called by
+ * VM to serve page faults (both in kernel and user space).
+ *
+ * This function sets up CIT_FAULT cl_io that does the job.
+ *
+ * \param vma - virtual area struct related to the page fault
+ * \param address - address where the fault hit
+ * \param type - type of the fault
+ *
+ * \return allocated and filled _unlocked_ page for address
+ * \retval NOPAGE_SIGBUS if the page does not exist at this address
+ * \retval NOPAGE_OOM if there is no memory to allocate a new page
+ */
+struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
+                       int *type)
 {
-        struct ll_lock_tree_node *node;
-        struct list_head *pos, *n;
-        int rc = 0;
+        struct lu_env *env;
+        struct cl_env_nest nest;
+        struct cl_io *io;
+        struct page *page = NOPAGE_SIGBUS;
+        struct vvp_io *vio = NULL;
+        unsigned long ra_flags;
+        pgoff_t pg_offset;
+        int result;
+        const unsigned long writable = VM_SHARED|VM_WRITE;
+        cfs_sigset_t set;
         ENTRY;

-        list_for_each_safe(pos, n, &tree->lt_locked_list) {
-                node = list_entry(pos, struct ll_lock_tree_node,
-                                  lt_locked_item);
+        pg_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+        io = ll_fault_io_init(vma, &env, &nest, pg_offset, &ra_flags);
+        if (IS_ERR(io))
+                return NOPAGE_SIGBUS;

-                rc = ll_extent_unlock(tree->lt_fd, inode,
-                                      ll_i2info(inode)->lli_smd, node->lt_mode,
-                                      &node->lt_lockh);
-                if (rc != 0) {
-                        /* XXX better message */
-                        CERROR("couldn't unlock %d\n", rc);
-                }
-                list_del(&node->lt_locked_item);
-                OBD_FREE(node, sizeof(*node));
-        }
+        result = io->ci_result;
+        if (result < 0)
+                goto out_err;

-        while ((node = lt_least_node(tree))) {
-                rb_erase(&node->lt_node, &tree->lt_root);
-                OBD_FREE(node, sizeof(*node));
-        }
+        io->u.ci_fault.ft_writable = (vma->vm_flags&writable) == writable;

-        RETURN(rc);
-}
+        vio = vvp_env_io(env);
+        vio->u.fault.ft_vma = vma;
+        vio->u.fault.nopage.ft_address = address;
+        vio->u.fault.nopage.ft_type = type;
+        vio->u.fault.ft_vmpage = NULL;

-int ll_tree_lock(struct ll_lock_tree *tree,
-                 struct ll_lock_tree_node *first_node, struct inode *inode,
-                 const char *buf, size_t count, int ast_flags)
-{
-        struct ll_lock_tree_node *node;
-        int rc = 0;
-        ENTRY;
+        set = cfs_block_sigsinv(sigmask(SIGKILL)|sigmask(SIGTERM));
+        result = cl_io_loop(env, io);
+        cfs_restore_sigs(set);

-        tree->lt_root.rb_node = NULL;
-        INIT_LIST_HEAD(&tree->lt_locked_list);
-        if (first_node != NULL)
-                lt_insert(tree, first_node);
-
-        /* order locking. what we have to concern about is ONLY double lock:
-         * the buffer is mapped to exactly this file. */
-        if (mapping_mapped(inode->i_mapping)) {
-                rc = lt_get_mmap_locks(tree, inode, (unsigned long)buf, count);
-                if (rc)
-                        GOTO(out, rc);
-        }
+        page = vio->u.fault.ft_vmpage;
+        if (result != 0 && page != NULL) {
+                page_cache_release(page);
+                page = NOPAGE_SIGBUS;
+        }

-        while ((node = lt_least_node(tree))) {
-                struct obd_service_time *stime;
-                stime = (node->lt_mode & LCK_PW) ?
-                        &ll_i2sbi(inode)->ll_write_stime :
-                        &ll_i2sbi(inode)->ll_read_stime;
-
-                rc = ll_extent_lock(tree->lt_fd, inode,
-                                    ll_i2info(inode)->lli_smd, node->lt_mode,
-                                    &node->lt_policy, &node->lt_lockh,
-                                    ast_flags, stime);
-                if (rc != 0)
-                        GOTO(out, rc);
-
-                rb_erase(&node->lt_node, &tree->lt_root);
-                list_add_tail(&node->lt_locked_item, &tree->lt_locked_list);
-        }
-        RETURN(rc);
-out:
-        ll_tree_unlock(tree, inode);
-        return rc;
-}
+out_err:
+        if (result == -ENOMEM)
+                page = NOPAGE_OOM;

-static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
-{
-        /* we only want to hold PW locks if the mmap() can generate
-         * writes back to the file and that only happens in shared
-         * writable vmas */
-        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
-                return LCK_PW;
-        return LCK_PR;
+        vma->vm_flags &= ~VM_RAND_READ;
+        vma->vm_flags |= ra_flags;
+
+        cl_io_fini(env, io);
+        cl_env_nested_put(&nest, env);
+
+        RETURN(page);
 }

-static void policy_from_vma(ldlm_policy_data_t *policy,
-                            struct vm_area_struct *vma, unsigned long addr,
-                            size_t count)
+#else
+
+static inline int to_fault_error(int result)
 {
-        policy->l_extent.start = ((addr - vma->vm_start) & PAGE_CACHE_MASK) +
-                                 (vma->vm_pgoff << PAGE_CACHE_SHIFT);
-        policy->l_extent.end = (policy->l_extent.start + count - 1) |
-                               (PAGE_CACHE_SIZE - 1);
+        switch(result) {
+        case 0:
+                result = VM_FAULT_LOCKED;
+                break;
+        case -EFAULT:
+                result = VM_FAULT_NOPAGE;
+                break;
+        case -ENOMEM:
+                result = VM_FAULT_OOM;
+                break;
+        default:
+                result = VM_FAULT_SIGBUS;
+                break;
+        }
+        return result;
 }

-static struct vm_area_struct *our_vma(unsigned long addr, size_t count,
-                                      struct inode *inode)
+/**
+ * Lustre implementation of a vm_operations_struct::fault() method, called by
+ * VM to serve page faults (both in kernel and user space).
+ *
+ * \param vma - virtual area struct related to the page fault
+ * \param vmf - structure which describes the type and address of the fault
+ *
+ * \return allocated and filled _locked_ page for address
+ * \retval VM_FAULT_ERROR on general error
+ * \retval NOPAGE_OOM if there is no memory to allocate a new page
+ */
+static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-        struct mm_struct *mm = current->mm;
-        struct vm_area_struct *vma, *ret = NULL;
+        struct lu_env *env;
+        struct cl_io *io;
+        struct vvp_io *vio = NULL;
+        struct page *vmpage;
+        unsigned long ra_flags;
+        struct cl_env_nest nest;
+        int result;
+        int fault_ret = 0;
         ENTRY;

-        spin_lock(&mm->page_table_lock);
-        for(vma = find_vma(mm, addr);
-            vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
-                if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage &&
-                    vma->vm_file && vma->vm_file->f_dentry->d_inode == inode) {
-                        ret = vma;
-                        break;
-                }
+        io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
+        if (IS_ERR(io))
+                RETURN(to_fault_error(PTR_ERR(io)));
+
+        result = io->ci_result;
+        if (result == 0) {
+                vio = vvp_env_io(env);
+                vio->u.fault.ft_vma = vma;
+                vio->u.fault.ft_vmpage = NULL;
+                vio->u.fault.fault.ft_vmf = vmf;
+
+                result = cl_io_loop(env, io);
+
+                fault_ret = vio->u.fault.fault.ft_flags;
+                vmpage = vio->u.fault.ft_vmpage;
+                if (result != 0 && vmpage != NULL) {
+                        page_cache_release(vmpage);
+                        vmf->page = NULL;
+                }
         }
-        spin_unlock(&mm->page_table_lock);
-        RETURN(ret);
+        cl_io_fini(env, io);
+        cl_env_nested_put(&nest, env);
+        vma->vm_flags |= ra_flags;
+        if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
+                fault_ret |= to_fault_error(result);
+
+        CDEBUG(D_MMAP, "%s fault %d/%d\n",
+               cfs_current()->comm, fault_ret, result);
+        RETURN(fault_ret);
 }

-int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
-                      unsigned long addr, size_t count)
+static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-        struct vm_area_struct *vma;
-        struct ll_lock_tree_node *node;
-        ldlm_policy_data_t policy;
-        ENTRY;
-
-        if (count == 0)
-                RETURN(0);
+        int count = 0;
+        bool printed = false;
+        int result;
+        cfs_sigset_t set;

-        /* we need to look up vmas on page aligned addresses */
-        count += addr & (PAGE_SIZE - 1);
-        addr -= addr & (PAGE_SIZE - 1);
+        /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
+         * so that it can be killed by admin but not cause segfault by
+         * other signals. */
+        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

-        while ((vma = our_vma(addr, count, inode)) != NULL) {
+restart:
+        result = ll_fault0(vma, vmf);
+        LASSERT(!(result & VM_FAULT_LOCKED));
+        if (result == 0) {
+                struct page *vmpage = vmf->page;
+
+                /* check if this page has been truncated */
+                lock_page(vmpage);
+                if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
+                        unlock_page(vmpage);
+                        page_cache_release(vmpage);
+                        vmf->page = NULL;
+
+                        if (!printed && ++count > 16) {
+                                CWARN("the page is under heavy contention, "
+                                      "maybe your app(%s) needs revising :-)\n",
+                                      current->comm);
+                                printed = true;
+                        }

-                policy_from_vma(&policy, vma, addr, count);
-                node = ll_node_from_inode(inode, policy.l_extent.start,
-                                          policy.l_extent.end,
-                                          mode_from_vma(vma));
-                if (IS_ERR(node)) {
-                        CERROR("not enough mem for lock_tree_node!\n");
-                        RETURN(-ENOMEM);
+                        goto restart;
                 }
-                lt_insert(tree, node);
-                if (vma->vm_end - addr >= count)
-                        break;
-                count -= vma->vm_end - addr;
-                addr = vma->vm_end;
+                result |= VM_FAULT_LOCKED;
         }
-        RETURN(0);
+        cfs_restore_sigs(set);
+        return result;
 }
-/* FIXME: there is a pagefault race goes as follow:
- * 1. A user process on node A accesses a portion of a mapped file,
- *    resulting in a page fault. The pagefault handler invokes the
- *    ll_nopage function, which reads the page into memory.
- * 2. A user process on node B writes to the same portion of the file
- *    (either via mmap or write()), that cause node A to cancel the
- *    lock and truncate the page.
- * 3. Node A then executes the rest of do_no_page(), entering the
- *    now-invalid page into the PTEs.
- *
- * Make the whole do_no_page as a hook to cover both the page cache
- * and page mapping installing with dlm lock would eliminate this race.
- */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
-                       int *type)
-#else
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
-                       int unused)
 #endif
+
+#ifndef HAVE_PGMKWRITE_USE_VMFAULT
+static int ll_page_mkwrite(struct vm_area_struct *vma, struct page *vmpage)
 {
-        struct file *filp = vma->vm_file;
-        struct ll_file_data *fd = filp->private_data;
-        struct inode *inode = filp->f_dentry->d_inode;
-        struct lustre_handle lockh = { 0 };
-        ldlm_policy_data_t policy;
-        ldlm_mode_t mode;
-        struct page *page = NULL;
-        struct ll_inode_info *lli = ll_i2info(inode);
-        struct obd_service_time *stime;
-        __u64 kms;
-        unsigned long pgoff, size, rand_read, seq_read;
-        int rc = 0;
-        ENTRY;
+        int count = 0;
+        bool printed = false;
+        bool retry;
+        int result;
+
+        do {
+                retry = false;
+                result = ll_page_mkwrite0(vma, vmpage, &retry);
+
+                if (!printed && ++count > 16) {
+                        CWARN("app(%s): the page %lu of file %lu is under heavy"
+                              " contention.\n",
+                              current->comm, page_index(vmpage),
+                              vma->vm_file->f_dentry->d_inode->i_ino);
+                        printed = true;
+                }
+        } while (retry);

-        if (lli->lli_smd == NULL) {
-                CERROR("No lsm on fault?\n");
-                RETURN(NULL);
-        }
+        if (result == 0)
+                unlock_page(vmpage);
+        else if (result == -ENODATA)
+                result = 0; /* kernel will know truncate has happened and
+                             * retry */

-        /* start and end the lock on the first and last bytes in the page */
-        policy_from_vma(&policy, vma, address, PAGE_CACHE_SIZE);
-
-        CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
-               vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);
-
-        mode = mode_from_vma(vma);
-        stime = (mode & LCK_PW) ? &ll_i2sbi(inode)->ll_write_stime :
-                                  &ll_i2sbi(inode)->ll_read_stime;
-
-        rc = ll_extent_lock(fd, inode, lli->lli_smd, mode, &policy,
-                            &lockh, LDLM_FL_CBPENDING, stime);
-        if (rc != 0)
-                RETURN(NULL);
-
-        /* XXX change inode size without i_sem hold! there is a race condition
-         * with truncate path. (see ll_extent_lock) */
-        down(&lli->lli_size_sem);
-        kms = lov_merge_size(lli->lli_smd, 1);
-        pgoff = ((address - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
-        size = (kms + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
-        if (pgoff >= size) {
-                up(&lli->lli_size_sem);
-                ll_glimpse_size(inode);
-        } else {
-                inode->i_size = kms;
-                up(&lli->lli_size_sem);
+        return result;
+}
+#else
+static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+        int count = 0;
+        bool printed = false;
+        bool retry;
+        int result;
+
+        do {
+                retry = false;
+                result = ll_page_mkwrite0(vma, vmf->page, &retry);
+
+                if (!printed && ++count > 16) {
+                        CWARN("app(%s): the page %lu of file %lu is under heavy"
+                              " contention.\n",
+                              current->comm, vmf->pgoff,
+                              vma->vm_file->f_dentry->d_inode->i_ino);
+                        printed = true;
+                }
+        } while (retry);
+
+        switch(result) {
+        case 0:
+                LASSERT(PageLocked(vmf->page));
+                result = VM_FAULT_LOCKED;
+                break;
+        case -ENODATA:
+        case -EFAULT:
+                result = VM_FAULT_NOPAGE;
+                break;
+        case -ENOMEM:
+                result = VM_FAULT_OOM;
+                break;
+        case -EAGAIN:
+                result = VM_FAULT_RETRY;
+                break;
+        default:
+                result = VM_FAULT_SIGBUS;
+                break;
         }
-        /* disable VM_SEQ_READ and use VM_RAND_READ to make sure that
-         * the kernel will not read other pages not covered by ldlm in
-         * filemap_nopage. we do our readahead in ll_readpage.
-         */
-        rand_read = vma->vm_flags & VM_RAND_READ;
-        seq_read = vma->vm_flags & VM_SEQ_READ;
-        vma->vm_flags &= ~ VM_SEQ_READ;
-        vma->vm_flags |= VM_RAND_READ;
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
-        page = filemap_nopage(vma, address, type);
-#else
-        page = filemap_nopage(vma, address, unused);
+        return result;
+}
 #endif
-        vma->vm_flags &= ~VM_RAND_READ;
-        vma->vm_flags |= (rand_read | seq_read);

-        ll_extent_unlock(fd, inode, ll_i2info(inode)->lli_smd, mode, &lockh);
-        RETURN(page);
+/**
+ * To avoid cancelling the locks covering the mmapped region under lock cache
+ * pressure, we track the mapped vma count in ccc_object::cob_mmap_cnt.
+ */
+static void ll_vm_open(struct vm_area_struct * vma)
+{
+        struct inode *inode = vma->vm_file->f_dentry->d_inode;
+        struct ccc_object *vob = cl_inode2ccc(inode);
+
+        ENTRY;
+        LASSERT(vma->vm_file);
+        LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0);
+        cfs_atomic_inc(&vob->cob_mmap_cnt);
+        EXIT;
 }

-/* return the user space pointer that maps to a file offset via a vma */
-static inline unsigned long file_to_user(struct vm_area_struct *vma,
-                                         __u64 byte)
+/**
+ * Dual to ll_vm_open().
+ */
+static void ll_vm_close(struct vm_area_struct *vma)
 {
-        return vma->vm_start +
-               (byte - ((__u64)vma->vm_pgoff << PAGE_CACHE_SHIFT));
+        struct inode *inode = vma->vm_file->f_dentry->d_inode;
+        struct ccc_object *vob = cl_inode2ccc(inode);
+
+        ENTRY;
+        LASSERT(vma->vm_file);
+        cfs_atomic_dec(&vob->cob_mmap_cnt);
+        LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0);
+        EXIT;
 }

-#define VMA_DEBUG(vma, fmt, arg...)                                     \
-        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) "  \
-               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end, \
-               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,              \
-               vma->vm_file->f_dentry->d_inode->i_ino,                      \
-               vma->vm_file->f_dentry->d_iname, ## arg);                    \
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-/* [first, last] are the byte offsets affected.
- * vm_{start, end} are user addresses of the first byte of the mapping and
- * the next byte beyond it
- * vm_pgoff is the page index of the first byte in the mapping */
-static void teardown_vmas(struct vm_area_struct *vma, __u64 first,
-                          __u64 last)
+#ifndef HAVE_VM_OP_FAULT
+#ifndef HAVE_FILEMAP_POPULATE
+static int (*filemap_populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
+#endif
+static int ll_populate(struct vm_area_struct *area, unsigned long address,
+                       unsigned long len, pgprot_t prot, unsigned long pgoff,
+                       int nonblock)
 {
-        unsigned long address, len;
-        for (; vma ; vma = vma->vm_next_share) {
-                if (last >> PAGE_SHIFT < vma->vm_pgoff)
-                        continue;
-                if (first >> PAGE_CACHE_SHIFT > (vma->vm_pgoff +
-                    ((vma->vm_end - vma->vm_start) >> PAGE_CACHE_SHIFT)))
-                        continue;
-
-                /* XXX in case of unmap the cow pages of a running file,
-                 * don't unmap these private writeable mapping here!
-                 * though that will break private mappping a little.
-                 *
-                 * the clean way is to check the mapping of every page
-                 * and just unmap the non-cow pages, just like
-                 * unmap_mapping_range() with even_cow=0 in kernel 2.6.
-                 */
-                if (!(vma->vm_flags & VM_SHARED) &&
-                    (vma->vm_flags & VM_WRITE))
-                        continue;
-
-                address = max((unsigned long)vma->vm_start,
-                              file_to_user(vma, first));
-                len = min((unsigned long)vma->vm_end,
-                          file_to_user(vma, last) + 1) - address;
-
-                VMA_DEBUG(vma, "zapping vma [first="LPU64" last="LPU64" "
-                          "address=%ld len=%ld]\n", first, last, address, len);
-                LASSERT(len > 0);
-                ll_zap_page_range(vma, address, len);
-        }
+        int rc = 0;
+        ENTRY;
+
+        /* always set nonblock as true to avoid page read ahead */
+        rc = filemap_populate(area, address, len, prot, pgoff, 1);
+        RETURN(rc);
 }
 #endif

+/* return the user space pointer that maps to a file offset via a vma */
+static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
+{
+        return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
+
+}
+
 /* XXX put nice comment here.  talk about __free_pte -> dirty pages and
  * nopage's reference passing to the pte */
-int ll_teardown_mmaps(struct address_space *mapping, __u64 first,
-                      __u64 last)
+int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
 {
         int rc = -ENOENT;
         ENTRY;

         LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
         if (mapping_mapped(mapping)) {
                 rc = 0;
-                unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
+                unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
                                     last - first + 1, 0);
         }
-#else
-        spin_lock(&mapping->i_shared_lock);
-        if (mapping->i_mmap != NULL) {
-                rc = 0;
-                teardown_vmas(mapping->i_mmap, first, last);
-        }
-        if (mapping->i_mmap_shared != NULL) {
-                rc = 0;
-                teardown_vmas(mapping->i_mmap_shared, first, last);
-        }
-        spin_unlock(&mapping->i_shared_lock);
-#endif
         RETURN(rc);
 }
-
-static void ll_close_vma(struct vm_area_struct *vma)
-{
-        struct inode *inode = vma->vm_file->f_dentry->d_inode;
-        struct address_space *mapping = inode->i_mapping;
-        unsigned long next, size, end;
-        struct ll_async_page *llap;
-        struct obd_export *exp;
-        struct pagevec pvec;
-        int i;
-
-        if (!(vma->vm_flags & VM_SHARED))
-                return;
-
-        /* all pte's are synced to mem_map by the moment
-         * we scan backing store and put all dirty pages
-         * onto pending list to track flushing */
-
-        LASSERT(LLI_DIRTY_HANDLE(inode));
-        exp = ll_i2dtexp(inode);
-        if (exp == NULL) {
-                CERROR("can't get export for the inode\n");
-                return;
-        }
-
-        pagevec_init(&pvec, 0);
-        next = vma->vm_pgoff;
-        size = (vma->vm_end - vma->vm_start) / PAGE_SIZE;
-        end = next + size - 1;
-
-        CDEBUG(D_INODE, "close vma 0x%p[%lu/%lu/%lu from %lu/%u]\n", vma,
-               next, size, end, inode->i_ino, inode->i_generation);
-
-        while (next <= end && pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
-                for (i = 0; i < pagevec_count(&pvec); i++) {
-                        struct page *page = pvec.pages[i];
-
-                        if (page->index > next)
-                                next = page->index;
-                        if (next > end)
-                                continue;
-                        next++;
-
-                        lock_page(page);
-                        if (page->mapping != mapping || !PageDirty(page)) {
-                                unlock_page(page);
-                                continue;
-                        }
-
-                        llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
-                        if (IS_ERR(llap)) {
-                                CERROR("can't get llap\n");
-                                unlock_page(page);
-                                continue;
-                        }
-
-                        llap_write_pending(inode, llap);
-                        unlock_page(page);
-                }
-                pagevec_release(&pvec);
-        }
-}
-
 static struct vm_operations_struct ll_file_vm_ops = {
-        .nopage = ll_nopage,
-        .close  = ll_close_vma,
+#ifndef HAVE_VM_OP_FAULT
+        .nopage                 = ll_nopage,
+        .populate               = ll_populate,
+#else
+        .fault                  = ll_fault,
+#endif
+#ifndef HAVE_PGMKWRITE_COMPACT
+        .page_mkwrite           = ll_page_mkwrite,
+#else
+        ._pmkw.page_mkwrite     = ll_page_mkwrite,
+#endif
+        .open                   = ll_vm_open,
+        .close                  = ll_vm_close,
 };

-/* Audit functions */
-extern int ll_audit_log (struct inode *, audit_op, int);
-
-int ll_file_mmap(struct file * file, struct vm_area_struct * vma)
+int ll_file_mmap(struct file *file, struct vm_area_struct * vma)
 {
+        struct inode *inode = file->f_dentry->d_inode;
         int rc;
         ENTRY;

+        if (ll_file_nolock(file))
+                RETURN(-EOPNOTSUPP);
+
+        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
         rc = generic_file_mmap(file, vma);
         if (rc == 0) {
-                struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
+#if !defined(HAVE_FILEMAP_POPULATE) && !defined(HAVE_VM_OP_FAULT)
+                if (!filemap_populate)
+                        filemap_populate = vma->vm_ops->populate;
+#endif
                 vma->vm_ops = &ll_file_vm_ops;
-
-                /* mark i/o epoch dirty */
-                if (vma->vm_flags & VM_SHARED)
-                        set_bit(LLI_F_DIRTY_HANDLE, &lli->lli_flags);
+                vma->vm_ops->open(vma);
+                /* update the inode's size and mtime */
+                rc = ll_glimpse_size(inode);
         }
-
-        ll_audit_log(file->f_dentry->d_inode, AUDIT_MMAP, rc);

         RETURN(rc);
 }
-
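
For readers who do not follow the kernel side of this patch, the control flow that ll_fault(), ll_page_mkwrite() and to_fault_error() implement above can be summarised outside the kernel as follows. This is a minimal user-space sketch, not Lustre or kernel code: fault_lowlevel(), to_fault_code() and the FAULT_* constants are hypothetical stand-ins for cl_io_loop(), to_fault_error() and the kernel's VM_FAULT_* flags.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the kernel's VM_FAULT_* codes (hypothetical values). */
enum { FAULT_LOCKED = 1, FAULT_OOM = 4, FAULT_SIGBUS = 8 };

/* Hypothetical low-level handler: 0 on success, -ENODATA if the page was
 * truncated while we worked on it, -ENOMEM on allocation failure. */
static int fault_lowlevel(unsigned long pgoff)
{
        switch (rand() % 8) {
        case 0:  return -ENODATA;   /* lost the race with truncate: retry */
        case 1:  return -ENOMEM;
        default: return 0;
        }
}

/* Map an errno-style result to a fault code, as to_fault_error() does above. */
static int to_fault_code(int result)
{
        switch (result) {
        case 0:        return FAULT_LOCKED;
        case -ENOMEM:  return FAULT_OOM;
        default:       return FAULT_SIGBUS;
        }
}

/* Wrapper mirroring the ll_fault()/ll_page_mkwrite() retry loop: fault again
 * whenever the page vanished underneath us, warning once under contention. */
static int fault_with_retry(unsigned long pgoff)
{
        int count = 0;
        bool printed = false;
        int result;

        do {
                result = fault_lowlevel(pgoff);
                if (result == -ENODATA && !printed && ++count > 16) {
                        fprintf(stderr, "page %lu is under heavy contention\n",
                                pgoff);
                        printed = true;
                }
        } while (result == -ENODATA);

        return to_fault_code(result);
}

int main(void)
{
        printf("fault result: %#x\n", fault_with_retry(42));
        return 0;
}

The point of the pattern, as the patch's own comments explain, is that a page can be truncated or written back between the moment the low-level handler finds it and the moment the caller can lock it; rather than propagating that transient state to the VM, the wrapper simply retries and only warns if the same page keeps disappearing.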