X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fllite%2Fllite_mmap.c;h=673ff2c0c22dc48d61ae049988bdca8a787f7536;hp=f8c030b929836e3765d96d749022aa0fec139f20;hb=9e5cb57addbb5d7bc1596096821ad8dcac7a939b;hpb=6869932b552ac705f411de3362f01bd50c1f6f7d diff --git a/lustre/llite/llite_mmap.c b/lustre/llite/llite_mmap.c index f8c030b..673ff2c 100644 --- a/lustre/llite/llite_mmap.c +++ b/lustre/llite/llite_mmap.c @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -17,545 +15,495 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2011, 2016, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. */ -#ifndef AUTOCONF_INCLUDED -#include -#endif -#include -#include -#include -#include #include -#include -#include -#include -#include -#include - -#include -#include -#include +#include +#include #include -#include -#include #define DEBUG_SUBSYSTEM S_LLITE -//#include -#include #include "llite_internal.h" -#include - -#define VMA_DEBUG(vma, fmt, arg...) 
\ - CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) " \ - "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end, \ - vma->vm_pgoff, vma->vm_file->f_dentry->d_inode, \ - vma->vm_file->f_dentry->d_inode->i_ino, \ - vma->vm_file->f_dentry->d_iname, ## arg); \ - - -struct ll_lock_tree_node { - rb_node_t lt_node; - struct list_head lt_locked_item; - __u64 lt_oid; - ldlm_policy_data_t lt_policy; - struct lustre_handle lt_lockh; - ldlm_mode_t lt_mode; - struct inode *lt_inode; -}; +#include -int lt_get_mmap_locks(struct ll_lock_tree *tree, - unsigned long addr, size_t count); +static const struct vm_operations_struct ll_file_vm_ops; -struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address, - int *type); - -struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start, - __u64 end, ldlm_mode_t mode) +void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma, + unsigned long addr, size_t count) { - struct ll_lock_tree_node *node; - - OBD_ALLOC(node, sizeof(*node)); - if (node == NULL) - RETURN(ERR_PTR(-ENOMEM)); - - node->lt_inode = inode; - node->lt_oid = ll_i2info(inode)->lli_smd->lsm_object_id; - node->lt_policy.l_extent.start = start; - node->lt_policy.l_extent.end = end; - memset(&node->lt_lockh, 0, sizeof(node->lt_lockh)); - INIT_LIST_HEAD(&node->lt_locked_item); - node->lt_mode = mode; - - return node; + policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) + + (vma->vm_pgoff << PAGE_SHIFT); + policy->l_extent.end = (policy->l_extent.start + count - 1) | + ~PAGE_MASK; } -int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two) +struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr, + size_t count) { - /* To avoid multiple fs deadlock */ - if (one->lt_inode->i_sb->s_dev < two->lt_inode->i_sb->s_dev) - return -1; - if (one->lt_inode->i_sb->s_dev > two->lt_inode->i_sb->s_dev) - return 1; - - if (one->lt_oid < two->lt_oid) - return -1; - if (one->lt_oid > two->lt_oid) - return 1; - - if (one->lt_policy.l_extent.end < two->lt_policy.l_extent.start) - return -1; - if (one->lt_policy.l_extent.start > two->lt_policy.l_extent.end) - return 1; - - return 0; /* they are the same object and overlap */ -} - -static void lt_merge(struct ll_lock_tree_node *dst, - struct ll_lock_tree_node *src) -{ - dst->lt_policy.l_extent.start = min(dst->lt_policy.l_extent.start, - src->lt_policy.l_extent.start); - dst->lt_policy.l_extent.end = max(dst->lt_policy.l_extent.end, - src->lt_policy.l_extent.end); - - /* XXX could be a real call to the dlm to find superset modes */ - if (src->lt_mode == LCK_PW && dst->lt_mode != LCK_PW) - dst->lt_mode = LCK_PW; -} - -static void lt_insert(struct ll_lock_tree *tree, - struct ll_lock_tree_node *node) -{ - struct ll_lock_tree_node *walk; - rb_node_t **p, *parent; + struct vm_area_struct *vma, *ret = NULL; ENTRY; -restart: - p = &tree->lt_root.rb_node; - parent = NULL; - while (*p) { - parent = *p; - walk = rb_entry(parent, struct ll_lock_tree_node, lt_node); - switch (lt_compare(node, walk)) { - case -1: - p = &(*p)->rb_left; - break; - case 1: - p = &(*p)->rb_right; - break; - case 0: - lt_merge(node, walk); - rb_erase(&walk->lt_node, &tree->lt_root); - OBD_FREE(walk, sizeof(*walk)); - goto restart; - break; - default: - LBUG(); - break; - } - } - rb_link_node(&node->lt_node, parent, p); - rb_insert_color(&node->lt_node, &tree->lt_root); - EXIT; -} + /* mmap_sem must have been held by caller. 
*/ + LASSERT(!down_write_trylock(&mm->mmap_sem)); -static struct ll_lock_tree_node *lt_least_node(struct ll_lock_tree *tree) -{ - rb_node_t *rbnode; - struct ll_lock_tree_node *node = NULL; - - for ( rbnode = tree->lt_root.rb_node; rbnode != NULL; - rbnode = rbnode->rb_left) { - if (rbnode->rb_left == NULL) { - node = rb_entry(rbnode, struct ll_lock_tree_node, - lt_node); + for(vma = find_vma(mm, addr); + vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) { + if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops && + vma->vm_flags & VM_SHARED) { + ret = vma; break; } } - RETURN(node); + RETURN(ret); } -int ll_tree_unlock(struct ll_lock_tree *tree) +/** + * API independent part for page fault initialization. + * \param env - corespondent lu_env to processing + * \param vma - virtual memory area addressed to page fault + * \param index - page index corespondent to fault. + * \parm ra_flags - vma readahead flags. + * + * \return error codes from cl_io_init. + */ +static struct cl_io * +ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma, + pgoff_t index, unsigned long *ra_flags) { - struct ll_lock_tree_node *node; - struct list_head *pos, *n; - struct inode *inode; - int rc = 0; - ENTRY; + struct file *file = vma->vm_file; + struct inode *inode = file_inode(file); + struct cl_io *io; + struct cl_fault_io *fio; + int rc; + ENTRY; - list_for_each_safe(pos, n, &tree->lt_locked_list) { - node = list_entry(pos, struct ll_lock_tree_node, - lt_locked_item); - - inode = node->lt_inode; - rc = ll_extent_unlock(tree->lt_fd, inode, - ll_i2info(inode)->lli_smd, node->lt_mode, - &node->lt_lockh); - if (rc != 0) { - /* XXX better message */ - CERROR("couldn't unlock %d\n", rc); - } - list_del(&node->lt_locked_item); - OBD_FREE(node, sizeof(*node)); - } - - while ((node = lt_least_node(tree))) { - rb_erase(&node->lt_node, &tree->lt_root); - OBD_FREE(node, sizeof(*node)); - } + if (ll_file_nolock(file)) + RETURN(ERR_PTR(-EOPNOTSUPP)); - RETURN(rc); -} +restart: + io = vvp_env_thread_io(env); + io->ci_obj = ll_i2info(inode)->lli_clob; + LASSERT(io->ci_obj != NULL); -int ll_tree_lock(struct ll_lock_tree *tree, - struct ll_lock_tree_node *first_node, - const char *buf, size_t count, int ast_flags) -{ - struct ll_lock_tree_node *node; - int rc = 0; - ENTRY; + fio = &io->u.ci_fault; + fio->ft_index = index; + fio->ft_executable = vma->vm_flags&VM_EXEC; - tree->lt_root.rb_node = NULL; - INIT_LIST_HEAD(&tree->lt_locked_list); - if (first_node != NULL) - lt_insert(tree, first_node); - - /* To avoid such subtle deadlock case: client1 try to read file1 to - * mmapped file2, on the same time, client2 try to read file2 to - * mmapped file1.*/ - rc = lt_get_mmap_locks(tree, (unsigned long)buf, count); - if (rc) - GOTO(out, rc); - - while ((node = lt_least_node(tree))) { - struct inode *inode = node->lt_inode; - rc = ll_extent_lock(tree->lt_fd, inode, - ll_i2info(inode)->lli_smd, node->lt_mode, - &node->lt_policy, &node->lt_lockh, - ast_flags); - if (rc != 0) - GOTO(out, rc); - - rb_erase(&node->lt_node, &tree->lt_root); - list_add_tail(&node->lt_locked_item, &tree->lt_locked_list); - } - RETURN(rc); -out: - ll_tree_unlock(tree); - RETURN(rc); -} + /* + * disable VM_SEQ_READ and use VM_RAND_READ to make sure that + * the kernel will not read other pages not covered by ldlm in + * filemap_nopage. we do our readahead in ll_readpage. 
+ */ + if (ra_flags != NULL) + *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ); + vma->vm_flags &= ~VM_SEQ_READ; + vma->vm_flags |= VM_RAND_READ; -static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma) -{ - /* we only want to hold PW locks if the mmap() can generate - * writes back to the file and that only happens in shared - * writable vmas */ - if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) - return LCK_PW; - return LCK_PR; -} + CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags, + fio->ft_index, fio->ft_executable); -static void policy_from_vma(ldlm_policy_data_t *policy, - struct vm_area_struct *vma, unsigned long addr, - size_t count) -{ - policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) + - (vma->vm_pgoff << CFS_PAGE_SHIFT); - policy->l_extent.end = (policy->l_extent.start + count - 1) | - ~CFS_PAGE_MASK; -} + rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj); + if (rc == 0) { + struct vvp_io *vio = vvp_env_io(env); + struct ll_file_data *fd = file->private_data; -static struct vm_area_struct * our_vma(unsigned long addr, size_t count) -{ - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma, *ret = NULL; - ENTRY; + LASSERT(vio->vui_cl.cis_io == io); - /* No MM (e.g. NFS)? No vmas too. */ - if (!mm) - RETURN(NULL); + /* mmap lock must be MANDATORY it has to cache + * pages. */ + io->ci_lockreq = CILR_MANDATORY; + vio->vui_fd = fd; + } else { + LASSERT(rc < 0); + cl_io_fini(env, io); + if (io->ci_need_restart) + goto restart; - spin_lock(&mm->page_table_lock); - for(vma = find_vma(mm, addr); - vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) { - if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage && - vma->vm_flags & VM_SHARED) { - ret = vma; - break; - } - } - spin_unlock(&mm->page_table_lock); - RETURN(ret); -} + io = ERR_PTR(rc); + } -int ll_region_mapped(unsigned long addr, size_t count) -{ - return !!our_vma(addr, count); + RETURN(io); } -int lt_get_mmap_locks(struct ll_lock_tree *tree, - unsigned long addr, size_t count) +/* Sharing code of page_mkwrite method for rhel5 and rhel6 */ +static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, + bool *retry) { - struct vm_area_struct *vma; - struct ll_lock_tree_node *node; - ldlm_policy_data_t policy; - struct inode *inode; - ENTRY; - - if (count == 0) - RETURN(0); - - /* we need to look up vmas on page aligned addresses */ - count += addr & (~CFS_PAGE_MASK); - addr &= CFS_PAGE_MASK; - - while ((vma = our_vma(addr, count)) != NULL) { - LASSERT(vma->vm_file); - - inode = vma->vm_file->f_dentry->d_inode; - policy_from_vma(&policy, vma, addr, count); - node = ll_node_from_inode(inode, policy.l_extent.start, - policy.l_extent.end, - mode_from_vma(vma)); - if (IS_ERR(node)) { - CERROR("not enough mem for lock_tree_node!\n"); - RETURN(-ENOMEM); + struct lu_env *env; + struct cl_io *io; + struct vvp_io *vio; + int result; + __u16 refcheck; + sigset_t set; + struct inode *inode = NULL; + struct ll_inode_info *lli; + ENTRY; + + LASSERT(vmpage != NULL); + env = cl_env_get(&refcheck); + if (IS_ERR(env)) + RETURN(PTR_ERR(env)); + + io = ll_fault_io_init(env, vma, vmpage->index, NULL); + if (IS_ERR(io)) + GOTO(out, result = PTR_ERR(io)); + + result = io->ci_result; + if (result < 0) + GOTO(out_io, result); + + io->u.ci_fault.ft_mkwrite = 1; + io->u.ci_fault.ft_writable = 1; + + vio = vvp_env_io(env); + vio->u.fault.ft_vma = vma; + vio->u.fault.ft_vmpage = vmpage; + + set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM)); + + inode = 
vvp_object_inode(io->ci_obj); + lli = ll_i2info(inode); + + result = cl_io_loop(env, io); + + cfs_restore_sigs(set); + + if (result == 0) { + lock_page(vmpage); + if (vmpage->mapping == NULL) { + unlock_page(vmpage); + + /* page was truncated and lock was cancelled, return + * ENODATA so that VM_FAULT_NOPAGE will be returned + * to handle_mm_fault(). */ + if (result == 0) + result = -ENODATA; + } else if (!PageDirty(vmpage)) { + /* race, the page has been cleaned by ptlrpcd after + * it was unlocked, it has to be added into dirty + * cache again otherwise this soon-to-dirty page won't + * consume any grants, even worse if this page is being + * transferred because it will break RPC checksum. + */ + unlock_page(vmpage); + + CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has " + "been written out, retry.\n", + vmpage, vmpage->index); + + *retry = true; + result = -EAGAIN; } - lt_insert(tree, node); - if (vma->vm_end - addr >= count) - break; - count -= vma->vm_end - addr; - addr = vma->vm_end; + if (result == 0) + ll_file_set_flag(lli, LLIF_DATA_MODIFIED); } - RETURN(0); + EXIT; + +out_io: + cl_io_fini(env, io); +out: + cl_env_put(env, &refcheck); + CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result); + LASSERT(ergo(result == 0, PageLocked(vmpage))); + + /* if page has been unmapped, presumably due to lock reclaim for + * concurrent usage, add some delay before retrying to prevent + * entering live-lock situation with competitors + */ + if (result == -ENODATA && inode != NULL) { + CDEBUG(D_MMAP, "delaying new page-fault for inode %p to " + "prevent live-lock\n", inode); + msleep(10); + } + + return result; +} + +static inline int to_fault_error(int result) +{ + switch(result) { + case 0: + result = VM_FAULT_LOCKED; + break; + case -ENOMEM: + result = VM_FAULT_OOM; + break; + default: + result = VM_FAULT_SIGBUS; + break; + } + return result; } /** - * Page fault handler. + * Lustre implementation of a vm_operations_struct::fault() method, called by + * VM to server page fault (both in kernel and user space). 
* * \param vma - is virtiual area struct related to page fault - * \param address - address when hit fault - * \param type - of fault + * \param vmf - structure which describe type and address where hit fault * - * \return allocated and filled page for address - * \retval NOPAGE_SIGBUS if page not exist on this address + * \return allocated and filled _locked_ page for address + * \retval VM_FAULT_ERROR on general error * \retval NOPAGE_OOM not have memory for allocate new page */ -struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address, - int *type) +static vm_fault_t ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf) { - struct file *filp = vma->vm_file; - struct ll_file_data *fd = LUSTRE_FPRIVATE(filp); - struct inode *inode = filp->f_dentry->d_inode; - struct lustre_handle lockh = { 0 }; - ldlm_policy_data_t policy; - ldlm_mode_t mode; - struct page *page = NULL; - struct ll_inode_info *lli = ll_i2info(inode); - struct lov_stripe_md *lsm; - struct ost_lvb lvb; - __u64 kms, old_mtime; - unsigned long pgoff, size, rand_read, seq_read; - int rc = 0; - ENTRY; - - if (lli->lli_smd == NULL) { - CERROR("No lsm on fault?\n"); - RETURN(NULL); + struct lu_env *env; + struct cl_io *io; + struct vvp_io *vio = NULL; + struct page *vmpage; + unsigned long ra_flags; + int result = 0; + int fault_ret = 0; + __u16 refcheck; + ENTRY; + + env = cl_env_get(&refcheck); + if (IS_ERR(env)) + RETURN(PTR_ERR(env)); + + if (ll_sbi_has_fast_read(ll_i2sbi(file_inode(vma->vm_file)))) { + /* do fast fault */ + ll_cl_add(vma->vm_file, env, NULL, LCC_MMAP); + fault_ret = ll_filemap_fault(vma, vmf); + ll_cl_remove(vma->vm_file, env); + + /* - If there is no error, then the page was found in cache and + * uptodate; + * - If VM_FAULT_RETRY is set, the page existed but failed to + * lock. It will return to kernel and retry; + * - Otherwise, it should try normal fault under DLM lock. */ + if ((fault_ret & VM_FAULT_RETRY) || + !(fault_ret & VM_FAULT_ERROR)) + GOTO(out, result = 0); + + fault_ret = 0; + } + + io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags); + if (IS_ERR(io)) + GOTO(out, result = PTR_ERR(io)); + + result = io->ci_result; + if (result == 0) { + vio = vvp_env_io(env); + vio->u.fault.ft_vma = vma; + vio->u.fault.ft_vmpage = NULL; + vio->u.fault.ft_vmf = vmf; + vio->u.fault.ft_flags = 0; + vio->u.fault.ft_flags_valid = 0; + + /* May call ll_readpage() */ + ll_cl_add(vma->vm_file, env, io, LCC_MMAP); + + result = cl_io_loop(env, io); + + ll_cl_remove(vma->vm_file, env); + + /* ft_flags are only valid if we reached + * the call to filemap_fault */ + if (vio->u.fault.ft_flags_valid) + fault_ret = vio->u.fault.ft_flags; + + vmpage = vio->u.fault.ft_vmpage; + if (result != 0 && vmpage != NULL) { + put_page(vmpage); + vmf->page = NULL; + } } + cl_io_fini(env, io); - ll_clear_file_contended(inode); - - /* start and end the lock on the first and last bytes in the page */ - policy_from_vma(&policy, vma, address, CFS_PAGE_SIZE); - - CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n", - vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end); - - mode = mode_from_vma(vma); - old_mtime = LTIME_S(inode->i_mtime); - - lsm = lli->lli_smd; - rc = ll_extent_lock(fd, inode, lsm, mode, &policy, - &lockh, LDLM_FL_CBPENDING | LDLM_FL_NO_LRU); - if (rc != 0) - RETURN(NULL); - - if (vma->vm_flags & VM_EXEC && LTIME_S(inode->i_mtime) != old_mtime) - CWARN("binary changed. 
inode %lu\n", inode->i_ino); - - lov_stripe_lock(lsm); - inode_init_lvb(inode, &lvb); - obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1); - kms = lvb.lvb_size; - - pgoff = ((address - vma->vm_start) >> CFS_PAGE_SHIFT) + vma->vm_pgoff; - size = (kms + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT; - - if (pgoff >= size) { - lov_stripe_unlock(lsm); - ll_glimpse_size(inode, LDLM_FL_BLOCK_GRANTED); - } else { - /* XXX change inode size without ll_inode_size_lock() held! - * there is a race condition with truncate path. (see - * ll_extent_lock) */ - /* XXX i_size_write() is not used because it is not safe to - * take the ll_inode_size_lock() due to a potential lock - * inversion (bug 6077). And since it's not safe to use - * i_size_write() without a covering mutex we do the - * assignment directly. It is not critical that the - * size be correct. */ - /* region is within kms and, hence, within real file size (A). - * We need to increase i_size to cover the read region so that - * generic_file_read() will do its job, but that doesn't mean - * the kms size is _correct_, it is only the _minimum_ size. - * If someone does a stat they will get the correct size which - * will always be >= the kms value here. b=11081 */ - if (i_size_read(inode) < kms) { - inode->i_size = kms; - CDEBUG(D_INODE, "ino=%lu, updating i_size %llu\n", - inode->i_ino, i_size_read(inode)); - } - lov_stripe_unlock(lsm); - } - - /* If mapping is writeable, adjust kms to cover this page, - * but do not extend kms beyond actual file size. - * policy.l_extent.end is set to the end of the page by policy_from_vma - * bug 10919 */ - lov_stripe_lock(lsm); - if (mode == LCK_PW) - obd_adjust_kms(ll_i2dtexp(inode), lsm, - min_t(loff_t, policy.l_extent.end + 1, - i_size_read(inode)), 0); - lov_stripe_unlock(lsm); - - /* disable VM_SEQ_READ and use VM_RAND_READ to make sure that - * the kernel will not read other pages not covered by ldlm in - * filemap_nopage. we do our readahead in ll_readpage. - */ - rand_read = vma->vm_flags & VM_RAND_READ; - seq_read = vma->vm_flags & VM_SEQ_READ; - vma->vm_flags &= ~ VM_SEQ_READ; - vma->vm_flags |= VM_RAND_READ; - - page = filemap_nopage(vma, address, type); - if (page != NOPAGE_SIGBUS && page != NOPAGE_OOM) - LL_CDEBUG_PAGE(D_PAGE, page, "got addr %lu type %lx\n", address, - (long)type); - else - CDEBUG(D_PAGE, "got addr %lu type %lx - SIGBUS\n", address, - (long)type); + vma->vm_flags |= ra_flags; - vma->vm_flags &= ~VM_RAND_READ; - vma->vm_flags |= (rand_read | seq_read); +out: + cl_env_put(env, &refcheck); + if (result != 0 && !(fault_ret & VM_FAULT_RETRY)) + fault_ret |= to_fault_error(result); - ll_extent_unlock(fd, inode, ll_i2info(inode)->lli_smd, mode, &lockh); - RETURN(page); + CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result); + RETURN(fault_ret); } -/* To avoid cancel the locks covering mmapped region for lock cache pressure, - * we track the mapped vma count by lli_mmap_cnt. - * ll_vm_open(): when first vma is linked, split locks from lru. - * ll_vm_close(): when last vma is unlinked, join all this file's locks to lru. - * - * XXX we don't check the if the region of vma/lock for performance. 
- */ -static void ll_vm_open(struct vm_area_struct * vma) +#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY +static vm_fault_t ll_fault(struct vm_fault *vmf) { - struct inode *inode = vma->vm_file->f_dentry->d_inode; - struct ll_inode_info *lli = ll_i2info(inode); - ENTRY; - - LASSERT(vma->vm_file); - - spin_lock(&lli->lli_lock); - LASSERT(atomic_read(&lli->lli_mmap_cnt) >= 0); - - atomic_inc(&lli->lli_mmap_cnt); - if (atomic_read(&lli->lli_mmap_cnt) == 1) { - struct lov_stripe_md *lsm = lli->lli_smd; - struct ll_sb_info *sbi = ll_i2sbi(inode); - int count; - - spin_unlock(&lli->lli_lock); - - if (!lsm) - return; - count = obd_join_lru(sbi->ll_dt_exp, lsm, 0); - VMA_DEBUG(vma, "split %d unused locks from lru\n", count); - } else { - spin_unlock(&lli->lli_lock); - } + struct vm_area_struct *vma = vmf->vma; +#else +static vm_fault_t ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ +#endif + int count = 0; + bool printed = false; + bool cached; + vm_fault_t result; + ktime_t kstart = ktime_get(); + sigset_t set; + + result = pcc_fault(vma, vmf, &cached); + if (cached) + goto out; + + /* Only SIGKILL and SIGTERM is allowed for fault/nopage/mkwrite + * so that it can be killed by admin but not cause segfault by + * other signals. */ + set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM)); + + /* make sure offset is not a negative number */ + if (vmf->pgoff > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) + return VM_FAULT_SIGBUS; +restart: + result = ll_fault0(vma, vmf); + if (vmf->page && + !(result & (VM_FAULT_RETRY | VM_FAULT_ERROR | VM_FAULT_LOCKED))) { + struct page *vmpage = vmf->page; + + /* check if this page has been truncated */ + lock_page(vmpage); + if (unlikely(vmpage->mapping == NULL)) { /* unlucky */ + unlock_page(vmpage); + put_page(vmpage); + vmf->page = NULL; + + if (!printed && ++count > 16) { + CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n", + current->comm); + printed = true; + } + + goto restart; + } + + result |= VM_FAULT_LOCKED; + } + cfs_restore_sigs(set); +out: + if (vmf->page && result == VM_FAULT_LOCKED) { + ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)), + current->pid, vma->vm_file->private_data, + cl_offset(NULL, vmf->page->index), PAGE_SIZE, + READ); + ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)), + LPROC_LL_FAULT, + ktime_us_delta(ktime_get(), kstart)); + } + + return result; } -static void ll_vm_close(struct vm_area_struct *vma) +#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY +static vm_fault_t ll_page_mkwrite(struct vm_fault *vmf) { - struct inode *inode = vma->vm_file->f_dentry->d_inode; - struct ll_inode_info *lli = ll_i2info(inode); - ENTRY; - - LASSERT(vma->vm_file); - - spin_lock(&lli->lli_lock); - LASSERT(atomic_read(&lli->lli_mmap_cnt) > 0); - - atomic_dec(&lli->lli_mmap_cnt); - if (atomic_read(&lli->lli_mmap_cnt) == 0) { - struct lov_stripe_md *lsm = lli->lli_smd; - struct ll_sb_info *sbi = ll_i2sbi(inode); - int count; - - spin_unlock(&lli->lli_lock); + struct vm_area_struct *vma = vmf->vma; +#else +static vm_fault_t ll_page_mkwrite(struct vm_area_struct *vma, + struct vm_fault *vmf) +{ +#endif + int count = 0; + bool printed = false; + bool retry; + bool cached; + ktime_t kstart = ktime_get(); + vm_fault_t result; + + result = pcc_page_mkwrite(vma, vmf, &cached); + if (cached) + goto out; + + file_update_time(vma->vm_file); + do { + retry = false; + result = ll_page_mkwrite0(vma, vmf->page, &retry); + + if (!printed && ++count > 16) { + const struct dentry *de = file_dentry(vma->vm_file); + + CWARN("app(%s): the 
page %lu of file "DFID" is under heavy contention\n", + current->comm, vmf->pgoff, + PFID(ll_inode2fid(de->d_inode))); + printed = true; + } + } while (retry); + + switch (result) { + case 0: + LASSERT(PageLocked(vmf->page)); + result = VM_FAULT_LOCKED; + break; + case -ENODATA: + case -EFAULT: + result = VM_FAULT_NOPAGE; + break; + case -ENOMEM: + result = VM_FAULT_OOM; + break; + case -EAGAIN: + result = VM_FAULT_RETRY; + break; + default: + result = VM_FAULT_SIGBUS; + break; + } - if (!lsm) - return; - count = obd_join_lru(sbi->ll_dt_exp, lsm, 1); - VMA_DEBUG(vma, "join %d unused locks to lru\n", count); - } else { - spin_unlock(&lli->lli_lock); - } +out: + if (result == VM_FAULT_LOCKED) { + ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)), + current->pid, vma->vm_file->private_data, + cl_offset(NULL, vmf->page->index), PAGE_SIZE, + WRITE); + ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)), + LPROC_LL_MKWRITE, + ktime_us_delta(ktime_get(), kstart)); + } + + return result; } -#ifndef HAVE_FILEMAP_POPULATE -static int (*filemap_populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock); -#endif -static int ll_populate(struct vm_area_struct *area, unsigned long address, - unsigned long len, pgprot_t prot, unsigned long pgoff, - int nonblock) +/** + * To avoid cancel the locks covering mmapped region for lock cache pressure, + * we track the mapped vma count in vvp_object::vob_mmap_cnt. + */ +static void ll_vm_open(struct vm_area_struct * vma) { - int rc = 0; - ENTRY; - - /* always set nonblock as true to avoid page read ahead */ - rc = filemap_populate(area, address, len, prot, pgoff, 1); - RETURN(rc); + struct inode *inode = file_inode(vma->vm_file); + struct vvp_object *vob = cl_inode2vvp(inode); + + ENTRY; + LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0); + atomic_inc(&vob->vob_mmap_cnt); + pcc_vm_open(vma); + EXIT; } -/* return the user space pointer that maps to a file offset via a vma */ -static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte) +/** + * Dual to ll_vm_open(). + */ +static void ll_vm_close(struct vm_area_struct *vma) { - return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT)); - + struct inode *inode = file_inode(vma->vm_file); + struct vvp_object *vob = cl_inode2vvp(inode); + + ENTRY; + atomic_dec(&vob->vob_mmap_cnt); + LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0); + pcc_vm_close(vma); + EXIT; } /* XXX put nice comment here. 
talk about __free_pte -> dirty pages and @@ -565,40 +513,51 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last) int rc = -ENOENT; ENTRY; - LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first); + LASSERTF(last > first, "last %llu first %llu\n", last, first); if (mapping_mapped(mapping)) { rc = 0; - unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1, - last - first + 1, 0); + unmap_mapping_range(mapping, first + PAGE_SIZE - 1, + last - first + 1, 1); } RETURN(rc); } -static struct vm_operations_struct ll_file_vm_ops = { - .nopage = ll_nopage, - .open = ll_vm_open, - .close = ll_vm_close, - .populate = ll_populate, +static const struct vm_operations_struct ll_file_vm_ops = { + .fault = ll_fault, + .page_mkwrite = ll_page_mkwrite, + .open = ll_vm_open, + .close = ll_vm_close, }; -int ll_file_mmap(struct file * file, struct vm_area_struct * vma) +int ll_file_mmap(struct file *file, struct vm_area_struct * vma) { - int rc; - ENTRY; - - ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode), LPROC_LL_MAP, 1); - rc = generic_file_mmap(file, vma); - if (rc == 0) { -#if !defined(HAVE_FILEMAP_POPULATE) - if (!filemap_populate) - filemap_populate = vma->vm_ops->populate; -#endif - vma->vm_ops = &ll_file_vm_ops; - vma->vm_ops->open(vma); - /* update the inode's size and mtime */ - rc = ll_glimpse_size(file->f_dentry->d_inode, 0); - } - - RETURN(rc); + struct inode *inode = file_inode(file); + ktime_t kstart = ktime_get(); + bool cached; + int rc; + + ENTRY; + + if (ll_file_nolock(file)) + RETURN(-EOPNOTSUPP); + + rc = pcc_file_mmap(file, vma, &cached); + if (cached && rc != 0) + RETURN(rc); + + rc = generic_file_mmap(file, vma); + if (rc == 0) { + vma->vm_ops = &ll_file_vm_ops; + vma->vm_ops->open(vma); + /* update the inode's size and mtime */ + if (!cached) + rc = ll_glimpse_size(inode); + } + + if (!rc) + ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MMAP, + ktime_us_delta(ktime_get(), kstart)); + + RETURN(rc); }
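
Note on exercising the paths above: with this patch, page faults on a Lustre mapping are served by ll_fault() and the first write to a clean shared page by ll_page_mkwrite(), both registered in ll_file_vm_ops and installed by ll_file_mmap(). The following user-space sketch is not part of the patch; it only illustrates how the two handlers get driven, and the path /mnt/lustre/testfile is an assumed client mount point.

/* Minimal exerciser for the mmap fault paths above.
 * Assumption: /mnt/lustre/testfile lives on a mounted Lustre client.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/mnt/lustre/testfile";	/* assumed mount point */
	size_t len = 4096;
	char *p;
	char c;
	int fd;

	fd = open(path, O_RDWR | O_CREAT, 0644);
	if (fd < 0 || ftruncate(fd, len) < 0) {
		perror("open/ftruncate");
		return EXIT_FAILURE;
	}

	/* MAP_SHARED + PROT_WRITE is what makes ->page_mkwrite() relevant;
	 * private or read-only mappings only ever take the ->fault() path. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	c = p[0];	/* read fault: handled by ->fault() (ll_fault) */
	p[0] = c + 1;	/* write to a clean shared page: ->page_mkwrite() */

	/* msync() forces writeback of the now-dirty page. */
	if (msync(p, len, MS_SYNC) < 0)
		perror("msync");

	munmap(p, len);
	close(fd);
	return 0;
}

If the covering DLM lock is cancelled while such a mapping exists, ll_teardown_mmaps() unmaps the range again; this is why both handlers above tolerate vmpage->mapping becoming NULL after the fact and arrange for the fault to be retried rather than failing outright.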