X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fllite%2Fllite_mmap.c;h=8ac6698a91c8fa8835b677963ffa2b6b7be115bd;hp=94f2b12911cfacb9ff8fdfd6f98f738565a3b3a5;hb=7542820698696ed5853ded30c9bf7fd5a78f0937;hpb=500f334631c6ebec72f5791472f21603da3e0ef9

diff --git a/lustre/llite/llite_mmap.c b/lustre/llite/llite_mmap.c
index 94f2b12..8ac6698 100644
--- a/lustre/llite/llite_mmap.c
+++ b/lustre/llite/llite_mmap.c
@@ -15,11 +15,7 @@
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,27 +23,17 @@
  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2016, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
  * Lustre is a trademark of Sun Microsystems, Inc.
  */
-#include
-#include
-#include
-#include
 #include
-#include
-#include
-#include
-
-#include
-#include
-#include
+#include
+#include
 #include
-#include

 #define DEBUG_SUBSYSTEM S_LLITE
@@ -60,7 +46,7 @@ void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma
 		     unsigned long addr, size_t count)
 {
 	policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
-				 (vma->vm_pgoff << PAGE_CACHE_SHIFT);
+				 (vma->vm_pgoff << PAGE_SHIFT);
 	policy->l_extent.end = (policy->l_extent.start + count - 1) |
 			       ~PAGE_MASK;
 }
@@ -90,13 +76,11 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
  * \param env - corespondent lu_env to processing
  * \param vma - virtual memory area addressed to page fault
  * \param index - page index corespondent to fault.
- * \parm ra_flags - vma readahead flags.
  *
  * \return error codes from cl_io_init.
  */
 static struct cl_io *
-ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
-		 pgoff_t index, unsigned long *ra_flags)
+ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma, pgoff_t index)
 {
 	struct file *file = vma->vm_file;
 	struct inode *inode = file_inode(file);
@@ -105,35 +89,32 @@ ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
 	int rc;
 	ENTRY;

-        if (ll_file_nolock(file))
-                RETURN(ERR_PTR(-EOPNOTSUPP));
+	if (ll_file_nolock(file))
+		RETURN(ERR_PTR(-EOPNOTSUPP));

 restart:
 	io = vvp_env_thread_io(env);
-        io->ci_obj = ll_i2info(inode)->lli_clob;
-        LASSERT(io->ci_obj != NULL);
-
-        fio = &io->u.ci_fault;
-        fio->ft_index = index;
-        fio->ft_executable = vma->vm_flags&VM_EXEC;
-
-        /*
-         * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
-         * the kernel will not read other pages not covered by ldlm in
-         * filemap_nopage. we do our readahead in ll_readpage.
-         */
-        if (ra_flags != NULL)
-                *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
-        vma->vm_flags &= ~VM_SEQ_READ;
-        vma->vm_flags |= VM_RAND_READ;
-
-        CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
-               fio->ft_index, fio->ft_executable);
+	io->ci_obj = ll_i2info(inode)->lli_clob;
+	LASSERT(io->ci_obj != NULL);
+
+	fio = &io->u.ci_fault;
+	fio->ft_index = index;
+	fio->ft_executable = vma->vm_flags & VM_EXEC;
+
+	CDEBUG(D_MMAP,
+	       DFID": vma=%p start=%#lx end=%#lx vm_flags=%#lx idx=%lu\n",
+	       PFID(&ll_i2info(inode)->lli_fid), vma, vma->vm_start,
+	       vma->vm_end, vma->vm_flags, fio->ft_index);
+
+	if (vma->vm_flags & VM_SEQ_READ)
+		io->ci_seq_read = 1;
+	else if (vma->vm_flags & VM_RAND_READ)
+		io->ci_rand_read = 1;

 	rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
 	if (rc == 0) {
 		struct vvp_io *vio = vvp_env_io(env);
-		struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+		struct ll_file_data *fd = file->private_data;

 		LASSERT(vio->vui_cl.cis_io == io);
@@ -162,8 +143,8 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
 	struct vvp_io *vio;
 	int result;
 	__u16 refcheck;
-	sigset_t set;
-	struct inode *inode;
+	sigset_t old, new;
+	struct inode *inode = NULL;
 	struct ll_inode_info *lli;
 	ENTRY;
@@ -172,7 +153,7 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
 	if (IS_ERR(env))
 		RETURN(PTR_ERR(env));

-	io = ll_fault_io_init(env, vma, vmpage->index, NULL);
+	io = ll_fault_io_init(env, vma, vmpage->index);
 	if (IS_ERR(io))
 		GOTO(out, result = PTR_ERR(io));
@@ -187,14 +168,15 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
 	vio->u.fault.ft_vma = vma;
 	vio->u.fault.ft_vmpage = vmpage;

-	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
+	siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
+	sigprocmask(SIG_BLOCK, &new, &old);

 	inode = vvp_object_inode(io->ci_obj);
 	lli = ll_i2info(inode);

 	result = cl_io_loop(env, io);

-	cfs_restore_sigs(set);
+	sigprocmask(SIG_SETMASK, &old, NULL);

 	if (result == 0) {
 		lock_page(vmpage);
@@ -235,6 +217,16 @@ out:
 	CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
 	LASSERT(ergo(result == 0, PageLocked(vmpage)));

+	/* if page has been unmapped, presumably due to lock reclaim for
+	 * concurrent usage, add some delay before retrying to prevent
+	 * entering live-lock situation with competitors
+	 */
+	if (result == -ENODATA && inode != NULL) {
+		CDEBUG(D_MMAP, "delaying new page-fault for inode %p to "
+		       "prevent live-lock\n", inode);
+		msleep(10);
+	}
+
 	return result;
 }
@@ -244,9 +236,6 @@ static inline int to_fault_error(int result)
 	case 0:
 		result = VM_FAULT_LOCKED;
 		break;
-	case -EFAULT:
-		result = VM_FAULT_NOPAGE;
-		break;
 	case -ENOMEM:
 		result = VM_FAULT_OOM;
 		break;
@@ -268,13 +257,12 @@ static inline int to_fault_error(int result)
  * \retval VM_FAULT_ERROR on general error
  * \retval NOPAGE_OOM not have memory for allocate new page
  */
-static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
+static vm_fault_t ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct lu_env *env;
 	struct cl_io *io;
 	struct vvp_io *vio = NULL;
 	struct page *vmpage;
-	unsigned long ra_flags;
 	int result = 0;
 	int fault_ret = 0;
 	__u16 refcheck;
@@ -286,23 +274,29 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)

 	if (ll_sbi_has_fast_read(ll_i2sbi(file_inode(vma->vm_file)))) {
 		/* do fast fault */
+		bool has_retry = vmf->flags & FAULT_FLAG_RETRY_NOWAIT;
+
+		/* To avoid loops, instruct downstream to not drop mmap_sem */
+		vmf->flags |= FAULT_FLAG_RETRY_NOWAIT;
 		ll_cl_add(vma->vm_file, env, NULL, LCC_MMAP);
-		fault_ret = filemap_fault(vma, vmf);
+		fault_ret = ll_filemap_fault(vma, vmf);
 		ll_cl_remove(vma->vm_file, env);
+		if (!has_retry)
+			vmf->flags &= ~FAULT_FLAG_RETRY_NOWAIT;

 		/* - If there is no error, then the page was found in cache and
 		 *   uptodate;
 		 * - If VM_FAULT_RETRY is set, the page existed but failed to
-		 *   lock. It will return to kernel and retry;
+		 *   lock. We will try slow path to avoid loops.
 		 * - Otherwise, it should try normal fault under DLM lock. */
-		if ((fault_ret & VM_FAULT_RETRY) ||
+		if (!(fault_ret & VM_FAULT_RETRY) &&
 		    !(fault_ret & VM_FAULT_ERROR))
 			GOTO(out, result = 0);

 		fault_ret = 0;
 	}

-	io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags);
+	io = ll_fault_io_init(env, vma, vmf->pgoff);
 	if (IS_ERR(io))
 		GOTO(out, result = PTR_ERR(io));
@@ -329,14 +323,12 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
 			vmpage = vio->u.fault.ft_vmpage;
 			if (result != 0 && vmpage != NULL) {
-				page_cache_release(vmpage);
+				put_page(vmpage);
 				vmf->page = NULL;
 			}
 		}
 	}
 	cl_io_fini(env, io);

-	vma->vm_flags |= ra_flags;
-
 out:
 	cl_env_put(env, &refcheck);
 	if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
@@ -346,96 +338,147 @@ out:
 	RETURN(fault_ret);
 }

-static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
+static vm_fault_t ll_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+#else
+static vm_fault_t ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+#endif
 	int count = 0;
 	bool printed = false;
-	int result;
-	sigset_t set;
+	bool cached;
+	vm_fault_t result;
+	ktime_t kstart = ktime_get();
+	sigset_t old, new;
+
+	result = pcc_fault(vma, vmf, &cached);
+	if (cached)
+		goto out;
+
+	CDEBUG(D_MMAP, DFID": vma=%p start=%#lx end=%#lx vm_flags=%#lx\n",
+	       PFID(&ll_i2info(file_inode(vma->vm_file))->lli_fid),
+	       vma, vma->vm_start, vma->vm_end, vma->vm_flags);

 	/* Only SIGKILL and SIGTERM is allowed for fault/nopage/mkwrite
 	 * so that it can be killed by admin but not cause segfault by
-	 * other signals. */
-	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
+	 * other signals.
+	 */
+	siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
+	sigprocmask(SIG_BLOCK, &new, &old);

-	ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
-			   LPROC_LL_FAULT, 1);
+	/* make sure offset is not a negative number */
+	if (vmf->pgoff > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
+		return VM_FAULT_SIGBUS;

 restart:
 	result = ll_fault0(vma, vmf);
-	if (!(result & (VM_FAULT_RETRY | VM_FAULT_ERROR | VM_FAULT_LOCKED))) {
-		struct page *vmpage = vmf->page;
+	if (vmf->page &&
+	    !(result & (VM_FAULT_RETRY | VM_FAULT_ERROR | VM_FAULT_LOCKED))) {
+		struct page *vmpage = vmf->page;
+
+		/* check if this page has been truncated */
+		lock_page(vmpage);
+		if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
+			unlock_page(vmpage);
+			put_page(vmpage);
+			vmf->page = NULL;

-		/* check if this page has been truncated */
-		lock_page(vmpage);
-		if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
-			unlock_page(vmpage);
-			page_cache_release(vmpage);
-			vmf->page = NULL;
+			if (!printed && ++count > 16) {
+				CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n",
+				      current->comm);
+				printed = true;
+			}

-			if (!printed && ++count > 16) {
-				CWARN("the page is under heavy contention,"
-				      "maybe your app(%s) needs revising :-)\n",
-				      current->comm);
-				printed = true;
-			}
+			goto restart;
+		}

-			goto restart;
-		}
+		result |= VM_FAULT_LOCKED;
+	}
+	sigprocmask(SIG_SETMASK, &old, NULL);

-		result |= VM_FAULT_LOCKED;
-	}
-	cfs_restore_sigs(set);
-	return result;
+out:
+	if (vmf->page && result == VM_FAULT_LOCKED) {
+		ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
+				  current->pid, vma->vm_file->private_data,
+				  cl_offset(NULL, vmf->page->index), PAGE_SIZE,
+				  READ);
+		ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
+				   LPROC_LL_FAULT,
+				   ktime_us_delta(ktime_get(), kstart));
+	}
+
+	return result;
 }

-static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
+static vm_fault_t ll_page_mkwrite(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+#else
+static vm_fault_t ll_page_mkwrite(struct vm_area_struct *vma,
+				  struct vm_fault *vmf)
 {
+#endif
 	int count = 0;
 	bool printed = false;
 	bool retry;
-	int result;
+	bool cached;
+	ktime_t kstart = ktime_get();
+	vm_fault_t result;

-	ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
-			   LPROC_LL_MKWRITE, 1);
+	result = pcc_page_mkwrite(vma, vmf, &cached);
+	if (cached)
+		goto out;

 	file_update_time(vma->vm_file);
-        do {
-                retry = false;
-                result = ll_page_mkwrite0(vma, vmf->page, &retry);
+	do {
+		retry = false;
+		result = ll_page_mkwrite0(vma, vmf->page, &retry);

-                if (!printed && ++count > 16) {
+		if (!printed && ++count > 16) {
 			const struct dentry *de = file_dentry(vma->vm_file);

-			CWARN("app(%s): the page %lu of file "DFID" is under"
-			      " heavy contention\n",
+			CWARN("app(%s): the page %lu of file "DFID" is under heavy contention\n",
 			      current->comm, vmf->pgoff,
 			      PFID(ll_inode2fid(de->d_inode)));
-                        printed = true;
-                }
-        } while (retry);
+			printed = true;
+		}
+	} while (retry);

-	switch(result) {
-	case 0:
-		LASSERT(PageLocked(vmf->page));
-		result = VM_FAULT_LOCKED;
-		break;
-	case -ENODATA:
-	case -EFAULT:
-		result = VM_FAULT_NOPAGE;
-		break;
-	case -ENOMEM:
-		result = VM_FAULT_OOM;
-		break;
-	case -EAGAIN:
-		result = VM_FAULT_RETRY;
-		break;
-	default:
-		result = VM_FAULT_SIGBUS;
-		break;
-	}
+	switch (result) {
+	case 0:
+		LASSERT(PageLocked(vmf->page));
+		result = VM_FAULT_LOCKED;
+		break;
+	case -ENODATA:
+	case -EFAULT:
+		result = VM_FAULT_NOPAGE;
+		break;
+	case -ENOMEM:
+		result = VM_FAULT_OOM;
+		break;
+	case -EAGAIN:
+		result = VM_FAULT_RETRY;
+		break;
+	default:
+		result = VM_FAULT_SIGBUS;
+		break;
+	}
+
+out:
+	if (result == VM_FAULT_LOCKED) {
+		ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
+				  current->pid, vma->vm_file->private_data,
+				  cl_offset(NULL, vmf->page->index), PAGE_SIZE,
+				  WRITE);
+		ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
+				   LPROC_LL_MKWRITE,
+				   ktime_us_delta(ktime_get(), kstart));
+	}
+
+	return result;
 }

 /**
@@ -450,6 +493,7 @@ static void ll_vm_open(struct vm_area_struct * vma)
 	ENTRY;
 	LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
 	atomic_inc(&vob->vob_mmap_cnt);
+	pcc_vm_open(vma);
 	EXIT;
 }

@@ -464,6 +508,7 @@ static void ll_vm_close(struct vm_area_struct *vma)
 	ENTRY;
 	atomic_dec(&vob->vob_mmap_cnt);
 	LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
+	pcc_vm_close(vma);
 	EXIT;
 }

@@ -477,8 +522,8 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
 	LASSERTF(last > first, "last %llu first %llu\n", last, first);
 	if (mapping_mapped(mapping)) {
 		rc = 0;
-		unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
-				    last - first + 1, 0);
+		unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
+				    last - first + 1, 0);
 	}

 	RETURN(rc);
@@ -494,20 +539,35 @@ static const struct vm_operations_struct ll_file_vm_ops = {
 int ll_file_mmap(struct file *file, struct vm_area_struct * vma)
 {
 	struct inode *inode = file_inode(file);
-	int rc;
-	ENTRY;
+	ktime_t kstart = ktime_get();
+	bool cached;
+	int rc;

-	if (ll_file_nolock(file))
-		RETURN(-EOPNOTSUPP);
+	ENTRY;
+	CDEBUG(D_VFSTRACE | D_MMAP,
+	       "VFS_Op: fid="DFID" vma=%p start=%#lx end=%#lx vm_flags=%#lx\n",
+	       PFID(&ll_i2info(inode)->lli_fid),
+	       vma, vma->vm_start, vma->vm_end, vma->vm_flags);

-	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
-	rc = generic_file_mmap(file, vma);
-	if (rc == 0) {
-		vma->vm_ops = &ll_file_vm_ops;
-		vma->vm_ops->open(vma);
-		/* update the inode's size and mtime */
-		rc = ll_glimpse_size(inode);
-	}
+	if (ll_file_nolock(file))
+		RETURN(-EOPNOTSUPP);

-	RETURN(rc);
+	rc = pcc_file_mmap(file, vma, &cached);
+	if (cached && rc != 0)
+		RETURN(rc);
+
+	rc = generic_file_mmap(file, vma);
+	if (rc == 0) {
+		vma->vm_ops = &ll_file_vm_ops;
+		vma->vm_ops->open(vma);
+		/* update the inode's size and mtime */
+		if (!cached)
+			rc = ll_glimpse_size(inode);
+	}
+
+	if (!rc)
+		ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MMAP,
+				   ktime_us_delta(ktime_get(), kstart));
+
+	RETURN(rc);
 }
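
The extent arithmetic in policy_from_vma() above converts a faulting address range into file-relative byte offsets for the DLM lock request. Below is a minimal userspace sketch of that same arithmetic, runnable as-is; the mapping values are made up for illustration, and the simplified PAGE_* macros stand in for the kernel's.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	/* hypothetical mapping: file offset 3 pages in, one byte touched */
	unsigned long vm_start = 0x7f0000001000UL;	/* vma->vm_start */
	unsigned long vm_pgoff = 3;			/* vma->vm_pgoff */
	unsigned long addr = vm_start + 0x2345;		/* faulting address */
	size_t count = 1;				/* bytes accessed */

	/* start: page-aligned file offset of the faulting address */
	uint64_t start = ((addr - vm_start) & PAGE_MASK) +
			 (vm_pgoff << PAGE_SHIFT);
	/* end: last byte of the page containing start + count - 1 */
	uint64_t end = (start + count - 1) | ~PAGE_MASK;

	printf("lock extent: [%llu, %llu]\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}

Rounding the extent out to page boundaries this way means the lock covers every byte the fault can touch, since the VM always faults whole pages.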
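The patch also replaces the libcfs wrappers cfs_block_sigsinv()/cfs_restore_sigs() with open-coded siginitsetinv() + sigprocmask(), blocking every signal except SIGKILL and SIGTERM around fault handling so an admin can kill a stuck process without other signals turning into segfaults. siginitsetinv() is kernel-internal, so this userspace sketch builds the equivalent "everything except" mask with sigfillset()/sigdelset(); the block/restore shape is the same.

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	sigset_t new, old;

	/* Build "all signals except SIGKILL and SIGTERM"; the kernel patch
	 * does this with siginitsetinv(&new, sigmask(SIGKILL) |
	 * sigmask(SIGTERM)). SIGKILL can never be blocked anyway; listing
	 * it documents intent. */
	sigfillset(&new);
	sigdelset(&new, SIGKILL);
	sigdelset(&new, SIGTERM);

	/* Block everything else for the critical section, saving the
	 * caller's mask. */
	sigprocmask(SIG_BLOCK, &new, &old);

	printf("critical section: interruptible only by SIGKILL/SIGTERM\n");
	sleep(1);

	/* Restore the original mask, as the patch does on the way out. */
	sigprocmask(SIG_SETMASK, &old, NULL);
	return 0;
}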
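Finally, the mkwrite path now sleeps 10 ms (msleep(10)) before the caller retries when the page was unmapped underneath it and -ENODATA comes back, so competitors faulting the same page do not spin against each other in a live-lock. A sketch of that retry-with-backoff shape, with a hypothetical try_operation() standing in for the mkwrite attempt:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-in for an operation that transiently fails with
 * -ENODATA while a competitor holds the resource. */
static int try_operation(int attempt)
{
	return attempt < 3 ? -ENODATA : 0;
}

int main(void)
{
	int attempt = 0, rc;

	while ((rc = try_operation(attempt++)) == -ENODATA) {
		/* Back off briefly before refaulting, mirroring the
		 * msleep(10) the patch adds, so competing threads make
		 * forward progress instead of live-locking. */
		usleep(10 * 1000);
	}
	printf("succeeded after %d attempts (rc=%d)\n", attempt, rc);
	return 0;
}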