X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fllite%2Fllite_mmap.c;h=eeeefdb1282e1a8973f3292f75c322df46af09c6;hb=046f50c36e2cc0f976612c056bd44479173be886;hp=821f96fb557a85d7a3918f30a76d0d0f6305a4b2;hpb=e8ffe16619baf1ef7c5c6b117d338956372aa752;p=fs%2Flustre-release.git

diff --git a/lustre/llite/llite_mmap.c b/lustre/llite/llite_mmap.c
index 821f96f..eeeefdb 100644
--- a/lustre/llite/llite_mmap.c
+++ b/lustre/llite/llite_mmap.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -29,26 +27,20 @@
  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011 Whamcloud, Inc.
- *
+ * Copyright (c) 2011, 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
  * Lustre is a trademark of Sun Microsystems, Inc.
  */

-#ifndef AUTOCONF_INCLUDED
-#include
-#endif
 #include
 #include
 #include
 #include
 #include
-#include
 #include
 #include
-#include
 #include
 #include
@@ -56,7 +48,6 @@
 #include
 #include
 #include
-#include

 #define DEBUG_SUBSYSTEM S_LLITE

@@ -74,22 +65,20 @@ void policy_from_vma(ldlm_policy_data_t *policy,
                      size_t count)
 {
         policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
-                                 (vma->vm_pgoff << CFS_PAGE_SHIFT);
+                                 (vma->vm_pgoff << PAGE_CACHE_SHIFT);
         policy->l_extent.end = (policy->l_extent.start + count - 1) |
                                ~CFS_PAGE_MASK;
 }

-struct vm_area_struct * our_vma(unsigned long addr, size_t count)
+struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
+                               size_t count)
 {
-        struct mm_struct *mm = current->mm;
         struct vm_area_struct *vma, *ret = NULL;
         ENTRY;

-        /* No MM (e.g. NFS)? No vmas too. */
-        if (!mm)
-                RETURN(NULL);
+        /* mmap_sem must have been held by caller. */
+        LASSERT(!down_write_trylock(&mm->mmap_sem));

-        spin_lock(&mm->page_table_lock);
         for(vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                 if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
@@ -98,7 +87,6 @@ struct vm_area_struct * our_vma(unsigned long addr, size_t count)
                         break;
                 }
         }
-        spin_unlock(&mm->page_table_lock);

         RETURN(ret);
 }
@@ -115,16 +103,17 @@ struct vm_area_struct * our_vma(unsigned long addr, size_t count)
  * \return other error codes from cl_io_init.
  */
 struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
-                               struct lu_env **env_ret,
-                               struct cl_env_nest *nest,
-                               pgoff_t index, unsigned long *ra_flags)
+                               struct lu_env **env_ret,
+                               struct cl_env_nest *nest,
+                               pgoff_t index, unsigned long *ra_flags)
 {
-        struct file *file = vma->vm_file;
-        struct inode *inode = file->f_dentry->d_inode;
-        struct cl_io *io;
-        struct cl_fault_io *fio;
-        struct lu_env *env;
-        ENTRY;
+        struct file *file = vma->vm_file;
+        struct inode *inode = file->f_dentry->d_inode;
+        struct cl_io *io;
+        struct cl_fault_io *fio;
+        struct lu_env *env;
+        int rc;
+        ENTRY;

         *env_ret = NULL;
         if (ll_file_nolock(file))
@@ -164,62 +153,77 @@ struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
         CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
                fio->ft_index, fio->ft_executable);

-        if (cl_io_init(env, io, CIT_FAULT, io->ci_obj) == 0) {
-                struct ccc_io *cio = ccc_env_io(env);
-                struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
-                LASSERT(cio->cui_cl.cis_io == io);
-
-                /* mmap lock must be MANDATORY
-                 * it has to cache pages. */
-                io->ci_lockreq = CILR_MANDATORY;
-
-                cio->cui_fd = fd;
-        }
-
-        return io;
+        rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
+        if (rc == 0) {
+                struct ccc_io *cio = ccc_env_io(env);
+                struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+
+                LASSERT(cio->cui_cl.cis_io == io);
+
+                /* mmap lock must be MANDATORY it has to cache
+                 * pages. */
+                io->ci_lockreq = CILR_MANDATORY;
+                cio->cui_fd = fd;
+        } else {
+                LASSERT(rc < 0);
+                cl_io_fini(env, io);
+                cl_env_nested_put(nest, env);
+                io = ERR_PTR(rc);
+        }
+
+        return io;
 }

 /* Sharing code of page_mkwrite method for rhel5 and rhel6 */
 static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
                             bool *retry)
 {
-        struct lu_env *env;
-        struct cl_io *io;
-        struct vvp_io *vio;
-        struct cl_env_nest nest;
-        int result;
-        ENTRY;
+        struct lu_env *env;
+        struct cl_io *io;
+        struct vvp_io *vio;
+        struct cl_env_nest nest;
+        int result;
+        sigset_t set;
+        struct inode *inode;
+        struct ll_inode_info *lli;
+        ENTRY;

-        LASSERT(vmpage != NULL);
+        LASSERT(vmpage != NULL);

-        io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
-        if (IS_ERR(io))
-                GOTO(out, result = PTR_ERR(io));
+        io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
+        if (IS_ERR(io))
+                GOTO(out, result = PTR_ERR(io));

-        result = io->ci_result;
-        if (result < 0)
-                GOTO(out, result);
+        result = io->ci_result;
+        if (result < 0)
+                GOTO(out_io, result);

-        /* Don't enqueue new locks for page_mkwrite().
-         * If the lock has been cancelled then page must have been
-         * truncated, in that case, kernel will handle it.
-         */
-        io->ci_lockreq = CILR_PEEK;
-        io->u.ci_fault.ft_mkwrite = 1;
-        io->u.ci_fault.ft_writable = 1;
+        io->u.ci_fault.ft_mkwrite = 1;
+        io->u.ci_fault.ft_writable = 1;

-        vio = vvp_env_io(env);
-        vio->u.fault.ft_vma = vma;
-        vio->u.fault.ft_vmpage = vmpage;
+        vio = vvp_env_io(env);
+        vio->u.fault.ft_vma = vma;
+        vio->u.fault.ft_vmpage = vmpage;

-        result = cl_io_loop(env, io);
+        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

-        if (result == -ENODATA) /* peek failed, no lock caching. */
-                CDEBUG(D_MMAP, "race on page_mkwrite: %lx (%lu %p)\n",
-                       vma->vm_flags, io->u.ci_fault.ft_index, vmpage);
+        /* we grab lli_trunc_sem to exclude truncate case.
+         * Otherwise, we could add dirty pages into osc cache
+         * while truncate is on-going. */
+        inode = ccc_object_inode(io->ci_obj);
+        lli = ll_i2info(inode);
+        down_read(&lli->lli_trunc_sem);
+
+        result = cl_io_loop(env, io);
+
+        up_read(&lli->lli_trunc_sem);
+
+        cfs_restore_sigs(set);
+
+        if (result == 0) {
+                struct inode *inode = vma->vm_file->f_dentry->d_inode;
+                struct ll_inode_info *lli = ll_i2info(inode);

-        if (result == 0 || result == -ENODATA) {
                 lock_page(vmpage);
                 if (vmpage->mapping == NULL) {
                         unlock_page(vmpage);
@@ -229,14 +233,6 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
                          * to handle_mm_fault(). */
                         if (result == 0)
                                 result = -ENODATA;
-                } else if (result == -ENODATA) {
-                        /* Invalidate it if the cl_lock is being revoked.
-                         * This piece of code is definitely needed for RHEL5,
-                         * otherwise, SIGBUS will be wrongly returned to
-                         * applications. */
-                        ll_invalidate_page(vmpage);
-                        LASSERT(vmpage->mapping == NULL);
-                        unlock_page(vmpage);
                 } else if (!PageDirty(vmpage)) {
                         /* race, the page has been cleaned by ptlrpcd after
                          * it was unlocked, it has to be added into dirty
@@ -253,114 +249,44 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
                         *retry = true;
                         result = -EAGAIN;
                 }
+
+                if (result == 0) {
+                        spin_lock(&lli->lli_lock);
+                        lli->lli_flags |= LLIF_DATA_MODIFIED;
+                        spin_unlock(&lli->lli_lock);
+                }
         }
         EXIT;
+out_io:
+        cl_io_fini(env, io);
+        cl_env_nested_put(&nest, env);
 out:
-        cl_io_fini(env, io);
-        cl_env_nested_put(&nest, env);
-
-        CDEBUG(D_MMAP, "%s mkwrite with %d\n", cfs_current()->comm, result);
+        CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
+        LASSERT(ergo(result == 0, PageLocked(vmpage)));

-        LASSERT(ergo(result == 0, PageLocked(vmpage)));
-        return(result);
+        return result;
 }
-
-#ifndef HAVE_VM_OP_FAULT
-/**
- * Lustre implementation of a vm_operations_struct::nopage() method, called by
- * VM to server page fault (both in kernel and user space).
- *
- * This function sets up CIT_FAULT cl_io that does the job.
- *
- * \param vma - is virtiual area struct related to page fault
- * \param address - address when hit fault
- * \param type - of fault
- *
- * \return allocated and filled _unlocked_ page for address
- * \retval NOPAGE_SIGBUS if page not exist on this address
- * \retval NOPAGE_OOM not have memory for allocate new page
- */
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
-                       int *type)
+static inline int to_fault_error(int result)
 {
-        struct lu_env *env;
-        struct cl_env_nest nest;
-        struct cl_io *io;
-        struct page *page = NOPAGE_SIGBUS;
-        struct vvp_io *vio = NULL;
-        unsigned long ra_flags;
-        pgoff_t pg_offset;
-        int result;
-        const unsigned long writable = VM_SHARED|VM_WRITE;
-        ENTRY;
-
-        pg_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-        io = ll_fault_io_init(vma, &env, &nest, pg_offset, &ra_flags);
-        if (IS_ERR(io))
-                return NOPAGE_SIGBUS;
-
-        result = io->ci_result;
-        if (result < 0)
-                goto out_err;
-
-        io->u.ci_fault.ft_writable = (vma->vm_flags&writable) == writable;
-
-        vio = vvp_env_io(env);
-        vio->u.fault.ft_vma = vma;
-        vio->u.fault.nopage.ft_address = address;
-        vio->u.fault.nopage.ft_type = type;
-        vio->u.fault.ft_vmpage = NULL;
-
-        result = cl_io_loop(env, io);
-        page = vio->u.fault.ft_vmpage;
-        if (result != 0 && page != NULL)
-                page_cache_release(page);
-
-out_err:
-        if (result == -ENOMEM)
-                page = NOPAGE_OOM;
-
-        vma->vm_flags &= ~VM_RAND_READ;
-        vma->vm_flags |= ra_flags;
-
-        cl_io_fini(env, io);
-        cl_env_nested_put(&nest, env);
-
-        RETURN(page);
+        switch(result) {
+        case 0:
+                result = VM_FAULT_LOCKED;
+                break;
+        case -EFAULT:
+                result = VM_FAULT_NOPAGE;
+                break;
+        case -ENOMEM:
+                result = VM_FAULT_OOM;
+                break;
+        default:
+                result = VM_FAULT_SIGBUS;
+                break;
+        }
+        return result;
 }

-static int ll_page_mkwrite(struct vm_area_struct *vma, struct page *vmpage)
-{
-        int count = 0;
-        bool printed = false;
-        bool retry;
-        int result;
-
-        do {
-                retry = false;
-                result = ll_page_mkwrite0(vma, vmpage, &retry);
-
-                if (!printed && ++count > 16) {
-                        CWARN("app(%s): the page %lu of file %lu is under heavy"
-                              " contention.\n",
-                              current->comm, page_index(vmpage),
-                              vma->vm_file->f_dentry->d_inode->i_ino);
-                        printed = true;
-                }
-        } while (retry);
-
-        if (result == 0)
-                unlock_page(vmpage);
-        else if (result == -ENODATA)
-                result = 0; /* kernel will know truncate has happened and
-                             * retry */
-
-        return result;
-}
-
-#else
 /**
  * Lustre implementation of a vm_operations_struct::fault() method, called by
  * VM to server page fault (both in kernel and user space).
@@ -386,46 +312,47 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)

         io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
         if (IS_ERR(io))
-                RETURN(VM_FAULT_ERROR);
+                RETURN(to_fault_error(PTR_ERR(io)));

         result = io->ci_result;
-        if (result < 0)
-                goto out_err;
-
-        vio = vvp_env_io(env);
-        vio->u.fault.ft_vma = vma;
-        vio->u.fault.ft_vmpage = NULL;
-        vio->u.fault.fault.ft_vmf = vmf;
-
-        result = cl_io_loop(env, io);
-
-        vmpage = vio->u.fault.ft_vmpage;
-        if (result != 0 && vmpage != NULL) {
-                page_cache_release(vmpage);
-                vmf->page = NULL;
+        if (result == 0) {
+                vio = vvp_env_io(env);
+                vio->u.fault.ft_vma = vma;
+                vio->u.fault.ft_vmpage = NULL;
+                vio->u.fault.fault.ft_vmf = vmf;
+
+                result = cl_io_loop(env, io);
+
+                fault_ret = vio->u.fault.fault.ft_flags;
+                vmpage = vio->u.fault.ft_vmpage;
+                if (result != 0 && vmpage != NULL) {
+                        page_cache_release(vmpage);
+                        vmf->page = NULL;
+                }
         }
+        cl_io_fini(env, io);
+        cl_env_nested_put(&nest, env);

-        fault_ret = vio->u.fault.fault.ft_flags;
-
-out_err:
-        if (result != 0 && fault_ret == 0)
-                fault_ret = VM_FAULT_ERROR;
-
-        vma->vm_flags |= ra_flags;
-
-        cl_io_fini(env, io);
-        cl_env_nested_put(&nest, env);
+        vma->vm_flags |= ra_flags;
+        if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
+                fault_ret |= to_fault_error(result);

-        CDEBUG(D_MMAP, "%s fault %d/%d\n",
-               cfs_current()->comm, fault_ret, result);
-        RETURN(fault_ret);
+        CDEBUG(D_MMAP, "%s fault %d/%d\n",
+               current->comm, fault_ret, result);
+        RETURN(fault_ret);
 }

 static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-        int count = 0;
-        bool printed = false;
-        int result;
+        int count = 0;
+        bool printed = false;
+        int result;
+        sigset_t set;
+
+        /* Only SIGKILL and SIGTERM is allowed for fault/nopage/mkwrite
+         * so that it can be killed by admin but not cause segfault by
+         * other signals. */
+        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

 restart:
         result = ll_fault0(vma, vmf);
@@ -452,6 +379,7 @@ restart:
                 result |= VM_FAULT_LOCKED;
         }

+        cfs_restore_sigs(set);
         return result;
 }

@@ -497,7 +425,6 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
         return result;
 }

-#endif

 /**
  * To avoid cancel the locks covering mmapped region for lock cache pressure,
@@ -530,27 +457,11 @@ static void ll_vm_close(struct vm_area_struct *vma)
         EXIT;
 }

-#ifndef HAVE_VM_OP_FAULT
-#ifndef HAVE_FILEMAP_POPULATE
-static int (*filemap_populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
-#endif
-static int ll_populate(struct vm_area_struct *area, unsigned long address,
-                       unsigned long len, pgprot_t prot, unsigned long pgoff,
-                       int nonblock)
-{
-        int rc = 0;
-        ENTRY;
-
-        /* always set nonblock as true to avoid page read ahead */
-        rc = filemap_populate(area, address, len, prot, pgoff, 1);
-        RETURN(rc);
-}
-#endif
-
 /* return the user space pointer that maps to a file offset via a vma */
 static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
 {
-        return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
+        return vma->vm_start +
+               (byte - ((__u64)vma->vm_pgoff << PAGE_CACHE_SHIFT));
 }

@@ -564,7 +475,7 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
         LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
         if (mapping_mapped(mapping)) {
                 rc = 0;
-                unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
+                unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
                                     last - first + 1, 0);
         }

@@ -572,16 +483,10 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
 }

 static struct vm_operations_struct ll_file_vm_ops = {
-#ifndef HAVE_VM_OP_FAULT
-        .nopage = ll_nopage,
-        .populate = ll_populate,
-
-#else
-        .fault = ll_fault,
-#endif
-        .page_mkwrite = ll_page_mkwrite,
-        .open = ll_vm_open,
-        .close = ll_vm_close,
+        .fault = ll_fault,
+        .page_mkwrite = ll_page_mkwrite,
+        .open = ll_vm_open,
+        .close = ll_vm_close,
 };

 int ll_file_mmap(struct file *file, struct vm_area_struct * vma)
@@ -596,14 +501,10 @@ int ll_file_mmap(struct file *file, struct vm_area_struct * vma)
         ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
         rc = generic_file_mmap(file, vma);
         if (rc == 0) {
-#if !defined(HAVE_FILEMAP_POPULATE) && !defined(HAVE_VM_OP_FAULT)
-                if (!filemap_populate)
-                        filemap_populate = vma->vm_ops->populate;
-#endif
                 vma->vm_ops = &ll_file_vm_ops;
                 vma->vm_ops->open(vma);
                 /* update the inode's size and mtime */
-                rc = cl_glimpse_size(inode);
+                rc = ll_glimpse_size(inode);
         }

         RETURN(rc);
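Note on the our_vma() change above: the lookup no longer takes mm->page_table_lock itself, and the new LASSERT(!down_write_trylock(&mm->mmap_sem)) insists that the caller already holds mmap_sem. The following is a minimal caller-side sketch of that contract; it is illustrative only (not part of the patch), the example_vma_lookup() wrapper is hypothetical, and it assumes a kernel of this vintage where mm->mmap_sem is an rw_semaphore.

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

/* Prototype as changed by this patch. */
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
                               size_t count);

/* Hypothetical caller: holding mmap_sem for read is enough to make
 * down_write_trylock() fail, so the assertion in our_vma() passes.
 * Keep the semaphore held for as long as the returned vma is used. */
static void example_vma_lookup(unsigned long addr, size_t count)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (mm == NULL)         /* e.g. kernel thread with no address space */
                return;

        down_read(&mm->mmap_sem);
        vma = our_vma(mm, addr, count);
        if (vma != NULL) {
                /* ... use vma while mmap_sem is still held ... */
        }
        up_read(&mm->mmap_sem);
}

In-tree callers such as ll_fault()/ll_page_mkwrite() already run in the page-fault path, where the kernel holds mmap_sem for read, which is why the function can simply assert instead of locking.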