X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fllite%2Fllite_mmap.c;h=ead6b392c91ce567b49040b81d1d4194c4568d4e;hp=5fec39afa49d36a6da364c88d9b001bab0f1f5fb;hb=ea58c4cfb0fc255befbbb7754bd4ed71704a2a2c;hpb=08aa217ce49aba1ded52e0f7adb8a607035123fd diff --git a/lustre/llite/llite_mmap.c b/lustre/llite/llite_mmap.c index 5fec39a..ead6b39 100644 --- a/lustre/llite/llite_mmap.c +++ b/lustre/llite/llite_mmap.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -27,47 +23,32 @@ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2012, Intel Corporation. + * Copyright (c) 2011, 2016, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. */ -#include -#include -#include -#include #include -#include -#include -#include - -#include -#include -#include +#include +#include #include -#include #define DEBUG_SUBSYSTEM S_LLITE -#include #include "llite_internal.h" -#include - -struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address, - int *type); +#include -static struct vm_operations_struct ll_file_vm_ops; +static const struct vm_operations_struct ll_file_vm_ops; -void policy_from_vma(ldlm_policy_data_t *policy, - struct vm_area_struct *vma, unsigned long addr, - size_t count) +void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma, + unsigned long addr, size_t count) { - policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) + - (vma->vm_pgoff << CFS_PAGE_SHIFT); - policy->l_extent.end = (policy->l_extent.start + count - 1) | - ~CFS_PAGE_MASK; + policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) + + (vma->vm_pgoff << PAGE_SHIFT); + policy->l_extent.end = (policy->l_extent.start + count - 1) | + ~PAGE_MASK; } struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr, @@ -92,46 +73,29 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr, /** * API independent part for page fault initialization. - * \param vma - virtual memory area addressed to page fault * \param env - corespondent lu_env to processing - * \param nest - nested level + * \param vma - virtual memory area addressed to page fault * \param index - page index corespondent to fault. * \parm ra_flags - vma readahead flags. * - * \return allocated and initialized env for fault operation. - * \retval EINVAL if env can't allocated - * \return other error codes from cl_io_init. + * \return error codes from cl_io_init. 
*/ -struct cl_io *ll_fault_io_init(struct vm_area_struct *vma, - struct lu_env **env_ret, - struct cl_env_nest *nest, - pgoff_t index, unsigned long *ra_flags) +static struct cl_io * +ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma, + pgoff_t index, unsigned long *ra_flags) { - struct file *file = vma->vm_file; - struct inode *inode = file->f_dentry->d_inode; - struct cl_io *io; - struct cl_fault_io *fio; - struct lu_env *env; - ENTRY; + struct file *file = vma->vm_file; + struct inode *inode = file_inode(file); + struct cl_io *io; + struct cl_fault_io *fio; + int rc; + ENTRY; - *env_ret = NULL; if (ll_file_nolock(file)) RETURN(ERR_PTR(-EOPNOTSUPP)); - /* - * page fault can be called when lustre IO is - * already active for the current thread, e.g., when doing read/write - * against user level buffer mapped from Lustre buffer. To avoid - * stomping on existing context, optionally force an allocation of a new - * one. - */ - env = cl_env_nested_get(nest); - if (IS_ERR(env)) - RETURN(ERR_PTR(-EINVAL)); - - *env_ret = env; - - io = ccc_env_thread_io(env); +restart: + io = vvp_env_thread_io(env); io->ci_obj = ll_i2info(inode)->lli_clob; LASSERT(io->ci_obj != NULL); @@ -152,20 +116,27 @@ struct cl_io *ll_fault_io_init(struct vm_area_struct *vma, CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags, fio->ft_index, fio->ft_executable); - if (cl_io_init(env, io, CIT_FAULT, io->ci_obj) == 0) { - struct ccc_io *cio = ccc_env_io(env); - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); + rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj); + if (rc == 0) { + struct vvp_io *vio = vvp_env_io(env); + struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - LASSERT(cio->cui_cl.cis_io == io); + LASSERT(vio->vui_cl.cis_io == io); - /* mmap lock must be MANDATORY - * it has to cache pages. */ - io->ci_lockreq = CILR_MANDATORY; + /* mmap lock must be MANDATORY it has to cache + * pages. */ + io->ci_lockreq = CILR_MANDATORY; + vio->vui_fd = fd; + } else { + LASSERT(rc < 0); + cl_io_fini(env, io); + if (io->ci_need_restart) + goto restart; - cio->cui_fd = fd; - } + io = ERR_PTR(rc); + } - return io; + RETURN(io); } /* Sharing code of page_mkwrite method for rhel5 and rhel6 */ @@ -175,28 +146,26 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, struct lu_env *env; struct cl_io *io; struct vvp_io *vio; - struct cl_env_nest nest; int result; - cfs_sigset_t set; - struct inode *inode; + __u16 refcheck; + sigset_t set; + struct inode *inode = NULL; struct ll_inode_info *lli; ENTRY; LASSERT(vmpage != NULL); + env = cl_env_get(&refcheck); + if (IS_ERR(env)) + RETURN(PTR_ERR(env)); - io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL); + io = ll_fault_io_init(env, vma, vmpage->index, NULL); if (IS_ERR(io)) GOTO(out, result = PTR_ERR(io)); result = io->ci_result; if (result < 0) - GOTO(out, result); + GOTO(out_io, result); - /* Don't enqueue new locks for page_mkwrite(). - * If the lock has been cancelled then page must have been - * truncated, in that case, kernel will handle it. - */ - io->ci_lockreq = CILR_PEEK; io->u.ci_fault.ft_mkwrite = 1; io->u.ci_fault.ft_writable = 1; @@ -206,24 +175,14 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM)); - /* we grab lli_trunc_sem to exclude truncate case. - * Otherwise, we could add dirty pages into osc cache - * while truncate is on-going. 
*/ - inode = ccc_object_inode(io->ci_obj); + inode = vvp_object_inode(io->ci_obj); lli = ll_i2info(inode); - down_read(&lli->lli_trunc_sem); result = cl_io_loop(env, io); - up_read(&lli->lli_trunc_sem); - cfs_restore_sigs(set); - if (result == -ENODATA) /* peek failed, no lock caching. */ - CDEBUG(D_MMAP, "race on page_mkwrite: %lx (%lu %p)\n", - vma->vm_flags, io->u.ci_fault.ft_index, vmpage); - - if (result == 0 || result == -ENODATA) { + if (result == 0) { lock_page(vmpage); if (vmpage->mapping == NULL) { unlock_page(vmpage); @@ -233,18 +192,6 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, * to handle_mm_fault(). */ if (result == 0) result = -ENODATA; - } else if (result == -ENODATA) { - /* Invalidate it if the cl_lock is being revoked. - * This piece of code is definitely needed for RHEL5, - * otherwise, SIGBUS will be wrongly returned to - * applications. */ - write_one_page(vmpage, 1); - lock_page(vmpage); - if (vmpage->mapping != NULL) { - ll_invalidate_page(vmpage); - LASSERT(vmpage->mapping == NULL); - } - unlock_page(vmpage); } else if (!PageDirty(vmpage)) { /* race, the page has been cleaned by ptlrpcd after * it was unlocked, it has to be added into dirty @@ -261,101 +208,38 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, *retry = true; result = -EAGAIN; } + + if (result == 0) + ll_file_set_flag(lli, LLIF_DATA_MODIFIED); } EXIT; +out_io: + cl_io_fini(env, io); out: - cl_io_fini(env, io); - cl_env_nested_put(&nest, env); + cl_env_put(env, &refcheck); + CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result); + LASSERT(ergo(result == 0, PageLocked(vmpage))); - CDEBUG(D_MMAP, "%s mkwrite with %d\n", cfs_current()->comm, result); - - LASSERT(ergo(result == 0, PageLocked(vmpage))); - return(result); -} - - -#ifndef HAVE_VM_OP_FAULT -/** - * Lustre implementation of a vm_operations_struct::nopage() method, called by - * VM to server page fault (both in kernel and user space). - * - * This function sets up CIT_FAULT cl_io that does the job. 
- * - * \param vma - is virtiual area struct related to page fault - * \param address - address when hit fault - * \param type - of fault - * - * \return allocated and filled _unlocked_ page for address - * \retval NOPAGE_SIGBUS if page not exist on this address - * \retval NOPAGE_OOM not have memory for allocate new page - */ -struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address, - int *type) -{ - struct lu_env *env; - struct cl_env_nest nest; - struct cl_io *io; - struct page *page = NOPAGE_SIGBUS; - struct vvp_io *vio = NULL; - unsigned long ra_flags; - pgoff_t pg_offset; - int result; - const unsigned long writable = VM_SHARED|VM_WRITE; - cfs_sigset_t set; - ENTRY; - - pg_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - io = ll_fault_io_init(vma, &env, &nest, pg_offset, &ra_flags); - if (IS_ERR(io)) - return NOPAGE_SIGBUS; - - result = io->ci_result; - if (result < 0) - goto out_err; - - io->u.ci_fault.ft_writable = (vma->vm_flags&writable) == writable; - - vio = vvp_env_io(env); - vio->u.fault.ft_vma = vma; - vio->u.fault.nopage.ft_address = address; - vio->u.fault.nopage.ft_type = type; - vio->u.fault.ft_vmpage = NULL; - - set = cfs_block_sigsinv(sigmask(SIGKILL)|sigmask(SIGTERM)); - result = cl_io_loop(env, io); - cfs_restore_sigs(set); - - page = vio->u.fault.ft_vmpage; - if (result != 0 && page != NULL) { - page_cache_release(page); - page = NOPAGE_SIGBUS; + /* if page has been unmapped, presumably due to lock reclaim for + * concurrent usage, add some delay before retrying to prevent + * entering live-lock situation with competitors + */ + if (result == -ENODATA && inode != NULL) { + CDEBUG(D_MMAP, "delaying new page-fault for inode %p to " + "prevent live-lock\n", inode); + msleep(10); } -out_err: - if (result == -ENOMEM) - page = NOPAGE_OOM; - - vma->vm_flags &= ~VM_RAND_READ; - vma->vm_flags |= ra_flags; - - cl_io_fini(env, io); - cl_env_nested_put(&nest, env); - - RETURN(page); + return result; } -#else - static inline int to_fault_error(int result) { switch(result) { case 0: result = VM_FAULT_LOCKED; break; - case -EFAULT: - result = VM_FAULT_NOPAGE; - break; case -ENOMEM: result = VM_FAULT_OOM; break; @@ -377,73 +261,122 @@ static inline int to_fault_error(int result) * \retval VM_FAULT_ERROR on general error * \retval NOPAGE_OOM not have memory for allocate new page */ -static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf) +static vm_fault_t ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf) { - struct lu_env *env; - struct cl_io *io; - struct vvp_io *vio = NULL; - struct page *vmpage; - unsigned long ra_flags; - struct cl_env_nest nest; - int result; - int fault_ret = 0; - ENTRY; + struct lu_env *env; + struct cl_io *io; + struct vvp_io *vio = NULL; + struct page *vmpage; + unsigned long ra_flags; + int result = 0; + int fault_ret = 0; + __u16 refcheck; + ENTRY; - io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags); - if (IS_ERR(io)) - RETURN(to_fault_error(PTR_ERR(io))); + env = cl_env_get(&refcheck); + if (IS_ERR(env)) + RETURN(PTR_ERR(env)); + + if (ll_sbi_has_fast_read(ll_i2sbi(file_inode(vma->vm_file)))) { + /* do fast fault */ + ll_cl_add(vma->vm_file, env, NULL, LCC_MMAP); + fault_ret = ll_filemap_fault(vma, vmf); + ll_cl_remove(vma->vm_file, env); + + /* - If there is no error, then the page was found in cache and + * uptodate; + * - If VM_FAULT_RETRY is set, the page existed but failed to + * lock. 
It will return to kernel and retry; + * - Otherwise, it should try normal fault under DLM lock. */ + if ((fault_ret & VM_FAULT_RETRY) || + !(fault_ret & VM_FAULT_ERROR)) + GOTO(out, result = 0); + + fault_ret = 0; + } - result = io->ci_result; + io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags); + if (IS_ERR(io)) + GOTO(out, result = PTR_ERR(io)); + + result = io->ci_result; if (result == 0) { vio = vvp_env_io(env); vio->u.fault.ft_vma = vma; vio->u.fault.ft_vmpage = NULL; - vio->u.fault.fault.ft_vmf = vmf; + vio->u.fault.ft_vmf = vmf; + vio->u.fault.ft_flags = 0; + vio->u.fault.ft_flags_valid = 0; + + /* May call ll_readpage() */ + ll_cl_add(vma->vm_file, env, io, LCC_MMAP); result = cl_io_loop(env, io); - fault_ret = vio->u.fault.fault.ft_flags; + ll_cl_remove(vma->vm_file, env); + + /* ft_flags are only valid if we reached + * the call to filemap_fault */ + if (vio->u.fault.ft_flags_valid) + fault_ret = vio->u.fault.ft_flags; + vmpage = vio->u.fault.ft_vmpage; if (result != 0 && vmpage != NULL) { - page_cache_release(vmpage); + put_page(vmpage); vmf->page = NULL; } } - cl_io_fini(env, io); - cl_env_nested_put(&nest, env); + cl_io_fini(env, io); vma->vm_flags |= ra_flags; + +out: + cl_env_put(env, &refcheck); if (result != 0 && !(fault_ret & VM_FAULT_RETRY)) fault_ret |= to_fault_error(result); - CDEBUG(D_MMAP, "%s fault %d/%d\n", - cfs_current()->comm, fault_ret, result); - RETURN(fault_ret); + CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result); + RETURN(fault_ret); } -static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY +static vm_fault_t ll_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; +#else +static vm_fault_t ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { +#endif int count = 0; bool printed = false; - int result; - cfs_sigset_t set; + bool cached; + vm_fault_t result; + ktime_t kstart = ktime_get(); + sigset_t set; + + result = pcc_fault(vma, vmf, &cached); + if (cached) + goto out; /* Only SIGKILL and SIGTERM is allowed for fault/nopage/mkwrite * so that it can be killed by admin but not cause segfault by * other signals. 
*/ set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM)); + /* make sure offset is not a negative number */ + if (vmf->pgoff > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) + return VM_FAULT_SIGBUS; restart: - result = ll_fault0(vma, vmf); - LASSERT(!(result & VM_FAULT_LOCKED)); - if (result == 0) { + result = ll_fault0(vma, vmf); + if (vmf->page && + !(result & (VM_FAULT_RETRY | VM_FAULT_ERROR | VM_FAULT_LOCKED))) { struct page *vmpage = vmf->page; /* check if this page has been truncated */ lock_page(vmpage); if (unlikely(vmpage->mapping == NULL)) { /* unlucky */ unlock_page(vmpage); - page_cache_release(vmpage); + put_page(vmpage); vmf->page = NULL; if (!printed && ++count > 16) { @@ -459,56 +392,53 @@ restart: result |= VM_FAULT_LOCKED; } cfs_restore_sigs(set); - return result; -} -#endif - -#ifndef HAVE_PGMKWRITE_USE_VMFAULT -static int ll_page_mkwrite(struct vm_area_struct *vma, struct page *vmpage) -{ - int count = 0; - bool printed = false; - bool retry; - int result; - - do { - retry = false; - result = ll_page_mkwrite0(vma, vmpage, &retry); - - if (!printed && ++count > 16) { - CWARN("app(%s): the page %lu of file %lu is under heavy" - " contention.\n", - current->comm, page_index(vmpage), - vma->vm_file->f_dentry->d_inode->i_ino); - printed = true; - } - } while (retry); - if (result == 0) - unlock_page(vmpage); - else if (result == -ENODATA) - result = 0; /* kernel will know truncate has happened and - * retry */ +out: + if (vmf->page && result == VM_FAULT_LOCKED) { + ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)), + current->pid, LUSTRE_FPRIVATE(vma->vm_file), + cl_offset(NULL, vmf->page->index), PAGE_SIZE, + READ); + ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)), + LPROC_LL_FAULT, + ktime_us_delta(ktime_get(), kstart)); + } - return result; + return result; } + +#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY +static vm_fault_t ll_page_mkwrite(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; #else -static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) +static vm_fault_t ll_page_mkwrite(struct vm_area_struct *vma, + struct vm_fault *vmf) { - int count = 0; - bool printed = false; - bool retry; - int result; - - do { - retry = false; - result = ll_page_mkwrite0(vma, vmf->page, &retry); - - if (!printed && ++count > 16) { - CWARN("app(%s): the page %lu of file %lu is under heavy" - " contention.\n", - current->comm, vmf->pgoff, - vma->vm_file->f_dentry->d_inode->i_ino); +#endif + int count = 0; + bool printed = false; + bool retry; + bool cached; + ktime_t kstart = ktime_get(); + vm_fault_t result; + + result = pcc_page_mkwrite(vma, vmf, &cached); + if (cached) + goto out; + + file_update_time(vma->vm_file); + do { + retry = false; + result = ll_page_mkwrite0(vma, vmf->page, &retry); + + if (!printed && ++count > 16) { + const struct dentry *de = file_dentry(vma->vm_file); + + CWARN("app(%s): the page %lu of file "DFID" is under" + " heavy contention\n", + current->comm, vmf->pgoff, + PFID(ll_inode2fid(de->d_inode))); printed = true; } } while (retry); @@ -533,24 +463,34 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) break; } - return result; +out: + if (result == VM_FAULT_LOCKED) { + ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)), + current->pid, LUSTRE_FPRIVATE(vma->vm_file), + cl_offset(NULL, vmf->page->index), PAGE_SIZE, + WRITE); + ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)), + LPROC_LL_MKWRITE, + ktime_us_delta(ktime_get(), kstart)); + } + + return result; } -#endif /** * To avoid cancel 
the locks covering mmapped region for lock cache pressure, - * we track the mapped vma count in ccc_object::cob_mmap_cnt. + * we track the mapped vma count in vvp_object::vob_mmap_cnt. */ static void ll_vm_open(struct vm_area_struct * vma) { - struct inode *inode = vma->vm_file->f_dentry->d_inode; - struct ccc_object *vob = cl_inode2ccc(inode); + struct inode *inode = file_inode(vma->vm_file); + struct vvp_object *vob = cl_inode2vvp(inode); - ENTRY; - LASSERT(vma->vm_file); - LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0); - cfs_atomic_inc(&vob->cob_mmap_cnt); - EXIT; + ENTRY; + LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0); + atomic_inc(&vob->vob_mmap_cnt); + pcc_vm_open(vma); + EXIT; } /** @@ -558,38 +498,14 @@ static void ll_vm_open(struct vm_area_struct * vma) */ static void ll_vm_close(struct vm_area_struct *vma) { - struct inode *inode = vma->vm_file->f_dentry->d_inode; - struct ccc_object *vob = cl_inode2ccc(inode); - - ENTRY; - LASSERT(vma->vm_file); - cfs_atomic_dec(&vob->cob_mmap_cnt); - LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0); - EXIT; -} - -#ifndef HAVE_VM_OP_FAULT -#ifndef HAVE_FILEMAP_POPULATE -static int (*filemap_populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock); -#endif -static int ll_populate(struct vm_area_struct *area, unsigned long address, - unsigned long len, pgprot_t prot, unsigned long pgoff, - int nonblock) -{ - int rc = 0; - ENTRY; - - /* always set nonblock as true to avoid page read ahead */ - rc = filemap_populate(area, address, len, prot, pgoff, 1); - RETURN(rc); -} -#endif - -/* return the user space pointer that maps to a file offset via a vma */ -static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte) -{ - return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT)); + struct inode *inode = file_inode(vma->vm_file); + struct vvp_object *vob = cl_inode2vvp(inode); + ENTRY; + atomic_dec(&vob->vob_mmap_cnt); + LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0); + pcc_vm_close(vma); + EXIT; } /* XXX put nice comment here. 
talk about __free_pte -> dirty pages and @@ -599,53 +515,51 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last) int rc = -ENOENT; ENTRY; - LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first); + LASSERTF(last > first, "last %llu first %llu\n", last, first); if (mapping_mapped(mapping)) { rc = 0; - unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1, - last - first + 1, 0); + unmap_mapping_range(mapping, first + PAGE_SIZE - 1, + last - first + 1, 1); } RETURN(rc); } -static struct vm_operations_struct ll_file_vm_ops = { -#ifndef HAVE_VM_OP_FAULT - .nopage = ll_nopage, - .populate = ll_populate, -#else +static const struct vm_operations_struct ll_file_vm_ops = { .fault = ll_fault, -#endif -#ifndef HAVE_PGMKWRITE_COMPACT .page_mkwrite = ll_page_mkwrite, -#else - ._pmkw.page_mkwrite = ll_page_mkwrite, -#endif .open = ll_vm_open, .close = ll_vm_close, }; int ll_file_mmap(struct file *file, struct vm_area_struct * vma) { - struct inode *inode = file->f_dentry->d_inode; - int rc; - ENTRY; + struct inode *inode = file_inode(file); + ktime_t kstart = ktime_get(); + bool cached; + int rc; - if (ll_file_nolock(file)) - RETURN(-EOPNOTSUPP); - - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1); - rc = generic_file_mmap(file, vma); - if (rc == 0) { -#if !defined(HAVE_FILEMAP_POPULATE) && !defined(HAVE_VM_OP_FAULT) - if (!filemap_populate) - filemap_populate = vma->vm_ops->populate; -#endif - vma->vm_ops = &ll_file_vm_ops; - vma->vm_ops->open(vma); - /* update the inode's size and mtime */ - rc = ll_glimpse_size(inode); - } + ENTRY; - RETURN(rc); + if (ll_file_nolock(file)) + RETURN(-EOPNOTSUPP); + + rc = pcc_file_mmap(file, vma, &cached); + if (cached && rc != 0) + RETURN(rc); + + rc = generic_file_mmap(file, vma); + if (rc == 0) { + vma->vm_ops = &ll_file_vm_ops; + vma->vm_ops->open(vma); + /* update the inode's size and mtime */ + if (!cached) + rc = ll_glimpse_size(inode); + } + + if (!rc) + ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MMAP, + ktime_us_delta(ktime_get(), kstart)); + + RETURN(rc); }
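
For reference, policy_from_vma() in the hunk above turns a faulting user address into a page-aligned byte extent of the backing file, which becomes the LDLM lock extent: the start is the file offset of the page containing the address (the page-aligned offset inside the mapping plus vm_pgoff pages), and the end is rounded out to the last byte of the page covering addr + count - 1. Below is a self-contained, user-space sketch of the same arithmetic, assuming 4 KiB pages; extent_from_mapping and struct extent_range are illustrative names, not symbols from the patch.

#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT   12UL                    /* assume 4 KiB pages */
#define EX_PAGE_SIZE    (1UL << EX_PAGE_SHIFT)
#define EX_PAGE_MASK    (~(EX_PAGE_SIZE - 1))

struct extent_range {
        uint64_t start;
        uint64_t end;           /* inclusive, like l_extent.end */
};

/* same arithmetic as policy_from_vma(): vm_start/vm_pgoff describe the
 * mapping, addr is the faulting address, count the length in bytes */
static struct extent_range
extent_from_mapping(uint64_t vm_start, uint64_t vm_pgoff,
                    uint64_t addr, uint64_t count)
{
        struct extent_range r;

        r.start = ((addr - vm_start) & EX_PAGE_MASK) +
                  (vm_pgoff << EX_PAGE_SHIFT);
        r.end   = (r.start + count - 1) | ~EX_PAGE_MASK;
        return r;
}

int main(void)
{
        /* a mapping at 0x7f0000001000 that starts at page 3 of the file;
         * a one-byte fault at 0x7f0000003345 (offset 0x2345 in the vma) */
        struct extent_range r = extent_from_mapping(0x7f0000001000ULL, 3,
                                                    0x7f0000003345ULL, 1);

        /* prints [0x5000, 0x5fff]: the whole of file page 5 */
        printf("[%#llx, %#llx]\n",
               (unsigned long long)r.start, (unsigned long long)r.end);
        return 0;
}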
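
The HAVE_VM_OPS_USE_VM_FAULT_ONLY blocks above deal with the kernel interface change in which vm_operations_struct::fault and ::page_mkwrite lost their explicit vm_area_struct argument, the vma now being reached through vmf->vma. A minimal sketch of that compatibility pattern follows; it assumes, as the patch does, that the HAVE_* macro comes from the build system's configure checks, and the handler body is only a placeholder.

#include <linux/mm.h>

static vm_fault_t example_fault_common(struct vm_area_struct *vma,
                                       struct vm_fault *vmf)
{
        /* real work would go here: find the page for vmf->pgoff in
         * vma->vm_file's mapping, lock it or ask for a retry, ... */
        return VM_FAULT_SIGBUS;                 /* placeholder */
}

#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
/* kernels where ->fault() takes only the vm_fault; the vma is vmf->vma */
static vm_fault_t example_fault(struct vm_fault *vmf)
{
        return example_fault_common(vmf->vma, vmf);
}
#else
/* older kernels where ->fault() still receives the vma explicitly */
static vm_fault_t example_fault(struct vm_area_struct *vma,
                                struct vm_fault *vmf)
{
        return example_fault_common(vma, vmf);
}
#endif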
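
Both ll_fault() and ll_page_mkwrite0() above use the same defence against a concurrent truncate: lock the page and treat a NULL page->mapping as proof that the page was torn down while the fault was in flight, in which case the fault is retried. A condensed sketch of that check, assuming the caller already holds the reference the fault handler took on vmf->page:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* returns true if the faulted page survived any concurrent truncate and is
 * now locked; returns false if the caller should drop state and retry */
static bool example_page_still_mapped(struct vm_fault *vmf)
{
        struct page *vmpage = vmf->page;

        lock_page(vmpage);
        if (unlikely(vmpage->mapping == NULL)) {
                /* truncated while the fault was in flight: release it and
                 * let the caller repeat the whole fault */
                unlock_page(vmpage);
                put_page(vmpage);
                vmf->page = NULL;
                return false;
        }
        /* still attached and now locked; caller reports VM_FAULT_LOCKED */
        return true;
}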
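
Finally, the reworked ll_file_mmap() keeps the usual kernel pattern for a filesystem ->mmap method: let generic_file_mmap() do the generic setup, then substitute the filesystem's own vm_operations_struct and call its ->open() by hand, since the vma was created before vm_ops was replaced. A stripped-down sketch of that pattern, assuming a kernel new enough for the vm_fault-only signatures shown in the HAVE_VM_OPS_USE_VM_FAULT_ONLY branch, with the Lustre-specific pieces (ll_file_nolock(), pcc_file_mmap(), ll_glimpse_size(), the stats calls) left out and myfs_* as illustrative names:

#include <linux/fs.h>
#include <linux/mm.h>

static vm_fault_t myfs_fault(struct vm_fault *vmf)
{
        /* placeholder: a real handler does the CIT_FAULT-style work */
        return VM_FAULT_SIGBUS;
}

static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;                 /* placeholder */
}

static void myfs_vm_open(struct vm_area_struct *vma)
{
        /* e.g. bump a per-inode mmap count, as ll_vm_open() does */
}

static void myfs_vm_close(struct vm_area_struct *vma)
{
        /* matching decrement, as ll_vm_close() does */
}

static const struct vm_operations_struct myfs_file_vm_ops = {
        .fault          = myfs_fault,
        .page_mkwrite   = myfs_page_mkwrite,
        .open           = myfs_vm_open,
        .close          = myfs_vm_close,
};

static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        int rc;

        /* generic setup of the mapping against the file's page cache */
        rc = generic_file_mmap(file, vma);
        if (rc)
                return rc;

        /* route faults through the filesystem's handlers; call ->open()
         * by hand because the vma existed before vm_ops was replaced */
        vma->vm_ops = &myfs_file_vm_ops;
        vma->vm_ops->open(vma);

        return 0;
}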