X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fllite%2Fllite_mmap.c;h=a42cb46ed9eb0c7245e83b1ac9535ade430c2082;hb=86586a487771c2903072cc18fb46771943dc1bdf;hp=3a0c07744053bcb2038102003e119b9a2849fa01;hpb=77e2f09e3ba863018ff373e198ac0e62487aa675;p=fs%2Flustre-release.git

diff --git a/lustre/llite/llite_mmap.c b/lustre/llite/llite_mmap.c
index 3a0c077..a42cb46 100644
--- a/lustre/llite/llite_mmap.c
+++ b/lustre/llite/llite_mmap.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -65,7 +65,7 @@ void policy_from_vma(ldlm_policy_data_t *policy,
                      size_t count)
 {
         policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
-                                 (vma->vm_pgoff << CFS_PAGE_SHIFT);
+                                 (vma->vm_pgoff << PAGE_CACHE_SHIFT);
         policy->l_extent.end = (policy->l_extent.start + count - 1) |
                                ~CFS_PAGE_MASK;
 }
@@ -103,16 +103,17 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
  * \return other error codes from cl_io_init.
  */
 struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
-                               struct lu_env **env_ret,
-                               struct cl_env_nest *nest,
-                               pgoff_t index, unsigned long *ra_flags)
+			       struct lu_env **env_ret,
+			       struct cl_env_nest *nest,
+			       pgoff_t index, unsigned long *ra_flags)
 {
-        struct file *file = vma->vm_file;
-        struct inode *inode = file->f_dentry->d_inode;
-        struct cl_io *io;
-        struct cl_fault_io *fio;
-        struct lu_env *env;
-        ENTRY;
+	struct file *file = vma->vm_file;
+	struct inode *inode = file->f_dentry->d_inode;
+	struct cl_io *io;
+	struct cl_fault_io *fio;
+	struct lu_env *env;
+	int rc;
+	ENTRY;
 
         *env_ret = NULL;
         if (ll_file_nolock(file))
@@ -152,20 +153,25 @@ struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
 	CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
 	       fio->ft_index, fio->ft_executable);
 
-        if (cl_io_init(env, io, CIT_FAULT, io->ci_obj) == 0) {
-                struct ccc_io *cio = ccc_env_io(env);
-                struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
-                LASSERT(cio->cui_cl.cis_io == io);
-
-                /* mmap lock must be MANDATORY
-                 * it has to cache pages. */
-                io->ci_lockreq = CILR_MANDATORY;
-
-                cio->cui_fd = fd;
-        }
+	rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
+	if (rc == 0) {
+		struct ccc_io *cio = ccc_env_io(env);
+		struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+
+		LASSERT(cio->cui_cl.cis_io == io);
+
+		/* mmap lock must be MANDATORY it has to cache
+		 * pages. */
+		io->ci_lockreq = CILR_MANDATORY;
+		cio->cui_fd = fd;
+	} else {
+		LASSERT(rc < 0);
+		cl_io_fini(env, io);
+		cl_env_nested_put(nest, env);
+		io = ERR_PTR(rc);
+	}
 
-        return io;
+	return io;
 }
 
 /* Sharing code of page_mkwrite method for rhel5 and rhel6 */
@@ -190,13 +196,8 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
 	result = io->ci_result;
 	if (result < 0)
-		GOTO(out, result);
+		GOTO(out_io, result);
 
-	/* Don't enqueue new locks for page_mkwrite().
-	 * If the lock has been cancelled then page must have been
-	 * truncated, in that case, kernel will handle it.
-	 */
-	io->ci_lockreq = CILR_PEEK;
 	io->u.ci_fault.ft_mkwrite = 1;
 	io->u.ci_fault.ft_writable = 1;
 
@@ -211,19 +212,18 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
 	 * while truncate is on-going.
 	 */
 	inode = ccc_object_inode(io->ci_obj);
 	lli = ll_i2info(inode);
-	cfs_down_read(&lli->lli_trunc_sem);
+	down_read(&lli->lli_trunc_sem);
 
 	result = cl_io_loop(env, io);
 
-	cfs_up_read(&lli->lli_trunc_sem);
+	up_read(&lli->lli_trunc_sem);
 
 	cfs_restore_sigs(set);
 
-	if (result == -ENODATA) /* peek failed, no lock caching. */
-		CDEBUG(D_MMAP, "race on page_mkwrite: %lx (%lu %p)\n",
-		       vma->vm_flags, io->u.ci_fault.ft_index, vmpage);
+	if (result == 0) {
+		struct inode *inode = vma->vm_file->f_dentry->d_inode;
+		struct ll_inode_info *lli = ll_i2info(inode);
 
-	if (result == 0 || result == -ENODATA) {
 		lock_page(vmpage);
 		if (vmpage->mapping == NULL) {
 			unlock_page(vmpage);
@@ -233,18 +233,6 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
 			 * to handle_mm_fault(). */
 			if (result == 0)
 				result = -ENODATA;
-		} else if (result == -ENODATA) {
-			/* Invalidate it if the cl_lock is being revoked.
-			 * This piece of code is definitely needed for RHEL5,
-			 * otherwise, SIGBUS will be wrongly returned to
-			 * applications. */
-			write_one_page(vmpage, 1);
-			lock_page(vmpage);
-			if (vmpage->mapping != NULL) {
-				ll_invalidate_page(vmpage);
-				LASSERT(vmpage->mapping == NULL);
-			}
-			unlock_page(vmpage);
 		} else if (!PageDirty(vmpage)) {
 			/* race, the page has been cleaned by ptlrpcd after
 			 * it was unlocked, it has to be added into dirty
@@ -261,17 +249,23 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
 			*retry = true;
 			result = -EAGAIN;
 		}
+
+		if (result == 0) {
+			spin_lock(&lli->lli_lock);
+			lli->lli_flags |= LLIF_DATA_MODIFIED;
+			spin_unlock(&lli->lli_lock);
+		}
 	}
 	EXIT;
 
+out_io:
+	cl_io_fini(env, io);
+	cl_env_nested_put(&nest, env);
 out:
-	cl_io_fini(env, io);
-	cl_env_nested_put(&nest, env);
+	CDEBUG(D_MMAP, "%s mkwrite with %d\n", cfs_current()->comm, result);
+	LASSERT(ergo(result == 0, PageLocked(vmpage)));
 
-	CDEBUG(D_MMAP, "%s mkwrite with %d\n", cfs_current()->comm, result);
-
-	LASSERT(ergo(result == 0, PageLocked(vmpage)));
-	return(result);
+	return result;
 }
 
@@ -588,7 +582,8 @@ static int ll_populate(struct vm_area_struct *area, unsigned long address,
 
 /* return the user space pointer that maps to a file offset via a vma */
 static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
 {
-	return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
+	return vma->vm_start +
+	       (byte - ((__u64)vma->vm_pgoff << PAGE_CACHE_SHIFT));
 }
 
@@ -602,7 +597,7 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
 	LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
 
 	if (mapping_mapped(mapping)) {
 		rc = 0;
-		unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
+		unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
 				    last - first + 1, 0);
 	}