X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fllite%2Fllite_mmap.c;h=f8c030b929836e3765d96d749022aa0fec139f20;hb=e42415db2707d3a19b7cac6a97fbab396fb0f638;hp=f1ff91a6407a20eed07cb45f8f95d39f3a099c8c;hpb=113303973ec9f8484eb2355a1a6ef3c4c7fd6a56;p=fs%2Flustre-release.git

diff --git a/lustre/llite/llite_mmap.c b/lustre/llite/llite_mmap.c
index f1ff91a..f8c030b 100644
--- a/lustre/llite/llite_mmap.c
+++ b/lustre/llite/llite_mmap.c
@@ -1,24 +1,40 @@
 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
  * vim:expandtab:shiftwidth=8:tabstop=8:
  *
- * Copyright (c) 2001-2003 Cluster File Systems, Inc.
+ * GPL HEADER START
  *
- * This file is part of Lustre, http://www.lustre.org.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
  */
-#ifdef HAVE_KERNEL_CONFIG_H
+
+#ifndef AUTOCONF_INCLUDED
 #include
 #endif
 #include
@@ -35,16 +51,13 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-#include
-#endif
 
 #define DEBUG_SUBSYSTEM S_LLITE
 
+//#include
 #include
 #include "llite_internal.h"
 #include
@@ -70,14 +83,8 @@ struct ll_lock_tree_node {
 
 int lt_get_mmap_locks(struct ll_lock_tree *tree,
                       unsigned long addr, size_t count);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
 struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                        int *type);
-#else
-
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
-                       int unused);
-#endif
 
 struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
                                               __u64 end, ldlm_mode_t mode)
@@ -270,7 +277,7 @@ static void policy_from_vma(ldlm_policy_data_t *policy,
                             size_t count)
 {
         policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
-                                 ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT);
+                                 (vma->vm_pgoff << CFS_PAGE_SHIFT);
         policy->l_extent.end = (policy->l_extent.start + count - 1) |
                                ~CFS_PAGE_MASK;
 }
@@ -298,6 +305,11 @@ static struct vm_area_struct * our_vma(unsigned long addr, size_t count)
         RETURN(ret);
 }
 
+int ll_region_mapped(unsigned long addr, size_t count)
+{
+        return !!our_vma(addr, count);
+}
+
 int lt_get_mmap_locks(struct ll_lock_tree *tree,
                       unsigned long addr, size_t count)
 {
@@ -336,28 +348,19 @@ int lt_get_mmap_locks(struct ll_lock_tree *tree,
         RETURN(0);
 }
 
-/* FIXME: there is a pagefault race goes as follow (only 2.4):
- * 1. A user process on node A accesses a portion of a mapped file,
- *    resulting in a page fault. The pagefault handler invokes the
- *    ll_nopage function, which reads the page into memory.
- * 2. A user process on node B writes to the same portion of the file
- *    (either via mmap or write()), that cause node A to cancel the
- *    lock and truncate the page.
- * 3. Node A then executes the rest of do_no_page(), entering the
- *    now-invalid page into the PTEs.
+/**
+ * Page fault handler.
  *
- * Make the whole do_no_page as a hook to cover both the page cache
- * and page mapping installing with dlm lock would eliminate this race.
+ * \param vma - the virtual memory area that took the page fault
+ * \param address - the faulting address
+ * \param type - the type of the fault
  *
- * In 2.6, the truncate_count of address_space can cover this race.
+ * \return the allocated and filled-in page for the address
+ * \retval NOPAGE_SIGBUS if no page exists at this address
+ * \retval NOPAGE_OOM if there is no memory to allocate a new page
  */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
 struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                        int *type)
-#else
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
-                       int type /* unused */)
-#endif
 {
         struct file *filp = vma->vm_file;
         struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
@@ -379,6 +382,8 @@ struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                 RETURN(NULL);
         }
 
+        ll_clear_file_contended(inode);
+
         /* start and end the lock on the first and last bytes in the page */
         policy_from_vma(&policy, vma, address, CFS_PAGE_SIZE);
 
@@ -399,7 +404,7 @@ struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
 
         lov_stripe_lock(lsm);
         inode_init_lvb(inode, &lvb);
-        obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
+        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
         kms = lvb.lvb_size;
 
         pgoff = ((address - vma->vm_start) >> CFS_PAGE_SHIFT) + vma->vm_pgoff;
@@ -412,16 +417,22 @@ struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                 /* XXX change inode size without ll_inode_size_lock() held!
                  *     there is a race condition with truncate path. (see
                  *     ll_extent_lock) */
-                /* region is within kms and, hence, within real file size (A).
+                /* XXX i_size_write() is not used because it is not safe to
+                 *     take the ll_inode_size_lock() due to a potential lock
+                 *     inversion (bug 6077).  And since it's not safe to use
+                 *     i_size_write() without a covering mutex we do the
+                 *     assignment directly.  It is not critical that the
+                 *     size be correct. */
+                /* region is within kms and, hence, within real file size (A).
                  * We need to increase i_size to cover the read region so that
                  * generic_file_read() will do its job, but that doesn't mean
                  * the kms size is _correct_, it is only the _minimum_ size.
                  * If someone does a stat they will get the correct size which
                  * will always be >= the kms value here.  b=11081 */
-                if (inode->i_size < kms) {
+                if (i_size_read(inode) < kms) {
                         inode->i_size = kms;
-                        CDEBUG(D_INODE, "ino=%lu, updating i_size %llu\n",
-                               inode->i_ino, inode->i_size);
+                        CDEBUG(D_INODE, "ino=%lu, updating i_size %llu\n",
+                               inode->i_ino, i_size_read(inode));
                 }
                 lov_stripe_unlock(lsm);
         }
@@ -432,9 +443,9 @@ struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
          * bug 10919 */
         lov_stripe_lock(lsm);
         if (mode == LCK_PW)
-                obd_adjust_kms(ll_i2obdexp(inode), lsm,
-                               min_t(loff_t, policy.l_extent.end,inode->i_size),
-                               0);
+                obd_adjust_kms(ll_i2dtexp(inode), lsm,
+                               min_t(loff_t, policy.l_extent.end + 1,
+                                     i_size_read(inode)), 0);
         lov_stripe_unlock(lsm);
 
         /* disable VM_SEQ_READ and use VM_RAND_READ to make sure that
@@ -447,8 +458,13 @@ struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
         vma->vm_flags |= VM_RAND_READ;
 
         page = filemap_nopage(vma, address, type);
-        LL_CDEBUG_PAGE(D_PAGE, page, "got addr %lu type %lx\n", address,
-                       (long)type);
+        if (page != NOPAGE_SIGBUS && page != NOPAGE_OOM)
+                LL_CDEBUG_PAGE(D_PAGE, page, "got addr %lu type %lx\n", address,
+                               (long)type);
+        else
+                CDEBUG(D_PAGE, "got addr %lu type %lx - SIGBUS\n", address,
+                       (long)type);
+
         vma->vm_flags &= ~VM_RAND_READ;
         vma->vm_flags |= (rand_read | seq_read);
 
@@ -484,7 +500,7 @@ static void ll_vm_open(struct vm_area_struct * vma)
                 if (!lsm)
                         return;
 
-                count = obd_join_lru(sbi->ll_osc_exp, lsm, 0);
+                count = obd_join_lru(sbi->ll_dt_exp, lsm, 0);
                 VMA_DEBUG(vma, "split %d unused locks from lru\n", count);
         } else {
                 spin_unlock(&lli->lli_lock);
@@ -513,14 +529,13 @@ static void ll_vm_close(struct vm_area_struct *vma)
                 if (!lsm)
                         return;
 
-                count = obd_join_lru(sbi->ll_osc_exp, lsm, 1);
+                count = obd_join_lru(sbi->ll_dt_exp, lsm, 1);
                 VMA_DEBUG(vma, "join %d unused locks to lru\n", count);
         } else {
                 spin_unlock(&lli->lli_lock);
         }
 }
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
 #ifndef HAVE_FILEMAP_POPULATE
 static int (*filemap_populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
 #endif
@@ -535,7 +550,6 @@ static int ll_populate(struct vm_area_struct *area, unsigned long address,
         rc = filemap_populate(area, address, len, prot, pgoff, 1);
         RETURN(rc);
 }
-#endif
 
 /* return the user space pointer that maps to a file offset via a vma */
 static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
@@ -544,47 +558,6 @@ static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
 
 }
 
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-/* [first, last] are the byte offsets affected.
- * vm_{start, end} are user addresses of the first byte of the mapping and
- *   the next byte beyond it
- * vm_pgoff is the page index of the first byte in the mapping */
-static void teardown_vmas(struct vm_area_struct *vma, __u64 first,
-                          __u64 last)
-{
-        unsigned long address, len;
-        for (; vma ; vma = vma->vm_next_share) {
-                if (last >> CFS_PAGE_SHIFT < vma->vm_pgoff)
-                        continue;
-                if (first >> CFS_PAGE_SHIFT >= (vma->vm_pgoff +
-                    ((vma->vm_end - vma->vm_start) >> CFS_PAGE_SHIFT)))
-                        continue;
-
-                /* XXX in case of unmap the cow pages of a running file,
-                 * don't unmap these private writeable mapping here!
-                 * though that will break private mappping a little.
-                 *
-                 * the clean way is to check the mapping of every page
-                 * and just unmap the non-cow pages, just like
-                 * unmap_mapping_range() with even_cow=0 in kernel 2.6.
-                 */
-                if (!(vma->vm_flags & VM_SHARED) &&
-                    (vma->vm_flags & VM_WRITE))
-                        continue;
-
-                address = max((unsigned long)vma->vm_start,
-                              file_to_user(vma, first));
-                len = min((unsigned long)vma->vm_end,
-                          file_to_user(vma, last) + 1) - address;
-
-                VMA_DEBUG(vma, "zapping vma [first="LPU64" last="LPU64" "
-                          "address=%ld len=%ld]\n", first, last, address, len);
-                LASSERT(len > 0);
-                ll_zap_page_range(vma, address, len);
-        }
-}
-#endif
-
 /* XXX put nice comment here. talk about __free_pte -> dirty pages and
  * nopage's reference passing to the pte */
 int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
@@ -593,24 +566,12 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
 {
         int rc = -ENOENT;
         ENTRY;
         LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
         if (mapping_mapped(mapping)) {
                 rc = 0;
                 unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
                                     last - first + 1, 0);
         }
-#else
-        spin_lock(&mapping->i_shared_lock);
-        if (mapping->i_mmap != NULL) {
-                rc = 0;
-                teardown_vmas(mapping->i_mmap, first, last);
-        }
-        if (mapping->i_mmap_shared != NULL) {
-                rc = 0;
-                teardown_vmas(mapping->i_mmap_shared, first, last);
-        }
-        spin_unlock(&mapping->i_shared_lock);
-#endif
+
         RETURN(rc);
 }
 
@@ -618,9 +579,7 @@ static struct vm_operations_struct ll_file_vm_ops = {
         .nopage         = ll_nopage,
         .open           = ll_vm_open,
         .close          = ll_vm_close,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
         .populate       = ll_populate,
-#endif
 };
 
 int ll_file_mmap(struct file * file, struct vm_area_struct * vma)
@@ -628,11 +587,10 @@ int ll_file_mmap(struct file * file, struct vm_area_struct * vma)
 {
         int rc;
         ENTRY;
-        ll_vfs_ops_tally(ll_i2sbi(file->f_dentry->d_inode), VFS_OPS_MMAP);
+        ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode), LPROC_LL_MAP, 1);
         rc = generic_file_mmap(file, vma);
         if (rc == 0) {
-#if !defined(HAVE_FILEMAP_POPULATE) && \
-    (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
+#if !defined(HAVE_FILEMAP_POPULATE)
                 if (!filemap_populate)
                         filemap_populate = vma->vm_ops->populate;
 #endif
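
Notes on selected hunks follow. The sketches below are illustrative userspace C, not Lustre code; any name with a trailing underscore is a local stand-in.

(1) ll_nopage() takes a DLM extent lock on exactly the one page being
faulted: policy_from_vma() translates the faulting user address back into a
file byte range by page-aligning the offset within the mapping and adding the
mapping's starting file offset (vm_pgoff, counted in pages). A runnable sketch
of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT_/PAGE_MASK_ stand in for
CFS_PAGE_SHIFT/CFS_PAGE_MASK); the sketch keeps an explicit 64-bit cast on the
shift so the result stays well-defined for large offsets even on 32-bit
builds:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT_ 12
    #define PAGE_SIZE_  (1UL << PAGE_SHIFT_)
    #define PAGE_MASK_  (~(PAGE_SIZE_ - 1))

    int main(void)
    {
            unsigned long vm_start = 0x7f0000000000UL; /* mapping start (user VA) */
            unsigned long vm_pgoff = 3;            /* file page backing vm_start */
            unsigned long addr = vm_start + 0x2345;    /* faulting address */
            size_t count = PAGE_SIZE_;             /* one page, as in ll_nopage() */

            /* file offset of the faulting page = page-aligned offset into
             * the mapping + file offset of the mapping itself */
            uint64_t start = ((addr - vm_start) & PAGE_MASK_) +
                             ((uint64_t)vm_pgoff << PAGE_SHIFT_);
            /* inclusive last byte: round up to the next page boundary - 1 */
            uint64_t end = (start + count - 1) | ~PAGE_MASK_;

            /* prints "lock extent [20480, 24575]" */
            printf("lock extent [%llu, %llu]\n",
                   (unsigned long long)start, (unsigned long long)end);
            return 0;
    }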
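
(2) The obd_adjust_kms() hunk also changes the value passed from
policy.l_extent.end to policy.l_extent.end + 1. A DLM extent end is an
inclusive byte offset while kms ("known minimum size") is a size, so a PW lock
covering bytes [start, end] justifies a known minimum size of end + 1; the old
code undercounted by one byte. A minimal demonstration of the off-by-one,
using example numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long i_size = 100000; /* current inode size (example) */
            unsigned long long ext_end = 24575; /* last byte under the PW lock */

            unsigned long long old_kms = ext_end < i_size ? ext_end : i_size;
            unsigned long long new_kms =
                    ext_end + 1 < i_size ? ext_end + 1 : i_size;

            /* prints "old kms 24575, fixed kms 24576" */
            printf("old kms %llu, fixed kms %llu\n", old_kms, new_kms);
            return 0;
    }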
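
(3) With the 2.4 path gone, ll_teardown_mmaps() reduces to the kernel's own
unmap_mapping_range(). For readability, here is the function as it reads after
the patch (a consolidated view of the hunk above, not new code; ENTRY/RETURN
are Lustre debug macros): the final even_cows == 0 argument makes the kernel
skip private COWed pages, the same case the deleted teardown_vmas() loop
filtered out with its VM_SHARED/VM_WRITE test.

    int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
    {
            int rc = -ENOENT;
            ENTRY;
            LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
            if (mapping_mapped(mapping)) {
                    rc = 0;
                    /* drop PTEs covering [first, last]; even_cows == 0
                     * preserves private COWed pages */
                    unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
                                        last - first + 1, 0);
            }

            RETURN(rc);
    }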
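
(4) When the kernel does not export filemap_populate (HAVE_FILEMAP_POPULATE
undefined), ll_file_mmap() captures the default .populate callback that
generic_file_mmap() installed into the vma's ops before the module substitutes
its own table (done outside the visible hunk), and ll_populate() later calls
through that saved pointer. A runnable userspace sketch of this capture
pattern (all names are local stand-ins, not the kernel API):

    #include <stdio.h>

    struct vm_ops_ { int (*populate)(void); };

    /* plays the role of the kernel's non-exported filemap_populate() */
    static int default_populate_(void) { return puts("default populate"); }

    /* like the file-scope filemap_populate pointer in llite_mmap.c */
    static int (*saved_populate_)(void);

    /* like ll_populate(): delegate to the captured default */
    static int ll_populate_(void) { return saved_populate_(); }

    static struct vm_ops_ ll_file_vm_ops_ = { .populate = ll_populate_ };

    int main(void)
    {
            struct vm_ops_ generic_ops = { .populate = default_populate_ };
            struct vm_ops_ *vm_ops = &generic_ops; /* as left by generic mmap */

            if (!saved_populate_)           /* first mmap: snapshot the default */
                    saved_populate_ = vm_ops->populate;
            vm_ops = &ll_file_vm_ops_;      /* then install our own ops */

            return vm_ops->populate() < 0;  /* calls wrapper -> default */
    }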