/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * Copyright (c) 2001-2003 Cluster File Systems, Inc.
+ * GPL HEADER START
*
- * This file is part of Lustre, http://www.lustre.org.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
*/
-
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
+#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <asm/uaccess.h>
-#include <asm/segment.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-#include <linux/iobuf.h>
-#endif
-
#define DEBUG_SUBSYSTEM S_LLITE
-#include <linux/lustre_mds.h>
-#include <linux/lustre_lite.h>
+#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>
struct inode *lt_inode;
};
-__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
int lt_get_mmap_locks(struct ll_lock_tree *tree,
unsigned long addr, size_t count);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
- int *type);
-#else
-
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
- int unused);
-#endif
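+/* Forward declaration (defined at the bottom of this file): our_vma() below
+ * identifies Lustre mappings by comparing vm_ops pointers, since matching on
+ * ->nopage no longer works once the operations carry ->fault instead. */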
+static struct vm_operations_struct ll_file_vm_ops;
struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
__u64 end, ldlm_mode_t mode)
RETURN(rc);
}
-int ll_tree_lock(struct ll_lock_tree *tree,
+int ll_tree_lock_iov(struct ll_lock_tree *tree,
struct ll_lock_tree_node *first_node,
- const char *buf, size_t count, int ast_flags)
+ const struct iovec *iov, unsigned long nr_segs, int ast_flags)
{
struct ll_lock_tree_node *node;
int rc = 0;
+ unsigned long seg;
ENTRY;
tree->lt_root.rb_node = NULL;
/* To avoid a subtle deadlock: client1 tries to read file1 into
 * mmapped file2 while, at the same time, client2 tries to read
 * file2 into mmapped file1. */
- rc = lt_get_mmap_locks(tree, (unsigned long)buf, count);
- if (rc)
- GOTO(out, rc);
+ for (seg = 0; seg < nr_segs; seg++) {
+ const struct iovec *iv = &iov[seg];
+ rc = lt_get_mmap_locks(tree, (unsigned long)iv->iov_base,
+ iv->iov_len);
+ if (rc)
+ GOTO(out, rc);
+ }
while ((node = lt_least_node(tree))) {
struct inode *inode = node->lt_inode;
RETURN(rc);
}
+int ll_tree_lock(struct ll_lock_tree *tree,
+ struct ll_lock_tree_node *first_node,
+ const char *buf, size_t count, int ast_flags)
+{
+ struct iovec local_iov = { .iov_base = (void __user *)buf,
+ .iov_len = count };
+
+ return ll_tree_lock_iov(tree, first_node, &local_iov, 1, ast_flags);
+}
+
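+/* A minimal caller sketch (illustrative only; the real callers live in the
+ * file read/write paths, which build the lock tree first):
+ *
+ *      rc = ll_tree_lock_iov(&tree, node, iov, nr_segs, ast_flags);
+ *      if (rc == 0) {
+ *              ... perform the buffered I/O over iov ...
+ *              ll_tree_unlock(&tree);
+ *      }
+ */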
static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
{
/* we only want to hold PW locks if the mmap() can generate
return LCK_PR;
}
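+/* Compute the byte extent [start, end] covering count bytes beginning at
+ * file page pgoff, with end rounded up to a page boundary.  For example,
+ * with 4K pages, pgoff = 2 and count = CFS_PAGE_SIZE give [8192, 12287]. */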
+static void policy_from_vma_pgoff(ldlm_policy_data_t *policy,
+ struct vm_area_struct *vma,
+ __u64 pgoff, size_t count)
+{
+ policy->l_extent.start = pgoff << CFS_PAGE_SHIFT;
+ policy->l_extent.end = (policy->l_extent.start + count - 1) |
+ ~CFS_PAGE_MASK;
+}
+
static void policy_from_vma(ldlm_policy_data_t *policy,
struct vm_area_struct *vma, unsigned long addr,
size_t count)
{
- policy->l_extent.start = ((addr - vma->vm_start) & PAGE_CACHE_MASK) +
- (vma->vm_pgoff << PAGE_CACHE_SHIFT);
+ policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
+ ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT);
policy->l_extent.end = (policy->l_extent.start + count - 1) |
- (PAGE_CACHE_SIZE - 1);
+ ~CFS_PAGE_MASK;
}
static struct vm_area_struct * our_vma(unsigned long addr, size_t count)
spin_lock(&mm->page_table_lock);
for(vma = find_vma(mm, addr);
vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
- if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage &&
+ if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
vma->vm_flags & VM_SHARED) {
ret = vma;
break;
RETURN(ret);
}
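+/* Return non-zero iff some part of [addr, addr + count) lies within a
+ * shared mapping of a Lustre file. */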
+int ll_region_mapped(unsigned long addr, size_t count)
+{
+ return !!our_vma(addr, count);
+}
+
int lt_get_mmap_locks(struct ll_lock_tree *tree,
unsigned long addr, size_t count)
{
RETURN(0);
/* we need to look up vmas on page aligned addresses */
- count += addr & (PAGE_SIZE - 1);
- addr &= PAGE_MASK;
+ count += addr & (~CFS_PAGE_MASK);
+ addr &= CFS_PAGE_MASK;
while ((vma = our_vma(addr, count)) != NULL) {
LASSERT(vma->vm_file);
RETURN(0);
}
-/* FIXME: there is a pagefault race goes as follow (only 2.4):
- * 1. A user process on node A accesses a portion of a mapped file,
- * resulting in a page fault. The pagefault handler invokes the
- * ll_nopage function, which reads the page into memory.
- * 2. A user process on node B writes to the same portion of the file
- * (either via mmap or write()), that cause node A to cancel the
- * lock and truncate the page.
- * 3. Node A then executes the rest of do_no_page(), entering the
- * now-invalid page into the PTEs.
- *
- * Make the whole do_no_page as a hook to cover both the page cache
- * and page mapping installing with dlm lock would eliminate this race.
- *
- * In 2.6, the truncate_count of address_space can cover this race.
- */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
- int *type)
-#else
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
- int type /* unused */)
-#endif
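+/* Take a DLM extent lock covering the single page at file page index pgoff
+ * before the fault is satisfied.  On success returns 1 with *lockh holding
+ * the lock and *save_flags preserving the vma's VM_RAND_READ/VM_SEQ_READ
+ * bits, which are temporarily overridden to suppress kernel readahead;
+ * returns 0 on failure. */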
+static int ll_get_extent_lock(struct vm_area_struct *vma, unsigned long pgoff,
+ int *save_flags, struct lustre_handle *lockh)
{
struct file *filp = vma->vm_file;
- struct ll_file_data *fd = filp->private_data;
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
struct inode *inode = filp->f_dentry->d_inode;
- struct lustre_handle lockh = { 0 };
ldlm_policy_data_t policy;
ldlm_mode_t mode;
- struct page *page = NULL;
struct ll_inode_info *lli = ll_i2info(inode);
+ struct ost_lvb lvb;
__u64 kms, old_mtime;
- unsigned long pgoff, size, rand_read, seq_read;
- int rc = 0;
+ unsigned long size;
ENTRY;
if (lli->lli_smd == NULL) {
CERROR("No lsm on fault?\n");
- RETURN(NULL);
+ RETURN(0);
}
+ ll_clear_file_contended(inode);
+
/* start and end the lock on the first and last bytes in the page */
- policy_from_vma(&policy, vma, address, PAGE_CACHE_SIZE);
+ policy_from_vma_pgoff(&policy, vma, pgoff, CFS_PAGE_SIZE);
CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);
mode = mode_from_vma(vma);
old_mtime = LTIME_S(inode->i_mtime);
- rc = ll_extent_lock(fd, inode, lli->lli_smd, mode, &policy,
- &lockh, LDLM_FL_CBPENDING | LDLM_FL_NO_LRU);
- if (rc != 0)
- RETURN(NULL);
+        if (ll_extent_lock(fd, inode, lli->lli_smd, mode, &policy,
+                           lockh, LDLM_FL_CBPENDING | LDLM_FL_NO_LRU) != 0)
+ RETURN(0);
if (vma->vm_flags & VM_EXEC && LTIME_S(inode->i_mtime) != old_mtime)
CWARN("binary changed. inode %lu\n", inode->i_ino);
- /* XXX change inode size without i_sem hold! there is a race condition
- * with truncate path. (see ll_extent_lock) */
- //down(&lli->lli_size_sem);
- kms = lov_merge_size(lli->lli_smd, 1);
- pgoff = ((address - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
- size = (kms + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ lov_stripe_lock(lli->lli_smd);
+ inode_init_lvb(inode, &lvb);
+        if (obd_merge_lvb(ll_i2obdexp(inode), lli->lli_smd, &lvb, 1)) {
+ lov_stripe_unlock(lli->lli_smd);
+ RETURN(0);
+ }
+ kms = lvb.lvb_size;
+
+ size = (kms + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+        CDEBUG(D_INFO, "kms pages %lu, page index %lu\n", size, pgoff);
if (pgoff >= size) {
- //up(&lli->lli_size_sem);
- ll_glimpse_size(inode);
+ lov_stripe_unlock(lli->lli_smd);
+ ll_glimpse_size(inode, LDLM_FL_BLOCK_GRANTED);
+ lov_stripe_lock(lli->lli_smd);
} else {
- inode->i_size = kms;
- //up(&lli->lli_size_sem);
+ /* XXX change inode size without ll_inode_size_lock() held!
+ * there is a race condition with truncate path. (see
+ * ll_extent_lock) */
+ /* XXX i_size_write() is not used because it is not safe to
+ * take the ll_inode_size_lock() due to a potential lock
+ * inversion (bug 6077). And since it's not safe to use
+ * i_size_write() without a covering mutex we do the
+ * assignment directly. It is not critical that the
+ * size be correct. */
+ /* NOTE: region is within kms and, hence, within real file size (A).
+ * We need to increase i_size to cover the read region so that
+ * generic_file_read() will do its job, but that doesn't mean
+ * the kms size is _correct_, it is only the _minimum_ size.
+ * If someone does a stat they will get the correct size which
+ * will always be >= the kms value here. b=11081 */
+ if (i_size_read(inode) < kms) {
+ inode->i_size = kms;
+ CDEBUG(D_INODE, "ino=%lu, updating i_size %llu\n",
+ inode->i_ino, i_size_read(inode));
+ }
}
+        /* If the mapping is writable, adjust kms to cover this page,
+         * but do not extend kms beyond the actual file size.
+         * policy.l_extent.end is set to the end of the page by
+         * policy_from_vma_pgoff() above. bug 10919 */
+ if (mode == LCK_PW)
+ obd_adjust_kms(ll_i2obdexp(inode), lli->lli_smd,
+ min_t(loff_t, policy.l_extent.end + 1,
+ i_size_read(inode)), 0);
+ lov_stripe_unlock(lli->lli_smd);
+
        /* Disable VM_SEQ_READ and set VM_RAND_READ so that the kernel will
         * not read ahead into pages not covered by a DLM lock in
         * filemap_nopage(); we do our own readahead in ll_readpage().
         */
- rand_read = vma->vm_flags & VM_RAND_READ;
- seq_read = vma->vm_flags & VM_SEQ_READ;
+ *save_flags = vma->vm_flags & (VM_RAND_READ | VM_SEQ_READ);
vma->vm_flags &= ~ VM_SEQ_READ;
vma->vm_flags |= VM_RAND_READ;
+ return 1;
+}
+
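+/* Undo ll_get_extent_lock(): restore the vma's readahead flags and drop
+ * the DLM extent lock taken for the fault. */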
+static void ll_put_extent_lock(struct vm_area_struct *vma, int save_flags,
+ struct lustre_handle *lockh)
+{
+ struct file *filp = vma->vm_file;
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
+ struct inode *inode = filp->f_dentry->d_inode;
+ ldlm_mode_t mode;
+
+ mode = mode_from_vma(vma);
+ vma->vm_flags &= ~(VM_RAND_READ | VM_SEQ_READ);
+ vma->vm_flags |= save_flags;
+
+ ll_extent_unlock(fd, inode, ll_i2info(inode)->lli_smd, mode, lockh);
+}
+
+#ifndef HAVE_VM_OP_FAULT
+/**
+ * Page fault handler (old ->nopage() API).
+ *
+ * \param vma     virtual memory area where the fault occurred
+ * \param address the faulting address
+ * \param type    fault type is returned here for the VM accounting
+ *
+ * \return the allocated and filled page for \a address
+ * \retval NOPAGE_SIGBUS if no page exists at this address
+ * \retval NOPAGE_OOM if out of memory while allocating a page
+ */
+struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
+ int *type)
+{
+ struct lustre_handle lockh = { 0 };
+        int save_flags = 0;
+ unsigned long pgoff;
+ struct page *page;
+ ENTRY;
+
+ pgoff = ((address - vma->vm_start) >> CFS_PAGE_SHIFT) + vma->vm_pgoff;
+        if (!ll_get_extent_lock(vma, pgoff, &save_flags, &lockh))
+ RETURN(NOPAGE_SIGBUS);
+
page = filemap_nopage(vma, address, type);
- LL_CDEBUG_PAGE(D_PAGE, page, "got addr %lu type %lx\n", address,
- (long)type);
- vma->vm_flags &= ~VM_RAND_READ;
- vma->vm_flags |= (rand_read | seq_read);
+ if (page != NOPAGE_SIGBUS && page != NOPAGE_OOM)
+ LL_CDEBUG_PAGE(D_PAGE, page, "got addr %lu type %lx\n", address,
+ (long)type);
+ else
+ CDEBUG(D_PAGE, "got addr %lu type %lx - SIGBUS\n", address,
+ (long)type);
+
+        ll_put_extent_lock(vma, save_flags, &lockh);
- ll_extent_unlock(fd, inode, ll_i2info(inode)->lli_smd, mode, &lockh);
RETURN(page);
}
+#else
+/* New fault() API */
+/**
+ * Page fault handler (new ->fault() API).
+ *
+ * \param vma virtual memory area where the fault occurred
+ * \param vmf fault descriptor: the faulting page offset is passed in
+ *            vmf->pgoff and the resulting page is returned in vmf->page
+ *
+ * \return VM_FAULT_* status as returned by filemap_fault()
+ * \retval VM_FAULT_SIGBUS if no extent lock could be taken for this address
+ */
+int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct lustre_handle lockh = { 0 };
+        int save_flags = 0;
+ int rc;
+ ENTRY;
+
+        if (!ll_get_extent_lock(vma, vmf->pgoff, &save_flags, &lockh))
+ RETURN(VM_FAULT_SIGBUS);
+
+ rc = filemap_fault(vma, vmf);
+ if (vmf->page)
+ LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
+ vmf->virtual_address);
+ else
+ CDEBUG(D_PAGE, "got addr %p - SIGBUS\n",
+ vmf->virtual_address);
+
+        ll_put_extent_lock(vma, save_flags, &lockh);
+
+ RETURN(rc);
+}
+#endif
+
/* To avoid cancelling the locks covering an mmapped region under lock
 * cache pressure, we track the mapped vma count in lli_mmap_cnt.
 * ll_vm_open(): when the first vma is linked, split its locks from the lru.
int count;
spin_unlock(&lli->lli_lock);
+
+ if (!lsm)
+ return;
count = obd_join_lru(sbi->ll_osc_exp, lsm, 0);
VMA_DEBUG(vma, "split %d unused locks from lru\n", count);
} else {
int count;
spin_unlock(&lli->lli_lock);
+
+ if (!lsm)
+ return;
count = obd_join_lru(sbi->ll_osc_exp, lsm, 1);
VMA_DEBUG(vma, "join %d unused locks to lru\n", count);
} else {
}
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
+#ifndef HAVE_VM_OP_FAULT
+#ifndef HAVE_FILEMAP_POPULATE
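+/* Kernels that lack an exported filemap_populate(): capture the generic
+ * populate method from the vma installed by generic_file_mmap(); see
+ * ll_file_mmap() at the bottom of this file. */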
+static int (*filemap_populate)(struct vm_area_struct *area,
+                               unsigned long address, unsigned long len,
+                               pgprot_t prot, unsigned long pgoff,
+                               int nonblock);
+#endif
static int ll_populate(struct vm_area_struct *area, unsigned long address,
unsigned long len, pgprot_t prot, unsigned long pgoff,
int nonblock)
/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
{
- return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << PAGE_SHIFT));
+ return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
}
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-/* [first, last] are the byte offsets affected.
- * vm_{start, end} are user addresses of the first byte of the mapping and
- * the next byte beyond it
- * vm_pgoff is the page index of the first byte in the mapping */
-static void teardown_vmas(struct vm_area_struct *vma, __u64 first,
- __u64 last)
-{
- unsigned long address, len;
- for (; vma ; vma = vma->vm_next_share) {
- if (last >> PAGE_SHIFT < vma->vm_pgoff)
- continue;
- if (first >> PAGE_SHIFT >= (vma->vm_pgoff +
- ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)))
- continue;
-
- /* XXX in case of unmap the cow pages of a running file,
- * don't unmap these private writeable mapping here!
- * though that will break private mappping a little.
- *
- * the clean way is to check the mapping of every page
- * and just unmap the non-cow pages, just like
- * unmap_mapping_range() with even_cow=0 in kernel 2.6.
- */
- if (!(vma->vm_flags & VM_SHARED) &&
- (vma->vm_flags & VM_WRITE))
- continue;
-
- address = max((unsigned long)vma->vm_start,
- file_to_user(vma, first));
- len = min((unsigned long)vma->vm_end,
- file_to_user(vma, last) + 1) - address;
-
- VMA_DEBUG(vma, "zapping vma [first="LPU64" last="LPU64" "
- "address=%ld len=%ld]\n", first, last, address, len);
- LASSERT(len > 0);
- ll_zap_page_range(vma, address, len);
- }
-}
-#endif
-
/* XXX put nice comment here. talk about __free_pte -> dirty pages and
* nopage's reference passing to the pte */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
ENTRY;
LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
if (mapping_mapped(mapping)) {
rc = 0;
- unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
+ unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
last - first + 1, 0);
}
-#else
- spin_lock(&mapping->i_shared_lock);
- if (mapping->i_mmap != NULL) {
- rc = 0;
- teardown_vmas(mapping->i_mmap, first, last);
- }
- if (mapping->i_mmap_shared != NULL) {
- rc = 0;
- teardown_vmas(mapping->i_mmap_shared, first, last);
- }
- spin_unlock(&mapping->i_shared_lock);
-#endif
+
RETURN(rc);
}
static struct vm_operations_struct ll_file_vm_ops = {
- .nopage = ll_nopage,
.open = ll_vm_open,
.close = ll_vm_close,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
+#ifdef HAVE_VM_OP_FAULT
+ .fault = ll_fault,
+#else
+ .nopage = ll_nopage,
.populate = ll_populate,
#endif
};
int rc;
ENTRY;
+ ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode), LPROC_LL_MAP, 1);
rc = generic_file_mmap(file, vma);
if (rc == 0) {
+#if !defined(HAVE_FILEMAP_POPULATE) && !defined(HAVE_VM_OP_FAULT)
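+                /* Capture the kernel's generic populate method before we
+                 * replace vm_ops; ll_populate() above calls through this
+                 * pointer when filemap_populate() is not exported. */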
+ if (!filemap_populate)
+ filemap_populate = vma->vm_ops->populate;
+#endif
vma->vm_ops = &ll_file_vm_ops;
vma->vm_ops->open(vma);
/* update the inode's size and mtime */
- rc = ll_glimpse_size(file->f_dentry->d_inode);
+ rc = ll_glimpse_size(file->f_dentry->d_inode, 0);
}
RETURN(rc);