From fb4622d61c67755152e9fcf82ab1140e74d19db7 Mon Sep 17 00:00:00 2001
From: adilger
Date: Thu, 24 Feb 2005 22:36:51 +0000
Subject: [PATCH] Branch: b1_4 Add newlines to VMA_DEBUG messages in
 ll_vm_{open,close} to avoid console spew.

---
 lustre/llite/llite_mmap.c | 97 ++++++++++++++++++++++-------------------------
 1 file changed, 46 insertions(+), 51 deletions(-)

diff --git a/lustre/llite/llite_mmap.c b/lustre/llite/llite_mmap.c
index 9aab20a..9c8bb0f 100644
--- a/lustre/llite/llite_mmap.c
+++ b/lustre/llite/llite_mmap.c
@@ -122,7 +122,7 @@ int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two)
         return 0; /* they are the same object and overlap */
 }
 
-static void lt_merge(struct ll_lock_tree_node *dst, 
+static void lt_merge(struct ll_lock_tree_node *dst,
                      struct ll_lock_tree_node *src)
 {
         dst->lt_policy.l_extent.start = min(dst->lt_policy.l_extent.start,
@@ -135,7 +135,7 @@ static void lt_merge(struct ll_lock_tree_node *dst,
                 dst->lt_mode = LCK_PW;
 }
 
-static void lt_insert(struct ll_lock_tree *tree, 
+static void lt_insert(struct ll_lock_tree *tree,
                       struct ll_lock_tree_node *node)
 {
         struct ll_lock_tree_node *walk;
@@ -176,10 +176,10 @@ static struct ll_lock_tree_node *lt_least_node(struct ll_lock_tree *tree)
         rb_node_t *rbnode;
         struct ll_lock_tree_node *node = NULL;
 
-        for ( rbnode = tree->lt_root.rb_node; rbnode != NULL; 
+        for ( rbnode = tree->lt_root.rb_node; rbnode != NULL;
               rbnode = rbnode->rb_left) {
                 if (rbnode->rb_left == NULL) {
-                        node = rb_entry(rbnode, struct ll_lock_tree_node, 
+                        node = rb_entry(rbnode, struct ll_lock_tree_node,
                                         lt_node);
                         break;
                 }
@@ -196,12 +196,12 @@ int ll_tree_unlock(struct ll_lock_tree *tree)
         ENTRY;
 
         list_for_each_safe(pos, n, &tree->lt_locked_list) {
-                node = list_entry(pos, struct ll_lock_tree_node, 
+                node = list_entry(pos, struct ll_lock_tree_node,
                                   lt_locked_item);
 
                 inode = node->lt_inode;
-                rc = ll_extent_unlock(tree->lt_fd, inode, 
-                                      ll_i2info(inode)->lli_smd, node->lt_mode, 
+                rc = ll_extent_unlock(tree->lt_fd, inode,
+                                      ll_i2info(inode)->lli_smd, node->lt_mode,
                                       &node->lt_lockh);
                 if (rc != 0) {
                         /* XXX better message */
@@ -241,8 +241,8 @@ int ll_tree_lock(struct ll_lock_tree *tree,
 
         while ((node = lt_least_node(tree))) {
                 struct inode *inode = node->lt_inode;
-                rc = ll_extent_lock(tree->lt_fd, inode, 
-                                    ll_i2info(inode)->lli_smd, node->lt_mode, 
+                rc = ll_extent_lock(tree->lt_fd, inode,
+                                    ll_i2info(inode)->lli_smd, node->lt_mode,
                                     &node->lt_policy, &node->lt_lockh,
                                     ast_flags);
                 if (rc != 0)
@@ -259,7 +259,7 @@ out:
 
 static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
 {
-        /* we only want to hold PW locks if the mmap() can generate 
+        /* we only want to hold PW locks if the mmap() can generate
          * writes back to the file and that only happens in shared
          * writable vmas */
         if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
@@ -267,13 +267,13 @@ static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
         return LCK_PR;
 }
 
-static void policy_from_vma(ldlm_policy_data_t *policy, 
+static void policy_from_vma(ldlm_policy_data_t *policy,
                             struct vm_area_struct *vma, unsigned long addr,
                             size_t count)
 {
         policy->l_extent.start = ((addr - vma->vm_start) & PAGE_CACHE_MASK) +
                                  (vma->vm_pgoff << PAGE_CACHE_SHIFT);
-        policy->l_extent.end = (policy->l_extent.start + count - 1) | 
+        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                                (PAGE_CACHE_SIZE - 1);
 }
 
@@ -284,7 +284,7 @@ static struct vm_area_struct * our_vma(unsigned long addr, size_t count)
         ENTRY;
 
         spin_lock(&mm->page_table_lock);
-        for(vma = find_vma(mm, addr); 
+        for(vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                 if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage &&
                     vma->vm_flags & VM_SHARED) {
@@ -317,8 +317,8 @@ int lt_get_mmap_locks(struct ll_lock_tree *tree,
                 inode = vma->vm_file->f_dentry->d_inode;
                 policy_from_vma(&policy, vma, addr, count);
 
-                node = ll_node_from_inode(inode, policy.l_extent.start, 
-                                          policy.l_extent.end, 
+                node = ll_node_from_inode(inode, policy.l_extent.start,
+                                          policy.l_extent.end,
                                           mode_from_vma(vma));
                 if (IS_ERR(node)) {
                         CERROR("not enough mem for lock_tree_node!\n");
@@ -335,13 +335,13 @@ int lt_get_mmap_locks(struct ll_lock_tree *tree,
 }
 
 /* FIXME: there is a pagefault race goes as follow (only 2.4):
- * 1. A user process on node A accesses a portion of a mapped file, 
- *    resulting in a page fault. The pagefault handler invokes the 
+ * 1. A user process on node A accesses a portion of a mapped file,
+ *    resulting in a page fault. The pagefault handler invokes the
  *    ll_nopage function, which reads the page into memory.
- * 2. A user process on node B writes to the same portion of the file 
+ * 2. A user process on node B writes to the same portion of the file
  *    (either via mmap or write()), that cause node A to cancel the
  *    lock and truncate the page.
- * 3. Node A then executes the rest of do_no_page(), entering the 
+ * 3. Node A then executes the rest of do_no_page(), entering the
  *    now-invalid page into the PTEs.
  *
  * Make the whole do_no_page as a hook to cover both the page cache
@@ -354,7 +354,7 @@ struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                        int *type)
 #else
 struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
-                       int unused)
+                       int type)
 #endif
 {
         struct file *filp = vma->vm_file;
@@ -378,7 +378,7 @@ struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
         policy_from_vma(&policy, vma, address, PAGE_CACHE_SIZE);
 
         CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
-               vma, inode->i_ino, policy.l_extent.start, 
+               vma, inode->i_ino, policy.l_extent.start,
                policy.l_extent.end);
 
         mode = mode_from_vma(vma);
@@ -388,35 +388,31 @@ struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                             &lockh, LDLM_FL_CBPENDING | LDLM_FL_NO_LRU);
         if (rc != 0)
                 RETURN(NULL);
-        
+
         if (vma->vm_flags & VM_EXEC && LTIME_S(inode->i_mtime) != old_mtime)
                 CWARN("binary changed. inode %lu\n", inode->i_ino);
-        
+
         /* XXX change inode size without i_sem hold! there is a race condition
          * with truncate path. (see ll_extent_lock) */
         kms = lov_merge_size(ll_i2info(inode)->lli_smd, 1);
         pgoff = ((address - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
         size = (kms + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-        
+
         if (pgoff >= size)
                 ll_glimpse_size(inode);
         else
                 inode->i_size = kms;
-        
-        /* disable VM_SEQ_READ and use VM_RAND_READ to make sure that 
-         * the kernel will not read other pages not covered by ldlm in 
-         * filemap_nopage. we do our readahead in ll_readpage. 
+
+        /* disable VM_SEQ_READ and use VM_RAND_READ to make sure that
+         * the kernel will not read other pages not covered by ldlm in
+         * filemap_nopage. we do our readahead in ll_readpage.
          */
         rand_read = vma->vm_flags & VM_RAND_READ;
         seq_read = vma->vm_flags & VM_SEQ_READ;
         vma->vm_flags &= ~ VM_SEQ_READ;
         vma->vm_flags |= VM_RAND_READ;
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
         page = filemap_nopage(vma, address, type);
-#else
-        page = filemap_nopage(vma, address, unused);
-#endif
 
         vma->vm_flags &= ~VM_RAND_READ;
         vma->vm_flags |= (rand_read | seq_read);
@@ -424,8 +420,8 @@ struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
         RETURN(page);
 }
 
-/* To avoid cancel the locks covering mmapped region for lock cache pressure, 
- * we track the mapped vma count by lli_mmap_cnt. 
+/* To avoid cancel the locks covering mmapped region for lock cache pressure,
+ * we track the mapped vma count by lli_mmap_cnt.
  * ll_vm_open(): when first vma is linked, split locks from lru.
  * ll_vm_close(): when last vma is unlinked, join all this file's locks to lru.
  *
@@ -438,19 +434,19 @@ static void ll_vm_open(struct vm_area_struct * vma)
         ENTRY;
 
         LASSERT(vma->vm_file);
-        
+
         spin_lock(&lli->lli_lock);
         LASSERT(atomic_read(&lli->lli_mmap_cnt) >= 0);
-        
+
         atomic_inc(&lli->lli_mmap_cnt);
         if (atomic_read(&lli->lli_mmap_cnt) == 1) {
                 struct lov_stripe_md *lsm = lli->lli_smd;
                 struct ll_sb_info *sbi = ll_i2sbi(inode);
                 int count;
-                
+
                 spin_unlock(&lli->lli_lock);
                 count = obd_join_lru(sbi->ll_osc_exp, lsm, 0);
-                VMA_DEBUG(vma, "split %d unused locks from lru", count);
+                VMA_DEBUG(vma, "split %d unused locks from lru\n", count);
         } else {
                 spin_unlock(&lli->lli_lock);
         }
@@ -476,7 +472,7 @@ static void ll_vm_close(struct vm_area_struct *vma)
 
                 spin_unlock(&lli->lli_lock);
                 count = obd_join_lru(sbi->ll_osc_exp, lsm, 1);
-                VMA_DEBUG(vma, "join %d unused locks to lru", count);
+                VMA_DEBUG(vma, "join %d unused locks to lru\n", count);
         } else {
                 spin_unlock(&lli->lli_lock);
         }
@@ -484,7 +480,7 @@ static void ll_vm_close(struct vm_area_struct *vma)
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
 static int ll_populate(struct vm_area_struct *area, unsigned long address,
-                       unsigned long len, pgprot_t prot, unsigned long pgoff, 
+                       unsigned long len, pgprot_t prot, unsigned long pgoff,
                        int nonblock)
 {
         int rc = 0;
@@ -500,13 +496,13 @@ static int ll_populate(struct vm_area_struct *area, unsigned long address,
 
 static inline unsigned long file_to_user(struct vm_area_struct *vma,
                                          __u64 byte)
 {
-        return vma->vm_start + 
+        return vma->vm_start +
                (byte - ((__u64)vma->vm_pgoff << PAGE_SHIFT));
 }
 
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-/* [first, last] are the byte offsets affected. 
+/* [first, last] are the byte offsets affected.
  * vm_{start, end} are user addresses of the first byte of the mapping and
  * the next byte beyond it
  * vm_pgoff is the page index of the first byte in the mapping */
@@ -517,10 +513,10 @@ static void teardown_vmas(struct vm_area_struct *vma, __u64 first,
         for (; vma ; vma = vma->vm_next_share) {
                 if (last >> PAGE_SHIFT < vma->vm_pgoff)
                         continue;
-                if (first >> PAGE_SHIFT >= (vma->vm_pgoff + 
+                if (first >> PAGE_SHIFT >= (vma->vm_pgoff +
                     ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)))
                         continue;
-                
+
                 /* XXX in case of unmap the cow pages of a running file,
                  * don't unmap these private writeable mapping here!
                  * though that will break private mappping a little.
@@ -532,10 +528,10 @@ static void teardown_vmas(struct vm_area_struct *vma, __u64 first,
 
                 if (!(vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
                         continue;
-                
-                address = max((unsigned long)vma->vm_start, 
+
+                address = max((unsigned long)vma->vm_start,
                               file_to_user(vma, first));
-                len = min((unsigned long)vma->vm_end, 
+                len = min((unsigned long)vma->vm_end,
                           file_to_user(vma, last) + 1) - address;
 
                 VMA_DEBUG(vma, "zapping vma [first="LPU64" last="LPU64" "
@@ -548,12 +544,11 @@ static void teardown_vmas(struct vm_area_struct *vma, __u64 first,
 
 /* XXX put nice comment here. talk about __free_pte -> dirty pages and
  * nopage's reference passing to the pte */
-int ll_teardown_mmaps(struct address_space *mapping, __u64 first,
-                      __u64 last)
+int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
 {
         int rc = -ENOENT;
         ENTRY;
-        
+
         LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
         if (mapping_mapped(mapping)) {
@@ -597,6 +592,6 @@ int ll_file_mmap(struct file * file, struct vm_area_struct * vma)
                 /* update the inode's size and mtime */
                 rc = ll_glimpse_size(file->f_dentry->d_inode);
         }
-        
+
         RETURN(rc);
 }
-- 
1.8.3.1
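
Aside: a minimal, hypothetical sketch of why the added "\n" matters. MY_VMA_DEBUG
below is an invented printk-style stand-in, not the real Lustre VMA_DEBUG
definition; the point is only that a debug message without a terminating newline
leaves the console line open, so the next message is appended to it and the
output runs together, which is the "console spew" the subject line refers to.

/* Hypothetical illustration only; MY_VMA_DEBUG is not the real VMA_DEBUG. */
#include <linux/kernel.h>
#include <linux/mm.h>

#define MY_VMA_DEBUG(vma, fmt, arg...)                                  \
        printk(KERN_DEBUG "vma %p [0x%lx-0x%lx]: " fmt,                 \
               (vma), (vma)->vm_start, (vma)->vm_end, ## arg)

static void vma_debug_example(struct vm_area_struct *vma, int count)
{
        /* No trailing '\n': the next message continues on this console line. */
        MY_VMA_DEBUG(vma, "split %d unused locks from lru", count);
        /* With '\n', as in this patch, each message gets its own line. */
        MY_VMA_DEBUG(vma, "split %d unused locks from lru\n", count);
}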