/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <asm/segment.h>
#include <linux/pagemap.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/iobuf.h>
#endif

#define DEBUG_SUBSYSTEM S_LLITE

//#include <lustre_mdc.h>
#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

#define VMA_DEBUG(vma, fmt, arg...)                                          \
        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) "   \
               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end,  \
               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,               \
               vma->vm_file->f_dentry->d_inode->i_ino,                       \
               vma->vm_file->f_dentry->d_iname, ## arg)

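/* Extent-lock requests for a single syscall are staged in a small
 * red-black tree keyed by (device, object id, extent).  Overlapping
 * requests on the same object are merged before any lock is taken, so
 * the locks are always acquired in a single, sorted pass. */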
struct ll_lock_tree_node {
        rb_node_t               lt_node;
        struct list_head        lt_locked_item;
        __u64                   lt_oid;
        ldlm_policy_data_t      lt_policy;
        struct lustre_handle    lt_lockh;
        ldlm_mode_t             lt_mode;
        struct inode           *lt_inode;
};

int lt_get_mmap_locks(struct ll_lock_tree *tree,
                      unsigned long addr, size_t count);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type);
#else
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int type /* unused */);
#endif

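/* Allocate and fill a lock tree node covering the extent [start, end]
 * of @inode with the given lock mode. */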
struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
                                              __u64 end, ldlm_mode_t mode)
{
        struct ll_lock_tree_node *node;

        OBD_ALLOC(node, sizeof(*node));
        if (node == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        node->lt_inode = inode;
        node->lt_oid = ll_i2info(inode)->lli_smd->lsm_object_id;
        node->lt_policy.l_extent.start = start;
        node->lt_policy.l_extent.end = end;
        memset(&node->lt_lockh, 0, sizeof(node->lt_lockh));
        INIT_LIST_HEAD(&node->lt_locked_item);
        node->lt_mode = mode;

        return node;
}

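/* Total order on lock nodes: device, then object id, then extent.
 * Overlapping extents on the same object compare equal so that
 * lt_insert() merges them. */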
int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two)
{
        /* order across filesystems first to avoid multi-fs deadlock */
        if (one->lt_inode->i_sb->s_dev < two->lt_inode->i_sb->s_dev)
                return -1;
        if (one->lt_inode->i_sb->s_dev > two->lt_inode->i_sb->s_dev)
                return 1;

        if (one->lt_oid < two->lt_oid)
                return -1;
        if (one->lt_oid > two->lt_oid)
                return 1;

        if (one->lt_policy.l_extent.end < two->lt_policy.l_extent.start)
                return -1;
        if (one->lt_policy.l_extent.start > two->lt_policy.l_extent.end)
                return 1;

        return 0; /* they are the same object and overlap */
}

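/* Fold @src into @dst: widen @dst's extent to cover both nodes and
 * take the stronger of the two lock modes. */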
static void lt_merge(struct ll_lock_tree_node *dst,
                     struct ll_lock_tree_node *src)
{
        dst->lt_policy.l_extent.start = min(dst->lt_policy.l_extent.start,
                                            src->lt_policy.l_extent.start);
        dst->lt_policy.l_extent.end = max(dst->lt_policy.l_extent.end,
                                          src->lt_policy.l_extent.end);

        /* XXX could be a real call to the dlm to find superset modes */
        if (src->lt_mode == LCK_PW && dst->lt_mode != LCK_PW)
                dst->lt_mode = LCK_PW;
}

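/* Insert @node into the tree, merging it with any node it overlaps.
 * A merge can itself create a new overlap, so the walk restarts from
 * the root after each one. */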
static void lt_insert(struct ll_lock_tree *tree,
                      struct ll_lock_tree_node *node)
{
        struct ll_lock_tree_node *walk;
        rb_node_t **p, *parent;
        ENTRY;

restart:
        p = &tree->lt_root.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                walk = rb_entry(parent, struct ll_lock_tree_node, lt_node);
                switch (lt_compare(node, walk)) {
                case -1:
                        p = &(*p)->rb_left;
                        break;
                case 1:
                        p = &(*p)->rb_right;
                        break;
                case 0:
                        lt_merge(node, walk);
                        rb_erase(&walk->lt_node, &tree->lt_root);
                        OBD_FREE(walk, sizeof(*walk));
                        goto restart;
                default:
                        LBUG();
                }
        }
        rb_link_node(&node->lt_node, parent, p);
        rb_insert_color(&node->lt_node, &tree->lt_root);
        EXIT;
}

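/* Return the leftmost (least) node of the tree, or NULL if empty. */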
static struct ll_lock_tree_node *lt_least_node(struct ll_lock_tree *tree)
{
        rb_node_t *rbnode;
        struct ll_lock_tree_node *node = NULL;

        for (rbnode = tree->lt_root.rb_node; rbnode != NULL;
             rbnode = rbnode->rb_left) {
                if (rbnode->rb_left == NULL) {
                        node = rb_entry(rbnode, struct ll_lock_tree_node,
                                        lt_node);
                        break;
                }
        }
        return node;
}

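/* Release every lock acquired by ll_tree_lock() and free all nodes,
 * including any still sitting unlocked in the rbtree. */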
int ll_tree_unlock(struct ll_lock_tree *tree)
{
        struct ll_lock_tree_node *node;
        struct list_head *pos, *n;
        struct inode *inode;
        int rc = 0;
        ENTRY;

        list_for_each_safe(pos, n, &tree->lt_locked_list) {
                node = list_entry(pos, struct ll_lock_tree_node,
                                  lt_locked_item);

                inode = node->lt_inode;
                rc = ll_extent_unlock(tree->lt_fd, inode,
                                      ll_i2info(inode)->lli_smd, node->lt_mode,
                                      &node->lt_lockh);
                if (rc != 0) {
                        /* XXX better message */
                        CERROR("couldn't unlock %d\n", rc);
                }

                list_del(&node->lt_locked_item);
                OBD_FREE(node, sizeof(*node));
        }

        while ((node = lt_least_node(tree))) {
                rb_erase(&node->lt_node, &tree->lt_root);
                OBD_FREE(node, sizeof(*node));
        }

        RETURN(rc);
}

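/* Acquire dlm extent locks for @first_node (if given) plus every
 * Lustre mmap region inside [buf, buf + count), in the stable order
 * defined by lt_compare().  On failure everything taken so far is
 * dropped; on success the caller releases the locks with
 * ll_tree_unlock(). */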
int ll_tree_lock(struct ll_lock_tree *tree,
                 struct ll_lock_tree_node *first_node,
                 const char *buf, size_t count, int ast_flags)
{
        struct ll_lock_tree_node *node;
        int rc = 0;
        ENTRY;

        tree->lt_root.rb_node = NULL;
        INIT_LIST_HEAD(&tree->lt_locked_list);
        if (first_node != NULL)
                lt_insert(tree, first_node);

        /* To avoid a subtle deadlock: client1 reads file1 into a buffer
         * mmapped from file2 while, at the same time, client2 reads
         * file2 into a buffer mmapped from file1. */
        rc = lt_get_mmap_locks(tree, (unsigned long)buf, count);
        if (rc)
                GOTO(out, rc);

        while ((node = lt_least_node(tree))) {
                struct inode *inode = node->lt_inode;
                rc = ll_extent_lock(tree->lt_fd, inode,
                                    ll_i2info(inode)->lli_smd, node->lt_mode,
                                    &node->lt_policy, &node->lt_lockh,
                                    ast_flags);
                if (rc != 0)
                        GOTO(out, rc);

                rb_erase(&node->lt_node, &tree->lt_root);
                list_add_tail(&node->lt_locked_item, &tree->lt_locked_list);
        }
        RETURN(rc);
out:
        ll_tree_unlock(tree);
        RETURN(rc);
}

static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
{
        /* we only want to hold PW locks if the mmap() can generate
         * writes back to the file and that only happens in shared
         * writable vmas */
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
                return LCK_PW;
        return LCK_PR;
}

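/* Translate [addr, addr + count) within @vma into the matching byte
 * extent of the file, widened to page boundaries. */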
static void policy_from_vma(ldlm_policy_data_t *policy,
                            struct vm_area_struct *vma, unsigned long addr,
                            size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
                                 (vma->vm_pgoff << CFS_PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~CFS_PAGE_MASK;
}

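/* Find the first vma in the current address space that overlaps
 * [addr, addr + count) and is a shared mapping of a Lustre file (its
 * nopage method is ll_nopage). */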
static struct vm_area_struct * our_vma(unsigned long addr, size_t count)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        /* No MM (e.g. NFS)?  No vmas either. */
        if (!mm)
                RETURN(NULL);

        spin_lock(&mm->page_table_lock);
        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        spin_unlock(&mm->page_table_lock);
        RETURN(ret);
}

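/* Insert a lock request for every Lustre mapping touched by the user
 * buffer [addr, addr + count), so that the mapped extents are locked
 * up front, in sorted order, before the buffer is faulted in. */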
int lt_get_mmap_locks(struct ll_lock_tree *tree,
                      unsigned long addr, size_t count)
{
        struct vm_area_struct *vma;
        struct ll_lock_tree_node *node;
        ldlm_policy_data_t policy;
        struct inode *inode;
        ENTRY;

        if (count == 0)
                RETURN(0);

        /* we need to look up vmas on page aligned addresses */
        count += addr & (~CFS_PAGE_MASK);
        addr &= CFS_PAGE_MASK;

        while ((vma = our_vma(addr, count)) != NULL) {
                LASSERT(vma->vm_file);

                inode = vma->vm_file->f_dentry->d_inode;
                policy_from_vma(&policy, vma, addr, count);
                node = ll_node_from_inode(inode, policy.l_extent.start,
                                          policy.l_extent.end,
                                          mode_from_vma(vma));
                if (IS_ERR(node)) {
                        CERROR("not enough mem for lock_tree_node!\n");
                        RETURN(-ENOMEM);
                }
                lt_insert(tree, node);

                if (vma->vm_end - addr >= count)
                        break;
                count -= vma->vm_end - addr;
                addr = vma->vm_end;
        }
        RETURN(0);
}

/* FIXME: there is a pagefault race, as follows (2.4 only):
 * 1. A user process on node A accesses a portion of a mapped file,
 *    resulting in a page fault.  The pagefault handler invokes the
 *    ll_nopage function, which reads the page into memory.
 * 2. A user process on node B writes to the same portion of the file
 *    (either via mmap or write()), which causes node A to cancel the
 *    lock and truncate the page.
 * 3. Node A then executes the rest of do_no_page(), entering the
 *    now-invalid page into the PTEs.
 *
 * Making the whole of do_no_page a hook that covers both the page cache
 * and the page-table installation with the dlm lock held would
 * eliminate this race.
 *
 * In 2.6, the truncate_count of the address_space covers this race. */

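/* Page fault handler for a Lustre mapping: take a dlm extent lock
 * around the faulting page, pull the inode size up to the known
 * minimum (kms) so the generic code will accept the page, then let
 * filemap_nopage() do the actual lookup with readahead suppressed. */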
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type)
#else
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int type /* unused */)
#endif
{
        struct file *filp = vma->vm_file;
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        struct inode *inode = filp->f_dentry->d_inode;
        struct lustre_handle lockh = { 0 };
        ldlm_policy_data_t policy;
        ldlm_mode_t mode;
        struct page *page = NULL;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm;
        struct ost_lvb lvb;
        __u64 kms, old_mtime;
        unsigned long pgoff, size, rand_read, seq_read;
        int rc = 0;
        ENTRY;

        if (lli->lli_smd == NULL) {
                CERROR("No lsm on fault?\n");
                RETURN(NULL);
        }

        /* start and end the lock on the first and last bytes in the page */
        policy_from_vma(&policy, vma, address, CFS_PAGE_SIZE);

        CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
               vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);

        mode = mode_from_vma(vma);
        old_mtime = LTIME_S(inode->i_mtime);

        lsm = lli->lli_smd;
        rc = ll_extent_lock(fd, inode, lsm, mode, &policy,
                            &lockh, LDLM_FL_CBPENDING | LDLM_FL_NO_LRU);
        if (rc != 0)
                RETURN(NULL);

        if (vma->vm_flags & VM_EXEC && LTIME_S(inode->i_mtime) != old_mtime)
                CWARN("binary changed. inode %lu\n", inode->i_ino);

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;

        pgoff = ((address - vma->vm_start) >> CFS_PAGE_SHIFT) + vma->vm_pgoff;
        size = (kms + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;

        if (pgoff >= size) {
                lov_stripe_unlock(lsm);
                ll_glimpse_size(inode, LDLM_FL_BLOCK_GRANTED);
        } else {
                /* XXX change inode size without ll_inode_size_lock() held!
                 *     there is a race condition with truncate path. (see
                 *     ll_extent_lock) */
                /* region is within kms and, hence, within real file size (A).
                 * We need to increase i_size to cover the read region so that
                 * generic_file_read() will do its job, but that doesn't mean
                 * the kms size is _correct_, it is only the _minimum_ size.
                 * If someone does a stat they will get the correct size which
                 * will always be >= the kms value here.  b=11081 */
                if (i_size_read(inode) < kms) {
                        i_size_write(inode, kms);
                        CDEBUG(D_INODE, "ino=%lu, updating i_size %llu\n",
                               inode->i_ino, i_size_read(inode));
                }
                lov_stripe_unlock(lsm);
        }

        /* If the mapping is writeable, adjust kms to cover this page,
         * but do not extend kms beyond the actual file size.
         * policy.l_extent.end is set to the end of the page by
         * policy_from_vma(). */
        lov_stripe_lock(lsm);
        if (mode == LCK_PW)
                obd_adjust_kms(ll_i2dtexp(inode), lsm,
                               min_t(loff_t, policy.l_extent.end + 1,
                               i_size_read(inode)), 0);
        lov_stripe_unlock(lsm);

        /* disable VM_SEQ_READ and use VM_RAND_READ to make sure that
         * the kernel will not read other pages not covered by ldlm in
         * filemap_nopage.  we do our readahead in ll_readpage. */
        rand_read = vma->vm_flags & VM_RAND_READ;
        seq_read = vma->vm_flags & VM_SEQ_READ;
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        page = filemap_nopage(vma, address, type);
        LL_CDEBUG_PAGE(D_PAGE, page, "got addr %lu type %lx\n", address,
                       (long)type);
        vma->vm_flags &= ~VM_RAND_READ;
        vma->vm_flags |= (rand_read | seq_read);

        ll_extent_unlock(fd, inode, ll_i2info(inode)->lli_smd, mode, &lockh);
        RETURN(page);
}

/* To avoid cancelling the locks that cover an mmapped region under lock
 * cache pressure, we track the mapped vma count in lli_mmap_cnt.
 * ll_vm_open():  when the first vma is linked, split the file's locks
 *                from the lru.
 * ll_vm_close(): when the last vma is unlinked, join all of this file's
 *                locks back onto the lru.
 *
 * XXX we don't check the region of the vma against that of the lock,
 *     for performance. */
static void ll_vm_open(struct vm_area_struct * vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

        LASSERT(vma->vm_file);

        spin_lock(&lli->lli_lock);
        LASSERT(atomic_read(&lli->lli_mmap_cnt) >= 0);

        atomic_inc(&lli->lli_mmap_cnt);
        if (atomic_read(&lli->lli_mmap_cnt) == 1) {
                struct lov_stripe_md *lsm = lli->lli_smd;
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                int count;

                spin_unlock(&lli->lli_lock);

                if (!lsm)
                        return;
                count = obd_join_lru(sbi->ll_dt_exp, lsm, 0);
                VMA_DEBUG(vma, "split %d unused locks from lru\n", count);
        } else {
                spin_unlock(&lli->lli_lock);
        }
}

static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

        LASSERT(vma->vm_file);

        spin_lock(&lli->lli_lock);
        LASSERT(atomic_read(&lli->lli_mmap_cnt) > 0);

        atomic_dec(&lli->lli_mmap_cnt);
        if (atomic_read(&lli->lli_mmap_cnt) == 0) {
                struct lov_stripe_md *lsm = lli->lli_smd;
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                int count;

                spin_unlock(&lli->lli_lock);

                if (!lsm)
                        return;
                count = obd_join_lru(sbi->ll_dt_exp, lsm, 1);
                VMA_DEBUG(vma, "join %d unused locks to lru\n", count);
        } else {
                spin_unlock(&lli->lli_lock);
        }
}

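/* On 2.6 kernels that do not export filemap_populate(), the function
 * pointer below is captured from the vma's original vm_ops in
 * ll_file_mmap() before ll_file_vm_ops is installed. */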
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
#ifndef HAVE_FILEMAP_POPULATE
static int (*filemap_populate)(struct vm_area_struct *area,
                               unsigned long address, unsigned long len,
                               pgprot_t prot, unsigned long pgoff,
                               int nonblock);
#endif
static int ll_populate(struct vm_area_struct *area, unsigned long address,
                       unsigned long len, pgprot_t prot, unsigned long pgoff,
                       int nonblock)
{
        int rc = 0;
        ENTRY;

        /* always set nonblock as true to avoid page read ahead */
        rc = filemap_populate(area, address, len, prot, pgoff, 1);
        RETURN(rc);
}
#endif

/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
{
        return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/* [first, last] are the byte offsets affected.
 * vm_{start, end} are user addresses of the first byte of the mapping and
 * the next byte beyond it
 * vm_pgoff is the page index of the first byte in the mapping */
static void teardown_vmas(struct vm_area_struct *vma, __u64 first,
                          __u64 last)
{
        unsigned long address, len;

        for (; vma; vma = vma->vm_next_share) {
                if (last >> CFS_PAGE_SHIFT < vma->vm_pgoff)
                        continue;
                if (first >> CFS_PAGE_SHIFT >= (vma->vm_pgoff +
                    ((vma->vm_end - vma->vm_start) >> CFS_PAGE_SHIFT)))
                        continue;

                /* XXX when unmapping the cow pages of a running file,
                 * don't unmap these private writeable mappings here!
                 * though that will break private mapping a little.
                 *
                 * the clean way is to check the mapping of every page
                 * and just unmap the non-cow pages, just like
                 * unmap_mapping_range() with even_cows == 0 in kernel 2.6. */
                if (!(vma->vm_flags & VM_SHARED) &&
                    (vma->vm_flags & VM_WRITE))
                        continue;

                address = max((unsigned long)vma->vm_start,
                              file_to_user(vma, first));
                len = min((unsigned long)vma->vm_end,
                          file_to_user(vma, last) + 1) - address;

                VMA_DEBUG(vma, "zapping vma [first="LPU64" last="LPU64" "
                          "address=%ld len=%ld]\n", first, last, address, len);

                ll_zap_page_range(vma, address, len);
        }
}
#endif

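/* Unmap the page table entries covering byte range [first, last] of
 * the file in every process that maps it, so the next access faults
 * and must reacquire a dlm lock. */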
/* XXX put nice comment here.  talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
        int rc = -ENOENT;
        ENTRY;

        LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }
#else
        spin_lock(&mapping->i_shared_lock);
        if (mapping->i_mmap != NULL) {
                rc = 0;
                teardown_vmas(mapping->i_mmap, first, last);
        }
        if (mapping->i_mmap_shared != NULL) {
                rc = 0;
                teardown_vmas(mapping->i_mmap_shared, first, last);
        }
        spin_unlock(&mapping->i_shared_lock);
#endif

        RETURN(rc);
}

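/* vm_operations installed on all Lustre file mappings by
 * ll_file_mmap(). */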
static struct vm_operations_struct ll_file_vm_ops = {
        .nopage         = ll_nopage,
        .open           = ll_vm_open,
        .close          = ll_vm_close,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        .populate       = ll_populate,
#endif
};

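/* mmap() entry point: let generic_file_mmap() do the setup, then
 * install our vm_operations and glimpse the file so that its size and
 * mtime are current. */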
int ll_file_mmap(struct file * file, struct vm_area_struct * vma)
{
        int rc;
        ENTRY;

        ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode), LPROC_LL_MAP, 1);
        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
#if !defined(HAVE_FILEMAP_POPULATE) && \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
                if (!filemap_populate)
                        filemap_populate = vma->vm_ops->populate;
#endif
                vma->vm_ops = &ll_file_vm_ops;
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                rc = ll_glimpse_size(file->f_dentry->d_inode, 0);
        }

        RETURN(rc);
}