/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/segment.h>

#include <linux/pagemap.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/iobuf.h>
#endif
#include <linux/pagevec.h>
#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/lustre_mds.h>
#include <linux/lustre_lite.h>
#include <linux/lustre_audit.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>
struct ll_lock_tree_node {
        rb_node_t               lt_node;
        struct list_head        lt_locked_item;
        __u64                   lt_oid;
        ldlm_policy_data_t      lt_policy;
        struct lustre_handle    lt_lockh;
        ldlm_mode_t             lt_mode;
};

__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
                      unsigned long addr, size_t count);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type);
#else
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int unused);
#endif
struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
                                              __u64 end, ldlm_mode_t mode)
{
        struct ll_lock_tree_node *node;

        OBD_ALLOC(node, sizeof(*node));
        if (node == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        node->lt_oid = ll_i2info(inode)->lli_smd->lsm_object_id;
        node->lt_policy.l_extent.start = start;
        node->lt_policy.l_extent.end = end;
        memset(&node->lt_lockh, 0, sizeof(node->lt_lockh));
        INIT_LIST_HEAD(&node->lt_locked_item);
        node->lt_mode = mode;

        RETURN(node);
}
int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two)
{
        /* XXX remove this assert when we really want to use this function
         * to compare regions of different files */
        LASSERT(one->lt_oid == two->lt_oid);

        if (one->lt_oid < two->lt_oid)
                return -1;
        if (one->lt_oid > two->lt_oid)
                return 1;

        if (one->lt_policy.l_extent.end < two->lt_policy.l_extent.start)
                return -1;
        if (one->lt_policy.l_extent.start > two->lt_policy.l_extent.end)
                return 1;

        return 0; /* they are the same object and overlap */
}
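
/*
 * Worked example of the ordering above (illustrative values only, not from
 * the original source): with extents A = [0, 4095] and B = [4096, 8191] on
 * the same object, lt_compare(A, B) returns -1 and lt_compare(B, A) returns
 * 1, so they sort as distinct tree nodes.  With A = [0, 8191] and
 * B = [4096, 12287] the extents overlap, lt_compare() returns 0, and
 * lt_insert() below merges them into a single [0, 12287] node via lt_merge().
 */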
static void lt_merge(struct ll_lock_tree_node *dst,
                     struct ll_lock_tree_node *src)
{
        dst->lt_policy.l_extent.start = min(dst->lt_policy.l_extent.start,
                                            src->lt_policy.l_extent.start);
        dst->lt_policy.l_extent.end = max(dst->lt_policy.l_extent.end,
                                          src->lt_policy.l_extent.end);

        /* XXX could be a real call to the dlm to find superset modes */
        if (src->lt_mode == LCK_PW && dst->lt_mode != LCK_PW)
                dst->lt_mode = LCK_PW;
}
static void lt_insert(struct ll_lock_tree *tree,
                      struct ll_lock_tree_node *node)
{
        struct ll_lock_tree_node *walk;
        rb_node_t **p, *parent;
        ENTRY;

restart:
        p = &tree->lt_root.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                walk = rb_entry(parent, struct ll_lock_tree_node, lt_node);
                switch (lt_compare(node, walk)) {
                case -1:
                        p = &(*p)->rb_left;
                        break;
                case 1:
                        p = &(*p)->rb_right;
                        break;
                case 0:
                        /* overlapping extents: merge walk into node and
                         * re-insert the enlarged node from the root */
                        lt_merge(node, walk);
                        rb_erase(&walk->lt_node, &tree->lt_root);
                        OBD_FREE(walk, sizeof(*walk));
                        goto restart;
                default:
                        LBUG();
                }
        }
        rb_link_node(&node->lt_node, parent, p);
        rb_insert_color(&node->lt_node, &tree->lt_root);
        EXIT;
}
static struct ll_lock_tree_node *lt_least_node(struct ll_lock_tree *tree)
{
        rb_node_t *rbnode;
        struct ll_lock_tree_node *node = NULL;

        for (rbnode = tree->lt_root.rb_node; rbnode != NULL;
             rbnode = rbnode->rb_left) {
                if (rbnode->rb_left == NULL) {
                        node = rb_entry(rbnode, struct ll_lock_tree_node,
                                        lt_node);
                        break;
                }
        }
        RETURN(node);
}
int ll_tree_unlock(struct ll_lock_tree *tree, struct inode *inode)
{
        struct ll_lock_tree_node *node;
        struct list_head *pos, *n;
        int rc = 0;
        ENTRY;

        list_for_each_safe(pos, n, &tree->lt_locked_list) {
                node = list_entry(pos, struct ll_lock_tree_node,
                                  lt_locked_item);

                rc = ll_extent_unlock(tree->lt_fd, inode,
                                      ll_i2info(inode)->lli_smd, node->lt_mode,
                                      &node->lt_lockh);
                if (rc != 0) {
                        /* XXX better message */
                        CERROR("couldn't unlock %d\n", rc);
                }
                list_del(&node->lt_locked_item);
                OBD_FREE(node, sizeof(*node));
        }

        while ((node = lt_least_node(tree))) {
                rb_erase(&node->lt_node, &tree->lt_root);
                OBD_FREE(node, sizeof(*node));
        }

        RETURN(rc);
}
int ll_tree_lock(struct ll_lock_tree *tree,
                 struct ll_lock_tree_node *first_node, struct inode *inode,
                 const char *buf, size_t count, int ast_flags)
{
        struct ll_lock_tree_node *node;
        int rc = 0;
        ENTRY;

        tree->lt_root.rb_node = NULL;
        INIT_LIST_HEAD(&tree->lt_locked_list);
        if (first_node != NULL)
                lt_insert(tree, first_node);

        /* To get the lock ordering right, the only case we have to worry
         * about is a double lock: the user buffer is mmapped from exactly
         * this file. */
        if (mapping_mapped(inode->i_mapping)) {
                rc = lt_get_mmap_locks(tree, inode, (unsigned long)buf, count);
                if (rc)
                        GOTO(out, rc);
        }

        while ((node = lt_least_node(tree))) {
                struct obd_service_time *stime;
                stime = (node->lt_mode & LCK_PW) ?
                        &ll_i2sbi(inode)->ll_write_stime :
                        &ll_i2sbi(inode)->ll_read_stime;

                rc = ll_extent_lock(tree->lt_fd, inode,
                                    ll_i2info(inode)->lli_smd, node->lt_mode,
                                    &node->lt_policy, &node->lt_lockh,
                                    ast_flags, stime);
                if (rc != 0)
                        GOTO(out, rc);

                rb_erase(&node->lt_node, &tree->lt_root);
                list_add_tail(&node->lt_locked_item, &tree->lt_locked_list);
        }
        RETURN(rc);
out:
        ll_tree_unlock(tree, inode);
        RETURN(rc);
}
static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
{
        /* we only want to hold PW locks if the mmap() can generate
         * writes back to the file and that only happens in shared
         * writable mappings */
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
                return LCK_PW;
        return LCK_PR;
}
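
/*
 * Illustrative mapping of mmap flags to lock modes (values not from the
 * original source): an mmap(..., PROT_READ|PROT_WRITE, MAP_SHARED, ...)
 * region can write through to the file, so it takes LCK_PW; a MAP_PRIVATE
 * mapping, or a read-only MAP_SHARED one, can never dirty the file, so
 * LCK_PR suffices.
 */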
static void policy_from_vma(ldlm_policy_data_t *policy,
                            struct vm_area_struct *vma, unsigned long addr,
                            size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & PAGE_CACHE_MASK) +
                                 (vma->vm_pgoff << PAGE_CACHE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               (PAGE_CACHE_SIZE - 1);
}
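
/*
 * Worked example (illustrative values, assuming a 4096-byte
 * PAGE_CACHE_SIZE): for a vma with vm_start = 0x20000000 and vm_pgoff = 2
 * (i.e. the mapping begins at file offset 0x2000), addr = 0x20001500 and
 * count = 4096 give
 *   l_extent.start = (0x1500 & PAGE_CACHE_MASK) + (2 << 12)
 *                  = 0x1000 + 0x2000 = 0x3000
 *   l_extent.end   = (0x3000 + 4096 - 1) | 0xfff = 0x3fff
 * so the lock covers the whole file page [0x3000, 0x3fff] containing the
 * faulting address.
 */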
static struct vm_area_struct *our_vma(unsigned long addr, size_t count,
                                      struct inode *inode)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        spin_lock(&mm->page_table_lock);
        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count);
             vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage &&
                    vma->vm_file && vma->vm_file->f_dentry->d_inode == inode) {
                        ret = vma;
                        break;
                }
        }
        spin_unlock(&mm->page_table_lock);
        RETURN(ret);
}
int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
                      unsigned long addr, size_t count)
{
        struct vm_area_struct *vma;
        struct ll_lock_tree_node *node;
        ldlm_policy_data_t policy;
        ENTRY;

        if (count == 0)
                RETURN(0);

        /* we need to look up vmas on page aligned addresses */
        count += addr & (PAGE_SIZE - 1);
        addr -= addr & (PAGE_SIZE - 1);

        while ((vma = our_vma(addr, count, inode)) != NULL) {
                policy_from_vma(&policy, vma, addr, count);
                node = ll_node_from_inode(inode, policy.l_extent.start,
                                          policy.l_extent.end,
                                          mode_from_vma(vma));
                if (IS_ERR(node)) {
                        CERROR("not enough mem for lock_tree_node!\n");
                        RETURN(-ENOMEM);
                }
                lt_insert(tree, node);

                if (vma->vm_end - addr >= count)
                        break;
                count -= vma->vm_end - addr;
                addr = vma->vm_end;
        }
        RETURN(0);
}
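
/*
 * Worked example for the page alignment above (illustrative values,
 * 4096-byte pages): a write from user buffer addr = 0x20001500 with
 * count = 512 becomes addr = 0x20001000 and count = 0x700, so our_vma()
 * is queried with the full page range that the buffer touches rather than
 * the unaligned byte range.
 */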
/* FIXME: there is a pagefault race that goes as follows:
 * 1. A user process on node A accesses a portion of a mapped file,
 *    resulting in a page fault.  The pagefault handler invokes
 *    ll_nopage(), which reads the page into memory.
 * 2. A user process on node B writes to the same portion of the file
 *    (either via mmap or write()), which causes node A to cancel its
 *    lock and truncate the page.
 * 3. Node A then executes the rest of do_no_page(), entering the
 *    now-invalid page into the PTEs.
 *
 * Making the whole of do_no_page a hook, so that both the page cache and
 * the page-table installation are covered by the dlm lock, would
 * eliminate this race.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type)
#else
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int unused)
#endif
{
        struct file *filp = vma->vm_file;
        struct ll_file_data *fd = filp->private_data;
        struct inode *inode = filp->f_dentry->d_inode;
        struct lustre_handle lockh = { 0 };
        ldlm_policy_data_t policy;
        ldlm_mode_t mode;
        struct page *page = NULL;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_service_time *stime;
        __u64 kms;
        unsigned long pgoff, size, rand_read, seq_read;
        int rc = 0;
        ENTRY;

        if (lli->lli_smd == NULL) {
                CERROR("No lsm on fault?\n");
                RETURN(NULL);
        }

        /* start and end the lock on the first and last bytes in the page */
        policy_from_vma(&policy, vma, address, PAGE_CACHE_SIZE);

        CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
               vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);

        mode = mode_from_vma(vma);
        stime = (mode & LCK_PW) ? &ll_i2sbi(inode)->ll_write_stime :
                                  &ll_i2sbi(inode)->ll_read_stime;

        rc = ll_extent_lock(fd, inode, lli->lli_smd, mode, &policy,
                            &lockh, LDLM_FL_CBPENDING, stime);
        if (rc != 0)
                RETURN(NULL);

        /* XXX change inode size without i_sem hold! there is a race condition
         *     with the truncate path. (see ll_extent_lock) */
        down(&lli->lli_size_sem);
        kms = lov_merge_size(lli->lli_smd, 1);
        pgoff = ((address - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
        size = (kms + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        if (pgoff >= size) {
                up(&lli->lli_size_sem);
                ll_glimpse_size(inode);
        } else {
                inode->i_size = kms;
                up(&lli->lli_size_sem);
        }

        /* disable VM_SEQ_READ and use VM_RAND_READ to make sure that
         * the kernel will not read pages not covered by this dlm lock in
         * filemap_nopage(); we do our own readahead in ll_readpage() */
        rand_read = vma->vm_flags & VM_RAND_READ;
        seq_read = vma->vm_flags & VM_SEQ_READ;
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        page = filemap_nopage(vma, address, type);
#else
        page = filemap_nopage(vma, address, unused);
#endif
        vma->vm_flags &= ~VM_RAND_READ;
        vma->vm_flags |= (rand_read | seq_read);

        ll_extent_unlock(fd, inode, ll_i2info(inode)->lli_smd, mode, &lockh);
        RETURN(page);
}
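
/*
 * Worked example for the kms check in ll_nopage() above (illustrative
 * values, 4096-byte pages): with kms = 0x5001 the file spans
 * size = (0x5001 + 0xfff) >> 12 = 6 pages.  A fault at pgoff 3 is within
 * the known size, so i_size is updated from kms directly; a fault at
 * pgoff 8 is past it, so ll_glimpse_size() asks the OSTs for the
 * authoritative size before the page is mapped.
 */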
/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma,
                                         __u64 byte)
{
        return vma->vm_start +
               (byte - ((__u64)vma->vm_pgoff << PAGE_CACHE_SHIFT));
}
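
/*
 * Worked example (illustrative values, 4096-byte pages): for a vma with
 * vm_start = 0x20000000 and vm_pgoff = 2, file byte 0x3000 maps to
 * 0x20000000 + (0x3000 - (2 << 12)) = 0x20001000; this is the inverse of
 * the policy_from_vma() arithmetic above.
 */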
#define VMA_DEBUG(vma, fmt, arg...)                                          \
        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) "   \
               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end,  \
               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,               \
               vma->vm_file->f_dentry->d_inode->i_ino,                       \
               vma->vm_file->f_dentry->d_iname, ## arg);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/* [first, last] are the byte offsets affected.
 * vm_{start, end} are user addresses of the first byte of the mapping and
 * the next byte beyond it
 * vm_pgoff is the page index of the first byte in the mapping */
static void teardown_vmas(struct vm_area_struct *vma, __u64 first,
                          __u64 last)
{
        unsigned long address, len;

        for (; vma; vma = vma->vm_next_share) {
                if (last >> PAGE_SHIFT < vma->vm_pgoff)
                        continue;
                if (first >> PAGE_CACHE_SHIFT > (vma->vm_pgoff +
                    ((vma->vm_end - vma->vm_start) >> PAGE_CACHE_SHIFT)))
                        continue;

                /* XXX when unmapping a running file, don't unmap its
                 * private writable (COW) mappings here, even though
                 * skipping them breaks private mappings a little.
                 *
                 * The clean way is to check the mapping of every page
                 * and only unmap the non-COW pages, just like
                 * unmap_mapping_range() with even_cows=0 in kernel 2.6.
                 */
                if (!(vma->vm_flags & VM_SHARED) &&
                    (vma->vm_flags & VM_WRITE))
                        continue;

                address = max((unsigned long)vma->vm_start,
                              file_to_user(vma, first));
                len = min((unsigned long)vma->vm_end,
                          file_to_user(vma, last) + 1) - address;

                VMA_DEBUG(vma, "zapping vma [first="LPU64" last="LPU64" "
                          "address=%ld len=%ld]\n", first, last, address, len);
                ll_zap_page_range(vma, address, len);
        }
}
#endif
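
/*
 * Worked example for the address/len clamping in teardown_vmas() above
 * (illustrative values, 4096-byte pages): zapping file bytes
 * [first = 0x3000, last = 0x4fff] in a vma with vm_start = 0x20000000,
 * vm_end = 0x20004000 and vm_pgoff = 2 gives
 *   address = max(0x20000000, file_to_user(vma, 0x3000)) = 0x20001000
 *   len     = min(0x20004000, file_to_user(vma, 0x4fff) + 1) - address
 *           = 0x20003000 - 0x20001000 = 0x2000
 * so exactly the two pages backed by those file bytes are unmapped.
 */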
/* XXX put nice comment here. talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first,
                      __u64 last)
{
        int rc = -ENOENT;
        ENTRY;

        LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }
#else
        spin_lock(&mapping->i_shared_lock);
        if (mapping->i_mmap != NULL) {
                rc = 0;
                teardown_vmas(mapping->i_mmap, first, last);
        }
        if (mapping->i_mmap_shared != NULL) {
                rc = 0;
                teardown_vmas(mapping->i_mmap_shared, first, last);
        }
        spin_unlock(&mapping->i_shared_lock);
#endif
        RETURN(rc);
}
static void ll_close_vma(struct vm_area_struct *vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct address_space *mapping = inode->i_mapping;
        unsigned long next, size, end;
        struct ll_async_page *llap;
        struct obd_export *exp;
        struct pagevec pvec;
        int i;
        ENTRY;

        if (!(vma->vm_flags & VM_SHARED)) {
                EXIT;
                return;
        }

        /* all pte's are synced to mem_map by the moment
         * we scan backing store and put all dirty pages
         * onto pending list to track flushing */

        LASSERT(LLI_DIRTY_HANDLE(inode));
        exp = ll_i2dtexp(inode);
        if (exp == NULL) {
                CERROR("can't get export for the inode\n");
                EXIT;
                return;
        }

        pagevec_init(&pvec, 0);
        next = vma->vm_pgoff;
        size = (vma->vm_end - vma->vm_start) / PAGE_SIZE;
        end = next + size - 1;

        CDEBUG(D_INODE, "close vma 0x%p[%lu/%lu/%lu from %lu/%u]\n", vma,
               next, size, end, inode->i_ino, inode->i_generation);

        while (next <= end &&
               pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        if (page->index > next)
                                next = page->index;
                        if (next > end)
                                break;
                        next++;

                        if (page->mapping != mapping || !PageDirty(page))
                                continue;

                        llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
                        if (IS_ERR(llap)) {
                                CERROR("can't get llap\n");
                                continue;
                        }

                        llap_write_pending(inode, llap);
                }
                pagevec_release(&pvec);
        }
        EXIT;
}
static struct vm_operations_struct ll_file_vm_ops = {
        .nopage = ll_nopage,
        .close  = ll_close_vma,
};

/* Audit functions */
extern int ll_audit_log(struct inode *, audit_op, int);

int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        int rc;
        ENTRY;

        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
                struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
                vma->vm_ops = &ll_file_vm_ops;

                /* mark the i/o epoch dirty */
                if (vma->vm_flags & VM_SHARED)
                        set_bit(LLI_F_DIRTY_HANDLE, &lli->lli_flags);
        }

        ll_audit_log(file->f_dentry->d_inode, AUDIT_MMAP, rc);

        RETURN(rc);
}