/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/iobuf.h>
#endif

#define DEBUG_SUBSYSTEM S_LLITE

#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

#define VMA_DEBUG(vma, fmt, arg...)                                          \
        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) "   \
               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end,  \
               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,               \
               vma->vm_file->f_dentry->d_inode->i_ino,                       \
               vma->vm_file->f_dentry->d_iname, ## arg);                     \


struct ll_lock_tree_node {
        rb_node_t               lt_node;
        struct list_head        lt_locked_item;
        __u64                   lt_oid;
        ldlm_policy_data_t      lt_policy;
        struct lustre_handle    lt_lockh;
        ldlm_mode_t             lt_mode;
        struct inode           *lt_inode;
};

int lt_get_mmap_locks(struct ll_lock_tree *tree,
                      unsigned long addr, size_t count);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type);
#else

struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int unused);
#endif

struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
                                              __u64 end, ldlm_mode_t mode)
{
        struct ll_lock_tree_node *node;

        OBD_ALLOC(node, sizeof(*node));
        if (node == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        node->lt_inode = inode;
        node->lt_oid = ll_i2info(inode)->lli_smd->lsm_object_id;
        node->lt_policy.l_extent.start = start;
        node->lt_policy.l_extent.end = end;
        memset(&node->lt_lockh, 0, sizeof(node->lt_lockh));
        INIT_LIST_HEAD(&node->lt_locked_item);
        node->lt_mode = mode;

        return node;
}

int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two)
{
        /* Order by device first to avoid deadlock across multiple
         * filesystems, then by object id, then by extent. */
        if (one->lt_inode->i_sb->s_dev < two->lt_inode->i_sb->s_dev)
                return -1;
        if (one->lt_inode->i_sb->s_dev > two->lt_inode->i_sb->s_dev)
                return 1;

        if (one->lt_oid < two->lt_oid)
                return -1;
        if (one->lt_oid > two->lt_oid)
                return 1;

        if (one->lt_policy.l_extent.end < two->lt_policy.l_extent.start)
                return -1;
        if (one->lt_policy.l_extent.start > two->lt_policy.l_extent.end)
                return 1;

        return 0; /* they are the same object and overlap */
}
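
/* Illustrative example of the ordering above (values are made up, not from
 * the original source): with nodes A = {s_dev=8, oid=100, extent=[0, 4095]}
 * and B = {s_dev=8, oid=100, extent=[8192, 12287]}, lt_compare(A, B) returns
 * -1 because A's extent ends before B's starts; if the extents overlapped,
 * it would return 0 and lt_insert() below would merge the two nodes. */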

static void lt_merge(struct ll_lock_tree_node *dst,
                     struct ll_lock_tree_node *src)
{
        dst->lt_policy.l_extent.start = min(dst->lt_policy.l_extent.start,
                                            src->lt_policy.l_extent.start);
        dst->lt_policy.l_extent.end = max(dst->lt_policy.l_extent.end,
                                          src->lt_policy.l_extent.end);

        /* XXX could be a real call to the dlm to find superset modes */
        if (src->lt_mode == LCK_PW && dst->lt_mode != LCK_PW)
                dst->lt_mode = LCK_PW;
}

static void lt_insert(struct ll_lock_tree *tree,
                      struct ll_lock_tree_node *node)
{
        struct ll_lock_tree_node *walk;
        rb_node_t **p, *parent;
        ENTRY;

restart:
        p = &tree->lt_root.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                walk = rb_entry(parent, struct ll_lock_tree_node, lt_node);
                switch (lt_compare(node, walk)) {
                case -1:
                        p = &(*p)->rb_left;
                        break;
                case 1:
                        p = &(*p)->rb_right;
                        break;
                case 0:
                        lt_merge(node, walk);
                        rb_erase(&walk->lt_node, &tree->lt_root);
                        OBD_FREE(walk, sizeof(*walk));
                        goto restart;
                        break;
                default:
                        LBUG();
                        break;
                }
        }
        rb_link_node(&node->lt_node, parent, p);
        rb_insert_color(&node->lt_node, &tree->lt_root);
        EXIT;
}

static struct ll_lock_tree_node *lt_least_node(struct ll_lock_tree *tree)
{
        rb_node_t *rbnode;
        struct ll_lock_tree_node *node = NULL;

        for ( rbnode = tree->lt_root.rb_node; rbnode != NULL;
              rbnode = rbnode->rb_left) {
                if (rbnode->rb_left == NULL) {
                        node = rb_entry(rbnode, struct ll_lock_tree_node,
                                        lt_node);
                        break;
                }
        }
        RETURN(node);
}

int ll_tree_unlock(struct ll_lock_tree *tree)
{
        struct ll_lock_tree_node *node;
        struct list_head *pos, *n;
        struct inode *inode;
        int rc = 0;
        ENTRY;

        list_for_each_safe(pos, n, &tree->lt_locked_list) {
                node = list_entry(pos, struct ll_lock_tree_node,
                                  lt_locked_item);

                inode = node->lt_inode;
                rc = ll_extent_unlock(tree->lt_fd, inode,
                                      ll_i2info(inode)->lli_smd, node->lt_mode,
                                      &node->lt_lockh);
                if (rc != 0) {
                        /* XXX better message */
                        CERROR("couldn't unlock %d\n", rc);
                }
                list_del(&node->lt_locked_item);
                OBD_FREE(node, sizeof(*node));
        }

        while ((node = lt_least_node(tree))) {
                rb_erase(&node->lt_node, &tree->lt_root);
                OBD_FREE(node, sizeof(*node));
        }

        RETURN(rc);
}

int ll_tree_lock_iov(struct ll_lock_tree *tree,
                 struct ll_lock_tree_node *first_node,
                 const struct iovec *iov, unsigned long nr_segs, int ast_flags)
{
        struct ll_lock_tree_node *node;
        int rc = 0;
        unsigned long seg;
        ENTRY;

        tree->lt_root.rb_node = NULL;
        INIT_LIST_HEAD(&tree->lt_locked_list);
        if (first_node != NULL)
                lt_insert(tree, first_node);

        /* Avoid a subtle deadlock: client1 reads file1 into a buffer mmapped
         * from file2 while, at the same time, client2 reads file2 into a
         * buffer mmapped from file1. */
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *iv = &iov[seg];
                rc = lt_get_mmap_locks(tree, (unsigned long)iv->iov_base,
                                       iv->iov_len);
                if (rc)
                        GOTO(out, rc);
        }

        while ((node = lt_least_node(tree))) {
                struct inode *inode = node->lt_inode;
                rc = ll_extent_lock(tree->lt_fd, inode,
                                    ll_i2info(inode)->lli_smd, node->lt_mode,
                                    &node->lt_policy, &node->lt_lockh,
                                    ast_flags);
                if (rc != 0)
                        GOTO(out, rc);

                rb_erase(&node->lt_node, &tree->lt_root);
                list_add_tail(&node->lt_locked_item, &tree->lt_locked_list);
        }
        RETURN(rc);
out:
        ll_tree_unlock(tree);
        RETURN(rc);
}

int ll_tree_lock(struct ll_lock_tree *tree,
                 struct ll_lock_tree_node *first_node,
                 const char *buf, size_t count, int ast_flags)
{
        struct iovec local_iov = { .iov_base = (void __user *)buf,
                                   .iov_len = count };

        return ll_tree_lock_iov(tree, first_node, &local_iov, 1, ast_flags);
}
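
/* Usage sketch (illustrative only, not taken from this file): a read path
 * that copies "count" bytes of a file into a user buffer "buf" would
 * typically do something like
 *
 *         struct ll_lock_tree tree;
 *         struct ll_lock_tree_node *node;
 *
 *         tree.lt_fd = LUSTRE_FPRIVATE(file);
 *         node = ll_node_from_inode(inode, start, end, LCK_PR);
 *         rc = ll_tree_lock(&tree, node, buf, count, 0);
 *         if (rc == 0) {
 *                 ... do the I/O under the extent locks ...
 *                 rc = ll_tree_unlock(&tree);
 *         }
 *
 * (error handling elided) so that both the file being read and any files
 * mmapped behind "buf" are locked in a single, consistently ordered pass. */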

static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
{
        /* we only want to hold PW locks if the mmap() can generate
         * writes back to the file and that only happens in shared
         * writable vmas */
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
                return LCK_PW;
        return LCK_PR;
}
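
/* For example (illustrative, not from the original source): a mapping
 * created with mmap(..., PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0) gets
 * VM_SHARED and VM_WRITE set and is locked with LCK_PW, while a MAP_PRIVATE
 * or read-only mapping only ever needs LCK_PR, since its dirty pages are
 * never written back to the file. */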

static void policy_from_vma(ldlm_policy_data_t *policy,
                            struct vm_area_struct *vma, unsigned long addr,
                            size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
                                 ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~CFS_PAGE_MASK;
}
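
/* Worked example (assumed values, 4 KiB pages): with vm_start = 0x20000000,
 * vm_pgoff = 3 (the mapping starts at file offset 12288), addr = 0x20001234
 * and count = 1, (addr - vm_start) & CFS_PAGE_MASK = 0x1000, so
 * l_extent.start = 0x1000 + 12288 = 16384 and
 * l_extent.end = (16384 + 1 - 1) | 0xfff = 20479: the lock covers exactly
 * the one page containing the faulting byte. */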

static struct vm_area_struct * our_vma(unsigned long addr, size_t count)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        /* No MM (e.g. NFS kernel threads)?  Then no vmas either. */
        if (!mm)
                RETURN(NULL);

        spin_lock(&mm->page_table_lock);
        for(vma = find_vma(mm, addr);
            vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        spin_unlock(&mm->page_table_lock);
        RETURN(ret);
}

int ll_region_mapped(unsigned long addr, size_t count)
{
        return !!our_vma(addr, count);
}

int lt_get_mmap_locks(struct ll_lock_tree *tree,
                      unsigned long addr, size_t count)
{
        struct vm_area_struct *vma;
        struct ll_lock_tree_node *node;
        ldlm_policy_data_t policy;
        struct inode *inode;
        ENTRY;

        if (count == 0)
                RETURN(0);

        /* we need to look up vmas on page-aligned addresses */
        count += addr & (~CFS_PAGE_MASK);
        addr &= CFS_PAGE_MASK;

        while ((vma = our_vma(addr, count)) != NULL) {
                LASSERT(vma->vm_file);

                inode = vma->vm_file->f_dentry->d_inode;
                policy_from_vma(&policy, vma, addr, count);
                node = ll_node_from_inode(inode, policy.l_extent.start,
                                          policy.l_extent.end,
                                          mode_from_vma(vma));
                if (IS_ERR(node)) {
                        CERROR("not enough mem for lock_tree_node!\n");
                        RETURN(-ENOMEM);
                }
                lt_insert(tree, node);

                if (vma->vm_end - addr >= count)
                        break;
                count -= vma->vm_end - addr;
                addr = vma->vm_end;
        }
        RETURN(0);
}
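
/* Illustrative walk (assumed layout, not from the original source): if the
 * user buffer starts inside one Lustre mapping of file A and runs on into an
 * adjacent mapping of file B, the first iteration inserts a node for file A
 * starting at the buffer's offset within that mapping (the extent is
 * computed from the whole remaining count, so it may extend past what is
 * actually mapped from file A; locking too much is merely conservative),
 * then addr advances to vma->vm_end and count drops by the bytes consumed,
 * and a second iteration inserts a node covering the rest of the buffer in
 * the file B mapping. */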

/* FIXME: there is a pagefault race that goes as follows (2.4 only):
 * 1. A user process on node A accesses a portion of a mapped file,
 *    resulting in a page fault.  The pagefault handler invokes the
 *    ll_nopage function, which reads the page into memory.
 * 2. A user process on node B writes to the same portion of the file
 *    (either via mmap or write()), which causes node A to cancel the
 *    lock and truncate the page.
 * 3. Node A then executes the rest of do_no_page(), entering the
 *    now-invalid page into the PTEs.
 *
 * Making the whole of do_no_page() a hook that covers both the page cache
 * and the page-table installation under the dlm lock would eliminate this
 * race.
 *
 * In 2.6, the truncate_count of the address_space covers this race.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type)
#else
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int type /* unused */)
#endif
{
        struct file *filp = vma->vm_file;
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        struct inode *inode = filp->f_dentry->d_inode;
        struct lustre_handle lockh = { 0 };
        ldlm_policy_data_t policy;
        ldlm_mode_t mode;
        struct page *page = NULL;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm;
        struct ost_lvb lvb;
        __u64 kms, old_mtime;
        unsigned long pgoff, size, rand_read, seq_read;
        int rc = 0;
        ENTRY;

        if (lli->lli_smd == NULL) {
                CERROR("No lsm on fault?\n");
                RETURN(NULL);
        }

        ll_clear_file_contended(inode);

        /* start and end the lock on the first and last bytes in the page */
        policy_from_vma(&policy, vma, address, CFS_PAGE_SIZE);

        CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
               vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);

        mode = mode_from_vma(vma);
        old_mtime = LTIME_S(inode->i_mtime);

        lsm = lli->lli_smd;
        rc = ll_extent_lock(fd, inode, lsm, mode, &policy,
                            &lockh, LDLM_FL_CBPENDING | LDLM_FL_NO_LRU);
        if (rc != 0)
                RETURN(NULL);

        if (vma->vm_flags & VM_EXEC && LTIME_S(inode->i_mtime) != old_mtime)
                CWARN("binary changed. inode %lu\n", inode->i_ino);

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;

        pgoff = ((address - vma->vm_start) >> CFS_PAGE_SHIFT) + vma->vm_pgoff;
        size = (kms + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;

        if (pgoff >= size) {
                lov_stripe_unlock(lsm);
                ll_glimpse_size(inode, LDLM_FL_BLOCK_GRANTED);
        } else {
                /* XXX change inode size without ll_inode_size_lock() held!
                 *     there is a race condition with truncate path. (see
                 *     ll_extent_lock) */
                /* XXX i_size_write() is not used because it is not safe to
                 *     take the ll_inode_size_lock() due to a potential lock
                 *     inversion (bug 6077).  And since it's not safe to use
                 *     i_size_write() without a covering mutex we do the
                 *     assignment directly.  It is not critical that the
                 *     size be correct. */
                /* NOTE: region is within kms and, hence, within real file size (A).
                 * We need to increase i_size to cover the read region so that
                 * generic_file_read() will do its job, but that doesn't mean
                 * the kms size is _correct_, it is only the _minimum_ size.
                 * If someone does a stat they will get the correct size which
                 * will always be >= the kms value here.  b=11081 */
                if (i_size_read(inode) < kms) {
                        inode->i_size = kms;
                        CDEBUG(D_INODE, "ino=%lu, updating i_size %llu\n",
                               inode->i_ino, i_size_read(inode));
                }
                lov_stripe_unlock(lsm);
        }

        /* If the mapping is writable, adjust kms to cover this page, but do
         * not extend kms beyond the actual file size.  policy.l_extent.end
         * is set to the end of the page by policy_from_vma() (bug 10919). */
        lov_stripe_lock(lsm);
        if (mode == LCK_PW)
                obd_adjust_kms(ll_i2obdexp(inode), lsm,
                               min_t(loff_t, policy.l_extent.end + 1,
                               i_size_read(inode)), 0);
        lov_stripe_unlock(lsm);

        /* Disable VM_SEQ_READ and set VM_RAND_READ to make sure that the
         * kernel will not read ahead into pages not covered by the dlm lock
         * in filemap_nopage(); we do our own readahead in ll_readpage(). */
        rand_read = vma->vm_flags & VM_RAND_READ;
        seq_read = vma->vm_flags & VM_SEQ_READ;
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        page = filemap_nopage(vma, address, type);
        LL_CDEBUG_PAGE(D_PAGE, page, "got addr %lu type %lx\n", address,
                       (long)type);
        vma->vm_flags &= ~VM_RAND_READ;
        vma->vm_flags |= (rand_read | seq_read);

        ll_extent_unlock(fd, inode, ll_i2info(inode)->lli_smd, mode, &lockh);
        RETURN(page);
}

/* To avoid cancelling the locks that cover an mmapped region under lock
 * cache pressure, we track the number of mapped vmas in lli_mmap_cnt.
 * ll_vm_open():  when the first vma is linked, split this file's locks from
 *                the lru.
 * ll_vm_close(): when the last vma is unlinked, join all this file's locks
 *                back to the lru.
 *
 * XXX for performance we don't check whether the vma and lock regions
 *     actually overlap.
 */
static void ll_vm_open(struct vm_area_struct * vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

        LASSERT(vma->vm_file);

        spin_lock(&lli->lli_lock);
        LASSERT(atomic_read(&lli->lli_mmap_cnt) >= 0);

        atomic_inc(&lli->lli_mmap_cnt);
        if (atomic_read(&lli->lli_mmap_cnt) == 1) {
                struct lov_stripe_md *lsm = lli->lli_smd;
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                int count;

                spin_unlock(&lli->lli_lock);

                if (!lsm)
                        return;
                count = obd_join_lru(sbi->ll_osc_exp, lsm, 0);
                VMA_DEBUG(vma, "split %d unused locks from lru\n", count);
        } else {
                spin_unlock(&lli->lli_lock);
        }

}

static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

        LASSERT(vma->vm_file);

        spin_lock(&lli->lli_lock);
        LASSERT(atomic_read(&lli->lli_mmap_cnt) > 0);

        atomic_dec(&lli->lli_mmap_cnt);
        if (atomic_read(&lli->lli_mmap_cnt) == 0) {
                struct lov_stripe_md *lsm = lli->lli_smd;
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                int count;

                spin_unlock(&lli->lli_lock);

                if (!lsm)
                        return;
                count = obd_join_lru(sbi->ll_osc_exp, lsm, 1);
                VMA_DEBUG(vma, "join %d unused locks to lru\n", count);
        } else {
                spin_unlock(&lli->lli_lock);
        }
}
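
/* Lifecycle example (illustrative, not from the original source): the first
 * mmap() of a file bumps lli_mmap_cnt from 0 to 1 and ll_vm_open() calls
 * obd_join_lru(exp, lsm, 0) to pull the file's extent locks off the lru, so
 * lock cache pressure cannot cancel them while pages may still be mapped;
 * intermediate vma duplications and partial unmaps raise and lower the count
 * without touching the lru; only when the last vma goes away does
 * ll_vm_close() see the count drop back to 0 and call
 * obd_join_lru(exp, lsm, 1) to make the locks reclaimable again. */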

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
#ifndef HAVE_FILEMAP_POPULATE
static int (*filemap_populate)(struct vm_area_struct *area,
                               unsigned long address, unsigned long len,
                               pgprot_t prot, unsigned long pgoff,
                               int nonblock);
#endif
static int ll_populate(struct vm_area_struct *area, unsigned long address,
                       unsigned long len, pgprot_t prot, unsigned long pgoff,
                       int nonblock)
{
        int rc = 0;
        ENTRY;

        /* always pass nonblock as true to avoid page readahead */
        rc = filemap_populate(area, address, len, prot, pgoff, 1);
        RETURN(rc);
}
#endif

/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
{
        return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
}
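
/* Worked example (assumed values, 4 KiB pages): with vm_start = 0x20000000
 * and vm_pgoff = 3, file byte 16384 maps to user address
 * 0x20000000 + (16384 - 3 * 4096) = 0x20001000, i.e. this is the inverse of
 * the offset arithmetic in policy_from_vma() above. */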

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/* [first, last] are the byte offsets affected.
 * vm_{start, end} are user addresses of the first byte of the mapping and
 *      the next byte beyond it.
 * vm_pgoff is the page index of the first byte in the mapping. */
static void teardown_vmas(struct vm_area_struct *vma, __u64 first,
                          __u64 last)
{
        unsigned long address, len;
        for (; vma ; vma = vma->vm_next_share) {
                if (last >> CFS_PAGE_SHIFT < vma->vm_pgoff)
                        continue;
                if (first >> CFS_PAGE_SHIFT >= (vma->vm_pgoff +
                    ((vma->vm_end - vma->vm_start) >> CFS_PAGE_SHIFT)))
                        continue;

                /* XXX to avoid unmapping the COW pages of a running
                 * executable, don't unmap private writable mappings here,
                 * even though that breaks private mappings a little.
                 *
                 * The clean way is to check the mapping of every page and
                 * only unmap the non-COW pages, just like
                 * unmap_mapping_range() with even_cows = 0 in kernel 2.6.
                 */
                if (!(vma->vm_flags & VM_SHARED) &&
                    (vma->vm_flags & VM_WRITE))
                        continue;

                address = max((unsigned long)vma->vm_start,
                              file_to_user(vma, first));
                len = min((unsigned long)vma->vm_end,
                          file_to_user(vma, last) + 1) - address;

                VMA_DEBUG(vma, "zapping vma [first="LPU64" last="LPU64" "
                          "address=%ld len=%ld]\n", first, last, address, len);
                LASSERT(len > 0);
                ll_zap_page_range(vma, address, len);
        }
}
#endif

/* XXX put nice comment here.  talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
        int rc = -ENOENT;
        ENTRY;

        LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }
#else
        spin_lock(&mapping->i_shared_lock);
        if (mapping->i_mmap != NULL) {
                rc = 0;
                teardown_vmas(mapping->i_mmap, first, last);
        }
        if (mapping->i_mmap_shared != NULL) {
                rc = 0;
                teardown_vmas(mapping->i_mmap_shared, first, last);
        }
        spin_unlock(&mapping->i_shared_lock);
#endif
        RETURN(rc);
}
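
/* Illustrative call (assumed caller, not from this file): when the extent
 * lock covering bytes [start, end] of an inode is being cancelled, a caller
 * is expected to do
 *
 *         ll_teardown_mmaps(inode->i_mapping, start, end);
 *
 * so the corresponding ptes are removed from every mapping and the next
 * access refaults through ll_nopage(), which re-acquires a dlm lock before
 * the page is reinstalled. */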

static struct vm_operations_struct ll_file_vm_ops = {
        .nopage         = ll_nopage,
        .open           = ll_vm_open,
        .close          = ll_vm_close,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        .populate       = ll_populate,
#endif
};

int ll_file_mmap(struct file * file, struct vm_area_struct * vma)
{
        int rc;
        ENTRY;

        ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode), LPROC_LL_MAP, 1);
        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
#if !defined(HAVE_FILEMAP_POPULATE) && \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
                if (!filemap_populate)
                        filemap_populate = vma->vm_ops->populate;
#endif
                vma->vm_ops = &ll_file_vm_ops;
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                rc = ll_glimpse_size(file->f_dentry->d_inode, 0);
        }

        RETURN(rc);
}