fs/lustre-release.git — lustre/llite/llite_mmap.c @ e45f84db4e0061efdc2c32cbb092643430697a8d
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/stat.h>
#include <asm/uaccess.h>
#include <asm/segment.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/iobuf.h>
#endif

#define DEBUG_SUBSYSTEM S_LLITE

//#include <lustre_mdc.h>
#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

#define VMA_DEBUG(vma, fmt, arg...)                                     \
        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) "   \
               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end,  \
               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,               \
               vma->vm_file->f_dentry->d_iname, ## arg);


struct ll_lock_tree_node {
        rb_node_t               lt_node;
        struct list_head        lt_locked_item;
        __u64                   lt_oid;
        ldlm_policy_data_t      lt_policy;
        struct lustre_handle    lt_lockh;
        ldlm_mode_t             lt_mode;
        struct inode           *lt_inode;
};

int lt_get_mmap_locks(struct ll_lock_tree *tree,
                      unsigned long addr, size_t count);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type);
#else

struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int unused);
#endif

struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
                                              __u64 end, ldlm_mode_t mode)
{
        struct ll_lock_tree_node *node;

        OBD_ALLOC(node, sizeof(*node));
        if (node == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        node->lt_inode = inode;
        node->lt_oid = ll_i2info(inode)->lli_smd->lsm_object_id;
        node->lt_policy.l_extent.start = start;
        node->lt_policy.l_extent.end = end;
        memset(&node->lt_lockh, 0, sizeof(node->lt_lockh));
        INIT_LIST_HEAD(&node->lt_locked_item);
        node->lt_mode = mode;

        return node;
}

int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two)
{
        /* Compare devices first so that locks on different filesystems are
         * ordered consistently and cannot deadlock each other. */
        if (one->lt_inode->i_sb->s_dev < two->lt_inode->i_sb->s_dev)
                return -1;
        if (one->lt_inode->i_sb->s_dev > two->lt_inode->i_sb->s_dev)
                return 1;

        if (one->lt_oid < two->lt_oid)
                return -1;
        if (one->lt_oid > two->lt_oid)
                return 1;

        if (one->lt_policy.l_extent.end < two->lt_policy.l_extent.start)
                return -1;
        if (one->lt_policy.l_extent.start > two->lt_policy.l_extent.end)
                return 1;

        return 0; /* they are the same object and overlap */
}
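
/*
 * Illustration (hypothetical values): two nodes on the same device and
 * object with extents [0, 4095] and [4096, 8191] do not overlap, so
 * lt_compare() orders the first before the second.  Extents [0, 8191] and
 * [4096, 12287] do overlap, so lt_compare() returns 0 and lt_insert() below
 * merges them into a single node covering [0, 12287], upgrading the mode to
 * LCK_PW if either node held it.
 */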

static void lt_merge(struct ll_lock_tree_node *dst,
                     struct ll_lock_tree_node *src)
{
        dst->lt_policy.l_extent.start = min(dst->lt_policy.l_extent.start,
                                            src->lt_policy.l_extent.start);
        dst->lt_policy.l_extent.end = max(dst->lt_policy.l_extent.end,
                                          src->lt_policy.l_extent.end);

        /* XXX could be a real call to the dlm to find superset modes */
        if (src->lt_mode == LCK_PW && dst->lt_mode != LCK_PW)
                dst->lt_mode = LCK_PW;
}

static void lt_insert(struct ll_lock_tree *tree,
                      struct ll_lock_tree_node *node)
{
        struct ll_lock_tree_node *walk;
        rb_node_t **p, *parent;
        ENTRY;

restart:
        p = &tree->lt_root.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                walk = rb_entry(parent, struct ll_lock_tree_node, lt_node);
                switch (lt_compare(node, walk)) {
                case -1:
                        p = &(*p)->rb_left;
                        break;
                case 1:
                        p = &(*p)->rb_right;
                        break;
                case 0:
                        lt_merge(node, walk);
                        rb_erase(&walk->lt_node, &tree->lt_root);
                        OBD_FREE(walk, sizeof(*walk));
                        goto restart;
                        break;
                default:
                        LBUG();
                        break;
                }
        }
        rb_link_node(&node->lt_node, parent, p);
        rb_insert_color(&node->lt_node, &tree->lt_root);
        EXIT;
}

static struct ll_lock_tree_node *lt_least_node(struct ll_lock_tree *tree)
{
        rb_node_t *rbnode;
        struct ll_lock_tree_node *node = NULL;

        for (rbnode = tree->lt_root.rb_node; rbnode != NULL;
             rbnode = rbnode->rb_left) {
                if (rbnode->rb_left == NULL) {
                        node = rb_entry(rbnode, struct ll_lock_tree_node,
                                        lt_node);
                        break;
                }
        }
        RETURN(node);
}

int ll_tree_unlock(struct ll_lock_tree *tree)
{
        struct ll_lock_tree_node *node;
        struct list_head *pos, *n;
        struct inode *inode;
        int rc = 0;
        ENTRY;

        list_for_each_safe(pos, n, &tree->lt_locked_list) {
                node = list_entry(pos, struct ll_lock_tree_node,
                                  lt_locked_item);

                inode = node->lt_inode;
                rc = ll_extent_unlock(tree->lt_fd, inode,
                                      ll_i2info(inode)->lli_smd, node->lt_mode,
                                      &node->lt_lockh);
                if (rc != 0) {
                        /* XXX better message */
                        CERROR("couldn't unlock %d\n", rc);
                }
                list_del(&node->lt_locked_item);
                OBD_FREE(node, sizeof(*node));
        }

        while ((node = lt_least_node(tree))) {
                rb_erase(&node->lt_node, &tree->lt_root);
                OBD_FREE(node, sizeof(*node));
        }

        RETURN(rc);
}

int ll_tree_lock(struct ll_lock_tree *tree,
                 struct ll_lock_tree_node *first_node,
                 const char *buf, size_t count, int ast_flags)
{
        struct ll_lock_tree_node *node;
        int rc = 0;
        ENTRY;

        tree->lt_root.rb_node = NULL;
        INIT_LIST_HEAD(&tree->lt_locked_list);
        if (first_node != NULL)
                lt_insert(tree, first_node);

        /* Avoid a subtle deadlock: client1 reads file1 into a buffer mmapped
         * from file2 while, at the same time, client2 reads file2 into a
         * buffer mmapped from file1. */
        rc = lt_get_mmap_locks(tree, (unsigned long)buf, count);
        if (rc)
                GOTO(out, rc);

        while ((node = lt_least_node(tree))) {
                struct inode *inode = node->lt_inode;
                rc = ll_extent_lock(tree->lt_fd, inode,
                                    ll_i2info(inode)->lli_smd, node->lt_mode,
                                    &node->lt_policy, &node->lt_lockh,
                                    ast_flags);
                if (rc != 0)
                        GOTO(out, rc);

                rb_erase(&node->lt_node, &tree->lt_root);
                list_add_tail(&node->lt_locked_item, &tree->lt_locked_list);
        }
        RETURN(rc);
out:
        ll_tree_unlock(tree);
        RETURN(rc);
}

static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
{
        /* we only want to hold PW locks if the mmap() can generate
         * writes back to the file and that only happens in shared
         * writable vmas */
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
                return LCK_PW;
        return LCK_PR;
}

static void policy_from_vma(ldlm_policy_data_t *policy,
                            struct vm_area_struct *vma, unsigned long addr,
                            size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
                                 (vma->vm_pgoff << CFS_PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~CFS_PAGE_MASK;
}
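
/*
 * Illustration (hypothetical numbers, assuming 4096-byte pages): for a vma
 * with vm_start = 0x10000 and vm_pgoff = 2, an access at addr = 0x11234 with
 * count = 4096 gives
 *   l_extent.start = ((0x11234 - 0x10000) & ~0xfff) + (2 << 12) = 0x3000
 *   l_extent.end   = (0x3000 + 4096 - 1) | 0xfff               = 0x3fff
 * i.e. the lock extent covers exactly the file page backing the accessed
 * address.
 */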

static struct vm_area_struct *our_vma(unsigned long addr, size_t count)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        /* No mm (e.g. under NFS)?  Then there are no vmas either. */
        if (!mm)
                RETURN(NULL);

        spin_lock(&mm->page_table_lock);
        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        spin_unlock(&mm->page_table_lock);
        RETURN(ret);
}

int lt_get_mmap_locks(struct ll_lock_tree *tree,
                      unsigned long addr, size_t count)
{
        struct vm_area_struct *vma;
        struct ll_lock_tree_node *node;
        ldlm_policy_data_t policy;
        struct inode *inode;
        ENTRY;

        if (count == 0)
                RETURN(0);

        /* we need to look up vmas on page aligned addresses */
        count += addr & (~CFS_PAGE_MASK);
        addr &= CFS_PAGE_MASK;

        while ((vma = our_vma(addr, count)) != NULL) {
                LASSERT(vma->vm_file);

                inode = vma->vm_file->f_dentry->d_inode;
                policy_from_vma(&policy, vma, addr, count);
                node = ll_node_from_inode(inode, policy.l_extent.start,
                                          policy.l_extent.end,
                                          mode_from_vma(vma));
                if (IS_ERR(node)) {
                        CERROR("not enough mem for lock_tree_node!\n");
                        RETURN(-ENOMEM);
                }
                lt_insert(tree, node);

                if (vma->vm_end - addr >= count)
                        break;
                count -= vma->vm_end - addr;
                addr = vma->vm_end;
        }
        RETURN(0);
}

/* FIXME: there is a pagefault race that goes as follows (2.4 kernels only):
 * 1. A user process on node A accesses a portion of a mapped file,
 *    resulting in a page fault.  The pagefault handler invokes the
 *    ll_nopage function, which reads the page into memory.
 * 2. A user process on node B writes to the same portion of the file
 *    (either via mmap or write()), which causes node A to cancel the
 *    lock and truncate the page.
 * 3. Node A then executes the rest of do_no_page(), entering the
 *    now-invalid page into the PTEs.
 *
 * Turning the whole of do_no_page() into a hook, so that both the page
 * cache insertion and the page table installation are covered by the dlm
 * lock, would eliminate this race.
 *
 * In 2.6, the truncate_count of the address_space covers this race.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type)
#else
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int type /* unused */)
#endif
{
        struct file *filp = vma->vm_file;
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        struct inode *inode = filp->f_dentry->d_inode;
        struct lustre_handle lockh = { 0 };
        ldlm_policy_data_t policy;
        ldlm_mode_t mode;
        struct page *page = NULL;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm;
        struct ost_lvb lvb;
        __u64 kms, old_mtime;
        unsigned long pgoff, size, rand_read, seq_read;
        int rc = 0;
        ENTRY;

        if (lli->lli_smd == NULL) {
                CERROR("No lsm on fault?\n");
                RETURN(NULL);
        }

        /* start and end the lock on the first and last bytes in the page */
        policy_from_vma(&policy, vma, address, CFS_PAGE_SIZE);

        CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
               vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);

        mode = mode_from_vma(vma);
        old_mtime = LTIME_S(inode->i_mtime);

        lsm = lli->lli_smd;
        rc = ll_extent_lock(fd, inode, lsm, mode, &policy,
                            &lockh, LDLM_FL_CBPENDING | LDLM_FL_NO_LRU);
        if (rc != 0)
                RETURN(NULL);

        if (vma->vm_flags & VM_EXEC && LTIME_S(inode->i_mtime) != old_mtime)
                CWARN("binary changed. inode %lu\n", inode->i_ino);

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;

        pgoff = ((address - vma->vm_start) >> CFS_PAGE_SHIFT) + vma->vm_pgoff;
        size = (kms + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
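
        /*
         * Illustration (hypothetical numbers, assuming 4096-byte pages):
         * with vm_start = 0x10000, vm_pgoff = 0 and address = 0x13000 the
         * faulting file page is pgoff = 3; if kms = 10000 bytes then
         * size = (10000 + 4095) >> 12 = 3 pages, so pgoff >= size and we
         * glimpse below to see whether the file has grown on the OSTs.
         */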

        if (pgoff >= size) {
                lov_stripe_unlock(lsm);
                ll_glimpse_size(inode, LDLM_FL_BLOCK_GRANTED);
        } else {
                /* XXX changing the inode size without ll_inode_size_lock()
                 *     held!  There is a race with the truncate path (see
                 *     ll_extent_lock). */
                /* The region is within kms and, hence, within the real file
                 * size (A).  We need to increase i_size to cover the read
                 * region so that generic_file_read() will do its job, but
                 * that doesn't mean the kms size is _correct_, it is only
                 * the _minimum_ size.  If someone does a stat they will get
                 * the correct size, which will always be >= the kms value
                 * here.  b=11081 */
                if (i_size_read(inode) < kms) {
                        i_size_write(inode, kms);
                        CDEBUG(D_INODE, "ino=%lu, updating i_size %llu\n",
                               inode->i_ino, i_size_read(inode));
                }
                lov_stripe_unlock(lsm);
        }

        /* If the mapping is writable, adjust kms to cover this page,
         * but do not extend kms beyond the actual file size.
         * policy.l_extent.end is set to the end of the page by
         * policy_from_vma (bug 10919). */
        lov_stripe_lock(lsm);
        if (mode == LCK_PW)
                obd_adjust_kms(ll_i2dtexp(inode), lsm,
                               min_t(loff_t, policy.l_extent.end,
                               i_size_read(inode)), 0);
        lov_stripe_unlock(lsm);

        /* Clear VM_SEQ_READ and set VM_RAND_READ so that filemap_nopage()
         * will not read ahead into pages that are not covered by the dlm
         * lock; we do our own readahead in ll_readpage(). */
        rand_read = vma->vm_flags & VM_RAND_READ;
        seq_read = vma->vm_flags & VM_SEQ_READ;
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        page = filemap_nopage(vma, address, type);
        LL_CDEBUG_PAGE(D_PAGE, page, "got addr %lu type %lx\n", address,
                       (long)type);
        vma->vm_flags &= ~VM_RAND_READ;
        vma->vm_flags |= (rand_read | seq_read);

        ll_extent_unlock(fd, inode, ll_i2info(inode)->lli_smd, mode, &lockh);
        RETURN(page);
}

/* To avoid cancelling the locks that cover a mmapped region under lock cache
 * pressure, we track the number of mapped vmas in lli_mmap_cnt.
 * ll_vm_open():  when the first vma is linked, split this file's locks from
 *                the lru.
 * ll_vm_close(): when the last vma is unlinked, join all this file's locks
 *                back to the lru.
 *
 * XXX for performance we do not check whether the vma and lock regions
 *     actually overlap.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

        LASSERT(vma->vm_file);

        spin_lock(&lli->lli_lock);
        LASSERT(atomic_read(&lli->lli_mmap_cnt) >= 0);

        atomic_inc(&lli->lli_mmap_cnt);
        if (atomic_read(&lli->lli_mmap_cnt) == 1) {
                struct lov_stripe_md *lsm = lli->lli_smd;
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                int count;

                spin_unlock(&lli->lli_lock);

                if (!lsm)
                        return;
                count = obd_join_lru(sbi->ll_dt_exp, lsm, 0);
                VMA_DEBUG(vma, "split %d unused locks from lru\n", count);
        } else {
                spin_unlock(&lli->lli_lock);
        }
}

static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

        LASSERT(vma->vm_file);

        spin_lock(&lli->lli_lock);
        LASSERT(atomic_read(&lli->lli_mmap_cnt) > 0);

        atomic_dec(&lli->lli_mmap_cnt);
        if (atomic_read(&lli->lli_mmap_cnt) == 0) {
                struct lov_stripe_md *lsm = lli->lli_smd;
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                int count;

                spin_unlock(&lli->lli_lock);

                if (!lsm)
                        return;
                count = obd_join_lru(sbi->ll_dt_exp, lsm, 1);
                VMA_DEBUG(vma, "join %d unused locks to lru\n", count);
        } else {
                spin_unlock(&lli->lli_lock);
        }
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
#ifndef HAVE_FILEMAP_POPULATE
static int (*filemap_populate)(struct vm_area_struct *area,
                               unsigned long address, unsigned long len,
                               pgprot_t prot, unsigned long pgoff,
                               int nonblock);
#endif
static int ll_populate(struct vm_area_struct *area, unsigned long address,
                       unsigned long len, pgprot_t prot, unsigned long pgoff,
                       int nonblock)
{
        int rc = 0;
        ENTRY;

        /* always pass nonblock as true to avoid page read-ahead */
        rc = filemap_populate(area, address, len, prot, pgoff, 1);
        RETURN(rc);
}
#endif

/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
{
        return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
}
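
/*
 * Illustration (hypothetical numbers, assuming 4096-byte pages): for a vma
 * with vm_start = 0x10000 and vm_pgoff = 2, file byte 0x3000 maps back to
 * user address 0x10000 + (0x3000 - (2 << 12)) = 0x11000, the inverse of the
 * policy_from_vma() mapping above.
 */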

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/* [first, last] are the byte offsets affected.
 * vm_{start, end} are user addresses of the first byte of the mapping and
 *      the next byte beyond it
 * vm_pgoff is the page index of the first byte in the mapping */
static void teardown_vmas(struct vm_area_struct *vma, __u64 first,
                          __u64 last)
{
        unsigned long address, len;
        for (; vma; vma = vma->vm_next_share) {
                if (last >> CFS_PAGE_SHIFT < vma->vm_pgoff)
                        continue;
                if (first >> CFS_PAGE_SHIFT >= (vma->vm_pgoff +
                    ((vma->vm_end - vma->vm_start) >> CFS_PAGE_SHIFT)))
                        continue;

                /* XXX to avoid unmapping the COW pages of a running binary,
                 * do not unmap private writable mappings here, even though
                 * that leaves private mappings slightly stale.
                 *
                 * The clean way is to check the mapping of every page and
                 * unmap only the non-COW pages, just like
                 * unmap_mapping_range() with even_cows == 0 in kernel 2.6.
                 */
                if (!(vma->vm_flags & VM_SHARED) &&
                    (vma->vm_flags & VM_WRITE))
                        continue;

                address = max((unsigned long)vma->vm_start,
                              file_to_user(vma, first));
                len = min((unsigned long)vma->vm_end,
                          file_to_user(vma, last) + 1) - address;

                VMA_DEBUG(vma, "zapping vma [first="LPU64" last="LPU64" "
                          "address=%ld len=%ld]\n", first, last, address, len);
                LASSERT(len > 0);
                ll_zap_page_range(vma, address, len);
        }
}
#endif

/* XXX put nice comment here.  talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
        int rc = -ENOENT;
        ENTRY;

        LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }
#else
        spin_lock(&mapping->i_shared_lock);
        if (mapping->i_mmap != NULL) {
                rc = 0;
                teardown_vmas(mapping->i_mmap, first, last);
        }
        if (mapping->i_mmap_shared != NULL) {
                rc = 0;
                teardown_vmas(mapping->i_mmap_shared, first, last);
        }
        spin_unlock(&mapping->i_shared_lock);
#endif
        RETURN(rc);
}

static struct vm_operations_struct ll_file_vm_ops = {
        .nopage         = ll_nopage,
        .open           = ll_vm_open,
        .close          = ll_vm_close,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        .populate       = ll_populate,
#endif
};

int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        int rc;
        ENTRY;

        ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode), LPROC_LL_MAP, 1);
        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
#if !defined(HAVE_FILEMAP_POPULATE) && \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
                if (!filemap_populate)
                        filemap_populate = vma->vm_ops->populate;
#endif
                vma->vm_ops = &ll_file_vm_ops;
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                rc = ll_glimpse_size(file->f_dentry->d_inode, 0);
        }

        RETURN(rc);
}