lustre/llite/llite_mmap.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifdef HAVE_KERNEL_CONFIG_H
#include <linux/config.h>
#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/stat.h>
#include <asm/uaccess.h>
#include <asm/segment.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/iobuf.h>
#endif

#define DEBUG_SUBSYSTEM S_LLITE

#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

#define VMA_DEBUG(vma, fmt, arg...)                                     \
        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) "   \
               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end,  \
               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,               \
               vma->vm_file->f_dentry->d_inode->i_ino,                       \
               vma->vm_file->f_dentry->d_iname, ## arg);


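/* One node of the per-I/O lock tree: the object (inode + oid) and the
 * extent to lock, the requested lock mode, and the lock handle filled
 * in once the lock is granted so it can be released later. */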
struct ll_lock_tree_node {
        rb_node_t               lt_node;
        struct list_head        lt_locked_item;
        __u64                   lt_oid;
        ldlm_policy_data_t      lt_policy;
        struct lustre_handle    lt_lockh;
        ldlm_mode_t             lt_mode;
        struct inode           *lt_inode;
};

int lt_get_mmap_locks(struct ll_lock_tree *tree,
                      unsigned long addr, size_t count);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type);
#else

struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int unused);
#endif

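/* Allocate and initialize a lock tree node covering [start, end] on
 * 'inode' with lock mode 'mode'.  Returns ERR_PTR(-ENOMEM) if the
 * allocation fails. */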
struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
                                              __u64 end, ldlm_mode_t mode)
{
        struct ll_lock_tree_node *node;

        OBD_ALLOC(node, sizeof(*node));
        if (node == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        node->lt_inode = inode;
        node->lt_oid = ll_i2info(inode)->lli_smd->lsm_object_id;
        node->lt_policy.l_extent.start = start;
        node->lt_policy.l_extent.end = end;
        memset(&node->lt_lockh, 0, sizeof(node->lt_lockh));
        INIT_LIST_HEAD(&node->lt_locked_item);
        node->lt_mode = mode;

        return node;
}

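/* Compare two lock tree nodes: order by filesystem device, then object
 * id, then extent.  Returns 0 only when both nodes refer to the same
 * object and their extents overlap; lt_insert() merges them in that
 * case. */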
int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two)
{
        /* order across filesystems first, to avoid multi-fs deadlocks */
        if (one->lt_inode->i_sb->s_dev < two->lt_inode->i_sb->s_dev)
                return -1;
        if (one->lt_inode->i_sb->s_dev > two->lt_inode->i_sb->s_dev)
                return 1;

        if (one->lt_oid < two->lt_oid)
                return -1;
        if (one->lt_oid > two->lt_oid)
                return 1;

        if (one->lt_policy.l_extent.end < two->lt_policy.l_extent.start)
                return -1;
        if (one->lt_policy.l_extent.start > two->lt_policy.l_extent.end)
                return 1;

        return 0; /* they are the same object and overlap */
}

static void lt_merge(struct ll_lock_tree_node *dst,
                     struct ll_lock_tree_node *src)
{
        dst->lt_policy.l_extent.start = min(dst->lt_policy.l_extent.start,
                                            src->lt_policy.l_extent.start);
        dst->lt_policy.l_extent.end = max(dst->lt_policy.l_extent.end,
                                          src->lt_policy.l_extent.end);

        /* XXX could be a real call to the dlm to find superset modes */
        if (src->lt_mode == LCK_PW && dst->lt_mode != LCK_PW)
                dst->lt_mode = LCK_PW;
}

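/* Insert 'node' into the tree.  If it overlaps an existing node on the
 * same object, the existing node is merged into 'node' (superset
 * extent, stronger mode), removed and freed, and the insertion restarts
 * from the root. */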
static void lt_insert(struct ll_lock_tree *tree,
                      struct ll_lock_tree_node *node)
{
        struct ll_lock_tree_node *walk;
        rb_node_t **p, *parent;
        ENTRY;

restart:
        p = &tree->lt_root.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                walk = rb_entry(parent, struct ll_lock_tree_node, lt_node);
                switch (lt_compare(node, walk)) {
                case -1:
                        p = &(*p)->rb_left;
                        break;
                case 1:
                        p = &(*p)->rb_right;
                        break;
                case 0:
                        lt_merge(node, walk);
                        rb_erase(&walk->lt_node, &tree->lt_root);
                        OBD_FREE(walk, sizeof(*walk));
                        goto restart;
                        break;
                default:
                        LBUG();
                        break;
                }
        }
        rb_link_node(&node->lt_node, parent, p);
        rb_insert_color(&node->lt_node, &tree->lt_root);
        EXIT;
}

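/* Return the leftmost node in the tree, i.e. the smallest in the
 * lt_compare() ordering, or NULL if the tree is empty. */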
static struct ll_lock_tree_node *lt_least_node(struct ll_lock_tree *tree)
{
        rb_node_t *rbnode;
        struct ll_lock_tree_node *node = NULL;

        for (rbnode = tree->lt_root.rb_node; rbnode != NULL;
             rbnode = rbnode->rb_left) {
                if (rbnode->rb_left == NULL) {
                        node = rb_entry(rbnode, struct ll_lock_tree_node,
                                        lt_node);
                        break;
                }
        }
        RETURN(node);
}

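/* Release every extent lock on the tree's locked list and free the
 * nodes, then free any nodes still left in the tree (for example after
 * a partially failed ll_tree_lock()).  Returns the last unlock error,
 * if any. */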
int ll_tree_unlock(struct ll_lock_tree *tree)
{
        struct ll_lock_tree_node *node;
        struct list_head *pos, *n;
        struct inode *inode;
        int rc = 0;
        ENTRY;

        list_for_each_safe(pos, n, &tree->lt_locked_list) {
                node = list_entry(pos, struct ll_lock_tree_node,
                                  lt_locked_item);

                inode = node->lt_inode;
                rc = ll_extent_unlock(tree->lt_fd, inode,
                                      ll_i2info(inode)->lli_smd, node->lt_mode,
                                      &node->lt_lockh);
                if (rc != 0) {
                        /* XXX better message */
                        CERROR("couldn't unlock %d\n", rc);
                }
                list_del(&node->lt_locked_item);
                OBD_FREE(node, sizeof(*node));
        }

        while ((node = lt_least_node(tree))) {
                rb_erase(&node->lt_node, &tree->lt_root);
                OBD_FREE(node, sizeof(*node));
        }

        RETURN(rc);
}

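/* Acquire, in lt_compare() order, the extent locks needed for an I/O of
 * 'count' bytes through the user buffer 'buf': the optional lock for
 * the file being read/written (first_node) plus a lock for every Lustre
 * mapping that 'buf' overlaps.  On failure, everything taken so far is
 * dropped via ll_tree_unlock(). */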
int ll_tree_lock(struct ll_lock_tree *tree,
                 struct ll_lock_tree_node *first_node,
                 const char *buf, size_t count, int ast_flags)
{
        struct ll_lock_tree_node *node;
        int rc = 0;
        ENTRY;

        tree->lt_root.rb_node = NULL;
        INIT_LIST_HEAD(&tree->lt_locked_list);
        if (first_node != NULL)
                lt_insert(tree, first_node);

        /* Avoid a subtle deadlock: client1 reads file1 into a mapping of
         * file2 while, at the same time, client2 reads file2 into a
         * mapping of file1. */
        rc = lt_get_mmap_locks(tree, (unsigned long)buf, count);
        if (rc)
                GOTO(out, rc);

        while ((node = lt_least_node(tree))) {
                struct inode *inode = node->lt_inode;
                rc = ll_extent_lock(tree->lt_fd, inode,
                                    ll_i2info(inode)->lli_smd, node->lt_mode,
                                    &node->lt_policy, &node->lt_lockh,
                                    ast_flags);
                if (rc != 0)
                        GOTO(out, rc);

                rb_erase(&node->lt_node, &tree->lt_root);
                list_add_tail(&node->lt_locked_item, &tree->lt_locked_list);
        }
        RETURN(rc);
out:
        ll_tree_unlock(tree);
        RETURN(rc);
}

static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
{
        /* we only want to hold PW locks if the mmap() can generate
         * writes back to the file and that only happens in shared
         * writable vmas */
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
                return LCK_PW;
        return LCK_PR;
}

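/* Translate a user address range within 'vma' into the matching file
 * extent, expanded outward to page boundaries, for use as an ldlm
 * extent policy. */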
static void policy_from_vma(ldlm_policy_data_t *policy,
                            struct vm_area_struct *vma, unsigned long addr,
                            size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
                                 ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~CFS_PAGE_MASK;
}

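/* Find the first shared, Lustre-backed vma of the current process that
 * intersects [addr, addr + count), or NULL if there is none or the
 * caller has no mm. */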
static struct vm_area_struct * our_vma(unsigned long addr, size_t count)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        /* No MM (e.g. NFS)? No vmas either. */
        if (!mm)
                RETURN(NULL);

        spin_lock(&mm->page_table_lock);
        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        spin_unlock(&mm->page_table_lock);
        RETURN(ret);
}

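/* Walk the user buffer [addr, addr + count) and add a lock tree node
 * for each Lustre mapping it overlaps; see the deadlock note in
 * ll_tree_lock(). */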
int lt_get_mmap_locks(struct ll_lock_tree *tree,
                      unsigned long addr, size_t count)
{
        struct vm_area_struct *vma;
        struct ll_lock_tree_node *node;
        ldlm_policy_data_t policy;
        struct inode *inode;
        ENTRY;

        if (count == 0)
                RETURN(0);

        /* we need to look up vmas on page aligned addresses */
        count += addr & (~CFS_PAGE_MASK);
        addr &= CFS_PAGE_MASK;

        while ((vma = our_vma(addr, count)) != NULL) {
                LASSERT(vma->vm_file);

                inode = vma->vm_file->f_dentry->d_inode;
                policy_from_vma(&policy, vma, addr, count);
                node = ll_node_from_inode(inode, policy.l_extent.start,
                                          policy.l_extent.end,
                                          mode_from_vma(vma));
                if (IS_ERR(node)) {
                        CERROR("not enough mem for lock_tree_node!\n");
                        RETURN(-ENOMEM);
                }
                lt_insert(tree, node);

                if (vma->vm_end - addr >= count)
                        break;
                count -= vma->vm_end - addr;
                addr = vma->vm_end;
        }
        RETURN(0);
}

/* FIXME: there is a page fault race (2.4 only) that goes as follows:
 * 1. A user process on node A accesses a portion of a mapped file,
 *    resulting in a page fault.  The page fault handler invokes the
 *    ll_nopage function, which reads the page into memory.
 * 2. A user process on node B writes to the same portion of the file
 *    (either via mmap or write()), which causes node A to cancel the
 *    lock and truncate the page.
 * 3. Node A then executes the rest of do_no_page(), entering the
 *    now-invalid page into the PTEs.
 *
 * Making the whole of do_no_page() a hook, so that both the page cache
 * insertion and the page table installation are covered by the dlm lock,
 * would eliminate this race.
 *
 * In 2.6, the truncate_count of address_space covers this race.
 */
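/* Page fault handler for Lustre mappings: take a dlm extent lock
 * covering the faulting page, bring i_size/kms up to date so the page
 * is within the file size seen by the VM, call filemap_nopage() with
 * VM_RAND_READ forced on to suppress kernel readahead beyond the lock,
 * then drop the lock and return the page. */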
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type)
#else
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int type /* unused */)
#endif
{
        struct file *filp = vma->vm_file;
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        struct inode *inode = filp->f_dentry->d_inode;
        struct lustre_handle lockh = { 0 };
        ldlm_policy_data_t policy;
        ldlm_mode_t mode;
        struct page *page = NULL;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm;
        struct ost_lvb lvb;
        __u64 kms, old_mtime;
        unsigned long pgoff, size, rand_read, seq_read;
        int rc = 0;
        ENTRY;

        if (lli->lli_smd == NULL) {
                CERROR("No lsm on fault?\n");
                RETURN(NULL);
        }

        /* start and end the lock on the first and last bytes in the page */
        policy_from_vma(&policy, vma, address, CFS_PAGE_SIZE);

        CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
               vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);

        mode = mode_from_vma(vma);
        old_mtime = LTIME_S(inode->i_mtime);

        lsm = lli->lli_smd;
        rc = ll_extent_lock(fd, inode, lsm, mode, &policy,
                            &lockh, LDLM_FL_CBPENDING | LDLM_FL_NO_LRU);
        if (rc != 0)
                RETURN(NULL);

        if (vma->vm_flags & VM_EXEC && LTIME_S(inode->i_mtime) != old_mtime)
                CWARN("binary changed. inode %lu\n", inode->i_ino);

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;

        pgoff = ((address - vma->vm_start) >> CFS_PAGE_SHIFT) + vma->vm_pgoff;
        size = (kms + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;

        if (pgoff >= size) {
                lov_stripe_unlock(lsm);
                ll_glimpse_size(inode, LDLM_FL_BLOCK_GRANTED);
        } else {
                /* XXX change inode size without ll_inode_size_lock() held!
                 *     there is a race condition with truncate path. (see
                 *     ll_extent_lock) */
                /* region is within kms and, hence, within real file size (A).
                 * We need to increase i_size to cover the read region so that
                 * generic_file_read() will do its job, but that doesn't mean
                 * the kms size is _correct_, it is only the _minimum_ size.
                 * If someone does a stat they will get the correct size which
                 * will always be >= the kms value here.  b=11081 */
                if (inode->i_size < kms) {
                        inode->i_size = kms;
                        CDEBUG(D_INODE, "ino=%lu, updating i_size %llu\n",
                               inode->i_ino, inode->i_size);
                }
                lov_stripe_unlock(lsm);
        }

        /* If the mapping is writeable, adjust kms to cover this page,
         * but do not extend kms beyond the actual file size.
         * policy.l_extent.end is set to the end of the page by
         * policy_from_vma().  bug 10919 */
        lov_stripe_lock(lsm);
        if (mode == LCK_PW)
                obd_adjust_kms(ll_i2obdexp(inode), lsm,
                               min_t(loff_t, policy.l_extent.end,inode->i_size),
                               0);
        lov_stripe_unlock(lsm);

        /* Disable VM_SEQ_READ and use VM_RAND_READ so that filemap_nopage()
         * will not read ahead into pages that are not covered by our dlm
         * lock; we do our own readahead in ll_readpage(). */
        rand_read = vma->vm_flags & VM_RAND_READ;
        seq_read = vma->vm_flags & VM_SEQ_READ;
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        page = filemap_nopage(vma, address, type);
        LL_CDEBUG_PAGE(D_PAGE, page, "got addr %lu type %lx\n", address,
                       (long)type);
        vma->vm_flags &= ~VM_RAND_READ;
        vma->vm_flags |= (rand_read | seq_read);

        ll_extent_unlock(fd, inode, ll_i2info(inode)->lli_smd, mode, &lockh);
        RETURN(page);
}

/* To avoid cancelling the locks that cover a mmapped region under lock
 * cache pressure, we track the number of mapped vmas in lli_mmap_cnt.
 * ll_vm_open():  when the first vma is linked, split this file's locks
 *                out of the lru.
 * ll_vm_close(): when the last vma is unlinked, join all of this file's
 *                locks back into the lru.
 *
 * XXX for performance we do not check whether the vma and lock regions
 *     actually overlap.
 */
static void ll_vm_open(struct vm_area_struct * vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

        LASSERT(vma->vm_file);

        spin_lock(&lli->lli_lock);
        LASSERT(atomic_read(&lli->lli_mmap_cnt) >= 0);

        atomic_inc(&lli->lli_mmap_cnt);
        if (atomic_read(&lli->lli_mmap_cnt) == 1) {
                struct lov_stripe_md *lsm = lli->lli_smd;
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                int count;

                spin_unlock(&lli->lli_lock);

                if (!lsm)
                        return;
                count = obd_join_lru(sbi->ll_osc_exp, lsm, 0);
                VMA_DEBUG(vma, "split %d unused locks from lru\n", count);
        } else {
                spin_unlock(&lli->lli_lock);
        }
}

static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

        LASSERT(vma->vm_file);

        spin_lock(&lli->lli_lock);
        LASSERT(atomic_read(&lli->lli_mmap_cnt) > 0);

        atomic_dec(&lli->lli_mmap_cnt);
        if (atomic_read(&lli->lli_mmap_cnt) == 0) {
                struct lov_stripe_md *lsm = lli->lli_smd;
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                int count;

                spin_unlock(&lli->lli_lock);

                if (!lsm)
                        return;
                count = obd_join_lru(sbi->ll_osc_exp, lsm, 1);
                VMA_DEBUG(vma, "join %d unused locks to lru\n", count);
        } else {
                spin_unlock(&lli->lli_lock);
        }
}

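/* ->populate() wrapper: call the kernel's filemap_populate() with
 * nonblock forced on, so that pages are never read in here; missing
 * pages are instead faulted in through ll_nopage() under a dlm lock. */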
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
#ifndef HAVE_FILEMAP_POPULATE
static int (*filemap_populate)(struct vm_area_struct *area,
                               unsigned long address, unsigned long len,
                               pgprot_t prot, unsigned long pgoff,
                               int nonblock);
#endif
static int ll_populate(struct vm_area_struct *area, unsigned long address,
                       unsigned long len, pgprot_t prot, unsigned long pgoff,
                       int nonblock)
{
        int rc = 0;
        ENTRY;

        /* always set nonblock to avoid reading pages ahead of a fault */
        rc = filemap_populate(area, address, len, prot, pgoff, 1);
        RETURN(rc);
}
#endif

/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
{
        return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/* [first, last] are the byte offsets affected.
 * vm_{start, end} are user addresses of the first byte of the mapping and
 *      the next byte beyond it
 * vm_pgoff is the page index of the first byte in the mapping */
static void teardown_vmas(struct vm_area_struct *vma, __u64 first,
                          __u64 last)
{
        unsigned long address, len;
        for (; vma ; vma = vma->vm_next_share) {
                if (last >> CFS_PAGE_SHIFT < vma->vm_pgoff)
                        continue;
                if (first >> CFS_PAGE_SHIFT >= (vma->vm_pgoff +
                    ((vma->vm_end - vma->vm_start) >> CFS_PAGE_SHIFT)))
                        continue;

                /* XXX to avoid unmapping the COW pages of a running binary,
                 * don't unmap private writeable mappings here, even though
                 * that weakens private mappings a little.
                 *
                 * The clean way would be to check the mapping of every page
                 * and unmap only the non-COW pages, just like
                 * unmap_mapping_range() with even_cows == 0 in kernel 2.6.
                 */
                if (!(vma->vm_flags & VM_SHARED) &&
                    (vma->vm_flags & VM_WRITE))
                        continue;

                address = max((unsigned long)vma->vm_start,
                              file_to_user(vma, first));
                len = min((unsigned long)vma->vm_end,
                          file_to_user(vma, last) + 1) - address;

                VMA_DEBUG(vma, "zapping vma [first="LPU64" last="LPU64" "
                          "address=%ld len=%ld]\n", first, last, address, len);
                LASSERT(len > 0);
                ll_zap_page_range(vma, address, len);
        }
}
#endif

/* XXX put nice comment here.  talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte */
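/* Unmap, in every process, the user mappings of 'mapping' that cover
 * bytes [first, last], so that stale pages cannot be reached through
 * existing PTEs.  Returns 0 if the file was mapped, -ENOENT otherwise. */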
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
        int rc = -ENOENT;
        ENTRY;

        LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }
#else
        spin_lock(&mapping->i_shared_lock);
        if (mapping->i_mmap != NULL) {
                rc = 0;
                teardown_vmas(mapping->i_mmap, first, last);
        }
        if (mapping->i_mmap_shared != NULL) {
                rc = 0;
                teardown_vmas(mapping->i_mmap_shared, first, last);
        }
        spin_unlock(&mapping->i_shared_lock);
#endif
        RETURN(rc);
}

static struct vm_operations_struct ll_file_vm_ops = {
        .nopage         = ll_nopage,
        .open           = ll_vm_open,
        .close          = ll_vm_close,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        .populate       = ll_populate,
#endif
};

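/* ->mmap() method: let generic_file_mmap() set up the vma, then switch
 * it to ll_file_vm_ops (saving the kernel's populate pointer if needed),
 * count the mapping via ll_vm_open() and refresh the inode's size and
 * mtime with a glimpse. */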
int ll_file_mmap(struct file * file, struct vm_area_struct * vma)
{
        int rc;
        ENTRY;

        ll_vfs_ops_tally(ll_i2sbi(file->f_dentry->d_inode), VFS_OPS_MMAP);
        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
#if !defined(HAVE_FILEMAP_POPULATE) && \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
                if (!filemap_populate)
                        filemap_populate = vma->vm_ops->populate;
#endif
                vma->vm_ops = &ll_file_vm_ops;
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                rc = ll_glimpse_size(file->f_dentry->d_inode, 0);
        }

        RETURN(rc);
}