lustre/llite/llite_mmap.c (fs/lustre-release.git, b=19312)
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

#define VMA_DEBUG(vma, fmt, arg...)                                          \
        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) "   \
               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end,  \
               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,               \
               vma->vm_file->f_dentry->d_inode->i_ino,                       \
               vma->vm_file->f_dentry->d_iname, ## arg);

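/*
 * One node of the per-call lock tree: it records the extent lock that must
 * be taken on a file (identified by lt_inode/lt_oid) before an I/O can touch
 * a user buffer mmapped from that file.  Nodes sit in the rb-tree rooted at
 * ll_lock_tree.lt_root until their lock is granted, after which they are
 * moved to lt_locked_list via lt_locked_item.
 */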
struct ll_lock_tree_node {
        rb_node_t               lt_node;
        struct list_head        lt_locked_item;
        __u64                   lt_oid;
        ldlm_policy_data_t      lt_policy;
        struct lustre_handle    lt_lockh;
        ldlm_mode_t             lt_mode;
        struct inode           *lt_inode;
};

int lt_get_mmap_locks(struct ll_lock_tree *tree,
                      unsigned long addr, size_t count);

static struct vm_operations_struct ll_file_vm_ops;

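/*
 * Allocate and initialize a lock tree node for the extent [start, end] of
 * @inode, to be locked with DLM mode @mode.  Returns ERR_PTR(-ENOMEM) on
 * allocation failure.
 */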
struct ll_lock_tree_node *ll_node_from_inode(struct inode *inode, __u64 start,
                                             __u64 end, ldlm_mode_t mode)
{
        struct ll_lock_tree_node *node;

        OBD_ALLOC(node, sizeof(*node));
        if (node == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        node->lt_inode = inode;
        node->lt_oid = ll_i2info(inode)->lli_smd->lsm_object_id;
        node->lt_policy.l_extent.start = start;
        node->lt_policy.l_extent.end = end;
        memset(&node->lt_lockh, 0, sizeof(node->lt_lockh));
        INIT_LIST_HEAD(&node->lt_locked_item);
        node->lt_mode = mode;

        return node;
}

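/*
 * Total order used by the lock tree: nodes are compared first by superblock
 * device, then by object id, and finally by extent.  Overlapping extents on
 * the same object compare equal so that lt_insert() can merge them.
 */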
int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two)
{
        /* compare superblock devices first to avoid deadlocks across
         * multiple filesystems */
        if (one->lt_inode->i_sb->s_dev < two->lt_inode->i_sb->s_dev)
                return -1;
        if (one->lt_inode->i_sb->s_dev > two->lt_inode->i_sb->s_dev)
                return 1;

        if (one->lt_oid < two->lt_oid)
                return -1;
        if (one->lt_oid > two->lt_oid)
                return 1;

        if (one->lt_policy.l_extent.end < two->lt_policy.l_extent.start)
                return -1;
        if (one->lt_policy.l_extent.start > two->lt_policy.l_extent.end)
                return 1;

        return 0; /* they are the same object and overlap */
}

static void lt_merge(struct ll_lock_tree_node *dst,
                     struct ll_lock_tree_node *src)
{
        dst->lt_policy.l_extent.start = min(dst->lt_policy.l_extent.start,
                                            src->lt_policy.l_extent.start);
        dst->lt_policy.l_extent.end = max(dst->lt_policy.l_extent.end,
                                          src->lt_policy.l_extent.end);

        /* XXX could be a real call to the dlm to find superset modes */
        if (src->lt_mode == LCK_PW && dst->lt_mode != LCK_PW)
                dst->lt_mode = LCK_PW;
}

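/*
 * Insert @node into the tree.  If an existing node compares equal (same
 * object, overlapping extent), the two are merged with lt_merge(), the old
 * node is freed, and the insertion restarts with the enlarged extent.
 */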
static void lt_insert(struct ll_lock_tree *tree,
                      struct ll_lock_tree_node *node)
{
        struct ll_lock_tree_node *walk;
        rb_node_t **p, *parent;
        ENTRY;

restart:
        p = &tree->lt_root.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                walk = rb_entry(parent, struct ll_lock_tree_node, lt_node);
                switch (lt_compare(node, walk)) {
                case -1:
                        p = &(*p)->rb_left;
                        break;
                case 1:
                        p = &(*p)->rb_right;
                        break;
                case 0:
                        lt_merge(node, walk);
                        rb_erase(&walk->lt_node, &tree->lt_root);
                        OBD_FREE(walk, sizeof(*walk));
                        goto restart;
                        break;
                default:
                        LBUG();
                        break;
                }
        }
        rb_link_node(&node->lt_node, parent, p);
        rb_insert_color(&node->lt_node, &tree->lt_root);
        EXIT;
}

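/*
 * Return the leftmost (least, per lt_compare()) node in the tree, or NULL
 * if the tree is empty.
 */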
static struct ll_lock_tree_node *lt_least_node(struct ll_lock_tree *tree)
{
        rb_node_t *rbnode;
        struct ll_lock_tree_node *node = NULL;

        for (rbnode = tree->lt_root.rb_node; rbnode != NULL;
             rbnode = rbnode->rb_left) {
                if (rbnode->rb_left == NULL) {
                        node = rb_entry(rbnode, struct ll_lock_tree_node,
                                        lt_node);
                        break;
                }
        }
        RETURN(node);
}

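/*
 * Drop every extent lock on the tree's locked list, then free any nodes
 * still left in the rb-tree (locks that were never taken).  Returns the
 * result of the last ll_extent_unlock() call, or 0 if the list was empty.
 */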
int ll_tree_unlock(struct ll_lock_tree *tree)
{
        struct ll_lock_tree_node *node;
        struct list_head *pos, *n;
        struct inode *inode;
        int rc = 0;
        ENTRY;

        list_for_each_safe(pos, n, &tree->lt_locked_list) {
                node = list_entry(pos, struct ll_lock_tree_node,
                                  lt_locked_item);

                inode = node->lt_inode;
                rc = ll_extent_unlock(tree->lt_fd, inode,
                                      ll_i2info(inode)->lli_smd, node->lt_mode,
                                      &node->lt_lockh);
                if (rc != 0) {
                        /* XXX better message */
                        CERROR("couldn't unlock %d\n", rc);
                }
                list_del(&node->lt_locked_item);
                OBD_FREE(node, sizeof(*node));
        }

        while ((node = lt_least_node(tree))) {
                rb_erase(&node->lt_node, &tree->lt_root);
                OBD_FREE(node, sizeof(*node));
        }

        RETURN(rc);
}

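/*
 * Take all extent locks needed for an I/O to the iovec: @first_node covers
 * the file being read or written itself, and lt_get_mmap_locks() adds a node
 * for every mmapped file region the user buffer overlaps.  Locks are taken
 * in lt_compare() order (least node first); on any failure all locks taken
 * so far are dropped via ll_tree_unlock().
 */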
int ll_tree_lock_iov(struct ll_lock_tree *tree,
                 struct ll_lock_tree_node *first_node,
                 const struct iovec *iov, unsigned long nr_segs, int ast_flags)
{
        struct ll_lock_tree_node *node;
        int rc = 0;
        unsigned long seg;
        ENTRY;

        tree->lt_root.rb_node = NULL;
        INIT_LIST_HEAD(&tree->lt_locked_list);
        if (first_node != NULL)
                lt_insert(tree, first_node);

        /* Taking the locks in a fixed global order avoids a subtle deadlock:
         * client1 reads file1 into a buffer mmapped from file2 while, at the
         * same time, client2 reads file2 into a buffer mmapped from file1. */
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *iv = &iov[seg];
                rc = lt_get_mmap_locks(tree, (unsigned long)iv->iov_base,
                                       iv->iov_len);
                if (rc)
                        GOTO(out, rc);
        }

        while ((node = lt_least_node(tree))) {
                struct inode *inode = node->lt_inode;
                rc = ll_extent_lock(tree->lt_fd, inode,
                                    ll_i2info(inode)->lli_smd, node->lt_mode,
                                    &node->lt_policy, &node->lt_lockh,
                                    ast_flags);
                if (rc != 0)
                        GOTO(out, rc);

                rb_erase(&node->lt_node, &tree->lt_root);
                list_add_tail(&node->lt_locked_item, &tree->lt_locked_list);
        }
        RETURN(rc);
out:
        ll_tree_unlock(tree);
        RETURN(rc);
}

int ll_tree_lock(struct ll_lock_tree *tree,
                 struct ll_lock_tree_node *first_node,
                 const char *buf, size_t count, int ast_flags)
{
        struct iovec local_iov = { .iov_base = (void __user *)buf,
                                   .iov_len = count };

        return ll_tree_lock_iov(tree, first_node, &local_iov, 1, ast_flags);
}

static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
{
        /* we only want to hold PW locks if the mmap() can generate
         * writes back to the file and that only happens in shared
         * writable vmas */
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
                return LCK_PW;
        return LCK_PR;
}

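/*
 * The two helpers below build the DLM extent for a byte range: the start is
 * the file offset of the first page touched and the end is rounded up to the
 * last byte of the last page touched, so a lock always covers whole pages.
 */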
static void policy_from_vma_pgoff(ldlm_policy_data_t *policy,
                                  struct vm_area_struct *vma,
                                  __u64 pgoff, size_t count)
{
        policy->l_extent.start = pgoff << CFS_PAGE_SHIFT;
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~CFS_PAGE_MASK;
}

static void policy_from_vma(ldlm_policy_data_t *policy,
                            struct vm_area_struct *vma, unsigned long addr,
                            size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
                                 ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~CFS_PAGE_MASK;
}

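/*
 * Find the first vma in the current process that overlaps [addr, addr+count)
 * and is a shared mapping of a Lustre file (its vm_ops are ll_file_vm_ops).
 * Returns NULL if there is no such vma or the task has no mm.
 */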
static struct vm_area_struct *our_vma(unsigned long addr, size_t count)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        /* No mm (e.g. NFS)?  Then there are no vmas either. */
        if (!mm)
                RETURN(NULL);

        spin_lock(&mm->page_table_lock);
        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        spin_unlock(&mm->page_table_lock);
        RETURN(ret);
}

int ll_region_mapped(unsigned long addr, size_t count)
{
        return !!our_vma(addr, count);
}

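/*
 * Walk every Lustre vma that overlaps the user buffer [addr, addr+count)
 * and insert a lock tree node for the corresponding file extent, so the
 * caller locks the mmapped files before faulting their pages in.
 * Returns 0 on success or -ENOMEM if a node cannot be allocated.
 */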
int lt_get_mmap_locks(struct ll_lock_tree *tree,
                      unsigned long addr, size_t count)
{
        struct vm_area_struct *vma;
        struct ll_lock_tree_node *node;
        ldlm_policy_data_t policy;
        struct inode *inode;
        ENTRY;

        if (count == 0)
                RETURN(0);

        /* we need to look up vmas on page aligned addresses */
        count += addr & (~CFS_PAGE_MASK);
        addr &= CFS_PAGE_MASK;

        while ((vma = our_vma(addr, count)) != NULL) {
                LASSERT(vma->vm_file);

                inode = vma->vm_file->f_dentry->d_inode;
                policy_from_vma(&policy, vma, addr, count);
                node = ll_node_from_inode(inode, policy.l_extent.start,
                                          policy.l_extent.end,
                                          mode_from_vma(vma));
                if (IS_ERR(node)) {
                        CERROR("not enough mem for lock_tree_node!\n");
                        RETURN(-ENOMEM);
                }
                lt_insert(tree, node);

                if (vma->vm_end - addr >= count)
                        break;
                count -= vma->vm_end - addr;
                addr = vma->vm_end;
        }
        RETURN(0);
}

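/*
 * Take a DLM extent lock covering the page at @pgoff before it is faulted
 * in: pick PR or PW from the vma flags, lock the page-sized extent, refresh
 * i_size from the known minimum size (kms), extend kms for writable
 * mappings, and switch the vma to VM_RAND_READ so the generic fault code
 * does not read ahead past the locked extent.  Returns 1 on success and 0 on
 * failure; on success the lock handle is stored in @lockh and the original
 * readahead flags in @save_flags.
 */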
static int ll_get_extent_lock(struct vm_area_struct *vma, unsigned long pgoff,
                              int *save_flags, struct lustre_handle *lockh)
{
        struct file *filp = vma->vm_file;
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        struct inode *inode = filp->f_dentry->d_inode;
        ldlm_policy_data_t policy;
        ldlm_mode_t mode;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct ost_lvb lvb;
        __u64 kms, old_mtime;
        unsigned long size;
        ENTRY;

        if (lli->lli_smd == NULL) {
                CERROR("No lsm on fault?\n");
                RETURN(0);
        }

        ll_clear_file_contended(inode);

        /* start and end the lock on the first and last bytes in the page */
        policy_from_vma_pgoff(&policy, vma, pgoff, CFS_PAGE_SIZE);

        CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
               vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);

        mode = mode_from_vma(vma);
        old_mtime = LTIME_S(inode->i_mtime);

        if (ll_extent_lock(fd, inode, lli->lli_smd, mode, &policy,
                           lockh, LDLM_FL_CBPENDING | LDLM_FL_NO_LRU) != 0)
                RETURN(0);

        if (vma->vm_flags & VM_EXEC && LTIME_S(inode->i_mtime) != old_mtime)
                CWARN("binary changed. inode %lu\n", inode->i_ino);

        lov_stripe_lock(lli->lli_smd);
        inode_init_lvb(inode, &lvb);
        if (obd_merge_lvb(ll_i2obdexp(inode), lli->lli_smd, &lvb, 1)) {
                lov_stripe_unlock(lli->lli_smd);
                RETURN(0);
        }
        kms = lvb.lvb_size;

        size = (kms + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
        CDEBUG(D_INFO, "Kms %lu - %lu\n", size, pgoff);

        if (pgoff >= size) {
                lov_stripe_unlock(lli->lli_smd);
                ll_glimpse_size(inode, LDLM_FL_BLOCK_GRANTED);
                lov_stripe_lock(lli->lli_smd);
        } else {
                /* XXX change inode size without ll_inode_size_lock() held!
                 *     there is a race condition with truncate path. (see
                 *     ll_extent_lock) */
                /* XXX i_size_write() is not used because it is not safe to
                 *     take the ll_inode_size_lock() due to a potential lock
                 *     inversion (bug 6077).  And since it's not safe to use
                 *     i_size_write() without a covering mutex we do the
                 *     assignment directly.  It is not critical that the
                 *     size be correct. */
                /* NOTE: region is within kms and, hence, within real file size (A).
                 * We need to increase i_size to cover the read region so that
                 * generic_file_read() will do its job, but that doesn't mean
                 * the kms size is _correct_, it is only the _minimum_ size.
                 * If someone does a stat they will get the correct size which
                 * will always be >= the kms value here.  b=11081 */
                if (i_size_read(inode) < kms) {
                        inode->i_size = kms;
                        CDEBUG(D_INODE, "ino=%lu, updating i_size %llu\n",
                               inode->i_ino, i_size_read(inode));
                }
        }

        /* If the mapping is writable, adjust kms to cover this page,
         * but do not extend kms beyond the actual file size.
         * policy.l_extent.end is set to the end of the page by
         * policy_from_vma_pgoff().  bug 10919 */
        if (mode == LCK_PW)
                obd_adjust_kms(ll_i2obdexp(inode), lli->lli_smd,
                               min_t(loff_t, policy.l_extent.end + 1,
                               i_size_read(inode)), 0);
        lov_stripe_unlock(lli->lli_smd);

        /* Disable VM_SEQ_READ and use VM_RAND_READ to make sure that the
         * kernel will not read pages not covered by the DLM lock in
         * filemap_nopage(); we do our own readahead in ll_readpage(). */
        *save_flags = vma->vm_flags & (VM_RAND_READ | VM_SEQ_READ);
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        return 1;
}

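/*
 * Undo ll_get_extent_lock(): restore the vma's readahead flags saved in
 * @save_flags and release the extent lock held in @lockh.
 */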
static void ll_put_extent_lock(struct vm_area_struct *vma, int save_flags,
                               struct lustre_handle *lockh)
{
        struct file *filp = vma->vm_file;
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        struct inode *inode = filp->f_dentry->d_inode;
        ldlm_mode_t mode;

        mode = mode_from_vma(vma);
        vma->vm_flags &= ~(VM_RAND_READ | VM_SEQ_READ);
        vma->vm_flags |= save_flags;

        ll_extent_unlock(fd, inode, ll_i2info(inode)->lli_smd, mode, lockh);
}

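/*
 * Return the lustre_handle of a lock already held by this thread that covers
 * the extent [start, end], or NULL if the thread holds no such lock.  For
 * fast locks the single handle in ltd->u.lockh is used; for tree locks the
 * locked list is searched for a node whose extent contains the range.
 */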
struct lustre_handle *ltd2lockh(struct ll_thread_data *ltd,
                                __u64 start, __u64 end)
{
        ENTRY;
        if (NULL == ltd)
                RETURN(NULL);
        switch (ltd->lock_style) {
                case LL_LOCK_STYLE_FASTLOCK:
                        RETURN(&ltd->u.lockh);
                        break;
                case LL_LOCK_STYLE_TREELOCK: {
                        struct ll_lock_tree_node *node;
                        if (ltd->tree_list == NULL)
                                ltd->tree_list = &ltd->u.tree.lt_locked_list;

                        list_for_each_entry(node, ltd->tree_list, lt_locked_item) {
                                if (node->lt_policy.l_extent.start <= start &&
                                    node->lt_policy.l_extent.end >= end) {
                                        ltd->tree_list = node->lt_locked_item.prev;
                                        RETURN(&node->lt_lockh);
                                }
                        }
                }
                default:
                        break;
        }
        RETURN(NULL);
}

#ifndef HAVE_VM_OP_FAULT
/**
 * Page fault handler (old ->nopage() API).
 *
 * \param vma - virtual memory area related to the page fault
 * \param address - address at which the fault occurred
 * \param type - fault type, returned to the caller
 *
 * \return the allocated and filled page for \a address
 * \retval NOPAGE_SIGBUS if no page exists at this address
 * \retval NOPAGE_OOM if there is not enough memory to allocate a new page
 */
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type)
{
        struct lustre_handle lockh = { 0 };
        int save_flags = 0;
        unsigned long pgoff;
        struct page *page;
        ENTRY;

        pgoff = ((address - vma->vm_start) >> CFS_PAGE_SHIFT) + vma->vm_pgoff;
        if (!ll_get_extent_lock(vma, pgoff, &save_flags, &lockh))
                RETURN(NOPAGE_SIGBUS);

        page = filemap_nopage(vma, address, type);
        if (page != NOPAGE_SIGBUS && page != NOPAGE_OOM)
                LL_CDEBUG_PAGE(D_PAGE, page, "got addr %lu type %lx\n", address,
                               (long)type);
        else
                CDEBUG(D_PAGE, "got addr %lu type %lx - SIGBUS\n", address,
                               (long)type);

        ll_put_extent_lock(vma, save_flags, &lockh);

        RETURN(page);
}

#else
/* New ->fault() API */
/**
 * Page fault handler.
 *
 * \param vma - virtual memory area related to the page fault
 * \param vmf - fault descriptor; the faulted page is returned in vmf->page
 *
 * \return VM_FAULT_* status from filemap_fault()
 * \retval VM_FAULT_SIGBUS if the covering extent lock cannot be taken
 */
int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct lustre_handle lockh = { 0 };
        int save_flags = 0;
        int rc;
        ENTRY;

        if (!ll_get_extent_lock(vma, vmf->pgoff, &save_flags, &lockh))
                RETURN(VM_FAULT_SIGBUS);

        rc = filemap_fault(vma, vmf);
        if (vmf->page)
                LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
                               vmf->virtual_address);
        else
                CDEBUG(D_PAGE, "got addr %p - SIGBUS\n",
                       vmf->virtual_address);

        ll_put_extent_lock(vma, save_flags, &lockh);

        RETURN(rc);
}
#endif

/* To avoid cancelling the locks that cover a mmapped region under lock cache
 * pressure, we track the number of mapped vmas in lli_mmap_cnt.
 * ll_vm_open():  when the first vma is linked, split this file's locks from
 *                the lru.
 * ll_vm_close(): when the last vma is unlinked, join all this file's locks
 *                back to the lru.
 *
 * XXX for performance we do not check whether the vma region actually
 *     matches the lock extents.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

        LASSERT(vma->vm_file);

        spin_lock(&lli->lli_lock);
        LASSERT(atomic_read(&lli->lli_mmap_cnt) >= 0);

        atomic_inc(&lli->lli_mmap_cnt);
        if (atomic_read(&lli->lli_mmap_cnt) == 1) {
                struct lov_stripe_md *lsm = lli->lli_smd;
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                int count;

                spin_unlock(&lli->lli_lock);

                if (!lsm)
                        return;
                count = obd_join_lru(sbi->ll_osc_exp, lsm, 0);
                VMA_DEBUG(vma, "split %d unused locks from lru\n", count);
        } else {
                spin_unlock(&lli->lli_lock);
        }
}

static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

        LASSERT(vma->vm_file);

        spin_lock(&lli->lli_lock);
        LASSERT(atomic_read(&lli->lli_mmap_cnt) > 0);

        atomic_dec(&lli->lli_mmap_cnt);
        if (atomic_read(&lli->lli_mmap_cnt) == 0) {
                struct lov_stripe_md *lsm = lli->lli_smd;
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                int count;

                spin_unlock(&lli->lli_lock);

                if (!lsm)
                        return;
                count = obd_join_lru(sbi->ll_osc_exp, lsm, 1);
                VMA_DEBUG(vma, "join %d unused locks to lru\n", count);
        } else {
                spin_unlock(&lli->lli_lock);
        }
}

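/* For kernels without the ->fault() API, wrap the kernel's filemap_populate()
 * (saved in ll_file_mmap() when it is not exported) and always pass
 * nonblock = 1 so populate does not read ahead beyond the extent covered by
 * the DLM lock. */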
#ifndef HAVE_VM_OP_FAULT
#ifndef HAVE_FILEMAP_POPULATE
static int (*filemap_populate)(struct vm_area_struct *area,
                               unsigned long address, unsigned long len,
                               pgprot_t prot, unsigned long pgoff,
                               int nonblock);
#endif
static int ll_populate(struct vm_area_struct *area, unsigned long address,
                       unsigned long len, pgprot_t prot, unsigned long pgoff,
                       int nonblock)
{
        int rc = 0;
        ENTRY;

        /* always set nonblock to avoid page read-ahead */
        rc = filemap_populate(area, address, len, prot, pgoff, 1);
        RETURN(rc);
}
#endif

/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
{
        return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
}

/* XXX put nice comment here.  talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
        int rc = -ENOENT;
        ENTRY;

        LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }

        RETURN(rc);
}

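/* VM operations installed on Lustre file mappings by ll_file_mmap(); kernels
 * with the ->fault() API use ll_fault(), older kernels use ->nopage() and
 * ->populate(). */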
static struct vm_operations_struct ll_file_vm_ops = {
        .open           = ll_vm_open,
        .close          = ll_vm_close,
#ifdef HAVE_VM_OP_FAULT
        .fault          = ll_fault,
#else
        .nopage         = ll_nopage,
        .populate       = ll_populate,
#endif
};

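/*
 * mmap() entry point for Lustre files: perform the generic mmap, then
 * install ll_file_vm_ops (saving the kernel's populate method first when
 * needed) and glimpse the file so the inode's size and mtime are current.
 */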
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        int rc;
        ENTRY;

        ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode), LPROC_LL_MAP, 1);
        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
#if !defined(HAVE_FILEMAP_POPULATE) && !defined(HAVE_VM_OP_FAULT)
                if (!filemap_populate)
                        filemap_populate = vma->vm_ops->populate;
#endif
                vma->vm_ops = &ll_file_vm_ops;
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                rc = ll_glimpse_size(file->f_dentry->d_inode, 0);
        }

        RETURN(rc);
}