lustre/llite/llite_mmap.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

//#include <lustre_mdc.h>
#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

#define VMA_DEBUG(vma, fmt, arg...)                                          \
        CDEBUG(D_MMAP, "vma(%p) start(%lu) end(%lu) pgoff(%lu) inode(%p) "   \
               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end,  \
               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,               \
               vma->vm_file->f_dentry->d_inode->i_ino,                       \
               vma->vm_file->f_dentry->d_iname, ## arg);


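/*
 * One extent lock request.  A node lives in an rbtree (lt_node), ordered by
 * device, object id and extent, while the lock is queued, and is moved to the
 * tree's lt_locked_list (lt_locked_item) once the lock has been granted.
 */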
struct ll_lock_tree_node {
        rb_node_t               lt_node;
        struct list_head        lt_locked_item;
        __u64                   lt_oid;
        ldlm_policy_data_t      lt_policy;
        struct lustre_handle    lt_lockh;
        ldlm_mode_t             lt_mode;
        struct inode           *lt_inode;
};

int lt_get_mmap_locks(struct ll_lock_tree *tree,
                      unsigned long addr, size_t count);

struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type);

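/* Allocate and initialize a lock tree node covering the extent [start, end]
 * of @inode's object, to be locked with @mode. */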
struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
                                              __u64 end, ldlm_mode_t mode)
{
        struct ll_lock_tree_node *node;

        OBD_ALLOC(node, sizeof(*node));
        if (node == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        node->lt_inode = inode;
        node->lt_oid = ll_i2info(inode)->lli_smd->lsm_object_id;
        node->lt_policy.l_extent.start = start;
        node->lt_policy.l_extent.end = end;
        memset(&node->lt_lockh, 0, sizeof(node->lt_lockh));
        INIT_LIST_HEAD(&node->lt_locked_item);
        node->lt_mode = mode;

        return node;
}

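/* Ordering for the lock tree: compare by backing device, then object id,
 * then extent.  Overlapping extents on the same object compare equal so
 * that lt_insert() merges them into a single node. */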
int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two)
{
        /* compare devices first to avoid deadlocks across multiple
         * filesystems */
        if (one->lt_inode->i_sb->s_dev < two->lt_inode->i_sb->s_dev)
                return -1;
        if (one->lt_inode->i_sb->s_dev > two->lt_inode->i_sb->s_dev)
                return 1;

        if (one->lt_oid < two->lt_oid)
                return -1;
        if (one->lt_oid > two->lt_oid)
                return 1;

        if (one->lt_policy.l_extent.end < two->lt_policy.l_extent.start)
                return -1;
        if (one->lt_policy.l_extent.start > two->lt_policy.l_extent.end)
                return 1;

        return 0; /* they are the same object and overlap */
}

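/* Fold src's extent into dst, and upgrade dst's mode to PW if src needs PW. */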
static void lt_merge(struct ll_lock_tree_node *dst,
                     struct ll_lock_tree_node *src)
{
        dst->lt_policy.l_extent.start = min(dst->lt_policy.l_extent.start,
                                            src->lt_policy.l_extent.start);
        dst->lt_policy.l_extent.end = max(dst->lt_policy.l_extent.end,
                                          src->lt_policy.l_extent.end);

        /* XXX could be a real call to the dlm to find superset modes */
        if (src->lt_mode == LCK_PW && dst->lt_mode != LCK_PW)
                dst->lt_mode = LCK_PW;
}

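/* Insert a node into the tree, merging it with any node it overlaps.  The
 * descent restarts after every merge because the widened extent may now
 * overlap additional nodes. */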
static void lt_insert(struct ll_lock_tree *tree,
                      struct ll_lock_tree_node *node)
{
        struct ll_lock_tree_node *walk;
        rb_node_t **p, *parent;
        ENTRY;

restart:
        p = &tree->lt_root.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                walk = rb_entry(parent, struct ll_lock_tree_node, lt_node);
                switch (lt_compare(node, walk)) {
                case -1:
                        p = &(*p)->rb_left;
                        break;
                case 1:
                        p = &(*p)->rb_right;
                        break;
                case 0:
                        lt_merge(node, walk);
                        rb_erase(&walk->lt_node, &tree->lt_root);
                        OBD_FREE(walk, sizeof(*walk));
                        goto restart;
                        break;
                default:
                        LBUG();
                        break;
                }
        }
        rb_link_node(&node->lt_node, parent, p);
        rb_insert_color(&node->lt_node, &tree->lt_root);
        EXIT;
}

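/* Return the left-most (lowest ordered) node in the tree, or NULL if the
 * tree is empty. */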
static struct ll_lock_tree_node *lt_least_node(struct ll_lock_tree *tree)
{
        rb_node_t *rbnode;
        struct ll_lock_tree_node *node = NULL;

        for (rbnode = tree->lt_root.rb_node; rbnode != NULL;
             rbnode = rbnode->rb_left) {
                if (rbnode->rb_left == NULL) {
                        node = rb_entry(rbnode, struct ll_lock_tree_node,
                                        lt_node);
                        break;
                }
        }
        RETURN(node);
}

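/* Release every lock on lt_locked_list and free its nodes, then free any
 * nodes still queued in the rbtree.  Returns the result of the last
 * ll_extent_unlock() call, or 0 if the list was empty. */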
int ll_tree_unlock(struct ll_lock_tree *tree)
{
        struct ll_lock_tree_node *node;
        struct list_head *pos, *n;
        struct inode *inode;
        int rc = 0;
        ENTRY;

        list_for_each_safe(pos, n, &tree->lt_locked_list) {
                node = list_entry(pos, struct ll_lock_tree_node,
                                  lt_locked_item);

                inode = node->lt_inode;
                rc = ll_extent_unlock(tree->lt_fd, inode,
                                      ll_i2info(inode)->lli_smd, node->lt_mode,
                                      &node->lt_lockh);
                if (rc != 0) {
                        /* XXX better message */
                        CERROR("couldn't unlock %d\n", rc);
                }
                list_del(&node->lt_locked_item);
                OBD_FREE(node, sizeof(*node));
        }

        while ((node = lt_least_node(tree))) {
                rb_erase(&node->lt_node, &tree->lt_root);
                OBD_FREE(node, sizeof(*node));
        }

        RETURN(rc);
}

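/* Take, in tree order, the extent locks needed for the I/O: the caller's
 * first_node (if any) plus one lock per Lustre mmap backing the user buffer
 * [buf, buf + count).  On failure every lock taken so far is dropped. */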
int ll_tree_lock(struct ll_lock_tree *tree,
                 struct ll_lock_tree_node *first_node,
                 const char *buf, size_t count, int ast_flags)
{
        struct ll_lock_tree_node *node;
        int rc = 0;
        ENTRY;

        tree->lt_root.rb_node = NULL;
        INIT_LIST_HEAD(&tree->lt_locked_list);
        if (first_node != NULL)
                lt_insert(tree, first_node);

        /* Guard against this subtle deadlock: client1 reads file1 into a
         * buffer mmapped from file2 while, at the same time, client2 reads
         * file2 into a buffer mmapped from file1. */
        rc = lt_get_mmap_locks(tree, (unsigned long)buf, count);
        if (rc)
                GOTO(out, rc);

        while ((node = lt_least_node(tree))) {
                struct inode *inode = node->lt_inode;
                rc = ll_extent_lock(tree->lt_fd, inode,
                                    ll_i2info(inode)->lli_smd, node->lt_mode,
                                    &node->lt_policy, &node->lt_lockh,
                                    ast_flags);
                if (rc != 0)
                        GOTO(out, rc);

                rb_erase(&node->lt_node, &tree->lt_root);
                list_add_tail(&node->lt_locked_item, &tree->lt_locked_list);
        }
        RETURN(rc);
out:
        ll_tree_unlock(tree);
        RETURN(rc);
}

static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
{
        /* we only want to hold PW locks if the mmap() can generate
         * writes back to the file and that only happens in shared
         * writable vmas */
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
                return LCK_PW;
        return LCK_PR;
}

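/* Translate a user address range inside @vma into a byte extent of the
 * backing file, expanded outward to page boundaries. */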
static void policy_from_vma(ldlm_policy_data_t *policy,
                            struct vm_area_struct *vma, unsigned long addr,
                            size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
                                 (vma->vm_pgoff << CFS_PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~CFS_PAGE_MASK;
}

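/* Find the first VMA of the current process that overlaps
 * [addr, addr + count) and is a shared mapping of a Lustre file
 * (i.e. its nopage method is ll_nopage). */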
static struct vm_area_struct *our_vma(unsigned long addr, size_t count)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        /* No mm (e.g. a kernel thread such as nfsd)?  Then there are no
         * VMAs either. */
        if (!mm)
                RETURN(NULL);

        spin_lock(&mm->page_table_lock);
        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        spin_unlock(&mm->page_table_lock);
        RETURN(ret);
}

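/* For every Lustre-mmapped VMA backing the user buffer, queue a lock tree
 * node covering the mapped file extent so that ll_tree_lock() takes those
 * locks as well. */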
int lt_get_mmap_locks(struct ll_lock_tree *tree,
                      unsigned long addr, size_t count)
{
        struct vm_area_struct *vma;
        struct ll_lock_tree_node *node;
        ldlm_policy_data_t policy;
        struct inode *inode;
        ENTRY;

        if (count == 0)
                RETURN(0);

        /* we need to look up vmas on page aligned addresses */
        count += addr & (~CFS_PAGE_MASK);
        addr &= CFS_PAGE_MASK;

        while ((vma = our_vma(addr, count)) != NULL) {
                LASSERT(vma->vm_file);

                inode = vma->vm_file->f_dentry->d_inode;
                policy_from_vma(&policy, vma, addr, count);
                node = ll_node_from_inode(inode, policy.l_extent.start,
                                          policy.l_extent.end,
                                          mode_from_vma(vma));
                if (IS_ERR(node)) {
                        CERROR("not enough mem for lock_tree_node!\n");
                        RETURN(-ENOMEM);
                }
                lt_insert(tree, node);

                if (vma->vm_end - addr >= count)
                        break;
                count -= vma->vm_end - addr;
                addr = vma->vm_end;
        }
        RETURN(0);
}

/**
 * Page fault handler.
 *
 * \param vma - virtual memory area in which the fault occurred
 * \param address - address at which the fault was hit
 * \param type - type of the fault
 *
 * \return allocated and filled page for the address
 * \retval NOPAGE_SIGBUS if no page exists at this address
 * \retval NOPAGE_OOM if there is not enough memory to allocate a new page
 */
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type)
{
        struct file *filp = vma->vm_file;
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        struct inode *inode = filp->f_dentry->d_inode;
        struct lustre_handle lockh = { 0 };
        ldlm_policy_data_t policy;
        ldlm_mode_t mode;
        struct page *page = NULL;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm;
        struct ost_lvb lvb;
        __u64 kms, old_mtime;
        unsigned long pgoff, size, rand_read, seq_read;
        int rc = 0;
        ENTRY;

        if (lli->lli_smd == NULL) {
                CERROR("No lsm on fault?\n");
                RETURN(NULL);
        }

        ll_clear_file_contended(inode);

        /* start and end the lock on the first and last bytes in the page */
        policy_from_vma(&policy, vma, address, CFS_PAGE_SIZE);

        CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
               vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);

        mode = mode_from_vma(vma);
        old_mtime = LTIME_S(inode->i_mtime);

        lsm = lli->lli_smd;
        rc = ll_extent_lock(fd, inode, lsm, mode, &policy,
                            &lockh, LDLM_FL_CBPENDING | LDLM_FL_NO_LRU);
        if (rc != 0)
                RETURN(NULL);

        if (vma->vm_flags & VM_EXEC && LTIME_S(inode->i_mtime) != old_mtime)
                CWARN("binary changed. inode %lu\n", inode->i_ino);

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;

        pgoff = ((address - vma->vm_start) >> CFS_PAGE_SHIFT) + vma->vm_pgoff;
        size = (kms + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;

        if (pgoff >= size) {
                lov_stripe_unlock(lsm);
                ll_glimpse_size(inode, LDLM_FL_BLOCK_GRANTED);
        } else {
                /* XXX change inode size without ll_inode_size_lock() held!
                 *     there is a race condition with truncate path. (see
                 *     ll_extent_lock) */
                /* XXX i_size_write() is not used because it is not safe to
                 *     take the ll_inode_size_lock() due to a potential lock
                 *     inversion (bug 6077).  And since it's not safe to use
                 *     i_size_write() without a covering mutex we do the
                 *     assignment directly.  It is not critical that the
                 *     size be correct. */
                /* region is within kms and, hence, within real file size (A).
                 * We need to increase i_size to cover the read region so that
                 * generic_file_read() will do its job, but that doesn't mean
                 * the kms size is _correct_, it is only the _minimum_ size.
                 * If someone does a stat they will get the correct size which
                 * will always be >= the kms value here.  b=11081 */
                if (i_size_read(inode) < kms) {
                        inode->i_size = kms;
                        CDEBUG(D_INODE, "ino=%lu, updating i_size %llu\n",
                               inode->i_ino, i_size_read(inode));
                }
                lov_stripe_unlock(lsm);
        }

        /* If the mapping is writeable, adjust kms to cover this page,
         * but do not extend kms beyond the actual file size.
         * policy.l_extent.end is set to the end of the page by
         * policy_from_vma().  bug 10919 */
        lov_stripe_lock(lsm);
        if (mode == LCK_PW)
                obd_adjust_kms(ll_i2dtexp(inode), lsm,
                               min_t(loff_t, policy.l_extent.end + 1,
                               i_size_read(inode)), 0);
        lov_stripe_unlock(lsm);

        /* disable VM_SEQ_READ and use VM_RAND_READ to make sure that
         * the kernel will not read other pages not covered by ldlm in
         * filemap_nopage. we do our readahead in ll_readpage.
         */
        rand_read = vma->vm_flags & VM_RAND_READ;
        seq_read = vma->vm_flags & VM_SEQ_READ;
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        page = filemap_nopage(vma, address, type);
        if (page != NOPAGE_SIGBUS && page != NOPAGE_OOM)
                LL_CDEBUG_PAGE(D_PAGE, page, "got addr %lu type %lx\n", address,
                               (long)type);
        else
                CDEBUG(D_PAGE, "got addr %lu type %lx - SIGBUS\n", address,
                       (long)type);

        vma->vm_flags &= ~VM_RAND_READ;
        vma->vm_flags |= (rand_read | seq_read);

        ll_extent_unlock(fd, inode, ll_i2info(inode)->lli_smd, mode, &lockh);
        RETURN(page);
}

/* To avoid cancelling the locks that cover a mmapped region under lock cache
 * pressure, we track the number of mapped vmas in lli_mmap_cnt.
 * ll_vm_open():  when the first vma is linked, split this file's locks from
 *                the lru.
 * ll_vm_close(): when the last vma is unlinked, join all this file's locks
 *                back into the lru.
 *
 * XXX for performance we do not check whether the vma region and the lock
 *     extents actually overlap.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

        LASSERT(vma->vm_file);

        spin_lock(&lli->lli_lock);
        LASSERT(atomic_read(&lli->lli_mmap_cnt) >= 0);

        atomic_inc(&lli->lli_mmap_cnt);
        if (atomic_read(&lli->lli_mmap_cnt) == 1) {
                struct lov_stripe_md *lsm = lli->lli_smd;
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                int count;

                spin_unlock(&lli->lli_lock);

                if (!lsm)
                        return;
                count = obd_join_lru(sbi->ll_dt_exp, lsm, 0);
                VMA_DEBUG(vma, "split %d unused locks from lru\n", count);
        } else {
                spin_unlock(&lli->lli_lock);
        }
}

static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

        LASSERT(vma->vm_file);

        spin_lock(&lli->lli_lock);
        LASSERT(atomic_read(&lli->lli_mmap_cnt) > 0);

        atomic_dec(&lli->lli_mmap_cnt);
        if (atomic_read(&lli->lli_mmap_cnt) == 0) {
                struct lov_stripe_md *lsm = lli->lli_smd;
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                int count;

                spin_unlock(&lli->lli_lock);

                if (!lsm)
                        return;
                count = obd_join_lru(sbi->ll_dt_exp, lsm, 1);
                VMA_DEBUG(vma, "join %d unused locks to lru\n", count);
        } else {
                spin_unlock(&lli->lli_lock);
        }
}

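/* On kernels that do not export filemap_populate() (no HAVE_FILEMAP_POPULATE)
 * we stash the generic populate method in this pointer at mmap time (see
 * ll_file_mmap() below) and call it through the wrapper. */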
#ifndef HAVE_FILEMAP_POPULATE
static int (*filemap_populate)(struct vm_area_struct *area,
                               unsigned long address, unsigned long len,
                               pgprot_t prot, unsigned long pgoff,
                               int nonblock);
#endif
static int ll_populate(struct vm_area_struct *area, unsigned long address,
                       unsigned long len, pgprot_t prot, unsigned long pgoff,
                       int nonblock)
{
        int rc = 0;
        ENTRY;

        /* always set nonblock to 1 to avoid page read-ahead */
        rc = filemap_populate(area, address, len, prot, pgoff, 1);
        RETURN(rc);
}

/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
{
        return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
}

/* XXX put nice comment here.  talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte */
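/* Tear down any user mappings of @mapping in the byte range [first, last]
 * so that the next access faults back through ll_nopage() and re-takes the
 * dlm locks.  Returns 0 if the file was mapped, -ENOENT otherwise. */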
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
        int rc = -ENOENT;
        ENTRY;

        LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }

        RETURN(rc);
}

static struct vm_operations_struct ll_file_vm_ops = {
        .nopage         = ll_nopage,
        .open           = ll_vm_open,
        .close          = ll_vm_close,
        .populate       = ll_populate,
};

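/* mmap entry point: let generic_file_mmap() set up the mapping, then install
 * ll_file_vm_ops and call ->open() by hand (the VM only calls ->open() when
 * a vma is duplicated, not for the initial mapping), and finally glimpse the
 * file size and mtime. */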
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        int rc;
        ENTRY;

        ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode), LPROC_LL_MAP, 1);
        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
#if !defined(HAVE_FILEMAP_POPULATE)
                if (!filemap_populate)
                        filemap_populate = vma->vm_ops->populate;
#endif
                vma->vm_ops = &ll_file_vm_ops;
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                rc = ll_glimpse_size(file->f_dentry->d_inode, 0);
        }

        RETURN(rc);
}