lustre/llite/llite_mmap.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <asm/segment.h>
#include <linux/pagemap.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/iobuf.h>
#endif

#include <linux/pagevec.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/lustre_mds.h>
#include <linux/lustre_lite.h>
#include <linux/lustre_audit.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

/*
struct ll_lock_tree_node {
        rb_node_t               lt_node;
        struct list_head        lt_locked_item;
        __u64                   lt_oid;
        ldlm_policy_data_t      lt_policy;
        struct lustre_handle    lt_lockh;
        ldlm_mode_t             lt_mode;
};
*/
__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
                      unsigned long addr, size_t count);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type);
#else
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int unused);
#endif

struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
                                              __u64 end, ldlm_mode_t mode)
{
        struct ll_lock_tree_node *node;

        OBD_ALLOC(node, sizeof(*node));
        if (node == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        node->lt_oid = ll_i2info(inode)->lli_smd->lsm_object_id;
        node->lt_policy.l_extent.start = start;
        node->lt_policy.l_extent.end = end;
        memset(&node->lt_lockh, 0, sizeof(node->lt_lockh));
        INIT_LIST_HEAD(&node->lt_locked_item);
        node->lt_mode = mode;

        return node;
}

int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two)
{
        /* XXX remove this assert when we really want to use this function
         * to compare regions of different files */
        LASSERT(one->lt_oid == two->lt_oid);

        if (one->lt_oid < two->lt_oid)
                return -1;
        if (one->lt_oid > two->lt_oid)
                return 1;

        if (one->lt_policy.l_extent.end < two->lt_policy.l_extent.start)
                return -1;
        if (one->lt_policy.l_extent.start > two->lt_policy.l_extent.end)
                return 1;

        return 0; /* they are the same object and overlap */
}

static void lt_merge(struct ll_lock_tree_node *dst,
                     struct ll_lock_tree_node *src)
{
        dst->lt_policy.l_extent.start = min(dst->lt_policy.l_extent.start,
                                            src->lt_policy.l_extent.start);
        dst->lt_policy.l_extent.end = max(dst->lt_policy.l_extent.end,
                                          src->lt_policy.l_extent.end);

        /* XXX could be a real call to the dlm to find superset modes */
        if (src->lt_mode == LCK_PW && dst->lt_mode != LCK_PW)
                dst->lt_mode = LCK_PW;
}
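
/*
 * Illustrative example (values are made up, assuming a 4096-byte
 * PAGE_CACHE_SIZE): if the tree already holds a PR node covering
 * [0, 8191] and a PW node for [4096, 12287] is inserted, lt_compare()
 * returns 0 because the extents belong to the same object and overlap,
 * so lt_insert() below calls lt_merge() and the surviving node covers
 * [0, 12287] with mode LCK_PW.
 */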

static void lt_insert(struct ll_lock_tree *tree,
                      struct ll_lock_tree_node *node)
{
        struct ll_lock_tree_node *walk;
        rb_node_t **p, *parent;
        ENTRY;

restart:
        p = &tree->lt_root.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                walk = rb_entry(parent, struct ll_lock_tree_node, lt_node);
                switch (lt_compare(node, walk)) {
                case -1:
                        p = &(*p)->rb_left;
                        break;
                case 1:
                        p = &(*p)->rb_right;
                        break;
                case 0:
                        lt_merge(node, walk);
                        rb_erase(&walk->lt_node, &tree->lt_root);
                        OBD_FREE(walk, sizeof(*walk));
                        goto restart;
                        break;
                default:
                        LBUG();
                        break;
                }
        }
        rb_link_node(&node->lt_node, parent, p);
        rb_insert_color(&node->lt_node, &tree->lt_root);
        EXIT;
}

static struct ll_lock_tree_node *lt_least_node(struct ll_lock_tree *tree)
{
        rb_node_t *rbnode;
        struct ll_lock_tree_node *node = NULL;

        for (rbnode = tree->lt_root.rb_node; rbnode != NULL;
             rbnode = rbnode->rb_left) {
                if (rbnode->rb_left == NULL) {
                        node = rb_entry(rbnode, struct ll_lock_tree_node,
                                        lt_node);
                        break;
                }
        }
        RETURN(node);
}
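
/*
 * Note: lt_least_node() returns the leftmost node, i.e. the lock with
 * the lowest extent according to lt_compare().  ll_tree_lock() below
 * drains the tree through this function, so the extent locks are
 * always acquired in ascending offset order, which gives a consistent
 * ordering when several extents on the same file must be held at once.
 */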

int ll_tree_unlock(struct ll_lock_tree *tree, struct inode *inode)
{
        struct ll_lock_tree_node *node;
        struct list_head *pos, *n;
        int rc = 0;
        ENTRY;

        list_for_each_safe(pos, n, &tree->lt_locked_list) {
                node = list_entry(pos, struct ll_lock_tree_node,
                                  lt_locked_item);

                rc = ll_extent_unlock(tree->lt_fd, inode,
                                      ll_i2info(inode)->lli_smd, node->lt_mode,
                                      &node->lt_lockh);
                if (rc != 0) {
                        /* XXX better message */
                        CERROR("couldn't unlock %d\n", rc);
                }
                list_del(&node->lt_locked_item);
                OBD_FREE(node, sizeof(*node));
        }

        while ((node = lt_least_node(tree))) {
                rb_erase(&node->lt_node, &tree->lt_root);
                OBD_FREE(node, sizeof(*node));
        }

        RETURN(rc);
}

int ll_tree_lock(struct ll_lock_tree *tree,
                 struct ll_lock_tree_node *first_node, struct inode *inode,
                 const char *buf, size_t count, int ast_flags)
{
        struct ll_lock_tree_node *node;
        int rc = 0;
        ENTRY;

        tree->lt_root.rb_node = NULL;
        INIT_LIST_HEAD(&tree->lt_locked_list);
        if (first_node != NULL)
                lt_insert(tree, first_node);

        /* Order the locking.  The only case we have to worry about is the
         * double lock, where the user buffer for this I/O is itself mmapped
         * from this same file; add those extents to the tree so that all of
         * the locks are taken in a single sorted pass. */
        if (mapping_mapped(inode->i_mapping)) {
                rc = lt_get_mmap_locks(tree, inode, (unsigned long)buf, count);
                if (rc)
                        GOTO(out, rc);
        }

        while ((node = lt_least_node(tree))) {
                struct obd_service_time *stime;
                stime = (node->lt_mode & LCK_PW) ?
                        &ll_i2sbi(inode)->ll_write_stime :
                        &ll_i2sbi(inode)->ll_read_stime;

                rc = ll_extent_lock(tree->lt_fd, inode,
                                    ll_i2info(inode)->lli_smd, node->lt_mode,
                                    &node->lt_policy, &node->lt_lockh,
                                    ast_flags, stime);
                if (rc != 0)
                        GOTO(out, rc);

                rb_erase(&node->lt_node, &tree->lt_root);
                list_add_tail(&node->lt_locked_item, &tree->lt_locked_list);
        }
        RETURN(rc);
out:
        ll_tree_unlock(tree, inode);
        return rc;
}
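
/*
 * Minimal sketch of the intended calling pattern (the real callers live
 * in the llite read/write paths; the names and values below are
 * illustrative only, not taken from this file, and error handling of
 * ll_node_from_inode() is omitted):
 *
 *      struct ll_lock_tree tree;
 *      struct ll_lock_tree_node *node;
 *      int rc;
 *
 *      tree.lt_fd = fd;
 *      node = ll_node_from_inode(inode, start, end, LCK_PW);
 *      rc = ll_tree_lock(&tree, node, inode, buf, count, 0);
 *      if (rc == 0) {
 *              ... perform the I/O while the extent locks are held ...
 *              rc = ll_tree_unlock(&tree, inode);
 *      }
 *
 * Note that on failure ll_tree_lock() unlocks and frees the tree
 * itself, so the caller only calls ll_tree_unlock() on success.
 */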

static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
{
        /* we only want to hold PW locks if the mmap() can generate
         * writes back to the file and that only happens in shared
         * writable vmas */
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
                return LCK_PW;
        return LCK_PR;
}
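
/*
 * For example, a PROT_WRITE, MAP_PRIVATE mapping still only takes
 * LCK_PR here: its stores go to private COW pages and are never
 * written back to the file, so a read lock is sufficient.
 */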

static void policy_from_vma(ldlm_policy_data_t *policy,
                            struct vm_area_struct *vma, unsigned long addr,
                            size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & PAGE_CACHE_MASK) +
                                 (vma->vm_pgoff << PAGE_CACHE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               (PAGE_CACHE_SIZE - 1);
}
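
/*
 * Worked example (hypothetical numbers, 4096-byte PAGE_CACHE_SIZE):
 * for a vma with vm_start = 0x10000 and vm_pgoff = 2, a fault at
 * addr = 0x11234 with count = PAGE_CACHE_SIZE gives
 *   l_extent.start = ((0x11234 - 0x10000) & PAGE_CACHE_MASK)
 *                    + (2 << PAGE_CACHE_SHIFT)          = 0x3000
 *   l_extent.end   = (0x3000 + 4096 - 1) | (4096 - 1)   = 0x3fff
 * i.e. the extent covers exactly the file range backing the faulting
 * page.
 */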

static struct vm_area_struct *our_vma(unsigned long addr, size_t count,
                                      struct inode *inode)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        spin_lock(&mm->page_table_lock);
        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage &&
                    vma->vm_file && vma->vm_file->f_dentry->d_inode == inode) {
                        ret = vma;
                        break;
                }
        }
        spin_unlock(&mm->page_table_lock);
        RETURN(ret);
}

int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
                      unsigned long addr, size_t count)
{
        struct vm_area_struct *vma;
        struct ll_lock_tree_node *node;
        ldlm_policy_data_t policy;
        ENTRY;

        if (count == 0)
                RETURN(0);

        /* we need to look up vmas on page aligned addresses */
        count += addr & (PAGE_SIZE - 1);
        addr -= addr & (PAGE_SIZE - 1);
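        /* e.g. (hypothetical values, 4096-byte pages): addr = 0x12345,
         * count = 10 becomes addr = 0x12000, count = 0x34f, so the vma
         * lookup below starts on a page boundary and still covers the
         * original byte range. */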

        while ((vma = our_vma(addr, count, inode)) != NULL) {

                policy_from_vma(&policy, vma, addr, count);
                node = ll_node_from_inode(inode, policy.l_extent.start,
                                          policy.l_extent.end,
                                          mode_from_vma(vma));
                if (IS_ERR(node)) {
                        CERROR("not enough mem for lock_tree_node!\n");
                        RETURN(-ENOMEM);
                }
                lt_insert(tree, node);

                if (vma->vm_end - addr >= count)
                        break;
                count -= vma->vm_end - addr;
                addr = vma->vm_end;
        }
        RETURN(0);
}
/* FIXME: there is a pagefault race that goes as follows:
 * 1. A user process on node A accesses a portion of a mapped file,
 *    resulting in a page fault.  The pagefault handler invokes the
 *    ll_nopage function, which reads the page into memory.
 * 2. A user process on node B writes to the same portion of the file
 *    (either via mmap or write()), which causes node A to cancel its
 *    lock and truncate the page.
 * 3. Node A then executes the rest of do_no_page(), entering the
 *    now-invalid page into the PTEs.
 *
 * Turning the whole of do_no_page() into a hook, so that both the page
 * cache and the page table installation are covered by the DLM lock,
 * would eliminate this race.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type)
#else
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int unused)
#endif
{
        struct file *filp = vma->vm_file;
        struct ll_file_data *fd = filp->private_data;
        struct inode *inode = filp->f_dentry->d_inode;
        struct lustre_handle lockh = { 0 };
        ldlm_policy_data_t policy;
        ldlm_mode_t mode;
        struct page *page = NULL;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_service_time *stime;
        __u64 kms;
        unsigned long pgoff, size, rand_read, seq_read;
        int rc = 0;
        ENTRY;

        if (lli->lli_smd == NULL) {
                CERROR("No lsm on fault?\n");
                RETURN(NULL);
        }

        /* start and end the lock on the first and last bytes in the page */
        policy_from_vma(&policy, vma, address, PAGE_CACHE_SIZE);

        CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
               vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);

        mode = mode_from_vma(vma);
        stime = (mode & LCK_PW) ? &ll_i2sbi(inode)->ll_write_stime :
                                  &ll_i2sbi(inode)->ll_read_stime;

        rc = ll_extent_lock(fd, inode, lli->lli_smd, mode, &policy,
                            &lockh, LDLM_FL_CBPENDING, stime);
        if (rc != 0)
                RETURN(NULL);

        /* XXX we change the inode size without holding i_sem, so there is
         *     a race with the truncate path (see ll_extent_lock) */
        down(&lli->lli_size_sem);
        kms = lov_merge_size(lli->lli_smd, 1);
        pgoff = ((address - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
        size = (kms + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        if (pgoff >= size) {
                up(&lli->lli_size_sem);
                ll_glimpse_size(inode);
        } else {
                inode->i_size = kms;
                up(&lli->lli_size_sem);
        }

        /* Clear VM_SEQ_READ and set VM_RAND_READ so that filemap_nopage()
         * will not read ahead into pages that are not covered by the DLM
         * lock; we do our own readahead in ll_readpage. */
        rand_read = vma->vm_flags & VM_RAND_READ;
        seq_read = vma->vm_flags & VM_SEQ_READ;
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        page = filemap_nopage(vma, address, type);
#else
        page = filemap_nopage(vma, address, unused);
#endif
        vma->vm_flags &= ~VM_RAND_READ;
        vma->vm_flags |= (rand_read | seq_read);

        ll_extent_unlock(fd, inode, ll_i2info(inode)->lli_smd, mode, &lockh);
        RETURN(page);
}

/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma,
                                         __u64 byte)
{
        return vma->vm_start +
               (byte - ((__u64)vma->vm_pgoff << PAGE_CACHE_SHIFT));
}
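
/*
 * Continuing the hypothetical numbers used above (4096-byte pages):
 * for a vma with vm_start = 0x10000 and vm_pgoff = 2, file offset
 * 0x3000 maps back to the user address
 *   0x10000 + (0x3000 - (2 << PAGE_CACHE_SHIFT)) = 0x11000,
 * the page-aligned inverse of the calculation in policy_from_vma().
 */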

#define VMA_DEBUG(vma, fmt, arg...)                                          \
        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) "   \
               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end,  \
               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,               \
               vma->vm_file->f_dentry->d_inode->i_ino,                       \
               vma->vm_file->f_dentry->d_iname, ## arg);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/* [first, last] are the byte offsets affected.
 * vm_{start, end} are user addresses of the first byte of the mapping and
 *      the next byte beyond it
 * vm_pgoff is the page index of the first byte in the mapping */
static void teardown_vmas(struct vm_area_struct *vma, __u64 first,
                          __u64 last)
{
        unsigned long address, len;
        for (; vma ; vma = vma->vm_next_share) {
                if (last >> PAGE_SHIFT < vma->vm_pgoff)
                        continue;
                if (first >> PAGE_CACHE_SHIFT > (vma->vm_pgoff +
                    ((vma->vm_end - vma->vm_start) >> PAGE_CACHE_SHIFT)))
                        continue;

                /* XXX to avoid unmapping the COW pages of a running file,
                 * don't unmap these private writable mappings here, even
                 * though skipping them breaks private mappings a little.
                 *
                 * The clean way is to check the mapping of every page and
                 * unmap only the non-COW pages, just like
                 * unmap_mapping_range() with even_cows == 0 on 2.6 kernels.
                 */
                if (!(vma->vm_flags & VM_SHARED) &&
                    (vma->vm_flags & VM_WRITE))
                        continue;

                address = max((unsigned long)vma->vm_start,
                              file_to_user(vma, first));
                len = min((unsigned long)vma->vm_end,
                          file_to_user(vma, last) + 1) - address;

                VMA_DEBUG(vma, "zapping vma [first="LPU64" last="LPU64" "
                          "address=%ld len=%ld]\n", first, last, address, len);
                LASSERT(len > 0);
                ll_zap_page_range(vma, address, len);
        }
}
#endif

/* XXX put nice comment here.  talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first,
                      __u64 last)
{
        int rc = -ENOENT;
        ENTRY;

        LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }
#else
        spin_lock(&mapping->i_shared_lock);
        if (mapping->i_mmap != NULL) {
                rc = 0;
                teardown_vmas(mapping->i_mmap, first, last);
        }
        if (mapping->i_mmap_shared != NULL) {
                rc = 0;
                teardown_vmas(mapping->i_mmap_shared, first, last);
        }
        spin_unlock(&mapping->i_shared_lock);
#endif

        RETURN(rc);
}


static void ll_close_vma(struct vm_area_struct *vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct address_space *mapping = inode->i_mapping;
        unsigned long next, size, end;
        struct ll_async_page *llap;
        struct obd_export *exp;
        struct pagevec pvec;
        int i;

        if (!(vma->vm_flags & VM_SHARED))
                return;

        /* By this point all PTEs have been synced back into the mem_map,
         * so we scan the backing store and put every dirty page onto the
         * pending list to track its flushing. */

        LASSERT(LLI_DIRTY_HANDLE(inode));
        exp = ll_i2dtexp(inode);
        if (exp == NULL) {
                CERROR("can't get export for the inode\n");
                return;
        }

        pagevec_init(&pvec, 0);
        next = vma->vm_pgoff;
        size = (vma->vm_end - vma->vm_start) / PAGE_SIZE;
        end = next + size - 1;

        CDEBUG(D_INODE, "close vma 0x%p[%lu/%lu/%lu from %lu/%u]\n", vma,
               next, size, end, inode->i_ino, inode->i_generation);

        while (next <= end && pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        if (page->index > next)
                                next = page->index;
                        if (next > end)
                                continue;
                        next++;

                        lock_page(page);
                        if (page->mapping != mapping || !PageDirty(page)) {
                                unlock_page(page);
                                continue;
                        }

                        llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
                        if (IS_ERR(llap)) {
                                CERROR("can't get llap\n");
                                unlock_page(page);
                                continue;
                        }

                        llap_write_pending(inode, llap);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
        }
}

static struct vm_operations_struct ll_file_vm_ops = {
        .nopage         = ll_nopage,
        .close          = ll_close_vma,
};

/* Audit functions */
extern int ll_audit_log(struct inode *, audit_op, int);

int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        int rc;
        ENTRY;

        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
                struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
                vma->vm_ops = &ll_file_vm_ops;

                /* mark i/o epoch dirty */
                if (vma->vm_flags & VM_SHARED)
                        set_bit(LLI_F_DIRTY_HANDLE, &lli->lli_flags);
        }

        ll_audit_log(file->f_dentry->d_inode, AUDIT_MMAP, rc);

        RETURN(rc);
}