fs/lustre-release.git: lustre/llite/llite_mmap.c
- added patches from #5492 and #5654
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/stat.h>
#include <asm/uaccess.h>
#include <asm/segment.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/iobuf.h>
#endif

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/lustre_mds.h>
#include <linux/lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

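/* One node in the per-I/O tree of extent locks: the object id, the byte
 * extent covered, the lock mode and the handle of the lock once granted. */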
struct ll_lock_tree_node {
        rb_node_t               lt_node;
        struct list_head        lt_locked_item;
        __u64                   lt_oid;
        ldlm_policy_data_t      lt_policy;
        struct lustre_handle    lt_lockh;
        ldlm_mode_t             lt_mode;
};

__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
                      unsigned long addr, size_t count);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type);
#else
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int unused);
#endif

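/* Allocate a lock tree node covering bytes [start, end] of the inode's
 * object with the given lock mode. */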
struct ll_lock_tree_node *ll_node_from_inode(struct inode *inode, __u64 start,
                                             __u64 end, ldlm_mode_t mode)
{
        struct ll_lock_tree_node *node;

        OBD_ALLOC(node, sizeof(*node));
        if (node == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        node->lt_oid = ll_i2info(inode)->lli_smd->lsm_object_id;
        node->lt_policy.l_extent.start = start;
        node->lt_policy.l_extent.end = end;
        memset(&node->lt_lockh, 0, sizeof(node->lt_lockh));
        INIT_LIST_HEAD(&node->lt_locked_item);
        node->lt_mode = mode;

        return node;
}

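/* Order two nodes by object id and then by extent; returns 0 when the two
 * extents belong to the same object and overlap. */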
int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two)
{
        /* XXX remove this assert when we really want to use this function
         * to compare regions of different files */
        LASSERT(one->lt_oid == two->lt_oid);

        if (one->lt_oid < two->lt_oid)
                return -1;
        if (one->lt_oid > two->lt_oid)
                return 1;

        if (one->lt_policy.l_extent.end < two->lt_policy.l_extent.start)
                return -1;
        if (one->lt_policy.l_extent.start > two->lt_policy.l_extent.end)
                return 1;

        return 0; /* they are the same object and overlap */
}

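/* Fold src into dst: widen dst's extent to cover src and upgrade the mode
 * to PW if either of the two nodes required a write lock. */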
static void lt_merge(struct ll_lock_tree_node *dst,
                     struct ll_lock_tree_node *src)
{
        dst->lt_policy.l_extent.start = min(dst->lt_policy.l_extent.start,
                                            src->lt_policy.l_extent.start);
        dst->lt_policy.l_extent.end = max(dst->lt_policy.l_extent.end,
                                          src->lt_policy.l_extent.end);

        /* XXX could be a real call to the dlm to find superset modes */
        if (src->lt_mode == LCK_PW && dst->lt_mode != LCK_PW)
                dst->lt_mode = LCK_PW;
}

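/* Insert a node into the rb-tree; any existing node it overlaps is merged
 * into it and freed, and the descent is restarted from the root. */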
static void lt_insert(struct ll_lock_tree *tree,
                      struct ll_lock_tree_node *node)
{
        struct ll_lock_tree_node *walk;
        rb_node_t **p, *parent;
        ENTRY;

restart:
        p = &tree->lt_root.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                walk = rb_entry(parent, struct ll_lock_tree_node, lt_node);
                switch (lt_compare(node, walk)) {
                case -1:
                        p = &(*p)->rb_left;
                        break;
                case 1:
                        p = &(*p)->rb_right;
                        break;
                case 0:
                        lt_merge(node, walk);
                        rb_erase(&walk->lt_node, &tree->lt_root);
                        OBD_FREE(walk, sizeof(*walk));
                        goto restart;
                        break;
                default:
                        LBUG();
                        break;
                }
        }
        rb_link_node(&node->lt_node, parent, p);
        rb_insert_color(&node->lt_node, &tree->lt_root);
        EXIT;
}

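/* Return the leftmost node in the tree, i.e. the extent with the lowest
 * starting offset, or NULL if the tree is empty. */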
static struct ll_lock_tree_node *lt_least_node(struct ll_lock_tree *tree)
{
        rb_node_t *rbnode;
        struct ll_lock_tree_node *node = NULL;

        for (rbnode = tree->lt_root.rb_node; rbnode != NULL;
             rbnode = rbnode->rb_left) {
                if (rbnode->rb_left == NULL) {
                        node = rb_entry(rbnode, struct ll_lock_tree_node,
                                        lt_node);
                        break;
                }
        }
        RETURN(node);
}

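/* Drop every extent lock on the locked list and free all the nodes that are
 * still in the tree. */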
int ll_tree_unlock(struct ll_lock_tree *tree, struct inode *inode)
{
        struct ll_lock_tree_node *node;
        struct list_head *pos, *n;
        int rc = 0;
        ENTRY;

        list_for_each_safe(pos, n, &tree->lt_locked_list) {
                node = list_entry(pos, struct ll_lock_tree_node,
                                  lt_locked_item);

                rc = ll_extent_unlock(tree->lt_fd, inode,
                                      ll_i2info(inode)->lli_smd, node->lt_mode,
                                      &node->lt_lockh);
                if (rc != 0) {
                        /* XXX better message */
                        CERROR("couldn't unlock %d\n", rc);
                }
                list_del(&node->lt_locked_item);
                OBD_FREE(node, sizeof(*node));
        }

        while ((node = lt_least_node(tree))) {
                rb_erase(&node->lt_node, &tree->lt_root);
                OBD_FREE(node, sizeof(*node));
        }

        RETURN(rc);
}

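/* Acquire the extent locks described by the tree in ascending offset order.
 * Locks covering any vmas of this same file that back the user buffer are
 * merged into the tree first, so the same extent is never locked twice (see
 * the comment below).  On failure, every lock taken so far is dropped.
 *
 * A typical caller (illustrative sketch only, not code from this file;
 * error handling omitted) looks like:
 *
 *      struct ll_lock_tree tree;
 *      struct ll_lock_tree_node *node;
 *
 *      tree.lt_fd = fd;
 *      node = ll_node_from_inode(inode, start, end, mode);
 *      rc = ll_tree_lock(&tree, node, inode, buf, count, ast_flags);
 *      if (rc == 0) {
 *              ... perform the read or write ...
 *              ll_tree_unlock(&tree, inode);
 *      }
 */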
int ll_tree_lock(struct ll_lock_tree *tree,
                 struct ll_lock_tree_node *first_node, struct inode *inode,
                 const char *buf, size_t count, int ast_flags)
{
        struct ll_lock_tree_node *node;
        int rc = 0;
        ENTRY;

        tree->lt_root.rb_node = NULL;
        INIT_LIST_HEAD(&tree->lt_locked_list);
        if (first_node != NULL)
                lt_insert(tree, first_node);

        /* order the locking.  the only case we have to worry about is a
         * double lock: the buffer being read or written is mmapped onto
         * this very file. */
        if (mapping_mapped(inode->i_mapping)) {
                rc = lt_get_mmap_locks(tree, inode, (unsigned long)buf, count);
                if (rc)
                        GOTO(out, rc);
        }

        while ((node = lt_least_node(tree))) {
                struct obd_service_time *stime;
                stime = (node->lt_mode & LCK_PW) ?
                        &ll_i2sbi(inode)->ll_write_stime :
                        &ll_i2sbi(inode)->ll_read_stime;

                rc = ll_extent_lock(tree->lt_fd, inode,
                                    ll_i2info(inode)->lli_smd, node->lt_mode,
                                    &node->lt_policy, &node->lt_lockh,
                                    ast_flags, stime);
                if (rc != 0)
                        GOTO(out, rc);

                rb_erase(&node->lt_node, &tree->lt_root);
                list_add_tail(&node->lt_locked_item, &tree->lt_locked_list);
        }
        RETURN(rc);
out:
        ll_tree_unlock(tree, inode);
        return rc;
}

static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
{
        /* we only want to hold PW locks if the mmap() can generate
         * writes back to the file and that only happens in shared
         * writable vmas */
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
                return LCK_PW;
        return LCK_PR;
}

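/* Compute the page-aligned file byte extent that backs the user range
 * [addr, addr + count) within the vma. */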
static void policy_from_vma(ldlm_policy_data_t *policy,
                            struct vm_area_struct *vma, unsigned long addr,
                            size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & PAGE_CACHE_MASK) +
                                 (vma->vm_pgoff << PAGE_CACHE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               (PAGE_CACHE_SIZE - 1);
}

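/* Find the first vma in the current process that intersects
 * [addr, addr + count) and is a Lustre mapping (ll_nopage) of this inode. */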
static struct vm_area_struct *our_vma(unsigned long addr, size_t count,
                                      struct inode *inode)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        spin_lock(&mm->page_table_lock);
        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count);
             vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage &&
                    vma->vm_file && vma->vm_file->f_dentry->d_inode == inode) {
                        ret = vma;
                        break;
                }
        }
        spin_unlock(&mm->page_table_lock);
        RETURN(ret);
}

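/* For each Lustre vma of this inode touched by the user buffer
 * [addr, addr + count), insert a matching lock tree node into the tree. */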
int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
                      unsigned long addr, size_t count)
{
        struct vm_area_struct *vma;
        struct ll_lock_tree_node *node;
        ldlm_policy_data_t policy;
        ENTRY;

        if (count == 0)
                RETURN(0);

        /* we need to look up vmas on page-aligned addresses */
        count += addr & (PAGE_SIZE - 1);
        addr -= addr & (PAGE_SIZE - 1);

        while ((vma = our_vma(addr, count, inode)) != NULL) {
                policy_from_vma(&policy, vma, addr, count);
                node = ll_node_from_inode(inode, policy.l_extent.start,
                                          policy.l_extent.end,
                                          mode_from_vma(vma));
                if (IS_ERR(node)) {
                        CERROR("not enough mem for lock_tree_node!\n");
                        RETURN(-ENOMEM);
                }
                lt_insert(tree, node);

                if (vma->vm_end - addr >= count)
                        break;
                count -= vma->vm_end - addr;
                addr = vma->vm_end;
        }
        RETURN(0);
}

/* FIXME: there is a pagefault race that goes as follows:
 * 1. A user process on node A accesses a portion of a mapped file,
 *    resulting in a page fault.  The pagefault handler invokes the
 *    ll_nopage function, which reads the page into memory.
 * 2. A user process on node B writes to the same portion of the file
 *    (either via mmap or write()), which causes node A to cancel the
 *    lock and truncate the page.
 * 3. Node A then executes the rest of do_no_page(), entering the
 *    now-invalid page into the PTEs.
 *
 * Making the whole of do_no_page() a hook, so that both the page cache
 * and the page-table installation are covered by the dlm lock, would
 * eliminate this race.
 */
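/* Page fault handler: take an extent lock covering the faulting page, refresh
 * the inode size from the known minimum size (kms), let filemap_nopage() read
 * the page with readahead disabled, then drop the extent lock. */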
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type)
#else
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int unused)
#endif
{
        struct file *filp = vma->vm_file;
        struct ll_file_data *fd = filp->private_data;
        struct inode *inode = filp->f_dentry->d_inode;
        struct lustre_handle lockh = { 0 };
        ldlm_policy_data_t policy;
        ldlm_mode_t mode;
        struct page *page = NULL;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_service_time *stime;
        __u64 kms;
        unsigned long pgoff, size, rand_read, seq_read;
        int rc = 0;
        ENTRY;

        if (lli->lli_smd == NULL) {
                CERROR("No lsm on fault?\n");
                RETURN(NULL);
        }

        /* start and end the lock on the first and last bytes in the page */
        policy_from_vma(&policy, vma, address, PAGE_CACHE_SIZE);

        CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
               vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);

        mode = mode_from_vma(vma);
        stime = (mode & LCK_PW) ? &ll_i2sbi(inode)->ll_write_stime :
                                  &ll_i2sbi(inode)->ll_read_stime;

        rc = ll_extent_lock(fd, inode, lli->lli_smd, mode, &policy,
                            &lockh, LDLM_FL_CBPENDING, stime);
        if (rc != 0)
                RETURN(NULL);

        /* XXX we change the inode size without holding i_sem; there is a
         *     race with the truncate path.  (see ll_extent_lock) */
        down(&lli->lli_size_sem);
        kms = lov_merge_size(lli->lli_smd, 1);
        pgoff = ((address - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
        size = (kms + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        if (pgoff >= size) {
                up(&lli->lli_size_sem);
                ll_glimpse_size(inode);
        } else {
                inode->i_size = kms;
                up(&lli->lli_size_sem);
        }

        /* disable VM_SEQ_READ and use VM_RAND_READ to make sure that
         * the kernel will not read other pages not covered by ldlm in
         * filemap_nopage.  we do our readahead in ll_readpage.
         */
        rand_read = vma->vm_flags & VM_RAND_READ;
        seq_read = vma->vm_flags & VM_SEQ_READ;
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        page = filemap_nopage(vma, address, type);
#else
        page = filemap_nopage(vma, address, unused);
#endif
        vma->vm_flags &= ~VM_RAND_READ;
        vma->vm_flags |= (rand_read | seq_read);

        ll_extent_unlock(fd, inode, ll_i2info(inode)->lli_smd, mode, &lockh);
        RETURN(page);
}

/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma,
                                         __u64 byte)
{
        return vma->vm_start +
               (byte - ((__u64)vma->vm_pgoff << PAGE_CACHE_SHIFT));
}

#define VMA_DEBUG(vma, fmt, arg...)                                          \
        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) "   \
               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end,  \
               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,               \
               vma->vm_file->f_dentry->d_inode->i_ino,                       \
               vma->vm_file->f_dentry->d_iname, ## arg);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/* [first, last] are the byte offsets affected.
 * vm_{start, end} are user addresses of the first byte of the mapping and
 *      the next byte beyond it
 * vm_pgoff is the page index of the first byte in the mapping */
static void teardown_vmas(struct vm_area_struct *vma, __u64 first,
                          __u64 last)
{
        unsigned long address, len;
        for (; vma; vma = vma->vm_next_share) {
                if (last >> PAGE_SHIFT < vma->vm_pgoff)
                        continue;
                if (first >> PAGE_CACHE_SHIFT > (vma->vm_pgoff +
                    ((vma->vm_end - vma->vm_start) >> PAGE_CACHE_SHIFT)))
                        continue;

                /* XXX in the case of unmapping the COW pages of a running
                 * file, don't unmap these private writeable mappings here!
                 * though that will break private mappings a little.
                 *
                 * the clean way is to check the mapping of every page
                 * and just unmap the non-COW pages, just like
                 * unmap_mapping_range() with even_cows=0 in kernel 2.6.
                 */
                if (!(vma->vm_flags & VM_SHARED) &&
                    (vma->vm_flags & VM_WRITE))
                        continue;

                address = max((unsigned long)vma->vm_start,
                              file_to_user(vma, first));
                len = min((unsigned long)vma->vm_end,
                          file_to_user(vma, last) + 1) - address;

                VMA_DEBUG(vma, "zapping vma [first="LPU64" last="LPU64" "
                          "address=%ld len=%ld]\n", first, last, address, len);
                LASSERT(len > 0);
                ll_zap_page_range(vma, address, len);
        }
}
#endif

/* XXX put nice comment here.  talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte */
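/* Tear down any user mappings of the byte range [first, last] on this
 * mapping.  Returns 0 if the range was mapped and has been unmapped,
 * -ENOENT if nothing was mapped. */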
int ll_teardown_mmaps(struct address_space *mapping, __u64 first,
                      __u64 last)
{
        int rc = -ENOENT;
        ENTRY;

        LASSERT(last > first);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }
#else
        spin_lock(&mapping->i_shared_lock);
        if (mapping->i_mmap != NULL) {
                rc = 0;
                teardown_vmas(mapping->i_mmap, first, last);
        }
        if (mapping->i_mmap_shared != NULL) {
                rc = 0;
                teardown_vmas(mapping->i_mmap_shared, first, last);
        }
        spin_unlock(&mapping->i_shared_lock);
#endif

        RETURN(rc);
}

static struct vm_operations_struct ll_file_vm_ops = {
        .nopage         = ll_nopage,
};

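/* mmap() entry point: use the generic file mmap path, then install our own
 * vm_operations so that page faults go through ll_nopage(). */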
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        int rc;
        ENTRY;

        rc = generic_file_mmap(file, vma);
        if (rc == 0)
                vma->vm_ops = &ll_file_vm_ops;

        RETURN(rc);
}