lustre/llite/llite_mmap.c — fs/lustre-release.git, branch b1_8 (land b1_8_gate onto b1_8, 20081218_1708)
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/stat.h>
#include <asm/uaccess.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

#define VMA_DEBUG(vma, fmt, arg...)                                          \
        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) "   \
               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end,  \
               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,               \
               vma->vm_file->f_dentry->d_inode->i_ino,                       \
               vma->vm_file->f_dentry->d_iname, ## arg);


struct ll_lock_tree_node {
        rb_node_t               lt_node;
        struct list_head        lt_locked_item;
        __u64                   lt_oid;
        ldlm_policy_data_t      lt_policy;
        struct lustre_handle    lt_lockh;
        ldlm_mode_t             lt_mode;
        struct inode           *lt_inode;
};

int lt_get_mmap_locks(struct ll_lock_tree *tree,
                      unsigned long addr, size_t count);

struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type);

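/*
 * Allocate and initialize a lock tree node describing an extent lock
 * request [start, end] of the given mode on @inode's object.  Returns the
 * new node, or ERR_PTR(-ENOMEM) if allocation fails.
 */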
struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
                                              __u64 end, ldlm_mode_t mode)
{
        struct ll_lock_tree_node *node;

        OBD_ALLOC(node, sizeof(*node));
        if (node == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        node->lt_inode = inode;
        node->lt_oid = ll_i2info(inode)->lli_smd->lsm_object_id;
        node->lt_policy.l_extent.start = start;
        node->lt_policy.l_extent.end = end;
        memset(&node->lt_lockh, 0, sizeof(node->lt_lockh));
        INIT_LIST_HEAD(&node->lt_locked_item);
        node->lt_mode = mode;

        return node;
}

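/*
 * Total order on lock tree nodes: compare by filesystem (s_dev), then by
 * object id, then by extent.  Overlapping extents on the same object
 * compare equal so that lt_insert() can merge them.
 */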
int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two)
{
        /* order by filesystem first, to avoid deadlock across multiple fs */
        if (one->lt_inode->i_sb->s_dev < two->lt_inode->i_sb->s_dev)
                return -1;
        if (one->lt_inode->i_sb->s_dev > two->lt_inode->i_sb->s_dev)
                return 1;

        if (one->lt_oid < two->lt_oid)
                return -1;
        if (one->lt_oid > two->lt_oid)
                return 1;

        if (one->lt_policy.l_extent.end < two->lt_policy.l_extent.start)
                return -1;
        if (one->lt_policy.l_extent.start > two->lt_policy.l_extent.end)
                return 1;

        return 0; /* they are the same object and overlap */
}

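/*
 * Merge @src into @dst: widen @dst's extent to cover both nodes and upgrade
 * the lock mode to PW if either node needs a write lock.
 */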
static void lt_merge(struct ll_lock_tree_node *dst,
                     struct ll_lock_tree_node *src)
{
        dst->lt_policy.l_extent.start = min(dst->lt_policy.l_extent.start,
                                            src->lt_policy.l_extent.start);
        dst->lt_policy.l_extent.end = max(dst->lt_policy.l_extent.end,
                                          src->lt_policy.l_extent.end);

        /* XXX could be a real call to the dlm to find superset modes */
        if (src->lt_mode == LCK_PW && dst->lt_mode != LCK_PW)
                dst->lt_mode = LCK_PW;
}

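/*
 * Insert @node into the red-black tree.  If it collides with an existing
 * node (same object, overlapping extent), the two are merged and the walk
 * restarts, so the tree never contains overlapping requests.
 */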
static void lt_insert(struct ll_lock_tree *tree,
                      struct ll_lock_tree_node *node)
{
        struct ll_lock_tree_node *walk;
        rb_node_t **p, *parent;
        ENTRY;

restart:
        p = &tree->lt_root.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                walk = rb_entry(parent, struct ll_lock_tree_node, lt_node);
                switch (lt_compare(node, walk)) {
                case -1:
                        p = &(*p)->rb_left;
                        break;
                case 1:
                        p = &(*p)->rb_right;
                        break;
                case 0:
                        lt_merge(node, walk);
                        rb_erase(&walk->lt_node, &tree->lt_root);
                        OBD_FREE(walk, sizeof(*walk));
                        goto restart;
                        break;
                default:
                        LBUG();
                        break;
                }
        }
        rb_link_node(&node->lt_node, parent, p);
        rb_insert_color(&node->lt_node, &tree->lt_root);
        EXIT;
}

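/*
 * Return the leftmost (smallest) node of the tree, or NULL if the tree is
 * empty.
 */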
static struct ll_lock_tree_node *lt_least_node(struct ll_lock_tree *tree)
{
        rb_node_t *rbnode;
        struct ll_lock_tree_node *node = NULL;

        for (rbnode = tree->lt_root.rb_node; rbnode != NULL;
             rbnode = rbnode->rb_left) {
                if (rbnode->rb_left == NULL) {
                        node = rb_entry(rbnode, struct ll_lock_tree_node,
                                        lt_node);
                        break;
                }
        }
        RETURN(node);
}

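/*
 * Drop every extent lock on the locked list and free all remaining tree
 * nodes.  Returns the last unlock error, or 0 on success.
 */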
int ll_tree_unlock(struct ll_lock_tree *tree)
{
        struct ll_lock_tree_node *node;
        struct list_head *pos, *n;
        struct inode *inode;
        int rc = 0;
        ENTRY;

        list_for_each_safe(pos, n, &tree->lt_locked_list) {
                node = list_entry(pos, struct ll_lock_tree_node,
                                  lt_locked_item);

                inode = node->lt_inode;
                rc = ll_extent_unlock(tree->lt_fd, inode,
                                      ll_i2info(inode)->lli_smd, node->lt_mode,
                                      &node->lt_lockh);
                if (rc != 0) {
                        /* XXX better message */
                        CERROR("couldn't unlock %d\n", rc);
                }
                list_del(&node->lt_locked_item);
                OBD_FREE(node, sizeof(*node));
        }

        while ((node = lt_least_node(tree))) {
                rb_erase(&node->lt_node, &tree->lt_root);
                OBD_FREE(node, sizeof(*node));
        }

        RETURN(rc);
}

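/*
 * Take extent locks for an iovec-based read/write: insert @first_node (the
 * extent of the file being read or written, if any), add nodes for every
 * mmapped region touched by the iovec, then acquire the locks in tree
 * (ascending) order so concurrent clients cannot deadlock against each
 * other.
 */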
int ll_tree_lock_iov(struct ll_lock_tree *tree,
                     struct ll_lock_tree_node *first_node,
                     const struct iovec *iov, unsigned long nr_segs,
                     int ast_flags)
{
        struct ll_lock_tree_node *node;
        int rc = 0;
        unsigned long seg;
        ENTRY;

        tree->lt_root.rb_node = NULL;
        INIT_LIST_HEAD(&tree->lt_locked_list);
        if (first_node != NULL)
                lt_insert(tree, first_node);

        /* To avoid a subtle deadlock case: client1 tries to read file1 into
         * mmapped file2 while, at the same time, client2 tries to read file2
         * into mmapped file1. */
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *iv = &iov[seg];
                rc = lt_get_mmap_locks(tree, (unsigned long)iv->iov_base,
                                       iv->iov_len);
                if (rc)
                        GOTO(out, rc);
        }

        while ((node = lt_least_node(tree))) {
                struct inode *inode = node->lt_inode;
                rc = ll_extent_lock(tree->lt_fd, inode,
                                    ll_i2info(inode)->lli_smd, node->lt_mode,
                                    &node->lt_policy, &node->lt_lockh,
                                    ast_flags);
                if (rc != 0)
                        GOTO(out, rc);

                rb_erase(&node->lt_node, &tree->lt_root);
                list_add_tail(&node->lt_locked_item, &tree->lt_locked_list);
        }
        RETURN(rc);
out:
        ll_tree_unlock(tree);
        RETURN(rc);
}

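/* Single-buffer convenience wrapper around ll_tree_lock_iov(). */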
int ll_tree_lock(struct ll_lock_tree *tree,
                 struct ll_lock_tree_node *first_node,
                 const char *buf, size_t count, int ast_flags)
{
        struct iovec local_iov = { .iov_base = (void __user *)buf,
                                   .iov_len = count };

        return ll_tree_lock_iov(tree, first_node, &local_iov, 1, ast_flags);
}

static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
{
        /* we only want to hold PW locks if the mmap() can generate
         * writes back to the file and that only happens in shared
         * writable vmas */
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
                return LCK_PW;
        return LCK_PR;
}

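/*
 * Build the ldlm extent policy for [addr, addr + count) by translating the
 * user address range into file offsets via the vma, rounded out to page
 * boundaries.
 */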
static void policy_from_vma(ldlm_policy_data_t *policy,
                            struct vm_area_struct *vma, unsigned long addr,
                            size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
                                 ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~CFS_PAGE_MASK;
}

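/*
 * Find the first vma in [addr, addr + count) that is a shared mapping of a
 * Lustre file (i.e. one whose nopage handler is ll_nopage), or NULL if
 * there is none.
 */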
static struct vm_area_struct *our_vma(unsigned long addr, size_t count)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        /* No MM (e.g. NFS)? Then there are no VMAs either. */
        if (!mm)
                RETURN(NULL);

        spin_lock(&mm->page_table_lock);
        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        spin_unlock(&mm->page_table_lock);
        RETURN(ret);
}

int ll_region_mapped(unsigned long addr, size_t count)
{
        return !!our_vma(addr, count);
}

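/*
 * Walk every Lustre-mmapped region overlapping the user buffer
 * [addr, addr + count) and add a lock tree node for the corresponding file
 * extent, using the lock mode implied by the vma.
 */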
int lt_get_mmap_locks(struct ll_lock_tree *tree,
                      unsigned long addr, size_t count)
{
        struct vm_area_struct *vma;
        struct ll_lock_tree_node *node;
        ldlm_policy_data_t policy;
        struct inode *inode;
        ENTRY;

        if (count == 0)
                RETURN(0);

        /* we need to look up vmas on page aligned addresses */
        count += addr & (~CFS_PAGE_MASK);
        addr &= CFS_PAGE_MASK;

        while ((vma = our_vma(addr, count)) != NULL) {
                LASSERT(vma->vm_file);

                inode = vma->vm_file->f_dentry->d_inode;
                policy_from_vma(&policy, vma, addr, count);
                node = ll_node_from_inode(inode, policy.l_extent.start,
                                          policy.l_extent.end,
                                          mode_from_vma(vma));
                if (IS_ERR(node)) {
                        CERROR("not enough mem for lock_tree_node!\n");
                        RETURN(-ENOMEM);
                }
                lt_insert(tree, node);

                if (vma->vm_end - addr >= count)
                        break;
                count -= vma->vm_end - addr;
                addr = vma->vm_end;
        }
        RETURN(0);
}
/**
 * Page fault handler.
 *
 * \param vma - virtual memory area related to the page fault
 * \param address - address at which the fault was hit
 * \param type - type of the fault
 *
 * \return allocated and filled page for the address
 * \retval NOPAGE_SIGBUS if no page exists at this address
 * \retval NOPAGE_OOM if there is no memory to allocate a new page
 */
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type)
{
        struct file *filp = vma->vm_file;
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        struct inode *inode = filp->f_dentry->d_inode;
        struct lustre_handle lockh = { 0 };
        ldlm_policy_data_t policy;
        ldlm_mode_t mode;
        struct page *page = NULL;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm;
        struct ost_lvb lvb;
        __u64 kms, old_mtime;
        unsigned long pgoff, size, rand_read, seq_read;
        int rc = 0;
        ENTRY;

        if (lli->lli_smd == NULL) {
                CERROR("No lsm on fault?\n");
                RETURN(NOPAGE_SIGBUS);
        }

        ll_clear_file_contended(inode);

        /* start and end the lock on the first and last bytes in the page */
        policy_from_vma(&policy, vma, address, CFS_PAGE_SIZE);

        CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
               vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);

        mode = mode_from_vma(vma);
        old_mtime = LTIME_S(inode->i_mtime);

        lsm = lli->lli_smd;
        rc = ll_extent_lock(fd, inode, lsm, mode, &policy,
                            &lockh, LDLM_FL_CBPENDING | LDLM_FL_NO_LRU);
        if (rc != 0)
                RETURN(NOPAGE_SIGBUS);

        if (vma->vm_flags & VM_EXEC && LTIME_S(inode->i_mtime) != old_mtime)
                CWARN("binary changed. inode %lu\n", inode->i_ino);

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;

        pgoff = ((address - vma->vm_start) >> CFS_PAGE_SHIFT) + vma->vm_pgoff;
        size = (kms + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;

        if (pgoff >= size) {
                lov_stripe_unlock(lsm);
                ll_glimpse_size(inode, LDLM_FL_BLOCK_GRANTED);
        } else {
                /* XXX change inode size without ll_inode_size_lock() held!
                 *     there is a race condition with the truncate path. (see
                 *     ll_extent_lock) */
                /* XXX i_size_write() is not used because it is not safe to
                 *     take the ll_inode_size_lock() due to a potential lock
                 *     inversion (bug 6077).  And since it's not safe to use
                 *     i_size_write() without a covering mutex we do the
                 *     assignment directly.  It is not critical that the
                 *     size be correct. */
                /* NOTE: the region is within kms and, hence, within the real
                 * file size.  We need to increase i_size to cover the read
                 * region so that generic_file_read() will do its job, but
                 * that doesn't mean the kms size is _correct_, it is only the
                 * _minimum_ size.  If someone does a stat they will get the
                 * correct size, which will always be >= the kms value here.
                 * b=11081 */
                if (i_size_read(inode) < kms) {
                        inode->i_size = kms;
                        CDEBUG(D_INODE, "ino=%lu, updating i_size %llu\n",
                               inode->i_ino, i_size_read(inode));
                }
                lov_stripe_unlock(lsm);
        }

        /* If the mapping is writeable, adjust kms to cover this page,
         * but do not extend kms beyond the actual file size.
         * policy.l_extent.end is set to the end of the page by
         * policy_from_vma().  bug 10919 */
        lov_stripe_lock(lsm);
        if (mode == LCK_PW)
                obd_adjust_kms(ll_i2obdexp(inode), lsm,
                               min_t(loff_t, policy.l_extent.end + 1,
                               i_size_read(inode)), 0);
        lov_stripe_unlock(lsm);

        /* Disable VM_SEQ_READ and use VM_RAND_READ to make sure that the
         * kernel will not read other pages not covered by ldlm in
         * filemap_nopage.  We do our readahead in ll_readpage.
         */
        rand_read = vma->vm_flags & VM_RAND_READ;
        seq_read = vma->vm_flags & VM_SEQ_READ;
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        page = filemap_nopage(vma, address, type);
        if (page != NOPAGE_SIGBUS && page != NOPAGE_OOM)
                LL_CDEBUG_PAGE(D_PAGE, page, "got addr %lu type %lx\n", address,
                               (long)type);
        else
                CDEBUG(D_PAGE, "got addr %lu type %lx - SIGBUS\n", address,
                       (long)type);

        vma->vm_flags &= ~VM_RAND_READ;
        vma->vm_flags |= (rand_read | seq_read);

        ll_extent_unlock(fd, inode, ll_i2info(inode)->lli_smd, mode, &lockh);
        RETURN(page);
}

/* To avoid cancelling the locks that cover a mmapped region under lock cache
 * pressure, we track the number of mapped vmas in lli_mmap_cnt.
 * ll_vm_open():  when the first vma is linked, split this file's locks from
 *                the lru.
 * ll_vm_close(): when the last vma is unlinked, join all of this file's locks
 *                back into the lru.
 *
 * XXX for performance we don't check whether the vma region and the lock
 *     extents actually match.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

        LASSERT(vma->vm_file);

        spin_lock(&lli->lli_lock);
        LASSERT(atomic_read(&lli->lli_mmap_cnt) >= 0);

        atomic_inc(&lli->lli_mmap_cnt);
        if (atomic_read(&lli->lli_mmap_cnt) == 1) {
                struct lov_stripe_md *lsm = lli->lli_smd;
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                int count;

                spin_unlock(&lli->lli_lock);

                if (!lsm)
                        return;
                count = obd_join_lru(sbi->ll_osc_exp, lsm, 0);
                VMA_DEBUG(vma, "split %d unused locks from lru\n", count);
        } else {
                spin_unlock(&lli->lli_lock);
        }
}

static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

        LASSERT(vma->vm_file);

        spin_lock(&lli->lli_lock);
        LASSERT(atomic_read(&lli->lli_mmap_cnt) > 0);

        atomic_dec(&lli->lli_mmap_cnt);
        if (atomic_read(&lli->lli_mmap_cnt) == 0) {
                struct lov_stripe_md *lsm = lli->lli_smd;
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                int count;

                spin_unlock(&lli->lli_lock);

                if (!lsm)
                        return;
                count = obd_join_lru(sbi->ll_osc_exp, lsm, 1);
                VMA_DEBUG(vma, "join %d unused locks to lru\n", count);
        } else {
                spin_unlock(&lli->lli_lock);
        }
}

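/*
 * On kernels where filemap_populate() is not directly usable
 * (HAVE_FILEMAP_POPULATE undefined), capture the kernel's populate method
 * from the vm_ops installed by generic_file_mmap() (see ll_file_mmap()
 * below).  ll_populate() wraps it and forces nonblocking behaviour so that
 * populate does not read pages ahead of the ldlm locks.
 */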
#ifndef HAVE_FILEMAP_POPULATE
static int (*filemap_populate)(struct vm_area_struct *area, unsigned long address,
                               unsigned long len, pgprot_t prot,
                               unsigned long pgoff, int nonblock);
#endif
static int ll_populate(struct vm_area_struct *area, unsigned long address,
                       unsigned long len, pgprot_t prot, unsigned long pgoff,
                       int nonblock)
{
        int rc = 0;
        ENTRY;

        /* always set nonblock to 1 to avoid page read-ahead */
        rc = filemap_populate(area, address, len, prot, pgoff, 1);
        RETURN(rc);
}

/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
{
        return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
}

/* XXX put nice comment here.  talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
        int rc = -ENOENT;
        ENTRY;

        LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }

        RETURN(rc);
}

static struct vm_operations_struct ll_file_vm_ops = {
        .nopage         = ll_nopage,
        .open           = ll_vm_open,
        .close          = ll_vm_close,
        .populate       = ll_populate,
};

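/*
 * mmap() entry point: let generic_file_mmap() set up the mapping, then
 * replace the vm_ops with ll_file_vm_ops and glimpse the file size so the
 * mapping sees an up-to-date i_size and mtime.
 */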
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        int rc;
        ENTRY;

        ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode), LPROC_LL_MAP, 1);
        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
#ifndef HAVE_FILEMAP_POPULATE
                if (!filemap_populate)
                        filemap_populate = vma->vm_ops->populate;
#endif
                vma->vm_ops = &ll_file_vm_ops;
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                rc = ll_glimpse_size(file->f_dentry->d_inode, 0);
        }

        RETURN(rc);
}