[fs/lustre-release.git] / lustre / llite / llite_mmap.c (branch b1_8)
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/iobuf.h>
#endif

#define DEBUG_SUBSYSTEM S_LLITE

#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

#define VMA_DEBUG(vma, fmt, arg...)                                          \
        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) "   \
               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end,  \
               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,               \
               vma->vm_file->f_dentry->d_inode->i_ino,                       \
               vma->vm_file->f_dentry->d_iname, ## arg);

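/* One node of the per-I/O lock tree: a single object extent that must be
 * locked before the I/O may touch the corresponding mmapped region. */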
struct ll_lock_tree_node {
        rb_node_t               lt_node;
        struct list_head        lt_locked_item;
        __u64                   lt_oid;
        ldlm_policy_data_t      lt_policy;
        struct lustre_handle    lt_lockh;
        ldlm_mode_t             lt_mode;
        struct inode           *lt_inode;
};

int lt_get_mmap_locks(struct ll_lock_tree *tree,
                      unsigned long addr, size_t count);

struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type);

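/* Allocate and initialize a lock tree node covering [start, end] of @inode's
 * object, to be locked with @mode.  Returns ERR_PTR(-ENOMEM) on failure. */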
struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
                                              __u64 end, ldlm_mode_t mode)
{
        struct ll_lock_tree_node *node;

        OBD_ALLOC(node, sizeof(*node));
        if (node == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        node->lt_inode = inode;
        node->lt_oid = ll_i2info(inode)->lli_smd->lsm_object_id;
        node->lt_policy.l_extent.start = start;
        node->lt_policy.l_extent.end = end;
        memset(&node->lt_lockh, 0, sizeof(node->lt_lockh));
        INIT_LIST_HEAD(&node->lt_locked_item);
        node->lt_mode = mode;

        return node;
}

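/* Total order on lock tree nodes: compare by filesystem device, then object
 * id; overlapping extents on the same object compare equal so lt_insert()
 * can merge them. */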
int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two)
{
        /* compare device numbers first to avoid deadlock between
         * multiple filesystems */
        if (one->lt_inode->i_sb->s_dev < two->lt_inode->i_sb->s_dev)
                return -1;
        if (one->lt_inode->i_sb->s_dev > two->lt_inode->i_sb->s_dev)
                return 1;

        if (one->lt_oid < two->lt_oid)
                return -1;
        if (one->lt_oid > two->lt_oid)
                return 1;

        if (one->lt_policy.l_extent.end < two->lt_policy.l_extent.start)
                return -1;
        if (one->lt_policy.l_extent.start > two->lt_policy.l_extent.end)
                return 1;

        return 0; /* they are the same object and overlap */
}

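/* Merge two overlapping nodes for the same object: widen dst's extent to
 * cover src and upgrade the mode to PW if either side requires it. */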
static void lt_merge(struct ll_lock_tree_node *dst,
                     struct ll_lock_tree_node *src)
{
        dst->lt_policy.l_extent.start = min(dst->lt_policy.l_extent.start,
                                            src->lt_policy.l_extent.start);
        dst->lt_policy.l_extent.end = max(dst->lt_policy.l_extent.end,
                                          src->lt_policy.l_extent.end);

        /* XXX could be a real call to the dlm to find superset modes */
        if (src->lt_mode == LCK_PW && dst->lt_mode != LCK_PW)
                dst->lt_mode = LCK_PW;
}

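/* Insert a node into the rbtree ordered by lt_compare().  If it overlaps an
 * existing node of the same object, the two are merged and the walk is
 * restarted, so the tree never holds overlapping extents. */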
static void lt_insert(struct ll_lock_tree *tree,
                      struct ll_lock_tree_node *node)
{
        struct ll_lock_tree_node *walk;
        rb_node_t **p, *parent;
        ENTRY;

restart:
        p = &tree->lt_root.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                walk = rb_entry(parent, struct ll_lock_tree_node, lt_node);
                switch (lt_compare(node, walk)) {
                case -1:
                        p = &(*p)->rb_left;
                        break;
                case 1:
                        p = &(*p)->rb_right;
                        break;
                case 0:
                        lt_merge(node, walk);
                        rb_erase(&walk->lt_node, &tree->lt_root);
                        OBD_FREE(walk, sizeof(*walk));
                        goto restart;
                default:
                        LBUG();
                        break;
                }
        }
        rb_link_node(&node->lt_node, parent, p);
        rb_insert_color(&node->lt_node, &tree->lt_root);
        EXIT;
}

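/* Return the left-most (smallest) node in the tree, or NULL if it is empty. */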
static struct ll_lock_tree_node *lt_least_node(struct ll_lock_tree *tree)
{
        rb_node_t *rbnode;
        struct ll_lock_tree_node *node = NULL;

        for (rbnode = tree->lt_root.rb_node; rbnode != NULL;
             rbnode = rbnode->rb_left) {
                if (rbnode->rb_left == NULL) {
                        node = rb_entry(rbnode, struct ll_lock_tree_node,
                                        lt_node);
                        break;
                }
        }
        RETURN(node);
}

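/* Drop every extent lock taken by ll_tree_lock_iov() and free all nodes,
 * including any still sitting unlocked in the rbtree. */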
int ll_tree_unlock(struct ll_lock_tree *tree)
{
        struct ll_lock_tree_node *node;
        struct list_head *pos, *n;
        struct inode *inode;
        int rc = 0;
        ENTRY;

        list_for_each_safe(pos, n, &tree->lt_locked_list) {
                node = list_entry(pos, struct ll_lock_tree_node,
                                  lt_locked_item);

                inode = node->lt_inode;
                rc = ll_extent_unlock(tree->lt_fd, inode,
                                      ll_i2info(inode)->lli_smd, node->lt_mode,
                                      &node->lt_lockh);
                if (rc != 0) {
                        /* XXX better message */
                        CERROR("couldn't unlock %d\n", rc);
                }
                list_del(&node->lt_locked_item);
                OBD_FREE(node, sizeof(*node));
        }

        while ((node = lt_least_node(tree))) {
                rb_erase(&node->lt_node, &tree->lt_root);
                OBD_FREE(node, sizeof(*node));
        }

        RETURN(rc);
}

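/* Take extent locks covering @first_node (if given) plus every mmapped
 * region touched by the iovec.  Nodes are locked in lt_compare() order to
 * avoid deadlock; on failure everything already locked is dropped again. */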
int ll_tree_lock_iov(struct ll_lock_tree *tree,
                 struct ll_lock_tree_node *first_node,
                 const struct iovec *iov, unsigned long nr_segs, int ast_flags)
{
        struct ll_lock_tree_node *node;
        int rc = 0;
        unsigned long seg;
        ENTRY;

        tree->lt_root.rb_node = NULL;
        INIT_LIST_HEAD(&tree->lt_locked_list);
        if (first_node != NULL)
                lt_insert(tree, first_node);

        /* Avoid a subtle deadlock: client1 reads file1 into a buffer mmapped
         * from file2 while, at the same time, client2 reads file2 into a
         * buffer mmapped from file1. */
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *iv = &iov[seg];
                rc = lt_get_mmap_locks(tree, (unsigned long)iv->iov_base,
                                       iv->iov_len);
                if (rc)
                        GOTO(out, rc);
        }

        while ((node = lt_least_node(tree))) {
                struct inode *inode = node->lt_inode;
                rc = ll_extent_lock(tree->lt_fd, inode,
                                    ll_i2info(inode)->lli_smd, node->lt_mode,
                                    &node->lt_policy, &node->lt_lockh,
                                    ast_flags);
                if (rc != 0)
                        GOTO(out, rc);

                rb_erase(&node->lt_node, &tree->lt_root);
                list_add_tail(&node->lt_locked_item, &tree->lt_locked_list);
        }
        RETURN(rc);
out:
        ll_tree_unlock(tree);
        RETURN(rc);
}

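/* Single-buffer convenience wrapper around ll_tree_lock_iov(). */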
int ll_tree_lock(struct ll_lock_tree *tree,
                 struct ll_lock_tree_node *first_node,
                 const char *buf, size_t count, int ast_flags)
{
        struct iovec local_iov = { .iov_base = (void __user *)buf,
                                   .iov_len = count };

        return ll_tree_lock_iov(tree, first_node, &local_iov, 1, ast_flags);
}

static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
{
        /* we only want to hold PW locks if the mmap() can generate
         * writes back to the file, and that only happens in shared
         * writable vmas */
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
                return LCK_PW;
        return LCK_PR;
}

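/* Build the ldlm extent policy for the file range backing
 * [addr, addr + count) in this vma, rounded out to page boundaries. */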
static void policy_from_vma(ldlm_policy_data_t *policy,
                            struct vm_area_struct *vma, unsigned long addr,
                            size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
                                 ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~CFS_PAGE_MASK;
}

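/* Find the first shared vma backed by Lustre (vm_ops->nopage == ll_nopage)
 * that overlaps [addr, addr + count) in the current process, or NULL. */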
static struct vm_area_struct * our_vma(unsigned long addr, size_t count)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        /* No mm (e.g. a kernel thread such as NFS)?  Then no vmas either. */
        if (!mm)
                RETURN(NULL);

        spin_lock(&mm->page_table_lock);
        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        spin_unlock(&mm->page_table_lock);
        RETURN(ret);
}

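/* Return non-zero if [addr, addr + count) overlaps a shared Lustre mapping
 * in the current process. */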
int ll_region_mapped(unsigned long addr, size_t count)
{
        return !!our_vma(addr, count);
}

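/* For every Lustre vma overlapping the user buffer [addr, addr + count),
 * add a lock tree node covering the mapped file extent (see the deadlock
 * comment in ll_tree_lock_iov()). */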
int lt_get_mmap_locks(struct ll_lock_tree *tree,
                      unsigned long addr, size_t count)
{
        struct vm_area_struct *vma;
        struct ll_lock_tree_node *node;
        ldlm_policy_data_t policy;
        struct inode *inode;
        ENTRY;

        if (count == 0)
                RETURN(0);

        /* we need to look up vmas on page aligned addresses */
        count += addr & (~CFS_PAGE_MASK);
        addr &= CFS_PAGE_MASK;

        while ((vma = our_vma(addr, count)) != NULL) {
                LASSERT(vma->vm_file);

                inode = vma->vm_file->f_dentry->d_inode;
                policy_from_vma(&policy, vma, addr, count);
                node = ll_node_from_inode(inode, policy.l_extent.start,
                                          policy.l_extent.end,
                                          mode_from_vma(vma));
                if (IS_ERR(node)) {
                        CERROR("not enough mem for lock_tree_node!\n");
                        RETURN(-ENOMEM);
                }
                lt_insert(tree, node);

                if (vma->vm_end - addr >= count)
                        break;
                count -= vma->vm_end - addr;
                addr = vma->vm_end;
        }
        RETURN(0);
}

/**
 * Page fault handler.
 *
 * \param vma - virtual memory area the fault occurred in
 * \param address - faulting address
 * \param type - type of the fault
 *
 * \return allocated and filled-in page for the address
 * \retval NOPAGE_SIGBUS if no page exists at this address
 * \retval NOPAGE_OOM if out of memory while allocating a new page
 */
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type)
{
        struct file *filp = vma->vm_file;
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        struct inode *inode = filp->f_dentry->d_inode;
        struct lustre_handle lockh = { 0 };
        ldlm_policy_data_t policy;
        ldlm_mode_t mode;
        struct page *page = NULL;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm;
        struct ost_lvb lvb;
        __u64 kms, old_mtime;
        unsigned long pgoff, size, rand_read, seq_read;
        int rc = 0;
        ENTRY;

        if (lli->lli_smd == NULL) {
                CERROR("No lsm on fault?\n");
                RETURN(NOPAGE_SIGBUS);
        }

        ll_clear_file_contended(inode);

        /* start and end the lock on the first and last bytes in the page */
        policy_from_vma(&policy, vma, address, CFS_PAGE_SIZE);

        CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
               vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);

        mode = mode_from_vma(vma);
        old_mtime = LTIME_S(inode->i_mtime);

        lsm = lli->lli_smd;
        rc = ll_extent_lock(fd, inode, lsm, mode, &policy,
                            &lockh, LDLM_FL_CBPENDING | LDLM_FL_NO_LRU);
        if (rc != 0)
                RETURN(NOPAGE_SIGBUS);

        if (vma->vm_flags & VM_EXEC && LTIME_S(inode->i_mtime) != old_mtime)
                CWARN("binary changed. inode %lu\n", inode->i_ino);

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;

        pgoff = ((address - vma->vm_start) >> CFS_PAGE_SHIFT) + vma->vm_pgoff;
        size = (kms + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;

        if (pgoff >= size) {
                lov_stripe_unlock(lsm);
                ll_glimpse_size(inode, LDLM_FL_BLOCK_GRANTED);
        } else {
                /* XXX change inode size without ll_inode_size_lock() held!
                 *     there is a race condition with truncate path. (see
                 *     ll_extent_lock) */
                /* XXX i_size_write() is not used because it is not safe to
                 *     take the ll_inode_size_lock() due to a potential lock
                 *     inversion (bug 6077).  And since it's not safe to use
                 *     i_size_write() without a covering mutex we do the
                 *     assignment directly.  It is not critical that the
                 *     size be correct. */
                /* NOTE: region is within kms and, hence, within real file size (A).
                 * We need to increase i_size to cover the read region so that
                 * generic_file_read() will do its job, but that doesn't mean
                 * the kms size is _correct_, it is only the _minimum_ size.
                 * If someone does a stat they will get the correct size which
                 * will always be >= the kms value here.  b=11081 */
                if (i_size_read(inode) < kms) {
                        inode->i_size = kms;
                        CDEBUG(D_INODE, "ino=%lu, updating i_size %llu\n",
                               inode->i_ino, i_size_read(inode));
                }
                lov_stripe_unlock(lsm);
        }

        /* If mapping is writeable, adjust kms to cover this page,
         * but do not extend kms beyond actual file size.
         * policy.l_extent.end is set to the end of the page by policy_from_vma
         * bug 10919 */
        lov_stripe_lock(lsm);
        if (mode == LCK_PW)
                obd_adjust_kms(ll_i2obdexp(inode), lsm,
                               min_t(loff_t, policy.l_extent.end + 1,
                               i_size_read(inode)), 0);
        lov_stripe_unlock(lsm);

        /* disable VM_SEQ_READ and use VM_RAND_READ to make sure that
         * the kernel will not read other pages not covered by ldlm in
         * filemap_nopage.  We do our own readahead in ll_readpage.
         */
        rand_read = vma->vm_flags & VM_RAND_READ;
        seq_read = vma->vm_flags & VM_SEQ_READ;
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        page = filemap_nopage(vma, address, type);
        if (page != NOPAGE_SIGBUS && page != NOPAGE_OOM)
                LL_CDEBUG_PAGE(D_PAGE, page, "got addr %lu type %lx\n", address,
                               (long)type);
        else
                CDEBUG(D_PAGE, "got addr %lu type %lx - SIGBUS\n", address,
                       (long)type);

        vma->vm_flags &= ~VM_RAND_READ;
        vma->vm_flags |= (rand_read | seq_read);

        ll_extent_unlock(fd, inode, ll_i2info(inode)->lli_smd, mode, &lockh);
        RETURN(page);
}

/* To avoid cancelling the locks that cover an mmapped region under lock
 * cache pressure, we track the number of mapped vmas in lli_mmap_cnt.
 * ll_vm_open():  when the first vma is linked, split this file's locks
 *                from the lru.
 * ll_vm_close(): when the last vma is unlinked, join all this file's locks
 *                back to the lru.
 *
 * XXX for performance we don't check whether the vma and lock regions
 *     actually overlap.
 */
static void ll_vm_open(struct vm_area_struct * vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

        LASSERT(vma->vm_file);

        spin_lock(&lli->lli_lock);
        LASSERT(atomic_read(&lli->lli_mmap_cnt) >= 0);

        atomic_inc(&lli->lli_mmap_cnt);
        if (atomic_read(&lli->lli_mmap_cnt) == 1) {
                struct lov_stripe_md *lsm = lli->lli_smd;
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                int count;

                spin_unlock(&lli->lli_lock);

                if (!lsm)
                        return;
                count = obd_join_lru(sbi->ll_osc_exp, lsm, 0);
                VMA_DEBUG(vma, "split %d unused locks from lru\n", count);
        } else {
                spin_unlock(&lli->lli_lock);
        }
}

static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

        LASSERT(vma->vm_file);

        spin_lock(&lli->lli_lock);
        LASSERT(atomic_read(&lli->lli_mmap_cnt) > 0);

        atomic_dec(&lli->lli_mmap_cnt);
        if (atomic_read(&lli->lli_mmap_cnt) == 0) {
                struct lov_stripe_md *lsm = lli->lli_smd;
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                int count;

                spin_unlock(&lli->lli_lock);

                if (!lsm)
                        return;
                count = obd_join_lru(sbi->ll_osc_exp, lsm, 1);
                VMA_DEBUG(vma, "join %d unused locks to lru\n", count);
        } else {
                spin_unlock(&lli->lli_lock);
        }
}

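/* On kernels that do not export filemap_populate(), ll_file_mmap() saves the
 * populate method set up by generic_file_mmap() in this pointer.
 * ll_populate() wraps it and forces nonblock so no readahead is triggered
 * here; llite does its own readahead in ll_readpage() under ldlm locks. */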
#ifndef HAVE_FILEMAP_POPULATE
static int (*filemap_populate)(struct vm_area_struct *area,
                               unsigned long address, unsigned long len,
                               pgprot_t prot, unsigned long pgoff,
                               int nonblock);
#endif
static int ll_populate(struct vm_area_struct *area, unsigned long address,
                       unsigned long len, pgprot_t prot, unsigned long pgoff,
                       int nonblock)
{
        int rc = 0;
        ENTRY;

        /* always set nonblock to 1 to avoid page readahead */
        rc = filemap_populate(area, address, len, prot, pgoff, 1);
        RETURN(rc);
}

/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
{
        return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
}

/* XXX put nice comment here.  talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte */
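/* Unmap the pages of @mapping that cover the byte range [first, last] from
 * all user mappings.  Returns 0 if the file was mapped at all, -ENOENT
 * otherwise. */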
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
        int rc = -ENOENT;
        ENTRY;

        LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }

        RETURN(rc);
}

static struct vm_operations_struct ll_file_vm_ops = {
        .nopage         = ll_nopage,
        .open           = ll_vm_open,
        .close          = ll_vm_close,
        .populate       = ll_populate,
};

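/* ->mmap() method: let generic_file_mmap() do the setup, then install
 * ll_file_vm_ops so faults, vma open/close and populate go through llite,
 * and refresh the inode's size and mtime with a glimpse. */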
int ll_file_mmap(struct file * file, struct vm_area_struct * vma)
{
        int rc;
        ENTRY;

        ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode), LPROC_LL_MAP, 1);
        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
#if !defined(HAVE_FILEMAP_POPULATE) && \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
                if (!filemap_populate)
                        filemap_populate = vma->vm_ops->populate;
#endif
                vma->vm_ops = &ll_file_vm_ops;
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                rc = ll_glimpse_size(file->f_dentry->d_inode, 0);
        }

        RETURN(rc);
}