/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>
#define DEBUG_SUBSYSTEM S_LLITE

#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>
#define VMA_DEBUG(vma, fmt, arg...)                                          \
        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) "   \
               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end,  \
               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,               \
               vma->vm_file->f_dentry->d_inode->i_ino,                       \
               vma->vm_file->f_dentry->d_iname, ## arg)
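
/* One extent lock that an I/O needs to take, keyed for lt_compare() by
 * (filesystem, object id, extent).  A node waits in the rb-tree via
 * lt_node until it is granted, then moves to the tree's locked list via
 * lt_locked_item. */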
struct ll_lock_tree_node {
        rb_node_t               lt_node;
        struct list_head        lt_locked_item;
        __u64                   lt_oid;
        ldlm_policy_data_t      lt_policy;
        struct lustre_handle    lt_lockh;
        ldlm_mode_t             lt_mode;
        struct inode           *lt_inode;
};
int lt_get_mmap_locks(struct ll_lock_tree *tree,
                      unsigned long addr, size_t count);

static struct vm_operations_struct ll_file_vm_ops;
struct ll_lock_tree_node *ll_node_from_inode(struct inode *inode, __u64 start,
                                             __u64 end, ldlm_mode_t mode)
{
        struct ll_lock_tree_node *node;

        OBD_ALLOC(node, sizeof(*node));
        if (node == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        node->lt_inode = inode;
        node->lt_oid = ll_i2info(inode)->lli_smd->lsm_object_id;
        node->lt_policy.l_extent.start = start;
        node->lt_policy.l_extent.end = end;
        memset(&node->lt_lockh, 0, sizeof(node->lt_lockh));
        INIT_LIST_HEAD(&node->lt_locked_item);
        node->lt_mode = mode;

        return node;
}
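
/* Impose a total order on lock nodes: by filesystem, then object id,
 * then extent.  Every client acquires its locks in this order, which
 * prevents lock-ordering deadlocks between clients; overlapping extents
 * on the same object compare equal and get merged by lt_insert(). */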
int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two)
{
        /* sort by filesystem first, to avoid deadlocks across multiple fs */
        if (one->lt_inode->i_sb->s_dev < two->lt_inode->i_sb->s_dev)
                return -1;
        if (one->lt_inode->i_sb->s_dev > two->lt_inode->i_sb->s_dev)
                return 1;

        if (one->lt_oid < two->lt_oid)
                return -1;
        if (one->lt_oid > two->lt_oid)
                return 1;

        if (one->lt_policy.l_extent.end < two->lt_policy.l_extent.start)
                return -1;
        if (one->lt_policy.l_extent.start > two->lt_policy.l_extent.end)
                return 1;

        return 0; /* they are the same object and overlap */
}
static void lt_merge(struct ll_lock_tree_node *dst,
                     struct ll_lock_tree_node *src)
{
        dst->lt_policy.l_extent.start = min(dst->lt_policy.l_extent.start,
                                            src->lt_policy.l_extent.start);
        dst->lt_policy.l_extent.end = max(dst->lt_policy.l_extent.end,
                                          src->lt_policy.l_extent.end);

        /* XXX could be a real call to the dlm to find superset modes */
        if (src->lt_mode == LCK_PW && dst->lt_mode != LCK_PW)
                dst->lt_mode = LCK_PW;
}
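
/* Insert a node into the tree, merging it with any overlapping nodes of
 * the same object (extent union, strongest lock mode) so the tree only
 * ever holds disjoint extents. */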
static void lt_insert(struct ll_lock_tree *tree,
                      struct ll_lock_tree_node *node)
{
        struct ll_lock_tree_node *walk;
        rb_node_t **p, *parent;
        ENTRY;

restart:
        p = &tree->lt_root.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                walk = rb_entry(parent, struct ll_lock_tree_node, lt_node);
                switch (lt_compare(node, walk)) {
                case -1:
                        p = &(*p)->rb_left;
                        break;
                case 1:
                        p = &(*p)->rb_right;
                        break;
                default: /* same object, overlapping: absorb walk and retry */
                        lt_merge(node, walk);
                        rb_erase(&walk->lt_node, &tree->lt_root);
                        OBD_FREE(walk, sizeof(*walk));
                        goto restart;
                }
        }
        rb_link_node(&node->lt_node, parent, p);
        rb_insert_color(&node->lt_node, &tree->lt_root);
        EXIT;
}
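
/* Return the leftmost (lowest ordered) node in the tree, or NULL if the
 * tree is empty. */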
static struct ll_lock_tree_node *lt_least_node(struct ll_lock_tree *tree)
{
        rb_node_t *rbnode;
        struct ll_lock_tree_node *node = NULL;

        for (rbnode = tree->lt_root.rb_node; rbnode != NULL;
             rbnode = rbnode->rb_left) {
                if (rbnode->rb_left == NULL) {
                        node = rb_entry(rbnode, struct ll_lock_tree_node,
                                        lt_node);
                        break;
                }
        }
        return node;
}
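
/* Release every granted lock on the tree's locked list and free any
 * nodes still waiting in the rb-tree, e.g. after a failed
 * ll_tree_lock_iov(). */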
int ll_tree_unlock(struct ll_lock_tree *tree)
{
        struct ll_lock_tree_node *node;
        struct list_head *pos, *n;
        struct inode *inode;
        int rc = 0;
        ENTRY;

        list_for_each_safe(pos, n, &tree->lt_locked_list) {
                node = list_entry(pos, struct ll_lock_tree_node,
                                  lt_locked_item);

                inode = node->lt_inode;
                rc = ll_extent_unlock(tree->lt_fd, inode,
                                      ll_i2info(inode)->lli_smd, node->lt_mode,
                                      &node->lt_lockh);
                if (rc != 0) {
                        /* XXX better message */
                        CERROR("couldn't unlock %d\n", rc);
                }
                list_del(&node->lt_locked_item);
                OBD_FREE(node, sizeof(*node));
        }

        while ((node = lt_least_node(tree))) {
                rb_erase(&node->lt_node, &tree->lt_root);
                OBD_FREE(node, sizeof(*node));
        }

        RETURN(rc);
}
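
/* Gather and take, in lt_compare() order, all extent locks needed for
 * an iovec I/O, including locks for any Lustre-mmapped regions the
 * iovecs touch.  On error every lock taken so far is dropped. */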
int ll_tree_lock_iov(struct ll_lock_tree *tree,
                     struct ll_lock_tree_node *first_node,
                     const struct iovec *iov, unsigned long nr_segs,
                     int ast_flags)
{
        struct ll_lock_tree_node *node;
        unsigned long seg;
        int rc = 0;
        ENTRY;

        tree->lt_root.rb_node = NULL;
        INIT_LIST_HEAD(&tree->lt_locked_list);
        if (first_node != NULL)
                lt_insert(tree, first_node);

        /* Queue lock nodes for the mmapped regions the iovecs touch, to
         * avoid a subtle deadlock: client1 reads file1 into a buffer
         * mmapped from file2 while, at the same time, client2 reads
         * file2 into a buffer mmapped from file1.  Sorting the nodes in
         * the tree gives both clients the same lock ordering. */
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *iv = &iov[seg];

                rc = lt_get_mmap_locks(tree, (unsigned long)iv->iov_base,
                                       iv->iov_len);
                if (rc)
                        GOTO(out, rc);
        }

        while ((node = lt_least_node(tree))) {
                struct inode *inode = node->lt_inode;

                rc = ll_extent_lock(tree->lt_fd, inode,
                                    ll_i2info(inode)->lli_smd, node->lt_mode,
                                    &node->lt_policy, &node->lt_lockh,
                                    ast_flags);
                if (rc)
                        GOTO(out, rc);

                rb_erase(&node->lt_node, &tree->lt_root);
                list_add_tail(&node->lt_locked_item, &tree->lt_locked_list);
        }
        RETURN(rc);
out:
        ll_tree_unlock(tree);
        RETURN(rc);
}
int ll_tree_lock(struct ll_lock_tree *tree,
                 struct ll_lock_tree_node *first_node,
                 const char *buf, size_t count, int ast_flags)
{
        struct iovec local_iov = { .iov_base = (void __user *)buf,
                                   .iov_len = count };

        return ll_tree_lock_iov(tree, first_node, &local_iov, 1, ast_flags);
}
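
/*
 * A minimal sketch of how a caller is expected to drive the tree-lock
 * API (the real callers live in llite's read/write paths and differ in
 * detail; "inode", "fd", "start", "end", "buf" and "count" below are
 * placeholders, not names from this file):
 *
 *      struct ll_lock_tree tree = { .lt_fd = fd };
 *      struct ll_lock_tree_node *node;
 *      int rc;
 *
 *      node = ll_node_from_inode(inode, start, end, LCK_PR);
 *      if (IS_ERR(node))
 *              return PTR_ERR(node);
 *      rc = ll_tree_lock(&tree, node, buf, count, 0);
 *      if (rc)
 *              return rc;
 *      ... do the I/O under the granted extent locks ...
 *      ll_tree_unlock(&tree);
 */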
static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
{
        /* we only want to hold PW locks if the mmap() can generate
         * writes back to the file and that only happens in shared
         * writable vmas */
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
                return LCK_PW;
        return LCK_PR;
}
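
/* Build an extent covering "count" bytes starting at file page "pgoff",
 * rounded out to full pages.  E.g. with 4K pages, pgoff=3 and
 * count=CFS_PAGE_SIZE give the byte extent [12288, 16383]. */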
static void policy_from_vma_pgoff(ldlm_policy_data_t *policy,
                                  struct vm_area_struct *vma,
                                  __u64 pgoff, size_t count)
{
        policy->l_extent.start = pgoff << CFS_PAGE_SHIFT;
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~CFS_PAGE_MASK;
}
static void policy_from_vma(ldlm_policy_data_t *policy,
                            struct vm_area_struct *vma, unsigned long addr,
                            size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
                                 ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~CFS_PAGE_MASK;
}
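
/* Return the first vma in the current process overlapping
 * [addr, addr + count) that is a shared mapping of a Lustre file
 * (i.e. uses ll_file_vm_ops), or NULL if there is none. */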
static struct vm_area_struct *our_vma(unsigned long addr, size_t count)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        /* no mm (e.g. a kernel thread doing NFS export)? then no vmas */
        if (!mm)
                RETURN(NULL);

        spin_lock(&mm->page_table_lock);
        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count);
             vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        spin_unlock(&mm->page_table_lock);
        RETURN(ret);
}
int ll_region_mapped(unsigned long addr, size_t count)
{
        return !!our_vma(addr, count);
}
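
/* For each Lustre-mmapped vma overlapped by the user buffer
 * [addr, addr + count), queue a lock node for the file region behind
 * that vma so the I/O also takes locks on the mapped files. */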
int lt_get_mmap_locks(struct ll_lock_tree *tree,
                      unsigned long addr, size_t count)
{
        struct vm_area_struct *vma;
        struct ll_lock_tree_node *node;
        ldlm_policy_data_t policy;
        struct inode *inode;
        ENTRY;

        if (count == 0)
                RETURN(0);

        /* we need to look up vmas on page aligned addresses */
        count += addr & ~CFS_PAGE_MASK;
        addr &= CFS_PAGE_MASK;

        while ((vma = our_vma(addr, count)) != NULL) {
                LASSERT(vma->vm_file);

                inode = vma->vm_file->f_dentry->d_inode;
                policy_from_vma(&policy, vma, addr, count);
                node = ll_node_from_inode(inode, policy.l_extent.start,
                                          policy.l_extent.end,
                                          mode_from_vma(vma));
                if (IS_ERR(node)) {
                        CERROR("not enough mem for lock_tree_node!\n");
                        RETURN(-ENOMEM);
                }
                lt_insert(tree, node);

                if (vma->vm_end - addr >= count)
                        break;
                count -= vma->vm_end - addr;
                addr = vma->vm_end;
        }
        RETURN(0);
}
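
/* Take a dlm extent lock covering the page at "pgoff" before the fault
 * is satisfied, pull an up-to-date size from the OSTs so the generic
 * fault path sees a large enough i_size, and temporarily switch the vma
 * to VM_RAND_READ so the kernel does not touch neighbouring pages the
 * lock does not cover.  Returns 1 on success and 0 on failure;
 * *save_flags holds the readahead flags to restore afterwards. */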
static int ll_get_extent_lock(struct vm_area_struct *vma, unsigned long pgoff,
                              int *save_flags, struct lustre_handle *lockh)
{
        struct file *filp = vma->vm_file;
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        struct inode *inode = filp->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ldlm_policy_data_t policy;
        ldlm_mode_t mode;
        struct ost_lvb lvb;
        __u64 kms, old_mtime;
        unsigned long size;
        ENTRY;

        if (lli->lli_smd == NULL) {
                CERROR("No lsm on fault?\n");
                RETURN(0);
        }

        ll_clear_file_contended(inode);

        /* start and end the lock on the first and last bytes in the page */
        policy_from_vma_pgoff(&policy, vma, pgoff, CFS_PAGE_SIZE);

        CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
               vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);

        mode = mode_from_vma(vma);
        old_mtime = LTIME_S(inode->i_mtime);

        if (ll_extent_lock(fd, inode, lli->lli_smd, mode, &policy,
                           lockh, LDLM_FL_CBPENDING | LDLM_FL_NO_LRU) != 0)
                RETURN(0);

        if (vma->vm_flags & VM_EXEC && LTIME_S(inode->i_mtime) != old_mtime)
                CWARN("binary changed. inode %lu\n", inode->i_ino);

        lov_stripe_lock(lli->lli_smd);
        inode_init_lvb(inode, &lvb);
        if (obd_merge_lvb(ll_i2obdexp(inode), lli->lli_smd, &lvb, 1)) {
                lov_stripe_unlock(lli->lli_smd);
                /* drop the lock taken above instead of leaking it */
                ll_extent_unlock(fd, inode, lli->lli_smd, mode, lockh);
                RETURN(0);
        }
        kms = lvb.lvb_size;

        size = (kms + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
        CDEBUG(D_INFO, "Kms %lu - %lu\n", size, pgoff);
        if (pgoff >= size) {
                /* the fault lies beyond the known minimum size; ask the
                 * OSTs for the authoritative file size */
                lov_stripe_unlock(lli->lli_smd);
                ll_glimpse_size(inode, LDLM_FL_BLOCK_GRANTED);
                lov_stripe_lock(lli->lli_smd);
        } else {
                /* XXX change inode size without ll_inode_size_lock() held!
                 *     there is a race condition with truncate path. (see
                 *     ll_extent_lock) */
                /* XXX i_size_write() is not used because it is not safe to
                 *     take the ll_inode_size_lock() due to a potential lock
                 *     inversion (bug 6077).  And since it's not safe to use
                 *     i_size_write() without a covering mutex we do the
                 *     assignment directly.  It is not critical that the
                 *     size be correct. */
                /* NOTE: region is within kms and, hence, within the real
                 * file size (A).  We need to increase i_size to cover the
                 * read region so that generic_file_read() will do its job,
                 * but that doesn't mean the kms size is _correct_, it is
                 * only the _minimum_ size.  If someone does a stat they
                 * will get the correct size, which will always be >= the
                 * kms value here.  b=11081 */
                if (i_size_read(inode) < kms) {
                        inode->i_size = kms;
                        CDEBUG(D_INODE, "ino=%lu, updating i_size %llu\n",
                               inode->i_ino, i_size_read(inode));
                }
        }

        /* If the mapping is writeable, adjust kms to cover this page,
         * but do not extend kms beyond the actual file size.
         * policy.l_extent.end is set to the end of the page by
         * policy_from_vma_pgoff() above. */
        obd_adjust_kms(ll_i2obdexp(inode), lli->lli_smd,
                       min_t(loff_t, policy.l_extent.end + 1,
                             i_size_read(inode)), 0);
        lov_stripe_unlock(lli->lli_smd);

        /* disable VM_SEQ_READ and use VM_RAND_READ to make sure that
         * the kernel will not read other pages not covered by ldlm in
         * filemap_nopage; we do our own readahead in ll_readpage */
        *save_flags = vma->vm_flags & (VM_RAND_READ | VM_SEQ_READ);
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        RETURN(1);
}
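
/* Undo ll_get_extent_lock(): restore the vma's saved readahead flags
 * and drop the extent lock taken for the fault. */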
static void ll_put_extent_lock(struct vm_area_struct *vma, int save_flags,
                               struct lustre_handle *lockh)
{
        struct file *filp = vma->vm_file;
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        struct inode *inode = filp->f_dentry->d_inode;
        ldlm_mode_t mode;

        mode = mode_from_vma(vma);
        vma->vm_flags &= ~(VM_RAND_READ | VM_SEQ_READ);
        vma->vm_flags |= save_flags;

        ll_extent_unlock(fd, inode, ll_i2info(inode)->lli_smd, mode, lockh);
}
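
/* Find the lock handle covering [start, end] for the current thread's
 * I/O: the single handle for fastlock-style I/O, or a matching node on
 * the tree's locked list for treelock-style I/O.  Returns NULL if no
 * covering lock is found. */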
struct lustre_handle *ltd2lockh(struct ll_thread_data *ltd,
                                __u64 start, __u64 end)
{
        ENTRY;

        switch (ltd->lock_style) {
        case LL_LOCK_STYLE_FASTLOCK:
                RETURN(&ltd->u.lockh);
        case LL_LOCK_STYLE_TREELOCK: {
                struct ll_lock_tree_node *node;

                if (ltd->tree_list == NULL)
                        ltd->tree_list = &ltd->u.tree.lt_locked_list;

                list_for_each_entry(node, ltd->tree_list, lt_locked_item) {
                        if (node->lt_policy.l_extent.start <= start &&
                            node->lt_policy.l_extent.end >= end) {
                                /* resume the next search from here */
                                ltd->tree_list = node->lt_locked_item.prev;
                                RETURN(&node->lt_lockh);
                        }
                }
                break;
        }
        default:
                break;
        }

        RETURN(NULL);
}
#ifndef HAVE_VM_OP_FAULT
/**
 * Page fault handler for kernels without the ->fault() method.
 *
 * \param vma - virtual memory area to which the fault applies
 * \param address - the faulting address
 * \param type - fault type, filled in by filemap_nopage()
 *
 * \return allocated and up-to-date page for the address
 * \retval NOPAGE_SIGBUS if no page exists at this address
 * \retval NOPAGE_OOM if out of memory while allocating a page
 */
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type)
{
        struct lustre_handle lockh = { 0 };
        struct page *page;
        unsigned long pgoff;
        int save_flags = 0;
        ENTRY;

        pgoff = ((address - vma->vm_start) >> CFS_PAGE_SHIFT) + vma->vm_pgoff;
        if (!ll_get_extent_lock(vma, pgoff, &save_flags, &lockh))
                RETURN(NOPAGE_SIGBUS);

        page = filemap_nopage(vma, address, type);
        if (page != NOPAGE_SIGBUS && page != NOPAGE_OOM)
                LL_CDEBUG_PAGE(D_PAGE, page, "got addr %lu type %lx\n",
                               address, (long)type);
        else
                CDEBUG(D_PAGE, "got addr %lu type %lx - SIGBUS\n",
                       address, (long)type);

        ll_put_extent_lock(vma, save_flags, &lockh);

        RETURN(page);
}
#else
/**
 * Page fault handler for kernels with the ->fault() method.
 *
 * \param vma - virtual memory area to which the fault applies
 * \param vmf - fault descriptor holding the faulting address, the page
 *              offset and, on success, the resulting page
 *
 * \return the VM_FAULT_xxx result of filemap_fault()
 * \retval VM_FAULT_SIGBUS if the extent lock could not be taken
 */
int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct lustre_handle lockh = { 0 };
        int save_flags = 0;
        int rc;
        ENTRY;

        if (!ll_get_extent_lock(vma, vmf->pgoff, &save_flags, &lockh))
                RETURN(VM_FAULT_SIGBUS);

        rc = filemap_fault(vma, vmf);
        if (vmf->page)
                LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
                               vmf->virtual_address);
        else
                CDEBUG(D_PAGE, "got addr %p - SIGBUS\n",
                       vmf->virtual_address);

        ll_put_extent_lock(vma, save_flags, &lockh);

        RETURN(rc);
}
#endif
/* To keep lock-cache pressure from cancelling the dlm locks that cover
 * an mmapped region, we track the number of mapped vmas in lli_mmap_cnt:
 * ll_vm_open(): when the first vma is linked, split this file's locks
 *               off the lru.
 * ll_vm_close(): when the last vma is unlinked, join all of this file's
 *                locks back onto the lru.
 *
 * XXX for performance we don't check whether the vma and the lock extent
 *     actually overlap. */
static void ll_vm_open(struct vm_area_struct *vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

        LASSERT(vma->vm_file);

        spin_lock(&lli->lli_lock);
        LASSERT(atomic_read(&lli->lli_mmap_cnt) >= 0);

        atomic_inc(&lli->lli_mmap_cnt);
        if (atomic_read(&lli->lli_mmap_cnt) == 1) {
                struct lov_stripe_md *lsm = lli->lli_smd;
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                int count;

                spin_unlock(&lli->lli_lock);

                if (!lsm)
                        return;
                count = obd_join_lru(sbi->ll_osc_exp, lsm, 0);
                VMA_DEBUG(vma, "split %d unused locks from lru\n", count);
        } else {
                spin_unlock(&lli->lli_lock);
        }
        EXIT;
}
static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ll_inode_info *lli = ll_i2info(inode);
        ENTRY;

        LASSERT(vma->vm_file);

        spin_lock(&lli->lli_lock);
        LASSERT(atomic_read(&lli->lli_mmap_cnt) > 0);

        atomic_dec(&lli->lli_mmap_cnt);
        if (atomic_read(&lli->lli_mmap_cnt) == 0) {
                struct lov_stripe_md *lsm = lli->lli_smd;
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                int count;

                spin_unlock(&lli->lli_lock);

                if (!lsm)
                        return;
                count = obd_join_lru(sbi->ll_osc_exp, lsm, 1);
                VMA_DEBUG(vma, "join %d unused locks to lru\n", count);
        } else {
                spin_unlock(&lli->lli_lock);
        }
        EXIT;
}
#ifndef HAVE_VM_OP_FAULT
#ifndef HAVE_FILEMAP_POPULATE
static int (*filemap_populate)(struct vm_area_struct *area,
                               unsigned long address, unsigned long len,
                               pgprot_t prot, unsigned long pgoff,
                               int nonblock);
#endif
static int ll_populate(struct vm_area_struct *area, unsigned long address,
                       unsigned long len, pgprot_t prot, unsigned long pgoff,
                       int nonblock)
{
        int rc = 0;
        ENTRY;

        /* always set nonblock to avoid triggering page read-ahead */
        rc = filemap_populate(area, address, len, prot, pgoff, 1);

        RETURN(rc);
}
#endif
/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma,
                                         __u64 byte)
{
        return vma->vm_start +
               (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
}
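
/* Unmap the ptes covering [first, last] in every mapping of this file
 * so that later accesses refault and re-take extent locks. */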
/* XXX talk about __free_pte -> dirty pages and nopage's reference
 * passing to the pte */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
        int rc = -ENOENT;
        ENTRY;

        LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }

        RETURN(rc);
}
static struct vm_operations_struct ll_file_vm_ops = {
        .open     = ll_vm_open,
        .close    = ll_vm_close,
#ifdef HAVE_VM_OP_FAULT
        .fault    = ll_fault,
#else
        .nopage   = ll_nopage,
        .populate = ll_populate,
#endif
};
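
/* mmap() entry point: let generic_file_mmap() set up the mapping, then
 * substitute ll_file_vm_ops so faults, opens and closes of the vma go
 * through llite, and glimpse the file size so it is current. */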
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        int rc;
        ENTRY;

        ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode), LPROC_LL_MAP, 1);
        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
#if !defined(HAVE_FILEMAP_POPULATE) && !defined(HAVE_VM_OP_FAULT)
                if (!filemap_populate)
                        filemap_populate = vma->vm_ops->populate;
#endif
                vma->vm_ops = &ll_file_vm_ops;
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                rc = ll_glimpse_size(file->f_dentry->d_inode, 0);
        }

        RETURN(rc);
}