src needs to be in the options list if it is going to be available for use.
[fs/lustre-release.git] / lustre / llite / llite_mmap.c
index 68af4ac..5fbf6f0 100644
 #include <linux/iobuf.h>
 #endif
 
+#include <linux/pagevec.h>
+
 #define DEBUG_SUBSYSTEM S_LLITE
 
 #include <linux/lustre_mds.h>
 #include <linux/lustre_lite.h>
+#include <linux/lustre_audit.h>
 #include "llite_internal.h"
 #include <linux/lustre_compat25.h>
 
-
-struct ll_lock_tree_node {
-        rb_node_t               lt_node;
-        struct list_head        lt_locked_item;
-        __u64                   lt_oid;
-        ldlm_policy_data_t      lt_policy;
-        struct lustre_handle    lt_lockh;
-        ldlm_mode_t             lt_mode;
-};
-
 __u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
 int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
                       unsigned long addr, size_t count);
@@ -91,6 +84,10 @@ struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
 
 int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two)
 {
+        /* XXX remove this assert when we really want to use this function
+         * to compare regions of different files */
+        LASSERT(one->lt_oid == two->lt_oid);
+
         if ( one->lt_oid < two->lt_oid)
                 return -1;
         if ( one->lt_oid > two->lt_oid)
@@ -198,6 +195,7 @@ int ll_tree_unlock(struct ll_lock_tree *tree, struct inode *inode)
 
         RETURN(rc);
 }
+
 int ll_tree_lock(struct ll_lock_tree *tree,
                  struct ll_lock_tree_node *first_node, struct inode *inode,
                  const char *buf, size_t count, int ast_flags)
@@ -211,6 +209,8 @@ int ll_tree_lock(struct ll_lock_tree *tree,
         if (first_node != NULL)
                 lt_insert(tree, first_node);
 
+        /* order the locking; the only case we need to worry about is the
+         * double lock, where the user buffer is itself mapped to this file. */
         if (mapping_mapped(inode->i_mapping)) {
                 rc = lt_get_mmap_locks(tree, inode, (unsigned long)buf, count);
                 if (rc)
@@ -236,7 +236,7 @@ int ll_tree_lock(struct ll_lock_tree *tree,
         RETURN(rc);
 out:
         ll_tree_unlock(tree, inode);
-        RETURN(rc);
+        return rc;
 }
 
 static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
@@ -258,7 +258,9 @@ static void policy_from_vma(ldlm_policy_data_t *policy,
         policy->l_extent.end = (policy->l_extent.start + count - 1) |
                                (PAGE_CACHE_SIZE - 1);
 }
-static struct vm_area_struct * our_vma(unsigned long addr, size_t count)
+
+static struct vm_area_struct *our_vma(unsigned long addr, size_t count,
+                                       struct inode *inode)
 {
         struct mm_struct *mm = current->mm;
         struct vm_area_struct *vma, *ret = NULL;
@@ -267,7 +269,8 @@ static struct vm_area_struct * our_vma(unsigned long addr, size_t count)
         spin_lock(&mm->page_table_lock);
         for(vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
-                if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage) {
+                if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage &&
+                    vma->vm_file && vma->vm_file->f_dentry->d_inode == inode) {
                         ret = vma;
                         break;
                 }
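The inode check above, together with the ordering comment added in ll_tree_lock(), only matters when the user buffer handed to read() or write() is itself a mapping of the very same file: that is the double-lock case the lock tree has to resolve. A user-space sketch of that scenario follows; the path and length are illustrative only and are not part of the patch.

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        /* hypothetical file on a llite mount */
        int fd = open("/mnt/lustre/self", O_RDWR);
        if (fd < 0)
                return 1;
        if (ftruncate(fd, 4096) < 0)
                return 1;

        /* map the file, then hand the mapping back to write() on the same
         * fd: the write path and the page-fault path both need extent
         * locks on this one inode */
        char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                         MAP_SHARED, fd, 0);
        if (buf == MAP_FAILED)
                return 1;

        if (write(fd, buf, 4096) < 0)
                return 1;

        munmap(buf, 4096);
        close(fd);
        return 0;
}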
@@ -291,7 +294,7 @@ int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
         count += addr & (PAGE_SIZE - 1);
         addr -= addr & (PAGE_SIZE - 1);
 
-        while ((vma = our_vma(addr, count)) != NULL) {
+        while ((vma = our_vma(addr, count, inode)) != NULL) {
 
                 policy_from_vma(&policy, vma, addr, count);
                 node = ll_node_from_inode(inode, policy.l_extent.start,
@@ -337,14 +340,15 @@ struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
         struct lustre_handle lockh = { 0 };
         ldlm_policy_data_t policy;
         ldlm_mode_t mode;
-        struct page *page;
+        struct page *page = NULL;
+        struct ll_inode_info *lli = ll_i2info(inode);
         struct obd_service_time *stime;
         __u64 kms;
         unsigned long pgoff, size, rand_read, seq_read;
         int rc = 0;
         ENTRY;
 
-        if (ll_i2info(inode)->lli_smd == NULL) {
+        if (lli->lli_smd == NULL) {
                 CERROR("No lsm on fault?\n");
                 RETURN(NULL);
         }
@@ -353,28 +357,31 @@ struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
         policy_from_vma(&policy, vma, address, PAGE_CACHE_SIZE);
 
         CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
-               vma, inode->i_ino, policy.l_extent.start,
-               policy.l_extent.end);
+               vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);
 
         mode = mode_from_vma(vma);
         stime = (mode & LCK_PW) ? &ll_i2sbi(inode)->ll_write_stime :
                                   &ll_i2sbi(inode)->ll_read_stime;
-
-        rc = ll_extent_lock(fd, inode, ll_i2info(inode)->lli_smd, mode, &policy,
+        
+        rc = ll_extent_lock(fd, inode, lli->lli_smd, mode, &policy,
                             &lockh, LDLM_FL_CBPENDING, stime);
         if (rc != 0)
                 RETURN(NULL);
 
         /* XXX change inode size without i_sem hold! there is a race condition
          *     with truncate path. (see ll_extent_lock) */
-        kms = lov_merge_size(ll_i2info(inode)->lli_smd, 1);
+        down(&lli->lli_size_sem);
+        kms = lov_merge_size(lli->lli_smd, 1);
         pgoff = ((address - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
         size = (kms + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
-        if (pgoff >= size)
+        if (pgoff >= size) {
+                up(&lli->lli_size_sem);
                 ll_glimpse_size(inode);
-        else
+        } else {
                 inode->i_size = kms;
+                up(&lli->lli_size_sem);
+        }
 
         /* disable VM_SEQ_READ and use VM_RAND_READ to make sure that
          * the kernel will not read other pages not covered by ldlm in
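The lli_size_sem taken above closes the race called out in the XXX comment: without it, a fault could sample the merged KMS, be preempted by a truncate, and then write the stale value back into i_size. Below is a minimal user-space analogue of that publish-under-lock pattern; the names are purely illustrative, this is not Lustre code, and it assumes the truncate path takes the same lock.

#include <pthread.h>

/* stand-ins: file_size plays inode->i_size, known_max plays the merged
 * KMS, size_lock plays lli_size_sem */
static pthread_mutex_t size_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long file_size;
static unsigned long known_max;

/* fault path: sample the known size and publish it in one critical
 * section, as the hunk above now does under lli_size_sem */
static void fault_publish_size(void)
{
        pthread_mutex_lock(&size_lock);
        file_size = known_max;
        pthread_mutex_unlock(&size_lock);
}

/* truncate path: assumed to take the same lock, so a size sampled
 * before the truncate can no longer be stored after it */
static void truncate_file(unsigned long new_size)
{
        pthread_mutex_lock(&size_lock);
        known_max = new_size;
        file_size = new_size;
        pthread_mutex_unlock(&size_lock);
}

int main(void)
{
        known_max = 4096;
        fault_publish_size();
        truncate_file(0);
        return file_size != 0;
}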
@@ -403,13 +410,14 @@ static inline unsigned long file_to_user(struct vm_area_struct *vma,
 {
         return vma->vm_start +
                (byte - ((__u64)vma->vm_pgoff << PAGE_CACHE_SHIFT));
-
 }
 
 #define VMA_DEBUG(vma, fmt, arg...)                                          \
-        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p): "  \
-               fmt, vma, vma->vm_start, vma->vm_end, vma->vm_pgoff,          \
-               vma->vm_file->f_dentry->d_inode, ## arg);
+        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) "   \
+               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end,  \
+               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,               \
+               vma->vm_file->f_dentry->d_inode->i_ino,                       \
+               vma->vm_file->f_dentry->d_iname, ## arg);
 
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
 /* [first, last] are the byte offsets affected.
@@ -421,20 +429,32 @@ static void teardown_vmas(struct vm_area_struct *vma, __u64 first,
 {
         unsigned long address, len;
         for (; vma ; vma = vma->vm_next_share) {
-                if (last >> PAGE_CACHE_SHIFT < vma->vm_pgoff)
+                if (last >> PAGE_SHIFT < vma->vm_pgoff)
                         continue;
                 if (first >> PAGE_CACHE_SHIFT > (vma->vm_pgoff +
                     ((vma->vm_end - vma->vm_start) >> PAGE_CACHE_SHIFT)))
                         continue;
 
-                address = max((unsigned long)vma->vm_start,
+                /* XXX to avoid unmapping the CoW pages of a running file,
+                 * skip private writable mappings here entirely, even though
+                 * that makes teardown of private mappings slightly imprecise.
+                 *
+                 * The clean way would be to check the mapping of every page
+                 * and unmap only the non-CoW pages, just as
+                 * unmap_mapping_range() does with even_cows=0 in kernel 2.6.
+                 */
+                if (!(vma->vm_flags & VM_SHARED) &&
+                    (vma->vm_flags & VM_WRITE))
+                        continue;
+
+                address = max((unsigned long)vma->vm_start, 
                               file_to_user(vma, first));
                 len = min((unsigned long)vma->vm_end,
                           file_to_user(vma, last) + 1) - address;
 
-                VMA_DEBUG(vma, "zapping vma [address=%ld len=%ld]\n",
-                          address, len);
-                LASSERT(vma->vm_mm);
+                VMA_DEBUG(vma, "zapping vma [first="LPU64" last="LPU64" "
+                          "address=%ld len=%ld]\n", first, last, address, len);
+                LASSERT(len > 0);
                 ll_zap_page_range(vma, address, len);
         }
 }
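The new skip exists because a page dirtied through a MAP_PRIVATE mapping is the process's own copy-on-write copy; zapping it when a lock is cancelled would silently discard that private modification. A small user-space illustration of how such a copy comes into being (the file path is hypothetical and the file is assumed to be non-empty):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        /* hypothetical file on a llite mount */
        int fd = open("/mnt/lustre/data", O_RDONLY);
        if (fd < 0)
                return 1;

        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED)
                return 1;

        /* this store faults in an anonymous copy-on-write copy of the
         * page; it is no longer backed by the file, so a DLM lock
         * cancellation must leave it mapped */
        p[0] ^= 1;

        pause();        /* keep the mapping (and the private copy) alive */
        return 0;
}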
@@ -448,11 +468,12 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first,
         int rc = -ENOENT;
         ENTRY;
 
+        LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
         if (mapping_mapped(mapping)) {
                 rc = 0;
                 unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
-                                    last - first + 1, 1);
+                                    last - first + 1, 0);
         }
 #else
         spin_lock(&mapping->i_shared_lock);
@@ -470,18 +491,93 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first,
         RETURN(rc);
 }
 
+
+static void ll_close_vma(struct vm_area_struct *vma)
+{
+        struct inode *inode = vma->vm_file->f_dentry->d_inode;
+        struct address_space *mapping = inode->i_mapping;
+        unsigned long next, size, end;
+        struct ll_async_page *llap;
+        struct obd_export *exp;
+        struct pagevec pvec;
+        int i;
+
+        if (!(vma->vm_flags & VM_SHARED))
+                return;
+
+        /* by the time ->close() runs, all PTE dirty bits have been synced
+         * back to the page structs; scan the backing store and put every
+         * dirty page on the pending list so its flushing can be tracked */
+
+        LASSERT(LLI_DIRTY_HANDLE(inode));
+        exp = ll_i2dtexp(inode);
+        if (exp == NULL) {
+                CERROR("can't get export for the inode\n");
+                return;
+        }
+
+        pagevec_init(&pvec, 0);
+        next = vma->vm_pgoff;
+        size = (vma->vm_end - vma->vm_start) / PAGE_SIZE;
+        end = next + size - 1;
+
+        CDEBUG(D_INODE, "close vma 0x%p[%lu/%lu/%lu from %lu/%u]\n", vma,
+               next, size, end, inode->i_ino, inode->i_generation);
+
+        while (next <= end && pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+                for (i = 0; i < pagevec_count(&pvec); i++) {
+                        struct page *page = pvec.pages[i];
+
+                        if (page->index > next)
+                                next = page->index;
+                        if (next > end)
+                                continue;
+                        next++;
+
+                        lock_page(page);
+                        if (page->mapping != mapping || !PageDirty(page)) {
+                                unlock_page(page);
+                                continue;
+                        }
+
+                        llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
+                        if (IS_ERR(llap)) {
+                                CERROR("can't get llap\n");
+                                unlock_page(page);
+                                continue;
+                        }
+
+                        llap_write_pending(inode, llap);
+                        unlock_page(page);
+                }
+                pagevec_release(&pvec);
+        }
+}
+
 static struct vm_operations_struct ll_file_vm_ops = {
         .nopage         = ll_nopage,
+        .close          = ll_close_vma,
 };
 
+/* Audit functions */
+extern int ll_audit_log(struct inode *, audit_op, int);
+
 int ll_file_mmap(struct file * file, struct vm_area_struct * vma)
 {
         int rc;
         ENTRY;
 
         rc = generic_file_mmap(file, vma);
-        if (rc == 0)
+        if (rc == 0) {
+                struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
                 vma->vm_ops = &ll_file_vm_ops;
+
+                /* mark the I/O epoch dirty for shared mappings */
+                if (vma->vm_flags & VM_SHARED)
+                        set_bit(LLI_F_DIRTY_HANDLE, &lli->lli_flags);
+        }
+
+        ll_audit_log(file->f_dentry->d_inode, AUDIT_MMAP, rc);
 
         RETURN(rc);
 }
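Taken together, the additions give shared mappings a tracked write-back path: ll_file_mmap() flags the I/O epoch dirty for VM_SHARED mappings, ll_nopage() serves the faults under DLM extent locks, and ll_close_vma() queues every dirty page for flushing when the mapping goes away. A minimal user-space sequence that would exercise all three hooks (the mount point and file name are hypothetical):

#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        /* hypothetical file on a llite mount */
        int fd = open("/mnt/lustre/shared", O_RDWR);
        if (fd < 0)
                return 1;
        if (ftruncate(fd, 4096) < 0)
                return 1;

        /* mmap() reaches ll_file_mmap(); VM_SHARED flags the handle dirty */
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;

        /* the first store to the page faults through ll_nopage() */
        memset(p, 'a', 4096);

        /* tearing the mapping down runs vm_ops->close, i.e. ll_close_vma(),
         * which queues the dirty page for flushing */
        munmap(p, 4096);
        close(fd);
        return 0;
}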