src needs to be in the options list if it is going to be available for use.
diff --git a/lustre/llite/llite_mmap.c b/lustre/llite/llite_mmap.c
index 26bac6c..5fbf6f0 100644
--- a/lustre/llite/llite_mmap.c
+++ b/lustre/llite/llite_mmap.c
 #include <linux/iobuf.h>
 #endif
 
+#include <linux/pagevec.h>
+
 #define DEBUG_SUBSYSTEM S_LLITE
 
 #include <linux/lustre_mds.h>
 #include <linux/lustre_lite.h>
+#include <linux/lustre_audit.h>
 #include "llite_internal.h"
 #include <linux/lustre_compat25.h>
 
-
-struct ll_lock_tree_node {
-        rb_node_t               lt_node;
-        struct list_head        lt_locked_item;
-        __u64                   lt_oid;
-        ldlm_policy_data_t      lt_policy;
-        struct lustre_handle    lt_lockh;
-        ldlm_mode_t             lt_mode;
-};
-
 __u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
 int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
                       unsigned long addr, size_t count);
@@ -347,14 +340,15 @@ struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
         struct lustre_handle lockh = { 0 };
         ldlm_policy_data_t policy;
         ldlm_mode_t mode;
-        struct page *page;
+        struct page *page = NULL;
+        struct ll_inode_info *lli = ll_i2info(inode);
         struct obd_service_time *stime;
         __u64 kms;
         unsigned long pgoff, size, rand_read, seq_read;
         int rc = 0;
         ENTRY;
 
-        if (ll_i2info(inode)->lli_smd == NULL) {
+        if (lli->lli_smd == NULL) {
                 CERROR("No lsm on fault?\n");
                 RETURN(NULL);
         }
@@ -363,28 +357,31 @@ struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
         policy_from_vma(&policy, vma, address, PAGE_CACHE_SIZE);
 
         CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
-               vma, inode->i_ino, policy.l_extent.start,
-               policy.l_extent.end);
+               vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);
 
         mode = mode_from_vma(vma);
         stime = (mode & LCK_PW) ? &ll_i2sbi(inode)->ll_write_stime :
                                   &ll_i2sbi(inode)->ll_read_stime;
         
-        rc = ll_extent_lock(fd, inode, ll_i2info(inode)->lli_smd, mode, &policy,
+        rc = ll_extent_lock(fd, inode, lli->lli_smd, mode, &policy,
                             &lockh, LDLM_FL_CBPENDING, stime);
         if (rc != 0)
                 RETURN(NULL);
 
         /* XXX change inode size without i_sem hold! there is a race condition
          *     with truncate path. (see ll_extent_lock) */
-        kms = lov_merge_size(ll_i2info(inode)->lli_smd, 1);
+        down(&lli->lli_size_sem);
+        kms = lov_merge_size(lli->lli_smd, 1);
         pgoff = ((address - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
         size = (kms + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
-        if (pgoff >= size)
+        if (pgoff >= size) {
+                up(&lli->lli_size_sem);
                 ll_glimpse_size(inode);
-        else
+        } else {
                 inode->i_size = kms;
+                up(&lli->lli_size_sem);
+        }
 
         /* disable VM_SEQ_READ and use VM_RAND_READ to make sure that
          * the kernel will not read other pages not covered by ldlm in
@@ -471,7 +468,7 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first,
         int rc = -ENOENT;
         ENTRY;
 
-        LASSERT(last > first);
+        LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
         if (mapping_mapped(mapping)) {
                 rc = 0;
@@ -494,18 +491,93 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first,
         RETURN(rc);
 }
 
+
+static void ll_close_vma(struct vm_area_struct *vma)
+{
+        struct inode *inode = vma->vm_file->f_dentry->d_inode;
+        struct address_space *mapping = inode->i_mapping;
+        unsigned long next, size, end;
+        struct ll_async_page *llap;
+        struct obd_export *exp;
+        struct pagevec pvec;
+        int i;
+
+        if (!(vma->vm_flags & VM_SHARED))
+                return;
+
+        /* all PTEs have been synced back to the page cache by this
+         * point; scan the backing store and put every dirty page on
+         * the pending list so that its flushing can be tracked */
+
+        LASSERT(LLI_DIRTY_HANDLE(inode));
+        exp = ll_i2dtexp(inode);
+        if (exp == NULL) {
+                CERROR("can't get export for the inode\n");
+                return;
+        }
+
+        pagevec_init(&pvec, 0);
+        next = vma->vm_pgoff;
+        size = (vma->vm_end - vma->vm_start) / PAGE_SIZE;
+        end = next + size - 1;
+
+        CDEBUG(D_INODE, "close vma 0x%p[%lu/%lu/%lu from %lu/%u]\n", vma,
+               next, size, end, inode->i_ino, inode->i_generation);
+
+        while (next <= end && pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+                for (i = 0; i < pagevec_count(&pvec); i++) {
+                        struct page *page = pvec.pages[i];
+
+                        if (page->index > next)
+                                next = page->index;
+                        if (next > end)
+                                continue;
+                        next++;
+
+                        lock_page(page);
+                        if (page->mapping != mapping || !PageDirty(page)) {
+                                unlock_page(page);
+                                continue;
+                        }
+
+                        llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
+                        if (IS_ERR(llap)) {
+                                CERROR("can't get llap\n");
+                                unlock_page(page);
+                                continue;
+                        }
+
+                        llap_write_pending(inode, llap);
+                        unlock_page(page);
+                }
+                pagevec_release(&pvec);
+        }
+}
+
 static struct vm_operations_struct ll_file_vm_ops = {
         .nopage         = ll_nopage,
+        .close          = ll_close_vma,
 };
 
+/* Audit functions */
+extern int ll_audit_log(struct inode *, audit_op, int);
+
 int ll_file_mmap(struct file * file, struct vm_area_struct * vma)
 {
         int rc;
         ENTRY;
 
         rc = generic_file_mmap(file, vma);
-        if (rc == 0)
+        if (rc == 0) {
+                struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
                 vma->vm_ops = &ll_file_vm_ops;
+
+                /* mark the I/O epoch dirty */
+                if (vma->vm_flags & VM_SHARED)
+                        set_bit(LLI_F_DIRTY_HANDLE, &lli->lli_flags);
+        }
+
+        ll_audit_log(file->f_dentry->d_inode, AUDIT_MMAP, rc);
 
         RETURN(rc);
 }
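
Note on the dirty-page walk in ll_close_vma() above: writes that arrive through a VM_SHARED mapping never pass through write(), so the only way to find them at unmap time is to scan the page cache. The sketch below isolates that pattern against the 2.6-era kernel API the patch targets (pagevec_lookup(), lock_page(), PageDirty()). It is a minimal illustration, not Lustre code; handle_dirty_page() is a hypothetical callback standing in for the llap_from_page()/llap_write_pending() bookkeeping the real function does.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>

/* Sketch: walk every dirty page of `mapping` in [next, end] and call
 * handle_dirty_page() on each one while its page lock is held.
 * handle_dirty_page() is a hypothetical stand-in, not a kernel API. */
static void scan_dirty_range(struct address_space *mapping,
                             unsigned long next, unsigned long end,
                             void (*handle_dirty_page)(struct page *))
{
        struct pagevec pvec;
        int i;

        pagevec_init(&pvec, 0);
        while (next <= end &&
               pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* the lookup may skip holes in the file; resync
                         * `next` with the index actually returned */
                        if (page->index > next)
                                next = page->index;
                        if (next > end)
                                continue;
                        next++;

                        lock_page(page);
                        /* the page may have been truncated or cleaned
                         * while unlocked; recheck under the page lock */
                        if (page->mapping != mapping || !PageDirty(page)) {
                                unlock_page(page);
                                continue;
                        }
                        handle_dirty_page(page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
        }
}

The recheck of page->mapping under the page lock is the load-bearing detail: truncate can detach a page between pagevec_lookup() and lock_page(), and ll_close_vma() performs the same recheck before creating an llap for the page.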