#include <linux/iobuf.h>
#endif
+#include <linux/pagevec.h>
+
#define DEBUG_SUBSYSTEM S_LLITE
#include <linux/lustre_mds.h>
#include <linux/lustre_lite.h>
+#include <linux/lustre_audit.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>
-
-struct ll_lock_tree_node {
- rb_node_t lt_node;
- struct list_head lt_locked_item;
- __u64 lt_oid;
- ldlm_policy_data_t lt_policy;
- struct lustre_handle lt_lockh;
- ldlm_mode_t lt_mode;
-};
-
__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
unsigned long addr, size_t count);
int rc = -ENOENT;
ENTRY;
- LASSERT(last > first);
+ LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
if (mapping_mapped(mapping)) {
rc = 0;
RETURN(rc);
}
+
+/*
+ * VMA ->close callback for Lustre file mappings (installed through
+ * ll_file_vm_ops by ll_file_mmap).  For shared writable mappings,
+ * walk the pages of the backing address_space that this VMA covered
+ * and queue every dirty one via llap_write_pending() so its writeout
+ * can be tracked.  Private mappings are ignored.
+ */
+static void ll_close_vma(struct vm_area_struct *vma)
+{
+ struct inode *inode = vma->vm_file->f_dentry->d_inode;
+ struct address_space *mapping = inode->i_mapping;
+ unsigned long next, size, end;
+ struct ll_async_page *llap;
+ struct obd_export *exp;
+ struct pagevec pvec;
+ int i;
+
+ /* private mappings never write back through the file: nothing to do */
+ if (!(vma->vm_flags & VM_SHARED))
+ return;
+
+ /* all pte's are synced to mem_map by the moment
+ * we scan backing store and put all dirty pages
+ * onto pending list to track flushing */
+
+ /* ll_file_mmap sets LLI_F_DIRTY_HANDLE for every shared mapping,
+ * so it must be set by the time the mapping is torn down */
+ LASSERT(LLI_DIRTY_HANDLE(inode));
+ exp = ll_i2dtexp(inode);
+ if (exp == NULL) {
+ CERROR("can't get export for the inode\n");
+ return;
+ }
+
+ pagevec_init(&pvec, 0);
+ /* restrict the scan to the page range this VMA actually mapped */
+ next = vma->vm_pgoff;
+ size = (vma->vm_end - vma->vm_start) / PAGE_SIZE;
+ end = next + size - 1;
+
+ CDEBUG(D_INODE, "close vma 0x%p[%lu/%lu/%lu from %lu/%u]\n", vma,
+ next, size, end, inode->i_ino, inode->i_generation);
+
+ while (next <= end && pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+ for (i = 0; i < pagevec_count(&pvec); i++) {
+ struct page *page = pvec.pages[i];
+
+ /* pagevec_lookup() may skip holes, returning pages past
+ * 'next'; resync the cursor.  Once past 'end' we only
+ * 'continue' (not break) so the whole pagevec is still
+ * released below. */
+ if (page->index > next)
+ next = page->index;
+ if (next > end)
+ continue;
+ next++;
+
+ /* recheck under page lock: the page may have been
+ * truncated or cleaned since the unlocked lookup */
+ lock_page(page);
+ if (page->mapping != mapping || !PageDirty(page)) {
+ unlock_page(page);
+ continue;
+ }
+
+ /* best-effort: a page we cannot wrap in an llap is
+ * skipped, not treated as a fatal error */
+ llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
+ if (IS_ERR(llap)) {
+ CERROR("can't get llap\n");
+ unlock_page(page);
+ continue;
+ }
+
+ /* presumably queues the llap on the inode's pending-write
+ * list for later flush -- TODO(review): confirm semantics
+ * of llap_write_pending() in llite */
+ llap_write_pending(inode, llap);
+ unlock_page(page);
+ }
+ pagevec_release(&pvec);
+ }
+}
+
static struct vm_operations_struct ll_file_vm_ops = {
.nopage = ll_nopage,
+ /* queue dirty pages for flush tracking when a shared mapping
+ * is torn down (munmap/exit) */
+ .close = ll_close_vma,
};
+/* Audit functions */
+/* NOTE(review): defined elsewhere in llite -- logs the mmap operation
+ * and its result code to the Lustre audit subsystem */
+extern int ll_audit_log (struct inode *, audit_op, int);
+
+/* mmap entry point: delegate to generic_file_mmap(), then install
+ * Lustre's vm_operations and, for shared mappings, mark the inode's
+ * i/o epoch handle dirty so ll_close_vma can assert on it later.
+ * The mmap attempt is audit-logged whether it succeeded or not. */
int ll_file_mmap(struct file * file, struct vm_area_struct * vma)
{
int rc;
ENTRY;
rc = generic_file_mmap(file, vma);
- if (rc == 0)
+ if (rc == 0) {
+ struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
vma->vm_ops = &ll_file_vm_ops;
+
+ /* mark i/o epoch dirty */
+ if (vma->vm_flags & VM_SHARED)
+ set_bit(LLI_F_DIRTY_HANDLE, &lli->lli_flags);
+ }
+
+ /* audit both success and failure paths; rc carries the result */
+ ll_audit_log(file->f_dentry->d_inode, AUDIT_MMAP, rc);
RETURN(rc);
}