/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011 Whamcloud, Inc.
+ *
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#define DEBUG_SUBSYSTEM S_LLITE
-//#include <lustre_mdc.h>
#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>
int page_count = pv->ldp_nr;
struct page **pages = pv->ldp_pages;
long page_size = cl_page_size(obj);
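+ /* do_io tracks whether the current page must actually be sent;
+ * io_pages counts the pages queued for transfer */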
+ bool do_io;
+ int io_pages = 0;
ENTRY;
queue = &io->ci_queue;
for (i = 0; i < page_count; i++) {
if (pv->ldp_offsets)
file_offset = pv->ldp_offsets[i];
+
LASSERT(!(file_offset & (page_size - 1)));
clp = cl_page_find(env, obj, cl_index(obj, file_offset),
pv->ldp_pages[i], CPT_TRANSIENT);
if (IS_ERR(clp)) {
        rc = PTR_ERR(clp);
        break;
}
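+
+ /* take ownership of the page before queuing it; this can fail
+ * only if the page is already being freed (CPS_FREEING), in
+ * which case drop the reference and give up */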
+ rc = cl_page_own(env, io, clp);
+ if (rc) {
+ LASSERT(clp->cp_state == CPS_FREEING);
+ cl_page_put(env, clp);
+ break;
+ }
+
+ do_io = true;
+
/* check the page type: if the page is a host page, then do
* write directly */
- /*
- * Very rare case that the host pages can be found for
- * directIO case, since linux kernel truncated all covered
- * pages before getting here. So, to make the OST happy(to
- * write a contiguous region), all pages are issued
- * here. -jay */
if (clp->cp_type == CPT_CACHEABLE) {
cfs_page_t *vmpage = cl_page_vmpage(env, clp);
cfs_page_t *src_page;
/* make sure the page will be added to the transfer, see
 * cl_io_submit()->...->vvp_page_prep_write(). */
if (rw == WRITE)
set_page_dirty(vmpage);
+
+ if (rw == READ) {
+ /* do not issue the page for read, since it
+ * may re-read a readahead page that does not
+ * have its uptodate bit set. */
+ cl_page_disown(env, io, clp);
+ do_io = false;
+ }
+ }
+
+ if (likely(do_io)) {
+ cl_2queue_add(queue, clp);
+
/*
- * If direct-io read finds up-to-date page in the
- * cache, just copy it to the user space. Page will be
- * filtered out by vvp_page_prep_read(). This
- * preserves an invariant, that page is read at most
- * once, see cl_page_flags::CPF_READ_COMPLETED.
+ * Set page clip to tell transfer formation engine
+ * that page has to be sent even if it is beyond KMS.
*/
- }
+ cl_page_clip(env, clp, 0, min(size, page_size));
- rc = cl_page_own(env, io, clp);
- if (rc) {
- LASSERT(clp->cp_state == CPS_FREEING);
- cl_page_put(env, clp);
- break;
+ ++io_pages;
}
- cl_2queue_add(queue, clp);
-
- /* drop the reference count for cl_page_find, so that the page
- * will be freed in cl_2queue_fini. */
+ /* drop the reference count for cl_page_find */
cl_page_put(env, clp);
- /*
- * Set page clip to tell transfer formation engine that page
- * has to be sent even if it is beyond KMS.
- */
- cl_page_clip(env, clp, 0, min(size, page_size));
size -= page_size;
file_offset += page_size;
}
- if (rc == 0) {
+ if (rc == 0 && io_pages) {
rc = cl_io_submit_sync(env, io,
rw == READ ? CRT_READ : CRT_WRITE,
queue, CRP_NORMAL, 0);
- if (rc == 0)
- rc = pv->ldp_size;
}
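+ /* report the full transfer size on success even when no pages
+ * were queued, e.g. a read satisfied entirely from cached pages */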
+ if (rc == 0)
+ rc = pv->ldp_size;
cl_2queue_discard(env, io, queue);
cl_2queue_disown(env, io, queue);
return ll_direct_rw_pages(env, io, rw, inode, &pvec);
}
-/* This is the maximum size of a single O_DIRECT request, based on a 128kB
+#ifdef KMALLOC_MAX_SIZE
+#define MAX_MALLOC KMALLOC_MAX_SIZE
+#else
+#define MAX_MALLOC (128 * 1024)
+#endif
+
+/* This is the maximum size of a single O_DIRECT request, based on the
* kmalloc limit. We need to fit all of the brw_page structs, each one
* representing PAGE_SIZE worth of user data, into a single buffer, and
- * then truncate this to be a full-sized RPC. This is 22MB for 4kB pages. */
-#define MAX_DIO_SIZE ((128 * 1024 / sizeof(struct brw_page) * CFS_PAGE_SIZE) & \
+ * then truncate this to be a full-sized RPC. For 4kB PAGE_SIZE this is
+ * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */
+#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * CFS_PAGE_SIZE) & \
~(PTLRPC_MAX_BRW_SIZE - 1))
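+/* for example, assuming a 24-byte struct brw_page and 4kB pages:
+ * 131072 / 24 = 5461 brw_pages covering 5461 * 4096 bytes =~ 22MB,
+ * rounded down to a multiple of PTLRPC_MAX_BRW_SIZE */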
static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t file_offset,
LASSERT(io != NULL);
- /* 0. Need locking between buffered and direct access. and race with
- *size changing by concurrent truncates and writes.
+ /* 0. Need locking between buffered and direct access, and racing
+ * size changes by concurrent truncates and writes.
* 1. Need inode sem to operate transient pages. */
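+ /* writes normally reach here with i_mutex already held by the
+ * generic write path, so only reads need to take it explicitly */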
if (rw == READ)
LOCK_INODE_MUTEX(inode);
unsigned long user_addr = (unsigned long)iov[seg].iov_base;
if (rw == READ) {
- if (file_offset >= inode->i_size)
+ if (file_offset >= i_size_read(inode))
break;
- if (file_offset + iov_left > inode->i_size)
- iov_left = inode->i_size - file_offset;
+ if (file_offset + iov_left > i_size_read(inode))
+ iov_left = i_size_read(inode) - file_offset;
}
while (iov_left > 0) {
int page_count, max_pages = 0;
long bytes;
- bytes = min(size,iov_left);
+ bytes = min(size, iov_left);
page_count = ll_get_user_pages(rw, user_addr, bytes,
&pages, &max_pages);
if (likely(page_count > 0)) {
bytes = page_count << CFS_PAGE_SHIFT;
result = ll_direct_IO_26_seg(env, io, rw, inode,
file->f_mapping,
- bytes,
- file_offset, pages,
- page_count);
+ bytes, file_offset,
+ pages, page_count);
- ll_free_user_pages(pages, max_pages, rw==READ);
+ ll_free_user_pages(pages, max_pages, rw == READ);
} else if (page_count == 0) {
GOTO(out, result = -EFAULT);
RETURN(tot_bytes ? : result);
}
-#ifdef HAVE_KERNEL_WRITE_BEGIN_END
+#if defined(HAVE_KERNEL_WRITE_BEGIN_END) || defined(MS_HAS_NEW_AOPS)
static int ll_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
int rc;
- rc = ll_commit_write(file, page, from, from + copied);
+ rc = ll_commit_write(file, page, from, from + copied);
unlock_page(page);
page_cache_release(page);
- return rc?rc:copied;
+
+ return rc ?: copied;
}
#endif
+#ifdef CONFIG_MIGRATION
+int ll_migratepage(struct address_space *mapping,
+ struct page *newpage, struct page *page)
+{
+ /* Always fail page migration until we have a proper implementation */
+ return -EIO;
+}
+#endif
+
+#ifndef MS_HAS_NEW_AOPS
struct address_space_operations ll_aops = {
.readpage = ll_readpage,
// .readpages = ll_readpages,
#endif
.invalidatepage = ll_invalidatepage,
.releasepage = (void *)ll_releasepage,
+#ifdef CONFIG_MIGRATION
+ .migratepage = ll_migratepage,
+#endif
.bmap = NULL
};
+#else
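+/* kernels defining MS_HAS_NEW_AOPS provide struct
+ * address_space_operations_ext, which wraps the original operations
+ * and adds write_begin/write_end */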
+struct address_space_operations_ext ll_aops = {
+ .orig_aops.readpage = ll_readpage,
+// .orig_aops.readpages = ll_readpages,
+ .orig_aops.direct_IO = ll_direct_IO_26,
+ .orig_aops.writepage = ll_writepage,
+ .orig_aops.writepages = generic_writepages,
+ .orig_aops.set_page_dirty = ll_set_page_dirty,
+ .orig_aops.sync_page = NULL,
+ .orig_aops.prepare_write = ll_prepare_write,
+ .orig_aops.commit_write = ll_commit_write,
+ .orig_aops.invalidatepage = ll_invalidatepage,
+ .orig_aops.releasepage = ll_releasepage,
+#ifdef CONFIG_MIGRATION
+ .orig_aops.migratepage = ll_migratepage,
+#endif
+ .orig_aops.bmap = NULL,
+ .write_begin = ll_write_begin,
+ .write_end = ll_write_end
+};
+#endif