* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre Lite I/O page cache routines for the 2.5/2.6 kernel version
*/
-#ifndef AUTOCONF_INCLUDED
-#include <linux/config.h>
-#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
-#include <linux/smp_lock.h>
#include <linux/unistd.h>
-#include <linux/version.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
+#ifdef HAVE_MIGRATE_H
+#include <linux/migrate.h>
+#elif defined(HAVE_MIGRATE_MODE_H)
+#include <linux/migrate_mode.h>
+#endif
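/* enum migrate_mode moved around across kernels: recent ones export it
 * via <linux/migrate.h>, while some v3.3-era kernels only provide
 * <linux/migrate_mode.h>; configure detects which header is present. */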
#include <linux/fs.h>
#include <linux/buffer_head.h>
+#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
#define DEBUG_SUBSYSTEM S_LLITE
page = cl_vmpage_page(vmpage, obj);
result = page == NULL;
if (page != NULL) {
- if (cfs_atomic_read(&page->cp_ref) == 1) {
+ if (!cl_page_in_use(page)) {
result = 1;
cl_page_delete(env, page);
}
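/* assuming cl_page_in_use(pg) tests atomic_read(&pg->cp_ref) > 1, this
 * is equivalent to the old explicit cp_ref == 1 check: the page is only
 * deleted when ours is the sole remaining reference. */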
return -EFBIG;
}
- *max_pages = (user_addr + size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
- *max_pages -= user_addr >> CFS_PAGE_SHIFT;
+ *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
+ *max_pages -= user_addr >> PAGE_CACHE_SHIFT;
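/* e.g. with 4kB pages, user_addr = 0x1ff0 and size = 10000 gives
 * (0x1ff0 + 10000 + 4095) >> 12 = 5 minus 0x1ff0 >> 12 = 1, i.e.
 * *max_pages = 4: the buffer touches pages 1 through 4. */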
OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages));
if (*pages) {
/* check the page type: if the page is a host (CPT_CACHEABLE) page,
 * copy the data to or from the user buffer directly */
if (clp->cp_type == CPT_CACHEABLE) {
- cfs_page_t *vmpage = cl_page_vmpage(env, clp);
- cfs_page_t *src_page;
- cfs_page_t *dst_page;
+ struct page *vmpage = cl_page_vmpage(env, clp);
+ struct page *src_page;
+ struct page *dst_page;
void *src;
void *dst;
src_page = (rw == WRITE) ? pages[i] : vmpage;
dst_page = (rw == WRITE) ? vmpage : pages[i];
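/* for a WRITE the user page is the source and the cached vmpage the
 * destination; for a READ the roles are reversed */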
- src = kmap_atomic(src_page, KM_USER0);
- dst = kmap_atomic(dst_page, KM_USER1);
+ src = ll_kmap_atomic(src_page, KM_USER0);
+ dst = ll_kmap_atomic(dst_page, KM_USER1);
memcpy(dst, src, min(page_size, size));
- kunmap_atomic(dst, KM_USER1);
- kunmap_atomic(src, KM_USER0);
+ ll_kunmap_atomic(dst, KM_USER1);
+ ll_kunmap_atomic(src, KM_USER0);
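/* ll_kmap_atomic()/ll_kunmap_atomic() are presumably compat wrappers
 * that drop the KM_* slot on kernels whose kmap_atomic() takes a
 * single argument; two distinct slots are named so that both pages
 * can stay mapped at once on older kernels. */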
/* make sure page will be added to the transfer by
* cl_io_submit()->...->vvp_page_prep_write(). */
* representing PAGE_SIZE worth of user data, into a single buffer, and
* then truncate this to be a full-sized RPC. For 4kB PAGE_SIZE this is
* up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */
-#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * CFS_PAGE_SIZE) & \
- ~(PTLRPC_MAX_BRW_SIZE - 1))
+#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \
+ ~(DT_MAX_BRW_SIZE - 1))
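/* checking the figures above, assuming sizeof(struct brw_page) == 24
 * on 64-bit: 128kB / 24 * 4kB is about 22MB and 4MB / 24 * 4kB is
 * about 682MB, each rounded down to a DT_MAX_BRW_SIZE multiple. */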
static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t file_offset,
unsigned long nr_segs)
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), size=%lu (max %lu), "
"offset=%lld=%llx, pages %lu (max %lu)\n",
inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
- file_offset, file_offset, count >> CFS_PAGE_SHIFT,
- MAX_DIO_SIZE >> CFS_PAGE_SHIFT);
+ file_offset, file_offset, count >> PAGE_CACHE_SHIFT,
+ MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);
/* Check that all user buffers are aligned as well */
for (seg = 0; seg < nr_segs; seg++) {
io = ccc_env_io(env)->cui_cl.cis_io;
LASSERT(io != NULL);
- /* 0. Need locking between buffered and direct access. and race with
- * size changing by concurrent truncates and writes.
- * 1. Need inode sem to operate transient pages. */
- if (rw == READ)
- LOCK_INODE_MUTEX(inode);
+ /* 0. Need locking between buffered and direct access, and against
+ *    races with size changes by concurrent truncates and writes.
+ * 1. Need the inode mutex to operate on transient pages.
+ */
+ if (rw == READ)
+ mutex_lock(&inode->i_mutex);
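/* the write side is assumed to reach ->direct_IO() with i_mutex
 * already held by the VFS write path, so only reads take it here */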
LASSERT(obj->cob_transient_pages == 0);
for (seg = 0; seg < nr_segs; seg++) {
&pages, &max_pages);
if (likely(page_count > 0)) {
if (unlikely(page_count < max_pages))
- bytes = page_count << CFS_PAGE_SHIFT;
+ bytes = page_count << PAGE_CACHE_SHIFT;
result = ll_direct_IO_26_seg(env, io, rw, inode,
file->f_mapping,
bytes, file_offset,
* We should always be able to kmalloc a page worth of page
* pointers, which covers 4MB of data on i386. */
if (result == -ENOMEM &&
- size > (CFS_PAGE_SIZE / sizeof(*pages)) *
- CFS_PAGE_SIZE) {
+ size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
+ PAGE_CACHE_SIZE) {
size = ((((size / 2) - 1) |
~CFS_PAGE_MASK) + 1) &
CFS_PAGE_MASK;
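/* i.e. size = round_up(size / 2, PAGE_CACHE_SIZE): retry with half
 * as much data, still page aligned */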
}
}
out:
- LASSERT(obj->cob_transient_pages == 0);
- if (rw == READ)
- UNLOCK_INODE_MUTEX(inode);
+ LASSERT(obj->cob_transient_pages == 0);
+ if (rw == READ)
+ mutex_unlock(&inode->i_mutex);
if (tot_bytes > 0) {
if (rw == WRITE) {
RETURN(tot_bytes ? : result);
}
-#if defined(HAVE_KERNEL_WRITE_BEGIN_END) || defined(MS_HAS_NEW_AOPS)
static int ll_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
return rc ?: copied;
}
-#endif
#ifdef CONFIG_MIGRATION
int ll_migratepage(struct address_space *mapping,
- struct page *newpage, struct page *page)
+ struct page *newpage, struct page *page
+#ifdef HAVE_MIGRATEPAGE_4ARGS
+ , enum migrate_mode mode
+#endif
+ )
{
/* Always fail page migration until we have a proper implementation */
return -EIO;
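/* the HAVE_MIGRATEPAGE_4ARGS variant (kernels passing enum
 * migrate_mode) behaves identically: the mode is ignored and
 * migration is refused */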
.writepage = ll_writepage,
.writepages = ll_writepages,
.set_page_dirty = ll_set_page_dirty,
-#ifdef HAVE_KERNEL_WRITE_BEGIN_END
.write_begin = ll_write_begin,
.write_end = ll_write_end,
-#else
- .prepare_write = ll_prepare_write,
- .commit_write = ll_commit_write,
-#endif
.invalidatepage = ll_invalidatepage,
.releasepage = (void *)ll_releasepage,
#ifdef CONFIG_MIGRATION