* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre Lite I/O page cache routines for the 2.5/2.6 kernel version
*/
-#ifndef AUTOCONF_INCLUDED
-#include <linux/config.h>
-#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
-#include <linux/version.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
+#ifdef HAVE_MIGRATE_H
+#include <linux/migrate.h>
+#elif defined(HAVE_MIGRATE_MODE_H)
+#include <linux/migrate_mode.h>
+#endif
#include <linux/fs.h>
#include <linux/buffer_head.h>
+#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/stat.h>
#include <asm/uaccess.h>
page = cl_vmpage_page(vmpage, obj);
result = page == NULL;
if (page != NULL) {
- if (cfs_atomic_read(&page->cp_ref) == 1) {
+ if (!cl_page_in_use(page)) {
result = 1;
cl_page_delete(env, page);
}
src_page = (rw == WRITE) ? pages[i] : vmpage;
dst_page = (rw == WRITE) ? vmpage : pages[i];
- src = kmap_atomic(src_page, KM_USER0);
- dst = kmap_atomic(dst_page, KM_USER1);
+ src = ll_kmap_atomic(src_page, KM_USER0);
+ dst = ll_kmap_atomic(dst_page, KM_USER1);
memcpy(dst, src, min(page_size, size));
- kunmap_atomic(dst, KM_USER1);
- kunmap_atomic(src, KM_USER0);
+ ll_kunmap_atomic(dst, KM_USER1);
+ ll_kunmap_atomic(src, KM_USER0);
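/* Editor's sketch (assumption, not the verbatim Lustre compat header; the
 * HAVE_KMAP_ATOMIC_HAS_1ARG guard name is assumed): ll_kmap_atomic() and
 * ll_kunmap_atomic() are taken to be thin wrappers that drop the km_type
 * argument on kernels whose kmap_atomic() takes only the page pointer: */
#ifdef HAVE_KMAP_ATOMIC_HAS_1ARG
#define ll_kmap_atomic(page, type)	kmap_atomic(page)
#define ll_kunmap_atomic(addr, type)	kunmap_atomic(addr)
#else
#define ll_kmap_atomic(page, type)	kmap_atomic(page, type)
#define ll_kunmap_atomic(addr, type)	kunmap_atomic(addr, type)
#endif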
/* make sure page will be added to the transfer by
* cl_io_submit()->...->vvp_page_prep_write(). */
* then truncate this to be a full-sized RPC. For 4kB PAGE_SIZE this is
* up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */
#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * CFS_PAGE_SIZE) & \
- ~(PTLRPC_MAX_BRW_SIZE - 1))
+ ~(DT_MAX_BRW_SIZE - 1))
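/* Editor's note, worked example of the sizes quoted in the comment above
 * (assuming sizeof(struct brw_page) is 24 bytes on a 64-bit build and 4kB
 * pages; the exact size depends on the struct layout):
 *   128kB kmalloc: 131072 / 24 = 5461 brw_pages * 4096 bytes  ~= 22MB
 *     4MB kmalloc: 4194304 / 24 = 174762 brw_pages * 4096 bytes ~= 682MB
 * MAX_DIO_SIZE then rounds this down to a multiple of DT_MAX_BRW_SIZE so a
 * single O_DIRECT request is built from full-sized RPCs. */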
static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t file_offset,
unsigned long nr_segs)
io = ccc_env_io(env)->cui_cl.cis_io;
LASSERT(io != NULL);
- /* 0. Need locking between buffered and direct access. and race with
- * size changing by concurrent truncates and writes.
- * 1. Need inode sem to operate transient pages. */
- if (rw == READ)
- LOCK_INODE_MUTEX(inode);
+ /* 0. Need locking between buffered and direct access, and to guard
+ *    against races with size changes by concurrent truncates and writes.
+ * 1. Need inode mutex to operate transient pages.
+ */
+ if (rw == READ)
+ mutex_lock(&inode->i_mutex);
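/* Editor's note (assumption): only the READ path takes i_mutex here;
 * for direct writes the mutex is presumably already held further up the
 * generic write path before ->direct_IO() is reached. */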
LASSERT(obj->cob_transient_pages == 0);
for (seg = 0; seg < nr_segs; seg++) {
}
}
out:
- LASSERT(obj->cob_transient_pages == 0);
- if (rw == READ)
- UNLOCK_INODE_MUTEX(inode);
+ LASSERT(obj->cob_transient_pages == 0);
+ if (rw == READ)
+ mutex_unlock(&inode->i_mutex);
if (tot_bytes > 0) {
if (rw == WRITE) {
#ifdef CONFIG_MIGRATION
int ll_migratepage(struct address_space *mapping,
- struct page *newpage, struct page *page)
+ struct page *newpage, struct page *page
+#ifdef HAVE_MIGRATEPAGE_4ARGS
+ , enum migrate_mode mode
+#endif
+ )
{
/* Always fail page migration until we have a proper implementation */
return -EIO;
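/* Editor's note (illustrative wiring, an assumption about surrounding code):
 * ll_migratepage would typically be hooked into the file's
 * address_space_operations under CONFIG_MIGRATION, e.g.
 *
 *	.migratepage	= ll_migratepage,
 *
 * so the kernel's migration core invokes it with either the 3- or the
 * 4-argument prototype selected by HAVE_MIGRATEPAGE_4ARGS. */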