X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fllite%2Frw26.c;h=a90b9dd049d959f1c9f459710c997241c5227c08;hp=6335b7f8048f9e4309fd0f55af8f725aaedfa2b4;hb=0aec97df129d2c6263c66454ee9039cd5dbfdf85;hpb=9fe4b52ad2ffadf125d9b5c78bb2ff9a01725707

diff --git a/lustre/llite/rw26.c b/lustre/llite/rw26.c
index 6335b7f..a90b9dd 100644
--- a/lustre/llite/rw26.c
+++ b/lustre/llite/rw26.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -38,28 +38,27 @@
  * Lustre Lite I/O page cache routines for the 2.5/2.6 kernel version
  */
 
-#ifndef AUTOCONF_INCLUDED
-#include
-#endif
 #include
 #include
 #include
 #include
 #include
-#include
 #include
-#include
-#include
 #include
+#ifdef HAVE_MIGRATE_H
+#include <linux/migrate.h>
+#elif defined(HAVE_MIGRATE_MODE_H)
+#include <linux/migrate_mode.h>
+#endif
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
-#include
 
 #define DEBUG_SUBSYSTEM S_LLITE
@@ -158,7 +157,7 @@ static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask)
         page = cl_vmpage_page(vmpage, obj);
         result = page == NULL;
         if (page != NULL) {
-                if (cfs_atomic_read(&page->cp_ref) == 1) {
+                if (!cl_page_in_use(page)) {
                         result = 1;
                         cl_page_delete(env, page);
                 }
@@ -204,8 +203,9 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr,
                 return -EFBIG;
         }
 
-        *max_pages = (user_addr + size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
-        *max_pages -= user_addr >> CFS_PAGE_SHIFT;
+        *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >>
+                     PAGE_CACHE_SHIFT;
+        *max_pages -= user_addr >> PAGE_CACHE_SHIFT;
 
         OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages));
         if (*pages) {
@@ -282,20 +282,20 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
                         /* check the page type: if the page is a host page, then do
                          * write directly */
                         if (clp->cp_type == CPT_CACHEABLE) {
-                                cfs_page_t *vmpage = cl_page_vmpage(env, clp);
-                                cfs_page_t *src_page;
-                                cfs_page_t *dst_page;
+                                struct page *vmpage = cl_page_vmpage(env, clp);
+                                struct page *src_page;
+                                struct page *dst_page;
                                 void *src;
                                 void *dst;
 
                                 src_page = (rw == WRITE) ? pages[i] : vmpage;
                                 dst_page = (rw == WRITE) ? vmpage : pages[i];
 
-                                src = kmap_atomic(src_page, KM_USER0);
-                                dst = kmap_atomic(dst_page, KM_USER1);
+                                src = ll_kmap_atomic(src_page, KM_USER0);
+                                dst = ll_kmap_atomic(dst_page, KM_USER1);
                                 memcpy(dst, src, min(page_size, size));
-                                kunmap_atomic(dst, KM_USER1);
-                                kunmap_atomic(src, KM_USER0);
+                                ll_kunmap_atomic(dst, KM_USER1);
+                                ll_kunmap_atomic(src, KM_USER0);
 
                                 /* make sure page will be added to the transfer by
                                  * cl_io_submit()->...->vvp_page_prep_write(). */
@@ -371,8 +371,8 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
  * representing PAGE_SIZE worth of user data, into a single buffer, and
  * then truncate this to be a full-sized RPC.  For 4kB PAGE_SIZE this is
  * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc.
  */
-#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * CFS_PAGE_SIZE) & \
-                      ~(PTLRPC_MAX_BRW_SIZE - 1))
+#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \
+                      ~(DT_MAX_BRW_SIZE - 1))
 
 static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
                                const struct iovec *iov, loff_t file_offset,
                                unsigned long nr_segs)
@@ -385,13 +385,12 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
         long count = iov_length(iov, nr_segs);
         long tot_bytes = 0, result = 0;
         struct ll_inode_info *lli = ll_i2info(inode);
-        struct lov_stripe_md *lsm = lli->lli_smd;
         unsigned long seg = 0;
         long size = MAX_DIO_SIZE;
         int refcheck;
         ENTRY;
 
-        if (!lli->lli_smd || !lli->lli_smd->lsm_object_id)
+        if (!lli->lli_has_smd)
                 RETURN(-EBADF);
 
         /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
@@ -401,8 +400,8 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), size=%lu (max %lu), "
                "offset=%lld=%llx, pages %lu (max %lu)\n",
                inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
-               file_offset, file_offset, count >> CFS_PAGE_SHIFT,
-               MAX_DIO_SIZE >> CFS_PAGE_SHIFT);
+               file_offset, file_offset, count >> PAGE_CACHE_SHIFT,
+               MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);
 
         /* Check that all user buffers are aligned as well */
         for (seg = 0; seg < nr_segs; seg++) {
@@ -416,11 +415,12 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
         io = ccc_env_io(env)->cui_cl.cis_io;
         LASSERT(io != NULL);
 
-        /* 0. Need locking between buffered and direct access. and race with
-         * size changing by concurrent truncates and writes.
-         * 1. Need inode sem to operate transient pages. */
-        if (rw == READ)
-                LOCK_INODE_MUTEX(inode);
+        /* 0. Need locking between buffered and direct access. and race with
+         * size changing by concurrent truncates and writes.
+         * 1. Need inode mutex to operate transient pages.
+         */
+        if (rw == READ)
+                mutex_lock(&inode->i_mutex);
 
         LASSERT(obj->cob_transient_pages == 0);
         for (seg = 0; seg < nr_segs; seg++) {
@@ -444,7 +444,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
                                                    &pages, &max_pages);
                 if (likely(page_count > 0)) {
                         if (unlikely(page_count < max_pages))
-                                bytes = page_count << CFS_PAGE_SHIFT;
+                                bytes = page_count << PAGE_CACHE_SHIFT;
                         result = ll_direct_IO_26_seg(env, io, rw, inode,
                                                      file->f_mapping,
                                                      bytes, file_offset,
@@ -462,8 +462,8 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
                          * We should always be able to kmalloc for a
                          * page worth of page pointers = 4MB on i386. */
                         if (result == -ENOMEM &&
-                            size > (CFS_PAGE_SIZE / sizeof(*pages)) *
-                                   CFS_PAGE_SIZE) {
+                            size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
+                                   PAGE_CACHE_SIZE) {
                                 size = ((((size / 2) - 1) |
                                          ~CFS_PAGE_MASK) + 1) &
                                        CFS_PAGE_MASK;
@@ -482,23 +482,27 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
                 }
         }
 out:
-        LASSERT(obj->cob_transient_pages == 0);
-        if (rw == READ)
-                UNLOCK_INODE_MUTEX(inode);
+        LASSERT(obj->cob_transient_pages == 0);
+        if (rw == READ)
+                mutex_unlock(&inode->i_mutex);
 
         if (tot_bytes > 0) {
                 if (rw == WRITE) {
-                        lov_stripe_lock(lsm);
-                        obd_adjust_kms(ll_i2dtexp(inode), lsm, file_offset, 0);
-                        lov_stripe_unlock(lsm);
-                }
-        }
-
-        cl_env_put(env, &refcheck);
-        RETURN(tot_bytes ? : result);
+                        struct lov_stripe_md *lsm;
+
+                        lsm = ccc_inode_lsm_get(inode);
+                        LASSERT(lsm != NULL);
+                        lov_stripe_lock(lsm);
+                        obd_adjust_kms(ll_i2dtexp(inode), lsm, file_offset, 0);
+                        lov_stripe_unlock(lsm);
+                        ccc_inode_lsm_put(inode, lsm);
+                }
+        }
+
+        cl_env_put(env, &refcheck);
+        RETURN(tot_bytes ? : result);
 }
 
-#if defined(HAVE_KERNEL_WRITE_BEGIN_END) || defined(MS_HAS_NEW_AOPS)
 static int ll_write_begin(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned flags,
                           struct page **pagep, void **fsdata)
@@ -536,11 +540,14 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
         return rc ?: copied;
 }
-#endif
 
 #ifdef CONFIG_MIGRATION
 int ll_migratepage(struct address_space *mapping,
-                   struct page *newpage, struct page *page)
+                   struct page *newpage, struct page *page
+#ifdef HAVE_MIGRATEPAGE_4ARGS
+                   , enum migrate_mode mode
+#endif
+                   )
 {
         /* Always fail page migration until we have a proper implementation */
         return -EIO;
@@ -555,13 +562,8 @@ struct address_space_operations ll_aops = {
         .writepage      = ll_writepage,
         .writepages     = ll_writepages,
         .set_page_dirty = ll_set_page_dirty,
-#ifdef HAVE_KERNEL_WRITE_BEGIN_END
         .write_begin    = ll_write_begin,
         .write_end      = ll_write_end,
-#else
-        .prepare_write  = ll_prepare_write,
-        .commit_write   = ll_commit_write,
-#endif
         .invalidatepage = ll_invalidatepage,
         .releasepage    = (void *)ll_releasepage,
 #ifdef CONFIG_MIGRATION
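
The MAX_DIO_SIZE hunk above caps a direct-I/O pass at however many brw_page
descriptors fit in one kmalloc allocation, rounded down to a whole BRW RPC. A
minimal userspace sketch of that arithmetic (not part of the patch; it assumes
a 24-byte struct brw_page as on x86_64, 4 KiB pages, and a 1 MiB DT_MAX_BRW_SIZE,
all three being stand-in values) reproduces the "up to 22MB for 128kB kmalloc
and up to 682MB for 4MB kmalloc" figures quoted in the comment:

#include <stdio.h>

/* Stand-in constants; the real values come from Lustre/kernel headers. */
#define BRW_PAGE_BYTES  24UL            /* assumed sizeof(struct brw_page) */
#define PAGE_CACHE_SIZE 4096UL          /* 4 KiB pages */
#define DT_MAX_BRW_SIZE (1UL << 20)     /* assumed 1 MiB max BRW RPC */

/* Same shape as the patched macro: descriptors per kmalloc, converted to
 * bytes of user data, rounded down to a whole RPC. */
static unsigned long max_dio_size(unsigned long max_malloc)
{
        return (max_malloc / BRW_PAGE_BYTES * PAGE_CACHE_SIZE) &
               ~(DT_MAX_BRW_SIZE - 1);
}

int main(void)
{
        /* 131072 / 24 = 5461 descriptors -> ~22MB before rounding */
        printf("128kB kmalloc: %lu bytes\n", max_dio_size(128UL << 10));
        /* 4194304 / 24 = 174762 descriptors -> ~682MB */
        printf("4MB kmalloc:   %lu bytes\n", max_dio_size(4UL << 20));
        return 0;
}

The mask with ~(DT_MAX_BRW_SIZE - 1) is what truncates the result to a
full-sized RPC, per the comment above the macro.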