X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fllite%2Frw26.c;h=999fd270a803738fa2e0cad529d08496692180ac;hb=4e0d9d5eb1f2f1bc0442a3e45ac23ceb5ff390ca;hp=2e4e32cb88c6310de7b908d82426bed45a9f6bdf;hpb=0f8dca08a4f68cba82c2c822998ecc309d3b7aaf;p=fs%2Flustre-release.git

diff --git a/lustre/llite/rw26.c b/lustre/llite/rw26.c
index 2e4e32c..999fd27 100644
--- a/lustre/llite/rw26.c
+++ b/lustre/llite/rw26.c
@@ -28,6 +28,9 @@
 /*
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011 Whamcloud, Inc.
+ *
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -63,7 +66,6 @@
 
 #define DEBUG_SUBSYSTEM S_LLITE
 
-//#include 
 #include 
 #include "llite_internal.h"
 #include 
@@ -187,7 +189,7 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr,
         *max_pages = (user_addr + size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
         *max_pages -= user_addr >> CFS_PAGE_SHIFT;
 
-        OBD_ALLOC_WAIT(*pages, *max_pages * sizeof(**pages));
+        OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages));
         if (*pages) {
                 down_read(&current->mm->mmap_sem);
                 result = get_user_pages(current, current->mm, user_addr,
@@ -195,7 +197,7 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr,
                                         NULL);
                 up_read(&current->mm->mmap_sem);
                 if (unlikely(result <= 0))
-                        OBD_FREE(*pages, *max_pages * sizeof(**pages));
+                        OBD_FREE_LARGE(*pages, *max_pages * sizeof(**pages));
         }
 
         return result;
@@ -215,7 +217,7 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
                         page_cache_release(pages[i]);
         }
 
-        OBD_FREE(pages, npages * sizeof(*pages));
+        OBD_FREE_LARGE(pages, npages * sizeof(*pages));
 }
 
 ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
@@ -232,6 +234,8 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
         int page_count = pv->ldp_nr;
         struct page **pages = pv->ldp_pages;
         long page_size = cl_page_size(obj);
+        bool do_io;
+        int  io_pages = 0;
         ENTRY;
 
         queue = &io->ci_queue;
@@ -239,6 +243,7 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
 
         for (i = 0; i < page_count; i++) {
                 if (pv->ldp_offsets)
                         file_offset = pv->ldp_offsets[i];
+                LASSERT(!(file_offset & (page_size - 1)));
                 clp = cl_page_find(env, obj, cl_index(obj, file_offset),
                                    pv->ldp_pages[i], CPT_TRANSIENT);
@@ -247,14 +252,17 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
                         break;
                 }
 
+                rc = cl_page_own(env, io, clp);
+                if (rc) {
+                        LASSERT(clp->cp_state == CPS_FREEING);
+                        cl_page_put(env, clp);
+                        break;
+                }
+
+                do_io = true;
+
                 /* check the page type: if the page is a host page, then do
                  * write directly */
-                /*
-                 * Very rare case that the host pages can be found for
-                 * directIO case, since linux kernel truncated all covered
-                 * pages before getting here. So, to make the OST happy(to
-                 * write a contiguous region), all pages are issued
-                 * here. -jay */
                 if (clp->cp_type == CPT_CACHEABLE) {
                         cfs_page_t *vmpage = cl_page_vmpage(env, clp);
                         cfs_page_t *src_page;
@@ -275,43 +283,41 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
                          * cl_io_submit()->...->vvp_page_prep_write(). */
                         if (rw == WRITE)
                                 set_page_dirty(vmpage);
+
+                        if (rw == READ) {
+                                /* do not issue the page for read, since it
+                                 * may reread a ra page which has NOT uptodate
+                                 * bit set. */
+                                cl_page_disown(env, io, clp);
+                                do_io = false;
+                        }
+                }
+
+                if (likely(do_io)) {
+                        cl_2queue_add(queue, clp);
+
                         /*
-                         * If direct-io read finds up-to-date page in the
-                         * cache, just copy it to the user space. Page will be
-                         * filtered out by vvp_page_prep_read(). This
-                         * preserves an invariant, that page is read at most
-                         * once, see cl_page_flags::CPF_READ_COMPLETED.
+                         * Set page clip to tell transfer formation engine
+                         * that page has to be sent even if it is beyond KMS.
                          */
-                }
+                        cl_page_clip(env, clp, 0, min(size, page_size));
-                rc = cl_page_own(env, io, clp);
-                if (rc) {
-                        LASSERT(clp->cp_state == CPS_FREEING);
-                        cl_page_put(env, clp);
-                        break;
+                        ++io_pages;
                 }
 
-                cl_2queue_add(queue, clp);
-
-                /* drop the reference count for cl_page_find, so that the page
-                 * will be freed in cl_2queue_fini. */
+                /* drop the reference count for cl_page_find */
                 cl_page_put(env, clp);
 
-                /*
-                 * Set page clip to tell transfer formation engine that page
-                 * has to be sent even if it is beyond KMS.
-                 */
-                cl_page_clip(env, clp, 0, min(size, page_size));
                 size -= page_size;
                 file_offset += page_size;
         }
 
-        if (rc == 0) {
+        if (rc == 0 && io_pages) {
                 rc = cl_io_submit_sync(env, io,
                                        rw == READ ? CRT_READ : CRT_WRITE,
                                        queue, CRP_NORMAL, 0);
-                if (rc == 0)
-                        rc = pv->ldp_size;
         }
+        if (rc == 0)
+                rc = pv->ldp_size;
 
         cl_2queue_discard(env, io, queue);
         cl_2queue_disown(env, io, queue);
@@ -468,7 +474,7 @@ out:
         RETURN(tot_bytes ? : result);
 }
-#ifdef HAVE_KERNEL_WRITE_BEGIN_END
+#if defined(HAVE_KERNEL_WRITE_BEGIN_END) || defined(MS_HAS_NEW_AOPS)
 static int ll_write_begin(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned flags,
                           struct page **pagep, void **fsdata)
 {
@@ -499,14 +505,25 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
 {
         unsigned from = pos & (PAGE_CACHE_SIZE - 1);
         int rc;
 
-        rc = ll_commit_write(file, page, from, from + copied);
+        rc = ll_commit_write(file, page, from, from + copied);
         unlock_page(page);
         page_cache_release(page);
-        return rc?rc:copied;
+
+        return rc ?: copied;
+}
+#endif
+
+#ifdef CONFIG_MIGRATION
+int ll_migratepage(struct address_space *mapping,
+                   struct page *newpage, struct page *page)
+{
+        /* Always fail page migration until we have a proper implementation */
+        return -EIO;
 }
 #endif
 
+#ifndef MS_HAS_NEW_AOPS
 struct address_space_operations ll_aops = {
         .readpage       = ll_readpage,
 //        .readpages      = ll_readpages,
@@ -524,5 +541,29 @@ struct address_space_operations ll_aops = {
 #endif
         .invalidatepage = ll_invalidatepage,
         .releasepage    = (void *)ll_releasepage,
+#ifdef CONFIG_MIGRATION
+        .migratepage    = ll_migratepage,
+#endif
         .bmap           = NULL
 };
+#else
+struct address_space_operations_ext ll_aops = {
+        .orig_aops.readpage       = ll_readpage,
+//        .orig_aops.readpages      = ll_readpages,
+        .orig_aops.direct_IO      = ll_direct_IO_26,
+        .orig_aops.writepage      = ll_writepage,
+        .orig_aops.writepages     = generic_writepages,
+        .orig_aops.set_page_dirty = ll_set_page_dirty,
+        .orig_aops.sync_page      = NULL,
+        .orig_aops.prepare_write  = ll_prepare_write,
+        .orig_aops.commit_write   = ll_commit_write,
+        .orig_aops.invalidatepage = ll_invalidatepage,
+        .orig_aops.releasepage    = ll_releasepage,
+#ifdef CONFIG_MIGRATION
+        .orig_aops.migratepage    = ll_migratepage,
+#endif
+        .orig_aops.bmap           = NULL,
+        .write_begin    = ll_write_begin,
+        .write_end      = ll_write_end
+};
+#endif
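
Note on the allocator switch in ll_get_user_pages()/ll_free_user_pages() above: the *pages
array for a big direct-IO transfer can run to megabytes (one struct page pointer per data
page), and OBD_ALLOC_WAIT demands a physically contiguous kmalloc-style allocation of that
size. The OBD_ALLOC_LARGE/OBD_FREE_LARGE pair instead falls back to a vmalloc-style
allocation for large sizes, with the size passed back on free so the matching deallocator
can be chosen. The following is a minimal userspace sketch of that selection pattern, not
the actual Lustre macros; the 16 KiB threshold and the alloc_large()/free_large() names
are illustrative assumptions.

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#define LARGE_ALLOC_THRESHOLD (16 * 1024)  /* assumed cut-over point */

/* Small requests use an ordinary heap allocation (standing in for
 * kmalloc); large ones use an anonymous mmap (standing in for vmalloc),
 * which does not need physically contiguous backing. */
static void *alloc_large(size_t size)
{
        if (size <= LARGE_ALLOC_THRESHOLD)
                return calloc(1, size);

        void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return p == MAP_FAILED ? NULL : p;
}

/* The caller passes the size back, as with OBD_FREE_LARGE(ptr, size),
 * so the free path can mirror the allocation path. */
static void free_large(void *ptr, size_t size)
{
        if (size <= LARGE_ALLOC_THRESHOLD)
                free(ptr);
        else
                munmap(ptr, size);
}

int main(void)
{
        /* A 1 GiB transfer at 4 KiB pages needs 256K page pointers,
         * i.e. a 2 MB array on a 64-bit machine. */
        size_t npages = (1UL << 30) / 4096;
        void **pages = alloc_large(npages * sizeof(*pages));

        if (pages == NULL)
                return 1;
        printf("allocated %zu bytes for %zu page pointers\n",
               npages * sizeof(*pages), npages);
        free_large(pages, npages * sizeof(*pages));
        return 0;
}

This pairing is also why the patch converts every matching OBD_FREE() call to
OBD_FREE_LARGE(): allocation and free must agree on which path was taken.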