X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fllite%2Frw26.c;h=6cbf9e0cb2f6eba279c54000a814467e7d2d1156;hp=32bfa8acd7b442bdedf2db0155fc5d82fe0aef4c;hb=322968acf183ab16d952cd3026f6580957b31259;hpb=b918d20c1073fbb29de95dccec018b1b992cf81d diff --git a/lustre/llite/rw26.c b/lustre/llite/rw26.c index 32bfa8a..6cbf9e0 100644 --- a/lustre/llite/rw26.c +++ b/lustre/llite/rw26.c @@ -26,8 +26,11 @@ * GPL HEADER END */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2011 Whamcloud, Inc. + * */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -63,7 +66,6 @@ #define DEBUG_SUBSYSTEM S_LLITE -//#include #include #include "llite_internal.h" #include @@ -173,10 +175,10 @@ static int ll_set_page_dirty(struct page *vmpage) #define MAX_DIRECTIO_SIZE 2*1024*1024*1024UL static inline int ll_get_user_pages(int rw, unsigned long user_addr, - size_t size, struct page ***pages) + size_t size, struct page ***pages, + int *max_pages) { int result = -ENOMEM; - int page_count; /* set an arbitrary limit to prevent arithmetic overflow */ if (size > MAX_DIRECTIO_SIZE) { @@ -184,18 +186,18 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr, return -EFBIG; } - page_count = (user_addr + size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT; - page_count -= user_addr >> CFS_PAGE_SHIFT; + *max_pages = (user_addr + size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT; + *max_pages -= user_addr >> CFS_PAGE_SHIFT; - OBD_ALLOC_WAIT(*pages, page_count * sizeof(**pages)); + OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages)); if (*pages) { down_read(¤t->mm->mmap_sem); result = get_user_pages(current, current->mm, user_addr, - page_count, (rw == READ), 0, *pages, + *max_pages, (rw == READ), 0, *pages, NULL); up_read(¤t->mm->mmap_sem); - if (result < 0) - OBD_FREE(*pages, page_count * sizeof(**pages)); + if (unlikely(result <= 0)) + OBD_FREE_LARGE(*pages, *max_pages * sizeof(**pages)); } return result; @@ -208,12 +210,14 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty) int i; for (i = 0; i < npages; i++) { + if (pages[i] == NULL) + break; if (do_dirty) set_page_dirty_lock(pages[i]); page_cache_release(pages[i]); } - OBD_FREE(pages, npages * sizeof(*pages)); + OBD_FREE_LARGE(pages, npages * sizeof(*pages)); } ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, @@ -221,26 +225,25 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, struct ll_dio_pages *pv) { struct cl_page *clp; - struct ccc_page *clup; struct cl_2queue *queue; struct cl_object *obj = io->ci_obj; - struct cl_sync_io *anchor = &ccc_env_info(env)->cti_sync_io; int i; ssize_t rc = 0; loff_t file_offset = pv->ldp_start_offset; - size_t size = pv->ldp_size; + long size = pv->ldp_size; int page_count = pv->ldp_nr; struct page **pages = pv->ldp_pages; - size_t page_size = cl_page_size(obj); + long page_size = cl_page_size(obj); + bool do_io; + int io_pages = 0; ENTRY; - cl_sync_io_init(anchor, page_count); - queue = &io->ci_queue; cl_2queue_init(queue); for (i = 0; i < page_count; i++) { if (pv->ldp_offsets) file_offset = pv->ldp_offsets[i]; + LASSERT(!(file_offset & (page_size - 1))); clp = cl_page_find(env, obj, cl_index(obj, file_offset), pv->ldp_pages[i], CPT_TRANSIENT); @@ -249,14 +252,17 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, break; } + rc = 
cl_page_own(env, io, clp); + if (rc) { + LASSERT(clp->cp_state == CPS_FREEING); + cl_page_put(env, clp); + break; + } + + do_io = true; + /* check the page type: if the page is a host page, then do * write directly */ - /* - * Very rare case that the host pages can be found for - * directIO case, since linux kernel truncated all covered - * pages before getting here. So, to make the OST happy(to - * write a contiguous region), all pages are issued - * here. -jay */ if (clp->cp_type == CPT_CACHEABLE) { cfs_page_t *vmpage = cl_page_vmpage(env, clp); cfs_page_t *src_page; @@ -269,7 +275,7 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, src = kmap_atomic(src_page, KM_USER0); dst = kmap_atomic(dst_page, KM_USER1); - memcpy(dst, (const void *)src, min(page_size, size)); + memcpy(dst, src, min(page_size, size)); kunmap_atomic(dst, KM_USER1); kunmap_atomic(src, KM_USER0); @@ -277,55 +283,41 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, * cl_io_submit()->...->vvp_page_prep_write(). */ if (rw == WRITE) set_page_dirty(vmpage); + + if (rw == READ) { + /* do not issue the page for read, since it + * may reread a ra page which has NOT uptodate + * bit set. */ + cl_page_disown(env, io, clp); + do_io = false; + } + } + + if (likely(do_io)) { + cl_2queue_add(queue, clp); + /* - * If direct-io read finds up-to-date page in the - * cache, just copy it to the user space. Page will be - * filtered out by vvp_page_prep_read(). This - * preserves an invariant, that page is read at most - * once, see cl_page_flags::CPF_READ_COMPLETED. + * Set page clip to tell transfer formation engine + * that page has to be sent even if it is beyond KMS. */ - } + cl_page_clip(env, clp, 0, min(size, page_size)); - rc = cl_page_own(env, io, clp); - if (rc) { - LASSERT(clp->cp_state == CPS_FREEING); - cl_page_put(env, clp); - break; + ++io_pages; } - clup = cl2ccc_page(cl_page_at(clp, &vvp_device_type)); - clup->cpg_sync_io = anchor; - cl_2queue_add(queue, clp); - - /* drop the reference count for cl_page_find, so that the page - * will be freed in cl_2queue_fini. */ + /* drop the reference count for cl_page_find */ cl_page_put(env, clp); - /* - * Set page clip to tell transfer formation engine that page - * has to be sent even if it is beyond KMS. - */ - cl_page_clip(env, clp, 0, min(size, page_size)); size -= page_size; file_offset += page_size; } - if (rc == 0) { - rc = cl_io_submit_rw(env, io, rw == READ ? CRT_READ : CRT_WRITE, - queue, CRP_NORMAL); - if (rc == 0) { - /* - * If some pages weren't sent for any reason (e.g., - * direct-io read found up-to-date pages in the - * cache), count them as completed to avoid infinite - * wait. - */ - cl_page_list_for_each(clp, &queue->c2_qin) - cl_sync_io_note(anchor, +1); - /* wait for the IO to be finished. */ - rc = cl_sync_io_wait(env, io, &queue->c2_qout, - anchor) ?: pv->ldp_size; - } + if (rc == 0 && io_pages) { + rc = cl_io_submit_sync(env, io, + rw == READ ? 
CRT_READ : CRT_WRITE, + queue, CRP_NORMAL, 0); } + if (rc == 0) + rc = pv->ldp_size; cl_2queue_discard(env, io, queue); cl_2queue_disown(env, io, queue); @@ -350,11 +342,18 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io, return ll_direct_rw_pages(env, io, rw, inode, &pvec); } -/* This is the maximum size of a single O_DIRECT request, based on a 128kB +#ifdef KMALLOC_MAX_SIZE +#define MAX_MALLOC KMALLOC_MAX_SIZE +#else +#define MAX_MALLOC (128 * 1024) +#endif + +/* This is the maximum size of a single O_DIRECT request, based on the * kmalloc limit. We need to fit all of the brw_page structs, each one * representing PAGE_SIZE worth of user data, into a single buffer, and - * then truncate this to be a full-sized RPC. This is 22MB for 4kB pages. */ -#define MAX_DIO_SIZE ((128 * 1024 / sizeof(struct brw_page) * CFS_PAGE_SIZE) & \ + * then truncate this to be a full-sized RPC. For 4kB PAGE_SIZE this is + * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */ +#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * CFS_PAGE_SIZE) & \ ~(PTLRPC_MAX_BRW_SIZE - 1)) static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t file_offset, @@ -365,11 +364,12 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb, struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; struct ccc_object *obj = cl_inode2ccc(inode); - ssize_t count = iov_length(iov, nr_segs), tot_bytes = 0; + long count = iov_length(iov, nr_segs); + long tot_bytes = 0, result = 0; struct ll_inode_info *lli = ll_i2info(inode); struct lov_stripe_md *lsm = lli->lli_smd; unsigned long seg = 0; - size_t size = MAX_DIO_SIZE; + long size = MAX_DIO_SIZE; int refcheck; ENTRY; @@ -380,8 +380,8 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb, if ((file_offset & ~CFS_PAGE_MASK) || (count & ~CFS_PAGE_MASK)) RETURN(-EINVAL); - CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), size="LPSZ" (max %lu), " - "offset=%lld=%llx, pages "LPSZ" (max %lu)\n", + CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), size=%lu (max %lu), " + "offset=%lld=%llx, pages %lu (max %lu)\n", inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE, file_offset, file_offset, count >> CFS_PAGE_SHIFT, MAX_DIO_SIZE >> CFS_PAGE_SHIFT); @@ -399,62 +399,62 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb, LASSERT(io != NULL); /* 0. Need locking between buffered and direct access. and race with - *size changing by concurrent truncates and writes. + * size changing by concurrent truncates and writes. * 1. Need inode sem to operate transient pages. 
*/ if (rw == READ) LOCK_INODE_MUTEX(inode); LASSERT(obj->cob_transient_pages == 0); for (seg = 0; seg < nr_segs; seg++) { - size_t iov_left = iov[seg].iov_len; + long iov_left = iov[seg].iov_len; unsigned long user_addr = (unsigned long)iov[seg].iov_base; if (rw == READ) { - if (file_offset >= inode->i_size) + if (file_offset >= i_size_read(inode)) break; - if (file_offset + iov_left > inode->i_size) - iov_left = inode->i_size - file_offset; + if (file_offset + iov_left > i_size_read(inode)) + iov_left = i_size_read(inode) - file_offset; } while (iov_left > 0) { struct page **pages; - int page_count; - ssize_t result; - - page_count = ll_get_user_pages(rw, user_addr, - min(size, iov_left), - &pages); - LASSERT(page_count != 0); - if (page_count > 0) { + int page_count, max_pages = 0; + long bytes; + + bytes = min(size, iov_left); + page_count = ll_get_user_pages(rw, user_addr, bytes, + &pages, &max_pages); + if (likely(page_count > 0)) { + if (unlikely(page_count < max_pages)) + bytes = page_count << CFS_PAGE_SHIFT; result = ll_direct_IO_26_seg(env, io, rw, inode, file->f_mapping, - min(size,iov_left), - file_offset, pages, - page_count); - ll_free_user_pages(pages, page_count, rw==READ); + bytes, file_offset, + pages, page_count); + ll_free_user_pages(pages, max_pages, rw==READ); + } else if (page_count == 0) { + GOTO(out, result = -EFAULT); } else { - result = 0; + result = page_count; } - if (page_count < 0 || result <= 0) { + if (unlikely(result <= 0)) { /* If we can't allocate a large enough buffer * for the request, shrink it to a smaller * PAGE_SIZE multiple and try again. * We should always be able to kmalloc for a * page worth of page pointers = 4MB on i386. */ - if ((page_count == -ENOMEM||result == -ENOMEM)&& + if (result == -ENOMEM && size > (CFS_PAGE_SIZE / sizeof(*pages)) * CFS_PAGE_SIZE) { size = ((((size / 2) - 1) | ~CFS_PAGE_MASK) + 1) & CFS_PAGE_MASK; - CDEBUG(D_VFSTRACE, "DIO size now %u\n", - (int)size); + CDEBUG(D_VFSTRACE,"DIO size now %lu\n", + size); continue; } - if (tot_bytes <= 0) - tot_bytes = page_count < 0 ? page_count : result; - GOTO(out, tot_bytes); + GOTO(out, result); } tot_bytes += result; @@ -477,9 +477,59 @@ out: } cl_env_put(env, &refcheck); - RETURN(tot_bytes); + RETURN(tot_bytes ? 
: result); +} + +#if defined(HAVE_KERNEL_WRITE_BEGIN_END) || defined(MS_HAS_NEW_AOPS) +static int ll_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) +{ + pgoff_t index = pos >> PAGE_CACHE_SHIFT; + struct page *page; + int rc; + unsigned from = pos & (PAGE_CACHE_SIZE - 1); + ENTRY; + + page = grab_cache_page_write_begin(mapping, index, flags); + if (!page) + RETURN(-ENOMEM); + + *pagep = page; + + rc = ll_prepare_write(file, page, from, from + len); + if (rc) { + unlock_page(page); + page_cache_release(page); + } + RETURN(rc); +} + +static int ll_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata) +{ + unsigned from = pos & (PAGE_CACHE_SIZE - 1); + int rc; + + rc = ll_commit_write(file, page, from, from + copied); + unlock_page(page); + page_cache_release(page); + + return rc ?: copied; } +#endif + +#ifdef CONFIG_MIGRATION +int ll_migratepage(struct address_space *mapping, + struct page *newpage, struct page *page) +{ + /* Always fail page migration until we have a proper implementation */ + return -EIO; +} +#endif +#ifndef MS_HAS_NEW_AOPS struct address_space_operations ll_aops = { .readpage = ll_readpage, // .readpages = ll_readpages, @@ -488,9 +538,38 @@ struct address_space_operations ll_aops = { .writepages = generic_writepages, .set_page_dirty = ll_set_page_dirty, .sync_page = NULL, +#ifdef HAVE_KERNEL_WRITE_BEGIN_END + .write_begin = ll_write_begin, + .write_end = ll_write_end, +#else .prepare_write = ll_prepare_write, .commit_write = ll_commit_write, +#endif .invalidatepage = ll_invalidatepage, .releasepage = (void *)ll_releasepage, +#ifdef CONFIG_MIGRATION + .migratepage = ll_migratepage, +#endif .bmap = NULL }; +#else +struct address_space_operations_ext ll_aops = { + .orig_aops.readpage = ll_readpage, +// .orig_aops.readpages = ll_readpages, + .orig_aops.direct_IO = ll_direct_IO_26, + .orig_aops.writepage = ll_writepage, + .orig_aops.writepages = generic_writepages, + .orig_aops.set_page_dirty = ll_set_page_dirty, + .orig_aops.sync_page = NULL, + .orig_aops.prepare_write = ll_prepare_write, + .orig_aops.commit_write = ll_commit_write, + .orig_aops.invalidatepage = ll_invalidatepage, + .orig_aops.releasepage = ll_releasepage, +#ifdef CONFIG_MIGRATION + .orig_aops.migratepage = ll_migratepage, +#endif + .orig_aops.bmap = NULL, + .write_begin = ll_write_begin, + .write_end = ll_write_end +}; +#endif
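
Note on the MAX_DIO_SIZE change above: the new macro sizes a single O_DIRECT request by how many struct brw_page entries fit in one kmalloc buffer (MAX_MALLOC), then rounds the result down to a whole PTLRPC RPC. The small userspace sketch below only reproduces that arithmetic to sanity-check the "22MB for 128kB kmalloc" and "682MB for 4MB kmalloc" figures quoted in the updated comment; the 24-byte sizeof(struct brw_page) and the 1 MiB PTLRPC_MAX_BRW_SIZE used here are assumptions made for illustration, not values taken from this patch.

#include <stdio.h>

int main(void)
{
        /* Assumed values, for illustration only (not taken from this patch): */
        const unsigned long brw_page_size = 24;      /* assumed sizeof(struct brw_page) on 64-bit */
        const unsigned long rpc_size = 1024 * 1024;  /* assumed PTLRPC_MAX_BRW_SIZE (1 MiB) */
        const unsigned long page_size = 4096;        /* CFS_PAGE_SIZE on a 4kB-page kernel */
        const unsigned long kmalloc_limits[] = { 128 * 1024, 4 * 1024 * 1024 };
        unsigned long i;

        for (i = 0; i < sizeof(kmalloc_limits) / sizeof(kmalloc_limits[0]); i++) {
                unsigned long max_malloc = kmalloc_limits[i];
                /* Same formula as MAX_DIO_SIZE: number of brw_page structs that
                 * fit in one kmalloc buffer, times PAGE_SIZE of user data each,
                 * rounded down to a whole PTLRPC_MAX_BRW_SIZE RPC. */
                unsigned long dio = (max_malloc / brw_page_size * page_size) &
                                    ~(rpc_size - 1);
                printf("kmalloc limit %7lu -> MAX_DIO_SIZE %lu bytes (%lu MiB)\n",
                       max_malloc, dio, dio >> 20);
        }
        return 0;
}

With those assumed sizes this prints 22020096 bytes (21 MiB, roughly the 22MB quoted in the comment) for a 128kB kmalloc limit and 715128832 bytes (682 MiB) for a 4MB limit, consistent with the figures in the patched comment.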