diff --git a/lustre/llite/rw.c b/lustre/llite/rw.c
index 4aab3d1..e359b60 100644
--- a/lustre/llite/rw.c
+++ b/lustre/llite/rw.c
@@ -15,11 +15,7 @@
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2016, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -57,18 +53,9 @@
 #define DEBUG_SUBSYSTEM S_LLITE
 
-#include 
 #include 
 #include "llite_internal.h"
-#include 
-
-struct obd_capa *cl_capa_lookup(struct inode *inode, enum cl_req_type crt)
-{
-        __u64 opc;
-
-        opc = crt == CRT_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;
-        return ll_osscapa_get(inode, opc);
-}
+#include 
 
 static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
 
@@ -102,27 +89,11 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
         /* If read-ahead pages left are less than 1M, do not do read-ahead,
          * otherwise it will form small read RPCs (< 1M), which hurt server
          * performance a lot. */
-        ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), pages);
+        ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages),
+                  pages);
         if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages))
                 GOTO(out, ret = 0);
 
-        /* If the non-strided (ria_pages == 0) readahead window
-         * (ria_start + ret) has grown across an RPC boundary, then trim
-         * readahead size by the amount beyond the RPC so it ends on an
-         * RPC boundary. If the readahead window is already ending on
-         * an RPC boundary (beyond_rpc == 0), or smaller than a full
-         * RPC (beyond_rpc < ret) the readahead size is unchanged.
-         * The (beyond_rpc != 0) check is skipped since the conditional
-         * branch is more expensive than subtracting zero from the result.
-         *
-         * Strided read is left unaligned to avoid small fragments beyond
-         * the RPC boundary from needing an extra read RPC.
-         */
-        if (ria->ria_pages == 0) {
-                long beyond_rpc = (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES;
-                if (/* beyond_rpc != 0 && */ beyond_rpc < ret)
-                        ret -= beyond_rpc;
-        }
-
         if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
                 atomic_sub(ret, &ra->ra_cur_pages);
                 ret = 0;
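The hunk above deletes the old RPC-boundary trimming heuristic (later hunks align the whole window with ras_align() instead). As a reference for what is being removed, here is a minimal standalone model of the deleted rule; it is a sketch, not Lustre code, and assumes PTLRPC_MAX_BRW_PAGES == 256, i.e. a 1 MiB RPC of 4 KiB pages:

    #include <stdio.h>

    #define PTLRPC_MAX_BRW_PAGES 256

    /* Model of the removed trimming: if (start + ret) overshoots an RPC
     * boundary, cut the overshoot so the reservation ends on the boundary. */
    static long trim_to_rpc_boundary(long start, long ret)
    {
            long beyond_rpc = (start + ret) % PTLRPC_MAX_BRW_PAGES;

            /* A window smaller than the overshoot is left unchanged. */
            if (beyond_rpc < ret)
                    ret -= beyond_rpc;
            return ret;
    }

    int main(void)
    {
            /* 100 + 300 = 400 overshoots the boundary at 256 by 144 pages,
             * so the reservation is trimmed to 156 and ends exactly at 256. */
            printf("%ld\n", trim_to_rpc_boundary(100, 300)); /* 156 */
            /* A reservation already ending on a boundary is unchanged. */
            printf("%ld\n", trim_to_rpc_boundary(0, 256));   /* 256 */
            return 0;
    }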
@@ -145,8 +116,8 @@ void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
 
 static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
 {
-        LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
-        lprocfs_counter_incr(sbi->ll_ra_stats, which);
+        LASSERTF(which < _NR_RA_STAT, "which: %u\n", which);
+        lprocfs_counter_incr(sbi->ll_ra_stats, which);
 }
 
 void ll_ra_stats_inc(struct inode *inode, enum ra_stat which)
@@ -156,15 +127,16 @@ void ll_ra_stats_inc(struct inode *inode, enum ra_stat which)
 }
 
 #define RAS_CDEBUG(ras) \
-        CDEBUG(D_READA,                                                      \
-               "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu"    \
-               "csr %lu sf %lu sp %lu sl %lu \n",                            \
-               ras->ras_last_readpage, ras->ras_consecutive_requests,        \
-               ras->ras_consecutive_pages, ras->ras_window_start,            \
-               ras->ras_window_len, ras->ras_next_readahead,                 \
-               ras->ras_requests, ras->ras_request_index,                    \
-               ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \
-               ras->ras_stride_pages, ras->ras_stride_length)
+        CDEBUG(D_READA,                                                      \
+               "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu rpc %lu "        \
+               "r %lu ri %lu csr %lu sf %lu sp %lu sl %lu\n",                \
+               ras->ras_last_readpage, ras->ras_consecutive_requests,        \
+               ras->ras_consecutive_pages, ras->ras_window_start,            \
+               ras->ras_window_len, ras->ras_next_readahead,                 \
+               ras->ras_rpc_size,                                            \
+               ras->ras_requests, ras->ras_request_index,                    \
+               ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \
+               ras->ras_stride_pages, ras->ras_stride_length)
 
 static int index_in_window(unsigned long index, unsigned long point,
                            unsigned long before, unsigned long after)
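index_in_window(), whose signature closes the hunk above, answers whether an index lies in the window [point - before, point + after]. The sketch below restates that contract in standalone C; the clamping of both ends against unsigned wrap-around is an assumption made for the sketch, not a copy of the Lustre implementation:

    #include <stdio.h>

    /* Sketch of the index_in_window() contract: is index within
     * [point - before, point + after], with both ends clamped when the
     * unsigned arithmetic would wrap? */
    static int in_window(unsigned long index, unsigned long point,
                         unsigned long before, unsigned long after)
    {
            unsigned long start = point - before;
            unsigned long end = point + after;

            if (start > point)      /* point - before wrapped below 0 */
                    start = 0;
            if (end < point)        /* point + after wrapped past ~0UL */
                    end = ~0UL;

            return start <= index && index <= end;
    }

    int main(void)
    {
            printf("%d\n", in_window(100, 100, 0, 0)); /* 1: exact hit */
            printf("%d\n", in_window(99, 100, 8, 8));  /* 1: within +/-8 */
            printf("%d\n", in_window(5, 2, 8, 8));     /* 1: start clamped to 0 */
            printf("%d\n", in_window(200, 100, 8, 8)); /* 0: outside */
            return 0;
    }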
@@ -179,166 +151,90 @@ static int index_in_window(unsigned long index, unsigned long point,
         return start <= index && index <= end;
 }
 
-static struct ll_readahead_state *ll_ras_get(struct file *f)
-{
-        struct ll_file_data *fd;
-
-        fd = LUSTRE_FPRIVATE(f);
-        return &fd->fd_ras;
-}
-
-void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
+void ll_ras_enter(struct file *f)
 {
-        struct ll_readahead_state *ras;
-
-        ras = ll_ras_get(f);
+        struct ll_file_data *fd = LUSTRE_FPRIVATE(f);
+        struct ll_readahead_state *ras = &fd->fd_ras;
 
         spin_lock(&ras->ras_lock);
         ras->ras_requests++;
         ras->ras_request_index = 0;
         ras->ras_consecutive_requests++;
-        rar->lrr_reader = current;
-
-        list_add(&rar->lrr_linkage, &ras->ras_read_beads);
-        spin_unlock(&ras->ras_lock);
-}
-
-void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
-{
-        struct ll_readahead_state *ras;
-
-        ras = ll_ras_get(f);
-
-        spin_lock(&ras->ras_lock);
-        list_del_init(&rar->lrr_linkage);
         spin_unlock(&ras->ras_lock);
 }
 
-static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
-{
-        struct ll_ra_read *scan;
-
-        list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
-                if (scan->lrr_reader == current)
-                        return scan;
-        }
-        return NULL;
-}
-
-struct ll_ra_read *ll_ra_read_get(struct file *f)
+/**
+ * Initiates read-ahead of a page with given index.
+ *
+ * \retval +ve: page was already uptodate so it will be skipped
+ *              from being added;
+ * \retval -ve: page wasn't added to \a queue due to error;
+ * \retval   0: page was added into \a queue for read ahead.
+ */
+static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
+                              struct cl_page_list *queue, pgoff_t index)
 {
-        struct ll_readahead_state *ras;
-        struct ll_ra_read *bead;
-
-        ras = ll_ras_get(f);
+        struct cl_object *clob = io->ci_obj;
+        struct inode *inode = vvp_object_inode(clob);
+        struct page *vmpage;
+        struct cl_page *page;
+        struct vvp_page *vpg;
+        enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
+        int rc = 0;
+        const char *msg = NULL;
+        ENTRY;
 
-        spin_lock(&ras->ras_lock);
-        bead = ll_ra_read_get_locked(ras);
-        spin_unlock(&ras->ras_lock);
-        return bead;
-}
+        vmpage = grab_cache_page_nowait(inode->i_mapping, index);
+        if (vmpage == NULL) {
+                which = RA_STAT_FAILED_GRAB_PAGE;
+                msg = "g_c_p_n failed";
+                GOTO(out, rc = -EBUSY);
+        }
 
-static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
-                              struct cl_page_list *queue, struct cl_page *page,
-                              struct cl_object *clob, pgoff_t *max_index)
-{
-        struct page *vmpage = page->cp_vmpage;
-        struct ccc_page *cp;
-        int rc;
+        /* Check if vmpage was truncated or reclaimed */
+        if (vmpage->mapping != inode->i_mapping) {
+                which = RA_STAT_WRONG_GRAB_PAGE;
+                msg = "g_c_p_n returned invalid page";
+                GOTO(out, rc = -EBUSY);
+        }
 
-        ENTRY;
+        page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
+        if (IS_ERR(page)) {
+                which = RA_STAT_FAILED_GRAB_PAGE;
+                msg = "cl_page_find failed";
+                GOTO(out, rc = PTR_ERR(page));
+        }
 
-        rc = 0;
-        cl_page_assume(env, io, page);
         lu_ref_add(&page->cp_reference, "ra", current);
-        cp = cl2ccc_page(cl_object_page_slice(clob, page));
-        if (!cp->cpg_defer_uptodate && !PageUptodate(vmpage)) {
-                CDEBUG(D_READA, "page index %lu, max_index: %lu\n",
-                       ccc_index(cp), *max_index);
-                /* Disable the optimization on prefetching maximum readahead
-                 * index because there is a race with lock cancellation. This
-                 * optimization will be revived later.
-                 * if (*max_index == 0 || ccc_index(cp) > *max_index) */
-                rc = cl_page_is_under_lock(env, io, page, max_index);
-                if (rc == 0) {
-                        cp->cpg_defer_uptodate = 1;
-                        cp->cpg_ra_used = 0;
-                        cl_page_list_add(queue, page);
-                        rc = 1;
-                } else {
-                        cl_page_discard(env, io, page);
-                        rc = -ENOLCK;
-                }
+        cl_page_assume(env, io, page);
+        vpg = cl2vvp_page(cl_object_page_slice(clob, page));
+        if (!vpg->vpg_defer_uptodate && !PageUptodate(vmpage)) {
+                vpg->vpg_defer_uptodate = 1;
+                vpg->vpg_ra_used = 0;
+                cl_page_list_add(queue, page);
         } else {
                 /* skip completed pages */
                 cl_page_unassume(env, io, page);
+                /* This page is already uptodate, returning a positive number
+                 * to tell the callers about this */
+                rc = 1;
         }
+
         lu_ref_del(&page->cp_reference, "ra", current);
         cl_page_put(env, page);
-        RETURN(rc);
-}
-
-/**
- * Initiates read-ahead of a page with given index.
- *
- * \retval +ve: page was added to \a queue.
- *
- * \retval -ENOLCK: there is no extent lock for this part of a file, stop
- * read-ahead.
- *
- * \retval -ve, 0: page wasn't added to \a queue for other reason.
- */
-static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
-                              struct cl_page_list *queue,
-                              pgoff_t index, pgoff_t *max_index)
-{
-        struct cl_object *clob = io->ci_obj;
-        struct inode *inode = ccc_object_inode(clob);
-        struct page *vmpage;
-        struct cl_page *page;
-        enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
-        unsigned int gfp_mask;
-        int rc = 0;
-        const char *msg = NULL;
-
-        ENTRY;
-        gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
-#ifdef __GFP_NOWARN
-        gfp_mask |= __GFP_NOWARN;
-#endif
-        vmpage = grab_cache_page_nowait(inode->i_mapping, index);
+out:
         if (vmpage != NULL) {
-                /* Check if vmpage was truncated or reclaimed */
-                if (vmpage->mapping == inode->i_mapping) {
-                        page = cl_page_find(env, clob, vmpage->index,
-                                            vmpage, CPT_CACHEABLE);
-                        if (!IS_ERR(page)) {
-                                rc = cl_read_ahead_page(env, io, queue,
-                                                        page, clob, max_index);
-                                if (rc == -ENOLCK) {
-                                        which = RA_STAT_FAILED_MATCH;
-                                        msg = "lock match failed";
-                                }
-                        } else {
-                                which = RA_STAT_FAILED_GRAB_PAGE;
-                                msg = "cl_page_find failed";
-                        }
-                } else {
-                        which = RA_STAT_WRONG_GRAB_PAGE;
-                        msg = "g_c_p_n returned invalid page";
-                }
-                if (rc != 1)
-                        unlock_page(vmpage);
-                page_cache_release(vmpage);
-        } else {
-                which = RA_STAT_FAILED_GRAB_PAGE;
-                msg = "g_c_p_n failed";
-        }
+                if (rc != 0)
+                        unlock_page(vmpage);
+                put_page(vmpage);
+        }
         if (msg != NULL) {
                 ll_ra_stats_inc(inode, which);
                 CDEBUG(D_READA, "%s\n", msg);
+        }
+
         RETURN(rc);
 }
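The rewritten ll_read_ahead_page() now reports three outcomes: 0 (queued for read-ahead), positive (already uptodate, skipped) and negative (error). The budget bookkeeping this implies for callers is shown in the hypothetical loop below; read_ahead_page() and queue_window() are invented stand-ins for illustration, not Lustre functions:

    #include <stdio.h>

    /* Stub standing in for ll_read_ahead_page(): pretend even-numbered
     * pages are already cached (rc > 0) and odd ones get queued (rc == 0). */
    static int read_ahead_page(unsigned long idx)
    {
            return (idx % 2 == 0) ? 1 : 0;
    }

    /* Hypothetical caller loop consuming the new convention: only pages
     * actually queued (rc == 0) consume the reserved budget, uptodate
     * pages (rc > 0) are skipped for free, and an error stops the scan. */
    static unsigned long queue_window(unsigned long start, unsigned long end,
                                      unsigned long reserved)
    {
            unsigned long idx, last = 0;

            for (idx = start; idx <= end && reserved > 0; idx++) {
                    int rc = read_ahead_page(idx);

                    if (rc < 0)
                            break;
                    last = idx;
                    if (rc == 0)
                            reserved--;
            }
            return last;
    }

    int main(void)
    {
            /* A budget of 4 queues pages 1, 3, 5 and 7; the scan stops
             * after page 7 because the budget is exhausted. */
            printf("last examined: %lu\n", queue_window(0, 15, 4));
            return 0;
    }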
@@ -347,22 +243,11 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
                ria->ria_start, ria->ria_end, ria->ria_stoff, ria->ria_length,\
                ria->ria_pages)
 
-/* Limit this to the blocksize instead of PTLRPC_BRW_MAX_SIZE, since we don't
- * know what the actual RPC size is. If this needs to change, it makes more
- * sense to tune the i_blkbits value for the file based on the OSTs it is
- * striped over, rather than having a constant value for all files here. */
-
-/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
- * Temprarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
- * by default, this should be adjusted corresponding with max_read_ahead_mb
- * and max_read_ahead_per_file_mb otherwise the readahead budget can be used
- * up quickly which will affect read performance siginificantly. See LU-2816 */
-#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
-
 static inline int stride_io_mode(struct ll_readahead_state *ras)
 {
         return ras->ras_consecutive_stride_requests > 1;
 }
+
 /* The function calculates how many pages will be read in
  * [off, off + length], in such stride IO area,
  * stride_offset = st_off, stride_length = st_len,
@@ -404,7 +289,7 @@ stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
         if (end_left > st_pgs)
                 end_left = st_pgs;
 
-        CDEBUG(D_READA, "start "LPU64", end "LPU64" start_left %lu end_left %lu \n",
+        CDEBUG(D_READA, "start %llu, end %llu start_left %lu end_left %lu\n",
                start, end, start_left, end_left);
 
         if (start == end)
@@ -428,6 +313,16 @@ static int ria_page_count(struct ra_io_arg *ria)
                         length);
 }
 
+static unsigned long ras_align(struct ll_readahead_state *ras,
+                               unsigned long index,
+                               unsigned long *remainder)
+{
+        unsigned long rem = index % ras->ras_rpc_size;
+        if (remainder != NULL)
+                *remainder = rem;
+        return index - rem;
+}
+
 /* Check whether the index is in the defined ra-window */
 static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria)
 {
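ras_align(), added above, rounds an index down to the start of its RPC-sized chunk and optionally reports the remainder. A standalone model with one worked value; an rpc_size of 256 pages is assumed:

    #include <stdio.h>

    /* Model of ras_align(): round index down to an rpc_size boundary,
     * optionally returning the remainder. */
    static unsigned long align_down(unsigned long index, unsigned long rpc_size,
                                    unsigned long *remainder)
    {
            unsigned long rem = index % rpc_size;

            if (remainder != NULL)
                    *remainder = rem;
            return index - rem;
    }

    int main(void)
    {
            unsigned long rem;
            unsigned long aligned = align_down(1000, 256, &rem);

            /* 1000 = 3 * 256 + 232, so aligned == 768 and rem == 232 */
            printf("aligned %lu rem %lu\n", aligned, rem);
            return 0;
    }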
@@ -440,32 +335,63 @@ static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria)
                 ria->ria_length < ria->ria_pages);
 }
 
-static int ll_read_ahead_pages(const struct lu_env *env,
-                               struct cl_io *io, struct cl_page_list *queue,
-                               struct ra_io_arg *ria,
-                               unsigned long *reserved_pages,
-                               unsigned long *ra_end)
+static unsigned long
+ll_read_ahead_pages(const struct lu_env *env, struct cl_io *io,
+                    struct cl_page_list *queue, struct ll_readahead_state *ras,
+                    struct ra_io_arg *ria)
 {
-        int rc, count = 0;
+        struct cl_read_ahead ra = { 0 };
+        int rc = 0;
         bool stride_ria;
+        unsigned long ra_end = 0;
         pgoff_t page_idx;
-        pgoff_t max_index = 0;
 
         LASSERT(ria != NULL);
         RIA_DEBUG(ria);
 
         stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
         for (page_idx = ria->ria_start;
-             page_idx <= ria->ria_end && *reserved_pages > 0; page_idx++) {
+             page_idx <= ria->ria_end && ria->ria_reserved > 0; page_idx++) {
                 if (ras_inside_ra_window(page_idx, ria)) {
-                        /* If the page is inside the read-ahead window*/
-                        rc = ll_read_ahead_page(env, io, queue,
-                                                page_idx, &max_index);
-                        if (rc == 1) {
-                                (*reserved_pages)--;
-                                count++;
-                        } else if (rc == -ENOLCK)
-                                break;
+                        if (ra.cra_end == 0 || ra.cra_end < page_idx) {
+                                unsigned long end;
+
+                                cl_read_ahead_release(env, &ra);
+
+                                rc = cl_io_read_ahead(env, io, page_idx, &ra);
+                                if (rc < 0)
+                                        break;
+
+                                CDEBUG(D_READA, "idx: %lu, ra: %lu, rpc: %lu\n",
+                                       page_idx, ra.cra_end, ra.cra_rpc_size);
+                                LASSERTF(ra.cra_end >= page_idx,
+                                         "object: %p, indcies %lu / %lu\n",
+                                         io->ci_obj, ra.cra_end, page_idx);
+                                /* update read ahead RPC size.
+                                 * NB: it's racy but doesn't matter */
+                                if (ras->ras_rpc_size > ra.cra_rpc_size &&
+                                    ra.cra_rpc_size > 0)
+                                        ras->ras_rpc_size = ra.cra_rpc_size;
+                                /* trim it to align with optimal RPC size */
+                                end = ras_align(ras, ria->ria_end + 1, NULL);
+                                if (end > 0 && !ria->ria_eof)
+                                        ria->ria_end = end - 1;
+                                if (ria->ria_end < ria->ria_end_min)
+                                        ria->ria_end = ria->ria_end_min;
+                                if (ria->ria_end > ra.cra_end)
+                                        ria->ria_end = ra.cra_end;
+                        }
+                        if (page_idx > ria->ria_end)
+                                break;
+
+                        /* If the page is inside the read-ahead window */
+                        rc = ll_read_ahead_page(env, io, queue, page_idx);
+                        if (rc < 0)
+                                break;
+
+                        ra_end = page_idx;
+                        if (rc == 0)
+                                ria->ria_reserved--;
                 } else if (stride_ria) {
                         /* If it is not in the read-ahead window, and it is
                          * read-ahead mode, then check whether it should skip
@@ -489,29 +415,30 @@ static int ll_read_ahead_pages(const struct lu_env *env,
                         }
                 }
         }
-        *ra_end = page_idx;
-        return count;
+
+        cl_read_ahead_release(env, &ra);
+
+        return ra_end;
 }
 
-int ll_readahead(const struct lu_env *env, struct cl_io *io,
-                 struct cl_page_list *queue, struct ll_readahead_state *ras,
-                 bool hit)
+static int ll_readahead(const struct lu_env *env, struct cl_io *io,
+                        struct cl_page_list *queue,
                        struct ll_readahead_state *ras, bool hit)
 {
         struct vvp_io *vio = vvp_env_io(env);
-        struct vvp_thread_info *vti = vvp_env_info(env);
-        struct cl_attr *attr = ccc_env_thread_attr(env);
-        unsigned long start = 0, end = 0, reserved;
-        unsigned long ra_end, len, mlen = 0;
+        struct ll_thread_info *lti = ll_env_info(env);
+        struct cl_attr *attr = vvp_env_thread_attr(env);
+        unsigned long len, mlen = 0;
+        pgoff_t ra_end, start = 0, end = 0;
         struct inode *inode;
-        struct ll_ra_read *bead;
-        struct ra_io_arg *ria = &vti->vti_ria;
+        struct ra_io_arg *ria = &lti->lti_ria;
         struct cl_object *clob;
         int ret = 0;
         __u64 kms;
         ENTRY;
 
         clob = io->ci_obj;
-        inode = ccc_object_inode(clob);
+        inode = vvp_object_inode(clob);
 
         memset(ria, 0, sizeof *ria);
@@ -528,59 +455,39 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
         }
 
         spin_lock(&ras->ras_lock);
-        if (vio->cui_ra_window_set)
-                bead = &vio->cui_bead;
-        else
-                bead = NULL;
 
-        /* Enlarge the RA window to encompass the full read */
-        if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
-            bead->lrr_start + bead->lrr_count) {
-                ras->ras_window_len = bead->lrr_start + bead->lrr_count -
-                                      ras->ras_window_start;
-        }
-
-        /* Reserve a part of the read-ahead window that we'll be issuing */
-        if (ras->ras_window_len > 0) {
-                /*
-                 * Note: other thread might rollback the ras_next_readahead,
-                 * if it can not get the full size of prepared pages, see the
-                 * end of this function. For stride read ahead, it needs to
-                 * make sure the offset is no less than ras_stride_offset,
-                 * so that stride read ahead can work correctly.
-                 */
-                if (stride_io_mode(ras))
-                        start = max(ras->ras_next_readahead,
-                                    ras->ras_stride_offset);
-                else
-                        start = ras->ras_next_readahead;
+        /**
+         * Note: other thread might rollback the ras_next_readahead,
+         * if it can not get the full size of prepared pages, see the
+         * end of this function. For stride read ahead, it needs to
+         * make sure the offset is no less than ras_stride_offset,
+         * so that stride read ahead can work correctly.
+         */
+        if (stride_io_mode(ras))
+                start = max(ras->ras_next_readahead, ras->ras_stride_offset);
+        else
+                start = ras->ras_next_readahead;
+
+        if (ras->ras_window_len > 0)
                 end = ras->ras_window_start + ras->ras_window_len - 1;
-        }
+
+        /* Enlarge the RA window to encompass the full read */
+        if (vio->vui_ra_valid &&
+            end < vio->vui_ra_start + vio->vui_ra_count - 1)
+                end = vio->vui_ra_start + vio->vui_ra_count - 1;
 
         if (end != 0) {
-                unsigned long rpc_boundary;
-                /*
-                 * Align RA window to an optimal boundary.
-                 *
-                 * XXX This would be better to align to cl_max_pages_per_rpc
-                 * instead of PTLRPC_MAX_BRW_PAGES, because the RPC size may
-                 * be aligned to the RAID stripe size in the future and that
-                 * is more important than the RPC size.
-                 */
-                /* Note: we only trim the RPC, instead of extending the RPC
-                 * to the boundary, so to avoid reading too much pages during
-                 * random reading. */
-                rpc_boundary = ((end + 1) & (~(PTLRPC_MAX_BRW_PAGES - 1)));
-                if (rpc_boundary > 0)
-                        rpc_boundary--;
-
-                if (rpc_boundary > start)
-                        end = rpc_boundary;
-
-                /* Truncate RA window to end of file */
-                end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
-
-                ras->ras_next_readahead = max(end, end + 1);
-                RAS_CDEBUG(ras);
+                unsigned long end_index;
+
+                /* Truncate RA window to end of file */
+                end_index = (unsigned long)((kms - 1) >> PAGE_SHIFT);
+                if (end_index <= end) {
+                        end = end_index;
+                        ria->ria_eof = true;
+                }
+
+                ras->ras_next_readahead = max(end, end + 1);
+                RAS_CDEBUG(ras);
         }
         ria->ria_start = start;
         ria->ria_end = end;
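The reworked window setup above clamps the read-ahead window to the last page implied by the known minimum size (kms) and records EOF in ria_eof. A standalone model of just that clamping step, with assumed numbers (4 KiB pages, a 1 MiB file, a 1024-page window):

    #include <stdio.h>
    #include <stdbool.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            unsigned long window_start = 0, window_len = 1024;
            unsigned long long kms = 1048576;               /* 1 MiB file */
            unsigned long end = window_start + window_len - 1;      /* 1023 */
            unsigned long end_index = (unsigned long)((kms - 1) >> PAGE_SHIFT);
            bool ria_eof = false;

            if (end_index <= end) {         /* file ends inside the window */
                    end = end_index;
                    ria_eof = true;
            }
            /* 1 MiB holds 256 4 KiB pages, so the window is clamped to
             * page index 255 and EOF is flagged. */
            printf("end %lu eof %d\n", end, ria_eof);       /* end 255 eof 1 */
            return 0;
    }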
@@ -605,35 +512,38 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
 
         CDEBUG(D_READA, DFID": ria: %lu/%lu, bead: %lu/%lu, hit: %d\n",
                PFID(lu_object_fid(&clob->co_lu)), ria->ria_start, ria->ria_end,
-               bead == NULL ? 0 : bead->lrr_start,
-               bead == NULL ? 0 : bead->lrr_count,
+               vio->vui_ra_valid ? vio->vui_ra_start : 0,
+               vio->vui_ra_valid ? vio->vui_ra_count : 0,
                hit);
 
         /* at least to extend the readahead window to cover current read */
-        if (!hit && bead != NULL &&
-            bead->lrr_start + bead->lrr_count > ria->ria_start) {
+        if (!hit && vio->vui_ra_valid &&
+            vio->vui_ra_start + vio->vui_ra_count > ria->ria_start) {
+                unsigned long remainder;
+
                 /* to the end of current read window. */
-                mlen = bead->lrr_start + bead->lrr_count - ria->ria_start;
+                mlen = vio->vui_ra_start + vio->vui_ra_count - ria->ria_start;
                 /* trim to RPC boundary */
-                start = ria->ria_start & (PTLRPC_MAX_BRW_PAGES - 1);
-                mlen = min(mlen, PTLRPC_MAX_BRW_PAGES - start);
+                ras_align(ras, ria->ria_start, &remainder);
+                mlen = min(mlen, ras->ras_rpc_size - remainder);
+                ria->ria_end_min = ria->ria_start + mlen;
         }
 
-        reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len, mlen);
-        if (reserved < len)
+        ria->ria_reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len, mlen);
+        if (ria->ria_reserved < len)
                 ll_ra_stats_inc(inode, RA_STAT_MAX_IN_FLIGHT);
 
         CDEBUG(D_READA, "reserved pages: %lu/%lu/%lu, ra_cur %d, ra_max %lu\n",
-               reserved, len, mlen,
+               ria->ria_reserved, len, mlen,
                atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
                ll_i2sbi(inode)->ll_ra_info.ra_max_pages);
 
-        ret = ll_read_ahead_pages(env, io, queue, ria, &reserved, &ra_end);
+        ra_end = ll_read_ahead_pages(env, io, queue, ras, ria);
 
-        if (reserved != 0)
-                ll_ra_count_put(ll_i2sbi(inode), reserved);
+        if (ria->ria_reserved != 0)
+                ll_ra_count_put(ll_i2sbi(inode), ria->ria_reserved);
 
-        if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT))
+        if (ra_end == end && ra_end == (kms >> PAGE_SHIFT))
                 ll_ra_stats_inc(inode, RA_STAT_EOF);
 
         /* if we didn't get to the end of the region we reserved from
@@ -641,16 +551,16 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
          * next read-ahead tries from where we left off.  we only do so
          * if the region we failed to issue read-ahead on is still ahead
          * of the app and behind the next index to start read-ahead from */
-        CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu \n",
-               ra_end, end, ria->ria_end);
+        CDEBUG(D_READA, "ra_end = %lu end = %lu stride end = %lu pages = %d\n",
+               ra_end, end, ria->ria_end, ret);
 
-        if (ra_end != end + 1) {
+        if (ra_end > 0 && ra_end != end) {
                 ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END);
                 spin_lock(&ras->ras_lock);
-                if (ra_end < ras->ras_next_readahead &&
+                if (ra_end <= ras->ras_next_readahead &&
                     index_in_window(ra_end, ras->ras_window_start, 0,
                                     ras->ras_window_len)) {
-                        ras->ras_next_readahead = ra_end;
+                        ras->ras_next_readahead = ra_end + 1;
                         RAS_CDEBUG(ras);
                 }
                 spin_unlock(&ras->ras_lock);
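The "trim to RPC boundary" step above caps the mandatory read-ahead length (mlen) so that the must-read part of a missed read never spans an RPC boundary. A worked standalone model, assuming 256-page RPCs:

    #include <stdio.h>

    int main(void)
    {
            unsigned long rpc_size = 256;
            unsigned long ria_start = 300;  /* first page of the window */
            unsigned long mlen = 500;       /* pages the read still needs */
            unsigned long remainder = ria_start % rpc_size; /* 44 */

            if (mlen > rpc_size - remainder)
                    mlen = rpc_size - remainder;            /* 212 */

            /* ria_start + mlen == 512: the mandatory part ends exactly on
             * the next RPC boundary instead of spanning two RPCs. */
            printf("mlen %lu end_min %lu\n", mlen, ria_start + mlen);
            return 0;
    }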
@@ -662,7 +572,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
 static void ras_set_start(struct inode *inode, struct ll_readahead_state *ras,
                           unsigned long index)
 {
-        ras->ras_window_start = index & (~(RAS_INCREASE_STEP(inode) - 1));
+        ras->ras_window_start = ras_align(ras, index, NULL);
 }
 
 /* called with the ras_lock held or from places where it doesn't matter */
@@ -674,7 +584,7 @@ static void ras_reset(struct inode *inode, struct ll_readahead_state *ras,
         ras->ras_consecutive_pages = 0;
         ras->ras_window_len = 0;
         ras_set_start(inode, ras, index);
-        ras->ras_next_readahead = max(ras->ras_window_start, index);
+        ras->ras_next_readahead = max(ras->ras_window_start, index + 1);
 
         RAS_CDEBUG(ras);
 }
@@ -691,9 +601,9 @@ static void ras_stride_reset(struct ll_readahead_state *ras)
 void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
 {
         spin_lock_init(&ras->ras_lock);
+        ras->ras_rpc_size = PTLRPC_MAX_BRW_PAGES;
         ras_reset(inode, ras, 0);
         ras->ras_requests = 0;
-        INIT_LIST_HEAD(&ras->ras_read_beads);
 }
 
 /*
@@ -801,24 +711,31 @@ static void ras_increase_window(struct inode *inode,
          * but current clio architecture does not support retrieving such
          * information from lower layer. FIXME later */
-        if (stride_io_mode(ras))
-                ras_stride_increase_window(ras, ra, RAS_INCREASE_STEP(inode));
-        else
-                ras->ras_window_len = min(ras->ras_window_len +
-                                          RAS_INCREASE_STEP(inode),
-                                          ra->ra_max_pages_per_file);
+        if (stride_io_mode(ras)) {
+                ras_stride_increase_window(ras, ra, ras->ras_rpc_size);
+        } else {
+                unsigned long wlen;
+
+                wlen = min(ras->ras_window_len + ras->ras_rpc_size,
+                           ra->ra_max_pages_per_file);
+                ras->ras_window_len = ras_align(ras, wlen, NULL);
+        }
 }
 
-void ras_update(struct ll_sb_info *sbi, struct inode *inode,
-                struct ll_readahead_state *ras, unsigned long index,
-                unsigned hit)
+static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
+                       struct ll_readahead_state *ras, unsigned long index,
+                       enum ras_update_flags flags)
 {
         struct ll_ra_info *ra = &sbi->ll_ra_info;
+        bool hit = flags & LL_RAS_HIT;
         int zero = 0, stride_detect = 0, ra_miss = 0;
         ENTRY;
 
         spin_lock(&ras->ras_lock);
 
+        if (!hit)
+                CDEBUG(D_READA, DFID " pages at %lu miss.\n",
+                       PFID(ll_inode2fid(inode)), index);
+
         ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
 
         /* reset the read-ahead window in two cases.  First when the app seeks
@@ -843,20 +760,19 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
          * and only occurs once per open file. Normal RA behavior is reverted
          * to for subsequent IO.  The mmap case does not increment
          * ras_requests and thus can never trigger this behavior. */
-        if (ras->ras_requests == 2 && !ras->ras_request_index) {
-                __u64 kms_pages;
+        if (ras->ras_requests >= 2 && !ras->ras_request_index) {
+                __u64 kms_pages;
 
-                kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-                            PAGE_CACHE_SHIFT;
+                kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
+                            PAGE_SHIFT;
 
-                CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
+                CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
                        ra->ra_max_read_ahead_whole_pages,
                        ra->ra_max_pages_per_file);
 
                 if (kms_pages &&
                     kms_pages <= ra->ra_max_read_ahead_whole_pages) {
                         ras->ras_window_start = 0;
-                        ras->ras_last_readpage = 0;
-                        ras->ras_next_readahead = 0;
+                        ras->ras_next_readahead = index + 1;
                         ras->ras_window_len = min(ra->ra_max_pages_per_file,
                                 ra->ra_max_read_ahead_whole_pages);
                         GOTO(out_unlock, 0);
                 }
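ras_increase_window() above grows the sequential window by one RPC per step, caps it at ra_max_pages_per_file and keeps it aligned with ras_align(). A standalone model of the growth sequence; an rpc_size of 256 and a per-file cap of 1000 pages are assumed:

    #include <stdio.h>

    int main(void)
    {
            unsigned long rpc_size = 256, max_per_file = 1000;
            unsigned long window_len = 0;
            int i;

            for (i = 0; i < 6; i++) {
                    unsigned long wlen = window_len + rpc_size;

                    if (wlen > max_per_file)
                            wlen = max_per_file;
                    window_len = wlen - (wlen % rpc_size);  /* ras_align() */
                    printf("step %d: window_len %lu\n", i, window_len);
            }
            /* prints 256, 512, 768, 768, ...: the 1000-page cap rounds
             * down to the last aligned length, 768 */
            return 0;
    }

Note how the cap rounds down to the last aligned length, so the window settles at 768 pages rather than oscillating around the unaligned maximum.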
@@ -886,12 +802,19 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
         if (ra_miss) {
                 if (index_in_stride_window(ras, index) &&
                     stride_io_mode(ras)) {
-                        /*If stride-RA hit cache miss, the stride dector
-                         *will not be reset to avoid the overhead of
-                         *redetecting read-ahead mode */
                         if (index != ras->ras_last_readpage + 1)
                                 ras->ras_consecutive_pages = 0;
                         ras_reset(inode, ras, index);
+
+                        /* If stride-RA hit cache miss, the stride
+                         * detector will not be reset to avoid the
+                         * overhead of redetecting read-ahead mode,
+                         * but on the condition that the stride window
+                         * still intersects with the normal sequential
+                         * read-ahead window. */
+                        if (ras->ras_window_start <
+                            ras->ras_stride_offset)
+                                ras_stride_reset(ras);
                         RAS_CDEBUG(ras);
                 } else {
                         /* Reset both stride window and normal RA
@@ -922,6 +845,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
                          * of read-ahead, so we use original offset here,
                          * instead of ras_window_start, which is RPC aligned */
                         ras->ras_next_readahead = max(index, ras->ras_next_readahead);
+                        ras->ras_window_start = max(ras->ras_stride_offset,
+                                                    ras->ras_window_start);
                 } else {
                         if (ras->ras_next_readahead < ras->ras_window_start)
                                 ras->ras_next_readahead = ras->ras_window_start;
@@ -932,8 +857,11 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
 
         /* Trigger RA in the mmap case where ras_consecutive_requests
          * is not incremented and thus can't be used to trigger RA */
-        if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
-                ras->ras_window_len = RAS_INCREASE_STEP(inode);
+        if (ras->ras_consecutive_pages >= 4 && flags & LL_RAS_MMAP) {
+                ras_increase_window(inode, ras, ra);
+                /* reset consecutive pages so that the readahead window can
+                 * grow gradually. */
+                ras->ras_consecutive_pages = 0;
                 GOTO(out_unlock, 0);
         }
 
@@ -945,7 +873,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
          */
         ras->ras_next_readahead = max(index, ras->ras_next_readahead);
         ras->ras_stride_offset = index;
-        ras->ras_window_len = RAS_INCREASE_STEP(inode);
+        ras->ras_window_start = max(index, ras->ras_window_start);
         }
 
         /* The initial ras_window_len is set to the request size. To avoid
@@ -970,10 +898,10 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
         struct cl_io *io;
         struct cl_page *page;
         struct cl_object *clob;
-        struct cl_env_nest nest;
         bool redirtied = false;
         bool unlocked = false;
         int result;
+        __u16 refcheck;
         ENTRY;
 
         LASSERT(PageLocked(vmpage));
@@ -981,14 +909,14 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
 
         LASSERT(ll_i2dtexp(inode) != NULL);
 
-        env = cl_env_nested_get(&nest);
+        env = cl_env_get(&refcheck);
         if (IS_ERR(env))
                 GOTO(out, result = PTR_ERR(env));
 
         clob  = ll_i2info(inode)->lli_clob;
         LASSERT(clob != NULL);
 
-        io = ccc_env_thread_io(env);
+        io = vvp_env_thread_io(env);
         io->ci_obj = clob;
         io->ci_ignore_layout = 1;
         result = cl_io_init(env, io, CIT_MISC, clob);
@@ -1031,7 +959,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
                  * breaking kernel which assumes ->writepage should mark
                  * PageWriteback or clean the page. */
                 result = cl_sync_file_range(inode, offset,
-                                            offset + PAGE_CACHE_SIZE - 1,
+                                            offset + PAGE_SIZE - 1,
                                             CL_FSYNC_LOCAL, 1);
                 if (result > 0) {
                         /* actually we may have written more than one page.
@@ -1042,7 +970,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
                          */
                         redirtied = true;
                 }
         }
 
-        cl_env_nested_put(&nest, env);
+        cl_env_put(env, &refcheck);
         GOTO(out, result);
 
 out:
@@ -1059,17 +987,15 @@ out:
 int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
 {
         struct inode *inode = mapping->host;
-        struct ll_sb_info *sbi = ll_i2sbi(inode);
         loff_t start;
         loff_t end;
         enum cl_fsync_mode mode;
         int range_whole = 0;
         int result;
-        int ignore_layout = 0;
         ENTRY;
 
         if (wbc->range_cyclic) {
-                start = mapping->writeback_index << PAGE_CACHE_SHIFT;
+                start = mapping->writeback_index << PAGE_SHIFT;
                 end = OBD_OBJECT_EOF;
         } else {
                 start = wbc->range_start;
@@ -1084,16 +1010,13 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
         if (wbc->sync_mode == WB_SYNC_ALL)
                 mode = CL_FSYNC_LOCAL;
 
-        if (sbi->ll_umounting)
-                /* if the mountpoint is being umounted, all pages have to be
-                 * evicted to avoid hitting LBUG when truncate_inode_pages()
-                 * is called later on. */
-                ignore_layout = 1;
-
-        if (cl_i2info(inode)->lli_clob == NULL)
+        if (ll_i2info(inode)->lli_clob == NULL)
                 RETURN(0);
 
-        result = cl_sync_file_range(inode, start, end, mode, ignore_layout);
+        /* for directio, it would call writepages() to evict cached pages
+         * inside the IO context of write, which will cause deadlock at
+         * layout_conf since it waits for active IOs to complete. */
+        result = cl_sync_file_range(inode, start, end, mode, 1);
         if (result > 0) {
                 wbc->nr_to_write -= result;
                 result = 0;
@@ -1103,7 +1026,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
                 if (end == OBD_OBJECT_EOF)
                         mapping->writeback_index = 0;
                 else
-                        mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) +1;
+                        mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
         }
         RETURN(result);
 }
@@ -1126,16 +1049,18 @@ struct ll_cl_context *ll_cl_find(struct file *file)
         return found;
 }
 
-void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io)
+void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io,
+               enum lcc_type type)
 {
         struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-        struct ll_cl_context *lcc = &vvp_env_info(env)->vti_io_ctx;
+        struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx;
 
         memset(lcc, 0, sizeof(*lcc));
         INIT_LIST_HEAD(&lcc->lcc_list);
         lcc->lcc_cookie = current;
         lcc->lcc_env = env;
         lcc->lcc_io = io;
+        lcc->lcc_type = type;
 
         write_lock(&fd->fd_lock);
         list_add(&lcc->lcc_list, &fd->fd_lccs);
@@ -1145,39 +1070,157 @@ void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io)
 void ll_cl_remove(struct file *file, const struct lu_env *env)
 {
         struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-        struct ll_cl_context *lcc = &vvp_env_info(env)->vti_io_ctx;
+        struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx;
 
         write_lock(&fd->fd_lock);
         list_del_init(&lcc->lcc_list);
         write_unlock(&fd->fd_lock);
 }
 
+static int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
+                           struct cl_page *page, struct file *file)
+{
+        struct inode *inode = vvp_object_inode(page->cp_obj);
+        struct ll_sb_info *sbi = ll_i2sbi(inode);
+        struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+        struct ll_readahead_state *ras = &fd->fd_ras;
+        struct cl_2queue *queue = &io->ci_queue;
+        struct vvp_page *vpg;
+        int rc = 0;
+        bool uptodate;
+        ENTRY;
+
+        vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
+        uptodate = vpg->vpg_defer_uptodate;
+
+        if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
+            sbi->ll_ra_info.ra_max_pages > 0 &&
+            !vpg->vpg_ra_updated) {
+                struct vvp_io *vio = vvp_env_io(env);
+                enum ras_update_flags flags = 0;
+
+                if (uptodate)
+                        flags |= LL_RAS_HIT;
+                if (!vio->vui_ra_valid)
+                        flags |= LL_RAS_MMAP;
+                ras_update(sbi, inode, ras, vvp_index(vpg), flags);
+        }
+
+        cl_2queue_init(queue);
+        if (uptodate) {
+                vpg->vpg_ra_used = 1;
+                cl_page_export(env, page, 1);
+                cl_page_disown(env, io, page);
+        } else {
+                cl_2queue_add(queue, page);
+        }
+
+        if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
+            sbi->ll_ra_info.ra_max_pages > 0) {
+                int rc2;
+
+                rc2 = ll_readahead(env, io, &queue->c2_qin, ras,
+                                   uptodate);
+                CDEBUG(D_READA, DFID "%d pages read ahead at %lu\n",
+                       PFID(ll_inode2fid(inode)), rc2, vvp_index(vpg));
+        }
+
+        if (queue->c2_qin.pl_nr > 0)
+                rc = cl_io_submit_rw(env, io, CRT_READ, queue);
+
+        /*
+         * Unlock unsent pages in case of error.
+         */
+        cl_page_list_disown(env, io, &queue->c2_qin);
+        cl_2queue_fini(env, queue);
+
+        RETURN(rc);
+}
+
 int ll_readpage(struct file *file, struct page *vmpage)
 {
-        struct cl_object *clob = ll_i2info(file->f_dentry->d_inode)->lli_clob;
+        struct inode *inode = file_inode(file);
+        struct cl_object *clob = ll_i2info(inode)->lli_clob;
         struct ll_cl_context *lcc;
-        const struct lu_env  *env;
-        struct cl_io   *io;
+        const struct lu_env *env = NULL;
+        struct cl_io *io = NULL;
         struct cl_page *page;
         int result;
         ENTRY;
 
         lcc = ll_cl_find(file);
-        if (lcc == NULL) {
+        if (lcc != NULL) {
+                env = lcc->lcc_env;
+                io = lcc->lcc_io;
+        }
+
+        if (io == NULL) { /* fast read */
+                struct inode *inode = file_inode(file);
+                struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+                struct ll_readahead_state *ras = &fd->fd_ras;
+                struct lu_env *local_env = NULL;
+                struct vvp_page *vpg;
+
+                result = -ENODATA;
+
+                /* TODO: need to verify the layout version to make sure
+                 * the page is not invalid due to layout change. */
+                page = cl_vmpage_page(vmpage, clob);
+                if (page == NULL) {
+                        unlock_page(vmpage);
+                        RETURN(result);
+                }
+
+                if (!env) {
+                        local_env = cl_env_percpu_get();
+                        env = local_env;
+                }
+
+                vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
+                if (vpg->vpg_defer_uptodate) {
+                        enum ras_update_flags flags = LL_RAS_HIT;
+
+                        if (lcc && lcc->lcc_type == LCC_MMAP)
+                                flags |= LL_RAS_MMAP;
+
+                        /* For fast read, it updates read ahead state only
+                         * if the page is hit in cache because non cache page
+                         * case will be handled by slow read later. */
+                        ras_update(ll_i2sbi(inode), inode, ras, vvp_index(vpg),
+                                   flags);
+                        /* avoid duplicate ras_update() call */
+                        vpg->vpg_ra_updated = 1;
+
+                        /* Check if we can issue a readahead RPC, if that is
+                         * the case, we can't do fast IO because we will need
+                         * a cl_io to issue the RPC. */
+                        if (ras->ras_window_start + ras->ras_window_len <
+                            ras->ras_next_readahead + PTLRPC_MAX_BRW_PAGES) {
+                                /* export the page and skip io stack */
+                                vpg->vpg_ra_used = 1;
+                                cl_page_export(env, page, 1);
+                                result = 0;
+                        }
+                }
+
+                /* release page refcount before unlocking the page to ensure
+                 * the object won't be destroyed in the calling path of
+                 * cl_page_put(). Please see comment in ll_releasepage(). */
+                cl_page_put(env, page);
                 unlock_page(vmpage);
-                RETURN(-EIO);
+                if (local_env)
+                        cl_env_percpu_put(local_env);
+
+                RETURN(result);
         }
 
-        env = lcc->lcc_env;
-        io  = lcc->lcc_io;
-        LASSERT(io != NULL);
+        LASSERT(io->ci_state == CIS_IO_GOING);
         page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
         if (!IS_ERR(page)) {
                 LASSERT(page->cp_type == CPT_CACHEABLE);
                 if (likely(!PageUptodate(vmpage))) {
                         cl_page_assume(env, io, page);
-                        result = cl_io_read_page(env, io, page);
+                        result = ll_io_read_page(env, io, page, file);
                 } else {
                         /* Page from a non-object file. */
                         unlock_page(vmpage);
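The fast-read gate added to ll_readpage() above skips the full cl_io stack only when no read-ahead RPC could usefully be issued anyway. A standalone model of that check; PTLRPC_MAX_BRW_PAGES == 256 is assumed and the helper name is invented for the sketch:

    #include <stdio.h>

    #define PTLRPC_MAX_BRW_PAGES 256

    /* Fast IO is allowed only when less than one full RPC of read-ahead
     * remains in the window; otherwise a cl_io would be needed to issue
     * the read-ahead RPC, so the slow path must be taken. */
    static int can_fast_read(unsigned long window_start,
                             unsigned long window_len,
                             unsigned long next_readahead)
    {
            return window_start + window_len <
                   next_readahead + PTLRPC_MAX_BRW_PAGES;
    }

    int main(void)
    {
            /* window [0, 2048), next read-ahead at 512: a full RPC still
             * fits, so the slow path is required. */
            printf("%d\n", can_fast_read(0, 2048, 512));    /* 0 */
            /* window [0, 640), next read-ahead at 512: less than one RPC
             * left, fast read is fine. */
            printf("%d\n", can_fast_read(0, 640, 512));     /* 1 */
            return 0;
    }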