X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fllite%2Frw.c;h=2979d17f9d21398bfba4c09f6b227ff81b0e653a;hp=6ca07f7f21dbc9261d772a168884f836f6018e29;hb=b9c155065d2ca4a6037a0ca4bfc788d6961fdc8e;hpb=83ae3e2e5b9713822ea4889d832915e791801d90 diff --git a/lustre/llite/rw.c b/lustre/llite/rw.c index 6ca07f7..2979d17 100644 --- a/lustre/llite/rw.c +++ b/lustre/llite/rw.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -27,7 +23,7 @@ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2014, Intel Corporation. + * Copyright (c) 2011, 2017, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -48,12 +44,14 @@ #include #include +#include #include #include #include #include /* current_is_kswapd() */ #include +#include #define DEBUG_SUBSYSTEM S_LLITE @@ -84,7 +82,8 @@ static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which); static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, struct ra_io_arg *ria, - unsigned long pages, unsigned long min) + unsigned long pages, + unsigned long pages_min) { struct ll_ra_info *ra = &sbi->ll_ra_info; long ret; @@ -98,47 +97,30 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages)) GOTO(out, ret = 0); - /* If the non-strided (ria_pages == 0) readahead window - * (ria_start + ret) has grown across an RPC boundary, then trim - * readahead size by the amount beyond the RPC so it ends on an - * RPC boundary. If the readahead window is already ending on - * an RPC boundary (beyond_rpc == 0), or smaller than a full - * RPC (beyond_rpc < ret) the readahead size is unchanged. - * The (beyond_rpc != 0) check is skipped since the conditional - * branch is more expensive than subtracting zero from the result. - * - * Strided read is left unaligned to avoid small fragments beyond - * the RPC boundary from needing an extra read RPC. 
*/ - if (ria->ria_pages == 0) { - long beyond_rpc = (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES; - if (/* beyond_rpc != 0 && */ beyond_rpc < ret) - ret -= beyond_rpc; - } - if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) { atomic_sub(ret, &ra->ra_cur_pages); ret = 0; } out: - if (ret < min) { + if (ret < pages_min) { /* override ra limit for maximum performance */ - atomic_add(min - ret, &ra->ra_cur_pages); - ret = min; + atomic_add(pages_min - ret, &ra->ra_cur_pages); + ret = pages_min; } RETURN(ret); } -void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len) +void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long pages) { struct ll_ra_info *ra = &sbi->ll_ra_info; - atomic_sub(len, &ra->ra_cur_pages); + atomic_sub(pages, &ra->ra_cur_pages); } static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which) { - LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which); - lprocfs_counter_incr(sbi->ll_ra_stats, which); + LASSERTF(which < _NR_RA_STAT, "which: %u\n", which); + lprocfs_counter_incr(sbi->ll_ra_stats, which); } void ll_ra_stats_inc(struct inode *inode, enum ra_stat which) @@ -148,39 +130,29 @@ void ll_ra_stats_inc(struct inode *inode, enum ra_stat which) } #define RAS_CDEBUG(ras) \ - CDEBUG(D_READA, \ - "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu" \ - "csr %lu sf %lu sp %lu sl %lu \n", \ - ras->ras_last_readpage, ras->ras_consecutive_requests, \ - ras->ras_consecutive_pages, ras->ras_window_start, \ - ras->ras_window_len, ras->ras_next_readahead, \ - ras->ras_requests, ras->ras_request_index, \ - ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \ - ras->ras_stride_pages, ras->ras_stride_length) - -static int index_in_window(unsigned long index, unsigned long point, - unsigned long before, unsigned long after) + CDEBUG(D_READA, \ + "lre %llu cr %lu cb %llu wsi %lu wp %lu nra %lu rpc %lu " \ + "r %lu csr %lu so %llu sb %llu sl %llu lr %lu\n", \ + ras->ras_last_read_end_bytes, ras->ras_consecutive_requests, \ + ras->ras_consecutive_bytes, ras->ras_window_start_idx, \ + ras->ras_window_pages, ras->ras_next_readahead_idx, \ + ras->ras_rpc_pages, ras->ras_requests, \ + ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \ + ras->ras_stride_bytes, ras->ras_stride_length, \ + ras->ras_async_last_readpage_idx) + +static bool pos_in_window(loff_t pos, loff_t point, + unsigned long before, unsigned long after) { - unsigned long start = point - before, end = point + after; + loff_t start = point - before; + loff_t end = point + after; - if (start > point) - start = 0; - if (end < point) - end = ~0; + if (start > point) + start = 0; + if (end < point) + end = ~0; - return start <= index && index <= end; -} - -void ll_ras_enter(struct file *f) -{ - struct ll_file_data *fd = LUSTRE_FPRIVATE(f); - struct ll_readahead_state *ras = &fd->fd_ras; - - spin_lock(&ras->ras_lock); - ras->ras_requests++; - ras->ras_request_index = 0; - ras->ras_consecutive_requests++; - spin_unlock(&ras->ras_lock); + return start <= pos && pos <= end; } /** @@ -247,7 +219,7 @@ out: if (vmpage != NULL) { if (rc != 0) unlock_page(vmpage); - page_cache_release(vmpage); + put_page(vmpage); } if (msg != NULL) { ll_ra_stats_inc(inode, which); @@ -258,179 +230,408 @@ out: RETURN(rc); } -#define RIA_DEBUG(ria) \ - CDEBUG(D_READA, "rs %lu re %lu ro %lu rl %lu rp %lu\n", \ - ria->ria_start, ria->ria_end, ria->ria_stoff, ria->ria_length,\ - ria->ria_pages) - -/* Limit this to the blocksize instead of PTLRPC_BRW_MAX_SIZE, since we don't - * know 
what the actual RPC size is. If this needs to change, it makes more - * sense to tune the i_blkbits value for the file based on the OSTs it is - * striped over, rather than having a constant value for all files here. */ - -/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)). - * Temprarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled - * by default, this should be adjusted corresponding with max_read_ahead_mb - * and max_read_ahead_per_file_mb otherwise the readahead budget can be used - * up quickly which will affect read performance siginificantly. See LU-2816 */ -#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT) +#define RIA_DEBUG(ria) \ + CDEBUG(D_READA, "rs %lu re %lu ro %llu rl %llu rb %llu\n", \ + ria->ria_start_idx, ria->ria_end_idx, ria->ria_stoff, \ + ria->ria_length, ria->ria_bytes) static inline int stride_io_mode(struct ll_readahead_state *ras) { return ras->ras_consecutive_stride_requests > 1; } -/* The function calculates how much pages will be read in + +/* The function calculates how many bytes will be read in * [off, off + length], in such stride IO area, * stride_offset = st_off, stride_lengh = st_len, - * stride_pages = st_pgs + * stride_bytes = st_bytes * * |------------------|*****|------------------|*****|------------|*****|.... * st_off - * |--- st_pgs ---| + * |--- st_bytes ---| * |----- st_len -----| * - * How many pages it should read in such pattern + * How many bytes it should read in such pattern * |-------------------------------------------------------------| * off * |<------ length ------->| * * = |<----->| + |-------------------------------------| + |---| - * start_left st_pgs * i end_left + * start_left st_bytes * i end_left */ -static unsigned long -stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs, - unsigned long off, unsigned long length) +static loff_t stride_byte_count(loff_t st_off, loff_t st_len, loff_t st_bytes, + loff_t off, loff_t length) { - __u64 start = off > st_off ? off - st_off : 0; - __u64 end = off + length > st_off ? off + length - st_off : 0; - unsigned long start_left = 0; - unsigned long end_left = 0; - unsigned long pg_count; - - if (st_len == 0 || length == 0 || end == 0) - return length; - - start_left = do_div(start, st_len); - if (start_left < st_pgs) - start_left = st_pgs - start_left; - else - start_left = 0; + u64 start = off > st_off ? off - st_off : 0; + u64 end = off + length > st_off ? 
off + length - st_off : 0;
+	u64 start_left;
+	u64 end_left;
+	u64 bytes_count;
+
+	if (st_len == 0 || length == 0 || end == 0)
+		return length;
+
+	start = div64_u64_rem(start, st_len, &start_left);
+	if (start_left < st_bytes)
+		start_left = st_bytes - start_left;
+	else
+		start_left = 0;

-	end_left = do_div(end, st_len);
-	if (end_left > st_pgs)
-		end_left = st_pgs;
+	end = div64_u64_rem(end, st_len, &end_left);
+	if (end_left > st_bytes)
+		end_left = st_bytes;

-	CDEBUG(D_READA, "start "LPU64", end "LPU64" start_left %lu end_left %lu \n",
-	       start, end, start_left, end_left);
+	CDEBUG(D_READA, "start %llu, end %llu start_left %llu end_left %llu\n",
+	       start, end, start_left, end_left);

-	if (start == end)
-		pg_count = end_left - (st_pgs - start_left);
-	else
-		pg_count = start_left + st_pgs * (end - start - 1) + end_left;
+	if (start == end)
+		bytes_count = end_left - (st_bytes - start_left);
+	else
+		bytes_count = start_left +
+			st_bytes * (end - start - 1) + end_left;

-	CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu"
-	       "pgcount %lu\n", st_off, st_len, st_pgs, off, length, pg_count);
+	CDEBUG(D_READA,
+	       "st_off %llu, st_len %llu st_bytes %llu off %llu length %llu bytescount %llu\n",
+	       st_off, st_len, st_bytes, off, length, bytes_count);

-	return pg_count;
+	return bytes_count;
 }

-static int ria_page_count(struct ra_io_arg *ria)
+static unsigned long ria_page_count(struct ra_io_arg *ria)
 {
-	__u64 length = ria->ria_end >= ria->ria_start ?
-		       ria->ria_end - ria->ria_start + 1 : 0;
+	loff_t length_bytes = ria->ria_end_idx >= ria->ria_start_idx ?
+		(loff_t)(ria->ria_end_idx -
+			 ria->ria_start_idx + 1) << PAGE_SHIFT : 0;
+	loff_t bytes_count;
+
+	if (ria->ria_length > ria->ria_bytes && ria->ria_bytes &&
+	    (ria->ria_length & ~PAGE_MASK || ria->ria_bytes & ~PAGE_MASK ||
+	     ria->ria_stoff & ~PAGE_MASK)) {
+		/* Over-estimate un-aligned page stride read */
+		unsigned long pg_count = ((ria->ria_bytes +
+					   PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
+		pg_count *= length_bytes / ria->ria_length + 1;
+
+		return pg_count;
+	}
+	bytes_count = stride_byte_count(ria->ria_stoff, ria->ria_length,
+					ria->ria_bytes,
+					(loff_t)ria->ria_start_idx << PAGE_SHIFT,
+					length_bytes);
+
+	return (bytes_count + PAGE_SIZE - 1) >> PAGE_SHIFT;
+}

-	return stride_pg_count(ria->ria_stoff, ria->ria_length,
-			       ria->ria_pages, ria->ria_start,
-			       length);
+static pgoff_t ras_align(struct ll_readahead_state *ras, pgoff_t index)
+{
+	return index - (index % ras->ras_rpc_pages);
+}

-/*Check whether the index is in the defined ra-window */
-static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria)
+/* Check whether the index is in the defined ra-window */
+static bool ras_inside_ra_window(pgoff_t idx, struct ra_io_arg *ria)
 {
-	/* If ria_length == ria_pages, it means non-stride I/O mode,
+	loff_t pos = (loff_t)idx << PAGE_SHIFT;
+
+	/* If ria_length == ria_bytes, it means non-stride I/O mode,
 	 * idx should always inside read-ahead window in this case
 	 * For stride I/O mode, just check whether the idx is inside
-	 * the ria_pages. */
-	return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
-		(idx >= ria->ria_stoff && (idx - ria->ria_stoff) %
-		ria->ria_length < ria->ria_pages);
+	 * the ria_bytes.
+ */ + if (ria->ria_length == 0 || ria->ria_length == ria->ria_bytes) + return true; + + if (pos >= ria->ria_stoff) { + u64 offset; + + div64_u64_rem(pos - ria->ria_stoff, ria->ria_length, &offset); + + if (offset < ria->ria_bytes || + (ria->ria_length - offset) < PAGE_SIZE) + return true; + } else if (pos + PAGE_SIZE > ria->ria_stoff) { + return true; + } + + return false; } -static int ll_read_ahead_pages(const struct lu_env *env, - struct cl_io *io, struct cl_page_list *queue, - struct ra_io_arg *ria, - unsigned long *reserved_pages, - pgoff_t *ra_end) +static unsigned long +ll_read_ahead_pages(const struct lu_env *env, struct cl_io *io, + struct cl_page_list *queue, struct ll_readahead_state *ras, + struct ra_io_arg *ria, pgoff_t *ra_end) { struct cl_read_ahead ra = { 0 }; - int rc, count = 0; - bool stride_ria; + /* busy page count is per stride */ + int rc = 0, count = 0, busy_page_count = 0; pgoff_t page_idx; LASSERT(ria != NULL); RIA_DEBUG(ria); - stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0; - for (page_idx = ria->ria_start; - page_idx <= ria->ria_end && *reserved_pages > 0; page_idx++) { + for (page_idx = ria->ria_start_idx; + page_idx <= ria->ria_end_idx && ria->ria_reserved > 0; + page_idx++) { if (ras_inside_ra_window(page_idx, ria)) { - if (ra.cra_end == 0 || ra.cra_end < page_idx) { + if (ra.cra_end_idx == 0 || ra.cra_end_idx < page_idx) { + pgoff_t end_idx; + cl_read_ahead_release(env, &ra); rc = cl_io_read_ahead(env, io, page_idx, &ra); if (rc < 0) break; - LASSERTF(ra.cra_end >= page_idx, + /* Do not shrink ria_end_idx at any case until + * the minimum end of current read is covered. + * And only shrink ria_end_idx if the matched + * LDLM lock doesn't cover more. */ + if (page_idx > ra.cra_end_idx || + (ra.cra_contention && + page_idx > ria->ria_end_idx_min)) { + ria->ria_end_idx = ra.cra_end_idx; + break; + } + + CDEBUG(D_READA, "idx: %lu, ra: %lu, rpc: %lu\n", + page_idx, ra.cra_end_idx, + ra.cra_rpc_pages); + LASSERTF(ra.cra_end_idx >= page_idx, "object: %p, indcies %lu / %lu\n", - io->ci_obj, ra.cra_end, page_idx); + io->ci_obj, ra.cra_end_idx, page_idx); + /* update read ahead RPC size. + * NB: it's racy but doesn't matter */ + if (ras->ras_rpc_pages != ra.cra_rpc_pages && + ra.cra_rpc_pages > 0) + ras->ras_rpc_pages = ra.cra_rpc_pages; + /* trim it to align with optimal RPC size */ + end_idx = ras_align(ras, ria->ria_end_idx + 1); + if (end_idx > 0 && !ria->ria_eof) + ria->ria_end_idx = end_idx - 1; + if (ria->ria_end_idx < ria->ria_end_idx_min) + ria->ria_end_idx = ria->ria_end_idx_min; } + if (page_idx > ria->ria_end_idx) + break; - /* If the page is inside the read-ahead window*/ + /* If the page is inside the read-ahead window */ rc = ll_read_ahead_page(env, io, queue, page_idx); + if (rc < 0 && rc != -EBUSY) + break; + if (rc == -EBUSY) { + busy_page_count++; + CDEBUG(D_READA, + "skip busy page: %lu\n", page_idx); + /* For page unaligned readahead the first + * last pages of each region can be read by + * another reader on the same node, and so + * may be busy. So only stop for > 2 busy + * pages. */ + if (busy_page_count > 2) + break; + } + + *ra_end = page_idx; + /* Only subtract from reserve & count the page if we + * really did readahead on that page. 
*/ if (rc == 0) { - (*reserved_pages)--; + ria->ria_reserved--; count++; } - } else if (stride_ria) { - /* If it is not in the read-ahead window, and it is - * read-ahead mode, then check whether it should skip - * the stride gap */ - pgoff_t offset; - /* FIXME: This assertion only is valid when it is for - * forward read-ahead, it will be fixed when backward - * read-ahead is implemented */ - LASSERTF(page_idx >= ria->ria_stoff, - "Invalid page_idx %lu rs %lu re %lu ro %lu " - "rl %lu rp %lu\n", page_idx, - ria->ria_start, ria->ria_end, ria->ria_stoff, - ria->ria_length, ria->ria_pages); - offset = page_idx - ria->ria_stoff; - offset = offset % (ria->ria_length); - if (offset > ria->ria_pages) { - page_idx += ria->ria_length - offset; - CDEBUG(D_READA, "i %lu skip %lu \n", page_idx, - ria->ria_length - offset); - continue; - } - } - } + } else if (stride_io_mode(ras)) { + /* If it is not in the read-ahead window, and it is + * read-ahead mode, then check whether it should skip + * the stride gap. + */ + loff_t pos = (loff_t)page_idx << PAGE_SHIFT; + u64 offset; + + div64_u64_rem(pos - ria->ria_stoff, ria->ria_length, + &offset); + if (offset >= ria->ria_bytes) { + pos += (ria->ria_length - offset); + if ((pos >> PAGE_SHIFT) >= page_idx + 1) + page_idx = (pos >> PAGE_SHIFT) - 1; + busy_page_count = 0; + CDEBUG(D_READA, + "Stride: jump %llu pages to %lu\n", + ria->ria_length - offset, page_idx); + continue; + } + } + } cl_read_ahead_release(env, &ra); - *ra_end = page_idx; return count; } +static void ll_readahead_work_free(struct ll_readahead_work *work) +{ + fput(work->lrw_file); + OBD_FREE_PTR(work); +} + +static void ll_readahead_handle_work(struct work_struct *wq); +static void ll_readahead_work_add(struct inode *inode, + struct ll_readahead_work *work) +{ + INIT_WORK(&work->lrw_readahead_work, ll_readahead_handle_work); + queue_work(ll_i2sbi(inode)->ll_ra_info.ll_readahead_wq, + &work->lrw_readahead_work); +} + +static int ll_readahead_file_kms(const struct lu_env *env, + struct cl_io *io, __u64 *kms) +{ + struct cl_object *clob; + struct inode *inode; + struct cl_attr *attr = vvp_env_thread_attr(env); + int ret; + + clob = io->ci_obj; + inode = vvp_object_inode(clob); + + cl_object_attr_lock(clob); + ret = cl_object_attr_get(env, clob, attr); + cl_object_attr_unlock(clob); + + if (ret != 0) + RETURN(ret); + + *kms = attr->cat_kms; + return 0; +} + +static void ll_readahead_handle_work(struct work_struct *wq) +{ + struct ll_readahead_work *work; + struct lu_env *env; + __u16 refcheck; + struct ra_io_arg *ria; + struct inode *inode; + struct ll_file_data *fd; + struct ll_readahead_state *ras; + struct cl_io *io; + struct cl_2queue *queue; + pgoff_t ra_end_idx = 0; + unsigned long pages, pages_min = 0; + struct file *file; + __u64 kms; + int rc; + pgoff_t eof_index; + + work = container_of(wq, struct ll_readahead_work, + lrw_readahead_work); + fd = LUSTRE_FPRIVATE(work->lrw_file); + ras = &fd->fd_ras; + file = work->lrw_file; + inode = file_inode(file); + + env = cl_env_alloc(&refcheck, LCT_NOREF); + if (IS_ERR(env)) + GOTO(out_free_work, rc = PTR_ERR(env)); + + io = vvp_env_thread_io(env); + ll_io_init(io, file, CIT_READ, NULL); + + rc = ll_readahead_file_kms(env, io, &kms); + if (rc != 0) + GOTO(out_put_env, rc); + + if (kms == 0) { + ll_ra_stats_inc(inode, RA_STAT_ZERO_LEN); + GOTO(out_put_env, rc = 0); + } + + ria = &ll_env_info(env)->lti_ria; + memset(ria, 0, sizeof(*ria)); + + ria->ria_start_idx = work->lrw_start_idx; + /* Truncate RA window to end of file */ + eof_index = 
(pgoff_t)(kms - 1) >> PAGE_SHIFT; + if (eof_index <= work->lrw_end_idx) { + work->lrw_end_idx = eof_index; + ria->ria_eof = true; + } + if (work->lrw_end_idx <= work->lrw_start_idx) + GOTO(out_put_env, rc = 0); + + ria->ria_end_idx = work->lrw_end_idx; + pages = ria->ria_end_idx - ria->ria_start_idx + 1; + ria->ria_reserved = ll_ra_count_get(ll_i2sbi(inode), ria, + ria_page_count(ria), pages_min); + + CDEBUG(D_READA, + "async reserved pages: %lu/%lu/%lu, ra_cur %d, ra_max %lu\n", + ria->ria_reserved, pages, pages_min, + atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages), + ll_i2sbi(inode)->ll_ra_info.ra_max_pages); + + if (ria->ria_reserved < pages) { + ll_ra_stats_inc(inode, RA_STAT_MAX_IN_FLIGHT); + if (PAGES_TO_MiB(ria->ria_reserved) < 1) { + ll_ra_count_put(ll_i2sbi(inode), ria->ria_reserved); + GOTO(out_put_env, rc = 0); + } + } + + rc = cl_io_rw_init(env, io, CIT_READ, ria->ria_start_idx, pages); + if (rc) + GOTO(out_put_env, rc); + + vvp_env_io(env)->vui_io_subtype = IO_NORMAL; + vvp_env_io(env)->vui_fd = fd; + io->ci_state = CIS_LOCKED; + io->ci_async_readahead = true; + rc = cl_io_start(env, io); + if (rc) + GOTO(out_io_fini, rc); + + queue = &io->ci_queue; + cl_2queue_init(queue); + + rc = ll_read_ahead_pages(env, io, &queue->c2_qin, ras, ria, + &ra_end_idx); + if (ria->ria_reserved != 0) + ll_ra_count_put(ll_i2sbi(inode), ria->ria_reserved); + if (queue->c2_qin.pl_nr > 0) { + int count = queue->c2_qin.pl_nr; + + rc = cl_io_submit_rw(env, io, CRT_READ, queue); + if (rc == 0) + task_io_account_read(PAGE_SIZE * count); + } + if (ria->ria_end_idx == ra_end_idx && ra_end_idx == (kms >> PAGE_SHIFT)) + ll_ra_stats_inc(inode, RA_STAT_EOF); + + if (ra_end_idx != ria->ria_end_idx) + ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END); + + /* TODO: discard all pages until page reinit route is implemented */ + cl_page_list_discard(env, io, &queue->c2_qin); + + /* Unlock unsent read pages in case of error. 
*/ + cl_page_list_disown(env, io, &queue->c2_qin); + + cl_2queue_fini(env, queue); +out_io_fini: + cl_io_end(env, io); + cl_io_fini(env, io); +out_put_env: + cl_env_put(env, &refcheck); +out_free_work: + if (ra_end_idx > 0) + ll_ra_stats_inc_sbi(ll_i2sbi(inode), RA_STAT_ASYNC); + ll_readahead_work_free(work); +} + static int ll_readahead(const struct lu_env *env, struct cl_io *io, struct cl_page_list *queue, - struct ll_readahead_state *ras, bool hit) + struct ll_readahead_state *ras, bool hit, + struct file *file) { struct vvp_io *vio = vvp_env_io(env); struct ll_thread_info *lti = ll_env_info(env); - struct cl_attr *attr = vvp_env_thread_attr(env); - unsigned long len, mlen = 0, reserved; - pgoff_t ra_end, start = 0, end = 0; + unsigned long pages, pages_min = 0; + pgoff_t ra_end_idx = 0, start_idx = 0, end_idx = 0; struct inode *inode; struct ra_io_arg *ria = <i->lti_ria; struct cl_object *clob; @@ -441,15 +642,11 @@ static int ll_readahead(const struct lu_env *env, struct cl_io *io, clob = io->ci_obj; inode = vvp_object_inode(clob); - memset(ria, 0, sizeof *ria); - - cl_object_attr_lock(clob); - ret = cl_object_attr_get(env, clob, attr); - cl_object_attr_unlock(clob); - + memset(ria, 0, sizeof(*ria)); + ret = ll_readahead_file_kms(env, io, &kms); if (ret != 0) RETURN(ret); - kms = attr->cat_kms; + if (kms == 0) { ll_ra_stats_inc(inode, RA_STAT_ZERO_LEN); RETURN(0); @@ -457,150 +654,120 @@ static int ll_readahead(const struct lu_env *env, struct cl_io *io, spin_lock(&ras->ras_lock); + /** + * Note: other thread might rollback the ras_next_readahead_idx, + * if it can not get the full size of prepared pages, see the + * end of this function. For stride read ahead, it needs to + * make sure the offset is no less than ras_stride_offset, + * so that stride read ahead can work correctly. + */ + if (stride_io_mode(ras)) + start_idx = max_t(pgoff_t, ras->ras_next_readahead_idx, + ras->ras_stride_offset >> PAGE_SHIFT); + else + start_idx = ras->ras_next_readahead_idx; + + if (ras->ras_window_pages > 0) + end_idx = ras->ras_window_start_idx + ras->ras_window_pages - 1; + /* Enlarge the RA window to encompass the full read */ if (vio->vui_ra_valid && - ras->ras_window_start + ras->ras_window_len < - vio->vui_ra_start + vio->vui_ra_count) { - ras->ras_window_len = vio->vui_ra_start + vio->vui_ra_count - - ras->ras_window_start; - } + end_idx < vio->vui_ra_start_idx + vio->vui_ra_pages - 1) + end_idx = vio->vui_ra_start_idx + vio->vui_ra_pages - 1; - /* Reserve a part of the read-ahead window that we'll be issuing */ - if (ras->ras_window_len > 0) { - /* - * Note: other thread might rollback the ras_next_readahead, - * if it can not get the full size of prepared pages, see the - * end of this function. For stride read ahead, it needs to - * make sure the offset is no less than ras_stride_offset, - * so that stride read ahead can work correctly. - */ - if (stride_io_mode(ras)) - start = max(ras->ras_next_readahead, - ras->ras_stride_offset); - else - start = ras->ras_next_readahead; - end = ras->ras_window_start + ras->ras_window_len - 1; - } + if (end_idx != 0) { + pgoff_t eof_index; - if (end != 0) { - unsigned long rpc_boundary; - /* - * Align RA window to an optimal boundary. - * - * XXX This would be better to align to cl_max_pages_per_rpc - * instead of PTLRPC_MAX_BRW_PAGES, because the RPC size may - * be aligned to the RAID stripe size in the future and that - * is more important than the RPC size. 
- */ - /* Note: we only trim the RPC, instead of extending the RPC - * to the boundary, so to avoid reading too much pages during - * random reading. */ - rpc_boundary = ((end + 1) & (~(PTLRPC_MAX_BRW_PAGES - 1))); - if (rpc_boundary > 0) - rpc_boundary--; - - if (rpc_boundary > start) - end = rpc_boundary; - - /* Truncate RA window to end of file */ - end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT)); - - ras->ras_next_readahead = max(end, end + 1); - RAS_CDEBUG(ras); - } - ria->ria_start = start; - ria->ria_end = end; - /* If stride I/O mode is detected, get stride window*/ - if (stride_io_mode(ras)) { - ria->ria_stoff = ras->ras_stride_offset; - ria->ria_length = ras->ras_stride_length; - ria->ria_pages = ras->ras_stride_pages; - } + /* Truncate RA window to end of file */ + eof_index = (pgoff_t)((kms - 1) >> PAGE_SHIFT); + if (eof_index <= end_idx) { + end_idx = eof_index; + ria->ria_eof = true; + } + } + ria->ria_start_idx = start_idx; + ria->ria_end_idx = end_idx; + /* If stride I/O mode is detected, get stride window*/ + if (stride_io_mode(ras)) { + ria->ria_stoff = ras->ras_stride_offset; + ria->ria_length = ras->ras_stride_length; + ria->ria_bytes = ras->ras_stride_bytes; + } spin_unlock(&ras->ras_lock); - if (end == 0) { + if (end_idx == 0) { ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW); RETURN(0); } - len = ria_page_count(ria); - if (len == 0) { + pages = ria_page_count(ria); + if (pages == 0) { ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW); RETURN(0); } + RAS_CDEBUG(ras); CDEBUG(D_READA, DFID": ria: %lu/%lu, bead: %lu/%lu, hit: %d\n", PFID(lu_object_fid(&clob->co_lu)), - ria->ria_start, ria->ria_end, - vio->vui_ra_valid ? vio->vui_ra_start : 0, - vio->vui_ra_valid ? vio->vui_ra_count : 0, + ria->ria_start_idx, ria->ria_end_idx, + vio->vui_ra_valid ? vio->vui_ra_start_idx : 0, + vio->vui_ra_valid ? vio->vui_ra_pages : 0, hit); /* at least to extend the readahead window to cover current read */ if (!hit && vio->vui_ra_valid && - vio->vui_ra_start + vio->vui_ra_count > ria->ria_start) { - /* to the end of current read window. */ - mlen = vio->vui_ra_start + vio->vui_ra_count - ria->ria_start; - /* trim to RPC boundary */ - start = ria->ria_start & (PTLRPC_MAX_BRW_PAGES - 1); - mlen = min(mlen, PTLRPC_MAX_BRW_PAGES - start); - } + vio->vui_ra_start_idx + vio->vui_ra_pages > ria->ria_start_idx) + ria->ria_end_idx_min = + vio->vui_ra_start_idx + vio->vui_ra_pages - 1; - reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len, mlen); - if (reserved < len) + ria->ria_reserved = ll_ra_count_get(ll_i2sbi(inode), ria, pages, + pages_min); + if (ria->ria_reserved < pages) ll_ra_stats_inc(inode, RA_STAT_MAX_IN_FLIGHT); CDEBUG(D_READA, "reserved pages: %lu/%lu/%lu, ra_cur %d, ra_max %lu\n", - reserved, len, mlen, + ria->ria_reserved, pages, pages_min, atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages), ll_i2sbi(inode)->ll_ra_info.ra_max_pages); - ret = ll_read_ahead_pages(env, io, queue, ria, &reserved, &ra_end); + ret = ll_read_ahead_pages(env, io, queue, ras, ria, &ra_end_idx); - if (reserved != 0) - ll_ra_count_put(ll_i2sbi(inode), reserved); + if (ria->ria_reserved != 0) + ll_ra_count_put(ll_i2sbi(inode), ria->ria_reserved); - if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT)) + if (ra_end_idx == end_idx && ra_end_idx == (kms >> PAGE_SHIFT)) ll_ra_stats_inc(inode, RA_STAT_EOF); - /* if we didn't get to the end of the region we reserved from - * the ras we need to go back and update the ras so that the - * next read-ahead tries from where we left off. 
we only do so - * if the region we failed to issue read-ahead on is still ahead - * of the app and behind the next index to start read-ahead from */ - CDEBUG(D_READA, "ra_end = %lu end = %lu stride end = %lu pages = %d\n", - ra_end, end, ria->ria_end, ret); + CDEBUG(D_READA, + "ra_end_idx = %lu end_idx = %lu stride end = %lu pages = %d\n", + ra_end_idx, end_idx, ria->ria_end_idx, ret); - if (ra_end != end + 1) { + if (ra_end_idx != end_idx) ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END); + if (ra_end_idx > 0) { + /* update the ras so that the next read-ahead tries from + * where we left off. */ spin_lock(&ras->ras_lock); - if (ra_end < ras->ras_next_readahead && - index_in_window(ra_end, ras->ras_window_start, 0, - ras->ras_window_len)) { - ras->ras_next_readahead = ra_end; - RAS_CDEBUG(ras); - } + ras->ras_next_readahead_idx = ra_end_idx + 1; spin_unlock(&ras->ras_lock); + RAS_CDEBUG(ras); } RETURN(ret); } -static void ras_set_start(struct inode *inode, struct ll_readahead_state *ras, - unsigned long index) +static void ras_set_start(struct ll_readahead_state *ras, pgoff_t index) { - ras->ras_window_start = index & (~(RAS_INCREASE_STEP(inode) - 1)); + ras->ras_window_start_idx = ras_align(ras, index); } /* called with the ras_lock held or from places where it doesn't matter */ -static void ras_reset(struct inode *inode, struct ll_readahead_state *ras, - unsigned long index) +static void ras_reset(struct ll_readahead_state *ras, pgoff_t index) { - ras->ras_last_readpage = index; ras->ras_consecutive_requests = 0; - ras->ras_consecutive_pages = 0; - ras->ras_window_len = 0; - ras_set_start(inode, ras, index); - ras->ras_next_readahead = max(ras->ras_window_start, index); + ras->ras_consecutive_bytes = 0; + ras->ras_window_pages = 0; + ras_set_start(ras, index); + ras->ras_next_readahead_idx = max(ras->ras_window_start_idx, index + 1); RAS_CDEBUG(ras); } @@ -610,112 +777,121 @@ static void ras_stride_reset(struct ll_readahead_state *ras) { ras->ras_consecutive_stride_requests = 0; ras->ras_stride_length = 0; - ras->ras_stride_pages = 0; + ras->ras_stride_bytes = 0; RAS_CDEBUG(ras); } void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras) { spin_lock_init(&ras->ras_lock); - ras_reset(inode, ras, 0); + ras->ras_rpc_pages = PTLRPC_MAX_BRW_PAGES; + ras_reset(ras, 0); + ras->ras_last_read_end_bytes = 0; ras->ras_requests = 0; } /* * Check whether the read request is in the stride window. - * If it is in the stride window, return 1, otherwise return 0. + * If it is in the stride window, return true, otherwise return false. 
*/ -static int index_in_stride_window(struct ll_readahead_state *ras, - unsigned long index) +static bool read_in_stride_window(struct ll_readahead_state *ras, + loff_t pos, loff_t count) { - unsigned long stride_gap; + loff_t stride_gap; - if (ras->ras_stride_length == 0 || ras->ras_stride_pages == 0 || - ras->ras_stride_pages == ras->ras_stride_length) - return 0; + if (ras->ras_stride_length == 0 || ras->ras_stride_bytes == 0 || + ras->ras_stride_bytes == ras->ras_stride_length) + return false; - stride_gap = index - ras->ras_last_readpage - 1; + stride_gap = pos - ras->ras_last_read_end_bytes - 1; /* If it is contiguous read */ if (stride_gap == 0) - return ras->ras_consecutive_pages + 1 <= ras->ras_stride_pages; + return ras->ras_consecutive_bytes + count <= + ras->ras_stride_bytes; /* Otherwise check the stride by itself */ - return (ras->ras_stride_length - ras->ras_stride_pages) == stride_gap && - ras->ras_consecutive_pages == ras->ras_stride_pages; + return (ras->ras_stride_length - ras->ras_stride_bytes) == stride_gap && + ras->ras_consecutive_bytes == ras->ras_stride_bytes && + count <= ras->ras_stride_bytes; } -static void ras_update_stride_detector(struct ll_readahead_state *ras, - unsigned long index) +static void ras_init_stride_detector(struct ll_readahead_state *ras, + loff_t pos, loff_t count) { - unsigned long stride_gap = index - ras->ras_last_readpage - 1; + loff_t stride_gap = pos - ras->ras_last_read_end_bytes - 1; - if (!stride_io_mode(ras) && (stride_gap != 0 || - ras->ras_consecutive_stride_requests == 0)) { - ras->ras_stride_pages = ras->ras_consecutive_pages; - ras->ras_stride_length = stride_gap +ras->ras_consecutive_pages; - } - LASSERT(ras->ras_request_index == 0); LASSERT(ras->ras_consecutive_stride_requests == 0); - if (index <= ras->ras_last_readpage) { + if (pos <= ras->ras_last_read_end_bytes) { /*Reset stride window for forward read*/ ras_stride_reset(ras); return; } - ras->ras_stride_pages = ras->ras_consecutive_pages; - ras->ras_stride_length = stride_gap +ras->ras_consecutive_pages; + ras->ras_stride_bytes = ras->ras_consecutive_bytes; + ras->ras_stride_length = stride_gap + ras->ras_consecutive_bytes; + ras->ras_consecutive_stride_requests++; + ras->ras_stride_offset = pos; RAS_CDEBUG(ras); - return; } static unsigned long -stride_page_count(struct ll_readahead_state *ras, unsigned long len) +stride_page_count(struct ll_readahead_state *ras, loff_t len) { - return stride_pg_count(ras->ras_stride_offset, ras->ras_stride_length, - ras->ras_stride_pages, ras->ras_stride_offset, - len); + loff_t bytes_count = + stride_byte_count(ras->ras_stride_offset, + ras->ras_stride_length, ras->ras_stride_bytes, + ras->ras_stride_offset, len); + + return (bytes_count + PAGE_SIZE - 1) >> PAGE_SHIFT; } /* Stride Read-ahead window will be increased inc_len according to * stride I/O pattern */ static void ras_stride_increase_window(struct ll_readahead_state *ras, - struct ll_ra_info *ra, - unsigned long inc_len) + struct ll_ra_info *ra, loff_t inc_bytes) { - unsigned long left, step, window_len; - unsigned long stride_len; + loff_t window_bytes, stride_bytes; + u64 left_bytes; + u64 step; + loff_t end; - LASSERT(ras->ras_stride_length > 0); - LASSERTF(ras->ras_window_start + ras->ras_window_len - >= ras->ras_stride_offset, "window_start %lu, window_len %lu" - " stride_offset %lu\n", ras->ras_window_start, - ras->ras_window_len, ras->ras_stride_offset); + /* temporarily store in page units to reduce LASSERT() cost below */ + end = ras->ras_window_start_idx + 
ras->ras_window_pages; - stride_len = ras->ras_window_start + ras->ras_window_len - - ras->ras_stride_offset; + LASSERT(ras->ras_stride_length > 0); + LASSERTF(end >= (ras->ras_stride_offset >> PAGE_SHIFT), + "window_start_idx %lu, window_pages %lu stride_offset %llu\n", + ras->ras_window_start_idx, ras->ras_window_pages, + ras->ras_stride_offset); - left = stride_len % ras->ras_stride_length; - window_len = ras->ras_window_len - left; + end <<= PAGE_SHIFT; + if (end <= ras->ras_stride_offset) + stride_bytes = 0; + else + stride_bytes = end - ras->ras_stride_offset; - if (left < ras->ras_stride_pages) - left += inc_len; - else - left = ras->ras_stride_pages + inc_len; + div64_u64_rem(stride_bytes, ras->ras_stride_length, &left_bytes); + window_bytes = ((loff_t)ras->ras_window_pages << PAGE_SHIFT) - + left_bytes; - LASSERT(ras->ras_stride_pages != 0); + if (left_bytes < ras->ras_stride_bytes) + left_bytes += inc_bytes; + else + left_bytes = ras->ras_stride_bytes + inc_bytes; - step = left / ras->ras_stride_pages; - left %= ras->ras_stride_pages; + LASSERT(ras->ras_stride_bytes != 0); - window_len += step * ras->ras_stride_length + left; + step = div64_u64_rem(left_bytes, ras->ras_stride_bytes, &left_bytes); - if (stride_page_count(ras, window_len) <= ra->ra_max_pages_per_file) - ras->ras_window_len = window_len; + window_bytes += step * ras->ras_stride_length + left_bytes; - RAS_CDEBUG(ras); + if (stride_page_count(ras, window_bytes) <= ra->ra_max_pages_per_file) + ras->ras_window_pages = (window_bytes >> PAGE_SHIFT); + + RAS_CDEBUG(ras); } static void ras_increase_window(struct inode *inode, @@ -726,165 +902,257 @@ static void ras_increase_window(struct inode *inode, * but current clio architecture does not support retrieve such * information from lower layer. FIXME later */ - if (stride_io_mode(ras)) - ras_stride_increase_window(ras, ra, RAS_INCREASE_STEP(inode)); - else - ras->ras_window_len = min(ras->ras_window_len + - RAS_INCREASE_STEP(inode), - ra->ra_max_pages_per_file); + if (stride_io_mode(ras)) { + ras_stride_increase_window(ras, ra, + (loff_t)ras->ras_rpc_pages << PAGE_SHIFT); + } else { + pgoff_t window_pages; + + window_pages = min(ras->ras_window_pages + ras->ras_rpc_pages, + ra->ra_max_pages_per_file); + if (window_pages < ras->ras_rpc_pages) + ras->ras_window_pages = window_pages; + else + ras->ras_window_pages = ras_align(ras, window_pages); + } } -static void ras_update(struct ll_sb_info *sbi, struct inode *inode, - struct ll_readahead_state *ras, unsigned long index, - unsigned hit) +/** + * Seek within 8 pages are considered as sequential read for now. + */ +static inline bool is_loose_seq_read(struct ll_readahead_state *ras, loff_t pos) { - struct ll_ra_info *ra = &sbi->ll_ra_info; - int zero = 0, stride_detect = 0, ra_miss = 0; - ENTRY; - - spin_lock(&ras->ras_lock); + return pos_in_window(pos, ras->ras_last_read_end_bytes, + 8UL << PAGE_SHIFT, 8UL << PAGE_SHIFT); +} - ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS); - - /* reset the read-ahead window in two cases. First when the app seeks - * or reads to some other part of the file. Secondly if we get a - * read-ahead miss that we think we've previously issued. This can - * be a symptom of there being so many read-ahead pages that the VM is - * reclaiming it before we get to it. 
*/ - if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) { - zero = 1; - ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE); - } else if (!hit && ras->ras_window_len && - index < ras->ras_next_readahead && - index_in_window(index, ras->ras_window_start, 0, - ras->ras_window_len)) { - ra_miss = 1; - ll_ra_stats_inc_sbi(sbi, RA_STAT_MISS_IN_WINDOW); - } +static void ras_detect_read_pattern(struct ll_readahead_state *ras, + struct ll_sb_info *sbi, + loff_t pos, size_t count, bool mmap) +{ + bool stride_detect = false; + pgoff_t index = pos >> PAGE_SHIFT; - /* On the second access to a file smaller than the tunable - * ra_max_read_ahead_whole_pages trigger RA on all pages in the - * file up to ra_max_pages_per_file. This is simply a best effort - * and only occurs once per open file. Normal RA behavior is reverted - * to for subsequent IO. The mmap case does not increment - * ras_requests and thus can never trigger this behavior. */ - if (ras->ras_requests == 2 && !ras->ras_request_index) { - __u64 kms_pages; - - kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; - - CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages, - ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file); - - if (kms_pages && - kms_pages <= ra->ra_max_read_ahead_whole_pages) { - ras->ras_window_start = 0; - ras->ras_last_readpage = 0; - ras->ras_next_readahead = 0; - ras->ras_window_len = min(ra->ra_max_pages_per_file, - ra->ra_max_read_ahead_whole_pages); - GOTO(out_unlock, 0); - } - } - if (zero) { - /* check whether it is in stride I/O mode*/ - if (!index_in_stride_window(ras, index)) { - if (ras->ras_consecutive_stride_requests == 0 && - ras->ras_request_index == 0) { - ras_update_stride_detector(ras, index); - ras->ras_consecutive_stride_requests++; - } else { + /* + * Reset the read-ahead window in two cases. First when the app seeks + * or reads to some other part of the file. Secondly if we get a + * read-ahead miss that we think we've previously issued. This can + * be a symptom of there being so many read-ahead pages that the VM + * is reclaiming it before we get to it. 
+ */ + if (!is_loose_seq_read(ras, pos)) { + /* Check whether it is in stride I/O mode */ + if (!read_in_stride_window(ras, pos, count)) { + if (ras->ras_consecutive_stride_requests == 0) + ras_init_stride_detector(ras, pos, count); + else ras_stride_reset(ras); - } - ras_reset(inode, ras, index); - ras->ras_consecutive_pages++; - GOTO(out_unlock, 0); + ras->ras_consecutive_bytes = 0; + ras_reset(ras, index); } else { - ras->ras_consecutive_pages = 0; + ras->ras_consecutive_bytes = 0; ras->ras_consecutive_requests = 0; if (++ras->ras_consecutive_stride_requests > 1) - stride_detect = 1; + stride_detect = true; RAS_CDEBUG(ras); } - } else { - if (ra_miss) { - if (index_in_stride_window(ras, index) && - stride_io_mode(ras)) { - /*If stride-RA hit cache miss, the stride dector - *will not be reset to avoid the overhead of - *redetecting read-ahead mode */ - if (index != ras->ras_last_readpage + 1) - ras->ras_consecutive_pages = 0; - ras_reset(inode, ras, index); - RAS_CDEBUG(ras); - } else { - /* Reset both stride window and normal RA - * window */ - ras_reset(inode, ras, index); - ras->ras_consecutive_pages++; - ras_stride_reset(ras); - GOTO(out_unlock, 0); - } - } else if (stride_io_mode(ras)) { - /* If this is contiguous read but in stride I/O mode - * currently, check whether stride step still is valid, - * if invalid, it will reset the stride ra window*/ - if (!index_in_stride_window(ras, index)) { - /* Shrink stride read-ahead window to be zero */ + ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE); + } else if (stride_io_mode(ras)) { + /* + * If this is contiguous read but in stride I/O mode + * currently, check whether stride step still is valid, + * if invalid, it will reset the stride ra window to + * be zero. + */ + if (!read_in_stride_window(ras, pos, count)) { + ras_stride_reset(ras); + ras->ras_window_pages = 0; + ras->ras_next_readahead_idx = index; + } + } + + ras->ras_consecutive_bytes += count; + if (mmap) { + pgoff_t idx = ras->ras_consecutive_bytes >> PAGE_SHIFT; + + if ((idx >= 4 && (idx & 3UL) == 0) || stride_detect) + ras->ras_need_increase_window = true; + } else if ((ras->ras_consecutive_requests > 1 || stride_detect)) { + ras->ras_need_increase_window = true; + } + + ras->ras_last_read_end_bytes = pos + count - 1; +} + +void ll_ras_enter(struct file *f, loff_t pos, size_t count) +{ + struct ll_file_data *fd = LUSTRE_FPRIVATE(f); + struct ll_readahead_state *ras = &fd->fd_ras; + struct inode *inode = file_inode(f); + unsigned long index = pos >> PAGE_SHIFT; + struct ll_sb_info *sbi = ll_i2sbi(inode); + + spin_lock(&ras->ras_lock); + ras->ras_requests++; + ras->ras_consecutive_requests++; + ras->ras_need_increase_window = false; + ras->ras_no_miss_check = false; + /* + * On the second access to a file smaller than the tunable + * ra_max_read_ahead_whole_pages trigger RA on all pages in the + * file up to ra_max_pages_per_file. This is simply a best effort + * and only occurs once per open file. Normal RA behavior is reverted + * to for subsequent IO. 
+ */ + if (ras->ras_requests >= 2) { + __u64 kms_pages; + struct ll_ra_info *ra = &sbi->ll_ra_info; + + kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >> + PAGE_SHIFT; + + CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages, + ra->ra_max_read_ahead_whole_pages, + ra->ra_max_pages_per_file); + + if (kms_pages && + kms_pages <= ra->ra_max_read_ahead_whole_pages) { + ras->ras_window_start_idx = 0; + ras->ras_next_readahead_idx = index + 1; + ras->ras_window_pages = min(ra->ra_max_pages_per_file, + ra->ra_max_read_ahead_whole_pages); + ras->ras_no_miss_check = true; + GOTO(out_unlock, 0); + } + } + ras_detect_read_pattern(ras, sbi, pos, count, false); +out_unlock: + spin_unlock(&ras->ras_lock); +} + +static bool index_in_stride_window(struct ll_readahead_state *ras, + pgoff_t index) +{ + loff_t pos = (loff_t)index << PAGE_SHIFT; + + if (ras->ras_stride_length == 0 || ras->ras_stride_bytes == 0 || + ras->ras_stride_bytes == ras->ras_stride_length) + return false; + + if (pos >= ras->ras_stride_offset) { + u64 offset; + + div64_u64_rem(pos - ras->ras_stride_offset, + ras->ras_stride_length, &offset); + if (offset < ras->ras_stride_bytes || + ras->ras_stride_length - offset < PAGE_SIZE) + return true; + } else if (ras->ras_stride_offset - pos < PAGE_SIZE) { + return true; + } + + return false; +} + +/* + * ll_ras_enter() is used to detect read pattern according to pos and count. + * + * ras_update() is used to detect cache miss and + * reset window or increase window accordingly + */ +static void ras_update(struct ll_sb_info *sbi, struct inode *inode, + struct ll_readahead_state *ras, pgoff_t index, + enum ras_update_flags flags) +{ + struct ll_ra_info *ra = &sbi->ll_ra_info; + bool hit = flags & LL_RAS_HIT; + + ENTRY; + spin_lock(&ras->ras_lock); + + if (!hit) + CDEBUG(D_READA, DFID " pages at %lu miss.\n", + PFID(ll_inode2fid(inode)), index); + ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS); + + /* + * The readahead window has been expanded to cover whole + * file size, we don't care whether ra miss happen or not. + * Because we will read whole file to page cache even if + * some pages missed. + */ + if (ras->ras_no_miss_check) + GOTO(out_unlock, 0); + + if (flags & LL_RAS_MMAP) + ras_detect_read_pattern(ras, sbi, (loff_t)index << PAGE_SHIFT, + PAGE_SIZE, true); + + if (!hit && ras->ras_window_pages && + index < ras->ras_next_readahead_idx && + pos_in_window(index, ras->ras_window_start_idx, 0, + ras->ras_window_pages)) { + ll_ra_stats_inc_sbi(sbi, RA_STAT_MISS_IN_WINDOW); + ras->ras_need_increase_window = false; + + if (index_in_stride_window(ras, index) && + stride_io_mode(ras)) { + /* + * if (index != ras->ras_last_readpage + 1) + * ras->ras_consecutive_pages = 0; + */ + ras_reset(ras, index); + + /* + * If stride-RA hit cache miss, the stride + * detector will not be reset to avoid the + * overhead of redetecting read-ahead mode, + * but on the condition that the stride window + * is still intersect with normal sequential + * read-ahead window. + */ + if (ras->ras_window_start_idx < ras->ras_stride_offset) ras_stride_reset(ras); - ras->ras_window_len = 0; - ras->ras_next_readahead = index; - } + RAS_CDEBUG(ras); + } else { + /* + * Reset both stride window and normal RA + * window. 
+ */ + ras_reset(ras, index); + /* ras->ras_consecutive_pages++; */ + ras->ras_consecutive_bytes = 0; + ras_stride_reset(ras); + GOTO(out_unlock, 0); } } - ras->ras_consecutive_pages++; - ras->ras_last_readpage = index; - ras_set_start(inode, ras, index); + ras_set_start(ras, index); if (stride_io_mode(ras)) { /* Since stride readahead is sentivite to the offset * of read-ahead, so we use original offset here, - * instead of ras_window_start, which is RPC aligned */ - ras->ras_next_readahead = max(index, ras->ras_next_readahead); + * instead of ras_window_start_idx, which is RPC aligned. + */ + ras->ras_next_readahead_idx = max(index + 1, + ras->ras_next_readahead_idx); + ras->ras_window_start_idx = + max_t(pgoff_t, ras->ras_window_start_idx, + ras->ras_stride_offset >> PAGE_SHIFT); } else { - if (ras->ras_next_readahead < ras->ras_window_start) - ras->ras_next_readahead = ras->ras_window_start; + if (ras->ras_next_readahead_idx < ras->ras_window_start_idx) + ras->ras_next_readahead_idx = ras->ras_window_start_idx; if (!hit) - ras->ras_next_readahead = index + 1; - } - RAS_CDEBUG(ras); - - /* Trigger RA in the mmap case where ras_consecutive_requests - * is not incremented and thus can't be used to trigger RA */ - if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) { - ras->ras_window_len = RAS_INCREASE_STEP(inode); - GOTO(out_unlock, 0); + ras->ras_next_readahead_idx = index + 1; } - /* Initially reset the stride window offset to next_readahead*/ - if (ras->ras_consecutive_stride_requests == 2 && stride_detect) { - /** - * Once stride IO mode is detected, next_readahead should be - * reset to make sure next_readahead > stride offset - */ - ras->ras_next_readahead = max(index, ras->ras_next_readahead); - ras->ras_stride_offset = index; - ras->ras_window_len = RAS_INCREASE_STEP(inode); + if (ras->ras_need_increase_window) { + ras_increase_window(inode, ras, ra); + ras->ras_need_increase_window = false; } - /* The initial ras_window_len is set to the request size. To avoid - * uselessly reading and discarding pages for random IO the window is - * only increased once per consecutive request received. */ - if ((ras->ras_consecutive_requests > 1 || stride_detect) && - !ras->ras_request_index) - ras_increase_window(inode, ras, ra); EXIT; out_unlock: - RAS_CDEBUG(ras); - ras->ras_request_index++; spin_unlock(&ras->ras_lock); - return; } int ll_writepage(struct page *vmpage, struct writeback_control *wbc) @@ -895,10 +1163,10 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc) struct cl_io *io; struct cl_page *page; struct cl_object *clob; - struct cl_env_nest nest; bool redirtied = false; bool unlocked = false; int result; + __u16 refcheck; ENTRY; LASSERT(PageLocked(vmpage)); @@ -906,7 +1174,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc) LASSERT(ll_i2dtexp(inode) != NULL); - env = cl_env_nested_get(&nest); + env = cl_env_get(&refcheck); if (IS_ERR(env)) GOTO(out, result = PTR_ERR(env)); @@ -956,7 +1224,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc) * breaking kernel which assumes ->writepage should mark * PageWriteback or clean the page. */ result = cl_sync_file_range(inode, offset, - offset + PAGE_CACHE_SIZE - 1, + offset + PAGE_SIZE - 1, CL_FSYNC_LOCAL, 1); if (result > 0) { /* actually we may have written more than one page. 
@@ -967,7 +1235,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc) } } - cl_env_nested_put(&nest, env); + cl_env_put(env, &refcheck); GOTO(out, result); out: @@ -984,17 +1252,15 @@ out: int ll_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; - struct ll_sb_info *sbi = ll_i2sbi(inode); loff_t start; loff_t end; enum cl_fsync_mode mode; int range_whole = 0; int result; - int ignore_layout = 0; ENTRY; if (wbc->range_cyclic) { - start = mapping->writeback_index << PAGE_CACHE_SHIFT; + start = (loff_t)mapping->writeback_index << PAGE_SHIFT; end = OBD_OBJECT_EOF; } else { start = wbc->range_start; @@ -1009,16 +1275,13 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc) if (wbc->sync_mode == WB_SYNC_ALL) mode = CL_FSYNC_LOCAL; - if (sbi->ll_umounting) - /* if the mountpoint is being umounted, all pages have to be - * evicted to avoid hitting LBUG when truncate_inode_pages() - * is called later on. */ - ignore_layout = 1; - if (ll_i2info(inode)->lli_clob == NULL) RETURN(0); - result = cl_sync_file_range(inode, start, end, mode, ignore_layout); + /* for directio, it would call writepages() to evict cached pages + * inside the IO context of write, which will cause deadlock at + * layout_conf since it waits for active IOs to complete. */ + result = cl_sync_file_range(inode, start, end, mode, 1); if (result > 0) { wbc->nr_to_write -= result; result = 0; @@ -1028,7 +1291,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc) if (end == OBD_OBJECT_EOF) mapping->writeback_index = 0; else - mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) +1; + mapping->writeback_index = (end >> PAGE_SHIFT) + 1; } RETURN(result); } @@ -1051,7 +1314,8 @@ struct ll_cl_context *ll_cl_find(struct file *file) return found; } -void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io) +void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io, + enum lcc_type type) { struct ll_file_data *fd = LUSTRE_FPRIVATE(file); struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx; @@ -1061,6 +1325,7 @@ void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io) lcc->lcc_cookie = current; lcc->lcc_env = env; lcc->lcc_io = io; + lcc->lcc_type = type; write_lock(&fd->fd_lock); list_add(&lcc->lcc_list, &fd->fd_lccs); @@ -1077,84 +1342,241 @@ void ll_cl_remove(struct file *file, const struct lu_env *env) write_unlock(&fd->fd_lock); } -static int ll_io_read_page(const struct lu_env *env, struct cl_io *io, - struct cl_page *page) +int ll_io_read_page(const struct lu_env *env, struct cl_io *io, + struct cl_page *page, struct file *file) { struct inode *inode = vvp_object_inode(page->cp_obj); struct ll_sb_info *sbi = ll_i2sbi(inode); - struct ll_file_data *fd = vvp_env_io(env)->vui_fd; + struct ll_file_data *fd = LUSTRE_FPRIVATE(file); struct ll_readahead_state *ras = &fd->fd_ras; struct cl_2queue *queue = &io->ci_queue; + struct cl_sync_io *anchor = NULL; struct vvp_page *vpg; int rc = 0; + bool uptodate; ENTRY; vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page)); + uptodate = vpg->vpg_defer_uptodate; + if (sbi->ll_ra_info.ra_max_pages_per_file > 0 && - sbi->ll_ra_info.ra_max_pages > 0) - ras_update(sbi, inode, ras, vvp_index(vpg), - vpg->vpg_defer_uptodate); + sbi->ll_ra_info.ra_max_pages > 0 && + !vpg->vpg_ra_updated) { + struct vvp_io *vio = vvp_env_io(env); + enum ras_update_flags flags = 0; + + if (uptodate) + flags |= 
LL_RAS_HIT; + if (!vio->vui_ra_valid) + flags |= LL_RAS_MMAP; + ras_update(sbi, inode, ras, vvp_index(vpg), flags); + } - if (vpg->vpg_defer_uptodate) { + cl_2queue_init(queue); + if (uptodate) { vpg->vpg_ra_used = 1; cl_page_export(env, page, 1); + cl_page_disown(env, io, page); + } else { + anchor = &vvp_env_info(env)->vti_anchor; + cl_sync_io_init(anchor, 1); + page->cp_sync_io = anchor; + + cl_2queue_add(queue, page); } - cl_2queue_init(queue); - /* - * Add page into the queue even when it is marked uptodate above. - * this will unlock it automatically as part of cl_page_list_disown(). - */ - cl_2queue_add(queue, page); if (sbi->ll_ra_info.ra_max_pages_per_file > 0 && sbi->ll_ra_info.ra_max_pages > 0) { int rc2; rc2 = ll_readahead(env, io, &queue->c2_qin, ras, - vpg->vpg_defer_uptodate); + uptodate, file); CDEBUG(D_READA, DFID "%d pages read ahead at %lu\n", PFID(ll_inode2fid(inode)), rc2, vvp_index(vpg)); } - if (queue->c2_qin.pl_nr > 0) + if (queue->c2_qin.pl_nr > 0) { + int count = queue->c2_qin.pl_nr; rc = cl_io_submit_rw(env, io, CRT_READ, queue); + if (rc == 0) + task_io_account_read(PAGE_SIZE * count); + } - /* - * Unlock unsent pages in case of error. - */ + + if (anchor != NULL && !cl_page_is_owned(page, io)) { /* have sent */ + rc = cl_sync_io_wait(env, anchor, 0); + + cl_page_assume(env, io, page); + cl_page_list_del(env, &queue->c2_qout, page); + + if (!PageUptodate(cl_page_vmpage(page))) { + /* Failed to read a mirror, discard this page so that + * new page can be created with new mirror. + * + * TODO: this is not needed after page reinit + * route is implemented */ + cl_page_discard(env, io, page); + } + cl_page_disown(env, io, page); + } + + /* TODO: discard all pages until page reinit route is implemented */ + cl_page_list_discard(env, io, &queue->c2_qin); + + /* Unlock unsent read pages in case of error. */ cl_page_list_disown(env, io, &queue->c2_qin); + cl_2queue_fini(env, queue); RETURN(rc); } +/* + * Possible return value: + * 0 no async readahead triggered and fast read could not be used. + * 1 no async readahead, but fast read could be used. + * 2 async readahead triggered and fast read could be used too. + * < 0 on error. + */ +static int kickoff_async_readahead(struct file *file, unsigned long pages) +{ + struct ll_readahead_work *lrw; + struct inode *inode = file_inode(file); + struct ll_sb_info *sbi = ll_i2sbi(inode); + struct ll_file_data *fd = LUSTRE_FPRIVATE(file); + struct ll_readahead_state *ras = &fd->fd_ras; + struct ll_ra_info *ra = &sbi->ll_ra_info; + unsigned long throttle; + pgoff_t start_idx = ras_align(ras, ras->ras_next_readahead_idx); + pgoff_t end_idx = start_idx + pages - 1; + + throttle = min(ra->ra_async_pages_per_file_threshold, + ra->ra_max_pages_per_file); + /* + * If this is strided i/o or the window is smaller than the + * throttle limit, we do not do async readahead. Otherwise, + * we do async readahead, allowing the user thread to do fast i/o. 
+ */ + if (stride_io_mode(ras) || !throttle || + ras->ras_window_pages < throttle) + return 0; + + if ((atomic_read(&ra->ra_cur_pages) + pages) > ra->ra_max_pages) + return 0; + + if (ras->ras_async_last_readpage_idx == start_idx) + return 1; + + /* ll_readahead_work_free() free it */ + OBD_ALLOC_PTR(lrw); + if (lrw) { + lrw->lrw_file = get_file(file); + lrw->lrw_start_idx = start_idx; + lrw->lrw_end_idx = end_idx; + spin_lock(&ras->ras_lock); + ras->ras_next_readahead_idx = end_idx + 1; + ras->ras_async_last_readpage_idx = start_idx; + spin_unlock(&ras->ras_lock); + ll_readahead_work_add(inode, lrw); + } else { + return -ENOMEM; + } + + return 2; +} + int ll_readpage(struct file *file, struct page *vmpage) { - struct inode *inode = file->f_path.dentry->d_inode; + struct inode *inode = file_inode(file); struct cl_object *clob = ll_i2info(inode)->lli_clob; struct ll_cl_context *lcc; - const struct lu_env *env; - struct cl_io *io; + const struct lu_env *env = NULL; + struct cl_io *io = NULL; struct cl_page *page; + struct ll_sb_info *sbi = ll_i2sbi(inode); int result; ENTRY; lcc = ll_cl_find(file); - if (lcc == NULL) { + if (lcc != NULL) { + env = lcc->lcc_env; + io = lcc->lcc_io; + } + + if (io == NULL) { /* fast read */ + struct inode *inode = file_inode(file); + struct ll_file_data *fd = LUSTRE_FPRIVATE(file); + struct ll_readahead_state *ras = &fd->fd_ras; + struct lu_env *local_env = NULL; + unsigned long fast_read_pages = + max(RA_REMAIN_WINDOW_MIN, ras->ras_rpc_pages); + struct vvp_page *vpg; + + result = -ENODATA; + + /* TODO: need to verify the layout version to make sure + * the page is not invalid due to layout change. */ + page = cl_vmpage_page(vmpage, clob); + if (page == NULL) { + unlock_page(vmpage); + ll_ra_stats_inc_sbi(sbi, RA_STAT_FAILED_FAST_READ); + RETURN(result); + } + + vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page)); + if (vpg->vpg_defer_uptodate) { + enum ras_update_flags flags = LL_RAS_HIT; + + if (lcc && lcc->lcc_type == LCC_MMAP) + flags |= LL_RAS_MMAP; + + /* For fast read, it updates read ahead state only + * if the page is hit in cache because non cache page + * case will be handled by slow read later. */ + ras_update(sbi, inode, ras, vvp_index(vpg), flags); + /* avoid duplicate ras_update() call */ + vpg->vpg_ra_updated = 1; + + /* Check if we can issue a readahead RPC, if that is + * the case, we can't do fast IO because we will need + * a cl_io to issue the RPC. */ + if (ras->ras_window_start_idx + ras->ras_window_pages < + ras->ras_next_readahead_idx + fast_read_pages || + kickoff_async_readahead(file, fast_read_pages) > 0) + result = 0; + } + + if (!env) { + local_env = cl_env_percpu_get(); + env = local_env; + } + + /* export the page and skip io stack */ + if (result == 0) { + vpg->vpg_ra_used = 1; + cl_page_export(env, page, 1); + } else { + ll_ra_stats_inc_sbi(sbi, RA_STAT_FAILED_FAST_READ); + } + /* release page refcount before unlocking the page to ensure + * the object won't be destroyed in the calling path of + * cl_page_put(). Please see comment in ll_releasepage(). 
*/ + cl_page_put(env, page); unlock_page(vmpage); - RETURN(-EIO); + if (local_env) + cl_env_percpu_put(local_env); + + RETURN(result); } - env = lcc->lcc_env; - io = lcc->lcc_io; - LASSERT(io != NULL); LASSERT(io->ci_state == CIS_IO_GOING); page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE); if (!IS_ERR(page)) { LASSERT(page->cp_type == CPT_CACHEABLE); if (likely(!PageUptodate(vmpage))) { cl_page_assume(env, io, page); - result = ll_io_read_page(env, io, page); + + result = ll_io_read_page(env, io, page, file); } else { /* Page from a non-object file. */ unlock_page(vmpage); @@ -1167,28 +1589,3 @@ int ll_readpage(struct file *file, struct page *vmpage) } RETURN(result); } - -int ll_page_sync_io(const struct lu_env *env, struct cl_io *io, - struct cl_page *page, enum cl_req_type crt) -{ - struct cl_2queue *queue; - int result; - - LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE); - - queue = &io->ci_queue; - cl_2queue_init_page(queue, page); - - result = cl_io_submit_sync(env, io, crt, queue, 0); - LASSERT(cl_page_is_owned(page, io)); - - if (crt == CRT_READ) - /* - * in CRT_WRITE case page is left locked even in case of - * error. - */ - cl_page_list_disown(env, io, &queue->c2_qin); - cl_2queue_fini(env, queue); - - return result; -}
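
The core of this change is the switch from page-granular to byte-granular stride read-ahead accounting in stride_byte_count() and ria_page_count() above. Below is a minimal user-space sketch of that arithmetic, assuming a standalone build: PAGE_SHIFT, PAGE_SIZE and div64_u64_rem() here are local stand-ins for the kernel definitions, and the values in main() are only illustrative. It demonstrates the counting logic of the patch, not the Lustre implementation itself.

/*
 * Minimal user-space sketch of the byte-based stride read-ahead counting
 * introduced by this patch.  PAGE_SHIFT, PAGE_SIZE and div64_u64_rem()
 * are local stand-ins for the kernel definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* stand-in for kernel div64_u64_rem(): returns quotient, stores remainder */
static uint64_t div64_u64_rem(uint64_t dividend, uint64_t divisor,
			      uint64_t *rem)
{
	*rem = dividend % divisor;
	return dividend / divisor;
}

/*
 * How many bytes of [off, off + length) fall inside the stride pattern
 * "st_bytes of data every st_len bytes, starting at st_off" -- the same
 * arithmetic as stride_byte_count() in the patched rw.c.
 */
static uint64_t stride_byte_count(uint64_t st_off, uint64_t st_len,
				  uint64_t st_bytes, uint64_t off,
				  uint64_t length)
{
	uint64_t start = off > st_off ? off - st_off : 0;
	uint64_t end = off + length > st_off ? off + length - st_off : 0;
	uint64_t start_left, end_left, bytes_count;

	if (st_len == 0 || length == 0 || end == 0)
		return length;

	/* partial chunk left in the stride containing the window start */
	start = div64_u64_rem(start, st_len, &start_left);
	if (start_left < st_bytes)
		start_left = st_bytes - start_left;
	else
		start_left = 0;

	/* partial chunk covered in the stride containing the window end */
	end = div64_u64_rem(end, st_len, &end_left);
	if (end_left > st_bytes)
		end_left = st_bytes;

	if (start == end)
		bytes_count = end_left - (st_bytes - start_left);
	else
		bytes_count = start_left +
			      st_bytes * (end - start - 1) + end_left;

	return bytes_count;
}

int main(void)
{
	/* illustrative values: 1 MiB of data every 4 MiB, 16 MiB window */
	uint64_t st_off = 0, st_len = 4 << 20, st_bytes = 1 << 20;
	uint64_t off = 0, length = 16 << 20;
	uint64_t bytes = stride_byte_count(st_off, st_len, st_bytes,
					   off, length);

	/* same round-up to whole pages that ria_page_count() applies */
	printf("bytes %llu pages %llu\n",
	       (unsigned long long)bytes,
	       (unsigned long long)((bytes + PAGE_SIZE - 1) >> PAGE_SHIFT));
	return 0;
}

With a stride of 1 MiB of data every 4 MiB starting at offset 0, a 16 MiB window contains four 1 MiB chunks, so the sketch reports 4 MiB of data and, after the same round-up used by ria_page_count(), 1024 pages of 4 KiB.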