lprocfs_counter_incr(sbi->ll_ra_stats, which);
}
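+/* Readahead is enabled only when both the per-file and the global
+ * readahead page limits are non-zero. */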
+static inline bool ll_readahead_enabled(struct ll_sb_info *sbi)
+{
+ return sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
+ sbi->ll_ra_info.ra_max_pages > 0;
+}
+
void ll_ra_stats_inc(struct inode *inode, enum ra_stat which)
{
struct ll_sb_info *sbi = ll_i2sbi(inode);
return start <= pos && pos <= end;
}
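+/* Hint passed to ll_read_ahead_page() describing how certain we are
+ * that the page will actually be needed. */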
+enum ll_ra_page_hint {
+ MAYNEED = 0, /* this page may be accessed soon */
+ WILLNEED /* this page is guaranteed to be needed */
+};
+
/**
* Initiates read-ahead of a page with given index.
*
* \retval 0: page was added into \a queue for read ahead.
*/
static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, pgoff_t index)
+ struct cl_page_list *queue, pgoff_t index,
+ enum ll_ra_page_hint hint)
{
struct cl_object *clob = io->ci_obj;
struct inode *inode = vvp_object_inode(clob);
- struct page *vmpage;
+ struct page *vmpage = NULL;
struct cl_page *page;
struct vvp_page *vpg;
enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
int rc = 0;
const char *msg = NULL;
+
ENTRY;
- vmpage = grab_cache_page_nowait(inode->i_mapping, index);
- if (vmpage == NULL) {
- which = RA_STAT_FAILED_GRAB_PAGE;
- msg = "g_c_p_n failed";
- GOTO(out, rc = -EBUSY);
+ switch (hint) {
+ case MAYNEED:
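+ /* Speculative read: do not block if the page
+ * cannot be grabbed without waiting. */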
+ vmpage = grab_cache_page_nowait(inode->i_mapping, index);
+ if (vmpage == NULL) {
+ which = RA_STAT_FAILED_GRAB_PAGE;
+ msg = "g_c_p_n failed";
+ GOTO(out, rc = -EBUSY);
+ }
+ break;
+ case WILLNEED:
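+ /* The page is definitely needed, so it is worth
+ * blocking to allocate and lock it. */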
+ vmpage = find_or_create_page(inode->i_mapping, index,
+ GFP_NOFS);
+ if (vmpage == NULL)
+ GOTO(out, rc = -ENOMEM);
+ break;
+ default:
+ /* should never be reached */
+ GOTO(out, rc = -EINVAL);
}
-
+
/* Check if vmpage was truncated or reclaimed */
if (vmpage->mapping != inode->i_mapping) {
which = RA_STAT_WRONG_GRAB_PAGE;
unlock_page(vmpage);
put_page(vmpage);
}
- if (msg != NULL) {
+ if (msg != NULL && hint == MAYNEED) {
ll_ra_stats_inc(inode, which);
CDEBUG(D_READA, "%s\n", msg);
if (ra.cra_end_idx == 0 || ra.cra_end_idx < page_idx) {
pgoff_t end_idx;
+ /*
+ * Do not shrink ria_end_idx in any case until
+ * the minimum end of the current read is covered.
+ *
+ * Do not extend the read lock across a stripe
+ * boundary if lock contention is detected.
+ */
+ if (ra.cra_contention &&
+ page_idx > ria->ria_end_idx_min) {
+ ria->ria_end_idx = *ra_end;
+ break;
+ }
+
cl_read_ahead_release(env, &ra);
rc = cl_io_read_ahead(env, io, page_idx, &ra);
if (rc < 0)
break;
- /* Do not shrink ria_end_idx at any case until
- * the minimum end of current read is covered.
- * And only shrink ria_end_idx if the matched
- * LDLM lock doesn't cover more. */
- if (page_idx > ra.cra_end_idx ||
- (ra.cra_contention &&
- page_idx > ria->ria_end_idx_min)) {
+ /*
+ * Only shrink ria_end_idx if the matched
+ * LDLM lock doesn't cover more.
+ */
+ if (page_idx > ra.cra_end_idx) {
ria->ria_end_idx = ra.cra_end_idx;
break;
}
break;
/* If the page is inside the read-ahead window */
- rc = ll_read_ahead_page(env, io, queue, page_idx);
+ rc = ll_read_ahead_page(env, io, queue, page_idx,
+ MAYNEED);
if (rc < 0 && rc != -EBUSY)
break;
if (rc == -EBUSY) {
__u64 kms;
int rc;
pgoff_t eof_index;
+ struct ll_sb_info *sbi;
work = container_of(wq, struct ll_readahead_work,
lrw_readahead_work);
- fd = LUSTRE_FPRIVATE(work->lrw_file);
+ fd = work->lrw_file->private_data;
ras = &fd->fd_ras;
file = work->lrw_file;
inode = file_inode(file);
+ sbi = ll_i2sbi(inode);
env = cl_env_alloc(&refcheck, LCT_NOREF);
if (IS_ERR(env))
ria->ria_end_idx = work->lrw_end_idx;
pages = ria->ria_end_idx - ria->ria_start_idx + 1;
- ria->ria_reserved = ll_ra_count_get(ll_i2sbi(inode), ria,
+ ria->ria_reserved = ll_ra_count_get(sbi, ria,
ria_page_count(ria), pages_min);
CDEBUG(D_READA,
if (rc)
GOTO(out_put_env, rc);
+ /* overwrite the jobid initialized in vvp_io_init() */
+ if (strncmp(ll_i2info(inode)->lli_jobid, work->lrw_jobid,
+ sizeof(work->lrw_jobid)))
+ memcpy(ll_i2info(inode)->lli_jobid, work->lrw_jobid,
+ sizeof(work->lrw_jobid));
+
vvp_env_io(env)->vui_io_subtype = IO_NORMAL;
vvp_env_io(env)->vui_fd = fd;
io->ci_state = CIS_LOCKED;
out_free_work:
if (ra_end_idx > 0)
ll_ra_stats_inc_sbi(ll_i2sbi(inode), RA_STAT_ASYNC);
+ atomic_dec(&sbi->ll_ra_info.ra_async_inflight);
ll_readahead_work_free(work);
}
/* at least to extend the readahead window to cover current read */
if (!hit && vio->vui_ra_valid &&
- vio->vui_ra_start_idx + vio->vui_ra_pages > ria->ria_start_idx)
+ vio->vui_ra_start_idx + vio->vui_ra_pages > ria->ria_start_idx) {
ria->ria_end_idx_min =
vio->vui_ra_start_idx + vio->vui_ra_pages - 1;
+ pages_min = vio->vui_ra_start_idx + vio->vui_ra_pages -
+ ria->ria_start_idx;
+ }
ria->ria_reserved = ll_ra_count_get(ll_i2sbi(inode), ria, pages,
pages_min);
RETURN(ret);
}
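+/**
+ * Queue the pages in [\a start, \a end] for read, clipping \a end to
+ * the last page below the file's known minimum size (kms).
+ *
+ * \retval the number of pages queued, 0 if none, or a negative errno.
+ */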
+static int ll_readpages(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue,
+ pgoff_t start, pgoff_t end)
+{
+ int ret = 0;
+ __u64 kms;
+ pgoff_t page_idx;
+ int count = 0;
+
+ ENTRY;
+
+ ret = ll_readahead_file_kms(env, io, &kms);
+ if (ret != 0)
+ RETURN(ret);
+
+ if (kms == 0)
+ RETURN(0);
+
+ if (end != 0) {
+ unsigned long end_index;
+
+ end_index = (unsigned long)((kms - 1) >> PAGE_SHIFT);
+ if (end_index <= end)
+ end = end_index;
+ }
+
+ for (page_idx = start; page_idx <= end; page_idx++) {
+ ret = ll_read_ahead_page(env, io, queue, page_idx,
+ WILLNEED);
+ if (ret < 0)
+ break;
+ else if (ret == 0) /* ret == 1 means the page was already uptodate */
+ count++;
+ }
+
+ RETURN(count > 0 ? count : ret);
+}
+
static void ras_set_start(struct ll_readahead_state *ras, pgoff_t index)
{
ras->ras_window_start_idx = ras_align(ras, index);
loff_t bytes_count =
stride_byte_count(ras->ras_stride_offset,
ras->ras_stride_length, ras->ras_stride_bytes,
- ras->ras_stride_offset, len);
+ ras->ras_window_start_idx << PAGE_SHIFT, len);
return (bytes_count + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
stride_bytes = end - ras->ras_stride_offset;
div64_u64_rem(stride_bytes, ras->ras_stride_length, &left_bytes);
- window_bytes = ((loff_t)ras->ras_window_pages << PAGE_SHIFT) -
- left_bytes;
-
- if (left_bytes < ras->ras_stride_bytes)
- left_bytes += inc_bytes;
- else
- left_bytes = ras->ras_stride_bytes + inc_bytes;
+ window_bytes = (ras->ras_window_pages << PAGE_SHIFT);
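+ /*
+ * If the window ends inside the data region of a stride,
+ * try to grow it within that region first; any remaining
+ * increment then steps over whole strides below.
+ */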
+ if (left_bytes < ras->ras_stride_bytes) {
+ if (ras->ras_stride_bytes - left_bytes >= inc_bytes) {
+ window_bytes += inc_bytes;
+ goto out;
+ } else {
+ window_bytes += (ras->ras_stride_bytes - left_bytes);
+ inc_bytes -= (ras->ras_stride_bytes - left_bytes);
+ }
+ } else {
+ window_bytes += (ras->ras_stride_length - left_bytes);
+ }
LASSERT(ras->ras_stride_bytes != 0);
- step = div64_u64_rem(left_bytes, ras->ras_stride_bytes, &left_bytes);
+ step = div64_u64_rem(inc_bytes, ras->ras_stride_bytes, &left_bytes);
window_bytes += step * ras->ras_stride_length + left_bytes;
+ LASSERT(window_bytes > 0);
- if (stride_page_count(ras, window_bytes) <= ra->ra_max_pages_per_file)
+out:
+ if (stride_page_count(ras, window_bytes) <=
+ ra->ra_max_pages_per_file || ras->ras_window_pages == 0)
ras->ras_window_pages = (window_bytes >> PAGE_SHIFT);
+ LASSERT(ras->ras_window_pages > 0);
+
RAS_CDEBUG(ras);
}
void ll_ras_enter(struct file *f, loff_t pos, size_t count)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(f);
+ struct ll_file_data *fd = f->private_data;
struct ll_readahead_state *ras = &fd->fd_ras;
struct inode *inode = file_inode(f);
unsigned long index = pos >> PAGE_SHIFT;
struct ll_cl_context *ll_cl_find(struct file *file)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_cl_context *lcc;
struct ll_cl_context *found = NULL;
void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io,
enum lcc_type type)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx;
memset(lcc, 0, sizeof(*lcc));
void ll_cl_remove(struct file *file, const struct lu_env *env)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx;
write_lock(&fd->fd_lock);
{
struct inode *inode = vvp_object_inode(page->cp_obj);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_readahead_state *ras = &fd->fd_ras;
struct cl_2queue *queue = &io->ci_queue;
struct cl_sync_io *anchor = NULL;
struct vvp_page *vpg;
- int rc = 0;
+ int rc = 0, rc2 = 0;
bool uptodate;
+ pgoff_t io_start_index;
+ pgoff_t io_end_index;
ENTRY;
vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
uptodate = vpg->vpg_defer_uptodate;
- if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
- sbi->ll_ra_info.ra_max_pages > 0 &&
- !vpg->vpg_ra_updated) {
+ if (ll_readahead_enabled(sbi) && !vpg->vpg_ra_updated) {
struct vvp_io *vio = vvp_env_io(env);
enum ras_update_flags flags = 0;
cl_2queue_add(queue, page);
}
- if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
- sbi->ll_ra_info.ra_max_pages > 0) {
- int rc2;
-
+ io_start_index = cl_index(io->ci_obj, io->u.ci_rw.crw_pos);
+ io_end_index = cl_index(io->ci_obj, io->u.ci_rw.crw_pos +
+ io->u.ci_rw.crw_count - 1);
+ if (ll_readahead_enabled(sbi)) {
rc2 = ll_readahead(env, io, &queue->c2_qin, ras,
uptodate, file);
- CDEBUG(D_READA, DFID "%d pages read ahead at %lu\n",
+ CDEBUG(D_READA, DFID " %d pages read ahead at %lu\n",
+ PFID(ll_inode2fid(inode)), rc2, vvp_index(vpg));
+ } else if (vvp_index(vpg) == io_start_index &&
+ io_end_index - io_start_index > 0) {
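+ /* Readahead is disabled, but queue the remaining
+ * pages of this read so they can be submitted
+ * together. */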
+ rc2 = ll_readpages(env, io, &queue->c2_qin, io_start_index + 1,
+ io_end_index);
+ CDEBUG(D_READA, DFID " %d pages read at %lu\n",
PFID(ll_inode2fid(inode)), rc2, vvp_index(vpg));
}
struct ll_readahead_work *lrw;
struct inode *inode = file_inode(file);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_readahead_state *ras = &fd->fd_ras;
struct ll_ra_info *ra = &sbi->ll_ra_info;
unsigned long throttle;
* we do async readahead, allowing the user thread to do fast i/o.
*/
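+ /* Also skip async readahead when too many async requests
+ * are already in flight. */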
if (stride_io_mode(ras) || !throttle ||
- ras->ras_window_pages < throttle)
+ ras->ras_window_pages < throttle ||
+ atomic_read(&ra->ra_async_inflight) > ra->ra_async_max_active)
return 0;
if ((atomic_read(&ra->ra_cur_pages) + pages) > ra->ra_max_pages)
/* ll_readahead_work_free() free it */
OBD_ALLOC_PTR(lrw);
if (lrw) {
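+ /* Balanced by the atomic_dec() when the work completes. */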
+ atomic_inc(&sbi->ll_ra_info.ra_async_inflight);
lrw->lrw_file = get_file(file);
lrw->lrw_start_idx = start_idx;
lrw->lrw_end_idx = end_idx;
ras->ras_next_readahead_idx = end_idx + 1;
ras->ras_async_last_readpage_idx = start_idx;
spin_unlock(&ras->ras_lock);
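+ /* Save the submitter's jobid; the worker thread will
+ * copy it into the inode before issuing the read. */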
+ memcpy(lrw->lrw_jobid, ll_i2info(inode)->lli_jobid,
+ sizeof(lrw->lrw_jobid));
ll_readahead_work_add(inode, lrw);
} else {
return -ENOMEM;
if (io == NULL) { /* fast read */
struct inode *inode = file_inode(file);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_readahead_state *ras = &fd->fd_ras;
struct lu_env *local_env = NULL;
struct vvp_page *vpg;