if (ra.cra_end_idx == 0 || ra.cra_end_idx < page_idx) {
pgoff_t end_idx;
+ /*
+ * Do not shrink ria_end_idx in any case until
+ * the minimum end of the current read is covered.
+ *
+ * Do not extend the read lock across a stripe if
+ * lock contention is detected.
+ */
+ if (ra.cra_contention &&
+ page_idx > ria->ria_end_idx_min) {
+ ria->ria_end_idx = *ra_end;
+ break;
+ }
+
cl_read_ahead_release(env, &ra);
rc = cl_io_read_ahead(env, io, page_idx, &ra);
if (rc < 0)
break;
- /* Do not shrink ria_end_idx at any case until
- * the minimum end of current read is covered.
- * And only shrink ria_end_idx if the matched
- * LDLM lock doesn't cover more. */
- if (page_idx > ra.cra_end_idx ||
- (ra.cra_contention &&
- page_idx > ria->ria_end_idx_min)) {
+ /*
+ * Only shrink ria_end_idx if the matched
+ * LDLM lock doesn't cover more.
+ */
+ if (page_idx > ra.cra_end_idx) {
ria->ria_end_idx = ra.cra_end_idx;
break;
}
__u64 kms;
int rc;
pgoff_t eof_index;
+ struct ll_sb_info *sbi;
work = container_of(wq, struct ll_readahead_work,
lrw_readahead_work);
- fd = LUSTRE_FPRIVATE(work->lrw_file);
+ fd = work->lrw_file->private_data;
ras = &fd->fd_ras;
file = work->lrw_file;
inode = file_inode(file);
+ sbi = ll_i2sbi(inode);
env = cl_env_alloc(&refcheck, LCT_NOREF);
if (IS_ERR(env))
ria->ria_end_idx = work->lrw_end_idx;
pages = ria->ria_end_idx - ria->ria_start_idx + 1;
- ria->ria_reserved = ll_ra_count_get(ll_i2sbi(inode), ria,
+ ria->ria_reserved = ll_ra_count_get(sbi, ria,
ria_page_count(ria), pages_min);
CDEBUG(D_READA,
out_free_work:
if (ra_end_idx > 0)
ll_ra_stats_inc_sbi(ll_i2sbi(inode), RA_STAT_ASYNC);
+ atomic_dec(&sbi->ll_ra_info.ra_async_inflight);
ll_readahead_work_free(work);
}
/* at least to extend the readahead window to cover current read */
if (!hit && vio->vui_ra_valid &&
- vio->vui_ra_start_idx + vio->vui_ra_pages > ria->ria_start_idx)
+ vio->vui_ra_start_idx + vio->vui_ra_pages > ria->ria_start_idx) {
ria->ria_end_idx_min =
vio->vui_ra_start_idx + vio->vui_ra_pages - 1;
+ pages_min = vio->vui_ra_start_idx + vio->vui_ra_pages -
+ ria->ria_start_idx;
+ }
ria->ria_reserved = ll_ra_count_get(ll_i2sbi(inode), ria, pages,
pages_min);
void ll_ras_enter(struct file *f, loff_t pos, size_t count)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(f);
+ struct ll_file_data *fd = f->private_data;
struct ll_readahead_state *ras = &fd->fd_ras;
struct inode *inode = file_inode(f);
unsigned long index = pos >> PAGE_SHIFT;
struct ll_cl_context *ll_cl_find(struct file *file)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_cl_context *lcc;
struct ll_cl_context *found = NULL;
void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io,
enum lcc_type type)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx;
memset(lcc, 0, sizeof(*lcc));
void ll_cl_remove(struct file *file, const struct lu_env *env)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx;
write_lock(&fd->fd_lock);
{
struct inode *inode = vvp_object_inode(page->cp_obj);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_readahead_state *ras = &fd->fd_ras;
struct cl_2queue *queue = &io->ci_queue;
struct cl_sync_io *anchor = NULL;
struct ll_readahead_work *lrw;
struct inode *inode = file_inode(file);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_readahead_state *ras = &fd->fd_ras;
struct ll_ra_info *ra = &sbi->ll_ra_info;
unsigned long throttle;
* we do async readahead, allowing the user thread to do fast i/o.
*/
if (stride_io_mode(ras) || !throttle ||
- ras->ras_window_pages < throttle)
+ ras->ras_window_pages < throttle ||
+ atomic_read(&ra->ra_async_inflight) > ra->ra_async_max_active)
return 0;
if ((atomic_read(&ra->ra_cur_pages) + pages) > ra->ra_max_pages)
/* ll_readahead_work_free() free it */
OBD_ALLOC_PTR(lrw);
if (lrw) {
+ atomic_inc(&sbi->ll_ra_info.ra_async_inflight);
lrw->lrw_file = get_file(file);
lrw->lrw_start_idx = start_idx;
lrw->lrw_end_idx = end_idx;
if (io == NULL) { /* fast read */
struct inode *inode = file_inode(file);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_readahead_state *ras = &fd->fd_ras;
struct lu_env *local_env = NULL;
struct vvp_page *vpg;