+ struct page *page = llap->llap_page;
+ int rc;
+
+ page_cache_get(page);
+ llap->llap_defer_uptodate = defer;
+ llap->llap_ra_used = 0;
+ rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
+ NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
+ CFS_PAGE_SIZE, 0, ASYNC_COUNT_STABLE |
+ ASYNC_READY | ASYNC_URGENT);
+ if (rc) {
+ LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
+ page_cache_release(page);
+ }
+ RETURN(rc);
+}
+
+/* Bump readahead statistics counter @which.  The LASSERTF pins @which to
+ * the valid range [0, _NR_RA_STAT).  Lock-free by design: the caller is
+ * expected to hold the lock protecting ra->ra_stats (ll_ra_stats_inc()
+ * takes sbi->ll_lock around this). */
+static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
+{
+ LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
+ ra->ra_stats[which]++;
+}
+
+/* Locked wrapper around ll_ra_stats_inc_unlocked(): bump readahead stat
+ * @which for the superblock owning @mapping under sbi->ll_lock. */
+static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
+ /* reuse sbi instead of calling ll_i2sbi(mapping->host) a second time */
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
+
+ spin_lock(&sbi->ll_lock);
+ ll_ra_stats_inc_unlocked(ra, which);
+ spin_unlock(&sbi->ll_lock);
+}
+
+/* Account for a readahead page being dropped without use.  A page only
+ * counts as discarded readahead when it was queued as deferred-uptodate
+ * (llap_defer_uptodate set) and llap_ra_used is still clear — presumably
+ * llap_ra_used is set when a reader actually consumes the page; confirm
+ * against the readpage path. */
+void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
+{
+ if (!llap->llap_defer_uptodate || llap->llap_ra_used)
+ return;
+
+ ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
+}
+
+/* Dump the current readahead state for debugging.  The expansion has no
+ * trailing semicolon, so "RAS_CDEBUG(ras);" is a single statement and is
+ * safe inside an unbraced if/else; the argument is parenthesized to keep
+ * expression arguments well-formed. */
+#define RAS_CDEBUG(ras) \
+ CDEBUG(D_READA, \
+ "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu\n", \
+ (ras)->ras_last_readpage, (ras)->ras_consecutive_requests, \
+ (ras)->ras_consecutive_pages, (ras)->ras_window_start, \
+ (ras)->ras_window_len, (ras)->ras_next_readahead, \
+ (ras)->ras_requests, (ras)->ras_request_index)
+
+/* Return non-zero iff @index lies within the inclusive window
+ * [@point - @before, @point + @after].  All arithmetic is unsigned, so
+ * the two fixups below clamp the window when it wraps: an underflowed
+ * start is clamped to 0, an overflowed end to ~0 (ULONG_MAX). */
+static int index_in_window(unsigned long index, unsigned long point,
+ unsigned long before, unsigned long after)
+{
+ unsigned long start = point - before, end = point + after;
+
+ if (start > point)
+ start = 0;
+ if (end < point)
+ end = ~0;
+
+ return start <= index && index <= end;
+}
+
+/* Fetch the per-file-descriptor readahead state stored in the file's
+ * Lustre private data. */
+static struct ll_readahead_state *ll_ras_get(struct file *f)
+{
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(f);
+
+ return &fd->fd_ras;
+}
+
+/* Register the current task as an active reader of @f.  Under ras_lock:
+ * bumps the request counters, resets the per-request index, records the
+ * reading task in @rar, and links @rar onto the bead list so that
+ * ll_ra_read_get() can later look it up by lrr_reader == current.
+ * Paired with ll_ra_read_ex(), which removes @rar again. */
+void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
+{
+ struct ll_readahead_state *ras;
+
+ ras = ll_ras_get(f);
+
+ spin_lock(&ras->ras_lock);
+ ras->ras_requests++;
+ ras->ras_request_index = 0;
+ ras->ras_consecutive_requests++;
+ rar->lrr_reader = current;
+
+ list_add(&rar->lrr_linkage, &ras->ras_read_beads);
+ spin_unlock(&ras->ras_lock);
+}
+
+/* Unregister a reader previously added by ll_ra_read_in(): unlink @rar
+ * from the per-file bead list under ras_lock. */
+void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
+{
+ struct ll_readahead_state *ras = ll_ras_get(f);
+
+ spin_lock(&ras->ras_lock);
+ list_del_init(&rar->lrr_linkage);
+ spin_unlock(&ras->ras_lock);
+}
+
+/* Walk the bead list looking for the entry registered by the current
+ * task (lrr_reader == current, set in ll_ra_read_in()).  Returns the
+ * matching descriptor or NULL.  Caller must hold ras->ras_lock, as the
+ * "_locked" suffix indicates — this scan itself takes no lock. */
+static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
+{
+ struct ll_ra_read *scan;
+
+ list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
+ if (scan->lrr_reader == current)
+ return scan;
+ }
+ return NULL;
+}
+
+/* Return the current task's active read descriptor for @f, or NULL if
+ * this task has no registered read.  Takes ras_lock around the lookup. */
+struct ll_ra_read *ll_ra_read_get(struct file *f)
+{
+ struct ll_readahead_state *ras = ll_ras_get(f);
+ struct ll_ra_read *bead;
+
+ spin_lock(&ras->ras_lock);
+ bead = ll_ra_read_get_locked(ras);
+ spin_unlock(&ras->ras_lock);
+ return bead;
+}
+
+static int ll_readahead(struct ll_readahead_state *ras,
+ struct obd_export *exp, struct address_space *mapping,
+ struct obd_io_group *oig, int flags)
+{
+ unsigned long i, start = 0, end = 0, reserved;
+ struct ll_async_page *llap;
+ struct page *page;
+ int rc, ret = 0, match_failed = 0;
+ __u64 kms;
+ unsigned int gfp_mask;
+ struct inode *inode;
+ struct lov_stripe_md *lsm;
+ struct ll_ra_read *bead;
+ struct ost_lvb lvb;