+
+#define RIA_DEBUG(ria) \
+ CDEBUG(D_READA, "rs %lu re %lu ro %lu rl %lu rp %lu\n", \
+ ria->ria_start, ria->ria_end, ria->ria_stoff, ria->ria_length,\
+ ria->ria_pages)
+
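+/* grow the read-ahead window in 1 MiB steps (in pages: 256 with 4 KiB CFS_PAGE_SIZE) */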
+#define RAS_INCREASE_STEP (1024 * 1024 >> CFS_PAGE_SHIFT)
+
+static inline int stride_io_mode(struct ll_readahead_state *ras)
+{
+ return ras->ras_consecutive_stride_requests > 1;
+}
+
+/* Count how many pages in [off, off + length] will be read under stride
+ * I/O mode, where stride_offset = st_off, stride_length = st_len and
+ * stride_pages = st_pgs.
+ */
+static unsigned long
+stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
+ unsigned long off, unsigned length)
+{
+ unsigned long cont_len = st_off > off ? st_off - off : 0;
+ __u64 stride_len = length + off > st_off ?
+ length + off + 1 - st_off : 0;
+ unsigned long left, pg_count;
+
+ if (st_len == 0 || length == 0)
+ return length;
+
+ left = do_div(stride_len, st_len);
+ left = min(left, st_pgs);
+
+ pg_count = left + stride_len * st_pgs + cont_len;
+
+ LASSERT(pg_count >= left);
+
+ CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %u"
+ "pgcount %lu\n", st_off, st_len, st_pgs, off, length, pg_count);
+
+ return pg_count;
+}
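
Not part of the patch — a minimal userspace sketch of the stride_pg_count() arithmetic above, with do_div() replaced by plain 64-bit division; the stride values are illustrative only:

    #include <stdio.h>

    static unsigned long
    example_stride_pg_count(unsigned long st_off, unsigned long st_len,
                            unsigned long st_pgs, unsigned long off,
                            unsigned long length)
    {
            unsigned long cont_len = st_off > off ? st_off - off : 0;
            unsigned long long stride_len = length + off > st_off ?
                                            length + off + 1 - st_off : 0;
            unsigned long left;

            if (st_len == 0 || length == 0)
                    return length;

            left = stride_len % st_len;    /* pages in the partial last stride */
            stride_len /= st_len;          /* number of whole strides */
            if (left > st_pgs)
                    left = st_pgs;         /* only st_pgs of each stride are read */

            return left + stride_len * st_pgs + cont_len;
    }

    int main(void)
    {
            /* 16-page stride period starting at page 100, 4 pages read
             * per period, request covering pages 100..164 */
            printf("%lu\n", example_stride_pg_count(100, 16, 4, 100, 64));
            return 0;
    }

Four whole periods contribute 16 read pages and the partial fifth contributes one (page 164), so this prints 17.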
+
+static int ria_page_count(struct ra_io_arg *ria)
+{
+ __u64 length = ria->ria_end >= ria->ria_start ?
+ ria->ria_end - ria->ria_start + 1 : 0;
+
+ return stride_pg_count(ria->ria_stoff, ria->ria_length,
+ ria->ria_pages, ria->ria_start,
+ length);
+}
+
+/* Check whether the index is in the defined ra-window */
+static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria)
+{
+ /* If ria_length == ria_pages, this is non-stride I/O mode and
+ * idx is always inside the read-ahead window in that case.
+ * For stride I/O mode, just check whether idx falls inside
+ * the stride's ria_pages. */
+ return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
+ (idx - ria->ria_stoff) % ria->ria_length < ria->ria_pages;
+}
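
Also not part of the patch — a quick check of the membership test above for a hypothetical window with ria_stoff = 0, ria_length = 16, ria_pages = 4:

    #include <stdio.h>

    int main(void)
    {
            unsigned long stoff = 0, length = 16, pages = 4, idx;

            for (idx = 0; idx < 36; idx++)
                    if ((idx - stoff) % length < pages)
                            printf("%lu ", idx);   /* 0-3, 16-19, 32-35 */
            printf("\n");
            return 0;
    }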
+
+static int ll_read_ahead_pages(struct obd_export *exp,
+ struct obd_io_group *oig,
+ struct ra_io_arg *ria,
+ unsigned long *reserved_pages,
+ struct address_space *mapping,
+ unsigned long *ra_end)
+{
+ int rc, count = 0, stride_ria;
+ unsigned long page_idx;
+
+ LASSERT(ria != NULL);
+ RIA_DEBUG(ria);
+
+ stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
+ for (page_idx = ria->ria_start; page_idx <= ria->ria_end &&
+ *reserved_pages > 0; page_idx++) {
+ if (ras_inside_ra_window(page_idx, ria)) {
+ /* If the page is inside the read-ahead window */
+ rc = ll_read_ahead_page(exp, oig, page_idx, mapping);
+ if (rc == 1) {
+ (*reserved_pages)--;
+ count++;
+ } else if (rc == -ENOLCK)
+ break;
+ } else if (stride_ria) {
+ /* If the page is not in the read-ahead window but we
+ * are in stride I/O mode, check whether the rest of
+ * the stride gap should be skipped */
+ pgoff_t offset;
+ /* FIXME: This assertion is only valid for forward
+ * read-ahead; it will be fixed when backward
+ * read-ahead is implemented */
+ LASSERTF(page_idx > ria->ria_stoff, "page %lu is in the"
+ " gap of the ra window, it must be bigger than"
+ " stride offset %lu\n", page_idx, ria->ria_stoff);
+
+ offset = page_idx - ria->ria_stoff;
+ offset = offset % (ria->ria_length);
+ if (offset >= ria->ria_pages) {
+ /* land one page short of the next stride chunk;
+ * the for loop increment then steps onto its
+ * first page */
+ page_idx += ria->ria_length - offset - 1;
+ CDEBUG(D_READA, "i %lu skip %lu\n", page_idx,
+ ria->ria_length - offset - 1);
+ continue;
+ }
+ }
+ }
+ *ra_end = page_idx;
+ return count;
+}
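
Not part of the patch — the gap-skip arithmetic above with concrete, hypothetical numbers. Because the for loop still executes page_idx++ after the continue, the skip must land one page before the next stride chunk:

    #include <stdio.h>

    int main(void)
    {
            unsigned long stoff = 0, length = 16, pages = 4; /* stride window */
            unsigned long page_idx = 4;                      /* first gap page */
            unsigned long offset = (page_idx - stoff) % length;

            if (offset >= pages)
                    page_idx += length - offset - 1;         /* lands on 15 */
            page_idx++;                                      /* loop increment */
            printf("next page considered: %lu\n", page_idx); /* 16 */
            return 0;
    }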
+
+static int ll_readahead(struct ll_readahead_state *ras,
+ struct obd_export *exp, struct address_space *mapping,
+ struct obd_io_group *oig, int flags)
+{
+ unsigned long start = 0, end = 0, reserved;
+ unsigned long ra_end, len;
+ struct inode *inode;
+ struct lov_stripe_md *lsm;
+ struct ll_ra_read *bead;
+ struct ost_lvb lvb;
+ struct ra_io_arg ria = { 0 };
+ int ret = 0;
+ __u64 kms;
+ ENTRY;
+
+ inode = mapping->host;
+ lsm = ll_i2info(inode)->lli_smd;
+
+ lov_stripe_lock(lsm);
+ inode_init_lvb(inode, &lvb);
+ obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
+ kms = lvb.lvb_size;
+ lov_stripe_unlock(lsm);
+ if (kms == 0) {
+ ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
+ RETURN(0);
+ }
+
+ spin_lock(&ras->ras_lock);
+ bead = ll_ra_read_get_locked(ras);
+ /* Enlarge the RA window to encompass the full read */
+ if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
+ bead->lrr_start + bead->lrr_count) {
+ ras->ras_window_len = bead->lrr_start + bead->lrr_count -
+ ras->ras_window_start;
+ }
+ /* Reserve a part of the read-ahead window that we'll be issuing */
+ if (ras->ras_window_len) {
+ start = ras->ras_next_readahead;
+ end = ras->ras_window_start + ras->ras_window_len - 1;
+ }
+ if (end != 0) {
+ /* Truncate RA window to end of file */
+ end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
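+ /* max() guards against end + 1 wrapping to 0 */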
+ ras->ras_next_readahead = max(end, end + 1);
+ RAS_CDEBUG(ras);
+ }
+ ria.ria_start = start;
+ ria.ria_end = end;
+ /* If stride I/O mode is detected, get the stride window */
+ if (stride_io_mode(ras)) {
+ ria.ria_stoff = ras->ras_stride_offset;
+ ria.ria_length = ras->ras_stride_length;
+ ria.ria_pages = ras->ras_stride_pages;
+ }
+ spin_unlock(&ras->ras_lock);
+
+ if (end == 0) {
+ ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
+ RETURN(0);
+ }
+ len = ria_page_count(&ria);
+ if (len == 0)
+ RETURN(0);
+
+ reserved = ll_ra_count_get(ll_i2sbi(inode), len);
+
+ if (reserved < len)
+ ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
+
+ CDEBUG(D_READA, "reserved page %lu \n", reserved);
+
+ ret = ll_read_ahead_pages(exp, oig, &ria, &reserved, mapping, &ra_end);
+
+ LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
+ if (reserved != 0)
+ ll_ra_count_put(ll_i2sbi(inode), reserved);
+
+ if (ra_end == end + 1 && ra_end == (kms >> CFS_PAGE_SHIFT))
+ ll_ra_stats_inc(mapping, RA_STAT_EOF);
+
+ /* If we did not get to the end of the region we reserved from
+ * the RAS, go back and update the RAS so that the next
+ * read-ahead starts where we left off. We only do so if the
+ * region we failed to issue read-ahead on is still ahead
+ * of the app and behind the next index to start read-ahead from */
+ CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu \n",
+ ra_end, end, ria.ria_end);
+
+ if (ra_end != (end + 1)) {
+ spin_lock(&ras->ras_lock);
+ if (ra_end < ras->ras_next_readahead &&
+ index_in_window(ra_end, ras->ras_window_start, 0,
+ ras->ras_window_len)) {
+ ras->ras_next_readahead = ra_end;
+ RAS_CDEBUG(ras);
+ }
+ spin_unlock(&ras->ras_lock);
+ }
+
+ RETURN(ret);
+}
+
+static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
+{
+ ras->ras_window_start = index & (~(RAS_INCREASE_STEP - 1));
+}
+
+/* called with the ras_lock held or from places where it doesn't matter */
+static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
+{
+ ras->ras_last_readpage = index;
+ ras->ras_consecutive_requests = 0;
+ ras->ras_consecutive_pages = 0;
+ ras->ras_window_len = 0;
+ ras_set_start(ras, index);
+ ras->ras_next_readahead = max(ras->ras_window_start, index);
+
+ RAS_CDEBUG(ras);
+}
+
+/* called with the ras_lock held or from places where it doesn't matter */
+static void ras_stride_reset(struct ll_readahead_state *ras)
+{
+ ras->ras_consecutive_stride_requests = 0;
+ RAS_CDEBUG(ras);
+}
+
+void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
+{
+ spin_lock_init(&ras->ras_lock);
+ ras_reset(ras, 0);
+ ras->ras_requests = 0;
+ INIT_LIST_HEAD(&ras->ras_read_beads);
+}
+
+/* Check whether the read request is inside the stride window.
+ * Return 1 if it is, 0 otherwise, and also update stride_gap
+ * and stride_pages.
+ */
+static int index_in_stride_window(unsigned long index,
+ struct ll_readahead_state *ras,
+ struct inode *inode)
+{
+ int stride_gap = index - ras->ras_last_readpage - 1;
+
+ LASSERT(stride_gap != 0);
+
+ if (ras->ras_consecutive_pages == 0)
+ return 0;
+
+ /* Otherwise check the stride itself */
+ if ((ras->ras_stride_length - ras->ras_stride_pages) == stride_gap &&
+ ras->ras_consecutive_pages == ras->ras_stride_pages)
+ return 1;
+
+ if (stride_gap >= 0) {
+ /*
+ * Only set stride_pages/stride_length if
+ * it is forward reading (stride_gap >= 0)
+ */
+ ras->ras_stride_pages = ras->ras_consecutive_pages;
+ ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
+ } else {
+ /*
+ * If stride_gap < 0 (backward reading),
+ * reset stride_pages/stride_length.
+ * FIXME: backward stride I/O read.
+ */
+ ras->ras_stride_pages = 0;
+ ras->ras_stride_length = 0;
+ }
+ RAS_CDEBUG(ras);
+
+ return 0;
+}
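
Not part of the patch — the detection bookkeeping above with concrete numbers (userspace sketch): an app reads pages 0-3, then jumps to page 16.

    #include <stdio.h>

    int main(void)
    {
            long last_readpage = 3, consecutive_pages = 4, index = 16;
            long stride_gap = index - last_readpage - 1;          /* 12 */
            long stride_pages = consecutive_pages;                /* 4 */
            long stride_length = stride_gap + consecutive_pages;  /* 16 */

            printf("gap %ld, stride_pages %ld, stride_length %ld\n",
                   stride_gap, stride_pages, stride_length);
            return 0;
    }

On the next jump that matches, the (ras_stride_length - ras_stride_pages) == stride_gap test above holds with ras_consecutive_pages == ras_stride_pages, and the function returns 1.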
+
+static unsigned long
+stride_page_count(struct ll_readahead_state *ras, unsigned long len)
+{
+ return stride_pg_count(ras->ras_stride_offset, ras->ras_stride_length,
+ ras->ras_stride_pages, ras->ras_stride_offset,
+ len);
+}
+
+/* Increase the stride read-ahead window by inc_len according to
+ * the stride I/O pattern */
+static void ras_stride_increase_window(struct ll_readahead_state *ras,
+ struct ll_ra_info *ra,
+ unsigned long inc_len)
+{
+ unsigned long left, step, window_len;
+ unsigned long stride_len;
+
+ LASSERT(ras->ras_stride_length > 0);
+
+ stride_len = ras->ras_window_start + ras->ras_window_len -
+ ras->ras_stride_offset;
+
+ LASSERTF(stride_len >= 0, "window_start %lu, window_len %lu"
+ " stride_offset %lu\n", ras->ras_window_start,
+ ras->ras_window_len, ras->ras_stride_offset);
+
+ left = stride_len % ras->ras_stride_length;
+
+ window_len = ras->ras_window_len - left;
+
+ if (left < ras->ras_stride_pages)
+ left += inc_len;
+ else
+ left = ras->ras_stride_pages + inc_len;
+
+ LASSERT(ras->ras_stride_pages != 0);
+
+ step = left / ras->ras_stride_pages;
+ left %= ras->ras_stride_pages;
+
+ window_len += step * ras->ras_stride_length + left;
+
+ if (stride_page_count(ras, window_len) <= ra->ra_max_pages)
+ ras->ras_window_len = window_len;
+
+ RAS_CDEBUG(ras);
+}
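
Not part of the patch — the window-growth arithmetic above with concrete (illustrative) numbers, as a userspace sketch:

    #include <stdio.h>

    int main(void)
    {
            unsigned long window_start = 0, window_len = 20, stride_offset = 0;
            unsigned long stride_length = 16, stride_pages = 4, inc_len = 256;
            unsigned long stride_len, left, step;

            stride_len = window_start + window_len - stride_offset; /* 20 */
            left = stride_len % stride_length;                      /* 4 */
            window_len -= left;                                     /* 16 */

            if (left < stride_pages)
                    left += inc_len;
            else
                    left = stride_pages + inc_len;                  /* 260 */

            step = left / stride_pages;                             /* 65 */
            left %= stride_pages;                                   /* 0 */
            window_len += step * stride_length + left;              /* 1056 */

            printf("new window_len %lu\n", window_len);
            return 0;
    }

The window grows from 20 to 1056 pages; since only 4 of every 16 pages in it are actually read, that is roughly inc_len = 256 extra read pages, which stride_page_count() then checks against ra_max_pages before committing.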
+
+/* Set stride I/O read-ahead window start offset */
+static void ras_set_stride_offset(struct ll_readahead_state *ras)
+{
+ unsigned long window_len = ras->ras_next_readahead -
+ ras->ras_window_start;
+ unsigned long left;
+
+ LASSERT(ras->ras_stride_length != 0);
+
+ left = window_len % ras->ras_stride_length;
+
+ ras->ras_stride_offset = ras->ras_next_readahead - left;
+
+ RAS_CDEBUG(ras);
+}
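
As a worked example for ras_set_stride_offset(): with ras_window_start = 0, ras_next_readahead = 20 and ras_stride_length = 16, window_len = 20 and left = 20 % 16 = 4, so ras_stride_offset becomes 20 - 4 = 16, the start of the stride chunk that next_readahead falls in.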
+
+static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
+ struct ll_readahead_state *ras, unsigned long index,
+ unsigned hit)
+{
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
+ int zero = 0, stride_zero = 0, stride_detect = 0, ra_miss = 0;
+ ENTRY;
+
+ spin_lock(&ras->ras_lock);
+
+ ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
+
+ /* Reset the read-ahead window in two cases. First, when the app
+ * seeks or reads to some other part of the file. Second, when we
+ * get a read-ahead miss on a page we think we have previously
+ * issued. The latter can be a symptom of there being so many
+ * read-ahead pages that the VM reclaims them before we get to them. */
+ if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
+ zero = 1;
+ ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE);
+ /* check whether it is in stride I/O mode */
+ if (!index_in_stride_window(index, ras, inode))
+ stride_zero = 1;
+ } else if (!hit && ras->ras_window_len &&
+ index < ras->ras_next_readahead &&
+ index_in_window(index, ras->ras_window_start, 0,
+ ras->ras_window_len)) {
+ zero = 1;
+ ra_miss = 1;
+ /* If we hit a read-ahead miss and stride I/O has not
+ * been detected yet, reset the stride state so the
+ * whole stride I/O mode is re-detected, to avoid
+ * complications */
+ if (!stride_io_mode(ras))
+ stride_zero = 1;
+ ll_ra_stats_inc_sbi(sbi, RA_STAT_MISS_IN_WINDOW);
+ }
+
+ /* On the second access to a file smaller than the tunable
+ * ra_max_read_ahead_whole_pages trigger RA on all pages in the
+ * file up to ra_max_pages. This is simply a best effort and
+ * only occurs once per open file. Normal RA behavior is reverted
+ * to for subsequent IO. The mmap case does not increment
+ * ras_requests and thus can never trigger this behavior. */
+ if (ras->ras_requests == 2 && !ras->ras_request_index) {
+ __u64 kms_pages;
+
+ kms_pages = (i_size_read(inode) + CFS_PAGE_SIZE - 1) >>
+ CFS_PAGE_SHIFT;
+
+ CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
+ ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages);
+
+ if (kms_pages &&
+ kms_pages <= ra->ra_max_read_ahead_whole_pages) {
+ ras->ras_window_start = 0;
+ ras->ras_last_readpage = 0;
+ ras->ras_next_readahead = 0;
+ ras->ras_window_len = min(ra->ra_max_pages,
+ ra->ra_max_read_ahead_whole_pages);
+ GOTO(out_unlock, 0);
+ }
+ }
+
+ if (zero) {
+ /* For a discontiguous read, check
+ * whether it is in stride I/O mode */
+ if (stride_zero) {
+ ras_reset(ras, index);
+ ras->ras_consecutive_pages++;
+ ras_stride_reset(ras);
+ RAS_CDEBUG(ras);
+ GOTO(out_unlock, 0);
+ } else {
+ /* The read is still inside the stride window, or
+ * it hit a read-ahead miss */
+
+ /* A ra-window miss probably means VM pressure
+ * reclaimed some read-ahead pages, so the ra-window
+ * length is neither increased nor reset, to avoid
+ * re-detecting the stride I/O mode */
+ ras->ras_consecutive_requests = 0;
+ if (!ra_miss) {
+ ras->ras_consecutive_pages = 0;
+ if (++ras->ras_consecutive_stride_requests > 1)
+ stride_detect = 1;
+ }
+ RAS_CDEBUG(ras);
+ }
+ } else if (ras->ras_consecutive_stride_requests > 1) {
+ /* If this is a contiguous read but we are currently in
+ * stride I/O mode, check whether the stride step is still
+ * valid; if not, reset the stride ra window */
+ if (ras->ras_consecutive_pages + 1 > ras->ras_stride_pages)
+ ras_stride_reset(ras);
+ }
+
+ ras->ras_last_readpage = index;
+ ras->ras_consecutive_pages++;
+ ras_set_start(ras, index);
+ ras->ras_next_readahead = max(ras->ras_window_start,
+ ras->ras_next_readahead);
+ RAS_CDEBUG(ras);
+
+ /* Trigger RA in the mmap case where ras_consecutive_requests
+ * is not incremented and thus can't be used to trigger RA */
+ if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
+ ras->ras_window_len = RAS_INCREASE_STEP;
+ GOTO(out_unlock, 0);
+ }
+
+ /* Initially set the stride window offset based on next_readahead */
+ if (ras->ras_consecutive_stride_requests == 2 && stride_detect)
+ ras_set_stride_offset(ras);
+
+ /* The initial ras_window_len is set to the request size. To avoid
+ * uselessly reading and discarding pages for random IO the window is
+ * only increased once per consecutive request received. */
+ if ((ras->ras_consecutive_requests > 1 &&
+ !ras->ras_request_index) || stride_detect) {
+ if (stride_io_mode(ras))
+ ras_stride_increase_window(ras, ra, RAS_INCREASE_STEP);
+ else
+ ras->ras_window_len = min(ras->ras_window_len +
+ RAS_INCREASE_STEP,
+ ra->ra_max_pages);
+ }
+ EXIT;
+out_unlock:
+ RAS_CDEBUG(ras);
+ ras->ras_request_index++;
+ spin_unlock(&ras->ras_lock);
+ return;
+}
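
Taken together, and assuming index_in_window(index, start, 8, 8) (defined elsewhere) tests an 8-page neighborhood of the last read page, the detection sequence for the strided reader sketched earlier (4 pages every 16) goes roughly: the jump from page 3 to page 16 is a distant read, so the window is reset, but index_in_stride_window() has already recorded ras_stride_pages = 4 and ras_stride_length = 16. The next two jumps (to pages 32 and 48) match the recorded stride, raising ras_consecutive_stride_requests to 2; that sets stride_detect, so ras_set_stride_offset() anchors the stride window and ras_stride_increase_window() grows it by RAS_INCREASE_STEP.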
+
+int ll_writepage(struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_export *exp;
+ struct ll_async_page *llap;
+ int rc = 0;
+ ENTRY;
+
+ LASSERT(PageLocked(page));
+
+ exp = ll_i2dtexp(inode);
+ if (exp == NULL)
+ GOTO(out, rc = -EINVAL);
+
+ llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
+ if (IS_ERR(llap))
+ GOTO(out, rc = PTR_ERR(llap));
+
+ LASSERT(!llap->llap_nocache);
+ LASSERT(!PageWriteback(page));
+ set_page_writeback(page);
+
+ page_cache_get(page);
+ if (llap->llap_write_queued) {
+ LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
+ rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
+ llap->llap_cookie,
+ ASYNC_READY | ASYNC_URGENT);
+ } else {
+ rc = queue_or_sync_write(exp, inode, llap, CFS_PAGE_SIZE,
+ ASYNC_READY | ASYNC_URGENT);
+ }
+ if (rc) {
+ /* re-dirty page on error so it retries write */
+ if (PageWriteback(page))
+ end_page_writeback(page);
+
+ /* resend the page only if its IO has not started */
+ if (!PageError(page))
+ ll_redirty_page(page);
+
+ page_cache_release(page);
+ }
+out:
+ if (rc) {
+ if (!lli->lli_async_rc)
+ lli->lli_async_rc = rc;
+ unlock_page(page);
+ }
+ RETURN(rc);
+}
+
+/*
+ * For now we do our readpage the same on both 2.4 and 2.6. The kernel's
+ * read-ahead assumes it is valid to issue readpage all the way up to
+ * i_size, but our dlm locks make that not the case. We disable the
+ * kernel's read-ahead and do our own by walking ahead in the page cache
+ * checking for dlm lock coverage. The main difference between 2.4 and
+ * 2.6 is how read-ahead gets batched and issued, but we're using our own,
+ * so they look the same.
+ */
+int ll_readpage(struct file *filp, struct page *page)
+{
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
+ struct inode *inode = page->mapping->host;
+ struct obd_export *exp;
+ struct ll_async_page *llap;
+ struct obd_io_group *oig = NULL;
+ struct lustre_handle *lockh = NULL;
+ int rc;
+ ENTRY;
+
+ LASSERT(PageLocked(page));
+ LASSERT(!PageUptodate(page));
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset=%Lu=%#Lx\n",
+ inode->i_ino, inode->i_generation, inode,
+ (((loff_t)page->index) << CFS_PAGE_SHIFT),
+ (((loff_t)page->index) << CFS_PAGE_SHIFT));
+ LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);
+
+ if (!ll_i2info(inode)->lli_smd) {
+ /* File with no objects - one big hole */
+ /* We only use ll_truncate_complete_page() here because
+ * remove_from_page_cache() is not exported; we then bring
+ * the page back up to date ourselves. */
+ ll_truncate_complete_page(page);
+ clear_page(kmap(page));
+ kunmap(page);
+ SetPageUptodate(page);
+ unlock_page(page);
+ RETURN(0);
+ }
+
+ rc = oig_init(&oig);
+ if (rc < 0)
+ GOTO(out, rc);
+
+ exp = ll_i2dtexp(inode);
+ if (exp == NULL)
+ GOTO(out, rc = -EINVAL);
+
+ if (fd->fd_flags & LL_FILE_GROUP_LOCKED)
+ lockh = &fd->fd_cwlockh;
+
+ llap = llap_from_page_with_lockh(page, LLAP_ORIGIN_READPAGE, lockh);
+ if (IS_ERR(llap)) {
+ if (PTR_ERR(llap) == -ENOLCK) {
+ CWARN("ino %lu page %lu (%llu) not covered by "
+ "a lock (mmap?). check debug logs.\n",
+ inode->i_ino, page->index,
+ (long long)page->index << PAGE_CACHE_SHIFT);
+ }
+ GOTO(out, rc = PTR_ERR(llap));
+ }
+
+ if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
+ ras_update(ll_i2sbi(inode), inode, &fd->fd_ras, page->index,
+ llap->llap_defer_uptodate);
+
+ if (llap->llap_defer_uptodate) {
+ /* This is the callpath if we got the page from a readahead */
+ llap->llap_ra_used = 1;
+ rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
+ fd->fd_flags);
+ if (rc > 0)
+ obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
+ NULL, oig);
+ LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
+ SetPageUptodate(page);
+ unlock_page(page);
+ GOTO(out_oig, rc = 0);
+ }
+
+ rc = ll_issue_page_read(exp, llap, oig, 0);
+ if (rc)
+ GOTO(out, rc);
+
+ LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
+ /* We have just requested the actual page we want, see if we can tack
+ * on some readahead to that page's RPC before it is sent. */
+ if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
+ ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
+ fd->fd_flags);
+
+ rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
+
+out:
+ if (rc)
+ unlock_page(page);
+out_oig:
+ if (oig != NULL)
+ oig_release(oig);
+ RETURN(rc);
+}
+
+static void ll_file_put_pages(struct page **pages, int numpages)
+{
+ int i;
+ struct page **pp;
+ ENTRY;
+
+ for (i = 0, pp = pages; i < numpages; i++, pp++) {
+ if (*pp) {
+ LL_CDEBUG_PAGE(D_PAGE, (*pp), "free\n");
+ __ll_put_llap(*pp);
+ if (page_private(*pp))
+ CERROR("the llap wasn't freed\n");
+ (*pp)->mapping = NULL;
+ if (page_count(*pp) != 1)
+ CERROR("page %p, flags %#lx, count %i, private %p\n",
+ (*pp), (unsigned long)(*pp)->flags, page_count(*pp),
+ (void*)page_private(*pp));
+ __free_pages(*pp, 0);
+ }
+ }
+ OBD_FREE(pages, numpages * sizeof(struct page*));
+ EXIT;
+}
+
+static struct page **ll_file_prepare_pages(int numpages, struct inode *inode,
+ unsigned long first)
+{
+ struct page **pages;
+ int i;
+ int rc = 0;
+ ENTRY;
+
+ OBD_ALLOC(pages, sizeof(struct page *) * numpages);
+ if (pages == NULL)
+ RETURN(ERR_PTR(-ENOMEM));
+ for (i = 0; i < numpages; i++) {
+ struct page *page;
+ struct ll_async_page *llap;
+
+ page = alloc_pages(GFP_HIGHUSER, 0);
+ if (page == NULL)
+ GOTO(err, rc = -ENOMEM);
+ pages[i] = page;
+ /* llap_from_page needs page index and mapping to be set */
+ page->index = first++;
+ page->mapping = inode->i_mapping;
+ llap = llap_from_page(page, LLAP_ORIGIN_LOCKLESS_IO);
+ if (IS_ERR(llap))
+ GOTO(err, rc = PTR_ERR(llap));
+ llap->llap_lockless_io_page = 1;
+ }
+ RETURN(pages);
+err:
+ ll_file_put_pages(pages, numpages);
+ RETURN(ERR_PTR(rc));
+}
+
+static ssize_t ll_file_copy_pages(struct page **pages, int numpages,
+ char *buf, loff_t pos, size_t count, int rw)
+{
+ ssize_t amount = 0;
+ int i;
+ int updatechecksum = ll_i2sbi(pages[0]->mapping->host)->ll_flags &
+ LL_SBI_CHECKSUM;
+ ENTRY;
+
+ for (i = 0; i < numpages; i++) {
+ unsigned offset, bytes, left;
+ char *vaddr;
+
+ vaddr = kmap(pages[i]);
+ offset = pos & (CFS_PAGE_SIZE - 1);
+ bytes = min_t(unsigned, CFS_PAGE_SIZE - offset, count);
+ LL_CDEBUG_PAGE(D_PAGE, pages[i], "op = %s, addr = %p, "
+ "buf = %p, bytes = %u\n",
+ (rw == WRITE) ? "CFU" : "CTU",
+ vaddr + offset, buf, bytes);
+ if (rw == WRITE) {
+ left = copy_from_user(vaddr + offset, buf, bytes);
+ if (updatechecksum) {
+ struct ll_async_page *llap;
+
+ llap = llap_cast_private(pages[i]);
+ llap->llap_checksum = crc32_le(0, vaddr,
+ CFS_PAGE_SIZE);
+ }
+ } else {
+ left = copy_to_user(buf, vaddr + offset, bytes);
+ }
+ kunmap(pages[i]);
+ amount += bytes;
+ if (left) {
+ amount -= left;
+ break;
+ }
+ buf += bytes;
+ count -= bytes;
+ pos += bytes;
+ }
+ if (amount == 0)
+ RETURN(-EFAULT);
+ RETURN(amount);
+}
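
Not part of the patch — the per-page offset/bytes arithmetic of the copy loop above for a sample request, assuming 4 KiB pages (userspace sketch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_size = 4096;
            unsigned long pos = 5000, count = 10000;
            int i;

            for (i = 0; count > 0; i++) {
                    unsigned long offset = pos & (page_size - 1);
                    unsigned long bytes = page_size - offset < count ?
                                          page_size - offset : count;

                    printf("page %d: offset %lu, bytes %lu\n", i, offset, bytes);
                    pos += bytes;
                    count -= bytes;
            }
            return 0;
    }

A request at pos = 5000 for 10000 bytes thus touches three pages: 3192 bytes at offset 904, then a full 4096, then 2712.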
+
+static int ll_file_oig_pages(struct inode * inode, struct page **pages,
+ int numpages, loff_t pos, size_t count, int rw)
+{
+ struct obd_io_group *oig;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_export *exp;
+ loff_t org_pos = pos;
+ obd_flag brw_flags;
+ int rc;
+ int i;
+ ENTRY;
+
+ exp = ll_i2dtexp(inode);
+ if (exp == NULL)
+ RETURN(-EINVAL);
+ rc = oig_init(&oig);
+ if (rc)
+ RETURN(rc);
+ brw_flags = OBD_BRW_SRVLOCK;
+ if (capable(CAP_SYS_RESOURCE))
+ brw_flags |= OBD_BRW_NOQUOTA;
+
+ for (i = 0; i < numpages; i++) {
+ struct ll_async_page *llap;
+ unsigned from, bytes;
+
+ from = pos & (CFS_PAGE_SIZE - 1);
+ bytes = min_t(unsigned, CFS_PAGE_SIZE - from,
+ count - pos + org_pos);
+ llap = llap_cast_private(pages[i]);
+ LASSERT(llap);
+
+ lock_page(pages[i]);
+
+ LL_CDEBUG_PAGE(D_PAGE, pages[i], "offset "LPU64","
+ " from %u, bytes = %u\n",
+ (__u64)pos, from, bytes);
+ LASSERTF(pos >> CFS_PAGE_SHIFT == pages[i]->index,
+ "wrong page index %lu (%lu)\n",
+ pages[i]->index,
+ (unsigned long)(pos >> CFS_PAGE_SHIFT));
+ rc = obd_queue_group_io(exp, lli->lli_smd, NULL, oig,
+ llap->llap_cookie,
+ (rw == WRITE) ?
+ OBD_BRW_WRITE:OBD_BRW_READ,
+ from, bytes, brw_flags,
+ ASYNC_READY | ASYNC_URGENT |
+ ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
+ if (rc) {
+ i++;
+ GOTO(out, rc);
+ }
+ pos += bytes;
+ }
+ rc = obd_trigger_group_io(exp, lli->lli_smd, NULL, oig);
+ if (rc)
+ GOTO(out, rc);
+ rc = oig_wait(oig);
+out:
+ while (--i >= 0)
+ unlock_page(pages[i]);
+ oig_release(oig);
+ RETURN(rc);
+}
+
+ssize_t ll_file_lockless_io(struct file *file, char *buf, size_t count,
+ loff_t *ppos, int rw)
+{
+ loff_t pos;
+ struct inode *inode = file->f_dentry->d_inode;
+ ssize_t rc = 0;
+ int max_pages;
+ size_t amount = 0;
+ unsigned long first, last;
+ ENTRY;
+
+ if (rw == READ) {
+ loff_t isize;
+
+ ll_inode_size_lock(inode, 0);
+ isize = i_size_read(inode);
+ ll_inode_size_unlock(inode, 0);
+ if (*ppos >= isize)
+ GOTO(out, rc = 0);
+ if (*ppos + count >= isize)
+ count -= *ppos + count - isize;
+ if (count == 0)
+ GOTO(out, rc);
+ } else {
+ rc = generic_write_checks(file, ppos, &count, 0);
+ if (rc)
+ GOTO(out, rc);
+ rc = ll_remove_suid(file->f_dentry, file->f_vfsmnt);
+ if (rc)
+ GOTO(out, rc);
+ }
+ pos = *ppos;
+ first = pos >> CFS_PAGE_SHIFT;
+ last = (pos + count - 1) >> CFS_PAGE_SHIFT;
+ max_pages = PTLRPC_MAX_BRW_PAGES *
+ ll_i2info(inode)->lli_smd->lsm_stripe_count;
+ CDEBUG(D_INFO, "%u, stripe_count = %u\n",
+ PTLRPC_MAX_BRW_PAGES /* max_pages_per_rpc */,
+ ll_i2info(inode)->lli_smd->lsm_stripe_count);
+
+ while (first <= last && rc >= 0) {
+ int pages_for_io;
+ struct page **pages;
+ size_t bytes = count - amount;
+
+ pages_for_io = min_t(int, last - first + 1, max_pages);
+ pages = ll_file_prepare_pages(pages_for_io, inode, first);
+ if (IS_ERR(pages)) {
+ rc = PTR_ERR(pages);
+ break;
+ }
+ if (rw == WRITE) {
+ rc = ll_file_copy_pages(pages, pages_for_io, buf,
+ pos + amount, bytes, rw);
+ if (rc < 0)
+ GOTO(put_pages, rc);
+ bytes = rc;
+ }
+ rc = ll_file_oig_pages(inode, pages, pages_for_io,
+ pos + amount, bytes, rw);
+ if (rc)
+ GOTO(put_pages, rc);
+ if (rw == READ) {
+ rc = ll_file_copy_pages(pages, pages_for_io, buf,
+ pos + amount, bytes, rw);
+ if (rc < 0)
+ GOTO(put_pages, rc);
+ bytes = rc;
+ }
+ amount += bytes;
+ buf += bytes;
+put_pages:
+ ll_file_put_pages(pages, pages_for_io);
+ first += pages_for_io;
+ /* a short read/write check */
+ if (pos + amount < ((loff_t)first << CFS_PAGE_SHIFT))
+ break;
+ }
+ /* NOTE: don't update i_size and KMS in the absence of LDLM locks,
+ * even if the write makes the file larger */
+ file_accessed(file);
+ if (rw == READ && amount < count && rc == 0) {
+ unsigned long not_cleared;
+
+ not_cleared = clear_user(buf, count - amount);
+ amount = count - not_cleared;
+ if (not_cleared)
+ rc = -EFAULT;
+ }
+ if (amount > 0) {
+ lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
+ (rw == WRITE) ?
+ LPROC_LL_LOCKLESS_WRITE :
+ LPROC_LL_LOCKLESS_READ,
+ (long)amount);
+ *ppos += amount;
+ RETURN(amount);
+ }
+out:
+ RETURN(rc);
+}
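
Not part of the patch — how the page span and RPC batch bound above work out for a sample request. PTLRPC_MAX_BRW_PAGES = 256 and a stripe count of 2 are illustrative assumptions, as is the 4 KiB page size:

    #include <stdio.h>

    int main(void)
    {
            unsigned page_shift = 12;                             /* 4 KiB pages */
            long long pos = 5000, count = 10000;
            unsigned long first = pos >> page_shift;              /* 1 */
            unsigned long last = (pos + count - 1) >> page_shift; /* 3 */
            int max_pages = 256 * 2; /* PTLRPC_MAX_BRW_PAGES * lsm_stripe_count */

            printf("first %lu last %lu (%lu pages), io batch <= %d pages\n",
                   first, last, last - first + 1, max_pages);
            return 0;
    }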