* LRU pages, otherwise, it could cause deadlock.
*/
pages = min(sbi->ll_cache->ccc_lru_max >> 2, pages);
+ /**
+ * if this happens, we have reserved more pages than needed,
+ * which will make us leak @ra_cur_pages, because
+ * ll_ra_count_put() actually frees @pages.
+ */
+ if (WARN_ON_ONCE(pages_min > pages))
+ pages_min = pages;
/*
* If read-ahead pages left are less than 1M, do not do read-ahead,
int exceed = 0;
int result;
struct iov_iter iter;
+ pgoff_t page_offset;
ENTRY;
if (!vio->vui_ra_valid) {
vio->vui_ra_valid = true;
vio->vui_ra_start_idx = cl_index(obj, pos);
- vio->vui_ra_pages = cl_index(obj, tot + PAGE_SIZE - 1);
- /* If both start and end are unaligned, we read one more page
- * than the index math suggests. */
- if ((pos & ~PAGE_MASK) != 0 && ((pos + tot) & ~PAGE_MASK) != 0)
+ vio->vui_ra_pages = 0;
+ page_offset = pos & ~PAGE_MASK;
+ if (page_offset) {
vio->vui_ra_pages++;
+ if (tot > PAGE_SIZE - page_offset)
+ tot -= (PAGE_SIZE - page_offset);
+ else
+ tot = 0;
+ }
+ vio->vui_ra_pages += (tot + PAGE_SIZE - 1) >> PAGE_SHIFT;
CDEBUG(D_READA, "tot %zu, ra_start %lu, ra_count %lu\n",
- tot, vio->vui_ra_start_idx, vio->vui_ra_pages);
+ vio->vui_tot_count, vio->vui_ra_start_idx,
+ vio->vui_ra_pages);
}
/* BUG: 5972 */
dd if=$file.$i of=/dev/null bs=$bsize count=$size_KB 2>/dev/null
done
+ $LCTL get_param llite.*.max_cached_mb
+ $LCTL get_param llite.*.read_ahead_stats
local miss=$($LCTL get_param -n llite.*.read_ahead_stats |
get_named_value 'misses' | calc_total)