+/* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
+ *
+ * In lustre an llap is attached to every page, linked off @sbi. We add a
+ * dummy llap to the list so we don't lose our place while walking it.
+ * If llaps in the list are being moved they will only move to the end of
+ * the LRU, and we aren't terribly interested in those pages here (we start
+ * at the beginning of the list, where the least-used llaps are).
+ */
+int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
+{
+ struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
+ unsigned long total, want, count = 0;
+
+ total = sbi->ll_async_page_count;
+
+ /* There can be a large number of llaps (600k or more on a large
+ * memory machine), so the VM's default 1/6 shrink ratio is likely
+ * too much. Since we are freeing the pages as well as the llaps,
+ * we don't necessarily want to shrink that much. Limit to 40MB of
+ * pages + llaps per call. */
+ if (shrink_fraction == 0)
+ want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
+ else
+ want = (total + shrink_fraction - 1) / shrink_fraction;
+
+ if (want > 40 << (20 - CFS_PAGE_SHIFT))
+ want = 40 << (20 - CFS_PAGE_SHIFT);
+
+ CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
+ want, total, shrink_fraction);
+
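+ /* A dummy llap parked in the LRU acts as a cursor: it marks our
+ * position so the walk below can resume safely after ll_lock is
+ * dropped and retaken. */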
+ spin_lock(&sbi->ll_lock);
+ list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);
+
+ /* total is unsigned, so test before decrementing so it can't wrap */
+ while (total > 0 && count < want) {
+ struct page *page;
+ int keep;
+
+ --total;
+
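+ /* Drop the spinlock periodically so a long LRU walk doesn't
+ * hog the CPU. */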
+ if (unlikely(need_resched())) {
+ spin_unlock(&sbi->ll_lock);
+ cond_resched();
+ spin_lock(&sbi->ll_lock);
+ }
+
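+ /* Advance the cursor: find the llap after our dummy entry, then
+ * unhook the dummy so it can be re-inserted past that llap. */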
+ llap = llite_pglist_next_llap(sbi, &dummy_llap.llap_pglist_item);
+ list_del_init(&dummy_llap.llap_pglist_item);
+ if (llap == NULL)
+ break;
+
+ page = llap->llap_page;
+ LASSERT(page != NULL);
+
+ list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);
+
+ /* A locked page needs or is undergoing IO; skip it */
+ if (TryLockPage(page)) {
+ LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
+ continue;
+ }
+
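+ /* Keep pages that are queued for write, dirty, under writeback,
+ * or not yet uptodate. The exception is readahead pages, which
+ * can be dropped before they become uptodate. */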
+ keep = (llap->llap_write_queued || PageDirty(page) ||
+ PageWriteback(page) || (!PageUptodate(page) &&
+ llap->llap_origin != LLAP_ORIGIN_READAHEAD));
+
+ LL_CDEBUG_PAGE(D_PAGE, page, "%s LRU page: %s%s%s%s%s origin %s\n",
+ keep ? "keep" : "drop",
+ llap->llap_write_queued ? "wq " : "",
+ PageDirty(page) ? "pd " : "",
+ PageUptodate(page) ? "" : "!pu ",
+ PageWriteback(page) ? "wb " : "",
+ llap->llap_defer_uptodate ? "" : "!du",
+ llap_origins[llap->llap_origin]);
+
+ /* If the page is dirty or undergoing IO, don't discard it */
+ if (keep) {
+ unlock_page(page);
+ continue;
+ }
+
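+ /* Pin the page so it can't be freed out from under us while
+ * ll_lock is dropped. */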
+ page_cache_get(page);
+ spin_unlock(&sbi->ll_lock);
+
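+ /* A NULL mapping means the page was already truncated. Otherwise
+ * tear down any user mappings over this page before checking
+ * whether it can be dropped. */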
+ if (page->mapping != NULL) {
+ ll_teardown_mmaps(page->mapping,
+ (__u64)page->index << CFS_PAGE_SHIFT,
+ ((__u64)page->index << CFS_PAGE_SHIFT)|
+ ~CFS_PAGE_MASK);
+ if (!PageDirty(page) && !page_mapped(page)) {
+ ll_ra_accounting(llap, page->mapping);
+ ll_truncate_complete_page(page);
+ ++count;
+ } else {
+ LL_CDEBUG_PAGE(D_PAGE, page, "Not dropping page"
+ " because it is "
+ "%s\n",
+ PageDirty(page)?
+ "dirty":"mapped");
+ }
+ }
+ unlock_page(page);
+ page_cache_release(page);
+
+ spin_lock(&sbi->ll_lock);
+ }
+ list_del(&dummy_llap.llap_pglist_item);
+ spin_unlock(&sbi->ll_lock);
+
+ CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
+ count, want, total);
+
+ return count;
+}
+