/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Lustre Lite I/O page cache routines shared by different kernel revs
 *
 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <asm/uaccess.h>
#include <asm/segment.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/lustre_mds.h>
#include <linux/lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
             pos = n, n = pos->prev)
#endif
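
/*
 * Illustrative sketch (not part of the original source): the _prev_safe
 * variant caches pos->prev in "n" before the loop body runs, so an entry
 * can be deleted mid-walk without breaking the traversal, e.g.:
 *
 *      struct list_head *pos, *n;
 *      list_for_each_prev_safe(pos, n, &sbi->ll_pglist) {
 *              struct ll_async_page *llap =
 *                      list_entry(pos, struct ll_async_page,
 *                                 llap_pglist_item);
 *              if (want_to_drop(llap))   // hypothetical predicate
 *                      list_del_init(&llap->llap_pglist_item);
 *      }
 */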

kmem_cache_t *ll_async_page_slab = NULL;
size_t ll_async_page_slab_size = 0;

/* SYNCHRONOUS I/O to object storage for an inode */
static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
                  struct page *page, int flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct brw_page pg;
        int rc;
        ENTRY;

        pg.pg = page;
        pg.off = ((obd_off)page->index) << PAGE_SHIFT;

        if (cmd == OBD_BRW_WRITE && (pg.off + PAGE_SIZE > inode->i_size))
                pg.count = inode->i_size % PAGE_SIZE;
        else
                pg.count = PAGE_SIZE;

        LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
                       cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
                       inode->i_ino, pg.off, pg.off);
        if (pg.count == 0) {
                CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
                       LPU64"\n",
                       inode->i_ino, inode, inode->i_size, page->mapping->host,
                       page->mapping->host->i_size, page->index, pg.off);
        }

        pg.flag = flags;

        if (cmd == OBD_BRW_WRITE)
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_BRW_WRITE, pg.count);
        else
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_BRW_READ, pg.count);
        rc = obd_brw(cmd, ll_i2obdexp(inode), oa, lsm, 1, &pg, NULL);
        if (rc == 0)
                obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
        else if (rc != -EIO)
                CERROR("error from obd_brw: rc = %d\n", rc);
        RETURN(rc);
}

__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);

/* this isn't where truncate starts.  roughly:
 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate
 * we grab the lock back in setattr_raw to avoid races. */
void ll_truncate(struct inode *inode)
{
        struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
        struct obdo oa;
        int rc;
        ENTRY;

        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %llu\n", inode->i_ino,
               inode->i_generation, inode, inode->i_size);

        if (lsm == NULL) {
                CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
                       inode->i_ino);
                EXIT;
                return;
        }

        if (lov_merge_size(lsm, 0) == inode->i_size) {
                CDEBUG(D_VFSTRACE, "skipping punch for "LPX64" (size = %llu)\n",
                       lsm->lsm_object_id, inode->i_size);
        } else {
                CDEBUG(D_INFO, "calling punch for "LPX64" (new size %llu)\n",
                       lsm->lsm_object_id, inode->i_size);

                oa.o_id = lsm->lsm_object_id;
                oa.o_valid = OBD_MD_FLID;
                obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
                                OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);

                /* truncate == punch from new size to absolute end of file */
                /* NB: must call obd_punch with i_sem held!  It updates kms! */
                rc = obd_punch(ll_i2obdexp(inode), &oa, lsm, inode->i_size,
                               OBD_OBJECT_EOF, NULL);
                if (rc)
                        CERROR("obd_truncate fails (%d) ino %lu\n", rc,
                               inode->i_ino);
                else
                        obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                                      OBD_MD_FLATIME | OBD_MD_FLMTIME |
                                      OBD_MD_FLCTIME);
        }

        EXIT;
}

__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);

int ll_prepare_write(struct file *file, struct page *page, unsigned from,
                     unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
        struct brw_page pga;
        struct obdo oa;
        __u64 kms;
        int rc = 0;
        ENTRY;

        LASSERT(PageLocked(page));
        (void)llap_cast_private(page); /* assertion */

        /* Check to see if we should return -EIO right away */
        pga.pg = page;
        pga.off = offset;
        pga.count = PAGE_SIZE;
        pga.flag = 0;

        oa.o_id = lsm->lsm_object_id;
        oa.o_mode = inode->i_mode;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE | OBD_MD_FLTYPE;

        rc = obd_brw(OBD_BRW_CHECK, ll_i2obdexp(inode), &oa, lsm, 1, &pga,
                     NULL);
        if (rc)
                RETURN(rc);

        if (PageUptodate(page)) {
                LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
                RETURN(0);
        }

        /* We're completely overwriting an existing page, so _don't_ set it up
         * to date until commit_write */
        if (from == 0 && to == PAGE_SIZE) {
                LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
                POISON_PAGE(page, 0x11);
                RETURN(0);
        }

        /* If we are writing to a new page, no need to read old data.  The
         * extent locking will have updated the KMS, and for our purposes
         * here we can treat it like i_size. */
        kms = lov_merge_size(lsm, 1);
        if (kms <= offset) {
                LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
                               kms, offset);
                memset(kmap(page), 0, PAGE_SIZE);
                kunmap(page);
                GOTO(prepare_done, rc = 0);
        }

        /* XXX could be an async ocp read.. read-ahead? */
        rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
        /* bug 1598: don't clobber blksize */
        oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
        obdo_refresh_inode(inode, &oa, oa.o_valid);

        EXIT;
 prepare_done:
        if (rc == 0)
                SetPageUptodate(page);
        RETURN(rc);
}

struct ll_async_page *llap_from_cookie(void *cookie)
{
        struct ll_async_page *llap = cookie;
        if (llap->llap_magic != LLAP_MAGIC)
                return ERR_PTR(-EINVAL);
        return llap;
}

static int ll_ap_make_ready(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = llap_from_cookie(data);
        if (IS_ERR(llap))
                RETURN(-EINVAL);

        page = llap->llap_page;

        LASSERT(cmd != OBD_BRW_READ);

        /* we're trying to write, but the page is locked.. come back later */
        if (TryLockPage(page))
                RETURN(-EAGAIN);

        LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
        page_cache_get(page);

        /* if we left PageDirty we might get another writepage call
         * in the future.  list walkers are bright enough
         * to check page dirty so we can leave it on whatever list
         * it's on.  XXX also, we're called with the cli list so if
         * we got the page cache list we'd create a lock inversion
         * with the removepage path which gets the page lock then the
         * cli list lock */
        clear_page_dirty(page);
        RETURN(0);
}

/* We have two reasons for giving llite the opportunity to change the
 * write length of a given queued page as it builds the RPC containing
 * the page:
 *
 * 1) Further extending writes may have landed in the page cache
 *    since a partial write first queued this page requiring us
 *    to write more from the page cache.
 * 2) We might have raced with truncate and want to avoid performing
 *    write RPCs that are just going to be thrown away by the
 *    truncate's punch on the storage targets.
 *
 * The kms serves these purposes as it is set at both truncate and extending
 * writes. */

static int ll_ap_refresh_count(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct lov_stripe_md *lsm;
        struct page *page;
        __u64 kms;
        ENTRY;

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(cmd != OBD_BRW_READ);

        llap = llap_from_cookie(data);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        page = llap->llap_page;
        lsm = ll_i2info(page->mapping->host)->lli_smd;
        kms = lov_merge_size(lsm, 1);

        /* catch race with truncate */
        if (((__u64)page->index << PAGE_SHIFT) >= kms)
                return 0;

        /* catch sub-page write at end of file */
        if (((__u64)page->index << PAGE_SHIFT) + PAGE_SIZE > kms)
                return kms % PAGE_SIZE;

        return PAGE_SIZE;
}
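
/*
 * Worked example (illustrative, assuming 4096-byte pages): with
 * kms = 10000, a queued page at index 3 starts at 12288 >= kms, so the
 * write is dropped entirely (count 0); a page at index 2 starts at 8192
 * and straddles kms, so only 10000 % 4096 = 1808 bytes are sent; pages
 * wholly below kms keep the full PAGE_SIZE count.
 */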

void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
{
        struct lov_stripe_md *lsm;
        obd_flag valid_flags;

        lsm = ll_i2info(inode)->lli_smd;

        oa->o_id = lsm->lsm_object_id;
        oa->o_valid = OBD_MD_FLID;
        valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
        if (cmd == OBD_BRW_WRITE) {
                oa->o_valid |= OBD_MD_FLIFID | OBD_MD_FLEPOCH;
                mdc_pack_fid(obdo_fid(oa), inode->i_ino, 0, inode->i_mode);
                oa->o_easize = ll_i2info(inode)->lli_io_epoch;

                valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME;
        }

        obdo_from_inode(oa, inode, valid_flags);
}

static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = llap_from_cookie(data);
        if (IS_ERR(llap)) {
                EXIT;
                return;
        }

        ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);
        EXIT;
}

static struct obd_async_page_ops ll_async_page_ops = {
        .ap_make_ready =    ll_ap_make_ready,
        .ap_refresh_count = ll_ap_refresh_count,
        .ap_fill_obdo =     ll_ap_fill_obdo,
        .ap_completion =    ll_ap_completion,
};

struct ll_async_page *llap_cast_private(struct page *page)
{
        struct ll_async_page *llap = (struct ll_async_page *)page->private;

        LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
                 "page %p private %lu gave magic %d which != %d\n",
                 page, page->private, llap->llap_magic, LLAP_MAGIC);

        return llap;
}

/* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
 *
 * There is an llap attached onto every page in lustre, linked off @sbi.
 * We add an llap to the list so we don't lose our place during list walking.
 * If llaps in the list are being moved they will only move to the end
 * of the LRU, and we aren't terribly interested in those pages here (we
 * start at the beginning of the list where the least-used llaps are). */

int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
{
        struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
        unsigned long total, want, count = 0;

        total = sbi->ll_async_page_count;

        /* There can be a large number of llaps (600k or more in a large
         * memory machine) so the VM 1/6 shrink ratio is likely too much.
         * Since we are freeing pages also, we don't necessarily want to
         * shrink so much.  Limit to 40MB of pages + llaps per call. */
        if (shrink_fraction == 0)
                want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
        else
                want = (total + shrink_fraction - 1) / shrink_fraction;

        if (want > 40 << (20 - PAGE_CACHE_SHIFT))
                want = 40 << (20 - PAGE_CACHE_SHIFT);

        CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
               want, total, shrink_fraction);

        spin_lock(&sbi->ll_lock);
        list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);

        while (total-- > 0 && count < want) {
                struct page *page;

                if (unlikely(need_resched())) {
                        spin_unlock(&sbi->ll_lock);
                        cond_resched();
                        spin_lock(&sbi->ll_lock);
                }

                llap = llite_pglist_next_llap(sbi, &dummy_llap.llap_pglist_item);
                list_del_init(&dummy_llap.llap_pglist_item);
                if (llap == NULL)
                        break;

                page = llap->llap_page;
                LASSERT(page != NULL);

                list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);

                /* Page needs/undergoing IO */
                if (TryLockPage(page)) {
                        LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
                        continue;
                }

                /* If page is dirty or undergoing IO don't discard it */
                if (llap->llap_write_queued || PageDirty(page) ||
                    (!PageUptodate(page) &&
                     llap->llap_origin != LLAP_ORIGIN_READAHEAD)) {
                        unlock_page(page);
                        LL_CDEBUG_PAGE(D_PAGE, page, "can't drop from cache: "
                                       "%s%s%s%s origin %s\n",
                                       llap->llap_write_queued ? "wq " : "",
                                       PageDirty(page) ? "pd " : "",
                                       PageUptodate(page) ? "" : "!pu ",
                                       llap->llap_defer_uptodate ? "" : "!du",
                                       llap_origins[llap->llap_origin]);
                        continue;
                }

                page_cache_get(page);
                spin_unlock(&sbi->ll_lock);

                count++;
                LL_CDEBUG_PAGE(D_PAGE, page, "drop from cache %lu/%lu\n",
                               count, want);
                if (page->mapping != NULL) {
                        ll_ra_accounting(page, page->mapping);
                        ll_truncate_complete_page(page);
                }
                unlock_page(page);
                page_cache_release(page);

                spin_lock(&sbi->ll_lock);
        }
        list_del(&dummy_llap.llap_pglist_item);
        spin_unlock(&sbi->ll_lock);

        CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
               count, want, total);

        return count;
}

struct ll_async_page *llap_from_page(struct page *page, unsigned origin)
{
        struct ll_async_page *llap;
        struct obd_export *exp;
        struct inode *inode = page->mapping->host;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        LASSERT(ll_async_page_slab);
        LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);

        llap = llap_cast_private(page);
        if (llap != NULL) {
                /* move to end of LRU list */
                spin_lock(&sbi->ll_lock);
                sbi->ll_pglist_gen++;
                list_del_init(&llap->llap_pglist_item);
                list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
                spin_unlock(&sbi->ll_lock);
                GOTO(out, llap);
        }

        exp = ll_i2obdexp(page->mapping->host);
        if (exp == NULL)
                RETURN(ERR_PTR(-EINVAL));

        /* limit the number of lustre-cached pages */
        if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
                llap_shrink_cache(sbi, 0);

        OBD_SLAB_ALLOC(llap, ll_async_page_slab, SLAB_KERNEL,
                       ll_async_page_slab_size);
        if (llap == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        llap->llap_magic = LLAP_MAGIC;
        llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));
        rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
                                 (obd_off)page->index << PAGE_SHIFT,
                                 &ll_async_page_ops, llap, &llap->llap_cookie);
        if (rc) {
                OBD_SLAB_FREE(llap, ll_async_page_slab,
                              ll_async_page_slab_size);
                RETURN(ERR_PTR(rc));
        }

        CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
               page, llap->llap_cookie, (obd_off)page->index << PAGE_SHIFT);
        /* also zeroing the PRIVBITS low order bitflags */
        __set_page_ll_data(page, llap);
        llap->llap_page = page;

        spin_lock(&sbi->ll_lock);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count++;
        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
        spin_unlock(&sbi->ll_lock);

out:
        llap->llap_origin = origin;
        RETURN(llap);
}
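
/*
 * Layout note (not in the original comments): the llap and the obd layer's
 * cookie share a single slab object; llap_cookie points
 * size_round(sizeof(*llap)) bytes past the llap itself, which is why
 * ll_async_page_slab_size must be larger than sizeof(struct ll_async_page).
 */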

static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
                               struct ll_async_page *llap,
                               unsigned to, obd_flag async_flags)
{
        unsigned long size_index = inode->i_size >> PAGE_SHIFT;
        struct obd_io_group *oig;
        int rc;
        ENTRY;

        /* _make_ready only sees llap once we've unlocked the page */
        llap->llap_write_queued = 1;
        rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
                                llap->llap_cookie, OBD_BRW_WRITE, 0, 0, 0,
                                async_flags);
        if (rc == 0) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
                //llap_write_pending(inode, llap);
                GOTO(out, 0);
        }

        llap->llap_write_queued = 0;
        rc = oig_init(&oig);
        if (rc)
                GOTO(out, rc);

        /* make full-page requests if we are not at EOF (bug 4410) */
        if (to != PAGE_SIZE && llap->llap_page->index < size_index) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write before EOF: size_index %lu, to %d\n",
                               size_index, to);
                to = PAGE_SIZE;
        } else if (to != PAGE_SIZE && llap->llap_page->index == size_index) {
                int size_to = inode->i_size & ~PAGE_MASK;
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write at EOF: size_index %lu, to %d/%d\n",
                               size_index, to, size_to);
                if (to < size_to)
                        to = size_to;
        }

        rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
                                llap->llap_cookie, OBD_BRW_WRITE, 0, to, 0,
                                ASYNC_READY | ASYNC_URGENT |
                                ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
        if (rc)
                GOTO(free_oig, rc);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
        if (rc)
                GOTO(free_oig, rc);

        rc = oig_wait(oig);
        if (!rc && async_flags & ASYNC_READY)
                unlock_page(llap->llap_page);

        LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);

free_oig:
        oig_release(oig);
out:
        RETURN(rc);
}
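
/*
 * Worked example (illustrative, assuming 4096-byte pages): with
 * i_size = 10000, size_index = 10000 >> 12 = 2.  A 100-byte sync write to
 * page index 1 sits below EOF and is rounded up to a full-page RPC, while
 * the same write to page index 2 (the EOF page) is extended to at most
 * size_to = 10000 & ~PAGE_MASK = 1808 bytes.
 */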

/* update our write count to account for i_size increases that may have
 * happened since we've queued the page for io. */

/* be careful not to return success without setting the page Uptodate or
 * the next pass through prepare_write will read in stale data from disk. */
int ll_commit_write(struct file *file, struct page *page, unsigned from,
                    unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_export *exp;
        struct ll_async_page *llap;
        loff_t size;
        int rc = 0;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
        LASSERT(inode == file->f_dentry->d_inode);
        LASSERT(PageLocked(page));

        CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
               inode, page, from, to, page->index);

        llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                RETURN(-EINVAL);

        /* queue a write for some time in the future the first time we
         * dirty the page */
        if (!PageDirty(page)) {
                lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
                                     LPROC_LL_DIRTY_MISSES);

                rc = queue_or_sync_write(exp, inode, llap, to, 0);
                if (rc)
                        GOTO(out, rc);
        } else {
                lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
                                     LPROC_LL_DIRTY_HITS);
        }

        /* put the page in the page cache, from now on ll_removepage is
         * responsible for cleaning up the llap.
         * only set page dirty when it's queued to be written out */
        if (llap->llap_write_queued)
                set_page_dirty(page);

out:
        size = (((obd_off)page->index) << PAGE_SHIFT) + to;
        if (rc == 0) {
                spin_lock(&lli->lli_lock);
                obd_increase_kms(exp, lsm, size);
                spin_unlock(&lli->lli_lock);
                if (size > inode->i_size)
                        inode->i_size = size;
                SetPageUptodate(page);
        } else if (size > inode->i_size) {
                /* this page is beyond the pale of i_size, so it can't be
                 * truncated in ll_p_r_e during lock revoking.  we must
                 * teardown our book-keeping here. */
                ll_removepage(page);
        }
        RETURN(rc);
}

static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        unsigned long ret;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
        ra->ra_cur_pages += ret;
        spin_unlock(&sbi->ll_lock);

        RETURN(ret);
}

static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        spin_lock(&sbi->ll_lock);
        LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
                 ra->ra_cur_pages, len);
        ra->ra_cur_pages -= len;
        spin_unlock(&sbi->ll_lock);
}

/* called for each page in a completed rpc. */
void ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = llap_from_cookie(data);
        if (IS_ERR(llap)) {
                EXIT;
                return;
        }

        page = llap->llap_page;
        LASSERT(PageLocked(page));

        LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);

        if (cmd == OBD_BRW_READ && llap->llap_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);

        if (rc == 0) {
                if (cmd == OBD_BRW_READ) {
                        if (!llap->llap_defer_uptodate)
                                SetPageUptodate(page);
                } else {
                        llap->llap_write_queued = 0;
                }
                ClearPageError(page);
        } else {
                if (cmd == OBD_BRW_READ) {
                        llap->llap_defer_uptodate = 0;
                }
                SetPageError(page);
                ClearPageLaunder(page);
        }

        unlock_page(page);

        if (0 && cmd == OBD_BRW_WRITE) {
                llap_write_complete(page->mapping->host, llap);
                ll_try_done_writing(page->mapping->host);
        }

        if (PageWriteback(page)) {
                end_page_writeback(page);
        }
        page_cache_release(page);

        EXIT;
}

/* the kernel calls us here when a page is unhashed from the page cache.
 * the page will be locked and the kernel is holding a spinlock, so
 * we need to be careful.  we're just tearing down our book-keeping
 * here. */
void ll_removepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        LASSERT(!in_interrupt());

        /* sync pages or failed read pages can leave pages in the page
         * cache that don't have our data associated with them anymore */
        if (page->private == 0) {
                EXIT;
                return;
        }

        LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");

        exp = ll_i2obdexp(inode);
        if (exp == NULL) {
                CERROR("page %p ind %lu gave null export\n", page, page->index);
                EXIT;
                return;
        }

        llap = llap_from_page(page, 0);
        if (IS_ERR(llap)) {
                CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
                       page->index, PTR_ERR(llap));
                EXIT;
                return;
        }

        //llap_write_complete(inode, llap);
        rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
                                     llap->llap_cookie);
        if (rc != 0)
                CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);

        /* this unconditional free is only safe because the page lock
         * is providing exclusivity to memory pressure/truncate/writeback.. */
        __clear_page_ll_data(page);

        spin_lock(&sbi->ll_lock);
        if (!list_empty(&llap->llap_pglist_item))
                list_del_init(&llap->llap_pglist_item);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count--;
        spin_unlock(&sbi->ll_lock);
        OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
        EXIT;
}

static int ll_page_matches(struct page *page)
{
        struct lustre_handle match_lockh = {0};
        struct inode *inode = page->mapping->host;
        ldlm_policy_data_t page_extent;
        int flags, matches;
        ENTRY;

        page_extent.l_extent.start = (__u64)page->index << PAGE_CACHE_SHIFT;
        page_extent.l_extent.end =
                page_extent.l_extent.start + PAGE_CACHE_SIZE - 1;
        flags = LDLM_FL_CBPENDING | LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
        matches = obd_match(ll_i2sbi(inode)->ll_osc_exp,
                            ll_i2info(inode)->lli_smd, LDLM_EXTENT,
                            &page_extent, LCK_PR | LCK_PW, &flags, inode,
                            &match_lockh);
        RETURN(matches);
}
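
/*
 * Example (illustrative, assuming 4096-byte pages): for page index 3 the
 * extent probed above is [12288, 16383].  LDLM_FL_TEST_LOCK asks obd_match
 * only to test for an already-granted PR or PW lock covering that extent,
 * rather than taking a new reference on one.
 */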

static int ll_issue_page_read(struct obd_export *exp,
                              struct ll_async_page *llap,
                              struct obd_io_group *oig, int defer)
{
        struct page *page = llap->llap_page;
        int rc;
        ENTRY;

        page_cache_get(page);
        llap->llap_defer_uptodate = defer;
        llap->llap_ra_used = 0;
        rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
                                NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
                                PAGE_SIZE, 0, ASYNC_COUNT_STABLE | ASYNC_READY |
                                ASYNC_URGENT);
        if (rc) {
                LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
                page_cache_release(page);
        }
        RETURN(rc);
}

static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
{
        LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
        ra->ra_stats[which]++;
}

static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
        struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
        struct ll_ra_info *ra = &sbi->ll_ra_info;

        spin_lock(&sbi->ll_lock);
        ll_ra_stats_inc_unlocked(ra, which);
        spin_unlock(&sbi->ll_lock);
}

void ll_ra_accounting(struct page *page, struct address_space *mapping)
{
        struct ll_async_page *llap;

        llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
        if (IS_ERR(llap))
                return;

        if (!llap->llap_defer_uptodate || llap->llap_ra_used)
                return;

        ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
}

#define RAS_CDEBUG(ras) \
        CDEBUG(D_READA, "lrp %lu c %lu ws %lu wl %lu nra %lu\n", \
               ras->ras_last_readpage, ras->ras_consecutive, \
               ras->ras_window_start, ras->ras_window_len, \
               ras->ras_next_readahead);

static int index_in_window(unsigned long index, unsigned long point,
                           unsigned long before, unsigned long after)
{
        unsigned long start = point - before, end = point + after;

        if (start > point)
                start = 0;
        if (end < point)
                end = ~0;

        return start <= index && index <= end;
}
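
/*
 * Example (illustrative): index_in_window(10, 12, 8, 8) tests
 * 4 <= 10 <= 20 and returns true.  The clamps above keep "point - before"
 * from wrapping below 0 and "point + after" from wrapping past ~0UL when
 * the unsigned arithmetic overflows.
 */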

static int ll_readahead(struct ll_readahead_state *ras,
                        struct obd_export *exp, struct address_space *mapping,
                        struct obd_io_group *oig, int flags)
{
        unsigned long i, start = 0, end = 0, reserved;
        struct ll_async_page *llap;
        struct page *page;
        int rc, ret = 0, match_failed = 0;
        __u64 kms;
        unsigned int gfp_mask;
        ENTRY;

        kms = lov_merge_size(ll_i2info(mapping->host)->lli_smd, 1);
        if (kms == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
                RETURN(0);
        }

        spin_lock(&ras->ras_lock);
        /* reserve a part of the read-ahead window that we'll be issuing */
        if (ras->ras_window_len) {
                start = ras->ras_next_readahead;
                end = ras->ras_window_start + ras->ras_window_len - 1;
                end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
                ras->ras_next_readahead = max(end, end + 1);

                RAS_CDEBUG(ras);
        }
        spin_unlock(&ras->ras_lock);

        if (end == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
                RETURN(0);
        }

        reserved = ll_ra_count_get(ll_i2sbi(mapping->host), end - start + 1);
        if (reserved < end - start + 1)
                ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);

        gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
#ifdef __GFP_NOWARN
        gfp_mask |= __GFP_NOWARN;
#endif

        for (i = start; reserved > 0 && !match_failed && i <= end; i++) {
                /* skip locked pages from previous readpage calls */
                page = grab_cache_page_nowait_gfp(mapping, i, gfp_mask);
                if (page == NULL) {
                        CDEBUG(D_READA, "g_c_p_n failed\n");
                        continue;
                }

                /* we do this first so that we can see the page in the /proc
                 * accounting */
                llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
                if (IS_ERR(llap) || llap->llap_defer_uptodate)
                        goto next_page;

                /* skip completed pages */
                if (Page_Uptodate(page))
                        goto next_page;

                /* bail when we hit the end of the lock. */
                if ((rc = ll_page_matches(page)) <= 0) {
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "lock match failed: rc %d\n", rc);
                        ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
                        match_failed = 1;
                        goto next_page;
                }

                rc = ll_issue_page_read(exp, llap, oig, 1);
                if (rc == 0) {
                        reserved--;
                        ret++;
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "started read-ahead\n");
                }
                if (rc) {
        next_page:
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "skipping read-ahead\n");

                        unlock_page(page);
                }
                page_cache_release(page);
        }

        LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
        if (reserved != 0)
                ll_ra_count_put(ll_i2sbi(mapping->host), reserved);
        if (i == end + 1 && end == (kms >> PAGE_CACHE_SHIFT))
                ll_ra_stats_inc(mapping, RA_STAT_EOF);

        /* if we didn't get to the end of the region we reserved from
         * the ras we need to go back and update the ras so that the
         * next read-ahead tries from where we left off.  we only do so
         * if the region we failed to issue read-ahead on is still ahead
         * of the app and behind the next index to start read-ahead from */
        if (i != end + 1) {
                spin_lock(&ras->ras_lock);
                if (i < ras->ras_next_readahead &&
                    index_in_window(i, ras->ras_window_start, 0,
                                    ras->ras_window_len)) {
                        ras->ras_next_readahead = i;
                        RAS_CDEBUG(ras);
                }
                spin_unlock(&ras->ras_lock);
        }

        RETURN(ret);
}

static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_window_start = index & (~(PTLRPC_MAX_BRW_PAGES - 1));
}

/* called with the ras_lock held or from places where it doesn't matter */
static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_last_readpage = index;
        ras->ras_consecutive = 1;
        ras->ras_window_len = 0;
        ras_set_start(ras, index);
        ras->ras_next_readahead = ras->ras_window_start;

        RAS_CDEBUG(ras);
}
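
/*
 * Example (illustrative, assuming PTLRPC_MAX_BRW_PAGES == 256): a read at
 * page index 1000 aligns the window to ras_window_start = 1000 & ~255 = 768,
 * so read-ahead is issued in units that line up with full-sized RPCs.
 */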

void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
        spin_lock_init(&ras->ras_lock);
        ras_reset(ras, 0);
}

static void ras_update(struct ll_sb_info *sbi, struct ll_readahead_state *ras,
                       unsigned long index, unsigned hit)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        int zero = 0;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        spin_lock(&ras->ras_lock);

        ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);

        /* reset the read-ahead window in two cases.  First when the app seeks
         * or reads to some other part of the file.  Secondly if we get a
         * read-ahead miss that we think we've previously issued.  This can
         * be a symptom of there being so many read-ahead pages that the VM is
         * reclaiming it before we get to it. */
        if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
        } else if (!hit && ras->ras_window_len &&
                   index < ras->ras_next_readahead &&
                   index_in_window(index, ras->ras_window_start, 0,
                                   ras->ras_window_len)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
        }

        if (zero) {
                ras_reset(ras, index);
                GOTO(out_unlock, 0);
        }

        ras->ras_last_readpage = index;
        ras->ras_consecutive++;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start,
                                      ras->ras_next_readahead);

        /* wait for a few pages to arrive before issuing readahead to avoid
         * the worst overutilization */
        if (ras->ras_consecutive == 3) {
                ras->ras_window_len = PTLRPC_MAX_BRW_PAGES;
                GOTO(out_unlock, 0);
        }

        /* we need to increase the window sometimes.  we'll arbitrarily
         * do it half-way through the pages in an rpc */
        if ((index & (PTLRPC_MAX_BRW_PAGES - 1)) ==
            (PTLRPC_MAX_BRW_PAGES >> 1)) {
                ras->ras_window_len += PTLRPC_MAX_BRW_PAGES;
                ras->ras_window_len = min(ras->ras_window_len,
                                          ra->ra_max_pages);
        }

out_unlock:
        RAS_CDEBUG(ras);
        spin_unlock(&ras->ras_lock);
        spin_unlock(&sbi->ll_lock);
        EXIT;
}
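
/*
 * Walk-through (illustrative): a sequential reader arrives at its third
 * consecutive page and the window opens at PTLRPC_MAX_BRW_PAGES pages;
 * after that, each time the read index crosses the half-RPC mark the window
 * grows by another RPC's worth of pages, capped at ra_max_pages.
 */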

/*
 * for now we do our readpage the same on both 2.4 and 2.5.  The kernel's
 * read-ahead assumes it is valid to issue readpage all the way up to
 * i_size, but our dlm locks make that not the case.  We disable the
 * kernel's read-ahead and do our own by walking ahead in the page cache
 * checking for dlm lock coverage.  the main difference between 2.4 and
 * 2.6 is how read-ahead gets batched and issued, but we're using our own,
 * so they look the same.
 */
int ll_readpage(struct file *filp, struct page *page)
{
        struct ll_file_data *fd = filp->private_data;
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct obd_io_group *oig = NULL;
        int rc;
        ENTRY;

        LASSERT(PageLocked(page));
        LASSERT(!PageUptodate(page));
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset="LPX64"\n",
               inode->i_ino, inode->i_generation, inode,
               (((obd_off)page->index) << PAGE_SHIFT));
        LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);

        rc = oig_init(&oig);
        if (rc < 0)
                GOTO(out, rc);

        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page, LLAP_ORIGIN_READPAGE);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
                ras_update(ll_i2sbi(inode), &fd->fd_ras, page->index,
                           llap->llap_defer_uptodate);

        if (llap->llap_defer_uptodate) {
                llap->llap_ra_used = 1;
                rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                                  fd->fd_flags);
                if (rc > 0)
                        obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
                                             NULL, oig);
                LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
                SetPageUptodate(page);
                unlock_page(page);
                GOTO(out_oig, rc = 0);
        }

        rc = ll_page_matches(page);
        if (rc < 0) {
                LL_CDEBUG_PAGE(D_ERROR, page, "lock match failed: rc %d\n", rc);
                GOTO(out, rc);
        }

        if (rc == 0) {
                CWARN("ino %lu page %lu (%llu) not covered by "
                      "a lock (mmap?).  check debug logs.\n",
                      inode->i_ino, page->index,
                      (long long)page->index << PAGE_CACHE_SHIFT);
        }

        rc = ll_issue_page_read(exp, llap, oig, 0);
        if (rc)
                GOTO(out, rc);

        LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
        if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
                ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                             fd->fd_flags);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);

        EXIT;
out:
        if (rc)
                unlock_page(page);
out_oig:
        if (oig != NULL)
                oig_release(oig);
        RETURN(rc);
}