/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Lustre Lite I/O page cache routines shared by different kernel revs
 *
 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/segment.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/lustre_mds.h>
#include <linux/lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>
#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
             pos = n, n = pos->prev)
#endif
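
/*
 * Compatibility helper for kernels that do not define
 * list_for_each_prev_safe(): walk a list backwards while it remains safe
 * to delete the current entry.  Illustrative use only (some_list is a
 * hypothetical list head, not something defined in this file):
 *
 *      struct list_head *pos, *n;
 *      list_for_each_prev_safe(pos, n, &some_list)
 *              list_del_init(pos);
 */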
/* SYNCHRONOUS I/O to object storage for an inode */
static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
                  struct page *page, int flags)
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;

        pg.off = ((obd_off)page->index) << PAGE_SHIFT;

        if (cmd == OBD_BRW_WRITE && (pg.off + PAGE_SIZE > inode->i_size))
                pg.count = inode->i_size % PAGE_SIZE;
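        /* The check above trims the transfer for the last, partially filled
         * page of the file: only the bytes below i_size are valid, so
         * pg.count (assumed to start out as a full PAGE_SIZE in the setup
         * above) becomes i_size % PAGE_SIZE. */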
        CDEBUG(D_PAGE, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
               cmd & OBD_BRW_WRITE ? "write" : "read", pg.count, inode->i_ino,

                CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
                       inode->i_ino, inode, inode->i_size, page->mapping->host,
                       page->mapping->host->i_size, page->index, pg.off);

        if (cmd == OBD_BRW_WRITE)
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_BRW_WRITE, pg.count);

                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_BRW_READ, pg.count);

        rc = obd_brw(cmd, ll_i2obdexp(inode), oa, lsm, 1, &pg, NULL);

                obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);

                CERROR("error from obd_brw: rc = %d\n", rc);
/* this isn't where truncate starts.  roughly:
 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate
 * we grab the lock back in setattr_raw to avoid races. */
void ll_truncate(struct inode *inode)
        struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;

        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
               inode->i_generation, inode);

                CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",

        oa.o_id = lsm->lsm_object_id;
        oa.o_gr = lsm->lsm_object_gr;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
        obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
                        OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);

        CDEBUG(D_INFO, "calling punch for "LPX64" (all bytes after %Lu)\n",
               oa.o_id, inode->i_size);

        /* truncate == punch from new size to absolute end of file */
        /* NB: obd_punch must be called with i_sem held!  It updates the kms! */
        rc = obd_punch(ll_i2obdexp(inode), &oa, lsm, inode->i_size,
                       OBD_OBJECT_EOF, NULL);

                CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);

        obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                      OBD_MD_FLATIME | OBD_MD_FLMTIME |

__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
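
/* lov_merge_size() (defined in the LOV layer) merges the per-stripe sizes
 * of an object; with a non-zero "kms" argument it returns the known
 * minimum size maintained by extent locking, which ll_prepare_write()
 * below uses in place of i_size when deciding whether a page needs to be
 * read before it is partially overwritten. */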
int ll_prepare_write(struct file *file, struct page *page, unsigned from,
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;

        if (!PageLocked(page))

        /* Check to see if we should return -EIO right away */
        pga.count = PAGE_SIZE;

        oa.o_id = lsm->lsm_object_id;
        oa.o_gr = lsm->lsm_object_gr;
        oa.o_mode = inode->i_mode;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE |
                     OBD_MD_FLTYPE | OBD_MD_FLGROUP;

        rc = obd_brw(OBD_BRW_CHECK, ll_i2obdexp(inode), &oa, lsm, 1, &pga,

        if (PageUptodate(page))

        /* We're completely overwriting an existing page, so _don't_ set it
         * up to date until commit_write */
        if (from == 0 && to == PAGE_SIZE) {
                POISON_PAGE(page, 0x11);

        /* If we are writing to a new page, no need to read old data.  The
         * extent locking will have updated the KMS, and for our purposes
         * here we can treat it like i_size. */
        kms = lov_merge_size(lsm, 1);

                memset(kmap(page), 0, PAGE_SIZE);

                GOTO(prepare_done, rc = 0);

        /* XXX could be an async ocp read.. read-ahead? */
        rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);

        /* bug 1598: don't clobber blksize */
        oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
        obdo_refresh_inode(inode, &oa, oa.o_valid);

        SetPageUptodate(page);
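
/* ll_write_count() returns how many bytes of the page lie inside i_size:
 * 0 for a page wholly beyond EOF (a racing truncate), i_size % PAGE_SIZE
 * for the page that straddles EOF, and presumably a full PAGE_SIZE
 * otherwise. */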
int ll_write_count(struct page *page)
        struct inode *inode = page->mapping->host;

        /* catch race with truncate */
        if (((loff_t)page->index << PAGE_SHIFT) >= inode->i_size)

        /* catch sub-page write at end of file */
        if (((loff_t)page->index << PAGE_SHIFT) + PAGE_SIZE > inode->i_size)
                return inode->i_size % PAGE_SIZE;
struct ll_async_page *llap_from_cookie(void *cookie)
        struct ll_async_page *llap = cookie;
        if (llap->llap_magic != LLAP_MAGIC)
                return ERR_PTR(-EINVAL);
static int ll_ap_make_ready(void *data, int cmd)
        struct ll_async_page *llap;

        llap = llap_from_cookie(data);

        page = llap->llap_page;

        if (cmd == OBD_BRW_READ)

        /* we're trying to write, but the page is locked.. come back later */
        if (TryLockPage(page))

        LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
        page_cache_get(page);

        /* if we left PageDirty we might get another writepage call
         * in the future.  list walkers are bright enough
         * to check page dirty so we can leave it on whatever list
         * it's on.  XXX also, we're called with the cli list so if
         * we got the page cache list we'd create a lock inversion
         * with the removepage path which gets the page lock then the
         * cli list lock. */
        clear_page_dirty(page);
static int ll_ap_refresh_count(void *data, int cmd)
        struct ll_async_page *llap;

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(cmd != OBD_BRW_READ);

        llap = llap_from_cookie(data);
                RETURN(PTR_ERR(llap));

        return ll_write_count(llap->llap_page);
void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
        struct lov_stripe_md *lsm;
        obd_flag valid_flags;

        lsm = ll_i2info(inode)->lli_smd;

        oa->o_id = lsm->lsm_object_id;
        oa->o_gr = lsm->lsm_object_gr;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
        valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
        if (cmd == OBD_BRW_WRITE) {
                oa->o_valid |= OBD_MD_FLIFID | OBD_MD_FLEPOCH;
                mdc_pack_fid(obdo_fid(oa), inode->i_ino, 0, inode->i_mode);
                obdo_fid(oa)->mds = ll_i2info(inode)->lli_mds;
                oa->o_easize = ll_i2info(inode)->lli_io_epoch;

                valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME;

        obdo_from_inode(oa, inode, valid_flags);
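
/* Note on ll_inode_fill_obdo() above: only a write stamps the fid, the I/O
 * epoch and the mtime/ctime, presumably so the OST can associate the write
 * with its MDS epoch and update the timestamps; a read only carries the
 * object type and atime. */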
static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
        struct ll_async_page *llap;

        llap = llap_from_cookie(data);

        ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);
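
/* Callback table handed to the OSC/LOV for every cached page: make_ready
 * locks the page and clears its dirty bit just before it is put into an
 * RPC, refresh_count re-checks how many bytes of the page are still valid
 * (i_size may have changed since the page was queued), fill_obdo stamps
 * the obdo with the inode's current attributes, and completion (defined
 * elsewhere in llite) finishes the page once the RPC completes. */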
static struct obd_async_page_ops ll_async_page_ops = {
        .ap_make_ready =    ll_ap_make_ready,
        .ap_refresh_count = ll_ap_refresh_count,
        .ap_fill_obdo =     ll_ap_fill_obdo,
        .ap_completion =    ll_ap_completion,

/* XXX have the exp be an argument? */
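/* Look up, or lazily create, the ll_async_page bookkeeping for a page: if
 * page->private already carries an llap with a valid magic it is reused;
 * otherwise a new one is allocated, registered with the export via
 * obd_prep_async_page(), stored in page->private and linked onto the
 * per-superblock list used for /proc accounting.  ll_removepage() tears it
 * down again when the page leaves the page cache. */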
struct ll_async_page *llap_from_page(struct page *page)
        struct ll_async_page *llap;
        struct obd_export *exp;
        struct inode *inode = page->mapping->host;
        struct ll_sb_info *sbi = ll_i2sbi(inode);

        llap = (struct ll_async_page *)page->private;

                if (llap->llap_magic != LLAP_MAGIC)
                        RETURN(ERR_PTR(-EINVAL));

        exp = ll_i2obdexp(page->mapping->host);
                RETURN(ERR_PTR(-EINVAL));

        OBD_ALLOC(llap, sizeof(*llap));
                RETURN(ERR_PTR(-ENOMEM));
        llap->llap_magic = LLAP_MAGIC;
        rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
                                 (obd_off)page->index << PAGE_SHIFT,
                                 &ll_async_page_ops, llap, &llap->llap_cookie);
                OBD_FREE(llap, sizeof(*llap));

        CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
               page, llap->llap_cookie, (obd_off)page->index << PAGE_SHIFT);
        /* also zeroing the PRIVBITS low order bitflags */
        page->private = (unsigned long)llap;
        llap->llap_page = page;

        spin_lock(&sbi->ll_pglist_lock);
        sbi->ll_pglist_gen++;
        list_add_tail(&llap->llap_proc_item, &sbi->ll_pglist);
        spin_unlock(&sbi->ll_pglist_lock);
void lov_increase_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
                      obd_off size);

/* update our write count to account for i_size increases that may have
 * happened since we've queued the page for io. */

/* be careful not to return success without setting the page Uptodate or
 * the next pass through prepare_write will read in stale data from disk. */
int ll_commit_write(struct file *file, struct page *page, unsigned from,
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_export *exp = NULL;
        struct ll_async_page *llap;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
        LASSERT(inode == file->f_dentry->d_inode);
        LASSERT(PageLocked(page));

        CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
               inode, page, from, to, page->index);

        llap = llap_from_page(page);
                RETURN(PTR_ERR(llap));

        /* queue a write for some time in the future the first time we
         * dirty the page */
        if (!PageDirty(page)) {
                lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
                                     LPROC_LL_DIRTY_MISSES);

                exp = ll_i2obdexp(inode);

                /* _make_ready only sees llap once we've unlocked the page */
                llap->llap_write_queued = 1;
                rc = obd_queue_async_io(exp, lsm, NULL, llap->llap_cookie,
                                        OBD_BRW_WRITE, 0, 0, 0, 0);
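                /* If async queueing fails (typically when the client has no
                 * dirty-cache grant left), fall back to a synchronous group
                 * write of just the bytes dirtied in this page, marked ready
                 * and urgent so it is sent immediately. */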
                if (rc != 0) { /* async failed, try sync.. */
                        struct obd_io_group *oig;

                        llap->llap_write_queued = 0;
                        rc = obd_queue_group_io(exp, lsm, NULL, oig,
                                                OBD_BRW_WRITE, 0, to, 0,
                                                ASYNC_READY | ASYNC_URGENT |

                        rc = obd_trigger_group_io(exp, lsm, NULL, oig);

                LL_CDEBUG_PAGE(D_PAGE, page, "write queued\n");
                //llap_write_pending(inode, llap);

                lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
                                     LPROC_LL_DIRTY_HITS);

        /* put the page in the page cache, from now on ll_removepage is
         * responsible for cleaning up the llap */
        set_page_dirty(page);

        size = (((obd_off)page->index) << PAGE_SHIFT) + to;
        lov_increase_kms(exp, lsm, size);
        if (size > inode->i_size)
                inode->i_size = size;
        SetPageUptodate(page);
/* the kernel calls us here when a page is unhashed from the page cache.
 * the page will be locked and the kernel is holding a spinlock, so
 * we need to be careful.  we're just tearing down our book-keeping
 * here. */
void ll_removepage(struct page *page)
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct ll_sb_info *sbi = ll_i2sbi(inode);

        LASSERT(!in_interrupt());

        /* sync pages or failed read pages can leave pages in the page
         * cache that don't have our data associated with them anymore */
        if (page->private == 0) {

        LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");

        exp = ll_i2obdexp(inode);
                CERROR("page %p ind %lu gave null export\n", page, page->index);

        llap = llap_from_page(page);
                CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
                       page->index, PTR_ERR(llap));

        //llap_write_complete(inode, llap);
        rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
                CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);

        /* this unconditional free is only safe because the page lock
         * is providing exclusivity to memory pressure/truncate/writeback.. */
        spin_lock(&sbi->ll_pglist_lock);
        if (!list_empty(&llap->llap_proc_item))
                list_del_init(&llap->llap_proc_item);
        sbi->ll_pglist_gen++;
        spin_unlock(&sbi->ll_pglist_lock);
        OBD_FREE(llap, sizeof(*llap));
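
/* Check whether this client already holds a granted DLM extent lock (PR or
 * PW) covering the page.  LDLM_FL_TEST_LOCK means the match only tests for
 * coverage without taking an extra reference on the lock; readpage and
 * read-ahead below only issue I/O for pages that are covered. */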
static int ll_page_matches(struct page *page, int fd_flags)
        struct lustre_handle match_lockh = {0};
        struct inode *inode = page->mapping->host;
        ldlm_policy_data_t page_extent;

        if (fd_flags & LL_FILE_CW_LOCKED)

        page_extent.l_extent.start = (__u64)page->index << PAGE_CACHE_SHIFT;
        page_extent.l_extent.end =
                page_extent.l_extent.start + PAGE_CACHE_SIZE - 1;
        flags = LDLM_FL_CBPENDING | LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
        matches = obd_match(ll_i2sbi(inode)->ll_osc_exp,
                            ll_i2info(inode)->lli_smd, LDLM_EXTENT,
                            &page_extent, LCK_PR | LCK_PW, &flags, inode,
static int ll_issue_page_read(struct obd_export *exp,
                              struct ll_async_page *llap,
                              struct obd_io_group *oig, int defer)
        struct page *page = llap->llap_page;

        page_cache_get(page);
        llap->llap_defer_uptodate = defer;
        rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
                                NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
                                PAGE_SIZE, 0, ASYNC_COUNT_STABLE);

                LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
                page_cache_release(page);
#define LL_RA_MIN(inode) ((unsigned long)PTLRPC_MAX_BRW_PAGES / 2)
#define LL_RA_MAX(inode) ((ll_i2info(inode)->lli_smd->lsm_xfersize * 3) >> \
                          PAGE_CACHE_SHIFT)
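
/* The read-ahead window is bounded by these two macros: never smaller than
 * half of the largest bulk RPC (PTLRPC_MAX_BRW_PAGES / 2) and never larger
 * than roughly three stripe transfer units, both expressed in pages. */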
static void ll_readahead(struct ll_readahead_state *ras,
                         struct obd_export *exp, struct address_space *mapping,
                         struct obd_io_group *oig, int flags)
        unsigned long i, start, end;
        struct ll_async_page *llap;

        if (mapping->host->i_size == 0)

        spin_lock(&ras->ras_lock);

        /* make sure to issue a window's worth of read-ahead pages */
        start = end - ras->ras_window;

        /* but don't iterate over pages that we've already issued.  this
         * will set start to end + 1 if we've already read-ahead up to
         * ras_last so the for() won't be entered */
        if (ras->ras_next_index > start)
                start = ras->ras_next_index;

        ras->ras_next_index = end + 1;

        CDEBUG(D_READA, "ni %lu last %lu win %lu: reading from %lu to %lu\n",
               ras->ras_next_index, ras->ras_last, ras->ras_window,

        spin_unlock(&ras->ras_lock);

        /* clamp to filesize */
        i = (mapping->host->i_size - 1) >> PAGE_CACHE_SHIFT;

        for (i = start; i <= end; i++) {
                /* grab_cache_page_nowait returns null if this races with
                 * truncating the page (page->mapping == NULL) */
                page = grab_cache_page_nowait(mapping, i);

                /* the book-keeping above promises that we've tried
                 * all the indices from start to end, so we don't
                 * stop if anyone returns an error.  This may not be good. */
                if (Page_Uptodate(page))

                if ((rc = ll_page_matches(page, flags)) <= 0) {
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "lock match failed: rc %d\n", rc);

                llap = llap_from_page(page);
                if (IS_ERR(llap) || llap->llap_defer_uptodate)

                rc = ll_issue_page_read(exp, llap, oig, 1);

                        LL_CDEBUG_PAGE(D_PAGE, page, "started read-ahead\n");

                        LL_CDEBUG_PAGE(D_PAGE, page, "skipping read-ahead\n");

                page_cache_release(page);
/* called with the ras_lock held or from places where it doesn't matter */
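/* Re-seed the read-ahead state at a given index: ras_next_index becomes the
 * next page to issue, the window shrinks back to the minimum, and ras_last
 * marks how far the current window reaches (clamped so it cannot wrap). */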
static void ll_readahead_set(struct inode *inode,
                             struct ll_readahead_state *ras,
        ras->ras_next_index = index;
        if (ras->ras_next_index != ~0UL)
                ras->ras_next_index++;
        ras->ras_window = LL_RA_MIN(inode);
        ras->ras_last = ras->ras_next_index + ras->ras_window;
        if (ras->ras_last < ras->ras_next_index)
                ras->ras_last = ~0UL;
        CDEBUG(D_READA, "ni %lu last %lu win %lu: set %lu\n",
               ras->ras_next_index, ras->ras_last, ras->ras_window,

void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
        spin_lock_init(&ras->ras_lock);
        ll_readahead_set(inode, ras, 0);
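
/* Heuristic window management driven by readpage: a miss just past the end
 * of the window (the kernel's own read-ahead running ahead of us) extends
 * it by one page; a miss far outside the window is treated as a seek and
 * resets it; a miss inside the window is taken as memory pressure evicting
 * our pages and shrinks it aggressively; a hit inside the window grows it
 * and pushes ras_last a window's worth past the current index. */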
static void ll_readahead_update(struct inode *inode,
                                struct ll_readahead_state *ras,
                                unsigned long index, int hit)
        unsigned long issued_start, new_last;

        spin_lock(&ras->ras_lock);

        /* we're interested in noticing the index's relation to the
         * previously issued read-ahead pages */
        issued_start = ras->ras_next_index - ras->ras_window - 1;
        if (issued_start > ras->ras_next_index)

        CDEBUG(D_READA, "ni %lu last %lu win %lu: %s ind %lu start %lu\n",
               ras->ras_next_index, ras->ras_last, ras->ras_window,
               hit ? "hit" : "miss", index, issued_start);

            index == ras->ras_next_index && index == ras->ras_last + 1) {
                /* special case the kernel's read-ahead running into the
                 * page just beyond our read-ahead window as an extension
                 * of our read-ahead.  sigh.  wishing it was easier to
                 * turn off 2.4's read-ahead. */
                ras->ras_window = min(LL_RA_MAX(inode), ras->ras_window + 1);

                ras->ras_next_index = index + 1;
                ras->ras_last = index;
            (index > issued_start || ras->ras_next_index >= index)) {
                /* deal with a miss way out of the window.  we interpret
                 * this as a seek and restart the window */
                ll_readahead_set(inode, ras, index);

            issued_start <= index && index < ras->ras_next_index) {
                /* a miss inside the window?  surely it's memory pressure
                 * evicting our read pages before the app can see them.
                 * we shrink the window aggressively */
                unsigned long old_window = ras->ras_window;

                ras->ras_window = max(ras->ras_window / 2, LL_RA_MIN(inode));
                ras->ras_last -= old_window - ras->ras_window;
                if (ras->ras_next_index > ras->ras_last)
                        ras->ras_next_index = ras->ras_last + 1;
                CDEBUG(D_READA, "ni %lu last %lu win %lu: miss inside\n",
                       ras->ras_next_index, ras->ras_last, ras->ras_window);

            issued_start <= index && index < ras->ras_next_index) {
                /* a hit inside the window.  grow the window by twice the
                 * number of pages that are satisfied within the window. */
                ras->ras_window = min(LL_RA_MAX(inode), ras->ras_window + 2);

                /* we want the next readahead pass to issue a window's worth
                 * beyond where the app currently is */
                new_last = index + ras->ras_window;
                if (new_last > ras->ras_last)
                        ras->ras_last = new_last;

                CDEBUG(D_READA, "ni %lu last %lu win %lu: extended window/last\n",
                       ras->ras_next_index, ras->ras_last, ras->ras_window);

        spin_unlock(&ras->ras_lock);
/*
 * for now we do our readpage the same on both 2.4 and 2.5.  The kernel's
 * read-ahead assumes it is valid to issue readpage all the way up to
 * i_size, but our dlm locks make that not the case.  We disable the
 * kernel's read-ahead and do our own by walking ahead in the page cache
 * checking for dlm lock coverage.  the main difference between 2.4 and
 * 2.6 is how read-ahead gets batched and issued, but we're using our own,
 * so they look the same.
 */
int ll_readpage(struct file *filp, struct page *page)
        struct ll_file_data *fd = filp->private_data;
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct obd_io_group *oig = NULL;

        LASSERT(PageLocked(page));
        LASSERT(!PageUptodate(page));
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset="LPX64"\n",
               inode->i_ino, inode->i_generation, inode,
               (((obd_off)page->index) << PAGE_SHIFT));
        LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);

        exp = ll_i2obdexp(inode);
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page);
                GOTO(out, rc = PTR_ERR(llap));
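
        /* If read-ahead already queued this page (defer_uptodate), its data
         * arrives with the read-ahead group I/O: count the access as a hit,
         * issue the next batch of read-ahead, kick the group, and mark the
         * page up to date without queueing a separate read. */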
        if (llap->llap_defer_uptodate) {
                ll_readahead_update(inode, &fd->fd_ras, page->index, 1);
                ll_readahead(&fd->fd_ras, exp, page->mapping, oig, fd->fd_flags);
                obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL,

                LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
                SetPageUptodate(page);

                GOTO(out_oig, rc = 0);

        ll_readahead_update(inode, &fd->fd_ras, page->index, 0);

        rc = ll_page_matches(page, fd->fd_flags);
                LL_CDEBUG_PAGE(D_ERROR, page, "lock match failed: rc %d\n", rc);

                static unsigned long next_print;
                CDEBUG(D_INODE, "ino %lu page %lu (%llu) didn't match a lock\n",
                       inode->i_ino, page->index,
                       (long long)page->index << PAGE_CACHE_SHIFT);
                if (time_after(jiffies, next_print)) {
                        CERROR("ino %lu page %lu (%llu) not covered by "
                               "a lock (mmap?).  check debug logs.\n",
                               inode->i_ino, page->index,
                               (long long)page->index << PAGE_CACHE_SHIFT);
                        ldlm_dump_all_namespaces();
                        if (next_print == 0) {
                                CERROR("%s\n", portals_debug_dumpstack());
                                portals_debug_dumplog();

                        next_print = jiffies + 30 * HZ;

        rc = ll_issue_page_read(exp, llap, oig, 0);

        LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
        if ((ll_i2sbi(inode)->ll_flags & LL_SBI_READAHEAD))
                ll_readahead(&fd->fd_ras, exp, page->mapping, oig, fd->fd_flags);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
/* this is for read pages.  we issue them as ready but not urgent.  when
 * someone waits on them we fire them off, hopefully merged with adjacent
 * reads that were queued by read-ahead. */
int ll_sync_page(struct page *page)
        struct obd_export *exp;
        struct ll_async_page *llap;

        /* we're using a low bit flag to signify that a queued read should
         * be issued once someone goes to lock it.  it is also cleared
         * as the page is built into an RPC */
        if (!test_and_clear_bit(LL_PRIVBITS_READ, &page->private))

        /* careful to only deref page->mapping after checking the bit */
        exp = ll_i2obdexp(page->mapping->host);

        llap = llap_from_page(page);
                RETURN(PTR_ERR(llap));

        LL_CDEBUG_PAGE(D_PAGE, page, "setting ready|urgent\n");

        rc = obd_set_async_flags(exp, ll_i2info(page->mapping->host)->lli_smd,
                                 NULL, llap->llap_cookie,
                                 ASYNC_READY | ASYNC_URGENT);