/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Lustre Lite I/O page cache routines shared by different kernel revs
 *
 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <asm/segment.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

//#include <lustre_mdc.h>
#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>
#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
             pos = n, n = pos->prev)
#endif

cfs_mem_cache_t *ll_async_page_slab = NULL;
size_t ll_async_page_slab_size = 0;
/* SYNCHRONOUS I/O to object storage for an inode */
static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
                  struct page *page, int flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_info oinfo = { { { 0 } } };
        struct brw_page pg;
        int opc, rc;
        ENTRY;

        pg.pg = page;
        pg.off = ((obd_off)page->index) << CFS_PAGE_SHIFT;

        if ((cmd & OBD_BRW_WRITE) && (pg.off+CFS_PAGE_SIZE>i_size_read(inode)))
                pg.count = i_size_read(inode) % CFS_PAGE_SIZE;
        else
                pg.count = CFS_PAGE_SIZE;

        LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
                       cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
                       inode->i_ino, pg.off, pg.off);
        if (pg.count == 0) {
                CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
                       LPU64"\n", inode->i_ino, inode, i_size_read(inode),
                       page->mapping->host, i_size_read(page->mapping->host),
                       page->index, pg.off);
        }

        pg.flag = flags;

        if (cmd & OBD_BRW_WRITE)
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_WRITE,
                                   pg.count);
        else
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_READ,
                                   pg.count);
        oinfo.oi_oa = oa;
        oinfo.oi_md = lsm;
        /* NB partial write, so we might not have CAPA_OPC_OSS_READ capa */
        opc = cmd & OBD_BRW_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;
        oinfo.oi_capa = ll_osscapa_get(inode, opc);
        rc = obd_brw(cmd, ll_i2dtexp(inode), &oinfo, 1, &pg, NULL);
        capa_put(oinfo.oi_capa);
        if (rc == 0)
                obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
        else if (rc != -EIO)
                CERROR("error from obd_brw: rc = %d\n", rc);
        RETURN(rc);
}
/* this isn't where truncate starts.  roughly:
 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate. setattr_raw grabs
 * DLM lock on [size, EOF], i_mutex, ->lli_size_sem, and WRITE_I_ALLOC_SEM to
 * avoid races.
 *
 * must be called under ->lli_size_sem */
void ll_truncate(struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_info oinfo = { { { 0 } } };
        struct ost_lvb lvb;
        struct obdo oa;
        int rc;
        ENTRY;

        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu=%#Lx\n",inode->i_ino,
               inode->i_generation, inode, i_size_read(inode),
               i_size_read(inode));

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
        if (lli->lli_size_sem_owner != current) {
                EXIT;
                return;
        }

        if (!lli->lli_smd) {
                CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
                       inode->i_ino);
                GOTO(out_unlock, 0);
        }

        LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);

        /* XXX I'm pretty sure this is a hack to paper over a more fundamental
         * race condition. */
        lov_stripe_lock(lli->lli_smd);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lli->lli_smd, &lvb, 0);
        if (lvb.lvb_size == i_size_read(inode)) {
                CDEBUG(D_VFSTRACE, "skipping punch for obj "LPX64", %Lu=%#Lx\n",
                       lli->lli_smd->lsm_object_id, i_size_read(inode),
                       i_size_read(inode));
                lov_stripe_unlock(lli->lli_smd);
                GOTO(out_unlock, 0);
        }

        obd_adjust_kms(ll_i2dtexp(inode), lli->lli_smd, i_size_read(inode), 1);
        lov_stripe_unlock(lli->lli_smd);

        if (unlikely((ll_i2sbi(inode)->ll_flags & LL_SBI_CHECKSUM) &&
                     (i_size_read(inode) & ~CFS_PAGE_MASK))) {
                /* If the truncate leaves behind a partial page, update its
                 * checksum. */
                struct page *page = find_get_page(inode->i_mapping,
                                                  i_size_read(inode) >>
                                                  CFS_PAGE_SHIFT);
                if (page != NULL) {
                        struct ll_async_page *llap = llap_cast_private(page);
                        if (llap != NULL) {
                                char *kaddr = kmap_atomic(page, KM_USER0);
                                llap->llap_checksum =
                                        crc32_le(0, kaddr, CFS_PAGE_SIZE);
                                kunmap_atomic(kaddr, KM_USER0);
                        }
                        page_cache_release(page);
                }
        }

        CDEBUG(D_INFO, "calling punch for "LPX64" (new size %Lu=%#Lx)\n",
               lli->lli_smd->lsm_object_id, i_size_read(inode),
               i_size_read(inode));

        oinfo.oi_md = lli->lli_smd;
        oinfo.oi_policy.l_extent.start = i_size_read(inode);
        oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
        oinfo.oi_oa = &oa;
        oa.o_id = lli->lli_smd->lsm_object_id;
        oa.o_gr = lli->lli_smd->lsm_object_gr;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

        obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
                        OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                        OBD_MD_FLFID | OBD_MD_FLGENER);

        ll_inode_size_unlock(inode, 0);

        oinfo.oi_capa = ll_osscapa_get(inode, CAPA_OPC_OSS_TRUNC);
        rc = obd_punch_rqset(ll_i2dtexp(inode), &oinfo, NULL);
        ll_truncate_free_capa(oinfo.oi_capa);
        if (rc)
                CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
        else
                obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                              OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);
        EXIT;
        return;

 out_unlock:
        ll_inode_size_unlock(inode, 0);
} /* ll_truncate */
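/*
 * Worked example for the partial-page handling in ll_truncate() above
 * (illustrative only, assuming CFS_PAGE_SIZE == 4096): truncating to
 * 10240 leaves i_size & ~CFS_PAGE_MASK == 2048 nonzero, so the page at
 * index 10240 >> CFS_PAGE_SHIFT == 2 survives with a 2048-byte tail and
 * has its checksum recomputed, while the punch RPC clears the extent
 * [10240, OBD_OBJECT_EOF) on the storage targets.
 */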
int ll_prepare_write(struct file *file, struct page *page, unsigned from,
                     unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        obd_off offset = ((obd_off)page->index) << CFS_PAGE_SHIFT;
        struct obd_info oinfo = { { { 0 } } };
        struct brw_page pga;
        struct obdo oa;
        struct ost_lvb lvb;
        int rc = 0;
        ENTRY;

        LASSERT(PageLocked(page));
        (void)llap_cast_private(page); /* assertion */

        /* Check to see if we should return -EIO right away */
        pga.pg = page;
        pga.off = offset;
        pga.count = CFS_PAGE_SIZE;
        pga.flag = 0;

        oa.o_mode = inode->i_mode;
        oa.o_id = lsm->lsm_object_id;
        oa.o_gr = lsm->lsm_object_gr;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE |
                     OBD_MD_FLTYPE | OBD_MD_FLGROUP;
        obdo_from_inode(&oa, inode, OBD_MD_FLFID | OBD_MD_FLGENER);

        oinfo.oi_oa = &oa;
        oinfo.oi_md = lsm;
        rc = obd_brw(OBD_BRW_CHECK, ll_i2dtexp(inode), &oinfo, 1, &pga, NULL);
        if (rc)
                RETURN(rc);

        if (PageUptodate(page)) {
                LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
                RETURN(0);
        }

        /* We're completely overwriting an existing page, so _don't_ mark it
         * up-to-date until commit_write */
        if (from == 0 && to == CFS_PAGE_SIZE) {
                LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
                POISON_PAGE(page, 0x11);
                RETURN(0);
        }

        /* If we are writing to a new page, no need to read old data.  The
         * extent locking will have updated the KMS, and for our purposes
         * here we can treat it like i_size. */
        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        lov_stripe_unlock(lsm);
        if (lvb.lvb_size <= offset) {
                char *kaddr = kmap_atomic(page, KM_USER0);
                LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
                               lvb.lvb_size, offset);
                memset(kaddr, 0, CFS_PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                GOTO(prepare_done, rc = 0);
        }

        /* XXX could be an async ocp read.. read-ahead? */
        rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
        if (rc == 0) {
                /* bug 1598: don't clobber blksize */
                oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
                obdo_refresh_inode(inode, &oa, oa.o_valid);
        }

        EXIT;
 prepare_done:
        if (rc == 0)
                SetPageUptodate(page);

        RETURN(rc);
}
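/*
 * Illustrative sketch of the kms shortcut in ll_prepare_write() above
 * (not from the original source): with a known minimum size (kms) of
 * 8192 and a partial write to the page at offset 16384, no data for that
 * page can yet exist on the OSTs, so the page is zero-filled locally
 * instead of issuing a read RPC for it.
 */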
static int ll_ap_make_ready(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;

        LASSERTF(!(cmd & OBD_BRW_READ), "cmd %x page %p ino %lu index %lu\n",
                 cmd, page, page->mapping->host->i_ino, page->index);

        /* we're trying to write, but the page is locked.. come back later */
        if (TryLockPage(page))
                RETURN(-EAGAIN);

        LASSERT(!PageWriteback(page));

        /* if we left PageDirty we might get another writepage call
         * in the future.  list walkers are bright enough
         * to check page dirty so we can leave it on whatever list
         * it's on.  XXX also, we're called with the cli list so if
         * we got the page cache list we'd create a lock inversion
         * with the removepage path which gets the page lock then the
         * cli lock */
        LASSERTF(!PageWriteback(page), "cmd %x page %p ino %lu index %lu\n",
                 cmd, page, page->mapping->host->i_ino, page->index);
        clear_page_dirty_for_io(page);

        /* This actually clears the dirty bit in the radix tree. */
        set_page_writeback(page);

        LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
        page_cache_get(page);

        RETURN(0);
}
/* We have two reasons for giving llite the opportunity to change the
 * write length of a given queued page as it builds the RPC containing
 * the page:
 *
 * 1) Further extending writes may have landed in the page cache
 *    since a partial write first queued this page requiring us
 *    to write more from the page cache.  (No further races are possible,
 *    since by the time this is called, the page is locked.)
 * 2) We might have raced with truncate and want to avoid performing
 *    write RPCs that are just going to be thrown away by the
 *    truncate's punch on the storage targets.
 *
 * The kms serves these purposes as it is set at both truncate and extending
 * writes. */
static int ll_ap_refresh_count(void *data, int cmd)
{
        struct ll_inode_info *lli;
        struct ll_async_page *llap;
        struct lov_stripe_md *lsm;
        struct page *page;
        struct inode *inode;
        struct ost_lvb lvb;
        __u64 kms;
        ENTRY;

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(cmd != OBD_BRW_READ);

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        inode = page->mapping->host;
        lli = ll_i2info(inode);
        lsm = lli->lli_smd;

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;
        lov_stripe_unlock(lsm);

        /* catch race with truncate */
        if (((__u64)page->index << CFS_PAGE_SHIFT) >= kms)
                return 0;

        /* catch sub-page write at end of file */
        if (((__u64)page->index << CFS_PAGE_SHIFT) + CFS_PAGE_SIZE > kms)
                return kms % CFS_PAGE_SIZE;

        return CFS_PAGE_SIZE;
}
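/*
 * Worked example for ll_ap_refresh_count() above (illustrative only,
 * assuming CFS_PAGE_SIZE == 4096): with kms == 10240, a queued write to
 * page index 3 (offset 12288 >= kms) has raced with truncate and is
 * trimmed to 0 bytes; page index 2 spans the EOF and is trimmed to
 * 10240 % 4096 == 2048 bytes; any lower index sends the full 4096.
 */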
void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
{
        struct lov_stripe_md *lsm;
        obd_flag valid_flags;

        lsm = ll_i2info(inode)->lli_smd;

        oa->o_id = lsm->lsm_object_id;
        oa->o_gr = lsm->lsm_object_gr;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
        valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
        if (cmd & OBD_BRW_WRITE) {
                oa->o_valid |= OBD_MD_FLEPOCH;
                oa->o_easize = ll_i2info(inode)->lli_ioepoch;

                valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                               OBD_MD_FLUID | OBD_MD_FLGID |
                               OBD_MD_FLFID | OBD_MD_FLGENER;
        }

        obdo_from_inode(oa, inode, valid_flags);
}

static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);

        EXIT;
}

static void ll_ap_update_obdo(void *data, int cmd, struct obdo *oa,
                              obd_valid valid)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        obdo_from_inode(oa, llap->llap_page->mapping->host, valid);

        EXIT;
}
static struct obd_capa *ll_ap_lookup_capa(void *data, int cmd)
{
        struct ll_async_page *llap = LLAP_FROM_COOKIE(data);
        int opc = cmd & OBD_BRW_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;

        return ll_osscapa_get(llap->llap_page->mapping->host, opc);
}
static struct obd_async_page_ops ll_async_page_ops = {
        .ap_make_ready = ll_ap_make_ready,
        .ap_refresh_count = ll_ap_refresh_count,
        .ap_fill_obdo = ll_ap_fill_obdo,
        .ap_update_obdo = ll_ap_update_obdo,
        .ap_completion = ll_ap_completion,
        .ap_lookup_capa = ll_ap_lookup_capa,
};
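/*
 * How these callbacks are exercised, as a rough sketch (illustrative,
 * not a normative description of the osc layer): while assembling a
 * write RPC the osc calls ap_make_ready() to lock the page and mark it
 * for writeback, ap_refresh_count() to clamp the transfer size against
 * the kms, ap_fill_obdo()/ap_update_obdo() to stamp inode attributes
 * into the obdo, ap_lookup_capa() to fetch an OSS capability, and
 * finally ap_completion() once the RPC succeeds or fails.
 */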
struct ll_async_page *llap_cast_private(struct page *page)
{
        struct ll_async_page *llap = (struct ll_async_page *)page_private(page);

        LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
                 "page %p private %lu gave magic %d which != %d\n",
                 page, page_private(page), llap->llap_magic, LLAP_MAGIC);

        return llap;
}
/* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
 *
 * There is an llap attached onto every page in lustre, linked off @sbi.
 * We add an llap to the list so we don't lose our place during list walking.
 * If llaps in the list are being moved they will only move to the end
 * of the LRU, and we aren't terribly interested in those pages here (we
 * start at the beginning of the list where the least-used llaps are). */
int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
{
        struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
        unsigned long total, want, count = 0;

        total = sbi->ll_async_page_count;

        /* There can be a large number of llaps (600k or more in a large
         * memory machine) so the VM 1/6 shrink ratio is likely too much.
         * Since we are freeing pages also, we don't necessarily want to
         * shrink so much.  Limit to 40MB of pages + llaps per call. */
        if (shrink_fraction == 0)
                want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
        else
                want = (total + shrink_fraction - 1) / shrink_fraction;

        if (want > 40 << (20 - CFS_PAGE_SHIFT))
                want = 40 << (20 - CFS_PAGE_SHIFT);

        CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
               want, total, shrink_fraction);

        spin_lock(&sbi->ll_lock);
        list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);

        while (total-- > 0 && count < want) {
                struct page *page;
                int keep;

                if (unlikely(need_resched())) {
                        spin_unlock(&sbi->ll_lock);
                        cond_resched();
                        spin_lock(&sbi->ll_lock);
                }

                llap = llite_pglist_next_llap(sbi,&dummy_llap.llap_pglist_item);
                list_del_init(&dummy_llap.llap_pglist_item);
                if (llap == NULL)
                        break;

                page = llap->llap_page;
                LASSERT(page != NULL);

                list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);

                /* Page needs/undergoing IO */
                if (TryLockPage(page)) {
                        LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
                        continue;
                }

                keep = (llap->llap_write_queued || PageDirty(page) ||
                        PageWriteback(page) || (!PageUptodate(page) &&
                        llap->llap_origin != LLAP_ORIGIN_READAHEAD));

                LL_CDEBUG_PAGE(D_PAGE, page,"%s LRU page: %s%s%s%s%s origin %s\n",
                               keep ? "keep" : "drop",
                               llap->llap_write_queued ? "wq " : "",
                               PageDirty(page) ? "pd " : "",
                               PageUptodate(page) ? "" : "!pu ",
                               PageWriteback(page) ? "wb" : "",
                               llap->llap_defer_uptodate ? "" : "!du",
                               llap_origins[llap->llap_origin]);

                /* If page is dirty or undergoing IO don't discard it */
                if (keep) {
                        unlock_page(page);
                        continue;
                }

                page_cache_get(page);
                spin_unlock(&sbi->ll_lock);

                if (page->mapping != NULL) {
                        ll_teardown_mmaps(page->mapping,
                                          (__u64)page->index << CFS_PAGE_SHIFT,
                                          ((__u64)page->index << CFS_PAGE_SHIFT)|
                                          ~CFS_PAGE_MASK);
                        if (!PageDirty(page) && !page_mapped(page)) {
                                ll_ra_accounting(llap, page->mapping);
                                ll_truncate_complete_page(page);
                                count++;
                        } else {
                                LL_CDEBUG_PAGE(D_PAGE, page, "Not dropping page"
                                               " because it is %s%s\n",
                                               PageDirty(page) ? "dirty " : "",
                                               page_mapped(page) ? "mapped" : "");
                        }
                }
                unlock_page(page);
                page_cache_release(page);

                spin_lock(&sbi->ll_lock);
        }
        list_del(&dummy_llap.llap_pglist_item);
        spin_unlock(&sbi->ll_lock);

        CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
               count, want, total);

        return count;
}
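/*
 * Sizing example for llap_shrink_cache() above (illustrative only,
 * assuming CFS_PAGE_SHIFT == 12): with 600000 cached llaps and
 * shrink_fraction == 6 the fraction rule asks for 100000 pages, but the
 * cap of 40 << (20 - 12) == 10240 pages (40MB) wins, so at most 10240
 * pages are dropped per call.
 */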
struct ll_async_page *llap_from_page(struct page *page, unsigned origin)
{
        struct ll_async_page *llap;
        struct obd_export *exp;
        struct inode *inode = page->mapping->host;
        struct ll_sb_info *sbi;
        int rc;
        ENTRY;

        if (unlikely(inode == NULL)) {
                static int triggered;

                if (!triggered) {
                        LL_CDEBUG_PAGE(D_ERROR, page, "Bug 10047. Wrong anon "
                                       "page received\n");
                        libcfs_debug_dumpstack(NULL);
                        triggered = 1;
                }
                RETURN(ERR_PTR(-EINVAL));
        }

        sbi = ll_i2sbi(inode);
        LASSERT(ll_async_page_slab);
        LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);

        llap = llap_cast_private(page);
        if (llap != NULL) {
                /* move to end of LRU list, except when page is just about to
                 * die */
                if (origin != LLAP_ORIGIN_REMOVEPAGE) {
                        spin_lock(&sbi->ll_lock);
                        sbi->ll_pglist_gen++;
                        list_del_init(&llap->llap_pglist_item);
                        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
                        spin_unlock(&sbi->ll_lock);
                }
                GOTO(out, llap);
        }

        exp = ll_i2dtexp(page->mapping->host);
        if (exp == NULL)
                RETURN(ERR_PTR(-EINVAL));

        /* limit the number of lustre-cached pages */
        if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
                llap_shrink_cache(sbi, 0);

        OBD_SLAB_ALLOC(llap, ll_async_page_slab, CFS_ALLOC_STD,
                       ll_async_page_slab_size);
        if (llap == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        llap->llap_magic = LLAP_MAGIC;
        llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));

        rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
                                 (obd_off)page->index << CFS_PAGE_SHIFT,
                                 &ll_async_page_ops, llap, &llap->llap_cookie);
        if (rc) {
                OBD_SLAB_FREE(llap, ll_async_page_slab,
                              ll_async_page_slab_size);
                RETURN(ERR_PTR(rc));
        }

        CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
               page, llap->llap_cookie, (obd_off)page->index << CFS_PAGE_SHIFT);
        /* also zeroing the PRIVBITS low order bitflags */
        __set_page_ll_data(page, llap);
        llap->llap_page = page;
        spin_lock(&sbi->ll_lock);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count++;
        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
        INIT_LIST_HEAD(&llap->llap_pending_write);
        spin_unlock(&sbi->ll_lock);

        if (unlikely(sbi->ll_flags & LL_SBI_CHECKSUM)) {
                __u32 csum = 0;
                char *kaddr = kmap_atomic(page, KM_USER0);
                csum = crc32_le(csum, kaddr, CFS_PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                if (origin == LLAP_ORIGIN_READAHEAD ||
                    origin == LLAP_ORIGIN_READPAGE) {
                        llap->llap_checksum = 0;
                } else if (origin == LLAP_ORIGIN_COMMIT_WRITE ||
                           llap->llap_checksum == 0) {
                        llap->llap_checksum = csum;
                        CDEBUG(D_PAGE, "page %p cksum %x\n", page, csum);
                } else if (llap->llap_checksum == csum) {
                        /* origin == LLAP_ORIGIN_WRITEPAGE */
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        /* origin == LLAP_ORIGIN_WRITEPAGE */
                        LL_CDEBUG_PAGE(D_ERROR, page, "old cksum %x != new "
                                       "%x!\n", llap->llap_checksum, csum);
                }
        }

 out:
        llap->llap_origin = origin;
        RETURN(llap);
}
static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
                               struct ll_async_page *llap,
                               unsigned to, obd_flag async_flags)
{
        unsigned long size_index = i_size_read(inode) >> CFS_PAGE_SHIFT;
        struct obd_io_group *oig;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc, noquot = llap->llap_ignore_quota ? OBD_BRW_NOQUOTA : 0;
        ENTRY;

        /* _make_ready only sees llap once we've unlocked the page */
        llap->llap_write_queued = 1;
        rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, 0, 0, async_flags);
        if (rc == 0) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
                GOTO(out, 0);
        }

        llap->llap_write_queued = 0;
        /* Do not pass llap here as it is sync write. */
        llap_write_pending(inode, NULL);

        rc = oig_init(&oig);
        if (rc)
                GOTO(out, rc);

        /* make full-page requests if we are not at EOF (bug 4410) */
        if (to != CFS_PAGE_SIZE && llap->llap_page->index < size_index) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write before EOF: size_index %lu, to %d\n",
                               size_index, to);
                to = CFS_PAGE_SIZE;
        } else if (to != CFS_PAGE_SIZE && llap->llap_page->index == size_index) {
                int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write at EOF: size_index %lu, to %d/%d\n",
                               size_index, to, size_to);
                if (to < size_to)
                        to = size_to;
        }

        /* compare the checksum once before the page leaves llite */
        if (unlikely((sbi->ll_flags & LL_SBI_CHECKSUM) &&
                     llap->llap_checksum != 0)) {
                __u32 csum = 0;
                struct page *page = llap->llap_page;
                char *kaddr = kmap_atomic(page, KM_USER0);
                csum = crc32_le(csum, kaddr, CFS_PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                if (llap->llap_checksum == csum) {
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        CERROR("page %p old cksum %x != new cksum %x!\n",
                               page, llap->llap_checksum, csum);
                }
        }

        rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, to, 0, ASYNC_READY | ASYNC_URGENT |
                                ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
        if (rc)
                GOTO(free_oig, rc);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
        if (rc)
                GOTO(free_oig, rc);

        rc = oig_wait(oig);

        if (!rc && async_flags & ASYNC_READY) {
                unlock_page(llap->llap_page);
                if (PageWriteback(llap->llap_page)) {
                        end_page_writeback(llap->llap_page);
                }
        }

        if (rc == 0 && llap_write_complete(inode, llap))
                ll_queue_done_writing(inode, 0);

        LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);

 free_oig:
        oig_release(oig);
 out:
        RETURN(rc);
}
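/*
 * Worked example for the bug 4410 widening in queue_or_sync_write()
 * above (illustrative only, assuming CFS_PAGE_SIZE == 4096): with
 * i_size == 10240 (size_index 2), a 100-byte sync write to page index 1
 * is widened to the full 4096 bytes, while the same write to page
 * index 2 is only widened to size_to == 10240 & ~CFS_PAGE_MASK == 2048,
 * the number of valid bytes in the EOF page.
 */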
/* update our write count to account for i_size increases that may have
 * happened since we've queued the page for io. */

/* be careful not to return success without setting the page Uptodate or
 * the next pass through prepare_write will read in stale data from disk. */
int ll_commit_write(struct file *file, struct page *page, unsigned from,
                    unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_export *exp;
        struct ll_async_page *llap;
        loff_t size;
        int rc = 0;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
        LASSERT(inode == file->f_dentry->d_inode);
        LASSERT(PageLocked(page));

        CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
               inode, page, from, to, page->index);

        llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                RETURN(-EINVAL);

        llap->llap_ignore_quota = capable(CAP_SYS_RESOURCE);

        /* queue a write for some time in the future the first time we
         * dirty the page.
         *
         * This is different from what other file systems do: they usually
         * just mark page (and some of its buffers) dirty and rely on
         * balance_dirty_pages() to start a write-back.  Lustre wants
         * write-back to be started earlier for the following reasons:
         *
         * (1) with a large number of clients we need to limit the amount
         *     of cached data on the clients a lot;
         *
         * (2) large compute jobs generally want compute-only then io-only
         *     and the IO should complete as quickly as possible;
         *
         * (3) IO is batched up to the RPC size and is async until the
         *     client max cache is hit
         *     (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
         */
        if (!PageDirty(page)) {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_MISSES, 1);

                rc = queue_or_sync_write(exp, inode, llap, to, 0);
                if (rc)
                        GOTO(out, rc);
        } else {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_HITS, 1);
        }

        /* put the page in the page cache, from now on ll_removepage is
         * responsible for cleaning up the llap.
         * only set the page dirty when it's queued to be written out */
        if (llap->llap_write_queued)
                set_page_dirty(page);

        size = (((obd_off)page->index) << CFS_PAGE_SHIFT) + to;
        ll_inode_size_lock(inode, 0);
        if (rc == 0) {
                lov_stripe_lock(lsm);
                obd_adjust_kms(exp, lsm, size, 0);
                lov_stripe_unlock(lsm);
                if (size > i_size_read(inode))
                        i_size_write(inode, size);
                SetPageUptodate(page);
        } else if (size > i_size_read(inode)) {
                /* this page is beyond the pale of i_size, so it can't be
                 * truncated in ll_p_r_e during lock revoking.  we must
                 * teardown our book-keeping here. */
                ll_removepage(page);
        }
        ll_inode_size_unlock(inode, 0);
 out:
        RETURN(rc);
}
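/*
 * Worked example for the size update in ll_commit_write() above
 * (illustrative only, assuming CFS_PAGE_SIZE == 4096): committing bytes
 * [0, 100) of page index 2 gives size = (2 << 12) + 100 = 8292; if
 * i_size was 8192 it is raised to 8292 after obd_adjust_kms() has
 * recorded the same value.
 */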
static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        unsigned long ret;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
        ra->ra_cur_pages += ret;
        spin_unlock(&sbi->ll_lock);

        RETURN(ret);
}
static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;

        spin_lock(&sbi->ll_lock);
        LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
                 ra->ra_cur_pages, len);
        ra->ra_cur_pages -= len;
        spin_unlock(&sbi->ll_lock);
}
/* called for each page in a completed rpc. */
int ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        LASSERT(PageLocked(page));
        LASSERT(CheckWriteback(page,cmd));

        LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);

        if (cmd & OBD_BRW_READ && llap->llap_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);

        if (rc == 0) {
                if (cmd & OBD_BRW_READ) {
                        if (!llap->llap_defer_uptodate)
                                SetPageUptodate(page);
                } else {
                        llap->llap_write_queued = 0;
                }
                ClearPageError(page);
        } else {
                if (cmd & OBD_BRW_READ) {
                        llap->llap_defer_uptodate = 0;
                }
                SetPageError(page);
                if (rc == -ENOSPC)
                        set_bit(AS_ENOSPC, &page->mapping->flags);
                else
                        set_bit(AS_EIO, &page->mapping->flags);
        }

        if (cmd & OBD_BRW_WRITE) {
                /* Only if rc == 0 (the write succeeded) may this page be
                 * deleted from the pending_writing list. */
                if (rc == 0 && llap_write_complete(page->mapping->host, llap))
                        ll_queue_done_writing(page->mapping->host, 0);
        }

        if (PageWriteback(page)) {
                end_page_writeback(page);
        }
        page_cache_release(page);

        RETURN(0);
}
/* the kernel calls us here when a page is unhashed from the page cache.
 * the page will be locked and the kernel is holding a spinlock, so
 * we need to be careful.  we're just tearing down our book-keeping
 * here. */
void ll_removepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        LASSERT(!in_interrupt());

        /* sync pages or failed read pages can leave pages in the page
         * cache that don't have our data associated with them anymore */
        if (page_private(page) == 0) {
                EXIT;
                return;
        }

        LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");

        exp = ll_i2dtexp(inode);
        if (exp == NULL) {
                CERROR("page %p ind %lu gave null export\n", page, page->index);
                EXIT;
                return;
        }

        llap = llap_from_page(page, LLAP_ORIGIN_REMOVEPAGE);
        if (IS_ERR(llap)) {
                CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
                       page->index, PTR_ERR(llap));
                EXIT;
                return;
        }

        if (llap_write_complete(inode, llap))
                ll_queue_done_writing(inode, 0);

        rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
                                     llap->llap_cookie);
        if (rc != 0)
                CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);

        /* this unconditional free is only safe because the page lock
         * is providing exclusivity to memory pressure/truncate/writeback.. */
        __clear_page_ll_data(page);

        spin_lock(&sbi->ll_lock);
        if (!list_empty(&llap->llap_pglist_item))
                list_del_init(&llap->llap_pglist_item);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count--;
        spin_unlock(&sbi->ll_lock);
        OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
        EXIT;
}
static int ll_page_matches(struct page *page, int fd_flags)
{
        struct lustre_handle match_lockh = {0};
        struct inode *inode = page->mapping->host;
        ldlm_policy_data_t page_extent;
        int flags, matches;
        ENTRY;

        if (unlikely(fd_flags & LL_FILE_GROUP_LOCKED))
                RETURN(1);

        page_extent.l_extent.start = (__u64)page->index << CFS_PAGE_SHIFT;
        page_extent.l_extent.end =
                page_extent.l_extent.start + CFS_PAGE_SIZE - 1;
        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (!(fd_flags & LL_FILE_READAHEAD))
                flags |= LDLM_FL_CBPENDING;
        matches = obd_match(ll_i2sbi(inode)->ll_dt_exp,
                            ll_i2info(inode)->lli_smd, LDLM_EXTENT,
                            &page_extent, LCK_PR | LCK_PW, &flags, inode,
                            &match_lockh);
        RETURN(matches);
}
static int ll_issue_page_read(struct obd_export *exp,
                              struct ll_async_page *llap,
                              struct obd_io_group *oig, int defer)
{
        struct page *page = llap->llap_page;
        int rc;
        ENTRY;

        page_cache_get(page);
        llap->llap_defer_uptodate = defer;
        llap->llap_ra_used = 0;
        rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
                                NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
                                CFS_PAGE_SIZE, 0, ASYNC_COUNT_STABLE |
                                ASYNC_READY | ASYNC_URGENT);
        if (rc) {
                LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
                page_cache_release(page);
        }
        RETURN(rc);
}
static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
{
        LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
        ra->ra_stats[which]++;
}

static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
        struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
        struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;

        spin_lock(&sbi->ll_lock);
        ll_ra_stats_inc_unlocked(ra, which);
        spin_unlock(&sbi->ll_lock);
}

void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
{
        if (!llap->llap_defer_uptodate || llap->llap_ra_used)
                return;

        ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
}
#define RAS_CDEBUG(ras) \
        CDEBUG(D_READA,                                                      \
               "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu\n", \
               ras->ras_last_readpage, ras->ras_consecutive_requests,        \
               ras->ras_consecutive_pages, ras->ras_window_start,            \
               ras->ras_window_len, ras->ras_next_readahead,                 \
               ras->ras_requests, ras->ras_request_index);
static int index_in_window(unsigned long index, unsigned long point,
                           unsigned long before, unsigned long after)
{
        unsigned long start = point - before, end = point + after;

        /* clamp the window on unsigned wrap-around */
        if (start > point)
                start = 0;
        if (end < point)
                end = ~0;

        return start <= index && index <= end;
}
static struct ll_readahead_state *ll_ras_get(struct file *f)
{
        struct ll_file_data *fd;

        fd = LUSTRE_FPRIVATE(f);
        return &fd->fd_ras;
}

void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
{
        struct ll_readahead_state *ras;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        ras->ras_requests++;
        ras->ras_request_index = 0;
        ras->ras_consecutive_requests++;
        rar->lrr_reader = current;

        list_add(&rar->lrr_linkage, &ras->ras_read_beads);
        spin_unlock(&ras->ras_lock);
}

void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
{
        struct ll_readahead_state *ras;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        list_del_init(&rar->lrr_linkage);
        spin_unlock(&ras->ras_lock);
}

static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
{
        struct ll_ra_read *scan;

        list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
                if (scan->lrr_reader == current)
                        return scan;
        }
        return NULL;
}

struct ll_ra_read *ll_ra_read_get(struct file *f)
{
        struct ll_readahead_state *ras;
        struct ll_ra_read *bead;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        bead = ll_ra_read_get_locked(ras);
        spin_unlock(&ras->ras_lock);

        return bead;
}
static int ll_readahead(struct ll_readahead_state *ras,
                        struct obd_export *exp, struct address_space *mapping,
                        struct obd_io_group *oig, int flags)
{
        unsigned long i, start = 0, end = 0, reserved;
        struct ll_async_page *llap;
        struct page *page;
        int rc, ret = 0, match_failed = 0;
        __u64 kms;
        unsigned int gfp_mask;
        struct inode *inode;
        struct lov_stripe_md *lsm;
        struct ll_ra_read *bead;
        struct ost_lvb lvb;
        ENTRY;

        inode = mapping->host;
        lsm = ll_i2info(inode)->lli_smd;

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;
        lov_stripe_unlock(lsm);
        if (kms == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
                RETURN(0);
        }

        spin_lock(&ras->ras_lock);
        bead = ll_ra_read_get_locked(ras);
        /* Enlarge the RA window to encompass the full read */
        if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
            bead->lrr_start + bead->lrr_count) {
                ras->ras_window_len = bead->lrr_start + bead->lrr_count -
                                      ras->ras_window_start;
        }
        /* Reserve a part of the read-ahead window that we'll be issuing */
        if (ras->ras_window_len) {
                start = ras->ras_next_readahead;
                end = ras->ras_window_start + ras->ras_window_len - 1;
        }
        if (end != 0) {
                /* Truncate RA window to end of file */
                end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
                ras->ras_next_readahead = max(end, end + 1);
                RAS_CDEBUG(ras);
        }
        spin_unlock(&ras->ras_lock);

        if (end == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
                RETURN(0);
        }

        reserved = ll_ra_count_get(ll_i2sbi(inode), end - start + 1);
        if (reserved < end - start + 1)
                ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);

        gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
#ifdef __GFP_NOWARN
        gfp_mask |= __GFP_NOWARN;
#endif

        for (i = start; reserved > 0 && !match_failed && i <= end; i++) {
                /* skip locked pages from previous readpage calls */
                page = grab_cache_page_nowait_gfp(mapping, i, gfp_mask);
                if (page == NULL) {
                        ll_ra_stats_inc(mapping, RA_STAT_FAILED_GRAB_PAGE);
                        CDEBUG(D_READA, "g_c_p_n failed\n");
                        break;
                }

                /* Check if page was truncated or reclaimed */
                if (page->mapping != mapping) {
                        ll_ra_stats_inc(mapping, RA_STAT_WRONG_GRAB_PAGE);
                        CDEBUG(D_READA, "g_c_p_n returned invalid page\n");
                        goto next_page;
                }

                /* we do this first so that we can see the page in the /proc
                 * accounting */
                llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
                if (IS_ERR(llap) || llap->llap_defer_uptodate)
                        goto next_page;

                /* skip completed pages */
                if (Page_Uptodate(page))
                        goto next_page;

                /* bail when we hit the end of the lock. */
                if ((rc = ll_page_matches(page, flags|LL_FILE_READAHEAD)) <= 0){
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "lock match failed: rc %d\n", rc);
                        ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
                        match_failed = 1;
                        goto next_page;
                }

                rc = ll_issue_page_read(exp, llap, oig, 1);
                if (rc == 0) {
                        reserved--;
                        ret++;
                        LL_CDEBUG_PAGE(D_READA| D_PAGE, page,
                                       "started read-ahead\n");
                }
                if (rc) {
        next_page:
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "skipping read-ahead\n");

                        unlock_page(page);
                }
                page_cache_release(page);
        }

        LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
        if (reserved != 0)
                ll_ra_count_put(ll_i2sbi(inode), reserved);
        if (i == end + 1 && end == (kms >> CFS_PAGE_SHIFT))
                ll_ra_stats_inc(mapping, RA_STAT_EOF);

        /* if we didn't get to the end of the region we reserved from
         * the ras we need to go back and update the ras so that the
         * next read-ahead tries from where we left off.  we only do so
         * if the region we failed to issue read-ahead on is still ahead
         * of the app and behind the next index to start read-ahead from */
        if (i != end + 1) {
                spin_lock(&ras->ras_lock);
                if (i < ras->ras_next_readahead &&
                    index_in_window(i, ras->ras_window_start, 0,
                                    ras->ras_window_len)) {
                        ras->ras_next_readahead = i;
                        RAS_CDEBUG(ras);
                }
                spin_unlock(&ras->ras_lock);
        }

        RETURN(ret);
}
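/*
 * Example of the window bookkeeping in ll_readahead() above
 * (illustrative only): with ras_window_start == 100, ras_window_len ==
 * 32 and ras_next_readahead already advanced to 132, failing to issue
 * read-ahead at i == 110 rewinds ras_next_readahead to 110, so the next
 * call resumes from the page that was skipped.
 */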
static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_window_start = index & (~((1024 * 1024 >> CFS_PAGE_SHIFT) - 1));
}

/* called with the ras_lock held or from places where it doesn't matter */
static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_last_readpage = index;
        ras->ras_consecutive_requests = 0;
        ras->ras_consecutive_pages = 0;
        ras->ras_window_len = 0;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start, index);

        RAS_CDEBUG(ras);
}

void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
        spin_lock_init(&ras->ras_lock);
        ras_reset(ras, 0);
        ras->ras_requests = 0;
        INIT_LIST_HEAD(&ras->ras_read_beads);
}
static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
                       struct ll_readahead_state *ras, unsigned long index,
                       unsigned hit)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        int zero = 0;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        spin_lock(&ras->ras_lock);

        ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);

        /* reset the read-ahead window in two cases.  First when the app seeks
         * or reads to some other part of the file.  Secondly if we get a
         * read-ahead miss that we think we've previously issued.  This can
         * be a symptom of there being so many read-ahead pages that the VM is
         * reclaiming it before we get to it. */
        if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
        } else if (!hit && ras->ras_window_len &&
                   index < ras->ras_next_readahead &&
                   index_in_window(index, ras->ras_window_start, 0,
                                   ras->ras_window_len)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
        }

        /* On the second access to a file smaller than the tunable
         * ra_max_read_ahead_whole_pages trigger RA on all pages in the
         * file up to ra_max_pages.  This is simply a best effort and
         * only occurs once per open file.  Normal RA behavior is reverted
         * to for subsequent IO.  The mmap case does not increment
         * ras_requests and thus can never trigger this behavior. */
        if (ras->ras_requests == 2 && !ras->ras_request_index) {
                __u64 kms_pages;

                kms_pages = (i_size_read(inode) + CFS_PAGE_SIZE - 1) >>
                            CFS_PAGE_SHIFT;

                CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
                       ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages);

                if (kms_pages &&
                    kms_pages <= ra->ra_max_read_ahead_whole_pages) {
                        ras->ras_window_start = 0;
                        ras->ras_last_readpage = 0;
                        ras->ras_next_readahead = 0;
                        ras->ras_window_len = min(ra->ra_max_pages,
                                ra->ra_max_read_ahead_whole_pages);
                        GOTO(out_unlock, 0);
                }
        }

        if (zero) {
                ras_reset(ras, index);
                GOTO(out_unlock, 0);
        }

        ras->ras_last_readpage = index;
        ras->ras_consecutive_pages++;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start,
                                      ras->ras_next_readahead);

        /* Trigger RA in the mmap case where ras_consecutive_requests
         * is not incremented and thus can't be used to trigger RA */
        if (!ras->ras_window_len && ras->ras_consecutive_pages == 3) {
                ras->ras_window_len = 1024 * 1024 >> CFS_PAGE_SHIFT;
                GOTO(out_unlock, 0);
        }

        /* The initial ras_window_len is set to the request size.  To avoid
         * uselessly reading and discarding pages for random IO the window is
         * only increased once per consecutive request received. */
        if (ras->ras_consecutive_requests > 1 && !ras->ras_request_index) {
                ras->ras_window_len = min(ras->ras_window_len +
                                          (1024 * 1024 >> CFS_PAGE_SHIFT),
                                          ra->ra_max_pages);
        }

        EXIT;
 out_unlock:
        RAS_CDEBUG(ras);
        ras->ras_request_index++;
        spin_unlock(&ras->ras_lock);
        spin_unlock(&sbi->ll_lock);
        return;
}
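/*
 * Window growth example for ras_update() above (illustrative only,
 * assuming CFS_PAGE_SHIFT == 12 and ra_max_pages == 1024): the window
 * starts at the request size and grows by 1MB (256 pages) once per
 * consecutive request, so a streaming reader reaches the 1024-page cap
 * after a few requests, while a single random read never grows it.
 */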
int ll_writepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_export *exp;
        struct ll_async_page *llap;
        int rc = 0;
        ENTRY;

        LASSERT(!PageDirty(page));
        LASSERT(PageLocked(page));

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        LASSERT(!PageWriteback(page));
        set_page_writeback(page);

        page_cache_get(page);
        if (llap->llap_write_queued) {
                LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
                rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
                                         llap->llap_cookie,
                                         ASYNC_READY | ASYNC_URGENT);
        } else {
                rc = queue_or_sync_write(exp, inode, llap, CFS_PAGE_SIZE,
                                         ASYNC_READY | ASYNC_URGENT);
        }
        if (rc)
                page_cache_release(page);
 out:
        if (rc) {
                if (!lli->lli_async_rc)
                        lli->lli_async_rc = rc;
                /* re-dirty page on error so it retries write */
                if (PageWriteback(page)) {
                        end_page_writeback(page);
                }
                /* resend page only for not started IO */
                if (!PageError(page))
                        ll_redirty_page(page);
        }
        unlock_page(page);
        RETURN(rc);
}
/*
 * for now we do our readpage the same on both 2.4 and 2.5.  The kernel's
 * read-ahead assumes it is valid to issue readpage all the way up to
 * i_size, but our dlm locks make that not the case.  We disable the
 * kernel's read-ahead and do our own by walking ahead in the page cache
 * checking for dlm lock coverage.  the main difference between 2.4 and
 * 2.6 is how read-ahead gets batched and issued, but we're using our own,
 * so they look the same.
 */
int ll_readpage(struct file *filp, struct page *page)
{
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct obd_io_group *oig = NULL;
        int rc;
        ENTRY;

        LASSERT(PageLocked(page));
        LASSERT(!PageUptodate(page));
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset=%Lu=%#Lx\n",
               inode->i_ino, inode->i_generation, inode,
               (((loff_t)page->index) << CFS_PAGE_SHIFT),
               (((loff_t)page->index) << CFS_PAGE_SHIFT));
        LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);

        if (!ll_i2info(inode)->lli_smd) {
                /* File with no objects - one big hole */
                /* We use ll_truncate_complete_page() here only because
                 * remove_from_page_cache() is not exported; the page is
                 * then brought back up to date. */
                ll_truncate_complete_page(page);
                clear_page(kmap(page));
                kunmap(page);
                SetPageUptodate(page);
                unlock_page(page);
                RETURN(0);
        }

        rc = oig_init(&oig);
        if (rc < 0)
                GOTO(out, rc);

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page, LLAP_ORIGIN_READPAGE);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
                ras_update(ll_i2sbi(inode), inode, &fd->fd_ras, page->index,
                           llap->llap_defer_uptodate);

        if (llap->llap_defer_uptodate) {
                /* This is the callpath if we got the page from a readahead */
                llap->llap_ra_used = 1;
                rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                                  fd->fd_flags);
                if (rc > 0)
                        obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
                                             NULL, oig);
                LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
                SetPageUptodate(page);
                unlock_page(page);
                GOTO(out_oig, rc = 0);
        }

        if (likely((fd->fd_flags & LL_FILE_IGNORE_LOCK) == 0)) {
                rc = ll_page_matches(page, fd->fd_flags);
                if (rc < 0) {
                        LL_CDEBUG_PAGE(D_ERROR, page,
                                       "lock match failed: rc %d\n", rc);
                        GOTO(out, rc);
                }

                if (rc == 0) {
                        CWARN("ino %lu page %lu (%llu) not covered by "
                              "a lock (mmap?).  check debug logs.\n",
                              inode->i_ino, page->index,
                              (long long)page->index << CFS_PAGE_SHIFT);
                }
        }

        rc = ll_issue_page_read(exp, llap, oig, 0);
        if (rc)
                GOTO(out, rc);

        LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
        /* We have just requested the actual page we want, see if we can tack
         * on some readahead to that page's RPC before it is sent. */
        if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
                ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                             fd->fd_flags);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);