/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Lustre Lite I/O page cache routines shared by different kernel revs
 *
 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/autoconf.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>
#define DEBUG_SUBSYSTEM S_LLITE

//#include <lustre_mdc.h>
#include <lustre_lite.h>
#include <obd_cksum.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>
#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
             pos = n, n = pos->prev )
#endif
cfs_mem_cache_t *ll_async_page_slab = NULL;
size_t ll_async_page_slab_size = 0;
/* SYNCHRONOUS I/O to object storage for an inode */
static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
                  struct page *page, int flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_info oinfo = { { { 0 } } };
        struct brw_page pg;
        int opc, rc;
        ENTRY;

        pg.pg = page;
        pg.off = ((obd_off)page->index) << CFS_PAGE_SHIFT;

        if ((cmd & OBD_BRW_WRITE) && (pg.off+CFS_PAGE_SIZE>i_size_read(inode)))
                pg.count = i_size_read(inode) % CFS_PAGE_SIZE;
        else
                pg.count = CFS_PAGE_SIZE;

        LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
                       cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
                       inode->i_ino, pg.off, pg.off);
        if (pg.count == 0) {
                CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
                       LPU64"\n", inode->i_ino, inode, i_size_read(inode),
                       page->mapping->host, i_size_read(page->mapping->host),
                       page->index, pg.off);
        }
        pg.flag = flags;

        if (cmd & OBD_BRW_WRITE)
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_WRITE,
                                   pg.count);
        else
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_READ,
                                   pg.count);
        oinfo.oi_oa = oa;
        oinfo.oi_md = lsm;
        /* NB partial write, so we might not have CAPA_OPC_OSS_READ capa */
        opc = cmd & OBD_BRW_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;
        oinfo.oi_capa = ll_osscapa_get(inode, opc);
        rc = obd_brw(cmd, ll_i2dtexp(inode), &oinfo, 1, &pg, NULL);
        capa_put(oinfo.oi_capa);
        if (rc == 0)
                obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
        else if (rc != -EIO)
                CERROR("error from obd_brw: rc = %d\n", rc);
        RETURN(rc);
}
/* this isn't where truncate starts.  roughly:
 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate.  setattr_raw grabs a
 * DLM lock on [size, EOF], i_mutex, ->lli_size_sem, and WRITE_I_ALLOC_SEM to
 * avoid races.
 *
 * must be called under ->lli_size_sem */
void ll_truncate(struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_info oinfo = { { { 0 } } };
        struct ost_lvb lvb;
        struct obdo oa;
        int rc;
        ENTRY;
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu=%#Lx\n",inode->i_ino,
               inode->i_generation, inode, i_size_read(inode),
               i_size_read(inode));

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
        if (lli->lli_size_sem_owner != current) {
                EXIT;
                return;
        }

        if (!lli->lli_smd) {
                CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
                       inode->i_ino);
                GOTO(out_unlock, 0);
        }

        LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);

        /* XXX I'm pretty sure this is a hack to paper over a more fundamental
         * race condition. */
        lov_stripe_lock(lli->lli_smd);
        inode_init_lvb(inode, &lvb);
        rc = obd_merge_lvb(ll_i2dtexp(inode), lli->lli_smd, &lvb, 0);
        if (lvb.lvb_size == i_size_read(inode) && rc == 0) {
                CDEBUG(D_VFSTRACE, "skipping punch for obj "LPX64", %Lu=%#Lx\n",
                       lli->lli_smd->lsm_object_id, i_size_read(inode),
                       i_size_read(inode));
                lov_stripe_unlock(lli->lli_smd);
                GOTO(out_unlock, 0);
        }

        obd_adjust_kms(ll_i2dtexp(inode), lli->lli_smd, i_size_read(inode), 1);
        lov_stripe_unlock(lli->lli_smd);

        if (unlikely((ll_i2sbi(inode)->ll_flags & LL_SBI_CHECKSUM) &&
                     (i_size_read(inode) & ~CFS_PAGE_MASK))) {
                /* If the truncate leaves behind a partial page, update its
                 * checksum. */
                struct page *page = find_get_page(inode->i_mapping,
                                                  i_size_read(inode) >>
                                                  CFS_PAGE_SHIFT);
                if (page != NULL) {
                        struct ll_async_page *llap = llap_cast_private(page);
                        if (llap != NULL) {
                                char *kaddr = kmap_atomic(page, KM_USER0);
                                llap->llap_checksum =
                                        init_checksum(OSC_DEFAULT_CKSUM);
                                llap->llap_checksum =
                                        compute_checksum(llap->llap_checksum,
                                                         kaddr, CFS_PAGE_SIZE,
                                                         OSC_DEFAULT_CKSUM);
                                kunmap_atomic(kaddr, KM_USER0);
                        }
                        page_cache_release(page);
                }
        }

        CDEBUG(D_INFO, "calling punch for "LPX64" (new size %Lu=%#Lx)\n",
               lli->lli_smd->lsm_object_id, i_size_read(inode), i_size_read(inode));

        oinfo.oi_md = lli->lli_smd;
        oinfo.oi_policy.l_extent.start = i_size_read(inode);
        oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
        oinfo.oi_oa = &oa;
        oa.o_id = lli->lli_smd->lsm_object_id;
        oa.o_gr = lli->lli_smd->lsm_object_gr;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

        obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
                        OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                        OBD_MD_FLFID | OBD_MD_FLGENER);

        ll_inode_size_unlock(inode, 0);

        oinfo.oi_capa = ll_osscapa_get(inode, CAPA_OPC_OSS_TRUNC);
        rc = obd_punch_rqset(ll_i2dtexp(inode), &oinfo, NULL);
        ll_truncate_free_capa(oinfo.oi_capa);
        if (rc)
                CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
        else
                obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                              OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);
        EXIT;
        return;

 out_unlock:
        ll_inode_size_unlock(inode, 0);
} /* ll_truncate */
int ll_prepare_write(struct file *file, struct page *page, unsigned from,
                     unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        obd_off offset = ((obd_off)page->index) << CFS_PAGE_SHIFT;
        struct obd_info oinfo = { { { 0 } } };
        struct brw_page pga;
        struct obdo oa;
        struct ost_lvb lvb;
        int rc = 0;
        ENTRY;

        LASSERT(PageLocked(page));
        (void)llap_cast_private(page); /* assertion */

        /* Check to see if we should return -EIO right away */
        pga.pg = page;
        pga.off = offset;
        pga.count = CFS_PAGE_SIZE;
        pga.flag = 0;

        oa.o_mode = inode->i_mode;
        oa.o_id = lsm->lsm_object_id;
        oa.o_gr = lsm->lsm_object_gr;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE |
                     OBD_MD_FLTYPE | OBD_MD_FLGROUP;
        obdo_from_inode(&oa, inode, OBD_MD_FLFID | OBD_MD_FLGENER);

        oinfo.oi_oa = &oa;
        oinfo.oi_md = lsm;
        rc = obd_brw(OBD_BRW_CHECK, ll_i2dtexp(inode), &oinfo, 1, &pga, NULL);
        if (rc)
                RETURN(rc);

        if (PageUptodate(page)) {
                LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
                RETURN(0);
        }

        /* We're completely overwriting an existing page, so _don't_ set it up
         * to date until commit_write */
        if (from == 0 && to == CFS_PAGE_SIZE) {
                LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
                POISON_PAGE(page, 0x11);
                RETURN(0);
        }

        /* If we are writing to a new page, no need to read old data.  The
         * extent locking will have updated the KMS, and for our purposes
         * here we can treat it like i_size. */
        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        lov_stripe_unlock(lsm);
        if (lvb.lvb_size <= offset) {
                char *kaddr = kmap_atomic(page, KM_USER0);
                LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
                               lvb.lvb_size, offset);
                memset(kaddr, 0, CFS_PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                GOTO(prepare_done, rc = 0);
        }

        /* XXX could be an async ocp read.. read-ahead? */
        rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
        if (rc == 0) {
                /* bug 1598: don't clobber blksize */
                oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
                obdo_refresh_inode(inode, &oa, oa.o_valid);
        }

        EXIT;
 prepare_done:
        if (rc == 0)
                SetPageUptodate(page);

        return rc;
}
static int ll_ap_make_ready(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;

        LASSERTF(!(cmd & OBD_BRW_READ), "cmd %x page %p ino %lu index %lu\n", cmd, page,
                 page->mapping->host->i_ino, page->index);

        /* we're trying to write, but the page is locked.. come back later */
        if (TryLockPage(page))
                RETURN(-EAGAIN);

        LASSERT(!PageWriteback(page));

        /* if we left PageDirty we might get another writepage call
         * in the future.  list walkers are bright enough
         * to check page dirty so we can leave it on whatever list
         * it's on.  XXX also, we're called with the cli list so if
         * we got the page cache list we'd create a lock inversion
         * with the removepage path which gets the page lock then the
         * cli lock */
        LASSERTF(!PageWriteback(page),"cmd %x page %p ino %lu index %lu\n", cmd, page,
                 page->mapping->host->i_ino, page->index);
        clear_page_dirty_for_io(page);

        /* This actually clears the dirty bit in the radix tree. */
        set_page_writeback(page);

        LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
        page_cache_get(page);

        RETURN(0);
}
/* We have two reasons for giving llite the opportunity to change the
 * write length of a given queued page as it builds the RPC containing
 * the page:
 *
 * 1) Further extending writes may have landed in the page cache
 *    since a partial write first queued this page, requiring us
 *    to write more from the page cache.  (No further races are possible,
 *    since by the time this is called, the page is locked.)
 * 2) We might have raced with truncate and want to avoid performing
 *    write RPCs that are just going to be thrown away by the
 *    truncate's punch on the storage targets.
 *
 * The kms serves these purposes as it is set at both truncate and extending
 * writes.
 */
static int ll_ap_refresh_count(void *data, int cmd)
{
        struct ll_inode_info *lli;
        struct ll_async_page *llap;
        struct lov_stripe_md *lsm;
        struct page *page;
        struct inode *inode;
        struct ost_lvb lvb;
        __u64 kms;
        ENTRY;

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(cmd != OBD_BRW_READ);

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        inode = page->mapping->host;
        lli = ll_i2info(inode);
        lsm = lli->lli_smd;

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;
        lov_stripe_unlock(lsm);

        /* catch race with truncate */
        if (((__u64)page->index << CFS_PAGE_SHIFT) >= kms)
                return 0;

        /* catch sub-page write at end of file */
        if (((__u64)page->index << CFS_PAGE_SHIFT) + CFS_PAGE_SIZE > kms)
                return kms % CFS_PAGE_SIZE;

        return CFS_PAGE_SIZE;
}
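
/* For example, assuming 4096-byte pages, a file with kms = 12388 (three
 * full pages plus 100 bytes) refreshes as follows: page index 2 lies
 * entirely below kms and writes a full CFS_PAGE_SIZE; page index 3
 * straddles kms and is trimmed to kms % CFS_PAGE_SIZE = 100 bytes; page
 * index 4 starts at or past kms (a racing truncate) and writes nothing. */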
void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
{
        struct lov_stripe_md *lsm;
        obd_flag valid_flags;

        lsm = ll_i2info(inode)->lli_smd;

        oa->o_id = lsm->lsm_object_id;
        oa->o_gr = lsm->lsm_object_gr;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
        valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
        if (cmd & OBD_BRW_WRITE) {
                oa->o_valid |= OBD_MD_FLEPOCH;
                oa->o_easize = ll_i2info(inode)->lli_ioepoch;

                valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                               OBD_MD_FLUID | OBD_MD_FLGID |
                               OBD_MD_FLFID | OBD_MD_FLGENER;
        }

        obdo_from_inode(oa, inode, valid_flags);
}
static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);

        EXIT;
}

static void ll_ap_update_obdo(void *data, int cmd, struct obdo *oa,
                              obd_valid valid)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        obdo_from_inode(oa, llap->llap_page->mapping->host, valid);

        EXIT;
}

static struct obd_capa *ll_ap_lookup_capa(void *data, int cmd)
{
        struct ll_async_page *llap = LLAP_FROM_COOKIE(data);
        int opc = cmd & OBD_BRW_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;

        return ll_osscapa_get(llap->llap_page->mapping->host, opc);
}
static struct obd_async_page_ops ll_async_page_ops = {
        .ap_make_ready =    ll_ap_make_ready,
        .ap_refresh_count = ll_ap_refresh_count,
        .ap_fill_obdo =     ll_ap_fill_obdo,
        .ap_update_obdo =   ll_ap_update_obdo,
        .ap_completion =    ll_ap_completion,
        .ap_lookup_capa =   ll_ap_lookup_capa,
};
struct ll_async_page *llap_cast_private(struct page *page)
{
        struct ll_async_page *llap = (struct ll_async_page *)page_private(page);

        LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
                 "page %p private %lu gave magic %d which != %d\n",
                 page, page_private(page), llap->llap_magic, LLAP_MAGIC);

        return llap;
}
/* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
 *
 * There is an llap attached onto every page in lustre, linked off @sbi.
 * We add an llap to the list so we don't lose our place during list walking.
 * If llaps in the list are being moved they will only move to the end
 * of the LRU, and we aren't terribly interested in those pages here (we
 * start at the beginning of the list where the least-used llaps are).
 */
int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
{
        struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
        unsigned long total, want, count = 0;

        total = sbi->ll_async_page_count;

        /* There can be a large number of llaps (600k or more in a large
         * memory machine) so the VM 1/6 shrink ratio is likely too much.
         * Since we are freeing pages also, we don't necessarily want to
         * shrink so much.  Limit to 40MB of pages + llaps per call. */
        if (shrink_fraction == 0)
                want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
        else
                want = (total + shrink_fraction - 1) / shrink_fraction;

        if (want > 40 << (20 - CFS_PAGE_SHIFT))
                want = 40 << (20 - CFS_PAGE_SHIFT);
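
        /* With 4096-byte pages (CFS_PAGE_SHIFT == 12), the cap above works
         * out to 40 << 8 = 10240 pages, i.e. roughly 40MB of page data
         * released per call regardless of how large ll_async_page_count
         * has grown. */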

        CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
               want, total, shrink_fraction);

        spin_lock(&sbi->ll_lock);
        list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);

        while (total-- > 0 && count < want) {
                struct page *page;
                int keep;

                if (unlikely(need_resched())) {
                        spin_unlock(&sbi->ll_lock);
                        cond_resched();
                        spin_lock(&sbi->ll_lock);
                }

                llap = llite_pglist_next_llap(sbi,&dummy_llap.llap_pglist_item);
                list_del_init(&dummy_llap.llap_pglist_item);
                if (llap == NULL)
                        break;

                page = llap->llap_page;
                LASSERT(page != NULL);

                list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);

                /* Page needs/undergoing IO */
                if (TryLockPage(page)) {
                        LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
                        continue;
                }

                keep = (llap->llap_write_queued || PageDirty(page) ||
                       PageWriteback(page) || (!PageUptodate(page) &&
                       llap->llap_origin != LLAP_ORIGIN_READAHEAD));

                LL_CDEBUG_PAGE(D_PAGE, page,"%s LRU page: %s%s%s%s%s origin %s\n",
                               keep ? "keep" : "drop",
                               llap->llap_write_queued ? "wq " : "",
                               PageDirty(page) ? "pd " : "",
                               PageUptodate(page) ? "" : "!pu ",
                               PageWriteback(page) ? "wb" : "",
                               llap->llap_defer_uptodate ? "" : "!du",
                               llap_origins[llap->llap_origin]);

                /* If page is dirty or undergoing IO don't discard it */
                if (keep) {
                        unlock_page(page);
                        continue;
                }

                page_cache_get(page);
                spin_unlock(&sbi->ll_lock);

                if (page->mapping != NULL) {
                        ll_teardown_mmaps(page->mapping,
                                          (__u64)page->index << CFS_PAGE_SHIFT,
                                          ((__u64)page->index << CFS_PAGE_SHIFT)|
                                          ~CFS_PAGE_MASK);
                        if (!PageDirty(page) && !page_mapped(page)) {
                                ll_ra_accounting(llap, page->mapping);
                                ll_truncate_complete_page(page);
                                ++count;
                        } else {
                                LL_CDEBUG_PAGE(D_PAGE, page, "Not dropping page"
                                               " because it is %s\n",
                                               PageDirty(page) ?
                                               "dirty" : "mapped");
                        }
                }
                unlock_page(page);
                page_cache_release(page);

                spin_lock(&sbi->ll_lock);
        }
        list_del(&dummy_llap.llap_pglist_item);
        spin_unlock(&sbi->ll_lock);

        CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
               count, want, total);

        return count;
}
struct ll_async_page *llap_from_page(struct page *page, unsigned origin)
{
        struct ll_async_page *llap;
        struct obd_export *exp;
        struct inode *inode = page->mapping->host;
        struct ll_sb_info *sbi;
        int rc;
        ENTRY;

        if (!inode) {
                static int triggered;

                if (!triggered) {
                        LL_CDEBUG_PAGE(D_ERROR, page, "Bug 10047. Wrong anon "
                                       "mapping\n");
                        libcfs_debug_dumpstack(NULL);
                        triggered = 1;
                }
                RETURN(ERR_PTR(-EINVAL));
        }
        sbi = ll_i2sbi(inode);
        LASSERT(ll_async_page_slab);
        LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);

        llap = llap_cast_private(page);
        if (llap != NULL) {
                /* move to end of LRU list, except when page is just about to
                 * die */
                if (origin != LLAP_ORIGIN_REMOVEPAGE) {
                        spin_lock(&sbi->ll_lock);
                        sbi->ll_pglist_gen++;
                        list_del_init(&llap->llap_pglist_item);
                        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
                        spin_unlock(&sbi->ll_lock);
                }
                GOTO(out, llap);
        }

        exp = ll_i2dtexp(page->mapping->host);
        if (exp == NULL)
                RETURN(ERR_PTR(-EINVAL));

        /* limit the number of lustre-cached pages */
        if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
                llap_shrink_cache(sbi, 0);

        OBD_SLAB_ALLOC(llap, ll_async_page_slab, CFS_ALLOC_STD,
                       ll_async_page_slab_size);
        if (llap == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        llap->llap_magic = LLAP_MAGIC;
        llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));

        rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
                                 (obd_off)page->index << CFS_PAGE_SHIFT,
                                 &ll_async_page_ops, llap, &llap->llap_cookie);
        if (rc) {
                OBD_SLAB_FREE(llap, ll_async_page_slab,
                              ll_async_page_slab_size);
                RETURN(ERR_PTR(rc));
        }

        CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
               page, llap->llap_cookie, (obd_off)page->index << CFS_PAGE_SHIFT);
        /* also zeroing the PRIVBITS low order bitflags */
        __set_page_ll_data(page, llap);
        llap->llap_page = page;
        spin_lock(&sbi->ll_lock);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count++;
        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
        INIT_LIST_HEAD(&llap->llap_pending_write);
        spin_unlock(&sbi->ll_lock);

 out:
        if (unlikely(sbi->ll_flags & LL_SBI_CHECKSUM)) {
                __u32 csum;
                char *kaddr = kmap_atomic(page, KM_USER0);
                csum = init_checksum(OSC_DEFAULT_CKSUM);
                csum = compute_checksum(csum, kaddr, CFS_PAGE_SIZE,
                                        OSC_DEFAULT_CKSUM);
                kunmap_atomic(kaddr, KM_USER0);
                if (origin == LLAP_ORIGIN_READAHEAD ||
                    origin == LLAP_ORIGIN_READPAGE) {
                        llap->llap_checksum = 0;
                } else if (origin == LLAP_ORIGIN_COMMIT_WRITE ||
                           llap->llap_checksum == 0) {
                        llap->llap_checksum = csum;
                        CDEBUG(D_PAGE, "page %p cksum %x\n", page, csum);
                } else if (llap->llap_checksum == csum) {
                        /* origin == LLAP_ORIGIN_WRITEPAGE */
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        /* origin == LLAP_ORIGIN_WRITEPAGE */
                        LL_CDEBUG_PAGE(D_ERROR, page, "old cksum %x != new "
                                       "%x!\n", llap->llap_checksum, csum);
                }
        }

        llap->llap_origin = origin;
        RETURN(llap);
}
static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
                               struct ll_async_page *llap,
                               unsigned to, obd_flag async_flags)
{
        unsigned long size_index = i_size_read(inode) >> CFS_PAGE_SHIFT;
        struct obd_io_group *oig;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc, noquot = llap->llap_ignore_quota ? OBD_BRW_NOQUOTA : 0;
        ENTRY;

        /* _make_ready only sees llap once we've unlocked the page */
        llap->llap_write_queued = 1;
        rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, 0, 0, async_flags);
        if (rc == 0) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
                GOTO(out, 0);
        }

        llap->llap_write_queued = 0;
        /* Do not pass llap here as it is sync write. */
        llap_write_pending(inode, NULL);

        rc = oig_init(&oig);
        if (rc)
                GOTO(out, rc);

        /* make full-page requests if we are not at EOF (bug 4410) */
        if (to != CFS_PAGE_SIZE && llap->llap_page->index < size_index) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write before EOF: size_index %lu, to %d\n",
                               size_index, to);
                to = CFS_PAGE_SIZE;
        } else if (to != CFS_PAGE_SIZE && llap->llap_page->index == size_index) {
                int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write at EOF: size_index %lu, to %d/%d\n",
                               size_index, to, size_to);
                if (to < size_to)
                        to = size_to;
        }
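
        /* For example, with 4096-byte pages and i_size = 12388 (so
         * size_index = 3 and size_to = 100): a partial write of 512 bytes
         * to page 1 is widened to a full 4096-byte request, while a
         * 50-byte write to page 3 is widened only to 100 bytes so the
         * request stops at EOF. */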

        /* compare the checksum once before the page leaves llite */
        if (unlikely((sbi->ll_flags & LL_SBI_CHECKSUM) &&
                     llap->llap_checksum != 0)) {
                __u32 csum;
                struct page *page = llap->llap_page;
                char *kaddr = kmap_atomic(page, KM_USER0);
                csum = init_checksum(OSC_DEFAULT_CKSUM);
                csum = compute_checksum(csum, kaddr, CFS_PAGE_SIZE,
                                        OSC_DEFAULT_CKSUM);
                kunmap_atomic(kaddr, KM_USER0);
                if (llap->llap_checksum == csum) {
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        CERROR("page %p old cksum %x != new cksum %x!\n",
                               page, llap->llap_checksum, csum);
                }
        }

        rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, to, 0, ASYNC_READY | ASYNC_URGENT |
                                ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
        if (rc)
                GOTO(free_oig, rc);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
        if (rc)
                GOTO(free_oig, rc);

        rc = oig_wait(oig);

        if (!rc && async_flags & ASYNC_READY) {
                unlock_page(llap->llap_page);
                if (PageWriteback(llap->llap_page)) {
                        end_page_writeback(llap->llap_page);
                }
        }

        if (rc == 0 && llap_write_complete(inode, llap))
                ll_queue_done_writing(inode, 0);

        LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);

 free_oig:
        oig_release(oig);
 out:
        RETURN(rc);
}
/* update our write count to account for i_size increases that may have
 * happened since we've queued the page for io. */

/* be careful not to return success without setting the page Uptodate or
 * the next pass through prepare_write will read in stale data from disk. */
int ll_commit_write(struct file *file, struct page *page, unsigned from,
                    unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_export *exp;
        struct ll_async_page *llap;
        loff_t size;
        int rc = 0;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
        LASSERT(inode == file->f_dentry->d_inode);
        LASSERT(PageLocked(page));

        CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
               inode, page, from, to, page->index);

        llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                RETURN(-EINVAL);

        llap->llap_ignore_quota = capable(CAP_SYS_RESOURCE);

        /* queue a write for some time in the future the first time we
         * dirty the page.
         *
         * This is different from what other file systems do: they usually
         * just mark the page (and some of its buffers) dirty and rely on
         * balance_dirty_pages() to start a write-back.  Lustre wants
         * write-back to be started earlier for the following reasons:
         *
         * (1) with a large number of clients we need to limit the amount
         * of cached data on the clients a lot;
         *
         * (2) large compute jobs generally want compute-only then io-only
         * and the IO should complete as quickly as possible;
         *
         * (3) IO is batched up to the RPC size and is async until the
         * client max cache is hit
         * (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
         */
        if (!PageDirty(page)) {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_MISSES, 1);

                rc = queue_or_sync_write(exp, inode, llap, to, 0);
                if (rc)
                        GOTO(out, rc);
        } else {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_HITS, 1);
        }

        /* put the page in the page cache, from now on ll_removepage is
         * responsible for cleaning up the llap.
         * only set page dirty when it's queued to be written out */
        if (llap->llap_write_queued)
                set_page_dirty(page);

 out:
        size = (((obd_off)page->index) << CFS_PAGE_SHIFT) + to;
        ll_inode_size_lock(inode, 0);
        if (rc == 0) {
                lov_stripe_lock(lsm);
                obd_adjust_kms(exp, lsm, size, 0);
                lov_stripe_unlock(lsm);
                if (size > i_size_read(inode))
                        i_size_write(inode, size);
                SetPageUptodate(page);
        } else if (size > i_size_read(inode)) {
                /* this page is beyond the pale of i_size, so it can't be
                 * truncated in ll_p_r_e during lock revoking.  we must
                 * teardown our book-keeping here. */
                ll_removepage(page);
        }
        ll_inode_size_unlock(inode, 0);
        RETURN(rc);
}
static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        unsigned long ret;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
        ra->ra_cur_pages += ret;
        spin_unlock(&sbi->ll_lock);

        RETURN(ret);
}
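
/* For example, with ra_max_pages = 1024 and ra_cur_pages = 1000, a
 * request to reserve 100 read-ahead pages is trimmed to the 24 still
 * available; the caller must hand back whatever it does not use via
 * ll_ra_count_put(). */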
static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        spin_lock(&sbi->ll_lock);
        LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
                 ra->ra_cur_pages, len);
        ra->ra_cur_pages -= len;
        spin_unlock(&sbi->ll_lock);
}
/* called for each page in a completed rpc. */
int ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        LASSERT(PageLocked(page));
        LASSERT(CheckWriteback(page, cmd));

        LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);

        if (cmd & OBD_BRW_READ && llap->llap_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);

        if (rc == 0) {
                if (cmd & OBD_BRW_READ) {
                        if (!llap->llap_defer_uptodate)
                                SetPageUptodate(page);
                } else {
                        llap->llap_write_queued = 0;
                }
                ClearPageError(page);
        } else {
                if (cmd & OBD_BRW_READ) {
                        llap->llap_defer_uptodate = 0;
                }
                if (rc == -ENOSPC)
                        set_bit(AS_ENOSPC, &page->mapping->flags);
                else
                        set_bit(AS_EIO, &page->mapping->flags);
                SetPageError(page);
        }

        unlock_page(page);

        if (cmd & OBD_BRW_WRITE) {
                /* Only if rc == 0 (the write succeeded) can this page be
                 * deleted from the pending_writing list */
                if (rc == 0 && llap_write_complete(page->mapping->host, llap))
                        ll_queue_done_writing(page->mapping->host, 0);
        }

        if (PageWriteback(page)) {
                end_page_writeback(page);
        }
        page_cache_release(page);

        RETURN(rc);
}
/* the kernel calls us here when a page is unhashed from the page cache.
 * the page will be locked and the kernel is holding a spinlock, so
 * we need to be careful.  we're just tearing down our book-keeping
 * here. */
void ll_removepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        LASSERT(!in_interrupt());

        /* sync pages or failed read pages can leave pages in the page
         * cache that don't have our data associated with them anymore */
        if (page_private(page) == 0) {
                EXIT;
                return;
        }

        LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");

        exp = ll_i2dtexp(inode);
        if (exp == NULL) {
                CERROR("page %p ind %lu gave null export\n", page, page->index);
                EXIT;
                return;
        }

        llap = llap_from_page(page, LLAP_ORIGIN_REMOVEPAGE);
        if (IS_ERR(llap)) {
                CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
                       page->index, PTR_ERR(llap));
                EXIT;
                return;
        }

        if (llap_write_complete(inode, llap))
                ll_queue_done_writing(inode, 0);

        rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
                                     llap->llap_cookie);
        if (rc != 0)
                CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);

        /* this unconditional free is only safe because the page lock
         * is providing exclusivity to memory pressure/truncate/writeback.. */
        __clear_page_ll_data(page);

        spin_lock(&sbi->ll_lock);
        if (!list_empty(&llap->llap_pglist_item))
                list_del_init(&llap->llap_pglist_item);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count--;
        spin_unlock(&sbi->ll_lock);
        OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
        EXIT;
}
static int ll_page_matches(struct page *page, int fd_flags)
{
        struct lustre_handle match_lockh = {0};
        struct inode *inode = page->mapping->host;
        ldlm_policy_data_t page_extent;
        int flags, matches;
        ENTRY;

        if (unlikely(fd_flags & LL_FILE_GROUP_LOCKED))
                RETURN(1);

        page_extent.l_extent.start = (__u64)page->index << CFS_PAGE_SHIFT;
        page_extent.l_extent.end =
                page_extent.l_extent.start + CFS_PAGE_SIZE - 1;
        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (!(fd_flags & LL_FILE_READAHEAD))
                flags |= LDLM_FL_CBPENDING;
        matches = obd_match(ll_i2sbi(inode)->ll_dt_exp,
                            ll_i2info(inode)->lli_smd, LDLM_EXTENT,
                            &page_extent, LCK_PR | LCK_PW, &flags, inode,
                            &match_lockh);
        RETURN(matches);
}
static int ll_issue_page_read(struct obd_export *exp,
                              struct ll_async_page *llap,
                              struct obd_io_group *oig, int defer)
{
        struct page *page = llap->llap_page;
        int rc;

        page_cache_get(page);
        llap->llap_defer_uptodate = defer;
        llap->llap_ra_used = 0;
        rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
                                NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
                                CFS_PAGE_SIZE, 0, ASYNC_COUNT_STABLE |
                                ASYNC_READY | ASYNC_URGENT);
        if (rc) {
                LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
                page_cache_release(page);
        }
        RETURN(rc);
}
static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
{
        LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
        ra->ra_stats[which]++;
}

static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
        struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
        struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;

        spin_lock(&sbi->ll_lock);
        ll_ra_stats_inc_unlocked(ra, which);
        spin_unlock(&sbi->ll_lock);
}

void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
{
        if (!llap->llap_defer_uptodate || llap->llap_ra_used)
                return;

        ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
}
#define RAS_CDEBUG(ras) \
        CDEBUG(D_READA,                                                      \
               "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu "   \
               "csr %lu sf %lu sp %lu sl %lu\n",                             \
               ras->ras_last_readpage, ras->ras_consecutive_requests,        \
               ras->ras_consecutive_pages, ras->ras_window_start,            \
               ras->ras_window_len, ras->ras_next_readahead,                 \
               ras->ras_requests, ras->ras_request_index,                    \
               ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \
               ras->ras_stride_pages, ras->ras_stride_length)
static int index_in_window(unsigned long index, unsigned long point,
                           unsigned long before, unsigned long after)
{
        unsigned long start = point - before, end = point + after;

        if (start > point)
                start = 0;
        if (end < point)
                end = ~0;

        return start <= index && index <= end;
}
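
/* For example, index_in_window(idx, 5, 8, 8) would naively cover
 * [-3, 13]; the underflow clamp above turns that into [0, 13], and a
 * window around a point near ~0UL is likewise clamped at the top end
 * rather than wrapping. */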
static struct ll_readahead_state *ll_ras_get(struct file *f)
{
        struct ll_file_data *fd;

        fd = LUSTRE_FPRIVATE(f);
        return &fd->fd_ras;
}
void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
{
        struct ll_readahead_state *ras;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        ras->ras_requests++;
        ras->ras_request_index = 0;
        ras->ras_consecutive_requests++;
        rar->lrr_reader = current;

        list_add(&rar->lrr_linkage, &ras->ras_read_beads);
        spin_unlock(&ras->ras_lock);
}

void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
{
        struct ll_readahead_state *ras;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        list_del_init(&rar->lrr_linkage);
        spin_unlock(&ras->ras_lock);
}

static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
{
        struct ll_ra_read *scan;

        list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
                if (scan->lrr_reader == current)
                        return scan;
        }
        return NULL;
}

struct ll_ra_read *ll_ra_read_get(struct file *f)
{
        struct ll_readahead_state *ras;
        struct ll_ra_read *bead;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        bead = ll_ra_read_get_locked(ras);
        spin_unlock(&ras->ras_lock);

        return bead;
}
static int ll_read_ahead_page(struct obd_export *exp, struct obd_io_group *oig,
                              int index, struct address_space *mapping)
{
        struct ll_async_page *llap;
        struct page *page;
        unsigned int gfp_mask = 0;
        int rc = 0;

        gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
#ifdef __GFP_NOWARN
        gfp_mask |= __GFP_NOWARN;
#endif
        page = grab_cache_page_nowait_gfp(mapping, index, gfp_mask);
        if (page == NULL) {
                ll_ra_stats_inc(mapping, RA_STAT_FAILED_GRAB_PAGE);
                CDEBUG(D_READA, "g_c_p_n failed\n");
                return 0;
        }

        /* Check if page was truncated or reclaimed */
        if (page->mapping != mapping) {
                ll_ra_stats_inc(mapping, RA_STAT_WRONG_GRAB_PAGE);
                CDEBUG(D_READA, "g_c_p_n returned invalid page\n");
                GOTO(unlock_page, rc = 0);
        }

        /* we do this first so that we can see the page in the /proc
         * accounting */
        llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
        if (IS_ERR(llap) || llap->llap_defer_uptodate) {
                if (PTR_ERR(llap) == -ENOLCK) {
                        ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
                        CDEBUG(D_READA | D_PAGE,
                               "Adding page to cache failed index "
                               "%d\n", index);
                        CDEBUG(D_READA, "nolock page\n");
                        GOTO(unlock_page, rc = -ENOLCK);
                }
                CDEBUG(D_READA, "read-ahead page\n");
                GOTO(unlock_page, rc = 0);
        }

        /* skip completed pages */
        if (Page_Uptodate(page))
                GOTO(unlock_page, rc = 0);

        /* bail out when we hit the end of the lock. */
        rc = ll_issue_page_read(exp, llap, oig, 1);
        if (rc == 0) {
                LL_CDEBUG_PAGE(D_READA | D_PAGE, page, "started read-ahead\n");
                rc = 1;
        } else {
 unlock_page:
                unlock_page(page);
                LL_CDEBUG_PAGE(D_READA | D_PAGE, page, "skipping read-ahead\n");
        }
        page_cache_release(page);
        return rc;
}
/* ra_io_arg will be filled in the beginning of ll_readahead with
 * ras_lock, then the following ll_read_ahead_pages will read RA
 * pages according to this arg, all the items in this structure are
 * counted by page index.
 */
struct ra_io_arg {
        unsigned long ria_start;  /* start offset of read-ahead */
        unsigned long ria_end;    /* end offset of read-ahead */
        /* If a stride read pattern is detected, ria_stoff means where
         * the stride read starts.  Note: for normal read-ahead, the
         * value here is meaningless, and it will not be accessed. */
        pgoff_t ria_stoff;
        /* ria_length and ria_pages are the length and data-page count of
         * one period in stride I/O mode.  They are also used to check
         * whether an index is inside the stride window when reading
         * ahead. */
        unsigned long ria_length;
        unsigned long ria_pages;
};
#define RIA_DEBUG(ria)                                                 \
        CDEBUG(D_READA, "rs %lu re %lu ro %lu rl %lu rp %lu\n",        \
               ria->ria_start, ria->ria_end, ria->ria_stoff,           \
               ria->ria_length, ria->ria_pages)

#define RAS_INCREASE_STEP (1024 * 1024 >> CFS_PAGE_SHIFT)
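
/* With 4096-byte pages (CFS_PAGE_SHIFT == 12) this step is
 * 1024 * 1024 >> 12 = 256 pages, so the read-ahead window starts on and
 * grows in 1MB units. */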
static inline int stride_io_mode(struct ll_readahead_state *ras)
{
        return ras->ras_consecutive_stride_requests > 1;
}
/* The function calculates how many pages will be read in
 * [off, off + length], which will be read by stride I/O mode,
 * stride_offset = st_off, stride_length = st_len,
 * stride_pages = st_pgs
 */
static unsigned long
stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
                unsigned long off, unsigned length)
{
        unsigned long cont_len = st_off > off ? st_off - off : 0;
        unsigned long stride_len = length + off > st_off ?
                                   length + off + 1 - st_off : 0;
        unsigned long left, pg_count;

        if (st_len == 0 || length == 0)
                return length;

        left = do_div(stride_len, st_len);
        left = min(left, st_pgs);

        pg_count = left + stride_len * st_pgs + cont_len;

        LASSERT(pg_count >= left);

        CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %u "
               "pgcount %lu\n", st_off, st_len, st_pgs, off, length, pg_count);

        return pg_count;
}
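
/* For example, stride_pg_count(0, 8, 2, 0, 16) with a pattern of 2 pages
 * every 8: stride_len = 17, so 17 / 8 = 2 whole stride periods (4 pages)
 * plus min(17 % 8, 2) = 1 page of the trailing partial period, giving
 * pg_count = 5 -- pages 0, 1, 8, 9 and 16. */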
static int ria_page_count(struct ra_io_arg *ria)
{
        __u64 length = ria->ria_end >= ria->ria_start ?
                       ria->ria_end - ria->ria_start + 1 : 0;

        return stride_pg_count(ria->ria_stoff, ria->ria_length,
                               ria->ria_pages, ria->ria_start,
                               length);
}
/* Check whether the index is in the defined ra-window */
static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria)
{
        /* If ria_length == ria_pages, it means non-stride I/O mode;
         * idx should always be inside the read-ahead window in this case.
         * For stride I/O mode, just check whether the idx is inside
         * the ria_pages. */
        return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
               (idx - ria->ria_stoff) % ria->ria_length < ria->ria_pages;
}
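
/* For the same example pattern (ria_stoff 0, ria_length 8, ria_pages 2),
 * indexes 0, 1, 8, 9, 16, 17, ... fall inside the window and indexes
 * 2..7, 10..15, ... are skipped as stride gaps. */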
static int ll_read_ahead_pages(struct obd_export *exp,
                               struct obd_io_group *oig,
                               struct ra_io_arg *ria,
                               unsigned long *reserved_pages,
                               struct address_space *mapping,
                               unsigned long *ra_end)
{
        int rc, count = 0, stride_ria;
        unsigned long page_idx;

        LASSERT(ria != NULL);
        RIA_DEBUG(ria);

        stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
        for (page_idx = ria->ria_start; page_idx <= ria->ria_end &&
                        *reserved_pages > 0; page_idx++) {
                if (ras_inside_ra_window(page_idx, ria)) {
                        /* If the page is inside the read-ahead window */
                        rc = ll_read_ahead_page(exp, oig, page_idx, mapping);
                        if (rc == 1) {
                                (*reserved_pages)--;
                                count++;
                        } else if (rc == -ENOLCK)
                                break;
                } else if (stride_ria) {
                        /* If it is not in the read-ahead window, and it is
                         * read-ahead mode, then check whether it should skip
                         * the stride gap */
                        pgoff_t offset;
                        /* FIXME: This assertion is only valid when it is for
                         * forward read-ahead; it will be fixed when backward
                         * read-ahead is implemented */
                        LASSERTF(page_idx > ria->ria_stoff, "since %lu is in"
                                 " the gap of the ra window, it should be"
                                 " bigger than stride offset %lu\n",
                                 page_idx, ria->ria_stoff);

                        offset = page_idx - ria->ria_stoff;
                        offset = offset % (ria->ria_length);
                        if (offset > ria->ria_pages) {
                                page_idx += ria->ria_length - offset;
                                CDEBUG(D_READA, "i %lu skip %lu\n", page_idx,
                                       ria->ria_length - offset);
                                continue;
                        }
                }
        }
        *ra_end = page_idx;
        return count;
}
static int ll_readahead(struct ll_readahead_state *ras,
                        struct obd_export *exp, struct address_space *mapping,
                        struct obd_io_group *oig, int flags)
{
        unsigned long start = 0, end = 0, reserved;
        unsigned long ra_end, len;
        struct inode *inode;
        struct lov_stripe_md *lsm;
        struct ll_ra_read *bead;
        struct ost_lvb lvb;
        struct ra_io_arg ria = { 0 };
        int ret = 0;
        __u64 kms;
        ENTRY;

        inode = mapping->host;
        lsm = ll_i2info(inode)->lli_smd;

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;
        lov_stripe_unlock(lsm);
        if (kms == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
                RETURN(0);
        }

        spin_lock(&ras->ras_lock);
        bead = ll_ra_read_get_locked(ras);
        /* Enlarge the RA window to encompass the full read */
        if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
            bead->lrr_start + bead->lrr_count) {
                ras->ras_window_len = bead->lrr_start + bead->lrr_count -
                                      ras->ras_window_start;
        }
        /* Reserve a part of the read-ahead window that we'll be issuing */
        if (ras->ras_window_len) {
                start = ras->ras_next_readahead;
                end = ras->ras_window_start + ras->ras_window_len - 1;
        }
        if (end != 0) {
                /* Truncate RA window to end of file */
                end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
                ras->ras_next_readahead = max(end, end + 1);
                RAS_CDEBUG(ras);
        }
        ria.ria_start = start;
        ria.ria_end = end;
        /* If stride I/O mode is detected, get the stride window */
        if (stride_io_mode(ras)) {
                ria.ria_stoff = ras->ras_stride_offset;
                ria.ria_length = ras->ras_stride_length;
                ria.ria_pages = ras->ras_stride_pages;
        }
        spin_unlock(&ras->ras_lock);

        if (end == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
                RETURN(0);
        }
        len = ria_page_count(&ria);
        if (len == 0)
                RETURN(0);

        reserved = ll_ra_count_get(ll_i2sbi(inode), len);

        if (reserved < end - start + 1)
                ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);

        CDEBUG(D_READA, "reserved page %lu\n", reserved);

        ret = ll_read_ahead_pages(exp, oig, &ria, &reserved, mapping, &ra_end);

        if (reserved != 0)
                ll_ra_count_put(ll_i2sbi(inode), reserved);

        if (ra_end == end + 1 && ra_end == (kms >> CFS_PAGE_SHIFT))
                ll_ra_stats_inc(mapping, RA_STAT_EOF);

        /* if we didn't get to the end of the region we reserved from
         * the ras we need to go back and update the ras so that the
         * next read-ahead tries from where we left off.  we only do so
         * if the region we failed to issue read-ahead on is still ahead
         * of the app and behind the next index to start read-ahead from */
        CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu\n",
               ra_end, end, ria.ria_end);

        if (ra_end != (end + 1)) {
                spin_lock(&ras->ras_lock);
                if (ra_end < ras->ras_next_readahead &&
                    index_in_window(ra_end, ras->ras_window_start, 0,
                                    ras->ras_window_len)) {
                        ras->ras_next_readahead = ra_end;
                        RAS_CDEBUG(ras);
                }
                spin_unlock(&ras->ras_lock);
        }

        RETURN(ret);
}
static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_window_start = index & (~(RAS_INCREASE_STEP - 1));
}
/* called with the ras_lock held or from places where it doesn't matter */
static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_last_readpage = index;
        ras->ras_consecutive_requests = 0;
        ras->ras_consecutive_pages = 0;
        ras->ras_window_len = 0;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start, index);

        RAS_CDEBUG(ras);
}

/* called with the ras_lock held or from places where it doesn't matter */
static void ras_stride_reset(struct ll_readahead_state *ras)
{
        ras->ras_consecutive_stride_requests = 0;
        RAS_CDEBUG(ras);
}

void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
        spin_lock_init(&ras->ras_lock);
        ras_reset(ras, 0);
        ras->ras_requests = 0;
        INIT_LIST_HEAD(&ras->ras_read_beads);
}
/* Check whether the read request is in the stride window.
 * If it is in the stride window, return 1; otherwise return 0.
 * Also update the recorded stride pattern (ras_stride_pages and
 * ras_stride_length).
 */
static int index_in_stride_window(unsigned long index,
                                  struct ll_readahead_state *ras,
                                  struct inode *inode)
{
        int stride_gap = index - ras->ras_last_readpage - 1;

        LASSERT(stride_gap != 0);

        if (ras->ras_consecutive_pages == 0)
                return 0;

        /* Otherwise check the stride by itself */
        if ((ras->ras_stride_length - ras->ras_stride_pages) == stride_gap &&
            ras->ras_consecutive_pages == ras->ras_stride_pages)
                return 1;

        if (stride_gap >= 0) {
                /*
                 * only set stride_pages, stride_length if
                 * it is forward reading ( stride_gap > 0)
                 */
                ras->ras_stride_pages = ras->ras_consecutive_pages;
                ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
        } else {
                /*
                 * If stride_gap < 0 (backward reading),
                 * reset the stride_pages/length.
                 * FIXME: backward stride I/O read.
                 */
                ras->ras_stride_pages = 0;
                ras->ras_stride_length = 0;
        }
        RAS_CDEBUG(ras);

        return 0;
}
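
/* For example, a reader touching pages 0, 1, 8, 9, 16, ...: the jump
 * from 1 to 8 (stride_gap 6, ras_consecutive_pages 2) records
 * ras_stride_pages = 2 and ras_stride_length = 8 and returns 0; the
 * next jump from 9 to 16 matches that recorded shape (8 - 2 == 6 with
 * 2 consecutive pages) and returns 1, confirming the stride. */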
static unsigned long
stride_page_count(struct ll_readahead_state *ras, unsigned long len)
{
        return stride_pg_count(ras->ras_stride_offset, ras->ras_stride_length,
                               ras->ras_stride_pages, ras->ras_stride_offset,
                               len);
}
/* The stride read-ahead window will be increased by inc_len according to
 * the stride I/O pattern */
static void ras_stride_increase_window(struct ll_readahead_state *ras,
                                       struct ll_ra_info *ra,
                                       unsigned long inc_len)
{
        unsigned long left, step, window_len;
        unsigned long stride_len;

        LASSERT(ras->ras_stride_length > 0);

        stride_len = ras->ras_window_start + ras->ras_window_len -
                     ras->ras_stride_offset;

        LASSERTF(stride_len >= 0, "window_start %lu, window_len %lu"
                 " stride_offset %lu\n", ras->ras_window_start,
                 ras->ras_window_len, ras->ras_stride_offset);

        left = stride_len % ras->ras_stride_length;

        window_len = ras->ras_window_len - left;

        if (left < ras->ras_stride_pages)
                left += inc_len;
        else
                left = ras->ras_stride_pages + inc_len;

        LASSERT(ras->ras_stride_pages != 0);

        step = left / ras->ras_stride_pages;
        left %= ras->ras_stride_pages;

        window_len += step * ras->ras_stride_length + left;

        if (stride_page_count(ras, window_len) <= ra->ra_max_pages)
                ras->ras_window_len = window_len;

        RAS_CDEBUG(ras);
}
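
/* Continuing the 2-pages-every-8 example with a window that currently
 * ends exactly on a stride boundary (left == 0 < ras_stride_pages):
 * inc_len = 256 data pages becomes step = 256 / 2 = 128 whole stride
 * periods, so window_len grows by 128 * 8 = 1024 pages while only 256
 * of them will actually be read. */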
/* Set stride I/O read-ahead window start offset */
static void ras_set_stride_offset(struct ll_readahead_state *ras)
{
        unsigned long window_len = ras->ras_next_readahead -
                                   ras->ras_window_start;
        unsigned long left;

        LASSERT(ras->ras_stride_length != 0);

        left = window_len % ras->ras_stride_length;

        ras->ras_stride_offset = ras->ras_next_readahead - left;

        RAS_CDEBUG(ras);
}
static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
                       struct ll_readahead_state *ras, unsigned long index,
                       unsigned hit)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        int zero = 0, stride_zero = 0, stride_detect = 0, ra_miss = 0;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        spin_lock(&ras->ras_lock);

        ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);

        /* reset the read-ahead window in two cases.  First when the app seeks
         * or reads to some other part of the file.  Secondly if we get a
         * read-ahead miss that we think we've previously issued.  This can
         * be a symptom of there being so many read-ahead pages that the VM is
         * reclaiming it before we get to it. */
        if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
                /* check whether it is in stride I/O mode */
                if (!index_in_stride_window(index, ras, inode))
                        stride_zero = 1;
        } else if (!hit && ras->ras_window_len &&
                   index < ras->ras_next_readahead &&
                   index_in_window(index, ras->ras_window_start, 0,
                                   ras->ras_window_len)) {
                zero = 1;
                ra_miss = 1;
                /* If it hits a read-ahead miss and the stride I/O is still
                 * not detected, reset stride stuff to re-detect the whole
                 * stride I/O mode to avoid complication */
                if (!stride_io_mode(ras))
                        stride_zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
        }

        /* On the second access to a file smaller than the tunable
         * ra_max_read_ahead_whole_pages trigger RA on all pages in the
         * file up to ra_max_pages.  This is simply a best effort and
         * only occurs once per open file.  Normal RA behavior is reverted
         * to for subsequent IO.  The mmap case does not increment
         * ras_requests and thus can never trigger this behavior. */
        if (ras->ras_requests == 2 && !ras->ras_request_index) {
                __u64 kms_pages;

                kms_pages = (i_size_read(inode) + CFS_PAGE_SIZE - 1) >>
                            CFS_PAGE_SHIFT;

                CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
                       ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages);

                if (kms_pages &&
                    kms_pages <= ra->ra_max_read_ahead_whole_pages) {
                        ras->ras_window_start = 0;
                        ras->ras_last_readpage = 0;
                        ras->ras_next_readahead = 0;
                        ras->ras_window_len = min(ra->ra_max_pages,
                                ra->ra_max_read_ahead_whole_pages);
                        GOTO(out_unlock, 0);
                }
        }

        if (zero) {
                /* If it is a discontinuous read, check
                 * whether it is stride I/O mode */
                if (stride_zero) {
                        ras_reset(ras, index);
                        ras->ras_consecutive_pages++;
                        ras_stride_reset(ras);
                        GOTO(out_unlock, 0);
                } else {
                        /* The read is still in the stride window or
                         * it hit a read-ahead miss */

                        /* If an ra-window miss is hit, which probably means
                         * VM pressure, and some read-ahead pages were
                         * reclaimed, the length of the ra-window will not be
                         * increased, but also not reset, to avoid
                         * re-detecting the stride I/O mode. */
                        ras->ras_consecutive_requests = 0;
                        if (!ra_miss) {
                                ras->ras_consecutive_pages = 0;
                                if (++ras->ras_consecutive_stride_requests > 1)
                                        stride_detect = 1;
                        }
                        RAS_CDEBUG(ras);
                }
        } else if (ras->ras_consecutive_stride_requests > 1) {
                /* If this is a contiguous read but currently in stride I/O
                 * mode, check whether the stride step still is valid;
                 * if invalid, reset the stride ra window. */
                if (ras->ras_consecutive_pages + 1 > ras->ras_stride_pages)
                        ras_stride_reset(ras);
        }

        ras->ras_last_readpage = index;
        ras->ras_consecutive_pages++;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start,
                                      ras->ras_next_readahead);
        RAS_CDEBUG(ras);

        /* Trigger RA in the mmap case where ras_consecutive_requests
         * is not incremented and thus can't be used to trigger RA */
        if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
                ras->ras_window_len = RAS_INCREASE_STEP;
                GOTO(out_unlock, 0);
        }

        /* Initially reset the stride window offset to next_readahead */
        if (ras->ras_consecutive_stride_requests == 2 && stride_detect)
                ras_set_stride_offset(ras);

        /* The initial ras_window_len is set to the request size.  To avoid
         * uselessly reading and discarding pages for random IO the window is
         * only increased once per consecutive request received. */
        if ((ras->ras_consecutive_requests > 1 &&
            !ras->ras_request_index) || stride_detect) {
                if (stride_io_mode(ras))
                        ras_stride_increase_window(ras, ra, RAS_INCREASE_STEP);
                else
                        ras->ras_window_len = min(ras->ras_window_len +
                                                  RAS_INCREASE_STEP,
                                                  ra->ra_max_pages);
        }
        EXIT;
 out_unlock:
        RAS_CDEBUG(ras);
        ras->ras_request_index++;
        spin_unlock(&ras->ras_lock);
        spin_unlock(&sbi->ll_lock);
        return;
}
int ll_writepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_export *exp;
        struct ll_async_page *llap;
        int rc = 0;
        ENTRY;

        LASSERT(PageLocked(page));

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        LASSERT(!PageWriteback(page));
        set_page_writeback(page);

        page_cache_get(page);
        if (llap->llap_write_queued) {
                LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
                rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
                                         llap->llap_cookie,
                                         ASYNC_READY | ASYNC_URGENT);
        } else {
                rc = queue_or_sync_write(exp, inode, llap, CFS_PAGE_SIZE,
                                         ASYNC_READY | ASYNC_URGENT);
        }
        if (rc)
                page_cache_release(page);
 out:
        if (rc) {
                if (!lli->lli_async_rc)
                        lli->lli_async_rc = rc;
                /* re-dirty page on error so it retries write */
                if (PageWriteback(page)) {
                        end_page_writeback(page);
                }
                /* resend page only for not started IO */
                if (!PageError(page))
                        ll_redirty_page(page);
                unlock_page(page);
        }
        RETURN(rc);
}
/*
 * for now we do our readpage the same on both 2.4 and 2.5.  The kernel's
 * read-ahead assumes it is valid to issue readpage all the way up to
 * i_size, but our dlm locks make that not the case.  We disable the
 * kernel's read-ahead and do our own by walking ahead in the page cache
 * checking for dlm lock coverage.  the main difference between 2.4 and
 * 2.6 is how read-ahead gets batched and issued, but we're using our own,
 * so they look the same.
 */
int ll_readpage(struct file *filp, struct page *page)
{
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct obd_io_group *oig = NULL;
        int rc;
        ENTRY;

        LASSERT(PageLocked(page));
        LASSERT(!PageUptodate(page));
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset=%Lu=%#Lx\n",
               inode->i_ino, inode->i_generation, inode,
               (((loff_t)page->index) << CFS_PAGE_SHIFT),
               (((loff_t)page->index) << CFS_PAGE_SHIFT));
        LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);

        if (!ll_i2info(inode)->lli_smd) {
                /* File with no objects - one big hole */
                /* We use this just for remove_from_page_cache that is not
                 * exported, we'd make page back up to date. */
                ll_truncate_complete_page(page);
                clear_page(kmap(page));
                kunmap(page);
                SetPageUptodate(page);
                unlock_page(page);
                RETURN(0);
        }

        rc = oig_init(&oig);
        if (rc < 0)
                GOTO(out, rc);

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page, LLAP_ORIGIN_READPAGE);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
                ras_update(ll_i2sbi(inode), inode, &fd->fd_ras, page->index,
                           llap->llap_defer_uptodate);

        if (llap->llap_defer_uptodate) {
                /* This is the callpath if we got the page from a readahead */
                llap->llap_ra_used = 1;
                rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                                  fd->fd_flags);
                if (rc > 0)
                        obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
                                             NULL, oig);
                LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
                SetPageUptodate(page);
                unlock_page(page);
                GOTO(out_oig, rc = 0);
        }

        if (likely((fd->fd_flags & LL_FILE_IGNORE_LOCK) == 0)) {
                rc = ll_page_matches(page, fd->fd_flags);
                if (rc < 0) {
                        LL_CDEBUG_PAGE(D_ERROR, page,
                                       "lock match failed: rc %d\n", rc);
                        GOTO(out, rc);
                }

                if (rc == 0) {
                        CWARN("ino %lu page %lu (%llu) not covered by "
                              "a lock (mmap?).  check debug logs.\n",
                              inode->i_ino, page->index,
                              (long long)page->index << CFS_PAGE_SHIFT);
                }
        }

        rc = ll_issue_page_read(exp, llap, oig, 0);
        if (rc)
                GOTO(out, rc);

        LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
        /* We have just requested the actual page we want, see if we can tack
         * on some readahead to that page's RPC before it is sent. */
        if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
                ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                             fd->fd_flags);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);

        EXIT;
 out:
        if (rc)
                unlock_page(page);
 out_oig:
        if (oig != NULL)
                oig_release(oig);
        return rc;
}