1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Lustre Lite I/O page cache routines shared by different kernel revs
6 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
8 * This file is part of Lustre, http://www.lustre.org.
10 * Lustre is free software; you can redistribute it and/or
11 * modify it under the terms of version 2 of the GNU General Public
12 * License as published by the Free Software Foundation.
14 * Lustre is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with Lustre; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
26 #include <linux/kernel.h>
28 #include <linux/string.h>
29 #include <linux/stat.h>
30 #include <linux/errno.h>
31 #include <linux/smp_lock.h>
32 #include <linux/unistd.h>
33 #include <linux/version.h>
34 #include <asm/system.h>
35 #include <asm/uaccess.h>
41 # include <asm/segment.h>
44 #include <linux/pagemap.h>
47 #define DEBUG_SUBSYSTEM S_LLITE
49 //#include <lustre_mdc.h>
50 #include <lustre_lite.h>
51 #include "llite_internal.h"
52 #include <linux/lustre_compat25.h>
54 #ifndef list_for_each_prev_safe
55 #define list_for_each_prev_safe(pos, n, head) \
56 for (pos = (head)->prev, n = pos->prev; pos != (head); \
             pos = n, n = pos->prev)
#endif
60 cfs_mem_cache_t *ll_async_page_slab = NULL;
61 size_t ll_async_page_slab_size = 0;
63 /* SYNCHRONOUS I/O to object storage for an inode */
64 static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
65 struct page *page, int flags)
67 struct ll_inode_info *lli = ll_i2info(inode);
68 struct lov_stripe_md *lsm = lli->lli_smd;
69 struct obd_info oinfo = { { { 0 } } };
75 pg.off = ((obd_off)page->index) << CFS_PAGE_SHIFT;
77 if ((cmd & OBD_BRW_WRITE) && (pg.off+CFS_PAGE_SIZE>i_size_read(inode)))
78 pg.count = i_size_read(inode) % CFS_PAGE_SIZE;
80 pg.count = CFS_PAGE_SIZE;
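        /* Worked example, assuming 4 KiB CFS_PAGE_SIZE: writing the tail of
         * a 10000-byte file at page->index = 2 gives pg.off = 8192 and
         * pg.count = 10000 % 4096 = 1808, so only the bytes inside i_size
         * go over the wire. */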
82 LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
83 cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
84 inode->i_ino, pg.off, pg.off);
86 CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
87 LPU64"\n", inode->i_ino, inode, i_size_read(inode),
88 page->mapping->host, i_size_read(page->mapping->host),
94 if (cmd & OBD_BRW_WRITE)
95 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_WRITE,
98 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_READ,
102 /* NB partial write, so we might not have CAPA_OPC_OSS_READ capa */
103 opc = cmd & OBD_BRW_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;
104 oinfo.oi_capa = ll_osscapa_get(inode, opc);
105 rc = obd_brw(cmd, ll_i2dtexp(inode), &oinfo, 1, &pg, NULL);
106 capa_put(oinfo.oi_capa);
108 obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
110 CERROR("error from obd_brw: rc = %d\n", rc);
/* this isn't where truncate starts.  roughly:
 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate. setattr_raw grabs
 * DLM lock on [size, EOF], i_mutex, ->lli_size_sem, and WRITE_I_ALLOC_SEM to
 * avoid races.
 *
 * must be called under ->lli_size_sem */
120 void ll_truncate(struct inode *inode)
122 struct ll_inode_info *lli = ll_i2info(inode);
123 struct obd_info oinfo = { { { 0 } } };
128 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu=%#Lx\n",inode->i_ino,
129 inode->i_generation, inode, i_size_read(inode),
132 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
133 if (lli->lli_size_sem_owner != current) {
139 CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
144 LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);
        /* XXX I'm pretty sure this is a hack to paper over a more
         * fundamental race condition. */
148 lov_stripe_lock(lli->lli_smd);
149 inode_init_lvb(inode, &lvb);
150 rc = obd_merge_lvb(ll_i2dtexp(inode), lli->lli_smd, &lvb, 0);
151 if (lvb.lvb_size == i_size_read(inode) && rc == 0) {
152 CDEBUG(D_VFSTRACE, "skipping punch for obj "LPX64", %Lu=%#Lx\n",
153 lli->lli_smd->lsm_object_id, i_size_read(inode),
155 lov_stripe_unlock(lli->lli_smd);
159 obd_adjust_kms(ll_i2dtexp(inode), lli->lli_smd, i_size_read(inode), 1);
160 lov_stripe_unlock(lli->lli_smd);
162 if (unlikely((ll_i2sbi(inode)->ll_flags & LL_SBI_CHECKSUM) &&
163 (i_size_read(inode) & ~CFS_PAGE_MASK))) {
                /* If the truncate leaves behind a partial page, update its
                 * checksum. */
166 struct page *page = find_get_page(inode->i_mapping,
167 i_size_read(inode) >>
170 struct ll_async_page *llap = llap_cast_private(page);
172 char *kaddr = kmap_atomic(page, KM_USER0);
173 llap->llap_checksum =
174 init_checksum(OSC_DEFAULT_CKSUM);
175 llap->llap_checksum =
176 compute_checksum(llap->llap_checksum,
177 kaddr, CFS_PAGE_SIZE,
179 kunmap_atomic(kaddr, KM_USER0);
181 page_cache_release(page);
185 CDEBUG(D_INFO, "calling punch for "LPX64" (new size %Lu=%#Lx)\n",
186 lli->lli_smd->lsm_object_id, i_size_read(inode), i_size_read(inode));
188 oinfo.oi_md = lli->lli_smd;
189 oinfo.oi_policy.l_extent.start = i_size_read(inode);
190 oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
192 oa.o_id = lli->lli_smd->lsm_object_id;
193 oa.o_gr = lli->lli_smd->lsm_object_gr;
194 oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
196 obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
197 OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME |
198 OBD_MD_FLFID | OBD_MD_FLGENER);
200 ll_inode_size_unlock(inode, 0);
202 oinfo.oi_capa = ll_osscapa_get(inode, CAPA_OPC_OSS_TRUNC);
203 rc = obd_punch_rqset(ll_i2dtexp(inode), &oinfo, NULL);
204 ll_truncate_free_capa(oinfo.oi_capa);
206 CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
208 obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
209 OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);
214 ll_inode_size_unlock(inode, 0);
217 int ll_prepare_write(struct file *file, struct page *page, unsigned from,
220 struct inode *inode = page->mapping->host;
221 struct ll_inode_info *lli = ll_i2info(inode);
222 struct lov_stripe_md *lsm = lli->lli_smd;
223 obd_off offset = ((obd_off)page->index) << CFS_PAGE_SHIFT;
224 struct obd_info oinfo = { { { 0 } } };
231 LASSERT(PageLocked(page));
232 (void)llap_cast_private(page); /* assertion */
234 /* Check to see if we should return -EIO right away */
237 pga.count = CFS_PAGE_SIZE;
240 oa.o_mode = inode->i_mode;
241 oa.o_id = lsm->lsm_object_id;
242 oa.o_gr = lsm->lsm_object_gr;
243 oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE |
244 OBD_MD_FLTYPE | OBD_MD_FLGROUP;
245 obdo_from_inode(&oa, inode, OBD_MD_FLFID | OBD_MD_FLGENER);
249 rc = obd_brw(OBD_BRW_CHECK, ll_i2dtexp(inode), &oinfo, 1, &pga, NULL);
253 if (PageUptodate(page)) {
254 LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
258 /* We're completely overwriting an existing page, so _don't_ set it up
259 * to date until commit_write */
260 if (from == 0 && to == CFS_PAGE_SIZE) {
261 LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
262 POISON_PAGE(page, 0x11);
        /* If we are writing to a new page, no need to read old data. The extent
267 * locking will have updated the KMS, and for our purposes here we can
268 * treat it like i_size. */
269 lov_stripe_lock(lsm);
270 inode_init_lvb(inode, &lvb);
271 obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
272 lov_stripe_unlock(lsm);
273 if (lvb.lvb_size <= offset) {
274 char *kaddr = kmap_atomic(page, KM_USER0);
275 LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
276 lvb.lvb_size, offset);
277 memset(kaddr, 0, CFS_PAGE_SIZE);
278 kunmap_atomic(kaddr, KM_USER0);
279 GOTO(prepare_done, rc = 0);
282 /* XXX could be an async ocp read.. read-ahead? */
283 rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
285 /* bug 1598: don't clobber blksize */
286 oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
287 obdo_refresh_inode(inode, &oa, oa.o_valid);
293 SetPageUptodate(page);
298 static int ll_ap_make_ready(void *data, int cmd)
300 struct ll_async_page *llap;
304 llap = LLAP_FROM_COOKIE(data);
305 page = llap->llap_page;
307 LASSERTF(!(cmd & OBD_BRW_READ), "cmd %x page %p ino %lu index %lu\n", cmd, page,
308 page->mapping->host->i_ino, page->index);
310 /* we're trying to write, but the page is locked.. come back later */
311 if (TryLockPage(page))
314 LASSERT(!PageWriteback(page));
316 /* if we left PageDirty we might get another writepage call
317 * in the future. list walkers are bright enough
318 * to check page dirty so we can leave it on whatever list
         * it's on.  XXX also, we're called with the cli list so if
320 * we got the page cache list we'd create a lock inversion
         * with the removepage path which gets the page lock then the
         * cli lock */
323 LASSERTF(!PageWriteback(page),"cmd %x page %p ino %lu index %lu\n", cmd, page,
324 page->mapping->host->i_ino, page->index);
325 clear_page_dirty_for_io(page);
327 /* This actually clears the dirty bit in the radix tree.*/
328 set_page_writeback(page);
330 LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
331 page_cache_get(page);
336 /* We have two reasons for giving llite the opportunity to change the
337 * write length of a given queued page as it builds the RPC containing
340 * 1) Further extending writes may have landed in the page cache
341 * since a partial write first queued this page requiring us
342 * to write more from the page cache. (No further races are possible, since
343 * by the time this is called, the page is locked.)
344 * 2) We might have raced with truncate and want to avoid performing
345 * write RPCs that are just going to be thrown away by the
346 * truncate's punch on the storage targets.
 * The kms serves these purposes as it is set at both truncate and extending
 * writes. */
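/* Worked example, assuming 4 KiB pages and hypothetical numbers: with
 * kms = 10000, a queued page at index 2 covers [8192, 12288), which
 * straddles kms, so the write is trimmed to 10000 % 4096 = 1808 bytes;
 * a page at index 3 starts at 12288 >= kms and is skipped entirely. */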
351 static int ll_ap_refresh_count(void *data, int cmd)
353 struct ll_inode_info *lli;
354 struct ll_async_page *llap;
355 struct lov_stripe_md *lsm;
362 /* readpage queues with _COUNT_STABLE, shouldn't get here. */
363 LASSERT(cmd != OBD_BRW_READ);
365 llap = LLAP_FROM_COOKIE(data);
366 page = llap->llap_page;
367 inode = page->mapping->host;
368 lli = ll_i2info(inode);
371 lov_stripe_lock(lsm);
372 inode_init_lvb(inode, &lvb);
373 obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
375 lov_stripe_unlock(lsm);
377 /* catch race with truncate */
378 if (((__u64)page->index << CFS_PAGE_SHIFT) >= kms)
381 /* catch sub-page write at end of file */
382 if (((__u64)page->index << CFS_PAGE_SHIFT) + CFS_PAGE_SIZE > kms)
383 return kms % CFS_PAGE_SIZE;
385 return CFS_PAGE_SIZE;
388 void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
390 struct lov_stripe_md *lsm;
391 obd_flag valid_flags;
393 lsm = ll_i2info(inode)->lli_smd;
395 oa->o_id = lsm->lsm_object_id;
396 oa->o_gr = lsm->lsm_object_gr;
397 oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
398 valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
399 if (cmd & OBD_BRW_WRITE) {
400 oa->o_valid |= OBD_MD_FLEPOCH;
401 oa->o_easize = ll_i2info(inode)->lli_ioepoch;
403 valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
404 OBD_MD_FLUID | OBD_MD_FLGID |
405 OBD_MD_FLFID | OBD_MD_FLGENER;
408 obdo_from_inode(oa, inode, valid_flags);
411 static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
413 struct ll_async_page *llap;
416 llap = LLAP_FROM_COOKIE(data);
417 ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);
422 static void ll_ap_update_obdo(void *data, int cmd, struct obdo *oa,
425 struct ll_async_page *llap;
428 llap = LLAP_FROM_COOKIE(data);
429 obdo_from_inode(oa, llap->llap_page->mapping->host, valid);
434 static struct obd_capa *ll_ap_lookup_capa(void *data, int cmd)
436 struct ll_async_page *llap = LLAP_FROM_COOKIE(data);
437 int opc = cmd & OBD_BRW_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;
439 return ll_osscapa_get(llap->llap_page->mapping->host, opc);
442 static struct obd_async_page_ops ll_async_page_ops = {
443 .ap_make_ready = ll_ap_make_ready,
444 .ap_refresh_count = ll_ap_refresh_count,
445 .ap_fill_obdo = ll_ap_fill_obdo,
446 .ap_update_obdo = ll_ap_update_obdo,
447 .ap_completion = ll_ap_completion,
448 .ap_lookup_capa = ll_ap_lookup_capa,
451 struct ll_async_page *llap_cast_private(struct page *page)
453 struct ll_async_page *llap = (struct ll_async_page *)page_private(page);
455 LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
456 "page %p private %lu gave magic %d which != %d\n",
457 page, page_private(page), llap->llap_magic, LLAP_MAGIC);
462 /* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
464 * There is an llap attached onto every page in lustre, linked off @sbi.
465 * We add an llap to the list so we don't lose our place during list walking.
466 * If llaps in the list are being moved they will only move to the end
467 * of the LRU, and we aren't terribly interested in those pages here (we
 * start at the beginning of the list where the least-used llaps are). */
470 int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
472 struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
473 unsigned long total, want, count = 0;
475 total = sbi->ll_async_page_count;
477 /* There can be a large number of llaps (600k or more in a large
478 * memory machine) so the VM 1/6 shrink ratio is likely too much.
479 * Since we are freeing pages also, we don't necessarily want to
480 * shrink so much. Limit to 40MB of pages + llaps per call. */
481 if (shrink_fraction == 0)
482 want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
484 want = (total + shrink_fraction - 1) / shrink_fraction;
486 if (want > 40 << (20 - CFS_PAGE_SHIFT))
487 want = 40 << (20 - CFS_PAGE_SHIFT);
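        /* Worked example, hypothetical numbers: with 600,000 cached llaps
         * and shrink_fraction = 6 the fraction rule asks for 100,000 pages,
         * but the 40MB cap (40 << (20 - 12) = 10,240 pages with 4 KiB
         * pages) wins, so a single call stays bounded. */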
489 CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
490 want, total, shrink_fraction);
492 spin_lock(&sbi->ll_lock);
493 list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);
        /* note: total is unsigned, so a "--total >= 0" test would always
         * be true */
        while (total > 0 && count < want) {
                total--;
499 if (unlikely(need_resched())) {
500 spin_unlock(&sbi->ll_lock);
502 spin_lock(&sbi->ll_lock);
505 llap = llite_pglist_next_llap(sbi,&dummy_llap.llap_pglist_item);
506 list_del_init(&dummy_llap.llap_pglist_item);
510 page = llap->llap_page;
511 LASSERT(page != NULL);
513 list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);
515 /* Page needs/undergoing IO */
516 if (TryLockPage(page)) {
517 LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
521 keep = (llap->llap_write_queued || PageDirty(page) ||
522 PageWriteback(page) || (!PageUptodate(page) &&
523 llap->llap_origin != LLAP_ORIGIN_READAHEAD));
525 LL_CDEBUG_PAGE(D_PAGE, page,"%s LRU page: %s%s%s%s%s origin %s\n",
526 keep ? "keep" : "drop",
527 llap->llap_write_queued ? "wq " : "",
528 PageDirty(page) ? "pd " : "",
529 PageUptodate(page) ? "" : "!pu ",
530 PageWriteback(page) ? "wb" : "",
531 llap->llap_defer_uptodate ? "" : "!du",
532 llap_origins[llap->llap_origin]);
534 /* If page is dirty or undergoing IO don't discard it */
540 page_cache_get(page);
541 spin_unlock(&sbi->ll_lock);
543 if (page->mapping != NULL) {
544 ll_teardown_mmaps(page->mapping,
545 (__u64)page->index << CFS_PAGE_SHIFT,
546 ((__u64)page->index << CFS_PAGE_SHIFT)|
548 if (!PageDirty(page) && !page_mapped(page)) {
549 ll_ra_accounting(llap, page->mapping);
550 ll_truncate_complete_page(page);
553 LL_CDEBUG_PAGE(D_PAGE, page, "Not dropping page"
561 page_cache_release(page);
563 spin_lock(&sbi->ll_lock);
565 list_del(&dummy_llap.llap_pglist_item);
566 spin_unlock(&sbi->ll_lock);
568 CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
574 struct ll_async_page *llap_from_page(struct page *page, unsigned origin)
576 struct ll_async_page *llap;
577 struct obd_export *exp;
578 struct inode *inode = page->mapping->host;
579 struct ll_sb_info *sbi;
584 static int triggered;
587 LL_CDEBUG_PAGE(D_ERROR, page, "Bug 10047. Wrong anon "
589 libcfs_debug_dumpstack(NULL);
592 RETURN(ERR_PTR(-EINVAL));
594 sbi = ll_i2sbi(inode);
595 LASSERT(ll_async_page_slab);
596 LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);
598 llap = llap_cast_private(page);
                /* move to end of LRU list, except when page is just about to
                 * die */
602 if (origin != LLAP_ORIGIN_REMOVEPAGE) {
603 spin_lock(&sbi->ll_lock);
604 sbi->ll_pglist_gen++;
605 list_del_init(&llap->llap_pglist_item);
606 list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
607 spin_unlock(&sbi->ll_lock);
612 exp = ll_i2dtexp(page->mapping->host);
614 RETURN(ERR_PTR(-EINVAL));
616 /* limit the number of lustre-cached pages */
617 if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
618 llap_shrink_cache(sbi, 0);
620 OBD_SLAB_ALLOC(llap, ll_async_page_slab, CFS_ALLOC_STD,
621 ll_async_page_slab_size);
623 RETURN(ERR_PTR(-ENOMEM));
624 llap->llap_magic = LLAP_MAGIC;
625 llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));
627 rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
628 (obd_off)page->index << CFS_PAGE_SHIFT,
629 &ll_async_page_ops, llap, &llap->llap_cookie);
631 OBD_SLAB_FREE(llap, ll_async_page_slab,
632 ll_async_page_slab_size);
636 CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
637 page, llap->llap_cookie, (obd_off)page->index << CFS_PAGE_SHIFT);
638 /* also zeroing the PRIVBITS low order bitflags */
639 __set_page_ll_data(page, llap);
640 llap->llap_page = page;
641 spin_lock(&sbi->ll_lock);
642 sbi->ll_pglist_gen++;
643 sbi->ll_async_page_count++;
644 list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
645 INIT_LIST_HEAD(&llap->llap_pending_write);
646 spin_unlock(&sbi->ll_lock);
649 if (unlikely(sbi->ll_flags & LL_SBI_CHECKSUM)) {
651 char *kaddr = kmap_atomic(page, KM_USER0);
652 csum = init_checksum(OSC_DEFAULT_CKSUM);
653 csum = compute_checksum(csum, kaddr, CFS_PAGE_SIZE,
655 kunmap_atomic(kaddr, KM_USER0);
656 if (origin == LLAP_ORIGIN_READAHEAD ||
657 origin == LLAP_ORIGIN_READPAGE) {
658 llap->llap_checksum = 0;
659 } else if (origin == LLAP_ORIGIN_COMMIT_WRITE ||
660 llap->llap_checksum == 0) {
661 llap->llap_checksum = csum;
662 CDEBUG(D_PAGE, "page %p cksum %x\n", page, csum);
663 } else if (llap->llap_checksum == csum) {
664 /* origin == LLAP_ORIGIN_WRITEPAGE */
665 CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
668 /* origin == LLAP_ORIGIN_WRITEPAGE */
669 LL_CDEBUG_PAGE(D_ERROR, page, "old cksum %x != new "
670 "%x!\n", llap->llap_checksum, csum);
674 llap->llap_origin = origin;
678 static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
679 struct ll_async_page *llap,
680 unsigned to, obd_flag async_flags)
682 unsigned long size_index = i_size_read(inode) >> CFS_PAGE_SHIFT;
683 struct obd_io_group *oig;
684 struct ll_sb_info *sbi = ll_i2sbi(inode);
685 int rc, noquot = llap->llap_ignore_quota ? OBD_BRW_NOQUOTA : 0;
688 /* _make_ready only sees llap once we've unlocked the page */
689 llap->llap_write_queued = 1;
690 rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
691 llap->llap_cookie, OBD_BRW_WRITE | noquot,
692 0, 0, 0, async_flags);
694 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
698 llap->llap_write_queued = 0;
                /* Do not pass llap here as this is a sync write. */
700 llap_write_pending(inode, NULL);
706 /* make full-page requests if we are not at EOF (bug 4410) */
707 if (to != CFS_PAGE_SIZE && llap->llap_page->index < size_index) {
708 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
709 "sync write before EOF: size_index %lu, to %d\n",
712 } else if (to != CFS_PAGE_SIZE && llap->llap_page->index == size_index) {
713 int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
714 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
715 "sync write at EOF: size_index %lu, to %d/%d\n",
716 size_index, to, size_to);
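                /* e.g., assuming 4 KiB pages: with i_size = 10000,
                 * size_index = 2 and size_to = 10000 & ~CFS_PAGE_MASK =
                 * 1808; partial writes to pages below index 2 are widened
                 * to full pages, while the write to the page at index 2 is
                 * sized against the 1808 valid bytes. */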
721 /* compare the checksum once before the page leaves llite */
722 if (unlikely((sbi->ll_flags & LL_SBI_CHECKSUM) &&
723 llap->llap_checksum != 0)) {
725 struct page *page = llap->llap_page;
726 char *kaddr = kmap_atomic(page, KM_USER0);
727 csum = init_checksum(OSC_DEFAULT_CKSUM);
728 csum = compute_checksum(csum, kaddr, CFS_PAGE_SIZE,
730 kunmap_atomic(kaddr, KM_USER0);
731 if (llap->llap_checksum == csum) {
732 CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
735 CERROR("page %p old cksum %x != new cksum %x!\n",
736 page, llap->llap_checksum, csum);
740 rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
741 llap->llap_cookie, OBD_BRW_WRITE | noquot,
742 0, to, 0, ASYNC_READY | ASYNC_URGENT |
743 ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
747 rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
753 if (!rc && async_flags & ASYNC_READY) {
754 unlock_page(llap->llap_page);
755 if (PageWriteback(llap->llap_page)) {
756 end_page_writeback(llap->llap_page);
760 if (rc == 0 && llap_write_complete(inode, llap))
761 ll_queue_done_writing(inode, 0);
763 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);
771 /* update our write count to account for i_size increases that may have
772 * happened since we've queued the page for io. */
774 /* be careful not to return success without setting the page Uptodate or
775 * the next pass through prepare_write will read in stale data from disk. */
776 int ll_commit_write(struct file *file, struct page *page, unsigned from,
779 struct inode *inode = page->mapping->host;
780 struct ll_inode_info *lli = ll_i2info(inode);
781 struct lov_stripe_md *lsm = lli->lli_smd;
782 struct obd_export *exp;
783 struct ll_async_page *llap;
788 SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
789 LASSERT(inode == file->f_dentry->d_inode);
790 LASSERT(PageLocked(page));
792 CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
793 inode, page, from, to, page->index);
795 llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
797 RETURN(PTR_ERR(llap));
799 exp = ll_i2dtexp(inode);
803 llap->llap_ignore_quota = capable(CAP_SYS_RESOURCE);
        /*
         * queue a write for some time in the future the first time we
         * dirty the page.
         *
809 * This is different from what other file systems do: they usually
810 * just mark page (and some of its buffers) dirty and rely on
811 * balance_dirty_pages() to start a write-back. Lustre wants write-back
812 * to be started earlier for the following reasons:
814 * (1) with a large number of clients we need to limit the amount
815 * of cached data on the clients a lot;
817 * (2) large compute jobs generally want compute-only then io-only
818 * and the IO should complete as quickly as possible;
820 * (3) IO is batched up to the RPC size and is async until the
821 * client max cache is hit
822 * (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
825 if (!PageDirty(page)) {
826 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_MISSES, 1);
828 rc = queue_or_sync_write(exp, inode, llap, to, 0);
832 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_HITS, 1);
835 /* put the page in the page cache, from now on ll_removepage is
836 * responsible for cleaning up the llap.
837 * only set page dirty when it's queued to be write out */
838 if (llap->llap_write_queued)
839 set_page_dirty(page);
842 size = (((obd_off)page->index) << CFS_PAGE_SHIFT) + to;
843 ll_inode_size_lock(inode, 0);
845 lov_stripe_lock(lsm);
846 obd_adjust_kms(exp, lsm, size, 0);
847 lov_stripe_unlock(lsm);
848 if (size > i_size_read(inode))
849 i_size_write(inode, size);
850 SetPageUptodate(page);
851 } else if (size > i_size_read(inode)) {
                /* this page is beyond the pale of i_size, so it can't be
853 * truncated in ll_p_r_e during lock revoking. we must
854 * teardown our book-keeping here. */
857 ll_inode_size_unlock(inode, 0);
861 static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
863 struct ll_ra_info *ra = &sbi->ll_ra_info;
867 spin_lock(&sbi->ll_lock);
868 ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
869 ra->ra_cur_pages += ret;
870 spin_unlock(&sbi->ll_lock);
875 static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
877 struct ll_ra_info *ra = &sbi->ll_ra_info;
878 spin_lock(&sbi->ll_lock);
879 LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
880 ra->ra_cur_pages, len);
881 ra->ra_cur_pages -= len;
882 spin_unlock(&sbi->ll_lock);
885 /* called for each page in a completed rpc.*/
886 int ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
888 struct ll_async_page *llap;
893 llap = LLAP_FROM_COOKIE(data);
894 page = llap->llap_page;
895 LASSERT(PageLocked(page));
896 LASSERT(CheckWriteback(page,cmd));
898 LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);
900 if (cmd & OBD_BRW_READ && llap->llap_defer_uptodate)
901 ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);
904 if (cmd & OBD_BRW_READ) {
905 if (!llap->llap_defer_uptodate)
906 SetPageUptodate(page);
908 llap->llap_write_queued = 0;
910 ClearPageError(page);
912 if (cmd & OBD_BRW_READ) {
913 llap->llap_defer_uptodate = 0;
917 set_bit(AS_ENOSPC, &page->mapping->flags);
919 set_bit(AS_EIO, &page->mapping->flags);
924 if (cmd & OBD_BRW_WRITE) {
                /* Only if rc == 0 (the write succeeded) may this page be
                 * deleted from the pending_writing list. */
928 if (rc == 0 && llap_write_complete(page->mapping->host, llap))
929 ll_queue_done_writing(page->mapping->host, 0);
932 if (PageWriteback(page)) {
933 end_page_writeback(page);
935 page_cache_release(page);
940 /* the kernel calls us here when a page is unhashed from the page cache.
941 * the page will be locked and the kernel is holding a spinlock, so
 * we need to be careful.  we're just tearing down our book-keeping
 * here. */
944 void ll_removepage(struct page *page)
946 struct inode *inode = page->mapping->host;
947 struct obd_export *exp;
948 struct ll_async_page *llap;
949 struct ll_sb_info *sbi = ll_i2sbi(inode);
953 LASSERT(!in_interrupt());
955 /* sync pages or failed read pages can leave pages in the page
956 * cache that don't have our data associated with them anymore */
957 if (page_private(page) == 0) {
962 LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");
964 exp = ll_i2dtexp(inode);
966 CERROR("page %p ind %lu gave null export\n", page, page->index);
971 llap = llap_from_page(page, LLAP_ORIGIN_REMOVEPAGE);
973 CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
974 page->index, PTR_ERR(llap));
979 if (llap_write_complete(inode, llap))
980 ll_queue_done_writing(inode, 0);
982 rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
985 CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);
987 /* this unconditional free is only safe because the page lock
         * is providing exclusivity to memory pressure/truncate/writeback... */
989 __clear_page_ll_data(page);
991 spin_lock(&sbi->ll_lock);
992 if (!list_empty(&llap->llap_pglist_item))
993 list_del_init(&llap->llap_pglist_item);
994 sbi->ll_pglist_gen++;
995 sbi->ll_async_page_count--;
996 spin_unlock(&sbi->ll_lock);
997 OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
1001 static int ll_page_matches(struct page *page, int fd_flags)
1003 struct lustre_handle match_lockh = {0};
1004 struct inode *inode = page->mapping->host;
1005 ldlm_policy_data_t page_extent;
1009 if (unlikely(fd_flags & LL_FILE_GROUP_LOCKED))
1012 page_extent.l_extent.start = (__u64)page->index << CFS_PAGE_SHIFT;
1013 page_extent.l_extent.end =
1014 page_extent.l_extent.start + CFS_PAGE_SIZE - 1;
1015 flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
1016 if (!(fd_flags & LL_FILE_READAHEAD))
1017 flags |= LDLM_FL_CBPENDING;
1018 matches = obd_match(ll_i2sbi(inode)->ll_dt_exp,
1019 ll_i2info(inode)->lli_smd, LDLM_EXTENT,
1020 &page_extent, LCK_PR | LCK_PW, &flags, inode,
1025 static int ll_issue_page_read(struct obd_export *exp,
1026 struct ll_async_page *llap,
1027 struct obd_io_group *oig, int defer)
1029 struct page *page = llap->llap_page;
1032 page_cache_get(page);
1033 llap->llap_defer_uptodate = defer;
1034 llap->llap_ra_used = 0;
1035 rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
1036 NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
1037 CFS_PAGE_SIZE, 0, ASYNC_COUNT_STABLE |
1038 ASYNC_READY | ASYNC_URGENT);
1040 LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
1041 page_cache_release(page);
1046 static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
1048 LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
1049 ra->ra_stats[which]++;
1052 static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
1054 struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
1055 struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;
1057 spin_lock(&sbi->ll_lock);
1058 ll_ra_stats_inc_unlocked(ra, which);
1059 spin_unlock(&sbi->ll_lock);
1062 void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
1064 if (!llap->llap_defer_uptodate || llap->llap_ra_used)
1067 ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
#define RAS_CDEBUG(ras) \
        CDEBUG(D_READA,                                                      \
               "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu "   \
               "csr %lu sf %lu sp %lu sl %lu\n",                             \
1074 ras->ras_last_readpage, ras->ras_consecutive_requests, \
1075 ras->ras_consecutive_pages, ras->ras_window_start, \
1076 ras->ras_window_len, ras->ras_next_readahead, \
1077 ras->ras_requests, ras->ras_request_index, \
1078 ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \
1079 ras->ras_stride_pages, ras->ras_stride_length)
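/* Sequentiality test used by ras_update() below: e.g.,
 * index_in_window(idx, last, 8, 8) treats any index within 8 pages on
 * either side of the last read page as part of the same stream. */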
1081 static int index_in_window(unsigned long index, unsigned long point,
1082 unsigned long before, unsigned long after)
        unsigned long start = point - before, end = point + after;

        /* clamp the unsigned window bounds on under/overflow */
        if (start > point)
                start = 0;
        if (end < point)
                end = ~0;

        return start <= index && index <= end;
1094 static struct ll_readahead_state *ll_ras_get(struct file *f)
1096 struct ll_file_data *fd;
1098 fd = LUSTRE_FPRIVATE(f);
1102 void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
1104 struct ll_readahead_state *ras;
1106 ras = ll_ras_get(f);
1108 spin_lock(&ras->ras_lock);
1109 ras->ras_requests++;
1110 ras->ras_request_index = 0;
1111 ras->ras_consecutive_requests++;
1112 rar->lrr_reader = current;
1114 list_add(&rar->lrr_linkage, &ras->ras_read_beads);
1115 spin_unlock(&ras->ras_lock);
1118 void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
1120 struct ll_readahead_state *ras;
1122 ras = ll_ras_get(f);
1124 spin_lock(&ras->ras_lock);
1125 list_del_init(&rar->lrr_linkage);
1126 spin_unlock(&ras->ras_lock);
1129 static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
1131 struct ll_ra_read *scan;
1133 list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
1134 if (scan->lrr_reader == current)
1140 struct ll_ra_read *ll_ra_read_get(struct file *f)
1142 struct ll_readahead_state *ras;
1143 struct ll_ra_read *bead;
1145 ras = ll_ras_get(f);
1147 spin_lock(&ras->ras_lock);
1148 bead = ll_ra_read_get_locked(ras);
1149 spin_unlock(&ras->ras_lock);
1153 static int ll_read_ahead_page(struct obd_export *exp, struct obd_io_group *oig,
1154 int index, struct address_space *mapping)
1156 struct ll_async_page *llap;
1158 unsigned int gfp_mask = 0;
1161 gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
1163 gfp_mask |= __GFP_NOWARN;
1165 page = grab_cache_page_nowait_gfp(mapping, index, gfp_mask);
1167 ll_ra_stats_inc(mapping, RA_STAT_FAILED_GRAB_PAGE);
1168 CDEBUG(D_READA, "g_c_p_n failed\n");
1172 /* Check if page was truncated or reclaimed */
1173 if (page->mapping != mapping) {
1174 ll_ra_stats_inc(mapping, RA_STAT_WRONG_GRAB_PAGE);
1175 CDEBUG(D_READA, "g_c_p_n returned invalid page\n");
1176 GOTO(unlock_page, rc = 0);
1179 /* we do this first so that we can see the page in the /proc
1181 llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
1182 if (IS_ERR(llap) || llap->llap_defer_uptodate) {
1183 if (PTR_ERR(llap) == -ENOLCK) {
1184 ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
1185 CDEBUG(D_READA | D_PAGE,
1186 "Adding page to cache failed index "
1188 CDEBUG(D_READA, "nolock page\n");
1189 GOTO(unlock_page, rc = -ENOLCK);
1191 CDEBUG(D_READA, "read-ahead page\n");
1192 GOTO(unlock_page, rc = 0);
1195 /* skip completed pages */
1196 if (Page_Uptodate(page))
1197 GOTO(unlock_page, rc = 0);
1199 /* bail out when we hit the end of the lock. */
1200 rc = ll_issue_page_read(exp, llap, oig, 1);
1202 LL_CDEBUG_PAGE(D_READA | D_PAGE, page, "started read-ahead\n");
1207 LL_CDEBUG_PAGE(D_READA | D_PAGE, page, "skipping read-ahead\n");
1209 page_cache_release(page);
/* ra_io_arg is filled at the beginning of ll_readahead under ras_lock;
 * the following ll_read_ahead_pages then reads RA pages according to this
 * arg.  All the items in this structure are counted in units of page
 * index. */
struct ra_io_arg {
        unsigned long ria_start;  /* start offset of read-ahead */
        unsigned long ria_end;    /* end offset of read-ahead */
        /* If a stride read pattern is detected, ria_stoff is where the
         * stride read starts.  Note: for normal read-ahead the value here
         * is meaningless and is never accessed. */
        pgoff_t ria_stoff;
        /* ria_length and ria_pages are the stride length and the number of
         * pages per stride in stride I/O mode; they are also used to check
         * whether a given page is inside the stride read-ahead window. */
1228 unsigned long ria_length;
1229 unsigned long ria_pages;
1232 #define RIA_DEBUG(ria) \
1233 CDEBUG(D_READA, "rs %lu re %lu ro %lu rl %lu rp %lu\n", \
               ria->ria_start, ria->ria_end, ria->ria_stoff, ria->ria_length,\
               ria->ria_pages)
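/* Illustrative stride layout, hypothetical numbers: an app that reads 4
 * pages and then skips 60 yields ria_stoff = 0, ria_pages = 4 and
 * ria_length = 64; only indices with (idx - ria_stoff) % 64 < 4 fall
 * inside the read-ahead window. */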
1237 #define RAS_INCREASE_STEP (1024 * 1024 >> CFS_PAGE_SHIFT)
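/* i.e. 256 pages with 4 KiB CFS_PAGE_SIZE: the window grows in 1MB steps */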
1239 static inline int stride_io_mode(struct ll_readahead_state *ras)
1241 return ras->ras_consecutive_stride_requests > 1;
/* Calculate how many pages will be read in [off, off + length] by stride
 * I/O mode, with stride_offset = st_off, stride_length = st_len and
 * stride_pages = st_pgs. */
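/* Roughly: every complete stride inside the region contributes st_pgs
 * pages, a trailing partial stride contributes min(remainder, st_pgs),
 * and any contiguous run before st_off (cont_len) is counted in full. */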
1249 static unsigned long
1250 stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
1251 unsigned long off, unsigned length)
1253 unsigned long cont_len = st_off > off ? st_off - off : 0;
        /* do_div() below needs a 64-bit dividend */
        __u64 stride_len = length + off > st_off ?
                           length + off + 1 - st_off : 0;
1256 unsigned long left, pg_count;
1258 if (st_len == 0 || length == 0)
1261 left = do_div(stride_len, st_len);
1262 left = min(left, st_pgs);
1264 pg_count = left + stride_len * st_pgs + cont_len;
1266 LASSERT(pg_count >= left);
1268 CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %u"
               " pgcount %lu\n", st_off, st_len, st_pgs, off, length, pg_count);
1274 static int ria_page_count(struct ra_io_arg *ria)
1276 __u64 length = ria->ria_end >= ria->ria_start ?
1277 ria->ria_end - ria->ria_start + 1 : 0;
1279 return stride_pg_count(ria->ria_stoff, ria->ria_length,
1280 ria->ria_pages, ria->ria_start,
/* Check whether the index is in the defined read-ahead window */
1285 static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria)
        /* If ria_length == ria_pages it means non-stride I/O mode, and idx
         * should always be inside the read-ahead window in this case.
         * For stride I/O mode, just check whether the idx is inside the
         * stride window. */
1291 return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
1292 (idx - ria->ria_stoff) % ria->ria_length < ria->ria_pages;
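        /* e.g., with ria_stoff = 0, ria_length = 64, ria_pages = 4:
         * indices 0..3, 64..67, 128..131, ... are inside the window and
         * everything in the gaps between strides is skipped. */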
1295 static int ll_read_ahead_pages(struct obd_export *exp,
1296 struct obd_io_group *oig,
1297 struct ra_io_arg *ria,
1298 unsigned long *reserved_pages,
1299 struct address_space *mapping,
1300 unsigned long *ra_end)
1302 int rc, count = 0, stride_ria;
1303 unsigned long page_idx;
1305 LASSERT(ria != NULL);
1308 stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
1309 for (page_idx = ria->ria_start; page_idx <= ria->ria_end &&
1310 *reserved_pages > 0; page_idx++) {
1311 if (ras_inside_ra_window(page_idx, ria)) {
1312 /* If the page is inside the read-ahead window*/
1313 rc = ll_read_ahead_page(exp, oig, page_idx, mapping);
1315 (*reserved_pages)--;
                        } else if (rc == -ENOLCK)
                                break;
1319 } else if (stride_ria) {
1320 /* If it is not in the read-ahead window, and it is
                         * read-ahead mode, then check whether it should skip
                         * the stride gap */
                        /* FIXME: This assertion is only valid for forward
                         * read-ahead; it will be fixed when backward
1326 * read-ahead is implemented */
                        LASSERTF(page_idx > ria->ria_stoff, "since %lu is in "
                                 "the gap of the ra window, it should be "
                                 "bigger than stride offset %lu\n",
                                 page_idx, ria->ria_stoff);
1331 offset = page_idx - ria->ria_stoff;
1332 offset = offset % (ria->ria_length);
1333 if (offset > ria->ria_pages) {
1334 page_idx += ria->ria_length - offset;
1335 CDEBUG(D_READA, "i %lu skip %lu \n", page_idx,
1336 ria->ria_length - offset);
1345 static int ll_readahead(struct ll_readahead_state *ras,
1346 struct obd_export *exp, struct address_space *mapping,
1347 struct obd_io_group *oig, int flags)
1349 unsigned long start = 0, end = 0, reserved;
1350 unsigned long ra_end, len;
1351 struct inode *inode;
1352 struct lov_stripe_md *lsm;
1353 struct ll_ra_read *bead;
1355 struct ra_io_arg ria = { 0 };
1360 inode = mapping->host;
1361 lsm = ll_i2info(inode)->lli_smd;
1363 lov_stripe_lock(lsm);
1364 inode_init_lvb(inode, &lvb);
1365 obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
1367 lov_stripe_unlock(lsm);
1369 ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
1373 spin_lock(&ras->ras_lock);
1374 bead = ll_ra_read_get_locked(ras);
1375 /* Enlarge the RA window to encompass the full read */
1376 if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
1377 bead->lrr_start + bead->lrr_count) {
1378 ras->ras_window_len = bead->lrr_start + bead->lrr_count -
1379 ras->ras_window_start;
1381 /* Reserve a part of the read-ahead window that we'll be issuing */
1382 if (ras->ras_window_len) {
1383 start = ras->ras_next_readahead;
1384 end = ras->ras_window_start + ras->ras_window_len - 1;
1387 /* Truncate RA window to end of file */
1388 end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
1389 ras->ras_next_readahead = max(end, end + 1);
1392 ria.ria_start = start;
1394 /* If stride I/O mode is detected, get stride window*/
1395 if (stride_io_mode(ras)) {
1396 ria.ria_length = ras->ras_stride_length;
1397 ria.ria_pages = ras->ras_stride_pages;
1399 spin_unlock(&ras->ras_lock);
1402 ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
1405 len = ria_page_count(&ria);
1409 reserved = ll_ra_count_get(ll_i2sbi(inode), len);
1411 if (reserved < end - start + 1)
1412 ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
1414 CDEBUG(D_READA, "reserved page %lu \n", reserved);
1416 ret = ll_read_ahead_pages(exp, oig, &ria, &reserved, mapping, &ra_end);
1418 LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
1420 ll_ra_count_put(ll_i2sbi(inode), reserved);
1422 if (ra_end == end + 1 && ra_end == (kms >> CFS_PAGE_SHIFT))
1423 ll_ra_stats_inc(mapping, RA_STAT_EOF);
1425 /* if we didn't get to the end of the region we reserved from
1426 * the ras we need to go back and update the ras so that the
1427 * next read-ahead tries from where we left off. we only do so
1428 * if the region we failed to issue read-ahead on is still ahead
1429 * of the app and behind the next index to start read-ahead from */
1430 CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu \n",
1431 ra_end, end, ria.ria_end);
1433 if (ra_end != (end + 1)) {
1434 spin_lock(&ras->ras_lock);
1435 if (ra_end < ras->ras_next_readahead &&
1436 index_in_window(ra_end, ras->ras_window_start, 0,
1437 ras->ras_window_len)) {
1438 ras->ras_next_readahead = ra_end;
1441 spin_unlock(&ras->ras_lock);
1447 static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
1449 ras->ras_window_start = index & (~(RAS_INCREASE_STEP - 1));
1452 /* called with the ras_lock held or from places where it doesn't matter */
1453 static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
1455 ras->ras_last_readpage = index;
1456 ras->ras_consecutive_requests = 0;
1457 ras->ras_consecutive_pages = 0;
1458 ras->ras_window_len = 0;
1459 ras_set_start(ras, index);
1460 ras->ras_next_readahead = max(ras->ras_window_start, index);
1465 /* called with the ras_lock held or from places where it doesn't matter */
1466 static void ras_stride_reset(struct ll_readahead_state *ras)
1468 ras->ras_consecutive_stride_requests = 0;
1472 void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
1474 spin_lock_init(&ras->ras_lock);
1476 ras->ras_requests = 0;
1477 INIT_LIST_HEAD(&ras->ras_read_beads);
1480 /* Check whether the read request is in the stride window.
 * If it is in the stride window, return 1; otherwise return 0.
 * Also update stride_gap and stride_pages. */
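/* e.g., after reading pages 0..3, an access to page 64 gives
 * stride_gap = 64 - 3 - 1 = 60; the run of 4 consecutive pages becomes
 * ras_stride_pages = 4 and ras_stride_length = 60 + 4 = 64, i.e. a
 * "read 4, skip 60" pattern. */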
1484 static int index_in_stride_window(unsigned long index,
1485 struct ll_readahead_state *ras,
1486 struct inode *inode)
1488 int stride_gap = index - ras->ras_last_readpage - 1;
1490 LASSERT(stride_gap != 0);
1492 if (ras->ras_consecutive_pages == 0)
        /* Otherwise, check the stride by itself */
1496 if ((ras->ras_stride_length - ras->ras_stride_pages) == stride_gap &&
1497 ras->ras_consecutive_pages == ras->ras_stride_pages)
1500 if (stride_gap >= 0) {
1502 * only set stride_pages, stride_length if
                 * it is forward reading (stride_gap > 0)
1505 ras->ras_stride_pages = ras->ras_consecutive_pages;
1506 ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
                 * If stride_gap < 0 (backward reading),
                 * reset the stride_pages/length.
                 * FIXME: backward stride I/O read.
1514 ras->ras_stride_pages = 0;
1515 ras->ras_stride_length = 0;
1522 static unsigned long
1523 stride_page_count(struct ll_readahead_state *ras, unsigned long len)
1525 return stride_pg_count(ras->ras_stride_offset, ras->ras_stride_length,
1526 ras->ras_stride_pages, ras->ras_stride_offset,
/* The stride read-ahead window will be increased by inc_len according to
 * the stride I/O pattern */
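/* e.g., for a "read 4, skip 60" pattern (ras_stride_length = 64,
 * ras_stride_pages = 4), assuming the window currently ends on a stride
 * boundary (left == 0): growing by inc_len = 256 payload pages gives
 * step = 256 / 4 = 64 strides, so ras_window_len grows by
 * 64 * 64 = 4096 page indices. */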
1532 static void ras_stride_increase_window(struct ll_readahead_state *ras,
1533 struct ll_ra_info *ra,
1534 unsigned long inc_len)
1536 unsigned long left, step, window_len;
1537 unsigned long stride_len;
1539 LASSERT(ras->ras_stride_length > 0);
1541 stride_len = ras->ras_window_start + ras->ras_window_len -
1542 ras->ras_stride_offset;
1544 LASSERTF(stride_len > 0, "window_start %lu, window_len %lu"
                 " stride_offset %lu\n", ras->ras_window_start,
1546 ras->ras_window_len, ras->ras_stride_offset);
1548 left = stride_len % ras->ras_stride_length;
1550 window_len = ras->ras_window_len - left;
1552 if (left < ras->ras_stride_pages)
1555 left = ras->ras_stride_pages + inc_len;
1557 LASSERT(ras->ras_stride_pages != 0);
1559 step = left / ras->ras_stride_pages;
1560 left %= ras->ras_stride_pages;
1562 window_len += step * ras->ras_stride_length + left;
1564 if (stride_page_count(ras, window_len) <= ra->ra_max_pages)
1565 ras->ras_window_len = window_len;
1570 /* Set stride I/O read-ahead window start offset */
1571 static void ras_set_stride_offset(struct ll_readahead_state *ras)
1573 unsigned long window_len = ras->ras_next_readahead -
1574 ras->ras_window_start;
1577 LASSERT(ras->ras_stride_length != 0);
1579 left = window_len % ras->ras_stride_length;
1581 ras->ras_stride_offset = ras->ras_next_readahead - left;
1586 static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
1587 struct ll_readahead_state *ras, unsigned long index,
1590 struct ll_ra_info *ra = &sbi->ll_ra_info;
1591 int zero = 0, stride_zero = 0, stride_detect = 0, ra_miss = 0;
1594 spin_lock(&sbi->ll_lock);
1595 spin_lock(&ras->ras_lock);
1597 ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);
1599 /* reset the read-ahead window in two cases. First when the app seeks
1600 * or reads to some other part of the file. Secondly if we get a
1601 * read-ahead miss that we think we've previously issued. This can
1602 * be a symptom of there being so many read-ahead pages that the VM is
1603 * reclaiming it before we get to it. */
1604 if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
1606 ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
                /* check whether it is in stride I/O mode */
1608 if (!index_in_stride_window(index, ras, inode))
1610 } else if (!hit && ras->ras_window_len &&
1611 index < ras->ras_next_readahead &&
1612 index_in_window(index, ras->ras_window_start, 0,
1613 ras->ras_window_len)) {
                /* If we hit a read-ahead miss and stride I/O has not yet
                 * been detected, reset the stride state to re-detect the
                 * whole stride I/O mode and avoid complications */
1619 if (!stride_io_mode(ras))
1621 ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
1624 /* On the second access to a file smaller than the tunable
1625 * ra_max_read_ahead_whole_pages trigger RA on all pages in the
1626 * file up to ra_max_pages. This is simply a best effort and
1627 * only occurs once per open file. Normal RA behavior is reverted
1628 * to for subsequent IO. The mmap case does not increment
1629 * ras_requests and thus can never trigger this behavior. */
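        /* e.g., assuming 4 KiB pages: a 1MB file has kms_pages = 256; if
         * the tunable allows at least 256 pages, the second read simply
         * sets the window to cover the whole file rather than growing it
         * incrementally. */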
1630 if (ras->ras_requests == 2 && !ras->ras_request_index) {
                kms_pages = (i_size_read(inode) + CFS_PAGE_SIZE - 1) >>
                            CFS_PAGE_SHIFT;
1636 CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
1637 ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages);
1640 kms_pages <= ra->ra_max_read_ahead_whole_pages) {
1641 ras->ras_window_start = 0;
1642 ras->ras_last_readpage = 0;
1643 ras->ras_next_readahead = 0;
1644 ras->ras_window_len = min(ra->ra_max_pages,
1645 ra->ra_max_read_ahead_whole_pages);
1646 GOTO(out_unlock, 0);
                /* If it is a discontinuous read, check
                 * whether it is in stride I/O mode */
1654 ras_reset(ras, index);
1655 ras->ras_consecutive_pages++;
1656 ras_stride_reset(ras);
1658 GOTO(out_unlock, 0);
1660 /* The read is still in stride window or
                 * it hits a read-ahead miss */
                        /* If a ra-window miss is hit, it probably means VM
                         * pressure reclaimed some read-ahead pages.  So the
                         * length of the ra-window is not increased, but it
                         * is also not reset, to avoid re-detecting the
                         * stride I/O mode. */
1667 ras->ras_consecutive_requests = 0;
1669 ras->ras_consecutive_pages = 0;
1670 if (++ras->ras_consecutive_stride_requests > 1)
1675 } else if (ras->ras_consecutive_stride_requests > 1) {
                /* If this is a contiguous read but we are currently in
                 * stride I/O mode, check whether the stride step is still
                 * valid; if not, reset the stride ra window */
1679 if (ras->ras_consecutive_pages + 1 > ras->ras_stride_pages)
1680 ras_stride_reset(ras);
1683 ras->ras_last_readpage = index;
1684 ras->ras_consecutive_pages++;
1685 ras_set_start(ras, index);
1686 ras->ras_next_readahead = max(ras->ras_window_start,
1687 ras->ras_next_readahead);
1690 /* Trigger RA in the mmap case where ras_consecutive_requests
1691 * is not incremented and thus can't be used to trigger RA */
1692 if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
1693 ras->ras_window_len = RAS_INCREASE_STEP;
1694 GOTO(out_unlock, 0);
        /* Initially reset the stride window offset to next_readahead */
1698 if (ras->ras_consecutive_stride_requests == 2 && stride_detect)
1699 ras_set_stride_offset(ras);
1701 /* The initial ras_window_len is set to the request size. To avoid
1702 * uselessly reading and discarding pages for random IO the window is
1703 * only increased once per consecutive request received. */
1704 if ((ras->ras_consecutive_requests > 1 &&
1705 !ras->ras_request_index) || stride_detect) {
1706 if (stride_io_mode(ras))
1707 ras_stride_increase_window(ras, ra, RAS_INCREASE_STEP);
                        ras->ras_window_len = min(ras->ras_window_len +
                                                  RAS_INCREASE_STEP,
                                                  ra->ra_max_pages);
1716 ras->ras_request_index++;
1717 spin_unlock(&ras->ras_lock);
1718 spin_unlock(&sbi->ll_lock);
1722 int ll_writepage(struct page *page)
1724 struct inode *inode = page->mapping->host;
1725 struct ll_inode_info *lli = ll_i2info(inode);
1726 struct obd_export *exp;
1727 struct ll_async_page *llap;
1731 LASSERT(PageLocked(page));
1733 exp = ll_i2dtexp(inode);
1735 GOTO(out, rc = -EINVAL);
1737 llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
1739 GOTO(out, rc = PTR_ERR(llap));
1741 LASSERT(!PageWriteback(page));
1742 set_page_writeback(page);
1744 page_cache_get(page);
1745 if (llap->llap_write_queued) {
1746 LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
1747 rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
1749 ASYNC_READY | ASYNC_URGENT);
1751 rc = queue_or_sync_write(exp, inode, llap, CFS_PAGE_SIZE,
1752 ASYNC_READY | ASYNC_URGENT);
1755 page_cache_release(page);
1758 if (!lli->lli_async_rc)
1759 lli->lli_async_rc = rc;
1760 /* re-dirty page on error so it retries write */
1761 if (PageWriteback(page)) {
1762 end_page_writeback(page);
                /* re-send the page only if its IO has not yet started */
1765 if (!PageError(page))
1766 ll_redirty_page(page);
1773 * for now we do our readpage the same on both 2.4 and 2.5. The kernel's
1774 * read-ahead assumes it is valid to issue readpage all the way up to
1775 * i_size, but our dlm locks make that not the case. We disable the
1776 * kernel's read-ahead and do our own by walking ahead in the page cache
1777 * checking for dlm lock coverage. the main difference between 2.4 and
1778 * 2.6 is how read-ahead gets batched and issued, but we're using our own,
1779 * so they look the same.
1781 int ll_readpage(struct file *filp, struct page *page)
1783 struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
1784 struct inode *inode = page->mapping->host;
1785 struct obd_export *exp;
1786 struct ll_async_page *llap;
1787 struct obd_io_group *oig = NULL;
1791 LASSERT(PageLocked(page));
1792 LASSERT(!PageUptodate(page));
1793 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset=%Lu=%#Lx\n",
1794 inode->i_ino, inode->i_generation, inode,
1795 (((loff_t)page->index) << CFS_PAGE_SHIFT),
1796 (((loff_t)page->index) << CFS_PAGE_SHIFT));
1797 LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);
1799 if (!ll_i2info(inode)->lli_smd) {
1800 /* File with no objects - one big hole */
                /* We use this only because remove_from_page_cache() is not
                 * exported; we then bring the page back up to date below. */
1803 ll_truncate_complete_page(page);
                clear_page(kmap(page));
                kunmap(page);
1806 SetPageUptodate(page);
1811 rc = oig_init(&oig);
1815 exp = ll_i2dtexp(inode);
1817 GOTO(out, rc = -EINVAL);
1819 llap = llap_from_page(page, LLAP_ORIGIN_READPAGE);
1821 GOTO(out, rc = PTR_ERR(llap));
1823 if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
1824 ras_update(ll_i2sbi(inode), inode, &fd->fd_ras, page->index,
1825 llap->llap_defer_uptodate);
1828 if (llap->llap_defer_uptodate) {
1829 /* This is the callpath if we got the page from a readahead */
1830 llap->llap_ra_used = 1;
1831 rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
1834 obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
1836 LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
1837 SetPageUptodate(page);
1839 GOTO(out_oig, rc = 0);
1842 if (likely((fd->fd_flags & LL_FILE_IGNORE_LOCK) == 0)) {
1843 rc = ll_page_matches(page, fd->fd_flags);
1845 LL_CDEBUG_PAGE(D_ERROR, page,
1846 "lock match failed: rc %d\n", rc);
1851 CWARN("ino %lu page %lu (%llu) not covered by "
1852 "a lock (mmap?). check debug logs.\n",
1853 inode->i_ino, page->index,
1854 (long long)page->index << CFS_PAGE_SHIFT);
1858 rc = ll_issue_page_read(exp, llap, oig, 0);
1862 LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
1863 /* We have just requested the actual page we want, see if we can tack
1864 * on some readahead to that page's RPC before it is sent. */
1865 if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
1866 ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
1869 rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);