1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 * Lustre Lite I/O page cache routines shared by different kernel revs
41 #ifndef AUTOCONF_INCLUDED
42 #include <linux/config.h>
44 #include <linux/kernel.h>
46 #include <linux/string.h>
47 #include <linux/stat.h>
48 #include <linux/errno.h>
49 #include <linux/smp_lock.h>
50 #include <linux/unistd.h>
51 #include <linux/version.h>
52 #include <asm/system.h>
53 #include <asm/uaccess.h>
56 #include <linux/stat.h>
57 #include <asm/uaccess.h>
59 #include <linux/pagemap.h>
60 #include <linux/smp_lock.h>
62 #define DEBUG_SUBSYSTEM S_LLITE
64 #include <linux/page-flags.h>
66 #include <lustre_lite.h>
67 #include "llite_internal.h"
68 #include <linux/lustre_compat25.h>
70 #ifndef list_for_each_prev_safe
71 #define list_for_each_prev_safe(pos, n, head) \
72 for (pos = (head)->prev, n = pos->prev; pos != (head); \
73 pos = n, n = pos->prev )
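/* Illustrative sketch (not part of the driver) of why the reverse-safe
 * variant above is useful: entries can be freed while walking the list
 * backwards, because 'n' is always fetched before 'pos' is removed.
 * The struct and function names here are invented for the example. */
#if 0
struct demo_item {
        struct list_head di_link;
};

static void demo_free_all_reverse(struct list_head *head)
{
        struct list_head *pos, *n;

        list_for_each_prev_safe(pos, n, head) {
                struct demo_item *item = list_entry(pos, struct demo_item,
                                                    di_link);
                list_del(&item->di_link);
                OBD_FREE_PTR(item);
        }
}
#endif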
76 cfs_mem_cache_t *ll_async_page_slab = NULL;
77 size_t ll_async_page_slab_size = 0;
79 /* SYNCHRONOUS I/O to object storage for an inode */
80 static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
81 struct page *page, int flags)
83 struct ll_inode_info *lli = ll_i2info(inode);
84 struct lov_stripe_md *lsm = lli->lli_smd;
85 struct obd_info oinfo = { { { 0 } } };
91 pg.off = ((obd_off)page->index) << CFS_PAGE_SHIFT;
93 if ((cmd & OBD_BRW_WRITE) && (pg.off+CFS_PAGE_SIZE>i_size_read(inode)))
94 pg.count = i_size_read(inode) % CFS_PAGE_SIZE;
96 pg.count = CFS_PAGE_SIZE;
98 LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
99 cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
100 inode->i_ino, pg.off, pg.off);
102 CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
103 LPU64"\n", inode->i_ino, inode, i_size_read(inode),
104 page->mapping->host, i_size_read(page->mapping->host),
105 page->index, pg.off);
110 if (cmd & OBD_BRW_WRITE)
111 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_WRITE,
114 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_READ,
118 rc = obd_brw(cmd, ll_i2obdexp(inode), &oinfo, 1, &pg, NULL);
120 obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
122 CERROR("error from obd_brw: rc = %d\n", rc);
126 int ll_file_punch(struct inode * inode, loff_t new_size, int srvlock)
128 struct ll_inode_info *lli = ll_i2info(inode);
129 struct obd_info oinfo = { { { 0 } } };
130 struct obdo oa = { 0 };
135 CDEBUG(D_INFO, "calling punch for "LPX64" (new size %Lu=%#Lx)\n",
136 lli->lli_smd->lsm_object_id, new_size, new_size);
138 oinfo.oi_md = lli->lli_smd;
139 oinfo.oi_policy.l_extent.start = new_size;
140 oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
142 oa.o_id = lli->lli_smd->lsm_object_id;
143 oa.o_gr = lli->lli_smd->lsm_object_gr;
144 oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
146 valid = OBD_MD_FLTYPE | OBD_MD_FLMODE |OBD_MD_FLFID |
147 OBD_MD_FLATIME | OBD_MD_FLUID | OBD_MD_FLGID | OBD_MD_FLGENER |
150 /* set OBD_MD_FLFLAGS in o_valid, only if we
151 * set OBD_FL_TRUNCLOCK, otherwise ost_punch
152 * and filter_setattr get confused, see the comment
154 oa.o_flags = OBD_FL_TRUNCLOCK;
155 oa.o_valid |= OBD_MD_FLFLAGS;
159 * 1. do not use inode's timestamps because concurrent
160 * stat might fill the inode with out-of-date times,
161 * send current instead
163 * 2. do not update the lsm, since a later stat (via
164 * ll_glimpse_size) will bring attributes from the OSTs
166 oa.o_mtime = oa.o_ctime = LTIME_S(CURRENT_TIME);
167 oa.o_valid |= OBD_MD_FLMTIME | OBD_MD_FLCTIME;
169 struct ost_lvb *xtimes;
170 /* truncate under locks
172 * 1. update the inode's mtime and ctime, because a
173 * concurrent stat (via ll_glimpse_size) might bring
176 * 2. update lsm so that next stat (via
177 * ll_glimpse_size) could get correct values in lsm */
178 OBD_ALLOC_PTR(xtimes);
182 lov_stripe_lock(lli->lli_smd);
183 LTIME_S(inode->i_mtime) = LTIME_S(CURRENT_TIME);
184 LTIME_S(inode->i_ctime) = LTIME_S(CURRENT_TIME);
185 xtimes->lvb_mtime = LTIME_S(inode->i_mtime);
186 xtimes->lvb_ctime = LTIME_S(inode->i_ctime);
187 obd_update_lvb(ll_i2obdexp(inode), lli->lli_smd, xtimes,
188 OBD_MD_FLMTIME | OBD_MD_FLCTIME);
189 lov_stripe_unlock(lli->lli_smd);
190 OBD_FREE_PTR(xtimes);
192 valid |= OBD_MD_FLMTIME | OBD_MD_FLCTIME;
194 obdo_from_inode(&oa, inode, valid);
196 rc = obd_punch_rqset(ll_i2obdexp(inode), &oinfo, NULL);
198 CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
201 obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
202 OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);
205 /* this isn't where truncate starts. roughly:
206 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate. setattr_raw grabs
207 * DLM lock on [size, EOF], i_mutex, ->lli_size_sem, and WRITE_I_ALLOC_SEM to
210 * must be called under ->lli_size_sem */
211 void ll_truncate(struct inode *inode)
213 struct ll_inode_info *lli = ll_i2info(inode);
214 int srvlock = test_bit(LLI_F_SRVLOCK, &lli->lli_flags);
217 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu=%#Lx\n",
218 inode->i_ino, inode->i_generation, inode, i_size_read(inode),
221 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
222 if (lli->lli_size_sem_owner != current) {
228 CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
233 LASSERT(SEM_COUNT(&lli->lli_size_sem) <= 0);
239 /* XXX I'm pretty sure this is a hack to paper over a more
240 * fundamental race condition. */
241 lov_stripe_lock(lli->lli_smd);
242 inode_init_lvb(inode, &lvb);
243 rc = obd_merge_lvb(ll_i2obdexp(inode), lli->lli_smd, &lvb, 0);
244 inode->i_blocks = lvb.lvb_blocks;
245 if (lvb.lvb_size == i_size_read(inode) && rc == 0) {
246 CDEBUG(D_VFSTRACE, "skipping punch for obj "LPX64
248 lli->lli_smd->lsm_object_id, i_size_read(inode),
250 lov_stripe_unlock(lli->lli_smd);
254 obd_adjust_kms(ll_i2obdexp(inode), lli->lli_smd,
255 i_size_read(inode), 1);
256 lov_stripe_unlock(lli->lli_smd);
259 if (unlikely((ll_i2sbi(inode)->ll_flags & LL_SBI_LLITE_CHECKSUM) &&
260 (i_size_read(inode) & ~CFS_PAGE_MASK))) {
261 /* If the truncate leaves a partial page, update its checksum */
262 struct page *page = find_get_page(inode->i_mapping,
263 i_size_read(inode) >>
266 struct ll_async_page *llap = llap_cast_private(page);
268 char *kaddr = kmap_atomic(page, KM_USER0);
269 llap->llap_checksum =
270 init_checksum(OSC_DEFAULT_CKSUM);
271 llap->llap_checksum =
272 compute_checksum(llap->llap_checksum,
273 kaddr, CFS_PAGE_SIZE,
275 kunmap_atomic(kaddr, KM_USER0);
277 page_cache_release(page);
281 new_size = i_size_read(inode);
282 ll_inode_size_unlock(inode, 0);
284 ll_file_punch(inode, new_size, 0);
286 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LOCKLESS_TRUNC, 1);
292 ll_inode_size_unlock(inode, 0);
295 int ll_prepare_write(struct file *file, struct page *page, unsigned from,
298 struct inode *inode = page->mapping->host;
299 struct ll_inode_info *lli = ll_i2info(inode);
300 struct lov_stripe_md *lsm = lli->lli_smd;
301 obd_off offset = ((obd_off)page->index) << CFS_PAGE_SHIFT;
302 struct obd_info oinfo = { { { 0 } } };
304 struct obdo oa = { 0 };
309 LASSERT(PageLocked(page));
310 (void)llap_cast_private(page); /* assertion */
312 /* Check to see if we should return -EIO right away */
315 pga.count = CFS_PAGE_SIZE;
318 oa.o_mode = inode->i_mode;
319 oa.o_id = lsm->lsm_object_id;
320 oa.o_gr = lsm->lsm_object_gr;
321 oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE |
322 OBD_MD_FLTYPE | OBD_MD_FLGROUP;
323 obdo_from_inode(&oa, inode, OBD_MD_FLFID | OBD_MD_FLGENER);
327 rc = obd_brw(OBD_BRW_CHECK, ll_i2obdexp(inode), &oinfo, 1, &pga, NULL);
331 if (PageUptodate(page)) {
332 LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
336 /* We're completely overwriting an existing page, so _don't_ set it up
337 * to date until commit_write */
338 if (from == 0 && to == CFS_PAGE_SIZE) {
339 LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
340 POISON_PAGE(page, 0x11);
344 /* If we are writing to a new page, there is no need to read old data. The extent
345 * locking will have updated the KMS, and for our purposes here we can
346 * treat it like i_size. */
347 lov_stripe_lock(lsm);
348 inode_init_lvb(inode, &lvb);
349 obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
350 lov_stripe_unlock(lsm);
351 if (lvb.lvb_size <= offset) {
352 char *kaddr = kmap_atomic(page, KM_USER0);
353 LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
354 lvb.lvb_size, offset);
355 memset(kaddr, 0, CFS_PAGE_SIZE);
356 kunmap_atomic(kaddr, KM_USER0);
357 GOTO(prepare_done, rc = 0);
360 /* XXX could be an async ocp read.. read-ahead? */
361 rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
363 /* bug 1598: don't clobber blksize */
364 oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
365 obdo_refresh_inode(inode, &oa, oa.o_valid);
371 SetPageUptodate(page);
377 * make page ready for ASYNC write
378 * \param data - pointer to llap cookie
379 * \param cmd - one of the OBD_BRW_* flags
381 * \retval 0 the page was successfully prepared for sending
382 * \retval -EAGAIN the page does not need to be sent
384 static int ll_ap_make_ready(void *data, int cmd)
386 struct ll_async_page *llap;
390 llap = LLAP_FROM_COOKIE(data);
391 page = llap->llap_page;
393 /* we're trying to write, but the page is locked.. come back later */
394 if (TryLockPage(page))
397 LASSERTF(!(cmd & OBD_BRW_READ) || !PageWriteback(page),
398 "cmd %x page %p ino %lu index %lu fl %lx\n", cmd, page,
399 page->mapping->host->i_ino, page->index, (long)page->flags);
401 /* if we left PageDirty we might get another writepage call
402 * in the future. list walkers are bright enough
403 * to check page dirty so we can leave it on whatever list
404 * it's on. XXX also, we're called with the cli list so if
405 * we got the page cache list we'd create a lock inversion
406 * with the removepage path which gets the page lock then the
408 if(!clear_page_dirty_for_io(page)) {
413 /* This actually clears the dirty bit in the radix tree.*/
414 set_page_writeback(page);
416 LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
417 page_cache_get(page);
422 /* We have two reasons for giving llite the opportunity to change the
423 * write length of a given queued page as it builds the RPC containing
426 * 1) Further extending writes may have landed in the page cache
427 * since a partial write first queued this page requiring us
428 * to write more from the page cache. (No further races are possible,
429 * since by the time this is called, the page is locked.)
430 * 2) We might have raced with truncate and want to avoid performing
431 * write RPCs that are just going to be thrown away by the
432 * truncate's punch on the storage targets.
434 * The kms serves these purposes as it is set at both truncate and extending
437 static int ll_ap_refresh_count(void *data, int cmd)
439 struct ll_inode_info *lli;
440 struct ll_async_page *llap;
441 struct lov_stripe_md *lsm;
448 /* readpage queues with _COUNT_STABLE, shouldn't get here. */
449 LASSERT(cmd != OBD_BRW_READ);
451 llap = LLAP_FROM_COOKIE(data);
452 page = llap->llap_page;
453 inode = page->mapping->host;
454 lli = ll_i2info(inode);
457 lov_stripe_lock(lsm);
458 inode_init_lvb(inode, &lvb);
459 obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
461 lov_stripe_unlock(lsm);
463 /* catch race with truncate */
464 if (((__u64)page->index << CFS_PAGE_SHIFT) >= kms)
467 /* catch sub-page write at end of file */
468 if (((__u64)page->index << CFS_PAGE_SHIFT) + CFS_PAGE_SIZE > kms)
469 return kms % CFS_PAGE_SIZE;
471 return CFS_PAGE_SIZE;
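/* Worked example of the kms-based count above (a sketch, numbers invented,
 * assuming 4K pages): with CFS_PAGE_SIZE = 4096 and kms = 41060 (10 full
 * pages plus 100 bytes), a write to page index 9 gets the full CFS_PAGE_SIZE,
 * page index 10 gets kms % CFS_PAGE_SIZE = 100 bytes, and page index 11
 * starts at or beyond kms, so nothing is sent (the truncate race case). */
#if 0
static unsigned long demo_refresh_count(unsigned long kms, unsigned long idx)
{
        unsigned long off = idx << CFS_PAGE_SHIFT;

        if (off >= kms)                         /* raced with truncate */
                return 0;
        if (off + CFS_PAGE_SIZE > kms)          /* sub-page write at EOF */
                return kms % CFS_PAGE_SIZE;
        return CFS_PAGE_SIZE;                   /* full page */
}
#endif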
474 void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
476 struct lov_stripe_md *lsm;
477 obd_flag valid_flags;
479 lsm = ll_i2info(inode)->lli_smd;
481 oa->o_id = lsm->lsm_object_id;
482 oa->o_gr = lsm->lsm_object_gr;
483 oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
484 valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
485 if (cmd & OBD_BRW_WRITE) {
486 oa->o_valid |= OBD_MD_FLEPOCH;
487 oa->o_easize = ll_i2info(inode)->lli_io_epoch;
489 valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
490 OBD_MD_FLUID | OBD_MD_FLGID |
491 OBD_MD_FLFID | OBD_MD_FLGENER;
494 obdo_from_inode(oa, inode, valid_flags);
495 /* Bug11742 - set the OBD_FL_MMAP flag for memory mapped files */
496 if (atomic_read(&(ll_i2info(inode)->lli_mmap_cnt)) != 0) {
497 if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
498 oa->o_valid |= OBD_MD_FLFLAGS;
499 oa->o_flags = OBD_FL_MMAP;
501 oa->o_flags |= OBD_FL_MMAP;
506 static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
508 struct ll_async_page *llap;
511 llap = LLAP_FROM_COOKIE(data);
512 ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);
517 static void ll_ap_update_obdo(void *data, int cmd, struct obdo *oa,
520 struct ll_async_page *llap;
523 llap = LLAP_FROM_COOKIE(data);
524 obdo_from_inode(oa, llap->llap_page->mapping->host, valid);
529 static struct obd_async_page_ops ll_async_page_ops = {
530 .ap_make_ready = ll_ap_make_ready,
531 .ap_refresh_count = ll_ap_refresh_count,
532 .ap_fill_obdo = ll_ap_fill_obdo,
533 .ap_update_obdo = ll_ap_update_obdo,
534 .ap_completion = ll_ap_completion,
537 struct ll_async_page *llap_cast_private(struct page *page)
539 struct ll_async_page *llap = (struct ll_async_page *)page_private(page);
541 LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
542 "page %p private %lu gave magic %d which != %d\n",
543 page, page_private(page), llap->llap_magic, LLAP_MAGIC);
548 /* Try to reap @target pages in the specific @cpu's async page list.
550 * There is an llap attached onto every page in lustre, linked off @sbi.
551 * We add an llap to the list so we don't lose our place during list walking.
552 * If llaps in the list are being moved they will only move to the end
553 * of the LRU, and we aren't terribly interested in those pages here (we
554 * start at the beginning of the list, where the least-used llaps are). */
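/* Minimal sketch (all names invented) of the cursor trick described above:
 * a dummy element is inserted into the list as a bookmark, so the walker can
 * drop the per-cpu lock, sleep or reschedule, and then resume from the
 * bookmark even if other entries were moved to the LRU tail in the meantime.
 * This is an illustration only, not part of the driver. */
#if 0
static void demo_walk_with_cursor(struct list_head *head, spinlock_t *lock)
{
        struct list_head cursor;

        spin_lock(lock);
        list_add(&cursor, head);
        while (cursor.next != head) {
                struct list_head *victim = cursor.next;

                /* move the bookmark past the entry we are about to process */
                list_move(&cursor, victim);
                /* a reference must be taken on 'victim' before dropping the
                 * lock; the real code below uses page_cache_get() */
                spin_unlock(lock);
                /* ... process 'victim' without holding the lock ... */
                spin_lock(lock);
        }
        list_del(&cursor);
        spin_unlock(lock);
}
#endif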
555 static inline int llap_shrink_cache_internal(struct ll_sb_info *sbi,
558 struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
559 struct ll_pglist_data *pd;
560 struct list_head *head;
563 pd = ll_pglist_cpu_lock(sbi, cpu);
564 head = &pd->llpd_list;
565 list_add(&dummy_llap.llap_pglist_item, head);
566 while (count < target) {
570 if (unlikely(need_resched())) {
571 list_del(&dummy_llap.llap_pglist_item);
572 ll_pglist_cpu_unlock(sbi, cpu);
573 /* vmscan::shrink_slab() has its own schedule() */
577 llap = llite_pglist_next_llap(head,
578 &dummy_llap.llap_pglist_item);
579 list_del_init(&dummy_llap.llap_pglist_item);
583 page = llap->llap_page;
584 LASSERT(page != NULL);
586 list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);
588 /* Page needs or is undergoing IO */
589 if (TryLockPage(page)) {
590 LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
594 keep = (llap->llap_write_queued || PageDirty(page) ||
595 PageWriteback(page) || (!PageUptodate(page) &&
596 llap->llap_origin != LLAP_ORIGIN_READAHEAD));
598 LL_CDEBUG_PAGE(D_PAGE, page,
599 "%s LRU page: %s%s%s%s%s origin %s\n",
600 keep ? "keep" : "drop",
601 llap->llap_write_queued ? "wq " : "",
602 PageDirty(page) ? "pd " : "",
603 PageUptodate(page) ? "" : "!pu ",
604 PageWriteback(page) ? "wb" : "",
605 llap->llap_defer_uptodate ? "" : "!du",
606 llap_origins[llap->llap_origin]);
608 /* If page is dirty or undergoing IO don't discard it */
614 page_cache_get(page);
615 ll_pglist_cpu_unlock(sbi, cpu);
617 if (page->mapping != NULL) {
618 ll_teardown_mmaps(page->mapping,
619 (__u64)page->index << CFS_PAGE_SHIFT,
620 ((__u64)page->index << CFS_PAGE_SHIFT)|
622 if (!PageDirty(page) && !page_mapped(page)) {
623 ll_ra_accounting(llap, page->mapping);
624 truncate_complete_page(page->mapping, page);
627 LL_CDEBUG_PAGE(D_PAGE, page,
628 "Not dropping page because it is"
629 " %s\n", PageDirty(page) ?
634 page_cache_release(page);
636 ll_pglist_cpu_lock(sbi, cpu);
638 list_del(&dummy_llap.llap_pglist_item);
639 ll_pglist_cpu_unlock(sbi, cpu);
641 CDEBUG(D_CACHE, "shrank %d pages, expected %d\n", count, target);
646 /* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
648 * First, this code calculates the total number of pages wanted by @shrink_fraction,
649 * then it deduces how many pages should be reaped from each cpu in proportion
650 * to that cpu's own page count (llpd_count).
652 int llap_shrink_cache(struct ll_sb_info *sbi, int nr_to_scan)
654 unsigned long total, want, percpu_want, count = 0;
657 total = lcounter_read_positive(&sbi->ll_async_page_count);
658 if (total == 0 || nr_to_scan == 0)
663 /* Since we are freeing pages also, we don't necessarily want to
664 * shrink so much. Limit to 40MB of pages + llaps per call. */
665 if (want > 40 << (20 - CFS_PAGE_SHIFT))
666 want = 40 << (20 - CFS_PAGE_SHIFT);
668 CDEBUG(D_CACHE, "shrinking %lu of %lu pages (asked for %u)\n",
669 want, total, nr_to_scan);
671 nr_cpus = num_possible_cpus();
672 cpu = sbi->ll_async_page_clock_hand;
673 /* we do at most one round */
677 cpu = (cpu + 1) % nr_cpus;
678 c = LL_PGLIST_DATA_CPU(sbi, cpu)->llpd_count;
679 if (!cpu_online(cpu))
682 percpu_want = want / ((total / (c + 1)) + 1);
683 if (percpu_want == 0)
686 count += llap_shrink_cache_internal(sbi, cpu, percpu_want);
688 sbi->ll_async_page_clock_hand = cpu;
689 } while (cpu != sbi->ll_async_page_clock_hand);
691 CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
694 return lcounter_read_positive(&sbi->ll_async_page_count);
697 /* Rebalance the async page queue len for each cpu. The idea is that a cpu
698 * which does more IO gets a relatively longer queue len.
699 * This function should be called with preempt disabled.
701 static inline int llap_async_cache_rebalance(struct ll_sb_info *sbi)
703 unsigned long sample = 0, *cpu_sample, bias, slice;
704 struct ll_pglist_data *pd;
707 int w1 = 7, w2 = 3, base = (w1 + w2); /* weight value */
710 if (!spin_trylock(&sbi->ll_async_page_reblnc_lock)) {
711 /* someone else is doing the job */
715 pcnt = &LL_PGLIST_DATA(sbi)->llpd_sample_count;
716 if (!atomic_read(pcnt)) {
717 /* rare case, somebody else has gotten this job done */
718 spin_unlock(&sbi->ll_async_page_reblnc_lock);
722 sbi->ll_async_page_reblnc_count++;
723 cpu_sample = sbi->ll_async_page_sample;
724 memset(cpu_sample, 0, num_possible_cpus() * sizeof(unsigned long));
725 for_each_online_cpu(cpu) {
726 pcnt = &LL_PGLIST_DATA_CPU(sbi, cpu)->llpd_sample_count;
727 cpu_sample[cpu] = atomic_read(pcnt);
729 sample += cpu_sample[cpu];
733 surplus = sbi->ll_async_page_max;
734 slice = surplus / sample + 1;
735 sample /= num_online_cpus();
737 for_each_online_cpu(cpu) {
738 pd = LL_PGLIST_DATA_CPU(sbi, cpu);
739 if (labs((long int)sample - cpu_sample[cpu]) > bias) {
740 unsigned long budget = pd->llpd_budget;
741 /* weighted original queue length and expected queue
742 * length to avoid thrashing. */
743 pd->llpd_budget = (budget * w1) / base +
744 (slice * cpu_sample[cpu]) * w2 / base;
747 surplus -= min_t(int, pd->llpd_budget, surplus);
749 surplus /= cpus_weight(mask) ?: 1;
750 for_each_cpu_mask(cpu, mask)
751 LL_PGLIST_DATA_CPU(sbi, cpu)->llpd_budget += surplus;
752 spin_unlock(&sbi->ll_async_page_reblnc_lock);
754 /* We need to call llap_shrink_cache_internal() for every cpu to
755 * ensure the sbi->ll_async_page_max limit is enforced. */
756 for_each_cpu_mask(cpu, mask) {
757 pd = LL_PGLIST_DATA_CPU(sbi, cpu);
758 llap_shrink_cache_internal(sbi, cpu, max_t(int, pd->llpd_count-
759 pd->llpd_budget, 0) + 32);
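/* Numeric sketch of the weighted budget update above (invented values): with
 * w1 = 7, w2 = 3, base = 10, an old budget of 1000 and an expected queue
 * length (slice * cpu_sample) of 2000, the new budget becomes
 * 1000*7/10 + 2000*3/10 = 700 + 600 = 1300, i.e. the budget moves toward the
 * observed load but only partially per rebalance, which avoids thrashing.
 * Illustration only, not compiled. */
#if 0
static unsigned long demo_weighted_budget(unsigned long old_budget,
                                          unsigned long expected)
{
        const int w1 = 7, w2 = 3, base = w1 + w2;

        return (old_budget * w1) / base + (expected * w2) / base;
}
#endif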
765 static struct ll_async_page *llap_from_page_with_lockh(struct page *page,
767 struct lustre_handle *lockh,
770 struct ll_async_page *llap;
771 struct obd_export *exp;
772 struct inode *inode = page->mapping->host;
773 struct ll_sb_info *sbi;
774 struct ll_pglist_data *pd;
779 static int triggered;
782 LL_CDEBUG_PAGE(D_ERROR, page, "Bug 10047. Wrong anon "
784 libcfs_debug_dumpstack(NULL);
787 RETURN(ERR_PTR(-EINVAL));
789 sbi = ll_i2sbi(inode);
790 LASSERT(ll_async_page_slab);
791 LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);
793 exp = ll_i2obdexp(page->mapping->host);
795 RETURN(ERR_PTR(-EINVAL));
797 llap = llap_cast_private(page);
799 #if 0 /* disabled since we take lock ref in readahead, see bug 16774/21252 */
800 if (origin == LLAP_ORIGIN_READAHEAD && lockh) {
801 /* the page could belong to another lock for which
802 * we don't hold a reference. We need to check that
803 * a reference is taken on a lock covering this page.
804 * For readpage origin, this is fine because
805 * ll_file_readv() took a reference on lock(s) covering
806 * the whole read. However, for readahead, we don't have
807 * this guarantee, so we need to check that the lock
808 * matched in ll_file_readv() also covers this page */
809 __u64 offset = ((loff_t)page->index) << CFS_PAGE_SHIFT;
810 if (!obd_get_lock(exp, ll_i2info(inode)->lli_smd,
811 &llap->llap_cookie, OBD_BRW_READ,
812 offset, offset + CFS_PAGE_SIZE - 1,
814 RETURN(ERR_PTR(-ENOLCK));
817 /* move to end of LRU list, except when page is just about to
819 if (origin != LLAP_ORIGIN_REMOVEPAGE) {
820 int old_cpu = llap->llap_pglist_cpu;
821 struct ll_pglist_data *old_pd;
823 pd = ll_pglist_double_lock(sbi, old_cpu, &old_pd);
825 while (old_cpu != llap->llap_pglist_cpu) {
826 /* rare case, someone else is touching this
828 ll_pglist_double_unlock(sbi, old_cpu);
829 old_cpu = llap->llap_pglist_cpu;
830 pd=ll_pglist_double_lock(sbi, old_cpu, &old_pd);
833 list_move_tail(&llap->llap_pglist_item,
836 if (pd->llpd_cpu != old_cpu) {
838 old_pd->llpd_count--;
840 llap->llap_pglist_cpu = pd->llpd_cpu;
843 ll_pglist_double_unlock(sbi, old_cpu);
848 /* limit the number of lustre-cached pages */
850 pd = LL_PGLIST_DATA(sbi);
851 target = pd->llpd_count - pd->llpd_budget;
854 atomic_inc(&pd->llpd_sample_count);
855 if (atomic_read(&pd->llpd_sample_count) >
856 sbi->ll_async_page_sample_max) {
857 pd->llpd_reblnc_count++;
858 rc = llap_async_cache_rebalance(sbi);
860 target = pd->llpd_count - pd->llpd_budget;
862 /* if rc equals 1, another cpu is doing the rebalance
863 * job, and our budget # may be modified while we read it.
864 * Furthermore, it is very likely being increased because
865 * we have already reached the rebalance threshold. In this
866 * case, we skip shrinking the cache here. */
867 if ((rc == 0) && target > 0)
868 llap_shrink_cache_internal(sbi, cpu, target + 32);
872 OBD_SLAB_ALLOC(llap, ll_async_page_slab, CFS_ALLOC_IO,
873 ll_async_page_slab_size);
875 RETURN(ERR_PTR(-ENOMEM));
876 llap->llap_magic = LLAP_MAGIC;
877 llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));
879 rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
880 (obd_off)page->index << CFS_PAGE_SHIFT,
881 &ll_async_page_ops, llap, &llap->llap_cookie,
884 OBD_SLAB_FREE(llap, ll_async_page_slab,
885 ll_async_page_slab_size);
889 CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
890 page, llap->llap_cookie, (obd_off)page->index << CFS_PAGE_SHIFT);
891 /* also zeroing the PRIVBITS low order bitflags */
892 __set_page_ll_data(page, llap);
893 llap->llap_page = page;
895 lcounter_inc(&sbi->ll_async_page_count);
896 pd = ll_pglist_lock(sbi);
897 list_add_tail(&llap->llap_pglist_item, &pd->llpd_list);
901 llap->llap_pglist_cpu = pd->llpd_cpu;
902 ll_pglist_unlock(sbi);
905 if (unlikely(sbi->ll_flags & LL_SBI_LLITE_CHECKSUM)) {
907 char *kaddr = kmap_atomic(page, KM_USER0);
908 csum = init_checksum(OSC_DEFAULT_CKSUM);
909 csum = compute_checksum(csum, kaddr, CFS_PAGE_SIZE,
911 kunmap_atomic(kaddr, KM_USER0);
912 if (origin == LLAP_ORIGIN_READAHEAD ||
913 origin == LLAP_ORIGIN_READPAGE) {
914 llap->llap_checksum = 0;
915 } else if (origin == LLAP_ORIGIN_COMMIT_WRITE ||
916 llap->llap_checksum == 0) {
917 llap->llap_checksum = csum;
918 CDEBUG(D_PAGE, "page %p cksum %x\n", page, csum);
919 } else if (llap->llap_checksum == csum) {
920 /* origin == LLAP_ORIGIN_WRITEPAGE */
921 CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
924 /* origin == LLAP_ORIGIN_WRITEPAGE */
925 if (!atomic_read(&(ll_i2info(inode)->lli_mmap_cnt))) {
926 LL_CDEBUG_PAGE(D_ERROR, page,
927 "old cksum %x != new %x!\n",
928 llap->llap_checksum, csum);
930 /* mmapped page was modified */
932 "page %p old cksum %x != new %x\n",
933 page, llap->llap_checksum, csum);
935 llap->llap_checksum = csum;
939 llap->llap_origin = origin;
943 static inline struct ll_async_page *llap_from_page(struct page *page,
946 return llap_from_page_with_lockh(page, origin, NULL, 0);
949 static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
950 struct ll_async_page *llap,
951 unsigned to, obd_flag async_flags)
953 unsigned long size_index = i_size_read(inode) >> CFS_PAGE_SHIFT;
954 struct obd_io_group *oig;
955 struct ll_sb_info *sbi = ll_i2sbi(inode);
956 int rc, noquot = llap->llap_ignore_quota ? OBD_BRW_NOQUOTA : 0;
957 int brwflags = OBD_BRW_ASYNC;
960 /* _make_ready only sees llap once we've unlocked the page */
961 llap->llap_write_queued = 1;
962 rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
963 llap->llap_cookie, OBD_BRW_WRITE | noquot,
964 0, 0, brwflags, async_flags);
966 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
967 llap_write_pending(inode, llap);
971 llap->llap_write_queued = 0;
977 /* make full-page requests if we are not at EOF (bug 4410) */
978 if (to != CFS_PAGE_SIZE && llap->llap_page->index < size_index) {
979 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
980 "sync write before EOF: size_index %lu, to %d\n",
983 } else if (to != CFS_PAGE_SIZE && llap->llap_page->index == size_index){
984 int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
985 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
986 "sync write at EOF: size_index %lu, to %d/%d\n",
987 size_index, to, size_to);
992 /* compare the checksum once before the page leaves llite */
993 if (unlikely((sbi->ll_flags & LL_SBI_LLITE_CHECKSUM) &&
994 llap->llap_checksum != 0)) {
996 struct page *page = llap->llap_page;
997 char *kaddr = kmap_atomic(page, KM_USER0);
998 csum = init_checksum(OSC_DEFAULT_CKSUM);
999 csum = compute_checksum(csum, kaddr, CFS_PAGE_SIZE,
1001 kunmap_atomic(kaddr, KM_USER0);
1002 if (llap->llap_checksum == csum) {
1003 CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
1006 CERROR("page %p old cksum %x != new cksum %x!\n",
1007 page, llap->llap_checksum, csum);
1011 rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
1012 llap->llap_cookie, OBD_BRW_WRITE | noquot,
1013 0, to, 0, ASYNC_READY | ASYNC_URGENT |
1014 ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
1018 rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
1024 if (!rc && async_flags & ASYNC_READY) {
1025 unlock_page(llap->llap_page);
1026 if (PageWriteback(llap->llap_page))
1027 end_page_writeback(llap->llap_page);
1030 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);
1038 /* update our write count to account for i_size increases that may have
1039 * happened since we've queued the page for io. */
1041 /* be careful not to return success without setting the page Uptodate or
1042 * the next pass through prepare_write will read in stale data from disk. */
1043 int ll_commit_write(struct file *file, struct page *page, unsigned from,
1046 struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
1047 struct inode *inode = page->mapping->host;
1048 struct ll_inode_info *lli = ll_i2info(inode);
1049 struct lov_stripe_md *lsm = lli->lli_smd;
1050 struct obd_export *exp;
1051 struct ll_async_page *llap;
1053 struct lustre_handle *lockh = NULL;
1057 SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
1058 LASSERT(inode == file->f_dentry->d_inode);
1059 LASSERT(PageLocked(page));
1061 CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
1062 inode, page, from, to, page->index);
1064 if (fd->fd_flags & LL_FILE_GROUP_LOCKED)
1065 lockh = &fd->fd_cwlockh;
1067 llap = llap_from_page_with_lockh(page, LLAP_ORIGIN_COMMIT_WRITE, lockh,
1070 RETURN(PTR_ERR(llap));
1072 exp = ll_i2obdexp(inode);
1076 llap->llap_ignore_quota = cfs_capable(CFS_CAP_SYS_RESOURCE);
1078 /* queue a write for some time in the future the first time we
1080 if (!PageDirty(page)) {
1081 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_MISSES, 1);
1083 rc = queue_or_sync_write(exp, inode, llap, to, 0);
1087 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_HITS, 1);
1090 /* put the page in the page cache, from now on ll_removepage is
1091 * responsible for cleaning up the llap.
1092 * only set the page dirty when it's queued to be written out */
1093 if (llap->llap_write_queued)
1094 set_page_dirty(page);
1097 size = (((obd_off)page->index) << CFS_PAGE_SHIFT) + to;
1098 ll_inode_size_lock(inode, 0);
1100 lov_stripe_lock(lsm);
1101 obd_adjust_kms(exp, lsm, size, 0);
1102 lov_stripe_unlock(lsm);
1103 if (size > i_size_read(inode))
1104 i_size_write(inode, size);
1105 SetPageUptodate(page);
1106 } else if (size > i_size_read(inode)) {
1107 /* this page is beyond i_size, so it can't be
1108 * truncated in ll_p_r_e during lock revoking. we must
1109 * tear down our book-keeping here. */
1110 ll_removepage(page);
1112 ll_inode_size_unlock(inode, 0);
1116 static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
1118 /* ra_io_arg is filled at the beginning of ll_readahead, under
1119 * ras_lock; the subsequent ll_read_ahead_pages then reads RA
1120 * pages according to this arg. All the items in this structure
1121 * are counted in units of page index.
1124 unsigned long ria_start; /* start offset of read-ahead*/
1125 unsigned long ria_end; /* end offset of read-ahead*/
1126 /* If a stride read pattern is detected, ria_stoff is where the
1127 * stride read started. Note: for normal read-ahead the value here
1128 * is meaningless and will not be accessed. */
1130 /* ria_length and ria_pages are the stride length and the number of
1131 * pages per stride in stride I/O mode. They are also used to check
1132 * whether a given page is inside the stride window during read-ahead. */
1133 unsigned long ria_length;
1134 unsigned long ria_pages;
1137 /* WARNING: This algorithm is used to reduce the contention on
1138 * sbi->ll_lock. It should work well if the ra_max_pages is much
1139 * greater than the single file's read-ahead window.
1141 * TODO: There may be a `global sync problem' in this implementation.
1142 * Suppose the global ra window is 100M and each file's ra window is 10M;
1143 * if more than 10 files try to get their ra budget and reach
1144 * ll_ra_count_get at exactly the same time, all of them will get a zero ra
1145 * window, even though the global window is 100M. -jay
1147 static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, struct ra_io_arg *ria,
1150 struct ll_ra_info *ra = &sbi->ll_ra_info;
1151 unsigned long ret = 0;
1155 * If fewer than 1M of read-ahead pages are left, do not do read-ahead,
1156 * otherwise it will form small read RPCs (< 1M), which hurt server
1157 * performance a lot.
1159 if (ra->ra_max_pages < atomic_read(&ra->ra_cur_pages))
1162 ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), len);
1163 if (ret < min((unsigned long)PTLRPC_MAX_BRW_PAGES, len))
1166 if (ria->ria_pages == 0)
1167 /* it needs to be 1M aligned again after being trimmed by ra_max_pages */
1168 if (ret > ((ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES))
1169 ret -= (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES;
1171 if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
1172 atomic_sub(ret, &ra->ra_cur_pages);
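/* Worked sketch of the 1M re-alignment above (invented numbers, assuming 4K
 * pages so PTLRPC_MAX_BRW_PAGES = 256 pages = 1M): with ria_start = 100 and
 * a trimmed ret of 300 pages, (100 + 300) % 256 = 144, so ret is reduced to
 * 156 and the read-ahead then ends exactly on a 1M RPC boundary at page 256.
 * Illustration only, not compiled. */
#if 0
static unsigned long demo_align_ra(unsigned long ria_start, unsigned long ret)
{
        unsigned long tail = (ria_start + ret) % PTLRPC_MAX_BRW_PAGES;

        if (ret > tail)         /* same condition as the code above */
                ret -= tail;
        return ret;
}
#endif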
1180 static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
1182 struct ll_ra_info *ra = &sbi->ll_ra_info;
1183 atomic_sub(len, &ra->ra_cur_pages);
1186 /* called for each page in a completed rpc.*/
1187 int ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
1189 struct ll_async_page *llap;
1191 struct obd_export *exp;
1196 llap = LLAP_FROM_COOKIE(data);
1197 page = llap->llap_page;
1198 LASSERT(PageLocked(page));
1199 LASSERT(CheckWriteback(page,cmd));
1201 LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);
1203 if (cmd & OBD_BRW_READ && llap->llap_defer_uptodate) {
1204 ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);
1206 LASSERT(lustre_handle_is_used(&llap->llap_lockh_granted));
1207 exp = ll_i2obdexp(page->mapping->host);
1208 end = ((loff_t)page->index) << CFS_PAGE_SHIFT;
1209 end += CFS_PAGE_SIZE - 1;
1210 obd_cancel(exp, ll_i2info(page->mapping->host)->lli_smd, LCK_PR,
1211 &llap->llap_lockh_granted, OBD_FAST_LOCK, end);
1215 if (cmd & OBD_BRW_READ) {
1216 if (!llap->llap_defer_uptodate)
1217 SetPageUptodate(page);
1219 llap->llap_write_queued = 0;
1221 ClearPageError(page);
1223 if (cmd & OBD_BRW_READ) {
1224 llap->llap_defer_uptodate = 0;
1228 set_bit(AS_ENOSPC, &page->mapping->flags);
1230 set_bit(AS_EIO, &page->mapping->flags);
1233 /* be careful about clearing WB.
1234 * if WB is cleared after the page lock is released, parallel IO can be
1235 * started before ap_make_ready has finished, so we could end up with a
1236 * page that has PG_Writeback set from ->writepage() and a completed READ
1237 * that clears this flag */
1238 if ((cmd & OBD_BRW_WRITE) && PageWriteback(page))
1239 end_page_writeback(page);
1243 if (cmd & OBD_BRW_WRITE) {
1244 llap_write_complete(page->mapping->host, llap);
1245 ll_try_done_writing(page->mapping->host);
1248 page_cache_release(page);
1253 static void __ll_put_llap(struct page *page)
1255 struct inode *inode = page->mapping->host;
1256 struct obd_export *exp;
1257 struct ll_async_page *llap;
1258 struct ll_sb_info *sbi = ll_i2sbi(inode);
1259 struct ll_pglist_data *pd;
1263 exp = ll_i2obdexp(inode);
1265 CERROR("page %p ind %lu gave null export\n", page, page->index);
1270 llap = llap_from_page(page, LLAP_ORIGIN_REMOVEPAGE);
1272 CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
1273 page->index, PTR_ERR(llap));
1278 //llap_write_complete(inode, llap);
1279 rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
1282 CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);
1284 /* this unconditional free is only safe because the page lock
1285 * is providing exclusivity to memory pressure/truncate/writeback..*/
1286 __clear_page_ll_data(page);
1288 lcounter_dec(&sbi->ll_async_page_count);
1289 cpu = llap->llap_pglist_cpu;
1290 pd = ll_pglist_cpu_lock(sbi, cpu);
1293 if (!list_empty(&llap->llap_pglist_item))
1294 list_del_init(&llap->llap_pglist_item);
1295 ll_pglist_cpu_unlock(sbi, cpu);
1296 OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
1301 /* the kernel calls us here when a page is unhashed from the page cache.
1302 * the page will be locked and the kernel is holding a spinlock, so
1303 * we need to be careful. we're just tearing down our book-keeping
1305 void ll_removepage(struct page *page)
1307 struct ll_async_page *llap = llap_cast_private(page);
1310 LASSERT(!in_interrupt());
1312 /* sync pages or failed read pages can leave pages in the page
1313 * cache that don't have our data associated with them anymore */
1314 if (page_private(page) == 0) {
1319 ll_ra_accounting(llap, page->mapping);
1320 LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");
1321 __ll_put_llap(page);
1326 static int ll_issue_page_read(struct obd_export *exp,
1327 struct ll_async_page *llap,
1328 struct obd_io_group *oig, int defer)
1330 struct page *page = llap->llap_page;
1333 page_cache_get(page);
1334 llap->llap_defer_uptodate = defer;
1335 llap->llap_ra_used = 0;
1336 rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
1337 NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
1338 CFS_PAGE_SIZE, 0, ASYNC_COUNT_STABLE |
1339 ASYNC_READY | ASYNC_URGENT);
1341 LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
1342 page_cache_release(page);
1347 static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
1349 LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
1350 lprocfs_counter_incr(sbi->ll_ra_stats, which);
1353 static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
1355 struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
1356 ll_ra_stats_inc_sbi(sbi, which);
1359 void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
1361 if (!llap->llap_defer_uptodate || llap->llap_ra_used)
1364 ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
1367 #define RAS_CDEBUG(ras) \
1369 "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu" \
1370 "csr %lu sf %lu sp %lu sl %lu \n", \
1371 ras->ras_last_readpage, ras->ras_consecutive_requests, \
1372 ras->ras_consecutive_pages, ras->ras_window_start, \
1373 ras->ras_window_len, ras->ras_next_readahead, \
1374 ras->ras_requests, ras->ras_request_index, \
1375 ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \
1376 ras->ras_stride_pages, ras->ras_stride_length)
1378 static int index_in_window(unsigned long index, unsigned long point,
1379 unsigned long before, unsigned long after)
1381 unsigned long start = point - before, end = point + after;
1388 return start <= index && index <= end;
1391 struct ll_thread_data *ll_td_get()
1394 struct ll_thread_data *ltd = current->journal_info;
1396 LASSERT(ltd == NULL || ltd->ltd_magic == LTD_MAGIC);
1403 void ll_td_set(struct ll_thread_data *ltd)
1407 ltd = current->journal_info;
1408 LASSERT(ltd == NULL || ltd->ltd_magic == LTD_MAGIC);
1409 current->journal_info = NULL;
1413 LASSERT(current->journal_info == NULL);
1414 LASSERT(ltd->ltd_magic == LTD_MAGIC);
1415 current->journal_info = ltd;
1419 static struct ll_readahead_state *ll_ras_get(struct file *f)
1421 struct ll_file_data *fd;
1423 fd = LUSTRE_FPRIVATE(f);
1427 void ll_ra_read_init(struct file *f, struct ll_ra_read *rar,
1428 loff_t offset, size_t count)
1430 struct ll_readahead_state *ras;
1432 ras = ll_ras_get(f);
1434 rar->lrr_start = offset >> CFS_PAGE_SHIFT;
1435 rar->lrr_count = (count + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
1437 spin_lock(&ras->ras_lock);
1438 ras->ras_requests++;
1439 ras->ras_request_index = 0;
1440 ras->ras_consecutive_requests++;
1441 rar->lrr_reader = current;
1443 list_add(&rar->lrr_linkage, &ras->ras_read_beads);
1444 spin_unlock(&ras->ras_lock);
1447 void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
1449 struct ll_readahead_state *ras;
1451 ras = ll_ras_get(f);
1453 spin_lock(&ras->ras_lock);
1454 list_del_init(&rar->lrr_linkage);
1455 spin_unlock(&ras->ras_lock);
1458 static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
1460 struct ll_ra_read *scan;
1462 list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
1463 if (scan->lrr_reader == current)
1469 struct ll_ra_read *ll_ra_read_get(struct file *f)
1471 struct ll_readahead_state *ras;
1472 struct ll_ra_read *bead;
1474 ras = ll_ras_get(f);
1476 spin_lock(&ras->ras_lock);
1477 bead = ll_ra_read_get_locked(ras);
1478 spin_unlock(&ras->ras_lock);
1482 static int ll_read_ahead_page(struct obd_export *exp, struct obd_io_group *oig,
1483 pgoff_t index, struct address_space *mapping)
1485 struct ll_async_page *llap;
1487 unsigned int gfp_mask = 0;
1488 int rc = 0, flags = 0;
1489 struct lustre_handle lockh = { 0 };
1492 gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
1494 gfp_mask |= __GFP_NOWARN;
1496 page = grab_cache_page_nowait_gfp(mapping, index, gfp_mask);
1498 ll_ra_stats_inc(mapping, RA_STAT_FAILED_GRAB_PAGE);
1499 CDEBUG(D_READA, "g_c_p_n failed\n");
1503 /* Check if page was truncated or reclaimed */
1504 if (page->mapping != mapping) {
1505 ll_ra_stats_inc(mapping, RA_STAT_WRONG_GRAB_PAGE);
1506 CDEBUG(D_READA, "g_c_p_n returned invalid page\n");
1507 GOTO(unlock_page, rc = 0);
1510 #if 0 /* the fast lock stored in ltd can't be guaranteed to be the lock used
1511 * by the llap returned by "llap_from_page_with_lockh" if there is a
1512 * ready llap, because the lock check against readahead is disabled.
1513 * see bug 16774/21252 */
1516 if (ltd && ltd->lock_style > 0) {
1517 __u64 offset = ((loff_t)page->index) << CFS_PAGE_SHIFT;
1518 lockh = ltd2lockh(ltd, offset,
1519 offset + CFS_PAGE_SIZE - 1);
1520 if (ltd->lock_style == LL_LOCK_STYLE_FASTLOCK)
1521 flags = OBD_FAST_LOCK;
1525 /* we do this first so that we can see the page in the /proc
1527 llap = llap_from_page_with_lockh(page, LLAP_ORIGIN_READAHEAD, &lockh,
1529 if (IS_ERR(llap) || llap->llap_defer_uptodate) {
1530 /* bail out when we hit the end of the lock. */
1531 if (PTR_ERR(llap) == -ENOLCK) {
1532 ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
1533 CDEBUG(D_READA | D_PAGE,
1534 "Adding page to cache failed index "
1536 CDEBUG(D_READA, "nolock page\n");
1537 GOTO(unlock_page, rc = -ENOLCK);
1539 CDEBUG(D_READA, "read-ahead page\n");
1540 GOTO(unlock_page, rc = 0);
1543 /* skip completed pages */
1544 if (Page_Uptodate(page))
1545 GOTO(unlock_page, rc = 0);
1547 if (!lustre_handle_is_used(&lockh)) {
1548 start = ((loff_t)index) << CFS_PAGE_SHIFT;
1549 end = start + CFS_PAGE_SIZE - 1;
1550 rc = obd_get_lock(exp,
1551 ll_i2info(mapping->host)->lli_smd,
1552 &llap->llap_cookie, OBD_BRW_READ,
1553 start, end, &lockh, OBD_FAST_LOCK);
1554 /* Is the lock being cancelled? */
1556 GOTO(unlock_page, rc = 0);
1559 llap->llap_lockh_granted = lockh;
1561 rc = ll_issue_page_read(exp, llap, oig, 1);
1563 LL_CDEBUG_PAGE(D_READA | D_PAGE, page, "started read-ahead\n");
1566 llap->llap_lockh_granted.cookie = 0;
1568 if (lustre_handle_is_used(&lockh))
1569 ldlm_lock_decref(&lockh, LCK_PR);
1572 LL_CDEBUG_PAGE(D_READA | D_PAGE, page, "skipping read-ahead\n");
1574 page_cache_release(page);
1578 #define RIA_DEBUG(ria) \
1579 CDEBUG(D_READA, "rs %lu re %lu ro %lu rl %lu rp %lu\n", \
1580 ria->ria_start, ria->ria_end, ria->ria_stoff, ria->ria_length,\
1583 #define INIT_RAS_WINDOW_PAGES PTLRPC_MAX_BRW_PAGES
1585 static inline int stride_io_mode(struct ll_readahead_state *ras)
1587 return ras->ras_consecutive_stride_requests > 1;
1590 /* The function calculates how many pages will be read in
1591 * [off, off + length], in such a stride IO area, where
1592 * stride_offset = st_off, stride_length = st_len,
1593 * stride_pages = st_pgs
1595 * |------------------|*****|------------------|*****|------------|*****|....
1598 * |----- st_len -----|
1600 * How many pages it should read in such pattern
1601 * |-------------------------------------------------------------|
1603 * |<------ length ------->|
1605 * = |<----->| + |-------------------------------------| + |---|
1606 * start_left st_pgs * i end_left
1608 static unsigned long
1609 stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
1610 unsigned long off, unsigned long length)
1612 __u64 start = off > st_off ? off - st_off : 0;
1613 __u64 end = off + length > st_off ? off + length - st_off : 0;
1614 unsigned long start_left = 0;
1615 unsigned long end_left = 0;
1616 unsigned long pg_count;
1618 if (st_len == 0 || length == 0 || end == 0)
1621 start_left = do_div(start, st_len);
1622 if (start_left < st_pgs)
1623 start_left = st_pgs - start_left;
1627 end_left = do_div(end, st_len);
1628 if (end_left > st_pgs)
1631 CDEBUG(D_READA, "start "LPU64", end "LPU64" start_left %lu end_left %lu \n",
1632 start, end, start_left, end_left);
1635 pg_count = end_left - (st_pgs - start_left);
1637 pg_count = start_left + st_pgs * (end - start - 1) + end_left;
1639 CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu "
1640 "pgcount %lu\n", st_off, st_len, st_pgs, off, length, pg_count);
1645 static int ria_page_count(struct ra_io_arg *ria)
1647 __u64 length = ria->ria_end >= ria->ria_start ?
1648 ria->ria_end - ria->ria_start + 1 : 0;
1650 return stride_pg_count(ria->ria_stoff, ria->ria_length,
1651 ria->ria_pages, ria->ria_start,
1655 /*Check whether the index is in the defined ra-window */
1656 static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria)
1658 /* If ria_length == ria_pages, it means non-stride I/O mode;
1659 * idx is always inside the read-ahead window in this case.
1660 * For stride I/O mode, just check whether the idx is inside
1662 return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
1663 (idx >= ria->ria_stoff && (idx - ria->ria_stoff) %
1664 ria->ria_length < ria->ria_pages);
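/* Worked sketch of the window check above (invented numbers): with
 * ria_stoff = 0, ria_length = 16 and ria_pages = 4, index 18 gives
 * (18 - 0) % 16 = 2 < 4 and is inside the stride window, while index 22
 * gives 6 >= 4 and is skipped by the read-ahead loop below.
 * Illustration only, not compiled. */
#if 0
static void demo_inside_ra_window(void)
{
        struct ra_io_arg ria = { 0 };

        ria.ria_stoff  = 0;
        ria.ria_length = 16;
        ria.ria_pages  = 4;

        LASSERT(ras_inside_ra_window(18, &ria) == 1);
        LASSERT(ras_inside_ra_window(22, &ria) == 0);
}
#endif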
1667 static int ll_read_ahead_pages(struct obd_export *exp,
1668 struct obd_io_group *oig,
1669 struct ra_io_arg *ria,
1670 unsigned long *reserved_pages,
1671 struct address_space *mapping,
1672 unsigned long *ra_end)
1674 int rc, count = 0, stride_ria;
1675 unsigned long page_idx;
1677 LASSERT(ria != NULL);
1680 stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
1681 for (page_idx = ria->ria_start; page_idx <= ria->ria_end &&
1682 *reserved_pages > 0; page_idx++) {
1683 if (ras_inside_ra_window(page_idx, ria)) {
1684 /* If the page is inside the read-ahead window*/
1685 rc = ll_read_ahead_page(exp, oig, page_idx, mapping);
1687 (*reserved_pages)--;
1689 } else if (rc == -ENOLCK)
1691 } else if (stride_ria) {
1692 /* If it is not in the read-ahead window, and it is
1693 * read-ahead mode, then check whether it should skip
1696 /* FIXME: This assertion is only valid for
1697 * forward read-ahead; it will be fixed when backward
1698 * read-ahead is implemented */
1699 LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu"
1700 "rs %lu re %lu ro %lu rl %lu rp %lu\n", page_idx,
1701 ria->ria_start, ria->ria_end, ria->ria_stoff,
1702 ria->ria_length, ria->ria_pages);
1704 offset = page_idx - ria->ria_stoff;
1705 offset = offset % (ria->ria_length);
1706 if (offset > ria->ria_pages) {
1707 page_idx += ria->ria_length - offset;
1708 CDEBUG(D_READA, "i %lu skip %lu \n", page_idx,
1709 ria->ria_length - offset);
1720 * Current readahead process
1723 * ll_file_readv (init ll_readahead_state for the open file)
1726 * |---> ll_readpage (read page)
1729 * | ras_update (update read-ahead window according to read pattern)
1732 * |--- ll_readahead (read_ahead pages)
1735 * During this process, ras_update controls how many pages ahead it should
1736 * read by adjusting the read-ahead window (RA window). The window is represented
1737 * by the following three variables (all these values are counted in pages)
1739 * 1. ras_window_start: start offset of the read-ahead window. It is
1740 * initialized as the read offset, then as pages
1741 * are being read, it will be set as the last
1742 *                      page (Note: it is 1M aligned, so it actually
1743 *                      is last_page_index & ~(256 - 1));
1745 * 2. ras_window_len: length of the read-ahead window. The read-ahead window
1746 * length is decided by two factors
1748 * a. It is at least >= current read syscall length.
1749 *                    b. If a contiguous read is detected (Note: it is syscall
1750 *                       contiguous, instead of page-read contiguous), the
1751 *                       read-ahead window len will be increased by 1M each
1753 *                    c. If a stride read pattern is detected, the read-ahead
1754 *                       window will also be increased by 1M, but following the stride pattern.
1755 * stride pattern is defined by ras_stride_length,
1756 * ras_stride_pages and ras_stride_gap. (see
1757 * ll_readahead_states comments)
1759 * 3. ras_next_readahead: current offset in the read-ahead window, i.e. where
1760 *                        ll_readahead will start in the next read-ahead.
1763 * Cache miss: If memory pressure is very high, the VM begins to evict pages from the cache,
1764 * including read-ahead pages. Once we find that a read-ahead page is being evicted before
1765 * it is "really" accessed, we reset the read-ahead window to the current read extent,
1766 * i.e. from the current page to the end of this read.
1768 * The amount of in-flight read-ahead is controlled by 2 variables (the read-ahead rate):
1769 * ra_max_pages: maximum number of in-flight read-ahead pages on the client.
1770 * ra_max_pages_per_file: maximum number of in-flight read-ahead pages per file.
1772 static int ll_readahead(struct ll_readahead_state *ras,
1773 struct obd_export *exp, struct address_space *mapping,
1774 struct obd_io_group *oig, int flags)
1776 unsigned long start = 0, end = 0, reserved;
1777 unsigned long ra_end, len;
1778 struct inode *inode;
1779 struct lov_stripe_md *lsm;
1780 struct ll_ra_read *bead;
1782 struct ra_io_arg ria = { 0 };
1787 inode = mapping->host;
1788 lsm = ll_i2info(inode)->lli_smd;
1790 lov_stripe_lock(lsm);
1791 inode_init_lvb(inode, &lvb);
1792 obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
1794 lov_stripe_unlock(lsm);
1796 ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
1800 spin_lock(&ras->ras_lock);
1801 bead = ll_ra_read_get_locked(ras);
1802 /* Enlarge the RA window to encompass the full read */
1803 if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
1804 bead->lrr_start + bead->lrr_count) {
1805 obd_off read_end = ((obd_off)(bead->lrr_start +
1806 bead->lrr_count))<<CFS_PAGE_SHIFT;
1807 obd_extent_calc(exp, lsm, OBD_CALC_STRIPE_RPC_END_ALIGN,
1809 ras->ras_window_len = ((read_end + 1) >> CFS_PAGE_SHIFT) -
1810 ras->ras_window_start;
1812 /* Reserve a part of the read-ahead window that we'll be issuing */
1813 if (ras->ras_window_len) {
1814 start = ras->ras_next_readahead;
1815 end = ras->ras_window_start + ras->ras_window_len - 1;
1818 /* Truncate RA window to end of file */
1819 end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
1820 ras->ras_next_readahead = max(end, end + 1);
1823 ria.ria_start = start;
1825 /* If stride I/O mode is detected, get stride window*/
1826 if (stride_io_mode(ras)) {
1827 ria.ria_stoff = ras->ras_stride_offset;
1828 ria.ria_length = ras->ras_stride_length;
1829 ria.ria_pages = ras->ras_stride_pages;
1831 spin_unlock(&ras->ras_lock);
1834 ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
1838 len = ria_page_count(&ria);
1842 reserved = ll_ra_count_get(ll_i2sbi(inode), &ria, len);
1844 ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
1846 CDEBUG(D_READA, "reserved page %lu ra_cur %d ra_max %lu\n", reserved,
1847 cfs_atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
1848 ll_i2sbi(inode)->ll_ra_info.ra_max_pages);
1850 ret = ll_read_ahead_pages(exp, oig, &ria, &reserved, mapping, &ra_end);
1852 LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
1854 ll_ra_count_put(ll_i2sbi(inode), reserved);
1856 if (ra_end == end + 1 && ra_end == (kms >> CFS_PAGE_SHIFT))
1857 ll_ra_stats_inc(mapping, RA_STAT_EOF);
1859 /* if we didn't get to the end of the region we reserved from
1860 * the ras we need to go back and update the ras so that the
1861 * next read-ahead tries from where we left off. we only do so
1862 * if the region we failed to issue read-ahead on is still ahead
1863 * of the app and behind the next index to start read-ahead from */
1864 CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu \n",
1865 ra_end, end, ria.ria_end);
1867 if (ra_end != (end + 1)) {
1868 spin_lock(&ras->ras_lock);
1869 if (ra_end < ras->ras_next_readahead &&
1870 index_in_window(ra_end, ras->ras_window_start, 0,
1871 ras->ras_window_len)) {
1872 ras->ras_next_readahead = ra_end;
1875 spin_unlock(&ras->ras_lock);
1881 static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
1883 /* Since stride readahead is sensitive to the offset
1884 * of the read-ahead, we use the original offset here,
1885 * instead of ras_window_start, which is 1M aligned */
1886 if (stride_io_mode(ras))
1887 ras->ras_window_start = index;
1889 ras->ras_window_start = index & (~(INIT_RAS_WINDOW_PAGES - 1));
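/* Numeric sketch of the alignment above (invented index, assuming
 * INIT_RAS_WINDOW_PAGES = 256): a non-stride read at page index 300 sets
 * ras_window_start = 300 & ~(256 - 1) = 256, i.e. the window is re-based on
 * the enclosing 1M-aligned chunk, matching the ras_window_start note in the
 * big read-ahead comment earlier in this file. Illustration only. */
#if 0
static unsigned long demo_ras_window_start(unsigned long index)
{
        return index & ~((unsigned long)INIT_RAS_WINDOW_PAGES - 1);
}
#endif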
1892 /* called with the ras_lock held or from places where it doesn't matter */
1893 static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
1895 ras->ras_last_readpage = index;
1896 ras->ras_consecutive_requests = 0;
1897 ras->ras_consecutive_pages = 0;
1898 ras->ras_window_len = 0;
1899 ras_set_start(ras, index);
1900 ras->ras_next_readahead = max(ras->ras_window_start, index);
1905 /* called with the ras_lock held or from places where it doesn't matter */
1906 static void ras_stride_reset(struct ll_readahead_state *ras)
1908 ras->ras_consecutive_stride_requests = 0;
1909 ras->ras_stride_length = 0;
1910 ras->ras_stride_pages = 0;
1914 void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
1916 spin_lock_init(&ras->ras_lock);
1918 ras->ras_requests = 0;
1919 INIT_LIST_HEAD(&ras->ras_read_beads);
1923 * Check whether the read request is in the stride window.
1924 * If it is in the stride window, return 1, otherwise return 0.
1926 static int index_in_stride_window(unsigned long index,
1927 struct ll_readahead_state *ras,
1928 struct inode *inode)
1930 unsigned long stride_gap = index - ras->ras_last_readpage - 1;
1932 if (ras->ras_stride_length == 0 || ras->ras_stride_pages == 0 ||
1933 ras->ras_stride_pages == ras->ras_stride_length)
1936 /* If it is contiguous read */
1937 if (stride_gap == 0)
1938 return ras->ras_consecutive_pages + 1 <= ras->ras_stride_pages;
1940 /*Otherwise check the stride by itself */
1941 return (ras->ras_stride_length - ras->ras_stride_pages) == stride_gap &&
1942 ras->ras_consecutive_pages == ras->ras_stride_pages;
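/* Worked sketch of the stride check above (invented numbers): with
 * ras_stride_length = 16, ras_stride_pages = 4, ras_last_readpage = 3 and
 * ras_consecutive_pages = 4, an access to index 16 gives stride_gap =
 * 16 - 3 - 1 = 12 = ras_stride_length - ras_stride_pages, so it matches the
 * stride pattern and the function returns 1; an access to index 10 gives a
 * gap of 6 and returns 0, which later resets the stride detector.
 * Illustration only, not compiled. */
#if 0
static void demo_stride_window_check(void)
{
        struct ll_readahead_state ras;

        memset(&ras, 0, sizeof(ras));
        ras.ras_stride_length     = 16;
        ras.ras_stride_pages      = 4;
        ras.ras_last_readpage     = 3;
        ras.ras_consecutive_pages = 4;

        LASSERT(index_in_stride_window(16, &ras, NULL) == 1);
        LASSERT(index_in_stride_window(10, &ras, NULL) == 0);
}
#endif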
1945 static void ras_update_stride_detector(struct ll_readahead_state *ras,
1946 unsigned long index)
1948 unsigned long stride_gap = index - ras->ras_last_readpage - 1;
1950 if (!stride_io_mode(ras) && (stride_gap != 0 ||
1951 ras->ras_consecutive_stride_requests == 0)) {
1952 ras->ras_stride_pages = ras->ras_consecutive_pages;
1953 ras->ras_stride_length = stride_gap +ras->ras_consecutive_pages;
1955 LASSERT(ras->ras_request_index == 0);
1956 LASSERT(ras->ras_consecutive_stride_requests == 0);
1958 if (index <= ras->ras_last_readpage) {
1959 /*Reset stride window for forward read*/
1960 ras_stride_reset(ras);
1964 ras->ras_stride_pages = ras->ras_consecutive_pages;
1965 ras->ras_stride_length = stride_gap +ras->ras_consecutive_pages;
1971 static unsigned long
1972 stride_page_count(struct ll_readahead_state *ras, unsigned long len)
1974 return stride_pg_count(ras->ras_stride_offset, ras->ras_stride_length,
1975 ras->ras_stride_pages, ras->ras_stride_offset,
1979 /* The stride read-ahead window will be increased by inc_len according to
1980 * the stride I/O pattern.
1982 * |------------------------|------------------------|--------------------|
1983 * ras_stride_offset ras_window_start ras_window_end
1984 * |<---ras_window_len----->|<---ras_inc_len---->|
1986 * ras_stride_offset: where the stride IO mode started,
1987 * Note: stride_page is always in front of stride_gap page, see comments in
1988 * ll_readahead_states and stride_pg_count
1990 * This function calculates how much ras_window should increase (ras_inc_len)
1991 * according to @inc_len. Note: in the stride read-ahead algorithm, stride_gap is
1992 * also accounted for in ras_window_len, so basically,
1994 * ras_inc_len = (inc_len/ stride_page) * (stride_page + stride_gap)
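/* Numeric sketch of the commented formula above (invented values): with
 * stride_pages = 4, stride_gap = 12 (so stride_length = 16) and inc_len = 256
 * read-ahead pages, the window has to grow by (256 / 4) * (4 + 12) = 1024
 * pages so that 256 of them are data pages; the other 768 pages are the
 * stride gaps the window must also span. This is only an illustration of the
 * formula, not of the exact rounding done by the function below. */
#if 0
static unsigned long demo_ras_inc_len(unsigned long inc_len,
                                      unsigned long stride_pages,
                                      unsigned long stride_gap)
{
        return (inc_len / stride_pages) * (stride_pages + stride_gap);
}
#endif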
1997 static void ras_stride_increase_window(struct ll_readahead_state *ras,
1998 struct ll_ra_info *ra,
1999 unsigned long inc_len)
2001 unsigned long left, step, window_len;
2002 unsigned long stride_len;
2004 LASSERT(ras->ras_stride_length > 0);
2005 LASSERTF(ras->ras_window_start + ras->ras_window_len
2006 >= ras->ras_stride_offset, "window_start %lu, window_len %lu"
2007 " stride_offset %lu sp %lu sl %lu\n", ras->ras_window_start,
2008 ras->ras_window_len, ras->ras_stride_offset, ras->ras_stride_pages,
2009 ras->ras_stride_length);
2011 stride_len = ras->ras_window_start + ras->ras_window_len -
2012 ras->ras_stride_offset;
2014 left = stride_len % ras->ras_stride_length;
2015 window_len = ras->ras_window_len - left;
2017 if (left < ras->ras_stride_pages)
2020 left = ras->ras_stride_pages + inc_len;
2022 LASSERT(ras->ras_stride_pages != 0);
2024 step = left / ras->ras_stride_pages;
2025 left %= ras->ras_stride_pages;
2027 window_len += step * ras->ras_stride_length + left;
2029 if (stride_page_count(ras, window_len) <= ra->ra_max_pages_per_file)
2030 ras->ras_window_len = window_len;
2035 static void ras_increase_window(struct ll_readahead_state *ras,
2036 struct ll_ra_info *ra, struct inode *inode)
2038 __u64 step = INIT_RAS_WINDOW_PAGES;
2040 if (stride_io_mode(ras))
2041 ras_stride_increase_window(ras, ra, (unsigned long)step);
2043 ras->ras_window_len = min(ras->ras_window_len +
2044 (unsigned long)step,
2045 ra->ra_max_pages_per_file);
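/*
 * Illustration (hypothetical value for INIT_RAS_WINDOW_PAGES): if it were 64,
 * a contiguous reader would grow its window 64 pages per call, capped at
 * ra_max_pages_per_file, while a stride reader feeds the same step through
 * ras_stride_increase_window() so the gaps between chunks are counted too.
 */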
2048 static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
2049 struct ll_readahead_state *ras, unsigned long index,
2050 unsigned hit)
2052 struct ll_ra_info *ra = &sbi->ll_ra_info;
2053 int zero = 0, stride_detect = 0, ra_miss = 0;
2056 spin_lock(&ras->ras_lock);
2058 ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
2060 /* Reset the read-ahead window in two cases. First, when the app seeks
2061 * or reads to some other part of the file. Second, when we get a
2062 * read-ahead miss on a page that we think we previously issued. This
2063 * can be a symptom of there being so many read-ahead pages that the VM
2064 * is reclaiming them before we get to them. */
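/*
 * Rough illustration (hypothetical numbers): with ras_last_readpage = 100,
 * the 8/8 fuzz below keeps reads of roughly pages 92-108 inside the current
 * pattern, while a jump to page 200 counts as a distant readpage; either a
 * distant read or a miss inside the window ends up resetting read-ahead.
 */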
2065 if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
2066 zero = 1;
2067 ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE);
2068 } else if (!hit && ras->ras_window_len &&
2069 index < ras->ras_next_readahead &&
2070 index_in_window(index, ras->ras_window_start, 0,
2071 ras->ras_window_len)) {
2072 ra_miss = 1;
2073 ll_ra_stats_inc_sbi(sbi, RA_STAT_MISS_IN_WINDOW);
2074 }
2076 /* On the second access to a file smaller than the tunable
2077 * ra_max_read_ahead_whole_pages, trigger RA on all pages in the
2078 * file, up to ra_max_pages_per_file. This is simply a best effort
2079 * and only occurs once per open file. Normal RA behavior is restored
2080 * for subsequent IO. The mmap case does not increment
2081 * ras_requests and thus can never trigger this behavior. */
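/*
 * Illustrative sizes (hypothetical): with 4 KB pages a 1 MB file is 256
 * pages, so if ra_max_read_ahead_whole_pages >= 256 the second read sets a
 * window large enough to cover the whole file, still capped by
 * ra_max_pages_per_file.
 */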
2082 if (ras->ras_requests == 2 && !ras->ras_request_index) {
2083 __u64 kms_pages;
2085 kms_pages = (i_size_read(inode) + CFS_PAGE_SIZE - 1) >>
2086 CFS_PAGE_SHIFT;
2088 CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
2089 ra->ra_max_read_ahead_whole_pages,
2090 ra->ra_max_pages_per_file);
2092 if (kms_pages &&
2093 kms_pages <= ra->ra_max_read_ahead_whole_pages) {
2094 ras->ras_window_start = 0;
2095 ras->ras_last_readpage = 0;
2096 ras->ras_next_readahead = 0;
2097 ras->ras_window_len = min(ra->ra_max_pages_per_file,
2098 ra->ra_max_read_ahead_whole_pages);
2099 GOTO(out_unlock, 0);
2100 }
2101 }
2102 if (zero) {
2103 /* Check whether the read is in stride I/O mode */
2104 if (!index_in_stride_window(index, ras, inode)) {
2105 if (ras->ras_consecutive_stride_requests == 0 &&
2106 ras->ras_request_index == 0) {
2107 ras_update_stride_detector(ras, index);
2108 ras->ras_consecutive_stride_requests++;
2109 } else {
2110 ras_stride_reset(ras);
2111 }
2112 ras_reset(ras, index);
2113 ras->ras_consecutive_pages++;
2114 GOTO(out_unlock, 0);
2115 } else {
2116 ras->ras_consecutive_pages = 0;
2117 ras->ras_consecutive_requests = 0;
2118 if (++ras->ras_consecutive_stride_requests > 1)
2119 stride_detect = 1;
2121 }
2122 } else if (ra_miss) {
2124 if (index_in_stride_window(index, ras, inode) &&
2125 stride_io_mode(ras)) {
2126 /* If stride read-ahead hits a cache miss, the stride detector
2127 * is not reset, to avoid the overhead of re-detecting the
2128 * read-ahead mode */
2129 if (index != ras->ras_last_readpage + 1)
2130 ras->ras_consecutive_pages = 0;
2131 ras_reset(ras, index);
2133 } else {
2134 /* Reset both the stride window and the normal RA
2135 * window */
2136 ras_reset(ras, index);
2137 ras->ras_consecutive_pages++;
2138 ras_stride_reset(ras);
2139 GOTO(out_unlock, 0);
2140 }
2141 } else if (stride_io_mode(ras)) {
2142 /* This is a contiguous read while we are in stride I/O mode:
2143 * check whether the stride step is still valid and, if not,
2144 * reset the stride read-ahead window */
2145 if (!index_in_stride_window(index, ras, inode)) {
2146 /* Shrink the stride read-ahead window to zero */
2147 ras_stride_reset(ras);
2148 ras->ras_window_len = 0;
2149 ras->ras_next_readahead = index;
2150 }
2151 }
2153 ras->ras_consecutive_pages++;
2154 ras->ras_last_readpage = index;
2155 ras_set_start(ras, index);
2156 ras->ras_next_readahead = max(ras->ras_window_start,
2157 ras->ras_next_readahead);
2160 /* Trigger RA in the mmap case where ras_consecutive_requests
2161 * is not incremented and thus can't be used to trigger RA */
2162 if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
2163 ras->ras_window_len = INIT_RAS_WINDOW_PAGES;
2164 GOTO(out_unlock, 0);
2165 }
2167 /* Initially reset the stride window offset to next_readahead */
2168 if (ras->ras_consecutive_stride_requests == 2 && stride_detect) {
2169 /*
2170 * Once stride IO mode is detected, next_readahead should be
2171 * reset to make sure next_readahead > stride offset
2172 */
2173 ras->ras_next_readahead = max(index, ras->ras_next_readahead);
2174 ras->ras_stride_offset = index;
2175 ras->ras_window_len = INIT_RAS_WINDOW_PAGES;
2176 }
2178 /* The initial ras_window_len is set to the request size. To avoid
2179 * uselessly reading and discarding pages for random IO the window is
2180 * only increased once per consecutive request received. */
2181 if ((ras->ras_consecutive_requests > 1 || stride_detect) &&
2182 !ras->ras_request_index)
2183 ras_increase_window(ras, ra, inode);
2185 out_unlock:
2187 ras->ras_request_index++;
2188 spin_unlock(&ras->ras_lock);
2192 int ll_writepage(struct page *page)
2194 struct inode *inode = page->mapping->host;
2195 struct ll_inode_info *lli = ll_i2info(inode);
2196 struct obd_export *exp;
2197 struct ll_async_page *llap;
2198 struct ll_thread_data *ltd;
2199 struct lustre_handle *lockh = NULL;
2200 int rc = 0;
2203 LASSERT(PageLocked(page));
2205 exp = ll_i2obdexp(inode);
2206 if (exp == NULL)
2207 GOTO(out, rc = -EINVAL);
2209 ltd = ll_td_get();
2210 /* currently, no FAST lock in write path */
2211 if (ltd && ltd->lock_style == LL_LOCK_STYLE_TREELOCK) {
2212 __u64 offset = ((loff_t)page->index) << CFS_PAGE_SHIFT;
2213 lockh = ltd2lockh(ltd, offset, offset + CFS_PAGE_SIZE - 1);
2214 }
2216 llap = llap_from_page_with_lockh(page, LLAP_ORIGIN_WRITEPAGE, lockh, 0);
2217 if (IS_ERR(llap))
2218 GOTO(out, rc = PTR_ERR(llap));
2220 LASSERT(!PageWriteback(page));
2221 set_page_writeback(page);
2223 page_cache_get(page);
2224 if (llap->llap_write_queued) {
2225 LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
2226 rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
2227 llap->llap_cookie,
2228 ASYNC_READY | ASYNC_URGENT);
2229 } else {
2230 rc = queue_or_sync_write(exp, inode, llap, CFS_PAGE_SIZE,
2231 ASYNC_READY | ASYNC_URGENT);
2232 }
2233 if (rc) {
2234 /* re-dirty page on error so it retries write */
2235 if (PageWriteback(page))
2236 end_page_writeback(page);
2238 /* resend the page only if its IO has not started */
2239 if (!PageError(page))
2240 ll_redirty_page(page);
2242 page_cache_release(page);
2243 }
2244 RETURN(rc);
2245 out:
2246 if (!lli->lli_async_rc)
2247 lli->lli_async_rc = rc;
2248 /* resend the page only if its IO has not started */
2249 if (!PageError(page))
2250 ll_redirty_page(page);
2251 unlock_page(page);
2252 RETURN(rc);
2253 }
2254 /*
2255 * for now we do our readpage the same on both 2.4 and 2.5. The kernel's
2256 * read-ahead assumes it is valid to issue readpage all the way up to
2257 * i_size, but our dlm locks make that not the case. We disable the
2258 * kernel's read-ahead and do our own by walking ahead in the page cache
2259 * checking for dlm lock coverage. the main difference between 2.4 and
2260 * 2.6 is how read-ahead gets batched and issued, but we're using our own,
2261 * so they look the same.
2262 */
2263 int ll_readpage(struct file *filp, struct page *page)
2265 struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
2266 struct inode *inode = page->mapping->host;
2267 struct obd_export *exp;
2268 struct ll_async_page *llap;
2269 struct obd_io_group *oig = NULL;
2270 struct lustre_handle *lockh = NULL;
2271 int rc = 0, flags = 0;
2274 LASSERT(PageLocked(page));
2275 LASSERT(!PageUptodate(page));
2276 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset=%Lu=%#Lx\n",
2277 inode->i_ino, inode->i_generation, inode,
2278 (((loff_t)page->index) << CFS_PAGE_SHIFT),
2279 (((loff_t)page->index) << CFS_PAGE_SHIFT));
2280 LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);
2282 if (!ll_i2info(inode)->lli_smd) {
2283 /* File with no objects - one big hole */
2284 /* We use truncate_complete_page() only because remove_from_page_cache()
2285 * is not exported; the page is then brought back up to date below. */
2286 truncate_complete_page(page->mapping, page);
2287 clear_page(kmap(page));
2288 kunmap(page);
2289 SetPageUptodate(page);
2290 unlock_page(page);
2291 RETURN(0);
2292 }
2294 rc = oig_init(&oig);
2295 if (rc < 0)
2296 GOTO(out, rc);
2298 exp = ll_i2obdexp(inode);
2299 if (exp == NULL)
2300 GOTO(out, rc = -EINVAL);
2302 if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
2303 lockh = &fd->fd_cwlockh;
2304 } else {
2305 struct ll_thread_data *ltd;
2306 ltd = ll_td_get();
2307 if (ltd && ltd->lock_style > 0) {
2308 __u64 offset = ((loff_t)page->index) << CFS_PAGE_SHIFT;
2309 lockh = ltd2lockh(ltd, offset,
2310 offset + CFS_PAGE_SIZE - 1);
2311 if (ltd->lock_style == LL_LOCK_STYLE_FASTLOCK)
2312 flags = OBD_FAST_LOCK;
2313 }
2314 }
2316 llap = llap_from_page_with_lockh(page, LLAP_ORIGIN_READPAGE, lockh,
2317 flags);
2318 if (IS_ERR(llap)) {
2319 if (PTR_ERR(llap) == -ENOLCK) {
2320 CWARN("ino %lu page %lu (%llu) not covered by "
2321 "a lock (mmap?). check debug logs.\n",
2322 inode->i_ino, page->index,
2323 (long long)page->index << PAGE_CACHE_SHIFT);
2324 }
2325 GOTO(out, rc = PTR_ERR(llap));
2326 }
2328 if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages_per_file &&
2329 ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
2330 ras_update(ll_i2sbi(inode), inode, &fd->fd_ras, page->index,
2331 llap->llap_defer_uptodate);
2334 if (llap->llap_defer_uptodate) {
2335 /* This is the callpath if we got the page from a readahead */
2336 llap->llap_ra_used = 1;
2337 if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages_per_file &&
2338 ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
2339 rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
2340 fd->fd_flags);
2341 if (rc > 0)
2342 obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
2343 NULL, oig);
2344 LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
2345 SetPageUptodate(page);
2346 unlock_page(page);
2347 GOTO(out_oig, rc = 0);
2348 }
2350 rc = ll_issue_page_read(exp, llap, oig, 0);
2351 if (rc)
2352 GOTO(out, rc);
2354 LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
2355 /* We have just requested the actual page we want, see if we can tack
2356 * on some readahead to that page's RPC before it is sent. */
2357 if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages_per_file &&
2358 ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
2359 ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
2360 fd->fd_flags);
2362 rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);