1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 * Lustre Lite I/O page cache routines shared by different kernel revs
41 #ifndef AUTOCONF_INCLUDED
42 #include <linux/config.h>
44 #include <linux/kernel.h>
46 #include <linux/string.h>
47 #include <linux/stat.h>
48 #include <linux/errno.h>
49 #include <linux/smp_lock.h>
50 #include <linux/unistd.h>
51 #include <linux/version.h>
52 #include <asm/system.h>
53 #include <asm/uaccess.h>
56 #include <linux/stat.h>
57 #include <asm/uaccess.h>
59 #include <linux/pagemap.h>
60 #include <linux/smp_lock.h>
62 #define DEBUG_SUBSYSTEM S_LLITE
64 #include <linux/page-flags.h>
66 #include <lustre_lite.h>
67 #include "llite_internal.h"
68 #include <linux/lustre_compat25.h>
70 #ifndef list_for_each_prev_safe
71 #define list_for_each_prev_safe(pos, n, head) \
72 for (pos = (head)->prev, n = pos->prev; pos != (head); \
73 pos = n, n = pos->prev )
76 cfs_mem_cache_t *ll_async_page_slab = NULL;
77 size_t ll_async_page_slab_size = 0;
79 /* SYNCHRONOUS I/O to object storage for an inode */
80 static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
81 struct page *page, int flags)
83 struct ll_inode_info *lli = ll_i2info(inode);
84 struct lov_stripe_md *lsm = lli->lli_smd;
85 struct obd_info oinfo = { { { 0 } } };
91 pg.off = ((obd_off)page->index) << CFS_PAGE_SHIFT;
93 if ((cmd & OBD_BRW_WRITE) && (pg.off+CFS_PAGE_SIZE>i_size_read(inode)))
94 pg.count = i_size_read(inode) % CFS_PAGE_SIZE;
96 pg.count = CFS_PAGE_SIZE;
98 LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
99 cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
100 inode->i_ino, pg.off, pg.off);
102 CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
103 LPU64"\n", inode->i_ino, inode, i_size_read(inode),
104 page->mapping->host, i_size_read(page->mapping->host),
105 page->index, pg.off);
110 if (cmd & OBD_BRW_WRITE)
111 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_WRITE,
114 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_READ,
118 rc = obd_brw(cmd, ll_i2obdexp(inode), &oinfo, 1, &pg, NULL);
120 obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
122 CERROR("error from obd_brw: rc = %d\n", rc);
126 int ll_file_punch(struct inode * inode, loff_t new_size, int srvlock)
128 struct ll_inode_info *lli = ll_i2info(inode);
129 struct obd_info oinfo = { { { 0 } } };
130 struct obdo oa = { 0 };
135 CDEBUG(D_INFO, "calling punch for "LPX64" (new size %Lu=%#Lx)\n",
136 lli->lli_smd->lsm_object_id, new_size, new_size);
138 oinfo.oi_md = lli->lli_smd;
139 oinfo.oi_policy.l_extent.start = new_size;
140 oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
142 oa.o_id = lli->lli_smd->lsm_object_id;
143 oa.o_gr = lli->lli_smd->lsm_object_gr;
144 oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
146 valid = OBD_MD_FLTYPE | OBD_MD_FLMODE |OBD_MD_FLFID |
147 OBD_MD_FLATIME | OBD_MD_FLUID | OBD_MD_FLGID | OBD_MD_FLGENER |
150 /* set OBD_MD_FLFLAGS in o_valid, only if we
151 * set OBD_FL_TRUNCLOCK, otherwise ost_punch
152 * and filter_setattr get confused, see the comment
154 oa.o_flags = OBD_FL_TRUNCLOCK;
155 oa.o_valid |= OBD_MD_FLFLAGS;
159 * 1. do not use inode's timestamps because concurrent
160 * stat might fill the inode with out-of-date times,
161 * send current instead
163  *    2. do not update lsm, since stat (via
164  *       ll_glimpse_size) will bring attributes from the OSTs
166 oa.o_mtime = oa.o_ctime = LTIME_S(CURRENT_TIME);
167 oa.o_valid |= OBD_MD_FLMTIME | OBD_MD_FLCTIME;
169 struct ost_lvb *xtimes;
170 /* truncate under locks
172 * 1. update inode's mtime and ctime as long as
173 * concurrent stat (via ll_glimpse_size) might bring
176 * 2. update lsm so that next stat (via
177 * ll_glimpse_size) could get correct values in lsm */
178 OBD_ALLOC_PTR(xtimes);
182 lov_stripe_lock(lli->lli_smd);
183 LTIME_S(inode->i_mtime) = LTIME_S(CURRENT_TIME);
184 LTIME_S(inode->i_ctime) = LTIME_S(CURRENT_TIME);
185 xtimes->lvb_mtime = LTIME_S(inode->i_mtime);
186 xtimes->lvb_ctime = LTIME_S(inode->i_ctime);
187 obd_update_lvb(ll_i2obdexp(inode), lli->lli_smd, xtimes,
188 OBD_MD_FLMTIME | OBD_MD_FLCTIME);
189 lov_stripe_unlock(lli->lli_smd);
190 OBD_FREE_PTR(xtimes);
192 valid |= OBD_MD_FLMTIME | OBD_MD_FLCTIME;
194 obdo_from_inode(&oa, inode, valid);
196 rc = obd_punch_rqset(ll_i2obdexp(inode), &oinfo, NULL);
198 CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
201 obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
202 OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);
205 /* this isn't where truncate starts. roughly:
206 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate. setattr_raw grabs
207 * DLM lock on [size, EOF], i_mutex, ->lli_size_sem, and WRITE_I_ALLOC_SEM to
210 * must be called under ->lli_size_sem */
211 void ll_truncate(struct inode *inode)
213 struct ll_inode_info *lli = ll_i2info(inode);
214 int srvlock = test_bit(LLI_F_SRVLOCK, &lli->lli_flags);
217 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu=%#Lx\n",
218 inode->i_ino, inode->i_generation, inode, i_size_read(inode),
221 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
222 if (lli->lli_size_sem_owner != current) {
228 CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
233 LASSERT(SEM_COUNT(&lli->lli_size_sem) <= 0);
239 /* XXX I'm pretty sure this is a hack to paper over a more
240 * fundamental race condition. */
241 lov_stripe_lock(lli->lli_smd);
242 inode_init_lvb(inode, &lvb);
243 rc = obd_merge_lvb(ll_i2obdexp(inode), lli->lli_smd, &lvb, 0);
244 inode->i_blocks = lvb.lvb_blocks;
245 if (lvb.lvb_size == i_size_read(inode) && rc == 0) {
246 CDEBUG(D_VFSTRACE, "skipping punch for obj "LPX64
248 lli->lli_smd->lsm_object_id, i_size_read(inode),
250 lov_stripe_unlock(lli->lli_smd);
254 obd_adjust_kms(ll_i2obdexp(inode), lli->lli_smd,
255 i_size_read(inode), 1);
256 lov_stripe_unlock(lli->lli_smd);
259 if (unlikely((ll_i2sbi(inode)->ll_flags & LL_SBI_LLITE_CHECKSUM) &&
260 (i_size_read(inode) & ~CFS_PAGE_MASK))) {
261 /* If the truncate leaves a partial page, update its checksum */
262 struct page *page = find_get_page(inode->i_mapping,
263 i_size_read(inode) >>
266 struct ll_async_page *llap = llap_cast_private(page);
268 char *kaddr = kmap_atomic(page, KM_USER0);
269 llap->llap_checksum =
270 init_checksum(OSC_DEFAULT_CKSUM);
271 llap->llap_checksum =
272 compute_checksum(llap->llap_checksum,
273 kaddr, CFS_PAGE_SIZE,
275 kunmap_atomic(kaddr, KM_USER0);
277 page_cache_release(page);
281 new_size = i_size_read(inode);
282 ll_inode_size_unlock(inode, 0);
284 ll_file_punch(inode, new_size, 0);
286 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LOCKLESS_TRUNC, 1);
292 ll_inode_size_unlock(inode, 0);
295 int ll_prepare_write(struct file *file, struct page *page, unsigned from,
298 struct inode *inode = page->mapping->host;
299 struct ll_inode_info *lli = ll_i2info(inode);
300 struct lov_stripe_md *lsm = lli->lli_smd;
301 obd_off offset = ((obd_off)page->index) << CFS_PAGE_SHIFT;
302 struct obd_info oinfo = { { { 0 } } };
304 struct obdo oa = { 0 };
309 LASSERT(PageLocked(page));
310 (void)llap_cast_private(page); /* assertion */
312 /* Check to see if we should return -EIO right away */
315 pga.count = CFS_PAGE_SIZE;
318 oa.o_mode = inode->i_mode;
319 oa.o_id = lsm->lsm_object_id;
320 oa.o_gr = lsm->lsm_object_gr;
321 oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE |
322 OBD_MD_FLTYPE | OBD_MD_FLGROUP;
323 obdo_from_inode(&oa, inode, OBD_MD_FLFID | OBD_MD_FLGENER);
327 rc = obd_brw(OBD_BRW_CHECK, ll_i2obdexp(inode), &oinfo, 1, &pga, NULL);
331 if (PageUptodate(page)) {
332 LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
336 /* We're completely overwriting an existing page, so _don't_ set it up
337 * to date until commit_write */
338 if (from == 0 && to == CFS_PAGE_SIZE) {
339 LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
340 POISON_PAGE(page, 0x11);
344         /* If we are writing to a new page, there is no need to read old data. The extent
345 * locking will have updated the KMS, and for our purposes here we can
346 * treat it like i_size. */
347 lov_stripe_lock(lsm);
348 inode_init_lvb(inode, &lvb);
349 obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
350 lov_stripe_unlock(lsm);
351 if (lvb.lvb_size <= offset) {
352 char *kaddr = kmap_atomic(page, KM_USER0);
353 LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
354 lvb.lvb_size, offset);
355 memset(kaddr, 0, CFS_PAGE_SIZE);
356 kunmap_atomic(kaddr, KM_USER0);
357 GOTO(prepare_done, rc = 0);
360 /* XXX could be an async ocp read.. read-ahead? */
361 rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
363 /* bug 1598: don't clobber blksize */
364 oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
365 obdo_refresh_inode(inode, &oa, oa.o_valid);
371 SetPageUptodate(page);
377 * make page ready for ASYNC write
378 * \param data - pointer to llap cookie
379  * \param cmd - one of the OBD_BRW_* macros
381  * \retval 0 if the page was successfully prepared to send
382  * \retval -EAGAIN if the page does not need to be sent
384 static int ll_ap_make_ready(void *data, int cmd)
386 struct ll_async_page *llap;
390 llap = LLAP_FROM_COOKIE(data);
391 page = llap->llap_page;
393 /* we're trying to write, but the page is locked.. come back later */
394 if (TryLockPage(page))
397 LASSERTF(!(cmd & OBD_BRW_READ) || !PageWriteback(page),
398 "cmd %x page %p ino %lu index %lu fl %lx\n", cmd, page,
399 page->mapping->host->i_ino, page->index, page->flags);
401 /* if we left PageDirty we might get another writepage call
402 * in the future. list walkers are bright enough
403 * to check page dirty so we can leave it on whatever list
404          * it's on.  XXX also, we're called with the cli list so if
405 * we got the page cache list we'd create a lock inversion
406 * with the removepage path which gets the page lock then the
408 if(!clear_page_dirty_for_io(page)) {
413 /* This actually clears the dirty bit in the radix tree.*/
414 set_page_writeback(page);
416 LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
417 page_cache_get(page);
422 /* We have two reasons for giving llite the opportunity to change the
423 * write length of a given queued page as it builds the RPC containing
426 * 1) Further extending writes may have landed in the page cache
427 * since a partial write first queued this page requiring us
428 * to write more from the page cache. (No further races are possible,
429 * since by the time this is called, the page is locked.)
430 * 2) We might have raced with truncate and want to avoid performing
431 * write RPCs that are just going to be thrown away by the
432 * truncate's punch on the storage targets.
434 * The kms serves these purposes as it is set at both truncate and extending
437 static int ll_ap_refresh_count(void *data, int cmd)
439 struct ll_inode_info *lli;
440 struct ll_async_page *llap;
441 struct lov_stripe_md *lsm;
448 /* readpage queues with _COUNT_STABLE, shouldn't get here. */
449 LASSERT(cmd != OBD_BRW_READ);
451 llap = LLAP_FROM_COOKIE(data);
452 page = llap->llap_page;
453 inode = page->mapping->host;
454 lli = ll_i2info(inode);
457 lov_stripe_lock(lsm);
458 inode_init_lvb(inode, &lvb);
459 obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
461 lov_stripe_unlock(lsm);
463 /* catch race with truncate */
464 if (((__u64)page->index << CFS_PAGE_SHIFT) >= kms)
467 /* catch sub-page write at end of file */
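        /* for illustration, assuming CFS_PAGE_SIZE == 4096: with kms at
         * 3 pages + 512 bytes and page->index == 3, only the final
         * 512 bytes of the page are sent */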
468 if (((__u64)page->index << CFS_PAGE_SHIFT) + CFS_PAGE_SIZE > kms)
469 return kms % CFS_PAGE_SIZE;
471 return CFS_PAGE_SIZE;
474 void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
476 struct lov_stripe_md *lsm;
477 obd_flag valid_flags;
479 lsm = ll_i2info(inode)->lli_smd;
481 oa->o_id = lsm->lsm_object_id;
482 oa->o_gr = lsm->lsm_object_gr;
483 oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
484 valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
485 if (cmd & OBD_BRW_WRITE) {
486 oa->o_valid |= OBD_MD_FLEPOCH;
487 oa->o_easize = ll_i2info(inode)->lli_io_epoch;
489 valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
490 OBD_MD_FLUID | OBD_MD_FLGID |
491 OBD_MD_FLFID | OBD_MD_FLGENER;
494 obdo_from_inode(oa, inode, valid_flags);
497 static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
499 struct ll_async_page *llap;
502 llap = LLAP_FROM_COOKIE(data);
503 ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);
508 static void ll_ap_update_obdo(void *data, int cmd, struct obdo *oa,
511 struct ll_async_page *llap;
514 llap = LLAP_FROM_COOKIE(data);
515 obdo_from_inode(oa, llap->llap_page->mapping->host, valid);
520 static struct obd_async_page_ops ll_async_page_ops = {
521 .ap_make_ready = ll_ap_make_ready,
522 .ap_refresh_count = ll_ap_refresh_count,
523 .ap_fill_obdo = ll_ap_fill_obdo,
524 .ap_update_obdo = ll_ap_update_obdo,
525 .ap_completion = ll_ap_completion,
528 struct ll_async_page *llap_cast_private(struct page *page)
530 struct ll_async_page *llap = (struct ll_async_page *)page_private(page);
532 LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
533 "page %p private %lu gave magic %d which != %d\n",
534 page, page_private(page), llap->llap_magic, LLAP_MAGIC);
539 /* Try to reap @target pages in the specified @cpu's async page list.
541 * There is an llap attached onto every page in lustre, linked off @sbi.
542 * We add an llap to the list so we don't lose our place during list walking.
543 * If llaps in the list are being moved they will only move to the end
544 * of the LRU, and we aren't terribly interested in those pages here (we
545  * start at the beginning of the list where the least-used llaps are). */
546 static inline int llap_shrink_cache_internal(struct ll_sb_info *sbi,
549 struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
550 struct ll_pglist_data *pd;
551 struct list_head *head;
554 pd = ll_pglist_cpu_lock(sbi, cpu);
555 head = &pd->llpd_list;
556 list_add(&dummy_llap.llap_pglist_item, head);
557 while (count < target) {
561 if (unlikely(need_resched())) {
562 list_del(&dummy_llap.llap_pglist_item);
563 ll_pglist_cpu_unlock(sbi, cpu);
564                         /* vmscan::shrink_slab() has its own schedule() */
568 llap = llite_pglist_next_llap(head,
569 &dummy_llap.llap_pglist_item);
570 list_del_init(&dummy_llap.llap_pglist_item);
574 page = llap->llap_page;
575 LASSERT(page != NULL);
577 list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);
579 /* Page needs/undergoing IO */
580 if (TryLockPage(page)) {
581 LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
585 keep = (llap->llap_write_queued || PageDirty(page) ||
586 PageWriteback(page) || (!PageUptodate(page) &&
587 llap->llap_origin != LLAP_ORIGIN_READAHEAD));
589 LL_CDEBUG_PAGE(D_PAGE, page,
590 "%s LRU page: %s%s%s%s%s origin %s\n",
591 keep ? "keep" : "drop",
592 llap->llap_write_queued ? "wq " : "",
593 PageDirty(page) ? "pd " : "",
594 PageUptodate(page) ? "" : "!pu ",
595 PageWriteback(page) ? "wb" : "",
596 llap->llap_defer_uptodate ? "" : "!du",
597 llap_origins[llap->llap_origin]);
599 /* If page is dirty or undergoing IO don't discard it */
605 page_cache_get(page);
606 ll_pglist_cpu_unlock(sbi, cpu);
608 if (page->mapping != NULL) {
609 ll_teardown_mmaps(page->mapping,
610 (__u64)page->index << CFS_PAGE_SHIFT,
611 ((__u64)page->index << CFS_PAGE_SHIFT)|
613 if (!PageDirty(page) && !page_mapped(page)) {
614 ll_ra_accounting(llap, page->mapping);
615 ll_truncate_complete_page(page);
618 LL_CDEBUG_PAGE(D_PAGE, page,
619 "Not dropping page because it is"
620 " %s\n", PageDirty(page) ?
625 page_cache_release(page);
627 ll_pglist_cpu_lock(sbi, cpu);
629 list_del(&dummy_llap.llap_pglist_item);
630 ll_pglist_cpu_unlock(sbi, cpu);
632                 CDEBUG(D_CACHE, "shrank %d pages, expected %d\n", count, target);
637 /* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
639  * First, this code calculates the total number of pages wanted via @shrink_fraction,
640  * then it works out how many pages should be reaped from each cpu in proportion
641  * to that cpu's own page count (llpd_count).
643 int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
645 unsigned long total, want, percpu_want, count = 0;
648 total = lcounter_read_positive(&sbi->ll_async_page_count);
652 #ifdef HAVE_SHRINKER_CACHE
653 want = shrink_fraction;
657 /* There can be a large number of llaps (600k or more in a large
658 * memory machine) so the VM 1/6 shrink ratio is likely too much.
659 * Since we are freeing pages also, we don't necessarily want to
660 * shrink so much. Limit to 40MB of pages + llaps per call. */
661 if (shrink_fraction <= 0)
662 want = total - sbi->ll_async_page_max + 32*num_online_cpus();
664 want = (total + shrink_fraction - 1) / shrink_fraction;
667 if (want > 40 << (20 - CFS_PAGE_SHIFT))
668 want = 40 << (20 - CFS_PAGE_SHIFT);
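        /* for illustration, with 4 KB pages (CFS_PAGE_SHIFT == 12) this caps
         * want at 40 << 8 == 10240 pages, i.e. 40 MB */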
670 CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
671 want, total, shrink_fraction);
673 nr_cpus = num_possible_cpus();
674 cpu = sbi->ll_async_page_clock_hand;
675         /* we do at most one round */
679 cpu = (cpu + 1) % nr_cpus;
680 c = LL_PGLIST_DATA_CPU(sbi, cpu)->llpd_count;
681 if (!cpu_online(cpu))
684 percpu_want = want / ((total / (c + 1)) + 1);
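                /* roughly proportional to this cpu's share of the llaps; for
                 * illustration, want = 100, total = 1000, c = 249 gives
                 * 100 / (1000/250 + 1) = 20 pages asked from this cpu */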
685 if (percpu_want == 0)
688 count += llap_shrink_cache_internal(sbi, cpu, percpu_want);
690 sbi->ll_async_page_clock_hand = cpu;
691 } while (cpu != sbi->ll_async_page_clock_hand);
693 CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
696 #ifdef HAVE_SHRINKER_CACHE
697 return lcounter_read_positive(&sbi->ll_async_page_count);
703 /* Rebalance the async page queue length for each cpu. The intent is that a
704  * cpu doing more IO gets a relatively longer queue.
705 * This function should be called with preempt disabled.
707 static inline int llap_async_cache_rebalance(struct ll_sb_info *sbi)
709 unsigned long sample = 0, *cpu_sample, bias, slice;
710 struct ll_pglist_data *pd;
713 int w1 = 7, w2 = 3, base = (w1 + w2); /* weight value */
716 if (!spin_trylock(&sbi->ll_async_page_reblnc_lock)) {
717 /* someone else is doing the job */
721 pcnt = &LL_PGLIST_DATA(sbi)->llpd_sample_count;
722 if (!atomic_read(pcnt)) {
723 /* rare case, somebody else has gotten this job done */
724 spin_unlock(&sbi->ll_async_page_reblnc_lock);
728 sbi->ll_async_page_reblnc_count++;
729 cpu_sample = sbi->ll_async_page_sample;
730 memset(cpu_sample, 0, num_possible_cpus() * sizeof(unsigned long));
731 for_each_online_cpu(cpu) {
732 pcnt = &LL_PGLIST_DATA_CPU(sbi, cpu)->llpd_sample_count;
733 cpu_sample[cpu] = atomic_read(pcnt);
735 sample += cpu_sample[cpu];
739 surplus = sbi->ll_async_page_max;
740 slice = surplus / sample + 1;
741 sample /= num_online_cpus();
743 for_each_online_cpu(cpu) {
744 pd = LL_PGLIST_DATA_CPU(sbi, cpu);
745 if (labs((long int)sample - cpu_sample[cpu]) > bias) {
746 unsigned long budget = pd->llpd_budget;
747 /* weighted original queue length and expected queue
748 * length to avoid thrashing. */
749 pd->llpd_budget = (budget * w1) / base +
750 (slice * cpu_sample[cpu]) * w2 / base;
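                        /* for illustration, an old budget of 1000 and an
                         * expected length slice * cpu_sample[cpu] of 2000 give
                         * 1000*7/10 + 2000*3/10 = 1300 as the new budget */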
753 surplus -= min_t(int, pd->llpd_budget, surplus);
755 surplus /= cpus_weight(mask) ?: 1;
756 for_each_cpu_mask(cpu, mask)
757 LL_PGLIST_DATA_CPU(sbi, cpu)->llpd_budget += surplus;
758 spin_unlock(&sbi->ll_async_page_reblnc_lock);
760 /* We need to call llap_shrink_cache_internal() for every cpu to
761 * ensure the sbi->ll_async_page_max limit is enforced. */
762 for_each_cpu_mask(cpu, mask) {
763 pd = LL_PGLIST_DATA_CPU(sbi, cpu);
764 llap_shrink_cache_internal(sbi, cpu, max_t(int, pd->llpd_count-
765 pd->llpd_budget, 0) + 32);
771 static struct ll_async_page *llap_from_page_with_lockh(struct page *page,
773 struct lustre_handle *lockh)
775 struct ll_async_page *llap;
776 struct obd_export *exp;
777 struct inode *inode = page->mapping->host;
778 struct ll_sb_info *sbi;
779 struct ll_pglist_data *pd;
784 static int triggered;
787 LL_CDEBUG_PAGE(D_ERROR, page, "Bug 10047. Wrong anon "
789 libcfs_debug_dumpstack(NULL);
792 RETURN(ERR_PTR(-EINVAL));
794 sbi = ll_i2sbi(inode);
795 LASSERT(ll_async_page_slab);
796 LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);
798 llap = llap_cast_private(page);
800 /* move to end of LRU list, except when page is just about to
802 if (origin != LLAP_ORIGIN_REMOVEPAGE) {
803 int old_cpu = llap->llap_pglist_cpu;
804 struct ll_pglist_data *old_pd;
806 pd = ll_pglist_double_lock(sbi, old_cpu, &old_pd);
808 while (old_cpu != llap->llap_pglist_cpu) {
809                         /* rare case, someone else is touching this
811 ll_pglist_double_unlock(sbi, old_cpu);
812 old_cpu = llap->llap_pglist_cpu;
813 pd=ll_pglist_double_lock(sbi, old_cpu, &old_pd);
816 list_move(&llap->llap_pglist_item,
819 if (pd->llpd_cpu != old_cpu) {
821 old_pd->llpd_count--;
823 llap->llap_pglist_cpu = pd->llpd_cpu;
826 ll_pglist_double_unlock(sbi, old_cpu);
831 exp = ll_i2obdexp(page->mapping->host);
833 RETURN(ERR_PTR(-EINVAL));
835 /* limit the number of lustre-cached pages */
837 pd = LL_PGLIST_DATA(sbi);
838 target = pd->llpd_count - pd->llpd_budget;
841 atomic_inc(&pd->llpd_sample_count);
842 if (atomic_read(&pd->llpd_sample_count) >
843 sbi->ll_async_page_sample_max) {
844 pd->llpd_reblnc_count++;
845 rc = llap_async_cache_rebalance(sbi);
847 target = pd->llpd_count - pd->llpd_budget;
849                 /* if rc equals 1, it means another cpu is doing the rebalance
850                  * job, and our budget # would be modified while we read it.
851                  * Furthermore, it is most likely being increased because
852                  * we have already reached the rebalance threshold. In this
853                  * case, skip shrinking the cache here. */
854 if ((rc == 0) && target > 0)
855 llap_shrink_cache_internal(sbi, cpu, target + 32);
859 OBD_SLAB_ALLOC(llap, ll_async_page_slab, CFS_ALLOC_STD,
860 ll_async_page_slab_size);
862 RETURN(ERR_PTR(-ENOMEM));
863 llap->llap_magic = LLAP_MAGIC;
864 llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));
866 /* XXX: for bug 11270 - check for lockless origin here! */
867 if (origin == LLAP_ORIGIN_LOCKLESS_IO)
868 llap->llap_nocache = 1;
870 rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
871 (obd_off)page->index << CFS_PAGE_SHIFT,
872 &ll_async_page_ops, llap, &llap->llap_cookie,
873 llap->llap_nocache, lockh);
875 OBD_SLAB_FREE(llap, ll_async_page_slab,
876 ll_async_page_slab_size);
880 CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
881 page, llap->llap_cookie, (obd_off)page->index << CFS_PAGE_SHIFT);
882 /* also zeroing the PRIVBITS low order bitflags */
883 __set_page_ll_data(page, llap);
884 llap->llap_page = page;
886 lcounter_inc(&sbi->ll_async_page_count);
887 pd = ll_pglist_lock(sbi);
888 list_add_tail(&llap->llap_pglist_item, &pd->llpd_list);
892 llap->llap_pglist_cpu = pd->llpd_cpu;
893 ll_pglist_unlock(sbi);
896 if (unlikely(sbi->ll_flags & LL_SBI_LLITE_CHECKSUM)) {
898 char *kaddr = kmap_atomic(page, KM_USER0);
899 csum = init_checksum(OSC_DEFAULT_CKSUM);
900 csum = compute_checksum(csum, kaddr, CFS_PAGE_SIZE,
902 kunmap_atomic(kaddr, KM_USER0);
903 if (origin == LLAP_ORIGIN_READAHEAD ||
904 origin == LLAP_ORIGIN_READPAGE ||
905 origin == LLAP_ORIGIN_LOCKLESS_IO) {
906 llap->llap_checksum = 0;
907 } else if (origin == LLAP_ORIGIN_COMMIT_WRITE ||
908 llap->llap_checksum == 0) {
909 llap->llap_checksum = csum;
910 CDEBUG(D_PAGE, "page %p cksum %x\n", page, csum);
911 } else if (llap->llap_checksum == csum) {
912 /* origin == LLAP_ORIGIN_WRITEPAGE */
913 CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
916 /* origin == LLAP_ORIGIN_WRITEPAGE */
917 LL_CDEBUG_PAGE(D_ERROR, page, "old cksum %x != new "
918 "%x!\n", llap->llap_checksum, csum);
922 llap->llap_origin = origin;
926 static inline struct ll_async_page *llap_from_page(struct page *page,
929 return llap_from_page_with_lockh(page, origin, NULL);
932 static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
933 struct ll_async_page *llap,
934 unsigned to, obd_flag async_flags)
936 unsigned long size_index = i_size_read(inode) >> CFS_PAGE_SHIFT;
937 struct obd_io_group *oig;
938 struct ll_sb_info *sbi = ll_i2sbi(inode);
939 int rc, noquot = llap->llap_ignore_quota ? OBD_BRW_NOQUOTA : 0;
940 int brwflags = OBD_BRW_ASYNC;
943 /* _make_ready only sees llap once we've unlocked the page */
944 llap->llap_write_queued = 1;
945 rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
946 llap->llap_cookie, OBD_BRW_WRITE | noquot,
947 0, 0, brwflags, async_flags);
949 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
950 llap_write_pending(inode, llap);
954 llap->llap_write_queued = 0;
960 /* make full-page requests if we are not at EOF (bug 4410) */
961 if (to != CFS_PAGE_SIZE && llap->llap_page->index < size_index) {
962 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
963 "sync write before EOF: size_index %lu, to %d\n",
966 } else if (to != CFS_PAGE_SIZE && llap->llap_page->index == size_index){
967 int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
968 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
969 "sync write at EOF: size_index %lu, to %d/%d\n",
970 size_index, to, size_to);
975 /* compare the checksum once before the page leaves llite */
976 if (unlikely((sbi->ll_flags & LL_SBI_LLITE_CHECKSUM) &&
977 llap->llap_checksum != 0)) {
979 struct page *page = llap->llap_page;
980 char *kaddr = kmap_atomic(page, KM_USER0);
981 csum = init_checksum(OSC_DEFAULT_CKSUM);
982 csum = compute_checksum(csum, kaddr, CFS_PAGE_SIZE,
984 kunmap_atomic(kaddr, KM_USER0);
985 if (llap->llap_checksum == csum) {
986 CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
989 CERROR("page %p old cksum %x != new cksum %x!\n",
990 page, llap->llap_checksum, csum);
994 rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
995 llap->llap_cookie, OBD_BRW_WRITE | noquot,
996 0, to, 0, ASYNC_READY | ASYNC_URGENT |
997 ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
1001 rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
1007 if (!rc && async_flags & ASYNC_READY) {
1008 unlock_page(llap->llap_page);
1009 if (PageWriteback(llap->llap_page))
1010 end_page_writeback(llap->llap_page);
1013 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);
1021 /* update our write count to account for i_size increases that may have
1022 * happened since we've queued the page for io. */
1024 /* be careful not to return success without setting the page Uptodate or
1025 * the next pass through prepare_write will read in stale data from disk. */
1026 int ll_commit_write(struct file *file, struct page *page, unsigned from,
1029 struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
1030 struct inode *inode = page->mapping->host;
1031 struct ll_inode_info *lli = ll_i2info(inode);
1032 struct lov_stripe_md *lsm = lli->lli_smd;
1033 struct obd_export *exp;
1034 struct ll_async_page *llap;
1036 struct lustre_handle *lockh = NULL;
1040 SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
1041 LASSERT(inode == file->f_dentry->d_inode);
1042 LASSERT(PageLocked(page));
1044 CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
1045 inode, page, from, to, page->index);
1047 if (fd->fd_flags & LL_FILE_GROUP_LOCKED)
1048 lockh = &fd->fd_cwlockh;
1050 llap = llap_from_page_with_lockh(page, LLAP_ORIGIN_COMMIT_WRITE, lockh);
1052 RETURN(PTR_ERR(llap));
1054 exp = ll_i2obdexp(inode);
1058 llap->llap_ignore_quota = cfs_capable(CFS_CAP_SYS_RESOURCE);
1060 /* queue a write for some time in the future the first time we
1062 if (!PageDirty(page)) {
1063 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_MISSES, 1);
1065 rc = queue_or_sync_write(exp, inode, llap, to, 0);
1069 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_HITS, 1);
1072 /* put the page in the page cache, from now on ll_removepage is
1073 * responsible for cleaning up the llap.
1074          * only set the page dirty when it's queued to be written out */
1075 if (llap->llap_write_queued)
1076 set_page_dirty(page);
1079 size = (((obd_off)page->index) << CFS_PAGE_SHIFT) + to;
1080 ll_inode_size_lock(inode, 0);
1082 lov_stripe_lock(lsm);
1083 obd_adjust_kms(exp, lsm, size, 0);
1084 lov_stripe_unlock(lsm);
1085 if (size > i_size_read(inode))
1086 i_size_write(inode, size);
1087 SetPageUptodate(page);
1088 } else if (size > i_size_read(inode)) {
1089                 /* this page is beyond the pale of i_size, so it can't be
1090                  * truncated in ll_p_r_e during lock revoking. we must
1091                  * tear down our book-keeping here. */
1092 ll_removepage(page);
1094 ll_inode_size_unlock(inode, 0);
1098 static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
1100 /* WARNING: This algorithm is used to reduce the contention on
1101 * sbi->ll_lock. It should work well if the ra_max_pages is much
1102 * greater than the single file's read-ahead window.
1104 * TODO: There may exist a `global sync problem' in this implementation.
1105  * Consider a global ra window of 100M with each file's ra window at 10M:
1106  * if more than 10 files try to get their ra budget and reach
1107  * ll_ra_count_get at exactly the same time, all of them will get a zero ra
1108  * window, even though the global window is 100M. -jay
1110 static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
1112 struct ll_ra_info *ra = &sbi->ll_ra_info;
1113 unsigned long ret = 0;
1117          * If fewer than 1M worth of read-ahead pages are left, do not do read-ahead;
1118          * otherwise it will form small read RPCs (< 1M), which hurt server
1119          * performance a lot.
1121 ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), len);
1122 if ((int)ret < min((unsigned long)PTLRPC_MAX_BRW_PAGES, len))
1125 if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
1126 atomic_sub(ret, &ra->ra_cur_pages);
1133 static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
1135 struct ll_ra_info *ra = &sbi->ll_ra_info;
1136 atomic_sub(len, &ra->ra_cur_pages);
1139 /* called for each page in a completed rpc.*/
1140 int ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
1142 struct ll_async_page *llap;
1147 llap = LLAP_FROM_COOKIE(data);
1148 page = llap->llap_page;
1149 LASSERT(PageLocked(page));
1150 LASSERT(CheckWriteback(page,cmd));
1152 LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);
1154 if (cmd & OBD_BRW_READ && llap->llap_defer_uptodate)
1155 ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);
1158 if (cmd & OBD_BRW_READ) {
1159 if (!llap->llap_defer_uptodate)
1160 SetPageUptodate(page);
1162 llap->llap_write_queued = 0;
1164 ClearPageError(page);
1166 if (cmd & OBD_BRW_READ) {
1167 llap->llap_defer_uptodate = 0;
1171 set_bit(AS_ENOSPC, &page->mapping->flags);
1173 set_bit(AS_EIO, &page->mapping->flags);
1176         /* be careful about clearing WB.
1177          * if WB is cleared after the page lock is released, parallel IO can be
1178          * started before ap_make_ready is finished, so we could end up with a page
1179          * that has PG_Writeback set from ->writepage() and a completed READ that
1180          * clears this flag */
1181 if ((cmd & OBD_BRW_WRITE) && PageWriteback(page))
1182 end_page_writeback(page);
1186 if (cmd & OBD_BRW_WRITE) {
1187 llap_write_complete(page->mapping->host, llap);
1188 ll_try_done_writing(page->mapping->host);
1191 page_cache_release(page);
1196 static void __ll_put_llap(struct page *page)
1198 struct inode *inode = page->mapping->host;
1199 struct obd_export *exp;
1200 struct ll_async_page *llap;
1201 struct ll_sb_info *sbi = ll_i2sbi(inode);
1202 struct ll_pglist_data *pd;
1206 exp = ll_i2obdexp(inode);
1208 CERROR("page %p ind %lu gave null export\n", page, page->index);
1213 llap = llap_from_page(page, LLAP_ORIGIN_REMOVEPAGE);
1215 CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
1216 page->index, PTR_ERR(llap));
1221 //llap_write_complete(inode, llap);
1222 rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
1225 CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);
1227 /* this unconditional free is only safe because the page lock
1228 * is providing exclusivity to memory pressure/truncate/writeback..*/
1229 __clear_page_ll_data(page);
1231 lcounter_dec(&sbi->ll_async_page_count);
1232 cpu = llap->llap_pglist_cpu;
1233 pd = ll_pglist_cpu_lock(sbi, cpu);
1236 if (!list_empty(&llap->llap_pglist_item))
1237 list_del_init(&llap->llap_pglist_item);
1238 ll_pglist_cpu_unlock(sbi, cpu);
1239 OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
1244 /* the kernel calls us here when a page is unhashed from the page cache.
1245 * the page will be locked and the kernel is holding a spinlock, so
1246 * we need to be careful. we're just tearing down our book-keeping
1248 void ll_removepage(struct page *page)
1250 struct ll_async_page *llap = llap_cast_private(page);
1253 LASSERT(!in_interrupt());
1255 /* sync pages or failed read pages can leave pages in the page
1256 * cache that don't have our data associated with them anymore */
1257 if (page_private(page) == 0) {
1262 LASSERT(!llap->llap_lockless_io_page);
1263 LASSERT(!llap->llap_nocache);
1265 LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");
1266 __ll_put_llap(page);
1271 static int ll_issue_page_read(struct obd_export *exp,
1272 struct ll_async_page *llap,
1273 struct obd_io_group *oig, int defer)
1275 struct page *page = llap->llap_page;
1278 page_cache_get(page);
1279 llap->llap_defer_uptodate = defer;
1280 llap->llap_ra_used = 0;
1281 rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
1282 NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
1283 CFS_PAGE_SIZE, 0, ASYNC_COUNT_STABLE |
1284 ASYNC_READY | ASYNC_URGENT);
1286 LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
1287 page_cache_release(page);
1292 static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
1294 LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
1295 lprocfs_counter_incr(sbi->ll_ra_stats, which);
1298 static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
1300 struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
1301 ll_ra_stats_inc_sbi(sbi, which);
1304 void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
1306 if (!llap->llap_defer_uptodate || llap->llap_ra_used)
1309 ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
1312 #define RAS_CDEBUG(ras) \
1314 "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu" \
1315 "csr %lu sf %lu sp %lu sl %lu \n", \
1316 ras->ras_last_readpage, ras->ras_consecutive_requests, \
1317 ras->ras_consecutive_pages, ras->ras_window_start, \
1318 ras->ras_window_len, ras->ras_next_readahead, \
1319 ras->ras_requests, ras->ras_request_index, \
1320 ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \
1321 ras->ras_stride_pages, ras->ras_stride_length)
1323 static int index_in_window(unsigned long index, unsigned long point,
1324 unsigned long before, unsigned long after)
1326 unsigned long start = point - before, end = point + after;
1333 return start <= index && index <= end;
1336 static struct ll_readahead_state *ll_ras_get(struct file *f)
1338 struct ll_file_data *fd;
1340 fd = LUSTRE_FPRIVATE(f);
1344 void ll_ra_read_init(struct file *f, struct ll_ra_read *rar,
1345 loff_t offset, size_t count)
1347 struct ll_readahead_state *ras;
1349 ras = ll_ras_get(f);
1351 rar->lrr_start = offset >> CFS_PAGE_SHIFT;
1352 rar->lrr_count = (count + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
1354 spin_lock(&ras->ras_lock);
1355 ras->ras_requests++;
1356 ras->ras_request_index = 0;
1357 ras->ras_consecutive_requests++;
1358 rar->lrr_reader = current;
1360 list_add(&rar->lrr_linkage, &ras->ras_read_beads);
1361 spin_unlock(&ras->ras_lock);
1364 void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
1366 struct ll_readahead_state *ras;
1368 ras = ll_ras_get(f);
1370 spin_lock(&ras->ras_lock);
1371 list_del_init(&rar->lrr_linkage);
1372 spin_unlock(&ras->ras_lock);
1375 static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
1377 struct ll_ra_read *scan;
1379 list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
1380 if (scan->lrr_reader == current)
1386 struct ll_ra_read *ll_ra_read_get(struct file *f)
1388 struct ll_readahead_state *ras;
1389 struct ll_ra_read *bead;
1391 ras = ll_ras_get(f);
1393 spin_lock(&ras->ras_lock);
1394 bead = ll_ra_read_get_locked(ras);
1395 spin_unlock(&ras->ras_lock);
1399 static int ll_read_ahead_page(struct obd_export *exp, struct obd_io_group *oig,
1400 pgoff_t index, struct address_space *mapping)
1402 struct ll_async_page *llap;
1404 unsigned int gfp_mask = 0;
1407 gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
1409 gfp_mask |= __GFP_NOWARN;
1411 page = grab_cache_page_nowait_gfp(mapping, index, gfp_mask);
1413 ll_ra_stats_inc(mapping, RA_STAT_FAILED_GRAB_PAGE);
1414 CDEBUG(D_READA, "g_c_p_n failed\n");
1418 /* Check if page was truncated or reclaimed */
1419 if (page->mapping != mapping) {
1420 ll_ra_stats_inc(mapping, RA_STAT_WRONG_GRAB_PAGE);
1421 CDEBUG(D_READA, "g_c_p_n returned invalid page\n");
1422 GOTO(unlock_page, rc = 0);
1425 /* we do this first so that we can see the page in the /proc
1427 llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
1428 if (IS_ERR(llap) || llap->llap_defer_uptodate) {
1429 if (PTR_ERR(llap) == -ENOLCK) {
1430 ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
1431 CDEBUG(D_READA | D_PAGE,
1432 "Adding page to cache failed index "
1434 CDEBUG(D_READA, "nolock page\n");
1435 GOTO(unlock_page, rc = -ENOLCK);
1437 CDEBUG(D_READA, "read-ahead page\n");
1438 GOTO(unlock_page, rc = 0);
1441 /* skip completed pages */
1442 if (Page_Uptodate(page))
1443 GOTO(unlock_page, rc = 0);
1445 /* bail out when we hit the end of the lock. */
1446 rc = ll_issue_page_read(exp, llap, oig, 1);
1448 LL_CDEBUG_PAGE(D_READA | D_PAGE, page, "started read-ahead\n");
1453 LL_CDEBUG_PAGE(D_READA | D_PAGE, page, "skipping read-ahead\n");
1455 page_cache_release(page);
1459 /* ra_io_arg will be filled in at the beginning of ll_readahead with
1460  * ras_lock held, then the following ll_read_ahead_pages will read RA
1461 * pages according to this arg, all the items in this structure are
1462 * counted by page index.
1465 unsigned long ria_start; /* start offset of read-ahead*/
1466 unsigned long ria_end; /* end offset of read-ahead*/
1467         /* If a stride read pattern is detected, ria_stoff is where the
1468          * stride read starts. Note: for normal read-ahead, the
1469          * value here is meaningless, and it will not be accessed */
1471         /* ria_length and ria_pages are the stride length and the number of pages
1472          * read per stride in stride I/O mode. They are also used to check whether
1473          * a page is part of a stride I/O read-ahead within the read-ahead pages */
1474 unsigned long ria_length;
1475 unsigned long ria_pages;
1478 #define RIA_DEBUG(ria) \
1479 CDEBUG(D_READA, "rs %lu re %lu ro %lu rl %lu rp %lu\n", \
1480 ria->ria_start, ria->ria_end, ria->ria_stoff, ria->ria_length,\
1483 #define INIT_RAS_WINDOW_PAGES PTLRPC_MAX_BRW_PAGES
1485 static inline int stride_io_mode(struct ll_readahead_state *ras)
1487 return ras->ras_consecutive_stride_requests > 1;
1490 /* The function calculates how many pages will be read in
1491  * [off, off + length] when reading in stride I/O mode, with
1492  * stride_offset = st_off, stride_length = st_len,
1493  * stride_pages = st_pgs
1495 static unsigned long
1496 stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
1497 unsigned long off, unsigned length)
1499 unsigned long cont_len = st_off > off ? st_off - off : 0;
1500 __u64 stride_len = length + off > st_off ?
1501 length + off + 1 - st_off : 0;
1502 unsigned long left, pg_count;
1504 if (st_len == 0 || length == 0)
1507 left = do_div(stride_len, st_len);
1508 left = min(left, st_pgs);
1510 pg_count = left + stride_len * st_pgs + cont_len;
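        /* for illustration: st_off 0, st_len 16, st_pgs 4, off 0 and a
         * 31-page region starting at page 0 give cont_len 0, stride_len 32
         * -> quotient 2, left 0, so pg_count = 2*4 = 8 (pages 0-3 and 16-19) */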
1512 LASSERT(pg_count >= left);
1514 CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %u"
1515                " pgcount %lu\n", st_off, st_len, st_pgs, off, length, pg_count);
1520 static int ria_page_count(struct ra_io_arg *ria)
1522 __u64 length = ria->ria_end >= ria->ria_start ?
1523 ria->ria_end - ria->ria_start + 1 : 0;
1525 return stride_pg_count(ria->ria_stoff, ria->ria_length,
1526 ria->ria_pages, ria->ria_start,
1530 /* Check whether the index is in the defined ra-window */
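/* for illustration, with ria_stoff 0, ria_length 16 and ria_pages 4,
 * indices 0-3, 16-19, 32-35, ... are considered inside the window */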
1531 static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria)
1533 /* If ria_length == ria_pages, it means non-stride I/O mode,
1534          * idx should always be inside the read-ahead window in this case.
1535 * For stride I/O mode, just check whether the idx is inside
1537 return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
1538 (idx - ria->ria_stoff) % ria->ria_length < ria->ria_pages;
1541 static int ll_read_ahead_pages(struct obd_export *exp,
1542 struct obd_io_group *oig,
1543 struct ra_io_arg *ria,
1544 unsigned long *reserved_pages,
1545 struct address_space *mapping,
1546 unsigned long *ra_end)
1548 int rc, count = 0, stride_ria;
1549 unsigned long page_idx;
1551 LASSERT(ria != NULL);
1554 stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
1555 for (page_idx = ria->ria_start; page_idx <= ria->ria_end &&
1556 *reserved_pages > 0; page_idx++) {
1557 if (ras_inside_ra_window(page_idx, ria)) {
1558 /* If the page is inside the read-ahead window*/
1559 rc = ll_read_ahead_page(exp, oig, page_idx, mapping);
1561 (*reserved_pages)--;
1563 } else if (rc == -ENOLCK)
1565 } else if (stride_ria) {
1566                 /* If it is not in the read-ahead window, and we are in
1567                  * stride read-ahead mode, then check whether it should skip
1570                         /* FIXME: This assertion is only valid for
1571                          * forward read-ahead; it will be fixed when backward
1572                          * read-ahead is implemented */
1573                         LASSERTF(page_idx > ria->ria_stoff, "since %lu is in the"
1574                                  " gap of the ra window, it should be bigger than stride"
1575                                  " offset %lu\n", page_idx, ria->ria_stoff);
1577 offset = page_idx - ria->ria_stoff;
1578 offset = offset % (ria->ria_length);
1579 if (offset > ria->ria_pages) {
1580 page_idx += ria->ria_length - offset;
1581 CDEBUG(D_READA, "i %lu skip %lu \n", page_idx,
1582 ria->ria_length - offset);
1591 static int ll_readahead(struct ll_readahead_state *ras,
1592 struct obd_export *exp, struct address_space *mapping,
1593 struct obd_io_group *oig, int flags)
1595 unsigned long start = 0, end = 0, reserved;
1596 unsigned long ra_end, len;
1597 struct inode *inode;
1598 struct lov_stripe_md *lsm;
1599 struct ll_ra_read *bead;
1601 struct ra_io_arg ria = { 0 };
1606 inode = mapping->host;
1607 lsm = ll_i2info(inode)->lli_smd;
1609 lov_stripe_lock(lsm);
1610 inode_init_lvb(inode, &lvb);
1611 obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
1613 lov_stripe_unlock(lsm);
1615 ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
1619 spin_lock(&ras->ras_lock);
1620 bead = ll_ra_read_get_locked(ras);
1621 /* Enlarge the RA window to encompass the full read */
1622 if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
1623 bead->lrr_start + bead->lrr_count) {
1624 obd_off read_end = (bead->lrr_start + bead->lrr_count) <<
1626 obd_extent_calc(exp, lsm, OBD_CALC_STRIPE_RPC_END_ALIGN,
1628 ras->ras_window_len = ((read_end + 1) >> CFS_PAGE_SHIFT) -
1629 ras->ras_window_start;
1631 /* Reserve a part of the read-ahead window that we'll be issuing */
1632 if (ras->ras_window_len) {
1633 start = ras->ras_next_readahead;
1634 end = ras->ras_window_start + ras->ras_window_len - 1;
1637 /* Truncate RA window to end of file */
1638 end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
1639 ras->ras_next_readahead = max(end, end + 1);
1642 ria.ria_start = start;
1644 /* If stride I/O mode is detected, get stride window*/
1645 if (stride_io_mode(ras)) {
1646 ria.ria_stoff = ras->ras_stride_offset;
1647 ria.ria_length = ras->ras_stride_length;
1648 ria.ria_pages = ras->ras_stride_pages;
1650 spin_unlock(&ras->ras_lock);
1653 ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
1657 len = ria_page_count(&ria);
1661 reserved = ll_ra_count_get(ll_i2sbi(inode), len);
1663 ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
1665         CDEBUG(D_READA, "reserved pages %lu\n", reserved);
1667 ret = ll_read_ahead_pages(exp, oig, &ria, &reserved, mapping, &ra_end);
1669 LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
1671 ll_ra_count_put(ll_i2sbi(inode), reserved);
1673 if (ra_end == end + 1 && ra_end == (kms >> CFS_PAGE_SHIFT))
1674 ll_ra_stats_inc(mapping, RA_STAT_EOF);
1676 /* if we didn't get to the end of the region we reserved from
1677 * the ras we need to go back and update the ras so that the
1678 * next read-ahead tries from where we left off. we only do so
1679 * if the region we failed to issue read-ahead on is still ahead
1680 * of the app and behind the next index to start read-ahead from */
1681 CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu \n",
1682 ra_end, end, ria.ria_end);
1684 if (ra_end != (end + 1)) {
1685 spin_lock(&ras->ras_lock);
1686 if (ra_end < ras->ras_next_readahead &&
1687 index_in_window(ra_end, ras->ras_window_start, 0,
1688 ras->ras_window_len)) {
1689 ras->ras_next_readahead = ra_end;
1692 spin_unlock(&ras->ras_lock);
1698 static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
1700 ras->ras_window_start = index & (~(INIT_RAS_WINDOW_PAGES - 1));
1703 /* called with the ras_lock held or from places where it doesn't matter */
1704 static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
1706 ras->ras_last_readpage = index;
1707 ras->ras_consecutive_requests = 0;
1708 ras->ras_consecutive_pages = 0;
1709 ras->ras_window_len = 0;
1710 ras_set_start(ras, index);
1711 ras->ras_next_readahead = max(ras->ras_window_start, index);
1716 /* called with the ras_lock held or from places where it doesn't matter */
1717 static void ras_stride_reset(struct ll_readahead_state *ras)
1719 ras->ras_consecutive_stride_requests = 0;
1720 ras->ras_stride_length = 0;
1721 ras->ras_stride_pages = 0;
1725 void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
1727 spin_lock_init(&ras->ras_lock);
1729 ras->ras_requests = 0;
1730 INIT_LIST_HEAD(&ras->ras_read_beads);
1734 * Check whether the read request is in the stride window.
1735 * If it is in the stride window, return 1, otherwise return 0.
1737 static int index_in_stride_window(unsigned long index,
1738 struct ll_readahead_state *ras,
1739 struct inode *inode)
1741 unsigned long stride_gap = index - ras->ras_last_readpage - 1;
1743 if (ras->ras_stride_length == 0 || ras->ras_stride_pages == 0)
1746 /* If it is contiguous read */
1747 if (stride_gap == 0)
1748 return ras->ras_consecutive_pages + 1 <= ras->ras_stride_pages;
1750         /* Otherwise check the stride by itself */
1751 return (ras->ras_stride_length - ras->ras_stride_pages) == stride_gap &&
1752 ras->ras_consecutive_pages == ras->ras_stride_pages;
1755 static void ras_update_stride_detector(struct ll_readahead_state *ras,
1756 unsigned long index)
1758 unsigned long stride_gap = index - ras->ras_last_readpage - 1;
1760 if (!stride_io_mode(ras) && (stride_gap != 0 ||
1761 ras->ras_consecutive_stride_requests == 0)) {
1762 ras->ras_stride_pages = ras->ras_consecutive_pages;
1763                 ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
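                /* for illustration: after reading pages 0-3, a jump to page 16
                 * gives stride_gap 12 and consecutive_pages 4, so
                 * stride_pages = 4 and stride_length = 16 */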
1768 static unsigned long
1769 stride_page_count(struct ll_readahead_state *ras, unsigned long len)
1771 return stride_pg_count(ras->ras_stride_offset, ras->ras_stride_length,
1772 ras->ras_stride_pages, ras->ras_stride_offset,
1776 /* Stride read-ahead window will be increased by inc_len according to
1777  * the stride I/O pattern */
1778 static void ras_stride_increase_window(struct ll_readahead_state *ras,
1779 struct ll_ra_info *ra,
1780 unsigned long inc_len)
1782 unsigned long left, step, window_len;
1783 unsigned long stride_len;
1785 LASSERT(ras->ras_stride_length > 0);
1786 LASSERTF(ras->ras_window_start + ras->ras_window_len
1787 >= ras->ras_stride_offset, "window_start %lu, window_len %lu"
1788 " stride_offset %lu\n", ras->ras_window_start,
1789 ras->ras_window_len, ras->ras_stride_offset);
1791 stride_len = ras->ras_window_start + ras->ras_window_len -
1792 ras->ras_stride_offset;
1794 left = stride_len % ras->ras_stride_length;
1795 window_len = ras->ras_window_len - left;
1797 if (left < ras->ras_stride_pages)
1800 left = ras->ras_stride_pages + inc_len;
1802 LASSERT(ras->ras_stride_pages != 0);
1804 step = left / ras->ras_stride_pages;
1805 left %= ras->ras_stride_pages;
1807 window_len += step * ras->ras_stride_length + left;
1809 if (stride_page_count(ras, window_len) <= ra->ra_max_pages_per_file)
1810 ras->ras_window_len = window_len;
1815 /* Set stride I/O read-ahead window start offset */
1816 static void ras_set_stride_offset(struct ll_readahead_state *ras)
1818 unsigned long window_len = ras->ras_next_readahead -
1819 ras->ras_window_start;
1822 LASSERT(ras->ras_stride_length != 0);
1824 left = window_len % ras->ras_stride_length;
1826 ras->ras_stride_offset = ras->ras_next_readahead - left;
1831 static void ras_increase_window(struct ll_readahead_state *ras,
1832 struct ll_ra_info *ra, struct inode *inode)
1838 step = ((loff_t)(ras->ras_window_start +
1839 ras->ras_window_len)) << CFS_PAGE_SHIFT;
1840 size = sizeof(step);
1841         /* Get rpc_size for this offset (step) */
1842 rc = obd_get_info(ll_i2obdexp(inode), sizeof(KEY_OFF_RPCSIZE),
1843 KEY_OFF_RPCSIZE, &size, &step,
1844 ll_i2info(inode)->lli_smd);
1846 step = INIT_RAS_WINDOW_PAGES;
1848 if (stride_io_mode(ras))
1849 ras_stride_increase_window(ras, ra, (unsigned long)step);
1851 ras->ras_window_len = min(ras->ras_window_len +
1852 (unsigned long)step,
1856 static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
1857 struct ll_readahead_state *ras, unsigned long index,
1860 struct ll_ra_info *ra = &sbi->ll_ra_info;
1861 int zero = 0, stride_detect = 0, ra_miss = 0;
1864 spin_lock(&ras->ras_lock);
1866 ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
1868 /* reset the read-ahead window in two cases. First when the app seeks
1869 * or reads to some other part of the file. Secondly if we get a
1870 * read-ahead miss that we think we've previously issued. This can
1871 * be a symptom of there being so many read-ahead pages that the VM is
1872          * reclaiming them before we get to them. */
1873 if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
1875 ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE);
1876 } else if (!hit && ras->ras_window_len &&
1877 index < ras->ras_next_readahead &&
1878 index_in_window(index, ras->ras_window_start, 0,
1879 ras->ras_window_len)) {
1881 ll_ra_stats_inc_sbi(sbi, RA_STAT_MISS_IN_WINDOW);
1884 /* On the second access to a file smaller than the tunable
1885 * ra_max_read_ahead_whole_pages trigger RA on all pages in the
1886 * file up to ra_max_pages_per_file. This is simply a best effort
1887 * and only occurs once per open file. Normal RA behavior is reverted
1888 * to for subsequent IO. The mmap case does not increment
1889 * ras_requests and thus can never trigger this behavior. */
1890 if (ras->ras_requests == 2 && !ras->ras_request_index) {
1893 kms_pages = (i_size_read(inode) + CFS_PAGE_SIZE - 1) >>
1896 CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
1897 ra->ra_max_read_ahead_whole_pages,
1898 ra->ra_max_pages_per_file);
1901 kms_pages <= ra->ra_max_read_ahead_whole_pages) {
1902 ras->ras_window_start = 0;
1903 ras->ras_last_readpage = 0;
1904 ras->ras_next_readahead = 0;
1905 ras->ras_window_len = min(ra->ra_max_pages_per_file,
1906 ra->ra_max_read_ahead_whole_pages);
1907 GOTO(out_unlock, 0);
1911 /* check whether it is in stride I/O mode*/
1912 if (!index_in_stride_window(index, ras, inode)) {
1913 ras_reset(ras, index);
1914 ras->ras_consecutive_pages++;
1915 ras_stride_reset(ras);
1916 GOTO(out_unlock, 0);
1918 ras->ras_consecutive_requests = 0;
1919 if (++ras->ras_consecutive_stride_requests > 1)
1925 if (index_in_stride_window(index, ras, inode) &&
1926 stride_io_mode(ras)) {
1927                         /* If stride-RA hits a cache miss, the stride detector
1928                          * will not be reset, to avoid the overhead of
1929                          * redetecting the read-ahead mode */
1930 if (index != ras->ras_last_readpage + 1)
1931 ras->ras_consecutive_pages = 0;
1934 /* Reset both stride window and normal RA
1936 ras_reset(ras, index);
1937 ras->ras_consecutive_pages++;
1938 ras_stride_reset(ras);
1939 GOTO(out_unlock, 0);
1941 } else if (stride_io_mode(ras)) {
1942                 /* If this is a contiguous read but we are currently in stride I/O
1943                  * mode, check whether the stride step is still valid;
1944                  * if not, reset the stride ra window */
1945 if (!index_in_stride_window(index, ras, inode)) {
1946 /* Shrink stride read-ahead window to be zero */
1947 ras_stride_reset(ras);
1948 ras->ras_window_len = 0;
1949 ras->ras_next_readahead = index;
1953 ras->ras_consecutive_pages++;
1954 ras_update_stride_detector(ras, index);
1955 ras->ras_last_readpage = index;
1956 ras_set_start(ras, index);
1957 ras->ras_next_readahead = max(ras->ras_window_start,
1958 ras->ras_next_readahead);
1961 /* Trigger RA in the mmap case where ras_consecutive_requests
1962 * is not incremented and thus can't be used to trigger RA */
1963 if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
1964 ras->ras_window_len = INIT_RAS_WINDOW_PAGES;
1965 GOTO(out_unlock, 0);
1968         /* Initially set the stride window offset to next_readahead */
1969 if (ras->ras_consecutive_stride_requests == 2 && stride_detect)
1970 ras_set_stride_offset(ras);
1972 /* The initial ras_window_len is set to the request size. To avoid
1973 * uselessly reading and discarding pages for random IO the window is
1974 * only increased once per consecutive request received. */
1975 if ((ras->ras_consecutive_requests > 1 &&
1976 !ras->ras_request_index) || stride_detect)
1977 ras_increase_window(ras, ra, inode);
1981 ras->ras_request_index++;
1982 spin_unlock(&ras->ras_lock);
1986 int ll_writepage(struct page *page)
1988 struct inode *inode = page->mapping->host;
1989 struct ll_inode_info *lli = ll_i2info(inode);
1990 struct obd_export *exp;
1991 struct ll_async_page *llap;
1995 LASSERT(PageLocked(page));
1997 exp = ll_i2obdexp(inode);
1999 GOTO(out, rc = -EINVAL);
2001 llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
2003 GOTO(out, rc = PTR_ERR(llap));
2005 LASSERT(!llap->llap_nocache);
2006 LASSERT(!PageWriteback(page));
2007 set_page_writeback(page);
2009 page_cache_get(page);
2010 if (llap->llap_write_queued) {
2011 LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
2012 rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
2014 ASYNC_READY | ASYNC_URGENT);
2016 rc = queue_or_sync_write(exp, inode, llap, CFS_PAGE_SIZE,
2017 ASYNC_READY | ASYNC_URGENT);
2020 /* re-dirty page on error so it retries write */
2021 if (PageWriteback(page))
2022 end_page_writeback(page);
2024                 /* resend the page only if IO has not yet started */
2025 if (!PageError(page))
2026 ll_redirty_page(page);
2028 page_cache_release(page);
2032 if (!lli->lli_async_rc)
2033 lli->lli_async_rc = rc;
2034         /* resend the page only if IO has not yet started */
2041 * for now we do our readpage the same on both 2.4 and 2.5. The kernel's
2042 * read-ahead assumes it is valid to issue readpage all the way up to
2043 * i_size, but our dlm locks make that not the case. We disable the
2044 * kernel's read-ahead and do our own by walking ahead in the page cache
2045  * checking for dlm lock coverage. The main difference between 2.4 and
2046 * 2.6 is how read-ahead gets batched and issued, but we're using our own,
2047 * so they look the same.
2049 int ll_readpage(struct file *filp, struct page *page)
2051 struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
2052 struct inode *inode = page->mapping->host;
2053 struct obd_export *exp;
2054 struct ll_async_page *llap;
2055 struct obd_io_group *oig = NULL;
2056 struct lustre_handle *lockh = NULL;
2060 LASSERT(PageLocked(page));
2061 LASSERT(!PageUptodate(page));
2062 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset=%Lu=%#Lx\n",
2063 inode->i_ino, inode->i_generation, inode,
2064 (((loff_t)page->index) << CFS_PAGE_SHIFT),
2065 (((loff_t)page->index) << CFS_PAGE_SHIFT));
2066 LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);
2068 if (!ll_i2info(inode)->lli_smd) {
2069 /* File with no objects - one big hole */
2070                 /* We use this just in place of remove_from_page_cache(), which is not
2071                  * exported; we make the page up to date again below. */
2072 ll_truncate_complete_page(page);
2073 clear_page(kmap(page));
2075 SetPageUptodate(page);
2080 rc = oig_init(&oig);
2084 exp = ll_i2obdexp(inode);
2085 if (exp == NULL)
2086 GOTO(out, rc = -EINVAL);
2088 if (fd->fd_flags & LL_FILE_GROUP_LOCKED)
2089 lockh = &fd->fd_cwlockh;
2091 llap = llap_from_page_with_lockh(page, LLAP_ORIGIN_READPAGE, lockh);
2093 if (PTR_ERR(llap) == -ENOLCK) {
2094 CWARN("ino %lu page %lu (%llu) not covered by "
2095 "a lock (mmap?). check debug logs.\n",
2096 inode->i_ino, page->index,
2097 (long long)page->index << PAGE_CACHE_SHIFT);
2099 GOTO(out, rc = PTR_ERR(llap));
2102 if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages_per_file)
2103 ras_update(ll_i2sbi(inode), inode, &fd->fd_ras, page->index,
2104 llap->llap_defer_uptodate);
2107 if (llap->llap_defer_uptodate) {
2108 /* This is the call path taken when the page was populated by read-ahead */
2109 llap->llap_ra_used = 1;
2110 rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
2113 obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
2115 LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
2116 SetPageUptodate(page);
2118 GOTO(out_oig, rc = 0);
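/* Sketch of the hit path above: the page was already filled by an earlier
 * ll_readahead() pass (hence llap_defer_uptodate), so no new read is issued
 * for it here; llap_ra_used records that the read-ahead page was actually
 * consumed, another ll_readahead() call keeps the window moving ahead of the
 * reader, and obd_trigger_group_io() submits whatever that call queued
 * before we return with the page marked uptodate. */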
2121 rc = ll_issue_page_read(exp, llap, oig, 0);
2125 LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
2126 /* We have just requested the actual page we want; see if we can tack
2127 * some read-ahead onto that page's RPC before it is sent. */
2128 if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages_per_file)
2129 ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
2132 rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
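/* A rough picture of the flow above, assuming read-ahead is enabled: the
 * synchronously required page is queued on the group (oig) first,
 * ll_readahead() then queues any additional pages it can cover with DLM
 * locks onto the same group, and obd_trigger_group_io() submits them
 * together, so the read-ahead pages ride in the same batch of RPCs as the
 * page the caller is actually waiting for. */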
2143 static void ll_file_put_pages(struct page **pages, int numpages)
2149 for (i = 0, pp = pages; i < numpages; i++, pp++) {
2151 LL_CDEBUG_PAGE(D_PAGE, (*pp), "free\n");
2153 if (page_private(*pp))
2154 CERROR("the llap wasn't freed\n");
2155 (*pp)->mapping = NULL;
2156 if (page_count(*pp) != 1)
2157 CERROR("page %p, flags %#lx, count %i, "
2158 "private %p\n", (*pp),
2159 (unsigned long)(*pp)->flags,
2161 (void*)page_private(*pp));
2162 __free_pages(*pp, 0);
2165 OBD_FREE(pages, numpages * sizeof(struct page*));
2169 static struct page **ll_file_prepare_pages(int numpages, struct inode *inode,
2170 unsigned long first)
2171 {
2172 struct page **pages;
2177 OBD_ALLOC(pages, sizeof(struct page *) * numpages);
2178 if (pages == NULL)
2179 RETURN(ERR_PTR(-ENOMEM));
2180 for (i = 0; i < numpages; i++) {
2182 struct ll_async_page *llap;
2184 page = alloc_pages(GFP_HIGHUSER, 0);
2185 if (page == NULL)
2186 GOTO(err, rc = -ENOMEM);
2188 /* llap_from_page needs page index and mapping to be set */
2189 page->index = first++;
2190 page->mapping = inode->i_mapping;
2191 llap = llap_from_page(page, LLAP_ORIGIN_LOCKLESS_IO);
2192 if (IS_ERR(llap))
2193 GOTO(err, rc = PTR_ERR(llap));
2194 llap->llap_lockless_io_page = 1;
2198 ll_file_put_pages(pages, numpages);
2199 RETURN(ERR_PTR(rc));
2202 static ssize_t ll_file_copy_pages(struct page **pages, int numpages,
2203 const struct iovec *iov, unsigned long nsegs,
2204 ssize_t iov_offset, loff_t pos, size_t count,
2209 int updatechecksum = ll_i2sbi(pages[0]->mapping->host)->ll_flags &
2210 LL_SBI_LLITE_CHECKSUM;
2213 for (i = 0; i < numpages; i++) {
2214 unsigned offset, bytes, left = 0;
2217 vaddr = kmap(pages[i]);
2218 offset = pos & (CFS_PAGE_SIZE - 1);
2219 bytes = min_t(unsigned, CFS_PAGE_SIZE - offset, count);
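/* Worked example, assuming CFS_PAGE_SIZE is 4096: for pos = 0x11800
 * (70 KiB) and count = 10000, offset = 0x800 (2048) and
 * bytes = min(4096 - 2048, 10000) = 2048, so only the tail of the first
 * page is touched; later pages start at offset 0 and take a full page (or
 * whatever is left of count). */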
2220 LL_CDEBUG_PAGE(D_PAGE, pages[i], "op = %s, addr = %p, "
2222 (rw == WRITE) ? "CFU" : "CTU",
2223 vaddr + offset, bytes);
2224 while (bytes > 0 && !left && nsegs) {
2225 unsigned copy = min_t(ssize_t, bytes,
2226 iov->iov_len - iov_offset);
2228 left = copy_from_user(vaddr + offset,
2229 iov->iov_base + iov_offset,
2231 if (updatechecksum) {
2232 struct ll_async_page *llap;
2234 llap = llap_cast_private(pages[i]);
2235 llap->llap_checksum =
2236 init_checksum(OSC_DEFAULT_CKSUM);
2237 llap->llap_checksum =
2238 compute_checksum(llap->llap_checksum,
2239 vaddr, CFS_PAGE_SIZE,
2243 left = copy_to_user(iov->iov_base + iov_offset,
2244 vaddr + offset, copy);
2252 if (iov_offset == iov->iov_len) {
2269 static int ll_file_oig_pages(struct inode * inode, struct page **pages,
2270 int numpages, loff_t pos, size_t count, int rw)
2271 {
2272 struct obd_io_group *oig;
2273 struct ll_inode_info *lli = ll_i2info(inode);
2274 struct obd_export *exp;
2275 loff_t org_pos = pos;
2281 exp = ll_i2obdexp(inode);
2284 rc = oig_init(&oig);
2287 brw_flags = OBD_BRW_SRVLOCK;
2288 if (cfs_capable(CFS_CAP_SYS_RESOURCE))
2289 brw_flags |= OBD_BRW_NOQUOTA;
2291 for (i = 0; i < numpages; i++) {
2292 struct ll_async_page *llap;
2293 unsigned from, bytes;
2295 from = pos & (CFS_PAGE_SIZE - 1);
2296 bytes = min_t(unsigned, CFS_PAGE_SIZE - from,
2297 count - pos + org_pos);
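/* Here count - pos + org_pos is the number of requested bytes not yet
 * queued, since pos advances page by page while org_pos keeps the starting
 * offset. Worked example, assuming CFS_PAGE_SIZE is 4096: with
 * org_pos = 2048 and count = 10000, the three pages queue 2048, 4096 and
 * 3856 bytes; on the last page pos = 8192, so 10000 - 8192 + 2048 = 3856. */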
2298 llap = llap_cast_private(pages[i]);
2301 lock_page(pages[i]);
2303 LL_CDEBUG_PAGE(D_PAGE, pages[i], "offset "LPU64","
2304 " from %u, bytes = %u\n",
2305 (__u64)pos, from, bytes);
2306 LASSERTF(pos >> CFS_PAGE_SHIFT == pages[i]->index,
2307 "wrong page index %lu (%lu)\n",
2309 (unsigned long)(pos >> CFS_PAGE_SHIFT));
2310 rc = obd_queue_group_io(exp, lli->lli_smd, NULL, oig,
2313 OBD_BRW_WRITE:OBD_BRW_READ,
2314 from, bytes, brw_flags,
2315 ASYNC_READY | ASYNC_URGENT |
2316 ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
2323 rc = obd_trigger_group_io(exp, lli->lli_smd, NULL, oig);
2329 unlock_page(pages[i]);
2334 /* Advance through the passed iov array, adjusting the iov pointer as
2335 * necessary, and return the starting offset within the entry we end up
2336 * pointing at. Also reduce nr_segs as needed. */
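/* For example, given two entries with iov_len 1000 and 4096 and an
 * accumulated offset of 1200, the first entry is skipped (offset becomes
 * 200, nr_segs drops to 1) and 200 is returned as the starting offset
 * within the second entry. */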
2337 static ssize_t ll_iov_advance(const struct iovec **iov, unsigned long *nr_segs,
2340 while (*nr_segs > 0) {
2341 if ((*iov)->iov_len > offset)
2342 return offset;
2343 offset -= (*iov)->iov_len;
2350 ssize_t ll_file_lockless_io(struct file *file, const struct iovec *iov,
2351 unsigned long nr_segs,
2352 loff_t *ppos, int rw, ssize_t count)
2355 struct inode *inode = file->f_dentry->d_inode;
2359 unsigned long first, last;
2360 const struct iovec *iv = &iov[0];
2361 unsigned long nsegs = nr_segs;
2362 unsigned long offset = 0;
2368 ll_inode_size_lock(inode, 0);
2369 isize = i_size_read(inode);
2370 ll_inode_size_unlock(inode, 0);
2373 if (*ppos + count >= isize)
2374 count -= *ppos + count - isize;
2378 rc = generic_write_checks(file, ppos, &count, 0);
2381 rc = ll_remove_suid(file, file->f_vfsmnt);
2387 first = pos >> CFS_PAGE_SHIFT;
2388 last = (pos + count - 1) >> CFS_PAGE_SHIFT;
2389 max_pages = PTLRPC_MAX_BRW_PAGES *
2390 ll_i2info(inode)->lli_smd->lsm_stripe_count;
2391 CDEBUG(D_INFO, "%u, stripe_count = %u\n",
2392 PTLRPC_MAX_BRW_PAGES /* max_pages_per_rpc */,
2393 ll_i2info(inode)->lli_smd->lsm_stripe_count);
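/* Worked example for the bounds above, assuming CFS_PAGE_SIZE is 4096 and
 * PTLRPC_MAX_BRW_PAGES is 256: a request of count = 10000 bytes at
 * pos = 5000 covers bytes 5000..14999, i.e. first = 1 and last = 3, and a
 * stripe count of 4 allows up to max_pages = 1024 pages per iteration of
 * the loop below. */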
2395 while (first <= last && rc >= 0) {
2397 struct page **pages;
2398 size_t bytes = count - amount;
2400 pages_for_io = min_t(int, last - first + 1, max_pages);
2401 pages = ll_file_prepare_pages(pages_for_io, inode, first);
2402 if (IS_ERR(pages)) {
2403 rc = PTR_ERR(pages);
2407 rc = ll_file_copy_pages(pages, pages_for_io, iv, nsegs,
2408 offset, pos + amount, bytes,
2411 GOTO(put_pages, rc);
2412 offset = ll_iov_advance(&iv, &nsegs, offset + rc);
2415 rc = ll_file_oig_pages(inode, pages, pages_for_io,
2416 pos + amount, bytes, rw);
2418 GOTO(put_pages, rc);
2420 rc = ll_file_copy_pages(pages, pages_for_io, iv, nsegs,
2421 offset, pos + amount, bytes, rw);
2423 GOTO(put_pages, rc);
2424 offset = ll_iov_advance(&iv, &nsegs, offset + rc);
2429 ll_file_put_pages(pages, pages_for_io);
2430 first += pages_for_io;
2431 /* a short read/write check */
2432 if (pos + amount < ((loff_t)first << CFS_PAGE_SHIFT))
2434 /* Check if we are out of userspace buffers. (how that could
2439 /* NOTE: don't update i_size and KMS in the absence of LDLM locks, even
2440 * if the write makes the file larger */
2441 file_accessed(file);
2442 if (rw == READ && amount < count && rc == 0) {
2443 unsigned long not_cleared;
2446 ssize_t to_clear = min_t(ssize_t, count - amount,
2447 iv->iov_len - offset);
2448 not_cleared = clear_user(iv->iov_base + offset,
2450 amount += to_clear - not_cleared;
2461 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
2463 LPROC_LL_LOCKLESS_WRITE :
2464 LPROC_LL_LOCKLESS_READ,