/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Lustre Lite I/O page cache routines shared by different kernel revs
 *
 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>
#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
             pos = n, n = pos->prev)
#endif
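/* Illustrative sketch only (not from the original source): the macro allows
 * deleting "pos" while walking a list backwards, because "n" is sampled
 * before the loop body runs.  ll_pglist is the per-superblock LRU used
 * later in this file:
 *
 *      struct list_head *pos, *n;
 *      list_for_each_prev_safe(pos, n, &sbi->ll_pglist)
 *              list_del_init(pos);
 */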
cfs_mem_cache_t *ll_async_page_slab = NULL;
size_t ll_async_page_slab_size = 0;
/* SYNCHRONOUS I/O to object storage for an inode */
static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
                  struct page *page, int flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_info oinfo = { { { 0 } } };
        struct brw_page pg;
        int rc;
        ENTRY;

        pg.pg = page;
        pg.off = ((obd_off)page->index) << CFS_PAGE_SHIFT;

        if ((cmd & OBD_BRW_WRITE) && (pg.off+CFS_PAGE_SIZE>i_size_read(inode)))
                pg.count = i_size_read(inode) % CFS_PAGE_SIZE;
        else
                pg.count = CFS_PAGE_SIZE;

        LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
                       cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
                       inode->i_ino, pg.off, pg.off);
        if (pg.count == 0) {
                CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
                       LPU64"\n", inode->i_ino, inode, i_size_read(inode),
                       page->mapping->host, i_size_read(page->mapping->host),
                       page->index, pg.off);
        }
        pg.flag = flags;

        if (cmd & OBD_BRW_WRITE)
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_WRITE,
                                   pg.count);
        else
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_READ,
                                   pg.count);
        oinfo.oi_oa = oa;
        oinfo.oi_md = lsm;
        rc = obd_brw(cmd, ll_i2obdexp(inode), &oinfo, 1, &pg, NULL);
        if (rc == 0)
                obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
        else if (rc != -EIO)
                CERROR("error from obd_brw: rc = %d\n", rc);
        RETURN(rc);
}
int ll_file_punch(struct inode * inode, loff_t new_size, int srvlock)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_info oinfo = { { { 0 } } };
        struct obdo oa;
        int rc;
        ENTRY;

        CDEBUG(D_INFO, "calling punch for "LPX64" (new size %Lu=%#Lx)\n",
               lli->lli_smd->lsm_object_id, new_size, new_size);

        oinfo.oi_md = lli->lli_smd;
        oinfo.oi_policy.l_extent.start = new_size;
        oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
        oinfo.oi_oa = &oa;

        oa.o_id = lli->lli_smd->lsm_object_id;
        oa.o_valid = OBD_MD_FLID;
        oa.o_flags = srvlock ? OBD_FL_TRUNCLOCK : 0;
        obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |OBD_MD_FLFID|
                        OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                        OBD_MD_FLUID | OBD_MD_FLGID | OBD_MD_FLGENER |
                        OBD_MD_FLBLOCKS);

        rc = obd_punch_rqset(ll_i2obdexp(inode), &oinfo, NULL);
        if (rc) {
                CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
                RETURN(rc);
        }
        obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                      OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);
        RETURN(0);
}
/* this isn't where truncate starts.  roughly:
 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate.  setattr_raw grabs
 * DLM lock on [size, EOF], i_mutex, ->lli_size_sem, and WRITE_I_ALLOC_SEM to
 * avoid races.
 *
 * must be called under ->lli_size_sem */
void ll_truncate(struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        int srvlock = test_bit(LLI_F_SRVLOCK, &lli->lli_flags);
        loff_t new_size;
        ENTRY;

        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu=%#Lx\n",inode->i_ino,
               inode->i_generation, inode, i_size_read(inode), i_size_read(inode));

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
        if (lli->lli_size_sem_owner != current) {
                EXIT;
                return;
        }

        if (!lli->lli_smd) {
                CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
                       inode->i_ino);
                GOTO(out_unlock, 0);
        }

        LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);

        if (!srvlock) {
                struct ost_lvb lvb;
                int rc;

                /* XXX I'm pretty sure this is a hack to paper over a more
                 * fundamental race condition. */
                lov_stripe_lock(lli->lli_smd);
                inode_init_lvb(inode, &lvb);
                rc = obd_merge_lvb(ll_i2obdexp(inode), lli->lli_smd, &lvb, 0);
                inode->i_blocks = lvb.lvb_blocks;
                if (lvb.lvb_size == i_size_read(inode) && rc == 0) {
                        CDEBUG(D_VFSTRACE, "skipping punch for obj "LPX64", %Lu=%#Lx\n",
                               lli->lli_smd->lsm_object_id, i_size_read(inode),
                               i_size_read(inode));
                        lov_stripe_unlock(lli->lli_smd);
                        GOTO(out_unlock, 0);
                }

                obd_adjust_kms(ll_i2obdexp(inode), lli->lli_smd,
                               i_size_read(inode), 1);
                lov_stripe_unlock(lli->lli_smd);
        }

        if (unlikely((ll_i2sbi(inode)->ll_flags & LL_SBI_LLITE_CHECKSUM) &&
                     (i_size_read(inode) & ~CFS_PAGE_MASK))) {
                /* If the truncate leaves a partial page, update its checksum */
                struct page *page = find_get_page(inode->i_mapping,
                                                  i_size_read(inode) >>
                                                  CFS_PAGE_SHIFT);
                if (page != NULL) {
                        struct ll_async_page *llap = llap_cast_private(page);
                        if (llap != NULL) {
                                char *kaddr = kmap_atomic(page, KM_USER0);
                                llap->llap_checksum =
                                        init_checksum(OSC_DEFAULT_CKSUM);
                                llap->llap_checksum =
                                        compute_checksum(llap->llap_checksum,
                                                         kaddr, CFS_PAGE_SIZE,
                                                         OSC_DEFAULT_CKSUM);
                                kunmap_atomic(kaddr, KM_USER0);
                        }
                        page_cache_release(page);
                }
        }

        new_size = i_size_read(inode);
        ll_inode_size_unlock(inode, 0);
        if (!srvlock)
                ll_file_punch(inode, new_size, 0);

        EXIT;
        return;

 out_unlock:
        ll_inode_size_unlock(inode, 0);
} /* ll_truncate */
int ll_prepare_write(struct file *file, struct page *page, unsigned from,
                     unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        obd_off offset = ((obd_off)page->index) << CFS_PAGE_SHIFT;
        struct obd_info oinfo = { { { 0 } } };
        struct brw_page pga;
        struct obdo oa;
        struct ost_lvb lvb;
        int rc = 0;
        ENTRY;

        LASSERT(PageLocked(page));
        (void)llap_cast_private(page); /* assertion */

        /* Check to see if we should return -EIO right away */
        pga.pg = page;
        pga.off = offset;
        pga.count = CFS_PAGE_SIZE;
        pga.flag = 0;

        oa.o_mode = inode->i_mode;
        oa.o_id = lsm->lsm_object_id;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE | OBD_MD_FLTYPE;
        obdo_from_inode(&oa, inode, OBD_MD_FLFID | OBD_MD_FLGENER);

        oinfo.oi_oa = &oa;
        oinfo.oi_md = lsm;
        rc = obd_brw(OBD_BRW_CHECK, ll_i2obdexp(inode), &oinfo, 1, &pga, NULL);
        if (rc)
                RETURN(rc);

        if (PageUptodate(page)) {
                LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
                RETURN(0);
        }

        /* We're completely overwriting an existing page, so _don't_ set it up
         * to date until commit_write */
        if (from == 0 && to == CFS_PAGE_SIZE) {
                LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
                POISON_PAGE(page, 0x11);
                RETURN(0);
        }

        /* If we are writing to a new page, no need to read old data.  The
         * extent locking will have updated the KMS, and for our purposes
         * here we can treat it like i_size. */
        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
        lov_stripe_unlock(lsm);
        if (lvb.lvb_size <= offset) {
                char *kaddr = kmap_atomic(page, KM_USER0);
                LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
                               lvb.lvb_size, offset);
                memset(kaddr, 0, CFS_PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                GOTO(prepare_done, rc = 0);
        }

        /* XXX could be an async ocp read.. read-ahead? */
        rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
        if (rc == 0) {
                /* bug 1598: don't clobber blksize */
                oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
                obdo_refresh_inode(inode, &oa, oa.o_valid);
        }

        EXIT;
 prepare_done:
        if (rc == 0)
                SetPageUptodate(page);

        RETURN(rc);
}
static int ll_ap_make_ready(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;

        LASSERTF(!(cmd & OBD_BRW_READ), "cmd %x page %p ino %lu index %lu\n", cmd, page,
                 page->mapping->host->i_ino, page->index);

        /* we're trying to write, but the page is locked.. come back later */
        if (TryLockPage(page))
                RETURN(-EAGAIN);

        LASSERT(!PageWriteback(page));

        /* if we left PageDirty we might get another writepage call
         * in the future.  list walkers are bright enough
         * to check page dirty so we can leave it on whatever list
         * it's on.  XXX also, we're called with the cli list so if
         * we got the page cache list we'd create a lock inversion
         * with the removepage path which gets the page lock then the
         * cli lock */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
        clear_page_dirty(page);
#else
        LASSERTF(!PageWriteback(page),"cmd %x page %p ino %lu index %lu\n", cmd, page,
                 page->mapping->host->i_ino, page->index);
        clear_page_dirty_for_io(page);

        /* This actually clears the dirty bit in the radix tree.*/
        set_page_writeback(page);
#endif

        LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
        page_cache_get(page);

        RETURN(0);
}
/* We have two reasons for giving llite the opportunity to change the
 * write length of a given queued page as it builds the RPC containing
 * the page:
 *
 * 1) Further extending writes may have landed in the page cache
 *    since a partial write first queued this page, requiring us
 *    to write more from the page cache.  (No further races are possible,
 *    since by the time this is called, the page is locked.)
 * 2) We might have raced with truncate and want to avoid performing
 *    write RPCs that are just going to be thrown away by the
 *    truncate's punch on the storage targets.
 *
 * The kms serves these purposes as it is set at both truncate and extending
 * writes.
 */
static int ll_ap_refresh_count(void *data, int cmd)
{
        struct ll_inode_info *lli;
        struct ll_async_page *llap;
        struct lov_stripe_md *lsm;
        struct page *page;
        struct inode *inode;
        struct ost_lvb lvb;
        __u64 kms;
        ENTRY;

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(cmd != OBD_BRW_READ);

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        inode = page->mapping->host;
        lli = ll_i2info(inode);
        lsm = lli->lli_smd;

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;
        lov_stripe_unlock(lsm);

        /* catch race with truncate */
        if (((__u64)page->index << CFS_PAGE_SHIFT) >= kms)
                return 0;

        /* catch sub-page write at end of file */
        if (((__u64)page->index << CFS_PAGE_SHIFT) + CFS_PAGE_SIZE > kms)
                return kms % CFS_PAGE_SIZE;

        return CFS_PAGE_SIZE;
}
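/* Worked example (hypothetical sizes, assuming CFS_PAGE_SIZE = 4096): if kms
 * is 22528 (5.5 pages), a queued write to page index 4 spans [16384, 20480),
 * entirely below kms, so the full CFS_PAGE_SIZE is written; page index 5
 * spans [20480, 24576), which crosses kms, so only kms % CFS_PAGE_SIZE =
 * 2048 bytes are sent; page index 6 starts at 24576 >= kms (a racing
 * truncate), so 0 is returned and no bytes go out for that page. */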
void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
{
        struct lov_stripe_md *lsm;
        obd_flag valid_flags;

        lsm = ll_i2info(inode)->lli_smd;

        oa->o_id = lsm->lsm_object_id;
        oa->o_valid = OBD_MD_FLID;
        valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
        if (cmd & OBD_BRW_WRITE) {
                oa->o_valid |= OBD_MD_FLEPOCH;
                oa->o_easize = ll_i2info(inode)->lli_io_epoch;

                valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                               OBD_MD_FLUID | OBD_MD_FLGID |
                               OBD_MD_FLFID | OBD_MD_FLGENER;
        }

        obdo_from_inode(oa, inode, valid_flags);
}
static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);

        EXIT;
}

static void ll_ap_update_obdo(void *data, int cmd, struct obdo *oa,
                              obd_valid valid)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        obdo_from_inode(oa, llap->llap_page->mapping->host, valid);

        EXIT;
}
static struct obd_async_page_ops ll_async_page_ops = {
        .ap_make_ready =    ll_ap_make_ready,
        .ap_refresh_count = ll_ap_refresh_count,
        .ap_fill_obdo =     ll_ap_fill_obdo,
        .ap_update_obdo =   ll_ap_update_obdo,
        .ap_completion =    ll_ap_completion,
};
struct ll_async_page *llap_cast_private(struct page *page)
{
        struct ll_async_page *llap = (struct ll_async_page *)page_private(page);

        LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
                 "page %p private %lu gave magic %d which != %d\n",
                 page, page_private(page), llap->llap_magic, LLAP_MAGIC);

        return llap;
}
/* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
 *
 * There is an llap attached onto every page in lustre, linked off @sbi.
 * We add an llap to the list so we don't lose our place during list walking.
 *
 * If llaps in the list are being moved they will only move to the end
 * of the LRU, and we aren't terribly interested in those pages here (we
 * start at the beginning of the list where the least-used llaps are). */
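/* A sketch of that cursor technique, using names from the function below
 * (dummy_llap lives on the stack and is recognizable by its bogus magic):
 *
 *      list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);
 *      while (...) {
 *              llap = llite_pglist_next_llap(sbi,
 *                                            &dummy_llap.llap_pglist_item);
 *              list_del_init(&dummy_llap.llap_pglist_item);
 *              if (llap == NULL)
 *                      break;
 *              list_add(&dummy_llap.llap_pglist_item,
 *                       &llap->llap_pglist_item);
 *              ... examine llap, possibly dropping sbi->ll_lock ...
 *      }
 */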
int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
{
        struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
        unsigned long total, want, count = 0;

        total = sbi->ll_async_page_count;

        /* There can be a large number of llaps (600k or more in a large
         * memory machine) so the VM 1/6 shrink ratio is likely too much.
         * Since we are freeing pages also, we don't necessarily want to
         * shrink so much.  Limit to 40MB of pages + llaps per call. */
        if (shrink_fraction == 0)
                want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
        else
                want = (total + shrink_fraction - 1) / shrink_fraction;

        if (want > 40 << (20 - CFS_PAGE_SHIFT))
                want = 40 << (20 - CFS_PAGE_SHIFT);

        CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
               want, total, shrink_fraction);

        spin_lock(&sbi->ll_lock);
        list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);

        while (total-- > 0 && count < want) {
                struct page *page;
                int keep;

                if (unlikely(need_resched())) {
                        spin_unlock(&sbi->ll_lock);
                        cond_resched();
                        spin_lock(&sbi->ll_lock);
                }

                llap = llite_pglist_next_llap(sbi,&dummy_llap.llap_pglist_item);
                list_del_init(&dummy_llap.llap_pglist_item);
                if (llap == NULL)
                        break;

                page = llap->llap_page;
                LASSERT(page != NULL);

                list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);

                /* Page needs/undergoing IO */
                if (TryLockPage(page)) {
                        LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
                        continue;
                }

                keep = (llap->llap_write_queued || PageDirty(page) ||
                        PageWriteback(page) || (!PageUptodate(page) &&
                        llap->llap_origin != LLAP_ORIGIN_READAHEAD));

                LL_CDEBUG_PAGE(D_PAGE, page,"%s LRU page: %s%s%s%s%s origin %s\n",
                               keep ? "keep" : "drop",
                               llap->llap_write_queued ? "wq " : "",
                               PageDirty(page) ? "pd " : "",
                               PageUptodate(page) ? "" : "!pu ",
                               PageWriteback(page) ? "wb" : "",
                               llap->llap_defer_uptodate ? "" : "!du",
                               llap_origins[llap->llap_origin]);

                /* If page is dirty or undergoing IO don't discard it */
                if (keep) {
                        unlock_page(page);
                        continue;
                }

                page_cache_get(page);
                spin_unlock(&sbi->ll_lock);

                if (page->mapping != NULL) {
                        ll_teardown_mmaps(page->mapping,
                                          (__u64)page->index << CFS_PAGE_SHIFT,
                                          ((__u64)page->index << CFS_PAGE_SHIFT)|
                                          ~CFS_PAGE_MASK);
                        if (!PageDirty(page) && !page_mapped(page)) {
                                ll_ra_accounting(llap, page->mapping);
                                ll_truncate_complete_page(page);
                                ++count;
                        } else {
                                LL_CDEBUG_PAGE(D_PAGE, page, "Not dropping page"
                                               " because it is %s\n",
                                               PageDirty(page) ?
                                               "dirty" : "mapped");
                        }
                }
                unlock_page(page);
                page_cache_release(page);

                spin_lock(&sbi->ll_lock);
        }
        list_del(&dummy_llap.llap_pglist_item);
        spin_unlock(&sbi->ll_lock);

        CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
               count, want, total);

        return count;
}
static struct ll_async_page *llap_from_page_with_lockh(struct page *page,
                                                       unsigned origin,
                                                       struct lustre_handle *lockh)
{
        struct ll_async_page *llap;
        struct obd_export *exp;
        struct inode *inode = page->mapping->host;
        struct ll_sb_info *sbi;
        int rc;
        ENTRY;

        if (!inode) {
                static int triggered;

                if (!triggered) {
                        LL_CDEBUG_PAGE(D_ERROR, page, "Bug 10047. Wrong anon "
                                       "mapping\n");
                        libcfs_debug_dumpstack(NULL);
                        triggered = 1;
                }
                RETURN(ERR_PTR(-EINVAL));
        }
        sbi = ll_i2sbi(inode);
        LASSERT(ll_async_page_slab);
        LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);

        llap = llap_cast_private(page);
        if (llap != NULL) {
                /* move to end of LRU list, except when page is just about to
                 * die */
                if (origin != LLAP_ORIGIN_REMOVEPAGE) {
                        spin_lock(&sbi->ll_lock);
                        sbi->ll_pglist_gen++;
                        list_del_init(&llap->llap_pglist_item);
                        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
                        spin_unlock(&sbi->ll_lock);
                }
                GOTO(out, llap);
        }

        exp = ll_i2obdexp(page->mapping->host);
        if (exp == NULL)
                RETURN(ERR_PTR(-EINVAL));

        /* limit the number of lustre-cached pages */
        if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
                llap_shrink_cache(sbi, 0);

        OBD_SLAB_ALLOC(llap, ll_async_page_slab, CFS_ALLOC_STD,
                       ll_async_page_slab_size);
        if (llap == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        llap->llap_magic = LLAP_MAGIC;
        llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));

        /* XXX: for bug 11270 - check for lockless origin here! */
        if (origin == LLAP_ORIGIN_LOCKLESS_IO)
                llap->llap_nocache = 1;

        rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
                                 (obd_off)page->index << CFS_PAGE_SHIFT,
                                 &ll_async_page_ops, llap, &llap->llap_cookie,
                                 llap->llap_nocache, lockh);
        if (rc) {
                OBD_SLAB_FREE(llap, ll_async_page_slab,
                              ll_async_page_slab_size);
                RETURN(ERR_PTR(rc));
        }

        CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
               page, llap->llap_cookie, (obd_off)page->index << CFS_PAGE_SHIFT);
        /* also zeroing the PRIVBITS low order bitflags */
        __set_page_ll_data(page, llap);
        llap->llap_page = page;

        spin_lock(&sbi->ll_lock);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count++;
        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
        spin_unlock(&sbi->ll_lock);

 out:
        if (unlikely(sbi->ll_flags & LL_SBI_LLITE_CHECKSUM)) {
                __u32 csum;
                char *kaddr = kmap_atomic(page, KM_USER0);
                csum = init_checksum(OSC_DEFAULT_CKSUM);
                csum = compute_checksum(csum, kaddr, CFS_PAGE_SIZE,
                                        OSC_DEFAULT_CKSUM);
                kunmap_atomic(kaddr, KM_USER0);
                if (origin == LLAP_ORIGIN_READAHEAD ||
                    origin == LLAP_ORIGIN_READPAGE ||
                    origin == LLAP_ORIGIN_LOCKLESS_IO) {
                        llap->llap_checksum = 0;
                } else if (origin == LLAP_ORIGIN_COMMIT_WRITE ||
                           llap->llap_checksum == 0) {
                        llap->llap_checksum = csum;
                        CDEBUG(D_PAGE, "page %p cksum %x\n", page, csum);
                } else if (llap->llap_checksum == csum) {
                        /* origin == LLAP_ORIGIN_WRITEPAGE */
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        /* origin == LLAP_ORIGIN_WRITEPAGE */
                        LL_CDEBUG_PAGE(D_ERROR, page, "old cksum %x != new "
                                       "%x!\n", llap->llap_checksum, csum);
                }
        }

        llap->llap_origin = origin;
        RETURN(llap);
}

static inline struct ll_async_page *llap_from_page(struct page *page,
                                                   unsigned origin)
{
        return llap_from_page_with_lockh(page, origin, NULL);
}
static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
                               struct ll_async_page *llap,
                               unsigned to, obd_flag async_flags)
{
        unsigned long size_index = i_size_read(inode) >> CFS_PAGE_SHIFT;
        struct obd_io_group *oig;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc, noquot = llap->llap_ignore_quota ? OBD_BRW_NOQUOTA : 0;
        ENTRY;

        /* _make_ready only sees llap once we've unlocked the page */
        llap->llap_write_queued = 1;
        rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, 0, 0, async_flags);
        if (rc == 0) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
                llap_write_pending(inode, llap);
                GOTO(out, 0);
        }

        llap->llap_write_queued = 0;

        rc = oig_init(&oig);
        if (rc)
                GOTO(out, rc);

        /* make full-page requests if we are not at EOF (bug 4410) */
        if (to != CFS_PAGE_SIZE && llap->llap_page->index < size_index) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write before EOF: size_index %lu, to %d\n",
                               size_index, to);
                to = CFS_PAGE_SIZE;
        } else if (to != CFS_PAGE_SIZE && llap->llap_page->index == size_index){
                int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write at EOF: size_index %lu, to %d/%d\n",
                               size_index, to, size_to);
                if (to < size_to)
                        to = size_to;
        }

        /* compare the checksum once before the page leaves llite */
        if (unlikely((sbi->ll_flags & LL_SBI_LLITE_CHECKSUM) &&
                     llap->llap_checksum != 0)) {
                __u32 csum;
                struct page *page = llap->llap_page;
                char *kaddr = kmap_atomic(page, KM_USER0);
                csum = init_checksum(OSC_DEFAULT_CKSUM);
                csum = compute_checksum(csum, kaddr, CFS_PAGE_SIZE,
                                        OSC_DEFAULT_CKSUM);
                kunmap_atomic(kaddr, KM_USER0);
                if (llap->llap_checksum == csum) {
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        CERROR("page %p old cksum %x != new cksum %x!\n",
                               page, llap->llap_checksum, csum);
                }
        }

        rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, to, 0, ASYNC_READY | ASYNC_URGENT |
                                ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
        if (rc)
                GOTO(free_oig, rc);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
        if (rc)
                GOTO(free_oig, rc);

        rc = oig_wait(oig);

        if (!rc && async_flags & ASYNC_READY) {
                unlock_page(llap->llap_page);
                if (PageWriteback(llap->llap_page)) {
                        end_page_writeback(llap->llap_page);
                }
        }

        LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);

 free_oig:
        oig_release(oig);
 out:
        RETURN(rc);
}
/* update our write count to account for i_size increases that may have
 * happened since we've queued the page for io. */

/* be careful not to return success without setting the page Uptodate or
 * the next pass through prepare_write will read in stale data from disk. */
int ll_commit_write(struct file *file, struct page *page, unsigned from,
                    unsigned to)
{
        struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_export *exp;
        struct ll_async_page *llap;
        loff_t size;
        struct lustre_handle *lockh = NULL;
        int rc = 0;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
        LASSERT(inode == file->f_dentry->d_inode);
        LASSERT(PageLocked(page));

        CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
               inode, page, from, to, page->index);

        if (fd->fd_flags & LL_FILE_GROUP_LOCKED)
                lockh = &fd->fd_cwlockh;

        llap = llap_from_page_with_lockh(page, LLAP_ORIGIN_COMMIT_WRITE, lockh);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                RETURN(-EINVAL);

        llap->llap_ignore_quota = capable(CAP_SYS_RESOURCE);

        /* queue a write for some time in the future the first time we
         * dirty the page */
        if (!PageDirty(page)) {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_MISSES, 1);

                rc = queue_or_sync_write(exp, inode, llap, to, 0);
                if (rc)
                        GOTO(out, rc);
        } else {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_HITS, 1);
        }

        /* put the page in the page cache, from now on ll_removepage is
         * responsible for cleaning up the llap.
         * only set the page dirty when it's queued to be written out */
        if (llap->llap_write_queued)
                set_page_dirty(page);

 out:
        size = (((obd_off)page->index) << CFS_PAGE_SHIFT) + to;
        ll_inode_size_lock(inode, 0);
        if (rc == 0) {
                lov_stripe_lock(lsm);
                obd_adjust_kms(exp, lsm, size, 0);
                lov_stripe_unlock(lsm);
                if (size > i_size_read(inode))
                        i_size_write(inode, size);
                SetPageUptodate(page);
        } else if (size > i_size_read(inode)) {
                /* this page beyond the pales of i_size, so it can't be
                 * truncated in ll_p_r_e during lock revoking.  we must
                 * teardown our book-keeping here. */
                ll_removepage(page);
        }
        ll_inode_size_unlock(inode, 0);
        RETURN(rc);
}
static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        unsigned long ret;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
        ra->ra_cur_pages += ret;
        spin_unlock(&sbi->ll_lock);

        RETURN(ret);
}

static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        spin_lock(&sbi->ll_lock);
        LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
                 ra->ra_cur_pages, len);
        ra->ra_cur_pages -= len;
        spin_unlock(&sbi->ll_lock);
}
/* called for each page in a completed rpc.*/
int ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        LASSERT(PageLocked(page));
        LASSERT(CheckWriteback(page,cmd));

        LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);

        if (cmd & OBD_BRW_READ && llap->llap_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);

        if (rc == 0) {
                if (cmd & OBD_BRW_READ) {
                        if (!llap->llap_defer_uptodate)
                                SetPageUptodate(page);
                } else {
                        llap->llap_write_queued = 0;
                }
                ClearPageError(page);
        } else {
                if (cmd & OBD_BRW_READ) {
                        llap->llap_defer_uptodate = 0;
                }
                SetPageError(page);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
                if (rc == -ENOSPC)
                        set_bit(AS_ENOSPC, &page->mapping->flags);
                else
                        set_bit(AS_EIO, &page->mapping->flags);
#else
                page->mapping->gfp_mask |= AS_EIO_MASK;
#endif
        }

        if (cmd & OBD_BRW_WRITE) {
                llap_write_complete(page->mapping->host, llap);
                ll_try_done_writing(page->mapping->host);
        }
        unlock_page(page);

        if (PageWriteback(page)) {
                end_page_writeback(page);
        }
        page_cache_release(page);

        RETURN(rc);
}
static void __ll_put_llap(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        exp = ll_i2obdexp(inode);
        if (exp == NULL) {
                CERROR("page %p ind %lu gave null export\n", page, page->index);
                EXIT;
                return;
        }

        llap = llap_from_page(page, LLAP_ORIGIN_REMOVEPAGE);
        if (IS_ERR(llap)) {
                CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
                       page->index, PTR_ERR(llap));
                EXIT;
                return;
        }

        //llap_write_complete(inode, llap);
        rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
                                     llap->llap_cookie);
        if (rc != 0)
                CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);

        /* this unconditional free is only safe because the page lock
         * is providing exclusivity to memory pressure/truncate/writeback..*/
        __clear_page_ll_data(page);

        spin_lock(&sbi->ll_lock);
        if (!list_empty(&llap->llap_pglist_item))
                list_del_init(&llap->llap_pglist_item);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count--;
        spin_unlock(&sbi->ll_lock);
        OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
        EXIT;
}
/* the kernel calls us here when a page is unhashed from the page cache.
 * the page will be locked and the kernel is holding a spinlock, so
 * we need to be careful.  we're just tearing down our book-keeping
 * here. */
void ll_removepage(struct page *page)
{
        struct ll_async_page *llap = llap_cast_private(page);
        ENTRY;

        LASSERT(!in_interrupt());

        /* sync pages or failed read pages can leave pages in the page
         * cache that don't have our data associated with them anymore */
        if (page_private(page) == 0) {
                EXIT;
                return;
        }

        LASSERT(!llap->llap_lockless_io_page);
        LASSERT(!llap->llap_nocache);

        LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");
        __ll_put_llap(page);
        EXIT;
}
static int ll_issue_page_read(struct obd_export *exp,
                              struct ll_async_page *llap,
                              struct obd_io_group *oig, int defer)
{
        struct page *page = llap->llap_page;
        int rc;
        ENTRY;

        page_cache_get(page);
        llap->llap_defer_uptodate = defer;
        llap->llap_ra_used = 0;
        rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
                                NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
                                CFS_PAGE_SIZE, 0, ASYNC_COUNT_STABLE | ASYNC_READY |
                                ASYNC_URGENT);
        if (rc) {
                LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
                page_cache_release(page);
        }
        RETURN(rc);
}
static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
{
        LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
        ra->ra_stats[which]++;
}

static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
        struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
        struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;

        spin_lock(&sbi->ll_lock);
        ll_ra_stats_inc_unlocked(ra, which);
        spin_unlock(&sbi->ll_lock);
}

void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
{
        if (!llap->llap_defer_uptodate || llap->llap_ra_used)
                return;

        ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
}
#define RAS_CDEBUG(ras)                                                      \
        CDEBUG(D_READA,                                                      \
               "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu "   \
               "csr %lu sf %lu sp %lu sl %lu\n",                             \
               ras->ras_last_readpage, ras->ras_consecutive_requests,       \
               ras->ras_consecutive_pages, ras->ras_window_start,           \
               ras->ras_window_len, ras->ras_next_readahead,                \
               ras->ras_requests, ras->ras_request_index,                   \
               ras->ras_consecutive_stride_requests, ras->ras_stride_offset,\
               ras->ras_stride_pages, ras->ras_stride_length)
static int index_in_window(unsigned long index, unsigned long point,
                           unsigned long before, unsigned long after)
{
        unsigned long start = point - before, end = point + after;

        if (start > point)      /* unsigned underflow: clamp to 0 */
                start = 0;
        if (end < point)        /* unsigned overflow: clamp to ~0 */
                end = ~0;

        return start <= index && index <= end;
}

static struct ll_readahead_state *ll_ras_get(struct file *f)
{
        struct ll_file_data *fd;

        fd = LUSTRE_FPRIVATE(f);
        return &fd->fd_ras;
}
void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
{
        struct ll_readahead_state *ras;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        ras->ras_requests++;
        ras->ras_request_index = 0;
        ras->ras_consecutive_requests++;
        rar->lrr_reader = current;

        list_add(&rar->lrr_linkage, &ras->ras_read_beads);
        spin_unlock(&ras->ras_lock);
}

void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
{
        struct ll_readahead_state *ras;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        list_del_init(&rar->lrr_linkage);
        spin_unlock(&ras->ras_lock);
}
static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
{
        struct ll_ra_read *scan;

        list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
                if (scan->lrr_reader == current)
                        return scan;
        }
        return NULL;
}

struct ll_ra_read *ll_ra_read_get(struct file *f)
{
        struct ll_readahead_state *ras;
        struct ll_ra_read *bead;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        bead = ll_ra_read_get_locked(ras);
        spin_unlock(&ras->ras_lock);

        return bead;
}
static int ll_read_ahead_page(struct obd_export *exp, struct obd_io_group *oig,
                              int index, struct address_space *mapping)
{
        struct ll_async_page *llap;
        struct page *page;
        unsigned int gfp_mask = 0;
        int rc = 0;

        gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
#ifdef __GFP_NOWARN
        gfp_mask |= __GFP_NOWARN;
#endif
        page = grab_cache_page_nowait_gfp(mapping, index, gfp_mask);
        if (page == NULL) {
                ll_ra_stats_inc(mapping, RA_STAT_FAILED_GRAB_PAGE);
                CDEBUG(D_READA, "g_c_p_n failed\n");
                return 0;
        }

        /* Check if page was truncated or reclaimed */
        if (page->mapping != mapping) {
                ll_ra_stats_inc(mapping, RA_STAT_WRONG_GRAB_PAGE);
                CDEBUG(D_READA, "g_c_p_n returned invalid page\n");
                GOTO(unlock_page, rc = 0);
        }

        /* we do this first so that we can see the page in the /proc
         * accounting */
        llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
        if (IS_ERR(llap) || llap->llap_defer_uptodate) {
                if (PTR_ERR(llap) == -ENOLCK) {
                        ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
                        CDEBUG(D_READA | D_PAGE,
                               "Adding page to cache failed index %d\n", index);
                        CDEBUG(D_READA, "nolock page\n");
                        GOTO(unlock_page, rc = -ENOLCK);
                }
                CDEBUG(D_READA, "read-ahead page\n");
                GOTO(unlock_page, rc = 0);
        }

        /* skip completed pages */
        if (Page_Uptodate(page))
                GOTO(unlock_page, rc = 0);

        /* bail out when we hit the end of the lock. */
        rc = ll_issue_page_read(exp, llap, oig, 1);
        if (rc == 0) {
                LL_CDEBUG_PAGE(D_READA | D_PAGE, page, "started read-ahead\n");
                rc = 1;
        } else {
 unlock_page:
                unlock_page(page);
                LL_CDEBUG_PAGE(D_READA | D_PAGE, page, "skipping read-ahead\n");
        }
        page_cache_release(page);
        return rc;
}
/* ra_io_arg will be filled in the beginning of ll_readahead with
 * ras_lock, then the following ll_read_ahead_pages will read RA
 * pages according to this arg; all the items in this structure are
 * counted by page index.
 */
struct ra_io_arg {
        unsigned long ria_start;  /* start offset of read-ahead*/
        unsigned long ria_end;    /* end offset of read-ahead*/
        /* If a stride read pattern is detected, ria_stoff means where
         * stride read is started.  Note: for normal read-ahead, the
         * value here is meaningless, and also it will not be accessed*/
        pgoff_t ria_stoff;
        /* ria_length and ria_pages are the length and pages length in the
         * stride I/O mode.  They will also be used to check whether
         * it is stride I/O read-ahead in the read-ahead pages*/
        unsigned long ria_length;
        unsigned long ria_pages;
};
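/* Example (hypothetical pattern): an application that reads 16 pages and
 * then skips 48 in a loop, detected starting at page 128, would be
 * described by ria_stoff = 128, ria_length = 64, ria_pages = 16; for a
 * plain sequential reader ria_length and ria_pages stay 0 and only
 * [ria_start, ria_end] matters. */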
#define RIA_DEBUG(ria)                                                       \
        CDEBUG(D_READA, "rs %lu re %lu ro %lu rl %lu rp %lu\n",              \
               ria->ria_start, ria->ria_end, ria->ria_stoff, ria->ria_length,\
               ria->ria_pages)

#define RAS_INCREASE_STEP (1024 * 1024 >> CFS_PAGE_SHIFT)
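/* With 4096-byte pages (CFS_PAGE_SHIFT == 12) this works out to
 * 1024 * 1024 >> 12 = 256 pages, i.e. the window grows in 1MB steps. */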
static inline int stride_io_mode(struct ll_readahead_state *ras)
{
        return ras->ras_consecutive_stride_requests > 1;
}
/* The function calculates how many pages will be read in
 * [off, off + length], which will be read by stride I/O mode,
 * stride_offset = st_off, stride_length = st_len,
 * stride_pages = st_pgs
 */
static unsigned long
stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
                unsigned long off, unsigned length)
{
        unsigned long cont_len = st_off > off ? st_off - off : 0;
        unsigned long stride_len = length + off > st_off ?
                                   length + off + 1 - st_off : 0;
        unsigned long left, pg_count;

        if (st_len == 0 || length == 0)
                return length;

        left = do_div(stride_len, st_len);
        left = min(left, st_pgs);

        pg_count = left + stride_len * st_pgs + cont_len;

        LASSERT(pg_count >= left);

        CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %u "
               "pgcount %lu\n", st_off, st_len, st_pgs, off, length, pg_count);

        return pg_count;
}
static int ria_page_count(struct ra_io_arg *ria)
{
        __u64 length = ria->ria_end >= ria->ria_start ?
                       ria->ria_end - ria->ria_start + 1 : 0;

        return stride_pg_count(ria->ria_stoff, ria->ria_length,
                               ria->ria_pages, ria->ria_start,
                               length);
}
/* Check whether the index is in the defined ra-window */
static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria)
{
        /* If ria_length == ria_pages, it means non-stride I/O mode;
         * idx should always be inside the read-ahead window in this case.
         * For stride I/O mode, just check whether the idx is inside
         * the ria_pages. */
        return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
               (idx - ria->ria_stoff) % ria->ria_length < ria->ria_pages;
}
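/* e.g. with ria_stoff = 0, ria_length = 16, ria_pages = 4, indexes 0-3
 * and 16-19 are inside the window ((idx % 16) < 4) while 4-15 fall in
 * the stride gap and are skipped. */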
static int ll_read_ahead_pages(struct obd_export *exp,
                               struct obd_io_group *oig,
                               struct ra_io_arg *ria,
                               unsigned long *reserved_pages,
                               struct address_space *mapping,
                               unsigned long *ra_end)
{
        int rc, count = 0, stride_ria;
        unsigned long page_idx;

        LASSERT(ria != NULL);
        RIA_DEBUG(ria);

        stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
        for (page_idx = ria->ria_start; page_idx <= ria->ria_end &&
                        *reserved_pages > 0; page_idx++) {
                if (ras_inside_ra_window(page_idx, ria)) {
                        /* If the page is inside the read-ahead window*/
                        rc = ll_read_ahead_page(exp, oig, page_idx, mapping);
                        if (rc == 1) {
                                (*reserved_pages)--;
                                count++;
                        } else if (rc == -ENOLCK)
                                break;
                } else if (stride_ria) {
                        /* If it is not in the read-ahead window, and it is
                         * read-ahead mode, then check whether it should skip
                         * the stride gap */
                        pgoff_t offset;
                        /* FIXME: This assertion is only valid for
                         * forward read-ahead; it will be fixed when backward
                         * read-ahead is implemented */
                        LASSERTF(page_idx > ria->ria_stoff, "since %lu is in"
                                 " the gap of the ra window, it should be"
                                 " bigger than stride offset %lu\n",
                                 page_idx, ria->ria_stoff);

                        offset = page_idx - ria->ria_stoff;
                        offset = offset % (ria->ria_length);
                        if (offset > ria->ria_pages) {
                                page_idx += ria->ria_length - offset;
                                CDEBUG(D_READA, "i %lu skip %lu\n", page_idx,
                                       ria->ria_length - offset);
                                continue;
                        }
                }
        }
        *ra_end = page_idx;
        return count;
}
static int ll_readahead(struct ll_readahead_state *ras,
                        struct obd_export *exp, struct address_space *mapping,
                        struct obd_io_group *oig, int flags)
{
        unsigned long start = 0, end = 0, reserved;
        unsigned long ra_end, len;
        struct inode *inode;
        struct lov_stripe_md *lsm;
        struct ll_ra_read *bead;
        struct ost_lvb lvb;
        struct ra_io_arg ria = { 0 };
        int ret = 0;
        __u64 kms;
        ENTRY;

        inode = mapping->host;
        lsm = ll_i2info(inode)->lli_smd;

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;
        lov_stripe_unlock(lsm);
        if (kms == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
                RETURN(0);
        }

        spin_lock(&ras->ras_lock);
        bead = ll_ra_read_get_locked(ras);
        /* Enlarge the RA window to encompass the full read */
        if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
            bead->lrr_start + bead->lrr_count) {
                ras->ras_window_len = bead->lrr_start + bead->lrr_count -
                                      ras->ras_window_start;
        }
        /* Reserve a part of the read-ahead window that we'll be issuing */
        if (ras->ras_window_len) {
                start = ras->ras_next_readahead;
                end = ras->ras_window_start + ras->ras_window_len - 1;
        }
        if (end != 0) {
                /* Truncate RA window to end of file */
                end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
                ras->ras_next_readahead = max(end, end + 1);
                RAS_CDEBUG(ras);
        }
        ria.ria_start = start;
        ria.ria_end = end;
        /* If stride I/O mode is detected, get stride window*/
        if (stride_io_mode(ras)) {
                ria.ria_stoff = ras->ras_stride_offset;
                ria.ria_length = ras->ras_stride_length;
                ria.ria_pages = ras->ras_stride_pages;
        }
        spin_unlock(&ras->ras_lock);

        if (end == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
                RETURN(0);
        }
        len = ria_page_count(&ria);
        if (len == 0)
                RETURN(0);

        reserved = ll_ra_count_get(ll_i2sbi(inode), len);
        if (reserved < end - start + 1)
                ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);

        CDEBUG(D_READA, "reserved page %lu\n", reserved);

        ret = ll_read_ahead_pages(exp, oig, &ria, &reserved, mapping, &ra_end);

        LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
        if (reserved != 0)
                ll_ra_count_put(ll_i2sbi(inode), reserved);

        if (ra_end == end + 1 && ra_end == (kms >> CFS_PAGE_SHIFT))
                ll_ra_stats_inc(mapping, RA_STAT_EOF);

        /* if we didn't get to the end of the region we reserved from
         * the ras we need to go back and update the ras so that the
         * next read-ahead tries from where we left off.  we only do so
         * if the region we failed to issue read-ahead on is still ahead
         * of the app and behind the next index to start read-ahead from */
        CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu\n",
               ra_end, end, ria.ria_end);

        if (ra_end != (end + 1)) {
                spin_lock(&ras->ras_lock);
                if (ra_end < ras->ras_next_readahead &&
                    index_in_window(ra_end, ras->ras_window_start, 0,
                                    ras->ras_window_len)) {
                        ras->ras_next_readahead = ra_end;
                        RAS_CDEBUG(ras);
                }
                spin_unlock(&ras->ras_lock);
        }

        RETURN(ret);
}
static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_window_start = index & (~(RAS_INCREASE_STEP - 1));
}
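/* e.g. with RAS_INCREASE_STEP = 256, an index of 1000 rounds the window
 * start down to 1000 & ~255 = 768, keeping windows aligned to whole
 * increase steps. */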
/* called with the ras_lock held or from places where it doesn't matter */
static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_last_readpage = index;
        ras->ras_consecutive_requests = 0;
        ras->ras_consecutive_pages = 0;
        ras->ras_window_len = 0;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start, index);

        RAS_CDEBUG(ras);
}
/* called with the ras_lock held or from places where it doesn't matter */
static void ras_stride_reset(struct ll_readahead_state *ras)
{
        ras->ras_consecutive_stride_requests = 0;
        RAS_CDEBUG(ras);
}
void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
        spin_lock_init(&ras->ras_lock);
        ras_reset(ras, 0);
        ras->ras_requests = 0;
        INIT_LIST_HEAD(&ras->ras_read_beads);
}
/* Check whether the read request is in the stride window.
 * If it is in the stride window, return 1; otherwise return 0.
 * Also update the stride_gap and stride_pages.
 */
static int index_in_stride_window(unsigned long index,
                                  struct ll_readahead_state *ras,
                                  struct inode *inode)
{
        int stride_gap = index - ras->ras_last_readpage - 1;

        LASSERT(stride_gap != 0);

        if (ras->ras_consecutive_pages == 0)
                return 0;

        /* Otherwise check the stride by itself */
        if ((ras->ras_stride_length - ras->ras_stride_pages) == stride_gap &&
            ras->ras_consecutive_pages == ras->ras_stride_pages)
                return 1;

        if (stride_gap >= 0) {
                /*
                 * only set stride_pages, stride_length if
                 * it is forward reading (stride_gap > 0)
                 */
                ras->ras_stride_pages = ras->ras_consecutive_pages;
                ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
        } else {
                /*
                 * If stride_gap < 0 (backward reading),
                 * reset the stride_pages/length.
                 * FIXME: backward stride I/O read.
                 */
                ras->ras_stride_pages = 0;
                ras->ras_stride_length = 0;
        }
        RAS_CDEBUG(ras);

        return 0;
}
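/* Worked example (hypothetical access pattern): after reading pages 0-3, a
 * jump to page 16 gives stride_gap = 16 - 3 - 1 = 12 with
 * ras_consecutive_pages = 4, so ras_stride_pages = 4 and
 * ras_stride_length = 16 are recorded (and 0 is returned); when pages
 * 16-19 are then read and the next jump lands on page 32, the check
 * (16 - 4) == 12 with 4 consecutive pages matches and 1 is returned. */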
static unsigned long
stride_page_count(struct ll_readahead_state *ras, unsigned long len)
{
        return stride_pg_count(ras->ras_stride_offset, ras->ras_stride_length,
                               ras->ras_stride_pages, ras->ras_stride_offset,
                               len);
}
/* The stride read-ahead window will be increased by inc_len according to
 * the stride I/O pattern */
static void ras_stride_increase_window(struct ll_readahead_state *ras,
                                       struct ll_ra_info *ra,
                                       unsigned long inc_len)
{
        unsigned long left, step, window_len;
        unsigned long stride_len;

        LASSERT(ras->ras_stride_length > 0);

        stride_len = ras->ras_window_start + ras->ras_window_len -
                     ras->ras_stride_offset;

        LASSERTF(stride_len >= 0, "window_start %lu, window_len %lu"
                 " stride_offset %lu\n", ras->ras_window_start,
                 ras->ras_window_len, ras->ras_stride_offset);

        left = stride_len % ras->ras_stride_length;

        window_len = ras->ras_window_len - left;

        if (left < ras->ras_stride_pages)
                left += inc_len;
        else
                left = ras->ras_stride_pages + inc_len;

        LASSERT(ras->ras_stride_pages != 0);

        step = left / ras->ras_stride_pages;
        left %= ras->ras_stride_pages;

        window_len += step * ras->ras_stride_length + left;

        if (stride_page_count(ras, window_len) <= ra->ra_max_pages)
                ras->ras_window_len = window_len;

        RAS_CDEBUG(ras);
}
/* Set stride I/O read-ahead window start offset */
static void ras_set_stride_offset(struct ll_readahead_state *ras)
{
        unsigned long window_len = ras->ras_next_readahead -
                                   ras->ras_window_start;
        unsigned long left;

        LASSERT(ras->ras_stride_length != 0);

        left = window_len % ras->ras_stride_length;

        ras->ras_stride_offset = ras->ras_next_readahead - left;

        RAS_CDEBUG(ras);
}
static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
                       struct ll_readahead_state *ras, unsigned long index,
                       unsigned hit)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        int zero = 0, stride_zero = 0, stride_detect = 0, ra_miss = 0;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        spin_lock(&ras->ras_lock);

        ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);

        /* reset the read-ahead window in two cases.  First when the app seeks
         * or reads to some other part of the file.  Secondly if we get a
         * read-ahead miss that we think we've previously issued.  This can
         * be a symptom of there being so many read-ahead pages that the VM is
         * reclaiming it before we get to it. */
        if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
                /* check whether it is in stride I/O mode*/
                if (!index_in_stride_window(index, ras, inode))
                        stride_zero = 1;
        } else if (!hit && ras->ras_window_len &&
                   index < ras->ras_next_readahead &&
                   index_in_window(index, ras->ras_window_start, 0,
                                   ras->ras_window_len)) {
                zero = 1;
                ra_miss = 1;
                /* If it hits a read-ahead miss and the stride I/O is still
                 * not detected, reset stride stuff to re-detect the whole
                 * stride I/O mode and avoid complication */
                if (!stride_io_mode(ras))
                        stride_zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
        }

        /* On the second access to a file smaller than the tunable
         * ra_max_read_ahead_whole_pages trigger RA on all pages in the
         * file up to ra_max_pages.  This is simply a best effort and
         * only occurs once per open file.  Normal RA behavior is reverted
         * to for subsequent IO.  The mmap case does not increment
         * ras_requests and thus can never trigger this behavior. */
        if (ras->ras_requests == 2 && !ras->ras_request_index) {
                __u64 kms_pages;

                kms_pages = (i_size_read(inode) + CFS_PAGE_SIZE - 1) >>
                            CFS_PAGE_SHIFT;

                CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
                       ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages);

                if (kms_pages &&
                    kms_pages <= ra->ra_max_read_ahead_whole_pages) {
                        ras->ras_window_start = 0;
                        ras->ras_last_readpage = 0;
                        ras->ras_next_readahead = 0;
                        ras->ras_window_len = min(ra->ra_max_pages,
                                ra->ra_max_read_ahead_whole_pages);
                        GOTO(out_unlock, 0);
                }
        }

        if (zero) {
                /* If it is discontinuous read, check
                 * whether it is stride I/O mode*/
                if (stride_zero) {
                        ras_reset(ras, index);
                        ras->ras_consecutive_pages++;
                        ras_stride_reset(ras);
                        RAS_CDEBUG(ras);
                        GOTO(out_unlock, 0);
                } else {
                        /* The read is still in the stride window or
                         * it hit a read-ahead miss */

                        /* If an ra-window miss is hit, which probably means VM
                         * pressure, some read-ahead pages were reclaimed.  So
                         * the length of the ra-window will not be increased,
                         * but also not reset, to avoid re-detecting the stride
                         * I/O mode. */
                        ras->ras_consecutive_requests = 0;
                        if (!ra_miss) {
                                ras->ras_consecutive_pages = 0;
                                if (++ras->ras_consecutive_stride_requests > 1)
                                        stride_detect = 1;
                        }
                        RAS_CDEBUG(ras);
                }
        } else if (ras->ras_consecutive_stride_requests > 1) {
                /* If this is contiguous read but in stride I/O mode
                 * currently, check whether the stride step is still valid;
                 * if invalid, reset the stride ra window */
                if (ras->ras_consecutive_pages + 1 > ras->ras_stride_pages)
                        ras_stride_reset(ras);
        }

        ras->ras_last_readpage = index;
        ras->ras_consecutive_pages++;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start,
                                      ras->ras_next_readahead);
        RAS_CDEBUG(ras);

        /* Trigger RA in the mmap case where ras_consecutive_requests
         * is not incremented and thus can't be used to trigger RA */
        if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
                ras->ras_window_len = RAS_INCREASE_STEP;
                GOTO(out_unlock, 0);
        }

        /* Initially reset the stride window offset to next_readahead*/
        if (ras->ras_consecutive_stride_requests == 2 && stride_detect)
                ras_set_stride_offset(ras);

        /* The initial ras_window_len is set to the request size.  To avoid
         * uselessly reading and discarding pages for random IO the window is
         * only increased once per consecutive request received. */
        if ((ras->ras_consecutive_requests > 1 &&
             !ras->ras_request_index) || stride_detect) {
                if (stride_io_mode(ras))
                        ras_stride_increase_window(ras, ra, RAS_INCREASE_STEP);
                else
                        ras->ras_window_len = min(ras->ras_window_len +
                                                  RAS_INCREASE_STEP,
                                                  ra->ra_max_pages);
        }
        EXIT;
 out_unlock:
        RAS_CDEBUG(ras);
        ras->ras_request_index++;
        spin_unlock(&ras->ras_lock);
        spin_unlock(&sbi->ll_lock);
}
int ll_writepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_export *exp;
        struct ll_async_page *llap;
        int rc = 0;
        ENTRY;

        LASSERT(PageLocked(page));

        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        LASSERT(!llap->llap_nocache);
        LASSERT(!PageWriteback(page));
        set_page_writeback(page);

        page_cache_get(page);
        if (llap->llap_write_queued) {
                LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
                rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
                                         llap->llap_cookie,
                                         ASYNC_READY | ASYNC_URGENT);
        } else {
                rc = queue_or_sync_write(exp, inode, llap, CFS_PAGE_SIZE,
                                         ASYNC_READY | ASYNC_URGENT);
        }
        page_cache_release(page);

 out:
        if (rc) {
                if (!lli->lli_async_rc)
                        lli->lli_async_rc = rc;
                /* re-dirty page on error so it retries write */
                if (PageWriteback(page)) {
                        end_page_writeback(page);
                }
                /* resend page only for not started IO*/
                if (!PageError(page))
                        ll_redirty_page(page);
                unlock_page(page);
        }
        RETURN(rc);
}
/*
 * for now we do our readpage the same on both 2.4 and 2.5.  The kernel's
 * read-ahead assumes it is valid to issue readpage all the way up to
 * i_size, but our dlm locks make that not the case.  We disable the
 * kernel's read-ahead and do our own by walking ahead in the page cache
 * checking for dlm lock coverage.  the main difference between 2.4 and
 * 2.6 is how read-ahead gets batched and issued, but we're using our own,
 * so they look the same.
 */
int ll_readpage(struct file *filp, struct page *page)
{
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct obd_io_group *oig = NULL;
        struct lustre_handle *lockh = NULL;
        int rc;
        ENTRY;

        LASSERT(PageLocked(page));
        LASSERT(!PageUptodate(page));
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset=%Lu=%#Lx\n",
               inode->i_ino, inode->i_generation, inode,
               (((loff_t)page->index) << CFS_PAGE_SHIFT),
               (((loff_t)page->index) << CFS_PAGE_SHIFT));
        LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);

        if (!ll_i2info(inode)->lli_smd) {
                /* File with no objects - one big hole */
                /* We use this just for remove_from_page_cache that is not
                 * exported; we'd make the page back up to date. */
                ll_truncate_complete_page(page);
                clear_page(kmap(page));
                kunmap(page);
                SetPageUptodate(page);
                unlock_page(page);
                RETURN(0);
        }

        rc = oig_init(&oig);
        if (rc < 0)
                GOTO(out, rc);

        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        if (fd->fd_flags & LL_FILE_GROUP_LOCKED)
                lockh = &fd->fd_cwlockh;

        llap = llap_from_page_with_lockh(page, LLAP_ORIGIN_READPAGE, lockh);
        if (IS_ERR(llap)) {
                if (PTR_ERR(llap) == -ENOLCK) {
                        CWARN("ino %lu page %lu (%llu) not covered by "
                              "a lock (mmap?).  check debug logs.\n",
                              inode->i_ino, page->index,
                              (long long)page->index << PAGE_CACHE_SHIFT);
                }
                GOTO(out, rc = PTR_ERR(llap));
        }

        if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
                ras_update(ll_i2sbi(inode), inode, &fd->fd_ras, page->index,
                           llap->llap_defer_uptodate);

        if (llap->llap_defer_uptodate) {
                /* This is the callpath if we got the page from a readahead */
                llap->llap_ra_used = 1;
                rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                                  fd->fd_flags);
                if (rc > 0)
                        obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
                                             NULL, oig);
                LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
                SetPageUptodate(page);
                unlock_page(page);
                GOTO(out_oig, rc = 0);
        }

        rc = ll_issue_page_read(exp, llap, oig, 0);
        if (rc)
                GOTO(out, rc);

        LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
        /* We have just requested the actual page we want; see if we can tack
         * on some readahead to that page's RPC before it is sent. */
        if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
                ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                             fd->fd_flags);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);

 out:
        if (rc)
                unlock_page(page);
 out_oig:
        if (oig != NULL)
                oig_release(oig);
        RETURN(rc);
}
static void ll_file_put_pages(struct page **pages, int numpages)
{
        int i;
        struct page **pp;
        ENTRY;

        for (i = 0, pp = pages; i < numpages; i++, pp++) {
                if (*pp) {
                        LL_CDEBUG_PAGE(D_PAGE, (*pp), "free\n");
                        __ll_put_llap(*pp);
                        if (page_private(*pp))
                                CERROR("the llap wasn't freed\n");
                        (*pp)->mapping = NULL;
                        if (page_count(*pp) != 1)
                                CERROR("page %p, flags %#lx, count %i, private %p\n",
                                       (*pp), (unsigned long)(*pp)->flags, page_count(*pp),
                                       (void*)page_private(*pp));
                        __free_pages(*pp, 0);
                }
        }
        OBD_FREE(pages, numpages * sizeof(struct page*));
        EXIT;
}
static struct page **ll_file_prepare_pages(int numpages, struct inode *inode,
                                           unsigned long first)
{
        struct page **pages;
        int i;
        int rc = 0;
        ENTRY;

        OBD_ALLOC(pages, sizeof(struct page *) * numpages);
        if (pages == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        for (i = 0; i < numpages; i++) {
                struct page *page;
                struct ll_async_page *llap;

                page = alloc_pages(GFP_HIGHUSER, 0);
                if (page == NULL)
                        GOTO(err, rc = -ENOMEM);
                pages[i] = page;
                /* llap_from_page needs page index and mapping to be set */
                page->index = first++;
                page->mapping = inode->i_mapping;
                llap = llap_from_page(page, LLAP_ORIGIN_LOCKLESS_IO);
                if (IS_ERR(llap))
                        GOTO(err, rc = PTR_ERR(llap));
                llap->llap_lockless_io_page = 1;
        }
        RETURN(pages);
 err:
        ll_file_put_pages(pages, numpages);
        RETURN(ERR_PTR(rc));
}
static ssize_t ll_file_copy_pages(struct page **pages, int numpages,
                                  const struct iovec *iov, unsigned long nsegs,
                                  ssize_t iov_offset, loff_t pos, size_t count,
                                  int rw)
{
        ssize_t amount = 0;
        int i;
        int updatechecksum = ll_i2sbi(pages[0]->mapping->host)->ll_flags &
                             LL_SBI_LLITE_CHECKSUM;
        ENTRY;

        for (i = 0; i < numpages; i++) {
                unsigned offset, bytes, left = 0;
                char *vaddr;

                vaddr = kmap(pages[i]);
                offset = pos & (CFS_PAGE_SIZE - 1);
                bytes = min_t(unsigned, CFS_PAGE_SIZE - offset, count);
                LL_CDEBUG_PAGE(D_PAGE, pages[i], "op = %s, addr = %p, "
                               "bytes = %u\n",
                               (rw == WRITE) ? "CFU" : "CTU",
                               vaddr + offset, bytes);
                while (bytes > 0 && !left && nsegs) {
                        unsigned copy = min_t(ssize_t, bytes,
                                              iov->iov_len - iov_offset);
                        if (rw == WRITE) {
                                left = copy_from_user(vaddr + offset,
                                                      iov->iov_base +iov_offset,
                                                      copy);
                                if (updatechecksum) {
                                        struct ll_async_page *llap;

                                        llap = llap_cast_private(pages[i]);
                                        llap->llap_checksum =
                                                init_checksum(OSC_DEFAULT_CKSUM);
                                        llap->llap_checksum =
                                                compute_checksum(llap->llap_checksum,
                                                                 vaddr,CFS_PAGE_SIZE,
                                                                 OSC_DEFAULT_CKSUM);
                                }
                        } else {
                                left = copy_to_user(iov->iov_base + iov_offset,
                                                    vaddr + offset, copy);
                        }
                        amount += copy;
                        count -= copy;
                        pos += copy;
                        bytes -= copy;
                        offset += copy;
                        iov_offset += copy;
                        if (iov_offset == iov->iov_len) {
                                iov_offset = 0;
                                iov++;
                                nsegs--;
                        }
                }
                kunmap(pages[i]);
                if (left) {
                        amount -= left;
                        break;
                }
        }
        if (amount == 0)
                RETURN(-EFAULT);
        RETURN(amount);
}
static int ll_file_oig_pages(struct inode * inode, struct page **pages,
                             int numpages, loff_t pos, size_t count, int rw)
{
        struct obd_io_group *oig;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_export *exp;
        loff_t org_pos = pos;
        obd_flag brw_flags;
        int rc;
        int i;
        ENTRY;

        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                RETURN(-EINVAL);
        rc = oig_init(&oig);
        if (rc)
                RETURN(rc);
        brw_flags = OBD_BRW_SRVLOCK;
        if (capable(CAP_SYS_RESOURCE))
                brw_flags |= OBD_BRW_NOQUOTA;

        for (i = 0; i < numpages; i++) {
                struct ll_async_page *llap;
                unsigned from, bytes;

                from = pos & (CFS_PAGE_SIZE - 1);
                bytes = min_t(unsigned, CFS_PAGE_SIZE - from,
                              count - pos + org_pos);
                llap = llap_cast_private(pages[i]);
                LASSERT(llap);

                lock_page(pages[i]);

                LL_CDEBUG_PAGE(D_PAGE, pages[i], "offset "LPU64","
                               " from %u, bytes = %u\n",
                               (__u64)pos, from, bytes);
                LASSERTF(pos >> CFS_PAGE_SHIFT == pages[i]->index,
                         "wrong page index %lu (%lu)\n",
                         pages[i]->index,
                         (unsigned long)(pos >> CFS_PAGE_SHIFT));
                rc = obd_queue_group_io(exp, lli->lli_smd, NULL, oig,
                                        llap->llap_cookie,
                                        (rw == WRITE) ?
                                        OBD_BRW_WRITE:OBD_BRW_READ,
                                        from, bytes, brw_flags,
                                        ASYNC_READY | ASYNC_URGENT |
                                        ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
                if (rc) {
                        i++;
                        GOTO(out, rc);
                }
                pos += bytes;
        }
        rc = obd_trigger_group_io(exp, lli->lli_smd, NULL, oig);
        if (rc)
                GOTO(out, rc);
        rc = oig_wait(oig);
 out:
        while (--i >= 0)
                unlock_page(pages[i]);
        oig_release(oig);
        RETURN(rc);
}
/* Advance through the passed iov, adjust the iov pointer as necessary and
 * return the starting offset in the individual entry we end up pointing
 * at.  Also reduce nr_segs as needed. */
static ssize_t ll_iov_advance(const struct iovec **iov, unsigned long *nr_segs,
                              ssize_t offset)
{
        while (*nr_segs > 0) {
                if ((*iov)->iov_len > offset)
                        return offset;
                offset -= (*iov)->iov_len;
                (*iov)++;
                (*nr_segs)--;
        }
        return 0;
}
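/* e.g. for iov = { {.iov_len = 10}, {.iov_len = 20} } and a consumed
 * offset of 15, the first segment is skipped (nr_segs drops to 1) and 5
 * is returned: the caller resumes 5 bytes into the second segment. */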
ssize_t ll_file_lockless_io(struct file *file, const struct iovec *iov,
                            unsigned long nr_segs,
                            loff_t *ppos, int rw, ssize_t count)
{
        loff_t pos;
        struct inode *inode = file->f_dentry->d_inode;
        ssize_t rc = 0;
        int max_pages;
        size_t amount = 0;
        unsigned long first, last;
        const struct iovec *iv = &iov[0];
        unsigned long nsegs = nr_segs;
        unsigned long offset = 0;
        ENTRY;

        if (rw == READ) {
                loff_t isize;

                ll_inode_size_lock(inode, 0);
                isize = i_size_read(inode);
                ll_inode_size_unlock(inode, 0);
                if (*ppos >= isize)
                        GOTO(out, rc = 0);
                if (*ppos + count >= isize)
                        count -= *ppos + count - isize;
                if (count == 0)
                        GOTO(out, rc);
        } else {
                rc = generic_write_checks(file, ppos, &count, 0);
                if (rc)
                        GOTO(out, rc);
                rc = remove_suid(file->f_dentry);
                if (rc)
                        GOTO(out, rc);
        }

        pos = *ppos;
        first = pos >> CFS_PAGE_SHIFT;
        last = (pos + count - 1) >> CFS_PAGE_SHIFT;
        max_pages = PTLRPC_MAX_BRW_PAGES *
                    ll_i2info(inode)->lli_smd->lsm_stripe_count;
        CDEBUG(D_INFO, "%u, stripe_count = %u\n",
               PTLRPC_MAX_BRW_PAGES /* max_pages_per_rpc */,
               ll_i2info(inode)->lli_smd->lsm_stripe_count);

        while (first <= last && rc >= 0) {
                int pages_for_io;
                struct page **pages;
                size_t bytes = count - amount;

                pages_for_io = min_t(int, last - first + 1, max_pages);
                pages = ll_file_prepare_pages(pages_for_io, inode, first);
                if (IS_ERR(pages)) {
                        rc = PTR_ERR(pages);
                        break;
                }
                if (rw == WRITE) {
                        rc = ll_file_copy_pages(pages, pages_for_io, iv, nsegs,
                                                offset, pos + amount, bytes,
                                                rw);
                        if (rc < 0)
                                GOTO(put_pages, rc);
                        offset = ll_iov_advance(&iv, &nsegs, offset + rc);
                        bytes = rc;
                }
                rc = ll_file_oig_pages(inode, pages, pages_for_io,
                                       pos + amount, bytes, rw);
                if (rc < 0)
                        GOTO(put_pages, rc);
                if (rw == READ) {
                        rc = ll_file_copy_pages(pages, pages_for_io, iv, nsegs,
                                                offset, pos + amount, bytes, rw);
                        if (rc < 0)
                                GOTO(put_pages, rc);
                        offset = ll_iov_advance(&iv, &nsegs, offset + rc);
                        bytes = rc;
                }
                amount += bytes;
 put_pages:
                ll_file_put_pages(pages, pages_for_io);
                first += pages_for_io;
                /* a short read/write check */
                if (pos + amount < ((loff_t)first << CFS_PAGE_SHIFT))
                        break;
                /* Check if we are out of userspace buffers. (how could that
                   happen?) */
                if (nsegs == 0)
                        break;
        }
        /* NOTE: don't update i_size and KMS in the absence of LDLM locks even
         * if the write makes the file larger */
        file_accessed(file);
        if (rw == READ && amount < count && rc == 0) {
                unsigned long not_cleared;

                while (count - amount > 0 && nsegs > 0) {
                        ssize_t to_clear = min_t(ssize_t, count - amount,
                                                 iv->iov_len - offset);
                        not_cleared = clear_user(iv->iov_base + offset,
                                                 to_clear);
                        amount += to_clear - not_cleared;
                        if (not_cleared)
                                break;
                        offset = 0;
                        iv++;
                        nsegs--;
                }
        }
        if (amount > 0) {
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    (rw == WRITE) ?
                                    LPROC_LL_LOCKLESS_WRITE :
                                    LPROC_LL_LOCKLESS_READ,
                                    (long)amount);
                *ppos += amount;
                rc = amount;
        }
 out:
        RETURN(rc);
}