/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Lustre Lite I/O page cache routines shared by different kernel revs
 *
 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <asm/uaccess.h>
#include <asm/segment.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/lustre_mdc.h>
#include <linux/lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
             pos = n, n = pos->prev)
#endif
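
/* A minimal usage sketch of the fallback macro above (illustrative only;
 * "struct item", it_linkage and item_free() are hypothetical names, not part
 * of Lustre).  The second cursor "n" caches pos->prev, so the entry at "pos"
 * may be unlinked and freed while walking the list tail-to-head. */
#if 0
static void drop_items_reverse(struct list_head *head)
{
        struct list_head *pos, *n;

        list_for_each_prev_safe(pos, n, head) {
                struct item *it = list_entry(pos, struct item, it_linkage);
                list_del(&it->it_linkage);
                item_free(it);
        }
}
#endif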

kmem_cache_t *ll_async_page_slab = NULL;
size_t ll_async_page_slab_size = 0;

/* SYNCHRONOUS I/O to object storage for an inode */
static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
                  struct page *page, int flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct brw_page pg;
        int rc;
        ENTRY;

        pg.pg = page;
        pg.off = ((obd_off)page->index) << PAGE_SHIFT;

        /* a write that extends past EOF only sends the valid tail of the page */
        if ((cmd & OBD_BRW_WRITE) && (pg.off + PAGE_SIZE > inode->i_size))
                pg.count = inode->i_size % PAGE_SIZE;
        else
                pg.count = PAGE_SIZE;

        LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
                       cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
                       inode->i_ino, pg.off, pg.off);
        if (pg.count == 0) {
                CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
                       LPU64"\n",
                       inode->i_ino, inode, inode->i_size, page->mapping->host,
                       page->mapping->host->i_size, page->index, pg.off);
        }

        pg.flag = flags;

        if (cmd & OBD_BRW_WRITE)
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_BRW_WRITE, pg.count);
        else
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_BRW_READ, pg.count);
        rc = obd_brw(cmd, ll_i2dtexp(inode), oa, lsm, 1, &pg, NULL);
        if (rc == 0)
                obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
        else if (rc != -EIO)
                CERROR("error from obd_brw: rc = %d\n", rc);
        RETURN(rc);
}

/* this isn't where truncate starts.  roughly:
 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate.  setattr_raw grabs
 * the DLM lock on [size, EOF], i_mutex, ->lli_size_sem, and WRITE_I_ALLOC_SEM
 * to avoid races.
 *
 * must be called under ->lli_size_sem */
void ll_truncate(struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct ost_lvb lvb;
        struct obdo oa;
        int rc;
        ENTRY;

        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu=%#Lx\n",inode->i_ino,
               inode->i_generation, inode, inode->i_size, inode->i_size);

        if (lli->lli_size_sem_owner != current) {
                EXIT;
                return;
        }

        if (!lsm) {
                CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
                       inode->i_ino);
                GOTO(out_unlock, 0);
        }

        LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);

        /* XXX I'm pretty sure this is a hack to paper over a more fundamental
         * race condition. */
        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 0);
        if (lvb.lvb_size == inode->i_size) {
                CDEBUG(D_VFSTRACE, "skipping punch for obj "LPX64", %Lu=%#Lx\n",
                       lsm->lsm_object_id, inode->i_size, inode->i_size);
                lov_stripe_unlock(lsm);
                GOTO(out_unlock, 0);
        }

        obd_adjust_kms(ll_i2dtexp(inode), lsm, inode->i_size, 1);
        lov_stripe_unlock(lsm);

        if (unlikely((ll_i2sbi(inode)->ll_flags & LL_SBI_CHECKSUM) &&
                     (inode->i_size & ~PAGE_MASK))) {
                /* If the truncate leaves behind a partial page, update its
                 * checksum. */
                struct page *page = find_get_page(inode->i_mapping,
                                                  inode->i_size >> PAGE_CACHE_SHIFT);
                if (page != NULL) {
                        struct ll_async_page *llap = llap_cast_private(page);
                        if (llap != NULL) {
                                llap->llap_checksum =
                                        crc32_le(0, kmap(page), PAGE_SIZE);
                                kunmap(page);
                        }
                        page_cache_release(page);
                }
        }

        CDEBUG(D_INFO, "calling punch for "LPX64" (new size %Lu=%#Lx)\n",
               lsm->lsm_object_id, inode->i_size, inode->i_size);

        oa.o_id = lsm->lsm_object_id;
        oa.o_valid = OBD_MD_FLID;

        obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
                        OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                        OBD_MD_FLFID | OBD_MD_FLGENER);

        ll_inode_size_unlock(inode, 0);

        rc = obd_punch(ll_i2dtexp(inode), &oa, lsm, inode->i_size,
                       OBD_OBJECT_EOF, NULL);
        if (rc)
                CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
        else
                obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                              OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);
        EXIT;
        return;

 out_unlock:
        ll_inode_size_unlock(inode, 0);
} /* ll_truncate */
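
/* A hedged sketch of the entry path described in the comment before
 * ll_truncate() (the real callers live in ll_setattr_raw() and the VM, not
 * here; "new_size" is a hypothetical local).  The point is the locking
 * convention: the caller takes ->lli_size_sem and ll_truncate() releases it
 * before the punch RPC is sent to the storage targets. */
#if 0
        ll_inode_size_lock(inode, 0);      /* takes ->lli_size_sem */
        inode->i_size = new_size;
        vmtruncate(inode, new_size);       /* ends up calling ll_truncate() */
        /* ->lli_size_sem was dropped inside ll_truncate() */
#endif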

int ll_prepare_write(struct file *file, struct page *page, unsigned from,
                     unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
        struct obdo oa;
        struct brw_page pga;
        struct ost_lvb lvb;
        int rc = 0;
        ENTRY;

        LASSERT(PageLocked(page));
        (void)llap_cast_private(page); /* assertion */

        /* Check to see if we should return -EIO right away */
        pga.pg = page;
        pga.off = offset;
        pga.count = PAGE_SIZE;
        pga.flag = 0;

        oa.o_mode = inode->i_mode;
        oa.o_id = lsm->lsm_object_id;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE | OBD_MD_FLTYPE;
        obdo_from_inode(&oa, inode, OBD_MD_FLFID | OBD_MD_FLGENER);

        rc = obd_brw(OBD_BRW_CHECK, ll_i2dtexp(inode), &oa, lsm,
                     1, &pga, NULL);
        if (rc)
                RETURN(rc);

        if (PageUptodate(page)) {
                LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
                RETURN(0);
        }

        /* We're completely overwriting an existing page, so _don't_ set it
         * up to date until commit_write */
        if (from == 0 && to == PAGE_SIZE) {
                LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
                POISON_PAGE(page, 0x11);
                RETURN(0);
        }

        /* If we are writing to a new page, no need to read old data.  The
         * extent locking will have updated the KMS, and for our purposes
         * here we can treat it like i_size. */
        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 0);
        lov_stripe_unlock(lsm);
        if (lvb.lvb_size <= offset) {
                LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
                               lvb.lvb_size, offset);
                memset(kmap(page), 0, PAGE_SIZE);
                kunmap(page);
                GOTO(prepare_done, rc = 0);
        }

        /* XXX could be an async ocp read.. read-ahead? */
        rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);

        /* bug 1598: don't clobber blksize */
        oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
        obdo_refresh_inode(inode, &oa, oa.o_valid);

 prepare_done:
        if (rc == 0)
                SetPageUptodate(page);

        RETURN(rc);
}

static int ll_ap_make_ready(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;

        LASSERTF(!(cmd & OBD_BRW_READ), "cmd %x page %p ino %lu index %lu\n",
                 cmd, page, page->mapping->host->i_ino, page->index);

        /* we're trying to write, but the page is locked.. come back later */
        if (TryLockPage(page))
                RETURN(-EAGAIN);

        LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
        page_cache_get(page);

        /* if we left PageDirty we might get another writepage call
         * in the future.  list walkers are bright enough
         * to check page dirty so we can leave it on whatever list
         * it's on.  XXX also, we're called with the cli list so if
         * we got the page cache list we'd create a lock inversion
         * with the removepage path which gets the page lock then the
         * cli lock */
        clear_page_dirty(page);
        RETURN(0);
}

/* We have two reasons for giving llite the opportunity to change the
 * write length of a given queued page as it builds the RPC containing
 * the page:
 *
 * 1) Further extending writes may have landed in the page cache
 *    since a partial write first queued this page, requiring us
 *    to write more from the page cache.  (No further races are possible,
 *    since by the time this is called, the page is locked.)
 * 2) We might have raced with truncate and want to avoid performing
 *    write RPCs that are just going to be thrown away by the
 *    truncate's punch on the storage targets.
 *
 * The kms serves these purposes as it is set at both truncate and extending
 * writes. */
static int ll_ap_refresh_count(void *data, int cmd)
{
        struct ll_inode_info *lli;
        struct ll_async_page *llap;
        struct lov_stripe_md *lsm;
        struct page *page;
        struct inode *inode;
        struct ost_lvb lvb;
        __u64 kms;
        ENTRY;

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(cmd != OBD_BRW_READ);

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        inode = page->mapping->host;
        lli = ll_i2info(inode);
        lsm = lli->lli_smd;

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;
        lov_stripe_unlock(lsm);

        /* catch race with truncate */
        if (((__u64)page->index << PAGE_SHIFT) >= kms)
                return 0;

        /* catch sub-page write at end of file */
        if (((__u64)page->index << PAGE_SHIFT) + PAGE_SIZE > kms)
                return kms % PAGE_SIZE;

        return PAGE_SIZE;
}
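
/* A worked example of the clamping above, assuming 4096-byte pages:
 * with kms = 10300 and page->index = 2, the page starts at offset
 * 2 << 12 = 8192 < kms, and 8192 + 4096 = 12288 > kms, so this is the
 * sub-page tail of the file and only kms % 4096 = 2108 bytes are sent
 * in the write RPC. */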

void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
{
        struct lov_stripe_md *lsm;
        obd_flag valid_flags;

        lsm = ll_i2info(inode)->lli_smd;

        oa->o_id = lsm->lsm_object_id;
        oa->o_valid = OBD_MD_FLID;
        valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
        if (cmd & OBD_BRW_WRITE) {
                oa->o_valid |= OBD_MD_FLEPOCH;
                oa->o_easize = ll_i2info(inode)->lli_io_epoch;
                oa->o_uid = inode->i_uid;
                oa->o_gid = inode->i_gid;

                valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                               OBD_MD_FLUID | OBD_MD_FLGID |
                               OBD_MD_FLFID | OBD_MD_FLGENER;
        }

        obdo_from_inode(oa, inode, valid_flags);
}

static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);

        EXIT;
}

static struct obd_async_page_ops ll_async_page_ops = {
        .ap_make_ready =        ll_ap_make_ready,
        .ap_refresh_count =     ll_ap_refresh_count,
        .ap_fill_obdo =         ll_ap_fill_obdo,
        .ap_completion =        ll_ap_completion,
};

struct ll_async_page *llap_cast_private(struct page *page)
{
        struct ll_async_page *llap = (struct ll_async_page *)page_private(page);

        LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
                 "page %p private %lu gave magic %d which != %d\n",
                 page, page_private(page), llap->llap_magic, LLAP_MAGIC);

        return llap;
}

/* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
 *
 * There is an llap attached onto every page in lustre, linked off @sbi.
 * We add an llap to the list so we don't lose our place during list walking.
 * If llaps in the list are being moved they will only move to the end
 * of the LRU, and we aren't terribly interested in those pages here (we
 * start at the beginning of the list where the least-used llaps are). */
int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
{
        struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
        unsigned long total, want, count = 0;

        total = sbi->ll_async_page_count;

        /* There can be a large number of llaps (600k or more in a large
         * memory machine) so the VM 1/6 shrink ratio is likely too much.
         * Since we are freeing pages also, we don't necessarily want to
         * shrink so much.  Limit to 40MB of pages + llaps per call. */
        if (shrink_fraction == 0)
                want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
        else
                want = (total + shrink_fraction - 1) / shrink_fraction;

        if (want > 40 << (20 - PAGE_CACHE_SHIFT))
                want = 40 << (20 - PAGE_CACHE_SHIFT);

        CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
               want, total, shrink_fraction);

        spin_lock(&sbi->ll_lock);
        list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);

        while (total-- > 0 && count < want) {
                struct page *page;
                int keep;

                if (unlikely(need_resched())) {
                        spin_unlock(&sbi->ll_lock);
                        cond_resched();
                        spin_lock(&sbi->ll_lock);
                }

                llap = llite_pglist_next_llap(sbi,&dummy_llap.llap_pglist_item);
                list_del_init(&dummy_llap.llap_pglist_item);
                if (llap == NULL)
                        break;

                page = llap->llap_page;
                LASSERT(page != NULL);

                list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);

                /* Page needs/undergoing IO */
                if (TryLockPage(page)) {
                        LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
                        continue;
                }

                keep = (llap->llap_write_queued || PageDirty(page) ||
                        PageWriteback(page) ||
                        (!PageUptodate(page) &&
                         llap->llap_origin != LLAP_ORIGIN_READAHEAD));

                LL_CDEBUG_PAGE(D_PAGE, page,"%s LRU page: %s%s%s%s origin %s\n",
                               keep ? "keep" : "drop",
                               llap->llap_write_queued ? "wq " : "",
                               PageDirty(page) ? "pd " : "",
                               PageUptodate(page) ? "" : "!pu ",
                               llap->llap_defer_uptodate ? "" : "!du",
                               llap_origins[llap->llap_origin]);

                /* If page is dirty or undergoing IO don't discard it */
                if (keep) {
                        unlock_page(page);
                        continue;
                }

                page_cache_get(page);
                spin_unlock(&sbi->ll_lock);

                if (page->mapping != NULL) {
                        ll_teardown_mmaps(page->mapping,
                                          (__u64)page->index<<PAGE_CACHE_SHIFT,
                                          ((__u64)page->index<<PAGE_CACHE_SHIFT)|
                                          ~PAGE_CACHE_MASK);
                        if (!PageDirty(page) && !page_mapped(page)) {
                                ll_ra_accounting(llap, page->mapping);
                                ll_truncate_complete_page(page);
                                ++count;
                        } else {
                                LL_CDEBUG_PAGE(D_PAGE, page, "Not dropping page"
                                               " because it is dirty or mapped\n");
                        }
                }

                unlock_page(page);
                page_cache_release(page);

                spin_lock(&sbi->ll_lock);
        }
        list_del(&dummy_llap.llap_pglist_item);
        spin_unlock(&sbi->ll_lock);

        CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
               count, want, total);

        return count;
}
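
/* The dummy_llap above is the classic "cursor node" pattern for walking a
 * list that may change while the lock is dropped.  A stripped-down sketch of
 * the same idea (hypothetical "struct node" and locking, not Lustre code):
 * the marker is never processed, it only records our position. */
#if 0
struct node {
        struct list_head linkage;
};

static void walk_with_cursor(struct list_head *head, spinlock_t *lock)
{
        struct node cursor, *next;

        spin_lock(lock);
        list_add(&cursor.linkage, head);
        while (cursor.linkage.next != head) {
                next = list_entry(cursor.linkage.next, struct node, linkage);
                /* move the marker past the element we are about to handle */
                list_del(&cursor.linkage);
                list_add(&cursor.linkage, &next->linkage);
                spin_unlock(lock);
                /* safe to block here; the cursor keeps our place */
                spin_lock(lock);
        }
        list_del(&cursor.linkage);
        spin_unlock(lock);
}
#endif

/* Worked example of the 40MB cap above, assuming 4096-byte pages
 * (PAGE_CACHE_SHIFT == 12): 40 << (20 - 12) = 40 * 256 = 10240 pages
 * at most are scanned for freeing in a single call. */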

static struct ll_async_page *llap_from_page(struct page *page, unsigned origin)
{
        struct ll_async_page *llap;
        struct obd_export *exp;
        struct inode *inode = page->mapping->host;
        struct ll_sb_info *sbi;
        int rc;
        ENTRY;

        if (!inode) {
                static int triggered;

                if (!triggered) {
                        LL_CDEBUG_PAGE(D_ERROR, page, "Bug 10047. Wrong anon "
                                       "page received\n");
                        libcfs_debug_dumpstack(NULL);
                        triggered = 1;
                }
                RETURN(ERR_PTR(-EINVAL));
        }

        sbi = ll_i2sbi(inode);
        LASSERT(ll_async_page_slab);
        LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);

        llap = llap_cast_private(page);
        if (llap != NULL) {
                /* move to end of LRU list, except when page is just about to
                 * die */
                if (origin != LLAP_ORIGIN_REMOVEPAGE) {
                        spin_lock(&sbi->ll_lock);
                        sbi->ll_pglist_gen++;
                        list_del_init(&llap->llap_pglist_item);
                        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
                        spin_unlock(&sbi->ll_lock);
                }
                GOTO(out, llap);
        }

        exp = ll_i2dtexp(page->mapping->host);
        if (exp == NULL)
                RETURN(ERR_PTR(-EINVAL));

        /* limit the number of lustre-cached pages */
        if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
                llap_shrink_cache(sbi, 0);

        OBD_SLAB_ALLOC(llap, ll_async_page_slab, SLAB_KERNEL,
                       ll_async_page_slab_size);
        if (llap == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        llap->llap_magic = LLAP_MAGIC;
        llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));

        rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
                                 (obd_off)page->index << PAGE_SHIFT,
                                 &ll_async_page_ops, llap, &llap->llap_cookie);
        if (rc) {
                OBD_SLAB_FREE(llap, ll_async_page_slab,
                              ll_async_page_slab_size);
                RETURN(ERR_PTR(rc));
        }

        CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
               page, llap->llap_cookie, (obd_off)page->index << PAGE_SHIFT);
        /* also zeroing the PRIVBITS low order bitflags */
        __set_page_ll_data(page, llap);
        llap->llap_page = page;

        spin_lock(&sbi->ll_lock);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count++;
        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
        spin_unlock(&sbi->ll_lock);

 out:
        if (unlikely(sbi->ll_flags & LL_SBI_CHECKSUM)) {
                __u32 csum = 0;
                csum = crc32_le(csum, kmap(page), PAGE_SIZE);
                kunmap(page);
                if (origin == LLAP_ORIGIN_READAHEAD ||
                    origin == LLAP_ORIGIN_READPAGE) {
                        llap->llap_checksum = 0;
                } else if (origin == LLAP_ORIGIN_COMMIT_WRITE ||
                           llap->llap_checksum == 0) {
                        llap->llap_checksum = csum;
                        CDEBUG(D_PAGE, "page %p cksum %x\n", page, csum);
                } else if (llap->llap_checksum == csum) {
                        /* origin == LLAP_ORIGIN_WRITEPAGE */
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        /* origin == LLAP_ORIGIN_WRITEPAGE */
                        LL_CDEBUG_PAGE(D_ERROR, page, "old cksum %x != new "
                                       "%x!\n", llap->llap_checksum, csum);
                }
        }

        llap->llap_origin = origin;
        RETURN(llap);
}

static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
                               struct ll_async_page *llap,
                               unsigned to, obd_flag async_flags)
{
        unsigned long size_index = inode->i_size >> PAGE_SHIFT;
        struct obd_io_group *oig;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc, noquot = llap->llap_ignore_quota ? OBD_BRW_NOQUOTA : 0;
        ENTRY;

        /* _make_ready only sees llap once we've unlocked the page */
        llap->llap_write_queued = 1;
        rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, 0, 0, async_flags);
        if (rc == 0) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
                //llap_write_pending(inode, llap);
                GOTO(out, 0);
        }

        llap->llap_write_queued = 0;

        rc = oig_init(&oig);
        if (rc)
                GOTO(out, rc);

        /* make full-page requests if we are not at EOF (bug 4410) */
        if (to != PAGE_SIZE && llap->llap_page->index < size_index) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write before EOF: size_index %lu, to %d\n",
                               size_index, to);
                to = PAGE_SIZE;
        } else if (to != PAGE_SIZE && llap->llap_page->index == size_index) {
                int size_to = inode->i_size & ~PAGE_MASK;
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write at EOF: size_index %lu, to %d/%d\n",
                               size_index, to, size_to);
                if (to < size_to)
                        to = size_to;
        }

        /* compare the checksum once before the page leaves llite */
        if (unlikely((sbi->ll_flags & LL_SBI_CHECKSUM) &&
                     llap->llap_checksum != 0)) {
                __u32 csum = 0;
                struct page *page = llap->llap_page;
                csum = crc32_le(csum, kmap(page), PAGE_SIZE);
                kunmap(page);
                if (llap->llap_checksum == csum) {
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        CERROR("page %p old cksum %x != new cksum %x!\n",
                               page, llap->llap_checksum, csum);
                }
        }

        rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, to, 0, ASYNC_READY | ASYNC_URGENT |
                                ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
        if (rc)
                GOTO(free_oig, rc);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
        if (rc)
                GOTO(free_oig, rc);

        rc = oig_wait(oig);

        if (!rc && async_flags & ASYNC_READY)
                unlock_page(llap->llap_page);

        LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);

 free_oig:
        oig_release(oig);
 out:
        RETURN(rc);
}

/* update our write count to account for i_size increases that may have
 * happened since we've queued the page for io.
 *
 * be careful not to return success without setting the page Uptodate or
 * the next pass through prepare_write will read in stale data from disk. */
int ll_commit_write(struct file *file, struct page *page, unsigned from,
                    unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_export *exp;
        struct ll_async_page *llap;
        loff_t size;
        int rc = 0;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
        LASSERT(inode == file->f_dentry->d_inode);
        LASSERT(PageLocked(page));

        CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
               inode, page, from, to, page->index);

        llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                RETURN(-EINVAL);

        llap->llap_ignore_quota = capable(CAP_SYS_RESOURCE);

        /* queue a write for some time in the future the first time we
         * dirty the page */
        if (!PageDirty(page)) {
                lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
                                     LPROC_LL_DIRTY_MISSES);

                rc = queue_or_sync_write(exp, inode, llap, to, 0);
                if (rc)
                        GOTO(out, rc);
        } else {
                lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
                                     LPROC_LL_DIRTY_HITS);
        }

        /* put the page in the page cache, from now on ll_removepage is
         * responsible for cleaning up the llap.
         * only set the page dirty when it's queued to be written out */
        if (llap->llap_write_queued)
                set_page_dirty(page);

 out:
        size = (((obd_off)page->index) << PAGE_SHIFT) + to;
        ll_inode_size_lock(inode, 0);
        if (rc == 0) {
                lov_stripe_lock(lsm);
                obd_adjust_kms(exp, lsm, size, 0);
                lov_stripe_unlock(lsm);
                if (size > inode->i_size)
                        inode->i_size = size;
                SetPageUptodate(page);
        } else if (size > inode->i_size) {
                /* this page is beyond i_size, so it can't be truncated in
                 * ll_p_r_e during lock revoking.  we must tear down our
                 * book-keeping here. */
                ll_removepage(page);
        }
        ll_inode_size_unlock(inode, 0);
        RETURN(rc);
}
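
/* For reference, a sketch of how the 2.4/2.6-era VFS drives the two
 * address_space hooks above during a buffered write (generic kernel
 * pattern, not code from this file): */
#if 0
        page = grab_cache_page(mapping, index);          /* returns locked page */
        rc = mapping->a_ops->prepare_write(file, page, from, to);
        if (rc == 0) {
                /* the caller copies the user data into the page here */
                rc = mapping->a_ops->commit_write(file, page, from, to);
        }
        unlock_page(page);
        page_cache_release(page);
#endif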

static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        unsigned long ret;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
        ra->ra_cur_pages += ret;
        spin_unlock(&sbi->ll_lock);

        RETURN(ret);
}

static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;

        spin_lock(&sbi->ll_lock);
        LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
                 ra->ra_cur_pages, len);
        ra->ra_cur_pages -= len;
        spin_unlock(&sbi->ll_lock);
}

/* called for each page in a completed rpc. */
void ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        LASSERT(PageLocked(page));

        LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);

        if (cmd & OBD_BRW_READ && llap->llap_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);

        if (rc == 0) {
                if (cmd & OBD_BRW_READ) {
                        if (!llap->llap_defer_uptodate)
                                SetPageUptodate(page);
                } else {
                        llap->llap_write_queued = 0;
                }
                ClearPageError(page);
        } else {
                if (cmd & OBD_BRW_READ) {
                        llap->llap_defer_uptodate = 0;
                } else {
                        ll_redirty_page(page);
                }
                SetPageError(page);
        }

        unlock_page(page);

        if (0 && cmd & OBD_BRW_WRITE) {
                llap_write_complete(page->mapping->host, llap);
                ll_try_done_writing(page->mapping->host);
        }

        if (PageWriteback(page)) {
                end_page_writeback(page);
        }
        page_cache_release(page);

        EXIT;
}

/* the kernel calls us here when a page is unhashed from the page cache.
 * the page will be locked and the kernel is holding a spinlock, so
 * we need to be careful.  we're just tearing down our book-keeping
 * here. */
void ll_removepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        LASSERT(!in_interrupt());

        /* sync pages or failed read pages can leave pages in the page
         * cache that don't have our data associated with them anymore */
        if (page_private(page) == 0) {
                EXIT;
                return;
        }

        LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");

        exp = ll_i2dtexp(inode);
        if (exp == NULL) {
                CERROR("page %p ind %lu gave null export\n", page, page->index);
                EXIT;
                return;
        }

        llap = llap_from_page(page, LLAP_ORIGIN_REMOVEPAGE);
        if (IS_ERR(llap)) {
                CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
                       page->index, PTR_ERR(llap));
                EXIT;
                return;
        }

        //llap_write_complete(inode, llap);
        rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
                                     llap->llap_cookie);
        if (rc != 0)
                CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);

        /* this unconditional free is only safe because the page lock
         * is providing exclusivity to memory pressure/truncate/writeback..*/
        __clear_page_ll_data(page);

        spin_lock(&sbi->ll_lock);
        if (!list_empty(&llap->llap_pglist_item))
                list_del_init(&llap->llap_pglist_item);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count--;
        spin_unlock(&sbi->ll_lock);
        OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
        EXIT;
}

static int ll_page_matches(struct page *page, int fd_flags)
{
        struct lustre_handle match_lockh = {0};
        struct inode *inode = page->mapping->host;
        ldlm_policy_data_t page_extent;
        int flags, matches;
        ENTRY;

        if (unlikely(fd_flags & LL_FILE_GROUP_LOCKED))
                RETURN(1);

        page_extent.l_extent.start = (__u64)page->index << PAGE_CACHE_SHIFT;
        page_extent.l_extent.end =
                page_extent.l_extent.start + PAGE_CACHE_SIZE - 1;
        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (!(fd_flags & LL_FILE_READAHEAD))
                flags |= LDLM_FL_CBPENDING;
        matches = obd_match(ll_i2sbi(inode)->ll_dt_exp,
                            ll_i2info(inode)->lli_smd, LDLM_EXTENT,
                            &page_extent, LCK_PR | LCK_PW, &flags, inode,
                            &match_lockh);
        RETURN(matches);
}
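
/* A worked example of the extent above, assuming 4096-byte pages:
 * page->index = 3 gives l_extent.start = 3 << 12 = 12288 and
 * l_extent.end = 12288 + 4096 - 1 = 16383, i.e. an already-granted
 * PR or PW lock must cover the byte range [12288, 16383] for the
 * match to succeed. */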

static int ll_issue_page_read(struct obd_export *exp,
                              struct ll_async_page *llap,
                              struct obd_io_group *oig, int defer)
{
        struct page *page = llap->llap_page;
        int rc;
        ENTRY;

        page_cache_get(page);
        llap->llap_defer_uptodate = defer;
        llap->llap_ra_used = 0;
        rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
                                NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
                                PAGE_SIZE, 0, ASYNC_COUNT_STABLE | ASYNC_READY |
                                ASYNC_URGENT);
        if (rc) {
                LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
                page_cache_release(page);
        }
        RETURN(rc);
}

static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
{
        LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
        ra->ra_stats[which]++;
}

static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
        struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
        struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;

        spin_lock(&sbi->ll_lock);
        ll_ra_stats_inc_unlocked(ra, which);
        spin_unlock(&sbi->ll_lock);
}

void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
{
        if (!llap->llap_defer_uptodate || llap->llap_ra_used)
                return;

        ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
}

#define RAS_CDEBUG(ras) \
        CDEBUG(D_READA, "lrp %lu c %lu ws %lu wl %lu nra %lu\n",        \
               ras->ras_last_readpage, ras->ras_consecutive,            \
               ras->ras_window_start, ras->ras_window_len,              \
               ras->ras_next_readahead);

static int index_in_window(unsigned long index, unsigned long point,
                           unsigned long before, unsigned long after)
{
        unsigned long start = point - before, end = point + after;

        /* clamp the window edges on unsigned under/overflow */
        if (start > point)
                start = 0;
        if (end < point)
                end = ~0;

        return start <= index && index <= end;
}
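
/* Worked examples for index_in_window(): with point = 12, before = 8 and
 * after = 8 the window is [4, 20], so index 10 is inside and index 21 is
 * not.  With point = 4 and before = 8 the subtraction would wrap below
 * zero, which is what the clamp to 0 above guards against (likewise ~0
 * for the addition overflowing at the top end). */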

static struct ll_readahead_state *ll_ras_get(struct file *f)
{
        struct ll_file_data *fd;

        fd = LUSTRE_FPRIVATE(f);
        return &fd->fd_ras;
}

void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
{
        struct ll_readahead_state *ras;

        ras = ll_ras_get(f);

        rar->lrr_reader = current;

        spin_lock(&ras->ras_lock);
        list_add(&rar->lrr_linkage, &ras->ras_read_beads);
        spin_unlock(&ras->ras_lock);
}

void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
{
        struct ll_readahead_state *ras;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        list_del_init(&rar->lrr_linkage);
        spin_unlock(&ras->ras_lock);
}

static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
{
        struct ll_ra_read *scan;

        list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
                if (scan->lrr_reader == current)
                        return scan;
        }
        return NULL;
}

struct ll_ra_read *ll_ra_read_get(struct file *f)
{
        struct ll_readahead_state *ras;
        struct ll_ra_read *bead;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        bead = ll_ra_read_get_locked(ras);
        spin_unlock(&ras->ras_lock);

        return bead;
}

static int ll_readahead(struct ll_readahead_state *ras,
                        struct obd_export *exp, struct address_space *mapping,
                        struct obd_io_group *oig, int flags)
{
        unsigned long i, start = 0, end = 0, reserved;
        struct ll_async_page *llap;
        struct page *page;
        int rc, ret = 0, match_failed = 0;
        __u64 kms;
        unsigned int gfp_mask;
        struct inode *inode;
        struct lov_stripe_md *lsm;
        struct ll_ra_read *bead;
        struct ost_lvb lvb;
        ENTRY;

        inode = mapping->host;
        lsm = ll_i2info(inode)->lli_smd;

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;
        lov_stripe_unlock(lsm);
        if (kms == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
                RETURN(0);
        }

        spin_lock(&ras->ras_lock);
        bead = ll_ra_read_get_locked(ras);
        /* reserve a part of the read-ahead window that we'll be issuing */
        if (ras->ras_window_len) {
                start = ras->ras_next_readahead;
                end = ras->ras_window_start + ras->ras_window_len - 1;
        }
        if (bead != NULL) {
                unsigned long read_end;

                start = max(start, bead->lrr_start);
                read_end = bead->lrr_start + bead->lrr_count - 1;
                if (ras->ras_consecutive > start - bead->lrr_start + 1)
                        /*
                         * if current read(2) is a part of larger sequential
                         * read, make sure read-ahead is at least to the end
                         * of the read region.
                         *
                         * XXX nikita: This doesn't work when some pages in
                         * [lrr_start, start] were cached (and, as a result,
                         * weren't counted in ->ras_consecutive).
                         */
                        end = max(end, read_end);
                else
                        /*
                         * otherwise, clip read-ahead at the read boundary.
                         */
                        end = read_end;
        }
        if (end != 0) {
                end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
                /* end + 1 may wrap to 0; keep the larger value */
                ras->ras_next_readahead = max(end, end + 1);
                RAS_CDEBUG(ras);
        }
        spin_unlock(&ras->ras_lock);

        if (end == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
                RETURN(0);
        }

        reserved = ll_ra_count_get(ll_i2sbi(inode), end - start + 1);
        if (reserved < end - start + 1)
                ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);

        gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
#ifdef __GFP_NOWARN
        gfp_mask |= __GFP_NOWARN;
#endif

        for (i = start; reserved > 0 && !match_failed && i <= end; i++) {
                /* skip locked pages from previous readpage calls */
                page = grab_cache_page_nowait_gfp(mapping, i, gfp_mask);
                if (page == NULL) {
                        ll_ra_stats_inc(mapping, RA_STAT_FAILED_GRAB_PAGE);
                        CDEBUG(D_READA, "g_c_p_n failed\n");
                        continue;
                }

                /* Check if page was truncated or reclaimed */
                if (page->mapping != mapping) {
                        ll_ra_stats_inc(mapping, RA_STAT_WRONG_GRAB_PAGE);
                        CDEBUG(D_READA, "g_c_p_n returned invalid page\n");
                        goto next_page;
                }

                /* we do this first so that we can see the page in the /proc
                 * accounting */
                llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
                if (IS_ERR(llap) || llap->llap_defer_uptodate)
                        goto next_page;

                /* skip completed pages */
                if (Page_Uptodate(page))
                        goto next_page;

                /* bail when we hit the end of the lock. */
                if ((rc = ll_page_matches(page, flags|LL_FILE_READAHEAD)) <= 0){
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "lock match failed: rc %d\n", rc);
                        ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
                        match_failed = 1;
                        goto next_page;
                }

                rc = ll_issue_page_read(exp, llap, oig, 1);
                if (rc == 0) {
                        reserved--;
                        ret++;
                        LL_CDEBUG_PAGE(D_READA| D_PAGE, page,
                                       "started read-ahead\n");
                }
                if (rc) {
        next_page:
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "skipping read-ahead\n");

                        unlock_page(page);
                }
                page_cache_release(page);
        }

        LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
        if (reserved != 0)
                ll_ra_count_put(ll_i2sbi(inode), reserved);
        if (i == end + 1 && end == (kms >> PAGE_CACHE_SHIFT))
                ll_ra_stats_inc(mapping, RA_STAT_EOF);

        /* if we didn't get to the end of the region we reserved from
         * the ras we need to go back and update the ras so that the
         * next read-ahead tries from where we left off.  we only do so
         * if the region we failed to issue read-ahead on is still ahead
         * of the app and behind the next index to start read-ahead from */
        if (i != end + 1) {
                spin_lock(&ras->ras_lock);
                if (i < ras->ras_next_readahead &&
                    index_in_window(i, ras->ras_window_start, 0,
                                    ras->ras_window_len)) {
                        ras->ras_next_readahead = i;
                        RAS_CDEBUG(ras);
                }
                spin_unlock(&ras->ras_lock);
        }

        RETURN(ret);
}

static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_window_start = index & (~(PTLRPC_MAX_BRW_PAGES - 1));
}
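
/* Worked example, assuming PTLRPC_MAX_BRW_PAGES == 256: for index 1000,
 * 1000 & ~255 = 768, so the window start is aligned down to the beginning
 * of the RPC-sized chunk containing the page. */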

/* called with the ras_lock held or from places where it doesn't matter */
static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_last_readpage = index;
        ras->ras_consecutive = 1;
        ras->ras_window_len = 0;
        ras_set_start(ras, index);
        ras->ras_next_readahead = ras->ras_window_start;

        RAS_CDEBUG(ras);
}

void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
        spin_lock_init(&ras->ras_lock);
        ras_reset(ras, 0);
        INIT_LIST_HEAD(&ras->ras_read_beads);
}

static void ras_update(struct ll_sb_info *sbi, struct ll_readahead_state *ras,
                       unsigned long index, unsigned hit)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        int zero = 0;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        spin_lock(&ras->ras_lock);

        ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);

        /* reset the read-ahead window in two cases.  First when the app seeks
         * or reads to some other part of the file.  Secondly if we get a
         * read-ahead miss that we think we've previously issued.  This can
         * be a symptom of there being so many read-ahead pages that the VM
         * is reclaiming them before we get to them. */
        if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
        } else if (!hit && ras->ras_window_len &&
                   index < ras->ras_next_readahead &&
                   index_in_window(index, ras->ras_window_start, 0,
                                   ras->ras_window_len)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
        }

        if (zero) {
                ras_reset(ras, index);
                GOTO(out_unlock, 0);
        }

        ras->ras_last_readpage = index;
        ras->ras_consecutive++;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start,
                                      ras->ras_next_readahead);

        /* wait for a few pages to arrive before issuing readahead to avoid
         * the worst overutilization */
        if (ras->ras_consecutive == 3) {
                ras->ras_window_len = PTLRPC_MAX_BRW_PAGES;
                GOTO(out_unlock, 0);
        }

        /* we need to increase the window sometimes.  we'll arbitrarily
         * do it half-way through the pages in an rpc */
        if ((index & (PTLRPC_MAX_BRW_PAGES - 1)) ==
            (PTLRPC_MAX_BRW_PAGES >> 1)) {
                ras->ras_window_len += PTLRPC_MAX_BRW_PAGES;
                ras->ras_window_len = min(ras->ras_window_len,
                                          ra->ra_max_pages);
        }

        EXIT;
 out_unlock:
        RAS_CDEBUG(ras);
        spin_unlock(&ras->ras_lock);
        spin_unlock(&sbi->ll_lock);
}
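
/* A worked example of the window arithmetic above, assuming
 * PTLRPC_MAX_BRW_PAGES == 256 and a sequential reader starting at index 0:
 * reads of pages 0 and 1 only bump ras_consecutive; the read of page 2
 * (the third in a row) opens the window at 256 pages.  Thereafter, each
 * time the reader crosses the middle of an RPC-sized chunk
 * (index & 255 == 128), the window grows by another 256 pages, capped
 * at ra_max_pages. */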

int ll_writepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_export *exp;
        struct ll_async_page *llap;
        int rc = 0;
        ENTRY;

        LASSERT(!PageDirty(page));
        LASSERT(PageLocked(page));

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        page_cache_get(page);
        if (llap->llap_write_queued) {
                LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
                rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
                                         llap->llap_cookie,
                                         ASYNC_READY | ASYNC_URGENT);
        } else {
                rc = queue_or_sync_write(exp, inode, llap, PAGE_SIZE,
                                         ASYNC_READY | ASYNC_URGENT);
        }
        if (rc)
                page_cache_release(page);
 out:
        if (rc) {
                if (!lli->lli_async_rc)
                        lli->lli_async_rc = rc;
                /* re-dirty page on error so it retries write */
                ll_redirty_page(page);
                unlock_page(page);
        }
        RETURN(rc);
}

/*
 * for now we do our readpage the same on both 2.4 and 2.6.  The kernel's
 * read-ahead assumes it is valid to issue readpage all the way up to
 * i_size, but our dlm locks make that not the case.  We disable the
 * kernel's read-ahead and do our own by walking ahead in the page cache
 * checking for dlm lock coverage.  the main difference between 2.4 and
 * 2.6 is how read-ahead gets batched and issued, but we're using our own,
 * so they look the same.
 */
int ll_readpage(struct file *filp, struct page *page)
{
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct obd_io_group *oig = NULL;
        int rc;
        ENTRY;

        LASSERT(PageLocked(page));
        LASSERT(!PageUptodate(page));
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset=%Lu=%#Lx\n",
               inode->i_ino, inode->i_generation, inode,
               (((loff_t)page->index) << PAGE_SHIFT),
               (((loff_t)page->index) << PAGE_SHIFT));
        LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);

        rc = oig_init(&oig);
        if (rc < 0)
                GOTO(out, rc);

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page, LLAP_ORIGIN_READPAGE);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
                ras_update(ll_i2sbi(inode), &fd->fd_ras, page->index,
                           llap->llap_defer_uptodate);

        if (llap->llap_defer_uptodate) {
                llap->llap_ra_used = 1;
                rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                                  fd->fd_flags);
                if (rc > 0)
                        obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
                                             NULL, oig);
                LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
                SetPageUptodate(page);
                unlock_page(page);
                GOTO(out_oig, rc = 0);
        }

        if (likely((fd->fd_flags & LL_FILE_IGNORE_LOCK) == 0)) {
                rc = ll_page_matches(page, fd->fd_flags);
                if (rc < 0) {
                        LL_CDEBUG_PAGE(D_ERROR, page,
                                       "lock match failed: rc %d\n", rc);
                        GOTO(out, rc);
                }

                if (rc == 0) {
                        CWARN("ino %lu page %lu (%llu) not covered by "
                              "a lock (mmap?).  check debug logs.\n",
                              inode->i_ino, page->index,
                              (long long)page->index << PAGE_CACHE_SHIFT);
                }
        }

        rc = ll_issue_page_read(exp, llap, oig, 0);
        if (rc)
                GOTO(out, rc);

        LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
        if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
                ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                             fd->fd_flags);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
        EXIT;
 out:
        if (rc)
                unlock_page(page);
 out_oig:
        if (oig != NULL)
                oig_release(oig);
        return rc;
}