/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Lustre Lite I/O Page Cache
 *
 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include "llite_internal.h"

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
#include <linux/buffer_head.h>
#else
#include <linux/iobuf.h>
#endif

#include <linux/fs.h>
#include <linux/stat.h>
#include <asm/uaccess.h>
#include <asm/segment.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/lustre_mds.h>
#include <linux/lustre_lite.h>
#include <linux/lustre_lib.h>
#include <linux/lustre_compat25.h>
58 * Remove page from dirty list
60 static void __set_page_clean(struct page *page)
62 struct address_space *mapping = page->mapping;
68 PGCACHE_WRLOCK(mapping);
70 list_del(&page->list);
71 list_add(&page->list, &mapping->clean_pages);
73 /* XXX doesn't inode_lock protect i_state ? */
74 inode = mapping->host;
75 if (list_empty(&mapping->dirty_pages)) {
76 CDEBUG(D_INODE, "inode clean\n");
77 inode->i_state &= ~I_DIRTY_PAGES;
80 PGCACHE_WRUNLOCK(mapping);
/*
 * Public wrapper: clear the dirty bit on @page and move it to the clean
 * list.  No-op if the page was not dirty.
 *
 * NOTE(review): the ClearPageDirty() line and the braces were missing
 * from the mangled listing; restored per the 2.4 page-flag idiom —
 * confirm against pristine source.
 */
void set_page_clean(struct page *page)
{
        if (PageDirty(page)) {
                ClearPageDirty(page);
                __set_page_clean(page);
        }
}
92 /* SYNCHRONOUS I/O to object storage for an inode */
93 static int ll_brw(int cmd, struct inode *inode, struct page *page, int flags)
95 struct ll_inode_info *lli = ll_i2info(inode);
96 struct lov_stripe_md *lsm = lli->lli_smd;
102 pg.off = ((obd_off)page->index) << PAGE_SHIFT;
104 if (cmd == OBD_BRW_WRITE && (pg.off + PAGE_SIZE > inode->i_size))
105 pg.count = inode->i_size % PAGE_SIZE;
107 pg.count = PAGE_SIZE;
109 CDEBUG(D_PAGE, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
110 cmd & OBD_BRW_WRITE ? "write" : "read", pg.count, inode->i_ino,
113 CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
115 inode->i_ino, inode, inode->i_size, page->mapping->host,
116 page->mapping->host->i_size, page->index, pg.off);
121 if (cmd == OBD_BRW_WRITE)
122 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
123 LPROC_LL_BRW_WRITE, pg.count);
125 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
126 LPROC_LL_BRW_READ, pg.count);
127 rc = obd_brw(cmd, ll_i2obdconn(inode), lsm, 1, &pg, NULL);
129 CERROR("error from obd_brw: rc = %d\n", rc);
135 * we were asked to read a single page but we're going to try and read a batch
136 * of pages all at once. this vaguely simulates 2.5's readpages.
138 static int ll_readpage(struct file *file, struct page *first_page)
140 struct inode *inode = first_page->mapping->host;
141 struct ll_inode_info *lli = ll_i2info(inode);
142 struct page *page = first_page;
143 struct list_head *pos;
144 struct brw_page *pgs;
145 unsigned long end_index, extent_end = 0;
146 struct ptlrpc_request_set *set;
147 int npgs = 0, rc = 0, max_pages;
150 LASSERT(PageLocked(page));
151 LASSERT(!PageUptodate(page));
152 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset="LPX64"\n",
153 inode->i_ino, inode->i_generation, inode,
154 (((obd_off)page->index) << PAGE_SHIFT));
155 LASSERT(atomic_read(&file->f_dentry->d_inode->i_count) > 0);
157 if (inode->i_size <= ((obd_off)page->index) << PAGE_SHIFT) {
158 CERROR("reading beyond EOF\n");
159 memset(kmap(page), 0, PAGE_SIZE);
161 SetPageUptodate(page);
166 /* try to read the file's preferred block size in a one-er */
167 end_index = first_page->index +
168 (inode->i_blksize >> PAGE_CACHE_SHIFT);
169 if (end_index > (inode->i_size >> PAGE_CACHE_SHIFT))
170 end_index = inode->i_size >> PAGE_CACHE_SHIFT;
172 max_pages = ((end_index - first_page->index) << PAGE_CACHE_SHIFT) >>
174 pgs = kmalloc(max_pages * sizeof(*pgs), GFP_USER);
179 * find how far we're allowed to read under the extent ll_file_read
182 spin_lock(&lli->lli_read_extent_lock);
183 list_for_each(pos, &lli->lli_read_extents) {
184 struct ll_read_extent *rextent;
185 rextent = list_entry(pos, struct ll_read_extent, re_lli_item);
186 if (rextent->re_task != current)
189 if (rextent->re_extent.end + PAGE_SIZE < rextent->re_extent.end)
190 /* extent wrapping */
193 extent_end = (rextent->re_extent.end + PAGE_SIZE)
195 /* 32bit indexes, 64bit extents.. */
196 if (((u64)extent_end >> PAGE_CACHE_SHIFT) <
197 rextent->re_extent.end)
202 spin_unlock(&lli->lli_read_extent_lock);
204 if (extent_end == 0) {
205 static long next_print;
206 if (time_after(jiffies, next_print)) {
207 next_print = jiffies + 30 * HZ;
208 CDEBUG(D_INODE, "mmap readpage - check locks\n");
210 end_index = page->index + 1;
211 } else if (extent_end < end_index)
212 end_index = extent_end;
214 /* to balance the find_get_page ref the other pages get that is
215 * decrefed on teardown.. */
216 page_cache_get(page);
218 unsigned long index ;
221 pgs[npgs].off = ((obd_off)page->index) << PAGE_CACHE_SHIFT;
223 pgs[npgs].count = PAGE_SIZE;
224 /* XXX Workaround for BA OSTs returning short reads at EOF.
225 * The linux OST will return the full page, zero-filled at the
226 * end, which will just overwrite the data we set here. Bug
227 * 593 relates to fixing this properly.
229 if (inode->i_size < pgs[npgs].off + PAGE_SIZE) {
230 int count = inode->i_size - pgs[npgs].off;
231 void *addr = kmap(page);
232 pgs[npgs].count = count;
233 //POISON(addr, 0x7c, count);
234 memset(addr + count, 0, PAGE_SIZE - count);
239 if (npgs == max_pages)
243 * find pages ahead of us that we can read in.
244 * grab_cache_page waits on pages that are locked so
245 * we first try find_get_page, which doesn't. this stops
246 * the worst case behaviour of racing threads waiting on
247 * each other, but doesn't remove it entirely.
249 for (index = page->index + 1, page = NULL;
250 page == NULL && index < end_index; index++) {
252 /* see if the page already exists and needs updating */
253 page = find_get_page(inode->i_mapping, index);
255 if (Page_Uptodate(page) || TryLockPage(page))
257 if (!page->mapping || Page_Uptodate(page))
260 /* ok, we have to create it.. */
261 page = grab_cache_page(inode->i_mapping, index);
264 if (Page_Uptodate(page))
273 page_cache_release(page);
279 set = ptlrpc_prep_set();
281 CERROR("ENOMEM allocing request set\n");
284 rc = obd_brw_async(OBD_BRW_READ, ll_i2obdconn(inode),
285 ll_i2info(inode)->lli_smd, npgs, pgs,
288 rc = ptlrpc_set_wait(set);
289 ptlrpc_set_destroy(set);
290 if (rc && rc != -EIO)
291 CERROR("error from obd_brw_async: rc = %d\n", rc);
298 SetPageUptodate(page);
300 page_cache_release(page);
307 /* this isn't where truncate starts. roughly:
308 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate
309 * we grab the lock back in setattr_raw to avoid races. */
310 void ll_truncate(struct inode *inode)
312 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
313 struct obdo oa = {0};
316 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
317 inode->i_generation, inode);
320 /* object not yet allocated */
321 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
326 /* vmtruncate will just throw away our dirty pages, make sure
327 * we don't think they're still dirty, being careful to round
328 * i_size to the first whole page that was tossed */
329 err = ll_clear_dirty_pages(ll_i2obdconn(inode), lsm,
330 (inode->i_size + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT,
333 oa.o_id = lsm->lsm_object_id;
334 oa.o_mode = inode->i_mode;
335 oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE | OBD_MD_FLTYPE;
337 CDEBUG(D_INFO, "calling punch for "LPX64" (all bytes after %Lu)\n",
338 oa.o_id, inode->i_size);
340 /* truncate == punch from new size to absolute end of file */
341 err = obd_punch(ll_i2obdconn(inode), &oa, lsm, inode->i_size,
342 OBD_OBJECT_EOF, NULL);
344 CERROR("obd_truncate fails (%d) ino %lu\n", err, inode->i_ino);
346 obdo_to_inode(inode, &oa, oa.o_valid);
352 //#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
354 static int ll_prepare_write(struct file *file, struct page *page, unsigned from,
357 struct inode *inode = page->mapping->host;
358 struct ll_inode_info *lli = ll_i2info(inode);
359 struct lov_stripe_md *lsm = lli->lli_smd;
360 obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
365 if (!PageLocked(page))
368 if (PageUptodate(page))
371 //POISON(addr + from, 0xca, to - from);
373 /* Check to see if we should return -EIO right away */
376 pg.count = PAGE_SIZE;
378 rc = obd_brw(OBD_BRW_CHECK, ll_i2obdconn(inode), lsm, 1, &pg, NULL);
382 /* We're completely overwriting an existing page, so _don't_ set it up
383 * to date until commit_write */
384 if (from == 0 && to == PAGE_SIZE)
387 /* If are writing to a new page, no need to read old data.
388 * the extent locking and getattr procedures in ll_file_write have
389 * guaranteed that i_size is stable enough for our zeroing needs */
390 if (inode->i_size <= offset) {
391 memset(kmap(page), 0, PAGE_SIZE);
393 GOTO(prepare_done, rc = 0);
396 rc = ll_brw(OBD_BRW_READ, inode, page, 0);
401 SetPageUptodate(page);
407 * background file writeback. This is called regularly from kupdated to write
408 * dirty data, from kswapd when memory is low, and from filemap_fdatasync when
409 * super blocks or inodes are synced..
411 * obd_brw errors down in _batch_writepage are ignored, so pages are always
412 * unlocked. Also, there is nobody to return an error code to from here - the
413 * application may not even be running anymore.
415 * this should be async so that things like kswapd can have a chance to
416 * free some more pages that our allocating writeback may need, but it isn't
419 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
420 static unsigned long ll_local_cache_dirty_pages;
421 static unsigned long ll_max_dirty_pages = 20 * 1024 * 1024 / PAGE_SIZE;
423 static spinlock_t ll_local_cache_page_count_lock = SPIN_LOCK_UNLOCKED;
425 int ll_rd_dirty_pages(char *page, char **start, off_t off, int count, int *eof,
428 unsigned long dirty_count;
429 spin_lock(&ll_local_cache_page_count_lock);
430 dirty_count = ll_local_cache_dirty_pages;
431 spin_unlock(&ll_local_cache_page_count_lock);
432 return snprintf(page, count, "%lu\n", dirty_count);
435 int ll_rd_max_dirty_pages(char *page, char **start, off_t off, int count,
436 int *eof, void *data)
438 unsigned long max_dirty;
439 spin_lock(&ll_local_cache_page_count_lock);
440 max_dirty = ll_max_dirty_pages;
441 spin_unlock(&ll_local_cache_page_count_lock);
442 return snprintf(page, count, "%lu\n", max_dirty);
445 int ll_wr_max_dirty_pages(struct file *file, const char *buffer,
446 unsigned long count, void *data)
448 unsigned long max_dirty;
449 signed long max_dirty_signed;
450 char kernbuf[20], *end;
452 if (count > (sizeof(kernbuf) - 1))
455 if (copy_from_user(kernbuf, buffer, count))
458 kernbuf[count] = '\0';
460 max_dirty_signed = simple_strtol(kernbuf, &end, 0);
463 max_dirty = (unsigned long)max_dirty_signed;
466 if (max_dirty < ll_local_cache_dirty_pages)
467 flush_to_new_max_dirty();
470 spin_lock(&ll_local_cache_page_count_lock);
471 CDEBUG(D_CACHE, "changing max_dirty from %lu to %lu\n",
472 ll_max_dirty_pages, max_dirty);
473 ll_max_dirty_pages = max_dirty;
474 spin_unlock(&ll_local_cache_page_count_lock);
478 static int ll_local_cache_full(void)
481 spin_lock(&ll_local_cache_page_count_lock);
482 if (ll_max_dirty_pages &&
483 ll_local_cache_dirty_pages >= ll_max_dirty_pages) {
486 spin_unlock(&ll_local_cache_page_count_lock);
487 /* XXX instrument? */
488 /* XXX trigger async writeback when full, or 75% of full? */
492 static void ll_local_cache_flushed_pages(unsigned long pgcount)
494 unsigned long dirty_count;
495 spin_lock(&ll_local_cache_page_count_lock);
496 dirty_count = ll_local_cache_dirty_pages;
497 ll_local_cache_dirty_pages -= pgcount;
498 CDEBUG(D_CACHE, "dirty pages: %lu->%lu)\n",
499 dirty_count, ll_local_cache_dirty_pages);
500 spin_unlock(&ll_local_cache_page_count_lock);
501 LASSERT(dirty_count >= pgcount);
504 static void ll_local_cache_dirtied_pages(unsigned long pgcount)
506 unsigned long dirty_count;
507 spin_lock(&ll_local_cache_page_count_lock);
508 dirty_count = ll_local_cache_dirty_pages;
509 ll_local_cache_dirty_pages += pgcount;
510 CDEBUG(D_CACHE, "dirty pages: %lu->%lu\n",
511 dirty_count, ll_local_cache_dirty_pages);
512 spin_unlock(&ll_local_cache_page_count_lock);
513 /* XXX track maximum cached, report to lprocfs */
516 int ll_clear_dirty_pages(struct lustre_handle *conn, struct lov_stripe_md *lsm,
517 unsigned long start, unsigned long end)
519 unsigned long cleared;
523 rc = obd_clear_dirty_pages(conn, lsm, start, end, &cleared);
525 ll_local_cache_flushed_pages(cleared);
529 int ll_mark_dirty_page(struct lustre_handle *conn, struct lov_stripe_md *lsm,
535 if (ll_local_cache_full())
538 rc = obd_mark_page_dirty(conn, lsm, index);
540 ll_local_cache_dirtied_pages(1);
544 static int ll_writepage(struct page *page)
546 struct inode *inode = page->mapping->host;
549 CDEBUG(D_CACHE, "page %p [lau %d] inode %p\n", page,
550 PageLaunder(page), inode);
551 LASSERT(PageLocked(page));
553 /* XXX should obd_brw errors trickle up? */
554 ll_batch_writepage(inode, page);
559 * we really don't want to start writeback here, we want to give callers some
560 * time to further dirty the pages before we write them out.
562 static int ll_commit_write(struct file *file, struct page *page,
563 unsigned from, unsigned to)
565 struct inode *inode = page->mapping->host;
570 LASSERT(inode == file->f_dentry->d_inode);
571 LASSERT(PageLocked(page));
573 CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
574 inode, page, from, to, page->index);
575 if (!PageDirty(page)) {
576 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
577 LPROC_LL_DIRTY_MISSES);
578 rc = ll_mark_dirty_page(ll_i2obdconn(inode),
579 ll_i2info(inode)->lli_smd,
581 if (rc < 0 && rc != -EDQUOT)
582 RETURN(rc); /* XXX lproc counter here? */
584 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
585 LPROC_LL_DIRTY_HITS);
588 size = (((obd_off)page->index) << PAGE_SHIFT) + to;
589 if (size > inode->i_size)
590 inode->i_size = size;
592 SetPageUptodate(page);
593 set_page_dirty(page);
595 /* This means that we've hit either the local cache limit or the limit
596 * of the OST's grant. */
598 int rc = ll_batch_writepage(inode, page);
599 lock_page(page); /* caller expects to unlock */
604 } /* ll_commit_write */
606 static int ll_writepage(struct page *page,
607 struct writeback_control *wbc)
612 static int ll_commit_write(struct file *file, struct page *page,
613 unsigned from, unsigned to)
619 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
620 static int ll_direct_IO(int rw, struct inode *inode, struct kiobuf *iobuf,
621 unsigned long blocknr, int blocksize)
623 struct ll_inode_info *lli = ll_i2info(inode);
624 struct lov_stripe_md *lsm = lli->lli_smd;
625 struct brw_page *pga;
626 struct ptlrpc_request_set *set;
627 int length, i, flags, rc = 0;
631 if (!lsm || !lsm->lsm_object_id)
634 if ((iobuf->offset & (blocksize - 1)) ||
635 (iobuf->length & (blocksize - 1)))
638 set = ptlrpc_prep_set();
642 OBD_ALLOC(pga, sizeof(*pga) * iobuf->nr_pages);
644 ptlrpc_set_destroy(set);
648 flags = (rw == WRITE ? OBD_BRW_CREATE : 0) /* | OBD_BRW_DIRECTIO */;
649 offset = ((obd_off)blocknr << inode->i_blkbits);
650 length = iobuf->length;
652 for (i = 0, length = iobuf->length; length > 0;
653 length -= pga[i].count, offset += pga[i].count, i++) { /*i last!*/
654 pga[i].pg = iobuf->maplist[i];
656 /* To the end of the page, or the length, whatever is less */
657 pga[i].count = min_t(int, PAGE_SIZE - (offset & ~PAGE_MASK),
661 //POISON(kmap(iobuf->maplist[i]), 0xc5, PAGE_SIZE);
662 //kunmap(iobuf->maplist[i]);
667 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
668 LPROC_LL_DIRECT_WRITE, iobuf->length);
670 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
671 LPROC_LL_DIRECT_READ, iobuf->length);
672 rc = obd_brw_async(rw == WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
673 ll_i2obdconn(inode), lsm, iobuf->nr_pages, pga, set,
676 CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR,
677 "error from obd_brw_async: rc = %d\n", rc);
679 rc = ptlrpc_set_wait(set);
681 CERROR("error from callback: rc = %d\n", rc);
683 ptlrpc_set_destroy(set);
687 OBD_FREE(pga, sizeof(*pga) * iobuf->nr_pages);
694 struct address_space_operations ll_aops = {
695 readpage: ll_readpage,
696 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0))
697 direct_IO: ll_direct_IO,
699 writepage: ll_writepage,
700 sync_page: block_sync_page,
701 prepare_write: ll_prepare_write,
702 commit_write: ll_commit_write,