1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Lustre Lite I/O Page Cache
6 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
8 * This file is part of Lustre, http://www.lustre.org.
10 * Lustre is free software; you can redistribute it and/or
11 * modify it under the terms of version 2 of the GNU General Public
12 * License as published by the Free Software Foundation.
14 * Lustre is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with Lustre; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 #include <linux/config.h>
25 #include <linux/kernel.h>
27 #include <linux/string.h>
28 #include <linux/stat.h>
29 #include <linux/errno.h>
30 #include <linux/smp_lock.h>
31 #include <linux/unistd.h>
32 #include <linux/version.h>
33 #include <asm/system.h>
34 #include <asm/uaccess.h>
38 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
39 #include <linux/buffer_head.h>
41 #include <linux/iobuf.h>
43 #include <linux/stat.h>
44 #include <asm/uaccess.h>
45 #include <asm/segment.h>
47 #include <linux/pagemap.h>
48 #include <linux/smp_lock.h>
50 #define DEBUG_SUBSYSTEM S_LLITE
52 #include <linux/lustre_mds.h>
53 #include <linux/lustre_lite.h>
54 #include <linux/lustre_lib.h>
57 * Remove page from dirty list
/*
 * Move @page from its mapping's dirty list onto the clean list and, once the
 * mapping has no dirty pages left, clear I_DIRTY_PAGES on the owning inode.
 *
 * NOTE(review): this listing has gaps (opening/closing braces and some locals,
 * e.g. the `inode` declaration, are on elided lines).  On kernels <= 2.5.0 the
 * global pagecache_lock guards the list manipulation; on newer kernels no
 * visible lock is taken here — presumably the caller provides the locking.
 * TODO confirm against the full file.
 */
59 static void __set_page_clean(struct page *page)
61 struct address_space *mapping = page->mapping;
67 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0))
68 spin_lock(&pagecache_lock);
/* Re-home the page onto the mapping's clean list. */
71 list_del(&page->list);
72 list_add(&page->list, &mapping->clean_pages);
74 /* XXX doesn't inode_lock protect i_state ? */
75 inode = mapping->host;
/* Last dirty page gone: the inode itself no longer has dirty pages. */
76 if (list_empty(&mapping->dirty_pages)) {
77 CDEBUG(D_INODE, "inode clean\n");
78 inode->i_state &= ~I_DIRTY_PAGES;
80 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0))
81 spin_unlock(&pagecache_lock);
/*
 * Public wrapper: clear a page's dirty state and fix up the mapping lists.
 * Only acts on pages that are actually dirty.
 * NOTE(review): the PG_dirty bit itself is presumably cleared on an elided
 * line (original line 89) before/after __set_page_clean — confirm in full file.
 */
86 void set_page_clean(struct page *page)
88 if (PageDirty(page)) {
90 __set_page_clean(page);
94 /* SYNCHRONOUS I/O to object storage for an inode */
/*
 * Issue a single-page bulk read/write (@cmd is OBD_BRW_READ/OBD_BRW_WRITE)
 * for @page of @inode and wait for completion via ll_brw_sync_wait.
 * Returns 0 on success or a negative error code (error paths partly elided
 * in this listing; `pg` and `rc` are declared on elided lines).
 */
95 static int ll_brw(int cmd, struct inode *inode, struct page *page, int flags)
97 struct ll_inode_info *lli = ll_i2info(inode);
98 struct lov_stripe_md *lsm = lli->lli_smd;
99 struct obd_brw_set *set;
104 set = obd_brw_set_new();
/* File offset of this page.  For a write that extends past i_size, only
 * write the valid tail of the final page. */
109 pg.off = ((obd_off)page->index) << PAGE_SHIFT;
111 if (cmd == OBD_BRW_WRITE && (pg.off + PAGE_SIZE > inode->i_size))
/* NOTE(review): i_size % PAGE_SIZE is 0 when i_size is page-aligned,
 * which would yield a zero-length write — the ZERO COUNT error report
 * below suggests this case was known; confirm intended handling. */
112 pg.count = inode->i_size % PAGE_SIZE;
114 pg.count = PAGE_SIZE;
116 CDEBUG(D_PAGE, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
117 cmd & OBD_BRW_WRITE ? "write" : "read", pg.count, inode->i_ino,
/* Diagnostic for the degenerate zero-length transfer case. */
120 CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
122 inode->i_ino, inode, inode->i_size, page->mapping->host,
123 page->mapping->host->i_size, page->index, pg.off);
/* Fire the bulk RPC and synchronously wait for the completion callback. */
128 set->brw_callback = ll_brw_sync_wait;
129 rc = obd_brw(cmd, ll_i2obdconn(inode), lsm, 1, &pg, set, NULL);
132 CERROR("error from obd_brw: rc = %d\n", rc);
134 rc = ll_brw_sync_wait(set, CB_PHASE_START);
136 CERROR("error from callback: rc = %d\n", rc);
/* Drop our reference on the brw set (frees it once I/O is done). */
138 obd_brw_set_decref(set);
144 * we were asked to read a single page but we're going to try and read a batch
145 * of pages all at once. this vaguely simulates 2.5's readpages.
/*
 * ->readpage for Lustre Lite: reads @first_page and opportunistically
 * batches up following pages (bounded by PTL_MD_MAX_IOV, file size, and the
 * extent registered by ll_file_read) into one obd_brw read.
 * NOTE(review): several control-flow lines (loop continuations, goto labels,
 * error branches) are elided in this listing — the annotations below describe
 * only what is visible.
 */
147 static int ll_readpage(struct file *file, struct page *first_page)
149 struct inode *inode = first_page->mapping->host;
150 struct ll_inode_info *lli = ll_i2info(inode);
151 struct page *page = first_page;
152 struct list_head *pos;
153 struct brw_page *pgs;
154 struct obd_brw_set *set;
155 unsigned long end_index, extent_end = 0;
156 int npgs = 0, rc = 0;
/* VFS hands us the page locked and not yet up to date. */
159 LASSERT(PageLocked(page));
160 LASSERT(!PageUptodate(page));
161 CDEBUG(D_VFSTRACE, "VFS Op\n");
/* Read entirely beyond EOF: just hand back a zeroed, uptodate page. */
163 if (inode->i_size <= ((obd_off)page->index) << PAGE_SHIFT) {
164 CERROR("reading beyond EOF\n");
165 memset(kmap(page), 0, PAGE_SIZE);
167 SetPageUptodate(page);
/* One brw_page slot per wire-transferable page. */
172 pgs = kmalloc(PTL_MD_MAX_IOV * sizeof(*pgs), GFP_USER);
175 set = obd_brw_set_new();
177 GOTO(out_pgs, rc = -ENOMEM);
179 /* arbitrarily try to read-ahead 8 times what we can pass on
180 * the wire at once, clamped to file size */
181 end_index = first_page->index +
182 8 * ((PTL_MD_MAX_IOV * PAGE_SIZE)>>PAGE_CACHE_SHIFT);
183 if ( end_index > inode->i_size >> PAGE_CACHE_SHIFT )
184 end_index = inode->i_size >> PAGE_CACHE_SHIFT;
187 * find how far we're allowed to read under the extent ll_file_read
/* Walk the per-inode list of read extents looking for the one registered
 * by the current task; its end bounds our read-ahead window. */
190 spin_lock(&lli->lli_read_extent_lock);
191 list_for_each(pos, &lli->lli_read_extents) {
192 struct ll_read_extent *rextent;
193 rextent = list_entry(pos, struct ll_read_extent, re_lli_item);
194 if ( rextent->re_task != current )
/* Guard against 64-bit wrap when rounding the extent end up. */
197 if (rextent->re_extent.end + PAGE_SIZE < rextent->re_extent.end)
198 /* extent wrapping */
201 extent_end = ( rextent->re_extent.end + PAGE_SIZE )
203 /* 32bit indexes, 64bit extents.. */
204 if ( ((u64)extent_end >> PAGE_CACHE_SHIFT ) <
205 rextent->re_extent.end )
210 spin_unlock(&lli->lli_read_extent_lock);
/* No matching extent: read only the single requested page. */
212 if ( extent_end == 0 ) {
213 CERROR("readpage outside ll_file_read, no lock held?\n");
214 end_index = page->index + 1;
215 } else if ( extent_end < end_index )
216 end_index = extent_end;
218 /* to balance the find_get_page ref the other pages get that is
219 * decrefed on teardown.. */
220 page_cache_get(page);
222 unsigned long index ;
/* Record the current page in the batch; offset in page-cache units. */
225 pgs[npgs].off = ((obd_off)page->index) << PAGE_CACHE_SHIFT;
227 pgs[npgs].count = PAGE_SIZE;
228 /* XXX Workaround for BA OSTs returning short reads at EOF.
229 * The linux OST will return the full page, zero-filled at the
230 * end, which will just overwrite the data we set here. Bug
231 * 593 relates to fixing this properly.
/* Final partial page: shorten the transfer and pre-zero the tail. */
233 if (inode->i_size < pgs[npgs].off + PAGE_SIZE) {
234 int count = inode->i_size - pgs[npgs].off;
235 void *addr = kmap(page);
236 pgs[npgs].count = count;
237 //POISON(addr, 0x7c, count);
238 memset(addr + count, 0, PAGE_SIZE - count);
/* Wire limit reached: stop gathering pages. */
243 if ( npgs == PTL_MD_MAX_IOV )
247 * find pages ahead of us that we can read in.
248 * grab_cache_page waits on pages that are locked so
249 * we first try find_get_page, which doesn't. this stops
250 * the worst case behaviour of racing threads waiting on
251 * each other, but doesn't remove it entirely.
253 for ( index = page->index + 1, page = NULL ;
254 page == NULL && index < end_index ; index++ ) {
256 /* see if the page already exists and needs updating */
257 page = find_get_page(inode->i_mapping, index);
/* Skip pages that are already uptodate or locked by someone else. */
259 if ( Page_Uptodate(page) || TryLockPage(page) )
261 if ( !page->mapping || Page_Uptodate(page))
264 /* ok, we have to create it.. */
265 page = grab_cache_page(inode->i_mapping, index);
268 if ( Page_Uptodate(page) )
277 page_cache_release(page);
/* Submit the whole batch as one synchronous bulk read. */
283 set->brw_callback = ll_brw_sync_wait;
284 rc = obd_brw(OBD_BRW_READ, ll_i2obdconn(inode),
285 ll_i2info(inode)->lli_smd, npgs, pgs, set, NULL);
287 CERROR("error from obd_brw: rc = %d\n", rc);
289 rc = ll_brw_sync_wait(set, CB_PHASE_START);
291 CERROR("error from callback: rc = %d\n", rc);
293 obd_brw_set_decref(set);
/* Teardown: mark pages uptodate (on success, presumably — the branch is
 * elided) and drop the references taken while building the batch. */
295 while ( --npgs > -1 ) {
299 SetPageUptodate(page);
301 page_cache_release(page);
/*
 * ->truncate: punch away everything from the (already updated) i_size to the
 * end of the object, under a PW extent lock covering [i_size, EOF].
 * NOTE(review): `err` is declared and the lsm-NULL early return is on elided
 * lines; the "object not yet allocated" comment below belongs to that path.
 */
308 void ll_truncate(struct inode *inode)
310 struct obdo oa = {0};
311 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
312 struct lustre_handle lockh = { 0, 0 };
/* Lock (and punch) from the new size to the absolute end of the object. */
313 struct ldlm_extent extent = {inode->i_size, OBD_OBJECT_EOF};
318 /* object not yet allocated */
319 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
/* Identify the object for the punch RPC. */
324 oa.o_id = lsm->lsm_object_id;
325 oa.o_mode = inode->i_mode;
326 oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE | OBD_MD_FLTYPE;
328 CDEBUG(D_VFSTRACE, "VFS Op\n");
329 CDEBUG(D_INFO, "calling punch for "LPX64" (all bytes after %Lu)\n",
330 oa.o_id, inode->i_size);
332 /* i_size has already been set to the new size */
/* Take the PW extent lock without validating cached pages under it. */
333 err = ll_extent_lock_no_validate(NULL, inode, lsm, LCK_PW,
335 if (err != ELDLM_OK && err != ELDLM_LOCK_MATCHED) {
340 /* truncate == punch from new size to absolute end of file */
341 err = obd_punch(ll_i2obdconn(inode), &oa, lsm, inode->i_size,
342 OBD_OBJECT_EOF, NULL);
344 CERROR("obd_truncate fails (%d) ino %lu\n", err, inode->i_ino);
/* Pull any attributes the OST updated (times, blocks) back into the inode. */
346 obdo_to_inode(inode, &oa, oa.o_valid);
348 err = ll_extent_unlock(NULL, inode, lsm, LCK_PW, &lockh);
350 CERROR("ll_extent_unlock failed: %d\n", err);
356 //#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/*
 * ->prepare_write: make sure the [from, to) region of @page can be written
 * by the caller.  New-past-EOF pages are zeroed in memory; otherwise the old
 * contents are read in synchronously via ll_brw.
 * NOTE(review): the `to` parameter, `rc`/`addr` declarations and several
 * early-return lines are elided in this listing.
 */
358 static int ll_prepare_write(struct file *file, struct page *page, unsigned from,
361 struct inode *inode = page->mapping->host;
362 obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
/* Opportunistically push out dirty pages for this superblock. */
366 ll_check_dirty(inode->i_sb);
368 if (!PageLocked(page))
/* Already uptodate: nothing to read. */
371 if (PageUptodate(page))
374 //POISON(addr + from, 0xca, to - from);
376 /* We're completely overwriting an existing page, so _don't_ set it up
377 * to date until commit_write */
378 if (from == 0 && to == PAGE_SIZE)
380 CDEBUG(D_VFSTRACE, "VFS Op\n");
382 /* If are writing to a new page, no need to read old data.
383 * the extent locking and getattr procedures in ll_file_write have
384 * guaranteed that i_size is stable enough for our zeroing needs */
385 if (inode->i_size <= offset) {
386 memset(kmap(page), 0, PAGE_SIZE);
388 GOTO(prepare_done, rc = 0);
/* Partial overwrite of existing data: fetch the current page contents. */
391 rc = ll_brw(OBD_BRW_READ, inode, page, 0);
/* prepare_done (label elided): page now holds valid data. */
396 SetPageUptodate(page);
402 * background file writeback. This is called regularly from kupdated to write
403 * dirty data, from kswapd when memory is low, and from filemap_fdatasync when
404 * super blocks or inodes are synced..
406 * obd_brw errors down in _batch_writepage are ignored, so pages are always
407 * unlocked. Also, there is nobody to return an error code to from here - the
408 * application may not even be running anymore.
410 * this should be async so that things like kswapd can have a chance to
411 * free some more pages that our allocating writeback may need, but it isn't
/*
 * ->writepage: delegate the actual (synchronous) write of @page to
 * ll_batch_writepage.  The return statement is on an elided line; errors from
 * the bulk write are deliberately not propagated (see comment block above).
 */
414 static int ll_writepage(struct page *page)
416 struct inode *inode = page->mapping->host;
419 CDEBUG(D_CACHE, "page %p [lau %d] inode %p\n", page,
420 PageLaunder(page), inode);
421 CDEBUG(D_VFSTRACE, "VFS Op\n");
422 LASSERT(PageLocked(page));
424 /* XXX should obd_brw errors trickle up? */
425 ll_batch_writepage(inode, page);
430 * we really don't want to start writeback here, we want to give callers some
431 * time to further dirty the pages before we write them out.
/*
 * ->commit_write: the caller has copied user data into [from, to) of @page.
 * Mark the page uptodate and dirty (writeback happens later — see comment
 * above) and extend the in-core i_size if the write grew the file.
 * NOTE(review): the `size` declaration and the return are on elided lines.
 */
433 static int ll_commit_write(struct file *file, struct page *page,
434 unsigned from, unsigned to)
436 struct inode *inode = page->mapping->host;
440 LASSERT(inode == file->f_dentry->d_inode);
441 LASSERT(PageLocked(page));
443 CDEBUG(D_VFSTRACE, "VFS Op\n");
444 CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
445 inode, page, from, to, page->index);
447 /* to match full page case in prepare_write */
448 SetPageUptodate(page);
449 /* mark the page dirty, put it on mapping->dirty,
450 * mark the inode PAGES_DIRTY, put it on sb->dirty */
451 set_page_dirty(page);
453 /* this is matched by a hack in obdo_to_inode at the moment */
/* Byte offset just past the data written; grow i_size if we extended. */
454 size = (((obd_off)page->index) << PAGE_SHIFT) + to;
455 if (size > inode->i_size)
456 inode->i_size = size;
459 } /* ll_commit_write */
461 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/*
 * 2.4 kiobuf-based ->direct_IO: translate the kiobuf's mapped pages into a
 * brw_page array and issue one synchronous bulk read/write, bypassing the
 * page cache.  Requires block-aligned offset/length and blocksize==PAGE_SIZE.
 * NOTE(review): `offset` and the alignment/allocation error returns are on
 * elided lines; return value on success is presumably the byte count or 0 —
 * confirm in full file.
 */
462 static int ll_direct_IO(int rw, struct inode *inode, struct kiobuf *iobuf,
463 unsigned long blocknr, int blocksize)
465 struct ll_inode_info *lli = ll_i2info(inode);
466 struct lov_stripe_md *lsm = lli->lli_smd;
467 struct brw_page *pga;
468 struct obd_brw_set *set;
470 int length, i, flags, rc = 0;
473 CDEBUG(D_VFSTRACE, "VFS Op\n");
/* No object behind this inode yet: nothing to transfer. */
474 if (!lsm || !lsm->lsm_object_id)
/* Direct I/O must be block-aligned in both offset and length. */
477 if ((iobuf->offset & (blocksize - 1)) ||
478 (iobuf->length & (blocksize - 1)))
482 /* XXX Keep here until we find ia64 problem, it crashes otherwise */
483 if (blocksize != PAGE_SIZE) {
484 CERROR("direct_IO blocksize != PAGE_SIZE\n");
489 set = obd_brw_set_new();
/* One brw_page descriptor per mapped user page. */
493 OBD_ALLOC(pga, sizeof(*pga) * iobuf->nr_pages);
495 obd_brw_set_decref(set);
/* Writes may need to create objects on the OST. */
499 flags = (rw == WRITE ? OBD_BRW_CREATE : 0) /* | OBD_BRW_DIRECTIO */;
500 offset = (blocknr << inode->i_blkbits);
501 length = iobuf->length;
/* Walk the kiobuf page list, carving the transfer into per-page chunks. */
503 for (i = 0, length = iobuf->length; length > 0;
504 length -= pga[i].count, offset += pga[i].count, i++) { /*i last!*/
505 pga[i].pg = iobuf->maplist[i];
507 /* To the end of the page, or the length, whatever is less */
508 pga[i].count = min_t(int, PAGE_SIZE - (offset & ~PAGE_MASK),
512 //POISON(kmap(iobuf->maplist[i]), 0xc5, PAGE_SIZE);
513 //kunmap(iobuf->maplist[i]);
/* Submit and synchronously wait, as in ll_brw/ll_readpage. */
517 set->brw_callback = ll_brw_sync_wait;
518 rc = obd_brw(rw == WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
519 ll_i2obdconn(inode), lsm, iobuf->nr_pages, pga, set, NULL);
/* ENOSPC is an expected condition, not an internal error. */
521 CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR,
522 "error from obd_brw: rc = %d\n", rc);
524 rc = ll_brw_sync_wait(set, CB_PHASE_START);
526 CERROR("error from callback: rc = %d\n", rc);
528 obd_brw_set_decref(set);
532 OBD_FREE(pga, sizeof(*pga) * iobuf->nr_pages);
/*
 * Address-space operations wired into Lustre Lite regular-file inodes.
 * Uses old GNU-style `field:` designated initializers.  direct_IO is only
 * available on the 2.4 kiobuf interface (the matching #endif and closing
 * brace are on elided lines of this listing).
 */
539 struct address_space_operations ll_aops = {
540 readpage: ll_readpage,
541 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0))
542 direct_IO: ll_direct_IO,
544 writepage: ll_writepage,
545 sync_page: block_sync_page,
546 prepare_write: ll_prepare_write,
547 commit_write: ll_commit_write,