2 * Lustre Light I/O Page Cache
4 * Copyright (C) 2002, Cluster File Systems, Inc.
8 #include <linux/config.h>
9 #include <linux/kernel.h>
11 #include <linux/string.h>
12 #include <linux/stat.h>
13 #include <linux/errno.h>
14 #include <linux/locks.h>
15 #include <linux/unistd.h>
17 #include <asm/system.h>
18 #include <asm/uaccess.h>
21 #include <linux/stat.h>
22 #include <asm/uaccess.h>
23 #include <linux/vmalloc.h>
24 #include <asm/segment.h>
26 #include <linux/pagemap.h>
27 #include <linux/smp_lock.h>
29 #include <linux/obd_support.h>
30 #include <linux/lustre_lib.h>
31 #include <linux/lustre_idl.h>
32 #include <linux/lustre_mds.h>
33 #include <linux/lustre_light.h>
36 * Add a page to the dirty page list.
/*
 * Mark a page dirty: set PG_dirty, move the page from whichever mapping
 * list it is on to the mapping's dirty_pages list, and flag the owning
 * inode as having dirty pages (Linux 2.4 page-cache bookkeeping).
 * NOTE(review): this listing elides lines (original 42-43, 48-55), so the
 * body shown here is incomplete — closing braces and any NULL-mapping
 * guard are not visible.
 */
38 void set_page_dirty(struct page *page)
/* test_and_set_bit makes this idempotent: only the first caller to
 * dirty the page pays for the list move below. */
40 if (!test_and_set_bit(PG_dirty, &page->flags)) {
41 struct address_space *mapping = page->mapping;
/* pagecache_lock (global in 2.4) protects the per-mapping page lists. */
44 spin_lock(&pagecache_lock);
45 list_del(&page->list);
46 list_add(&page->list, &mapping->dirty_pages);
47 spin_unlock(&pagecache_lock);
/* Propagate dirtiness up to the inode so sync paths find it. */
50 mark_inode_dirty_pages(mapping->host);
56 * Remove page from dirty list
/*
 * Move a page to its mapping's clean_pages list; if that leaves the
 * mapping with no dirty pages, clear I_DIRTY_PAGES on the inode.
 * Caller is expected to have already cleared/checked PG_dirty (see
 * set_page_clean below). Listing elides lines 61-65 and 69, so local
 * declarations (e.g. of 'inode') are not visible here.
 */
60 struct address_space *mapping = page->mapping;
66 spin_lock(&pagecache_lock);
67 list_del(&page->list);
68 list_add(&page->list, &mapping->clean_pages);
70 inode = mapping->host;
/* Last dirty page gone -> the inode's data is clean again. */
71 if (list_empty(&mapping->dirty_pages)) {
72 CDEBUG(D_INODE, "inode clean\n");
73 inode->i_state &= ~I_DIRTY_PAGES;
75 spin_unlock(&pagecache_lock);
/*
 * Public wrapper: only pay for the list manipulation in
 * __set_page_clean() when the page is actually dirty.
 * (Elided lines presumably clear PG_dirty — TODO confirm.)
 */
79 inline void set_page_clean(struct page *page)
81 if (PageDirty(page)) {
83 __set_page_clean(page);
87 /* SYNCHRONOUS I/O to object storage for an inode -- object attr will be updated too */
88 static int ll_brw(int rw, struct inode *inode, struct page *page, int create)
90 obd_count num_obdo = 1;
91 obd_count bufs_per_obdo = 1;
93 obd_size count = PAGE_SIZE;
94 obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
95 obd_flag flags = create ? OBD_BRW_CREATE : 0;
105 oa->o_valid = OBD_MD_FLNOTOBD;
106 ll_from_inode(oa, inode);
108 err = obd_brw(rw, IID(inode), num_obdo, &oa, &bufs_per_obdo,
109 &page, &count, &offset, &flags);
111 // ll_to_inode(inode, oa); /* copy o_blocks to i_blocks */
118 extern void set_page_clean(struct page *);
120 /* SYNCHRONOUS I/O to object storage for an inode -- object attr will be updated too */
/*
 * ll_commit_page: synchronously WRITE one page to object storage and, on
 * the visible path, mark it uptodate and clean. Unlike ll_brw() this is
 * write-only and takes the [from, to) byte range of the commit (the
 * elided lines 126-137 presumably compute 'count' from it — TODO confirm;
 * only a full-page 'offset' is visible here).
 */
121 static int ll_commit_page(struct page *page, int create, int from, int to)
123 struct inode *inode = page->mapping->host;
124 obd_count num_obdo = 1;
125 obd_count bufs_per_obdo = 1;
128 obd_off offset = (((obd_off)page->index) << PAGE_SHIFT);
129 obd_flag flags = create ? OBD_BRW_CREATE : 0;
/* Fill the obdo from the inode, as in ll_brw(). */
138 oa->o_valid = OBD_MD_FLNOTOBD;
139 ll_from_inode(oa, inode);
141 CDEBUG(D_INODE, "commit_page writing (at %d) to %d, count %Ld\n",
144 err = obd_brw(WRITE, IID(inode), num_obdo, &oa, &bufs_per_obdo,
145 &page, &count, &offset, &flags);
/* Page now matches storage: uptodate, and no longer dirty. */
147 SetPageUptodate(page);
148 set_page_clean(page);
152 // ll_to_inode(inode, oa); /* copy o_blocks to i_blocks */
160 /* returns the page unlocked, but with a reference */
/*
 * ll_readpage: address_space readpage() hook. Returns the page unlocked
 * but still referenced (see comment above). Visible logic:
 *   - pages entirely beyond EOF are zero-filled rather than read;
 *   - already-uptodate pages are skipped;
 *   - otherwise do a synchronous object read via ll_brw(READ, ..., 0).
 * Elided lines hide the kunmap/unlock paths for the early cases.
 */
161 int ll_readpage(struct file *file, struct page *page)
163 struct inode *inode = page->mapping->host;
/* Page index at or past the last page backed by i_size -> no data on
 * storage; just zero the page. */
168 if ( ((inode->i_size + PAGE_CACHE_SIZE -1)>>PAGE_SHIFT)
170 memset(kmap(page), 0, PAGE_CACHE_SIZE);
175 if (Page_Uptodate(page)) {
/* Synchronous read; create=0 since reading must not create objects. */
180 rc = ll_brw(READ, inode, page, 0);
185 /* PDEBUG(page, "READ"); */
188 SetPageUptodate(page);
189 obd_unlock_page(page);
196 /* returns the page unlocked, but with a reference */
/*
 * ll_dir_readpage: readpage() hook for directories. Same beyond-EOF
 * zero-fill and uptodate short-circuit as ll_readpage(), but the data
 * comes from the metadata server via mdc_readpage() instead of obd_brw.
 * Elided lines hide 'buf' setup and the error/unlock paths; 'rc' from
 * the RPC and the reply header's status are both checked.
 */
197 int ll_dir_readpage(struct file *file, struct page *page)
199 struct inode *inode = page->mapping->host;
203 struct mds_rep_hdr *hdr;
207 if ( ((inode->i_size + PAGE_CACHE_SIZE -1)>>PAGE_SHIFT)
209 memset(kmap(page), 0, PAGE_CACHE_SIZE);
214 if (Page_Uptodate(page)) {
219 offset = page->index << PAGE_SHIFT;
/* Ask the MDS for one page of directory entries at 'offset'. */
221 rc = mdc_readpage(inode->i_ino, S_IFDIR, offset, buf, NULL, &hdr);
/* RPC delivered but the server may still report failure in-band. */
228 if ((rc = hdr->status)) {
233 /* PDEBUG(page, "READ"); */
235 SetPageUptodate(page);
240 } /* ll_dir_readpage */
/*
 * ll_prepare_write: address_space prepare_write() hook. If the page is
 * not uptodate and the pending write does not cover the whole page,
 * pre-read the page so the untouched bytes are preserved; then mark the
 * page dirty.
 * NOTE(review): the visible test compares 'from'/'to' (in-page byte
 * offsets) against 'offset' (a file-absolute obd_off) — that looks
 * inconsistent, but elided lines may adjust the values; confirm against
 * the full source before changing.
 */
242 int ll_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
244 struct inode *inode = page->mapping->host;
245 obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
/* Already uptodate -> no pre-read needed. */
250 if (Page_Uptodate(page)) {
/* Full-page overwrite -> the old contents don't matter. */
255 if ( (from <= offset) && (to >= offset + PAGE_SIZE) ) {
260 rc = ll_brw(READ, inode, page, 0);
262 SetPageUptodate(page);
266 set_page_dirty(page);
267 //SetPageDirty(page);
273 /* select between SYNC and ASYNC I/O methods */
273 /* select between SYNC and ASYNC I/O methods */
/*
 * ll_do_writepage: write one page back to object storage.
 * @sync: intended sync/async selector, but per the XXX below everything
 *        is synchronous for now and the visible code never reads it.
 * create=1 on the ll_brw call so backing objects are created as needed.
 */
274 int ll_do_writepage(struct page *page, int sync)
276 struct inode *inode = page->mapping->host;
280 /* PDEBUG(page, "WRITEPAGE"); */
281 /* XXX everything is synchronous now */
282 err = ll_brw(WRITE, inode, page, 1);
/* On the visible path the page now matches storage. */
285 SetPageUptodate(page);
286 set_page_clean(page);
288 /* PDEBUG(page,"WRITEPAGE"); */
291 } /* ll_do_writepage */
295 /* returns the page unlocked, but with a reference */
/*
 * ll_writepage: address_space writepage() hook — thin wrapper that
 * forces a synchronous ll_do_writepage() and clears the page's dirty
 * state. The printk/CDEBUG lines look like leftover debugging.
 * NOTE(review): ll_do_writepage() already calls set_page_clean() on
 * success, so the call here may be redundant — elided lines may gate it.
 */
296 int ll_writepage(struct page *page)
299 struct inode *inode = page->mapping->host;
301 printk("---> writepage called ino %ld!\n", inode->i_ino);
303 rc = ll_do_writepage(page, 1);
305 set_page_clean(page);
307 CDEBUG(D_INODE, "--> GRR %d\n", rc);
/*
 * write_inode_pages: walk the inode's dirty_pages list.
 * NOTE(review): the visible loop only obtains each page; the actual
 * writeout call and any locking around the list traversal are in the
 * elided lines (320-324) — confirm before relying on this. Iterating a
 * list that writeout mutates would need care.
 */
313 void write_inode_pages(struct inode *inode)
315 struct list_head *tmp = &inode->i_mapping->dirty_pages;
317 while ( (tmp = tmp->next) != &inode->i_mapping->dirty_pages) {
319 page = list_entry(tmp, struct page, list);
/*
 * ll_commit_write: address_space commit_write() hook. Synchronously
 * commits the [from, to) range of the page via ll_commit_page() and, if
 * the write extended the file, pushes the new size with ll_set_size().
 */
325 int ll_commit_write(struct file *file, struct page *page, unsigned from, unsigned to)
327 struct inode *inode = page->mapping->host;
/* End-of-write file position: page start plus in-page end offset. */
329 loff_t len = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
331 CDEBUG(D_INODE, "commit write ino %ld (end at %Ld) from %d to %d ,ind %ld\n",
332 inode->i_ino, len, from, to, page->index);
334 rc = ll_commit_page(page, 1, from, to);
/* Grow i_size only; truncation is handled elsewhere (ll_truncate). */
336 if (len > inode->i_size) {
337 ll_set_size(inode, len);
347 * This does the "real" work of the write. The generic routine has
348 * allocated the page, locked it, done all the page alignment stuff
349 * calculations etc. Now we should just copy the data from user
350 * space and write it back to the real medium..
352 * If the writer ends up delaying the write, the writer needs to
353 * increment the page use counts until he is done with the page.
355 * Return value is the number of bytes written.
/*
 * ll_write_one_page: copy 'bytes' from userspace at 'offset' within the
 * (already locked, per the generic caller) page and write it back
 * synchronously. Classic read-modify-write: pre-read the page unless the
 * write covers it entirely. Returns bytes written, or negative errno.
 * Elided lines hide the copy_from_user error path and kmap handling.
 */
357 int ll_write_one_page(struct file *file, struct page *page,
358 unsigned long offset, unsigned long bytes,
361 struct inode *inode = file->f_dentry->d_inode;
365 /* We check for complete page writes here, as we then don't have to
366 * get the page before writing over everything anyways.
368 if ( !Page_Uptodate(page) && (offset != 0 || bytes != PAGE_SIZE) ) {
369 err = ll_brw(READ, inode, page, 0);
372 SetPageUptodate(page);
/* copy_from_user returns the number of bytes NOT copied; non-zero
 * means a fault, handled in the elided branch. */
375 if (copy_from_user((u8*)page_address(page) + offset, buf, bytes))
379 err = ll_writepage(page);
382 return (err < 0 ? err : bytes);
383 } /* ll_write_one_page */
386 * return an up to date page:
387 * - if locked is true then is returned locked
388 * - if create is true the corresponding disk blocks are created
389 * - page is held, i.e. caller must release the page
391 * modeled on NFS code.
/*
 * ll_getpage: return an uptodate, referenced page for 'offset' within
 * 'inode' (see contract comment above; modeled on NFS). Uses
 * grab_cache_page(), which returns the page locked; the page is read
 * synchronously via ll_brw() if not already uptodate.
 * NOTE(review): the visible code always unlocks before returning; the
 * 'locked' parameter's effect must be in the elided lines — confirm.
 */
393 struct page *ll_getpage(struct inode *inode, unsigned long offset,
394 int create, int locked)
/* Round down to the page boundary and derive the cache index. */
402 offset = offset & PAGE_CACHE_MASK;
403 CDEBUG(D_INFO, "ino: %ld, offset %ld, create %d, locked %d\n",
404 inode->i_ino, offset, create, locked);
405 index = offset >> PAGE_CACHE_SHIFT;
407 page = grab_cache_page(&inode->i_data, index);
411 printk(KERN_WARNING " grab_cache_page says no dice ...\n");
416 /* PDEBUG(page, "GETPAGE: got page - before reading\n"); */
417 /* now check if the data in the page is up to date */
418 if ( Page_Uptodate(page)) {
/* Already current: just release the lock grab_cache_page took. */
420 if (PageLocked(page))
421 obd_unlock_page(page);
423 printk("file %s, line %d: expecting locked page\n",
/* Not uptodate: synchronous read, honoring 'create'. */
430 err = ll_brw(READ, inode, page, create);
434 obd_unlock_page(page);
440 obd_unlock_page(page);
441 SetPageUptodate(page);
442 /* PDEBUG(page,"GETPAGE - after reading"); */
/*
 * ll_truncate: inode truncate hook — punch the truncated range out of
 * the backing object via obd_punch(). If obdo_alloc() fails (elided
 * check) it falls back to an obdo on the stack rather than failing the
 * truncate, accepting the ugliness to keep the FS consistent.
 * NOTE(review): the two paths pass the punch range in opposite argument
 * orders — stack path: (0, obdo.o_size); heap path: (oa->o_size, 0).
 * One of them is likely wrong; verify against the obd_punch() prototype
 * (count/offset order) in the full source before changing.
 */
448 void ll_truncate(struct inode *inode)
454 //ll_dequeue_pages(inode);
458 /* XXX This would give an inconsistent FS, so deal with it as
459 * best we can for now - an obdo on the stack is not pretty.
463 printk(__FUNCTION__ ": obdo_alloc failed - using stack!\n");
465 obdo.o_valid = OBD_MD_FLNOTOBD;
466 ll_from_inode(&obdo, inode);
468 err = obd_punch(IID(inode), &obdo, 0, obdo.o_size);
/* Normal path: heap-allocated obdo. */
470 oa->o_valid = OBD_MD_FLNOTOBD;
471 ll_from_inode(oa, inode);
473 CDEBUG(D_INFO, "calling punch for %ld (%Lu bytes at 0)\n",
474 (long)oa->o_id, oa->o_size);
475 err = obd_punch(IID(inode), oa, oa->o_size, 0);
481 printk(__FUNCTION__ ": obd_truncate fails (%d)\n", err);
/*
 * Address-space operations for regular Lustre Light files: all I/O goes
 * through the synchronous ll_* paths above; sync_page is the generic
 * block_sync_page. (GNU-style designated initializers, 2.4 kernel.)
 */
488 struct address_space_operations ll_aops = {
489 readpage: ll_readpage,
490 writepage: ll_writepage,
491 sync_page: block_sync_page,
492 prepare_write: ll_prepare_write,
493 commit_write: ll_commit_write,
498 struct address_space_operations ll_dir_aops = {
499 readpage: ll_dir_readpage