2 * OBDFS Super operations
4 * This code is issued under the GNU General Public License.
5 * See the file COPYING in this distribution
7 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
8 * Copyright (C) 1999 Stelias Computing Inc,
9 * (author Peter J. Braam <braam@stelias.com>)
10 * Copyright (C) 1999 Seagate Technology Inc.
14 #include <linux/config.h>
15 #include <linux/kernel.h>
17 #include <linux/string.h>
18 #include <linux/stat.h>
19 #include <linux/errno.h>
20 #include <linux/locks.h>
21 #include <linux/unistd.h>
22 #include <linux/version.h>
24 #include <asm/system.h>
25 #include <asm/uaccess.h>
28 #include <linux/stat.h>
29 #include <asm/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <asm/segment.h>
33 #include <linux/pagemap.h>
34 #include <linux/smp_lock.h>
36 #define DEBUG_SUBSYSTEM S_OBDFS
38 #include <linux/obd_support.h>
39 #include <linux/obd_ext2.h>
40 #include <linux/obdfs.h>
42 void obdfs_change_inode(struct inode *inode);
/* When non-zero, writes are cached asynchronously (see obdfs_do_writepage
 * and obdfs_commit_write); 0 forces synchronous obdfs_brw()/commit paths. */
44 static int cache_writes = 0;
47 /* page cache support stuff */
49 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10)
/*
 * Pre-2.4.10 variant: move @page onto its mapping's dirty_pages list
 * under the mapping's page_lock, then mark the owning inode as having
 * dirty pages so the VM/VFS will write it back.
 * NOTE(review): this excerpt omits intermediate lines (braces, the use
 * of pg_lock) -- verify locking pairs against the full source.
 */
51 * Add a page to the dirty page list.
53 void __set_page_dirty(struct page *page)
55 struct address_space *mapping;
58 pg_lock = PAGECACHE_LOCK(page);
61 mapping = page->mapping;
62 spin_lock(&mapping->page_lock);
64 list_del(&page->list);
65 list_add(&page->list, &mapping->dirty_pages);
67 spin_unlock(&mapping->page_lock);
/* Tell the VFS this inode has dirty pages to flush. */
71 mark_inode_dirty_pages(mapping->host);
/*
 * Pre-2.4.10 variant: move @page from its mapping's dirty list to the
 * clean_pages list; when no dirty pages remain, clear I_DIRTY_PAGES on
 * the owning inode so it is no longer scheduled for page writeback.
 * NOTE(review): lock acquisition around the list manipulation is not
 * visible in this excerpt -- confirm against the full source.
 */
74 * Remove page from dirty list
76 void __set_page_clean(struct page *page)
78 struct address_space *mapping = page->mapping;
84 list_del(&page->list);
85 list_add(&page->list, &mapping->clean_pages);
87 inode = mapping->host;
88 if (list_empty(&mapping->dirty_pages)) {
89 CDEBUG(D_INODE, "inode clean\n");
90 inode->i_state &= ~I_DIRTY_PAGES;
/*
 * 2.4.10+ variant: set PG_dirty on @page; only on the 0->1 transition
 * move the page to its mapping's dirty_pages list (under the global
 * pagecache_lock) and mark the owning inode's pages dirty.
 */
97 * Add a page to the dirty page list.
99 void set_page_dirty(struct page *page)
/* test_and_set_bit returns the old value, so the body runs only when
 * the page was not already dirty. */
101 if (!test_and_set_bit(PG_dirty, &page->flags)) {
102 struct address_space *mapping = page->mapping;
105 spin_lock(&pagecache_lock);
106 list_del(&page->list);
107 list_add(&page->list, &mapping->dirty_pages);
108 spin_unlock(&pagecache_lock);
111 mark_inode_dirty_pages(mapping->host);
/*
 * 2.4.10+ variant: under the global pagecache_lock, move @page to the
 * mapping's clean_pages list and clear I_DIRTY_PAGES on the inode once
 * the dirty list is empty.
 */
116 * Remove page from dirty list
118 void __set_page_clean(struct page *page)
120 struct address_space *mapping = page->mapping;
126 spin_lock(&pagecache_lock);
127 list_del(&page->list);
128 list_add(&page->list, &mapping->clean_pages);
130 inode = mapping->host;
131 if (list_empty(&mapping->dirty_pages)) {
132 CDEBUG(D_INODE, "inode clean\n");
133 inode->i_state &= ~I_DIRTY_PAGES;
135 spin_unlock(&pagecache_lock);
/* Clear a page's dirty state: drop PG_dirty and remove the page from
 * its mapping's dirty list via __set_page_clean(). No-op if the page
 * was not dirty. */
142 inline void set_page_clean(struct page *page)
144 if (PageDirty(page)) {
145 ClearPageDirty(page);
146 __set_page_clean(page);
/*
 * Synchronously read or write one page of @inode to object storage.
 * @rw: READ or OBD_BRW_WRITE; @create: request block allocation
 * (OBD_BRW_CREATE).  Copies the inode attributes into a single obdo
 * (obdfs_from_inode) and issues a one-page obd_brw at the page's file
 * offset.  Returns the obd_brw status in 'err'.
 * NOTE(review): the obdo allocation/free and return statement are not
 * visible in this excerpt -- verify against the full source.
 */
150 /* SYNCHRONOUS I/O to object storage for an inode -- object attr will be updated too */
151 static int obdfs_brw(int rw, struct inode *inode, struct page *page, int create)
153 obd_count num_obdo = 1;
154 obd_count bufs_per_obdo = 1;
156 obd_size count = PAGE_SIZE;
157 obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
158 obd_flag flags = create ? OBD_BRW_CREATE : 0;
168 oa->o_valid = OBD_MD_FLNOTOBD;
169 obdfs_from_inode(oa, inode);
171 err = obd_brw(rw, IID(inode), num_obdo, &oa, &bufs_per_obdo,
172 &page, &count, &offset, &flags);
174 // obdfs_to_inode(inode, oa); /* copy o_blocks to i_blocks */
181 extern void set_page_clean(struct page *);
/*
 * Synchronously write one page to object storage and mark it clean.
 * @create requests block allocation; @from/@to delimit the byte range
 * being committed (used at least for the debug message; the 'count'
 * computation is not visible in this excerpt).  On success the page is
 * marked up to date and clean.
 */
183 /* SYNCHRONOUS I/O to object storage for an inode -- object attr will be updated too */
184 static int obdfs_commit_page(struct page *page, int create, int from, int to)
186 struct inode *inode = page->mapping->host;
187 obd_count num_obdo = 1;
188 obd_count bufs_per_obdo = 1;
191 obd_off offset = (((obd_off)page->index) << PAGE_SHIFT);
192 obd_flag flags = create ? OBD_BRW_CREATE : 0;
/* Copy inode attributes into the obdo before the write. */
201 oa->o_valid = OBD_MD_FLNOTOBD;
202 obdfs_from_inode(oa, inode);
204 CDEBUG(D_INODE, "commit_page writing (at %d) to %d, count %Ld\n",
207 err = obd_brw(WRITE, IID(inode), num_obdo, &oa, &bufs_per_obdo,
208 &page, &count, &offset, &flags);
210 SetPageUptodate(page);
211 set_page_clean(page);
215 // obdfs_to_inode(inode, oa); /* copy o_blocks to i_blocks */
/*
 * address_space writepage operation: synchronously write @page via
 * obdfs_brw(OBD_BRW_WRITE, ..., create=1) and mark it clean.
 * Returns the page unlocked but still referenced (per comment below);
 * the unlock itself is not visible in this excerpt.
 */
222 /* returns the page unlocked, but with a reference */
223 int obdfs_writepage(struct page *page)
226 struct inode *inode = page->mapping->host;
228 CERROR("---> writepage called ino %ld!\n", inode->i_ino);
230 rc = obdfs_brw(OBD_BRW_WRITE, inode, page, 1);
232 set_page_clean(page);
234 CDEBUG(D_INODE, "--> GRR %d\n", rc);
/*
 * Write out every page on @inode's dirty_pages list by calling
 * obdfs_writepage() on each in turn.
 * NOTE(review): iterating with tmp->next while obdfs_writepage() moves
 * pages off the dirty list looks fragile -- verify list-walk safety
 * against the full source.
 */
241 void write_inode_pages(struct inode *inode)
243 struct list_head *tmp = &inode->i_mapping->dirty_pages;
245 while ( (tmp = tmp->next) != &inode->i_mapping->dirty_pages) {
247 page = list_entry(tmp, struct page, list);
248 obdfs_writepage(page);
/*
 * address_space readpage operation.  Pages wholly beyond i_size are
 * zero-filled instead of read; already-uptodate pages are returned
 * as-is; otherwise the page is read synchronously via obdfs_brw(READ)
 * and marked up to date.
 */
253 /* returns the page unlocked, but with a reference */
254 int obdfs_readpage(struct file *file, struct page *page)
256 struct inode *inode = page->mapping->host;
/* Page lies at/after EOF (rounded up to a page): just zero it. */
261 if ( ((inode->i_size + PAGE_CACHE_SIZE -1)>>PAGE_SHIFT)
263 memset(kmap(page), 0, PAGE_CACHE_SIZE);
268 if (Page_Uptodate(page)) {
273 rc = obdfs_brw(READ, inode, page, 0);
280 SetPageUptodate(page);
284 } /* obdfs_readpage */
/*
 * address_space prepare_write operation.  If the page is not already
 * up to date and the caller is not overwriting the whole page, read
 * the existing contents in first (obdfs_brw READ) so partial writes
 * don't clobber data.  The page is then marked dirty.
 */
286 int obdfs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
288 struct inode *inode = page->mapping->host;
289 obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
294 if (Page_Uptodate(page)) {
/* Full-page overwrite: no need to read the old contents. */
299 if ( (from <= offset) && (to >= offset + PAGE_SIZE) ) {
304 rc = obdfs_brw(READ, inode, page, 0);
306 SetPageUptodate(page);
310 set_page_dirty(page);
311 //SetPageDirty(page);
/* Slab cache for struct obdfs_pgrq (per-page async write requests);
 * created lazily by obdfs_init_pgrqcache(). */
321 static kmem_cache_t *obdfs_pgrq_cachep = NULL;
/*
 * Create the obdfs_pgrq slab cache if it does not already exist.
 * Idempotent: a second call just logs the existing cache address.
 * NOTE(review): the error-path return value is not visible in this
 * excerpt.
 */
323 int obdfs_init_pgrqcache(void)
326 if (obdfs_pgrq_cachep == NULL) {
327 CDEBUG(D_CACHE, "allocating obdfs_pgrq_cache\n");
328 obdfs_pgrq_cachep = kmem_cache_create("obdfs_pgrq",
329 sizeof(struct obdfs_pgrq),
330 0, SLAB_HWCACHE_ALIGN,
332 if (obdfs_pgrq_cachep == NULL) {
336 CDEBUG(D_CACHE, "allocated cache at %p\n",
340 CDEBUG(D_CACHE, "using existing cache at %p\n",
345 } /* obdfs_init_wreqcache */
/*
 * Tear down one cached page request: unlink it from its list, clear
 * the page's obdcache flag, and return the pgrq to the slab cache.
 * Caller is expected to hold whatever lock protects rq_plist (the
 * lock is not visible in this excerpt).
 */
347 inline void obdfs_pgrq_del(struct obdfs_pgrq *pgrq)
350 CDEBUG(D_INFO, "deleting page %p from list [count %ld]\n",
351 pgrq->rq_page, obdfs_cache_count);
352 list_del(&pgrq->rq_plist);
353 OBDClearCachePage(pgrq->rq_page);
354 kmem_cache_free(obdfs_pgrq_cachep, pgrq);
/*
 * Destroy the obdfs_pgrq slab cache at module teardown.  Logs an
 * error if kmem_cache_destroy reports objects still in use, or if the
 * cache was never created.
 */
357 void obdfs_cleanup_pgrqcache(void)
360 if (obdfs_pgrq_cachep != NULL) {
361 CDEBUG(D_CACHE, "destroying obdfs_pgrqcache at %p, count %ld\n",
362 obdfs_pgrq_cachep, obdfs_cache_count);
363 if (kmem_cache_destroy(obdfs_pgrq_cachep))
364 CERROR("unable to free all of cache\n");
365 obdfs_pgrq_cachep = NULL;
367 CERROR("called with NULL pointer\n");
370 } /* obdfs_cleanup_wreqcache */
/*
 * Linear search of @inode's cached-page-request list for a page with
 * the given index.  Caller must hold the list lock (per the comment
 * below).  Returns the matching page, or presumably NULL when absent
 * (return statements are not visible in this excerpt).
 */
373 /* called with the list lock held */
374 static struct page *obdfs_find_page_index(struct inode *inode,
377 struct list_head *page_list = obdfs_iplist(inode);
378 struct list_head *tmp;
383 CDEBUG(D_INFO, "looking for inode %ld pageindex %ld\n",
384 inode->i_ino, index);
387 if (list_empty(page_list)) {
392 while ( (tmp = tmp->next) != page_list ) {
393 struct obdfs_pgrq *pgrq;
395 pgrq = list_entry(tmp, struct obdfs_pgrq, rq_plist);
396 page = pgrq->rq_page;
397 if (index == page->index) {
399 "INDEX SEARCH found page %p, index %ld\n",
408 } /* obdfs_find_page_index */
/*
 * Issue one vectored OBD_BRW_WRITE covering @num_io pages grouped into
 * @num_obdos obdos, then release each page reference and free each
 * obdo, propagating the obdo's o_size back into the inode.  Called
 * with the io lock held on the inodes (see comment below).
 */
411 /* call and free pages from Linux page cache: called with io lock on inodes */
412 int obdfs_do_vec_wr(struct inode **inodes, obd_count num_io,
413 obd_count num_obdos, struct obdo **obdos,
414 obd_count *oa_bufs, struct page **pages, char **bufs,
415 obd_size *counts, obd_off *offsets, obd_flag *flags)
420 CDEBUG(D_INFO, "writing %d page(s), %d obdo(s) in vector\n",
/* Debug-only dump of the obdo ids and page pointers in the vector. */
422 if (obd_debug_level & D_INFO) { /* DEBUGGING */
425 for (i = 0; i < num_obdos; i++)
426 printk("%ld:0x%p ", (long)obdos[i]->o_id, obdos[i]);
429 for (i = 0; i < num_io; i++)
430 printk("0x%p ", pages[i]);
434 err = obd_brw(OBD_BRW_WRITE, IID(inodes[0]), num_obdos, obdos,
435 oa_bufs, pages, counts, offsets, flags);
437 CDEBUG(D_INFO, "BRW done\n");
438 /* release the pages from the page cache */
439 while ( num_io > 0 ) {
/* NOTE(review): the decrement of num_io (presumably --num_io before
 * indexing) is not visible in this excerpt -- confirm in full source. */
441 CDEBUG(D_INFO, "calling put_page for %p, index %ld\n",
442 pages[num_io], pages[num_io]->index);
443 put_page(pages[num_io]);
445 CDEBUG(D_INFO, "put_page done\n");
447 while ( num_obdos > 0) {
449 CDEBUG(D_INFO, "free obdo %ld\n",(long)obdos[num_obdos]->o_id);
450 /* copy o_blocks to i_blocks */
451 obdfs_set_size (inodes[num_obdos], obdos[num_obdos]->o_size);
452 //obdfs_to_inode(inodes[num_obdos], obdos[num_obdos]);
453 obdo_free(obdos[num_obdos]);
455 CDEBUG(D_INFO, "obdo_free done\n");
/*
 * Queue @page on @inode's dirty-page request list for later async
 * writeback.  Allocates an obdfs_pgrq, takes a page reference, links
 * the request under the superblock's osi_list_mutex, and (first time
 * only) puts the inode on the superblock dirty-inode list, pinning it
 * with i_count.  Returns an error if pgrq allocation fails (the error
 * return itself is not visible in this excerpt).
 */
462 * Add a page to the write request cache list for later writing.
463 * ASYNCHRONOUS write method.
465 static int obdfs_add_page_to_cache(struct inode *inode, struct page *page)
470 /* The PG_obdcache bit is cleared by obdfs_pgrq_del() BEFORE the page
471 * is written, so at worst we will write the page out twice.
473 * If the page has the PG_obdcache bit set, then the inode MUST be
474 * on the superblock dirty list so we don't need to check this.
475 * Dirty inodes are removed from the superblock list ONLY when they
476 * don't have any more cached pages. It is possible to have an inode
477 * with no dirty pages on the superblock list, but not possible to
478 * have an inode with dirty pages NOT on the superblock dirty list.
/* OBDAddCachePage returns the old flag state: only queue the page on
 * the 0->1 transition so each page is cached at most once. */
480 if (!OBDAddCachePage(page)) {
481 struct obdfs_pgrq *pgrq;
482 pgrq = kmem_cache_alloc(obdfs_pgrq_cachep, SLAB_KERNEL);
/* Allocation failed: undo the cache flag set above. */
484 OBDClearCachePage(page);
488 /* not really necessary since we set all pgrq fields here
489 memset(pgrq, 0, sizeof(*pgrq));
492 pgrq->rq_page = page;
493 pgrq->rq_jiffies = jiffies;
/* Hold a reference for as long as the request is queued. */
494 get_page(pgrq->rq_page);
496 obd_down(&obdfs_i2sbi(inode)->osi_list_mutex);
497 list_add(&pgrq->rq_plist, obdfs_iplist(inode));
499 //CERROR("-- count %d\n", obdfs_cache_count);
501 /* If inode isn't already on superblock inodes list, add it.
503 * We increment the reference count on the inode to keep it
504 * from being freed from memory. This _should_ be an iget()
505 * with an iput() in both flush_reqs() and put_inode(), but
506 * since put_inode() is called from iput() we can't call iput()
507 * again there. Instead we just increment/decrement i_count,
508 * which is mostly what iget/iput do for an inode in memory.
510 if ( list_empty(obdfs_islist(inode)) ) {
511 atomic_inc(&inode->i_count);
513 "adding inode %ld to superblock list %p\n",
514 inode->i_ino, obdfs_slist(inode));
515 list_add(obdfs_islist(inode), obdfs_slist(inode));
517 obd_up(&obdfs_i2sbi(inode)->osi_list_mutex);
521 /* XXX For testing purposes, we can write out the page here.
522 err = obdfs_flush_reqs(obdfs_slist(inode), ~0UL);
527 } /* obdfs_add_page_to_cache */
/* NOTE(review): the enclosing function's definition line is not
 * visible in this excerpt.  This fragment logs when the cached page
 * count exceeds 60000; the flush call is commented out. */
531 if (obdfs_cache_count > 60000) {
532 CERROR("-- count %ld\n", obdfs_cache_count);
533 //obdfs_flush_dirty_pages(~0UL);
534 CERROR("-- count %ld\n", obdfs_cache_count);
/*
 * Write @page either synchronously (obdfs_brw) when @sync is set, or
 * asynchronously by queueing it on the inode's cache list
 * (obdfs_add_page_to_cache).  On success the page is marked up to
 * date and clean.
 */
540 /* select between SYNC and ASYNC I/O methods */
541 int obdfs_do_writepage(struct page *page, int sync)
543 struct inode *inode = page->mapping->host;
548 err = obdfs_brw(OBD_BRW_WRITE, inode, page, 1);
550 err = obdfs_add_page_to_cache(inode, page);
551 CDEBUG(D_INFO, "DO_WR ino: %ld, page %p, err %d, uptodate %d\n",
552 inode->i_ino, page, err, Page_Uptodate(page));
556 SetPageUptodate(page);
557 set_page_clean(page);
561 } /* obdfs_do_writepage */
/*
 * address_space commit_write operation.  When write caching is off
 * (cache_writes == 0) the page range is committed synchronously via
 * obdfs_commit_page(); the cached path is not visible in this
 * excerpt.  Extends i_size when the write ends past the current size.
 */
568 int obdfs_commit_write(struct file *file, struct page *page, unsigned from, unsigned to)
570 struct inode *inode = page->mapping->host;
/* Absolute file offset of the end of this write. */
572 loff_t len = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
574 CDEBUG(D_INODE, "commit write ino %ld (end at %Ld) from %d to %d ,ind %ld\n",
575 inode->i_ino, len, from, to, page->index);
578 if (cache_writes == 0) {
579 rc = obdfs_commit_page(page, 1, from, to);
582 if (len > inode->i_size) {
583 obdfs_set_size(inode, len);
/*
 * Copy @bytes of user data at @offset into @page and write the page
 * out synchronously.  A read-modify-write is done first unless the
 * caller overwrites the entire page.  Returns bytes written, or a
 * negative error from obdfs_writepage(); the copy_from_user failure
 * path is not fully visible in this excerpt.
 */
593 * This does the "real" work of the write. The generic routine has
594 * allocated the page, locked it, done all the page alignment stuff
595 * calculations etc. Now we should just copy the data from user
596 * space and write it back to the real medium..
598 * If the writer ends up delaying the write, the writer needs to
599 * increment the page use counts until he is done with the page.
601 * Return value is the number of bytes written.
603 int obdfs_write_one_page(struct file *file, struct page *page,
604 unsigned long offset, unsigned long bytes,
607 struct inode *inode = file->f_dentry->d_inode;
611 /* We check for complete page writes here, as we then don't have to
612 * get the page before writing over everything anyways.
614 if ( !Page_Uptodate(page) && (offset != 0 || bytes != PAGE_SIZE) ) {
615 err = obdfs_brw(READ, inode, page, 0);
618 SetPageUptodate(page);
621 if (copy_from_user((u8*)page_address(page) + offset, buf, bytes))
625 err = obdfs_writepage(page);
628 return (err < 0 ? err : bytes);
629 } /* obdfs_write_one_page */
/*
 * Return an up-to-date, referenced page for @inode at @offset
 * (rounded down to a page boundary), reading it from object storage
 * if needed.  Modeled on the NFS code (see comment below).  The
 * 'locked' handling and error returns are only partially visible in
 * this excerpt.
 */
632 * return an up to date page:
633 * - if locked is true then is returned locked
634 * - if create is true the corresponding disk blocks are created
635 * - page is held, i.e. caller must release the page
637 * modeled on NFS code.
639 struct page *obdfs_getpage(struct inode *inode, unsigned long offset,
640 int create, int locked)
/* Round the byte offset down to its page and derive the page index. */
648 offset = offset & PAGE_CACHE_MASK;
649 CDEBUG(D_INFO, "ino: %ld, offset %ld, create %d, locked %d\n",
650 inode->i_ino, offset, create, locked);
651 index = offset >> PAGE_CACHE_SHIFT;
/* Find-or-create the page in the page cache, returned locked. */
653 page = grab_cache_page(&inode->i_data, index);
657 CERROR("grab_cache_page says no dice ...\n");
662 /* now check if the data in the page is up to date */
663 if ( Page_Uptodate(page)) {
665 if (PageLocked(page))
668 CERROR("expecting locked page\n");
674 err = obdfs_brw(READ, inode, page, create);
685 SetPageUptodate(page);
688 } /* obdfs_getpage */
/*
 * Inode truncate operation: copy the inode attributes into an obdo
 * and punch the object from offset 0 for o_size bytes via obd_punch,
 * logging on failure.  The obdo allocation/free surrounding this is
 * not visible in this excerpt.
 */
691 void obdfs_truncate(struct inode *inode)
697 //obdfs_dequeue_pages(inode);
701 CERROR("obdo_alloc failed!\n");
703 oa->o_valid = OBD_MD_FLNOTOBD;
704 obdfs_from_inode(oa, inode);
706 CDEBUG(D_INFO, "calling punch for %ld (%Lu bytes at 0)\n",
707 (long)oa->o_id, oa->o_size);
708 err = obd_punch(IID(inode), oa, oa->o_size, 0);
714 CERROR("obd_truncate fails (%d)\n", err);
719 } /* obdfs_truncate */
721 struct address_space_operations obdfs_aops = {
722 readpage: obdfs_readpage,
723 writepage: obdfs_writepage,
724 sync_page: block_sync_page,
725 prepare_write: obdfs_prepare_write,
726 commit_write: obdfs_commit_write,