2 * OBDFS Super operations
4 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
5 * Copyright (C) 1999 Stelias Computing Inc,
6 * (author Peter J. Braam <braam@stelias.com>)
7 * Copyright (C) 1999 Seagate Technology Inc.
11 #include <linux/config.h>
12 #include <linux/kernel.h>
14 #include <linux/string.h>
15 #include <linux/stat.h>
16 #include <linux/errno.h>
17 #include <linux/locks.h>
18 #include <linux/unistd.h>
20 #include <asm/system.h>
21 #include <asm/uaccess.h>
24 #include <linux/stat.h>
25 #include <asm/uaccess.h>
26 #include <linux/vmalloc.h>
27 #include <asm/segment.h>
29 #include <linux/pagemap.h>
30 #include <linux/smp_lock.h>
32 #include <linux/obd_support.h>
33 #include <linux/obd_ext2.h>
34 #include <linux/obdfs.h>
/* Forward declaration: flushes queued page write requests from a request
 * list; called below from obdfs_add_page_to_cache(). */
37 int obdfs_flush_reqs(struct list_head *page_list,
38 int flush_inode, int check_time);
41 /* SYNCHRONOUS I/O for an inode */
/*
 * obdfs_brw: perform one blocking, page-sized block I/O against the object
 * backing `inode`, through the OBD connection's brw method.
 *
 * rw:     READ or WRITE.
 * create: non-zero requests creation of backing blocks (OBD_BRW_CREATE).
 *
 * NOTE(review): this listing omits the declarations of oa/err/num_io and
 * the error-check/return lines; only the visible statements are annotated.
 */
42 static int obdfs_brw(int rw, struct inode *inode, struct page *page, int create)
46 char *buf = (char *)page_address(page);
47 obd_size count = PAGE_SIZE;
/* byte offset of this page within the object */
48 obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
49 obd_flag flags = create ? OBD_BRW_CREATE : 0;
/* build an obdo for this object id, then overlay the inode's attributes */
53 oa = obdo_fromid(IID(inode), inode->i_ino, OBD_MD_FLNOTOBD);
58 obdfs_from_inode(oa, inode);
/* single-element brw vector: one buffer, one count, one offset */
60 err = IOPS(inode, brw)(rw, IID(inode), &num_io, &oa, &buf, &count,
64 obdfs_to_inode(inode, oa); /* copy o_blocks to i_blocks */
72 /* returns the page unlocked, but with a reference */
/*
 * obdfs_readpage: synchronously read one page of `dentry`'s inode and mark
 * it up to date on success.  Thin wrapper around obdfs_brw(READ).
 * NOTE(review): the error path and the return statement are not visible in
 * this listing.
 */
73 int obdfs_readpage(struct dentry *dentry, struct page *page)
75 struct inode *inode = dentry->d_inode;
/* synchronous read; create = 0: do not allocate backing blocks */
80 rc = obdfs_brw(READ, inode, page, 0);
82 SetPageUptodate(page);
88 } /* obdfs_readpage */
/* slab cache for struct obdfs_pgrq write-request descriptors */
90 static kmem_cache_t *obdfs_pgrq_cachep = NULL;
/*
 * obdfs_init_pgrqcache: create the pgrq slab cache on first call; on later
 * calls just report the already-existing cache.
 * NOTE(review): the failure return and the function's final return value
 * are on lines not visible in this listing.
 */
92 int obdfs_init_pgrqcache(void)
95 if (obdfs_pgrq_cachep == NULL) {
96 CDEBUG(D_INODE, "allocating obdfs_pgrq_cache\n");
97 obdfs_pgrq_cachep = kmem_cache_create("obdfs_pgrq",
98 sizeof(struct obdfs_pgrq),
99 0, SLAB_HWCACHE_ALIGN,
101 if (obdfs_pgrq_cachep == NULL) {
105 CDEBUG(D_INODE, "allocated cache at %p\n",
109 CDEBUG(D_INODE, "using existing cache at %p\n",
114 } /* obdfs_init_pgrqcache */
/*
 * obdfs_pgrq_del: unlink a page request from both the per-inode (rq_ilist)
 * and per-superblock (rq_slist) lists and free it to the pgrq slab cache.
 * NOTE(review): presumably the caller holds whatever lock protects these
 * lists -- confirm against callers.
 */
116 inline void obdfs_pgrq_del(struct obdfs_pgrq *pgrq)
118 list_del(&pgrq->rq_ilist);
119 list_del(&pgrq->rq_slist);
120 kmem_cache_free(obdfs_pgrq_cachep, pgrq);
/*
 * obdfs_cleanup_pgrqcache: destroy the pgrq slab cache (module teardown).
 * Logs a warning if objects are still outstanding (kmem_cache_destroy
 * failure) or if the cache was never created.
 * NOTE(review): obdfs_pgrq_cachep is not reset to NULL after destruction
 * on any visible line -- confirm this cannot be re-entered.
 */
123 void obdfs_cleanup_pgrqcache(void)
126 if (obdfs_pgrq_cachep != NULL) {
127 CDEBUG(D_INODE, "destroying obdfs_pgrqcache at %p\n",
129 if (kmem_cache_destroy(obdfs_pgrq_cachep))
130 printk(KERN_INFO "obd_cleanup_pgrqcache: unable to free all of cache\n");
132 printk(KERN_INFO "obd_cleanup_pgrqcache: called with NULL cache pointer\n");
135 } /* obdfs_cleanup_pgrqcache */
139 * Find a specific page in the page cache. If it is found, we return
140 * the write request struct associated with it, if not found return NULL.
143 static struct obdfs_pgrq *
144 obdfs_find_in_page_cache(struct inode *inode, struct page *page)
146 struct list_head *page_list = &OBDFS_LIST(inode);
147 struct list_head *tmp;
148 struct obdfs_pgrq *pgrq;
151 CDEBUG(D_INODE, "looking for inode %ld page %p\n", inode->i_ino, page);
152 if (list_empty(page_list)) {
153 CDEBUG(D_INODE, "empty list\n");
/* linear scan of the inode's request list for a pgrq holding `page`.
 * NOTE(review): tmp is initialized on a line not visible in this listing
 * (presumably tmp = page_list) -- confirm. */
158 while ( (tmp = tmp->next) != page_list ) {
159 pgrq = list_entry(tmp, struct obdfs_pgrq, rq_list);
160 CDEBUG(D_INODE, "checking page %p\n", pgrq->rq_page);
161 if (pgrq->rq_page == page) {
162 CDEBUG(D_INODE, "found page %p in list\n", page);
170 } /* obdfs_find_in_page_cache */
/*
 * obdfs_do_vec_wr: submit a vector of pages to the OBD target in a single
 * brw(WRITE) call, then drop the reference held on each page (taken when
 * the page was queued -- see obdfs_add_page_to_cache).
 * NOTE(review): the opening brace, the declaration of err/obdos, and the
 * return statement are not visible in this listing.
 */
174 int obdfs_do_vec_wr(struct super_block *sb, obd_count *num_io,
176 struct page **pages, char **bufs, obd_size *counts,
177 obd_off *offsets, obd_flag *flags)
179 int last_io = *num_io;
181 struct obdfs_sb_info *sbi = (struct obdfs_sb_info *)&sb->u.generic_sbp;
183 CDEBUG(D_INODE, "writing %d pages in vector\n", last_io);
184 err = OPS(sb, brw)(WRITE, &sbi->osi_conn, num_io, obdos,
185 bufs, counts, offsets, flags);
/* release the per-page references, walking the array backwards */
188 put_page(pages[--last_io]);
189 } while ( last_io > 0 );
197 * Add a page to the write request cache list for later writing
198 * ASYNCHRONOUS write method.
200 static int obdfs_add_page_to_cache(struct inode *inode, struct page *page)
202 struct obdfs_pgrq *pgrq;
/* NOTE(review): the allocation-failure check for pgrq is on a line not
 * visible in this listing */
206 pgrq = kmem_cache_alloc(obdfs_pgrq_cachep, SLAB_KERNEL);
207 CDEBUG(D_INODE, "adding inode %ld page %p, pgrq: %p\n",
208 inode->i_ino, page, pgrq);
213 memset(pgrq, 0, sizeof(*pgrq));
215 pgrq->rq_page = page;
216 pgrq->rq_inode = inode;
/* hold a reference on the page while it sits on the request lists;
 * dropped by the writer (see obdfs_do_vec_wr) */
218 get_page(pgrq->rq_page);
219 list_add(&pgrq->rq_ilist, obdfs_ilist(inode));
220 list_add(&pgrq->rq_slist, obdfs_slist(inode));
222 /* XXX For testing purposes, we write out the page here.
223 * In the future, a flush daemon will write out the page.
227 rc = obdfs_flush_reqs(obdfs_slist(inode), 0, 0);
231 } /* obdfs_add_page_to_cache */
234 /* select between SYNC and ASYNC I/O methods */
/*
 * obdfs_do_writepage: write `page` either synchronously via obdfs_brw or
 * by queueing it on the async write cache, depending on `sync`.
 * NOTE(review): the if/else that selects between the two calls is on lines
 * not visible in this listing.
 */
235 int obdfs_do_writepage(struct inode *inode, struct page *page, int sync)
240 PDEBUG(page, "WRITEPAGE");
242 err = obdfs_brw(WRITE, inode, page, 1);
244 err = obdfs_add_page_to_cache(inode, page);
247 SetPageUptodate(page);
248 PDEBUG(page,"WRITEPAGE");
251 } /* obdfs_do_writepage */
253 /* returns the page unlocked, but with a reference */
/* obdfs_writepage: writepage entry point; delegates to the asynchronous
 * path (sync == 0) of obdfs_do_writepage. */
254 int obdfs_writepage(struct dentry *dentry, struct page *page)
256 return obdfs_do_writepage(dentry->d_inode, page, 0);
260 * This does the "real" work of the write. The generic routine has
261 * allocated the page, locked it, done all the page alignment stuff
262 * calculations etc. Now we should just copy the data from user
263 * space and write it back to the real medium..
265 * If the writer ends up delaying the write, the writer needs to
266 * increment the page use counts until he is done with the page.
268 * Return value is the number of bytes written.
270 int obdfs_write_one_page(struct file *file, struct page *page,
271 unsigned long offset, unsigned long bytes,
274 struct inode *inode = file->f_dentry->d_inode;
/* read-modify-write: bring the page up to date before a partial write */
278 if ( !Page_Uptodate(page) ) {
279 err = obdfs_brw(READ, inode, page, 1);
281 SetPageUptodate(page);
/* copy user data into the page; NOTE(review): `buf` is a parameter
 * declared on a line not visible in this listing */
286 if (copy_from_user((u8*)page_address(page) + offset, buf, bytes))
290 err = obdfs_writepage(file->f_dentry, page);
/* contract (see header comment above): bytes written on success,
 * negative errno on failure */
293 return (err < 0 ? err : bytes);
294 } /* obdfs_write_one_page */
297 return an up to date page:
298 - if locked is true then is returned locked
299 - if create is true the corresponding disk blocks are created
300 - page is held, i.e. caller must release the page
304 struct page *obdfs_getpage(struct inode *inode, unsigned long offset, int create, int locked)
306 struct page *page_cache;
/* align the offset down to a page-cache boundary */
313 offset = offset & PAGE_CACHE_MASK;
314 CDEBUG(D_INODE, "\n");
317 page_cache = page_cache_alloc();
318 if ( ! page_cache ) {
322 CDEBUG(D_INODE, "page_cache %p\n", page_cache);
/* look up (or insert) the page for this offset in the inode's mapping */
324 hash = page_hash(&inode->i_data, offset);
325 page = grab_cache_page(&inode->i_data, offset);
329 printk("grab_cache_page says no dice ...\n");
334 PDEBUG(page, "GETPAGE: got page - before reading\n");
335 /* now check if the data in the page is up to date */
336 if ( Page_Uptodate(page)) {
/* not up to date: read it in, creating backing blocks if `create` */
343 err = obdfs_brw(READ, inode, page, create);
354 SetPageUptodate(page);
355 PDEBUG(page,"GETPAGE - after reading");
358 } /* obdfs_getpage */