2 * OBDFS page read/write operations
4 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
5 * Copyright (C) 1999 Stelias Computing Inc,
6 * (author Peter J. Braam <braam@stelias.com>)
7 * Copyright (C) 1999 Seagate Technology Inc.
11 #include <linux/config.h>
12 #include <linux/kernel.h>
14 #include <linux/string.h>
15 #include <linux/stat.h>
16 #include <linux/errno.h>
17 #include <linux/locks.h>
18 #include <linux/unistd.h>
20 #include <asm/system.h>
21 #include <asm/uaccess.h>
24 #include <linux/stat.h>
25 #include <asm/uaccess.h>
26 #include <linux/vmalloc.h>
27 #include <asm/segment.h>
29 #include <linux/pagemap.h>
30 #include <linux/smp_lock.h>
32 #include <linux/obd_support.h>
33 #include <linux/obd_ext2.h>
34 #include <linux/obdfs.h>
/* Forward declarations: write-back entry points implemented elsewhere in obdfs. */
37 int obdfs_flush_reqs(struct list_head *page_list,
38 int flush_inode, int check_time);
/* Flush dirty pages queued by the ASYNC write path (see obdfs_add_page_to_cache). */
40 void obdfs_flush_dirty_pages(int check_time);
42 /* SYNCHRONOUS I/O for an inode */
/*
 * obdfs_brw: transfer one whole page of @inode synchronously through the
 * object device.
 *
 * rw:     READ or WRITE.
 * page:   page to transfer; its index determines the object offset.
 * create: non-zero sets OBD_BRW_CREATE so backing blocks are allocated.
 *
 * Builds a single obdo from the inode, issues a one-buffer brw via the
 * inode's OBD operations, then copies attributes back into the inode.
 * NOTE(review): this chunk is an extraction with interior lines missing
 * (error checks, obdo_free, return path) — do not assume it is complete.
 */
43 static int obdfs_brw(int rw, struct inode *inode, struct page *page, int create)
46 obd_count oa_bufs = 1; /* exactly one buffer in this request */
48 char *buf = (char *)page_address(page);
49 obd_size count = PAGE_SIZE;
/* byte offset of this page within the object: page index scaled by page size */
50 obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
51 obd_flag flags = create ? OBD_BRW_CREATE : 0;
/* allocate an obdo keyed by the inode number; request non-OBD metadata */
55 oa = obdo_fromid(IID(inode), inode->i_ino, OBD_MD_FLNOTOBD);
/* seed the obdo's attributes from the in-core inode before the transfer */
60 obdfs_from_inode(oa, inode);
62 err = IOPS(inode, brw)(rw, IID(inode), num_oa, &oa, &oa_bufs, &buf,
63 &count, &offset, &flags);
66 obdfs_to_inode(inode, oa); /* copy o_blocks to i_blocks */
74 /* returns the page unlocked, but with a reference */
/*
 * obdfs_readpage: VFS readpage hook — read one page of @dentry's inode
 * synchronously via obdfs_brw(READ) and mark it up to date on success.
 */
75 int obdfs_readpage(struct dentry *dentry, struct page *page)
77 struct inode *inode = dentry->d_inode;
/* synchronous read; create=0: never allocate backing blocks on a read */
82 rc = obdfs_brw(READ, inode, page, 0);
84 SetPageUptodate(page);
90 } /* obdfs_readpage */
/* Slab cache for struct obdfs_pgrq; created lazily in obdfs_init_pgrqcache(). */
92 static kmem_cache_t *obdfs_pgrq_cachep = NULL;
/* Count of pgrq entries, reported in debug output and at cache teardown. */
93 static int obdfs_cache_count = 0;
/*
 * obdfs_init_pgrqcache: create the obdfs_pgrq slab cache if it does not
 * exist yet.  Idempotent — a second call reuses the existing cache.
 */
95 int obdfs_init_pgrqcache(void)
98 if (obdfs_pgrq_cachep == NULL) {
99 CDEBUG(D_INODE, "allocating obdfs_pgrq_cache\n");
100 obdfs_pgrq_cachep = kmem_cache_create("obdfs_pgrq",
101 sizeof(struct obdfs_pgrq),
102 0, SLAB_HWCACHE_ALIGN,
104 if (obdfs_pgrq_cachep == NULL) {
108 CDEBUG(D_INODE, "allocated cache at %p\n",
112 CDEBUG(D_INODE, "using existing cache at %p\n",
117 } /* obdfs_init_pgrqcache */
/*
 * obdfs_pgrq_del: unlink @pgrq from its per-inode page list and return it
 * to the slab cache.
 * NOTE(review): presumably called with the list lock held, like the other
 * list helpers in this file — confirm against callers.
 */
119 inline void obdfs_pgrq_del(struct obdfs_pgrq *pgrq)
122 CDEBUG(D_INODE, "deleting page %p from list [count %d]\n", pgrq->rq_page, obdfs_cache_count);
123 list_del(&pgrq->rq_plist);
124 kmem_cache_free(obdfs_pgrq_cachep, pgrq);
/*
 * obdfs_cleanup_pgrqcache: destroy the pgrq slab cache (module teardown).
 * Warns if the cache cannot be fully freed or was never created.
 */
127 void obdfs_cleanup_pgrqcache(void)
130 if (obdfs_pgrq_cachep != NULL) {
131 CDEBUG(D_INODE, "destroying obdfs_pgrqcache at %p, count %d\n",
132 obdfs_pgrq_cachep, obdfs_cache_count);
/* kmem_cache_destroy returns non-zero when objects are still allocated */
133 if (kmem_cache_destroy(obdfs_pgrq_cachep))
134 printk(KERN_INFO "obd_cleanup_pgrqcache: unable to free all of cache\n");
136 printk(KERN_INFO "obd_cleanup_pgrqcache: called with NULL cache pointer\n");
139 } /* obdfs_cleanup_pgrqcache */
143 * Find a specific page in the page cache. If it is found, we return
144 * the write request struct associated with it, if not found return NULL.
145 * Called with the list lock held.
/*
 * obdfs_find_in_page_list: linear scan of @inode's pending-write list for
 * an entry whose rq_page equals @page.  Returns the matching obdfs_pgrq,
 * or NULL (empty list / no match).
 */
147 static struct obdfs_pgrq *
148 obdfs_find_in_page_list(struct inode *inode, struct page *page)
150 struct list_head *page_list = obdfs_iplist(inode);
151 struct list_head *tmp;
155 CDEBUG(D_INODE, "looking for inode %ld page %p\n", inode->i_ino, page);
/* fast path: nothing queued for this inode */
158 if (list_empty(page_list)) {
159 CDEBUG(D_INODE, "empty list\n");
/* walk the circular list until we wrap back to the head */
164 while ( (tmp = tmp->next) != page_list ) {
165 struct obdfs_pgrq *pgrq;
167 pgrq = list_entry(tmp, struct obdfs_pgrq, rq_plist);
/* match on the page pointer itself, not the index */
168 if (pgrq->rq_page == page) {
169 CDEBUG(D_INODE, "found page %p in list\n", page);
177 } /* obdfs_find_in_page_list */
180 /* called with the list lock held */
/*
 * obdfs_find_page_index: like obdfs_find_in_page_list, but matches by page
 * index rather than page pointer.  Returns the queued page for @index, or
 * NULL if none is pending for @inode.
 */
181 static struct page* obdfs_find_page_index(struct inode *inode, unsigned long index)
183 struct list_head *page_list = obdfs_iplist(inode);
184 struct list_head *tmp;
189 CDEBUG(D_INODE, "looking for inode %ld pageindex %ld\n",
190 inode->i_ino, index);
193 if (list_empty(page_list)) {
/* walk the circular list until we wrap back to the head */
198 while ( (tmp = tmp->next) != page_list ) {
199 struct obdfs_pgrq *pgrq;
201 pgrq = list_entry(tmp, struct obdfs_pgrq, rq_plist);
202 page = pgrq->rq_page;
203 if (index == page->index) {
204 CDEBUG(D_INODE, "INDEX SEARCH found page %p in list, index %ld\n", page, index);
212 } /* obdfs_find_page_index */
215 /* call and free pages from Linux page cache: called with io lock on inodes */
/*
 * obdfs_do_vec_wr: issue one vectored WRITE brw for a batch of pages
 * spanning @num_obdos objects, then release the page references and copy
 * obdo attributes back into the corresponding inodes, freeing each obdo.
 *
 * The parallel arrays (oa_bufs/pages/bufs/counts/offsets/flags) describe
 * @num_io page-sized buffers; obdos/inodes have @num_obdos entries.
 * All objects are assumed to live on the connection of inodes[0]'s sb.
 * NOTE(review): the visible loops index with num_io/num_obdos before any
 * visible decrement — the decrements are on lines missing from this
 * extraction; confirm against the full file.
 */
216 int obdfs_do_vec_wr(struct inode **inodes, obd_count num_io,
217 obd_count num_obdos, struct obdo **obdos,
218 obd_count *oa_bufs, struct page **pages, char **bufs,
219 obd_size *counts, obd_off *offsets, obd_flag *flags)
221 struct super_block *sb = inodes[0]->i_sb;
222 struct obdfs_sb_info *sbi = (struct obdfs_sb_info *)&sb->u.generic_sbp;
226 CDEBUG(D_INODE, "writing %d page(s), %d obdo(s) in vector\n",
/* single vectored write through the superblock's OBD operations */
228 err = OPS(sb, brw)(WRITE, &sbi->osi_conn, num_obdos, obdos, oa_bufs,
229 bufs, counts, offsets, flags);
231 /* release the pages from the page cache */
232 while ( num_io > 0 ) {
234 CDEBUG(D_INODE, "calling put_page for %p, index %ld\n", pages[num_io], pages[num_io]->index);
235 put_page(pages[num_io]);
/* propagate per-object attributes back to the inodes, then free obdos */
238 while ( num_obdos > 0) {
240 CDEBUG(D_INODE, "copy/free obdo %ld\n",
241 (long)obdos[num_obdos]->o_id);
242 obdfs_to_inode(inodes[num_obdos], obdos[num_obdos]);
243 obdo_free(obdos[num_obdos]);
251 * Add a page to the write request cache list for later writing
252 * ASYNCHRONOUS write method.
/*
 * obdfs_add_page_to_cache: queue @page on @inode's pending-write list
 * (taking a page reference), and put the inode itself on the superblock's
 * dirty-inode list (with an extra inode reference via iget) the first
 * time.  All list manipulation happens under osi_list_mutex.
 */
254 static int obdfs_add_page_to_cache(struct inode *inode, struct page *page)
260 /* If this page isn't already in the inode page list, add it */
261 obd_down(&obdfs_i2sbi(inode)->osi_list_mutex);
262 if ( !obdfs_find_in_page_list(inode, page) ) {
263 struct obdfs_pgrq *pgrq;
264 pgrq = kmem_cache_alloc(obdfs_pgrq_cachep, SLAB_KERNEL);
265 CDEBUG(D_INODE, "adding inode %ld page %p, pgrq: %p, cache count [%d]\n",
266 inode->i_ino, page, pgrq, obdfs_cache_count);
/* drop the mutex on this path — presumably the allocation-failure exit;
 * the alloc check itself is on a line missing from this extraction */
269 obd_up(&obdfs_i2sbi(inode)->osi_list_mutex);
272 memset(pgrq, 0, sizeof(*pgrq));
274 pgrq->rq_page = page;
/* hold the page until the write-back in obdfs_do_vec_wr releases it */
275 get_page(pgrq->rq_page);
276 list_add(&pgrq->rq_plist, obdfs_iplist(inode));
279 /* If inode isn't already on the superblock inodes list, add it,
280 * and increase ref count on inode so it doesn't disappear on us.
/* empty islist means this inode is not yet queued on the superblock */
282 if ( list_empty(obdfs_islist(inode)) ) {
283 iget(inode->i_sb, inode->i_ino);
284 CDEBUG(D_INODE, "adding inode %ld to superblock list %p\n",
285 inode->i_ino, obdfs_slist(inode));
286 list_add(obdfs_islist(inode), obdfs_slist(inode));
289 /* XXX For testing purposes, we write out the page here.
290 * In the future, a flush daemon will write out the page.
291 res = obdfs_flush_reqs(obdfs_slist(inode), 0);
292 obdfs_flush_dirty_pages(1);
294 obd_up(&obdfs_i2sbi(inode)->osi_list_mutex);
299 } /* obdfs_add_page_to_cache */
302 /* select between SYNC and ASYNC I/O methods */
/*
 * obdfs_do_writepage: write @page of @inode, either synchronously via
 * obdfs_brw(WRITE) or by queueing it through obdfs_add_page_to_cache,
 * and mark the page up to date.  @sync selects between the two paths.
 */
303 int obdfs_do_writepage(struct inode *inode, struct page *page, int sync)
308 /* PDEBUG(page, "WRITEPAGE"); */
/* synchronous path: create=1 so backing blocks are allocated on write */
310 err = obdfs_brw(WRITE, inode, page, 1);
/* asynchronous path: defer the write to the flush machinery */
312 err = obdfs_add_page_to_cache(inode, page);
313 CDEBUG(D_IOCTL, "DO_WR ino: %ld, page %p, err %d, uptodata %d\n", inode->i_ino, page, err, Page_Uptodate(page));
317 SetPageUptodate(page);
318 /* PDEBUG(page,"WRITEPAGE"); */
321 } /* obdfs_do_writepage */
323 /* returns the page unlocked, but with a reference */
/* VFS writepage hook: thin wrapper selecting the ASYNC path (sync=0). */
324 int obdfs_writepage(struct dentry *dentry, struct page *page)
326 return obdfs_do_writepage(dentry->d_inode, page, 0);
330 * This does the "real" work of the write. The generic routine has
331 * allocated the page, locked it, done all the page alignment stuff
332 * calculations etc. Now we should just copy the data from user
333 * space and write it back to the real medium..
335 * If the writer ends up delaying the write, the writer needs to
336 * increment the page use counts until he is done with the page.
338 * Return value is the number of bytes written.
/*
 * obdfs_write_one_page: copy @bytes of user data into @page at @offset
 * and write the page back.  For a partial-page write the page is first
 * brought up to date with a synchronous read (create=1).
 * Returns @bytes on success, or a negative error from the write path.
 */
340 int obdfs_write_one_page(struct file *file, struct page *page,
341 unsigned long offset, unsigned long bytes,
344 struct inode *inode = file->f_dentry->d_inode;
/* read-modify-write: fill the page before overlaying the user data */
348 if ( !Page_Uptodate(page) ) {
349 err = obdfs_brw(READ, inode, page, 1);
351 SetPageUptodate(page);
/* copy_from_user returns non-zero on fault (uncopied byte count) */
356 if (copy_from_user((u8*)page_address(page) + offset, buf, bytes))
360 err = obdfs_writepage(file->f_dentry, page);
363 return (err < 0 ? err : bytes);
364 } /* obdfs_write_one_page */
367 * return an up to date page:
368 * - if locked is true then is returned locked
369 * - if create is true the corresponding disk blocks are created
370 * - page is held, i.e. caller must release the page
372 * modeled on NFS code.
/*
 * obdfs_getpage: look up (or insert) the page covering @offset in
 * @inode's page cache and make sure its contents are up to date, reading
 * from the object device via obdfs_brw when necessary.  A page already
 * queued for write-back is reported (OVERWRITE) before being re-read.
 * NOTE(review): interior lines (unlock handling for @locked, page_cache
 * release, error exits) are missing from this extraction.
 */
374 struct page *obdfs_getpage(struct inode *inode, unsigned long offset, int create, int locked)
376 struct page *page_cache;
/* round down to the start of the containing page */
384 offset = offset & PAGE_CACHE_MASK;
385 CDEBUG(D_INODE, "ino: %ld, offset %ld, create %d, locked %d\n",
386 inode->i_ino, offset, create, locked);
387 index = offset >> PAGE_CACHE_SHIFT;
/* pre-allocate a page in case grab_cache_page needs a fresh one */
391 page_cache = page_cache_alloc();
392 if ( ! page_cache ) {
396 CDEBUG(D_INODE, "page_cache %p\n", page_cache);
398 hash = page_hash(&inode->i_data, index);
/* find-or-create the cache page for this index; returns it locked */
399 page = grab_cache_page(&inode->i_data, index);
403 printk("grab_cache_page says no dice ...\n");
408 PDEBUG(page, "GETPAGE: got page - before reading\n");
409 /* now check if the data in the page is up to date */
410 if ( Page_Uptodate(page)) {
/* a queued dirty page must not be clobbered silently — log it */
418 if ( obdfs_find_page_index(inode, index) ) {
419 printk("OVERWRITE: found dirty page %p, index %ld\n", page, page->index);
422 err = obdfs_brw(READ, inode, page, create);
433 SetPageUptodate(page);
434 PDEBUG(page,"GETPAGE - after reading");
437 } /* obdfs_getpage */