2 * OBDFS Super operations
4 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
5 * Copyright (C) 1999 Stelias Computing Inc,
6 * (author Peter J. Braam <braam@stelias.com>)
7 * Copyright (C) 1999 Seagate Technology Inc.
11 #include <linux/config.h>
12 #include <linux/kernel.h>
14 #include <linux/string.h>
15 #include <linux/stat.h>
16 #include <linux/errno.h>
17 #include <linux/locks.h>
18 #include <linux/unistd.h>
20 #include <asm/system.h>
21 #include <asm/uaccess.h>
24 #include <linux/stat.h>
25 #include <asm/uaccess.h>
26 #include <linux/vmalloc.h>
27 #include <asm/segment.h>
29 #include <linux/pagemap.h>
30 #include <linux/smp_lock.h>
32 #include <linux/obd_support.h>
33 #include <linux/obd_ext2.h>
34 #include <linux/obdfs.h>
37 /* SYNCHRONOUS I/O for an inode: transfer one page through the underlying
 * OBD driver's brw (block read/write) method.
 *
 * rw:     READ or WRITE
 * inode:  inode owning the page; its index fixes the file offset
 * page:   page whose data is transferred (one whole PAGE_SIZE buffer)
 * create: non-zero passes OBD_BRW_CREATE so backing blocks get allocated
 *
 * NOTE(review): this chunk is missing lines — the declarations of `oa` and
 * `err`, the error checks, and the return path are not visible here.
 */
38 static int obdfs_brw(int rw, struct inode *inode, struct page *page, int create)
/* A single obdo describing a single buffer: one whole page. */
40 obd_count num_obdo = 1;
41 obd_count bufs_per_obdo = 1;
43 char *buf = (char *)page_address(page);
44 obd_size count = PAGE_SIZE;
/* Byte offset of this page within the object. */
45 obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
46 obd_flag flags = create ? OBD_BRW_CREATE : 0;
/* Build an obdo for this inode, then copy VFS inode attributes into it. */
50 oa = obdo_fromid(IID(inode), inode->i_ino, OBD_MD_FLNOTOBD);
55 obdfs_from_inode(oa, inode);
/* Hand the vectored transfer to the underlying OBD device. */
57 err = IOPS(inode, brw)(rw, IID(inode), num_obdo, &oa, &bufs_per_obdo,
58 &buf, &count, &offset, &flags);
61 obdfs_to_inode(inode, oa); /* copy o_blocks to i_blocks */
69 /* Read one page for dentry's inode, synchronously, via obdfs_brw().
 * Returns the page unlocked, but with a reference.
 * NOTE(review): lines are missing from this chunk — the declaration of `rc`,
 * any error check before SetPageUptodate, and the return are not visible. */
70 int obdfs_readpage(struct dentry *dentry, struct page *page)
72 struct inode *inode = dentry->d_inode;
76 /* PDEBUG(page, "READ"); */
/* Synchronous read; create == 0: do not allocate missing blocks. */
77 rc = obdfs_brw(READ, inode, page, 0);
/* Mark page contents valid (error-check lines not visible in this chunk). */
79 SetPageUptodate(page);
82 /* PDEBUG(page, "READ"); */
85 } /* obdfs_readpage */
/* Slab cache for struct obdfs_pgrq descriptors (one per queued dirty page);
 * created lazily by obdfs_init_pgrqcache(). */
87 static kmem_cache_t *obdfs_pgrq_cachep = NULL;
/* Create the slab cache used for struct obdfs_pgrq allocations.
 * Idempotent: if the cache already exists it is reused.
 * NOTE(review): lines are missing from this chunk — the remaining
 * kmem_cache_create() arguments, the failure return, and the final
 * return are not visible. */
89 int obdfs_init_pgrqcache(void)
92 if (obdfs_pgrq_cachep == NULL) {
93 CDEBUG(D_CACHE, "allocating obdfs_pgrq_cache\n");
94 obdfs_pgrq_cachep = kmem_cache_create("obdfs_pgrq",
95 sizeof(struct obdfs_pgrq),
96 0, SLAB_HWCACHE_ALIGN,
98 if (obdfs_pgrq_cachep == NULL) {
102 CDEBUG(D_CACHE, "allocated cache at %p\n",
106 CDEBUG(D_CACHE, "using existing cache at %p\n",
111 } /* obdfs_init_pgrqcache */
/* Unlink a page request from its per-inode list and free it back to the
 * pgrq slab cache.  Presumably called with the list lock held, matching
 * the find routines below — TODO confirm (caller lines not visible). */
113 inline void obdfs_pgrq_del(struct obdfs_pgrq *pgrq)
116 CDEBUG(D_INFO, "deleting page %p from list [count %ld]\n",
117 pgrq->rq_page, obdfs_cache_count);
118 list_del(&pgrq->rq_plist);
119 kmem_cache_free(obdfs_pgrq_cachep, pgrq);
/* Destroy the pgrq slab cache at module cleanup time.  Warns if the cache
 * still has live objects (kmem_cache_destroy failure) or if called when the
 * cache was never created. */
122 void obdfs_cleanup_pgrqcache(void)
125 if (obdfs_pgrq_cachep != NULL) {
126 CDEBUG(D_CACHE, "destroying obdfs_pgrqcache at %p, count %ld\n",
127 obdfs_pgrq_cachep, obdfs_cache_count);
/* Non-zero return means objects were still in use. */
128 if (kmem_cache_destroy(obdfs_pgrq_cachep))
129 printk(KERN_INFO __FUNCTION__
130 ": unable to free all of cache\n");
131 obdfs_pgrq_cachep = NULL;
133 printk(KERN_INFO __FUNCTION__ ": called with NULL pointer\n");
136 } /* obdfs_cleanup_pgrqcache */
140 * Find a specific page in the page cache. If it is found, we return
141 * the write request struct associated with it, if not found return NULL.
142 * Called with the list lock held.
 *
 * NOTE(review): lines are missing from this chunk — the initialization of
 * `tmp` (presumably to page_list) and the return statements are not visible.
144 static struct obdfs_pgrq *
145 obdfs_find_in_page_list(struct inode *inode, struct page *page)
147 struct list_head *page_list = obdfs_iplist(inode);
148 struct list_head *tmp;
152 CDEBUG(D_INFO, "looking for inode %ld page %p\n", inode->i_ino, page);
/* Nothing queued for this inode: fast-path out. */
155 if (list_empty(page_list)) {
156 CDEBUG(D_INFO, "empty list\n");
/* Linear scan of the per-inode pgrq list comparing page pointers. */
161 while ( (tmp = tmp->next) != page_list ) {
162 struct obdfs_pgrq *pgrq;
164 pgrq = list_entry(tmp, struct obdfs_pgrq, rq_plist);
165 if (pgrq->rq_page == page) {
166 CDEBUG(D_INFO, "found page %p in list\n", page);
174 } /* obdfs_find_in_page_list */
177 /* Look up a queued dirty page for `inode` by page-cache index rather than
 * by page pointer; returns the struct page, or presumably NULL when absent
 * (return lines not visible in this chunk).  Called with the list lock held.
 * NOTE(review): the `index` parameter, `page` declaration, and the
 * initialization of `tmp` are on lines missing from this chunk. */
178 static struct page *obdfs_find_page_index(struct inode *inode,
181 struct list_head *page_list = obdfs_iplist(inode);
182 struct list_head *tmp;
187 CDEBUG(D_INFO, "looking for inode %ld pageindex %ld\n",
188 inode->i_ino, index);
191 if (list_empty(page_list)) {
/* Linear scan comparing the cached page's index against the target. */
196 while ( (tmp = tmp->next) != page_list ) {
197 struct obdfs_pgrq *pgrq;
199 pgrq = list_entry(tmp, struct obdfs_pgrq, rq_plist);
200 page = pgrq->rq_page;
201 if (index == page->index) {
203 "INDEX SEARCH found page %p, index %ld\n",
212 } /* obdfs_find_page_index */
215 /* Write a vector of pages in one OBD brw call, then release the pages from
 * the Linux page cache and free the obdos: called with io lock on inodes.
 *
 * inodes/num_io:   pages[] owners and page count
 * num_obdos/obdos: data objects being written, one or more pages each
 * oa_bufs:         pages per obdo
 * bufs/counts/offsets/flags: per-page buffer descriptors for the brw call
 *
 * NOTE(review): lines are missing from this chunk — the decrements of
 * num_io/num_obdos in the two release loops, error handling after the brw
 * call, and the return are not visible. */
216 int obdfs_do_vec_wr(struct inode **inodes, obd_count num_io,
217 obd_count num_obdos, struct obdo **obdos,
218 obd_count *oa_bufs, struct page **pages, char **bufs,
219 obd_size *counts, obd_off *offsets, obd_flag *flags)
221 struct super_block *sb = inodes[0]->i_sb;
222 struct obdfs_sb_info *sbi = (struct obdfs_sb_info *)&sb->u.generic_sbp;
226 CDEBUG(D_INFO, "writing %d page(s), %d obdo(s) in vector\n",
231 for (i = 0; i < num_obdos; i++)
232 printk("%ld:0x%p ", (long)obdos[i]->o_id, obdos[i]);
235 for (i = 0; i < num_io; i++)
236 printk("0x%p ", pages[i]);
/* Single vectored WRITE covering all pages/obdos at once. */
240 err = OPS(sb, brw)(WRITE, &sbi->osi_conn, num_obdos, obdos, oa_bufs,
241 bufs, counts, offsets, flags);
243 CDEBUG(D_CACHE, "BRW done\n");
244 /* release the pages from the page cache */
245 while ( num_io > 0 ) {
247 CDEBUG(D_INFO, "calling put_page for %p, index %ld\n",
248 pages[num_io], pages[num_io]->index);
249 /* PDEBUG(pages[num_io], "do_vec_wr"); */
250 put_page(pages[num_io]);
251 /* PDEBUG(pages[num_io], "do_vec_wr"); */
253 CDEBUG(D_CACHE, "put_page done\n");
/* Propagate returned attributes back to each inode, then free the obdos. */
255 while ( num_obdos > 0) {
257 CDEBUG(D_INFO, "free obdo %ld\n",(long)obdos[num_obdos]->o_id);
258 obdfs_to_inode(inodes[num_obdos], obdos[num_obdos]);
259 obdo_free(obdos[num_obdos]);
261 CDEBUG(D_CACHE, "obdo_free done\n");
268 * Add a page to the write request cache list for later writing
269 * ASYNCHRONOUS write method.
 *
 * Takes a reference on the page (get_page) while it sits on the per-inode
 * dirty list, and puts the inode on the per-superblock list the first time
 * one of its pages is queued.  Protected by the sbi list mutex.
 * NOTE(review): lines are missing from this chunk — the allocation-failure
 * path body, the inode i_count increment, and the return are not visible.
271 static int obdfs_add_page_to_cache(struct inode *inode, struct page *page)
277 /* If this page isn't already in the inode page list, add it */
278 obd_down(&obdfs_i2sbi(inode)->osi_list_mutex);
279 if ( !obdfs_find_in_page_list(inode, page) ) {
280 struct obdfs_pgrq *pgrq;
281 pgrq = kmem_cache_alloc(obdfs_pgrq_cachep, SLAB_KERNEL);
/* Allocation failed: drop the mutex before bailing out. */
284 obd_up(&obdfs_i2sbi(inode)->osi_list_mutex);
287 memset(pgrq, 0, sizeof(*pgrq));
289 pgrq->rq_page = page;
290 pgrq->rq_jiffies = jiffies;
/* Hold the page while it is queued; released in the flush path. */
291 get_page(pgrq->rq_page);
292 list_add(&pgrq->rq_plist, obdfs_iplist(inode));
295 "added inode %ld page %p, pgrq: %p, cache count [%ld]\n",
296 inode->i_ino, page, pgrq, obdfs_cache_count);
299 /* If inode isn't already on the superblock inodes list, add it,
300 * and increase ref count on inode so it doesn't disappear on us.
302 * We increment the reference count on the inode to keep it from
303 * being freed from memory. This _should_ be an iget() with an
304 * iput() in both flush_reqs() and put_inode(), but since put_inode()
305 * is called from iput() we can't call iput() again there. Instead
306 * we just increment/decrement i_count, which is essentially what
307 * iget/iput do for an inode already in memory.
309 if ( list_empty(obdfs_islist(inode)) ) {
311 CDEBUG(D_INFO, "adding inode %ld to superblock list %p\n",
312 inode->i_ino, obdfs_slist(inode));
313 list_add(obdfs_islist(inode), obdfs_slist(inode));
315 obd_up(&obdfs_i2sbi(inode)->osi_list_mutex);
317 /* XXX For testing purposes, we write out the page here.
318 * In the future, a flush daemon will write out the page.
319 res = obdfs_flush_reqs(obdfs_slist(inode), ~0UL);
324 } /* obdfs_add_page_to_cache */
327 /* select between SYNC and ASYNC I/O methods: sync != 0 writes the page
 * immediately via obdfs_brw(WRITE, ..., create=1); otherwise the page is
 * queued on the async write cache via obdfs_add_page_to_cache().
 * NOTE(review): lines are missing from this chunk — the sync/async branch
 * structure, the error check before SetPageUptodate, and the return are
 * not fully visible. */
328 int obdfs_do_writepage(struct inode *inode, struct page *page, int sync)
333 /* PDEBUG(page, "WRITEPAGE"); */
335 err = obdfs_brw(WRITE, inode, page, 1);
337 err = obdfs_add_page_to_cache(inode, page);
338 CDEBUG(D_INFO, "DO_WR ino: %ld, page %p, err %d, uptodate %d\n",
339 inode->i_ino, page, err, Page_Uptodate(page));
343 SetPageUptodate(page);
344 /* PDEBUG(page,"WRITEPAGE"); */
347 } /* obdfs_do_writepage */
349 /* returns the page unlocked, but with a reference */
/* VFS writepage entry point: delegates to obdfs_do_writepage with sync == 0
 * (asynchronous path). */
350 int obdfs_writepage(struct dentry *dentry, struct page *page)
352 return obdfs_do_writepage(dentry->d_inode, page, 0);
356 * This does the "real" work of the write. The generic routine has
357 * allocated the page, locked it, done all the page alignment stuff
358 * calculations etc. Now we should just copy the data from user
359 * space and write it back to the real medium..
361 * If the writer ends up delaying the write, the writer needs to
362 * increment the page use counts until he is done with the page.
364 * Return value is the number of bytes written.
 *
 * NOTE(review): lines are missing from this chunk — the `buf` parameter,
 * `err` declaration, error checks after the read-modify-write fill, and
 * the unlock_kernel() pairing lock_kernel() are not visible.
366 int obdfs_write_one_page(struct file *file, struct page *page,
367 unsigned long offset, unsigned long bytes,
370 struct inode *inode = file->f_dentry->d_inode;
374 /* We check for complete page writes here, as we then don't have to
375 * get the page before writing over everything anyways.
 * Partial write of a non-uptodate page: read the page in first so the
 * untouched bytes are preserved (read-modify-write).
377 if ( !Page_Uptodate(page) && (offset != 0 || bytes != PAGE_SIZE) ) {
378 err = obdfs_brw(READ, inode, page, 0);
381 SetPageUptodate(page);
/* Copy the user data into the page; non-zero means a fault occurred. */
384 if (copy_from_user((u8*)page_address(page) + offset, buf, bytes))
387 lock_kernel(); /* XXX do we really need to lock the kernel to write? */
388 err = obdfs_writepage(file->f_dentry, page);
391 return (err < 0 ? err : bytes);
392 } /* obdfs_write_one_page */
395 * return an up to date page:
396 * - if locked is true then is returned locked
397 * - if create is true the corresponding disk blocks are created
398 * - page is held, i.e. caller must release the page
400 * modeled on NFS code.
 *
 * NOTE(review): lines are missing from this chunk — the declarations of
 * `page`/`index`/`err`, the unlock-on-uptodate path, and the return
 * statements are not visible.
402 struct page *obdfs_getpage(struct inode *inode, unsigned long offset,
403 int create, int locked)
/* Round the byte offset down to its page boundary. */
411 offset = offset & PAGE_CACHE_MASK;
412 CDEBUG(D_INFO, "ino: %ld, offset %ld, create %d, locked %d\n",
413 inode->i_ino, offset, create, locked);
414 index = offset >> PAGE_CACHE_SHIFT;
/* Find-or-create the page in the cache; returned locked and held. */
416 page = grab_cache_page(&inode->i_data, index);
420 printk(KERN_WARNING " grab_cache_page says no dice ...\n");
425 /* PDEBUG(page, "GETPAGE: got page - before reading\n"); */
426 /* now check if the data in the page is up to date */
427 if ( Page_Uptodate(page)) {
/* Debug-only: warn if a dirty (queued-for-write) copy of this index
 * exists on the async write list while we are about to read it. */
435 #ifdef EXT2_OBD_DEBUG
436 if ((obd_debug_level & D_INFO) && obdfs_find_page_index(inode, index)) {
437 CDEBUG(D_INFO, "OVERWRITE: found dirty page %p, index %ld\n",
/* Page not up to date: read it synchronously from the OBD device. */
442 err = obdfs_brw(READ, inode, page, create);
453 SetPageUptodate(page);
454 /* PDEBUG(page,"GETPAGE - after reading"); */
457 } /* obdfs_getpage */