2 * OBDFS Super operations
4 * This code is issued under the GNU General Public License.
5 * See the file COPYING in this distribution
7 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
8 * Copyright (C) 1999 Stelias Computing Inc,
9 * (author Peter J. Braam <braam@stelias.com>)
10 * Copyright (C) 1999 Seagate Technology Inc.
14 #include <linux/config.h>
15 #include <linux/kernel.h>
17 #include <linux/string.h>
18 #include <linux/stat.h>
19 #include <linux/errno.h>
20 #include <linux/locks.h>
21 #include <linux/unistd.h>
23 #include <asm/system.h>
24 #include <asm/uaccess.h>
27 #include <linux/stat.h>
28 #include <asm/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <asm/segment.h>
32 #include <linux/pagemap.h>
33 #include <linux/smp_lock.h>
35 #include <linux/obd_support.h>
36 #include <linux/obd_ext2.h>
37 #include <linux/obdfs.h>
39 void obdfs_change_inode(struct inode *inode);
41 /* SYNCHRONOUS I/O to object storage for an inode -- object attr will be updated too */
/*
 * Read or write one whole page (rw is READ or WRITE) against the object
 * backing @inode.  Builds a single-obdo, single-buffer request and calls
 * the underlying OBD driver's brw method directly, so the caller blocks
 * until the transfer completes.  @create requests block allocation on
 * the object store (OBD_BRW_CREATE) for writes into holes.
 * Returns the brw method's status; error paths are on elided lines.
 */
42 static int obdfs_brw(int rw, struct inode *inode, struct page *page, int create)
/* One obdo carrying one buffer: this request covers exactly one page. */
44 obd_count num_obdo = 1;
45 obd_count bufs_per_obdo = 1;
47 obd_size count = PAGE_SIZE;
/* Byte offset of this page within the backing object. */
48 obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
49 obd_flag flags = create ? OBD_BRW_CREATE : 0;
/* Refuse early if the attached OBD driver provides no brw method. */
53 if (IOPS(inode, brw) == NULL) {
54 printk(KERN_ERR __FUNCTION__ ": no brw method!\n");
/* NOTE(review): oa (struct obdo *) is allocated on elided lines above. */
64 oa->o_valid = OBD_MD_FLNOTOBD;
/* Seed the obdo with the inode's current attributes before the transfer. */
65 obdfs_from_inode(oa, inode);
67 err = IOPS(inode, brw)(rw, IID(inode), num_obdo, &oa, &bufs_per_obdo,
68 &page, &count, &offset, &flags);
/* Propagate the post-I/O object size back into the inode.
 * NOTE(review): original comment said "copy o_blocks to i_blocks" but the
 * call passes o_size -- presumably obdfs_set_size() handles both; confirm. */
70 obdfs_set_size (inode, oa->o_size);
79 /* returns the page unlocked, but with a reference */
/*
 * address_space readpage operation: fill @page with data from the object
 * store.  Pages wholly beyond EOF are zero-filled instead of read.
 */
80 int obdfs_readpage(struct file *file, struct page *page)
82 struct inode *inode = page->mapping->host;
/* Page index at or past the last file page -> no backing data; zero it.
 * (Comparison operand is on an elided line.) */
87 if ( ((inode->i_size + PAGE_CACHE_SIZE -1)>>PAGE_SHIFT)
89 memset(kmap(page), 0, PAGE_CACHE_SIZE);
/* Already valid in the page cache -- skip the storage read. */
93 if (Page_Uptodate(page)) {
/* Synchronous read of the full page; create=0 (never allocate on read). */
98 rc = obdfs_brw(READ, inode, page, 0);
103 /* PDEBUG(page, "READ"); */
106 SetPageUptodate(page);
107 obd_unlock_page(page);
110 } /* obdfs_readpage */
/*
 * address_space prepare_write operation: make sure @page holds valid data
 * before the caller copies in bytes [from, to).  A read from storage is
 * needed only when the page is not up to date and the write does not
 * cover the whole page.
 */
112 int obdfs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
114 struct inode *inode = page->mapping->host;
115 obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
120 /* PDEBUG(page, "READ"); */
121 if (Page_Uptodate(page)) {
/* Full-page overwrite: no need to pre-read the old contents.
 * NOTE(review): from/to look page-relative but are compared against the
 * file offset of the page -- verify this condition against callers. */
126 if ( (from <= offset) && (to >= offset + PAGE_SIZE) ) {
131 rc = obdfs_brw(READ, inode, page, 0);
133 SetPageUptodate(page);
134 /* obd_unlock_page(page); */
136 /* PDEBUG(page, "READ"); */
/* Slab cache for struct obdfs_pgrq (per-page write-request descriptors). */
144 static kmem_cache_t *obdfs_pgrq_cachep = NULL;
/*
 * Create the pgrq slab cache if it does not exist yet; idempotent, so a
 * repeat call just reuses the existing cache.
 */
146 int obdfs_init_pgrqcache(void)
149 if (obdfs_pgrq_cachep == NULL) {
150 CDEBUG(D_CACHE, "allocating obdfs_pgrq_cache\n");
151 obdfs_pgrq_cachep = kmem_cache_create("obdfs_pgrq",
152 sizeof(struct obdfs_pgrq),
153 0, SLAB_HWCACHE_ALIGN,
/* kmem_cache_create returns NULL on failure. */
155 if (obdfs_pgrq_cachep == NULL) {
159 CDEBUG(D_CACHE, "allocated cache at %p\n",
163 CDEBUG(D_CACHE, "using existing cache at %p\n",
168 } /* obdfs_init_pgrqcache */
/*
 * Remove a page request from its per-inode list, clear the page's
 * OBD-cache bit, and free the descriptor back to the slab cache.
 * Caller presumably holds the list lock -- confirm at call sites.
 */
170 inline void obdfs_pgrq_del(struct obdfs_pgrq *pgrq)
173 CDEBUG(D_INFO, "deleting page %p from list [count %ld]\n",
174 pgrq->rq_page, obdfs_cache_count);
175 list_del(&pgrq->rq_plist);
/* Clearing the bit before the write means a racing writer may queue the
 * page again -- at worst it is written twice (see obdfs_add_page_to_cache). */
176 OBDClearCachePage(pgrq->rq_page);
177 kmem_cache_free(obdfs_pgrq_cachep, pgrq);
/*
 * Destroy the pgrq slab cache at module teardown.  Complains if some
 * objects are still allocated (kmem_cache_destroy returns nonzero) or
 * if the cache was never created.
 */
180 void obdfs_cleanup_pgrqcache(void)
183 if (obdfs_pgrq_cachep != NULL) {
184 CDEBUG(D_CACHE, "destroying obdfs_pgrqcache at %p, count %ld\n",
185 obdfs_pgrq_cachep, obdfs_cache_count);
186 if (kmem_cache_destroy(obdfs_pgrq_cachep))
187 printk(KERN_INFO __FUNCTION__
188 ": unable to free all of cache\n");
189 obdfs_pgrq_cachep = NULL;
191 printk(KERN_INFO __FUNCTION__ ": called with NULL pointer\n");
194 } /* obdfs_cleanup_pgrqcache */
197 /* called with the list lock held */
/*
 * Linear scan of the inode's dirty-page request list for a pgrq whose
 * page has the given index.  Returns the page if found (return statements
 * are on elided lines); used for debug overwrite detection in getpage.
 */
198 static struct page *obdfs_find_page_index(struct inode *inode,
201 struct list_head *page_list = obdfs_iplist(inode);
202 struct list_head *tmp;
207 CDEBUG(D_INFO, "looking for inode %ld pageindex %ld\n",
208 inode->i_ino, index);
/* Empty list: nothing cached for this inode. */
211 if (list_empty(page_list)) {
/* Walk every queued pgrq on the inode's page list. */
216 while ( (tmp = tmp->next) != page_list ) {
217 struct obdfs_pgrq *pgrq;
219 pgrq = list_entry(tmp, struct obdfs_pgrq, rq_plist);
220 page = pgrq->rq_page;
221 if (index == page->index) {
223 "INDEX SEARCH found page %p, index %ld\n",
232 } /* obdfs_find_page_index */
235 /* call and free pages from Linux page cache: called with io lock on inodes */
/*
 * Issue one vectored WRITE brw covering @num_io pages grouped into
 * @num_obdos obdos, then release the page references taken when the
 * pages were queued and free the obdos.  All arrays are parallel,
 * prepared by the flush path.  Uses inodes[0]'s driver for the whole
 * batch -- presumably all inodes share one OBD device; confirm.
 */
236 int obdfs_do_vec_wr(struct inode **inodes, obd_count num_io,
237 obd_count num_obdos, struct obdo **obdos,
238 obd_count *oa_bufs, struct page **pages, char **bufs,
239 obd_size *counts, obd_off *offsets, obd_flag *flags)
244 if (IOPS(inodes[0], brw) == NULL) {
245 printk(KERN_ERR __FUNCTION__ ": no brw method!\n");
250 CDEBUG(D_INFO, "writing %d page(s), %d obdo(s) in vector\n",
/* Debug-only dump of the obdo ids and page pointers in this batch. */
252 if (obd_debug_level & D_INFO) { /* DEBUGGING */
255 for (i = 0; i < num_obdos; i++)
256 printk("%ld:0x%p ", (long)obdos[i]->o_id, obdos[i]);
259 for (i = 0; i < num_io; i++)
260 printk("0x%p ", pages[i]);
/* The single vectored write for the entire batch. */
264 err = IOPS(inodes[0], brw)(WRITE, IID(inodes[0]), num_obdos, obdos,
265 oa_bufs, pages, counts, offsets, flags);
267 CDEBUG(D_INFO, "BRW done\n");
268 /* release the pages from the page cache */
/* NOTE(review): num_io is decremented on an elided line inside the loop,
 * so pages[num_io] below indexes the last unreleased page. */
269 while ( num_io > 0 ) {
271 CDEBUG(D_INFO, "calling put_page for %p, index %ld\n",
272 pages[num_io], pages[num_io]->index);
273 /* PDEBUG(pages[num_io], "do_vec_wr"); */
274 put_page(pages[num_io]);
275 /* PDEBUG(pages[num_io], "do_vec_wr"); */
277 CDEBUG(D_INFO, "put_page done\n");
/* Same pattern: num_obdos presumably decremented on an elided line. */
279 while ( num_obdos > 0) {
281 CDEBUG(D_INFO, "free obdo %ld\n",(long)obdos[num_obdos]->o_id);
282 /* copy o_blocks to i_blocks */
283 obdfs_set_size (inodes[num_obdos], obdos[num_obdos]->o_size);
284 obdo_free(obdos[num_obdos]);
286 CDEBUG(D_INFO, "obdo_free done\n");
/*
293 * Add a page to the write request cache list for later writing.
294 * ASYNCHRONOUS write method.
 *
 * Allocates an obdfs_pgrq for @page, pins the page, and links the
 * request onto the inode's dirty-page list under the superblock list
 * mutex; the flush path writes it out later.  The PG_obdcache page bit
 * makes queueing idempotent.
 */
296 static int obdfs_add_page_to_cache(struct inode *inode, struct page *page)
301 /* The PG_obdcache bit is cleared by obdfs_pgrq_del() BEFORE the page
302 * is written, so at worst we will write the page out twice.
304 * If the page has the PG_obdcache bit set, then the inode MUST be
305 * on the superblock dirty list so we don't need to check this.
306 * Dirty inodes are removed from the superblock list ONLY when they
307 * don't have any more cached pages. It is possible to have an inode
308 * with no dirty pages on the superblock list, but not possible to
309 * have an inode with dirty pages NOT on the superblock dirty list.
 */
/* OBDAddCachePage presumably test-and-sets PG_obdcache: zero means the
 * page was not yet queued and we must build a request for it. */
311 if (!OBDAddCachePage(page)) {
312 struct obdfs_pgrq *pgrq;
313 pgrq = kmem_cache_alloc(obdfs_pgrq_cachep, SLAB_KERNEL);
/* Allocation failed: undo the cache bit (error return is elided). */
315 OBDClearCachePage(page);
319 /* not really necessary since we set all pgrq fields here
320 memset(pgrq, 0, sizeof(*pgrq));
 */
323 pgrq->rq_page = page;
/* Timestamp for age-based flushing of queued pages. */
324 pgrq->rq_jiffies = jiffies;
/* Hold a reference while the page sits on the list; dropped in do_vec_wr. */
325 get_page(pgrq->rq_page);
327 obd_down(&obdfs_i2sbi(inode)->osi_list_mutex);
328 list_add(&pgrq->rq_plist, obdfs_iplist(inode));
331 /* If inode isn't already on superblock inodes list, add it.
333 * We increment the reference count on the inode to keep it
334 * from being freed from memory. This _should_ be an iget()
335 * with an iput() in both flush_reqs() and put_inode(), but
336 * since put_inode() is called from iput() we can't call iput()
337 * again there. Instead we just increment/decrement i_count,
338 * which is mostly what iget/iput do for an inode in memory.
 */
340 if ( list_empty(obdfs_islist(inode)) ) {
341 atomic_inc(&inode->i_count);
343 "adding inode %ld to superblock list %p\n",
344 inode->i_ino, obdfs_slist(inode));
345 list_add(obdfs_islist(inode), obdfs_slist(inode));
347 obd_up(&obdfs_i2sbi(inode)->osi_list_mutex);
350 /* XXX For testing purposes, we can write out the page here.
351 err = obdfs_flush_reqs(obdfs_slist(inode), ~0UL);
 */
356 } /* obdfs_add_page_to_cache */
359 /* select between SYNC and ASYNC I/O methods */
/*
 * Write @page out: synchronously via obdfs_brw() when @sync is nonzero,
 * otherwise queue it on the inode's dirty list for a later batched
 * flush.  Marks the page up to date on success (branching is elided).
 */
360 int obdfs_do_writepage(struct page *page, int sync)
362 struct inode *inode = page->mapping->host;
366 /* PDEBUG(page, "WRITEPAGE"); */
/* Synchronous path: create=1 so backing blocks are allocated as needed. */
368 err = obdfs_brw(WRITE, inode, page, 1);
/* Asynchronous path: defer to the write-request cache. */
370 err = obdfs_add_page_to_cache(inode, page);
371 CDEBUG(D_INFO, "DO_WR ino: %ld, page %p, err %d, uptodate %d\n",
372 inode->i_ino, page, err, Page_Uptodate(page));
376 SetPageUptodate(page);
377 /* PDEBUG(page,"WRITEPAGE"); */
380 } /* obdfs_do_writepage */
384 /* returns the page unlocked, but with a reference */
/* address_space writepage operation: always the asynchronous path. */
385 int obdfs_writepage(struct page *page)
387 return obdfs_do_writepage(page, 0);
/*
 * address_space commit_write operation: push the page (synchronously,
 * per the XXX below) after the caller copied bytes [from, to) into it,
 * and grow i_size if the write extended the file.
 */
390 int obdfs_commit_write(struct file *file, struct page *page, unsigned from, unsigned to)
393 struct inode *inode = page->mapping->host;
/* File position of the last byte written on this page. */
394 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
395 // XXX test with synchronous writes
396 rc = obdfs_do_writepage(page, 1);
/* Write extended the file: record the new size and update the object. */
398 if (pos > inode->i_size) {
399 obdfs_set_size (inode, pos);
400 obdfs_change_inode(inode);
/*
407 * This does the "real" work of the write. The generic routine has
408 * allocated the page, locked it, done all the page alignment stuff
409 * calculations etc. Now we should just copy the data from user
410 * space and write it back to the real medium..
412 * If the writer ends up delaying the write, the writer needs to
413 * increment the page use counts until he is done with the page.
415 * Return value is the number of bytes written.
 */
417 int obdfs_write_one_page(struct file *file, struct page *page,
418 unsigned long offset, unsigned long bytes,
421 struct inode *inode = file->f_dentry->d_inode;
425 /* We check for complete page writes here, as we then don't have to
426 * get the page before writing over everything anyways.
 */
/* Partial write into a stale page: read the old contents in first. */
428 if ( !Page_Uptodate(page) && (offset != 0 || bytes != PAGE_SIZE) ) {
429 err = obdfs_brw(READ, inode, page, 0);
432 SetPageUptodate(page);
/* Copy the user data into the page; copy_from_user returns nonzero
 * (bytes uncopied) on fault -- the error path is elided. */
435 if (copy_from_user((u8*)page_address(page) + offset, buf, bytes))
/* Queue the page for (asynchronous) writeback. */
439 err = obdfs_writepage(page);
442 return (err < 0 ? err : bytes);
443 } /* obdfs_write_one_page */
/*
446 * return an up to date page:
447 * - if locked is true then is returned locked
448 * - if create is true the corresponding disk blocks are created
449 * - page is held, i.e. caller must release the page
451 * modeled on NFS code.
 */
453 struct page *obdfs_getpage(struct inode *inode, unsigned long offset,
454 int create, int locked)
/* Round the byte offset down to a page boundary and derive the index. */
462 offset = offset & PAGE_CACHE_MASK;
463 CDEBUG(D_INFO, "ino: %ld, offset %ld, create %d, locked %d\n",
464 inode->i_ino, offset, create, locked);
465 index = offset >> PAGE_CACHE_SHIFT;
/* Find-or-create the page in the cache; returned locked with a ref. */
467 page = grab_cache_page(&inode->i_data, index);
471 printk(KERN_WARNING " grab_cache_page says no dice ...\n");
476 /* PDEBUG(page, "GETPAGE: got page - before reading\n"); */
477 /* now check if the data in the page is up to date */
478 if ( Page_Uptodate(page)) {
/* Cache hit: honor the caller's locking request (unlock unless asked
 * to return the page locked -- the !locked test is on elided lines). */
480 if (PageLocked(page))
481 obd_unlock_page(page);
483 printk("file %s, line %d: expecting locked page\n",
491 #ifdef EXT2_OBD_DEBUG
/* Debug aid: warn when a read is about to overlap a still-queued dirty page. */
492 if ((obd_debug_level & D_INFO) && obdfs_find_page_index(inode, index)) {
493 CDEBUG(D_INFO, "OVERWRITE: found dirty page %p, index %ld\n",
/* Cache miss: synchronously read (and optionally create) the page. */
498 err = obdfs_brw(READ, inode, page, create);
502 obd_unlock_page(page);
508 obd_unlock_page(page);
509 SetPageUptodate(page);
510 /* PDEBUG(page,"GETPAGE - after reading"); */
513 } /* obdfs_getpage */
/*
 * super_operations truncate: shrink the backing object to the inode's
 * (already updated) i_size.  Drops any queued dirty pages first, then
 * calls the driver's punch method to free the trailing extent.
 */
516 void obdfs_truncate(struct inode *inode)
/* Discard pending async write requests -- their data is being cut off. */
522 obdfs_dequeue_pages(inode);
524 if (IOPS(inode, punch) == NULL) {
525 printk(KERN_ERR __FUNCTION__ ": no punch method!\n");
530 obdfs_set_size (inode, inode->i_size);
533 /* XXX This would give an inconsistent FS, so deal with it as
534 * best we can for now - an obdo on the stack is not pretty.
 */
/* Fallback path: obdo_alloc failed (check elided), so punch using a
 * stack-allocated obdo rather than skipping the truncate entirely. */
538 printk(__FUNCTION__ ": obdo_alloc failed - using stack!\n");
540 obdo.o_valid = OBD_MD_FLNOTOBD;
541 obdfs_from_inode(&obdo, inode);
543 err = IOPS(inode, punch)(IID(inode), &obdo, obdo.o_size, 0);
/* Normal path: heap obdo populated from the inode, then punch
 * everything past o_size (== new i_size). */
545 oa->o_valid = OBD_MD_FLNOTOBD;
546 obdfs_from_inode(oa, inode);
548 CDEBUG(D_INFO, "calling punch for %ld (%Lu bytes at 0)\n",
549 (long)oa->o_id, oa->o_size);
550 err = IOPS(inode, punch)(IID(inode), oa, oa->o_size, 0);
556 printk(__FUNCTION__ ": obd_truncate fails (%d)\n", err);
561 } /* obdfs_truncate */