/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Lustre Lite I/O Page Cache
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
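/* 2.5-era kernels dropped the 2.4 kiobuf raw I/O machinery in favour of
 * bios, so pick the matching header for the direct I/O path below */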
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
#include <linux/buffer_head.h>
#else
#include <linux/iobuf.h>
#endif
#include <asm/segment.h>
#include <linux/pagemap.h>

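/* tag this file's debug and console messages with the llite subsystem */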
#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/lustre_mds.h>
#include <linux/lustre_lite.h>
#include <linux/lustre_lib.h>
#include <linux/lustre_compat25.h>

/*
 * Remove page from dirty list
 */
static void __set_page_clean(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode;

        if (!mapping)
                return;

        PGCACHE_WRLOCK(mapping);

        list_del(&page->list);
        list_add(&page->list, &mapping->clean_pages);

        /* XXX doesn't inode_lock protect i_state ? */
        inode = mapping->host;
        if (list_empty(&mapping->dirty_pages)) {
                CDEBUG(D_INODE, "inode clean\n");
                inode->i_state &= ~I_DIRTY_PAGES;
        }

        PGCACHE_WRUNLOCK(mapping);
        EXIT;
}

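/* clear PG_dirty and move the page onto its mapping's clean list so the
 * 2.4 VM no longer considers it for writeback */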
void set_page_clean(struct page *page)
{
        if (PageDirty(page)) {
                ClearPageDirty(page);
                __set_page_clean(page);
        }
}

/* SYNCHRONOUS I/O to object storage for an inode */
static int ll_brw(int cmd, struct inode *inode, struct page *page, int flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct brw_page pg;
        int rc;
        ENTRY;

        pg.pg = page;
        pg.off = ((obd_off)page->index) << PAGE_SHIFT;

        /* for a write to a partial page at EOF, only send up to i_size */
        if (cmd == OBD_BRW_WRITE && (pg.off + PAGE_SIZE > inode->i_size))
                pg.count = inode->i_size % PAGE_SIZE;
        else
                pg.count = PAGE_SIZE;

        CDEBUG(D_PAGE, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
               cmd & OBD_BRW_WRITE ? "write" : "read", pg.count, inode->i_ino,
               pg.off, pg.off);
        if (pg.count == 0) {
                CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
                       LPU64"\n",
                       inode->i_ino, inode, inode->i_size, page->mapping->host,
                       page->mapping->host->i_size, page->index, pg.off);
        }

        pg.flag = flags;

        rc = obd_brw(cmd, ll_i2obdconn(inode), lsm, 1, &pg, NULL);
        if (rc)
                CERROR("error from obd_brw: rc = %d\n", rc);

        RETURN(rc);
}

/*
 * we were asked to read a single page, but we're going to try to read a
 * batch of pages all at once.  this vaguely simulates 2.5's readpages.
 */
static int ll_readpage(struct file *file, struct page *first_page)
{
        struct inode *inode = first_page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct page *page = first_page;
        struct list_head *pos;
        struct brw_page *pgs;
        unsigned long end_index, extent_end = 0;
        struct ptlrpc_request_set *set;
        int npgs = 0, rc = 0, max_pages;
        ENTRY;

        LASSERT(PageLocked(page));
        LASSERT(!PageUptodate(page));
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset="LPX64"\n",
               inode->i_ino, inode->i_generation, inode,
               (((obd_off)page->index) << PAGE_SHIFT));
        LASSERT(atomic_read(&file->f_dentry->d_inode->i_count) > 0);

        if (inode->i_size <= ((obd_off)page->index) << PAGE_SHIFT) {
                CERROR("reading beyond EOF\n");
                memset(kmap(page), 0, PAGE_SIZE);
                kunmap(page);
                SetPageUptodate(page);
                unlock_page(page);
                RETURN(rc);
        }

        /* try to read the file's preferred block size in a single batch */
        end_index = first_page->index +
                (inode->i_blksize >> PAGE_CACHE_SHIFT);
        if (end_index > (inode->i_size >> PAGE_CACHE_SHIFT))
                end_index = inode->i_size >> PAGE_CACHE_SHIFT;

        max_pages = ((end_index - first_page->index) << PAGE_CACHE_SHIFT) >>
                PAGE_SHIFT;
        pgs = kmalloc(max_pages * sizeof(*pgs), GFP_USER);
        if (pgs == NULL)
                RETURN(-ENOMEM);

        /*
         * find how far we're allowed to read under the extent that
         * ll_file_read is passing us..
         */
        spin_lock(&lli->lli_read_extent_lock);
        list_for_each(pos, &lli->lli_read_extents) {
                struct ll_read_extent *rextent;
                rextent = list_entry(pos, struct ll_read_extent, re_lli_item);
                if (rextent->re_task != current)
                        continue;

                if (rextent->re_extent.end + PAGE_SIZE < rextent->re_extent.end)
                        /* extent wrapping */
                        extent_end = ~0;
                else {
                        /* extent_end is a page index; convert the extent's
                         * byte end, rounded up to a whole page */
                        extent_end = (rextent->re_extent.end + PAGE_SIZE)
                                                        >> PAGE_CACHE_SHIFT;
                        /* 32bit indexes, 64bit extents.. */
                        if (((u64)extent_end << PAGE_CACHE_SHIFT) <
                                        rextent->re_extent.end)
                                extent_end = ~0;
                }
                break;
        }
        spin_unlock(&lli->lli_read_extent_lock);

        if (extent_end == 0) {
                static long next_print;
                if (time_after(jiffies, next_print)) {
                        next_print = jiffies + 30 * HZ;
                        CDEBUG(D_INODE, "mmap readpage - check locks\n");
                }
                end_index = page->index + 1;
        } else if (extent_end < end_index)
                end_index = extent_end;

        /* grab a ref on the first page to balance the find_get_page ref
         * that the later pages get; each ref is dropped in the teardown
         * loop below */
        page_cache_get(page);
        do {
                unsigned long index;

                pgs[npgs].pg = page;
                pgs[npgs].off = ((obd_off)page->index) << PAGE_CACHE_SHIFT;
                pgs[npgs].flag = 0;
                pgs[npgs].count = PAGE_SIZE;
                /* XXX Workaround for BA OSTs returning short reads at EOF.
                 * The linux OST will return the full page, zero-filled at the
                 * end, which will just overwrite the data we set here.  Bug
                 * 593 relates to fixing this properly.
                 */
                if (inode->i_size < pgs[npgs].off + PAGE_SIZE) {
                        int count = inode->i_size - pgs[npgs].off;
                        void *addr = kmap(page);
                        pgs[npgs].count = count;
                        //POISON(addr, 0x7c, count);
                        memset(addr + count, 0, PAGE_SIZE - count);
                        kunmap(page);
                }

                npgs++;
                if (npgs == max_pages)
                        break;

                /*
                 * find pages ahead of us that we can read in.
                 * grab_cache_page waits on pages that are locked so
                 * we first try find_get_page, which doesn't.  this stops
                 * the worst case behaviour of racing threads waiting on
                 * each other, but doesn't remove it entirely.
                 */
                for (index = page->index + 1, page = NULL;
                     page == NULL && index < end_index; index++) {

                        /* see if the page already exists and needs updating */
                        page = find_get_page(inode->i_mapping, index);
                        if (page) {
                                if (Page_Uptodate(page) || TryLockPage(page))
                                        goto out_release;
                                if (!page->mapping || Page_Uptodate(page))
                                        goto out_unlock;
                        } else {
                                /* ok, we have to create it.. */
                                page = grab_cache_page(inode->i_mapping, index);
                                if (page == NULL)
                                        continue;
                                if (Page_Uptodate(page))
                                        goto out_unlock;
                        }

                        break;

                out_unlock:
                        unlock_page(page);
                out_release:
                        page_cache_release(page);
                        page = NULL;
                }

        } while (page);

        set = ptlrpc_prep_set();
        if (set == NULL) {
                CERROR("ENOMEM allocating request set\n");
                rc = -ENOMEM;
        } else {
                rc = obd_brw_async(OBD_BRW_READ, ll_i2obdconn(inode),
                                   ll_i2info(inode)->lli_smd, npgs, pgs,
                                   set, NULL);
                if (rc == 0)
                        rc = ptlrpc_set_wait(set);
                ptlrpc_set_destroy(set);
                if (rc && rc != -EIO)
                        CERROR("error from obd_brw_async: rc = %d\n", rc);
        }

        while (npgs-- > 0) {
                page = pgs[npgs].pg;

                if (rc == 0)
                        SetPageUptodate(page);
                unlock_page(page);
                page_cache_release(page);
        }

        kfree(pgs);
        RETURN(rc);
} /* ll_readpage */

/* this isn't where truncate starts.  roughly:
 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate.
 * we grab the lock back in setattr_raw to avoid races. */
void ll_truncate(struct inode *inode)
{
        struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
        struct obdo oa = {0};
        int err;
        ENTRY;
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
               inode->i_generation, inode);

        if (!lsm) {
                /* object not yet allocated */
                inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                EXIT;
                return;
        }

        /* vmtruncate just threw away our dirty pages, make sure
         * we don't think they're still dirty, being careful to round
         * i_size up to the first whole page that was tossed */
        ll_remove_dirty(inode,
                        (inode->i_size + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT,
                        ~0);

        oa.o_id = lsm->lsm_object_id;
        oa.o_mode = inode->i_mode;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE | OBD_MD_FLTYPE;

        CDEBUG(D_INFO, "calling punch for "LPX64" (all bytes after %Lu)\n",
               oa.o_id, inode->i_size);

        /* truncate == punch from new size to absolute end of file */
        err = obd_punch(ll_i2obdconn(inode), &oa, lsm, inode->i_size,
                        OBD_OBJECT_EOF, NULL);
        if (err)
                CERROR("obd_punch fails (%d) ino %lu\n", err, inode->i_ino);
        else
                obdo_to_inode(inode, &oa, oa.o_valid);

        EXIT;
        return;
} /* ll_truncate */
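/* prepare_write is called with the page locked, before generic_file_write
 * copies user data into it.  if the write only covers part of the page we
 * have to make the rest uptodate first, either by zeroing (when the page
 * lies entirely beyond i_size) or by reading the old contents in */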
static int ll_prepare_write(struct file *file, struct page *page, unsigned from,
                            unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
        struct brw_page pg;
        int rc = 0;
        ENTRY;

        if (!PageLocked(page))
                LBUG();

        if (PageUptodate(page))
                RETURN(0);

        //POISON(addr + from, 0xca, to - from);

        /* Check to see if we should return -EIO right away */
        pg.pg = page;
        pg.off = offset;
        pg.count = PAGE_SIZE;
        pg.flag = 0;
        rc = obd_brw(OBD_BRW_CHECK, ll_i2obdconn(inode), lsm, 1, &pg, NULL);
        if (rc)
                RETURN(rc);

        /* We're completely overwriting an existing page, so _don't_ set it up
         * to date until commit_write */
        if (from == 0 && to == PAGE_SIZE)
                RETURN(0);

        /* If we are writing to a new page, no need to read old data.
         * The extent locking and getattr procedures in ll_file_write have
         * guaranteed that i_size is stable enough for our zeroing needs */
        if (inode->i_size <= offset) {
                memset(kmap(page), 0, PAGE_SIZE);
                kunmap(page);
                GOTO(prepare_done, rc = 0);
        }

        rc = ll_brw(OBD_BRW_READ, inode, page, 0);

        EXIT;
 prepare_done:
        if (rc == 0)
                SetPageUptodate(page);

        return rc;
}

/*
 * background file writeback.  This is called regularly from kupdated to write
 * dirty data, from kswapd when memory is low, and from filemap_fdatasync when
 * super blocks or inodes are synced..
 *
 * obd_brw errors down in _batch_writepage are ignored, so pages are always
 * unlocked.  Also, there is nobody to return an error code to from here - the
 * application may not even be running anymore.
 *
 * this should be async so that things like kswapd can have a chance to
 * free some more pages that our allocating writeback may need, but it isn't
 * yet.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
static int ll_writepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        ENTRY;

        CDEBUG(D_CACHE, "page %p [lau %d] inode %p\n", page,
                        PageLaunder(page), inode);
        LASSERT(PageLocked(page));

        /* XXX should obd_brw errors trickle up? */
        ll_batch_writepage(inode, page);
        RETURN(0);
}

/*
 * we really don't want to start writeback here, we want to give callers some
 * time to further dirty the pages before we write them out.
 */
static int ll_commit_write(struct file *file, struct page *page,
                           unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        loff_t size;
        ENTRY;

        LASSERT(inode == file->f_dentry->d_inode);
        LASSERT(PageLocked(page));

        CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
               inode, page, from, to, page->index);
        /* to match the full-page case in prepare_write */
        SetPageUptodate(page);
        /* mark the page dirty, put it on mapping->dirty,
         * mark the inode PAGES_DIRTY, put it on sb->dirty */
        if (!PageDirty(page))
                INODE_IO_STAT_ADD(inode, dirty_misses, 1);
        else
                INODE_IO_STAT_ADD(inode, dirty_hits, 1);

        size = (((obd_off)page->index) << PAGE_SHIFT) + to;
        if (size > inode->i_size)
                inode->i_size = size;

        /* XXX temporary, bug 1286: write back synchronously once the inode
         * has accumulated more than 10MB of dirty pages */
        {
                struct ll_dirty_offsets *lldo = &ll_i2info(inode)->lli_dirty;
                int rc;
                if ((lldo->do_num_dirty * PAGE_CACHE_SIZE) > 10 * 1024 * 1024) {
                        rc = ll_batch_writepage(inode, page);
                        lock_page(page); /* caller expects to unlock */
                        RETURN(rc);
                }
        }

        set_page_dirty(page);
        ll_record_dirty(inode, page->index);

        RETURN(0);
} /* ll_commit_write */
#else
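/* stubs for the 2.5 port: writeback through these methods isn't hooked up
 * yet, so they succeed without doing any I/O */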
static int ll_writepage(struct page *page,
                        struct writeback_control *wbc)
{
        return 0;
}

static int ll_commit_write(struct file *file, struct page *page,
                           unsigned from, unsigned to)
{
        return 0;
}
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
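/* raw/direct I/O for 2.4: the caller hands us a kiobuf whose pages are
 * already mapped and pinned; turn them into brw_page descriptors and send
 * them as one async batch, waiting here for completion */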
static int ll_direct_IO(int rw, struct inode *inode, struct kiobuf *iobuf,
                        unsigned long blocknr, int blocksize)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct brw_page *pga;
        struct ptlrpc_request_set *set;
        int length, i, flags, rc = 0;
        loff_t offset;
        ENTRY;

        if (!lsm || !lsm->lsm_object_id)
                RETURN(-ENOMEM);

        if ((iobuf->offset & (blocksize - 1)) ||
            (iobuf->length & (blocksize - 1)))
                RETURN(-EINVAL);

        set = ptlrpc_prep_set();
        if (set == NULL)
                RETURN(-ENOMEM);

        OBD_ALLOC(pga, sizeof(*pga) * iobuf->nr_pages);
        if (!pga) {
                ptlrpc_set_destroy(set);
                RETURN(-ENOMEM);
        }

        flags = (rw == WRITE ? OBD_BRW_CREATE : 0) /* | OBD_BRW_DIRECTIO */;
        offset = ((obd_off)blocknr << inode->i_blkbits);

        for (i = 0, length = iobuf->length; length > 0;
             length -= pga[i].count, offset += pga[i].count, i++) { /*i last!*/
                pga[i].pg = iobuf->maplist[i];
                pga[i].off = offset;
                /* To the end of the page, or the length, whichever is less */
                pga[i].count = min_t(int, PAGE_SIZE - (offset & ~PAGE_MASK),
                                     length);
                pga[i].flag = flags;
                if (rw == READ) {
                        //POISON(kmap(iobuf->maplist[i]), 0xc5, PAGE_SIZE);
                        //kunmap(iobuf->maplist[i]);
                }
        }

        rc = obd_brw_async(rw == WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
                           ll_i2obdconn(inode), lsm, iobuf->nr_pages, pga, set,
                           NULL);
        if (rc) {
                CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR,
                       "error from obd_brw_async: rc = %d\n", rc);
        } else {
                rc = ptlrpc_set_wait(set);
                if (rc)
                        CERROR("error from callback: rc = %d\n", rc);
        }
        ptlrpc_set_destroy(set);
        if (rc == 0)
                rc = iobuf->length;

        OBD_FREE(pga, sizeof(*pga) * iobuf->nr_pages);
        RETURN(rc);
}
#endif
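/* the address_space methods that llite installs for regular files */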
struct address_space_operations ll_aops = {
        readpage: ll_readpage,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
        direct_IO: ll_direct_IO,
#endif
        writepage: ll_writepage,
        sync_page: block_sync_page,
        prepare_write: ll_prepare_write,
        commit_write: ll_commit_write,
        bmap: NULL
};