Whamcloud - gitweb
5577c038be3b10c0539136d749038d00d46955b5
[fs/lustre-release.git] / lustre / obdfilter / filter_io_26.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  linux/fs/obdfilter/filter_io.c
5  *
6  *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
7  *   Author: Peter Braam <braam@clusterfs.com>
8  *   Author: Andreas Dilger <adilger@clusterfs.com>
9  *   Author: Phil Schwan <phil@clusterfs.com>
10  *
11  *   This file is part of Lustre, http://www.lustre.org.
12  *
13  *   Lustre is free software; you can redistribute it and/or
14  *   modify it under the terms of version 2 of the GNU General Public
15  *   License as published by the Free Software Foundation.
16  *
17  *   Lustre is distributed in the hope that it will be useful,
18  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *   GNU General Public License for more details.
21  *
22  *   You should have received a copy of the GNU General Public License
23  *   along with Lustre; if not, write to the Free Software
24  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25  */
26
27 #include <linux/config.h>
28 #include <linux/module.h>
29 #include <linux/pagemap.h> // XXX kill me soon
30 #include <linux/version.h>
31 #include <linux/buffer_head.h>
32
33 #define DEBUG_SUBSYSTEM S_FILTER
34
35 #include <linux/obd_class.h>
36 #include <linux/lustre_fsfilt.h>
37 #include "filter_internal.h"
38
/* 512byte block min */
#define MAX_BLOCKS_PER_PAGE (PAGE_SIZE / 512)

/* Per-request descriptor for a direct I/O: the pages being transferred,
 * their mapped disk blocks, and the completion state shared with the
 * bio end_io callback (dio_complete_routine). */
struct dio_request {
        atomic_t          dr_numreqs;  /* number of reqs being processed */
        struct bio       *dr_bios;     /* list of completed bios */
        wait_queue_head_t dr_wait;     /* submitter sleeps here until dr_numreqs == 0 */
        int               dr_max_pages; /* capacity of dr_pages[] */
        int               dr_npages;    /* pages actually added so far */
        int               dr_error;     /* first I/O error seen by completion */
        struct page     **dr_pages;     /* pages to read/write */
        unsigned long    *dr_blocks;    /* disk blocks, MAX_BLOCKS_PER_PAGE per page */
        spinlock_t        dr_lock;      /* protects dr_bios and dr_error */
};
52
/* bio end_io callback for the 2.6 block layer.
 *
 * Chains the finished bio onto dreq->dr_bios (reusing bi_private as the
 * link field once completion is done with it), records the first error
 * seen, and wakes the submitting thread when the last outstanding bio
 * completes.  May run in interrupt context, hence the irqsave locking. */
static int dio_complete_routine(struct bio *bio, unsigned int done, int error)
{
        struct dio_request *dreq = bio->bi_private;
        unsigned long flags;

        /* non-zero bi_size means a partial completion: return 1 so the
         * block layer keeps the bio alive until it finishes */
        if (bio->bi_size) {
                CWARN("gets called against non-complete bio 0x%p: %d/%d/%d\n",
                      bio, bio->bi_size, done, error);
                return 1;
        }

        /* bi_private should have been set to the dreq at submission time;
         * if it is NULL we cannot wake the submitter, so just log state */
        if (dreq == NULL) {
                CERROR("***** bio->bi_private is NULL!  This should never "
                       "happen.  Normally, I would crash here, but instead I "
                       "will dump the bio contents to the console.  Please "
                       "report this to CFS, along with any interesting messages "
                       "leading up to this point (like SCSI errors, perhaps).  "
                       "Because bi_private is NULL, I can't wake up the thread "
                       "that initiated this I/O -- so you will probably have to "
                       "reboot this node.");
                CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
                       "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
                       "bi_private: %p\n", bio->bi_next, bio->bi_flags,
                       bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
                       bio->bi_end_io, atomic_read(&bio->bi_cnt),
                       bio->bi_private);
                return 0;
        }

        /* push bio onto the completed list; bi_private now doubles as the
         * "next" pointer (the list is drained in filter_free_iobuf) */
        spin_lock_irqsave(&dreq->dr_lock, flags);
        bio->bi_private = dreq->dr_bios;
        dreq->dr_bios = bio;
        /* keep only the first error reported */
        if (dreq->dr_error == 0)
                dreq->dr_error = error;
        spin_unlock_irqrestore(&dreq->dr_lock, flags);

        /* last completion wakes the waiter in filter_do_bio() */
        if (atomic_dec_and_test(&dreq->dr_numreqs))
                wake_up(&dreq->dr_wait);

        return 0;
}
94
95 static int can_be_merged(struct bio *bio, sector_t sector)
96 {
97         unsigned int size;
98         if (!bio)
99                 return 0;
100
101         size = bio->bi_size >> 9;
102         return bio->bi_sector + size == sector ? 1 : 0;
103 }
104
105
106 int filter_alloc_iobuf(int rw, int num_pages, void **ret)
107 {
108         struct dio_request *dreq;
109
110         LASSERTF(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ, "%x\n", rw);
111
112         OBD_ALLOC(dreq, sizeof(*dreq));
113         if (dreq == NULL)
114                 goto failed_0;
115         
116         OBD_ALLOC(dreq->dr_pages, num_pages * sizeof(*dreq->dr_pages));
117         if (dreq->dr_pages == NULL)
118                 goto failed_1;
119         
120         OBD_ALLOC(dreq->dr_blocks,
121                   MAX_BLOCKS_PER_PAGE * num_pages * sizeof(*dreq->dr_blocks));
122         if (dreq->dr_blocks == NULL)
123                 goto failed_2;
124
125         dreq->dr_bios = NULL;
126         init_waitqueue_head(&dreq->dr_wait);
127         atomic_set(&dreq->dr_numreqs, 0);
128         spin_lock_init(&dreq->dr_lock);
129         dreq->dr_max_pages = num_pages;
130         dreq->dr_npages = 0;
131
132         *ret = dreq;
133         RETURN(0);
134         
135  failed_2:
136         OBD_FREE(dreq->dr_pages,
137                  num_pages * sizeof(*dreq->dr_pages));
138  failed_1:
139         OBD_FREE(dreq, sizeof(*dreq));
140  failed_0:
141         RETURN(-ENOMEM);
142 }
143
144 void filter_free_iobuf(void *iobuf)
145 {
146         struct dio_request *dreq = iobuf;
147         int                 num_pages = dreq->dr_max_pages;
148
149         /* free all bios */
150         while (dreq->dr_bios) {
151                 struct bio *bio = dreq->dr_bios;
152                 dreq->dr_bios = bio->bi_private;
153                 bio_put(bio);
154         }
155
156         OBD_FREE(dreq->dr_blocks,
157                  MAX_BLOCKS_PER_PAGE * num_pages * sizeof(*dreq->dr_blocks));
158         OBD_FREE(dreq->dr_pages,
159                  num_pages * sizeof(*dreq->dr_pages));
160         OBD_FREE(dreq, sizeof(*dreq));
161 }
162
163 int filter_iobuf_add_page(struct obd_device *obd, void *iobuf,
164                           struct inode *inode, struct page *page)
165 {
166         struct dio_request *dreq = iobuf;
167
168         LASSERT (dreq->dr_npages < dreq->dr_max_pages);
169         dreq->dr_pages[dreq->dr_npages++] = page;
170
171         return 0;
172 }
173
/* Build and submit bios for the pages/blocks described by @dreq.
 *
 * Walks every filesystem block of every page, coalescing physically
 * contiguous blocks into as few bios as possible.  A zero block number
 * means a hole: legal only for reads, and satisfied by zero-filling the
 * page in place.  Blocks for waiting until every submitted bio has
 * completed, then returns 0 or the first error (submission error takes
 * precedence over completion error). */
int filter_do_bio(struct obd_device *obd, struct inode *inode,
                  struct dio_request *dreq, int rw)
{
        int            blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct page  **pages = dreq->dr_pages;
        int            npages = dreq->dr_npages;
        unsigned long *blocks = dreq->dr_blocks;
        int            total_blocks = npages * blocks_per_page;
        /* filesystem block number -> 512-byte sector number shift */
        int            sector_bits = inode->i_sb->s_blocksize_bits - 9;
        unsigned int   blocksize = inode->i_sb->s_blocksize;
        struct bio    *bio = NULL;
        struct page   *page;
        unsigned int   page_offset;
        sector_t       sector;
        int            nblocks;
        int            block_idx;
        int            page_idx;
        int            i;
        int            rc = 0;
        ENTRY;

        LASSERT(dreq->dr_npages == npages);
        LASSERT(total_blocks <= OBDFILTER_CREATED_SCRATCHPAD_ENTRIES);

        for (page_idx = 0, block_idx = 0; 
             page_idx < npages; 
             page_idx++, block_idx += blocks_per_page) {
                        
                page = pages[page_idx];
                LASSERT (block_idx + blocks_per_page <= total_blocks);

                /* walk the blocks of this page, nblocks at a time */
                for (i = 0, page_offset = 0; 
                     i < blocks_per_page;
                     i += nblocks, page_offset += blocksize * nblocks) {

                        nblocks = 1;

                        if (blocks[block_idx + i] == 0) {  /* hole */
                                /* writes must have all blocks allocated */
                                LASSERT(rw == OBD_BRW_READ);
                                memset(kmap(page) + page_offset, 0, blocksize);
                                kunmap(page);
                                continue;
                        }

                        sector = blocks[block_idx + i] << sector_bits;

                        /* Additional contiguous file blocks? */
                        while (i + nblocks < blocks_per_page &&
                               (sector + nblocks*(blocksize>>9)) ==
                               (blocks[block_idx + i + nblocks] << sector_bits))
                                nblocks++;

                        /* try to extend the current bio with this fragment;
                         * bio_add_page returns 0 when the bio is full */
                        if (bio != NULL &&
                            can_be_merged(bio, sector) &&
                            bio_add_page(bio, page, 
                                         blocksize * nblocks, page_offset) != 0)
                                continue;       /* added this frag OK */

                        /* current bio can't take this fragment: submit it
                         * and start a new one */
                        if (bio != NULL) {
                                request_queue_t *q = bdev_get_queue(bio->bi_bdev);

                                /* Dang! I have to fragment this I/O */
                                CDEBUG(D_INODE|D_ERROR, "bio++ sz %d vcnt %d(%d) "
                                       "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
                                       bio->bi_size, 
                                       bio->bi_vcnt, bio->bi_max_vecs,
                                       bio->bi_size >> 9, q->max_sectors,
                                       bio_phys_segments(q, bio), 
                                       q->max_phys_segments,
                                       bio_hw_segments(q, bio), 
                                       q->max_hw_segments);

                                atomic_inc(&dreq->dr_numreqs);
                                rc = fsfilt_send_bio(rw, obd, inode, bio);
                                if (rc < 0) {
                                        CERROR("Can't send bio: %d\n", rc);
                                        /* OK do dec; we do the waiting */
                                        atomic_dec(&dreq->dr_numreqs);
                                        goto out;
                                }
                                rc = 0;
                                        
                                bio = NULL;
                        }

                        /* allocate new bio, sized for everything remaining */
                        bio = bio_alloc(GFP_NOIO, 
                                        (npages - page_idx) * blocks_per_page);
                        if (bio == NULL) {
                                CERROR ("Can't allocate bio\n");
                                rc = -ENOMEM;
                                goto out;
                        }

                        bio->bi_bdev = inode->i_sb->s_bdev;
                        bio->bi_sector = sector;
                        bio->bi_end_io = dio_complete_routine;
                        bio->bi_private = dreq;

                        /* first fragment of a fresh bio must always fit */
                        rc = bio_add_page(bio, page, 
                                          blocksize * nblocks, page_offset);
                        LASSERT (rc != 0);
                }
        }

        /* submit the final partially-filled bio, if any */
        if (bio != NULL) {
                atomic_inc(&dreq->dr_numreqs);
                rc = fsfilt_send_bio(rw, obd, inode, bio);
                if (rc >= 0) {
                        rc = 0;
                } else {
                        CERROR("Can't send bio: %d\n", rc);
                        /* OK do dec; we do the waiting */
                        atomic_dec(&dreq->dr_numreqs);
                }
        }
                        
 out:
        /* wait for every submitted bio to complete before returning;
         * dio_complete_routine wakes us when dr_numreqs hits zero */
        wait_event(dreq->dr_wait, atomic_read(&dreq->dr_numreqs) == 0);

        /* submission errors take precedence over completion errors */
        if (rc == 0)
                rc = dreq->dr_error;
        RETURN(rc);
}
298  
299 /* These are our hacks to keep our directio/bh IO coherent with ext3's
300  * page cache use.  Most notably ext3 reads file data into the page
301  * cache when it is zeroing the tail of partial-block truncates and
302  * leaves it there, sometimes generating io from it at later truncates.
303  * This removes the partial page and its buffers from the page cache,
304  * so it should only ever cause a wait in rare cases, as otherwise we
305  * always do full-page IO to the OST.
306  *
307  * The call to truncate_complete_page() will call journal_invalidatepage()
308  * to free the buffers and drop the page from cache.  The buffers should
309  * not be dirty, because we already called fdatasync/fdatawait on them.
310  */
/* Flush and evict from the page cache every page overlapping the pages
 * in @iobuf, so our direct I/O cannot race with cached data (see the
 * larger comment above).  Returns 0 or a negative errno from writeback. */
static int filter_clear_page_cache(struct inode *inode,
                                   struct dio_request *iobuf)
{
        struct page *page;
        int i, rc, rc2;
  
        /* This is nearly generic_osync_inode, without the waiting on the inode
        rc = generic_osync_inode(inode, inode->i_mapping,
                                  OSYNC_DATA|OSYNC_METADATA);
        */
        /* write out dirty data and metadata buffers, then wait; keep the
         * first error but still run every step */
        rc = filemap_fdatawrite(inode->i_mapping);
        rc2 = sync_mapping_buffers(inode->i_mapping);
        if (rc == 0)
                rc = rc2;
        rc2 = filemap_fdatawait(inode->i_mapping);
        if (rc == 0)
                rc = rc2;
        if (rc != 0)
                RETURN(rc);
 
        /* be careful to call this after fsync_inode_data_buffers has waited
         * for IO to complete before we evict it from the cache */
        for (i = 0; i < iobuf->dr_npages; i++) {
                page = find_lock_page(inode->i_mapping,
                                       iobuf->dr_pages[i]->index);
                if (page == NULL)
                       continue;
                if (page->mapping != NULL) {
                       wait_on_page_writeback(page);
                       ll_truncate_complete_page(page);
                }
  
                unlock_page(page);
                page_cache_release(page);
        }
        return 0;
}
348 /* Must be called with i_sem taken for writes; this will drop it */
349 int filter_direct_io(int rw, struct dentry *dchild, void *iobuf,
350                      struct obd_export *exp, struct iattr *attr,
351                      struct obd_trans_info *oti, void **wait_handle)
352 {
353         struct obd_device *obd = exp->exp_obd;
354         struct inode *inode = dchild->d_inode;
355         struct dio_request *dreq = iobuf;
356         struct semaphore *sem = NULL;
357         int rc, rc2, create = 0;
358         ENTRY;
359
360         LASSERTF(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ, "%x\n", rw);
361         LASSERTF(dreq->dr_npages <= dreq->dr_max_pages, "%d,%d\n",
362                  dreq->dr_npages, dreq->dr_max_pages);
363
364         if (dreq->dr_npages == 0)
365                 RETURN(0);
366
367         if (dreq->dr_npages > OBDFILTER_CREATED_SCRATCHPAD_ENTRIES)
368                 RETURN(-EINVAL);
369         
370         if (rw == OBD_BRW_WRITE) {
371                 create = 1;
372                 //sem = &obd->u.filter.fo_alloc_lock;
373         }
374
375         rc = fsfilt_map_inode_pages(obd, inode,
376                                     dreq->dr_pages, dreq->dr_npages,
377                                     dreq->dr_blocks,
378                                     obdfilter_created_scratchpad,
379                                     create, sem);
380
381         if (rw == OBD_BRW_WRITE) {
382                 if (rc == 0) {
383                         int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
384                         filter_tally_write(&obd->u.filter,  dreq->dr_pages,
385                                            dreq->dr_npages, dreq->dr_blocks,
386                                            blocks_per_page);
387                         if (attr->ia_size > inode->i_size)
388                                 attr->ia_valid |= ATTR_SIZE;
389                         rc = fsfilt_setattr(obd, dchild, 
390                                             oti->oti_handle, attr, 0);
391                 }
392                 
393                 up(&inode->i_sem);
394
395                 rc2 = filter_finish_transno(exp, oti, 0);
396                 if (rc2 != 0)
397                         CERROR("can't close transaction: %d\n", rc);
398                 rc = (rc == 0) ? rc2 : rc;
399
400                 rc2 = fsfilt_commit_async(obd,inode,oti->oti_handle,wait_handle);
401                 rc = (rc == 0) ? rc2 : rc;
402
403                 if (rc != 0)
404                         RETURN(rc);
405         }
406
407         rc = filter_clear_page_cache(inode, dreq);
408         if (rc != 0)
409                 RETURN(rc);
410
411         RETURN(filter_do_bio(obd, inode, dreq, rw));
412 }
413
414 /* See if there are unallocated parts in given file region */
415 static int filter_range_is_mapped(struct inode *inode, obd_size offset, int len)
416 {
417         sector_t (*fs_bmap)(struct address_space *, sector_t) =
418                 inode->i_mapping->a_ops->bmap;
419         int j;
420
421         /* We can't know if we are overwriting or not */
422         if (fs_bmap == NULL)
423                 return 0;
424
425         offset >>= inode->i_blkbits;
426         len >>= inode->i_blkbits;
427
428         for (j = 0; j <= len; j++)
429                 if (fs_bmap(inode->i_mapping, offset + j) == 0)
430                         return 0;
431
432         return 1;
433 }
434
/* Commit phase of a bulk write RPC: gather the prepared pages into an
 * iobuf, start a journal transaction, perform the direct I/O (which also
 * updates attributes and closes the transno), then wait for the async
 * commit.  @rc carries any error from the prepare phase.  Cleanup is
 * staged via cleanup_phase so each acquired resource is released exactly
 * once on every exit path. */
int filter_commitrw_write(struct obd_export *exp, struct obdo *oa,
                          int objcount, struct obd_ioobj *obj, int niocount,
                          struct niobuf_local *res, struct obd_trans_info *oti,
                          int rc)
{
        struct niobuf_local *lnb;
        struct dio_request *dreq = NULL;
        struct lvfs_run_ctxt saved;
        struct fsfilt_objinfo fso;
        struct iattr iattr = { 0 };
        struct inode *inode = NULL;
        unsigned long now = jiffies;   /* for slow-operation reporting */
        int i, err, cleanup_phase = 0;
        struct obd_device *obd = exp->exp_obd;
        void *wait_handle = NULL;
        int   total_size = 0;
        loff_t old_size;
        ENTRY;

        LASSERT(oti != NULL);
        LASSERT(objcount == 1);
        LASSERT(current->journal_info == NULL);

        /* prepare phase already failed: just release pages and dentry */
        if (rc != 0)
                GOTO(cleanup, rc);
        
        rc = filter_alloc_iobuf(OBD_BRW_WRITE, obj->ioo_bufcnt, (void **)&dreq);
        if (rc)
                GOTO(cleanup, rc);
        cleanup_phase = 1;

        fso.fso_dentry = res->dentry;
        fso.fso_bufcnt = obj->ioo_bufcnt;
        inode = res->dentry->d_inode;

        /* collect the pages that are actually going to disk, and track
         * the highest byte written so i_size can be updated */
        for (i = 0, lnb = res; i < obj->ioo_bufcnt; i++, lnb++) {
                loff_t this_size;

                /* If overwriting an existing block, we don't need a grant */
                if (!(lnb->flags & OBD_BRW_GRANTED) && lnb->rc == -ENOSPC &&
                    filter_range_is_mapped(inode, lnb->offset, lnb->len))
                        lnb->rc = 0;

                if (lnb->rc) { /* ENOSPC, network RPC error, etc. */
                        CDEBUG(D_INODE, "Skipping [%d] == %d\n", i, lnb->rc);
                        continue;
                }

                err = filter_iobuf_add_page(obd, dreq, inode, lnb->page);
                LASSERT (err == 0);

                total_size += lnb->len;

                /* we expect these pages to be in offset order, but we'll
                 * be forgiving */
                this_size = lnb->offset + lnb->len;
                if (this_size > iattr.ia_size)
                        iattr.ia_size = this_size;
        }
#if 0
        /* I use this when I'm checking our lovely 1M I/Os reach the disk -eeb */
        if (total_size != (1<<20))
                CWARN("total size %d (%d pages)\n", 
                      total_size, total_size/PAGE_SIZE);
#endif
        push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
        cleanup_phase = 2;

        /* i_sem is dropped later by filter_direct_io() */
        down(&inode->i_sem);
        old_size = inode->i_size;
        oti->oti_handle = fsfilt_brw_start(obd, objcount, &fso, niocount, res,
                                           oti);
        if (IS_ERR(oti->oti_handle)) {
                up(&inode->i_sem);
                rc = PTR_ERR(oti->oti_handle);
                /* ENOSPC is expected under pressure; don't scream about it */
                CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR,
                       "error starting transaction: rc = %d\n", rc);
                oti->oti_handle = NULL;
                GOTO(cleanup, rc);
        }
        /* have to call fsfilt_commit() from this point on */

        fsfilt_check_slow(now, obd_timeout, "brw_start");

        iattr_from_obdo(&iattr,oa,OBD_MD_FLATIME|OBD_MD_FLMTIME|OBD_MD_FLCTIME);
        /* filter_direct_io drops i_sem */
        rc = filter_direct_io(OBD_BRW_WRITE, res->dentry, dreq, exp, &iattr,
                              oti, &wait_handle);

#if 0
        if (inode->i_size != old_size) {
                struct llog_cookie *cookie = obdo_logcookie(oa);
                struct lustre_id *id = obdo_id(oa);
                filter_log_sz_change(obd, id, oa->o_easize, cookie, inode);
        }
#endif

        /* reflect the post-write inode state back into the reply obdo */
        if (rc == 0)
                obdo_from_inode(oa, inode, FILTER_VALID_FLAGS);

        fsfilt_check_slow(now, obd_timeout, "direct_io");

        /* wait for the async commit started inside filter_direct_io */
        err = fsfilt_commit_wait(obd, inode, wait_handle);
        if (rc == 0)
                rc = err;

        fsfilt_check_slow(now, obd_timeout, "commitrw commit");

cleanup:
        filter_grant_commit(exp, niocount, res);

        /* staged teardown: each case falls through to the ones below */
        switch (cleanup_phase) {
        case 2:
                pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
                LASSERT(current->journal_info == NULL);
                /* fallthrough */
        case 1:
                filter_free_iobuf(dreq);
                /* fallthrough */
        case 0:
                filter_free_dio_pages(objcount, obj, niocount, res);
                f_dput(res->dentry);
        }

        RETURN(rc);
}