1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * linux/fs/obdfilter/filter_io.c
6 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
7 * Author: Peter Braam <braam@clusterfs.com>
8 * Author: Andreas Dilger <adilger@clusterfs.com>
9 * Author: Phil Schwan <phil@clusterfs.com>
11 * This file is part of Lustre, http://www.lustre.org.
13 * Lustre is free software; you can redistribute it and/or
14 * modify it under the terms of version 2 of the GNU General Public
15 * License as published by the Free Software Foundation.
17 * Lustre is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with Lustre; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 #include <linux/config.h>
28 #include <linux/module.h>
29 #include <linux/pagemap.h> // XXX kill me soon
30 #include <linux/version.h>
32 #define DEBUG_SUBSYSTEM S_FILTER
34 #include <linux/iobuf.h>
35 #include <linux/locks.h>
37 #include <linux/obd_class.h>
38 #include <linux/lustre_fsfilt.h>
39 #include "filter_internal.h"
42 /* We should only change the file mtime (and not the ctime, like
43 * update_inode_times() in generic_file_write()) when we only change data. */
/* Set the inode mtime (and also ctime when ctime_too is non-zero) to the
 * current time and push the inode out with mark_inode_dirty_sync().
 * NOTE(review): this listing omits interior lines (the early-return body
 * and the actual i_mtime/i_ctime stores) -- confirm against full source. */
44 void inode_update_time(struct inode *inode, int ctime_too)
46         time_t now = CURRENT_TIME;
/* Nothing to do when the relevant timestamps already equal "now". */
47         if (inode->i_mtime == now && (!ctime_too || inode->i_ctime == now))
52         mark_inode_dirty_sync(inode);
55 /* Bug 2254 -- this is better done in ext3_map_inode_page, but this
56 * workaround will suffice until everyone has upgraded their kernels */
/* For each block number in @blocks, look up any cached buffer_head on
 * @dev and, when it is not dirty, mark it clean and clear BH_Req so a
 * stale cached bh cannot race with our direct IO (bug 2254 workaround,
 * see the comment above).  Only compiled for older kernels
 * (LUSTRE_KERNEL_VERSION < 32).
 * NOTE(review): listing omits lines (likely a NULL check on bh, a
 * brelse(), and the loop/function close) -- confirm against full source. */
57 static void check_pending_bhs(unsigned long *blocks, int nr_pages, dev_t dev,
60 #if (LUSTRE_KERNEL_VERSION < 32)
61         struct buffer_head *bh;
64         for (i = 0; i < nr_pages; i++) {
/* get_hash_table() returns the cached bh for (dev, block, size), if any. */
65                 bh = get_hash_table(dev, blocks[i], size);
68                 if (!buffer_dirty(bh)) {
72                         mark_buffer_clean(bh);
74                         clear_bit(BH_Req, &bh->b_state);
80 /* when brw_kiovec() is asked to read from block -1UL it just zeros
81 * the page. this gives us a chance to verify the write mappings
/* Post-process the per-block mapping array of @iobuf: blocks already
 * mapped (> 0) are left alone; otherwise, for reads the entry is set to
 * -1UL so brw_kiovec() zeroes the page (see the comment above).
 * NOTE(review): the statement guarded by the OBD_BRW_WRITE test is not
 * visible in this listing -- presumably an error return for an unmapped
 * write block; confirm against full source. */
83 static int filter_cleanup_mappings(int rw, struct kiobuf *iobuf,
86         int i, blocks_per_page_bits = PAGE_SHIFT - inode->i_blkbits;
/* nr_pages << blocks_per_page_bits == total number of fs blocks covered. */
89         for (i = 0 ; i < iobuf->nr_pages << blocks_per_page_bits; i++) {
90                 if (iobuf->blocks[i] > 0)
93                 if (rw == OBD_BRW_WRITE)
96                 iobuf->blocks[i] = -1UL;
/* Debug helper: log the first four bytes of @page together with the rw
 * direction and block number, under the D_PAGE debug mask.
 * NOTE(review): the matching kunmap() is outside this listing -- confirm. */
102 static void dump_page(int rw, unsigned long block, struct page *page)
104         char *blah = kmap(page);
105         CDEBUG(D_PAGE, "rw %d block %lu: %02x %02x %02x %02x\n", rw, block,
106                blah[0], blah[1], blah[2], blah[3]);
111 /* These are our hacks to keep our directio/bh IO coherent with ext3's
112 * page cache use. Most notably ext3 reads file data into the page
113 * cache when it is zeroing the tail of partial-block truncates and
114 * leaves it there, sometimes generating io from it at later truncates.
115 * This removes the partial page and its buffers from the page cache,
116 * so it should only ever cause a wait in rare cases, as otherwise we
117 * always do full-page IO to the OST.
119 * The call to truncate_complete_page() will call journal_flushpage() to
120 * free the buffers and drop the page from cache. The buffers should not
121 * be dirty, because we already called fdatasync/fdatawait on them.
123 static int filter_clear_page_cache(struct inode *inode, struct kiobuf *iobuf)
/* Neutralize any stale clean bhs for the blocks we are about to touch
 * (bug 2254 workaround -- see check_pending_bhs()). */
128         check_pending_bhs(KIOBUF_GET_BLOCKS(iobuf), iobuf->nr_pages,
129                           inode->i_dev, 1 << inode->i_blkbits);
/* Sync dirty pages and inode data buffers, then wait for the IO, before
 * evicting pages below.  NOTE(review): error handling between these
 * calls (use of rc vs rc2) is not visible in this listing -- confirm. */
131         /* This is nearly generic_osync_inode, without the waiting on the inode
132         rc = generic_osync_inode(inode, inode->i_mapping,
133                                  OSYNC_DATA|OSYNC_METADATA);
135         rc = filemap_fdatasync(inode->i_mapping);
136         rc2 = fsync_inode_data_buffers(inode);
139         rc2 = filemap_fdatawait(inode->i_mapping);
145         /* be careful to call this after fsync_inode_data_buffers has waited
146          * for IO to complete before we evict it from the cache */
/* Drop each of our target pages (and its buffers) from the page cache;
 * ll_truncate_complete_page() only applies while the page is still in
 * the mapping.  NOTE(review): the NULL-page/unlock path between these
 * lines is not visible here -- confirm against full source. */
147         for (i = 0; i < iobuf->nr_pages ; i++) {
148                 page = find_lock_page(inode->i_mapping,
149                                       iobuf->maplist[i]->index);
152                 if (page->mapping != NULL)
153                         ll_truncate_complete_page(page);
156                 page_cache_release(page);
162 /* Must be called with i_sem taken for writes; this will drop it */
/* Perform direct (non-pagecache) IO for the pages queued in the kiobuf
 * @buf: map the file pages to disk blocks, for writes update timestamps
 * size and commit the transaction asynchronously, evict conflicting
 * page-cache pages, then issue the actual block IO via fsfilt_send_bio().
 * Per the comment above, must be called with i_sem held for writes; this
 * function drops it.  Cleanup is staged through cleanup_phase.
 * NOTE(review): many interior lines (error checks after each call, the
 * cleanup labels, the final return) are omitted from this listing --
 * confirm all control flow against the full source. */
163 int filter_direct_io(int rw, struct dentry *dchild, void *buf,
164                      struct obd_export *exp, struct iattr *attr,
165                      struct obd_trans_info *oti, void **wait_handle)
167         struct obd_device *obd = exp->exp_obd;
168         struct inode *inode = dchild->d_inode;
169         struct kiobuf *iobuf = buf;
/* Blocks are allocated ("created") only on the write path. */
170         int rc, create = (rw == OBD_BRW_WRITE), *created = NULL, committed = 0;
171         int blocks_per_page = PAGE_SIZE >> inode->i_blkbits, cleanup_phase = 0;
172         struct semaphore *sem = NULL;
175         LASSERTF(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ, "%x\n", rw);
/* Nothing queued: succeed trivially. */
177         if (iobuf->nr_pages == 0)
178                 GOTO(cleanup, rc = 0);
/* Reject requests larger than the kiobuf or scratchpad can describe. */
180         if (iobuf->nr_pages * blocks_per_page > KIO_MAX_SECTORS)
181                 GOTO(cleanup, rc = -EINVAL);
183         if (iobuf->nr_pages * blocks_per_page >
184             OBDFILTER_CREATED_SCRATCHPAD_ENTRIES)
185                 GOTO(cleanup, rc = -EINVAL);
189         rc = lock_kiovec(1, &iobuf, 1);
/* Writes serialize block allocation on the filter allocation lock. */
194         if (rw == OBD_BRW_WRITE) {
196                 sem = &obd->u.filter.fo_alloc_lock;
/* Map (and for writes, allocate) the disk blocks backing each page. */
199         rc = fsfilt_map_inode_pages(obd, inode, iobuf->maplist,
200                                     iobuf->nr_pages, iobuf->blocks,
201                                     obdfilter_created_scratchpad, create, sem);
/* Normalize the block array: reads get -1UL for holes (zero-fill). */
205         rc = filter_cleanup_mappings(rw, iobuf, inode);
209         if (rw == OBD_BRW_WRITE) {
210                 filter_tally_write(&obd->u.filter, iobuf->maplist,
211                                    iobuf->nr_pages, iobuf->blocks,
/* Grow the file size if this write extends past the current i_size. */
214                 if (attr->ia_size > inode->i_size)
215                         attr->ia_valid |= ATTR_SIZE;
216                 rc = fsfilt_setattr(obd, dchild, oti->oti_handle, attr, 0);
221                 rc = filter_finish_transno(exp, oti, 0);
/* Close the transaction before doing the block IO; wait_handle lets the
 * caller wait for the commit later. */
225                 rc = fsfilt_commit_async(obd,inode,oti->oti_handle,wait_handle);
/* Evict cached pages that would alias our direct IO. */
231         rc = filter_clear_page_cache(inode, iobuf);
235         rc = fsfilt_send_bio(rw, obd, inode, iobuf);
237         CDEBUG(D_INFO, "tried to write %d pages, rc = %d\n",
238                iobuf->nr_pages, rc);
/* Error path: if the write transaction was never committed above, close
 * it here so the handle is not leaked; keep the original rc. */
245         if (!committed && (rw == OBD_BRW_WRITE)) {
246                 int err = fsfilt_commit_async(obd, inode,
247                                               oti->oti_handle, wait_handle);
248                 oti->oti_handle = NULL;
250                         CERROR("can't close transaction: %d\n", err);
252                  * this is error path, so we prefer to return
253                  * original error, not this one
/* Staged teardown; phases fall through from highest acquired resource. */
257         switch(cleanup_phase) {
260                 unlock_kiovec(1, &iobuf);
263                 if (cleanup_phase != 3 && rw == OBD_BRW_WRITE)
267                 CERROR("corrupt cleanup_phase (%d)?\n", cleanup_phase);
274 /* See if there are unallocated parts in given file region */
/* Check whether every block in the file region [offset, offset+len) is
 * already allocated, by probing the filesystem's bmap() per block.
 * NOTE(review): the return statements (including the no-bmap fallback
 * and the final result) are omitted from this listing -- confirm. */
275 int filter_range_is_mapped(struct inode *inode, obd_size offset, int len)
277         int (*fs_bmap)(struct address_space *, long) =
278                 inode->i_mapping->a_ops->bmap;
281         /* We can't know if the range is mapped already or not */
/* Convert byte offset/length into filesystem block units. */
285         offset >>= inode->i_blkbits;
286         len >>= inode->i_blkbits;
/* bmap() == 0 means the logical block has no disk block => unmapped. */
288         for (j = 0; j < len; j++)
289                 if (fs_bmap(inode->i_mapping, offset + j) == 0)
295 /* some kernels require alloc_kiovec callers to zero members through the use of
296 * map_user_kiobuf and unmap_.. we don't use those, so we have a little helper
297 * that makes sure we don't break the rules. */
/* Reset the kiobuf's page map so it obeys the invariants that
 * map_user_kiobuf/unmap would normally maintain (see comment above).
 * NOTE(review): resetting of nr_pages/length/offset is not visible in
 * this listing -- confirm against full source. */
298 static void clear_kiobuf(struct kiobuf *iobuf)
302         for (i = 0; i < iobuf->array_len; i++)
303                 iobuf->maplist[i] = NULL;
/* Allocate a kiobuf sized for @num_pages pages and return it via @ret.
 * On expand failure the kiovec is freed again.  @rw is only sanity
 * checked here.
 * NOTE(review): the error-return lines after alloc/expand and the final
 * *ret assignment/return are omitted from this listing -- confirm. */
310 int filter_alloc_iobuf(int rw, int num_pages, void **ret)
313         struct kiobuf *iobuf;
316         LASSERTF(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ, "%x\n", rw);
318         rc = alloc_kiovec(1, &iobuf);
322         rc = expand_kiobuf(iobuf, num_pages);
/* Expansion failed: release the kiovec allocated above. */
324                 free_kiovec(1, &iobuf);
328 #ifdef HAVE_KIOBUF_DOVARY
329         iobuf->dovary = 0; /* this prevents corruption, not present in 2.4.20 */
/* Release a kiobuf previously obtained from filter_alloc_iobuf(). */
336 void filter_free_iobuf(void *buf)
338         struct kiobuf *iobuf = buf;
341         free_kiovec(1, &iobuf);
/* Append @page to the kiobuf's page map and account one full page of IO
 * length.  @obd and @inode are unused in the visible lines.
 * NOTE(review): the return value line is omitted from this listing --
 * confirm against full source. */
344 int filter_iobuf_add_page(struct obd_device *obd, void *buf,
345                           struct inode *inode, struct page *page)
347         struct kiobuf *iobuf = buf;
349         iobuf->maplist[iobuf->nr_pages++] = page;
350         iobuf->length += PAGE_SIZE;
355 int filter_commitrw_write(struct obd_export *exp, struct obdo *oa, int objcount,
356 struct obd_ioobj *obj, int niocount,
357 struct niobuf_local *res, struct obd_trans_info *oti,
360 struct obd_device *obd = exp->exp_obd;
361 struct lvfs_run_ctxt saved;
362 struct niobuf_local *lnb;
363 struct fsfilt_objinfo fso;
364 struct iattr iattr = { 0 };
366 struct inode *inode = NULL;
367 int i, n, cleanup_phase = 0, err;
368 unsigned long now = jiffies; /* DEBUGGING OST TIMEOUTS */
371 LASSERT(oti != NULL);
372 LASSERT(objcount == 1);
373 LASSERT(current->journal_info == NULL);
378 rc = filter_alloc_iobuf(OBD_BRW_WRITE, obj->ioo_bufcnt, &iobuf);
383 fso.fso_dentry = res->dentry;
384 fso.fso_bufcnt = obj->ioo_bufcnt;
385 inode = res->dentry->d_inode;
387 for (i = 0, lnb = res, n = 0; i < obj->ioo_bufcnt; i++, lnb++) {
390 /* If overwriting an existing block, we don't need a grant */
391 if (!(lnb->flags & OBD_BRW_GRANTED) && lnb->rc == -ENOSPC &&
392 filter_range_is_mapped(inode, lnb->offset, lnb->len))
395 if (lnb->rc) /* ENOSPC, network RPC error */
398 filter_iobuf_add_page(obd, iobuf, inode, lnb->page);
400 /* We expect these pages to be in offset order, but we'll
402 this_size = lnb->offset + lnb->len;
403 if (this_size > iattr.ia_size)
404 iattr.ia_size = this_size;
407 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
411 oti->oti_handle = fsfilt_brw_start(obd, objcount, &fso, niocount, res,
413 if (IS_ERR(oti->oti_handle)) {
415 rc = PTR_ERR(oti->oti_handle);
416 CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR,
417 "error starting transaction: rc = %d\n", rc);
418 oti->oti_handle = NULL;
422 fsfilt_check_slow(now, obd_timeout, "brw_start");
424 iattr_from_obdo(&iattr,oa,OBD_MD_FLATIME|OBD_MD_FLMTIME|OBD_MD_FLCTIME);
425 /* filter_direct_io drops i_sem */
426 rc = filter_direct_io(OBD_BRW_WRITE, res->dentry, iobuf, exp, &iattr,
429 obdo_from_inode(oa, inode, FILTER_VALID_FLAGS);
431 fsfilt_check_slow(now, obd_timeout, "direct_io");
433 err = fsfilt_commit_wait(obd, inode, wait_handle);
436 if (obd_sync_filter && !err)
437 LASSERTF(oti->oti_transno <= obd->obd_last_committed,
438 "oti_transno "LPU64" last_committed "LPU64"\n",
439 oti->oti_transno, obd->obd_last_committed);
440 fsfilt_check_slow(now, obd_timeout, "commitrw commit");
442 filter_grant_commit(exp, niocount, res);
444 switch (cleanup_phase) {
446 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
447 LASSERT(current->journal_info == NULL);
449 filter_free_iobuf(iobuf);
451 filter_free_dio_pages(objcount, obj, niocount, res);