+
+int filter_alloc_iobuf(int rw, int num_pages, void **ret)
+{
+ struct dio_request *dreq;
+
+ LASSERTF(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ, "%x\n", rw);
+
+ OBD_ALLOC(dreq, sizeof(*dreq));
+ if (dreq == NULL)
+ goto failed_0;
+
+ OBD_ALLOC(dreq->dr_pages, num_pages * sizeof(*dreq->dr_pages));
+ if (dreq->dr_pages == NULL)
+ goto failed_1;
+
+ OBD_ALLOC(dreq->dr_blocks,
+ MAX_BLOCKS_PER_PAGE * num_pages * sizeof(*dreq->dr_blocks));
+ if (dreq->dr_blocks == NULL)
+ goto failed_2;
+
+ dreq->dr_bios = NULL;
+ init_waitqueue_head(&dreq->dr_wait);
+ atomic_set(&dreq->dr_numreqs, 0);
+ spin_lock_init(&dreq->dr_lock);
+ dreq->dr_max_pages = num_pages;
+ dreq->dr_npages = 0;
+
+ *ret = dreq;
+ RETURN(0);
+
+ failed_2:
+ OBD_FREE(dreq->dr_pages,
+ num_pages * sizeof(*dreq->dr_pages));
+ failed_1:
+ OBD_FREE(dreq, sizeof(*dreq));
+ failed_0:
+ RETURN(-ENOMEM);
+}
+
+void filter_free_iobuf(void *iobuf)
+{
+ struct dio_request *dreq = iobuf;
+ int num_pages = dreq->dr_max_pages;
+
+ /* free all bios */
+ while (dreq->dr_bios) {
+ struct bio *bio = dreq->dr_bios;
+ dreq->dr_bios = bio->bi_private;
+ bio_put(bio);
+ }
+
+ OBD_FREE(dreq->dr_blocks,
+ MAX_BLOCKS_PER_PAGE * num_pages * sizeof(*dreq->dr_blocks));
+ OBD_FREE(dreq->dr_pages,
+ num_pages * sizeof(*dreq->dr_pages));
+ OBD_FREE(dreq, sizeof(*dreq));
+}
+
+int filter_iobuf_add_page(struct obd_device *obd, void *iobuf,
+ struct inode *inode, struct page *page)
+{
+ struct dio_request *dreq = iobuf;
+
+ LASSERT (dreq->dr_npages < dreq->dr_max_pages);
+ dreq->dr_pages[dreq->dr_npages++] = page;
+
+ return 0;
+}
+
/* Build and submit struct bios covering every page in @dreq, then wait
 * for them all to complete.
 *
 * dreq->dr_blocks holds blocks_per_page physical block numbers for each
 * page in dreq->dr_pages; a block number of 0 marks a hole, which is
 * only legal for reads (the page range is zero-filled instead of read).
 * Physically contiguous blocks within a page are coalesced into single
 * bio fragments, and fragments are merged into the current bio while
 * the block layer allows it; otherwise the bio is submitted and a new
 * one allocated.
 *
 * Returns 0 on success or a negative errno.  In every case we wait for
 * all bios already in flight (tracked in dr_numreqs, woken via dr_wait
 * by dio_complete_routine) before returning; if no submission error
 * occurred, any completion error recorded in dr_error is returned. */
int filter_do_bio(struct obd_device *obd, struct inode *inode,
 struct dio_request *dreq, int rw)
{
 int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
 struct page **pages = dreq->dr_pages;
 int npages = dreq->dr_npages;
 unsigned long *blocks = dreq->dr_blocks;
 int total_blocks = npages * blocks_per_page;
 /* filesystem block number -> 512-byte sector number shift */
 int sector_bits = inode->i_sb->s_blocksize_bits - 9;
 unsigned int blocksize = inode->i_sb->s_blocksize;
 struct bio *bio = NULL;
 struct page *page;
 unsigned int page_offset;
 sector_t sector;
 int nblocks;
 int block_idx;
 int page_idx;
 int i;
 int rc = 0;
 ENTRY;

 LASSERT(dreq->dr_npages == npages);
 LASSERT(total_blocks <= OBDFILTER_CREATED_SCRATCHPAD_ENTRIES);

 for (page_idx = 0, block_idx = 0;
 page_idx < npages;
 page_idx++, block_idx += blocks_per_page) {

 page = pages[page_idx];
 LASSERT (block_idx + blocks_per_page <= total_blocks);

 /* walk the blocks of this page, taking nblocks at a time */
 for (i = 0, page_offset = 0;
 i < blocks_per_page;
 i += nblocks, page_offset += blocksize * nblocks) {

 nblocks = 1;

 if (blocks[block_idx + i] == 0) { /* hole */
 LASSERT(rw == OBD_BRW_READ);
 memset(kmap(page) + page_offset, 0, blocksize);
 kunmap(page);
 continue;
 }

 sector = blocks[block_idx + i] << sector_bits;

 /* Additional contiguous file blocks? */
 while (i + nblocks < blocks_per_page &&
 (sector + nblocks*(blocksize>>9)) ==
 (blocks[block_idx + i + nblocks] << sector_bits))
 nblocks++;

 /* try to append this fragment to the bio we are building */
 if (bio != NULL &&
 can_be_merged(bio, sector) &&
 bio_add_page(bio, page,
 blocksize * nblocks, page_offset) != 0)
 continue; /* added this frag OK */

 if (bio != NULL) {
 request_queue_t *q = bdev_get_queue(bio->bi_bdev);

 /* Dang! I have to fragment this I/O */
 CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
 "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
 bio->bi_size,
 bio->bi_vcnt, bio->bi_max_vecs,
 bio->bi_size >> 9, q->max_sectors,
 bio_phys_segments(q, bio),
 q->max_phys_segments,
 bio_hw_segments(q, bio),
 q->max_hw_segments);

 atomic_inc(&dreq->dr_numreqs);
 rc = fsfilt_send_bio(rw, obd, inode, bio);
 if (rc < 0) {
 CERROR("Can't send bio: %d\n", rc);
 /* OK do dec; we do the waiting */
 atomic_dec(&dreq->dr_numreqs);
 goto out;
 }
 rc = 0;

 bio = NULL;
 }

 /* allocate new bio, sized for the worst case: every
 * remaining block as a separate fragment */
 bio = bio_alloc(GFP_NOIO,
 (npages - page_idx) * blocks_per_page);
 if (bio == NULL) {
 CERROR ("Can't allocate bio\n");
 rc = -ENOMEM;
 goto out;
 }

 bio->bi_bdev = inode->i_sb->s_bdev;
 bio->bi_sector = sector;
 bio->bi_end_io = dio_complete_routine;
 bio->bi_private = dreq;

 /* a freshly allocated bio always has room for the first
 * fragment, so this add cannot fail */
 rc = bio_add_page(bio, page,
 blocksize * nblocks, page_offset);
 LASSERT (rc != 0);
 }
 }

 /* submit the final, partially filled bio */
 if (bio != NULL) {
 atomic_inc(&dreq->dr_numreqs);
 rc = fsfilt_send_bio(rw, obd, inode, bio);
 if (rc >= 0) {
 rc = 0;
 } else {
 CERROR("Can't send bio: %d\n", rc);
 /* OK do dec; we do the waiting */
 atomic_dec(&dreq->dr_numreqs);
 }
 }

 out:
 /* even on error, wait for every bio already submitted */
 wait_event(dreq->dr_wait, atomic_read(&dreq->dr_numreqs) == 0);

 if (rc == 0)
 rc = dreq->dr_error;
 RETURN(rc);
}
+
/* These are our hacks to keep our directio/bh IO coherent with ext3's
 * page cache use. Most notably ext3 reads file data into the page
 * cache when it is zeroing the tail of partial-block truncates and
 * leaves it there, sometimes generating io from it at later truncates.
 * This removes the partial page and its buffers from the page cache,
 * so it should only ever cause a wait in rare cases, as otherwise we
 * always do full-page IO to the OST.
 *
 * The call to truncate_complete_page() will call journal_invalidatepage()
 * to free the buffers and drop the page from cache. The buffers should
 * not be dirty, because we already called fdatasync/fdatawait on them.
 *
 * Returns 0 on success, or the first error from the writeback/sync
 * calls; in the error case no pages are evicted. */
static int filter_clear_page_cache(struct inode *inode,
 struct dio_request *iobuf)
{
 struct page *page;
 int i, rc, rc2;

 /* This is nearly generic_osync_inode, without the waiting on the inode
 rc = generic_osync_inode(inode, inode->i_mapping,
 OSYNC_DATA|OSYNC_METADATA);
 */
 /* flush dirty data and metadata buffers; keep the first error but
 * still run every step so everything is written out */
 rc = filemap_fdatawrite(inode->i_mapping);
 rc2 = sync_mapping_buffers(inode->i_mapping);
 if (rc == 0)
 rc = rc2;
 rc2 = filemap_fdatawait(inode->i_mapping);
 if (rc == 0)
 rc = rc2;
 if (rc != 0)
 RETURN(rc);

 /* be careful to call this after fsync_inode_data_buffers has waited
 * for IO to complete before we evict it from the cache */
 for (i = 0; i < iobuf->dr_npages; i++) {
 page = find_lock_page(inode->i_mapping,
 iobuf->dr_pages[i]->index);
 if (page == NULL)
 continue;
 /* mapping is NULL once a page has been removed from the
 * cache, e.g. by a concurrent truncate; skip those */
 if (page->mapping != NULL) {
 wait_on_page_writeback(page);
 ll_truncate_complete_page(page);
 }

 unlock_page(page);
 page_cache_release(page);
 }
 return 0;
}
+/* Must be called with i_sem taken for writes; this will drop it */
+int filter_direct_io(int rw, struct dentry *dchild, void *iobuf,
+ struct obd_export *exp, struct iattr *attr,
+ struct obd_trans_info *oti, void **wait_handle)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct inode *inode = dchild->d_inode;
+ struct dio_request *dreq = iobuf;
+ struct semaphore *sem = NULL;
+ int rc, rc2, create = 0;
+ ENTRY;
+
+ LASSERTF(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ, "%x\n", rw);
+ LASSERTF(dreq->dr_npages <= dreq->dr_max_pages, "%d,%d\n",
+ dreq->dr_npages, dreq->dr_max_pages);
+
+ if (dreq->dr_npages == 0)
+ RETURN(0);
+
+ if (dreq->dr_npages > OBDFILTER_CREATED_SCRATCHPAD_ENTRIES)
+ RETURN(-EINVAL);
+
+ if (rw == OBD_BRW_WRITE) {
+ create = 1;
+ //sem = &obd->u.filter.fo_alloc_lock;
+ }
+
+ rc = fsfilt_map_inode_pages(obd, inode,
+ dreq->dr_pages, dreq->dr_npages,
+ dreq->dr_blocks,
+ obdfilter_created_scratchpad,
+ create, sem);
+
+ if (rw == OBD_BRW_WRITE) {
+ if (rc == 0) {
+ int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
+ filter_tally_write(&obd->u.filter, dreq->dr_pages,
+ dreq->dr_npages, dreq->dr_blocks,
+ blocks_per_page);
+ if (attr->ia_size > inode->i_size)
+ attr->ia_valid |= ATTR_SIZE;
+ rc = fsfilt_setattr(obd, dchild,
+ oti->oti_handle, attr, 0);
+ }
+
+ up(&inode->i_sem);
+
+ rc2 = filter_finish_transno(exp, oti, 0);
+ if (rc2 != 0)
+ CERROR("can't close transaction: %d\n", rc);
+ rc = (rc == 0) ? rc2 : rc;
+
+ rc2 = fsfilt_commit_async(obd,inode,oti->oti_handle,wait_handle);
+ rc = (rc == 0) ? rc2 : rc;
+
+ if (rc != 0)
+ RETURN(rc);
+ }
+
+ rc = filter_clear_page_cache(inode, dreq);
+ if (rc != 0)
+ RETURN(rc);
+
+ RETURN(filter_do_bio(obd, inode, dreq, rw));
+}
+