* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
/* prerequisite for linux/xattr.h */
#include <linux/fs.h>
-/* ext_depth() */
-#include <ldiskfs/ldiskfs.h>
-#include <ldiskfs/ldiskfs_jbd2.h>
-#include <ldiskfs/ldiskfs_extents.h>
-
/*
* struct OBD_{ALLOC,FREE}*()
* OBD_FAIL_CHECK
#include "osd_internal.h"
+/* ext_depth() */
+#include <ldiskfs/ldiskfs_extents.h>
+
#ifndef HAVE_PAGE_CONSTANT
#define mapping_cap_page_constant_write(mapping) 0
#define SetPageConstant(page) do {} while (0)
}
#endif
-static void osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,int rw)
+static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
+ int rw, int line, int pages)
{
+ int blocks, i;
+
+ LASSERTF(iobuf->dr_elapsed_valid == 0,
+ "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
+ cfs_atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
+ iobuf->dr_init_at);
+ LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);
+
cfs_waitq_init(&iobuf->dr_wait);
cfs_atomic_set(&iobuf->dr_numreqs, 0);
- iobuf->dr_max_pages = PTLRPC_MAX_BRW_PAGES;
iobuf->dr_npages = 0;
iobuf->dr_error = 0;
iobuf->dr_dev = d;
iobuf->dr_frags = 0;
iobuf->dr_elapsed = 0;
/* must be counted before, so assert */
- LASSERT(iobuf->dr_elapsed_valid == 0);
iobuf->dr_rw = rw;
+ iobuf->dr_init_at = line;
+
+ blocks = pages * (CFS_PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
+ if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
+ LASSERT(iobuf->dr_pg_buf.lb_len >=
+ pages * sizeof(iobuf->dr_pages[0]));
+ return 0;
+ }
+
+	/* start at 256 pages (1MB with 4KB pages) and double until >= pages */
+ i = 256;
+ while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
+ i <<= 1;
+
+ CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
+ (unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
+ pages = i;
+ blocks = pages * (CFS_PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
+ iobuf->dr_max_pages = 0;
+ CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
+ (unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);
+
+ lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
+ iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
+ if (unlikely(iobuf->dr_blocks == NULL))
+ return -ENOMEM;
+
+ lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
+ iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
+ if (unlikely(iobuf->dr_pages == NULL))
+ return -ENOMEM;
+
+ iobuf->dr_max_pages = pages;
+
+ return 0;
}
+#define osd_init_iobuf(dev, iobuf, rw, pages) \
+ __osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)
static void osd_iobuf_add_page(struct osd_iobuf *iobuf, struct page *page)
{
}
}
+#ifndef REQ_WRITE /* pre-2.6.35 */
+#define __REQ_WRITE BIO_RW
+#endif
+
#ifdef HAVE_BIO_ENDIO_2ARG
#define DIO_RETURN(a)
static void dio_complete_routine(struct bio *bio, int error)
}
/* the check is outside of the cycle for performance reason -bzzz */
- if (!cfs_test_bit(BIO_RW, &bio->bi_rw)) {
+ if (!test_bit(__REQ_WRITE, &bio->bi_rw)) {
bio_for_each_segment(bvl, bio, i) {
if (likely(error == 0))
SetPageUptodate(bvl->bv_page);
if (error != 0 && iobuf->dr_error == 0)
iobuf->dr_error = error;
- if (cfs_atomic_dec_and_test(&iobuf->dr_numreqs)) {
- iobuf->dr_elapsed = jiffies - iobuf->dr_start_time;
- iobuf->dr_elapsed_valid = 1;
- cfs_waitq_signal(&iobuf->dr_wait);
- }
+	/*
+	 * Set dr_elapsed before dr_numreqs drops to 0; otherwise the
+	 * service thread could observe dr_numreqs == 0 while dr_elapsed
+	 * is not yet set, losing the timing data for this I/O and
+	 * tripping the dr_elapsed_valid assertion on the next use of
+	 * this iobuf in the OSD.
+	 */
+ if (cfs_atomic_read(&iobuf->dr_numreqs) == 1) {
+ iobuf->dr_elapsed = jiffies - iobuf->dr_start_time;
+ iobuf->dr_elapsed_valid = 1;
+ }
+ if (cfs_atomic_dec_and_test(&iobuf->dr_numreqs))
+ cfs_waitq_signal(&iobuf->dr_wait);
/* Completed bios used to be chained off iobuf->dr_bios and freed in
* filter_clear_dreq(). It was then possible to exhaust the biovec-256
osd_submit_bio(iobuf->dr_rw, bio);
}
- /* allocate new bio, limited by max BIO size, b=9945 */
- bio = bio_alloc(GFP_NOIO, max(BIO_MAX_PAGES,
- (npages - page_idx) *
- blocks_per_page));
+ /* allocate new bio */
+ bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
+ (npages - page_idx) *
+ blocks_per_page));
if (bio == NULL) {
CERROR("Can't allocate bio %u*%u = %u pages\n",
(npages - page_idx), blocks_per_page,
/*
* there are following "locks":
* journal_start
- * i_alloc_sem
* i_mutex
* page lock
LASSERT(inode);
- osd_init_iobuf(osd, iobuf, 0);
+ rc = osd_init_iobuf(osd, iobuf, 0, npages);
+ if (unlikely(rc != 0))
+ RETURN(rc);
- isize = i_size_read(inode);
+ isize = i_size_read(inode);
maxidx = ((isize + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT) - 1;
if (osd->od_writethrough_cache)
lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
if (iobuf->dr_npages) {
- rc = osd->od_fsops->fs_map_inode_pages(inode, iobuf->dr_pages,
- iobuf->dr_npages,
- iobuf->dr_blocks,
- oti->oti_created,
- 0, NULL);
+ rc = osd->od_fsops->fs_map_inode_pages(inode, iobuf->dr_pages,
+ iobuf->dr_npages,
+ iobuf->dr_blocks,
+ 0, NULL);
if (likely(rc == 0)) {
rc = osd_do_bio(osd, inode, iobuf);
/* do IO stats for preparation reads */
LASSERT(inode);
- osd_init_iobuf(osd, iobuf, 1);
- isize = i_size_read(inode);
+ rc = osd_init_iobuf(osd, iobuf, 1, npages);
+ if (unlikely(rc != 0))
+ RETURN(rc);
+
+ isize = i_size_read(inode);
ll_vfs_dq_init(inode);
for (i = 0; i < npages; i++) {
rc = -ENOSPC;
} else if (iobuf->dr_npages > 0) {
rc = osd->od_fsops->fs_map_inode_pages(inode, iobuf->dr_pages,
- iobuf->dr_npages,
- iobuf->dr_blocks,
- oti->oti_created,
- 1, NULL);
+ iobuf->dr_npages,
+ iobuf->dr_blocks,
+ 1, NULL);
} else {
/* no pages to write, no transno is needed */
thandle->th_local = 1;
if (isize > i_size_read(inode)) {
i_size_write(inode, isize);
LDISKFS_I(inode)->i_disksize = isize;
- inode->i_sb->s_op->dirty_inode(inode);
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
}
rc = osd_do_bio(osd, inode, iobuf);
LASSERT(inode);
- osd_init_iobuf(osd, iobuf, 0);
+ rc = osd_init_iobuf(osd, iobuf, 0, npages);
+ if (unlikely(rc != 0))
+ RETURN(rc);
- if (osd->od_read_cache)
- cache = 1;
- if (i_size_read(inode) > osd->od_readcache_max_filesize)
- cache = 0;
+ if (osd->od_read_cache)
+ cache = 1;
+ if (i_size_read(inode) > osd->od_readcache_max_filesize)
+ cache = 0;
cfs_gettimeofday(&start);
for (i = 0; i < npages; i++) {
lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
if (iobuf->dr_npages) {
- rc = osd->od_fsops->fs_map_inode_pages(inode, iobuf->dr_pages,
- iobuf->dr_npages,
- iobuf->dr_blocks,
- oti->oti_created,
- 0, NULL);
+ rc = osd->od_fsops->fs_map_inode_pages(inode, iobuf->dr_pages,
+ iobuf->dr_npages,
+ iobuf->dr_blocks,
+ 0, NULL);
rc = osd_do_bio(osd, inode, iobuf);
/* IO stats will be done in osd_bufs_put() */
int err;
/* prevent reading after eof */
- cfs_spin_lock(&inode->i_lock);
- if (i_size_read(inode) < *offs + size) {
+ spin_lock(&inode->i_lock);
+ if (i_size_read(inode) < *offs + size) {
loff_t diff = i_size_read(inode) - *offs;
- cfs_spin_unlock(&inode->i_lock);
+ spin_unlock(&inode->i_lock);
if (diff < 0) {
CDEBUG(D_EXT2, "size %llu is too short to read @%llu\n",
i_size_read(inode), *offs);
} else {
size = diff;
}
- } else {
- cfs_spin_unlock(&inode->i_lock);
- }
+ } else {
+ spin_unlock(&inode->i_lock);
+ }
blocksize = 1 << inode->i_blkbits;
osize = size;
* on-disk symlinks for ldiskfs.
*/
if (S_ISLNK(dt->do_lu.lo_header->loh_attr) &&
- (buf->lb_len <= sizeof(LDISKFS_I(inode)->i_data)))
+ (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
rc = osd_ldiskfs_readlink(inode, buf->lb_buf, buf->lb_len);
else
rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
else
credits = osd_dto_credits_noquota[DTO_WRITE_BLOCK];
- OSD_DECLARE_OP(oh, write);
- oh->ot_credits += credits;
+ osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
inode = osd_dt_obj(dt)->oo_inode;
static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
{
+ /* LU-2634: clear the extent format for fast symlink */
+ ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
LDISKFS_I(inode)->i_disksize = buflen;
i_size_write(inode, buflen);
- inode->i_sb->s_op->dirty_inode(inode);
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
return 0;
}
--new_size;
/* correct in-core and on-disk sizes */
if (new_size > i_size_read(inode)) {
- cfs_spin_lock(&inode->i_lock);
- if (new_size > i_size_read(inode))
- i_size_write(inode, new_size);
- if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
- LDISKFS_I(inode)->i_disksize = i_size_read(inode);
- dirty_inode = 1;
- }
- cfs_spin_unlock(&inode->i_lock);
- if (dirty_inode)
- inode->i_sb->s_op->dirty_inode(inode);
+ spin_lock(&inode->i_lock);
+ if (new_size > i_size_read(inode))
+ i_size_write(inode, new_size);
+ if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
+ LDISKFS_I(inode)->i_disksize = i_size_read(inode);
+ dirty_inode = 1;
+ }
+ spin_unlock(&inode->i_lock);
+ if (dirty_inode)
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
}
if (err == 0)
return -EACCES;
LASSERT(handle != NULL);
+ LASSERT(inode != NULL);
+ ll_vfs_dq_init(inode);
/* XXX: don't check: one declared chunk can be used many times */
- /* OSD_EXEC_OP(handle, write); */
+ /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */
oh = container_of(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle->h_transaction != NULL);
LASSERT(th);
oh = container_of(th, struct osd_thandle, ot_super);
- OSD_DECLARE_OP(oh, punch);
-
/*
* we don't need to reserve credits for whole truncate
* it's not possible as truncate may need to free too many
* orphan list. if needed truncate will extend or restart
* transaction
*/
- oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
- oh->ot_credits += 3;
+ osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
+ osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
inode = osd_dt_obj(dt)->oo_inode;
LASSERT(inode);
LASSERT(end == OBD_OBJECT_EOF);
LASSERT(dt_object_exists(dt));
LASSERT(osd_invariant(obj));
+ LASSERT(inode != NULL);
+ ll_vfs_dq_init(inode);
LASSERT(th);
oh = container_of(th, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle->h_transaction != NULL);
- OSD_EXEC_OP(th, punch);
+ osd_trans_exec_op(env, th, OSD_OT_PUNCH);
tid = oh->ot_handle->h_transaction->t_tid;