* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
}
#endif
-static void osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,int rw)
+static void __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
+ int rw, int line)
{
+ LASSERTF(iobuf->dr_elapsed_valid == 0,
+ "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
+ cfs_atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
+ iobuf->dr_init_at);
+
cfs_waitq_init(&iobuf->dr_wait);
cfs_atomic_set(&iobuf->dr_numreqs, 0);
iobuf->dr_max_pages = PTLRPC_MAX_BRW_PAGES;
iobuf->dr_frags = 0;
iobuf->dr_elapsed = 0;
/* must be counted before, so assert */
- LASSERT(iobuf->dr_elapsed_valid == 0);
iobuf->dr_rw = rw;
+ iobuf->dr_init_at = line;
}
+#define osd_init_iobuf(dev,iobuf,rw) __osd_init_iobuf(dev, iobuf, rw, __LINE__)
static void osd_iobuf_add_page(struct osd_iobuf *iobuf, struct page *page)
{
}
/* the check is outside of the cycle for performance reason -bzzz */
- if (!cfs_test_bit(BIO_RW, &bio->bi_rw)) {
+ if (!test_bit(BIO_RW, &bio->bi_rw)) {
bio_for_each_segment(bvl, bio, i) {
if (likely(error == 0))
SetPageUptodate(bvl->bv_page);
if (error != 0 && iobuf->dr_error == 0)
iobuf->dr_error = error;
- if (cfs_atomic_dec_and_test(&iobuf->dr_numreqs)) {
- iobuf->dr_elapsed = jiffies - iobuf->dr_start_time;
- iobuf->dr_elapsed_valid = 1;
- cfs_waitq_signal(&iobuf->dr_wait);
- }
+ /*
+ * Record dr_elapsed before dr_numreqs drops to zero. Otherwise the
+ * service thread waiting on dr_wait could observe dr_numreqs == 0
+ * while dr_elapsed is still unset, losing the timing data for this
+ * I/O and tripping the dr_elapsed_valid LASSERT the next time the
+ * iobuf is initialized.
+ */
+ if (cfs_atomic_read(&iobuf->dr_numreqs) == 1) {
+ iobuf->dr_elapsed = jiffies - iobuf->dr_start_time;
+ iobuf->dr_elapsed_valid = 1;
+ }
+ if (cfs_atomic_dec_and_test(&iobuf->dr_numreqs))
+ cfs_waitq_signal(&iobuf->dr_wait);
/* Completed bios used to be chained off iobuf->dr_bios and freed in
* filter_clear_dreq(). It was then possible to exhaust the biovec-256
osd_submit_bio(iobuf->dr_rw, bio);
}
- /* allocate new bio, limited by max BIO size, b=9945 */
- bio = bio_alloc(GFP_NOIO, max(BIO_MAX_PAGES,
- (npages - page_idx) *
- blocks_per_page));
+ /* allocate new bio, capped at BIO_MAX_PAGES (b=9945) */
+ bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
+ (npages - page_idx) *
+ blocks_per_page));
if (bio == NULL) {
CERROR("Can't allocate bio %u*%u = %u pages\n",
(npages - page_idx), blocks_per_page,
if (plen > len)
plen = len;
- lnb->offset = offset;
- /* lnb->lnb_page_offset = poff; */
+ lnb->lnb_file_offset = offset;
+ lnb->lnb_page_offset = poff;
lnb->len = plen;
/* lb->flags = rnb->flags; */
lnb->flags = 0;
/*
* there are following "locks":
* journal_start
- * i_alloc_sem
* i_mutex
* page lock
* needs to keep the pages all aligned properly. */
lnb->dentry = (void *) obj;
- lnb->page = osd_get_page(d, lnb->offset, rw);
+ lnb->page = osd_get_page(d, lnb->lnb_file_offset, rw);
if (lnb->page == NULL)
GOTO(cleanup, rc = -ENOMEM);
long off;
char *p = kmap(lnb[i].page);
- off = lnb[i].offset;
- if (off)
- memset(p, 0, off);
- off = lnb[i].offset + lnb[i].len;
- off &= ~CFS_PAGE_MASK;
+ off = lnb[i].lnb_page_offset;
+ if (off)
+ memset(p, 0, off);
+ off = (lnb[i].lnb_page_offset + lnb[i].len) &
+ ~CFS_PAGE_MASK;
if (off)
memset(p + off, 0, CFS_PAGE_SIZE - off);
kunmap(lnb[i].page);
/* calculate number of extents (probably better to pass nb) */
for (i = 0; i < npages; i++) {
- if (i && lnb[i].offset !=
- lnb[i - 1].offset + lnb[i - 1].len)
+ if (i && lnb[i].lnb_file_offset !=
+ lnb[i - 1].lnb_file_offset + lnb[i - 1].len)
extents++;
- if (!osd_is_mapped(inode, lnb[i].offset))
+ if (!osd_is_mapped(inode, lnb[i].lnb_file_offset))
quota_space += CFS_PAGE_SIZE;
/* ignore quota for the whole request if any page is from
for (i = 0; i < npages; i++) {
if (lnb[i].rc == -ENOSPC &&
- osd_is_mapped(inode, lnb[i].offset)) {
+ osd_is_mapped(inode, lnb[i].lnb_file_offset)) {
/* Allow the write to proceed if overwriting an
* existing block */
lnb[i].rc = 0;
LASSERT(PageLocked(lnb[i].page));
LASSERT(!PageWriteback(lnb[i].page));
- if (lnb[i].offset + lnb[i].len > isize)
- isize = lnb[i].offset + lnb[i].len;
+ if (lnb[i].lnb_file_offset + lnb[i].len > isize)
+ isize = lnb[i].lnb_file_offset + lnb[i].len;
/*
* Since write and truncate are serialized by oo_sem, even
cfs_gettimeofday(&start);
for (i = 0; i < npages; i++) {
- if (i_size_read(inode) <= lnb[i].offset)
+ if (i_size_read(inode) <= lnb[i].lnb_file_offset)
/* If there's no more data, abort early.
* lnb->rc == 0, so it's easy to detect later. */
break;
if (i_size_read(inode) <
- lnb[i].offset + lnb[i].len - 1)
- lnb[i].rc = i_size_read(inode) - lnb[i].offset;
+ lnb[i].lnb_file_offset + lnb[i].len - 1)
+ lnb[i].rc = i_size_read(inode) - lnb[i].lnb_file_offset;
else
lnb[i].rc = lnb[i].len;
m += lnb[i].len;
int err;
/* prevent reading after eof */
- cfs_spin_lock(&inode->i_lock);
- if (i_size_read(inode) < *offs + size) {
+ spin_lock(&inode->i_lock);
+ if (i_size_read(inode) < *offs + size) {
loff_t diff = i_size_read(inode) - *offs;
- cfs_spin_unlock(&inode->i_lock);
+ spin_unlock(&inode->i_lock);
if (diff < 0) {
CDEBUG(D_EXT2, "size %llu is too short to read @%llu\n",
i_size_read(inode), *offs);
} else {
size = diff;
}
- } else {
- cfs_spin_unlock(&inode->i_lock);
- }
+ } else {
+ spin_unlock(&inode->i_lock);
+ }
blocksize = 1 << inode->i_blkbits;
osize = size;
* on-disk symlinks for ldiskfs.
*/
if (S_ISLNK(dt->do_lu.lo_header->loh_attr) &&
- (buf->lb_len <= sizeof(LDISKFS_I(inode)->i_data)))
+ (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
rc = osd_ldiskfs_readlink(inode, buf->lb_buf, buf->lb_len);
else
rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
else
credits = osd_dto_credits_noquota[DTO_WRITE_BLOCK];
- OSD_DECLARE_OP(oh, write);
- oh->ot_credits += credits;
+ osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
inode = osd_dt_obj(dt)->oo_inode;
static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
{
+ /* LU-2634: clear the extent format for fast symlink */
+ ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
LDISKFS_I(inode)->i_disksize = buflen;
--new_size;
/* correct in-core and on-disk sizes */
if (new_size > i_size_read(inode)) {
- cfs_spin_lock(&inode->i_lock);
- if (new_size > i_size_read(inode))
- i_size_write(inode, new_size);
- if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
- LDISKFS_I(inode)->i_disksize = i_size_read(inode);
- dirty_inode = 1;
- }
- cfs_spin_unlock(&inode->i_lock);
+ spin_lock(&inode->i_lock);
+ if (new_size > i_size_read(inode))
+ i_size_write(inode, new_size);
+ if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
+ LDISKFS_I(inode)->i_disksize = i_size_read(inode);
+ dirty_inode = 1;
+ }
+ spin_unlock(&inode->i_lock);
if (dirty_inode)
inode->i_sb->s_op->dirty_inode(inode);
}
struct inode *inode = osd_dt_obj(dt)->oo_inode;
struct osd_thandle *oh;
ssize_t result;
-#ifdef HAVE_QUOTA_SUPPORT
- cfs_cap_t save = cfs_curproc_cap_pack();
-#endif
int is_link;
LASSERT(dt_object_exists(dt));
return -EACCES;
LASSERT(handle != NULL);
+ LASSERT(inode != NULL);
+ ll_vfs_dq_init(inode);
/* XXX: don't check: one declared chunk can be used many times */
- /* OSD_EXEC_OP(handle, write); */
+ /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */
oh = container_of(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle->h_transaction != NULL);
-#ifdef HAVE_QUOTA_SUPPORT
- if (ignore_quota)
- cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
- else
- cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
-#endif
/* Write small symlink to inode body as we need to maintain correct
* on-disk symlinks for ldiskfs.
* Note: the buf->lb_buf contains a NUL terminator while buf->lb_len
result = osd_ldiskfs_write_record(inode, buf->lb_buf,
buf->lb_len, is_link, pos,
oh->ot_handle);
-#ifdef HAVE_QUOTA_SUPPORT
- cfs_curproc_cap_unpack(save);
-#endif
if (result == 0)
result = buf->lb_len;
return result;
LASSERT(th);
oh = container_of(th, struct osd_thandle, ot_super);
- OSD_DECLARE_OP(oh, punch);
-
/*
* we don't need to reserve credits for whole truncate
* it's not possible as truncate may need to free too many
* orphan list. if needed truncate will extend or restart
* transaction
*/
- oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
- oh->ot_credits += 3;
+ osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
+ osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
inode = osd_dt_obj(dt)->oo_inode;
LASSERT(inode);
LASSERT(end == OBD_OBJECT_EOF);
LASSERT(dt_object_exists(dt));
LASSERT(osd_invariant(obj));
+ LASSERT(inode != NULL);
+ ll_vfs_dq_init(inode);
LASSERT(th);
oh = container_of(th, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle->h_transaction != NULL);
- OSD_EXEC_OP(th, punch);
+ osd_trans_exec_op(env, th, OSD_OT_PUNCH);
tid = oh->ot_handle->h_transaction->t_tid;