*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
- */
-/*
- * Copyright (c) 2012, 2013, Intel Corporation.
- * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <obd_class.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
-#include <lustre/lustre_idl.h> /* LLOG_CHUNK_SIZE definition */
+#include <lustre/lustre_idl.h> /* LLOG_MIN_CHUNK_SIZE definition */
#include "osd_internal.h"
#include <sys/sa_impl.h>
#include <sys/txg.h>
-static char *osd_zerocopy_tag = "zerocopy";
+static char *osd_0copy_tag = "zerocopy";
+
+static void record_start_io(struct osd_device *osd, int rw, int discont_pages)
+{
+ struct obd_histogram *h = osd->od_brw_stats.hist;
+
+ if (rw == READ) {
+ atomic_inc(&osd->od_r_in_flight);
+ lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
+ atomic_read(&osd->od_r_in_flight));
+ lprocfs_oh_tally(&h[BRW_R_DISCONT_PAGES], discont_pages);
+ } else {
+ atomic_inc(&osd->od_w_in_flight);
+ lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
+ atomic_read(&osd->od_w_in_flight));
+ lprocfs_oh_tally(&h[BRW_W_DISCONT_PAGES], discont_pages);
+ }
+}
+
+static void record_end_io(struct osd_device *osd, int rw,
+ unsigned long elapsed, int disksize, int npages)
+{
+ struct obd_histogram *h = osd->od_brw_stats.hist;
+
+ if (rw == READ) {
+ atomic_dec(&osd->od_r_in_flight);
+ lprocfs_oh_tally_log2(&h[BRW_R_PAGES], npages);
+ if (disksize > 0)
+ lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], disksize);
+ if (elapsed)
+ lprocfs_oh_tally_log2(&h[BRW_R_IO_TIME], elapsed);
+
+ } else {
+ atomic_dec(&osd->od_w_in_flight);
+ lprocfs_oh_tally_log2(&h[BRW_W_PAGES], npages);
+ if (disksize > 0)
+ lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], disksize);
+ if (elapsed)
+ lprocfs_oh_tally_log2(&h[BRW_W_IO_TIME], elapsed);
+ }
+}
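
These two helpers are intended to bracket every I/O path so that the brw_stats histograms see consistent in-flight counts, sizes and latencies. A minimal sketch of the expected calling pattern (illustrative only; do_dmu_io() is a hypothetical stand-in for the actual DMU call):

static int example_io_path(struct osd_device *osd, int npages)
{
	unsigned long start = cfs_time_current();
	int rc;

	/* bump the in-flight counter, tally RPC and discontiguity stats */
	record_start_io(osd, READ, 0);

	rc = do_dmu_io();	/* hypothetical: the real dmu_read() etc. */

	/* drop the in-flight counter, tally size and latency histograms */
	record_end_io(osd, READ, cfs_time_current() - start,
		      npages * PAGE_SIZE, npages);
	return rc;
}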
static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
- struct lu_buf *buf, loff_t *pos,
- struct lustre_capa *capa)
+ struct lu_buf *buf, loff_t *pos)
{
struct osd_object *obj = osd_dt_obj(dt);
struct osd_device *osd = osd_obj2dev(obj);
uint64_t old_size;
int size = buf->lb_len;
int rc;
+ unsigned long start;
LASSERT(dt_object_exists(dt));
- LASSERT(obj->oo_db);
+ LASSERT(obj->oo_dn);
+
+ start = cfs_time_current();
read_lock(&obj->oo_attr_lock);
old_size = obj->oo_attr.la_size;
size = old_size - *pos;
}
- rc = -dmu_read(osd->od_os, obj->oo_db->db_object, *pos, size,
- buf->lb_buf, DMU_READ_PREFETCH);
+ record_start_io(osd, READ, 0);
+
+ rc = osd_dmu_read(osd, obj->oo_dn, *pos, size, buf->lb_buf,
+ DMU_READ_PREFETCH);
+
+ record_end_io(osd, READ, cfs_time_current() - start, size,
+ size >> PAGE_SHIFT);
if (rc == 0) {
rc = size;
*pos += size;
-
- /* XXX: workaround for bug in HEAD: fsfilt_ldiskfs_read() returns
- * requested number of bytes, not actually read ones */
- if (S_ISLNK(obj->oo_dt.do_lu.lo_header->loh_attr))
- rc = buf->lb_len;
}
return rc;
}
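
With the struct lustre_capa argument dropped, a call site only needs a buffer and a position. A hedged sketch of how the dt layer would invoke this method (env, dt, data and data_len assumed to be set up by the caller):

	struct lu_buf buf = {
		.lb_buf = data,
		.lb_len = data_len,
	};
	loff_t pos = 0;
	ssize_t rc;

	/* dispatches to osd_read() via osd_body_ops; on success rc holds
	 * the number of bytes read (capped at EOF) and pos is advanced */
	rc = dt->do_body_ops->dbo_read(env, dt, &buf, &pos);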
* LOHA_EXISTs is supposed to be the last step in the
* initialization */
- /* declare possible size change. notice we can't check
- * current size here as another thread can change it */
-
- if (dt_object_exists(dt)) {
- LASSERT(obj->oo_db);
- oid = obj->oo_db->db_object;
-
- dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
- } else {
+ /* size change (in dnode) will be declared by dmu_tx_hold_write() */
+ if (dt_object_exists(dt))
+ oid = obj->oo_dn->dn_object;
+ else
oid = DMU_NEW_OBJECT;
- dmu_tx_hold_sa_create(oh->ot_tx, ZFS_SA_BASE_ATTR_SIZE);
- }
/* XXX: we still miss for append declaration support in ZFS
* -1 means append which is used by llog mostly, llog
- * can grow upto LLOG_CHUNK_SIZE*8 records */
+ * can grow up to LLOG_MIN_CHUNK_SIZE*8 records */
if (pos == -1)
- pos = max_t(loff_t, 256 * 8 * LLOG_CHUNK_SIZE,
+ pos = max_t(loff_t, 256 * 8 * LLOG_MIN_CHUNK_SIZE,
obj->oo_attr.la_size + (2 << 20));
- dmu_tx_hold_write(oh->ot_tx, oid, pos, buf->lb_len);
+ osd_tx_hold_write(oh->ot_tx, oid, obj->oo_dn, pos, buf->lb_len);
/* dt_declare_write() is usually called for system objects, such
* as llog or last_rcvd files. We needn't enforce quota on those
static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
const struct lu_buf *buf, loff_t *pos,
- struct thandle *th, struct lustre_capa *capa,
- int ignore_quota)
+ struct thandle *th, int ignore_quota)
{
struct osd_object *obj = osd_dt_obj(dt);
struct osd_device *osd = osd_obj2dev(obj);
struct osd_thandle *oh;
uint64_t offset = *pos;
int rc;
+
ENTRY;
LASSERT(dt_object_exists(dt));
- LASSERT(obj->oo_db);
+ LASSERT(obj->oo_dn);
LASSERT(th != NULL);
oh = container_of0(th, struct osd_thandle, ot_super);
- dmu_write(osd->od_os, obj->oo_db->db_object, offset,
- (uint64_t)buf->lb_len, buf->lb_buf, oh->ot_tx);
+ osd_dmu_write(osd, obj->oo_dn, offset, (uint64_t)buf->lb_len,
+ buf->lb_buf, oh->ot_tx);
write_lock(&obj->oo_attr_lock);
if (obj->oo_attr.la_size < offset + buf->lb_len) {
obj->oo_attr.la_size = offset + buf->lb_len;
* XXX: for the moment I don't want to use lnb_flags for osd-internal
* purposes as it's not very well defined ...
* instead I use the lowest bit of the address so that:
- * arc buffer: .lnb_obj = abuf (arc we loan for write)
- * dbuf buffer: .lnb_obj = dbuf | 1 (dbuf we get for read)
+ * arc buffer: .lnb_data = abuf (arc we loan for write)
+ * dbuf buffer: .lnb_data = dbuf | 1 (dbuf we get for read)
* copy buffer: .lnb_page->mapping = obj (page we allocate for write)
*
* bzzz, to blame
int i;
LASSERT(dt_object_exists(dt));
- LASSERT(obj->oo_db);
+ LASSERT(obj->oo_dn);
for (i = 0; i < npages; i++) {
if (lnb[i].lnb_page == NULL)
ptr = (unsigned long)lnb[i].lnb_data;
if (ptr & 1UL) {
ptr &= ~1UL;
- dmu_buf_rele((void *)ptr, osd_zerocopy_tag);
+ dmu_buf_rele((void *)ptr, osd_0copy_tag);
atomic_dec(&osd->od_zerocopy_pin);
} else if (lnb[i].lnb_data != NULL) {
dmu_return_arcbuf(lnb[i].lnb_data);
return virt_to_page(addr);
}
+/**
+ * Prepare buffers for read.
+ *
+ * The function maps the range described by \a off and \a len to \a lnb array.
+ * dmu_buf_hold_array_by_bonus() finds/creates appropriate ARC buffers, then
+ * we fill \a lnb array with the pages storing ARC buffers. Notice the current
+ * implementation passes TRUE to dmu_buf_hold_array_by_bonus() to fill ARC
+ * buffers with actual data, so I/O is done in the context of
+ * osd_bufs_get_read().
+ * A better implementation would just return the buffers (potentially unfilled)
+ * and subsequent osd_read_prep() would do I/O for many ranges concurrently.
+ *
+ * \param[in] env environment
+ * \param[in] obj object
+ * \param[in] off offset in bytes
+ * \param[in] len the number of bytes to access
+ * \param[out] lnb array of local niobufs pointing to the buffers with data
+ *
+ * \retval 0 for success
+ * \retval negative error number on failure
+ */
static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj,
loff_t off, ssize_t len, struct niobuf_local *lnb)
{
struct osd_device *osd = osd_obj2dev(obj);
- dmu_buf_t **dbp;
+ unsigned long start = cfs_time_current();
int rc, i, numbufs, npages = 0;
+ dmu_buf_t **dbp;
ENTRY;
+ record_start_io(osd, READ, 0);
+
/* grab buffers for read:
* OSD API let us to grab buffers first, then initiate IO(s)
* so that all required IOs will be done in parallel, but at the
* can get own replacement for dmu_buf_hold_array_by_bonus().
*/
while (len > 0) {
- rc = -dmu_buf_hold_array_by_bonus(obj->oo_db, off, len, TRUE,
- osd_zerocopy_tag, &numbufs,
- &dbp);
+ rc = -dmu_buf_hold_array_by_bonus(&obj->oo_dn->dn_bonus->db,
+ off, len, TRUE, osd_0copy_tag,
+ &numbufs, &dbp);
if (unlikely(rc))
GOTO(err, rc);
dbf = (void *) ((unsigned long)dbp[i] | 1);
while (tocpy > 0) {
- thispage = PAGE_CACHE_SIZE;
- thispage -= bufoff & (PAGE_CACHE_SIZE - 1);
+ thispage = PAGE_SIZE;
+ thispage -= bufoff & (PAGE_SIZE - 1);
thispage = min(tocpy, thispage);
lnb->lnb_rc = 0;
lnb->lnb_file_offset = off;
- lnb->lnb_page_offset = bufoff & ~CFS_PAGE_MASK;
+ lnb->lnb_page_offset = bufoff & ~PAGE_MASK;
lnb->lnb_len = thispage;
lnb->lnb_page = kmem_to_page(dbp[i]->db_data +
bufoff);
/* mark just a single slot: we need this
- * reference to dbuf to be release once */
+ * reference to dbuf to be released once */
lnb->lnb_data = dbf;
dbf = NULL;
lnb++;
}
- /* steal dbuf so dmu_buf_rele_array() cant release it */
+ /* steal dbuf so dmu_buf_rele_array() can't release
+ * it */
dbp[i] = NULL;
}
- dmu_buf_rele_array(dbp, numbufs, osd_zerocopy_tag);
+ dmu_buf_rele_array(dbp, numbufs, osd_0copy_tag);
}
+ record_end_io(osd, READ, cfs_time_current() - start,
+ npages * PAGE_SIZE, npages);
+
RETURN(npages);
err:
struct osd_device *osd = osd_obj2dev(obj);
int plen, off_in_block, sz_in_block;
int rc, i = 0, npages = 0;
- arc_buf_t *abuf;
- uint32_t bs;
- uint64_t dummy;
+ dnode_t *dn = obj->oo_dn;
+ arc_buf_t *abuf;
+ uint32_t bs = dn->dn_datablksz;
ENTRY;
- dmu_object_size_from_db(obj->oo_db, &bs, &dummy);
-
/*
* currently only full blocks are subject to zerocopy approach:
* so that we're sure nobody is trying to update the same block
if (sz_in_block == bs) {
/* full block, try to use zerocopy */
- abuf = dmu_request_arcbuf(obj->oo_db, bs);
+ abuf = dmu_request_arcbuf(&dn->dn_bonus->db, bs);
if (unlikely(abuf == NULL))
GOTO(out_err, rc = -ENOMEM);
/* go over pages arcbuf contains, put them as
* local niobufs for ptlrpc's bulks */
while (sz_in_block > 0) {
- plen = min_t(int, sz_in_block, PAGE_CACHE_SIZE);
+ plen = min_t(int, sz_in_block, PAGE_SIZE);
lnb[i].lnb_file_offset = off;
lnb[i].lnb_page_offset = 0;
/* can't use zerocopy, allocate temp. buffers */
while (sz_in_block > 0) {
- plen = min_t(int, sz_in_block, PAGE_CACHE_SIZE);
+ plen = min_t(int, sz_in_block, PAGE_SIZE);
lnb[i].lnb_file_offset = off;
lnb[i].lnb_page_offset = 0;
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
loff_t offset, ssize_t len, struct niobuf_local *lnb,
- int rw, struct lustre_capa *capa)
+ int rw)
{
struct osd_object *obj = osd_dt_obj(dt);
int rc;
LASSERT(dt_object_exists(dt));
- LASSERT(obj->oo_db);
+ LASSERT(obj->oo_dn);
if (rw == 0)
rc = osd_bufs_get_read(env, obj, offset, len, lnb);
struct osd_object *obj = osd_dt_obj(dt);
LASSERT(dt_object_exists(dt));
- LASSERT(obj->oo_db);
+ LASSERT(obj->oo_dn);
return 0;
}
-/* Return number of blocks that aren't mapped in the [start, start + size]
- * region */
-static int osd_count_not_mapped(struct osd_object *obj, uint64_t start,
- uint32_t size)
+static inline uint64_t osd_roundup2blocksz(uint64_t size,
+ uint64_t offset,
+ uint32_t blksz)
{
- dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)obj->oo_db;
- dmu_buf_impl_t *db;
- dnode_t *dn;
- uint32_t blkshift;
- uint64_t end, blkid;
- int rc;
- ENTRY;
+ LASSERT(blksz > 0);
- DB_DNODE_ENTER(dbi);
- dn = DB_DNODE(dbi);
+ size += offset % blksz;
- if (dn->dn_maxblkid == 0) {
- if (start + size <= dn->dn_datablksz)
- GOTO(out, size = 0);
- if (start < dn->dn_datablksz)
- start = dn->dn_datablksz;
- /* assume largest block size */
- blkshift = SPA_MAXBLOCKSHIFT;
- } else {
- /* blocksize can't change */
- blkshift = dn->dn_datablkshift;
- }
+ if (likely(is_power_of_2(blksz)))
+ return PO2_ROUNDUP_TYPED(size, blksz, uint64_t);
- /* compute address of last block */
- end = (start + size - 1) >> blkshift;
- /* align start on block boundaries */
- start >>= blkshift;
-
- /* size is null, can't be mapped */
- if (obj->oo_attr.la_size == 0 || dn->dn_maxblkid == 0)
- GOTO(out, size = (end - start + 1) << blkshift);
-
- /* beyond EOF, can't be mapped */
- if (start > dn->dn_maxblkid)
- GOTO(out, size = (end - start + 1) << blkshift);
-
- size = 0;
- for (blkid = start; blkid <= end; blkid++) {
- if (blkid == dn->dn_maxblkid)
- /* this one is mapped for sure */
- continue;
- if (blkid > dn->dn_maxblkid) {
- size += (end - blkid + 1) << blkshift;
- GOTO(out, size);
- }
-
- rc = dbuf_hold_impl(dn, 0, blkid, TRUE, FTAG, &db);
- if (rc) {
- /* for ENOENT (block not mapped) and any other errors,
- * assume the block isn't mapped */
- size += 1 << blkshift;
- continue;
- }
- dbuf_rele(db, FTAG);
- }
-
- GOTO(out, size);
-out:
- DB_DNODE_EXIT(dbi);
- return size;
+ size += blksz - 1;
+ do_div(size, blksz);
+ return size * blksz;
}
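
To make the rounding concrete, two worked values assuming a 128KiB (131072-byte) block size; note how the in-block offset can add a whole extra block:

/*
 * osd_roundup2blocksz(4096, 0, 131072)
 *	size = 4096 + (0 % 131072)      = 4096
 *	rounded up to blksz             = 131072	(one block)
 *
 * osd_roundup2blocksz(4096, 131000, 131072)	(straddles a boundary)
 *	size = 4096 + (131000 % 131072) = 135096
 *	rounded up to blksz             = 262144	(two blocks)
 */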
static int osd_declare_write_commit(const struct lu_env *env,
- struct dt_object *dt,
- struct niobuf_local *lnb, int npages,
- struct thandle *th)
+ struct dt_object *dt,
+ struct niobuf_local *lnb, int npages,
+ struct thandle *th)
{
struct osd_object *obj = osd_dt_obj(dt);
struct osd_device *osd = osd_obj2dev(obj);
struct osd_thandle *oh;
uint64_t offset = 0;
uint32_t size = 0;
+ uint32_t blksz = obj->oo_dn->dn_datablksz;
int i, rc, flags = 0;
bool ignore_quota = false, synced = false;
long long space = 0;
+ struct page *last_page = NULL;
+ unsigned long discont_pages = 0;
ENTRY;
LASSERT(dt_object_exists(dt));
- LASSERT(obj->oo_db);
+ LASSERT(obj->oo_dn);
LASSERT(lnb);
LASSERT(npages > 0);
oh = container_of0(th, struct osd_thandle, ot_super);
for (i = 0; i < npages; i++) {
+ if (last_page &&
+     lnb[i].lnb_page->index != (last_page->index + 1))
+ ++discont_pages;
+ last_page = lnb[i].lnb_page;
if (lnb[i].lnb_rc)
/* ENOSPC, network RPC error, etc.
* We don't want to book space for pages which will be
continue;
}
- dmu_tx_hold_write(oh->ot_tx, obj->oo_db->db_object,
- offset, size);
- /* estimating space that will be consumed by a write is rather
+ osd_tx_hold_write(oh->ot_tx, obj->oo_dn->dn_object,
+ obj->oo_dn, offset, size);
+ /* Estimating space to be consumed by a write is rather
* complicated with ZFS. As a consequence, we don't account for
- * indirect blocks and quota overrun will be adjusted once the
- * operation is committed, if required. */
- space += osd_count_not_mapped(obj, offset, size);
+ * indirect blocks and just use as a rough estimate the worst
+ * case where the old space is being held by a snapshot. Quota
+ * overrun will be adjusted once the operation is committed, if
+ * required. */
+ space += osd_roundup2blocksz(size, offset, blksz);
offset = lnb[i].lnb_file_offset;
size = lnb[i].lnb_len;
}
if (size) {
- dmu_tx_hold_write(oh->ot_tx, obj->oo_db->db_object,
+ osd_tx_hold_write(oh->ot_tx, obj->oo_dn->dn_object, obj->oo_dn,
offset, size);
- space += osd_count_not_mapped(obj, offset, size);
+ space += osd_roundup2blocksz(size, offset, blksz);
}
- dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
-
oh->ot_write_commit = 1; /* used in osd_trans_start() for fail_loc */
/* backend zfs filesystem might be configured to store multiple data
* copies */
space *= osd->od_os->os_copies;
space = toqb(space);
- CDEBUG(D_QUOTA, "writting %d pages, reserving "LPD64"K of quota "
- "space\n", npages, space);
+ CDEBUG(D_QUOTA, "writing %d pages, reserving %lldK of quota space\n",
+ npages, space);
+ record_start_io(osd, WRITE, discont_pages);
retry:
/* acquire quota space if needed */
rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
RETURN(rc);
}
+/**
+ * Policy to grow ZFS block size by write pattern.
+ * For sequential write, it grows block size gradually until it reaches the
+ * maximum blocksize the dataset can support. Otherwise, it will pick
+ * a block size based on the writing region of this I/O.
+ */
+static int osd_grow_blocksize(struct osd_object *obj, struct osd_thandle *oh,
+ uint64_t start, uint64_t end)
+{
+ struct osd_device *osd = osd_obj2dev(obj);
+ dnode_t *dn = obj->oo_dn;
+ uint32_t blksz;
+ int rc = 0;
+
+ ENTRY;
+
+ if (dn->dn_maxblkid > 0) /* can't change block size */
+ GOTO(out, rc);
+
+ if (dn->dn_datablksz >= osd->od_max_blksz)
+ GOTO(out, rc);
+
+ down_write(&obj->oo_guard);
+
+ blksz = dn->dn_datablksz;
+ if (blksz >= osd->od_max_blksz) /* check again after grabbing lock */
+ GOTO(out_unlock, rc);
+
+ /* now ZFS can support up to 16MB block size, and if the write
+ * is sequential, it just increases the block size gradually */
+ if (start <= blksz) { /* sequential */
+ blksz = (uint32_t)min_t(uint64_t, osd->od_max_blksz, end);
+ } else { /* sparse, pick a block size by write region */
+ blksz = (uint32_t)min_t(uint64_t, osd->od_max_blksz,
+ end - start);
+ }
+
+ if (!is_power_of_2(blksz))
+ blksz = size_roundup_power2(blksz);
+
+ if (blksz > dn->dn_datablksz) {
+ rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object,
+ blksz, 0, oh->ot_tx);
+ LASSERT(ergo(rc == 0, dn->dn_datablksz >= blksz));
+ if (rc < 0)
+ CDEBUG(D_INODE, "object "DFID": change block size "
+ "%u -> %u error rc = %d\n",
+ PFID(lu_object_fid(&obj->oo_dt.do_lu)),
+ dn->dn_datablksz, blksz, rc);
+ }
+ EXIT;
+out_unlock:
+ up_write(&obj->oo_guard);
+out:
+ return rc;
+}
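
The policy is easiest to follow on a trace. Assuming a fresh object with dn_datablksz = 128KiB, od_max_blksz = 1MiB and dn_maxblkid == 0 (hypothetical values):

/*
 * sequential write [0, 512K):   start (0) <= blksz (128K), so
 *	blksz = min(od_max_blksz, end) = 512K	(already a power of 2)
 * sparse write [3M, 3M + 200K): start > blksz, so
 *	blksz = min(od_max_blksz, end - start) = 200K
 *	size_roundup_power2(200K)              = 256K
 * once dn_maxblkid > 0 the block size is frozen and the function
 * returns without touching the dnode
 */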
+
static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
struct niobuf_local *lnb, int npages,
struct thandle *th)
struct osd_thandle *oh;
uint64_t new_size = 0;
int i, rc = 0;
+ unsigned long iosize = 0;
ENTRY;
LASSERT(dt_object_exists(dt));
- LASSERT(obj->oo_db);
+ LASSERT(obj->oo_dn);
LASSERT(th != NULL);
oh = container_of0(th, struct osd_thandle, ot_super);
+ /* adjust block size. Assume the buffers are sorted. */
+ (void)osd_grow_blocksize(obj, oh, lnb[0].lnb_file_offset,
+ lnb[npages - 1].lnb_file_offset +
+ lnb[npages - 1].lnb_len);
+
+ /* LU-8791: take oo_guard to avoid the deadlock that changing block
+ * size and assigning arcbuf take place at the same time.
+ *
+ * Thread 1:
+ * osd_write_commit()
+ * -> osd_grow_blocksize() with osd_object::oo_guard held
+ * -> dmu_object_set_blocksize()
+ * -> dnode_set_blksz(), with dnode_t::dn_struct_rwlock
+ * write lock held
+ * -> dbuf_new_size()
+ * -> dmu_buf_will_dirty()
+ * -> dbuf_read()
+ * -> wait for the dbuf state to change
+ * Thread 2:
+ * osd_write_commit()
+ * -> dmu_assign_arcbuf()
+ * -> dbuf_assign_arcbuf(), set dbuf state to DB_FILL
+ * -> dbuf_dirty()
+ * -> try to hold the read lock of dnode_t::dn_struct_rwlock
+ *
+ * Taking the read lock prevents thread 2 from entering the critical
+ * section of assigning the arcbuf while thread 1 is changing the
+ * block size.
+ */
+ down_read(&obj->oo_guard);
for (i = 0; i < npages; i++) {
CDEBUG(D_INODE, "write %u bytes at %u\n",
(unsigned) lnb[i].lnb_len,
}
if (lnb[i].lnb_page->mapping == (void *)obj) {
- dmu_write(osd->od_os, obj->oo_db->db_object,
- lnb[i].lnb_file_offset, lnb[i].lnb_len,
- kmap(lnb[i].lnb_page), oh->ot_tx);
+ osd_dmu_write(osd, obj->oo_dn, lnb[i].lnb_file_offset,
+ lnb[i].lnb_len, kmap(lnb[i].lnb_page),
+ oh->ot_tx);
kunmap(lnb[i].lnb_page);
} else if (lnb[i].lnb_data) {
LASSERT(((unsigned long)lnb[i].lnb_data & 1) == 0);
* notice that dmu_assign_arcbuf() is smart
* enough to recognize changed blocksize
* in this case it fallbacks to dmu_write() */
- dmu_assign_arcbuf(obj->oo_db, lnb[i].lnb_file_offset,
+ dmu_assign_arcbuf(&obj->oo_dn->dn_bonus->db,
+ lnb[i].lnb_file_offset,
lnb[i].lnb_data, oh->ot_tx);
/* drop the reference, otherwise osd_put_bufs()
* will be releasing it - bad! */
if (new_size < lnb[i].lnb_file_offset + lnb[i].lnb_len)
new_size = lnb[i].lnb_file_offset + lnb[i].lnb_len;
+ iosize += lnb[i].lnb_len;
}
+ up_read(&obj->oo_guard);
if (unlikely(new_size == 0)) {
/* no pages to write, no transno is needed */
th->th_local = 1;
/* it is important to return 0 even when all lnb_rc == -ENOSPC
* since ofd_commitrw_write() retries several times on ENOSPC */
+ record_end_io(osd, WRITE, 0, 0, 0);
RETURN(0);
}
write_unlock(&obj->oo_attr_lock);
}
+ record_end_io(osd, WRITE, 0, iosize, npages);
+
RETURN(rc);
}
struct niobuf_local *lnb, int npages)
{
struct osd_object *obj = osd_dt_obj(dt);
- struct lu_buf buf;
- loff_t offset;
int i;
+ loff_t eof;
LASSERT(dt_object_exists(dt));
- LASSERT(obj->oo_db);
+ LASSERT(obj->oo_dn);
+
+ read_lock(&obj->oo_attr_lock);
+ eof = obj->oo_attr.la_size;
+ read_unlock(&obj->oo_attr_lock);
for (i = 0; i < npages; i++) {
- buf.lb_buf = kmap(lnb[i].lnb_page);
- buf.lb_len = lnb[i].lnb_len;
- offset = lnb[i].lnb_file_offset;
+ if (unlikely(lnb[i].lnb_rc < 0))
+ continue;
- CDEBUG(D_OTHER, "read %u bytes at %u\n",
- (unsigned) lnb[i].lnb_len,
- (unsigned) lnb[i].lnb_file_offset);
- lnb[i].lnb_rc = osd_read(env, dt, &buf, &offset, NULL);
- kunmap(lnb[i].lnb_page);
+ lnb[i].lnb_rc = lnb[i].lnb_len;
+
+ if (lnb[i].lnb_file_offset + lnb[i].lnb_len >= eof) {
+ if (eof <= lnb[i].lnb_file_offset)
+ lnb[i].lnb_rc = 0;
+ else
+ lnb[i].lnb_rc = eof - lnb[i].lnb_file_offset;
- if (lnb[i].lnb_rc < buf.lb_len) {
/* all subsequent rc should be 0 */
while (++i < npages)
lnb[i].lnb_rc = 0;
* dmu_tx_hold_sa() and if off < size, dmu_tx_hold_free()
* called and then assigned to a transaction group.
*/
-static int __osd_object_punch(objset_t *os, dmu_buf_t *db, dmu_tx_t *tx,
+static int __osd_object_punch(objset_t *os, dnode_t *dn, dmu_tx_t *tx,
uint64_t size, uint64_t off, uint64_t len)
{
int rc = 0;
if (len == DMU_OBJECT_END && size == off)
return 0;
+ /* XXX: dnode_free_range() can be used to save on dnode lookup */
if (off < size)
- rc = -dmu_free_range(os, db->db_object, off, len, tx);
+ rc = -dmu_free_range(os, dn->dn_object, off, len, tx);
return rc;
}
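
For reference, how the two punch cases above behave, with hypothetical sizes:

/*
 * truncate a 4 MiB object to 1 MiB:
 *	off = 1M, len = DMU_OBJECT_END, size = 4M
 *	off < size, so dmu_free_range() releases [1M, EOF)
 * truncate to the current size:
 *	len == DMU_OBJECT_END && size == off, nothing to free, return 0
 */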
static int osd_punch(const struct lu_env *env, struct dt_object *dt,
- __u64 start, __u64 end, struct thandle *th,
- struct lustre_capa *capa)
+ __u64 start, __u64 end, struct thandle *th)
{
struct osd_object *obj = osd_dt_obj(dt);
struct osd_device *osd = osd_obj2dev(obj);
len = end - start;
write_unlock(&obj->oo_attr_lock);
- rc = __osd_object_punch(osd->od_os, obj->oo_db, oh->ot_tx,
+ rc = __osd_object_punch(osd->od_os, obj->oo_dn, oh->ot_tx,
obj->oo_attr.la_size, start, len);
/* set new size */
if (len == DMU_OBJECT_END) {
/* declare we'll free some blocks ... */
if (start < obj->oo_attr.la_size) {
read_unlock(&obj->oo_attr_lock);
- dmu_tx_hold_free(oh->ot_tx, obj->oo_db->db_object, start, len);
+ dmu_tx_hold_free(oh->ot_tx, obj->oo_dn->dn_object, start, len);
} else {
read_unlock(&obj->oo_attr_lock);
}
- /* ... and we'll modify size attribute */
- dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
-
RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
obj->oo_attr.la_gid, 0, oh, true, NULL,
false));
}
+static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
+ __u64 start, __u64 end, enum lu_ladvise_type advice)
+{
+ int rc;
+ ENTRY;
+
+ switch (advice) {
+ default:
+ rc = -ENOTSUPP;
+ break;
+ }
+
+ RETURN(rc);
+}
struct dt_body_operations osd_body_ops = {
.dbo_read = osd_read,
.dbo_read_prep = osd_read_prep,
.dbo_declare_punch = osd_declare_punch,
.dbo_punch = osd_punch,
+ .dbo_ladvise = osd_ladvise,
};
-