static void dbuf_set_pending_evict(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
+
dbi->db_pending_evict = TRUE;
}
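
A minimal usage sketch for the helper above, under the assumption that the
caller holds the dbuf through the standard DMU API (dmu_buf_hold(),
dmu_buf_rele() and the FTAG tag are real ZFS interfaces; the object/offset
values are purely illustrative):

static int read_block_uncached(objset_t *os, uint64_t object, uint64_t off)
{
	dmu_buf_t *db;
	int rc;

	rc = dmu_buf_hold(os, object, off, FTAG, &db, 0);
	if (rc)
		return rc;
	/* ... consume db->db_data ... */
	dbuf_set_pending_evict(db);	/* drop from cache on last release */
	dmu_buf_rele(db, FTAG);
	return 0;
}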
const struct lu_buf *buf, loff_t pos,
struct thandle *th)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_device *osd = osd_obj2dev(obj);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_device *osd = osd_obj2dev(obj);
loff_t _pos = pos, max = 0;
struct osd_thandle *oh;
- uint64_t oid;
- ENTRY;
+ uint64_t oid;
+ ENTRY;
oh = container_of(th, struct osd_thandle, ot_super);
/* in some cases declare can race with creation (e.g. llog)
* and we need to wait till the object is initialized. Notice
* LOHA_EXISTS is supposed to be the last step in the
- * initialization */
+ * initialization
+ */
/* size change (in dnode) will be declared by dmu_tx_hold_write() */
if (dt_object_exists(dt))
/* XXX: we still lack append declaration support in ZFS;
* -1 means append, which is used mostly by llog; llog
- * can grow upto LLOG_MIN_CHUNK_SIZE*8 records */
+ * can grow up to LLOG_MIN_CHUNK_SIZE*8 records
+ */
max = max_t(loff_t, 256 * 8 * LLOG_MIN_CHUNK_SIZE,
obj->oo_attr.la_size + (2 << 20));
if (pos == -1)
/* dt_declare_write() is usually called for system objects, such
* as llog or last_rcvd files. We needn't enforce quota on those
- * objects, so always set the lqi_space as 0. */
+ * objects, so always set lqi_space to 0.
+ */
RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
obj->oo_attr.la_gid, obj->oo_attr.la_projid,
0, oh, NULL, OSD_QID_BLK));
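
For scale, a worked reading of the max_t() bound above: assuming
LLOG_MIN_CHUNK_SIZE is 8192 bytes (its usual value, not shown in this
patch), 256 * 8 * 8192 = 16 MiB, so the declaration covers the larger of
16 MiB or the current object size plus 2 MiB (2 << 20) of headroom.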
blkid = dbuf_whichblock(obj->oo_dn, 0, offset);
for (i = 0; i < OSD_MAX_DBUFS; i++) {
dmu_buf_impl_t *dbi = (void *)dbs[i];
+
if (!dbs[i])
continue;
if (dbi->db_blkid == blkid)
const struct lu_buf *buf, loff_t *pos,
struct thandle *th)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_device *osd = osd_obj2dev(obj);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_device *osd = osd_obj2dev(obj);
struct osd_thandle *oh;
- uint64_t offset = *pos;
- int rc;
+ uint64_t offset = *pos;
+ int rc;
ENTRY;
-
LASSERT(dt_object_exists(dt));
LASSERT(obj->oo_dn);
write_unlock(&obj->oo_attr_lock);
/* osd_object_sa_update() will be copying directly from oo_attr
* into dbuf. any update within a single txg will copy the
- * most actual */
+ * most recent value
+ */
rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd),
&obj->oo_attr.la_size, 8, oh);
if (unlikely(rc))
{
struct osd_object *obj = osd_dt_obj(dt);
struct osd_device *osd = osd_obj2dev(obj);
- unsigned long ptr;
- int i;
+ unsigned long ptr;
+ int i;
LASSERT(dt_object_exists(dt));
LASSERT(obj->oo_dn);
atomic_dec(&osd->od_zerocopy_pin);
} else if (lnb[i].lnb_data != NULL) {
int j, apages, abufsz;
+
abufsz = arc_buf_size(lnb[i].lnb_data);
apages = abufsz >> PAGE_SHIFT;
/* these references to pages must be invalidated
- * to prevent access in osd_bufs_put() */
+ * to prevent access in osd_bufs_put()
+ */
for (j = 0; j < apages; j++)
lnb[i + j].lnb_page = NULL;
dmu_return_arcbuf(lnb[i].lnb_data);
lnb->lnb_page = kmem_to_page(dbp[i]->db_data +
bufoff);
/* mark just a single slot: we need this
- * reference to dbuf to be released once */
+ * reference to dbuf to be released once
+ */
lnb->lnb_data = dbf;
dbf = NULL;
if (drop_cache)
dbuf_set_pending_evict(dbp[i]);
- /* steal dbuf so dmu_buf_rele_array() can't release
- * it */
+ /* steal dbuf so dmu_buf_rele_array() can't free it */
dbp[i] = NULL;
}
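
The stealing idiom above, in isolation: dmu_buf_rele_array() only releases
non-NULL entries, so clearing a slot keeps that single dbuf held for a
later dmu_buf_rele() by the caller. A schematic sketch (FTAG stands in for
whatever tag the array was held with):

	dmu_buf_t *keep = dbp[i];

	dbp[i] = NULL;				/* skip this slot... */
	dmu_buf_rele_array(dbp, numbufs, FTAG);	/* ...release the rest */
	/* later, when really done: dmu_buf_rele(keep, FTAG); */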
int maxlnb)
{
struct osd_device *osd = osd_obj2dev(obj);
- int poff, plen, off_in_block, sz_in_block;
- int rc, i = 0, npages = 0;
+ int poff, plen, off_in_block, sz_in_block;
+ int rc, i = 0, npages = 0;
dnode_t *dn = obj->oo_dn;
arc_buf_t *abuf;
uint32_t bs = dn->dn_datablksz;
- ENTRY;
+ ENTRY;
/*
* currently only full blocks are subject to the zerocopy approach:
* so that we're sure nobody is trying to update the same block
atomic_inc(&osd->od_zerocopy_loan);
/* go over the pages the arcbuf contains, put them as
- * local niobufs for ptlrpc's bulks */
+ * local niobufs for ptlrpc's bulks
+ */
while (sz_in_block > 0) {
plen = min_t(int, sz_in_block, PAGE_SIZE);
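
For context, a condensed sketch of the zerocopy loan lifecycle this loop is
part of. dmu_request_arcbuf(), dmu_assign_arcbuf() and dmu_return_arcbuf()
are the real DMU entry points; everything else here is schematic:

	/* read side: loan a buffer covering one full block */
	abuf = dmu_request_arcbuf(&dn->dn_bonus->db, bs);
	/* ... expose abuf->b_data pages as lnb[] for the bulk ... */

	/* commit side: hand the filled buffer back to ZFS ... */
	dmu_assign_arcbuf(&dn->dn_bonus->db, offset, abuf, tx);
	/* ... or, on error, return the loan untouched instead */
	dmu_return_arcbuf(abuf);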
int maxlnb, enum dt_bufs_type rw)
{
struct osd_object *obj = osd_dt_obj(dt);
- int rc;
+ int rc;
down_read(&obj->oo_guard);
struct niobuf_local *lnb, int npages,
struct thandle *th)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_device *osd = osd_obj2dev(obj);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_device *osd = osd_obj2dev(obj);
struct osd_thandle *oh;
- uint64_t offset = 0;
- uint32_t size = 0;
+ uint64_t offset = 0;
+ uint32_t size = 0;
uint32_t blksz = obj->oo_dn->dn_datablksz;
- int i, rc;
+ int i, rc;
bool synced = false;
- long long space = 0;
- struct page *last_page = NULL;
- unsigned long discont_pages = 0;
+ long long space = 0;
+ struct page *last_page = NULL;
+ unsigned long discont_pages = 0;
enum osd_quota_local_flags local_flags = 0;
enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
- ENTRY;
+ ENTRY;
LASSERT(dt_object_exists(dt));
LASSERT(obj->oo_dn);
/* ENOSPC, network RPC error, etc.
* We don't want to book space for pages which will be
* skipped in osd_write_commit(). Hence we skip pages
- * with lnb_rc != 0 here too */
+ * with lnb_rc != 0 here too
+ */
continue;
/* ignore quota for the whole request if any page is from
* client cache or written by root.
*
* XXX we could handle this on a per-lnb basis as done by
- * grant. */
+ * grant.
+ */
if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
(lnb[i].lnb_flags & OBD_BRW_SYS_RESOURCE) ||
!(lnb[i].lnb_flags & OBD_BRW_SYNC))
* indirect blocks and just use as a rough estimate the worst
* case where the old space is being held by a snapshot. Quota
* overrun will be adjusted once the operation is committed, if
- * required. */
+ * required.
+ */
space += osd_roundup2blocksz(size, offset, blksz);
offset = lnb[i].lnb_file_offset;
space += osd_roundup2blocksz(size, offset, blksz);
}
- /* backend zfs filesystem might be configured to store multiple data
- * copies */
+ /* backend zfs FS might be configured to store multiple data copies */
space *= osd->od_os->os_copies;
space = toqb(space);
CDEBUG(D_QUOTA, "writing %d pages, reserving %lldK of quota space\n",
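
A worked example of this estimate, assuming a 128K blksz and the standard
kernel toqb() macro (which rounds bytes up to 1 KiB quota blocks): one
dirty 4K page rounds up to a full 128K block; with os_copies = 2 that
books 256K of raw space, i.e. toqb(262144) = 256 quota blocks, matching
the %lldK units in the CDEBUG above.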
/* we only need to store the overquota flags in the first lnb for
* now; once we support multiple-object BRW, this code needs to be
- * revised. */
+ * revised.
+ */
if (local_flags & QUOTA_FL_OVER_USRQUOTA)
lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
if (local_flags & QUOTA_FL_OVER_GRPQUOTA)
static int osd_grow_blocksize(struct osd_object *obj, struct osd_thandle *oh,
uint64_t start, uint64_t end)
{
- struct osd_device *osd = osd_obj2dev(obj);
+ struct osd_device *osd = osd_obj2dev(obj);
dnode_t *dn = obj->oo_dn;
- uint32_t blksz;
- int rc = 0;
+ uint32_t blksz;
+ int rc = 0;
ENTRY;
-
if (dn->dn_maxblkid > 0) /* can't change block size */
GOTO(out, rc);
GOTO(out_unlock, rc);
/* now ZFS can support up to 16MB block size, and if the write
- * is sequential, it just increases the block size gradually */
+ * is sequential, it just increases the block size gradually
+ */
if (start <= blksz) { /* sequential */
blksz = (uint32_t)min_t(uint64_t, osd->od_max_blksz, end);
} else { /* sparse, pick a block size by write region */
blksz, 0, oh->ot_tx);
LASSERT(ergo(rc == 0, dn->dn_datablksz >= blksz));
if (rc < 0)
- CDEBUG(D_INODE, "object "DFID": change block size"
- "%u -> %u error rc = %d\n",
+ CDEBUG(D_INODE,
+ "object "DFID": change block size %u -> %u error: rc = %d\n",
PFID(lu_object_fid(&obj->oo_dt.do_lu)),
dn->dn_datablksz, blksz, rc);
}
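
A concrete reading of the policy above, with assumed values: for a fresh
object (dn_maxblkid == 0) whose blksz is currently 128K and od_max_blksz
is 1M, a write ending at 512K that starts at or below 128K is treated as
sequential and grows the block size to min(1M, 512K) = 512K, while a write
landing entirely past the current block size is treated as sparse and
sized by the write region instead. Once dn_maxblkid > 0 the block size is
frozen, hence the early GOTO at the top of the function.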
struct niobuf_local *lnb, int npages,
struct thandle *th, __u64 user_size)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_device *osd = osd_obj2dev(obj);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_device *osd = osd_obj2dev(obj);
struct osd_thandle *oh;
- uint64_t new_size = 0;
- int i, abufsz, rc = 0, drop_cache = 0;
- unsigned long iosize = 0;
- ENTRY;
+ uint64_t new_size = 0;
+ int i, abufsz, rc = 0, drop_cache = 0;
+ unsigned long iosize = 0;
+ ENTRY;
LASSERT(dt_object_exists(dt));
LASSERT(obj->oo_dn);
for (i = 0; i < npages; i++) {
CDEBUG(D_INODE, "write %u bytes at %u\n",
- (unsigned) lnb[i].lnb_len,
- (unsigned) lnb[i].lnb_file_offset);
+ (unsigned int) lnb[i].lnb_len,
+ (unsigned int) lnb[i].lnb_file_offset);
if (lnb[i].lnb_rc) {
/* ENOSPC, network RPC error, etc.
* Unlike ldiskfs, zfs allocates new blocks on rewrite,
- * so we skip this page if lnb_rc is set to -ENOSPC */
+ * so we skip this page if lnb_rc is set to -ENOSPC
+ */
CDEBUG(D_INODE, "obj "DFID": skipping lnb[%u]: rc=%d\n",
PFID(lu_object_fid(&dt->do_lu)), i,
lnb[i].lnb_rc);
abufsz = lnb[i].lnb_len; /* to drop cache below */
} else if (lnb[i].lnb_data) {
int j, apages;
+
LASSERT(((unsigned long)lnb[i].lnb_data & 1) == 0);
/* buffer loaned for zerocopy, try to use it.
* notice that dmu_assign_arcbuf() is smart
* enough to recognize changed blocksize
- * in this case it fallbacks to dmu_write() */
+ * in this case it falls back to dmu_write()
+ */
abufsz = arc_buf_size(lnb[i].lnb_data);
LASSERT(abufsz & PAGE_MASK);
apages = abufsz >> PAGE_SHIFT;
LASSERT(i + apages <= npages);
/* these references to pages must be invalidated
- * to prevent access in osd_bufs_put() */
+ * to prevent access in osd_bufs_put()
+ */
for (j = 0; j < apages; j++)
lnb[i + j].lnb_page = NULL;
dmu_assign_arcbuf(&obj->oo_dn->dn_bonus->db,
lnb[i].lnb_file_offset,
lnb[i].lnb_data, oh->ot_tx);
/* drop the reference, otherwise osd_put_bufs()
- * will be releasing it - bad! */
+ * will be releasing it - bad!
+ */
lnb[i].lnb_data = NULL;
atomic_dec(&osd->od_zerocopy_loan);
iosize += abufsz;
} else {
/* we don't want to deal with cache if nothing
- * has been send to ZFS at this step */
+ * has been sent to ZFS at this step
+ */
continue;
}
/* we have to mark dbufs for eviction here because
* dmu_assign_arcbuf() may create a new dbuf for
- * loaned abuf */
+ * a loaned abuf
+ */
osd_evict_dbufs_after_write(obj, lnb[i].lnb_file_offset,
abufsz);
}
/* no pages to write, no transno is needed */
th->th_local = 1;
/* it is important to return 0 even when all lnb_rc == -ENOSPC
- * since ofd_commitrw_write() retries several times on ENOSPC */
+ * since ofd_commitrw_write() retries several times on ENOSPC
+ */
up_read(&obj->oo_guard);
record_end_io(osd, WRITE, 0, 0, 0);
RETURN(0);
write_unlock(&obj->oo_attr_lock);
/* osd_object_sa_update() will be copying directly from
* oo_attr into dbuf. any update within a single txg will copy
- * the most actual */
+ * the most recent value
+ */
rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd),
&obj->oo_attr.la_size, 8, oh);
} else {
struct niobuf_local *lnb, int npages)
{
struct osd_object *obj = osd_dt_obj(dt);
- int i;
- loff_t eof;
+ int i;
+ loff_t eof;
LASSERT(dt_object_exists(dt));
LASSERT(obj->oo_dn);
uint64_t size = obj->oo_attr.la_size;
int rc = 0;
- /* Assert that the transaction has been assigned to a
- transaction group. */
+ /* Confirm the transaction has been assigned to a transaction group */
LASSERT(tx->tx_txg != 0);
- /*
- * Nothing to do if file already at desired length.
- */
+ /* Nothing to do if file already at desired length. */
if (len == DMU_OBJECT_END && size == off)
return 0;
static int osd_punch(const struct lu_env *env, struct dt_object *dt,
__u64 start, __u64 end, struct thandle *th)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_device *osd = osd_obj2dev(obj);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_device *osd = osd_obj2dev(obj);
struct osd_thandle *oh;
- __u64 len;
- int rc = 0;
- ENTRY;
+ __u64 len;
+ int rc = 0;
+ ENTRY;
LASSERT(dt_object_exists(dt));
LASSERT(osd_invariant(obj));
struct osd_object *obj = osd_dt_obj(dt);
struct osd_device *osd = osd_obj2dev(obj);
struct osd_thandle *oh;
- __u64 len;
- ENTRY;
+ __u64 len;
+ ENTRY;
oh = container_of(handle, struct osd_thandle, ot_super);
read_lock(&obj->oo_attr_lock);
static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
__u64 start, __u64 end, enum lu_ladvise_type advice)
{
- int rc;
- ENTRY;
+ int rc;
+ ENTRY;
switch (advice) {
default:
rc = -ENOTSUPP;
__u64 start, __u64 end, int mode, struct thandle *th)
{
int rc = -EOPNOTSUPP;
- ENTRY;
+ ENTRY;
/*
* space preallocation is not supported for ZFS
* Returns -EOPNOTSUPP for now
int mode, struct thandle *th)
{
int rc = -EOPNOTSUPP;
- ENTRY;
+ ENTRY;
/*
* space preallocation is not supported for ZFS
* Returns -EOPNOTSUPP for now
boolean_t hole = whence == SEEK_HOLE;
ENTRY;
-
LASSERT(dt_object_exists(dt));
LASSERT(osd_invariant(obj));
LASSERT(offset >= 0);
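
For reference, a hypothetical sketch (not necessarily this file's actual
implementation) of answering SEEK_HOLE/SEEK_DATA from the DMU;
dmu_offset_next() is a real DMU call that scans block pointers for the
next hole or data block and returns positive errnos per ZFS convention:

	uint64_t off = offset;
	int rc;

	rc = dmu_offset_next(osd->od_os, obj->oo_dn->dn_object, hole, &off);
	if (rc == ESRCH)
		/* nothing found: a hole query resolves to EOF,
		 * a data query has nothing past this offset */
		RETURN(hole ? obj->oo_attr.la_size : -ENXIO);
	RETURN(rc == 0 ? off : -rc);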