struct niobuf_local {
__u64 lnb_file_offset;
__u32 lnb_page_offset;
- __u32 len;
- __u32 flags;
- struct page *page;
- struct dentry *dentry;
- int rc;
+ __u32 lnb_len; /* byte length of I/O in this buffer; <= PAGE_CACHE_SIZE */
+ __u32 lnb_flags; /* OBD_BRW_* flags (ASYNC, NOQUOTA, OVER_*QUOTA, ...) */
+ struct page *lnb_page; /* page backing this buffer */
+ void *lnb_data; /* OSD-private data; dbuf/arcbuf reference in the ZFS OSD */
+ int lnb_rc; /* bytes of valid data transferred, or negative errno */
};
#define LUSTRE_FLD_NAME "fld"
{
CDEBUG(D_RPCTRACE,
"niobuf_local: file_offset="LPD64", len=%d, page=%p, rc=%d\n",
- nb->lnb_file_offset, nb->len, nb->page, nb->rc);
+ nb->lnb_file_offset, nb->lnb_len, nb->lnb_page, nb->lnb_rc);
CDEBUG(D_RPCTRACE, "nb->page: index = %ld\n",
- nb->page ? page_index(nb->page) : -1);
+ nb->lnb_page ? page_index(nb->lnb_page) : -1);
}
EXPORT_SYMBOL(dump_lniobuf);
return -EINVAL;
res->lnb_file_offset = offset;
- res->len = plen;
- LASSERT((res->lnb_file_offset & ~CFS_PAGE_MASK) + res->len <=
- PAGE_CACHE_SIZE);
+ res->lnb_len = plen;
+ LASSERT((res->lnb_file_offset & ~CFS_PAGE_MASK) +
+ res->lnb_len <= PAGE_CACHE_SIZE);
if (ispersistent &&
((res->lnb_file_offset >> PAGE_CACHE_SHIFT) <
ECHO_PERSISTENT_PAGES)) {
- res->page =
+ res->lnb_page =
echo_persistent_pages[res->lnb_file_offset >>
PAGE_CACHE_SHIFT];
/* Take extra ref so __free_pages() can be called OK */
- get_page (res->page);
+ get_page(res->lnb_page);
} else {
- OBD_PAGE_ALLOC(res->page, gfp_mask);
- if (res->page == NULL) {
- CERROR("can't get page for id " DOSTID"\n",
- POSTID(&obj->ioo_oid));
- return -ENOMEM;
- }
- }
+ OBD_PAGE_ALLOC(res->lnb_page, gfp_mask);
+ if (res->lnb_page == NULL) {
+ CERROR("can't get page for id " DOSTID"\n",
+ POSTID(&obj->ioo_oid));
+ return -ENOMEM;
+ }
+ }
- CDEBUG(D_PAGE, "$$$$ get page %p @ "LPU64" for %d\n",
- res->page, res->lnb_file_offset, res->len);
+ CDEBUG(D_PAGE, "$$$$ get page %p @ "LPU64" for %d\n",
+ res->lnb_page, res->lnb_file_offset, res->lnb_len);
- if (cmd & OBD_BRW_READ)
- res->rc = res->len;
+ if (cmd & OBD_BRW_READ)
+ res->lnb_rc = res->lnb_len;
if (debug_setup)
- echo_page_debug_setup(res->page, cmd,
+ echo_page_debug_setup(res->lnb_page, cmd,
ostid_id(&obj->ioo_oid),
- res->lnb_file_offset, res->len);
+ res->lnb_file_offset,
+ res->lnb_len);
offset += plen;
len -= plen;
int i;
for (i = 0; i < count; i++, (*pgs) ++, res++) {
- struct page *page = res->page;
+ struct page *page = res->lnb_page;
void *addr;
if (page == NULL) {
addr = kmap(page);
CDEBUG(D_PAGE, "$$$$ use page %p, addr %p@"LPU64"\n",
- res->page, addr, res->lnb_file_offset);
+ res->lnb_page, addr, res->lnb_file_offset);
if (verify) {
int vrc = echo_page_debug_check(page,
ostid_id(&obj->ioo_oid),
res->lnb_file_offset,
- res->len);
+ res->lnb_len);
/* check all the pages always */
if (vrc != 0 && rc == 0)
rc = vrc;
*/
CERROR("cleaning up %u pages (%d obdos)\n", *pages, objcount);
for (i = 0; i < *pages; i++) {
- kunmap(res[i].page);
- /* NB if this is a persistent page, __free_pages will just
- * lose the extra ref gained above */
- OBD_PAGE_FREE(res[i].page);
- res[i].page = NULL;
+ kunmap(res[i].lnb_page);
+ /* NB if this is a persistent page, __free_pages will just
+ * lose the extra ref gained above */
+ OBD_PAGE_FREE(res[i].lnb_page);
+ res[i].lnb_page = NULL;
atomic_dec(&obd->u.echo.eo_prep);
- }
+ }
- return rc;
+ return rc;
}
static int echo_commitrw(const struct lu_env *env, int cmd,
niocount - pgs - 1, objcount);
while (pgs < niocount) {
- struct page *page = res[pgs++].page;
+ struct page *page = res[pgs++].lnb_page;
if (page == NULL)
continue;
LASSERT(lpages == npages);
for (i = 0; i < lpages; i++) {
- struct page *page = lnb[i].page;
+ struct page *page = lnb[i].lnb_page;
- /* read past eof? */
- if (page == NULL && lnb[i].rc == 0)
- continue;
+ /* read past eof? */
+ if (page == NULL && lnb[i].lnb_rc == 0)
+ continue;
- if (async)
- lnb[i].flags |= OBD_BRW_ASYNC;
+ if (async)
+ lnb[i].lnb_flags |= OBD_BRW_ASYNC;
if (ostid_id(&oa->o_oi) == ECHO_PERSISTENT_OBJID ||
(oa->o_valid & OBD_MD_FLFLAGS) == 0 ||
#define rnb_offset offset
#define rnb_flags flags
#define rnb_len len
-/* the same for niobuf_local */
-#define lnb_flags flags
-#define lnb_rc rc
#endif /* _OFD_INTERNAL_H */
plen = len;
lnb->lnb_file_offset = offset;
lnb->lnb_page_offset = poff;
- lnb->len = plen;
- /* lb->flags = rnb->flags; */
- lnb->flags = 0;
- lnb->page = NULL;
- lnb->rc = 0;
+ lnb->lnb_len = plen;
+ /* lnb->lnb_flags = rnb->flags; */
+ lnb->lnb_flags = 0;
+ lnb->lnb_page = NULL;
+ lnb->lnb_rc = 0;
LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
(long long) len);
osd_map_remote_to_local(pos, len, &npages, lnb);
for (i = 0; i < npages; i++, lnb++) {
- lnb->page = osd_get_page(d, lnb->lnb_file_offset, rw);
- if (lnb->page == NULL)
- GOTO(cleanup, rc = -ENOMEM);
-
- /* DLM locking protects us from write and truncate competing
- * for same region, but truncate can leave dirty page in the
- * cache. it's possible the writeout on a such a page is in
- * progress when we access it. it's also possible that during
- * this writeout we put new (partial) data, but then won't
- * be able to proceed in filter_commitrw_write(). thus let's
- * just wait for writeout completion, should be rare enough.
- * -bzzz */
- wait_on_page_writeback(lnb->page);
- BUG_ON(PageWriteback(lnb->page));
+ lnb->lnb_page = osd_get_page(d, lnb->lnb_file_offset, rw);
+ if (lnb->lnb_page == NULL)
+ GOTO(cleanup, rc = -ENOMEM);
+
+ /* DLM locking protects us from write and truncate competing
+ * for same region, but truncate can leave dirty page in the
+ * cache. it's possible the writeout on a such a page is in
+ * progress when we access it. it's also possible that during
+ * this writeout we put new (partial) data, but then won't
+ * be able to proceed in filter_commitrw_write(). thus let's
+ * just wait for writeout completion, should be rare enough.
+ * -bzzz */
+ wait_on_page_writeback(lnb->lnb_page);
+ BUG_ON(PageWriteback(lnb->lnb_page));
lu_object_get(&d->do_lu);
}
}
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
- struct niobuf_local *lnb, int npages)
+ struct niobuf_local *lnb, int npages)
{
- int i;
+ int i;
- for (i = 0; i < npages; i++) {
- if (lnb[i].page == NULL)
- continue;
- LASSERT(PageLocked(lnb[i].page));
- unlock_page(lnb[i].page);
- page_cache_release(lnb[i].page);
- lu_object_put(env, &dt->do_lu);
- lnb[i].page = NULL;
- }
- RETURN(0);
+ for (i = 0; i < npages; i++) {
+ if (lnb[i].lnb_page == NULL)
+ continue;
+ LASSERT(PageLocked(lnb[i].lnb_page));
+ unlock_page(lnb[i].lnb_page);
+ page_cache_release(lnb[i].lnb_page);
+ lu_object_put(env, &dt->do_lu);
+ lnb[i].lnb_page = NULL;
+ }
+
+ RETURN(0);
}
#ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
do_gettimeofday(&start);
for (i = 0; i < npages; i++) {
- if (cache == 0)
- generic_error_remove_page(inode->i_mapping,
- lnb[i].page);
+ if (cache == 0)
+ generic_error_remove_page(inode->i_mapping,
+ lnb[i].lnb_page);
- /*
- * till commit the content of the page is undefined
- * we'll set it uptodate once bulk is done. otherwise
- * subsequent reads can access non-stable data
- */
- ClearPageUptodate(lnb[i].page);
+ /*
+ * till commit the content of the page is undefined
+ * we'll set it uptodate once bulk is done. otherwise
+ * subsequent reads can access non-stable data
+ */
+ ClearPageUptodate(lnb[i].lnb_page);
- if (lnb[i].len == PAGE_CACHE_SIZE)
- continue;
+ if (lnb[i].lnb_len == PAGE_CACHE_SIZE)
+ continue;
- if (maxidx >= lnb[i].page->index) {
- osd_iobuf_add_page(iobuf, lnb[i].page);
- } else {
- long off;
- char *p = kmap(lnb[i].page);
+ if (maxidx >= lnb[i].lnb_page->index) {
+ osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
+ } else {
+ long off;
+ char *p = kmap(lnb[i].lnb_page);
off = lnb[i].lnb_page_offset;
if (off)
memset(p, 0, off);
- off = (lnb[i].lnb_page_offset + lnb[i].len) &
+ off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
~CFS_PAGE_MASK;
- if (off)
+ if (off)
memset(p + off, 0, PAGE_CACHE_SIZE - off);
- kunmap(lnb[i].page);
+ kunmap(lnb[i].lnb_page);
}
}
do_gettimeofday(&end);
/* calculate number of extents (probably better to pass nb) */
for (i = 0; i < npages; i++) {
if (i && lnb[i].lnb_file_offset !=
- lnb[i - 1].lnb_file_offset + lnb[i - 1].len)
+ lnb[i - 1].lnb_file_offset + lnb[i - 1].lnb_len)
extents++;
if (!osd_is_mapped(inode, lnb[i].lnb_file_offset))
*
* XXX we could handle this on per-lnb basis as done by
* grant. */
- if ((lnb[i].flags & OBD_BRW_NOQUOTA) ||
- (lnb[i].flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
+ if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
+ (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
OBD_BRW_FROM_GRANT)
ignore_quota = true;
}
oh->ot_credits += newblocks;
/* make sure the over quota flags were not set */
- lnb[0].flags &= ~(OBD_BRW_OVER_USRQUOTA | OBD_BRW_OVER_GRPQUOTA);
+ lnb[0].lnb_flags &= ~(OBD_BRW_OVER_USRQUOTA | OBD_BRW_OVER_GRPQUOTA);
rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
quota_space, oh, true, true, &flags,
* now, once we support multiple objects BRW, this code needs be
* revised. */
if (flags & QUOTA_FL_OVER_USRQUOTA)
- lnb[0].flags |= OBD_BRW_OVER_USRQUOTA;
+ lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
if (flags & QUOTA_FL_OVER_GRPQUOTA)
- lnb[0].flags |= OBD_BRW_OVER_GRPQUOTA;
+ lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
RETURN(rc);
}
ll_vfs_dq_init(inode);
for (i = 0; i < npages; i++) {
- if (lnb[i].rc == -ENOSPC &&
+ if (lnb[i].lnb_rc == -ENOSPC &&
osd_is_mapped(inode, lnb[i].lnb_file_offset)) {
- /* Allow the write to proceed if overwriting an
- * existing block */
- lnb[i].rc = 0;
- }
+ /* Allow the write to proceed if overwriting an
+ * existing block */
+ lnb[i].lnb_rc = 0;
+ }
- if (lnb[i].rc) { /* ENOSPC, network RPC error, etc. */
- CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
- lnb[i].rc);
- LASSERT(lnb[i].page);
- generic_error_remove_page(inode->i_mapping,lnb[i].page);
- continue;
- }
+ if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
+ CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
+ lnb[i].lnb_rc);
+ LASSERT(lnb[i].lnb_page);
+ generic_error_remove_page(inode->i_mapping,
+ lnb[i].lnb_page);
+ continue;
+ }
- LASSERT(PageLocked(lnb[i].page));
- LASSERT(!PageWriteback(lnb[i].page));
+ LASSERT(PageLocked(lnb[i].lnb_page));
+ LASSERT(!PageWriteback(lnb[i].lnb_page));
- if (lnb[i].lnb_file_offset + lnb[i].len > isize)
- isize = lnb[i].lnb_file_offset + lnb[i].len;
+ if (lnb[i].lnb_file_offset + lnb[i].lnb_len > isize)
+ isize = lnb[i].lnb_file_offset + lnb[i].lnb_len;
- /*
- * Since write and truncate are serialized by oo_sem, even
- * partial-page truncate should not leave dirty pages in the
- * page cache.
- */
- LASSERT(!PageDirty(lnb[i].page));
+ /*
+ * Since write and truncate are serialized by oo_sem, even
+ * partial-page truncate should not leave dirty pages in the
+ * page cache.
+ */
+ LASSERT(!PageDirty(lnb[i].lnb_page));
- SetPageUptodate(lnb[i].page);
+ SetPageUptodate(lnb[i].lnb_page);
- osd_iobuf_add_page(iobuf, lnb[i].page);
+ osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
}
if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
if (unlikely(rc != 0)) {
/* if write fails, we should drop pages from the cache */
for (i = 0; i < npages; i++) {
- if (lnb[i].page == NULL)
- continue;
- LASSERT(PageLocked(lnb[i].page));
- generic_error_remove_page(inode->i_mapping,lnb[i].page);
- }
- }
+ if (lnb[i].lnb_page == NULL)
+ continue;
+ LASSERT(PageLocked(lnb[i].lnb_page));
+ generic_error_remove_page(inode->i_mapping,
+ lnb[i].lnb_page);
+ }
+ }
- RETURN(rc);
+ RETURN(rc);
}
static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
for (i = 0; i < npages; i++) {
if (i_size_read(inode) <= lnb[i].lnb_file_offset)
- /* If there's no more data, abort early.
- * lnb->rc == 0, so it's easy to detect later. */
- break;
+ /* If there's no more data, abort early.
+ * lnb->lnb_rc == 0, so it's easy to detect later. */
+ break;
- if (i_size_read(inode) <
- lnb[i].lnb_file_offset + lnb[i].len - 1)
- lnb[i].rc = i_size_read(inode) - lnb[i].lnb_file_offset;
- else
- lnb[i].rc = lnb[i].len;
- m += lnb[i].len;
+ if (i_size_read(inode) <
+ lnb[i].lnb_file_offset + lnb[i].lnb_len - 1)
+ lnb[i].lnb_rc = i_size_read(inode) -
+ lnb[i].lnb_file_offset;
+ else
+ lnb[i].lnb_rc = lnb[i].lnb_len;
+ m += lnb[i].lnb_len;
- if (PageUptodate(lnb[i].page)) {
+ if (PageUptodate(lnb[i].lnb_page)) {
cache_hits++;
} else {
cache_misses++;
- osd_iobuf_add_page(iobuf, lnb[i].page);
+ osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
}
if (cache == 0)
- generic_error_remove_page(inode->i_mapping,lnb[i].page);
+ generic_error_remove_page(inode->i_mapping,
+ lnb[i].lnb_page);
}
do_gettimeofday(&end);
timediff = cfs_timeval_sub(&end, &start, NULL);
LASSERT(obj->oo_db);
for (i = 0; i < npages; i++) {
- if (lnb[i].page == NULL)
+ if (lnb[i].lnb_page == NULL)
continue;
- if (lnb[i].page->mapping == (void *)obj) {
+ if (lnb[i].lnb_page->mapping == (void *)obj) {
/* this is anonymous page allocated for copy-write */
- lnb[i].page->mapping = NULL;
- __free_page(lnb[i].page);
+ lnb[i].lnb_page->mapping = NULL;
+ __free_page(lnb[i].lnb_page);
atomic_dec(&osd->od_zerocopy_alloc);
} else {
/* see comment in osd_bufs_get_read() */
- ptr = (unsigned long)lnb[i].dentry;
+ ptr = (unsigned long)lnb[i].lnb_data;
if (ptr & 1UL) {
ptr &= ~1UL;
dmu_buf_rele((void *)ptr, osd_zerocopy_tag);
atomic_dec(&osd->od_zerocopy_pin);
- } else if (lnb[i].dentry != NULL) {
- dmu_return_arcbuf((void *)lnb[i].dentry);
+ } else if (lnb[i].lnb_data != NULL) {
+ dmu_return_arcbuf(lnb[i].lnb_data);
atomic_dec(&osd->od_zerocopy_loan);
}
}
- lnb[i].page = NULL;
- lnb[i].dentry = NULL;
+ lnb[i].lnb_page = NULL;
+ lnb[i].lnb_data = NULL;
}
return 0;
thispage -= bufoff & (PAGE_CACHE_SIZE - 1);
thispage = min(tocpy, thispage);
- lnb->rc = 0;
+ lnb->lnb_rc = 0;
lnb->lnb_file_offset = off;
lnb->lnb_page_offset = bufoff & ~CFS_PAGE_MASK;
- lnb->len = thispage;
- lnb->page = kmem_to_page(dbp[i]->db_data +
- bufoff);
+ lnb->lnb_len = thispage;
+ lnb->lnb_page = kmem_to_page(dbp[i]->db_data +
+ bufoff);
/* mark just a single slot: we need this
* reference to dbuf to be release once */
- lnb->dentry = dbf;
+ lnb->lnb_data = dbf;
dbf = NULL;
tocpy -= thispage;
lnb[i].lnb_file_offset = off;
lnb[i].lnb_page_offset = 0;
- lnb[i].len = plen;
- lnb[i].rc = 0;
+ lnb[i].lnb_len = plen;
+ lnb[i].lnb_rc = 0;
if (sz_in_block == bs)
- lnb[i].dentry = (void *)abuf;
+ lnb[i].lnb_data = abuf;
else
- lnb[i].dentry = NULL;
+ lnb[i].lnb_data = NULL;
/* this one is not supposed to fail */
- lnb[i].page = kmem_to_page(abuf->b_data +
+ lnb[i].lnb_page = kmem_to_page(abuf->b_data +
off_in_block);
- LASSERT(lnb[i].page);
+ LASSERT(lnb[i].lnb_page);
lprocfs_counter_add(osd->od_stats,
LPROC_OSD_ZEROCOPY_IO, 1);
lnb[i].lnb_file_offset = off;
lnb[i].lnb_page_offset = 0;
- lnb[i].len = plen;
- lnb[i].rc = 0;
- lnb[i].dentry = NULL;
+ lnb[i].lnb_len = plen;
+ lnb[i].lnb_rc = 0;
+ lnb[i].lnb_data = NULL;
- lnb[i].page = alloc_page(OSD_GFP_IO);
- if (unlikely(lnb[i].page == NULL))
+ lnb[i].lnb_page = alloc_page(OSD_GFP_IO);
+ if (unlikely(lnb[i].lnb_page == NULL))
GOTO(out_err, rc = -ENOMEM);
- LASSERT(lnb[i].page->mapping == NULL);
- lnb[i].page->mapping = (void *)obj;
+ LASSERT(lnb[i].lnb_page->mapping == NULL);
+ lnb[i].lnb_page->mapping = (void *)obj;
atomic_inc(&osd->od_zerocopy_alloc);
lprocfs_counter_add(osd->od_stats,
oh = container_of0(th, struct osd_thandle, ot_super);
for (i = 0; i < npages; i++) {
- if (lnb[i].rc)
+ if (lnb[i].lnb_rc)
/* ENOSPC, network RPC error, etc.
* We don't want to book space for pages which will be
* skipped in osd_write_commit(). Hence we skip pages
*
* XXX we could handle this on per-lnb basis as done by
* grant. */
- if ((lnb[i].flags & OBD_BRW_NOQUOTA) ||
- (lnb[i].flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
+ if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
+ (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
OBD_BRW_FROM_GRANT)
ignore_quota = true;
if (size == 0) {
/* first valid lnb */
offset = lnb[i].lnb_file_offset;
- size = lnb[i].len;
+ size = lnb[i].lnb_len;
continue;
}
if (offset + size == lnb[i].lnb_file_offset) {
/* this lnb is contiguous to the previous one */
- size += lnb[i].len;
+ size += lnb[i].lnb_len;
continue;
}
space += osd_count_not_mapped(obj, offset, size);
offset = lnb[i].lnb_file_offset;
- size = lnb[i].len;
+ size = lnb[i].lnb_len;
}
if (size) {
* now, once we support multiple objects BRW, this code needs be
* revised. */
if (flags & QUOTA_FL_OVER_USRQUOTA)
- lnb[0].flags |= OBD_BRW_OVER_USRQUOTA;
+ lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
if (flags & QUOTA_FL_OVER_GRPQUOTA)
- lnb[0].flags |= OBD_BRW_OVER_GRPQUOTA;
+ lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
RETURN(rc);
}
for (i = 0; i < npages; i++) {
CDEBUG(D_INODE, "write %u bytes at %u\n",
- (unsigned) lnb[i].len,
+ (unsigned) lnb[i].lnb_len,
(unsigned) lnb[i].lnb_file_offset);
- if (lnb[i].rc) {
+ if (lnb[i].lnb_rc) {
/* ENOSPC, network RPC error, etc.
* Unlike ldiskfs, zfs allocates new blocks on rewrite,
* so we skip this page if lnb_rc is set to -ENOSPC */
CDEBUG(D_INODE, "obj "DFID": skipping lnb[%u]: rc=%d\n",
PFID(lu_object_fid(&dt->do_lu)), i,
- lnb[i].rc);
+ lnb[i].lnb_rc);
continue;
}
- if (lnb[i].page->mapping == (void *)obj) {
+ if (lnb[i].lnb_page->mapping == (void *)obj) {
dmu_write(osd->od_objset.os, obj->oo_db->db_object,
- lnb[i].lnb_file_offset, lnb[i].len,
- kmap(lnb[i].page), oh->ot_tx);
- kunmap(lnb[i].page);
- } else if (lnb[i].dentry) {
- LASSERT(((unsigned long)lnb[i].dentry & 1) == 0);
+ lnb[i].lnb_file_offset, lnb[i].lnb_len,
+ kmap(lnb[i].lnb_page), oh->ot_tx);
+ kunmap(lnb[i].lnb_page);
+ } else if (lnb[i].lnb_data) {
+ LASSERT(((unsigned long)lnb[i].lnb_data & 1) == 0);
/* buffer loaned for zerocopy, try to use it.
* notice that dmu_assign_arcbuf() is smart
* enough to recognize changed blocksize
* in this case it fallbacks to dmu_write() */
dmu_assign_arcbuf(obj->oo_db, lnb[i].lnb_file_offset,
- (void *)lnb[i].dentry, oh->ot_tx);
+ lnb[i].lnb_data, oh->ot_tx);
/* drop the reference, otherwise osd_put_bufs()
* will be releasing it - bad! */
- lnb[i].dentry = NULL;
+ lnb[i].lnb_data = NULL;
atomic_dec(&osd->od_zerocopy_loan);
}
- if (new_size < lnb[i].lnb_file_offset + lnb[i].len)
- new_size = lnb[i].lnb_file_offset + lnb[i].len;
+ if (new_size < lnb[i].lnb_file_offset + lnb[i].lnb_len)
+ new_size = lnb[i].lnb_file_offset + lnb[i].lnb_len;
}
if (unlikely(new_size == 0)) {
LASSERT(obj->oo_db);
for (i = 0; i < npages; i++) {
- buf.lb_buf = kmap(lnb[i].page);
- buf.lb_len = lnb[i].len;
+ buf.lb_buf = kmap(lnb[i].lnb_page);
+ buf.lb_len = lnb[i].lnb_len;
offset = lnb[i].lnb_file_offset;
CDEBUG(D_OTHER, "read %u bytes at %u\n",
- (unsigned) lnb[i].len,
+ (unsigned) lnb[i].lnb_len,
(unsigned) lnb[i].lnb_file_offset);
- lnb[i].rc = osd_read(env, dt, &buf, &offset, NULL);
- kunmap(lnb[i].page);
+ lnb[i].lnb_rc = osd_read(env, dt, &buf, &offset, NULL);
+ kunmap(lnb[i].lnb_page);
- if (lnb[i].rc < buf.lb_len) {
+ if (lnb[i].lnb_rc < buf.lb_len) {
/* all subsequent rc should be 0 */
while (++i < npages)
- lnb[i].rc = 0;
+ lnb[i].lnb_rc = 0;
break;
}
}
nob = 0;
for (i = 0; i < npages; i++) {
- int page_rc = local_nb[i].rc;
+ int page_rc = local_nb[i].lnb_rc;
if (page_rc < 0) {
rc = page_rc;
nob += page_rc;
if (page_rc != 0) { /* some data! */
- LASSERT(local_nb[i].page != NULL);
- ptlrpc_prep_bulk_page_nopin(desc, local_nb[i].page,
+ LASSERT(local_nb[i].lnb_page != NULL);
+ ptlrpc_prep_bulk_page_nopin(desc, local_nb[i].lnb_page,
local_nb[i].lnb_page_offset,
page_rc);
}
- if (page_rc != local_nb[i].len) { /* short read */
+ if (page_rc != local_nb[i].lnb_len) { /* short read */
/* All subsequent pages should be 0 */
while (++i < npages)
- LASSERT(local_nb[i].rc == 0);
+ LASSERT(local_nb[i].lnb_rc == 0);
break;
}
}
POSTID(&body->oa.o_oi),
local_nb[0].lnb_file_offset,
local_nb[npages-1].lnb_file_offset +
- local_nb[npages-1].len - 1,
+ local_nb[npages - 1].lnb_len - 1,
client_cksum, server_cksum);
}
/* NB Having prepped, we must commit... */
for (i = 0; i < npages; i++)
- ptlrpc_prep_bulk_page_nopin(desc, local_nb[i].page,
+ ptlrpc_prep_bulk_page_nopin(desc, local_nb[i].lnb_page,
local_nb[i].lnb_page_offset,
- local_nb[i].len);
+ local_nb[i].lnb_len);
rc = sptlrpc_svc_prep_bulk(req, desc);
if (rc != 0)
rcs[i] = 0;
do {
LASSERT(j < npages);
- if (local_nb[j].rc < 0)
- rcs[i] = local_nb[j].rc;
- len -= local_nb[j].len;
+ if (local_nb[j].lnb_rc < 0)
+ rcs[i] = local_nb[j].lnb_rc;
+ len -= local_nb[j].lnb_len;
j++;
} while (len > 0);
LASSERT(len == 0);