/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
- */
-/*
- * Copyright (c) 2011, 2012 Whamcloud, Inc.
- * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Author: Mike Pershin <tappro@whamcloud.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_OSD
#include <lustre_ver.h>
#include <libcfs/libcfs.h>
-#include <lustre_fsfilt.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <obd.h>
#include <obd_class.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
+#include <lustre/lustre_idl.h> /* LLOG_MIN_CHUNK_SIZE definition */
#include "osd_internal.h"
static char *osd_zerocopy_tag = "zerocopy";
+
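+/* record_start_io() and record_end_io() track the number of in-flight
+ * I/Os and feed the od_brw_stats histograms, so that a ZFS-backed OSD
+ * exports brw_stats similar to those of the ldiskfs OSD */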
+static void record_start_io(struct osd_device *osd, int rw, int discont_pages)
+{
+ struct obd_histogram *h = osd->od_brw_stats.hist;
+
+ if (rw == READ) {
+ atomic_inc(&osd->od_r_in_flight);
+ lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
+ atomic_read(&osd->od_r_in_flight));
+ lprocfs_oh_tally(&h[BRW_R_DISCONT_PAGES], discont_pages);
+
+ } else {
+ atomic_inc(&osd->od_w_in_flight);
+ lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
+ atomic_read(&osd->od_w_in_flight));
+ lprocfs_oh_tally(&h[BRW_W_DISCONT_PAGES], discont_pages);
+
+ }
+}
+
+static void record_end_io(struct osd_device *osd, int rw,
+ unsigned long elapsed, int disksize, int npages)
+{
+ struct obd_histogram *h = osd->od_brw_stats.hist;
+
+ if (rw == READ) {
+ atomic_dec(&osd->od_r_in_flight);
+ lprocfs_oh_tally_log2(&h[BRW_R_PAGES], npages);
+ if (disksize > 0)
+ lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], disksize);
+ if (elapsed)
+ lprocfs_oh_tally_log2(&h[BRW_R_IO_TIME], elapsed);
+
+ } else {
+ atomic_dec(&osd->od_w_in_flight);
+ lprocfs_oh_tally_log2(&h[BRW_W_PAGES], npages);
+ if (disksize > 0)
+ lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], disksize);
+ if (elapsed)
+ lprocfs_oh_tally_log2(&h[BRW_W_IO_TIME], elapsed);
+ }
+}
+
static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
- struct lu_buf *buf, loff_t *pos,
- struct lustre_capa *capa)
+ struct lu_buf *buf, loff_t *pos)
{
struct osd_object *obj = osd_dt_obj(dt);
struct osd_device *osd = osd_obj2dev(obj);
uint64_t old_size;
int size = buf->lb_len;
int rc;
+ unsigned long start;
LASSERT(dt_object_exists(dt));
LASSERT(obj->oo_db);
- cfs_read_lock(&obj->oo_attr_lock);
+ start = cfs_time_current();
+
+ read_lock(&obj->oo_attr_lock);
old_size = obj->oo_attr.la_size;
- cfs_read_unlock(&obj->oo_attr_lock);
+ read_unlock(&obj->oo_attr_lock);
 	if (*pos + size > old_size) {
 		if (old_size < *pos)
 			return 0;
 		size = old_size - *pos;
 	}
- rc = -dmu_read(osd->od_objset.os, obj->oo_db->db_object, *pos, size,
+ record_start_io(osd, READ, 0);
+
+ rc = -dmu_read(osd->od_os, obj->oo_db->db_object, *pos, size,
buf->lb_buf, DMU_READ_PREFETCH);
+
+ record_end_io(osd, READ, cfs_time_current() - start, size,
+ size >> PAGE_CACHE_SHIFT);
if (rc == 0) {
rc = size;
*pos += size;
-
- /* XXX: workaround for bug in HEAD: fsfilt_ldiskfs_read() returns
- * requested number of bytes, not actually read ones */
- if (S_ISLNK(obj->oo_dt.do_lu.lo_header->loh_attr))
- rc = buf->lb_len;
}
return rc;
}
static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
- const loff_t size, loff_t pos,
+ const struct lu_buf *buf, loff_t pos,
struct thandle *th)
{
struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_device *osd = osd_obj2dev(obj);
struct osd_thandle *oh;
uint64_t oid;
ENTRY;
dmu_tx_hold_sa_create(oh->ot_tx, ZFS_SA_BASE_ATTR_SIZE);
}
- dmu_tx_hold_write(oh->ot_tx, oid, pos, size);
-
- RETURN(0);
+	/* XXX: ZFS still lacks support for declaring an append.
+	 * pos == -1 means append; it is used mostly by llog, which
+	 * can grow up to LLOG_MIN_CHUNK_SIZE * 8 records */
+ if (pos == -1)
+ pos = max_t(loff_t, 256 * 8 * LLOG_MIN_CHUNK_SIZE,
+ obj->oo_attr.la_size + (2 << 20));
+ dmu_tx_hold_write(oh->ot_tx, oid, pos, buf->lb_len);
+
+	/* dt_declare_write() is usually called for system objects, such
+	 * as llog or last_rcvd files. We needn't enforce quota on those
+	 * objects, so always set lqi_space to 0. */
+ RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
+ obj->oo_attr.la_gid, 0, oh, true, NULL,
+ false));
}
static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
const struct lu_buf *buf, loff_t *pos,
- struct thandle *th, struct lustre_capa *capa,
- int ignore_quota)
+ struct thandle *th, int ignore_quota)
{
struct osd_object *obj = osd_dt_obj(dt);
struct osd_device *osd = osd_obj2dev(obj);
- udmu_objset_t *uos = &osd->od_objset;
struct osd_thandle *oh;
uint64_t offset = *pos;
int rc;
+
ENTRY;
LASSERT(dt_object_exists(dt));
LASSERT(th != NULL);
oh = container_of0(th, struct osd_thandle, ot_super);
- dmu_write(osd->od_objset.os, obj->oo_db->db_object, offset,
+ record_start_io(osd, WRITE, 0);
+
+ dmu_write(osd->od_os, obj->oo_db->db_object, offset,
(uint64_t)buf->lb_len, buf->lb_buf, oh->ot_tx);
- cfs_write_lock(&obj->oo_attr_lock);
+ write_lock(&obj->oo_attr_lock);
if (obj->oo_attr.la_size < offset + buf->lb_len) {
obj->oo_attr.la_size = offset + buf->lb_len;
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
 		/* osd_object_sa_update() will be copying directly from
 		 * oo_attr into the dbuf; any update within a single txg
 		 * will copy the most recent value */
- rc = osd_object_sa_update(obj, SA_ZPL_SIZE(uos),
+ rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd),
&obj->oo_attr.la_size, 8, oh);
if (unlikely(rc))
GOTO(out, rc);
} else {
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
}
*pos += buf->lb_len;
rc = buf->lb_len;
out:
+ record_end_io(osd, WRITE, 0, buf->lb_len,
+ buf->lb_len >> PAGE_CACHE_SHIFT);
+
RETURN(rc);
}
* XXX: for the moment I don't want to use lnb_flags for osd-internal
* purposes as it's not very well defined ...
* instead I use the lowest bit of the address so that:
- * arc buffer: .lnb_obj = abuf (arc we loan for write)
- * dbuf buffer: .lnb_obj = dbuf | 1 (dbuf we get for read)
+ * arc buffer: .lnb_data = abuf (arc we loan for write)
+ * dbuf buffer: .lnb_data = dbuf | 1 (dbuf we get for read)
* copy buffer: .lnb_page->mapping = obj (page we allocate for write)
*
* bzzz, to blame
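+ *
+ * For example, the dbuf reference taken in osd_bufs_get_read() is stored
+ * with the lowest bit set:
+ *     lnb->lnb_data = (void *)((unsigned long)dbuf | 1);
+ * and osd_bufs_put() masks the bit off again before dmu_buf_rele().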
LASSERT(obj->oo_db);
for (i = 0; i < npages; i++) {
- if (lnb[i].page == NULL)
+ if (lnb[i].lnb_page == NULL)
continue;
- if (lnb[i].page->mapping == (void *)obj) {
+ if (lnb[i].lnb_page->mapping == (void *)obj) {
 			/* this is an anonymous page allocated for copy-write */
- lnb[i].page->mapping = NULL;
- __free_page(lnb[i].page);
- cfs_atomic_dec(&osd->od_zerocopy_alloc);
+ lnb[i].lnb_page->mapping = NULL;
+ __free_page(lnb[i].lnb_page);
+ atomic_dec(&osd->od_zerocopy_alloc);
} else {
/* see comment in osd_bufs_get_read() */
- ptr = (unsigned long)lnb[i].dentry;
+ ptr = (unsigned long)lnb[i].lnb_data;
if (ptr & 1UL) {
ptr &= ~1UL;
dmu_buf_rele((void *)ptr, osd_zerocopy_tag);
- cfs_atomic_dec(&osd->od_zerocopy_pin);
- } else if (lnb[i].dentry != NULL) {
- dmu_return_arcbuf((void *)lnb[i].dentry);
- cfs_atomic_dec(&osd->od_zerocopy_loan);
+ atomic_dec(&osd->od_zerocopy_pin);
+ } else if (lnb[i].lnb_data != NULL) {
+ dmu_return_arcbuf(lnb[i].lnb_data);
+ atomic_dec(&osd->od_zerocopy_loan);
}
}
- lnb[i].page = NULL;
- lnb[i].dentry = NULL;
+ lnb[i].lnb_page = NULL;
+ lnb[i].lnb_data = NULL;
}
return 0;
}
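+/* ARC buffers handed out by the DMU may be backed by vmalloc'ed or
+ * kmalloc'ed memory, so pick the matching address-to-page translation */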
-static struct page *kmem_to_page(void *addr)
+static inline struct page *kmem_to_page(void *addr)
{
- struct page *page;
-
- if (kmem_virt(addr))
- page = vmalloc_to_page(addr);
+ if (is_vmalloc_addr(addr))
+ return vmalloc_to_page(addr);
else
- page = virt_to_page(addr);
-
- return page;
+ return virt_to_page(addr);
}
+/**
+ * Prepare buffers for read.
+ *
+ * The function maps the range described by \a off and \a len to \a lnb array.
+ * dmu_buf_hold_array_by_bonus() finds/creates appropriate ARC buffers, then
+ * we fill the \a lnb array with the pages storing ARC buffers. Notice the
+ * current implementation passes TRUE to dmu_buf_hold_array_by_bonus() to fill
+ * ARC buffers with actual data; I/O is done in the context of
+ * osd_bufs_get_read().
+ * A better implementation would just return the buffers (potentially unfilled)
+ * and subsequent osd_read_prep() would do I/O for many ranges concurrently.
+ *
+ * \param[in] env environment
+ * \param[in] obj object
+ * \param[in] off offset in bytes
+ * \param[in] len the number of bytes to access
+ * \param[out] lnb array of local niobufs pointing to the buffers with data
+ *
+ * \retval 0 for success
+ * \retval	negative error number on failure
+ */
static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj,
loff_t off, ssize_t len, struct niobuf_local *lnb)
{
struct osd_device *osd = osd_obj2dev(obj);
- dmu_buf_t **dbp;
+ unsigned long start = cfs_time_current();
int rc, i, numbufs, npages = 0;
+ dmu_buf_t **dbp;
ENTRY;
+ record_start_io(osd, READ, 0);
+
 	/* grab buffers for read:
 	 * the OSD API lets us grab buffers first, then initiate IO(s)
 	 * so that all required IOs will be done in parallel, but at the
*/
while (len > 0) {
rc = -dmu_buf_hold_array_by_bonus(obj->oo_db, off, len, TRUE,
- osd_zerocopy_tag, &numbufs,
- &dbp);
- LASSERT(rc == 0);
+ osd_zerocopy_tag, &numbufs,
+ &dbp);
+ if (unlikely(rc))
+ GOTO(err, rc);
for (i = 0; i < numbufs; i++) {
int bufoff, tocpy, thispage;
LASSERT(len > 0);
- cfs_atomic_inc(&osd->od_zerocopy_pin);
+ atomic_inc(&osd->od_zerocopy_pin);
bufoff = off - dbp[i]->db_offset;
tocpy = min_t(int, dbp[i]->db_size - bufoff, len);
dbf = (void *) ((unsigned long)dbp[i] | 1);
while (tocpy > 0) {
- thispage = CFS_PAGE_SIZE;
- thispage -= bufoff & (CFS_PAGE_SIZE - 1);
+ thispage = PAGE_CACHE_SIZE;
+ thispage -= bufoff & (PAGE_CACHE_SIZE - 1);
thispage = min(tocpy, thispage);
- lnb->rc = 0;
+ lnb->lnb_rc = 0;
lnb->lnb_file_offset = off;
- lnb->offset = bufoff & ~CFS_PAGE_MASK;
- lnb->len = thispage;
- lnb->page = kmem_to_page(dbp[i]->db_data +
- bufoff);
+ lnb->lnb_page_offset = bufoff & ~PAGE_MASK;
+ lnb->lnb_len = thispage;
+ lnb->lnb_page = kmem_to_page(dbp[i]->db_data +
+ bufoff);
/* mark just a single slot: we need this
- * reference to dbuf to be release once */
- lnb->dentry = dbf;
+ * reference to dbuf to be released once */
+ lnb->lnb_data = dbf;
dbf = NULL;
tocpy -= thispage;
lnb++;
}
- /* steal dbuf so dmu_buf_rele_array() cant release it */
+ /* steal dbuf so dmu_buf_rele_array() can't release
+ * it */
dbp[i] = NULL;
}
dmu_buf_rele_array(dbp, numbufs, osd_zerocopy_tag);
}
+ record_end_io(osd, READ, cfs_time_current() - start,
+ npages * PAGE_SIZE, npages);
+
RETURN(npages);
+
+err:
+ LASSERT(rc < 0);
+ osd_bufs_put(env, &obj->oo_dt, lnb - npages, npages);
+ RETURN(rc);
}
static int osd_bufs_get_write(const struct lu_env *env, struct osd_object *obj,
{
struct osd_device *osd = osd_obj2dev(obj);
int plen, off_in_block, sz_in_block;
- int i = 0, npages = 0;
+ int rc, i = 0, npages = 0;
arc_buf_t *abuf;
uint32_t bs;
uint64_t dummy;
abuf = dmu_request_arcbuf(obj->oo_db, bs);
if (unlikely(abuf == NULL))
- GOTO(out_err, -ENOMEM);
+ GOTO(out_err, rc = -ENOMEM);
- cfs_atomic_inc(&osd->od_zerocopy_loan);
+ atomic_inc(&osd->od_zerocopy_loan);
 			/* go over the pages the arcbuf contains, exposing
 			 * them as local niobufs for ptlrpc's bulks */
while (sz_in_block > 0) {
- plen = min_t(int, sz_in_block, CFS_PAGE_SIZE);
+ plen = min_t(int, sz_in_block, PAGE_CACHE_SIZE);
lnb[i].lnb_file_offset = off;
- lnb[i].offset = 0;
- lnb[i].len = plen;
- lnb[i].rc = 0;
+ lnb[i].lnb_page_offset = 0;
+ lnb[i].lnb_len = plen;
+ lnb[i].lnb_rc = 0;
if (sz_in_block == bs)
- lnb[i].dentry = (void *)abuf;
+ lnb[i].lnb_data = abuf;
else
- lnb[i].dentry = NULL;
+ lnb[i].lnb_data = NULL;
/* this one is not supposed to fail */
- lnb[i].page = kmem_to_page(abuf->b_data +
+ lnb[i].lnb_page = kmem_to_page(abuf->b_data +
off_in_block);
- LASSERT(lnb[i].page);
+ LASSERT(lnb[i].lnb_page);
lprocfs_counter_add(osd->od_stats,
LPROC_OSD_ZEROCOPY_IO, 1);
/* can't use zerocopy, allocate temp. buffers */
while (sz_in_block > 0) {
- plen = min_t(int, sz_in_block, CFS_PAGE_SIZE);
+ plen = min_t(int, sz_in_block, PAGE_CACHE_SIZE);
lnb[i].lnb_file_offset = off;
- lnb[i].offset = 0;
- lnb[i].len = plen;
- lnb[i].rc = 0;
- lnb[i].dentry = NULL;
+ lnb[i].lnb_page_offset = 0;
+ lnb[i].lnb_len = plen;
+ lnb[i].lnb_rc = 0;
+ lnb[i].lnb_data = NULL;
- lnb[i].page = alloc_page(OSD_GFP_IO);
- if (unlikely(lnb[i].page == NULL))
- GOTO(out_err, -ENOMEM);
+ lnb[i].lnb_page = alloc_page(OSD_GFP_IO);
+ if (unlikely(lnb[i].lnb_page == NULL))
+ GOTO(out_err, rc = -ENOMEM);
- LASSERT(lnb[i].page->mapping == NULL);
- lnb[i].page->mapping = (void *)obj;
+ LASSERT(lnb[i].lnb_page->mapping == NULL);
+ lnb[i].lnb_page->mapping = (void *)obj;
- cfs_atomic_inc(&osd->od_zerocopy_alloc);
+ atomic_inc(&osd->od_zerocopy_alloc);
lprocfs_counter_add(osd->od_stats,
LPROC_OSD_COPY_IO, 1);
out_err:
osd_bufs_put(env, &obj->oo_dt, lnb, npages);
- RETURN(-ENOMEM);
+ RETURN(rc);
}
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
loff_t offset, ssize_t len, struct niobuf_local *lnb,
- int rw, struct lustre_capa *capa)
+ int rw)
{
struct osd_object *obj = osd_dt_obj(dt);
int rc;
return 0;
}
+/* Return the number of bytes that are not mapped in the
+ * [start, start + size] region */
+static int osd_count_not_mapped(struct osd_object *obj, uint64_t start,
+ uint32_t size)
+{
+ dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)obj->oo_db;
+ dmu_buf_impl_t *db;
+ dnode_t *dn;
+ uint32_t blkshift;
+ uint64_t end, blkid;
+ int rc;
+ ENTRY;
+
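+	/* DB_DNODE_ENTER/DB_DNODE_EXIT keep the dnode referenced while
+	 * we walk its block map below */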
+ DB_DNODE_ENTER(dbi);
+ dn = DB_DNODE(dbi);
+
+ if (dn->dn_maxblkid == 0) {
+ if (start + size <= dn->dn_datablksz)
+ GOTO(out, size = 0);
+ if (start < dn->dn_datablksz)
+ start = dn->dn_datablksz;
+ /* assume largest block size */
+ blkshift = osd_spa_maxblockshift(
+ dmu_objset_spa(osd_obj2dev(obj)->od_os));
+ } else {
+ /* blocksize can't change */
+ blkshift = dn->dn_datablkshift;
+ }
+
+ /* compute address of last block */
+ end = (start + size - 1) >> blkshift;
+ /* align start on block boundaries */
+ start >>= blkshift;
+
+	/* the file size is zero or no blocks are allocated yet,
+	 * so nothing can be mapped */
+ if (obj->oo_attr.la_size == 0 || dn->dn_maxblkid == 0)
+ GOTO(out, size = (end - start + 1) << blkshift);
+
+ /* beyond EOF, can't be mapped */
+ if (start > dn->dn_maxblkid)
+ GOTO(out, size = (end - start + 1) << blkshift);
+
+ size = 0;
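+	/* probe each block in the range with dbuf_hold_impl(); any block
+	 * we cannot hold is assumed to be unmapped and is counted as
+	 * new space */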
+ for (blkid = start; blkid <= end; blkid++) {
+ if (blkid == dn->dn_maxblkid)
+ /* this one is mapped for sure */
+ continue;
+ if (blkid > dn->dn_maxblkid) {
+ size += (end - blkid + 1) << blkshift;
+ GOTO(out, size);
+ }
+
+ rc = dbuf_hold_impl(dn, 0, blkid, TRUE, FTAG, &db);
+ if (rc) {
+ /* for ENOENT (block not mapped) and any other errors,
+ * assume the block isn't mapped */
+ size += 1 << blkshift;
+ continue;
+ }
+ dbuf_rele(db, FTAG);
+ }
+
+ GOTO(out, size);
+out:
+ DB_DNODE_EXIT(dbi);
+ return size;
+}
+
static int osd_declare_write_commit(const struct lu_env *env,
struct dt_object *dt,
struct niobuf_local *lnb, int npages,
struct thandle *th)
{
struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_device *osd = osd_obj2dev(obj);
struct osd_thandle *oh;
uint64_t offset = 0;
uint32_t size = 0;
- int i;
+ int i, rc, flags = 0;
+ bool ignore_quota = false, synced = false;
+ long long space = 0;
+ struct page *last_page = NULL;
+ unsigned long discont_pages = 0;
ENTRY;
LASSERT(dt_object_exists(dt));
oh = container_of0(th, struct osd_thandle, ot_super);
for (i = 0; i < npages; i++) {
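+		/* count pages that are not adjacent in the file; the
+		 * result feeds the BRW_*_DISCONT_PAGES histogram via
+		 * record_start_io() */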
- if (lnb[i].rc)
+ if (last_page && lnb[i].lnb_page->index != (last_page->index + 1))
+ ++discont_pages;
+ last_page = lnb[i].lnb_page;
+ if (lnb[i].lnb_rc)
/* ENOSPC, network RPC error, etc.
* We don't want to book space for pages which will be
* skipped in osd_write_commit(). Hence we skip pages
* with lnb_rc != 0 here too */
continue;
+		/* ignore quota for the whole request if any page is from
+		 * client cache or written by root.
+		 *
+		 * XXX once we drop 1.8 client support, the check for
+		 * whether a page is from cache can be simplified to:
+		 * !(lnb[i].lnb_flags & OBD_BRW_SYNC)
+		 *
+		 * XXX we could handle this on a per-lnb basis as done by
+		 * grant. */
+ if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
+ (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
+ OBD_BRW_FROM_GRANT)
+ ignore_quota = true;
if (size == 0) {
/* first valid lnb */
offset = lnb[i].lnb_file_offset;
- size = lnb[i].len;
+ size = lnb[i].lnb_len;
continue;
}
if (offset + size == lnb[i].lnb_file_offset) {
/* this lnb is contiguous to the previous one */
- size += lnb[i].len;
+ size += lnb[i].lnb_len;
continue;
}
- dmu_tx_hold_write(oh->ot_tx, obj->oo_db->db_object, offset,size);
+ dmu_tx_hold_write(oh->ot_tx, obj->oo_db->db_object,
+ offset, size);
+ /* estimating space that will be consumed by a write is rather
+ * complicated with ZFS. As a consequence, we don't account for
+ * indirect blocks and quota overrun will be adjusted once the
+ * operation is committed, if required. */
+ space += osd_count_not_mapped(obj, offset, size);
- offset = lnb->lnb_file_offset;
- size = lnb->len;
+ offset = lnb[i].lnb_file_offset;
+ size = lnb[i].lnb_len;
}
- if (size)
- dmu_tx_hold_write(oh->ot_tx, obj->oo_db->db_object, offset,size);
+ if (size) {
+ dmu_tx_hold_write(oh->ot_tx, obj->oo_db->db_object,
+ offset, size);
+ space += osd_count_not_mapped(obj, offset, size);
+ }
dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
oh->ot_write_commit = 1; /* used in osd_trans_start() for fail_loc */
- RETURN(0);
+ /* backend zfs filesystem might be configured to store multiple data
+ * copies */
+ space *= osd->od_os->os_copies;
+ space = toqb(space);
+ CDEBUG(D_QUOTA, "writting %d pages, reserving "LPD64"K of quota "
+ "space\n", npages, space);
+
+ record_start_io(osd, WRITE, discont_pages);
+retry:
+ /* acquire quota space if needed */
+ rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
+ obj->oo_attr.la_gid, space, oh, true, &flags,
+ ignore_quota);
+
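+	/* -EDQUOT with QUOTA_FL_SYNC set means a transaction commit may
+	 * release pending space, so force a sync once and retry the
+	 * reservation before failing the write */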
+ if (!synced && rc == -EDQUOT && (flags & QUOTA_FL_SYNC) != 0) {
+ dt_sync(env, th->th_dev);
+ synced = true;
+ CDEBUG(D_QUOTA, "retry after sync\n");
+ flags = 0;
+ goto retry;
+ }
+
+	/* for now we only need to store the overquota flags in the first
+	 * lnb; once multi-object BRW is supported, this code will need
+	 * to be revised */
+ if (flags & QUOTA_FL_OVER_USRQUOTA)
+ lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
+ if (flags & QUOTA_FL_OVER_GRPQUOTA)
+ lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
+
+ RETURN(rc);
}
static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
{
struct osd_object *obj = osd_dt_obj(dt);
struct osd_device *osd = osd_obj2dev(obj);
- udmu_objset_t *uos = &osd->od_objset;
struct osd_thandle *oh;
uint64_t new_size = 0;
int i, rc = 0;
+ unsigned long iosize = 0;
ENTRY;
LASSERT(dt_object_exists(dt));
for (i = 0; i < npages; i++) {
CDEBUG(D_INODE, "write %u bytes at %u\n",
- (unsigned) lnb[i].len,
+ (unsigned) lnb[i].lnb_len,
(unsigned) lnb[i].lnb_file_offset);
- if (lnb[i].rc) {
+ if (lnb[i].lnb_rc) {
/* ENOSPC, network RPC error, etc.
* Unlike ldiskfs, zfs allocates new blocks on rewrite,
* so we skip this page if lnb_rc is set to -ENOSPC */
CDEBUG(D_INODE, "obj "DFID": skipping lnb[%u]: rc=%d\n",
PFID(lu_object_fid(&dt->do_lu)), i,
- lnb[i].rc);
+ lnb[i].lnb_rc);
continue;
}
- if (lnb[i].page->mapping == (void *)obj) {
- dmu_write(osd->od_objset.os, obj->oo_db->db_object,
- lnb[i].lnb_file_offset, lnb[i].len,
- kmap(lnb[i].page), oh->ot_tx);
- kunmap(lnb[i].page);
- } else if (lnb[i].dentry) {
- LASSERT(((unsigned long)lnb[i].dentry & 1) == 0);
+ if (lnb[i].lnb_page->mapping == (void *)obj) {
+ dmu_write(osd->od_os, obj->oo_db->db_object,
+ lnb[i].lnb_file_offset, lnb[i].lnb_len,
+ kmap(lnb[i].lnb_page), oh->ot_tx);
+ kunmap(lnb[i].lnb_page);
+ } else if (lnb[i].lnb_data) {
+ LASSERT(((unsigned long)lnb[i].lnb_data & 1) == 0);
 			/* buffer loaned for zerocopy, try to use it.
 			 * notice that dmu_assign_arcbuf() is smart
 			 * enough to recognize a changed blocksize,
 			 * in which case it falls back to dmu_write() */
dmu_assign_arcbuf(obj->oo_db, lnb[i].lnb_file_offset,
- (void *)lnb[i].dentry, oh->ot_tx);
+ lnb[i].lnb_data, oh->ot_tx);
/* drop the reference, otherwise osd_put_bufs()
* will be releasing it - bad! */
- lnb[i].dentry = NULL;
- cfs_atomic_dec(&osd->od_zerocopy_loan);
+ lnb[i].lnb_data = NULL;
+ atomic_dec(&osd->od_zerocopy_loan);
}
- if (new_size < lnb[i].lnb_file_offset + lnb[i].len)
- new_size = lnb[i].lnb_file_offset + lnb[i].len;
+ if (new_size < lnb[i].lnb_file_offset + lnb[i].lnb_len)
+ new_size = lnb[i].lnb_file_offset + lnb[i].lnb_len;
+ iosize += lnb[i].lnb_len;
}
if (unlikely(new_size == 0)) {
th->th_local = 1;
/* it is important to return 0 even when all lnb_rc == -ENOSPC
* since ofd_commitrw_write() retries several times on ENOSPC */
+ record_end_io(osd, WRITE, 0, 0, 0);
RETURN(0);
}
- cfs_write_lock(&obj->oo_attr_lock);
+ write_lock(&obj->oo_attr_lock);
if (obj->oo_attr.la_size < new_size) {
obj->oo_attr.la_size = new_size;
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
 		/* osd_object_sa_update() will be copying directly from
 		 * oo_attr into the dbuf; any update within a single txg
 		 * will copy the most recent value */
- rc = osd_object_sa_update(obj, SA_ZPL_SIZE(uos),
- &obj->oo_attr.la_size, 8, oh);
+ rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd),
+ &obj->oo_attr.la_size, 8, oh);
} else {
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
}
+ record_end_io(osd, WRITE, 0, iosize, npages);
+
RETURN(rc);
}
struct niobuf_local *lnb, int npages)
{
struct osd_object *obj = osd_dt_obj(dt);
- struct lu_buf buf;
- loff_t offset;
int i;
+ unsigned long size = 0;
+ loff_t eof;
LASSERT(dt_object_exists(dt));
LASSERT(obj->oo_db);
+ read_lock(&obj->oo_attr_lock);
+ eof = obj->oo_attr.la_size;
+ read_unlock(&obj->oo_attr_lock);
+
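+	/* the data was already read into the buffers by
+	 * osd_bufs_get_read(), so just set per-page rc here, clipping
+	 * the pages that extend past EOF */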
for (i = 0; i < npages; i++) {
- buf.lb_buf = kmap(lnb[i].page);
- buf.lb_len = lnb[i].len;
- offset = lnb[i].lnb_file_offset;
+ if (unlikely(lnb[i].lnb_rc < 0))
+ continue;
- CDEBUG(D_OTHER, "read %u bytes at %u\n",
- (unsigned) lnb[i].len,
- (unsigned) lnb[i].lnb_file_offset);
- lnb[i].rc = osd_read(env, dt, &buf, &offset, NULL);
- kunmap(lnb[i].page);
+ lnb[i].lnb_rc = lnb[i].lnb_len;
+ size += lnb[i].lnb_rc;
+
+ if (lnb[i].lnb_file_offset + lnb[i].lnb_len > eof) {
+ lnb[i].lnb_rc = eof - lnb[i].lnb_file_offset;
+ if (lnb[i].lnb_rc < 0)
+ lnb[i].lnb_rc = 0;
- if (lnb[i].rc < buf.lb_len) {
/* all subsequent rc should be 0 */
while (++i < npages)
- lnb[i].rc = 0;
+ lnb[i].lnb_rc = 0;
break;
}
}
}
static int osd_punch(const struct lu_env *env, struct dt_object *dt,
- __u64 start, __u64 end, struct thandle *th,
- struct lustre_capa *capa)
+ __u64 start, __u64 end, struct thandle *th)
{
struct osd_object *obj = osd_dt_obj(dt);
struct osd_device *osd = osd_obj2dev(obj);
- udmu_objset_t *uos = &osd->od_objset;
struct osd_thandle *oh;
__u64 len;
int rc = 0;
LASSERT(th != NULL);
oh = container_of0(th, struct osd_thandle, ot_super);
- cfs_write_lock(&obj->oo_attr_lock);
+ write_lock(&obj->oo_attr_lock);
/* truncate */
if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
len = DMU_OBJECT_END;
else
len = end - start;
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
- rc = __osd_object_punch(osd->od_objset.os, obj->oo_db, oh->ot_tx,
+ rc = __osd_object_punch(osd->od_os, obj->oo_db, oh->ot_tx,
obj->oo_attr.la_size, start, len);
/* set new size */
if (len == DMU_OBJECT_END) {
- cfs_write_lock(&obj->oo_attr_lock);
+ write_lock(&obj->oo_attr_lock);
obj->oo_attr.la_size = start;
- cfs_write_unlock(&obj->oo_attr_lock);
- rc = osd_object_sa_update(obj, SA_ZPL_SIZE(uos),
- &obj->oo_attr.la_size, 8, oh);
+ write_unlock(&obj->oo_attr_lock);
+ rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd),
+ &obj->oo_attr.la_size, 8, oh);
}
RETURN(rc);
}
__u64 start, __u64 end, struct thandle *handle)
{
struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_device *osd = osd_obj2dev(obj);
struct osd_thandle *oh;
__u64 len;
ENTRY;
oh = container_of0(handle, struct osd_thandle, ot_super);
- cfs_read_lock(&obj->oo_attr_lock);
+ read_lock(&obj->oo_attr_lock);
if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
len = DMU_OBJECT_END;
else
/* declare we'll free some blocks ... */
if (start < obj->oo_attr.la_size) {
- cfs_read_unlock(&obj->oo_attr_lock);
+ read_unlock(&obj->oo_attr_lock);
dmu_tx_hold_free(oh->ot_tx, obj->oo_db->db_object, start, len);
} else {
- cfs_read_unlock(&obj->oo_attr_lock);
+ read_unlock(&obj->oo_attr_lock);
}
/* ... and we'll modify size attribute */
dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
- RETURN(0);
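+	/* a punch only releases blocks, so no extra quota space needs to
+	 * be reserved: declare with lqi_space set to 0 */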
+ RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
+ obj->oo_attr.la_gid, 0, oh, true, NULL,
+ false));
}
.dbo_declare_write_commit = osd_declare_write_commit,
.dbo_write_commit = osd_write_commit,
.dbo_read_prep = osd_read_prep,
- .do_declare_punch = osd_declare_punch,
- .do_punch = osd_punch,
+ .dbo_declare_punch = osd_declare_punch,
+ .dbo_punch = osd_punch,
};
-