* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2014, Intel Corporation.
+ * Copyright (c) 2012, 2015, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <obd_class.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
-#include <lustre/lustre_idl.h> /* LLOG_CHUNK_SIZE definition */
+#include <lustre/lustre_idl.h> /* LLOG_MIN_CHUNK_SIZE definition */
#include "osd_internal.h"
static char *osd_zerocopy_tag = "zerocopy";
-static void record_start_io(struct osd_device *osd, int rw, int npages,
- int discont_pages)
+static void record_start_io(struct osd_device *osd, int rw, int discont_pages)
{
struct obd_histogram *h = osd->od_brw_stats.hist;
atomic_inc(&osd->od_r_in_flight);
lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
atomic_read(&osd->od_r_in_flight));
- lprocfs_oh_tally_log2(&h[BRW_R_PAGES], npages);
lprocfs_oh_tally(&h[BRW_R_DISCONT_PAGES], discont_pages);
} else {
atomic_inc(&osd->od_w_in_flight);
lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
atomic_read(&osd->od_w_in_flight));
- lprocfs_oh_tally_log2(&h[BRW_W_PAGES], npages);
lprocfs_oh_tally(&h[BRW_W_DISCONT_PAGES], discont_pages);
}
}
static void record_end_io(struct osd_device *osd, int rw,
- unsigned long elapsed, int disksize)
+ unsigned long elapsed, int disksize, int npages)
{
struct obd_histogram *h = osd->od_brw_stats.hist;
if (rw == READ) {
atomic_dec(&osd->od_r_in_flight);
+ lprocfs_oh_tally_log2(&h[BRW_R_PAGES], npages);
if (disksize > 0)
lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], disksize);
if (elapsed)
} else {
atomic_dec(&osd->od_w_in_flight);
+ lprocfs_oh_tally_log2(&h[BRW_W_PAGES], npages);
if (disksize > 0)
lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], disksize);
if (elapsed)
}
static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
- struct lu_buf *buf, loff_t *pos,
- struct lustre_capa *capa)
+ struct lu_buf *buf, loff_t *pos)
{
struct osd_object *obj = osd_dt_obj(dt);
struct osd_device *osd = osd_obj2dev(obj);
size = old_size - *pos;
}
- record_start_io(osd, READ, (size >> PAGE_CACHE_SHIFT), 0);
+ record_start_io(osd, READ, 0);
rc = -dmu_read(osd->od_os, obj->oo_db->db_object, *pos, size,
buf->lb_buf, DMU_READ_PREFETCH);
- record_end_io(osd, READ, cfs_time_current() - start, size);
+ record_end_io(osd, READ, cfs_time_current() - start, size,
+ size >> PAGE_CACHE_SHIFT);
if (rc == 0) {
rc = size;
*pos += size;
/* XXX: we still miss for append declaration support in ZFS
* -1 means append which is used by llog mostly, llog
- * can grow upto LLOG_CHUNK_SIZE*8 records */
+ * can grow up to LLOG_MIN_CHUNK_SIZE*8 records */
if (pos == -1)
- pos = max_t(loff_t, 256 * 8 * LLOG_CHUNK_SIZE,
+ pos = max_t(loff_t, 256 * 8 * LLOG_MIN_CHUNK_SIZE,
obj->oo_attr.la_size + (2 << 20));
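/* A worked example of the reservation above, assuming LLOG_MIN_CHUNK_SIZE
 * is 8192 bytes: the floor is 256 * 8 * 8192 = 16 MiB, so an append-mode
 * llog gets at least 16 MiB declared, or la_size + (2 << 20) = la_size +
 * 2 MiB once the object has grown past 14 MiB. */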
dmu_tx_hold_write(oh->ot_tx, oid, pos, buf->lb_len);
static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
const struct lu_buf *buf, loff_t *pos,
- struct thandle *th, struct lustre_capa *capa,
- int ignore_quota)
+ struct thandle *th, int ignore_quota)
{
struct osd_object *obj = osd_dt_obj(dt);
struct osd_device *osd = osd_obj2dev(obj);
LASSERT(th != NULL);
oh = container_of0(th, struct osd_thandle, ot_super);
- record_start_io(osd, WRITE, (buf->lb_len >> PAGE_CACHE_SHIFT), 0);
+ record_start_io(osd, WRITE, 0);
dmu_write(osd->od_os, obj->oo_db->db_object, offset,
(uint64_t)buf->lb_len, buf->lb_buf, oh->ot_tx);
rc = buf->lb_len;
out:
- record_end_io(osd, WRITE, 0, buf->lb_len);
+ record_end_io(osd, WRITE, 0, buf->lb_len,
+ buf->lb_len >> PAGE_CACHE_SHIFT);
RETURN(rc);
}
* XXX: for the moment I don't want to use lnb_flags for osd-internal
* purposes as it's not very well defined ...
* instead I use the lowest bit of the address so that:
- * arc buffer: .lnb_obj = abuf (arc we loan for write)
- * dbuf buffer: .lnb_obj = dbuf | 1 (dbuf we get for read)
+ * arc buffer: .lnb_data = abuf (arc we loan for write)
+ * dbuf buffer: .lnb_data = dbuf | 1 (dbuf we get for read)
* copy buffer: .lnb_page->mapping = obj (page we allocate for write)
*
* bzzz, to blame
return virt_to_page(addr);
}
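/* Below is a minimal sketch of the low-bit tagging described above. These
 * helpers are hypothetical (the patch open-codes the bit operations) and
 * assume dmu_buf_t pointers are at least 2-byte aligned, so bit 0 is free
 * to record how the buffer was obtained: */
static inline void *osd_lnb_tag_dbuf(dmu_buf_t *dbuf)
{
	/* mark .lnb_data as "dbuf held for read" */
	return (void *)((unsigned long)dbuf | 1UL);
}

static inline bool osd_lnb_is_dbuf(void *lnb_data)
{
	/* true for a tagged dbuf, false for a loaned arc buffer */
	return ((unsigned long)lnb_data & 1UL) != 0;
}

static inline dmu_buf_t *osd_lnb_to_dbuf(void *lnb_data)
{
	/* strip the tag bit before the dbuf is released */
	return (dmu_buf_t *)((unsigned long)lnb_data & ~1UL);
}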
+/**
+ * Prepare buffers for read.
+ *
+ * The function maps the range described by \a off and \a len to the \a lnb
+ * array. dmu_buf_hold_array_by_bonus() finds or creates the appropriate ARC
+ * buffers, then we fill the \a lnb array with the pages backing those ARC
+ * buffers. Note the current implementation passes TRUE to
+ * dmu_buf_hold_array_by_bonus() to fill the ARC buffers with actual data, so
+ * the I/O is done in the context of osd_bufs_get_read(). A better
+ * implementation would just return the buffers (potentially unfilled) and a
+ * subsequent osd_read_prep() would do the I/O for many ranges concurrently.
+ *
+ * \param[in] env environment
+ * \param[in] obj object
+ * \param[in] off offset in bytes
+ * \param[in] len the number of bytes to access
+ * \param[out] lnb array of local niobufs pointing to the buffers with data
+ *
+ * \retval 0 for success
+ * \retval negative error number on failure
+ */
static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj,
loff_t off, ssize_t len, struct niobuf_local *lnb)
{
struct osd_device *osd = osd_obj2dev(obj);
- dmu_buf_t **dbp;
+ unsigned long start = cfs_time_current();
int rc, i, numbufs, npages = 0;
+ dmu_buf_t **dbp;
ENTRY;
+ record_start_io(osd, READ, 0);
+
/* grab buffers for read:
* OSD API let us to grab buffers first, then initiate IO(s)
* so that all required IOs will be done in parallel, but at the
lnb->lnb_rc = 0;
lnb->lnb_file_offset = off;
- lnb->lnb_page_offset = bufoff & ~CFS_PAGE_MASK;
+ lnb->lnb_page_offset = bufoff & ~PAGE_MASK;
lnb->lnb_len = thispage;
lnb->lnb_page = kmem_to_page(dbp[i]->db_data +
bufoff);
/* mark just a single slot: we need this
- * reference to dbuf to be release once */
+ * reference to dbuf to be released once */
lnb->lnb_data = dbf;
dbf = NULL;
lnb++;
}
- /* steal dbuf so dmu_buf_rele_array() cant release it */
+ /* steal dbuf so dmu_buf_rele_array() can't release
+ * it */
dbp[i] = NULL;
}
dmu_buf_rele_array(dbp, numbufs, osd_zerocopy_tag);
}
+ record_end_io(osd, READ, cfs_time_current() - start,
+ npages * PAGE_SIZE, npages);
+
RETURN(npages);
err:
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
loff_t offset, ssize_t len, struct niobuf_local *lnb,
- int rw, struct lustre_capa *capa)
+ int rw)
{
struct osd_object *obj = osd_dt_obj(dt);
int rc;
return 0;
}
-/* Return number of blocks that aren't mapped in the [start, start + size]
- * region */
-static int osd_count_not_mapped(struct osd_object *obj, uint64_t start,
- uint32_t size)
+static inline uint32_t osd_get_blocksz(struct osd_object *obj)
{
- dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)obj->oo_db;
- dmu_buf_impl_t *db;
- dnode_t *dn;
- uint32_t blkshift;
- uint64_t end, blkid;
- int rc;
- ENTRY;
+ uint32_t blksz;
+ u_longlong_t unused;
- DB_DNODE_ENTER(dbi);
- dn = DB_DNODE(dbi);
-
- if (dn->dn_maxblkid == 0) {
- if (start + size <= dn->dn_datablksz)
- GOTO(out, size = 0);
- if (start < dn->dn_datablksz)
- start = dn->dn_datablksz;
- /* assume largest block size */
- blkshift = SPA_MAXBLOCKSHIFT;
- } else {
- /* blocksize can't change */
- blkshift = dn->dn_datablkshift;
- }
-
- /* compute address of last block */
- end = (start + size - 1) >> blkshift;
- /* align start on block boundaries */
- start >>= blkshift;
+ LASSERT(obj->oo_db);
- /* size is null, can't be mapped */
- if (obj->oo_attr.la_size == 0 || dn->dn_maxblkid == 0)
- GOTO(out, size = (end - start + 1) << blkshift);
+ dmu_object_size_from_db(obj->oo_db, &blksz, &unused);
+ return blksz;
+}
- /* beyond EOF, can't be mapped */
- if (start > dn->dn_maxblkid)
- GOTO(out, size = (end - start + 1) << blkshift);
+static inline uint64_t osd_roundup2blocksz(uint64_t size,
+ uint64_t offset,
+ uint32_t blksz)
+{
+ LASSERT(blksz > 0);
- size = 0;
- for (blkid = start; blkid <= end; blkid++) {
- if (blkid == dn->dn_maxblkid)
- /* this one is mapped for sure */
- continue;
- if (blkid > dn->dn_maxblkid) {
- size += (end - blkid + 1) << blkshift;
- GOTO(out, size);
- }
+ size += offset % blksz;
- rc = dbuf_hold_impl(dn, 0, blkid, TRUE, FTAG, &db);
- if (rc) {
- /* for ENOENT (block not mapped) and any other errors,
- * assume the block isn't mapped */
- size += 1 << blkshift;
- continue;
- }
- dbuf_rele(db, FTAG);
- }
+ if (likely(IS_PO2(blksz)))
+ return PO2_ROUNDUP_TYPED(size, blksz, uint64_t);
- GOTO(out, size);
-out:
- DB_DNODE_EXIT(dbi);
- return size;
+ size += blksz - 1;
+ do_div(size, blksz);
+ return size * blksz;
}
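/* A worked example (values assumed for illustration): a 1000-byte write at
 * offset 6144 into an object with a 4096-byte block size covers
 * size + (offset % blksz) = 1000 + 2048 = 3048 bytes of its first block,
 * which rounds up to 4096, i.e. one full block of newly referenced space.
 * The non-power-of-2 path computes the same: (3048 + 4095) / 4096 * 4096. */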
static int osd_declare_write_commit(const struct lu_env *env,
- struct dt_object *dt,
- struct niobuf_local *lnb, int npages,
- struct thandle *th)
+ struct dt_object *dt,
+ struct niobuf_local *lnb, int npages,
+ struct thandle *th)
{
struct osd_object *obj = osd_dt_obj(dt);
struct osd_device *osd = osd_obj2dev(obj);
struct osd_thandle *oh;
uint64_t offset = 0;
uint32_t size = 0;
+ uint32_t blksz = osd_get_blocksz(obj);
int i, rc, flags = 0;
bool ignore_quota = false, synced = false;
long long space = 0;
dmu_tx_hold_write(oh->ot_tx, obj->oo_db->db_object,
offset, size);
- /* estimating space that will be consumed by a write is rather
+ /* Estimating space to be consumed by a write is rather
* complicated with ZFS. As a consequence, we don't account for
- * indirect blocks and quota overrun will be adjusted once the
- * operation is committed, if required. */
- space += osd_count_not_mapped(obj, offset, size);
+ * indirect blocks and just use as a rough estimate the worst
+ * case where the old space is being held by a snapshot. Quota
+ * overrun will be adjusted once the operation is committed, if
+ * required. */
+ space += osd_roundup2blocksz(size, offset, blksz);
offset = lnb[i].lnb_file_offset;
size = lnb[i].lnb_len;
if (size) {
dmu_tx_hold_write(oh->ot_tx, obj->oo_db->db_object,
offset, size);
- space += osd_count_not_mapped(obj, offset, size);
+ space += osd_roundup2blocksz(size, offset, blksz);
}
dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
* copies */
space *= osd->od_os->os_copies;
space = toqb(space);
- CDEBUG(D_QUOTA, "writting %d pages, reserving "LPD64"K of quota "
- "space\n", npages, space);
+ CDEBUG(D_QUOTA, "writing %d pages, reserving "LPD64"K of quota space\n",
+ npages, space);
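/* For illustration (assumed values): 256 dirty 4 KiB pages forming one
 * contiguous 1 MiB extent on a dataset with os_copies == 1 round up to
 * 1 MiB of worst-case block space; toqb() then converts bytes to 1 KiB
 * quota units, so the message above would report "reserving 1024K of
 * quota space". */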
- record_start_io(osd, WRITE, npages, discont_pages);
+ record_start_io(osd, WRITE, discont_pages);
retry:
/* acquire quota space if needed */
rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
th->th_local = 1;
/* it is important to return 0 even when all lnb_rc == -ENOSPC
* since ofd_commitrw_write() retries several times on ENOSPC */
- record_end_io(osd, WRITE, 0, 0);
+ record_end_io(osd, WRITE, 0, 0, 0);
RETURN(0);
}
write_unlock(&obj->oo_attr_lock);
}
- record_end_io(osd, WRITE, 0, iosize);
+ record_end_io(osd, WRITE, 0, iosize, npages);
RETURN(rc);
}
struct niobuf_local *lnb, int npages)
{
struct osd_object *obj = osd_dt_obj(dt);
- struct osd_device *osd = osd_obj2dev(obj);
- struct lu_buf buf;
- loff_t offset;
int i;
- unsigned long start;
- unsigned long size = 0;
+ loff_t eof;
LASSERT(dt_object_exists(dt));
LASSERT(obj->oo_db);
- start = cfs_time_current();
-
- record_start_io(osd, READ, npages, 0);
+ read_lock(&obj->oo_attr_lock);
+ eof = obj->oo_attr.la_size;
+ read_unlock(&obj->oo_attr_lock);
for (i = 0; i < npages; i++) {
- buf.lb_buf = kmap(lnb[i].lnb_page);
- buf.lb_len = lnb[i].lnb_len;
- offset = lnb[i].lnb_file_offset;
+ if (unlikely(lnb[i].lnb_rc < 0))
+ continue;
- CDEBUG(D_OTHER, "read %u bytes at %u\n",
- (unsigned) lnb[i].lnb_len,
- (unsigned) lnb[i].lnb_file_offset);
- lnb[i].lnb_rc = osd_read(env, dt, &buf, &offset, NULL);
- kunmap(lnb[i].lnb_page);
+ lnb[i].lnb_rc = lnb[i].lnb_len;
- size += lnb[i].lnb_rc;
+ if (lnb[i].lnb_file_offset + lnb[i].lnb_len >= eof) {
+ if (eof <= lnb[i].lnb_file_offset)
+ lnb[i].lnb_rc = 0;
+ else
+ lnb[i].lnb_rc = eof - lnb[i].lnb_file_offset;
- if (lnb[i].lnb_rc < buf.lb_len) {
/* all subsequent rc should be 0 */
while (++i < npages)
lnb[i].lnb_rc = 0;
}
}
- record_end_io(osd, READ, cfs_time_current() - start, size);
-
return 0;
}
}
static int osd_punch(const struct lu_env *env, struct dt_object *dt,
- __u64 start, __u64 end, struct thandle *th,
- struct lustre_capa *capa)
+ __u64 start, __u64 end, struct thandle *th)
{
struct osd_object *obj = osd_dt_obj(dt);
struct osd_device *osd = osd_obj2dev(obj);
false));
}
+static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
+ __u64 start, __u64 end, enum lu_ladvise_type advice)
+{
+ int rc;
+ ENTRY;
+
+ switch (advice) {
+ default:
+ rc = -ENOTSUPP;
+ break;
+ }
+
+ RETURN(rc);
+}
struct dt_body_operations osd_body_ops = {
.dbo_read = osd_read,
.dbo_read_prep = osd_read_prep,
.dbo_declare_punch = osd_declare_punch,
.dbo_punch = osd_punch,
+ .dbo_ladvise = osd_ladvise,
};