[Have inode_timespec_t])
])
dnl # ZFS 0.7.12/0.8.x uses zfs_refcount_add() instead of
- dnl # refcount_add().
+ dnl # refcount_add(). ZFS 2.0 renamed sys/refcount.h to
+ dnl # sys/zfs_refcount.h; rather than adding another check to
+ dnl # determine the correct header name, include it
+ dnl # indirectly through sys/dnode.h.
dnl #
LB_CHECK_COMPILE([if ZFS has 'zfs_refcount_add'],
zfs_refcount_add, [
- 	#include <sys/refcount.h>
+ 	#include <sys/dnode.h>
],[
	zfs_refcount_add((zfs_refcount_t *) NULL, NULL);
],[
- 	AC_DEFINE(HAVE_DMU_OFFSET_NEXT, 1,
- 		[Have dmu_offset_next() exported])
+ 	AC_DEFINE(HAVE_ZFS_REFCOUNT_ADD, 1,
+ 		[Have zfs_refcount_add])
])
+ dnl #
+ dnl # ZFS 2.0 replaced .db_last_dirty / .dr_next with a list_t
+ dnl # and list_node_t named .db_dirty_records / .dr_dbuf_node.
+ dnl # On success HAVE_DB_DIRTY_RECORDS_LIST is defined, which
+ dnl # selects the list_head() accessor over the legacy
+ dnl # db_last_dirty pointer in the osd-zfs code.
+ dnl #
+ LB_CHECK_COMPILE([if ZFS has 'db_dirty_records' list_t],
+ db_dirty_records, [
+ 	#include <sys/dbuf.h>
+ ],[
+ 	dmu_buf_impl_t db;
+ 	dbuf_dirty_record_t *dr;
+ 	dr = list_head(&db.db_dirty_records);
+ ],[
+ 	AC_DEFINE(HAVE_DB_DIRTY_RECORDS_LIST, 1,
+ 		[Have db_dirty_records list_t])
+ ])
])
AS_IF([test "x$enable_zfs" = xyes], [
dmu_buf_rele(&db->db, osd_obj_tag);
}
+/*
+ * Return the txg of the newest dirty record attached to @db, or 0 if
+ * the dbuf has no dirty records (i.e. it is clean).  db_mtx is held
+ * while the dirty record is read.  ZFS 2.0 replaced the db_last_dirty
+ * pointer with the db_dirty_records list_t, hence the conditional
+ * accessor below.
+ */
+static inline uint64_t osd_db_dirty_txg(dmu_buf_impl_t *db)
+{
+	dbuf_dirty_record_t *dr;
+	uint64_t txg = 0;
+
+	mutex_enter(&db->db_mtx);
+#ifdef HAVE_DB_DIRTY_RECORDS_LIST
+	/* ZFS >= 2.0: dirty records live on a list; take the head
+	 * (presumably the most recent record — mirrors the old
+	 * db_last_dirty semantics). */
+	dr = list_head(&db->db_dirty_records);
+#else
+	/* ZFS < 2.0: most recent dirty record via db_last_dirty */
+	dr = db->db_last_dirty;
+#endif
+	if (dr != NULL)
+		txg = dr->dr_txg;
+	mutex_exit(&db->db_mtx);
+
+	return txg;
+}
+
#ifdef HAVE_DMU_USEROBJ_ACCOUNTING
#define OSD_DMU_USEROBJ_PREFIX DMU_OBJACCT_PREFIX
{
struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
size_t size = buf->lb_len;
- ktime_t start;
+ hrtime_t start = gethrtime();
s64 delta_ms;
int rc;
- start = ktime_get();
record_start_io(osd, READ, 0);
rc = __osd_read(env, dt, buf, pos, &size);
- delta_ms = ktime_ms_delta(ktime_get(), start);
+ delta_ms = gethrtime() - start;
+ do_div(delta_ms, NSEC_PER_MSEC);
record_end_io(osd, READ, delta_ms, size, size >> PAGE_SHIFT);
return rc;
{
struct osd_device *osd = osd_obj2dev(obj);
int rc, i, numbufs, npages = 0, drop_cache = 0;
- ktime_t start = ktime_get();
+ hrtime_t start = gethrtime();
dmu_buf_t **dbp;
s64 delta_ms;
dmu_buf_rele_array(dbp, numbufs, osd_0copy_tag);
}
- delta_ms = ktime_ms_delta(ktime_get(), start);
+ delta_ms = gethrtime() - start;
+ do_div(delta_ms, NSEC_PER_MSEC);
record_end_io(osd, READ, delta_ms, npages * PAGE_SIZE, npages);
RETURN(npages);
__u64 start, __u64 end)
{
struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
- struct dmu_buf_impl *db = osd_dt_obj(dt)->oo_dn->dn_dbuf;
uint64_t txg = 0;
ENTRY;
if (osd->od_dt_dev.dd_rdonly)
RETURN(0);
- mutex_enter(&db->db_mtx);
- if (db->db_last_dirty)
- txg = db->db_last_dirty->dr_txg;
- mutex_exit(&db->db_mtx);
-
+ txg = osd_db_dirty_txg(osd_dt_obj(dt)->oo_dn->dn_dbuf);
if (txg) {
/* the object is dirty or being synced */
if (osd_object_sync_delay_us < 0)