/*
 * Free a buffer obtained from the matching osd_zio_buf_alloc()
 * (counterpart is outside this hunk).  @size is unused on this branch:
 * sa_spill_free() needs no explicit length.  The parameter is kept so
 * callers compile identically under both sides of the #ifdef.
 */
static inline void
osd_zio_buf_free(void *buf, size_t size)
{
	sa_spill_free(buf);
}
+#else
+#define osd_zio_buf_alloc(size) zio_buf_alloc(size)
+#define osd_zio_buf_free(buf, size) zio_buf_free(buf, size)
+#endif
+
+#ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
+static inline uint64_t
+osd_dmu_object_alloc(objset_t *os, dmu_object_type_t objtype, int blocksize,
+ int dnodesize, dmu_tx_t *tx)
+{
+ if (dnodesize == 0)
+ dnodesize = max_t(int, dmu_objset_dnodesize(os),
+ DNODE_MIN_SIZE);
+
+ return dmu_object_alloc_dnsize(os, objtype, blocksize, DMU_OT_SA,
+ DN_BONUS_SIZE(dnodesize), dnodesize, tx);
+}
+
+static inline uint64_t
+osd_zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
+ dmu_object_type_t ot, int leaf_blockshift,
+ int indirect_blockshift, int dnodesize, dmu_tx_t *tx)
+{
+ if (dnodesize == 0)
+ dnodesize = max_t(int, dmu_objset_dnodesize(os),
+ DNODE_MIN_SIZE);
+
+ return zap_create_flags_dnsize(os, normflags, flags, ot,
+ leaf_blockshift, indirect_blockshift,
+ DMU_OT_SA, DN_BONUS_SIZE(dnodesize),
+ dnodesize, tx);
+}
+
+static inline int
+osd_obj_bonuslen(struct osd_object *obj)
+{
+ int bonuslen = DN_BONUS_SIZE(DNODE_MIN_SIZE);
+
+ if (obj->oo_dn != NULL && obj->oo_dn->dn_num_slots != 0) {
+ bonuslen = DN_SLOTS_TO_BONUSLEN(obj->oo_dn->dn_num_slots);
+ } else {
+ objset_t *os = osd_dtobj2objset(&obj->oo_dt);
+ int dnodesize;
+
+ if (os != NULL) {
+ dnodesize = dmu_objset_dnodesize(os);
+ if (dnodesize != 0)
+ bonuslen = DN_BONUS_SIZE(dnodesize);
+ }
+ }
+
+ return bonuslen;
+}
+#else
/*
 * Fallback for ZFS without dmu_object_alloc_dnsize(): @dnodesize is
 * ignored and the bonus buffer is always DN_MAX_BONUSLEN bytes.
 */
static inline uint64_t
osd_dmu_object_alloc(objset_t *os, dmu_object_type_t objtype, int blocksize,
		     int dnodesize, dmu_tx_t *tx)
{
	return dmu_object_alloc(os, objtype, blocksize, DMU_OT_SA,
				DN_MAX_BONUSLEN, tx);
}
+
/*
 * Fallback for ZFS without zap_create_flags_dnsize(): @dnodesize is
 * ignored and the bonus buffer is always DN_MAX_BONUSLEN bytes.
 */
static inline uint64_t
osd_zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
		     dmu_object_type_t ot, int leaf_blockshift,
		     int indirect_blockshift, int dnodesize, dmu_tx_t *tx)
{
	return zap_create_flags(os, normflags, flags, ot, leaf_blockshift,
				indirect_blockshift, DMU_OT_SA,
				DN_MAX_BONUSLEN, tx);
}
+
/*
 * Without large-dnode support every object carries the maximum bonus
 * area, so the bonus length is a constant regardless of @obj.
 */
static inline int
osd_obj_bonuslen(struct osd_object *obj)
{
	return DN_MAX_BONUSLEN;
}
+#endif /* HAVE_DMU_OBJECT_ALLOC_DNSIZE */
+
/*
 * Map the 6-argument osd_dmu_prefetch() signature onto whichever
 * dmu_prefetch() variant the installed ZFS provides.  With the old
 * 4-argument API the @len and @pri hints are silently dropped.
 */
#ifdef HAVE_DMU_PREFETCH_6ARG
#define osd_dmu_prefetch(os, obj, lvl, off, len, pri) \
	dmu_prefetch((os), (obj), (lvl), (off), (len), (pri))
#else
#define osd_dmu_prefetch(os, obj, lvl, off, len, pri) \
	dmu_prefetch((os), (obj), (lvl), (off))
#endif
+
/*
 * Attach a private SA handle to @obj's bonus buffer, taking an extra
 * hold on the bonus dbuf while the handle exists.  Idempotent: returns
 * 0 immediately when oo_sa_hdl is already set.
 *
 * Returns 0 on success or a negative errno from sa_handle_get_from_db().
 * The extra db_holds reference taken here is presumably dropped via
 * osd_dnode_rele() -- NOTE(review): confirm against the callers.
 */
static inline int osd_sa_handle_get(struct osd_object *obj)
{
	struct osd_device *osd = osd_obj2dev(obj);
	dnode_t *dn = obj->oo_dn;
	int rc;

	if (obj->oo_sa_hdl)
		return 0;

	/*
	 * Ensure the bonus buffer is cached before handing it to SA.
	 * DB_RF_MUST_SUCCEED is passed, so the return value is
	 * deliberately not checked here.
	 */
	dbuf_read(dn->dn_bonus, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH);
	rc = -sa_handle_get_from_db(osd->od_os, &dn->dn_bonus->db, obj,
				    SA_HDL_PRIVATE, &obj->oo_sa_hdl);
	if (rc)
		return rc;
	/* Pin the bonus dbuf for as long as the SA handle references it. */
	zfs_refcount_add(&dn->dn_bonus->db_holds, osd_obj_tag);
	return 0;
}
+
+static inline void osd_dnode_rele(dnode_t *dn)
+{
+ dmu_buf_impl_t *db;
+ LASSERT(dn);
+ LASSERT(dn->dn_bonus);
+ db = dn->dn_bonus;
+
+ dmu_buf_rele(&db->db, osd_obj_tag);
+}
+
/*
 * Return the txg of the dirty record at the head of @db's dirty list,
 * or 0 when the buffer has no dirty records.  db_mtx is held across
 * the access.  Newer ZFS replaced db->db_last_dirty with the
 * db_dirty_records list, hence the #ifdef.
 */
static inline uint64_t osd_db_dirty_txg(dmu_buf_impl_t *db)
{
	dbuf_dirty_record_t *dr;
	uint64_t txg = 0;

	mutex_enter(&db->db_mtx);
#ifdef HAVE_DB_DIRTY_RECORDS_LIST
	dr = list_head(&db->db_dirty_records);
#else
	dr = db->db_last_dirty;
#endif
	if (dr != NULL)
		txg = dr->dr_txg;
	mutex_exit(&db->db_mtx);

	return txg;
}
+
/*
 * Per-user object accounting ("userobj") compat.  When the installed
 * ZFS supports it, reuse its DMU_OBJACCT_PREFIX key prefix and ask the
 * objset whether the accounting data is actually present.  Otherwise
 * report the feature unavailable and hardcode the prefix to "obj-"
 * (presumably the same string DMU_OBJACCT_PREFIX expands to, so key
 * parsing stays uniform -- NOTE(review): confirm).
 */
#ifdef HAVE_DMU_USEROBJ_ACCOUNTING

#define OSD_DMU_USEROBJ_PREFIX DMU_OBJACCT_PREFIX
#define OSD_DMU_USEROBJ_PREFIX_LEN DMU_OBJACCT_PREFIX_LEN

/* True when the objset has userobj accounting data present. */
static inline bool osd_dmu_userobj_accounting_available(struct osd_device *osd)
{
	return dmu_objset_userobjspace_present(osd->od_os);
}
#else

#define OSD_DMU_USEROBJ_PREFIX "obj-"
#define OSD_DMU_USEROBJ_PREFIX_LEN 4

/* ZFS lacks userobj accounting entirely on this branch. */
static inline bool osd_dmu_userobj_accounting_available(struct osd_device *osd)
{
	return false;
}
#endif /* #ifdef HAVE_DMU_USEROBJ_ACCOUNTING */
+
/*
 * Add @key -> @val (int_num entries of int_size bytes) to ZAP @zap.
 * Returns 0 on success or a negative errno.
 *
 * When ZFS provides zap_add_by_dnode() and the caller supplied a held
 * dnode, operate on the dnode directly; otherwise fall back to the
 * objset/object-id based zap_add().
 */
static inline int osd_zap_add(struct osd_device *osd, uint64_t zap,
			      dnode_t *dn, const char *key,
			      int int_size, int int_num,
			      const void *val, dmu_tx_t *tx)
{
	LASSERT(zap != 0);

#ifdef HAVE_ZAP_ADD_BY_DNODE
	if (dn)
		return -zap_add_by_dnode(dn, key, int_size, int_num, val, tx);
#endif
	return -zap_add(osd->od_os, zap, key, int_size, int_num, val, tx);
}
+
/*
 * Remove @key from ZAP @zap.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): gated on HAVE_ZAP_ADD_BY_DNODE (not a dedicated
 * "remove" probe) -- presumably the zap_*_by_dnode() family landed in
 * ZFS together, so one configure check covers all of them; confirm.
 */
static inline int osd_zap_remove(struct osd_device *osd, uint64_t zap,
				 dnode_t *dn, const char *key,
				 dmu_tx_t *tx)
{
	LASSERT(zap != 0);

#ifdef HAVE_ZAP_ADD_BY_DNODE
	if (dn)
		return -zap_remove_by_dnode(dn, key, tx);
#endif
	return -zap_remove(osd->od_os, zap, key, tx);
}
+
+
/*
 * Look up @key in ZAP @zap, copying the value into @v (int_num entries
 * of int_size bytes).  Returns 0 on success or a negative errno.
 *
 * NOTE(review): gated on HAVE_ZAP_ADD_BY_DNODE like the add/remove
 * wrappers above -- presumably one probe covers the whole
 * zap_*_by_dnode() family; confirm against the configure checks.
 */
static inline int osd_zap_lookup(struct osd_device *osd, uint64_t zap,
				 dnode_t *dn, const char *key,
				 int int_size, int int_num, void *v)
{
	LASSERT(zap != 0);

#ifdef HAVE_ZAP_ADD_BY_DNODE
	if (dn)
		return -zap_lookup_by_dnode(dn, key, int_size, int_num, v);
#endif
	return -zap_lookup(osd->od_os, zap, key, int_size, int_num, v);
}
+
/*
 * Declare to transaction @tx an upcoming ZAP modification (@add
 * true for insertion of @name, false for removal), using the
 * dnode-based hold when the API and a held dnode are available.
 */
static inline void osd_tx_hold_zap(dmu_tx_t *tx, uint64_t zap,
				   dnode_t *dn, int add, const char *name)
{
#ifdef HAVE_DMU_TX_HOLD_ZAP_BY_DNODE
	if (dn) {
		dmu_tx_hold_zap_by_dnode(tx, dn, add, name);
		return;
	}
#endif
	dmu_tx_hold_zap(tx, zap, add, name);
}
+
/*
 * Declare to transaction @tx an upcoming write of @len bytes at @off.
 *
 * NOTE(review): the dnode-based path is gated on
 * HAVE_DMU_TX_HOLD_ZAP_BY_DNODE rather than a dedicated write probe --
 * presumably both *_by_dnode tx-hold helpers appeared together in ZFS;
 * confirm against the configure checks.
 */
static inline void osd_tx_hold_write(dmu_tx_t *tx, uint64_t oid,
				     dnode_t *dn, uint64_t off, int len)
{
#ifdef HAVE_DMU_TX_HOLD_ZAP_BY_DNODE
	if (dn) {
		dmu_tx_hold_write_by_dnode(tx, dn, off, len);
		return;
	}
#endif
	dmu_tx_hold_write(tx, oid, off, len);
}
+
/*
 * Write @size bytes from @buf at @offset within the object described
 * by @dn, inside transaction @tx.  @osd is only needed on the legacy
 * path to reach the objset; a non-NULL @dn is required on both.
 */
static inline void osd_dmu_write(struct osd_device *osd, dnode_t *dn,
				 uint64_t offset, uint64_t size,
				 const char *buf, dmu_tx_t *tx)
{
	LASSERT(dn);
#ifdef HAVE_DMU_WRITE_BY_DNODE
	dmu_write_by_dnode(dn, offset, size, buf, tx);
#else
	dmu_write(osd->od_os, dn->dn_object, offset, size, buf, tx);
#endif
}
+
/*
 * Read @size bytes at @offset from the object described by @dn into
 * @buf.  Returns 0 on success or a negative errno (ZFS return values
 * are negated).  @osd is only needed on the legacy objset-based path.
 */
static inline int osd_dmu_read(struct osd_device *osd, dnode_t *dn,
			       uint64_t offset, uint64_t size,
			       char *buf, int flags)
{
	LASSERT(dn);
#ifdef HAVE_DMU_READ_BY_DNODE
	return -dmu_read_by_dnode(dn, offset, size, buf, flags);
#else
	return -dmu_read(osd->od_os, dn->dn_object, offset, size, buf, flags);
#endif
}
+
/*
 * dmu_objset_own() compat: newer ZFS takes a 'decrypt' argument; on the
 * older 5-argument API the @decrypt flag is silently dropped.
 */
#ifdef HAVE_DMU_OBJSET_OWN_6ARG
#define osd_dmu_objset_own(name, type, ronly, decrypt, tag, os) \
	dmu_objset_own((name), (type), (ronly), (decrypt), (tag), (os))
#else
#define osd_dmu_objset_own(name, type, ronly, decrypt, tag, os) \
	dmu_objset_own((name), (type), (ronly), (tag), (os))
#endif
+
/*
 * dmu_objset_disown() compat: mirror of osd_dmu_objset_own() above --
 * the @decrypt flag is dropped when the older 2-argument API is in use.
 */
#ifdef HAVE_DMU_OBJSET_DISOWN_3ARG
#define osd_dmu_objset_disown(os, decrypt, tag) \
	dmu_objset_disown((os), (decrypt), (tag))
#else
#define osd_dmu_objset_disown(os, decrypt, tag) \
	dmu_objset_disown((os), (tag))
#endif
+
/*
 * Register index @fid (with the given key/record sizes) on @osd's
 * index-backup list.  Thin wrapper binding the device's backup list,
 * lock and stop flag to the generic lustre_index_register().
 */
static inline int
osd_index_register(struct osd_device *osd, const struct lu_fid *fid,
		   __u32 keysize, __u32 recsize)
{
	return lustre_index_register(&osd->od_dt_dev, osd_name(osd),
				     &osd->od_index_backup_list, &osd->od_lock,
				     &osd->od_index_backup_stop,
				     fid, keysize, recsize);
}
+
/*
 * Run index backup processing for @osd.  First (re)initializes the
 * index cache entry for the backup object (INDEX_BACKUP_OID, built in
 * the per-thread oti_fid); if that fails, @backup is downgraded to
 * false but lustre_index_backup() is still invoked -- NOTE(review):
 * confirm the false-flag semantics against lustre_index_backup().
 */
static inline void
osd_index_backup(const struct lu_env *env, struct osd_device *osd, bool backup)
{
	struct lu_fid *fid = &osd_oti_get(env)->oti_fid;
	int rc;

	lu_local_obj_fid(fid, INDEX_BACKUP_OID);
	rc = osd_idc_find_and_init_with_oid(env, osd, fid,
					    osd->od_index_backup_id);
	if (rc)
		backup = false;

	lustre_index_backup(env, &osd->od_dt_dev, osd_name(osd),
			    &osd->od_index_backup_list, &osd->od_lock,
			    &osd->od_index_backup_stop, backup);
}
+
/* Older ZFS lacks dmu_tx_mark_netfree(); make it a no-op there. */
#ifndef HAVE_DMU_TX_MARK_NETFREE
#define dmu_tx_mark_netfree(tx)
#endif

/* Map inode_timespec_t onto the pre-rename timestruc_t when absent. */
#ifndef HAVE_ZFS_INODE_TIMESPEC
#define inode_timespec_t timestruc_t
#endif
+
+#ifdef HAVE_DMU_OFFSET_NEXT
+#define osd_dmu_offset_next(os, obj, hole, res) \
+ dmu_offset_next((os), (obj), (hole), (res))
+#else
+#define osd_dmu_offset_next(os, obj, hole, res) (EBUSY)