#ifdef HAVE_SA_SPILL_ALLOC
/*
 * Newer ZFS exposes sa_spill_alloc()/sa_spill_free() for spill-sized
 * buffers; prefer them when available.  Note that sa_spill_alloc() takes
 * only a kmem flag, so the requested @size is ignored here — presumably
 * the SA spill cache objects are always large enough for any caller in
 * this file (TODO: confirm against the ZFS version in use).
 */
static inline void *
osd_zio_buf_alloc(size_t size)
{
	return sa_spill_alloc(KM_SLEEP);
}

static inline void
osd_zio_buf_free(void *buf, size_t size)
{
	sa_spill_free(buf);
}
#else
/* older ZFS: fall back to the generic zio buffer allocator */
#define osd_zio_buf_alloc(size) zio_buf_alloc(size)
#define osd_zio_buf_free(buf, size) zio_buf_free(buf, size)
#endif
+
+#ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
/*
 * Allocate a new DMU object with an SA (system attribute) bonus buffer.
 *
 * \param os		objset to allocate the object in
 * \param objtype	DMU type of the new object
 * \param blocksize	data block size to use (0 lets the DMU choose)
 * \param dnodesize	dnode size in bytes; 0 means use the dataset's
 *			dnodesize property, but never less than DNODE_MIN_SIZE
 * \param tx		transaction this allocation belongs to
 *
 * \retval		object number of the newly allocated object
 */
static inline uint64_t
osd_dmu_object_alloc(objset_t *os, dmu_object_type_t objtype, int blocksize,
		     int dnodesize, dmu_tx_t *tx)
{
	/* 0 == "use the dataset default", clamped to the minimum size */
	if (dnodesize == 0)
		dnodesize = MAX(dmu_objset_dnodesize(os), DNODE_MIN_SIZE);

	return dmu_object_alloc_dnsize(os, objtype, blocksize, DMU_OT_SA,
				       DN_BONUS_SIZE(dnodesize), dnodesize, tx);
}
+
/*
 * Create a new ZAP object with an SA bonus buffer, honouring the
 * dataset's dnode size the same way osd_dmu_object_alloc() does.
 *
 * \param os			objset to create the ZAP in
 * \param normflags		name normalization flags for the ZAP
 * \param flags			ZAP creation flags
 * \param ot			DMU type of the new ZAP object
 * \param leaf_blockshift	log2 of the leaf block size
 * \param indirect_blockshift	log2 of the indirect block size
 * \param dnodesize		dnode size in bytes; 0 means use the
 *				dataset's dnodesize property, but never
 *				less than DNODE_MIN_SIZE
 * \param tx			transaction this creation belongs to
 *
 * \retval			object number of the new ZAP
 */
static inline uint64_t
osd_zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
		     dmu_object_type_t ot, int leaf_blockshift,
		     int indirect_blockshift, int dnodesize, dmu_tx_t *tx)
{
	/* 0 == "use the dataset default", clamped to the minimum size */
	if (dnodesize == 0)
		dnodesize = MAX(dmu_objset_dnodesize(os), DNODE_MIN_SIZE);

	return zap_create_flags_dnsize(os, normflags, flags, ot,
				       leaf_blockshift, indirect_blockshift,
				       DMU_OT_SA, DN_BONUS_SIZE(dnodesize),
				       dnodesize, tx);
}
+
+static inline int
+osd_obj_bonuslen(struct osd_object *obj)
+{
+ int bonuslen = DN_BONUS_SIZE(DNODE_MIN_SIZE);
+
+ if (obj->oo_dn != NULL && obj->oo_dn->dn_num_slots != 0) {
+ bonuslen = DN_SLOTS_TO_BONUSLEN(obj->oo_dn->dn_num_slots);
+ } else {
+ objset_t *os = osd_dtobj2objset(&obj->oo_dt);
+ int dnodesize;
+
+ if (os != NULL) {
+ dnodesize = dmu_objset_dnodesize(os);
+ if (dnodesize != 0)
+ bonuslen = DN_BONUS_SIZE(dnodesize);
+ }
+ }
+
+ return bonuslen;
+}
+#else
/*
 * Fallback for ZFS without large-dnode support: allocate with the
 * maximum legacy bonus length.  \a dnodesize is accepted for interface
 * compatibility but ignored — dnodes have a single fixed size here.
 */
static inline uint64_t
osd_dmu_object_alloc(objset_t *os, dmu_object_type_t objtype, int blocksize,
		     int dnodesize, dmu_tx_t *tx)
{
	return dmu_object_alloc(os, objtype, blocksize, DMU_OT_SA,
				DN_MAX_BONUSLEN, tx);
}
+
/*
 * Fallback for ZFS without large-dnode support: create the ZAP with the
 * maximum legacy bonus length.  \a dnodesize is accepted for interface
 * compatibility but ignored.
 */
static inline uint64_t
osd_zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
		     dmu_object_type_t ot, int leaf_blockshift,
		     int indirect_blockshift, int dnodesize, dmu_tx_t *tx)
{
	return zap_create_flags(os, normflags, flags, ot, leaf_blockshift,
				indirect_blockshift, DMU_OT_SA,
				DN_MAX_BONUSLEN, tx);
}
+
/*
 * Fallback for ZFS without large-dnode support: the bonus length is
 * always the legacy maximum, regardless of the object.
 */
static inline int
osd_obj_bonuslen(struct osd_object *obj)
{
	return DN_MAX_BONUSLEN;
}
+#endif /* HAVE_DMU_OBJECT_ALLOC_DNSIZE */
+
/*
 * dmu_prefetch() gained level/length/priority arguments in newer ZFS.
 * Map the 6-argument wrapper onto whichever signature is available; on
 * the old 4-argument API the (len) and (pri) arguments are dropped.
 */
#ifdef HAVE_DMU_PREFETCH_6ARG
#define osd_dmu_prefetch(os, obj, lvl, off, len, pri)	\
	dmu_prefetch((os), (obj), (lvl), (off), (len), (pri))
#else
#define osd_dmu_prefetch(os, obj, lvl, off, len, pri)	\
	dmu_prefetch((os), (obj), (lvl), (off))
#endif
+
/*
 * Release a dnode that is held through its bonus dbuf.
 *
 * The dnode must have a bonus dbuf attached (asserted below); the hold
 * is dropped by exiting the DB_DNODE section and releasing the bonus
 * dbuf reference taken under osd_obj_tag.
 */
static inline void osd_dnode_rele(dnode_t *dn)
{
	dmu_buf_impl_t *db;
	LASSERT(dn);
	LASSERT(dn->dn_bonus);
	db = dn->dn_bonus;

	/* order matters: leave the DB_DNODE section before dropping the
	 * dbuf reference, which may free the dbuf */
	DB_DNODE_EXIT(db);
	dmu_buf_rele(&db->db, osd_obj_tag);
}
+
#ifdef HAVE_DMU_USEROBJ_ACCOUNTING

/* ZFS-provided key prefix for per-user object accounting entries */
#define OSD_DMU_USEROBJ_PREFIX DMU_OBJACCT_PREFIX

/*
 * Check whether per-user object accounting is usable on \a osd.
 *
 * If the objset can be upgraded to maintain userobj space accounting,
 * trigger the upgrade first (presumably completed asynchronously by
 * ZFS — confirm against the ZFS version in use), then report whether
 * the accounting data is actually present yet.
 */
static inline bool osd_dmu_userobj_accounting_available(struct osd_device *osd)
{
	if (unlikely(dmu_objset_userobjspace_upgradable(osd->od_os)))
		dmu_objset_userobjspace_upgrade(osd->od_os);

	return dmu_objset_userobjspace_present(osd->od_os);
}
#else

/* no userobj accounting in this ZFS; keep a fixed prefix for symmetry */
#define OSD_DMU_USEROBJ_PREFIX "obj-"

static inline bool osd_dmu_userobj_accounting_available(struct osd_device *osd)
{
	return false;
}
#endif /* #ifdef HAVE_DMU_USEROBJ_ACCOUNTING */
+