* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2016, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lustre/osd-zfs/osd_internal.h
* Shared definitions and declarations for zfs/dmu osd
#include <dt_object.h>
#include <md_object.h>
#include <lustre_quota.h>
+#include <lustre_scrub.h>
+#include <obd.h>
#ifdef SHRINK_STOP
#undef SHRINK_STOP
#endif
#include <sys/zap.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
+#include <lustre_scrub.h>
/**
* By design including kmem.h overrides the Linux slab interfaces to provide
#define OSD_GRANT_FOR_LOCAL_OIDS (2ULL << 20) /* 2MB for last_rcvd, ... */
+#define OSD_MAX_CACHE_SIZE OBD_OBJECT_EOF
+
+#ifndef HAVE_ZFS_REFCOUNT_ADD
+#define zfs_refcount_add refcount_add
+#endif
+
+extern const struct dt_body_operations osd_body_scrub_ops;
+extern const struct dt_body_operations osd_body_ops;
+extern struct kmem_cache *osd_object_kmem;
+
/**
* Iterator's in-memory data structure for quota file.
*/
unsigned oiq_reset:1; /* 1 -- no need to advance */
};
-/**
- * Iterator's in-memory data structure for ZAPs
- *
- * ZFS does not store . and .. on a disk, instead they are
- * generated up on request
- * to follow this format we do the same
- */
-struct osd_zap_it {
- zap_cursor_t *ozi_zc;
- struct osd_object *ozi_obj;
- unsigned ozi_reset:1; /* 1 -- no need to advance */
- /* ozi_pos - position of the cursor:
- * 0 - before any record
- * 1 - "."
- * 2 - ".."
- * 3 - real records */
- unsigned ozi_pos:3;
- union {
- char ozi_name[MAXNAMELEN]; /* file name for dir */
- __u64 ozi_key; /* binary key for index files */
- };
/*
 * Position of a ZAP iterator cursor.
 *
 * ZFS does not keep "." and ".." on disk; the iterator synthesizes
 * them before walking the real ZAP records, and this enum tracks
 * which synthetic or real entry the cursor currently sits on.
 */
enum osd_zap_pos {
	OZI_POS_INIT	= 0,	/* before any entry */
	OZI_POS_DOT	= 1,	/* cursor at "." */
	OZI_POS_DOTDOT	= 2,	/* cursor at ".." */
	OZI_POS_REAL	= 3,	/* cursor at real entries */
};
-#define DT_IT2DT(it) (&((struct osd_zap_it *)it)->ozi_obj->oo_dt)
/*
* regular ZFS direntry
struct lu_fid lzd_fid;
} __attribute__((packed));
+/**
+ * Iterator's in-memory data structure for ZAPs
+ *
+ * ZFS does not store . and .. on a disk, instead they are
+ * generated up on request
+ * to follow this format we do the same
+ */
+struct osd_zap_it {
+ zap_cursor_t *ozi_zc;
+ struct osd_object *ozi_obj;
+ unsigned ozi_reset:1; /* 1 -- no need to advance */
+ /* ozi_pos - position of the cursor */
+ enum osd_zap_pos ozi_pos;
+ struct luz_direntry ozi_zde;
+ zap_attribute_t ozi_za;
+ union {
+ char ozi_name[MAXNAMELEN]; /* file name for dir */
+ __u64 ozi_key; /* binary key for index files */
+ };
+};
+#define DT_IT2DT(it) (&((struct osd_zap_it *)it)->ozi_obj->oo_dt)
/* Cached SA (system attribute) values for a dnode. */
struct osa_attr {
	uint64_t	mode;
	uint64_t	gid;
	uint64_t	uid;
#ifdef ZFS_PROJINHERIT
	uint64_t	projid;		/* only with project-quota support */
#endif
	uint64_t	nlink;
	uint64_t	rdev;
	uint64_t	flags;
	uint64_t	atime[2];	/* seconds, nanoseconds */
	uint64_t	mtime[2];
	uint64_t	ctime[2];
	uint64_t	btime[2];
};
oic_remote:1; /* FID isn't local */
};
-/* max.number of regular attrubites the callers may ask for */
-#define OSD_MAX_IN_BULK 13
+struct osd_inconsistent_item {
+ /* link into lustre_scrub::os_inconsistent_items,
+ * protected by lustr_scrub::os_lock. */
+ struct list_head oii_list;
+
+ /* The right FID <=> oid mapping. */
+ struct osd_idmap_cache oii_cache;
+
+ unsigned int oii_insert:1; /* insert or update mapping. */
+};
+
+struct osd_otable_it {
+ struct osd_device *ooi_dev;
+ struct lu_fid ooi_fid;
+ __u64 ooi_pos;
+ __u64 ooi_prefetched_dnode;
+ int ooi_prefetched;
+
+ /* The following bits can be updated/checked w/o lock protection.
+ * If more bits will be introduced in the future and need lock to
+ * protect, please add comment. */
+ unsigned int ooi_used_outside:1, /* Some user out of OSD
+ * uses the iteration. */
+ ooi_all_cached:1, /* No more entries can be
+ * filled into cache. */
+ ooi_user_ready:1, /* The user out of OSD is
+ * ready to iterate. */
+ ooi_waiting:1; /* it::next is waiting. */
+};
+
extern const struct dt_index_operations osd_otable_ops;

/* Max number of regular attributes the callers may ask for in one bulk:
 * one slot per uint64_t-sized field of struct osa_attr. */
#define OSD_MAX_IN_BULK	(sizeof(struct osa_attr) / sizeof(uint64_t))
struct osd_thread_info {
const struct lu_env *oti_env;
struct lu_fid oti_fid;
- /*
- * XXX temporary: for ->i_op calls.
- */
- struct timespec oti_time;
struct ost_id oti_ostid;
__u64 oti_key64[(MAXNAMELEN + 1)/sizeof(__u64)];
sa_bulk_attr_t oti_attr_bulk[OSD_MAX_IN_BULK];
};
- struct lustre_mdt_attrs oti_mdt_attrs;
+ struct lustre_mdt_attrs oti_mdt_attrs;
+ unsigned int oti_in_trans:1;
struct lu_attr oti_la;
struct osa_attr oti_osa;
zap_attribute_t oti_za;
+ zap_attribute_t oti_za2;
dmu_object_info_t oti_doi;
struct luz_direntry oti_zde;
struct osd_idmap_cache *oti_ins_cache;
int oti_ins_cache_size;
int oti_ins_cache_used;
+ /* inc by osd_trans_create and dec by osd_trans_stop */
+ int oti_ins_cache_depth;
struct lu_buf oti_xattr_lbuf;
+ zap_cursor_t oti_zc;
+ zap_cursor_t oti_zc2;
+
+ char *oti_seq_name;
+ char *oti_dir_name;
};
extern struct lu_context_key osd_key;
struct list_head ot_sa_list;
dmu_tx_t *ot_tx;
struct lquota_trans ot_quota_trans;
- __u32 ot_write_commit:1,
- ot_assigned:1;
+ __u32 ot_assigned:1;
};
-#define OSD_OI_NAME_SIZE 16
+#define OSD_OI_NAME_SIZE 24
/*
* Object Index (OI) instance.
*/
struct osd_oi {
- char oi_name[OSD_OI_NAME_SIZE]; /* unused */
+ char oi_name[OSD_OI_NAME_SIZE];
uint64_t oi_zapid;
dnode_t *oi_dn;
};
struct osd_seq {
+ uint64_t os_oid;
uint64_t *os_compat_dirs;
int os_subdir_count; /* subdir count for each seq */
u64 os_seq; /* seq number */
struct proc_dir_entry *od_proc_entry;
struct lprocfs_stats *od_stats;
+ uint64_t od_remote_parent_dir;
+ uint64_t od_index_backup_id;
uint64_t od_max_blksz;
uint64_t od_root;
uint64_t od_O_id;
od_prop_rdonly:1, /**< ZFS property readonly */
od_xattr_in_sa:1,
od_is_ost:1,
- od_posix_acl:1;
+ od_in_init:1,
+ od_posix_acl:1,
+ od_nonrotational:1;
+ unsigned int od_dnsize;
+ int od_index_backup_stop;
+ enum lustre_index_backup_policy od_index_backup_policy;
char od_mntdev[128];
char od_svname[128];
+ uuid_t od_uuid;
int od_connects;
+ int od_index;
+ __s64 od_auto_scrub_interval;
struct lu_site od_site;
- dnode_t *od_groupused_dn;
- dnode_t *od_userused_dn;
+ dnode_t *od_groupused_dn;
+ dnode_t *od_userused_dn;
+#ifdef ZFS_PROJINHERIT
+ dnode_t *od_projectused_dn;
+#endif
+
+ /* quota slave instance for inode */
+ struct qsd_instance *od_quota_slave_md;
- /* quota slave instance */
- struct qsd_instance *od_quota_slave;
+ /* quota slave instance for block */
+ struct qsd_instance *od_quota_slave_dt;
struct brw_stats od_brw_stats;
atomic_t od_r_in_flight;
/* osd seq instance */
struct lu_client_seq *od_cl_seq;
+
+ struct semaphore od_otable_sem;
+ struct osd_otable_it *od_otable_it;
+ struct lustre_scrub od_scrub;
+ struct list_head od_ios_list;
+ struct list_head od_index_backup_list;
+ struct list_head od_index_restore_list;
+ spinlock_t od_lock;
+ unsigned long long od_readcache_max_filesize;
};
+static inline struct qsd_instance *osd_def_qsd(struct osd_device *osd)
+{
+ if (osd->od_is_ost)
+ return osd->od_quota_slave_dt;
+ else
+ return osd->od_quota_slave_md;
+}
+
enum osd_destroy_type {
OSD_DESTROY_NONE = 0,
OSD_DESTROY_SYNC = 1,
__u32 oo_destroyed:1,
oo_late_xattr:1,
- oo_late_attr_set:1;
+#ifdef ZFS_PROJINHERIT
+ oo_with_projid:1,
+#endif
+ oo_late_attr_set:1,
+ oo_pfid_in_lma:1;
/* the i_flags in LMA */
__u32 oo_lma_flags;
};
uint64_t oo_parent; /* used only at object creation */
};
+ struct lu_object_header *oo_header;
};
-int osd_statfs(const struct lu_env *, struct dt_device *, struct obd_statfs *);
+int osd_statfs(const struct lu_env *, struct dt_device *, struct obd_statfs *,
+ struct obd_statfs_info *);
extern const struct dt_index_operations osd_acct_index_ops;
-extern struct lu_device_operations osd_lu_ops;
-extern struct dt_index_operations osd_dir_ops;
+extern const struct lu_device_operations osd_lu_ops;
+extern const struct dt_index_operations osd_dir_ops;
int osd_declare_quota(const struct lu_env *env, struct osd_device *osd,
- qid_t uid, qid_t gid, long long space,
- struct osd_thandle *oh, bool is_blk, int *flags,
- bool force);
+ qid_t uid, qid_t gid, qid_t projid, long long space,
+ struct osd_thandle *oh,
+ enum osd_quota_local_flags *local_flags,
+ enum osd_qid_declare_flags osd_qid_declare_flags);
uint64_t osd_objs_count_estimate(uint64_t refdbytes, uint64_t usedobjs,
uint64_t nrblocks, uint64_t est_maxblockshift);
int osd_unlinked_object_free(const struct lu_env *env, struct osd_device *osd,
static inline struct osd_object *osd_obj(const struct lu_object *o)
{
LASSERT(lu_device_is_osd(o->lo_dev));
- return container_of0(o, struct osd_object, oo_dt.do_lu);
+ return container_of(o, struct osd_object, oo_dt.do_lu);
}
static inline struct osd_device *osd_dt_dev(const struct dt_device *d)
{
LASSERT(lu_device_is_osd(&d->dd_lu_dev));
- return container_of0(d, struct osd_device, od_dt_dev);
+ return container_of(d, struct osd_device, od_dt_dev);
}
static inline struct osd_device *osd_dev(const struct lu_device *d)
{
LASSERT(lu_device_is_osd(d));
- return osd_dt_dev(container_of0(d, struct dt_device, dd_lu_dev));
+ return osd_dt_dev(container_of(d, struct dt_device, dd_lu_dev));
}
static inline struct osd_object *osd_dt_obj(const struct dt_object *d)
static inline char *osd_name(struct osd_device *osd)
{
- return osd->od_dt_dev.dd_lu_dev.ld_obd->obd_name;
+ return osd->od_svname;
+}
+
+static inline void zfs_set_bit(int nr, __u8 *addr)
+{
+ set_bit(nr, (unsigned long *)addr);
+}
+
+static inline int zfs_test_bit(int nr, __u8 *addr)
+{
+ return test_bit(nr, (const unsigned long *)addr);
+}
+
+static inline int osd_oi_fid2idx(struct osd_device *dev,
+ const struct lu_fid *fid)
+{
+ return fid->f_seq & (dev->od_oi_count - 1);
+}
+
+static inline struct osd_oi *osd_fid2oi(struct osd_device *osd,
+ const struct lu_fid *fid)
+{
+ LASSERTF(osd->od_oi_table && osd->od_oi_count >= 1,
+ "%s: "DFID", oi_count %d\n",
+ osd_name(osd), PFID(fid), osd->od_oi_count);
+
+ return osd->od_oi_table[osd_oi_fid2idx(osd, fid)];
}
#ifdef CONFIG_PROC_FS
void *buf, uint32_t buflen, struct osd_thandle *oh);
int __osd_zap_create(const struct lu_env *env, struct osd_device *osd,
dnode_t **zap_dnp, dmu_tx_t *tx, struct lu_attr *la,
- zap_flags_t flags);
-int __osd_object_create(const struct lu_env *env, struct osd_object *obj,
+ unsigned dnsize, zap_flags_t flags);
+int __osd_object_create(const struct lu_env *env, struct osd_device *osd,
+ struct osd_object *obj, const struct lu_fid *fid,
dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la);
int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
- sa_handle_t *sa_hdl, dmu_tx_t *tx,
+ struct osd_object *obj, sa_handle_t *sa_hdl, dmu_tx_t *tx,
struct lu_attr *la, uint64_t parent, nvlist_t *);
+int osd_find_new_dnode(const struct lu_env *env, dmu_tx_t *tx,
+ uint64_t oid, dnode_t **dnp);
/* osd_oi.c */
-int osd_oi_init(const struct lu_env *env, struct osd_device *o);
+int osd_oi_init(const struct lu_env *env, struct osd_device *o, bool reset);
void osd_oi_fini(const struct lu_env *env, struct osd_device *o);
int osd_fid_lookup(const struct lu_env *env,
struct osd_device *, const struct lu_fid *, uint64_t *);
struct osd_idmap_cache *osd_idc_find(const struct lu_env *env,
struct osd_device *osd,
const struct lu_fid *fid);
+int osd_idc_find_and_init_with_oid(const struct lu_env *env,
+ struct osd_device *osd,
+ const struct lu_fid *fid,
+ uint64_t oid);
+int fid_is_on_ost(const struct lu_env *env, struct osd_device *osd,
+ const struct lu_fid *fid);
+int osd_obj_find_or_create(const struct lu_env *env, struct osd_device *o,
+ uint64_t parent, const char *name, uint64_t *child,
+ const struct lu_fid *fid, bool isdir);
+
+extern unsigned int osd_oi_count;
/* osd_index.c */
+int osd_get_fid_by_oid(const struct lu_env *env, struct osd_device *osd,
+ uint64_t oid, struct lu_fid *fid);
int osd_index_try(const struct lu_env *env, struct dt_object *dt,
const struct dt_index_features *feat);
int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
uint64_t osd_zap_cursor_serialize(zap_cursor_t *zc);
int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
const struct lu_fid *fid);
+int osd_add_to_remote_parent(const struct lu_env *env,
+ struct osd_device *osd,
+ struct osd_object *obj,
+ struct osd_thandle *oh);
+int osd_delete_from_remote_parent(const struct lu_env *env,
+ struct osd_device *osd,
+ struct osd_object *obj,
+ struct osd_thandle *oh, bool destroy);
+int __osd_xattr_load_by_oid(struct osd_device *osd, uint64_t oid,
+ nvlist_t **sa);
+
+/* osd_scrub.c */
+int osd_scrub_setup(const struct lu_env *env, struct osd_device *dev,
+ bool resetoi);
+void osd_scrub_cleanup(const struct lu_env *env, struct osd_device *dev);
+int osd_scrub_start(const struct lu_env *env, struct osd_device *dev,
+ __u32 flags);
+void osd_scrub_stop(struct osd_device *dev);
+int osd_oii_insert(const struct lu_env *env, struct osd_device *dev,
+ const struct lu_fid *fid, uint64_t oid, bool insert);
+int osd_oii_lookup(struct osd_device *dev, const struct lu_fid *fid,
+ uint64_t *oid);
+
/**
 * Basic transaction credit op
 */
enum dt_txn_op {
	DTO_INDEX_INSERT,	/* add an index entry */
	DTO_INDEX_DELETE,	/* remove an index entry */
	DTO_INDEX_UPDATE,	/* rewrite an index entry */
	DTO_NR			/* number of ops, keep last */
};
+
+int osd_scrub_refresh_mapping(const struct lu_env *env,
+ struct osd_device *dev,
+ const struct lu_fid *fid,
+ uint64_t oid, enum dt_txn_op ops,
+ bool force, const char *name);
+
/* osd_xattr.c */
int __osd_sa_xattr_schedule_update(const struct lu_env *env,
int __osd_xattr_get_large(const struct lu_env *env, struct osd_device *osd,
uint64_t xattr, struct lu_buf *buf,
const char *name, int *sizep);
+int osd_xattr_get_internal(const struct lu_env *env, struct osd_object *obj,
+ struct lu_buf *buf, const char *name, int *sizep);
+int osd_xattr_get_lma(const struct lu_env *env, struct osd_object *obj,
+ struct lu_buf *buf);
int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
struct lu_buf *buf, const char *name);
int osd_declare_xattr_set(const struct lu_env *env, struct dt_object *dt,
struct osd_thandle *oh);
int __osd_sa_xattr_update(const struct lu_env *env, struct osd_object *obj,
struct osd_thandle *oh);
+
/* Bonus-buffer space consumed by the base SA attributes plus the
 * extended attributes Lustre always stores there. */
#define OSD_BASE_EA_IN_BONUS	(ZFS_SA_BASE_ATTR_SIZE +		\
				 sizeof(__u64) /* VBR VERSION */ +	\
				 sizeof(struct lustre_mdt_attrs) /* LMA */)
+
+#ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
+int osd_find_dnsize(struct osd_device *osd, int ea_in_bonus);
+#else
+static inline int
+osd_find_dnsize(struct osd_device *osd, int ea_in_bonus)
+{
+ return DN_MAX_BONUSLEN;
+}
+#endif
+
+static inline int osd_object_is_zap(dnode_t *dn)
+{
+ return (dn->dn_type == DMU_OT_DIRECTORY_CONTENTS ||
+ dn->dn_type == DMU_OT_USERGROUP_USED);
+}
+
+/* XXX: f_ver is not counted, but may differ too */
+static inline void osd_fid2str(char *buf, const struct lu_fid *fid, int len)
+{
+ snprintf(buf, len, DFID_NOBRACE, PFID(fid));
+}
+
static inline int
osd_xattr_set_internal(const struct lu_env *env, struct osd_object *obj,
const struct lu_buf *buf, const char *name, int fl,
{
return (flags & LUSTRE_APPEND_FL ? ZFS_APPENDONLY : 0) |
(flags & LUSTRE_NODUMP_FL ? ZFS_NODUMP : 0) |
+#ifdef ZFS_PROJINHERIT
+ (flags & LUSTRE_PROJINHERIT_FL ? ZFS_PROJINHERIT : 0) |
+#endif
(flags & LUSTRE_IMMUTABLE_FL ? ZFS_IMMUTABLE : 0);
}
{
return (flags & ZFS_APPENDONLY ? LUSTRE_APPEND_FL : 0) |
(flags & ZFS_NODUMP ? LUSTRE_NODUMP_FL : 0) |
+#ifdef ZFS_PROJINHERIT
+ (flags & ZFS_PROJINHERIT ? LUSTRE_PROJINHERIT_FL : 0) |
+#endif
(flags & ZFS_IMMUTABLE ? LUSTRE_IMMUTABLE_FL : 0);
}
#endif
#ifndef HAVE_DSL_POOL_CONFIG
-static inline void dsl_pool_config_enter(dsl_pool_t *dp, char *name)
+static inline void dsl_pool_config_enter(dsl_pool_t *dp, void *name)
{
}
-static inline void dsl_pool_config_exit(dsl_pool_t *dp, char *name)
+static inline void dsl_pool_config_exit(dsl_pool_t *dp, void *name)
{
}
#endif
int dnodesize, dmu_tx_t *tx)
{
if (dnodesize == 0)
- dnodesize = MAX(dmu_objset_dnodesize(os), DNODE_MIN_SIZE);
+ dnodesize = max_t(int, dmu_objset_dnodesize(os),
+ DNODE_MIN_SIZE);
return dmu_object_alloc_dnsize(os, objtype, blocksize, DMU_OT_SA,
DN_BONUS_SIZE(dnodesize), dnodesize, tx);
int indirect_blockshift, int dnodesize, dmu_tx_t *tx)
{
if (dnodesize == 0)
- dnodesize = MAX(dmu_objset_dnodesize(os), DNODE_MIN_SIZE);
+ dnodesize = max_t(int, dmu_objset_dnodesize(os),
+ DNODE_MIN_SIZE);
return zap_create_flags_dnsize(os, normflags, flags, ot,
leaf_blockshift, indirect_blockshift,
SA_HDL_PRIVATE, &obj->oo_sa_hdl);
if (rc)
return rc;
- refcount_add(&dn->dn_bonus->db_holds, osd_obj_tag);
+ zfs_refcount_add(&dn->dn_bonus->db_holds, osd_obj_tag);
return 0;
}
LASSERT(dn->dn_bonus);
db = dn->dn_bonus;
- DB_DNODE_EXIT(db);
dmu_buf_rele(&db->db, osd_obj_tag);
}
+static inline uint64_t osd_db_dirty_txg(dmu_buf_impl_t *db)
+{
+ dbuf_dirty_record_t *dr;
+ uint64_t txg = 0;
+
+ mutex_enter(&db->db_mtx);
+#ifdef HAVE_DB_DIRTY_RECORDS_LIST
+ dr = list_head(&db->db_dirty_records);
+#else
+ dr = db->db_last_dirty;
+#endif
+ if (dr != NULL)
+ txg = dr->dr_txg;
+ mutex_exit(&db->db_mtx);
+
+ return txg;
+}
+
#ifdef HAVE_DMU_USEROBJ_ACCOUNTING
#define OSD_DMU_USEROBJ_PREFIX DMU_OBJACCT_PREFIX
static inline bool osd_dmu_userobj_accounting_available(struct osd_device *osd)
{
- if (unlikely(dmu_objset_userobjspace_upgradable(osd->od_os)))
- dmu_objset_userobjspace_upgrade(osd->od_os);
-
return dmu_objset_userobjspace_present(osd->od_os);
}
#else
#endif
}
/* Newer ZFS adds a "decrypt" argument to dmu_objset_own(); hide the
 * difference behind a common 6-argument wrapper. */
#ifdef HAVE_DMU_OBJSET_OWN_6ARG
#define osd_dmu_objset_own(name, type, ronly, decrypt, tag, os)	\
	dmu_objset_own((name), (type), (ronly), (decrypt), (tag), (os))
#else
#define osd_dmu_objset_own(name, type, ronly, decrypt, tag, os)	\
	dmu_objset_own((name), (type), (ronly), (tag), (os))
#endif

/* Likewise for dmu_objset_disown(). */
#ifdef HAVE_DMU_OBJSET_DISOWN_3ARG
#define osd_dmu_objset_disown(os, decrypt, tag)	\
	dmu_objset_disown((os), (decrypt), (tag))
#else
#define osd_dmu_objset_disown(os, decrypt, tag)	\
	dmu_objset_disown((os), (tag))
#endif
+
+static inline int
+osd_index_register(struct osd_device *osd, const struct lu_fid *fid,
+ __u32 keysize, __u32 recsize)
+{
+ return lustre_index_register(&osd->od_dt_dev, osd_name(osd),
+ &osd->od_index_backup_list, &osd->od_lock,
+ &osd->od_index_backup_stop,
+ fid, keysize, recsize);
+}
+
+static inline void
+osd_index_backup(const struct lu_env *env, struct osd_device *osd, bool backup)
+{
+ struct lu_fid *fid = &osd_oti_get(env)->oti_fid;
+ int rc;
+
+ lu_local_obj_fid(fid, INDEX_BACKUP_OID);
+ rc = osd_idc_find_and_init_with_oid(env, osd, fid,
+ osd->od_index_backup_id);
+ if (rc)
+ backup = false;
+
+ lustre_index_backup(env, &osd->od_dt_dev, osd_name(osd),
+ &osd->od_index_backup_list, &osd->od_lock,
+ &osd->od_index_backup_stop, backup);
+}
+
/* Older ZFS lacks dmu_tx_mark_netfree(); make it a no-op there. */
#ifndef HAVE_DMU_TX_MARK_NETFREE
#define dmu_tx_mark_netfree(tx)
#endif

/* inode_timespec_t replaced timestruc_t in newer ZFS. */
#ifndef HAVE_ZFS_INODE_TIMESPEC
#define inode_timespec_t timestruc_t
#endif

/* Without dmu_offset_next() support report EBUSY so callers fall back. */
#ifdef HAVE_DMU_OFFSET_NEXT
#define osd_dmu_offset_next(os, obj, hole, res) \
	dmu_offset_next((os), (obj), (hole), (res))
#else
#define osd_dmu_offset_next(os, obj, hole, res) (EBUSY)
#endif
+
#endif /* _OSD_INTERNAL_H */