4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2016, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/osd-zfs/osd_internal.h
33 * Shared definitions and declarations for zfs/dmu osd
35 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
36 * Author: Mike Pershin <tappro@whamcloud.com>
37 * Author: Johann Lombardi <johann@whamcloud.com>
40 #ifndef _OSD_INTERNAL_H
41 #define _OSD_INTERNAL_H
43 #include <dt_object.h>
44 #include <md_object.h>
45 #include <lustre_quota.h>
50 #include <sys/nvpair.h>
51 #include <sys/zfs_znode.h>
54 #include <sys/dmu_objset.h>
57 * By design including kmem.h overrides the Linux slab interfaces to provide
58 * the Illumos kmem cache interfaces. To override this and gain access to
59 * the Linux interfaces these preprocessor macros must be undefined.
61 #ifdef kmem_cache_destroy
62 #undef kmem_cache_destroy
65 #ifdef kmem_cache_create
66 #undef kmem_cache_create
69 #ifdef kmem_cache_alloc
70 #undef kmem_cache_alloc
73 #ifdef kmem_cache_free
74 #undef kmem_cache_free
77 #define LUSTRE_ROOT_FID_SEQ 0
78 #define DMU_OSD_SVNAME "svname"
79 #define DMU_OSD_OI_NAME_BASE "oi"
81 #define OSD_GFP_IO (GFP_NOFS | __GFP_HIGHMEM)
83 /* Statfs space reservation for grant, fragmentation, and unlink space. */
84 #define OSD_STATFS_RESERVED_SIZE (16ULL << 20) /* reserve 16MB minimum */
85 #define OSD_STATFS_RESERVED_SHIFT (7) /* reserve 0.78% of all space */
87 /* Statfs {minimum, safe estimate, and maximum} dnodes per block */
88 #define OSD_DNODE_MIN_BLKSHIFT (DNODES_PER_BLOCK_SHIFT)
89 #define OSD_DNODE_EST_BLKSHIFT (12) /* est 4KB/dnode */
90 #define OSD_DNODE_EST_COUNT 4096
92 #define OSD_GRANT_FOR_LOCAL_OIDS (2ULL << 20) /* 2MB for last_rcvd, ... */
95 * Iterator's in-memory data structure for quota file.
98 struct osd_object *oiq_obj;
99 /* DMU accounting object id */
102 zap_cursor_t *oiq_zc;
103 /** identifier for current quota record */
105 unsigned oiq_reset:1; /* 1 -- no need to advance */
109 * Iterator's in-memory data structure for ZAPs
111 * ZFS does not store . and .. on a disk, instead they are
112 * generated upon request
113 * to follow this format we do the same
116 zap_cursor_t *ozi_zc;
117 struct osd_object *ozi_obj;
118 unsigned ozi_reset:1; /* 1 -- no need to advance */
119 /* ozi_pos - position of the cursor:
120 * 0 - before any record
123 * 3 - real records */
126 char ozi_name[MAXNAMELEN]; /* file name for dir */
127 __u64 ozi_key; /* binary key for index files */
130 #define DT_IT2DT(it) (&((struct osd_zap_it *)it)->ozi_obj->oo_dt)
133 * regular ZFS direntry
135 struct zpl_direntry {
136 uint64_t zde_dnode:48,
139 } __attribute__((packed));
142 * lustre direntry adds a fid to regular ZFS direntry
144 struct luz_direntry {
145 struct zpl_direntry lzd_reg;
146 struct lu_fid lzd_fid;
147 } __attribute__((packed));
150 /* cached SA attributes */
165 #define OSD_INS_CACHE_SIZE 8
168 struct osd_idmap_cache {
169 struct osd_device *oic_dev;
170 struct lu_fid oic_fid;
171 /** max 2^48 dnodes per dataset, avoid spilling into another word */
172 uint64_t oic_dnode:DN_MAX_OBJECT_SHIFT,
173 oic_remote:1; /* FID isn't local */
176 /* max. number of regular attributes the callers may ask for */
177 #define OSD_MAX_IN_BULK 13
179 struct osd_thread_info {
180 const struct lu_env *oti_env;
182 struct lu_fid oti_fid;
184 * XXX temporary: for ->i_op calls.
186 struct timespec oti_time;
188 struct ost_id oti_ostid;
194 char oti_key[MAXNAMELEN + 1];
195 __u64 oti_key64[(MAXNAMELEN + 1)/sizeof(__u64)];
196 sa_bulk_attr_t oti_attr_bulk[OSD_MAX_IN_BULK];
198 struct lustre_mdt_attrs oti_mdt_attrs;
200 struct lu_attr oti_la;
201 struct osa_attr oti_osa;
202 zap_attribute_t oti_za;
203 dmu_object_info_t oti_doi;
204 struct luz_direntry oti_zde;
206 struct lquota_id_info oti_qi;
207 struct lu_seq_range oti_seq_range;
209 /* dedicated OI cache for insert (which needs inum) */
210 struct osd_idmap_cache *oti_ins_cache;
211 int oti_ins_cache_size;
212 int oti_ins_cache_used;
215 extern struct lu_context_key osd_key;
217 static inline struct osd_thread_info *osd_oti_get(const struct lu_env *env)
219 return lu_context_key_get(&env->le_ctx, &osd_key);
223 struct thandle ot_super;
224 struct list_head ot_dcb_list;
225 struct list_head ot_stop_dcb_list;
226 struct list_head ot_unlinked_list;
227 struct list_head ot_sa_list;
228 struct semaphore ot_sa_lock;
230 struct lquota_trans ot_quota_trans;
231 __u32 ot_write_commit:1,
235 #define OSD_OI_NAME_SIZE 16
238 * Object Index (OI) instance.
241 char oi_name[OSD_OI_NAME_SIZE]; /* unused */
247 uint64_t *os_compat_dirs;
248 int os_subdir_count; /* subdir count for each seq */
249 u64 os_seq; /* seq number */
250 struct list_head os_seq_list; /* list to seq_list */
253 struct osd_seq_list {
254 rwlock_t osl_seq_list_lock; /* lock for seq_list */
255 struct list_head osl_seq_list; /* list head for seq */
256 struct semaphore osl_seq_init_sem;
259 #define OSD_OST_MAP_SIZE 32
266 struct dt_device od_dt_dev;
267 /* information about underlying file system */
268 struct objset *od_os;
269 uint64_t od_rootid; /* id of root znode */
270 dnode_t *od_unlinked; /* dnode of unlinked zapobj */
271 /* SA attr mapping->id,
272 * name is the same as in ZFS to use defines SA_ZPL_...*/
273 sa_attr_type_t *z_attr_table;
275 struct proc_dir_entry *od_proc_entry;
276 struct lprocfs_stats *od_stats;
278 uint64_t od_max_blksz;
281 struct osd_oi **od_oi_table;
282 unsigned int od_oi_count;
283 struct osd_seq_list od_seq_list;
285 unsigned int od_dev_set_rdonly:1, /**< osd_ro() called */
286 od_prop_rdonly:1, /**< ZFS property readonly */
295 struct lu_site od_site;
297 /* object IDs of the inode accounting indexes */
298 uint64_t od_iusr_oid;
299 uint64_t od_igrp_oid;
300 dnode_t *od_groupused_dn;
301 dnode_t *od_userused_dn;
303 /* quota slave instance */
304 struct qsd_instance *od_quota_slave;
306 struct brw_stats od_brw_stats;
307 atomic_t od_r_in_flight;
308 atomic_t od_w_in_flight;
310 /* used to debug zerocopy logic: the fields track all
311 * allocated, loaned and referenced buffers in use.
312 * to be removed once the change is tested well. */
313 atomic_t od_zerocopy_alloc;
314 atomic_t od_zerocopy_loan;
315 atomic_t od_zerocopy_pin;
317 arc_prune_t *arc_prune_cb;
319 /* osd seq instance */
320 struct lu_client_seq *od_cl_seq;
323 enum osd_destroy_type {
324 OSD_DESTROY_NONE = 0,
325 OSD_DESTROY_SYNC = 1,
326 OSD_DESTROY_ASYNC = 2,
330 struct dt_object oo_dt;
332 * Inode for file system object represented by this osd_object. This
333 * inode is pinned for the whole duration of lu_object life.
335 * Not modified concurrently (either setup early during object
336 * creation, or assigned by osd_object_create() under write lock).
339 sa_handle_t *oo_sa_hdl;
340 nvlist_t *oo_sa_xattr;
341 struct list_head oo_sa_linkage;
343 /* used to implement osd_object_*_{lock|unlock} */
344 struct rw_semaphore oo_sem;
346 /* to serialize some updates: destroy vs. others,
347 * xattr_set, object block size change etc */
348 struct rw_semaphore oo_guard;
350 /* protected by oo_guard */
351 struct list_head oo_unlinked_linkage;
353 /* cached attributes */
354 rwlock_t oo_attr_lock;
355 struct lu_attr oo_attr;
357 /* external dnode holding large EAs, protected by oo_guard */
359 enum osd_destroy_type oo_destroy;
361 __u32 oo_destroyed:1;
363 /* the i_flags in LMA */
366 int oo_ea_in_bonus; /* EA bytes we expect */
368 /* record size for index file */
369 unsigned char oo_keysize;
370 unsigned char oo_recsize;
371 unsigned char oo_recusize; /* unit size */
376 int osd_statfs(const struct lu_env *, struct dt_device *, struct obd_statfs *);
377 extern const struct dt_index_operations osd_acct_index_ops;
378 dnode_t *osd_quota_fid2dmu(const struct osd_device *, const struct lu_fid *fid);
379 extern struct lu_device_operations osd_lu_ops;
380 extern struct dt_index_operations osd_dir_ops;
381 int osd_declare_quota(const struct lu_env *env, struct osd_device *osd,
382 qid_t uid, qid_t gid, long long space,
383 struct osd_thandle *oh, bool is_blk, int *flags,
385 uint64_t osd_objs_count_estimate(uint64_t refdbytes, uint64_t usedobjs,
386 uint64_t nrblocks, uint64_t est_maxblockshift);
387 int osd_unlinked_object_free(const struct lu_env *env, struct osd_device *osd,
393 static inline int lu_device_is_osd(const struct lu_device *d)
395 return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &osd_lu_ops);
398 static inline struct osd_object *osd_obj(const struct lu_object *o)
400 LASSERT(lu_device_is_osd(o->lo_dev));
401 return container_of0(o, struct osd_object, oo_dt.do_lu);
404 static inline struct osd_device *osd_dt_dev(const struct dt_device *d)
406 LASSERT(lu_device_is_osd(&d->dd_lu_dev));
407 return container_of0(d, struct osd_device, od_dt_dev);
410 static inline struct osd_device *osd_dev(const struct lu_device *d)
412 LASSERT(lu_device_is_osd(d));
413 return osd_dt_dev(container_of0(d, struct dt_device, dd_lu_dev));
416 static inline struct osd_object *osd_dt_obj(const struct dt_object *d)
418 return osd_obj(&d->do_lu);
421 static inline struct osd_device *osd_obj2dev(const struct osd_object *o)
423 return osd_dev(o->oo_dt.do_lu.lo_dev);
426 static inline struct lu_device *osd2lu_dev(struct osd_device *osd)
428 return &osd->od_dt_dev.dd_lu_dev;
431 static inline struct objset * osd_dtobj2objset(struct dt_object *o)
433 return osd_dev(o->do_lu.lo_dev)->od_os;
436 static inline int osd_invariant(const struct osd_object *obj)
442 * Put the osd object once done with it.
444 * \param obj osd object that needs to be put
446 static inline void osd_object_put(const struct lu_env *env,
447 struct osd_object *obj)
449 dt_object_put(env, &obj->oo_dt);
/* Check the osd-level invariant for a generic lu_object. */
static inline int osd_object_invariant(const struct lu_object *l)
{
	return osd_invariant(osd_obj(l));
}
457 static inline struct seq_server_site *osd_seq_site(struct osd_device *osd)
459 return osd->od_dt_dev.dd_lu_dev.ld_site->ld_seq_site;
462 static inline char *osd_name(struct osd_device *osd)
464 return osd->od_dt_dev.dd_lu_dev.ld_obd->obd_name;
467 #ifdef CONFIG_PROC_FS
469 LPROC_OSD_READ_BYTES = 0,
470 LPROC_OSD_WRITE_BYTES = 1,
471 LPROC_OSD_GET_PAGE = 2,
472 LPROC_OSD_NO_PAGE = 3,
473 LPROC_OSD_CACHE_ACCESS = 4,
474 LPROC_OSD_CACHE_HIT = 5,
475 LPROC_OSD_CACHE_MISS = 6,
476 LPROC_OSD_COPY_IO = 7,
477 LPROC_OSD_ZEROCOPY_IO = 8,
478 LPROC_OSD_TAIL_IO = 9,
482 extern struct kmem_cache *osd_zapit_cachep;
484 extern struct lprocfs_vars lprocfs_osd_obd_vars[];
486 int osd_procfs_init(struct osd_device *osd, const char *name);
487 int osd_procfs_fini(struct osd_device *osd);
490 extern char *osd_obj_tag;
491 void osd_object_sa_dirty_rele(struct osd_thandle *oh);
492 int __osd_obj2dnode(objset_t *os, uint64_t oid, dnode_t **dnp);
493 struct lu_object *osd_object_alloc(const struct lu_env *env,
494 const struct lu_object_header *hdr,
495 struct lu_device *d);
496 int osd_object_sa_update(struct osd_object *obj, sa_attr_type_t type,
497 void *buf, uint32_t buflen, struct osd_thandle *oh);
498 int __osd_zap_create(const struct lu_env *env, struct osd_device *osd,
499 dnode_t **zap_dnp, dmu_tx_t *tx, struct lu_attr *la,
501 int __osd_object_create(const struct lu_env *env, struct osd_object *obj,
502 dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la);
503 int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
504 sa_handle_t *sa_hdl, dmu_tx_t *tx,
505 struct lu_attr *la, uint64_t parent);
508 int osd_oi_init(const struct lu_env *env, struct osd_device *o);
509 void osd_oi_fini(const struct lu_env *env, struct osd_device *o);
510 int osd_fid_lookup(const struct lu_env *env,
511 struct osd_device *, const struct lu_fid *, uint64_t *);
512 uint64_t osd_get_name_n_idx(const struct lu_env *env, struct osd_device *osd,
513 const struct lu_fid *fid, char *buf, int bufsize,
515 int osd_options_init(void);
516 int osd_ost_seq_exists(const struct lu_env *env, struct osd_device *osd,
518 int osd_idc_find_and_init(const struct lu_env *env, struct osd_device *osd,
519 struct osd_object *obj);
520 struct osd_idmap_cache *osd_idc_find_or_init(const struct lu_env *env,
521 struct osd_device *osd,
522 const struct lu_fid *fid);
523 struct osd_idmap_cache *osd_idc_find(const struct lu_env *env,
524 struct osd_device *osd,
525 const struct lu_fid *fid);
528 int osd_index_try(const struct lu_env *env, struct dt_object *dt,
529 const struct dt_index_features *feat);
530 int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
531 u64 seq, struct lu_seq_range *range);
532 void osd_zap_cursor_init_serialized(zap_cursor_t *zc, struct objset *os,
533 uint64_t id, uint64_t dirhash);
534 int osd_zap_cursor_init(zap_cursor_t **zc, struct objset *os,
535 uint64_t id, uint64_t dirhash);
536 void osd_zap_cursor_fini(zap_cursor_t *zc);
537 uint64_t osd_zap_cursor_serialize(zap_cursor_t *zc);
538 int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
539 const struct lu_fid *fid);
542 int __osd_xattr_load(struct osd_device *osd, sa_handle_t *hdl, nvlist_t **sa);
543 int __osd_xattr_get_large(const struct lu_env *env, struct osd_device *osd,
544 uint64_t xattr, struct lu_buf *buf,
545 const char *name, int *sizep);
546 int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
547 struct lu_buf *buf, const char *name);
548 int osd_declare_xattr_set(const struct lu_env *env, struct dt_object *dt,
549 const struct lu_buf *buf, const char *name,
550 int fl, struct thandle *handle);
551 int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
552 const struct lu_buf *buf, const char *name, int fl,
553 struct thandle *handle);
554 int osd_declare_xattr_del(const struct lu_env *env, struct dt_object *dt,
555 const char *name, struct thandle *handle);
556 int osd_xattr_del(const struct lu_env *env, struct dt_object *dt,
557 const char *name, struct thandle *handle);
558 void osd_declare_xattrs_destroy(const struct lu_env *env,
559 struct osd_object *obj,
560 struct osd_thandle *oh);
561 int osd_xattrs_destroy(const struct lu_env *env,
562 struct osd_object *obj, struct osd_thandle *oh);
563 int osd_xattr_list(const struct lu_env *env, struct dt_object *dt,
564 const struct lu_buf *lb);
565 void __osd_xattr_declare_set(const struct lu_env *env, struct osd_object *obj,
566 int vallen, const char *name, struct osd_thandle *oh);
/* Prototype: set an xattr stored in the object's SA; fixed stray ";;". */
int __osd_sa_xattr_set(const struct lu_env *env, struct osd_object *obj,
		       const struct lu_buf *buf, const char *name, int fl,
		       struct osd_thandle *oh);
570 int __osd_xattr_set(const struct lu_env *env, struct osd_object *obj,
571 const struct lu_buf *buf, const char *name, int fl,
572 struct osd_thandle *oh);
573 int __osd_sa_xattr_update(const struct lu_env *env, struct osd_object *obj,
574 struct osd_thandle *oh);
576 osd_xattr_set_internal(const struct lu_env *env, struct osd_object *obj,
577 const struct lu_buf *buf, const char *name, int fl,
578 struct osd_thandle *oh)
582 if (unlikely(!dt_object_exists(&obj->oo_dt) || obj->oo_destroyed))
586 if (osd_obj2dev(obj)->od_xattr_in_sa) {
587 rc = __osd_sa_xattr_set(env, obj, buf, name, fl, oh);
589 rc = __osd_xattr_set(env, obj, buf, name, fl, oh);
591 rc = __osd_xattr_set(env, obj, buf, name, fl, oh);
597 static inline uint64_t attrs_fs2zfs(const uint32_t flags)
599 return (flags & LUSTRE_APPEND_FL ? ZFS_APPENDONLY : 0) |
600 (flags & LUSTRE_NODUMP_FL ? ZFS_NODUMP : 0) |
601 (flags & LUSTRE_IMMUTABLE_FL ? ZFS_IMMUTABLE : 0);
604 static inline uint32_t attrs_zfs2fs(const uint64_t flags)
606 return (flags & ZFS_APPENDONLY ? LUSTRE_APPEND_FL : 0) |
607 (flags & ZFS_NODUMP ? LUSTRE_NODUMP_FL : 0) |
608 (flags & ZFS_IMMUTABLE ? LUSTRE_IMMUTABLE_FL : 0);
613 #ifndef HAVE_DSL_POOL_CONFIG
614 static inline void dsl_pool_config_enter(dsl_pool_t *dp, char *name)
618 static inline void dsl_pool_config_exit(dsl_pool_t *dp, char *name)
623 #ifdef HAVE_SPA_MAXBLOCKSIZE
624 #define osd_spa_maxblocksize(spa) spa_maxblocksize(spa)
625 #define osd_spa_maxblockshift(spa) fls64(spa_maxblocksize(spa) - 1)
627 #define osd_spa_maxblocksize(spa) SPA_MAXBLOCKSIZE
628 #define osd_spa_maxblockshift(spa) SPA_MAXBLOCKSHIFT
629 #define SPA_OLD_MAXBLOCKSIZE SPA_MAXBLOCKSIZE
632 #ifdef HAVE_SA_SPILL_ALLOC
634 osd_zio_buf_alloc(size_t size)
636 return sa_spill_alloc(KM_SLEEP);
640 osd_zio_buf_free(void *buf, size_t size)
645 #define osd_zio_buf_alloc(size) zio_buf_alloc(size)
646 #define osd_zio_buf_free(buf, size) zio_buf_free(buf, size)
649 #ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
650 static inline uint64_t
651 osd_dmu_object_alloc(objset_t *os, dmu_object_type_t objtype, int blocksize,
652 int dnodesize, dmu_tx_t *tx)
655 dnodesize = MAX(dmu_objset_dnodesize(os), DNODE_MIN_SIZE);
657 return dmu_object_alloc_dnsize(os, objtype, blocksize, DMU_OT_SA,
658 DN_BONUS_SIZE(dnodesize), dnodesize, tx);
661 static inline uint64_t
662 osd_zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
663 dmu_object_type_t ot, int leaf_blockshift,
664 int indirect_blockshift, int dnodesize, dmu_tx_t *tx)
667 dnodesize = MAX(dmu_objset_dnodesize(os), DNODE_MIN_SIZE);
669 return zap_create_flags_dnsize(os, normflags, flags, ot,
670 leaf_blockshift, indirect_blockshift,
671 DMU_OT_SA, DN_BONUS_SIZE(dnodesize),
676 osd_obj_bonuslen(struct osd_object *obj)
678 int bonuslen = DN_BONUS_SIZE(DNODE_MIN_SIZE);
680 if (obj->oo_dn != NULL && obj->oo_dn->dn_num_slots != 0) {
681 bonuslen = DN_SLOTS_TO_BONUSLEN(obj->oo_dn->dn_num_slots);
683 objset_t *os = osd_dtobj2objset(&obj->oo_dt);
687 dnodesize = dmu_objset_dnodesize(os);
689 bonuslen = DN_BONUS_SIZE(dnodesize);
696 static inline uint64_t
697 osd_dmu_object_alloc(objset_t *os, dmu_object_type_t objtype, int blocksize,
698 int dnodesize, dmu_tx_t *tx)
700 return dmu_object_alloc(os, objtype, blocksize, DMU_OT_SA,
701 DN_MAX_BONUSLEN, tx);
704 static inline uint64_t
705 osd_zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
706 dmu_object_type_t ot, int leaf_blockshift,
707 int indirect_blockshift, int dnodesize, dmu_tx_t *tx)
709 return zap_create_flags(os, normflags, flags, ot, leaf_blockshift,
710 indirect_blockshift, DMU_OT_SA,
711 DN_MAX_BONUSLEN, tx);
715 osd_obj_bonuslen(struct osd_object *obj)
717 return DN_MAX_BONUSLEN;
719 #endif /* HAVE_DMU_OBJECT_ALLOC_DNSIZE */
721 #ifdef HAVE_DMU_PREFETCH_6ARG
722 #define osd_dmu_prefetch(os, obj, lvl, off, len, pri) \
723 dmu_prefetch((os), (obj), (lvl), (off), (len), (pri))
725 #define osd_dmu_prefetch(os, obj, lvl, off, len, pri) \
726 dmu_prefetch((os), (obj), (lvl), (off))
729 static inline int osd_sa_handle_get(struct osd_object *obj)
731 struct osd_device *osd = osd_obj2dev(obj);
732 dnode_t *dn = obj->oo_dn;
738 dbuf_read(dn->dn_bonus, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH);
739 rc = -sa_handle_get_from_db(osd->od_os, &dn->dn_bonus->db, obj,
740 SA_HDL_PRIVATE, &obj->oo_sa_hdl);
743 refcount_add(&dn->dn_bonus->db_holds, osd_obj_tag);
747 static inline void osd_dnode_rele(dnode_t *dn)
751 LASSERT(dn->dn_bonus);
755 dmu_buf_rele(&db->db, osd_obj_tag);
758 #ifdef HAVE_DMU_USEROBJ_ACCOUNTING
760 #define OSD_DMU_USEROBJ_PREFIX DMU_OBJACCT_PREFIX
762 static inline bool osd_dmu_userobj_accounting_available(struct osd_device *osd)
764 if (unlikely(dmu_objset_userobjspace_upgradable(osd->od_os)))
765 dmu_objset_userobjspace_upgrade(osd->od_os);
767 return dmu_objset_userobjspace_present(osd->od_os);
771 #define OSD_DMU_USEROBJ_PREFIX "obj-"
773 static inline bool osd_dmu_userobj_accounting_available(struct osd_device *osd)
777 #endif /* #ifdef HAVE_DMU_USEROBJ_ACCOUNTING */
779 static inline int osd_zap_add(struct osd_device *osd, uint64_t zap,
780 dnode_t *dn, const char *key,
781 int int_size, int int_num,
782 const void *val, dmu_tx_t *tx)
786 #ifdef HAVE_ZAP_ADD_BY_DNODE
788 return -zap_add_by_dnode(dn, key, int_size, int_num, val, tx);
790 return -zap_add(osd->od_os, zap, key, int_size, int_num, val, tx);
793 static inline int osd_zap_remove(struct osd_device *osd, uint64_t zap,
794 dnode_t *dn, const char *key,
799 #ifdef HAVE_ZAP_ADD_BY_DNODE
801 return -zap_remove_by_dnode(dn, key, tx);
803 return -zap_remove(osd->od_os, zap, key, tx);
807 static inline int osd_zap_lookup(struct osd_device *osd, uint64_t zap,
808 dnode_t *dn, const char *key,
809 int int_size, int int_num, void *v)
813 #ifdef HAVE_ZAP_ADD_BY_DNODE
815 return -zap_lookup_by_dnode(dn, key, int_size, int_num, v);
817 return -zap_lookup(osd->od_os, zap, key, int_size, int_num, v);
820 static inline void osd_tx_hold_zap(dmu_tx_t *tx, uint64_t zap,
821 dnode_t *dn, int add, const char *name)
823 #ifdef HAVE_DMU_TX_HOLD_ZAP_BY_DNODE
825 dmu_tx_hold_zap_by_dnode(tx, dn, add, name);
829 dmu_tx_hold_zap(tx, zap, add, name);
832 static inline void osd_tx_hold_write(dmu_tx_t *tx, uint64_t oid,
833 dnode_t *dn, uint64_t off, int len)
835 #ifdef HAVE_DMU_TX_HOLD_ZAP_BY_DNODE
837 dmu_tx_hold_write_by_dnode(tx, dn, off, len);
841 dmu_tx_hold_write(tx, oid, off, len);
844 static inline void osd_dmu_write(struct osd_device *osd, dnode_t *dn,
845 uint64_t offset, uint64_t size,
846 const char *buf, dmu_tx_t *tx)
849 #ifdef HAVE_DMU_WRITE_BY_DNODE
850 dmu_write_by_dnode(dn, offset, size, buf, tx);
852 dmu_write(osd->od_os, dn->dn_object, offset, size, buf, tx);
856 static inline int osd_dmu_read(struct osd_device *osd, dnode_t *dn,
857 uint64_t offset, uint64_t size,
858 char *buf, int flags)
861 #ifdef HAVE_DMU_READ_BY_DNODE
862 return -dmu_read_by_dnode(dn, offset, size, buf, flags);
864 return -dmu_read(osd->od_os, dn->dn_object, offset, size, buf, flags);
868 #endif /* _OSD_INTERNAL_H */