4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * lustre/osd-zfs/osd_internal.h
32 * Shared definitions and declarations for zfs/dmu osd
34 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
35 * Author: Mike Pershin <tappro@whamcloud.com>
36 * Author: Johann Lombardi <johann@whamcloud.com>
39 #ifndef _OSD_INTERNAL_H
40 #define _OSD_INTERNAL_H
42 #include <linux/refcount.h>
43 #include <dt_object.h>
44 #include <md_object.h>
45 #include <lustre_quota.h>
46 #include <lustre_scrub.h>
52 #include <sys/nvpair.h>
53 #ifdef HAVE_ZFS_REFCOUNT_HEADER
54 #include <sys/zfs_refcount.h>
56 #include <sys/zfs_znode.h>
59 #include <sys/dmu_objset.h>
60 #include <lustre_scrub.h>
63 * By design including kmem.h overrides the Linux slab interfaces to provide
64 * the Illumos kmem cache interfaces. To override this and gain access to
65 * the Linux interfaces these preprocessor macros must be undefined.
67 #ifdef kmem_cache_destroy
68 #undef kmem_cache_destroy
71 #ifdef kmem_cache_create
72 #undef kmem_cache_create
75 #ifdef kmem_cache_alloc
76 #undef kmem_cache_alloc
79 #ifdef kmem_cache_free
80 #undef kmem_cache_free
/* ZFS release version packed with the same encoding used for Lustre
 * OBD connect versions, so the two can be compared/reported uniformly. */
#define ZFS_VERSION_CODE \
	OBD_OCD_VERSION(ZFS_MAJOR, ZFS_MINOR, ZFS_PATCH, ZFS_FIX)
86 #define LUSTRE_ROOT_FID_SEQ 0
87 #define DMU_OSD_SVNAME "svname"
88 #define DMU_OSD_OI_NAME_BASE "oi"
90 #define OSD_GFP_IO (GFP_NOFS | __GFP_HIGHMEM)
92 /* Statfs space reservation for grant, fragmentation, and unlink space. */
93 #define OSD_STATFS_RESERVED_SIZE (16ULL << 20) /* reserve 16MB minimum */
94 #define OSD_STATFS_RESERVED_SHIFT (7) /* reserve 0.78% of all space */
96 /* Statfs {minimum, safe estimate, and maximum} dnodes per block */
97 #define OSD_DNODE_MIN_BLKSHIFT (DNODES_PER_BLOCK_SHIFT)
98 #define OSD_DNODE_EST_BLKSHIFT (12) /* est 4KB/dnode */
99 #define OSD_DNODE_EST_COUNT 4096
101 #define OSD_GRANT_FOR_LOCAL_OIDS (2ULL << 20) /* 2MB for last_rcvd, ... */
103 #define OSD_MAX_CACHE_SIZE OBD_OBJECT_EOF
105 #ifndef HAVE_ZFS_REFCOUNT_HEADER
106 #ifndef HAVE_ZFS_REFCOUNT_ADD
107 #define zfs_refcount_add refcount_add
111 extern const struct dt_body_operations osd_body_scrub_ops;
112 extern const struct dt_body_operations osd_body_ops;
113 extern struct kmem_cache *osd_object_kmem;
116 * Iterator's in-memory data structure for quota file.
118 struct osd_it_quota {
119 struct osd_object *oiq_obj;
120 /* DMU accounting object id */
123 zap_cursor_t *oiq_zc;
124 /** identifier for current quota record */
126 unsigned oiq_reset:1; /* 1 -- no need to advance */
131 OZI_POS_DOT = 1, /* cursor at . */
132 OZI_POS_DOTDOT = 2, /* cursor at .. */
133 OZI_POS_REAL = 3, /* cursor at real entries */
137 * regular ZFS direntry
139 struct zpl_direntry {
140 uint64_t zde_dnode:48,
143 } __attribute__((packed));
146 * lustre direntry adds a fid to regular ZFS direntry
struct luz_direntry {
	struct zpl_direntry lzd_reg;	/* regular on-disk ZFS direntry */
	struct lu_fid lzd_fid;		/* Lustre FID of the entry's target */
} __attribute__((packed));
154 * Iterator's in-memory data structure for ZAPs
 * ZFS does not store "." and ".." on disk; instead they are
 * generated upon request.
 * To follow that format we do the same here.
161 zap_cursor_t *ozi_zc;
162 struct osd_object *ozi_obj;
163 unsigned ozi_reset:1; /* 1 -- no need to advance */
164 /* ozi_pos - position of the cursor */
165 enum osd_zap_pos ozi_pos;
166 struct luz_direntry ozi_zde;
167 zap_attribute_t ozi_za;
169 char ozi_name[MAXNAMELEN]; /* file name for dir */
170 __u64 ozi_key; /* binary key for index files */
173 #define DT_IT2DT(it) (&((struct osd_zap_it *)it)->ozi_obj->oo_dt)
175 /* cached SA attributes */
180 #ifdef ZFS_PROJINHERIT
194 #define OSD_INS_CACHE_SIZE 8
197 struct osd_idmap_cache {
198 struct osd_device *oic_dev;
199 struct lu_fid oic_fid;
200 /** max 2^48 dnodes per dataset, avoid spilling into another word */
201 uint64_t oic_dnode:DN_MAX_OBJECT_SHIFT,
202 oic_remote:1; /* FID isn't local */
205 struct osd_inconsistent_item {
	/* link into lustre_scrub::os_inconsistent_items,
	 * protected by lustre_scrub::os_lock. */
208 struct list_head oii_list;
210 /* The right FID <=> oid mapping. */
211 struct osd_idmap_cache oii_cache;
213 unsigned int oii_insert:1; /* insert or update mapping. */
216 struct osd_otable_it {
217 struct osd_device *ooi_dev;
218 struct lu_fid ooi_fid;
220 __u64 ooi_prefetched_dnode;
223 /* The following bits can be updated/checked w/o lock protection.
224 * If more bits will be introduced in the future and need lock to
225 * protect, please add comment. */
226 unsigned int ooi_used_outside:1, /* Some user out of OSD
227 * uses the iteration. */
228 ooi_all_cached:1, /* No more entries can be
229 * filled into cache. */
230 ooi_user_ready:1, /* The user out of OSD is
231 * ready to iterate. */
232 ooi_waiting:1; /* it::next is waiting. */
235 extern const struct dt_index_operations osd_otable_ops;
237 /* max.number of regular attributes the callers may ask for */
238 # define OSD_MAX_IN_BULK (sizeof(struct osa_attr)/sizeof(uint64_t))
240 struct osd_thread_info {
241 const struct lu_env *oti_env;
243 struct lu_fid oti_fid;
245 struct ost_id oti_ostid;
251 char oti_key[MAXNAMELEN + 1];
252 __u64 oti_key64[(MAXNAMELEN + 1)/sizeof(__u64)];
253 sa_bulk_attr_t oti_attr_bulk[OSD_MAX_IN_BULK];
255 struct lustre_mdt_attrs oti_mdt_attrs;
256 unsigned int oti_in_trans:1;
258 struct lu_attr oti_la;
259 struct osa_attr oti_osa;
260 zap_attribute_t oti_za;
261 zap_attribute_t oti_za2;
262 dmu_object_info_t oti_doi;
263 struct luz_direntry oti_zde;
265 struct lquota_id_info oti_qi;
266 struct lu_seq_range oti_seq_range;
268 /* dedicated OI cache for insert (which needs inum) */
269 struct osd_idmap_cache *oti_ins_cache;
270 int oti_ins_cache_size;
271 int oti_ins_cache_used;
272 /* inc by osd_trans_create and dec by osd_trans_stop */
273 int oti_ins_cache_depth;
274 struct lu_buf oti_xattr_lbuf;
276 zap_cursor_t oti_zc2;
282 extern struct lu_context_key osd_key;
284 static inline struct osd_thread_info *osd_oti_get(const struct lu_env *env)
286 return lu_context_key_get(&env->le_ctx, &osd_key);
290 struct thandle ot_super;
291 struct list_head ot_dcb_list;
292 struct list_head ot_stop_dcb_list;
293 struct list_head ot_unlinked_list;
294 struct list_head ot_sa_list;
296 struct lquota_trans ot_quota_trans;
300 #define OSD_OI_NAME_SIZE 24
303 * Object Index (OI) instance.
306 char oi_name[OSD_OI_NAME_SIZE];
313 uint64_t *os_compat_dirs;
314 int os_subdir_count; /* subdir count for each seq */
315 u64 os_seq; /* seq number */
316 struct list_head os_seq_list; /* list to seq_list */
319 struct osd_seq_list {
320 rwlock_t osl_seq_list_lock; /* lock for seq_list */
321 struct list_head osl_seq_list; /* list head for seq */
322 struct semaphore osl_seq_init_sem;
325 #define OSD_OST_MAP_SIZE 32
332 struct dt_device od_dt_dev;
333 /* information about underlying file system */
334 struct objset *od_os;
335 uint64_t od_rootid; /* id of root znode */
336 dnode_t *od_unlinked; /* dnode of unlinked zapobj */
	/* SA attr mapping->id; attribute names match those in ZFS so the
	 * SA_ZPL_... defines can be used. */
339 sa_attr_type_t *z_attr_table;
341 struct proc_dir_entry *od_proc_entry;
342 struct lprocfs_stats *od_stats;
344 uint64_t od_remote_parent_dir;
345 uint64_t od_index_backup_id;
346 uint64_t od_max_blksz;
349 struct osd_oi **od_oi_table;
350 unsigned int od_oi_count;
351 struct osd_seq_list od_seq_list;
353 unsigned int od_dev_set_rdonly:1, /**< osd_ro() called */
354 od_prop_rdonly:1, /**< ZFS property readonly */
361 unsigned int od_dnsize;
362 int od_index_backup_stop;
364 enum lustre_index_backup_policy od_index_backup_policy;
371 struct lu_site od_site;
373 dnode_t *od_groupused_dn;
374 dnode_t *od_userused_dn;
375 #ifdef ZFS_PROJINHERIT
376 dnode_t *od_projectused_dn;
379 /* quota slave instance for inode */
380 struct qsd_instance *od_quota_slave_md;
382 /* quota slave instance for block */
383 struct qsd_instance *od_quota_slave_dt;
385 struct brw_stats od_brw_stats;
386 atomic_t od_r_in_flight;
387 atomic_t od_w_in_flight;
389 /* used to debug zerocopy logic: the fields track all
390 * allocated, loaned and referenced buffers in use.
391 * to be removed once the change is tested well. */
392 atomic_t od_zerocopy_alloc;
393 atomic_t od_zerocopy_loan;
394 atomic_t od_zerocopy_pin;
396 arc_prune_t *arc_prune_cb;
398 /* osd seq instance */
399 struct lu_client_seq *od_cl_seq;
401 struct semaphore od_otable_sem;
402 struct osd_otable_it *od_otable_it;
403 struct lustre_scrub od_scrub;
404 struct list_head od_ios_list;
405 struct list_head od_index_backup_list;
406 struct list_head od_index_restore_list;
408 unsigned long long od_readcache_max_filesize;
411 static inline struct qsd_instance *osd_def_qsd(struct osd_device *osd)
414 return osd->od_quota_slave_dt;
416 return osd->od_quota_slave_md;
419 enum osd_destroy_type {
420 OSD_DESTROY_NONE = 0,
421 OSD_DESTROY_SYNC = 1,
422 OSD_DESTROY_ASYNC = 2,
426 struct dt_object oo_dt;
428 * Inode for file system object represented by this osd_object. This
429 * inode is pinned for the whole duration of lu_object life.
431 * Not modified concurrently (either setup early during object
432 * creation, or assigned by osd_create() under write lock).
435 sa_handle_t *oo_sa_hdl;
436 nvlist_t *oo_sa_xattr;
437 struct list_head oo_sa_linkage;
439 /* used to implement osd_object_*_{lock|unlock} */
440 struct rw_semaphore oo_sem;
442 /* to serialize some updates: destroy vs. others,
443 * xattr_set, object block size change etc */
444 struct rw_semaphore oo_guard;
446 /* protected by oo_guard */
447 struct list_head oo_unlinked_linkage;
449 /* cached attributes */
450 rwlock_t oo_attr_lock;
451 struct lu_attr oo_attr;
453 /* external dnode holding large EAs, protected by oo_guard */
455 enum osd_destroy_type oo_destroy;
457 __u32 oo_destroyed:1,
459 #ifdef ZFS_PROJINHERIT
465 /* the i_flags in LMA */
468 int oo_ea_in_bonus; /* EA bytes we expect */
470 /* record size for index file */
471 unsigned char oo_keysize;
472 unsigned char oo_recsize;
473 unsigned char oo_recusize; /* unit size */
475 uint64_t oo_parent; /* used only at object creation */
477 struct lu_object_header *oo_header;
480 int osd_statfs(const struct lu_env *, struct dt_device *, struct obd_statfs *,
481 struct obd_statfs_info *);
482 extern const struct dt_index_operations osd_acct_index_ops;
483 extern const struct lu_device_operations osd_lu_ops;
484 extern const struct dt_index_operations osd_dir_ops;
485 int osd_declare_quota(const struct lu_env *env, struct osd_device *osd,
486 qid_t uid, qid_t gid, qid_t projid, long long space,
487 struct osd_thandle *oh,
488 enum osd_quota_local_flags *local_flags,
489 enum osd_qid_declare_flags osd_qid_declare_flags);
490 uint64_t osd_objs_count_estimate(uint64_t refdbytes, uint64_t usedobjs,
491 uint64_t nrblocks, uint64_t est_maxblockshift);
492 int osd_unlinked_object_free(const struct lu_env *env, struct osd_device *osd,
498 static inline int lu_device_is_osd(const struct lu_device *d)
500 return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &osd_lu_ops);
503 static inline struct osd_object *osd_obj(const struct lu_object *o)
505 LASSERT(lu_device_is_osd(o->lo_dev));
506 return container_of(o, struct osd_object, oo_dt.do_lu);
509 static inline struct osd_device *osd_dt_dev(const struct dt_device *d)
511 LASSERT(lu_device_is_osd(&d->dd_lu_dev));
512 return container_of(d, struct osd_device, od_dt_dev);
515 static inline struct osd_device *osd_dev(const struct lu_device *d)
517 LASSERT(lu_device_is_osd(d));
518 return osd_dt_dev(container_of(d, struct dt_device, dd_lu_dev));
521 static inline struct osd_object *osd_dt_obj(const struct dt_object *d)
523 return osd_obj(&d->do_lu);
526 static inline struct osd_device *osd_obj2dev(const struct osd_object *o)
528 return osd_dev(o->oo_dt.do_lu.lo_dev);
531 static inline struct lu_device *osd2lu_dev(struct osd_device *osd)
533 return &osd->od_dt_dev.dd_lu_dev;
536 static inline struct objset * osd_dtobj2objset(struct dt_object *o)
538 return osd_dev(o->do_lu.lo_dev)->od_os;
541 static inline int osd_invariant(const struct osd_object *obj)
547 * Put the osd object once done with it.
549 * \param obj osd object that needs to be put
551 static inline void osd_object_put(const struct lu_env *env,
552 struct osd_object *obj)
554 dt_object_put(env, &obj->oo_dt);
557 static inline int osd_object_invariant(const struct lu_object *l)
559 return osd_invariant(osd_obj(l));
562 static inline struct seq_server_site *osd_seq_site(struct osd_device *osd)
564 return osd->od_dt_dev.dd_lu_dev.ld_site->ld_seq_site;
567 static inline char *osd_name(struct osd_device *osd)
569 return osd->od_svname;
572 static inline void zfs_set_bit(int nr, __u8 *addr)
574 set_bit(nr, (unsigned long *)addr);
577 static inline int zfs_test_bit(int nr, __u8 *addr)
579 return test_bit(nr, (const unsigned long *)addr);
582 static inline int osd_oi_fid2idx(struct osd_device *dev,
583 const struct lu_fid *fid)
585 return fid->f_seq & (dev->od_oi_count - 1);
588 static inline struct osd_oi *osd_fid2oi(struct osd_device *osd,
589 const struct lu_fid *fid)
591 LASSERTF(osd->od_oi_table && osd->od_oi_count >= 1,
592 "%s: "DFID", oi_count %d\n",
593 osd_name(osd), PFID(fid), osd->od_oi_count);
595 return osd->od_oi_table[osd_oi_fid2idx(osd, fid)];
598 #ifdef CONFIG_PROC_FS
600 LPROC_OSD_READ_BYTES = 0,
601 LPROC_OSD_WRITE_BYTES = 1,
602 LPROC_OSD_GET_PAGE = 2,
603 LPROC_OSD_NO_PAGE = 3,
604 LPROC_OSD_CACHE_ACCESS = 4,
605 LPROC_OSD_CACHE_HIT = 5,
606 LPROC_OSD_CACHE_MISS = 6,
607 LPROC_OSD_COPY_IO = 7,
608 LPROC_OSD_ZEROCOPY_IO = 8,
609 LPROC_OSD_TAIL_IO = 9,
613 extern struct kmem_cache *osd_zapit_cachep;
615 extern struct lprocfs_vars lprocfs_osd_obd_vars[];
617 int osd_procfs_init(struct osd_device *osd, const char *name);
618 int osd_procfs_fini(struct osd_device *osd);
621 extern char *osd_obj_tag;
622 int __osd_obj2dnode(objset_t *os, uint64_t oid, dnode_t **dnp);
623 void osd_object_sa_dirty_rele(const struct lu_env *env, struct osd_thandle *oh);
624 void osd_object_sa_dirty_add(struct osd_object *obj, struct osd_thandle *oh);
625 int __osd_obj2dbuf(const struct lu_env *env, objset_t *os,
626 uint64_t oid, dmu_buf_t **dbp);
627 struct lu_object *osd_object_alloc(const struct lu_env *env,
628 const struct lu_object_header *hdr,
629 struct lu_device *d);
630 int osd_object_sa_update(struct osd_object *obj, sa_attr_type_t type,
631 void *buf, uint32_t buflen, struct osd_thandle *oh);
632 int __osd_zap_create(const struct lu_env *env, struct osd_device *osd,
633 dnode_t **zap_dnp, dmu_tx_t *tx, struct lu_attr *la,
634 unsigned dnsize, zap_flags_t flags);
635 int __osd_object_create(const struct lu_env *env, struct osd_device *osd,
636 struct osd_object *obj, const struct lu_fid *fid,
637 dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la);
638 int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
639 struct osd_object *obj, sa_handle_t *sa_hdl, dmu_tx_t *tx,
640 struct lu_attr *la, uint64_t parent, nvlist_t *);
641 int osd_find_new_dnode(const struct lu_env *env, dmu_tx_t *tx,
642 uint64_t oid, dnode_t **dnp);
645 int osd_oi_init(const struct lu_env *env, struct osd_device *o, bool reset);
646 void osd_oi_fini(const struct lu_env *env, struct osd_device *o);
647 int osd_fid_lookup(const struct lu_env *env,
648 struct osd_device *, const struct lu_fid *, uint64_t *);
649 uint64_t osd_get_name_n_idx(const struct lu_env *env, struct osd_device *osd,
650 const struct lu_fid *fid, char *buf, int bufsize,
652 int osd_options_init(void);
653 int osd_ost_seq_exists(const struct lu_env *env, struct osd_device *osd,
655 int osd_idc_find_and_init(const struct lu_env *env, struct osd_device *osd,
656 struct osd_object *obj);
657 struct osd_idmap_cache *osd_idc_find_or_init(const struct lu_env *env,
658 struct osd_device *osd,
659 const struct lu_fid *fid);
660 struct osd_idmap_cache *osd_idc_find(const struct lu_env *env,
661 struct osd_device *osd,
662 const struct lu_fid *fid);
663 int osd_idc_find_and_init_with_oid(const struct lu_env *env,
664 struct osd_device *osd,
665 const struct lu_fid *fid,
667 int fid_is_on_ost(const struct lu_env *env, struct osd_device *osd,
668 const struct lu_fid *fid);
669 int osd_obj_find_or_create(const struct lu_env *env, struct osd_device *o,
670 uint64_t parent, const char *name, uint64_t *child,
671 const struct lu_fid *fid, bool isdir);
673 extern unsigned int osd_oi_count;
676 int osd_get_fid_by_oid(const struct lu_env *env, struct osd_device *osd,
677 uint64_t oid, struct lu_fid *fid);
678 int osd_index_try(const struct lu_env *env, struct dt_object *dt,
679 const struct dt_index_features *feat);
680 int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
681 u64 seq, struct lu_seq_range *range);
682 void osd_zap_cursor_init_serialized(zap_cursor_t *zc, struct objset *os,
683 uint64_t id, uint64_t dirhash);
684 int osd_zap_cursor_init(zap_cursor_t **zc, struct objset *os,
685 uint64_t id, uint64_t dirhash);
686 void osd_zap_cursor_fini(zap_cursor_t *zc);
687 uint64_t osd_zap_cursor_serialize(zap_cursor_t *zc);
688 int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
689 const struct lu_fid *fid);
690 int osd_add_to_remote_parent(const struct lu_env *env,
691 struct osd_device *osd,
692 struct osd_object *obj,
693 struct osd_thandle *oh);
694 int osd_delete_from_remote_parent(const struct lu_env *env,
695 struct osd_device *osd,
696 struct osd_object *obj,
697 struct osd_thandle *oh, bool destroy);
698 int __osd_xattr_load_by_oid(struct osd_device *osd, uint64_t oid,
702 int osd_scrub_setup(const struct lu_env *env, struct osd_device *dev,
703 time64_t interval, bool resetoi);
704 void osd_scrub_cleanup(const struct lu_env *env, struct osd_device *dev);
705 int osd_scrub_start(const struct lu_env *env, struct osd_device *dev,
707 void osd_scrub_stop(struct osd_device *dev);
708 int osd_oii_insert(const struct lu_env *env, struct osd_device *dev,
709 const struct lu_fid *fid, uint64_t oid, bool insert);
710 int osd_oii_lookup(struct osd_device *dev, const struct lu_fid *fid,
714 * Basic transaction credit op
723 int osd_scrub_refresh_mapping(const struct lu_env *env,
724 struct osd_device *dev,
725 const struct lu_fid *fid,
726 uint64_t oid, enum dt_txn_op ops,
727 bool force, const char *name);
731 int __osd_sa_xattr_schedule_update(const struct lu_env *env,
732 struct osd_object *obj,
733 struct osd_thandle *oh);
734 int __osd_sa_attr_init(const struct lu_env *env, struct osd_object *obj,
735 struct osd_thandle *oh);
736 int __osd_sa_xattr_update(const struct lu_env *env, struct osd_object *obj,
737 struct osd_thandle *oh);
738 int __osd_xattr_load(struct osd_device *osd, sa_handle_t *hdl,
740 int __osd_xattr_get_large(const struct lu_env *env, struct osd_device *osd,
741 uint64_t xattr, struct lu_buf *buf,
742 const char *name, int *sizep);
743 int osd_xattr_get_internal(const struct lu_env *env, struct osd_object *obj,
744 struct lu_buf *buf, const char *name, int *sizep);
745 int osd_xattr_get_lma(const struct lu_env *env, struct osd_object *obj,
747 int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
748 struct lu_buf *buf, const char *name);
749 int osd_declare_xattr_set(const struct lu_env *env, struct dt_object *dt,
750 const struct lu_buf *buf, const char *name,
751 int fl, struct thandle *handle);
752 int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
753 const struct lu_buf *buf, const char *name, int fl,
754 struct thandle *handle);
755 int osd_declare_xattr_del(const struct lu_env *env, struct dt_object *dt,
756 const char *name, struct thandle *handle);
757 int osd_xattr_del(const struct lu_env *env, struct dt_object *dt,
758 const char *name, struct thandle *handle);
759 void osd_declare_xattrs_destroy(const struct lu_env *env,
760 struct osd_object *obj,
761 struct osd_thandle *oh);
762 int osd_xattrs_destroy(const struct lu_env *env,
763 struct osd_object *obj, struct osd_thandle *oh);
764 int osd_xattr_list(const struct lu_env *env, struct dt_object *dt,
765 const struct lu_buf *lb);
766 void __osd_xattr_declare_set(const struct lu_env *env, struct osd_object *obj,
767 int vallen, const char *name, struct osd_thandle *oh);
/* Set an xattr stored in the dnode's SA (system attribute) area.
 * NB: the trailing ";;" was a stray extra semicolon — a file-scope empty
 * declaration is not valid ISO C before C23. */
int __osd_sa_xattr_set(const struct lu_env *env, struct osd_object *obj,
		       const struct lu_buf *buf, const char *name, int fl,
		       struct osd_thandle *oh);
771 int __osd_xattr_set(const struct lu_env *env, struct osd_object *obj,
772 const struct lu_buf *buf, const char *name, int fl,
773 struct osd_thandle *oh);
774 int __osd_sa_xattr_update(const struct lu_env *env, struct osd_object *obj,
775 struct osd_thandle *oh);
/* Bytes of the dnode bonus buffer consumed by the EAs that are always
 * present on a Lustre object, used when estimating required dnode size. */
#define OSD_BASE_EA_IN_BONUS (ZFS_SA_BASE_ATTR_SIZE + \
			      sizeof(__u64) /* VBR VERSION */ + \
			      sizeof(struct lustre_mdt_attrs) /* LMA */)
781 #ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
782 int osd_find_dnsize(struct osd_device *osd, int ea_in_bonus);
785 osd_find_dnsize(struct osd_device *osd, int ea_in_bonus)
787 return DN_MAX_BONUSLEN;
791 static inline int osd_object_is_zap(dnode_t *dn)
793 return (dn->dn_type == DMU_OT_DIRECTORY_CONTENTS ||
794 dn->dn_type == DMU_OT_USERGROUP_USED);
797 /* XXX: f_ver is not counted, but may differ too */
798 static inline void osd_fid2str(char *buf, const struct lu_fid *fid, int len)
800 snprintf(buf, len, DFID_NOBRACE, PFID(fid));
804 osd_xattr_set_internal(const struct lu_env *env, struct osd_object *obj,
805 const struct lu_buf *buf, const char *name, int fl,
806 struct osd_thandle *oh)
810 if (unlikely(!dt_object_exists(&obj->oo_dt) || obj->oo_destroyed))
814 if (osd_obj2dev(obj)->od_xattr_in_sa) {
815 rc = __osd_sa_xattr_set(env, obj, buf, name, fl, oh);
817 rc = __osd_xattr_set(env, obj, buf, name, fl, oh);
819 rc = __osd_xattr_set(env, obj, buf, name, fl, oh);
825 static inline uint64_t attrs_fs2zfs(const uint32_t flags)
827 return (flags & LUSTRE_APPEND_FL ? ZFS_APPENDONLY : 0) |
828 (flags & LUSTRE_NODUMP_FL ? ZFS_NODUMP : 0) |
829 #ifdef ZFS_PROJINHERIT
830 (flags & LUSTRE_PROJINHERIT_FL ? ZFS_PROJINHERIT : 0) |
832 (flags & LUSTRE_IMMUTABLE_FL ? ZFS_IMMUTABLE : 0);
835 static inline uint32_t attrs_zfs2fs(const uint64_t flags)
837 return (flags & ZFS_APPENDONLY ? LUSTRE_APPEND_FL : 0) |
838 (flags & ZFS_NODUMP ? LUSTRE_NODUMP_FL : 0) |
839 #ifdef ZFS_PROJINHERIT
840 (flags & ZFS_PROJINHERIT ? LUSTRE_PROJINHERIT_FL : 0) |
842 (flags & ZFS_IMMUTABLE ? LUSTRE_IMMUTABLE_FL : 0);
847 #ifndef HAVE_DSL_POOL_CONFIG
848 static inline void dsl_pool_config_enter(dsl_pool_t *dp, void *name)
852 static inline void dsl_pool_config_exit(dsl_pool_t *dp, void *name)
857 #ifdef HAVE_SPA_MAXBLOCKSIZE
858 #define osd_spa_maxblocksize(spa) spa_maxblocksize(spa)
859 #define osd_spa_maxblockshift(spa) fls64(spa_maxblocksize(spa) - 1)
861 #define osd_spa_maxblocksize(spa) SPA_MAXBLOCKSIZE
862 #define osd_spa_maxblockshift(spa) SPA_MAXBLOCKSHIFT
863 #define SPA_OLD_MAXBLOCKSIZE SPA_MAXBLOCKSIZE
866 #ifdef HAVE_SA_SPILL_ALLOC
868 osd_zio_buf_alloc(size_t size)
870 return sa_spill_alloc(KM_SLEEP);
874 osd_zio_buf_free(void *buf, size_t size)
879 #define osd_zio_buf_alloc(size) zio_buf_alloc(size)
880 #define osd_zio_buf_free(buf, size) zio_buf_free(buf, size)
883 #ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
884 static inline uint64_t
885 osd_dmu_object_alloc(objset_t *os, dmu_object_type_t objtype, int blocksize,
886 int dnodesize, dmu_tx_t *tx)
889 dnodesize = max_t(int, dmu_objset_dnodesize(os),
892 return dmu_object_alloc_dnsize(os, objtype, blocksize, DMU_OT_SA,
893 DN_BONUS_SIZE(dnodesize), dnodesize, tx);
896 static inline uint64_t
897 osd_zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
898 dmu_object_type_t ot, int leaf_blockshift,
899 int indirect_blockshift, int dnodesize, dmu_tx_t *tx)
902 dnodesize = max_t(int, dmu_objset_dnodesize(os),
905 return zap_create_flags_dnsize(os, normflags, flags, ot,
906 leaf_blockshift, indirect_blockshift,
907 DMU_OT_SA, DN_BONUS_SIZE(dnodesize),
912 osd_obj_bonuslen(struct osd_object *obj)
914 int bonuslen = DN_BONUS_SIZE(DNODE_MIN_SIZE);
916 if (obj->oo_dn != NULL && obj->oo_dn->dn_num_slots != 0) {
917 bonuslen = DN_SLOTS_TO_BONUSLEN(obj->oo_dn->dn_num_slots);
919 objset_t *os = osd_dtobj2objset(&obj->oo_dt);
923 dnodesize = dmu_objset_dnodesize(os);
925 bonuslen = DN_BONUS_SIZE(dnodesize);
932 static inline uint64_t
933 osd_dmu_object_alloc(objset_t *os, dmu_object_type_t objtype, int blocksize,
934 int dnodesize, dmu_tx_t *tx)
936 return dmu_object_alloc(os, objtype, blocksize, DMU_OT_SA,
937 DN_MAX_BONUSLEN, tx);
940 static inline uint64_t
941 osd_zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
942 dmu_object_type_t ot, int leaf_blockshift,
943 int indirect_blockshift, int dnodesize, dmu_tx_t *tx)
945 return zap_create_flags(os, normflags, flags, ot, leaf_blockshift,
946 indirect_blockshift, DMU_OT_SA,
947 DN_MAX_BONUSLEN, tx);
951 osd_obj_bonuslen(struct osd_object *obj)
953 return DN_MAX_BONUSLEN;
955 #endif /* HAVE_DMU_OBJECT_ALLOC_DNSIZE */
957 #ifdef HAVE_DMU_PREFETCH_6ARG
958 #define osd_dmu_prefetch(os, obj, lvl, off, len, pri) \
959 dmu_prefetch((os), (obj), (lvl), (off), (len), (pri))
961 #define osd_dmu_prefetch(os, obj, lvl, off, len, pri) \
962 dmu_prefetch((os), (obj), (lvl), (off))
965 static inline int osd_sa_handle_get(struct osd_object *obj)
967 struct osd_device *osd = osd_obj2dev(obj);
968 dnode_t *dn = obj->oo_dn;
974 dbuf_read(dn->dn_bonus, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH);
975 rc = -sa_handle_get_from_db(osd->od_os, &dn->dn_bonus->db, obj,
976 SA_HDL_PRIVATE, &obj->oo_sa_hdl);
979 zfs_refcount_add(&dn->dn_bonus->db_holds, osd_obj_tag);
983 static inline void osd_dnode_rele(dnode_t *dn)
987 LASSERT(dn->dn_bonus);
990 dmu_buf_rele(&db->db, osd_obj_tag);
993 static inline uint64_t osd_db_dirty_txg(dmu_buf_impl_t *db)
995 dbuf_dirty_record_t *dr;
998 mutex_enter(&db->db_mtx);
999 #ifdef HAVE_DB_DIRTY_RECORDS_LIST
1000 dr = list_head(&db->db_dirty_records);
1002 dr = db->db_last_dirty;
1006 mutex_exit(&db->db_mtx);
1011 #ifdef HAVE_DMU_USEROBJ_ACCOUNTING
1013 #define OSD_DMU_USEROBJ_PREFIX DMU_OBJACCT_PREFIX
1014 #define OSD_DMU_USEROBJ_PREFIX_LEN DMU_OBJACCT_PREFIX_LEN
1016 static inline bool osd_dmu_userobj_accounting_available(struct osd_device *osd)
1018 return dmu_objset_userobjspace_present(osd->od_os);
1022 #define OSD_DMU_USEROBJ_PREFIX "obj-"
1023 #define OSD_DMU_USEROBJ_PREFIX_LEN 4
1025 static inline bool osd_dmu_userobj_accounting_available(struct osd_device *osd)
1029 #endif /* #ifdef HAVE_DMU_USEROBJ_ACCOUNTING */
1031 static inline int osd_zap_add(struct osd_device *osd, uint64_t zap,
1032 dnode_t *dn, const char *key,
1033 int int_size, int int_num,
1034 const void *val, dmu_tx_t *tx)
1038 #ifdef HAVE_ZAP_ADD_BY_DNODE
1040 return -zap_add_by_dnode(dn, key, int_size, int_num, val, tx);
1042 return -zap_add(osd->od_os, zap, key, int_size, int_num, val, tx);
1045 static inline int osd_zap_remove(struct osd_device *osd, uint64_t zap,
1046 dnode_t *dn, const char *key,
1051 #ifdef HAVE_ZAP_ADD_BY_DNODE
1053 return -zap_remove_by_dnode(dn, key, tx);
1055 return -zap_remove(osd->od_os, zap, key, tx);
1059 static inline int osd_zap_lookup(struct osd_device *osd, uint64_t zap,
1060 dnode_t *dn, const char *key,
1061 int int_size, int int_num, void *v)
1065 #ifdef HAVE_ZAP_ADD_BY_DNODE
1067 return -zap_lookup_by_dnode(dn, key, int_size, int_num, v);
1069 return -zap_lookup(osd->od_os, zap, key, int_size, int_num, v);
1072 static inline void osd_tx_hold_zap(dmu_tx_t *tx, uint64_t zap,
1073 dnode_t *dn, int add, const char *name)
1075 #ifdef HAVE_DMU_TX_HOLD_ZAP_BY_DNODE
1077 dmu_tx_hold_zap_by_dnode(tx, dn, add, name);
1081 dmu_tx_hold_zap(tx, zap, add, name);
1084 static inline void osd_tx_hold_write(dmu_tx_t *tx, uint64_t oid,
1085 dnode_t *dn, uint64_t off, int len)
1087 #ifdef HAVE_DMU_TX_HOLD_ZAP_BY_DNODE
1089 dmu_tx_hold_write_by_dnode(tx, dn, off, len);
1093 dmu_tx_hold_write(tx, oid, off, len);
1096 static inline void osd_dmu_write(struct osd_device *osd, dnode_t *dn,
1097 uint64_t offset, uint64_t size,
1098 const char *buf, dmu_tx_t *tx)
1101 #ifdef HAVE_DMU_WRITE_BY_DNODE
1102 dmu_write_by_dnode(dn, offset, size, buf, tx);
1104 dmu_write(osd->od_os, dn->dn_object, offset, size, buf, tx);
1108 static inline int osd_dmu_read(struct osd_device *osd, dnode_t *dn,
1109 uint64_t offset, uint64_t size,
1110 char *buf, int flags)
1113 #ifdef HAVE_DMU_READ_BY_DNODE
1114 return -dmu_read_by_dnode(dn, offset, size, buf, flags);
1116 return -dmu_read(osd->od_os, dn->dn_object, offset, size, buf, flags);
1120 #ifdef HAVE_DMU_OBJSET_OWN_6ARG
1121 #define osd_dmu_objset_own(name, type, ronly, decrypt, tag, os) \
1122 dmu_objset_own((name), (type), (ronly), (decrypt), (tag), (os))
1124 #define osd_dmu_objset_own(name, type, ronly, decrypt, tag, os) \
1125 dmu_objset_own((name), (type), (ronly), (tag), (os))
1128 #ifdef HAVE_DMU_OBJSET_DISOWN_3ARG
1129 #define osd_dmu_objset_disown(os, decrypt, tag) \
1130 dmu_objset_disown((os), (decrypt), (tag))
1132 #define osd_dmu_objset_disown(os, decrypt, tag) \
1133 dmu_objset_disown((os), (tag))
1137 osd_index_register(struct osd_device *osd, const struct lu_fid *fid,
1138 __u32 keysize, __u32 recsize)
1140 return lustre_index_register(&osd->od_dt_dev, osd_name(osd),
1141 &osd->od_index_backup_list, &osd->od_lock,
1142 &osd->od_index_backup_stop,
1143 fid, keysize, recsize);
1147 osd_index_backup(const struct lu_env *env, struct osd_device *osd, bool backup)
1149 struct lu_fid *fid = &osd_oti_get(env)->oti_fid;
1152 lu_local_obj_fid(fid, INDEX_BACKUP_OID);
1153 rc = osd_idc_find_and_init_with_oid(env, osd, fid,
1154 osd->od_index_backup_id);
1158 lustre_index_backup(env, &osd->od_dt_dev, osd_name(osd),
1159 &osd->od_index_backup_list, &osd->od_lock,
1160 &osd->od_index_backup_stop, backup);
1163 #ifndef HAVE_DMU_TX_MARK_NETFREE
1164 #define dmu_tx_mark_netfree(tx)
1167 #ifndef HAVE_ZFS_INODE_TIMESPEC
1168 #define inode_timespec_t timestruc_t
1171 #ifdef HAVE_DMU_OFFSET_NEXT
1172 #define osd_dmu_offset_next(os, obj, hole, res) \
1173 dmu_offset_next((os), (obj), (hole), (res))
1175 #define osd_dmu_offset_next(os, obj, hole, res) (EOPNOTSUPP)
1178 #endif /* _OSD_INTERNAL_H */