X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fosd-zfs%2Fosd_internal.h;h=4e5b4807e7acc266ac6d247778562e53f0c73982;hp=9bb7f98da3ad85fb3d075e7d71bd91470d7e2270;hb=424f3d1ac0ce065b77058afb093f791275ecf999;hpb=b1e595c09e1b07a6840142b3ae015b8a5a8affeb

diff --git a/lustre/osd-zfs/osd_internal.h b/lustre/osd-zfs/osd_internal.h
index 9bb7f98..4e5b480 100644
--- a/lustre/osd-zfs/osd_internal.h
+++ b/lustre/osd-zfs/osd_internal.h
@@ -15,21 +15,15 @@
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
 /*
  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
- */
-/*
- * Copyright (c) 2012, 2013, Intel Corporation.
- * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -49,19 +43,42 @@
 #include
 #include
 #include
-
-#define _SPL_KMEM_H
-#include
-#define kmem_zalloc(a, b)	kzalloc(a, b)
-#define kmem_free(ptr, sz)	((void)(sz), kfree(ptr))
-#ifndef KM_SLEEP
-#define KM_SLEEP		GFP_KERNEL
+#include
+#include
+#ifdef SHRINK_STOP
+#undef SHRINK_STOP
 #endif
-
 #include
 #include
 #include
 #include
+#include
+#include
+#include
+
+/**
+ * By design including kmem.h overrides the Linux slab interfaces to provide
+ * the Illumos kmem cache interfaces. To override this and gain access to
+ * the Linux interfaces these preprocessor macros must be undefined.
+ */
+#ifdef kmem_cache_destroy
+#undef kmem_cache_destroy
+#endif
+
+#ifdef kmem_cache_create
+#undef kmem_cache_create
+#endif
+
+#ifdef kmem_cache_alloc
+#undef kmem_cache_alloc
+#endif
+
+#ifdef kmem_cache_free
+#undef kmem_cache_free
+#endif
+
+#define ZFS_VERSION_CODE	\
+	OBD_OCD_VERSION(ZFS_MAJOR, ZFS_MINOR, ZFS_PATCH, ZFS_FIX)
 
 #define LUSTRE_ROOT_FID_SEQ	0
 #define DMU_OSD_SVNAME		"svname"
@@ -70,16 +87,24 @@
 #define OSD_GFP_IO		(GFP_NOFS | __GFP_HIGHMEM)
 
 /* Statfs space reservation for grant, fragmentation, and unlink space. */
-#define OSD_STATFS_RESERVED_BLKS  (1ULL << (22 - SPA_MAXBLOCKSHIFT)) /* 4MB */
-#define OSD_STATFS_RESERVED_SHIFT (7)	/* reserve 0.78% of all space */
+#define OSD_STATFS_RESERVED_SIZE	(16ULL << 20) /* reserve 16MB minimum */
+#define OSD_STATFS_RESERVED_SHIFT	(7)	/* reserve 0.78% of all space */
 
 /* Statfs {minimum, safe estimate, and maximum} dnodes per block */
-#define OSD_DNODE_MIN_BLKSHIFT	(SPA_MAXBLOCKSHIFT - DNODE_SHIFT) /* 17-9 =8 */
-#define OSD_DNODE_EST_BLKSHIFT	(SPA_MAXBLOCKSHIFT - 12)	  /* 17-12=5 */
-#define OSD_DNODE_EST_COUNT	1024
+#define OSD_DNODE_MIN_BLKSHIFT	(DNODES_PER_BLOCK_SHIFT)
+#define OSD_DNODE_EST_BLKSHIFT	(12)	/* est 4KB/dnode */
+#define OSD_DNODE_EST_COUNT	4096
 
 #define OSD_GRANT_FOR_LOCAL_OIDS (2ULL << 20) /* 2MB for last_rcvd, ... */
 
+#define OSD_MAX_CACHE_SIZE	OBD_OBJECT_EOF
+
+#ifndef HAVE_ZFS_REFCOUNT_ADD
+#define zfs_refcount_add	refcount_add
+#endif
+
+extern struct dt_body_operations osd_body_scrub_ops;
+
 /**
  * Iterator's in-memory data structure for quota file.
  */
@@ -94,6 +119,13 @@ struct osd_it_quota {
 	unsigned	 oiq_reset:1;	/* 1 -- no need to advance */
 };
 
+enum osd_zap_pos {
+	OZI_POS_INIT = 0,
+	OZI_POS_DOT = 1,	/* cursor at . */
+	OZI_POS_DOTDOT = 2,	/* cursor at .. */
+	OZI_POS_REAL = 3,	/* cursor at real entries */
+};
+
 /**
  * Iterator's in-memory data structure for ZAPs
  *
@@ -104,14 +136,9 @@ struct osd_it_quota {
 struct osd_zap_it {
 	zap_cursor_t		*ozi_zc;
 	struct osd_object	*ozi_obj;
-	struct lustre_capa	*ozi_capa;
 	unsigned		 ozi_reset:1;	/* 1 -- no need to advance */
-	/* ozi_pos - position of the cursor:
-	 * 0 - before any record
-	 * 1 - "."
-	 * 2 - ".."
-	 * 3 - real records */
-	unsigned		 ozi_pos:3;
+	/* ozi_pos - position of the cursor */
+	enum osd_zap_pos	 ozi_pos;
 	union {
 		char		 ozi_name[MAXNAMELEN];	/* file name for dir */
 		__u64		 ozi_key;	/* binary key for index files */
@@ -142,6 +169,9 @@ struct osa_attr {
 	uint64_t	mode;
 	uint64_t	gid;
 	uint64_t	uid;
+#ifdef ZFS_PROJINHERIT
+	uint64_t	projid;
+#endif
 	uint64_t	nlink;
 	uint64_t	rdev;
 	uint64_t	flags;
@@ -151,46 +181,90 @@ struct osa_attr {
 	uint64_t	ctime[2];
 };
+
+#define OSD_INS_CACHE_SIZE	8
+
+/* OI cache entry */
+struct osd_idmap_cache {
+	struct osd_device	*oic_dev;
+	struct lu_fid		 oic_fid;
+	/** max 2^48 dnodes per dataset, avoid spilling into another word */
+	uint64_t		 oic_dnode:DN_MAX_OBJECT_SHIFT,
+				 oic_remote:1;	/* FID isn't local */
+};
+
+struct osd_inconsistent_item {
+	/* link into lustre_scrub::os_inconsistent_items,
+	 * protected by lustr_scrub::os_lock. */
+	struct list_head	oii_list;
+
+	/* The right FID <=> oid mapping. */
+	struct osd_idmap_cache	oii_cache;
+
+	unsigned int		oii_insert:1;	/* insert or update mapping. */
+};
+
+struct osd_otable_it {
+	struct osd_device	*ooi_dev;
+	struct lu_fid		 ooi_fid;
+	__u64			 ooi_pos;
+	__u64			 ooi_prefetched_dnode;
+	int			 ooi_prefetched;
+
+	/* The following bits can be updated/checked w/o lock protection.
+	 * If more bits will be introduced in the future and need lock to
+	 * protect, please add comment. */
+	unsigned int		 ooi_used_outside:1, /* Some user out of OSD
+						      * uses the iteration. */
+				 ooi_all_cached:1, /* No more entries can be
+						    * filled into cache. */
+				 ooi_user_ready:1, /* The user out of OSD is
+						    * ready to iterate. */
+				 ooi_waiting:1; /* it::next is waiting. */
+};
+
+extern const struct dt_index_operations osd_otable_ops;
+
+/* max.number of regular attributes the callers may ask for */
+# define OSD_MAX_IN_BULK	(sizeof(struct osa_attr)/sizeof(uint64_t))
+
 
 struct osd_thread_info {
 	const struct lu_env	*oti_env;
 	struct lu_fid		 oti_fid;
-	/*
-	 * XXX temporary: for ->i_op calls.
-	 */
-	struct timespec		 oti_time;
-	/*
-	 * XXX temporary: for capa operations.
-	 */
-	struct lustre_capa_key	 oti_capa_key;
-	struct lustre_capa	 oti_capa;
 	struct ost_id		 oti_ostid;
 
 	char			 oti_buf[64];
 
-	/** osd iterator context used for iterator session */
-	union {
-		struct osd_zap_it	oti_it_zap;
-		struct osd_it_quota	oti_it_quota;
-	};
-
-	char			 oti_str[64];
 	union {
 		char		 oti_key[MAXNAMELEN + 1];
 		__u64		 oti_key64[(MAXNAMELEN + 1)/sizeof(__u64)];
+		sa_bulk_attr_t	 oti_attr_bulk[OSD_MAX_IN_BULK];
 	};
-	struct lustre_mdt_attrs oti_mdt_attrs;
+
+	struct lustre_mdt_attrs	 oti_mdt_attrs;
+	unsigned int		 oti_in_trans:1;
 
 	struct lu_attr		 oti_la;
 	struct osa_attr		 oti_osa;
 	zap_attribute_t		 oti_za;
+	zap_attribute_t		 oti_za2;
 	dmu_object_info_t	 oti_doi;
 	struct luz_direntry	 oti_zde;
 
 	struct lquota_id_info	 oti_qi;
 	struct lu_seq_range	 oti_seq_range;
-	unsigned int		 oti_it_inline:1;
+
+	/* dedicated OI cache for insert (which needs inum) */
+	struct osd_idmap_cache	*oti_ins_cache;
+	int			 oti_ins_cache_size;
+	int			 oti_ins_cache_used;
+	/* inc by osd_trans_create and dec by osd_trans_stop */
+	int			 oti_ins_cache_depth;
+	struct lu_buf		 oti_xattr_lbuf;
+	zap_cursor_t		 oti_zc;
+	zap_cursor_t		 oti_zc2;
 };
 
 extern struct lu_context_key osd_key;
@@ -203,28 +277,30 @@ static inline struct osd_thread_info *osd_oti_get(const struct lu_env *env)
 struct osd_thandle {
 	struct thandle		 ot_super;
 	struct list_head	 ot_dcb_list;
+	struct list_head	 ot_stop_dcb_list;
+	struct list_head	 ot_unlinked_list;
 	struct list_head	 ot_sa_list;
-	struct semaphore	 ot_sa_lock;
 	dmu_tx_t		*ot_tx;
 	struct lquota_trans	 ot_quota_trans;
-	__u32			 ot_write_commit:1,
-				 ot_assigned:1;
+	__u32			 ot_assigned:1;
 };
 
-#define OSD_OI_NAME_SIZE	16
+#define OSD_OI_NAME_SIZE	24
 
 /*
  * Object Index (OI) instance.
  */
 struct osd_oi {
-	char			oi_name[OSD_OI_NAME_SIZE]; /* unused */
+	char			oi_name[OSD_OI_NAME_SIZE];
 	uint64_t		oi_zapid;
+	dnode_t			*oi_dn;
 };
 
 struct osd_seq {
+	uint64_t	 os_oid;
 	uint64_t	*os_compat_dirs;
 	int		 os_subdir_count; /* subdir count for each seq */
-	obd_seq		 os_seq;	  /* seq number */
+	u64		 os_seq;	  /* seq number */
 	struct list_head os_seq_list;	  /* list to seq_list */
 };
@@ -245,46 +321,58 @@ struct osd_device {
 	/* information about underlying file system */
 	struct objset		*od_os;
 	uint64_t		 od_rootid;	/* id of root znode */
+	dnode_t			*od_unlinked;	/* dnode of unlinked zapobj */
 	/* SA attr mapping->id,
 	 * name is the same as in ZFS to use defines SA_ZPL_...*/
 	sa_attr_type_t		*z_attr_table;
 
-	/*
-	 * Fid Capability
-	 */
-	unsigned int		 od_fl_capa:1;
-	unsigned long		 od_capa_timeout;
-	__u32			 od_capa_alg;
-	struct lustre_capa_key	*od_capa_keys;
-	struct hlist_head	*od_capa_hash;
-
 	struct proc_dir_entry	*od_proc_entry;
 	struct lprocfs_stats	*od_stats;
 
+	uint64_t		 od_remote_parent_dir;
+	uint64_t		 od_index_backup_id;
+	uint64_t		 od_max_blksz;
 	uint64_t		 od_root;
 	uint64_t		 od_O_id;
 	struct osd_oi		**od_oi_table;
 	unsigned int		 od_oi_count;
 	struct osd_seq_list	 od_seq_list;
 
-	unsigned int		 od_rdonly:1,
+	unsigned int		 od_dev_set_rdonly:1,	/**< osd_ro() called */
+				 od_prop_rdonly:1,  /**< ZFS property readonly */
 				 od_xattr_in_sa:1,
-				 od_quota_iused_est:1,
 				 od_is_ost:1,
-				 od_posix_acl:1;
+				 od_in_init:1,
+				 od_posix_acl:1,
+				 od_nonrotational:1;
+	unsigned int		 od_dnsize;
+	int			 od_index_backup_stop;
+	enum lustre_index_backup_policy od_index_backup_policy;
 
 	char			 od_mntdev[128];
 	char			 od_svname[128];
+	uuid_t			 od_uuid;
 
 	int			 od_connects;
+	int			 od_index;
+	__s64			 od_auto_scrub_interval;
 	struct lu_site		 od_site;
 
-	/* object IDs of the inode accounting indexes */
-	uint64_t		 od_iusr_oid;
-	uint64_t		 od_igrp_oid;
+	dnode_t			*od_groupused_dn;
+	dnode_t			*od_userused_dn;
+#ifdef ZFS_PROJINHERIT
+	dnode_t			*od_projectused_dn;
+#endif
+
+	/* quota slave instance for inode */
+	struct qsd_instance	*od_quota_slave_md;
+
+	/* quota slave instance for block */
+	struct qsd_instance	*od_quota_slave_dt;
 
-	/* quota slave instance */
-	struct qsd_instance	*od_quota_slave;
+	struct brw_stats	 od_brw_stats;
+	atomic_t		 od_r_in_flight;
+	atomic_t		 od_w_in_flight;
 
 	/* used to debug zerocopy logic: the fields track all
 	 * allocated, loaned and referenced buffers in use.
@@ -297,6 +385,29 @@ struct osd_device {
 
 	/* osd seq instance */
 	struct lu_client_seq	*od_cl_seq;
+
+	struct semaphore	 od_otable_sem;
+	struct osd_otable_it	*od_otable_it;
+	struct lustre_scrub	 od_scrub;
+	struct list_head	 od_ios_list;
+	struct list_head	 od_index_backup_list;
+	struct list_head	 od_index_restore_list;
+	spinlock_t		 od_lock;
+	unsigned long long	 od_readcache_max_filesize;
+};
+
+static inline struct qsd_instance *osd_def_qsd(struct osd_device *osd)
+{
+	if (osd->od_is_ost)
+		return osd->od_quota_slave_dt;
+	else
+		return osd->od_quota_slave_md;
+}
+
+enum osd_destroy_type {
+	OSD_DESTROY_NONE = 0,
+	OSD_DESTROY_SYNC = 1,
+	OSD_DESTROY_ASYNC = 2,
 };
 
 struct osd_object {
@@ -306,40 +417,67 @@ struct osd_object {
 	 * inode is pinned for the whole duration of lu_object life.
 	 *
 	 * Not modified concurrently (either setup early during object
-	 * creation, or assigned by osd_object_create() under write lock).
+	 * creation, or assigned by osd_create() under write lock).
 	 */
-	dmu_buf_t		*oo_db;
+	dnode_t			*oo_dn;
 	sa_handle_t		*oo_sa_hdl;
 	nvlist_t		*oo_sa_xattr;
 	struct list_head	 oo_sa_linkage;
 
+	/* used to implement osd_object_*_{lock|unlock} */
 	struct rw_semaphore	 oo_sem;
+
+	/* to serialize some updates: destroy vs. others,
+	 * xattr_set, object block size change etc */
+	struct rw_semaphore	 oo_guard;
+
+	/* protected by oo_guard */
+	struct list_head	 oo_unlinked_linkage;
+
 	/* cached attributes */
 	rwlock_t		 oo_attr_lock;
 	struct lu_attr		 oo_attr;
 
-	/* protects extended attributes */
-	struct semaphore	 oo_guard;
+	/* external dnode holding large EAs, protected by oo_guard */
 	uint64_t		 oo_xattr;
+	enum osd_destroy_type	 oo_destroy;
+
+	__u32			 oo_destroyed:1,
+				 oo_late_xattr:1,
+#ifdef ZFS_PROJINHERIT
+				 oo_with_projid:1,
+#endif
+				 oo_late_attr_set:1,
+				 oo_pfid_in_lma:1;
 
-	/* record size for index file */
-	unsigned char		 oo_keysize;
-	unsigned char		 oo_recsize;
-	unsigned char		 oo_recusize;	/* unit size */
+	/* the i_flags in LMA */
+	__u32			 oo_lma_flags;
 	union {
+		int		oo_ea_in_bonus;	/* EA bytes we expect */
+		struct {
+			/* record size for index file */
+			unsigned char	oo_keysize;
+			unsigned char	oo_recsize;
+			unsigned char	oo_recusize;	/* unit size */
+		};
+		uint64_t	oo_parent; /* used only at object creation */
+	};
+	struct lu_object_header *oo_header;
 };
 
-int osd_statfs(const struct lu_env *, struct dt_device *, struct obd_statfs *);
+int osd_statfs(const struct lu_env *, struct dt_device *, struct obd_statfs *,
+	       struct obd_statfs_info *);
 extern const struct dt_index_operations osd_acct_index_ops;
-uint64_t osd_quota_fid2dmu(const struct lu_fid *fid);
 extern struct lu_device_operations osd_lu_ops;
 extern struct dt_index_operations osd_dir_ops;
 int osd_declare_quota(const struct lu_env *env, struct osd_device *osd,
-		      qid_t uid, qid_t gid, long long space,
-		      struct osd_thandle *oh, bool is_blk, int *flags,
-		      bool force);
+		      qid_t uid, qid_t gid, qid_t projid, long long space,
+		      struct osd_thandle *oh, int *flags,
		      enum osd_qid_declare_flags osd_qid_declare_flags);
 uint64_t osd_objs_count_estimate(uint64_t refdbytes, uint64_t usedobjs,
-				 uint64_t nrblocks);
+				 uint64_t nrblocks, uint64_t est_maxblockshift);
+int osd_unlinked_object_free(const struct lu_env *env, struct osd_device *osd,
+			     uint64_t oid);
 
 /*
  * Helpers.
  */
@@ -392,6 +530,17 @@ static inline int osd_invariant(const struct osd_object *obj)
 	return 1;
 }
 
+/**
+ * Put the osd object once done with it.
+ *
+ * \param obj	osd object that needs to be put
+ */
+static inline void osd_object_put(const struct lu_env *env,
+				  struct osd_object *obj)
+{
+	dt_object_put(env, &obj->oo_dt);
+}
+
 static inline int osd_object_invariant(const struct lu_object *l)
 {
 	return osd_invariant(osd_obj(l));
@@ -404,10 +553,36 @@ static inline struct seq_server_site *osd_seq_site(struct osd_device *osd)
 
 static inline char *osd_name(struct osd_device *osd)
 {
-	return osd->od_dt_dev.dd_lu_dev.ld_obd->obd_name;
+	return osd->od_svname;
 }
 
-#ifdef LPROCFS
+static inline void zfs_set_bit(int nr, __u8 *addr)
+{
+	set_bit(nr, (unsigned long *)addr);
+}
+
+static inline int zfs_test_bit(int nr, __u8 *addr)
+{
+	return test_bit(nr, (const unsigned long *)addr);
+}
+
+static inline int osd_oi_fid2idx(struct osd_device *dev,
+				 const struct lu_fid *fid)
+{
+	return fid->f_seq & (dev->od_oi_count - 1);
+}
+
+static inline struct osd_oi *osd_fid2oi(struct osd_device *osd,
+					const struct lu_fid *fid)
+{
+	LASSERTF(osd->od_oi_table && osd->od_oi_count >= 1,
+		 "%s: "DFID", oi_count %d\n",
+		 osd_name(osd), PFID(fid), osd->od_oi_count);
+
+	return osd->od_oi_table[osd_oi_fid2idx(osd, fid)];
+}
+
+#ifdef CONFIG_PROC_FS
 enum {
 	LPROC_OSD_READ_BYTES = 0,
 	LPROC_OSD_WRITE_BYTES = 1,
@@ -422,15 +597,18 @@ enum {
 	LPROC_OSD_LAST,
 };
 
+extern struct kmem_cache *osd_zapit_cachep;
 /* osd_lproc.c */
-extern struct lprocfs_seq_vars lprocfs_osd_obd_vars[];
+extern struct lprocfs_vars lprocfs_osd_obd_vars[];
 int osd_procfs_init(struct osd_device *osd, const char *name);
 int osd_procfs_fini(struct osd_device *osd);
 
 /* osd_object.c */
 extern char *osd_obj_tag;
-void osd_object_sa_dirty_rele(struct osd_thandle *oh);
+int __osd_obj2dnode(objset_t *os, uint64_t oid, dnode_t **dnp);
+void osd_object_sa_dirty_rele(const struct lu_env *env, struct osd_thandle *oh);
+void osd_object_sa_dirty_add(struct osd_object *obj, struct osd_thandle *oh);
 int __osd_obj2dbuf(const struct lu_env *env, objset_t *os, uint64_t oid,
 		   dmu_buf_t **dbp);
 struct lu_object *osd_object_alloc(const struct lu_env *env,
@@ -439,11 +617,16 @@ struct lu_object *osd_object_alloc(const struct lu_env *env,
 int osd_object_sa_update(struct osd_object *obj, sa_attr_type_t type,
			 void *buf, uint32_t buflen, struct osd_thandle *oh);
 int __osd_zap_create(const struct lu_env *env, struct osd_device *osd,
-		     dmu_buf_t **zap_dbp, dmu_tx_t *tx, struct lu_attr *la,
-		     uint64_t parent, zap_flags_t flags);
+		     dnode_t **zap_dnp, dmu_tx_t *tx, struct lu_attr *la,
+		     unsigned dnsize, zap_flags_t flags);
 int __osd_object_create(const struct lu_env *env, struct osd_device *osd,
-			dmu_buf_t **dbp, dmu_tx_t *tx, struct lu_attr *la,
-			uint64_t parent);
+			struct osd_object *obj, const struct lu_fid *fid,
+			dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la);
+int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
+		    struct osd_object *obj, sa_handle_t *sa_hdl, dmu_tx_t *tx,
+		    struct lu_attr *la, uint64_t parent, nvlist_t *);
+int osd_find_new_dnode(const struct lu_env *env, dmu_tx_t *tx,
+		       uint64_t oid, dnode_t **dnp);
 
 /* osd_oi.c */
 int osd_oi_init(const struct lu_env *env, struct osd_device *o);
@@ -451,46 +634,102 @@ void osd_oi_fini(const struct lu_env *env, struct osd_device *o);
 int osd_fid_lookup(const struct lu_env *env, struct osd_device *,
		   const struct lu_fid *, uint64_t *);
 uint64_t osd_get_name_n_idx(const struct lu_env *env, struct osd_device *osd,
-			    const struct lu_fid *fid, char *buf);
+			    const struct lu_fid *fid, char *buf, int bufsize,
+			    dnode_t **zdn);
 int osd_options_init(void);
-int osd_convert_root_to_new_seq(const struct lu_env *env,
-				struct osd_device *o);
 int osd_ost_seq_exists(const struct lu_env *env, struct osd_device *osd,
		       __u64 seq);
+int osd_idc_find_and_init(const struct lu_env *env, struct osd_device *osd,
+			  struct osd_object *obj);
+struct osd_idmap_cache *osd_idc_find_or_init(const struct lu_env *env,
+					     struct osd_device *osd,
+					     const struct lu_fid *fid);
+struct osd_idmap_cache *osd_idc_find(const struct lu_env *env,
+				     struct osd_device *osd,
+				     const struct lu_fid *fid);
+int osd_idc_find_and_init_with_oid(const struct lu_env *env,
+				   struct osd_device *osd,
+				   const struct lu_fid *fid,
+				   uint64_t oid);
+int fid_is_on_ost(const struct lu_env *env, struct osd_device *osd,
+		  const struct lu_fid *fid);
+int osd_obj_find_or_create(const struct lu_env *env, struct osd_device *o,
+			   uint64_t parent, const char *name, uint64_t *child,
+			   const struct lu_fid *fid, bool isdir);
+
+extern unsigned int osd_oi_count;
+
 /* osd_index.c */
+int osd_get_fid_by_oid(const struct lu_env *env, struct osd_device *osd,
+		       uint64_t oid, struct lu_fid *fid);
 int osd_index_try(const struct lu_env *env, struct dt_object *dt,
		  const struct dt_index_features *feat);
 int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
-		   obd_seq seq, struct lu_seq_range *range);
+		   u64 seq, struct lu_seq_range *range);
 void osd_zap_cursor_init_serialized(zap_cursor_t *zc, struct objset *os,
				    uint64_t id, uint64_t dirhash);
 int osd_zap_cursor_init(zap_cursor_t **zc, struct objset *os,
			uint64_t id, uint64_t dirhash);
 void osd_zap_cursor_fini(zap_cursor_t *zc);
 uint64_t osd_zap_cursor_serialize(zap_cursor_t *zc);
+int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
+		   const struct lu_fid *fid);
+int osd_add_to_remote_parent(const struct lu_env *env,
+			     struct osd_device *osd,
+			     struct osd_object *obj,
+			     struct osd_thandle *oh);
+int osd_delete_from_remote_parent(const struct lu_env *env,
+				  struct osd_device *osd,
+				  struct osd_object *obj,
+				  struct osd_thandle *oh, bool destroy);
+int __osd_xattr_load_by_oid(struct osd_device *osd, uint64_t oid,
+			    nvlist_t **sa);
+
+/* osd_scrub.c */
+int osd_scrub_setup(const struct lu_env *env, struct osd_device *dev);
+void osd_scrub_cleanup(const struct lu_env *env, struct osd_device *dev);
+int osd_scrub_start(const struct lu_env *env, struct osd_device *dev,
+		    __u32 flags);
+void osd_scrub_stop(struct osd_device *dev);
+int osd_oii_insert(const struct lu_env *env, struct osd_device *dev,
+		   const struct lu_fid *fid, uint64_t oid, bool insert);
+int osd_oii_lookup(struct osd_device *dev, const struct lu_fid *fid,
+		   uint64_t *oid);
 
 /* osd_xattr.c */
-int __osd_xattr_load(struct osd_device *osd, uint64_t dnode,
-		     nvlist_t **sa_xattr);
+int __osd_sa_xattr_schedule_update(const struct lu_env *env,
+				   struct osd_object *obj,
+				   struct osd_thandle *oh);
+int __osd_sa_attr_init(const struct lu_env *env, struct osd_object *obj,
+		       struct osd_thandle *oh);
+int __osd_sa_xattr_update(const struct lu_env *env, struct osd_object *obj,
+			  struct osd_thandle *oh);
+int __osd_xattr_load(struct osd_device *osd, sa_handle_t *hdl,
+		     nvlist_t **sa);
 int __osd_xattr_get_large(const struct lu_env *env, struct osd_device *osd,
			  uint64_t xattr, struct lu_buf *buf,
			  const char *name, int *sizep);
+int osd_xattr_get_internal(const struct lu_env *env, struct osd_object *obj,
+			   struct lu_buf *buf, const char *name, int *sizep);
 int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
-		  struct lu_buf *buf, const char *name,
-		  struct lustre_capa *capa);
+		  struct lu_buf *buf, const char *name);
 int osd_declare_xattr_set(const struct lu_env *env, struct dt_object *dt,
			  const struct lu_buf *buf, const char *name,
			  int fl, struct thandle *handle);
 int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
		  const struct lu_buf *buf, const char *name, int fl,
-		  struct thandle *handle, struct lustre_capa *capa);
+		  struct thandle *handle);
 int osd_declare_xattr_del(const struct lu_env *env, struct dt_object *dt,
			  const char *name, struct thandle *handle);
 int osd_xattr_del(const struct lu_env *env, struct dt_object *dt,
-		  const char *name, struct thandle *handle,
-		  struct lustre_capa *capa);
+		  const char *name, struct thandle *handle);
+void osd_declare_xattrs_destroy(const struct lu_env *env,
+				struct osd_object *obj,
+				struct osd_thandle *oh);
+int osd_xattrs_destroy(const struct lu_env *env,
+		       struct osd_object *obj, struct osd_thandle *oh);
 int osd_xattr_list(const struct lu_env *env, struct dt_object *dt,
-		   struct lu_buf *lb, struct lustre_capa *capa);
+		   const struct lu_buf *lb);
 void __osd_xattr_declare_set(const struct lu_env *env, struct osd_object *obj,
			     int vallen, const char *name, struct osd_thandle *oh);
 int __osd_sa_xattr_set(const struct lu_env *env, struct osd_object *obj,
@@ -499,13 +738,46 @@ int __osd_sa_xattr_set(const struct lu_env *env, struct osd_object *obj,
		       const struct lu_buf *buf, const char *name, int fl,
		       struct osd_thandle *oh);
 int __osd_xattr_set(const struct lu_env *env, struct osd_object *obj,
		    const struct lu_buf *buf, const char *name, int fl,
		    struct osd_thandle *oh);
+int __osd_sa_xattr_update(const struct lu_env *env, struct osd_object *obj,
+			  struct osd_thandle *oh);
+
+#define OSD_BASE_EA_IN_BONUS	(ZFS_SA_BASE_ATTR_SIZE + \
+				 sizeof(__u64) /* VBR VERSION */ + \
+				 sizeof(struct lustre_mdt_attrs) /* LMA */)
+
+#ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
+int osd_find_dnsize(struct osd_device *osd, int ea_in_bonus);
+#else
+static inline int
+osd_find_dnsize(struct osd_device *osd, int ea_in_bonus)
+{
+	return DN_MAX_BONUSLEN;
+}
+#endif
+
+static inline int osd_object_is_zap(dnode_t *dn)
+{
+	return (dn->dn_type == DMU_OT_DIRECTORY_CONTENTS ||
+		dn->dn_type == DMU_OT_USERGROUP_USED);
+}
+
+/* XXX: f_ver is not counted, but may differ too */
+static inline void osd_fid2str(char *buf, const struct lu_fid *fid, int len)
+{
+	snprintf(buf, len, DFID_NOBRACE, PFID(fid));
+}
+
 static inline int
 osd_xattr_set_internal(const struct lu_env *env, struct osd_object *obj,
		       const struct lu_buf *buf, const char *name, int fl,
-		       struct osd_thandle *oh, struct lustre_capa *capa)
+		       struct osd_thandle *oh)
 {
	int rc;
 
+	if (unlikely(!dt_object_exists(&obj->oo_dt) || obj->oo_destroyed))
+		return -ENOENT;
+
+	LASSERT(obj->oo_dn);
	if (osd_obj2dev(obj)->od_xattr_in_sa) {
		rc = __osd_sa_xattr_set(env, obj, buf, name, fl, oh);
		if (rc == -EFBIG)
@@ -519,29 +791,328 @@ osd_xattr_set_internal(const struct lu_env *env, struct osd_object *obj,
 
 static inline uint64_t attrs_fs2zfs(const uint32_t flags)
 {
-	return (((flags & FS_APPEND_FL) ? ZFS_APPENDONLY : 0) |
-		((flags & FS_NODUMP_FL) ? ZFS_NODUMP : 0) |
-		((flags & FS_IMMUTABLE_FL) ? ZFS_IMMUTABLE : 0));
+	return (flags & LUSTRE_APPEND_FL ? ZFS_APPENDONLY : 0) |
+	       (flags & LUSTRE_NODUMP_FL ? ZFS_NODUMP : 0) |
+#ifdef ZFS_PROJINHERIT
+	       (flags & LUSTRE_PROJINHERIT_FL ? ZFS_PROJINHERIT : 0) |
+#endif
+	       (flags & LUSTRE_IMMUTABLE_FL ? ZFS_IMMUTABLE : 0);
 }
 
 static inline uint32_t attrs_zfs2fs(const uint64_t flags)
 {
-	return (((flags & ZFS_APPENDONLY) ? FS_APPEND_FL : 0) |
-		((flags & ZFS_NODUMP) ? FS_NODUMP_FL : 0) |
-		((flags & ZFS_IMMUTABLE) ? FS_IMMUTABLE_FL : 0));
+	return (flags & ZFS_APPENDONLY ? LUSTRE_APPEND_FL : 0) |
+	       (flags & ZFS_NODUMP ? LUSTRE_NODUMP_FL : 0) |
+#ifdef ZFS_PROJINHERIT
+	       (flags & ZFS_PROJINHERIT ? LUSTRE_PROJINHERIT_FL : 0) |
+#endif
+	       (flags & ZFS_IMMUTABLE ? LUSTRE_IMMUTABLE_FL : 0);
 }
 
 #endif
 
 #ifndef HAVE_DSL_POOL_CONFIG
-static inline void dsl_pool_config_enter(dsl_pool_t *dp, char *name)
+static inline void dsl_pool_config_enter(dsl_pool_t *dp, void *name)
+{
+}
+
+static inline void dsl_pool_config_exit(dsl_pool_t *dp, void *name)
 {
 }
+#endif
+
+#ifdef HAVE_SPA_MAXBLOCKSIZE
+#define	osd_spa_maxblocksize(spa)	spa_maxblocksize(spa)
+#define	osd_spa_maxblockshift(spa)	fls64(spa_maxblocksize(spa) - 1)
+#else
+#define	osd_spa_maxblocksize(spa)	SPA_MAXBLOCKSIZE
+#define	osd_spa_maxblockshift(spa)	SPA_MAXBLOCKSHIFT
+#define	SPA_OLD_MAXBLOCKSIZE		SPA_MAXBLOCKSIZE
+#endif
+
+#ifdef HAVE_SA_SPILL_ALLOC
+static inline void *
+osd_zio_buf_alloc(size_t size)
+{
+	return sa_spill_alloc(KM_SLEEP);
+}
+
+static inline void
+osd_zio_buf_free(void *buf, size_t size)
+{
+	sa_spill_free(buf);
+}
+#else
+#define	osd_zio_buf_alloc(size)		zio_buf_alloc(size)
+#define	osd_zio_buf_free(buf, size)	zio_buf_free(buf, size)
+#endif
+
+#ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
+static inline uint64_t
+osd_dmu_object_alloc(objset_t *os, dmu_object_type_t objtype, int blocksize,
+		     int dnodesize, dmu_tx_t *tx)
+{
+	if (dnodesize == 0)
+		dnodesize = MAX(dmu_objset_dnodesize(os), DNODE_MIN_SIZE);
+
+	return dmu_object_alloc_dnsize(os, objtype, blocksize, DMU_OT_SA,
+				       DN_BONUS_SIZE(dnodesize), dnodesize, tx);
+}
+
+static inline uint64_t
+osd_zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
+		     dmu_object_type_t ot, int leaf_blockshift,
+		     int indirect_blockshift, int dnodesize, dmu_tx_t *tx)
+{
+	if (dnodesize == 0)
+		dnodesize = MAX(dmu_objset_dnodesize(os), DNODE_MIN_SIZE);
+
+	return zap_create_flags_dnsize(os, normflags, flags, ot,
+				       leaf_blockshift, indirect_blockshift,
+				       DMU_OT_SA, DN_BONUS_SIZE(dnodesize),
+				       dnodesize, tx);
+}
+
+static inline int
+osd_obj_bonuslen(struct osd_object *obj)
+{
+	int bonuslen = DN_BONUS_SIZE(DNODE_MIN_SIZE);
+
+	if (obj->oo_dn != NULL && obj->oo_dn->dn_num_slots != 0) {
+		bonuslen = DN_SLOTS_TO_BONUSLEN(obj->oo_dn->dn_num_slots);
+	} else {
+		objset_t *os = osd_dtobj2objset(&obj->oo_dt);
+		int dnodesize;
+
+		if (os != NULL) {
+			dnodesize = dmu_objset_dnodesize(os);
+			if (dnodesize != 0)
+				bonuslen = DN_BONUS_SIZE(dnodesize);
+		}
+	}
+
+	return bonuslen;
+}
+#else
+static inline uint64_t
+osd_dmu_object_alloc(objset_t *os, dmu_object_type_t objtype, int blocksize,
+		     int dnodesize, dmu_tx_t *tx)
+{
+	return dmu_object_alloc(os, objtype, blocksize, DMU_OT_SA,
+				DN_MAX_BONUSLEN, tx);
+}
+
+static inline uint64_t
+osd_zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
+		     dmu_object_type_t ot, int leaf_blockshift,
+		     int indirect_blockshift, int dnodesize, dmu_tx_t *tx)
+{
+	return zap_create_flags(os, normflags, flags, ot, leaf_blockshift,
+				indirect_blockshift, DMU_OT_SA,
+				DN_MAX_BONUSLEN, tx);
+}
+
+static inline int
+osd_obj_bonuslen(struct osd_object *obj)
+{
+	return DN_MAX_BONUSLEN;
+}
+#endif /* HAVE_DMU_OBJECT_ALLOC_DNSIZE */
+
+#ifdef HAVE_DMU_PREFETCH_6ARG
+#define osd_dmu_prefetch(os, obj, lvl, off, len, pri)	\
+	dmu_prefetch((os), (obj), (lvl), (off), (len), (pri))
+#else
+#define osd_dmu_prefetch(os, obj, lvl, off, len, pri)	\
+	dmu_prefetch((os), (obj), (lvl), (off))
+#endif
+
+static inline int osd_sa_handle_get(struct osd_object *obj)
+{
+	struct osd_device *osd = osd_obj2dev(obj);
+	dnode_t *dn = obj->oo_dn;
+	int rc;
+
+	if (obj->oo_sa_hdl)
+		return 0;
+
+	dbuf_read(dn->dn_bonus, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH);
+	rc = -sa_handle_get_from_db(osd->od_os, &dn->dn_bonus->db, obj,
+				    SA_HDL_PRIVATE, &obj->oo_sa_hdl);
+	if (rc)
+		return rc;
+	zfs_refcount_add(&dn->dn_bonus->db_holds, osd_obj_tag);
+	return 0;
+}
+
+static inline void osd_dnode_rele(dnode_t *dn)
+{
+	dmu_buf_impl_t *db;
+	LASSERT(dn);
+	LASSERT(dn->dn_bonus);
+	db = dn->dn_bonus;
+
+	dmu_buf_rele(&db->db, osd_obj_tag);
+}
+
+#ifdef HAVE_DMU_USEROBJ_ACCOUNTING
+
+#define OSD_DMU_USEROBJ_PREFIX		DMU_OBJACCT_PREFIX
+#define OSD_DMU_USEROBJ_PREFIX_LEN	DMU_OBJACCT_PREFIX_LEN
+
+static inline bool osd_dmu_userobj_accounting_available(struct osd_device *osd)
+{
+	return dmu_objset_userobjspace_present(osd->od_os);
+}
+#else
 
-static inline void dsl_pool_config_exit(dsl_pool_t *dp, char *name)
+#define OSD_DMU_USEROBJ_PREFIX		"obj-"
+#define OSD_DMU_USEROBJ_PREFIX_LEN	4
+
+static inline bool osd_dmu_userobj_accounting_available(struct osd_device *osd)
+{
+	return false;
+}
+#endif /* #ifdef HAVE_DMU_USEROBJ_ACCOUNTING */
+
+static inline int osd_zap_add(struct osd_device *osd, uint64_t zap,
+			      dnode_t *dn, const char *key,
+			      int int_size, int int_num,
+			      const void *val, dmu_tx_t *tx)
+{
+	LASSERT(zap != 0);
+
+#ifdef HAVE_ZAP_ADD_BY_DNODE
+	if (dn)
+		return -zap_add_by_dnode(dn, key, int_size, int_num, val, tx);
+#endif
+	return -zap_add(osd->od_os, zap, key, int_size, int_num, val, tx);
+}
+
+static inline int osd_zap_remove(struct osd_device *osd, uint64_t zap,
+				 dnode_t *dn, const char *key,
+				 dmu_tx_t *tx)
+{
+	LASSERT(zap != 0);
+
+#ifdef HAVE_ZAP_ADD_BY_DNODE
+	if (dn)
+		return -zap_remove_by_dnode(dn, key, tx);
+#endif
+	return -zap_remove(osd->od_os, zap, key, tx);
+}
+
+
+static inline int osd_zap_lookup(struct osd_device *osd, uint64_t zap,
+				 dnode_t *dn, const char *key,
+				 int int_size, int int_num, void *v)
 {
+	LASSERT(zap != 0);
+
+#ifdef HAVE_ZAP_ADD_BY_DNODE
+	if (dn)
+		return -zap_lookup_by_dnode(dn, key, int_size, int_num, v);
+#endif
+	return -zap_lookup(osd->od_os, zap, key, int_size, int_num, v);
 }
 
+static inline void osd_tx_hold_zap(dmu_tx_t *tx, uint64_t zap,
+				   dnode_t *dn, int add, const char *name)
+{
+#ifdef HAVE_DMU_TX_HOLD_ZAP_BY_DNODE
+	if (dn) {
+		dmu_tx_hold_zap_by_dnode(tx, dn, add, name);
+		return;
+	}
+#endif
+	dmu_tx_hold_zap(tx, zap, add, name);
+}
+
+static inline void osd_tx_hold_write(dmu_tx_t *tx, uint64_t oid,
+				     dnode_t *dn, uint64_t off, int len)
+{
+#ifdef HAVE_DMU_TX_HOLD_ZAP_BY_DNODE
+	if (dn) {
+		dmu_tx_hold_write_by_dnode(tx, dn, off, len);
+		return;
+	}
+#endif
+	dmu_tx_hold_write(tx, oid, off, len);
+}
+
+static inline void osd_dmu_write(struct osd_device *osd, dnode_t *dn,
+				 uint64_t offset, uint64_t size,
+				 const char *buf, dmu_tx_t *tx)
+{
+	LASSERT(dn);
+#ifdef HAVE_DMU_WRITE_BY_DNODE
+	dmu_write_by_dnode(dn, offset, size, buf, tx);
+#else
+	dmu_write(osd->od_os, dn->dn_object, offset, size, buf, tx);
+#endif
+}
+
+static inline int osd_dmu_read(struct osd_device *osd, dnode_t *dn,
+			       uint64_t offset, uint64_t size,
+			       char *buf, int flags)
+{
+	LASSERT(dn);
+#ifdef HAVE_DMU_READ_BY_DNODE
+	return -dmu_read_by_dnode(dn, offset, size, buf, flags);
+#else
+	return -dmu_read(osd->od_os, dn->dn_object, offset, size, buf, flags);
+#endif
+}
+
+#ifdef HAVE_DMU_OBJSET_OWN_6ARG
+#define osd_dmu_objset_own(name, type, ronly, decrypt, tag, os)	\
+	dmu_objset_own((name), (type), (ronly), (decrypt), (tag), (os))
+#else
+#define osd_dmu_objset_own(name, type, ronly, decrypt, tag, os)	\
+	dmu_objset_own((name), (type), (ronly), (tag), (os))
+#endif
+
+#ifdef HAVE_DMU_OBJSET_DISOWN_3ARG
+#define osd_dmu_objset_disown(os, decrypt, tag)	\
+	dmu_objset_disown((os), (decrypt), (tag))
+#else
+#define osd_dmu_objset_disown(os, decrypt, tag)	\
+	dmu_objset_disown((os), (tag))
+#endif
+
+static inline int
+osd_index_register(struct osd_device *osd, const struct lu_fid *fid,
+		   __u32 keysize, __u32 recsize)
+{
+	return lustre_index_register(&osd->od_dt_dev, osd_name(osd),
+				     &osd->od_index_backup_list, &osd->od_lock,
+				     &osd->od_index_backup_stop,
+				     fid, keysize, recsize);
+}
+
+static inline void
+osd_index_backup(const struct lu_env *env, struct osd_device *osd, bool backup)
+{
+	struct lu_fid *fid = &osd_oti_get(env)->oti_fid;
+	int rc;
+
+	lu_local_obj_fid(fid, INDEX_BACKUP_OID);
+	rc = osd_idc_find_and_init_with_oid(env, osd, fid,
+					    osd->od_index_backup_id);
+	if (rc)
+		backup = false;
+
+	lustre_index_backup(env, &osd->od_dt_dev, osd_name(osd),
+			    &osd->od_index_backup_list, &osd->od_lock,
+			    &osd->od_index_backup_stop, backup);
+}
+
+#ifndef HAVE_DMU_TX_MARK_NETFREE
+#define dmu_tx_mark_netfree(tx)
+#endif
+
+#ifndef HAVE_ZFS_INODE_TIMESPEC
+#define inode_timespec_t	timestruc_t
 #endif
 
 #endif /* _OSD_INTERNAL_H */