4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, 2015, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/osd-zfs/osd_internal.h
37 * Shared definitions and declarations for zfs/dmu osd
39 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
40 * Author: Mike Pershin <tappro@whamcloud.com>
41 * Author: Johann Lombardi <johann@whamcloud.com>
44 #ifndef _OSD_INTERNAL_H
45 #define _OSD_INTERNAL_H
47 #include <dt_object.h>
48 #include <md_object.h>
49 #include <lustre_quota.h>
54 #include <sys/nvpair.h>
55 #include <sys/zfs_znode.h>
60 * By design including kmem.h overrides the Linux slab interfaces to provide
61 * the Illumos kmem cache interfaces. To override this and gain access to
62 * the Linux interfaces these preprocessor macros must be undefined.
64 #ifdef kmem_cache_destroy
65 #undef kmem_cache_destroy
68 #ifdef kmem_cache_create
69 #undef kmem_cache_create
72 #ifdef kmem_cache_alloc
73 #undef kmem_cache_alloc
76 #ifdef kmem_cache_free
77 #undef kmem_cache_free
80 #define LUSTRE_ROOT_FID_SEQ 0
81 #define DMU_OSD_SVNAME "svname"
82 #define DMU_OSD_OI_NAME_BASE "oi"
84 #define OSD_GFP_IO (GFP_NOFS | __GFP_HIGHMEM)
86 /* Statfs space reservation for grant, fragmentation, and unlink space. */
87 #define OSD_STATFS_RESERVED_SIZE (16ULL << 20) /* reserve 16MB minimum */
88 #define OSD_STATFS_RESERVED_SHIFT (7) /* reserve 0.78% of all space */
90 /* Statfs {minimum, safe estimate, and maximum} dnodes per block */
91 #define OSD_DNODE_MIN_BLKSHIFT (DNODES_PER_BLOCK_SHIFT)
92 #define OSD_DNODE_EST_BLKSHIFT (DNODES_PER_BLOCK_SHIFT >> 1)
93 #define OSD_DNODE_EST_COUNT 1024
95 #define OSD_GRANT_FOR_LOCAL_OIDS (2ULL << 20) /* 2MB for last_rcvd, ... */
98 * Iterator's in-memory data structure for quota file.
/* NOTE(review): this capture is missing interior lines of the struct
 * (object/record id fields and the closing brace); comments document
 * only what is visible here. */
100 struct osd_it_quota {
101 struct osd_object *oiq_obj;	/* accounting object being iterated */
102 /* DMU accounting object id */
105 zap_cursor_t *oiq_zc;	/* ZAP cursor over the accounting ZAP */
106 /** identifier for current quota record */
108 unsigned oiq_reset:1; /* 1 -- no need to advance */
112 * Iterator's in-memory data structure for ZAPs
114 * ZFS does not store . and .. on a disk, instead they are
115 * generated up on request
116 * to follow this format we do the same
/* NOTE(review): the `struct osd_zap_it {` opener and part of the
 * ozi_pos comment/field were dropped from this capture. */
119 zap_cursor_t *ozi_zc;	/* ZAP cursor for directory iteration */
120 struct osd_object *ozi_obj;	/* object (directory) being iterated */
121 unsigned ozi_reset:1; /* 1 -- no need to advance */
122 /* ozi_pos - position of the cursor:
123 * 0 - before any record
126 * 3 - real records */
129 char ozi_name[MAXNAMELEN]; /* file name for dir */
130 __u64 ozi_key; /* binary key for index files */
133 #define DT_IT2DT(it) (&((struct osd_zap_it *)it)->ozi_obj->oo_dt)
136 * regular ZFS direntry
/* On-disk ZFS directory entry: dnode number packed into the low 48 bits.
 * NOTE(review): remaining bitfields (type/pad) were dropped from this
 * capture; layout is packed — do not reorder fields. */
138 struct zpl_direntry {
139 uint64_t zde_dnode:48,
142 } __attribute__((packed));
145 * lustre direntry adds a fid to regular ZFS direntry
/* Lustre directory entry: the regular ZFS entry followed by the FID.
 * Packed on-disk layout — field order and packing must not change. */
147 struct luz_direntry {
148 struct zpl_direntry lzd_reg;	/* standard ZFS part (dnode number) */
149 struct lu_fid lzd_fid;	/* Lustre FID appended after it */
150 } __attribute__((packed));
153 /* cached SA attributes */
167 /* max.number of regular attrubites the callers may ask for */
168 #define OSD_MAX_IN_BULK 13
/* Per-environment scratch area for osd-zfs: pre-allocated buffers and
 * temporaries reused across calls to avoid stack/heap churn.  Retrieved
 * via osd_oti_get().  NOTE(review): several members and the closing
 * brace were dropped from this capture. */
170 struct osd_thread_info {
171 const struct lu_env *oti_env;	/* back pointer to owning env */
173 struct lu_fid oti_fid;	/* scratch FID */
175 * XXX temporary: for ->i_op calls.
177 struct timespec oti_time;
179 struct ost_id oti_ostid;
185 char oti_key[MAXNAMELEN + 1];	/* scratch name buffer */
186 __u64 oti_key64[(MAXNAMELEN + 1)/sizeof(__u64)];
187 sa_bulk_attr_t oti_attr_bulk[OSD_MAX_IN_BULK];	/* bulk SA fetch array */
189 struct lustre_mdt_attrs oti_mdt_attrs;
191 struct lu_attr oti_la;
192 struct osa_attr oti_osa;
193 zap_attribute_t oti_za;	/* scratch ZAP attribute for lookups */
194 dmu_object_info_t oti_doi;
195 struct luz_direntry oti_zde;	/* scratch directory entry */
197 struct lquota_id_info oti_qi;
198 struct lu_seq_range oti_seq_range;
199 struct lu_buf oti_xattr_lbuf;	/* reusable buffer for xattr values */
202 extern struct lu_context_key osd_key;
204 static inline struct osd_thread_info *osd_oti_get(const struct lu_env *env)
206 return lu_context_key_get(&env->le_ctx, &osd_key);
/* NOTE(review): `struct osd_thandle {` opener, the tx handle members and
 * the closing brace were dropped from this capture. */
210 struct thandle ot_super;	/* generic transaction handle (must be first) */
211 struct list_head ot_dcb_list;	/* commit callbacks */
212 struct list_head ot_stop_dcb_list;	/* callbacks run at trans stop */
213 struct list_head ot_unlinked_list;	/* objects to destroy at commit */
214 struct list_head ot_sa_list;	/* dirty SA handles in this trans */
216 struct lquota_trans ot_quota_trans;	/* quota bookkeeping for the trans */
217 __u32 ot_write_commit:1,
221 #define OSD_OI_NAME_SIZE 16
224 * Object Index (OI) instance.
227 char oi_name[OSD_OI_NAME_SIZE]; /* unused */
233 uint64_t *os_compat_dirs;
234 int os_subdir_count; /* subdir count for each seq */
235 u64 os_seq; /* seq number */
236 struct list_head os_seq_list; /* list to seq_list */
239 struct osd_seq_list {
240 rwlock_t osl_seq_list_lock; /* lock for seq_list */
241 struct list_head osl_seq_list; /* list head for seq */
242 struct semaphore osl_seq_init_sem;
245 #define OSD_OST_MAP_SIZE 32
/* NOTE(review): the `struct osd_device {` opener and several members
 * (mount state, flags, closing brace) were dropped from this capture;
 * comments document only what is visible. */
252 struct dt_device od_dt_dev;	/* generic dt device (must be first) */
253 /* information about underlying file system */
254 struct objset *od_os;	/* the backing ZFS objset */
255 uint64_t od_rootid; /* id of root znode */
256 uint64_t od_unlinkedid; /* id of unlinked zapobj */
257 /* SA attr mapping->id,
258 * name is the same as in ZFS to use defines SA_ZPL_...*/
259 sa_attr_type_t *z_attr_table;
261 struct proc_dir_entry *od_proc_entry;	/* /proc entry for this device */
262 struct lprocfs_stats *od_stats;	/* lprocfs counters (LPROC_OSD_*) */
264 uint64_t od_max_blksz;	/* largest block size seen/used */
267 struct osd_oi **od_oi_table;	/* Object Index containers */
268 unsigned int od_oi_count;	/* number of entries in od_oi_table */
269 struct osd_seq_list od_seq_list;	/* per-seq OST object dirs */
271 unsigned int od_dev_set_rdonly:1, /**< osd_ro() called */
272 od_prop_rdonly:1, /**< ZFS property readonly */
274 od_quota_iused_est:1,
282 struct lu_site od_site;	/* object cache site */
284 /* object IDs of the inode accounting indexes */
285 uint64_t od_iusr_oid;	/* per-uid accounting ZAP */
286 uint64_t od_igrp_oid;	/* per-gid accounting ZAP */
288 /* quota slave instance */
289 struct qsd_instance *od_quota_slave;
291 struct brw_stats od_brw_stats;	/* bulk read/write histograms */
292 atomic_t od_r_in_flight;	/* reads currently in flight */
293 atomic_t od_w_in_flight;	/* writes currently in flight */
295 /* used to debug zerocopy logic: the fields track all
296 * allocated, loaned and referenced buffers in use.
297 * to be removed once the change is tested well. */
298 atomic_t od_zerocopy_alloc;
299 atomic_t od_zerocopy_loan;
300 atomic_t od_zerocopy_pin;
302 arc_prune_t *arc_prune_cb;	/* ARC prune callback registration */
304 /* osd seq instance */
305 struct lu_client_seq *od_cl_seq;
/* How an object's on-disk destruction is performed.
 * NOTE(review): the closing `};` was dropped from this capture. */
308 enum osd_destroy_type {
309 OSD_DESTROY_NONE = 0,	/* no destroy scheduled */
310 OSD_DESTROY_SYNC = 1,	/* destroy synchronously in the current tx */
311 OSD_DESTROY_ASYNC = 2,	/* defer destroy (e.g. via unlinked list) */
/* NOTE(review): the `struct osd_object {` opener and several members
 * (dnode pointer, flag bits, closing brace) were dropped from this
 * capture; comments document only what is visible. */
315 struct dt_object oo_dt;	/* generic dt object (must be first) */
317 * Inode for file system object represented by this osd_object. This
318 * inode is pinned for the whole duration of lu_object life.
320 * Not modified concurrently (either setup early during object
321 * creation, or assigned by osd_object_create() under write lock).
324 sa_handle_t *oo_sa_hdl;	/* SA handle for bonus/spill attributes */
325 nvlist_t *oo_sa_xattr;	/* cached SA-stored xattrs */
326 struct list_head oo_sa_linkage;	/* linkage into thandle ot_sa_list */
328 /* used to implement osd_object_*_{lock|unlock} */
329 struct rw_semaphore oo_sem;
331 /* to serialize some updates: destroy vs. others,
333 struct rw_semaphore oo_guard;
335 /* protected by oo_guard */
336 struct list_head oo_unlinked_linkage;
338 /* cached attributes */
339 rwlock_t oo_attr_lock;	/* protects oo_attr */
340 struct lu_attr oo_attr;
342 /* external dnode holding large EAs, protected by oo_guard */
344 enum osd_destroy_type oo_destroy;	/* pending destroy mode */
346 __u32 oo_destroyed:1,
349 /* the i_flags in LMA */
351 /* record size for index file */
352 unsigned char oo_keysize;
353 unsigned char oo_recsize;
354 unsigned char oo_recusize; /* unit size */
357 int osd_statfs(const struct lu_env *, struct dt_device *, struct obd_statfs *);
358 extern const struct dt_index_operations osd_acct_index_ops;
359 uint64_t osd_quota_fid2dmu(const struct lu_fid *fid);
360 extern struct lu_device_operations osd_lu_ops;
361 extern struct dt_index_operations osd_dir_ops;
362 int osd_declare_quota(const struct lu_env *env, struct osd_device *osd,
363 qid_t uid, qid_t gid, long long space,
364 struct osd_thandle *oh, bool is_blk, int *flags,
366 uint64_t osd_objs_count_estimate(uint64_t refdbytes, uint64_t usedobjs,
367 uint64_t nrblocks, uint64_t est_maxblockshift);
372 static inline int lu_device_is_osd(const struct lu_device *d)
374 return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &osd_lu_ops);
377 static inline struct osd_object *osd_obj(const struct lu_object *o)
379 LASSERT(lu_device_is_osd(o->lo_dev));
380 return container_of0(o, struct osd_object, oo_dt.do_lu);
383 static inline struct osd_device *osd_dt_dev(const struct dt_device *d)
385 LASSERT(lu_device_is_osd(&d->dd_lu_dev));
386 return container_of0(d, struct osd_device, od_dt_dev);
389 static inline struct osd_device *osd_dev(const struct lu_device *d)
391 LASSERT(lu_device_is_osd(d));
392 return osd_dt_dev(container_of0(d, struct dt_device, dd_lu_dev));
395 static inline struct osd_object *osd_dt_obj(const struct dt_object *d)
397 return osd_obj(&d->do_lu);
400 static inline struct osd_device *osd_obj2dev(const struct osd_object *o)
402 return osd_dev(o->oo_dt.do_lu.lo_dev);
405 static inline struct lu_device *osd2lu_dev(struct osd_device *osd)
407 return &osd->od_dt_dev.dd_lu_dev;
410 static inline struct objset * osd_dtobj2objset(struct dt_object *o)
412 return osd_dev(o->do_lu.lo_dev)->od_os;
415 static inline int osd_invariant(const struct osd_object *obj)
/* lu_object-level wrapper around osd_invariant(). */
static inline int osd_object_invariant(const struct lu_object *l)
{
	return osd_invariant(osd_obj(l));
}
425 static inline struct seq_server_site *osd_seq_site(struct osd_device *osd)
427 return osd->od_dt_dev.dd_lu_dev.ld_site->ld_seq_site;
430 static inline char *osd_name(struct osd_device *osd)
432 return osd->od_dt_dev.dd_lu_dev.ld_obd->obd_name;
435 #ifdef CONFIG_PROC_FS
437 LPROC_OSD_READ_BYTES = 0,
438 LPROC_OSD_WRITE_BYTES = 1,
439 LPROC_OSD_GET_PAGE = 2,
440 LPROC_OSD_NO_PAGE = 3,
441 LPROC_OSD_CACHE_ACCESS = 4,
442 LPROC_OSD_CACHE_HIT = 5,
443 LPROC_OSD_CACHE_MISS = 6,
444 LPROC_OSD_COPY_IO = 7,
445 LPROC_OSD_ZEROCOPY_IO = 8,
446 LPROC_OSD_TAIL_IO = 9,
450 extern struct kmem_cache *osd_zapit_cachep;
452 extern struct lprocfs_vars lprocfs_osd_obd_vars[];
454 int osd_procfs_init(struct osd_device *osd, const char *name);
455 int osd_procfs_fini(struct osd_device *osd);
458 extern char *osd_obj_tag;
459 void osd_object_sa_dirty_rele(const struct lu_env *env, struct osd_thandle *oh);
460 void osd_object_sa_dirty_add(struct osd_object *obj, struct osd_thandle *oh);
461 int __osd_obj2dbuf(const struct lu_env *env, objset_t *os,
462 uint64_t oid, dmu_buf_t **dbp);
463 struct lu_object *osd_object_alloc(const struct lu_env *env,
464 const struct lu_object_header *hdr,
465 struct lu_device *d);
466 int osd_object_sa_update(struct osd_object *obj, sa_attr_type_t type,
467 void *buf, uint32_t buflen, struct osd_thandle *oh);
468 int __osd_zap_create(const struct lu_env *env, struct osd_device *osd,
469 dmu_buf_t **zap_dbp, dmu_tx_t *tx, struct lu_attr *la,
470 uint64_t parent, zap_flags_t flags);
471 int __osd_object_create(const struct lu_env *env, struct osd_object *obj,
472 dmu_buf_t **dbp, dmu_tx_t *tx, struct lu_attr *la,
476 int osd_oi_init(const struct lu_env *env, struct osd_device *o);
477 void osd_oi_fini(const struct lu_env *env, struct osd_device *o);
478 int osd_fid_lookup(const struct lu_env *env,
479 struct osd_device *, const struct lu_fid *, uint64_t *);
480 uint64_t osd_get_name_n_idx(const struct lu_env *env, struct osd_device *osd,
481 const struct lu_fid *fid, char *buf);
482 int osd_options_init(void);
483 int osd_ost_seq_exists(const struct lu_env *env, struct osd_device *osd,
486 int osd_index_try(const struct lu_env *env, struct dt_object *dt,
487 const struct dt_index_features *feat);
488 int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
489 u64 seq, struct lu_seq_range *range);
490 void osd_zap_cursor_init_serialized(zap_cursor_t *zc, struct objset *os,
491 uint64_t id, uint64_t dirhash);
492 int osd_zap_cursor_init(zap_cursor_t **zc, struct objset *os,
493 uint64_t id, uint64_t dirhash);
494 void osd_zap_cursor_fini(zap_cursor_t *zc);
495 uint64_t osd_zap_cursor_serialize(zap_cursor_t *zc);
498 int __osd_sa_xattr_update(const struct lu_env *env, struct osd_object *obj,
499 struct osd_thandle *oh);
500 int __osd_xattr_load(struct osd_device *osd, uint64_t dnode,
501 nvlist_t **sa_xattr);
502 int __osd_xattr_get_large(const struct lu_env *env, struct osd_device *osd,
503 uint64_t xattr, struct lu_buf *buf,
504 const char *name, int *sizep);
505 int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
506 struct lu_buf *buf, const char *name);
507 int osd_declare_xattr_set(const struct lu_env *env, struct dt_object *dt,
508 const struct lu_buf *buf, const char *name,
509 int fl, struct thandle *handle);
510 int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
511 const struct lu_buf *buf, const char *name, int fl,
512 struct thandle *handle);
513 int osd_declare_xattr_del(const struct lu_env *env, struct dt_object *dt,
514 const char *name, struct thandle *handle);
515 int osd_xattr_del(const struct lu_env *env, struct dt_object *dt,
516 const char *name, struct thandle *handle);
517 void osd_declare_xattrs_destroy(const struct lu_env *env,
518 struct osd_object *obj,
519 struct osd_thandle *oh);
520 int osd_xattrs_destroy(const struct lu_env *env,
521 struct osd_object *obj, struct osd_thandle *oh);
522 int osd_xattr_list(const struct lu_env *env, struct dt_object *dt,
523 const struct lu_buf *lb);
524 void __osd_xattr_declare_set(const struct lu_env *env, struct osd_object *obj,
525 int vallen, const char *name, struct osd_thandle *oh);
/* Set an xattr stored in the dnode's SA area.
 * (Fix: stray double semicolon created an empty file-scope declaration,
 * which is invalid in strict C90 and a warning elsewhere.) */
int __osd_sa_xattr_set(const struct lu_env *env, struct osd_object *obj,
		       const struct lu_buf *buf, const char *name, int fl,
		       struct osd_thandle *oh);
529 int __osd_xattr_set(const struct lu_env *env, struct osd_object *obj,
530 const struct lu_buf *buf, const char *name, int fl,
531 struct osd_thandle *oh);
533 osd_xattr_set_internal(const struct lu_env *env, struct osd_object *obj,
534 const struct lu_buf *buf, const char *name, int fl,
535 struct osd_thandle *oh)
539 if (unlikely(!dt_object_exists(&obj->oo_dt) || obj->oo_destroyed))
543 if (osd_obj2dev(obj)->od_xattr_in_sa) {
544 rc = __osd_sa_xattr_set(env, obj, buf, name, fl, oh);
546 rc = __osd_xattr_set(env, obj, buf, name, fl, oh);
548 rc = __osd_xattr_set(env, obj, buf, name, fl, oh);
554 static inline uint64_t attrs_fs2zfs(const uint32_t flags)
556 return (flags & LUSTRE_APPEND_FL ? ZFS_APPENDONLY : 0) |
557 (flags & LUSTRE_NODUMP_FL ? ZFS_NODUMP : 0) |
558 (flags & LUSTRE_IMMUTABLE_FL ? ZFS_IMMUTABLE : 0);
561 static inline uint32_t attrs_zfs2fs(const uint64_t flags)
563 return (flags & ZFS_APPENDONLY ? LUSTRE_APPEND_FL : 0) |
564 (flags & ZFS_NODUMP ? LUSTRE_NODUMP_FL : 0) |
565 (flags & ZFS_IMMUTABLE ? LUSTRE_IMMUTABLE_FL : 0);
570 #ifndef HAVE_DSL_POOL_CONFIG
571 static inline void dsl_pool_config_enter(dsl_pool_t *dp, char *name)
575 static inline void dsl_pool_config_exit(dsl_pool_t *dp, char *name)
580 #ifdef HAVE_SPA_MAXBLOCKSIZE
581 #define osd_spa_maxblocksize(spa) spa_maxblocksize(spa)
582 #define osd_spa_maxblockshift(spa) fls64(spa_maxblocksize(spa) - 1)
584 #define osd_spa_maxblocksize(spa) SPA_MAXBLOCKSIZE
585 #define osd_spa_maxblockshift(spa) SPA_MAXBLOCKSHIFT
586 #define SPA_OLD_MAXBLOCKSIZE SPA_MAXBLOCKSIZE
589 #ifdef HAVE_SA_SPILL_ALLOC
591 osd_zio_buf_alloc(size_t size)
593 return sa_spill_alloc(KM_SLEEP);
597 osd_zio_buf_free(void *buf, size_t size)
602 #define osd_zio_buf_alloc(size) zio_buf_alloc(size)
603 #define osd_zio_buf_free(buf, size) zio_buf_free(buf, size)
606 #ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
607 static inline uint64_t
608 osd_dmu_object_alloc(objset_t *os, dmu_object_type_t objtype, int blocksize,
609 int dnodesize, dmu_tx_t *tx)
612 dnodesize = MAX(dmu_objset_dnodesize(os), DNODE_MIN_SIZE);
614 return dmu_object_alloc_dnsize(os, objtype, blocksize, DMU_OT_SA,
615 DN_BONUS_SIZE(dnodesize), dnodesize, tx);
618 static inline uint64_t
619 osd_zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
620 dmu_object_type_t ot, int leaf_blockshift,
621 int indirect_blockshift, int dnodesize, dmu_tx_t *tx)
624 dnodesize = MAX(dmu_objset_dnodesize(os), DNODE_MIN_SIZE);
626 return zap_create_flags_dnsize(os, normflags, flags, ot,
627 leaf_blockshift, indirect_blockshift,
628 DMU_OT_SA, DN_BONUS_SIZE(dnodesize),
632 static inline uint64_t
633 osd_dmu_object_alloc(objset_t *os, dmu_object_type_t objtype, int blocksize,
634 int dnodesize, dmu_tx_t *tx)
636 return dmu_object_alloc(os, objtype, blocksize, DMU_OT_SA,
637 DN_MAX_BONUSLEN, tx);
640 static inline uint64_t
641 osd_zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
642 dmu_object_type_t ot, int leaf_blockshift,
643 int indirect_blockshift, int dnodesize, dmu_tx_t *tx)
645 return zap_create_flags(os, normflags, flags, ot, leaf_blockshift,
646 indirect_blockshift, DMU_OT_SA,
647 DN_MAX_BONUSLEN, tx);
649 #endif /* HAVE_DMU_OBJECT_ALLOC_DNSIZE */
651 #endif /* _OSD_INTERNAL_H */