/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd-zfs/osd_handler.c
 * Top-level entry points into osd module
 *
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 * Author: Mike Pershin <tappro@whamcloud.com>
 * Author: Johann Lombardi <johann@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_OSD

#include <libcfs/libcfs.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <obd.h>
#include <obd_class.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <uapi/linux/lustre/lustre_param.h>
#include <md_object.h>

#include "osd_internal.h"

#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/spa.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa_impl.h>
#include <sys/zfs_znode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_prop.h>
#include <sys/sa_impl.h>
#include <sys/txg.h>

struct lu_context_key osd_key;

/* Slab for OSD object allocation */
struct kmem_cache *osd_object_kmem;

/* Slab to allocate osd_zap_it */
struct kmem_cache *osd_zapit_cachep;

static struct lu_kmem_descr osd_caches[] = {
	{
		.ckd_cache = &osd_object_kmem,
		.ckd_name  = "zfs_osd_obj",
		.ckd_size  = sizeof(struct osd_object)
	},
	{
		.ckd_cache = &osd_zapit_cachep,
		.ckd_name  = "osd_zapit_cache",
		.ckd_size  = sizeof(struct osd_zap_it)
	},
	{
		.ckd_cache = NULL
	}
};

static void arc_prune_func(int64_t bytes, void *private)
{
	struct osd_device *od = private;
	struct lu_site	  *site = &od->od_site;
	struct lu_env	   env;
	int		   rc;

	LASSERT(site->ls_obj_hash);

	rc = lu_env_init(&env, LCT_SHRINKER);
	if (rc) {
		CERROR("%s: can't initialize shrinker env: rc = %d\n",
		       od->od_svname, rc);
		return;
	}

	lu_site_purge(&env, site, (bytes >> 10));

	lu_env_fini(&env);
}
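
/*
 * Note on the "bytes >> 10" above: lu_site_purge() takes a count of objects
 * to scan, while the ARC prune callback hands us a byte count to reclaim.
 * The shift is therefore a crude conversion that assumes on the order of
 * 1KB of memory per cached lu_object; that per-object figure is an
 * assumption stated here for illustration, not a value taken from the
 * lu_object code.
 */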

/*
 * Concurrency: doesn't access mutable data
 */
static int osd_root_get(const struct lu_env *env,
			struct dt_device *dev, struct lu_fid *f)
{
	lu_local_obj_fid(f, OSD_FS_ROOT_OID);
	return 0;
}

/*
 * OSD object methods.
 */

/*
 * Concurrency: shouldn't matter.
 */
static void osd_trans_commit_cb(void *cb_data, int error)
{
	struct osd_thandle	*oh = cb_data;
	struct thandle		*th = &oh->ot_super;
	struct osd_device	*osd = osd_dt_dev(th->th_dev);
	struct lu_device	*lud = &th->th_dev->dd_lu_dev;
	struct dt_txn_commit_cb	*dcb, *tmp;

	ENTRY;

	if (error) {
		if (error == ECANCELED)
			CWARN("%s: transaction @0x%p was aborted\n",
			      osd_dt_dev(th->th_dev)->od_svname, th);
		else
			CERROR("%s: transaction @0x%p commit error: rc = %d\n",
			       osd_dt_dev(th->th_dev)->od_svname, th, error);
	}

	dt_txn_hook_commit(th);

	/* call per-transaction callbacks if any */
	list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
		LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
			 "commit callback entry: magic=%x name='%s'\n",
			 dcb->dcb_magic, dcb->dcb_name);
		list_del_init(&dcb->dcb_linkage);
		dcb->dcb_func(NULL, th, dcb, error);
	}

	/* Unlike ldiskfs, zfs updates space accounting at commit time.
	 * As a consequence, op_end is called only now to inform the quota slave
	 * component that reserved quota space is now accounted in usage and
	 * should be released. Quota space won't be adjusted at this point since
	 * we can't provide a suitable environment. It will be performed
	 * asynchronously by a lquota thread. */
	qsd_op_end(NULL, osd->od_quota_slave, &oh->ot_quota_trans);

	lu_device_put(lud);
	th->th_dev = NULL;
	lu_context_exit(&th->th_ctx);
	lu_context_fini(&th->th_ctx);
	OBD_FREE_PTR(oh);

	EXIT;
}

static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
{
	struct osd_thandle *oh = container_of0(th, struct osd_thandle,
					       ot_super);

	LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
	LASSERT(dcb->dcb_func != NULL);
	if (dcb->dcb_flags & DCB_TRANS_STOP)
		list_add(&dcb->dcb_linkage, &oh->ot_stop_dcb_list);
	else
		list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);

	return 0;
}
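
/*
 * A minimal usage sketch (the caller-side names here are illustrative, not
 * taken from this file): a layer that wants code run at transaction commit
 * fills in a dt_txn_commit_cb and registers it via the generic
 * dt_trans_cb_add() wrapper, which lands here:
 *
 *	dcb->dcb_magic = TRANS_COMMIT_CB_MAGIC;
 *	dcb->dcb_func  = my_commit_cb;
 *	dcb->dcb_flags = 0;	// or DCB_TRANS_STOP to run at
 *				// osd_trans_stop() time instead
 *	rc = dt_trans_cb_add(th, dcb);
 *
 * Callbacks on ot_dcb_list fire from osd_trans_commit_cb() once the txg
 * commits; DCB_TRANS_STOP callbacks fire earlier, from osd_trans_stop_cb().
 */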

/*
 * Concurrency: shouldn't matter.
 */
static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
			   struct thandle *th)
{
	struct osd_thandle	*oh;
	int			 rc;
	ENTRY;

	oh = container_of0(th, struct osd_thandle, ot_super);
	LASSERT(oh);
	LASSERT(oh->ot_tx);

	rc = dt_txn_hook_start(env, d, th);
	if (rc != 0)
		RETURN(rc);

	if (oh->ot_write_commit && OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC))
		/* Unlike ldiskfs, ZFS checks for available space and returns
		 * -ENOSPC when assigning txg */
		RETURN(-ENOSPC);

	rc = -dmu_tx_assign(oh->ot_tx, TXG_WAIT);
	if (unlikely(rc != 0)) {
		struct osd_device *osd = osd_dt_dev(d);
		/* dmu will call commit callback with error code during abort */
		if (!lu_device_is_md(&d->dd_lu_dev) && rc == -ENOSPC)
			CERROR("%s: failed to start transaction due to ENOSPC\n",
			       osd->od_svname);
		else
			CERROR("%s: can't assign tx: rc = %d\n",
			       osd->od_svname, rc);
	} else {
		/* add commit callback */
		dmu_tx_callback_register(oh->ot_tx, osd_trans_commit_cb, oh);
		oh->ot_assigned = 1;
		lu_context_init(&th->th_ctx, th->th_tags);
		lu_context_enter(&th->th_ctx);
		lu_device_get(&d->dd_lu_dev);
	}

	RETURN(rc);
}

static void osd_unlinked_list_emptify(const struct lu_env *env,
				      struct osd_device *osd,
				      struct list_head *list, bool free)
{
	struct osd_object	*obj;
	uint64_t		 oid;

	while (!list_empty(list)) {
		obj = list_entry(list->next,
				 struct osd_object, oo_unlinked_linkage);
		LASSERT(obj->oo_dn != NULL);
		oid = obj->oo_dn->dn_object;

		list_del_init(&obj->oo_unlinked_linkage);
		if (free)
			(void)osd_unlinked_object_free(env, osd, oid);
	}
}

static void osd_trans_stop_cb(struct osd_thandle *oth, int result)
{
	struct dt_txn_commit_cb	*dcb;
	struct dt_txn_commit_cb	*tmp;

	/* call per-transaction stop callbacks if any */
	list_for_each_entry_safe(dcb, tmp, &oth->ot_stop_dcb_list,
				 dcb_linkage) {
		LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
			 "commit callback entry: magic=%x name='%s'\n",
			 dcb->dcb_magic, dcb->dcb_name);
		list_del_init(&dcb->dcb_linkage);
		dcb->dcb_func(NULL, &oth->ot_super, dcb, result);
	}
}

/*
 * Concurrency: shouldn't matter.
 */
static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
			  struct thandle *th)
{
	struct osd_device	*osd = osd_dt_dev(th->th_dev);
	bool			 sync = (th->th_sync != 0);
	struct osd_thandle	*oh;
	struct list_head	 unlinked;
	uint64_t		 txg;
	int			 rc;
	ENTRY;

	oh = container_of0(th, struct osd_thandle, ot_super);
	INIT_LIST_HEAD(&unlinked);
	list_splice_init(&oh->ot_unlinked_list, &unlinked);
	/* reset OI cache for safety */
	osd_oti_get(env)->oti_ins_cache_used = 0;

	if (oh->ot_assigned == 0) {
		LASSERT(oh->ot_tx);
		dmu_tx_abort(oh->ot_tx);
		osd_object_sa_dirty_rele(env, oh);
		osd_unlinked_list_emptify(env, osd, &unlinked, false);
		/* there won't be any commit, release reserved quota space now,
		 * if any */
		qsd_op_end(env, osd->od_quota_slave, &oh->ot_quota_trans);
		OBD_FREE_PTR(oh);
		RETURN(0);
	}

	rc = dt_txn_hook_stop(env, th);
	if (rc != 0)
		CDEBUG(D_OTHER, "%s: transaction hook failed: rc = %d\n",
		       osd->od_svname, rc);

	osd_trans_stop_cb(oh, rc);

	LASSERT(oh->ot_tx);
	txg = oh->ot_tx->tx_txg;

	osd_object_sa_dirty_rele(env, oh);
	/* XXX: Once dmu_tx_commit() called, oh/th could have been freed
	 * by osd_trans_commit_cb already. */
	dmu_tx_commit(oh->ot_tx);

	osd_unlinked_list_emptify(env, osd, &unlinked, true);

	if (sync)
		txg_wait_synced(dmu_objset_pool(osd->od_os), txg);

	RETURN(rc);
}
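
/*
 * Note: osd_trans_stop() samples the txg number before dmu_tx_commit()
 * because oh (and the thandle embedded in it) may already be freed by
 * osd_trans_commit_cb() once the tx commits.  For handles with th_sync
 * set, txg_wait_synced() above then blocks until that txg has reached
 * stable storage, layering synchronous transaction semantics on top of
 * ZFS's asynchronous txg engine.
 */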

static struct thandle *osd_trans_create(const struct lu_env *env,
					struct dt_device *dt)
{
	struct osd_device	*osd = osd_dt_dev(dt);
	struct osd_thandle	*oh;
	struct thandle		*th;
	dmu_tx_t		*tx;
	ENTRY;

	if (dt->dd_rdonly) {
		CERROR("%s: attempt to start transaction on read-only device\n",
		       osd_name(osd_dt_dev(dt)));
		dump_stack();
		RETURN(ERR_PTR(-EROFS));
	}

	tx = dmu_tx_create(osd->od_os);
	if (tx == NULL)
		RETURN(ERR_PTR(-ENOMEM));

	/* alloc callback data */
	OBD_ALLOC_PTR(oh);
	if (oh == NULL) {
		dmu_tx_abort(tx);
		RETURN(ERR_PTR(-ENOMEM));
	}

	oh->ot_tx = tx;
	INIT_LIST_HEAD(&oh->ot_dcb_list);
	INIT_LIST_HEAD(&oh->ot_stop_dcb_list);
	INIT_LIST_HEAD(&oh->ot_unlinked_list);
	INIT_LIST_HEAD(&oh->ot_sa_list);
	memset(&oh->ot_quota_trans, 0, sizeof(oh->ot_quota_trans));
	th = &oh->ot_super;
	th->th_dev = dt;
	th->th_result = 0;
	th->th_tags = LCT_TX_HANDLE;
	RETURN(th);
}

/* Estimate the total number of objects from a number of blocks */
uint64_t osd_objs_count_estimate(uint64_t usedbytes, uint64_t usedobjs,
				 uint64_t nrblocks, uint64_t est_maxblockshift)
{
	uint64_t est_totobjs, est_usedblocks, est_usedobjs;

	/*
	 * If blocksize is below 64KB (e.g. MDT with recordsize=4096) then
	 * bump the free dnode estimate to assume blocks at least 64KB in
	 * case of a directory-heavy MDT (at 32KB/directory).
	 */
	if (est_maxblockshift < 16) {
		nrblocks >>= (16 - est_maxblockshift);
		est_maxblockshift = 16;
	}

	/*
	 * Estimate the total number of dnodes from the total blocks count
	 * and the space used per dnode. Since we don't know the overhead
	 * associated with each dnode (xattrs, SAs, VDEV overhead, etc.)
	 * just using DNODE_SHIFT isn't going to give a good estimate.
	 * Instead, compute the current average space usage per dnode, with
	 * an upper and lower cap to avoid unrealistic estimates.
	 *
	 * In case there aren't many dnodes or blocks used yet, add a small
	 * correction factor (OSD_DNODE_EST_{COUNT,BLKSHIFT}). This factor
	 * gradually disappears as the number of real dnodes grows. It also
	 * avoids the need to check for divide-by-zero when computing
	 * dn_per_block.
	 */
	CLASSERT(OSD_DNODE_MIN_BLKSHIFT > 0);
	CLASSERT(OSD_DNODE_EST_BLKSHIFT > 0);

	est_usedblocks = ((OSD_DNODE_EST_COUNT << OSD_DNODE_EST_BLKSHIFT) +
			  usedbytes) >> est_maxblockshift;
	est_usedobjs   = OSD_DNODE_EST_COUNT + usedobjs;

	if (est_usedobjs <= est_usedblocks) {
		/*
		 * Average space/dnode more than maximum block size, use max
		 * block size to estimate free dnodes from adjusted free blocks
		 * count. OSTs typically use multiple blocks per dnode so this
		 * case applies.
		 */
		est_totobjs = nrblocks;

	} else if (est_usedobjs >= (est_usedblocks << OSD_DNODE_MIN_BLKSHIFT)) {
		/*
		 * Average space/dnode smaller than min dnode size (probably
		 * due to metadnode compression), use min dnode size to
		 * estimate object count. MDTs may use only one block per dnode
		 * so this case applies.
		 */
		est_totobjs = nrblocks << OSD_DNODE_MIN_BLKSHIFT;

	} else {
		/*
		 * Between the extremes, use average space per existing dnode
		 * to compute the number of dnodes that will fit into nrblocks:
		 *
		 *    est_totobjs = nrblocks * (est_usedobjs / est_usedblocks)
		 *
		 * this may overflow 64 bits or become 0 if not handled well.
		 *
		 * We know nrblocks is below 2^(64 - blkbits), and
		 * est_usedobjs is under 48 bits due to DN_MAX_OBJECT_SHIFT,
		 * which means that multiplying them may get as large as
		 * 2^96 for the minimum blocksize of 64KB allowed above.
		 *
		 * The ratio of dnodes per block (est_usedobjs / est_usedblocks)
		 * is under 2^(blkbits - DNODE_SHIFT) = blocksize / 512 due to
		 * the limit checks above, so we can safely compute this first.
		 * We care more about accuracy on the MDT (many dnodes/block)
		 * which is good because this is where truncation errors are
		 * smallest. Since both nrblocks and dn_per_block are a
		 * function of blkbits, their product is at most:
		 *
		 *    2^(64 - blkbits) * 2^(blkbits - DNODE_SHIFT) = 2^(64 - 9)
		 *
		 * so we can safely use 7 bits to compute a fixed-point
		 * fraction and est_totobjs can still fit in 64 bits.
		 */
		unsigned dn_per_block = (est_usedobjs << 7) / est_usedblocks;

		est_totobjs = (nrblocks * dn_per_block) >> 7;
	}
	return est_totobjs;
}
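
/*
 * A rough worked example of the fixed-point math above, with illustrative
 * numbers rather than measured values: suppose est_usedblocks = 1000 and
 * est_usedobjs = 3000, i.e. 3 dnodes per block on average.  Then
 * dn_per_block = (3000 << 7) / 1000 = 384, which encodes 3.0 in 7-bit
 * fixed point (3 * 128).  With nrblocks = 1000000 free blocks, the
 * estimate is est_totobjs = (1000000 * 384) >> 7 = 3000000 objects.
 */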

static int osd_objset_statfs(struct osd_device *osd, struct obd_statfs *osfs)
{
	struct objset *os = osd->od_os;
	uint64_t usedbytes, availbytes, usedobjs, availobjs;
	uint64_t est_availobjs;
	uint64_t reserved;
	uint64_t bshift;

	dmu_objset_space(os, &usedbytes, &availbytes, &usedobjs, &availobjs);

	memset(osfs, 0, sizeof(*osfs));

	/* We're a zfs filesystem. */
	osfs->os_type = UBERBLOCK_MAGIC;

	/*
	 * ZFS allows multiple block sizes. For statfs, Linux makes no
	 * proper distinction between bsize and frsize and incorrectly uses
	 * bsize instead of frsize when calculating free and used blocks,
	 * while bsize also serves as the optimal blocksize. We therefore
	 * return the largest possible block size as the IO size for optimum
	 * performance and scale the free and used block counts appropriately.
	 */
	osfs->os_bsize = osd->od_max_blksz;
	bshift = fls64(osfs->os_bsize) - 1;

	osfs->os_blocks = (usedbytes + availbytes) >> bshift;
	osfs->os_bfree = availbytes >> bshift;
	osfs->os_bavail = osfs->os_bfree; /* no extra root reservation */

	/* Take replication (i.e. number of copies) into account */
	if (os->os_copies != 0)
		osfs->os_bavail /= os->os_copies;

	/*
	 * Reserve some space so we don't run into ENOSPC due to grants not
	 * accounting for metadata overhead in ZFS, and to avoid fragmentation.
	 * Rather than report this via os_bavail (which makes users unhappy if
	 * they can't fill the filesystem 100%), reduce os_blocks as well.
	 *
	 * Reserve 0.78% of total space, at least 16MB for small filesystems,
	 * for internal files to be created/unlinked when space is tight.
	 */
	CLASSERT(OSD_STATFS_RESERVED_SIZE > 0);
	reserved = OSD_STATFS_RESERVED_SIZE >> bshift;
	if (likely(osfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
		reserved = osfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;

	osfs->os_blocks -= reserved;
	osfs->os_bfree -= min(reserved, osfs->os_bfree);
	osfs->os_bavail -= min(reserved, osfs->os_bavail);
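
	/*
	 * Worked example, assuming OSD_STATFS_RESERVED_SIZE is 16MB and
	 * OSD_STATFS_RESERVED_SHIFT is 7 (the values implied by the "0.78%,
	 * at least 16MB" comment above, since 1/2^7 = 0.78125%): a 10TB
	 * filesystem with 128KB blocks has os_blocks = 83886080, well above
	 * the 16MB floor, so reserved = 83886080 >> 7 = 655360 blocks,
	 * i.e. 1/128 of the total space.
	 */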

	/*
	 * The availobjs value returned from dmu_objset_space() is largely
	 * useless, since it reports the number of objects that might
	 * theoretically still fit into the dataset, independent of minor
	 * issues like how much space is actually available in the pool.
	 * Compute a better estimate in osd_objs_count_estimate().
	 */
	est_availobjs = osd_objs_count_estimate(usedbytes, usedobjs,
						osfs->os_bfree, bshift);

	osfs->os_ffree = min(availobjs, est_availobjs);
	osfs->os_files = osfs->os_ffree + usedobjs;

	/* ZFS XXX: fill in backing dataset FSID/UUID
	   memcpy(osfs->os_fsid, .... ); */

	osfs->os_namelen = MAXNAMELEN;
	osfs->os_maxbytes = OBD_OBJECT_EOF;

	if (!spa_writeable(dmu_objset_spa(os)) ||
	    osd->od_dev_set_rdonly || osd->od_prop_rdonly)
		osfs->os_state |= OS_STATE_READONLY;

	return 0;
}

/*
 * Concurrency: shouldn't matter.
 */
int osd_statfs(const struct lu_env *env, struct dt_device *d,
	       struct obd_statfs *osfs)
{
	int rc;
	ENTRY;

	rc = osd_objset_statfs(osd_dt_dev(d), osfs);
	if (unlikely(rc != 0))
		RETURN(rc);

	osfs->os_bavail -= min_t(u64,
				 OSD_GRANT_FOR_LOCAL_OIDS / osfs->os_bsize,
				 osfs->os_bavail);
	RETURN(0);
}

static int osd_blk_insert_cost(struct osd_device *osd)
{
	int max_blockshift, nr_blkptrshift, bshift;

	/* max_blockshift is the log2 of the number of blocks needed to reach
	 * the maximum filesize (that's to say 2^64) */
	bshift = osd_spa_maxblockshift(dmu_objset_spa(osd->od_os));
	max_blockshift = DN_MAX_OFFSET_SHIFT - bshift;

	/* nr_blkptrshift is the log2 of the number of block pointers that can
	 * be stored in an indirect block */
	CLASSERT(DN_MAX_INDBLKSHIFT > SPA_BLKPTRSHIFT);
	nr_blkptrshift = DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT;

	/* max_blockshift / nr_blkptrshift is thus the maximum depth of the
	 * tree. We add +1 for rounding purpose.
	 * The tree depth times the indirect block size gives us the maximum
	 * cost of inserting a block in the tree */
	return (max_blockshift / nr_blkptrshift + 1) * (1 << DN_MAX_INDBLKSHIFT);
}
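
/*
 * Worked example with typical ZFS constants (assumed here for illustration):
 * DN_MAX_OFFSET_SHIFT = 64, DN_MAX_INDBLKSHIFT = 17 (128KB indirect blocks)
 * and SPA_BLKPTRSHIFT = 7 (128-byte block pointers) give
 * nr_blkptrshift = 10, i.e. 1024 blkptrs per indirect block.  With a 128KB
 * max data block, max_blockshift = 64 - 17 = 47, the tree depth is
 * 47 / 10 + 1 = 5, and the worst-case insert cost is 5 * 128KB = 640KB.
 */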

/*
 * Concurrency: doesn't access mutable data.
 */
static void osd_conf_get(const struct lu_env *env,
			 const struct dt_device *dev,
			 struct dt_device_param *param)
{
	struct osd_device *osd = osd_dt_dev(dev);

	/*
	 * XXX should be taken from not-yet-existing fs abstraction layer.
	 */
	param->ddp_max_name_len	= MAXNAMELEN;
	param->ddp_max_nlink	= 1 << 31; /* it's 8 bytes on disk */
	param->ddp_symlink_max	= PATH_MAX;
	param->ddp_mount_type	= LDD_MT_ZFS;

	param->ddp_mntopts	= MNTOPT_USERXATTR;
	if (osd->od_posix_acl)
		param->ddp_mntopts |= MNTOPT_ACL;
	param->ddp_max_ea_size	= DXATTR_MAX_ENTRY_SIZE;

	/* for maxbytes, report same value as ZPL */
	param->ddp_maxbytes	= MAX_LFS_FILESIZE;

	/* inodes are dynamically allocated, so we report the per-inode space
	 * consumption to upper layers. This static value is not really accurate
	 * and we should use the same logic as in osd_objset_statfs() to
	 * estimate the real size consumed by an object */
	param->ddp_inodespace = OSD_DNODE_EST_COUNT;
	/* Although ZFS isn't an extent-based filesystem, the metadata overhead
	 * (i.e. 7 levels of indirect blocks, see osd_blk_insert_cost()) should
	 * not be accounted for every single new block insertion.
	 * Instead, the maximum extent size is set to the number of blocks that
	 * can fit into a single contiguous indirect block. There will be some
	 * cases where an extent crosses indirect blocks, but such an extent
	 * won't add 7 new levels of indirect blocks either, so there is still
	 * enough reserved space for the extra indirect block */
	param->ddp_max_extent_blks =
		(1 << (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT));
	param->ddp_extent_tax = osd_blk_insert_cost(osd);
}
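
/*
 * With the same assumed ZFS constants as in the osd_blk_insert_cost()
 * example (DN_MAX_INDBLKSHIFT = 17, SPA_BLKPTRSHIFT = 7),
 * ddp_max_extent_blks comes out as 2^10 = 1024 blocks: one fully populated
 * indirect block's worth of data, so each extent is charged at most one
 * indirect-block chain (ddp_extent_tax) of reserved space.
 */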

/*
 * Concurrency: shouldn't matter.
 */
static int osd_sync(const struct lu_env *env, struct dt_device *d)
{
	struct osd_device *osd = osd_dt_dev(d);

	CDEBUG(D_CACHE, "syncing OSD %s\n", LUSTRE_OSD_ZFS_NAME);
	txg_wait_synced(dmu_objset_pool(osd->od_os), 0ULL);
	CDEBUG(D_CACHE, "synced OSD %s\n", LUSTRE_OSD_ZFS_NAME);

	return 0;
}

static int osd_commit_async(const struct lu_env *env, struct dt_device *dev)
{
	struct osd_device *osd = osd_dt_dev(dev);
	tx_state_t	  *tx = &dmu_objset_pool(osd->od_os)->dp_tx;
	uint64_t	   txg;

	mutex_enter(&tx->tx_sync_lock);
	txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg) {
		tx->tx_quiesce_txg_waiting = txg;
		cv_broadcast(&tx->tx_quiesce_more_cv);
	}
	mutex_exit(&tx->tx_sync_lock);

	return 0;
}
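
/*
 * Note: unlike osd_sync(), which waits for the current txg to reach disk,
 * osd_commit_async() only nudges the txg engine: raising
 * tx_quiesce_txg_waiting past the open txg and broadcasting
 * tx_quiesce_more_cv asks the quiesce thread to close the current txg
 * early, which starts the commit without blocking the caller.
 */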

/*
 * Concurrency: shouldn't matter.
 */
static int osd_ro(const struct lu_env *env, struct dt_device *d)
{
	struct osd_device *osd = osd_dt_dev(d);
	ENTRY;

	CERROR("%s: *** setting device %s read-only ***\n",
	       osd->od_svname, LUSTRE_OSD_ZFS_NAME);
	osd->od_dev_set_rdonly = 1;
	spa_freeze(dmu_objset_spa(osd->od_os));

	RETURN(0);
}

static struct dt_device_operations osd_dt_ops = {
	.dt_root_get		= osd_root_get,
	.dt_statfs		= osd_statfs,
	.dt_trans_create	= osd_trans_create,
	.dt_trans_start		= osd_trans_start,
	.dt_trans_stop		= osd_trans_stop,
	.dt_trans_cb_add	= osd_trans_cb_add,
	.dt_conf_get		= osd_conf_get,
	.dt_sync		= osd_sync,
	.dt_commit_async	= osd_commit_async,
	.dt_ro			= osd_ro,
};

/*
 * DMU OSD device type methods
 */
static int osd_type_init(struct lu_device_type *t)
{
	LU_CONTEXT_KEY_INIT(&osd_key);
	return lu_context_key_register(&osd_key);
}

static void osd_type_fini(struct lu_device_type *t)
{
	lu_context_key_degister(&osd_key);
}

static void *osd_key_init(const struct lu_context *ctx,
			  struct lu_context_key *key)
{
	struct osd_thread_info *info;

	OBD_ALLOC_PTR(info);
	if (info != NULL)
		info->oti_env = container_of(ctx, struct lu_env, le_ctx);
	else
		info = ERR_PTR(-ENOMEM);
	return info;
}

static void osd_key_fini(const struct lu_context *ctx,
			 struct lu_context_key *key, void *data)
{
	struct osd_thread_info	*info = data;
	struct osd_idmap_cache	*idc = info->oti_ins_cache;

	if (idc != NULL) {
		LASSERT(info->oti_ins_cache_size > 0);
		OBD_FREE(idc, sizeof(*idc) * info->oti_ins_cache_size);
		info->oti_ins_cache = NULL;
		info->oti_ins_cache_size = 0;
	}
	lu_buf_free(&info->oti_xattr_lbuf);
	OBD_FREE_PTR(info);
}

static void osd_key_exit(const struct lu_context *ctx,
			 struct lu_context_key *key, void *data)
{
}

struct lu_context_key osd_key = {
	.lct_tags = LCT_DT_THREAD | LCT_MD_THREAD | LCT_MG_THREAD | LCT_LOCAL,
	.lct_init = osd_key_init,
	.lct_fini = osd_key_fini,
	.lct_exit = osd_key_exit
};

static void osd_fid_fini(const struct lu_env *env, struct osd_device *osd)
{
	if (osd->od_cl_seq == NULL)
		return;

	seq_client_fini(osd->od_cl_seq);
	OBD_FREE_PTR(osd->od_cl_seq);
	osd->od_cl_seq = NULL;
}

static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
{
	ENTRY;

	/* shutdown quota slave instance associated with the device */
	if (o->od_quota_slave != NULL) {
		/* complete all in-flight callbacks */
		osd_sync(env, &o->od_dt_dev);
		txg_wait_callbacks(spa_get_dsl(dmu_objset_spa(o->od_os)));
		qsd_fini(env, o->od_quota_slave);
		o->od_quota_slave = NULL;
	}

	osd_fid_fini(env, o);

	RETURN(0);
}

static void osd_xattr_changed_cb(void *arg, uint64_t newval)
{
	struct osd_device *osd = arg;

	osd->od_xattr_in_sa = (newval == ZFS_XATTR_SA);
}

static void osd_recordsize_changed_cb(void *arg, uint64_t newval)
{
	struct osd_device *osd = arg;

	LASSERT(newval <= osd_spa_maxblocksize(dmu_objset_spa(osd->od_os)));
	LASSERT(newval >= SPA_MINBLOCKSIZE);
	LASSERT(ISP2(newval));

	osd->od_max_blksz = newval;
}

static void osd_readonly_changed_cb(void *arg, uint64_t newval)
{
	struct osd_device *osd = arg;

	osd->od_prop_rdonly = !!newval;
}

/*
 * This function unregisters all registered callbacks. It's harmless to
 * unregister callbacks that were never registered so it is used to safely
 * unwind a partially completed call to osd_objset_register_callbacks().
 */
static void osd_objset_unregister_callbacks(struct osd_device *o)
{
	struct dsl_dataset *ds = dmu_objset_ds(o->od_os);

	(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_XATTR),
				   osd_xattr_changed_cb, o);
	(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
				   osd_recordsize_changed_cb, o);
	(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_READONLY),
				   osd_readonly_changed_cb, o);

	if (o->arc_prune_cb != NULL) {
		arc_remove_prune_callback(o->arc_prune_cb);
		o->arc_prune_cb = NULL;
	}
}

/*
 * Register the required callbacks to be notified when zfs properties
 * are modified using the 'zfs(8)' command line utility.
 */
static int osd_objset_register_callbacks(struct osd_device *o)
{
	struct dsl_dataset	*ds = dmu_objset_ds(o->od_os);
	dsl_pool_t		*dp = dmu_objset_pool(o->od_os);
	int			 rc;

	LASSERT(ds);
	LASSERT(dp);

	dsl_pool_config_enter(dp, FTAG);
	rc = -dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_XATTR),
				osd_xattr_changed_cb, o);
	if (rc)
		GOTO(err, rc);

	rc = -dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
				osd_recordsize_changed_cb, o);
	if (rc)
		GOTO(err, rc);

	rc = -dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_READONLY),
				osd_readonly_changed_cb, o);
	if (rc)
		GOTO(err, rc);

	o->arc_prune_cb = arc_add_prune_callback(arc_prune_func, o);
err:
	dsl_pool_config_exit(dp, FTAG);
	if (rc)
		osd_objset_unregister_callbacks(o);

	RETURN(rc);
}

static int osd_objset_open(struct osd_device *o)
{
	uint64_t	version = ZPL_VERSION;
	uint64_t	sa_obj, unlink_obj;
	int		rc;
	ENTRY;

	rc = -dmu_objset_own(o->od_mntdev, DMU_OST_ZFS,
			     o->od_dt_dev.dd_rdonly ? B_TRUE : B_FALSE,
			     o, &o->od_os);
	if (rc) {
		CERROR("%s: can't open %s\n", o->od_svname, o->od_mntdev);
		o->od_os = NULL;

		GOTO(out, rc);
	}

	/* Check ZFS version */
	rc = -zap_lookup(o->od_os, MASTER_NODE_OBJ,
			 ZPL_VERSION_STR, 8, 1, &version);
	if (rc) {
		CERROR("%s: Error looking up ZPL VERSION\n", o->od_mntdev);
		/*
		 * We can't return ENOENT because that would mean the objset
		 * didn't exist.
		 */
		GOTO(out, rc = -EIO);
	}

	rc = -zap_lookup(o->od_os, MASTER_NODE_OBJ,
			 ZFS_SA_ATTRS, 8, 1, &sa_obj);
	if (rc)
		GOTO(out, rc);

	rc = -sa_setup(o->od_os, sa_obj, zfs_attr_table,
		       ZPL_END, &o->z_attr_table);
	if (rc)
		GOTO(out, rc);

	rc = -zap_lookup(o->od_os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ,
			 8, 1, &o->od_rootid);
	if (rc) {
		CERROR("%s: lookup for root failed: rc = %d\n",
		       o->od_svname, rc);
		GOTO(out, rc);
	}

	rc = -zap_lookup(o->od_os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET,
			 8, 1, &unlink_obj);
	if (rc) {
		CERROR("%s: lookup for %s failed: rc = %d\n",
		       o->od_svname, ZFS_UNLINKED_SET, rc);
		GOTO(out, rc);
	}

	/* Check that user/group usage tracking is supported */
	if (!dmu_objset_userused_enabled(o->od_os) ||
	    DMU_USERUSED_DNODE(o->od_os)->dn_type != DMU_OT_USERGROUP_USED ||
	    DMU_GROUPUSED_DNODE(o->od_os)->dn_type != DMU_OT_USERGROUP_USED) {
		CERROR("%s: Space accounting not supported by this target, "
		       "aborting\n", o->od_svname);
		GOTO(out, rc = -ENOTSUPP);
	}

	rc = __osd_obj2dnode(o->od_os, unlink_obj, &o->od_unlinked);
	if (rc) {
		CERROR("%s: can't get dnode for unlinked: rc = %d\n",
		       o->od_svname, rc);
		GOTO(out, rc);
	}

out:
	if (rc != 0 && o->od_os != NULL) {
		dmu_objset_disown(o->od_os, o);
		o->od_os = NULL;
	}

	RETURN(rc);
}

int osd_unlinked_object_free(const struct lu_env *env, struct osd_device *osd,
			     uint64_t oid)
{
	char	 *key = osd_oti_get(env)->oti_str;
	int	  rc;
	dmu_tx_t *tx;

	if (osd->od_dt_dev.dd_rdonly) {
		CERROR("%s: attempt to free objects on read-only device\n",
		       osd_name(osd));
		dump_stack();

		return -EROFS;
	}

	rc = -dmu_free_long_range(osd->od_os, oid, 0, DMU_OBJECT_END);
	if (rc != 0) {
		CWARN("%s: Cannot truncate %llu: rc = %d\n",
		      osd->od_svname, oid, rc);
		return rc;
	}

	tx = dmu_tx_create(osd->od_os);
	dmu_tx_hold_free(tx, oid, 0, DMU_OBJECT_END);
	osd_tx_hold_zap(tx, osd->od_unlinked->dn_object, osd->od_unlinked,
			FALSE, NULL);
	rc = -dmu_tx_assign(tx, TXG_WAIT);
	if (rc != 0) {
		CWARN("%s: Cannot assign tx for %llu: rc = %d\n",
		      osd->od_svname, oid, rc);
		goto failed;
	}

	snprintf(key, sizeof(osd_oti_get(env)->oti_str), "%llx", oid);
	rc = osd_zap_remove(osd, osd->od_unlinked->dn_object,
			    osd->od_unlinked, key, tx);
	if (rc) {
		CWARN("%s: Cannot remove %llu from unlinked set: rc = %d\n",
		      osd->od_svname, oid, rc);
		goto failed;
	}

	rc = -dmu_object_free(osd->od_os, oid, tx);
	if (rc) {
		CWARN("%s: Cannot free %llu: rc = %d\n",
		      osd->od_svname, oid, rc);
		goto failed;
	}
	dmu_tx_commit(tx);

	return 0;

failed:
	LASSERT(rc != 0);
	dmu_tx_abort(tx);

	return rc;
}

static void
osd_unlinked_drain(const struct lu_env *env, struct osd_device *osd)
{
	zap_cursor_t	 zc;
	zap_attribute_t	*za = &osd_oti_get(env)->oti_za;

	zap_cursor_init(&zc, osd->od_os, osd->od_unlinked->dn_object);

	while (zap_cursor_retrieve(&zc, za) == 0) {
		/* If we cannot free the object, leave it in the unlinked set
		 * until the OSD is mounted again, when osd_unlinked_drain()
		 * will be called. */
		if (osd_unlinked_object_free(env, osd, za->za_first_integer))
			break;
		zap_cursor_advance(&zc);
	}

	zap_cursor_fini(&zc);
}

static int osd_mount(const struct lu_env *env,
		     struct osd_device *o, struct lustre_cfg *cfg)
{
	char		*mntdev = lustre_cfg_string(cfg, 1);
	char		*str = lustre_cfg_string(cfg, 2);
	char		*svname = lustre_cfg_string(cfg, 4);
	dnode_t		*rootdn;
	const char	*opts;
	int		 rc;
	ENTRY;

	if (o->od_os != NULL)
		RETURN(0);

	if (mntdev == NULL || svname == NULL)
		RETURN(-EINVAL);

	rc = strlcpy(o->od_mntdev, mntdev, sizeof(o->od_mntdev));
	if (rc >= sizeof(o->od_mntdev))
		RETURN(-E2BIG);

	rc = strlcpy(o->od_svname, svname, sizeof(o->od_svname));
	if (rc >= sizeof(o->od_svname))
		RETURN(-E2BIG);

	str = strstr(str, ":");
	if (str) {
		unsigned long flags;

		rc = kstrtoul(str + 1, 10, &flags);
		if (rc)
			RETURN(-EINVAL);

		if (flags & LMD_FLG_DEV_RDONLY) {
			o->od_dt_dev.dd_rdonly = 1;
			LCONSOLE_WARN("%s: set dev_rdonly on this device\n",
				      svname);
		}
	}

	if (server_name_is_ost(o->od_svname))
		o->od_is_ost = 1;

	rc = osd_objset_open(o);
	if (rc)
		GOTO(err, rc);

	o->od_xattr_in_sa = B_TRUE;
	o->od_max_blksz = osd_spa_maxblocksize(o->od_os->os_spa);

	rc = __osd_obj2dnode(o->od_os, o->od_rootid, &rootdn);
	if (rc)
		GOTO(err, rc);
	o->od_root = rootdn->dn_object;
	osd_dnode_rele(rootdn);

	rc = __osd_obj2dnode(o->od_os, DMU_USERUSED_OBJECT,
			     &o->od_userused_dn);
	if (rc)
		GOTO(err, rc);

	rc = __osd_obj2dnode(o->od_os, DMU_GROUPUSED_OBJECT,
			     &o->od_groupused_dn);
	if (rc)
		GOTO(err, rc);

	/* 1. initialize oi before any file create or file open */
	rc = osd_oi_init(env, o);
	if (rc)
		GOTO(err, rc);

	rc = lu_site_init(&o->od_site, osd2lu_dev(o));
	if (rc)
		GOTO(err, rc);
	o->od_site.ls_bottom_dev = osd2lu_dev(o);

	rc = lu_site_init_finish(&o->od_site);
	if (rc)
		GOTO(err, rc);

	rc = osd_objset_register_callbacks(o);
	if (rc)
		GOTO(err, rc);

	rc = osd_procfs_init(o, o->od_svname);
	if (rc)
		GOTO(err, rc);

	/* initialize quota slave instance */
	o->od_quota_slave = qsd_init(env, o->od_svname, &o->od_dt_dev,
				     o->od_proc_entry);
	if (IS_ERR(o->od_quota_slave)) {
		rc = PTR_ERR(o->od_quota_slave);
		o->od_quota_slave = NULL;
		GOTO(err, rc);
	}

	/* parse mount option "noacl", and enable ACL by default */
	opts = lustre_cfg_string(cfg, 3);
	if (opts == NULL || strstr(opts, "noacl") == NULL)
		o->od_posix_acl = 1;

	osd_unlinked_drain(env, o);
err:
	if (rc && o->od_os) {
		dmu_objset_disown(o->od_os, o);
		o->od_os = NULL;
	}

	RETURN(rc);
}

static void osd_umount(const struct lu_env *env, struct osd_device *o)
{
	ENTRY;

	if (atomic_read(&o->od_zerocopy_alloc))
		CERROR("%s: lost %d allocated page(s)\n", o->od_svname,
		       atomic_read(&o->od_zerocopy_alloc));
	if (atomic_read(&o->od_zerocopy_loan))
		CERROR("%s: lost %d loaned abuf(s)\n", o->od_svname,
		       atomic_read(&o->od_zerocopy_loan));
	if (atomic_read(&o->od_zerocopy_pin))
		CERROR("%s: lost %d pinned dbuf(s)\n", o->od_svname,
		       atomic_read(&o->od_zerocopy_pin));

	if (o->od_unlinked) {
		osd_dnode_rele(o->od_unlinked);
		o->od_unlinked = NULL;
	}
	if (o->od_userused_dn) {
		osd_dnode_rele(o->od_userused_dn);
		o->od_userused_dn = NULL;
	}
	if (o->od_groupused_dn) {
		osd_dnode_rele(o->od_groupused_dn);
		o->od_groupused_dn = NULL;
	}

	if (o->od_os != NULL) {
		if (!o->od_dt_dev.dd_rdonly)
			/* force a txg sync to get all commit callbacks */
			txg_wait_synced(dmu_objset_pool(o->od_os), 0ULL);

		/* close the object set */
		dmu_objset_disown(o->od_os, o);
		o->od_os = NULL;
	}

	EXIT;
}

static int osd_device_init0(const struct lu_env *env,
			    struct osd_device *o,
			    struct lustre_cfg *cfg)
{
	struct lu_device	*l = osd2lu_dev(o);
	int			 rc;

	/* if the module was re-loaded, env can lose its keys */
	rc = lu_env_refill((struct lu_env *) env);
	if (rc != 0)
		RETURN(rc);

	l->ld_ops = &osd_lu_ops;
	o->od_dt_dev.dd_ops = &osd_dt_ops;

	RETURN(0);
}

static struct lu_device *osd_device_fini(const struct lu_env *env,
					 struct lu_device *dev);

static struct lu_device *osd_device_alloc(const struct lu_env *env,
					  struct lu_device_type *type,
					  struct lustre_cfg *cfg)
{
	struct osd_device	*dev;
	struct osd_seq_list	*osl;
	int			 rc;

	OBD_ALLOC_PTR(dev);
	if (dev == NULL)
		return ERR_PTR(-ENOMEM);

	osl = &dev->od_seq_list;
	INIT_LIST_HEAD(&osl->osl_seq_list);
	rwlock_init(&osl->osl_seq_list_lock);
	sema_init(&osl->osl_seq_init_sem, 1);

	rc = dt_device_init(&dev->od_dt_dev, type);
	if (rc == 0) {
		rc = osd_device_init0(env, dev, cfg);
		if (rc == 0) {
			rc = osd_mount(env, dev, cfg);
			if (rc)
				osd_device_fini(env, osd2lu_dev(dev));
		}
		if (rc)
			dt_device_fini(&dev->od_dt_dev);
	}

	if (unlikely(rc != 0))
		OBD_FREE_PTR(dev);

	return rc == 0 ? osd2lu_dev(dev) : ERR_PTR(rc);
}

static struct lu_device *osd_device_free(const struct lu_env *env,
					 struct lu_device *d)
{
	struct osd_device *o = osd_dev(d);
	ENTRY;

	/* XXX: make osd top device in order to release reference */
	d->ld_site->ls_top_dev = d;
	lu_site_purge(env, d->ld_site, -1);
	if (!cfs_hash_is_empty(d->ld_site->ls_obj_hash)) {
		LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
		lu_site_print(env, d->ld_site, &msgdata, lu_cdebug_printer);
	}
	lu_site_fini(&o->od_site);
	dt_device_fini(&o->od_dt_dev);
	OBD_FREE_PTR(o);

	RETURN(NULL);
}

static struct lu_device *osd_device_fini(const struct lu_env *env,
					 struct lu_device *d)
{
	struct osd_device *o = osd_dev(d);
	int		   rc;
	ENTRY;

	if (o->od_os) {
		osd_objset_unregister_callbacks(o);
		if (!o->od_dt_dev.dd_rdonly) {
			osd_sync(env, lu2dt_dev(d));
			txg_wait_callbacks(
					spa_get_dsl(dmu_objset_spa(o->od_os)));
		}
	}

	/* now with all the callbacks completed we can clean up the rest */
	osd_shutdown(env, o);
	osd_oi_fini(env, o);

	rc = osd_procfs_fini(o);
	if (rc) {
		CERROR("proc fini error %d\n", rc);
		RETURN(ERR_PTR(rc));
	}

	if (o->od_os)
		osd_umount(env, o);

	RETURN(NULL);
}

static int osd_device_init(const struct lu_env *env, struct lu_device *d,
			   const char *name, struct lu_device *next)
{
	return 0;
}

/*
 * To be removed: setup is performed by osd_device_{init,alloc} and
 * cleanup is performed by osd_device_{fini,free}.
 */
static int osd_process_config(const struct lu_env *env,
			      struct lu_device *d, struct lustre_cfg *cfg)
{
	struct osd_device	*o = osd_dev(d);
	int			 rc;
	ENTRY;

	switch (cfg->lcfg_command) {
	case LCFG_SETUP:
		rc = osd_mount(env, o, cfg);
		break;
	case LCFG_CLEANUP:
		rc = osd_shutdown(env, o);
		break;
	case LCFG_PARAM: {
		LASSERT(&o->od_dt_dev);
		rc = class_process_proc_param(PARAM_OSD, lprocfs_osd_obd_vars,
					      cfg, &o->od_dt_dev);
		if (rc > 0 || rc == -ENOSYS) {
			rc = class_process_proc_param(PARAM_OST,
						      lprocfs_osd_obd_vars,
						      cfg, &o->od_dt_dev);
			if (rc > 0)
				rc = 0;
		}
		break;
	}
	default:
		rc = -ENOTTY;
	}

	RETURN(rc);
}

static int osd_recovery_complete(const struct lu_env *env, struct lu_device *d)
{
	struct osd_device	*osd = osd_dev(d);
	int			 rc = 0;
	ENTRY;

	if (osd->od_quota_slave == NULL)
		RETURN(0);

	/* start qsd instance on recovery completion, this notifies the quota
	 * slave code that we are about to process new requests now */
	rc = qsd_start(env, osd->od_quota_slave);
	RETURN(rc);
}

/*
 * we use exports to track all osd users
 */
static int osd_obd_connect(const struct lu_env *env, struct obd_export **exp,
			   struct obd_device *obd, struct obd_uuid *cluuid,
			   struct obd_connect_data *data, void *localdata)
{
	struct osd_device	*osd = osd_dev(obd->obd_lu_dev);
	struct lustre_handle	 conn;
	int			 rc;
	ENTRY;

	CDEBUG(D_CONFIG, "connect #%d\n", osd->od_connects);

	rc = class_connect(&conn, obd, cluuid);
	if (rc)
		RETURN(rc);

	*exp = class_conn2export(&conn);

	spin_lock(&obd->obd_dev_lock);
	osd->od_connects++;
	spin_unlock(&obd->obd_dev_lock);

	RETURN(0);
}

/*
 * once the last export (we don't count self-export) disappears,
 * the osd can be released
 */
static int osd_obd_disconnect(struct obd_export *exp)
{
	struct obd_device	*obd = exp->exp_obd;
	struct osd_device	*osd = osd_dev(obd->obd_lu_dev);
	int			 rc, release = 0;
	ENTRY;

	/* Only disconnect the underlying layers on the final disconnect. */
	spin_lock(&obd->obd_dev_lock);
	osd->od_connects--;
	if (osd->od_connects == 0)
		release = 1;
	spin_unlock(&obd->obd_dev_lock);

	rc = class_disconnect(exp); /* bz 9811 */

	if (rc == 0 && release)
		class_manual_cleanup(obd);
	RETURN(rc);
}

static int osd_fid_init(const struct lu_env *env, struct osd_device *osd)
{
	struct seq_server_site	*ss = osd_seq_site(osd);
	int			 rc;
	ENTRY;

	if (osd->od_is_ost || osd->od_cl_seq != NULL)
		RETURN(0);

	if (unlikely(ss == NULL))
		RETURN(-ENODEV);

	OBD_ALLOC_PTR(osd->od_cl_seq);
	if (osd->od_cl_seq == NULL)
		RETURN(-ENOMEM);

	rc = seq_client_init(osd->od_cl_seq, NULL, LUSTRE_SEQ_METADATA,
			     osd->od_svname, ss->ss_server_seq);

	if (rc != 0) {
		OBD_FREE_PTR(osd->od_cl_seq);
		osd->od_cl_seq = NULL;
	}

	RETURN(rc);
}

static int osd_prepare(const struct lu_env *env, struct lu_device *pdev,
		       struct lu_device *dev)
{
	struct osd_device	*osd = osd_dev(dev);
	int			 rc = 0;
	ENTRY;

	if (osd->od_quota_slave != NULL) {
		/* set up quota slave objects */
		rc = qsd_prepare(env, osd->od_quota_slave);
		if (rc != 0)
			RETURN(rc);
	}

	rc = osd_fid_init(env, osd);

	RETURN(rc);
}

struct lu_device_operations osd_lu_ops = {
	.ldo_object_alloc	= osd_object_alloc,
	.ldo_process_config	= osd_process_config,
	.ldo_recovery_complete	= osd_recovery_complete,
	.ldo_prepare		= osd_prepare,
};

static void osd_type_start(struct lu_device_type *t)
{
}

static void osd_type_stop(struct lu_device_type *t)
{
}

int osd_fid_alloc(const struct lu_env *env, struct obd_export *exp,
		  struct lu_fid *fid, struct md_op_data *op_data)
{
	struct osd_device *osd = osd_dev(exp->exp_obd->obd_lu_dev);

	return seq_client_alloc_fid(env, osd->od_cl_seq, fid);
}

static struct lu_device_type_operations osd_device_type_ops = {
	.ldto_init		= osd_type_init,
	.ldto_fini		= osd_type_fini,

	.ldto_start		= osd_type_start,
	.ldto_stop		= osd_type_stop,

	.ldto_device_alloc	= osd_device_alloc,
	.ldto_device_free	= osd_device_free,

	.ldto_device_init	= osd_device_init,
	.ldto_device_fini	= osd_device_fini
};

static struct lu_device_type osd_device_type = {
	.ldt_tags     = LU_DEVICE_DT,
	.ldt_name     = LUSTRE_OSD_ZFS_NAME,
	.ldt_ops      = &osd_device_type_ops,
	.ldt_ctx_tags = LCT_LOCAL
};

static struct obd_ops osd_obd_device_ops = {
	.o_owner      = THIS_MODULE,
	.o_connect    = osd_obd_connect,
	.o_disconnect = osd_obd_disconnect,
	.o_fid_alloc  = osd_fid_alloc
};

static int __init osd_init(void)
{
	int rc;

	rc = osd_options_init();
	if (rc)
		return rc;

	rc = lu_kmem_init(osd_caches);
	if (rc)
		return rc;

	rc = class_register_type(&osd_obd_device_ops, NULL, true, NULL,
				 LUSTRE_OSD_ZFS_NAME, &osd_device_type);
	if (rc)
		lu_kmem_fini(osd_caches);
	return rc;
}

static void __exit osd_exit(void)
{
	class_unregister_type(LUSTRE_OSD_ZFS_NAME);
	lu_kmem_fini(osd_caches);
}

extern unsigned int osd_oi_count;
module_param(osd_oi_count, int, 0444);
MODULE_PARM_DESC(osd_oi_count,
		 "Number of Object Index containers to be created; only valid for a new filesystem.");

MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_ZFS_NAME")");
MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");

module_init(osd_init);
module_exit(osd_exit);