/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd-zfs/osd_handler.c
 * Top-level entry points into osd module
 *
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 * Author: Mike Pershin <tappro@whamcloud.com>
 * Author: Johann Lombardi <johann@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_OSD

#include <libcfs/libcfs.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <obd.h>
#include <obd_class.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <uapi/linux/lustre/lustre_param.h>
#include <md_object.h>

#include "osd_internal.h"

#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/spa.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa_impl.h>
#include <sys/zfs_znode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_prop.h>
#include <sys/sa_impl.h>
#include <sys/txg.h>
struct lu_context_key osd_key;

static int osd_txg_sync_delay_us = -1;

/* Slab for OSD object allocation */
struct kmem_cache *osd_object_kmem;

/* Slab to allocate osd_zap_it */
struct kmem_cache *osd_zapit_cachep;
static struct lu_kmem_descr osd_caches[] = {
        {
                .ckd_cache = &osd_object_kmem,
                .ckd_name  = "zfs_osd_obj",
                .ckd_size  = sizeof(struct osd_object)
        },
        {
                .ckd_cache = &osd_zapit_cachep,
                .ckd_name  = "osd_zapit_cache",
                .ckd_size  = sizeof(struct osd_zap_it)
        },
        {
                .ckd_cache = NULL
        }
};
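/*
 * ARC prune callback: ZFS invokes this when the ARC is under memory
 * pressure and wants consumers to drop cached data. The (bytes >> 10)
 * argument to lu_site_purge() below asks it to shrink the lu_object
 * cache by roughly one object per KB being reclaimed; this scaling is
 * our reading of the code, not something documented by the source.
 */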
static void arc_prune_func(int64_t bytes, void *private)
{
        struct osd_device *od = private;
        struct lu_site    *site = &od->od_site;
        struct lu_env      env;
        int                rc;

        LASSERT(site->ls_obj_hash);

        rc = lu_env_init(&env, LCT_SHRINKER);
        if (rc) {
                CERROR("%s: can't initialize shrinker env: rc = %d\n",
                       od->od_svname, rc);
                return;
        }

        lu_site_purge(&env, site, (bytes >> 10));

        lu_env_fini(&env);
}
/*
 * Concurrency: doesn't access mutable data
 */
static int osd_root_get(const struct lu_env *env,
                        struct dt_device *dev, struct lu_fid *f)
{
        lu_local_obj_fid(f, OSD_FS_ROOT_OID);
        return 0;
}
/*
 * OSD object methods.
 */

/*
 * Concurrency: shouldn't matter.
 */
static void osd_trans_commit_cb(void *cb_data, int error)
{
        struct osd_thandle      *oh = cb_data;
        struct thandle          *th = &oh->ot_super;
        struct osd_device       *osd = osd_dt_dev(th->th_dev);
        struct lu_device        *lud = &th->th_dev->dd_lu_dev;
        struct dt_txn_commit_cb *dcb, *tmp;

        ENTRY;

        if (error) {
                if (error == ECANCELED)
                        CWARN("%s: transaction @0x%p was aborted\n",
                              osd_dt_dev(th->th_dev)->od_svname, th);
                else
                        CERROR("%s: transaction @0x%p commit error: rc = %d\n",
                               osd_dt_dev(th->th_dev)->od_svname, th, error);
        }

        dt_txn_hook_commit(th);

        /* call per-transaction callbacks if any */
        list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
                LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
                         "commit callback entry: magic=%x name='%s'\n",
                         dcb->dcb_magic, dcb->dcb_name);
                list_del_init(&dcb->dcb_linkage);
                dcb->dcb_func(NULL, th, dcb, error);
        }

        /* Unlike ldiskfs, zfs updates space accounting at commit time.
         * As a consequence, op_end is called only now to inform the quota
         * slave component that reserved quota space is now accounted in
         * usage and should be released. Quota space won't be adjusted at
         * this point since we can't provide a suitable environment. It
         * will be performed asynchronously by a lquota thread. */
        qsd_op_end(NULL, osd->od_quota_slave, &oh->ot_quota_trans);

        lu_device_put(lud);
        th->th_dev = NULL;

        OBD_FREE_PTR(oh);

        EXIT;
}
static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
{
        struct osd_thandle *oh = container_of0(th, struct osd_thandle,
                                               ot_super);

        LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
        LASSERT(dcb->dcb_func != NULL);
        if (dcb->dcb_flags & DCB_TRANS_STOP)
                list_add(&dcb->dcb_linkage, &oh->ot_stop_dcb_list);
        else
                list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);

        return 0;
}
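/*
 * Illustrative sketch (not from the original source): a layer that
 * needs to run code at commit time fills in a dt_txn_commit_cb and
 * registers it through the generic dt_trans_cb_add() wrapper, which
 * lands here. my_commit_fn and the dcb storage are hypothetical:
 *
 *      dcb->dcb_magic = TRANS_COMMIT_CB_MAGIC;
 *      dcb->dcb_func  = my_commit_fn;
 *      dcb->dcb_flags = 0;                     // or DCB_TRANS_STOP
 *      rc = dt_trans_cb_add(th, dcb);
 *
 * With dcb_flags == 0 the callback fires from osd_trans_commit_cb()
 * once the txg commits; with DCB_TRANS_STOP it fires earlier, from
 * osd_trans_stop_cb() when the transaction is stopped.
 */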
/*
 * Concurrency: shouldn't matter.
 */
static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
                           struct thandle *th)
{
        struct osd_thandle *oh;
        int                 rc;
        ENTRY;

        oh = container_of0(th, struct osd_thandle, ot_super);
        LASSERT(oh);
        LASSERT(oh->ot_tx);

        rc = dt_txn_hook_start(env, d, th);
        if (rc != 0)
                RETURN(rc);

        if (oh->ot_write_commit && OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC))
                /* Unlike ldiskfs, ZFS checks for available space and returns
                 * -ENOSPC when assigning txg */
                RETURN(-ENOSPC);

        rc = -dmu_tx_assign(oh->ot_tx, TXG_WAIT);
        if (unlikely(rc != 0)) {
                struct osd_device *osd = osd_dt_dev(d);
                /* dmu will call commit callback with error code during abort */
                if (!lu_device_is_md(&d->dd_lu_dev) && rc == -ENOSPC)
                        CERROR("%s: failed to start transaction due to ENOSPC\n",
                               osd->od_svname);
                else
                        CERROR("%s: can't assign tx: rc = %d\n",
                               osd->od_svname, rc);
        } else {
                /* add commit callback */
                dmu_tx_callback_register(oh->ot_tx, osd_trans_commit_cb, oh);
                oh->ot_assigned = 1;
                osd_oti_get(env)->oti_in_trans = 1;
                lu_device_get(&d->dd_lu_dev);
        }

        RETURN(rc);
}
static void osd_unlinked_list_emptify(const struct lu_env *env,
                                      struct osd_device *osd,
                                      struct list_head *list, bool free)
{
        struct osd_object *obj;
        uint64_t           oid;

        while (!list_empty(list)) {
                obj = list_entry(list->next,
                                 struct osd_object, oo_unlinked_linkage);
                LASSERT(obj->oo_dn != NULL);
                oid = obj->oo_dn->dn_object;

                list_del_init(&obj->oo_unlinked_linkage);
                if (free)
                        (void)osd_unlinked_object_free(env, osd, oid);
        }
}
static void osd_trans_stop_cb(struct osd_thandle *oth, int result)
{
        struct dt_txn_commit_cb *dcb;
        struct dt_txn_commit_cb *tmp;

        /* call per-transaction stop callbacks if any */
        list_for_each_entry_safe(dcb, tmp, &oth->ot_stop_dcb_list,
                                 dcb_linkage) {
                LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
                         "commit callback entry: magic=%x name='%s'\n",
                         dcb->dcb_magic, dcb->dcb_name);
                list_del_init(&dcb->dcb_linkage);
                dcb->dcb_func(NULL, &oth->ot_super, dcb, result);
        }
}
/*
 * Concurrency: shouldn't matter.
 */
static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
                          struct thandle *th)
{
        struct osd_device *osd = osd_dt_dev(th->th_dev);
        bool               sync = (th->th_sync != 0);
        struct osd_thandle *oh;
        struct list_head   unlinked;
        uint64_t           txg;
        int                rc;
        ENTRY;

        oh = container_of0(th, struct osd_thandle, ot_super);
        INIT_LIST_HEAD(&unlinked);
        list_splice_init(&oh->ot_unlinked_list, &unlinked);
        /* reset OI cache for safety */
        osd_oti_get(env)->oti_ins_cache_used = 0;

        if (oh->ot_assigned == 0) {
                LASSERT(oh->ot_tx);
                dmu_tx_abort(oh->ot_tx);
                osd_object_sa_dirty_rele(env, oh);
                osd_unlinked_list_emptify(env, osd, &unlinked, false);
                /* there won't be any commit, release reserved quota space now,
                 * if any */
                qsd_op_end(env, osd->od_quota_slave, &oh->ot_quota_trans);
                OBD_FREE_PTR(oh);
                RETURN(0);
        }

        rc = dt_txn_hook_stop(env, th);
        if (rc != 0)
                CDEBUG(D_OTHER, "%s: transaction hook failed: rc = %d\n",
                       osd->od_svname, rc);

        osd_trans_stop_cb(oh, rc);

        LASSERT(oh->ot_tx);
        txg = oh->ot_tx->tx_txg;

        osd_object_sa_dirty_rele(env, oh);
        /* XXX: Once dmu_tx_commit() is called, oh/th may already have been
         * freed by osd_trans_commit_cb. */
        dmu_tx_commit(oh->ot_tx);
        osd_oti_get(env)->oti_in_trans = 0;

        osd_unlinked_list_emptify(env, osd, &unlinked, true);

        if (sync) {
                if (osd_txg_sync_delay_us < 0)
                        txg_wait_synced(dmu_objset_pool(osd->od_os), txg);
                else
                        udelay(osd_txg_sync_delay_us);
        }

        RETURN(rc);
}
static struct thandle *osd_trans_create(const struct lu_env *env,
                                        struct dt_device *dt)
{
        struct osd_device  *osd = osd_dt_dev(dt);
        struct osd_thandle *oh;
        struct thandle     *th;
        dmu_tx_t           *tx;
        ENTRY;

        if (dt->dd_rdonly) {
                CERROR("%s: someone tried to start a transaction in "
                       "read-only mode; this should be disabled\n",
                       osd_name(osd_dt_dev(dt)));
                dump_stack();
                RETURN(ERR_PTR(-EROFS));
        }

        tx = dmu_tx_create(osd->od_os);
        if (tx == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        /* alloc callback data */
        OBD_ALLOC_PTR(oh);
        if (oh == NULL) {
                dmu_tx_abort(tx);
                RETURN(ERR_PTR(-ENOMEM));
        }

        oh->ot_tx = tx;
        INIT_LIST_HEAD(&oh->ot_dcb_list);
        INIT_LIST_HEAD(&oh->ot_stop_dcb_list);
        INIT_LIST_HEAD(&oh->ot_unlinked_list);
        INIT_LIST_HEAD(&oh->ot_sa_list);
        memset(&oh->ot_quota_trans, 0, sizeof(oh->ot_quota_trans));
        th = &oh->ot_super;
        th->th_dev = dt;
        th->th_result = 0;
        RETURN(th);
}
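/*
 * Illustrative sketch (not from the original source): the generic dt
 * layer drives the entry points above in a fixed order. A caller
 * making one modification does, roughly:
 *
 *      th = dt_trans_create(env, dt);     // -> osd_trans_create(): dmu_tx_create()
 *      ... declare the intended changes against th ...
 *      rc = dt_trans_start(env, dt, th);  // -> osd_trans_start(): dmu_tx_assign()
 *      ... apply the changes ...
 *      rc = dt_trans_stop(env, dt, th);   // -> osd_trans_stop(): dmu_tx_commit()
 *
 * If th->th_sync is set, osd_trans_stop() also waits (or, with the
 * osd_txg_sync_delay_us module parameter, merely delays) for the
 * containing txg to reach stable storage.
 */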
/* Estimate the total number of objects from a number of blocks */
uint64_t osd_objs_count_estimate(uint64_t usedbytes, uint64_t usedobjs,
                                 uint64_t nrblocks, uint64_t est_maxblockshift)
{
        uint64_t est_totobjs, est_usedblocks, est_usedobjs;

        /*
         * If blocksize is below 64KB (e.g. MDT with recordsize=4096) then
         * bump the free dnode estimate to assume blocks at least 64KB in
         * case of a directory-heavy MDT (at 32KB/directory).
         */
        if (est_maxblockshift < 16) {
                nrblocks >>= (16 - est_maxblockshift);
                est_maxblockshift = 16;
        }

        /*
         * Estimate the total number of dnodes from the total blocks count
         * and the space used per dnode. Since we don't know the overhead
         * associated with each dnode (xattrs, SAs, VDEV overhead, etc.)
         * just using DNODE_SHIFT isn't going to give a good estimate.
         * Instead, compute the current average space usage per dnode, with
         * an upper and lower cap to avoid unrealistic estimates.
         *
         * In case there aren't many dnodes or blocks used yet, add a small
         * correction factor (OSD_DNODE_EST_{COUNT,BLKSHIFT}). This factor
         * gradually disappears as the number of real dnodes grows. It also
         * avoids the need to check for divide-by-zero when computing
         * dn_per_block.
         */
        CLASSERT(OSD_DNODE_MIN_BLKSHIFT > 0);
        CLASSERT(OSD_DNODE_EST_BLKSHIFT > 0);

        est_usedblocks = ((OSD_DNODE_EST_COUNT << OSD_DNODE_EST_BLKSHIFT) +
                          usedbytes) >> est_maxblockshift;
        est_usedobjs   = OSD_DNODE_EST_COUNT + usedobjs;

        if (est_usedobjs <= est_usedblocks) {
                /*
                 * Average space/dnode more than maximum block size, use max
                 * block size to estimate free dnodes from adjusted free blocks
                 * count. OSTs typically use multiple blocks per dnode so this
                 * case applies.
                 */
                est_totobjs = nrblocks;
        } else if (est_usedobjs >= (est_usedblocks << OSD_DNODE_MIN_BLKSHIFT)) {
                /*
                 * Average space/dnode smaller than min dnode size (probably
                 * due to metadnode compression), use min dnode size to
                 * estimate object count. MDTs may use only one block per dnode
                 * so this case applies.
                 */
                est_totobjs = nrblocks << OSD_DNODE_MIN_BLKSHIFT;
        } else {
                /*
                 * Between the extremes, use average space per existing dnode
                 * to compute the number of dnodes that will fit into nrblocks:
                 *
                 *    est_totobjs = nrblocks * (est_usedobjs / est_usedblocks)
                 *
                 * this may overflow 64 bits or become 0 if not handled well.
                 *
                 * We know nrblocks is below 2^(64 - blkbits), and
                 * est_usedobjs is under 48 bits due to DN_MAX_OBJECT_SHIFT,
                 * which means that multiplying them may get as large as
                 * 2^96 for the minimum blocksize of 64KB allowed above.
                 *
                 * The ratio of dnodes per block (est_usedobjs / est_usedblocks)
                 * is under 2^(blkbits - DNODE_SHIFT) = blocksize / 512 due to
                 * the limit checks above, so we can safely compute this first.
                 * We care more about accuracy on the MDT (many dnodes/block)
                 * which is good because this is where truncation errors are
                 * smallest. Since both nrblocks and dn_per_block are a
                 * function of blkbits, their product is at most:
                 *
                 *    2^(64 - blkbits) * 2^(blkbits - DNODE_SHIFT) = 2^(64 - 9)
                 *
                 * so we can safely use 7 bits to compute a fixed-point
                 * fraction and est_totobjs can still fit in 64 bits.
                 */
                unsigned dn_per_block = (est_usedobjs << 7) / est_usedblocks;

                est_totobjs = (nrblocks * dn_per_block) >> 7;
        }
        return est_totobjs;
}
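/*
 * Worked example for the fixed-point branch above (illustrative
 * numbers, not from the source): with est_usedobjs = 1000000 dnodes in
 * est_usedblocks = 250000 blocks (4 dnodes per block on average),
 * dn_per_block = (1000000 << 7) / 250000 = 512, i.e. 4.0 in 7-bit
 * fixed point. For nrblocks = 2000000 free blocks this estimates
 * est_totobjs = (2000000 * 512) >> 7 = 8000000 more objects.
 */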
static int osd_objset_statfs(struct osd_device *osd, struct obd_statfs *osfs)
{
        struct objset *os = osd->od_os;
        uint64_t usedbytes, availbytes, usedobjs, availobjs;
        uint64_t est_availobjs;
        uint64_t reserved;
        uint64_t bshift;

        dmu_objset_space(os, &usedbytes, &availbytes, &usedobjs, &availobjs);

        memset(osfs, 0, sizeof(*osfs));

        /* We're a zfs filesystem. */
        osfs->os_type = UBERBLOCK_MAGIC;

        /*
         * ZFS allows multiple block sizes. For statfs, Linux makes no
         * proper distinction between bsize and frsize and incorrectly
         * uses bsize instead of frsize in the free/used block
         * calculations, while bsize is also used as the optimal I/O
         * blocksize. We therefore return the largest possible block size
         * as the I/O size for optimum performance and scale the free and
         * used block counts appropriately.
         */
        osfs->os_bsize = osd->od_max_blksz;
        bshift = fls64(osfs->os_bsize) - 1;

        osfs->os_blocks = (usedbytes + availbytes) >> bshift;
        osfs->os_bfree = availbytes >> bshift;
        osfs->os_bavail = osfs->os_bfree; /* no extra root reservation */

        /* Take replication (i.e. number of copies) into account */
        if (os->os_copies != 0)
                osfs->os_bavail /= os->os_copies;

        /*
         * Reserve some space so we don't run into ENOSPC due to grants not
         * accounting for metadata overhead in ZFS, and to avoid fragmentation.
         * Rather than report this via os_bavail (which makes users unhappy if
         * they can't fill the filesystem 100%), reduce os_blocks as well.
         *
         * Reserve 0.78% of total space, at least 16MB for small filesystems,
         * for internal files to be created/unlinked when space is tight.
         */
        CLASSERT(OSD_STATFS_RESERVED_SIZE > 0);
        reserved = OSD_STATFS_RESERVED_SIZE >> bshift;
        if (likely(osfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
                reserved = osfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;

        osfs->os_blocks -= reserved;
        osfs->os_bfree  -= min(reserved, osfs->os_bfree);
        osfs->os_bavail -= min(reserved, osfs->os_bavail);
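        /*
         * Worked example (illustrative; assumes the usual definitions
         * OSD_STATFS_RESERVED_SIZE = 16MB and OSD_STATFS_RESERVED_SHIFT = 7,
         * i.e. 1/128 = 0.78% as per the comment above): on a 10TB OST with
         * 128KB blocks, os_blocks is ~83.9M blocks, far above the 128-block
         * (16MB) floor, so reserved = os_blocks >> 7 = ~655K blocks, hiding
         * ~80GB (10TB/128) from os_blocks/os_bfree/os_bavail. On a 1GB
         * filesystem the 16MB floor is used instead.
         */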
        /*
         * The availobjs value returned from dmu_objset_space() is largely
         * useless, since it reports the number of objects that might
         * theoretically still fit into the dataset, independent of minor
         * issues like how much space is actually available in the pool.
         * Compute a better estimate in osd_objs_count_estimate().
         */
        est_availobjs = osd_objs_count_estimate(usedbytes, usedobjs,
                                                osfs->os_bfree, bshift);

        osfs->os_ffree = min(availobjs, est_availobjs);
        osfs->os_files = osfs->os_ffree + usedobjs;

        /* ZFS XXX: fill in backing dataset FSID/UUID
           memcpy(osfs->os_fsid, .... ); */

        osfs->os_namelen = MAXNAMELEN;
        osfs->os_maxbytes = OBD_OBJECT_EOF;

        if (!spa_writeable(dmu_objset_spa(os)) ||
            osd->od_dev_set_rdonly || osd->od_prop_rdonly)
                osfs->os_state |= OS_STATE_READONLY;

        return 0;
}
/*
 * Concurrency: shouldn't matter.
 */
int osd_statfs(const struct lu_env *env, struct dt_device *d,
               struct obd_statfs *osfs)
{
        int rc;
        ENTRY;

        rc = osd_objset_statfs(osd_dt_dev(d), osfs);
        if (unlikely(rc != 0))
                RETURN(rc);

        osfs->os_bavail -= min_t(u64,
                                 OSD_GRANT_FOR_LOCAL_OIDS / osfs->os_bsize,
                                 osfs->os_bavail);
        RETURN(0);
}
static int osd_blk_insert_cost(struct osd_device *osd)
{
        int max_blockshift, nr_blkptrshift, bshift;

        /* max_blockshift is the log2 of the number of blocks needed to reach
         * the maximum filesize (that's to say 2^64) */
        bshift = osd_spa_maxblockshift(dmu_objset_spa(osd->od_os));
        max_blockshift = DN_MAX_OFFSET_SHIFT - bshift;

        /* nr_blkptrshift is the log2 of the number of block pointers that can
         * be stored in an indirect block */
        CLASSERT(DN_MAX_INDBLKSHIFT > SPA_BLKPTRSHIFT);
        nr_blkptrshift = DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT;

        /* max_blockshift / nr_blkptrshift is thus the maximum depth of the
         * tree. We add +1 for rounding purposes.
         * The tree depth times the indirect block size gives us the maximum
         * cost of inserting a block in the tree */
        return (max_blockshift / nr_blkptrshift + 1) * (1 << DN_MAX_INDBLKSHIFT);
}
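/*
 * Worked example (illustrative; assumes the common ZFS constants
 * DN_MAX_OFFSET_SHIFT = 64, DN_MAX_INDBLKSHIFT = 17, SPA_BLKPTRSHIFT = 7):
 * with a 128KB maximum block size (bshift = 17), max_blockshift =
 * 64 - 17 = 47 and nr_blkptrshift = 17 - 7 = 10, so the indirect tree
 * is at most 47/10 + 1 = 5 levels deep and the worst-case cost of
 * inserting one block is 5 * 128KB = 640KB of indirect blocks.
 */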
/*
 * Concurrency: doesn't access mutable data.
 */
static void osd_conf_get(const struct lu_env *env,
                         const struct dt_device *dev,
                         struct dt_device_param *param)
{
        struct osd_device *osd = osd_dt_dev(dev);

        /*
         * XXX should be taken from not-yet-existing fs abstraction layer.
         */
        param->ddp_max_name_len = MAXNAMELEN;
        param->ddp_max_nlink = 1 << 31; /* it's 8 bytes on disk */
        param->ddp_symlink_max = PATH_MAX;
        param->ddp_mount_type = LDD_MT_ZFS;

        param->ddp_mntopts = MNTOPT_USERXATTR;
        if (osd->od_posix_acl)
                param->ddp_mntopts |= MNTOPT_ACL;
        param->ddp_max_ea_size = DXATTR_MAX_ENTRY_SIZE;

        /* for maxbytes, report same value as ZPL */
        param->ddp_maxbytes = MAX_LFS_FILESIZE;

        /* inodes are dynamically allocated, so we report the per-inode space
         * consumption to upper layers. This static value is not really
         * accurate; we should use the same logic as in osd_objset_statfs()
         * to estimate the real size consumed by an object. */
        param->ddp_inodespace = OSD_DNODE_EST_COUNT;
        /* Although ZFS isn't an extent-based filesystem, the metadata overhead
         * (i.e. 7 levels of indirect blocks, see osd_blk_insert_cost()) should
         * not be accounted for every single new block insertion.
         * Instead, the maximum extent size is set to the number of blocks that
         * can fit into a single contiguous indirect block. There will be some
         * cases where this crosses indirect blocks, but such an extent also
         * won't add 7 new levels of indirect blocks, so the reserved space
         * still covers the extra indirect block. */
        param->ddp_max_extent_blks =
                (1 << (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT));
        param->ddp_extent_tax = osd_blk_insert_cost(osd);

        /* Preferred RPC size for efficient disk IO. 1MB shows good
         * all-around performance for ZFS, but use blocksize (recordsize)
         * by default if larger to avoid read-modify-write. */
        if (osd->od_max_blksz > ONE_MB_BRW_SIZE)
                param->ddp_brw_size = osd->od_max_blksz;
        else
                param->ddp_brw_size = ONE_MB_BRW_SIZE;
}
/*
 * Concurrency: shouldn't matter.
 */
static int osd_sync(const struct lu_env *env, struct dt_device *d)
{
        if (!d->dd_rdonly) {
                struct osd_device *osd = osd_dt_dev(d);

                CDEBUG(D_CACHE, "syncing OSD %s\n", LUSTRE_OSD_ZFS_NAME);
                txg_wait_synced(dmu_objset_pool(osd->od_os), 0ULL);
                CDEBUG(D_CACHE, "synced OSD %s\n", LUSTRE_OSD_ZFS_NAME);
        }

        return 0;
}
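/*
 * Ask ZFS to commit the currently open transaction group without
 * blocking: bump the pool's quiesce target past the open txg and wake
 * the quiesce thread, so the txg is pushed toward sync in the
 * background while the caller returns immediately.
 */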
static int osd_commit_async(const struct lu_env *env, struct dt_device *dev)
{
        struct osd_device *osd = osd_dt_dev(dev);
        tx_state_t        *tx = &dmu_objset_pool(osd->od_os)->dp_tx;
        uint64_t           txg;

        mutex_enter(&tx->tx_sync_lock);
        txg = tx->tx_open_txg + 1;
        if (tx->tx_quiesce_txg_waiting < txg) {
                tx->tx_quiesce_txg_waiting = txg;
                cv_broadcast(&tx->tx_quiesce_more_cv);
        }
        mutex_exit(&tx->tx_sync_lock);

        return 0;
}
/*
 * Concurrency: shouldn't matter.
 */
static int osd_ro(const struct lu_env *env, struct dt_device *d)
{
        struct osd_device *osd = osd_dt_dev(d);
        ENTRY;

        CERROR("%s: *** setting device %s read-only ***\n",
               osd->od_svname, LUSTRE_OSD_ZFS_NAME);
        osd->od_dev_set_rdonly = 1;
        spa_freeze(dmu_objset_spa(osd->od_os));

        RETURN(0);
}
static struct dt_device_operations osd_dt_ops = {
        .dt_root_get            = osd_root_get,
        .dt_statfs              = osd_statfs,
        .dt_trans_create        = osd_trans_create,
        .dt_trans_start         = osd_trans_start,
        .dt_trans_stop          = osd_trans_stop,
        .dt_trans_cb_add        = osd_trans_cb_add,
        .dt_conf_get            = osd_conf_get,
        .dt_sync                = osd_sync,
        .dt_commit_async        = osd_commit_async,
        .dt_ro                  = osd_ro,
};
/*
 * DMU OSD device type methods
 */
static int osd_type_init(struct lu_device_type *t)
{
        LU_CONTEXT_KEY_INIT(&osd_key);
        return lu_context_key_register(&osd_key);
}

static void osd_type_fini(struct lu_device_type *t)
{
        lu_context_key_degister(&osd_key);
}
static void *osd_key_init(const struct lu_context *ctx,
                          struct lu_context_key *key)
{
        struct osd_thread_info *info;

        OBD_ALLOC_PTR(info);
        if (info != NULL)
                info->oti_env = container_of(ctx, struct lu_env, le_ctx);
        else
                info = ERR_PTR(-ENOMEM);
        return info;
}
static void osd_key_fini(const struct lu_context *ctx,
                         struct lu_context_key *key, void *data)
{
        struct osd_thread_info *info = data;
        struct osd_idmap_cache *idc = info->oti_ins_cache;

        if (idc != NULL) {
                LASSERT(info->oti_ins_cache_size > 0);
                OBD_FREE(idc, sizeof(*idc) * info->oti_ins_cache_size);
                info->oti_ins_cache = NULL;
                info->oti_ins_cache_size = 0;
        }
        lu_buf_free(&info->oti_xattr_lbuf);
        OBD_FREE_PTR(info);
}
static void osd_key_exit(const struct lu_context *ctx,
                         struct lu_context_key *key, void *data)
{
}

struct lu_context_key osd_key = {
        .lct_tags = LCT_DT_THREAD | LCT_MD_THREAD | LCT_MG_THREAD | LCT_LOCAL,
        .lct_init = osd_key_init,
        .lct_fini = osd_key_fini,
        .lct_exit = osd_key_exit
};
static void osd_fid_fini(const struct lu_env *env, struct osd_device *osd)
{
        if (osd->od_cl_seq == NULL)
                return;

        seq_client_fini(osd->od_cl_seq);
        OBD_FREE_PTR(osd->od_cl_seq);
        osd->od_cl_seq = NULL;
}
static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
{
        ENTRY;

        /* shutdown quota slave instance associated with the device */
        if (o->od_quota_slave != NULL) {
                /* complete all in-flight callbacks */
                osd_sync(env, &o->od_dt_dev);
                txg_wait_callbacks(spa_get_dsl(dmu_objset_spa(o->od_os)));
                qsd_fini(env, o->od_quota_slave);
                o->od_quota_slave = NULL;
        }

        osd_fid_fini(env, o);

        RETURN(0);
}
static void osd_xattr_changed_cb(void *arg, uint64_t newval)
{
        struct osd_device *osd = arg;

        osd->od_xattr_in_sa = (newval == ZFS_XATTR_SA);
}

static void osd_recordsize_changed_cb(void *arg, uint64_t newval)
{
        struct osd_device *osd = arg;

        LASSERT(newval <= osd_spa_maxblocksize(dmu_objset_spa(osd->od_os)));
        LASSERT(newval >= SPA_MINBLOCKSIZE);
        LASSERT(ISP2(newval));

        osd->od_max_blksz = newval;
}

static void osd_readonly_changed_cb(void *arg, uint64_t newval)
{
        struct osd_device *osd = arg;

        osd->od_prop_rdonly = !!newval;
}
#ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
static void osd_dnodesize_changed_cb(void *arg, uint64_t newval)
{
        struct osd_device *osd = arg;

        osd->od_dnsize = newval;
}
#endif
/*
 * This function unregisters all registered callbacks. It's harmless to
 * unregister callbacks that were never registered, so it is used to safely
 * unwind a partially completed call to osd_objset_register_callbacks().
 */
static void osd_objset_unregister_callbacks(struct osd_device *o)
{
        struct dsl_dataset *ds = dmu_objset_ds(o->od_os);

        (void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_XATTR),
                                   osd_xattr_changed_cb, o);
        (void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
                                   osd_recordsize_changed_cb, o);
        (void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_READONLY),
                                   osd_readonly_changed_cb, o);
#ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
        (void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_DNODESIZE),
                                   osd_dnodesize_changed_cb, o);
#endif

        if (o->arc_prune_cb != NULL) {
                arc_remove_prune_callback(o->arc_prune_cb);
                o->arc_prune_cb = NULL;
        }
}
/*
 * Register the required callbacks to be notified when zfs properties
 * are modified using the 'zfs(8)' command line utility.
 */
static int osd_objset_register_callbacks(struct osd_device *o)
{
        struct dsl_dataset *ds = dmu_objset_ds(o->od_os);
        dsl_pool_t         *dp = dmu_objset_pool(o->od_os);
        int                 rc;

        LASSERT(ds);
        LASSERT(dp);

        dsl_pool_config_enter(dp, FTAG);
        rc = -dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_XATTR),
                                osd_xattr_changed_cb, o);
        if (rc)
                GOTO(err, rc);

        rc = -dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
                                osd_recordsize_changed_cb, o);
        if (rc)
                GOTO(err, rc);

        rc = -dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_READONLY),
                                osd_readonly_changed_cb, o);
        if (rc)
                GOTO(err, rc);

#ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
        rc = -dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_DNODESIZE),
                                osd_dnodesize_changed_cb, o);
        if (rc)
                GOTO(err, rc);
#endif

        o->arc_prune_cb = arc_add_prune_callback(arc_prune_func, o);
err:
        dsl_pool_config_exit(dp, FTAG);
        if (rc)
                osd_objset_unregister_callbacks(o);

        RETURN(rc);
}
static int osd_objset_open(struct osd_device *o)
{
        uint64_t version = ZPL_VERSION;
        uint64_t sa_obj, unlink_obj;
        int      rc;
        ENTRY;

        rc = -osd_dmu_objset_own(o->od_mntdev, DMU_OST_ZFS,
                                 o->od_dt_dev.dd_rdonly ? B_TRUE : B_FALSE,
                                 B_FALSE, o, &o->od_os);

        if (rc) {
                CERROR("%s: can't open %s\n", o->od_svname, o->od_mntdev);
                o->od_os = NULL;

                GOTO(out, rc);
        }

        /* Check ZFS version */
        rc = -zap_lookup(o->od_os, MASTER_NODE_OBJ,
                         ZPL_VERSION_STR, 8, 1, &version);
        if (rc) {
                CERROR("%s: Error looking up ZPL VERSION\n", o->od_mntdev);
                /*
                 * We can't return ENOENT because that would mean the objset
                 * didn't exist.
                 */
                GOTO(out, rc = -EIO);
        }

        rc = -zap_lookup(o->od_os, MASTER_NODE_OBJ,
                         ZFS_SA_ATTRS, 8, 1, &sa_obj);
        if (rc)
                GOTO(out, rc);

        rc = -sa_setup(o->od_os, sa_obj, zfs_attr_table,
                       ZPL_END, &o->z_attr_table);
        if (rc)
                GOTO(out, rc);

        rc = -zap_lookup(o->od_os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ,
                         8, 1, &o->od_rootid);
        if (rc) {
                CERROR("%s: lookup for root failed: rc = %d\n",
                       o->od_svname, rc);
                GOTO(out, rc);
        }

        rc = -zap_lookup(o->od_os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET,
                         8, 1, &unlink_obj);
        if (rc) {
                CERROR("%s: lookup for %s failed: rc = %d\n",
                       o->od_svname, ZFS_UNLINKED_SET, rc);
                GOTO(out, rc);
        }

        /* Check that user/group usage tracking is supported */
        if (!dmu_objset_userused_enabled(o->od_os) ||
            DMU_USERUSED_DNODE(o->od_os)->dn_type != DMU_OT_USERGROUP_USED ||
            DMU_GROUPUSED_DNODE(o->od_os)->dn_type != DMU_OT_USERGROUP_USED) {
                CERROR("%s: Space accounting not supported by this target, "
                       "aborting\n", o->od_svname);
                GOTO(out, rc = -ENOTSUPP);
        }

        rc = __osd_obj2dnode(o->od_os, unlink_obj, &o->od_unlinked);
        if (rc) {
                CERROR("%s: can't get dnode for unlinked: rc = %d\n",
                       o->od_svname, rc);
                GOTO(out, rc);
        }

out:
        if (rc != 0 && o->od_os != NULL) {
                osd_dmu_objset_disown(o->od_os, B_FALSE, o);
                o->od_os = NULL;
        }

        RETURN(rc);
}
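/*
 * Free one object from the unlinked set. The object's data is punched
 * first, outside of any transaction (dmu_free_long_range() spreads the
 * frees across many txgs); then the dnode itself is freed and its
 * entry removed from the unlinked ZAP in a single small transaction.
 */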
int osd_unlinked_object_free(const struct lu_env *env, struct osd_device *osd,
                             uint64_t oid)
{
        char     *key = osd_oti_get(env)->oti_str;
        int       rc;
        dmu_tx_t *tx;

        if (osd->od_dt_dev.dd_rdonly) {
                CERROR("%s: someone tried to free objects in "
                       "read-only mode; this should be disabled\n",
                       osd_name(osd));
                dump_stack();

                return -EROFS;
        }

        rc = -dmu_free_long_range(osd->od_os, oid, 0, DMU_OBJECT_END);
        if (rc != 0) {
                CWARN("%s: Cannot truncate %llu: rc = %d\n",
                      osd->od_svname, oid, rc);
                return rc;
        }

        tx = dmu_tx_create(osd->od_os);
        dmu_tx_mark_netfree(tx);
        dmu_tx_hold_free(tx, oid, 0, DMU_OBJECT_END);
        osd_tx_hold_zap(tx, osd->od_unlinked->dn_object, osd->od_unlinked,
                        FALSE, NULL);
        rc = -dmu_tx_assign(tx, TXG_WAIT);
        if (rc != 0) {
                CWARN("%s: Cannot assign tx for %llu: rc = %d\n",
                      osd->od_svname, oid, rc);
                goto failed;
        }

        snprintf(key, sizeof(osd_oti_get(env)->oti_str), "%llx", oid);
        rc = osd_zap_remove(osd, osd->od_unlinked->dn_object,
                            osd->od_unlinked, key, tx);
        if (rc) {
                CWARN("%s: Cannot remove %llu from unlinked set: rc = %d\n",
                      osd->od_svname, oid, rc);
                goto failed;
        }

        rc = -dmu_object_free(osd->od_os, oid, tx);
        if (rc) {
                CWARN("%s: Cannot free %llu: rc = %d\n",
                      osd->od_svname, oid, rc);
                goto failed;
        }
        dmu_tx_commit(tx);

        return 0;

failed:
        LASSERT(rc != 0);
        dmu_tx_abort(tx);
        return rc;
}
static void
osd_unlinked_drain(const struct lu_env *env, struct osd_device *osd)
{
        zap_cursor_t     zc;
        zap_attribute_t *za = &osd_oti_get(env)->oti_za;

        zap_cursor_init(&zc, osd->od_os, osd->od_unlinked->dn_object);

        while (zap_cursor_retrieve(&zc, za) == 0) {
                /* If we can't free the object, leave it in the unlinked set
                 * until the OSD is mounted again, when osd_unlinked_drain()
                 * will be called. */
                if (osd_unlinked_object_free(env, osd, za->za_first_integer))
                        break;
                zap_cursor_advance(&zc);
        }

        zap_cursor_fini(&zc);
}
static int osd_mount(const struct lu_env *env,
                     struct osd_device *o, struct lustre_cfg *cfg)
{
        char       *mntdev = lustre_cfg_string(cfg, 1);
        char       *str = lustre_cfg_string(cfg, 2);
        char       *svname = lustre_cfg_string(cfg, 4);
        dnode_t    *rootdn;
        const char *opts;
        int         rc;
        ENTRY;

        if (o->od_os != NULL)
                RETURN(0);

        if (mntdev == NULL || svname == NULL)
                RETURN(-EINVAL);

        rc = strlcpy(o->od_mntdev, mntdev, sizeof(o->od_mntdev));
        if (rc >= sizeof(o->od_mntdev))
                RETURN(-E2BIG);

        rc = strlcpy(o->od_svname, svname, sizeof(o->od_svname));
        if (rc >= sizeof(o->od_svname))
                RETURN(-E2BIG);

        o->od_index_backup_stop = 0;
        o->od_index = -1; /* -1 means index is invalid */
        rc = server_name2index(o->od_svname, &o->od_index, NULL);
        str = strstr(str, ":");
        if (str) {
                unsigned long flags;

                rc = kstrtoul(str + 1, 10, &flags);
                if (rc)
                        RETURN(-EINVAL);

                if (flags & LMD_FLG_DEV_RDONLY) {
                        o->od_dt_dev.dd_rdonly = 1;
                        LCONSOLE_WARN("%s: set dev_rdonly on this device\n",
                                      svname);
                }

                if (flags & LMD_FLG_NOSCRUB)
                        o->od_auto_scrub_interval = AS_NEVER;
        }

        if (server_name_is_ost(o->od_svname))
                o->od_is_ost = 1;

        rc = osd_objset_open(o);
        if (rc)
                GOTO(err, rc);

        o->od_xattr_in_sa = B_TRUE;
        o->od_max_blksz = osd_spa_maxblocksize(o->od_os->os_spa);

        rc = __osd_obj2dnode(o->od_os, o->od_rootid, &rootdn);
        if (rc)
                GOTO(err, rc);
        o->od_root = rootdn->dn_object;
        osd_dnode_rele(rootdn);

        rc = __osd_obj2dnode(o->od_os, DMU_USERUSED_OBJECT,
                             &o->od_userused_dn);
        if (rc)
                GOTO(err, rc);

        rc = __osd_obj2dnode(o->od_os, DMU_GROUPUSED_OBJECT,
                             &o->od_groupused_dn);
        if (rc)
                GOTO(err, rc);

#ifdef ZFS_PROJINHERIT
        if (dmu_objset_projectquota_enabled(o->od_os)) {
                rc = __osd_obj2dnode(o->od_os, DMU_PROJECTUSED_OBJECT,
                                     &o->od_projectused_dn);
                if (rc && rc != -ENOENT)
                        GOTO(err, rc);
        }
#endif

        rc = lu_site_init(&o->od_site, osd2lu_dev(o));
        if (rc)
                GOTO(err, rc);
        o->od_site.ls_bottom_dev = osd2lu_dev(o);

        rc = lu_site_init_finish(&o->od_site);
        if (rc)
                GOTO(err, rc);

        rc = osd_objset_register_callbacks(o);
        if (rc)
                GOTO(err, rc);

        rc = osd_scrub_setup(env, o);
        if (rc)
                GOTO(err, rc);

        rc = osd_procfs_init(o, o->od_svname);
        if (rc)
                GOTO(err, rc);

        /* initialize quota slave instance */
        o->od_quota_slave = qsd_init(env, o->od_svname, &o->od_dt_dev,
                                     o->od_proc_entry);
        if (IS_ERR(o->od_quota_slave)) {
                rc = PTR_ERR(o->od_quota_slave);
                o->od_quota_slave = NULL;
                GOTO(err, rc);
        }

#ifdef HAVE_DMU_USEROBJ_ACCOUNTING
        if (!osd_dmu_userobj_accounting_available(o))
                CWARN("%s: dnode accounting not enabled: "
                      "enable feature@userobj_accounting in pool\n",
                      o->od_mntdev);
#endif

        /* parse mount option "noacl", and enable ACL by default */
        opts = lustre_cfg_string(cfg, 3);
        if (opts == NULL || strstr(opts, "noacl") == NULL)
                o->od_posix_acl = 1;

        osd_unlinked_drain(env, o);
err:
        if (rc && o->od_os) {
                osd_dmu_objset_disown(o->od_os, B_FALSE, o);
                o->od_os = NULL;
        }

        RETURN(rc);
}
static void osd_umount(const struct lu_env *env, struct osd_device *o)
{
        ENTRY;

        if (atomic_read(&o->od_zerocopy_alloc))
                CERROR("%s: lost %d allocated page(s)\n", o->od_svname,
                       atomic_read(&o->od_zerocopy_alloc));
        if (atomic_read(&o->od_zerocopy_loan))
                CERROR("%s: lost %d loaned abuf(s)\n", o->od_svname,
                       atomic_read(&o->od_zerocopy_loan));
        if (atomic_read(&o->od_zerocopy_pin))
                CERROR("%s: lost %d pinned dbuf(s)\n", o->od_svname,
                       atomic_read(&o->od_zerocopy_pin));

        if (o->od_unlinked) {
                osd_dnode_rele(o->od_unlinked);
                o->od_unlinked = NULL;
        }
        if (o->od_userused_dn) {
                osd_dnode_rele(o->od_userused_dn);
                o->od_userused_dn = NULL;
        }
        if (o->od_groupused_dn) {
                osd_dnode_rele(o->od_groupused_dn);
                o->od_groupused_dn = NULL;
        }

#ifdef ZFS_PROJINHERIT
        if (o->od_projectused_dn) {
                osd_dnode_rele(o->od_projectused_dn);
                o->od_projectused_dn = NULL;
        }
#endif

        if (o->od_os != NULL) {
                if (!o->od_dt_dev.dd_rdonly)
                        /* force a txg sync to get all commit callbacks */
                        txg_wait_synced(dmu_objset_pool(o->od_os), 0ULL);

                /* close the object set */
                osd_dmu_objset_disown(o->od_os, B_FALSE, o);
                o->od_os = NULL;
        }

        EXIT;
}
static int osd_device_init0(const struct lu_env *env,
                            struct osd_device *o,
                            struct lustre_cfg *cfg)
{
        struct lu_device *l = osd2lu_dev(o);
        int               rc;

        /* if the module was re-loaded, env can lose its keys */
        rc = lu_env_refill((struct lu_env *) env);
        if (rc)
                GOTO(out, rc);

        l->ld_ops = &osd_lu_ops;
        o->od_dt_dev.dd_ops = &osd_dt_ops;
        sema_init(&o->od_otable_sem, 1);
        INIT_LIST_HEAD(&o->od_ios_list);
        o->od_auto_scrub_interval = AS_DEFAULT;

out:
        RETURN(rc);
}
static struct lu_device *osd_device_fini(const struct lu_env *env,
                                         struct lu_device *dev);

static struct lu_device *osd_device_alloc(const struct lu_env *env,
                                          struct lu_device_type *type,
                                          struct lustre_cfg *cfg)
{
        struct osd_device   *dev;
        struct osd_seq_list *osl;
        int                  rc;

        OBD_ALLOC_PTR(dev);
        if (dev == NULL)
                return ERR_PTR(-ENOMEM);

        osl = &dev->od_seq_list;
        INIT_LIST_HEAD(&osl->osl_seq_list);
        rwlock_init(&osl->osl_seq_list_lock);
        sema_init(&osl->osl_seq_init_sem, 1);
        INIT_LIST_HEAD(&dev->od_index_backup_list);
        INIT_LIST_HEAD(&dev->od_index_restore_list);
        spin_lock_init(&dev->od_lock);
        dev->od_index_backup_policy = LIBP_NONE;

        rc = dt_device_init(&dev->od_dt_dev, type);
        if (rc == 0) {
                rc = osd_device_init0(env, dev, cfg);
                if (rc == 0) {
                        rc = osd_mount(env, dev, cfg);
                        if (rc)
                                osd_device_fini(env, osd2lu_dev(dev));
                }
                if (rc)
                        dt_device_fini(&dev->od_dt_dev);
        }

        if (unlikely(rc != 0))
                OBD_FREE_PTR(dev);

        return rc == 0 ? osd2lu_dev(dev) : ERR_PTR(rc);
}
static struct lu_device *osd_device_free(const struct lu_env *env,
                                         struct lu_device *d)
{
        struct osd_device *o = osd_dev(d);
        ENTRY;

        /* XXX: make osd top device in order to release reference */
        d->ld_site->ls_top_dev = d;
        lu_site_purge(env, d->ld_site, -1);
        if (!cfs_hash_is_empty(d->ld_site->ls_obj_hash)) {
                LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
                lu_site_print(env, d->ld_site, &msgdata, lu_cdebug_printer);
        }
        lu_site_fini(&o->od_site);
        dt_device_fini(&o->od_dt_dev);
        OBD_FREE_PTR(o);

        RETURN(NULL);
}
static struct lu_device *osd_device_fini(const struct lu_env *env,
                                         struct lu_device *d)
{
        struct osd_device *o = osd_dev(d);
        int                rc;
        ENTRY;

        if (o->od_os) {
                osd_objset_unregister_callbacks(o);
                if (!o->od_dt_dev.dd_rdonly) {
                        osd_sync(env, lu2dt_dev(d));
                        txg_wait_callbacks(
                                        spa_get_dsl(dmu_objset_spa(o->od_os)));
                }
        }

        /* now with all the callbacks completed we can clean up the remaining
         * state */
        osd_shutdown(env, o);
        osd_scrub_cleanup(env, o);

        rc = osd_procfs_fini(o);
        if (rc) {
                CERROR("proc fini error %d\n", rc);
                RETURN(ERR_PTR(rc));
        }

        if (o->od_os)
                osd_umount(env, o);

        RETURN(NULL);
}
static int osd_device_init(const struct lu_env *env, struct lu_device *d,
                           const char *name, struct lu_device *next)
{
        return 0;
}

/*
 * To be removed: setup is performed by osd_device_{init,alloc} and
 * cleanup is performed by osd_device_{fini,free}.
 */
static int osd_process_config(const struct lu_env *env,
                              struct lu_device *d, struct lustre_cfg *cfg)
{
        struct osd_device *o = osd_dev(d);
        int                rc;
        ENTRY;

        switch (cfg->lcfg_command) {
        case LCFG_SETUP:
                rc = osd_mount(env, o, cfg);
                break;
        case LCFG_CLEANUP:
                /* For the case LCFG_PRE_CLEANUP is not called in advance,
                 * which may happen if a failure is hit during the mount
                 * process. */
                osd_index_backup(env, o, false);
                rc = osd_shutdown(env, o);
                break;
        case LCFG_PARAM: {
                LASSERT(&o->od_dt_dev);
                rc = class_process_proc_param(PARAM_OSD, lprocfs_osd_obd_vars,
                                              cfg, &o->od_dt_dev);
                if (rc > 0 || rc == -ENOSYS) {
                        rc = class_process_proc_param(PARAM_OST,
                                                      lprocfs_osd_obd_vars,
                                                      cfg, &o->od_dt_dev);
                        if (rc > 0)
                                rc = 0;
                }
                break;
        }
        case LCFG_PRE_CLEANUP:
                osd_index_backup(env, o,
                                 o->od_index_backup_policy != LIBP_NONE);
                rc = 0;
                break;
        default:
                rc = -ENOTTY;
        }

        RETURN(rc);
}
static int osd_recovery_complete(const struct lu_env *env, struct lu_device *d)
{
        struct osd_device *osd = osd_dev(d);
        int                rc = 0;
        ENTRY;

        if (osd->od_quota_slave == NULL)
                RETURN(0);

        /* start qsd instance on recovery completion, this notifies the quota
         * slave code that we are about to process new requests now */
        rc = qsd_start(env, osd->od_quota_slave);
        RETURN(rc);
}
/*
 * we use exports to track all osd users
 */
static int osd_obd_connect(const struct lu_env *env, struct obd_export **exp,
                           struct obd_device *obd, struct obd_uuid *cluuid,
                           struct obd_connect_data *data, void *localdata)
{
        struct osd_device    *osd = osd_dev(obd->obd_lu_dev);
        struct lustre_handle  conn;
        int                   rc;
        ENTRY;

        CDEBUG(D_CONFIG, "connect #%d\n", osd->od_connects);

        rc = class_connect(&conn, obd, cluuid);
        if (rc)
                RETURN(rc);

        *exp = class_conn2export(&conn);

        spin_lock(&obd->obd_dev_lock);
        osd->od_connects++;
        spin_unlock(&obd->obd_dev_lock);

        RETURN(0);
}
/*
 * once the last export (we don't count the self-export) has disappeared
 * the osd can be released
 */
static int osd_obd_disconnect(struct obd_export *exp)
{
        struct obd_device *obd = exp->exp_obd;
        struct osd_device *osd = osd_dev(obd->obd_lu_dev);
        int                rc, release = 0;
        ENTRY;

        /* Only disconnect the underlying layers on the final disconnect. */
        spin_lock(&obd->obd_dev_lock);
        osd->od_connects--;
        if (osd->od_connects == 0)
                release = 1;
        spin_unlock(&obd->obd_dev_lock);

        rc = class_disconnect(exp); /* bz 9811 */

        if (rc == 0 && release)
                class_manual_cleanup(obd);
        RETURN(rc);
}
static int osd_fid_init(const struct lu_env *env, struct osd_device *osd)
{
        struct seq_server_site *ss = osd_seq_site(osd);
        int                     rc;
        ENTRY;

        if (osd->od_is_ost || osd->od_cl_seq != NULL)
                RETURN(0);

        if (unlikely(ss == NULL))
                RETURN(-ENODEV);

        OBD_ALLOC_PTR(osd->od_cl_seq);
        if (osd->od_cl_seq == NULL)
                RETURN(-ENOMEM);

        rc = seq_client_init(osd->od_cl_seq, NULL, LUSTRE_SEQ_METADATA,
                             osd->od_svname, ss->ss_server_seq);
        if (rc != 0) {
                OBD_FREE_PTR(osd->od_cl_seq);
                osd->od_cl_seq = NULL;
        }

        RETURN(rc);
}
static int osd_prepare(const struct lu_env *env, struct lu_device *pdev,
                       struct lu_device *dev)
{
        struct osd_device *osd = osd_dev(dev);
        int                rc = 0;
        ENTRY;

        if (osd->od_quota_slave != NULL) {
                /* set up quota slave objects */
                rc = qsd_prepare(env, osd->od_quota_slave);
                if (rc != 0)
                        RETURN(rc);
        }

        rc = osd_fid_init(env, osd);

        RETURN(rc);
}
struct lu_device_operations osd_lu_ops = {
        .ldo_object_alloc       = osd_object_alloc,
        .ldo_process_config     = osd_process_config,
        .ldo_recovery_complete  = osd_recovery_complete,
        .ldo_prepare            = osd_prepare,
};
static void osd_type_start(struct lu_device_type *t)
{
}

static void osd_type_stop(struct lu_device_type *t)
{
}

int osd_fid_alloc(const struct lu_env *env, struct obd_export *exp,
                  struct lu_fid *fid, struct md_op_data *op_data)
{
        struct osd_device *osd = osd_dev(exp->exp_obd->obd_lu_dev);

        return seq_client_alloc_fid(env, osd->od_cl_seq, fid);
}
static struct lu_device_type_operations osd_device_type_ops = {
        .ldto_init              = osd_type_init,
        .ldto_fini              = osd_type_fini,

        .ldto_start             = osd_type_start,
        .ldto_stop              = osd_type_stop,

        .ldto_device_alloc      = osd_device_alloc,
        .ldto_device_free       = osd_device_free,

        .ldto_device_init       = osd_device_init,
        .ldto_device_fini       = osd_device_fini
};

static struct lu_device_type osd_device_type = {
        .ldt_tags     = LU_DEVICE_DT,
        .ldt_name     = LUSTRE_OSD_ZFS_NAME,
        .ldt_ops      = &osd_device_type_ops,
        .ldt_ctx_tags = LCT_LOCAL
};
static struct obd_ops osd_obd_device_ops = {
        .o_owner      = THIS_MODULE,
        .o_connect    = osd_obd_connect,
        .o_disconnect = osd_obd_disconnect,
        .o_fid_alloc  = osd_fid_alloc
};
static int __init osd_init(void)
{
        int rc;

        rc = osd_options_init();
        if (rc)
                return rc;

        rc = lu_kmem_init(osd_caches);
        if (rc)
                return rc;

        rc = class_register_type(&osd_obd_device_ops, NULL, true, NULL,
                                 LUSTRE_OSD_ZFS_NAME, &osd_device_type);
        if (rc)
                lu_kmem_fini(osd_caches);

        return rc;
}

static void __exit osd_exit(void)
{
        class_unregister_type(LUSTRE_OSD_ZFS_NAME);
        lu_kmem_fini(osd_caches);
}
module_param(osd_oi_count, int, 0444);
MODULE_PARM_DESC(osd_oi_count, "Number of Object Index containers to be created; only valid for a new filesystem.");

module_param(osd_txg_sync_delay_us, int, 0644);
MODULE_PARM_DESC(osd_txg_sync_delay_us,
                 "When zero or larger, delay N usec instead of doing TXG sync");
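/*
 * Example (illustrative; assumes the module is loaded as osd_zfs): to
 * make th_sync transactions return after a 1ms delay instead of a full
 * TXG sync, e.g. on a test rig where durability doesn't matter, set
 *
 *      options osd_zfs osd_txg_sync_delay_us=1000
 *
 * in the modprobe configuration, or write the value to
 * /sys/module/osd_zfs/parameters/osd_txg_sync_delay_us at runtime.
 */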
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_ZFS_NAME")");
MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");

module_init(osd_init);
module_exit(osd_exit);