/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2013, Intel Corporation.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd-zfs/osd_handler.c
 * Top-level entry points into osd module
 *
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 * Author: Mike Pershin <tappro@whamcloud.com>
 * Author: Johann Lombardi <johann@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_OSD

#include <lustre_ver.h>
#include <libcfs/libcfs.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <obd.h>
#include <obd_class.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lustre_param.h>
#include <md_object.h>

#include "osd_internal.h"

#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/spa.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa_impl.h>
#include <sys/zfs_znode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_prop.h>
#include <sys/sa_impl.h>
#include <sys/txg.h>
struct lu_context_key osd_key;

/* Slab for OSD object allocation */
struct kmem_cache *osd_object_kmem;

static struct lu_kmem_descr osd_caches[] = {
	{
		.ckd_cache = &osd_object_kmem,
		.ckd_name  = "zfs_osd_obj",
		.ckd_size  = sizeof(struct osd_object)
	},
	{
		.ckd_cache = NULL
	}
};
static void arc_prune_func(int64_t bytes, void *private)
{
	struct osd_device *od = private;
	struct lu_site	  *site = &od->od_site;
	struct lu_env	   env;
	int		   rc;

	rc = lu_env_init(&env, LCT_SHRINKER);
	if (rc) {
		CERROR("%s: can't initialize shrinker env: rc = %d\n",
		       od->od_svname, rc);
		return;
	}

	lu_site_purge(&env, site, (bytes >> 10));

	lu_env_fini(&env);
}
/*
 * Concurrency: doesn't access mutable data
 */
static int osd_root_get(const struct lu_env *env,
			struct dt_device *dev, struct lu_fid *f)
{
	lu_local_obj_fid(f, OSD_FS_ROOT_OID);
	return 0;
}
/*
 * OSD object methods.
 */

/*
 * Concurrency: shouldn't matter.
 */
static void osd_trans_commit_cb(void *cb_data, int error)
{
	struct osd_thandle	*oh = cb_data;
	struct thandle		*th = &oh->ot_super;
	struct osd_device	*osd = osd_dt_dev(th->th_dev);
	struct lu_device	*lud = &th->th_dev->dd_lu_dev;
	struct dt_txn_commit_cb	*dcb, *tmp;

	ENTRY;

	if (error) {
		if (error == ECANCELED)
			CWARN("%s: transaction @0x%p was aborted\n",
			      osd_dt_dev(th->th_dev)->od_svname, th);
		else
			CERROR("%s: transaction @0x%p commit error: rc = %d\n",
			       osd_dt_dev(th->th_dev)->od_svname, th, error);
	}

	dt_txn_hook_commit(th);

	/* call per-transaction callbacks if any */
	list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage)
		dcb->dcb_func(NULL, th, dcb, error);

	/* Unlike ldiskfs, zfs updates space accounting at commit time.
	 * As a consequence, op_end is called only now to inform the quota
	 * slave component that reserved quota space is now accounted in
	 * usage and should be released. Quota space won't be adjusted at
	 * this point since we can't provide a suitable environment. It will
	 * be performed asynchronously by a lquota thread. */
	qsd_op_end(NULL, osd->od_quota_slave, &oh->ot_quota_trans);

	lu_device_put(lud);
	th->th_dev = NULL;
	lu_context_exit(&th->th_ctx);
	lu_context_fini(&th->th_ctx);
	thandle_put(&oh->ot_super);

	EXIT;
}
static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
{
	struct osd_thandle *oh;

	oh = container_of0(th, struct osd_thandle, ot_super);
	list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);

	return 0;
}
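
/*
 * Illustrative usage sketch (not part of the original file): a caller that
 * needs to know when its update is durable fills in a dt_txn_commit_cb and
 * registers it before the transaction is stopped.  The callback is invoked
 * from osd_trans_commit_cb() above with the commit error code.  The name
 * my_commit_cb is hypothetical:
 *
 *	static void my_commit_cb(struct lu_env *env, struct thandle *th,
 *				 struct dt_txn_commit_cb *cb, int err)
 *	{
 *		... wake up a waiter once the change has hit stable storage ...
 *	}
 *
 *	dcb->dcb_func = my_commit_cb;
 *	rc = dt_trans_cb_add(th, dcb);	(routed here via dt_trans_cb_add)
 */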
/*
 * Concurrency: shouldn't matter.
 */
static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
			   struct thandle *th)
{
	struct osd_thandle *oh;
	int		    rc;
	ENTRY;

	oh = container_of0(th, struct osd_thandle, ot_super);
	LASSERT(oh);
	LASSERT(oh->ot_tx);

	rc = dt_txn_hook_start(env, d, th);
	if (rc != 0)
		RETURN(rc);

	if (oh->ot_write_commit && OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC))
		/* Unlike ldiskfs, ZFS checks for available space and returns
		 * -ENOSPC when assigning txg */
		RETURN(-ENOSPC);

	rc = -dmu_tx_assign(oh->ot_tx, TXG_WAIT);
	if (unlikely(rc != 0)) {
		struct osd_device *osd = osd_dt_dev(d);
		/* dmu will call commit callback with error code during abort */
		if (!lu_device_is_md(&d->dd_lu_dev) && rc == -ENOSPC)
			CERROR("%s: failed to start transaction due to ENOSPC. "
			       "Metadata overhead is underestimated or "
			       "grant_ratio is too low.\n", osd->od_svname);
		else
			CERROR("%s: can't assign tx: rc = %d\n",
			       osd->od_svname, rc);
	} else {
		/* add commit callback */
		dmu_tx_callback_register(oh->ot_tx, osd_trans_commit_cb, oh);
		oh->ot_assigned = 1;
		lu_context_init(&th->th_ctx, th->th_tags);
		lu_context_enter(&th->th_ctx);
		lu_device_get(&d->dd_lu_dev);
	}

	RETURN(rc);
}
/*
 * Concurrency: shouldn't matter.
 */
static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
			  struct thandle *th)
{
	struct osd_device  *osd = osd_dt_dev(th->th_dev);
	struct osd_thandle *oh;
	uint64_t	    txg;
	int		    rc;
	ENTRY;

	oh = container_of0(th, struct osd_thandle, ot_super);

	if (oh->ot_assigned == 0) {
		LASSERT(oh->ot_tx);
		dmu_tx_abort(oh->ot_tx);
		osd_object_sa_dirty_rele(oh);
		/* there won't be any commit, release reserved quota space
		 * now, if any */
		qsd_op_end(env, osd->od_quota_slave, &oh->ot_quota_trans);
		thandle_put(&oh->ot_super);
		RETURN(0);
	}

	/* When doing our own inode accounting, the ZAPs storing per-uid/gid
	 * usage are updated at operation execution time, so we should call
	 * qsd_op_end() straight away. Otherwise (for blk accounting maintained
	 * by ZFS and when #inode is estimated from #blks) accounting is updated
	 * at commit time and the call to qsd_op_end() must be delayed */
	if (oh->ot_quota_trans.lqt_id_cnt > 0 &&
	    !oh->ot_quota_trans.lqt_ids[0].lqi_is_blk &&
	    !osd->od_quota_iused_est)
		qsd_op_end(env, osd->od_quota_slave, &oh->ot_quota_trans);

	rc = dt_txn_hook_stop(env, th);
	if (rc != 0)
		CDEBUG(D_OTHER, "%s: transaction hook failed: rc = %d\n",
		       osd->od_svname, rc);

	LASSERT(oh->ot_tx);
	txg = oh->ot_tx->tx_txg;

	osd_object_sa_dirty_rele(oh);
	dmu_tx_commit(oh->ot_tx);

	if (th->th_sync)
		txg_wait_synced(dmu_objset_pool(osd->od_os), txg);

	RETURN(rc);
}
static struct thandle *osd_trans_create(const struct lu_env *env,
					struct dt_device *dt)
{
	struct osd_device  *osd = osd_dt_dev(dt);
	struct osd_thandle *oh;
	struct thandle	   *th;
	dmu_tx_t	   *tx;
	ENTRY;

	tx = dmu_tx_create(osd->od_os);
	if (tx == NULL)
		RETURN(ERR_PTR(-ENOMEM));

	/* alloc callback data */
	OBD_ALLOC_PTR(oh);
	if (oh == NULL) {
		dmu_tx_abort(tx);
		RETURN(ERR_PTR(-ENOMEM));
	}

	oh->ot_tx = tx;
	INIT_LIST_HEAD(&oh->ot_dcb_list);
	INIT_LIST_HEAD(&oh->ot_sa_list);
	sema_init(&oh->ot_sa_lock, 1);
	memset(&oh->ot_quota_trans, 0, sizeof(oh->ot_quota_trans));
	th = &oh->ot_super;
	th->th_dev = dt;
	th->th_result = 0;
	th->th_tags = LCT_TX_HANDLE;
	atomic_set(&th->th_refc, 1);
	th->th_alloc_size = sizeof(*oh);
	RETURN(th);
}
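
/*
 * For context (a sketch using the generic dt_object.h wrappers, not code
 * from this file): the upper layers drive the three handlers above in a
 * fixed sequence.
 *
 *	th = dt_trans_create(env, dt);		-> osd_trans_create()
 *	...declare the intended updates against th...
 *	rc = dt_trans_start(env, dt, th);	-> osd_trans_start()
 *	...execute the updates...
 *	rc = dt_trans_stop(env, dt, th);	-> osd_trans_stop()
 */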
/* Estimate the number of objects from a number of blocks */
uint64_t osd_objs_count_estimate(uint64_t refdbytes, uint64_t usedobjs,
				 uint64_t nrblocks)
{
	uint64_t est_objs, est_refdblocks, est_usedobjs;

	/* Compute an nrblocks estimate based on the actual number of
	 * dnodes that could fit in the space.  Since we don't know the
	 * overhead associated with each dnode (xattrs, SAs, VDEV overhead,
	 * etc) just using DNODE_SHIFT isn't going to give a good estimate.
	 * Instead, compute an estimate based on the average space usage per
	 * dnode, with an upper and lower cap.
	 *
	 * In case there aren't many dnodes or blocks used yet, add a small
	 * correction factor using OSD_DNODE_EST_SHIFT.  This correction
	 * factor gradually disappears as the number of real dnodes grows.
	 * This also avoids the need to check for divide-by-zero later.
	 */
	CLASSERT(OSD_DNODE_MIN_BLKSHIFT > 0);
	CLASSERT(OSD_DNODE_EST_BLKSHIFT > 0);

	est_refdblocks = (refdbytes >> SPA_MAXBLOCKSHIFT) +
			 (OSD_DNODE_EST_COUNT >> OSD_DNODE_EST_BLKSHIFT);
	est_usedobjs   = usedobjs + OSD_DNODE_EST_COUNT;

	/* Average space/dnode more than maximum dnode size, use max dnode
	 * size to estimate free dnodes from adjusted free blocks count.
	 * OSTs typically use more than one block dnode so this case applies. */
	if (est_usedobjs <= est_refdblocks * 2) {
		est_objs = nrblocks;

	/* Average space/dnode smaller than min dnode size (probably due to
	 * metadnode compression), use min dnode size to estimate the number
	 * of objects.
	 * An MDT typically uses below 512 bytes/dnode so this case applies. */
	} else if (est_usedobjs >= (est_refdblocks << OSD_DNODE_MIN_BLKSHIFT)) {
		est_objs = nrblocks << OSD_DNODE_MIN_BLKSHIFT;

	} else {
		/* Between the extremes, we try to use the average size of
		 * existing dnodes to compute the number of dnodes that fit
		 * into nrblocks:
		 *
		 *	est_objs = nrblocks * (est_usedobjs / est_refdblocks);
		 *
		 * but this may overflow 64 bits or become 0 if not handled
		 * well.
		 *
		 * We know nrblocks is below (64 - 17 = 47) bits from
		 * SPA_MAXBLKSHIFT, and est_usedobjs is under 48 bits due to
		 * DN_MAX_OBJECT_SHIFT, which means that multiplying them may
		 * get as large as 2 ^ 95.
		 *
		 * We also know (est_usedobjs / est_refdblocks) is between 2
		 * and 256, due to the above checks, so we can safely compute
		 * this first.  We care more about accuracy on the MDT (many
		 * dnodes/block), which is good because this is where
		 * truncation errors are smallest.  This adds 8 bits to
		 * nrblocks so we can use 7 bits to compute a fixed-point
		 * fraction and nrblocks can still fit in 64 bits. */
		unsigned dnodes_per_block =
			(est_usedobjs << 7) / est_refdblocks;

		est_objs = (nrblocks * dnodes_per_block) >> 7;
	}
	return est_objs;
}
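
/*
 * Worked example (illustrative numbers only; assumes SPA_MAXBLOCKSHIFT == 17,
 * i.e. 128KiB blocks, and ignores the small OSD_DNODE_EST_COUNT correction):
 * with refdbytes = 1GiB and usedobjs = 65536, est_refdblocks = 2^30 >> 17 =
 * 8192, so the average is 8 dnodes/block and the middle branch is taken.
 * Then dnodes_per_block = (65536 << 7) / 8192 = 1024 (8.0 in 7-bit fixed
 * point), and nrblocks = 1000 free blocks yields
 * est_objs = (1000 * 1024) >> 7 = 8000 free dnodes.
 */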
static int osd_objset_statfs(struct objset *os, struct obd_statfs *osfs)
{
	uint64_t refdbytes, availbytes, usedobjs, availobjs;
	uint64_t est_availobjs;
	uint64_t reserved;

	dmu_objset_space(os, &refdbytes, &availbytes, &usedobjs,
			 &availobjs);

	/*
	 * ZFS allows multiple block sizes.  For statfs, Linux makes no
	 * proper distinction between bsize and frsize.  For calculations
	 * of free and used blocks it incorrectly uses bsize instead of
	 * frsize, but bsize is also used as the optimal blocksize.  We
	 * return the largest possible block size as the I/O size for
	 * optimum performance and scale the free and used block counts
	 * appropriately.
	 */
	osfs->os_bsize = 1ULL << SPA_MAXBLOCKSHIFT;

	osfs->os_blocks = (refdbytes + availbytes) >> SPA_MAXBLOCKSHIFT;
	osfs->os_bfree = availbytes >> SPA_MAXBLOCKSHIFT;
	osfs->os_bavail = osfs->os_bfree; /* no extra root reservation */

	/* Take replication (i.e. number of copies) into account */
	osfs->os_bavail /= os->os_copies;
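
	/*
	 * Worked example (illustrative only; assumes SPA_MAXBLOCKSHIFT == 17):
	 * a dataset with 1GiB referenced and 3GiB available reports
	 * os_bsize = 128KiB, os_blocks = 2^32 >> 17 = 32768 and
	 * os_bfree = 24576; with the "copies=2" property each new block is
	 * written twice, so os_bavail is halved to 12288.
	 */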
	/*
	 * Reserve some space so we don't run into ENOSPC due to grants not
	 * accounting for metadata overhead in ZFS, and to avoid fragmentation.
	 * Rather than report this via os_bavail (which makes users unhappy if
	 * they can't fill the filesystem 100%), reduce os_blocks as well.
	 *
	 * Reserve 0.78% of total space, at least 4MB for small filesystems,
	 * for internal files to be created/unlinked when space is tight.
	 */
	CLASSERT(OSD_STATFS_RESERVED_BLKS > 0);
	if (likely(osfs->os_blocks >=
			OSD_STATFS_RESERVED_BLKS << OSD_STATFS_RESERVED_SHIFT))
		reserved = osfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
	else
		reserved = OSD_STATFS_RESERVED_BLKS;

	osfs->os_blocks -= reserved;
	osfs->os_bfree	-= MIN(reserved, osfs->os_bfree);
	osfs->os_bavail -= MIN(reserved, osfs->os_bavail);
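
	/*
	 * Arithmetic check (assuming OSD_STATFS_RESERVED_SHIFT == 7 and
	 * OSD_STATFS_RESERVED_BLKS == 32; both are defined in osd_internal.h
	 * and are assumptions here): os_blocks >> 7 withholds 1/128 of the
	 * space, which is the 0.78% quoted above, while the 32-block floor
	 * at 128KiB per block gives the 4MB minimum for small filesystems.
	 */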
	/*
	 * The availobjs value returned from dmu_objset_space() is largely
	 * useless, since it reports the number of objects that might
	 * theoretically still fit into the dataset, independent of minor
	 * issues like how much space is actually available in the pool.
	 * Compute a better estimate in osd_objs_count_estimate().
	 */
	est_availobjs = osd_objs_count_estimate(refdbytes, usedobjs,
						osfs->os_bfree);

	osfs->os_ffree = min(availobjs, est_availobjs);
	osfs->os_files = osfs->os_ffree + usedobjs;

	/* ZFS XXX: fill in backing dataset FSID/UUID
	   memcpy(osfs->os_fsid, .... ); */

	/* We're a zfs filesystem. */
	osfs->os_type = UBERBLOCK_MAGIC;

	/* ZFS XXX: fill in appropriate OS_STATE_{DEGRADED,READONLY} flags
	   osfs->os_state = vf_to_stf(vfsp->vfs_flag);
	   if (sb->s_flags & MS_RDONLY)
		   osfs->os_state = OS_STATE_READONLY;
	 */

	osfs->os_namelen = MAXNAMELEN;
	osfs->os_maxbytes = OBD_OBJECT_EOF;

	return 0;
}
/*
 * Concurrency: shouldn't matter.
 */
int osd_statfs(const struct lu_env *env, struct dt_device *d,
	       struct obd_statfs *osfs)
{
	struct osd_device *osd = osd_dt_dev(d);
	int		   rc;
	ENTRY;

	rc = osd_objset_statfs(osd->od_os, osfs);
	if (unlikely(rc != 0))
		RETURN(rc);

	osfs->os_bavail -= min_t(obd_size,
				 OSD_GRANT_FOR_LOCAL_OIDS / osfs->os_bsize,
				 osfs->os_bavail);
	RETURN(0);
}
static int osd_blk_insert_cost(void)
{
	int max_blockshift, nr_blkptrshift;

	/* max_blockshift is the log2 of the number of blocks needed to reach
	 * the maximum filesize (that's to say 2^64) */
	max_blockshift = DN_MAX_OFFSET_SHIFT - SPA_MAXBLOCKSHIFT;

	/* nr_blkptrshift is the log2 of the number of block pointers that can
	 * be stored in an indirect block */
	CLASSERT(DN_MAX_INDBLKSHIFT > SPA_BLKPTRSHIFT);
	nr_blkptrshift = DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT;

	/* max_blockshift / nr_blkptrshift is thus the maximum depth of the
	 * tree. We add +1 for rounding purposes.
	 * The tree depth times the indirect block size gives us the maximum
	 * cost of inserting a block in the tree */
	return (max_blockshift / nr_blkptrshift + 1) * (1 << DN_MAX_INDBLKSHIFT);
}
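
/*
 * Worked example with the usual ZFS constants (an assumption; these are
 * defined by ZFS headers, not here): DN_MAX_OFFSET_SHIFT == 64 and
 * SPA_MAXBLOCKSHIFT == 17 give max_blockshift = 47; DN_MAX_INDBLKSHIFT == 14
 * and SPA_BLKPTRSHIFT == 7 give nr_blkptrshift = 7 (128 block pointers per
 * 16KiB indirect block).  The worst-case insert cost is then
 * (47 / 7 + 1) * 16384 = 7 * 16KiB = 112KiB.
 */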
/*
 * Concurrency: doesn't access mutable data.
 */
static void osd_conf_get(const struct lu_env *env,
			 const struct dt_device *dev,
			 struct dt_device_param *param)
{
	struct osd_device *osd = osd_dt_dev(dev);

	/*
	 * XXX should be taken from not-yet-existing fs abstraction layer.
	 */
	param->ddp_max_name_len	= MAXNAMELEN;
	param->ddp_max_nlink	= 1 << 31; /* it's 8 bytes on disk */
	param->ddp_block_shift	= 12; /* XXX */
	param->ddp_mount_type	= LDD_MT_ZFS;

	param->ddp_mntopts	= MNTOPT_USERXATTR;
	if (osd->od_posix_acl)
		param->ddp_mntopts |= MNTOPT_ACL;
	param->ddp_max_ea_size	= DXATTR_MAX_ENTRY_SIZE;

	/* for maxbytes, report same value as ZPL */
	param->ddp_maxbytes	= MAX_LFS_FILESIZE;

	/* Default reserved fraction of the available space that should be kept
	 * for error margin. Unfortunately, there are many factors that can
	 * impact the overhead with zfs, so let's be very cautious for now and
	 * reserve 20% of the available space which is not given out as grant.
	 * This tunable can be changed on a live system via procfs if needed. */
	param->ddp_grant_reserved = 20;

	/* inodes are dynamically allocated, so we report the per-inode space
	 * consumption to upper layers. This static value is not really accurate
	 * and we should use the same logic as in osd_objset_statfs() to
	 * estimate the real size consumed by an object */
	param->ddp_inodespace = OSD_DNODE_EST_COUNT;
	/* per-fragment overhead to be used by the client code */
	param->ddp_grant_frag = osd_blk_insert_cost();
}
/*
 * Concurrency: shouldn't matter.
 */
static int osd_sync(const struct lu_env *env, struct dt_device *d)
{
	struct osd_device *osd = osd_dt_dev(d);
	CDEBUG(D_CACHE, "syncing OSD %s\n", LUSTRE_OSD_ZFS_NAME);
	txg_wait_synced(dmu_objset_pool(osd->od_os), 0ULL);
	CDEBUG(D_CACHE, "synced OSD %s\n", LUSTRE_OSD_ZFS_NAME);
	return 0;
}
static int osd_commit_async(const struct lu_env *env, struct dt_device *dev)
{
	struct osd_device *osd = osd_dt_dev(dev);
	tx_state_t	  *tx = &dmu_objset_pool(osd->od_os)->dp_tx;
	uint64_t	   txg;

	mutex_enter(&tx->tx_sync_lock);
	txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg) {
		tx->tx_quiesce_txg_waiting = txg;
		cv_broadcast(&tx->tx_quiesce_more_cv);
	}
	mutex_exit(&tx->tx_sync_lock);

	return 0;
}
/*
 * Concurrency: shouldn't matter.
 */
static int osd_ro(const struct lu_env *env, struct dt_device *d)
{
	struct osd_device *osd = osd_dt_dev(d);
	ENTRY;

	CERROR("%s: *** setting device %s read-only ***\n",
	       osd->od_svname, LUSTRE_OSD_ZFS_NAME);
	osd->od_rdonly = 1;
	spa_freeze(dmu_objset_spa(osd->od_os));

	RETURN(0);
}
/*
 * Concurrency: serialization provided by callers.
 */
static int osd_init_capa_ctxt(const struct lu_env *env, struct dt_device *d,
			      int mode, unsigned long timeout, __u32 alg,
			      struct lustre_capa_key *keys)
{
	struct osd_device *dev = osd_dt_dev(d);
	ENTRY;

	dev->od_fl_capa = mode;
	dev->od_capa_timeout = timeout;
	dev->od_capa_alg = alg;
	dev->od_capa_keys = keys;
	RETURN(0);
}
static struct dt_device_operations osd_dt_ops = {
	.dt_root_get		= osd_root_get,
	.dt_statfs		= osd_statfs,
	.dt_trans_create	= osd_trans_create,
	.dt_trans_start		= osd_trans_start,
	.dt_trans_stop		= osd_trans_stop,
	.dt_trans_cb_add	= osd_trans_cb_add,
	.dt_conf_get		= osd_conf_get,
	.dt_sync		= osd_sync,
	.dt_commit_async	= osd_commit_async,
	.dt_ro			= osd_ro,
	.dt_init_capa_ctxt	= osd_init_capa_ctxt,
};
/*
 * DMU OSD device type methods
 */
static int osd_type_init(struct lu_device_type *t)
{
	LU_CONTEXT_KEY_INIT(&osd_key);
	return lu_context_key_register(&osd_key);
}

static void osd_type_fini(struct lu_device_type *t)
{
	lu_context_key_degister(&osd_key);
}
static void *osd_key_init(const struct lu_context *ctx,
			  struct lu_context_key *key)
{
	struct osd_thread_info *info;

	OBD_ALLOC_PTR(info);
	if (info != NULL)
		info->oti_env = container_of(ctx, struct lu_env, le_ctx);
	else
		info = ERR_PTR(-ENOMEM);
	return info;
}

static void osd_key_fini(const struct lu_context *ctx,
			 struct lu_context_key *key, void *data)
{
	struct osd_thread_info *info = data;

	OBD_FREE_PTR(info);
}

static void osd_key_exit(const struct lu_context *ctx,
			 struct lu_context_key *key, void *data)
{
	struct osd_thread_info *info = data;

	memset(info, 0, sizeof(*info));
}
struct lu_context_key osd_key = {
	.lct_tags = LCT_DT_THREAD | LCT_MD_THREAD | LCT_MG_THREAD | LCT_LOCAL,
	.lct_init = osd_key_init,
	.lct_fini = osd_key_fini,
	.lct_exit = osd_key_exit
};
static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
{
	ENTRY;

	/* shutdown quota slave instance associated with the device */
	if (o->od_quota_slave != NULL) {
		qsd_fini(env, o->od_quota_slave);
		o->od_quota_slave = NULL;
	}

	RETURN(0);
}
static void osd_xattr_changed_cb(void *arg, uint64_t newval)
{
	struct osd_device *osd = arg;

	osd->od_xattr_in_sa = (newval == ZFS_XATTR_SA);
}
static int osd_objset_open(struct osd_device *o)
{
	uint64_t version = ZPL_VERSION;
	uint64_t sa_obj;
	int	 rc;
	ENTRY;

	rc = -dmu_objset_own(o->od_mntdev, DMU_OST_ZFS, B_FALSE, o, &o->od_os);
	if (rc) {
		o->od_os = NULL;
		goto out;
	}

	/* Check ZFS version */
	rc = -zap_lookup(o->od_os, MASTER_NODE_OBJ,
			 ZPL_VERSION_STR, 8, 1, &version);
	if (rc) {
		CERROR("%s: Error looking up ZPL VERSION\n", o->od_mntdev);
		/*
		 * We can't return ENOENT because that would mean the objset
		 * didn't exist.
		 */
		GOTO(out, rc = -EIO);
	}

	rc = -zap_lookup(o->od_os, MASTER_NODE_OBJ,
			 ZFS_SA_ATTRS, 8, 1, &sa_obj);
	if (rc)
		GOTO(out, rc);

	rc = -sa_setup(o->od_os, sa_obj, zfs_attr_table,
		       ZPL_END, &o->z_attr_table);
	if (rc)
		GOTO(out, rc);

	rc = -zap_lookup(o->od_os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ,
			 8, 1, &o->od_rootid);
	if (rc) {
		CERROR("%s: lookup for root failed: rc = %d\n",
		       o->od_svname, rc);
		GOTO(out, rc);
	}

	/* Check that user/group usage tracking is supported */
	if (!dmu_objset_userused_enabled(o->od_os) ||
	    DMU_USERUSED_DNODE(o->od_os)->dn_type != DMU_OT_USERGROUP_USED ||
	    DMU_GROUPUSED_DNODE(o->od_os)->dn_type != DMU_OT_USERGROUP_USED) {
		CERROR("%s: Space accounting not supported by this target, "
		       "aborting\n", o->od_svname);
		GOTO(out, rc = -ENOTSUPP);
	}

out:
	if (rc != 0 && o->od_os != NULL)
		dmu_objset_disown(o->od_os, o);

	RETURN(rc);
}
static int osd_mount(const struct lu_env *env,
		     struct osd_device *o, struct lustre_cfg *cfg)
{
	struct dsl_dataset *ds;
	char		   *mntdev = lustre_cfg_string(cfg, 1);
	char		   *svname = lustre_cfg_string(cfg, 4);
	dmu_buf_t	   *rootdb;
	dsl_pool_t	   *dp;
	const char	   *opts;
	int		    rc;
	ENTRY;

	if (o->od_os != NULL)
		RETURN(0);

	if (mntdev == NULL || svname == NULL)
		RETURN(-EINVAL);

	rc = strlcpy(o->od_mntdev, mntdev, sizeof(o->od_mntdev));
	if (rc >= sizeof(o->od_mntdev))
		RETURN(-E2BIG);

	rc = strlcpy(o->od_svname, svname, sizeof(o->od_svname));
	if (rc >= sizeof(o->od_svname))
		RETURN(-E2BIG);

	if (server_name_is_ost(o->od_svname))
		o->od_is_ost = 1;

	rc = osd_objset_open(o);
	if (rc) {
		CERROR("%s: can't open objset %s: rc = %d\n", o->od_svname,
		       o->od_mntdev, rc);
		RETURN(rc);
	}

	ds = dmu_objset_ds(o->od_os);
	dp = dmu_objset_pool(o->od_os);

	dsl_pool_config_enter(dp, FTAG);
	rc = dsl_prop_register(ds, "xattr", osd_xattr_changed_cb, o);
	dsl_pool_config_exit(dp, FTAG);
	if (rc)
		CWARN("%s: can't register xattr callback, ignore: rc=%d\n",
		      o->od_svname, rc);

	rc = __osd_obj2dbuf(env, o->od_os, o->od_rootid, &rootdb);
	if (rc) {
		CERROR("%s: obj2dbuf() failed: rc = %d\n", o->od_svname, rc);
		dmu_objset_disown(o->od_os, o);
		o->od_os = NULL;
		RETURN(rc);
	}

	o->od_root = rootdb->db_object;
	sa_buf_rele(rootdb, osd_obj_tag);

	/* 1. initialize oi before any file create or file open */
	rc = osd_oi_init(env, o);
	if (rc)
		GOTO(err, rc);

	rc = lu_site_init(&o->od_site, osd2lu_dev(o));
	if (rc)
		GOTO(err, rc);
	o->od_site.ls_bottom_dev = osd2lu_dev(o);

	rc = lu_site_init_finish(&o->od_site);
	if (rc)
		GOTO(err, rc);

	rc = osd_convert_root_to_new_seq(env, o);
	if (rc)
		GOTO(err, rc);

	/* Use our own ZAP for inode accounting by default, this can be changed
	 * via procfs to estimate the inode usage from the block usage */
	o->od_quota_iused_est = 0;

	rc = osd_procfs_init(o, o->od_svname);
	if (rc)
		GOTO(err, rc);

	o->arc_prune_cb = arc_add_prune_callback(arc_prune_func, o);

	/* initialize quota slave instance */
	o->od_quota_slave = qsd_init(env, o->od_svname, &o->od_dt_dev,
				     o->od_proc_entry);
	if (IS_ERR(o->od_quota_slave)) {
		rc = PTR_ERR(o->od_quota_slave);
		o->od_quota_slave = NULL;
		GOTO(err, rc);
	}

	/* parse mount option "noacl", and enable ACL by default */
	opts = lustre_cfg_string(cfg, 3);
	if (opts == NULL || strstr(opts, "noacl") == NULL)
		o->od_posix_acl = 1;

	RETURN(0);
err:
	RETURN(rc);
}
static void osd_umount(const struct lu_env *env, struct osd_device *o)
{
	ENTRY;

	if (atomic_read(&o->od_zerocopy_alloc))
		CERROR("%s: lost %d allocated page(s)\n", o->od_svname,
		       atomic_read(&o->od_zerocopy_alloc));
	if (atomic_read(&o->od_zerocopy_loan))
		CERROR("%s: lost %d loaned abuf(s)\n", o->od_svname,
		       atomic_read(&o->od_zerocopy_loan));
	if (atomic_read(&o->od_zerocopy_pin))
		CERROR("%s: lost %d pinned dbuf(s)\n", o->od_svname,
		       atomic_read(&o->od_zerocopy_pin));

	if (o->od_os != NULL) {
		/* force a txg sync to get all commit callbacks */
		txg_wait_synced(dmu_objset_pool(o->od_os), 0ULL);

		/* close the object set */
		dmu_objset_disown(o->od_os, o);

		o->od_os = NULL;
	}

	EXIT;
}
static int osd_device_init0(const struct lu_env *env,
			    struct osd_device *o,
			    struct lustre_cfg *cfg)
{
	struct lu_device *l = osd2lu_dev(o);
	int		  rc;

	/* if the module was re-loaded, env can lose its keys */
	rc = lu_env_refill((struct lu_env *)env);
	if (rc)
		GOTO(out, rc);

	l->ld_ops = &osd_lu_ops;
	o->od_dt_dev.dd_ops = &osd_dt_ops;

	o->od_capa_hash = init_capa_hash();
	if (o->od_capa_hash == NULL)
		GOTO(out, rc = -ENOMEM);

out:
	RETURN(rc);
}
static struct lu_device *osd_device_fini(const struct lu_env *env,
					 struct lu_device *dev);

static struct lu_device *osd_device_alloc(const struct lu_env *env,
					  struct lu_device_type *type,
					  struct lustre_cfg *cfg)
{
	struct osd_device *dev;
	int		   rc;

	OBD_ALLOC_PTR(dev);
	if (dev == NULL)
		return ERR_PTR(-ENOMEM);

	rc = dt_device_init(&dev->od_dt_dev, type);
	if (rc == 0) {
		rc = osd_device_init0(env, dev, cfg);
		if (rc == 0) {
			rc = osd_mount(env, dev, cfg);
			if (rc)
				osd_device_fini(env, osd2lu_dev(dev));
		}
		if (rc)
			dt_device_fini(&dev->od_dt_dev);
	}

	if (unlikely(rc != 0))
		OBD_FREE_PTR(dev);

	return rc == 0 ? osd2lu_dev(dev) : ERR_PTR(rc);
}
static struct lu_device *osd_device_free(const struct lu_env *env,
					 struct lu_device *d)
{
	struct osd_device *o = osd_dev(d);
	ENTRY;

	cleanup_capa_hash(o->od_capa_hash);
	/* XXX: make osd top device in order to release reference */
	d->ld_site->ls_top_dev = d;
	lu_site_purge(env, d->ld_site, -1);
	if (!cfs_hash_is_empty(d->ld_site->ls_obj_hash)) {
		LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
		lu_site_print(env, d->ld_site, &msgdata, lu_cdebug_printer);
	}
	lu_site_fini(&o->od_site);
	dt_device_fini(&o->od_dt_dev);
	OBD_FREE_PTR(o);

	RETURN(NULL);
}
static struct lu_device *osd_device_fini(const struct lu_env *env,
					 struct lu_device *d)
{
	struct osd_device  *o = osd_dev(d);
	struct dsl_dataset *ds;
	int		    rc;
	ENTRY;

	osd_shutdown(env, o);
	osd_oi_fini(env, o);

	if (o->od_os) {
		ds = dmu_objset_ds(o->od_os);
		rc = dsl_prop_unregister(ds, "xattr", osd_xattr_changed_cb, o);
		if (rc)
			CERROR("%s: dsl_prop_unregister xattr error %d\n",
			       o->od_svname, rc);
		if (o->arc_prune_cb != NULL) {
			arc_remove_prune_callback(o->arc_prune_cb);
			o->arc_prune_cb = NULL;
		}
		osd_sync(env, lu2dt_dev(d));
		txg_wait_callbacks(spa_get_dsl(dmu_objset_spa(o->od_os)));
	}

	rc = osd_procfs_fini(o);
	if (rc) {
		CERROR("proc fini error %d\n", rc);
		RETURN(ERR_PTR(rc));
	}

	if (o->od_os)
		osd_umount(env, o);

	RETURN(NULL);
}
static int osd_device_init(const struct lu_env *env, struct lu_device *d,
			   const char *name, struct lu_device *next)
{
	return 0;
}

/*
 * To be removed: setup is performed by osd_device_{init,alloc} and
 * cleanup is performed by osd_device_{fini,free}.
 */
static int osd_process_config(const struct lu_env *env,
			      struct lu_device *d, struct lustre_cfg *cfg)
{
	struct osd_device *o = osd_dev(d);
	int		   rc;
	ENTRY;

	switch (cfg->lcfg_command) {
	case LCFG_SETUP:
		rc = osd_mount(env, o, cfg);
		break;
	case LCFG_CLEANUP:
		rc = osd_shutdown(env, o);
		break;
	case LCFG_PARAM: {
		LASSERT(&o->od_dt_dev);
		rc = class_process_proc_param(PARAM_OSD, lprocfs_osd_obd_vars,
					      cfg, &o->od_dt_dev);
		if (rc > 0 || rc == -ENOSYS)
			rc = class_process_proc_param(PARAM_OST,
						      lprocfs_osd_obd_vars,
						      cfg, &o->od_dt_dev);
		break;
	}
	default:
		rc = -ENOTTY;
	}

	RETURN(rc);
}
static int osd_recovery_complete(const struct lu_env *env, struct lu_device *d)
{
	struct osd_device *osd = osd_dev(d);
	int		   rc = 0;
	ENTRY;

	if (osd->od_quota_slave == NULL)
		RETURN(0);

	/* start qsd instance on recovery completion, this notifies the quota
	 * slave code that we are about to process new requests now */
	rc = qsd_start(env, osd->od_quota_slave);
	RETURN(rc);
}
/*
 * we use exports to track all osd users
 */
static int osd_obd_connect(const struct lu_env *env, struct obd_export **exp,
			   struct obd_device *obd, struct obd_uuid *cluuid,
			   struct obd_connect_data *data, void *localdata)
{
	struct osd_device    *osd = osd_dev(obd->obd_lu_dev);
	struct lustre_handle  conn;
	int		      rc;
	ENTRY;

	CDEBUG(D_CONFIG, "connect #%d\n", osd->od_connects);

	rc = class_connect(&conn, obd, cluuid);
	if (rc)
		RETURN(rc);

	*exp = class_conn2export(&conn);

	spin_lock(&obd->obd_dev_lock);
	osd->od_connects++;
	spin_unlock(&obd->obd_dev_lock);

	RETURN(0);
}
/*
 * once the last export (we don't count self-export) disappears,
 * the osd can be released
 */
static int osd_obd_disconnect(struct obd_export *exp)
{
	struct obd_device *obd = exp->exp_obd;
	struct osd_device *osd = osd_dev(obd->obd_lu_dev);
	int		   rc, release = 0;
	ENTRY;

	/* Only disconnect the underlying layers on the final disconnect. */
	spin_lock(&obd->obd_dev_lock);
	osd->od_connects--;
	if (osd->od_connects == 0)
		release = 1;
	spin_unlock(&obd->obd_dev_lock);

	rc = class_disconnect(exp); /* bz 9811 */

	if (rc == 0 && release)
		class_manual_cleanup(obd);
	RETURN(rc);
}
static int osd_prepare(const struct lu_env *env, struct lu_device *pdev,
		       struct lu_device *dev)
{
	struct osd_device *osd = osd_dev(dev);
	int		   rc = 0;
	ENTRY;

	if (osd->od_quota_slave != NULL)
		/* set up quota slave objects */
		rc = qsd_prepare(env, osd->od_quota_slave);

	RETURN(rc);
}
struct lu_device_operations osd_lu_ops = {
	.ldo_object_alloc	= osd_object_alloc,
	.ldo_process_config	= osd_process_config,
	.ldo_recovery_complete	= osd_recovery_complete,
	.ldo_prepare		= osd_prepare,
};
static void osd_type_start(struct lu_device_type *t)
{
}

static void osd_type_stop(struct lu_device_type *t)
{
}
int osd_fid_alloc(const struct lu_env *env, struct obd_export *exp,
		  struct lu_fid *fid, struct md_op_data *op_data)
{
	struct osd_device *osd = osd_dev(exp->exp_obd->obd_lu_dev);

	return seq_client_alloc_fid(env, osd->od_cl_seq, fid);
}
static struct lu_device_type_operations osd_device_type_ops = {
	.ldto_init		= osd_type_init,
	.ldto_fini		= osd_type_fini,

	.ldto_start		= osd_type_start,
	.ldto_stop		= osd_type_stop,

	.ldto_device_alloc	= osd_device_alloc,
	.ldto_device_free	= osd_device_free,

	.ldto_device_init	= osd_device_init,
	.ldto_device_fini	= osd_device_fini
};

static struct lu_device_type osd_device_type = {
	.ldt_tags     = LU_DEVICE_DT,
	.ldt_name     = LUSTRE_OSD_ZFS_NAME,
	.ldt_ops      = &osd_device_type_ops,
	.ldt_ctx_tags = LCT_LOCAL
};
static struct obd_ops osd_obd_device_ops = {
	.o_owner      = THIS_MODULE,
	.o_connect    = osd_obd_connect,
	.o_disconnect = osd_obd_disconnect,
	.o_fid_alloc  = osd_fid_alloc
};
int __init osd_init(void)
{
	int rc;

	rc = osd_options_init();
	if (rc)
		return rc;

	rc = lu_kmem_init(osd_caches);
	if (rc)
		return rc;

	rc = class_register_type(&osd_obd_device_ops, NULL, true, NULL,
				 LUSTRE_OSD_ZFS_NAME, &osd_device_type);
	if (rc)
		lu_kmem_fini(osd_caches);
	return rc;
}

void __exit osd_exit(void)
{
	class_unregister_type(LUSTRE_OSD_ZFS_NAME);
	lu_kmem_fini(osd_caches);
}
extern unsigned int osd_oi_count;
CFS_MODULE_PARM(osd_oi_count, "i", int, 0444,
		"Number of Object Index containers to be created; "
		"only valid for a new filesystem.");

MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_ZFS_NAME")");
MODULE_LICENSE("GPL");

cfs_module(osd, LUSTRE_VERSION_STRING, osd_init, osd_exit);