/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd-zfs/osd_handler.c
 * Top-level entry points into osd module
 *
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 * Author: Mike Pershin <tappro@whamcloud.com>
 * Author: Johann Lombardi <johann@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_OSD

#include <lustre_ver.h>
#include <libcfs/libcfs.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <obd.h>
#include <obd_class.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lustre_param.h>
#include <md_object.h>

#include "osd_internal.h"

#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/spa_impl.h>
#include <sys/zfs_znode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_prop.h>
#include <sys/sa_impl.h>
#include <sys/txg.h>

struct lu_context_key osd_key;

/* Slab for OSD object allocation */
struct kmem_cache *osd_object_kmem;

/* Slab to allocate osd_zap_it */
struct kmem_cache *osd_zapit_cachep;

static struct lu_kmem_descr osd_caches[] = {
	{
		.ckd_cache = &osd_object_kmem,
		.ckd_name  = "zfs_osd_obj",
		.ckd_size  = sizeof(struct osd_object)
	},
	{
		.ckd_cache = &osd_zapit_cachep,
		.ckd_name  = "osd_zapit_cache",
		.ckd_size  = sizeof(struct osd_zap_it)
	},
	{
		.ckd_cache = NULL
	}
};
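
/*
 * ARC prune callback registered with ZFS (see osd_objset_register_callbacks()
 * below): when the ARC is under memory pressure, ZFS asks the OSD to shrink
 * its lu_site object cache by purging up to (bytes >> 10) cached objects.
 */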
static void arc_prune_func(int64_t bytes, void *private)
{
	struct osd_device *od = private;
	struct lu_site	  *site = &od->od_site;
	struct lu_env	   env;
	int		   rc;

	rc = lu_env_init(&env, LCT_SHRINKER);
	if (rc) {
		CERROR("%s: can't initialize shrinker env: rc = %d\n",
		       od->od_svname, rc);
		return;
	}
	lu_site_purge(&env, site, (bytes >> 10));
	lu_env_fini(&env);
}

/*
 * Concurrency: doesn't access mutable data
 */
static int osd_root_get(const struct lu_env *env,
			struct dt_device *dev, struct lu_fid *f)
{
	lu_local_obj_fid(f, OSD_FS_ROOT_OID);
	return 0;
}

/*
 * OSD object methods.
 */

/*
 * Concurrency: shouldn't matter.
 */
static void osd_trans_commit_cb(void *cb_data, int error)
{
	struct osd_thandle	*oh = cb_data;
	struct thandle		*th = &oh->ot_super;
	struct osd_device	*osd = osd_dt_dev(th->th_dev);
	struct lu_device	*lud = &th->th_dev->dd_lu_dev;
	struct dt_txn_commit_cb	*dcb, *tmp;

	if (error) {
		if (error == ECANCELED)
			CWARN("%s: transaction @0x%p was aborted\n",
			      osd_dt_dev(th->th_dev)->od_svname, th);
		else
			CERROR("%s: transaction @0x%p commit error: rc = %d\n",
			       osd_dt_dev(th->th_dev)->od_svname, th, error);
	}

	dt_txn_hook_commit(th);

	/* call per-transaction callbacks if any */
	list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage)
		dcb->dcb_func(NULL, th, dcb, error);

	/* Unlike ldiskfs, zfs updates space accounting at commit time.
	 * As a consequence, op_end is called only now to inform the quota slave
	 * component that reserved quota space is now accounted in usage and
	 * should be released. Quota space won't be adjusted at this point since
	 * we can't provide a suitable environment. It will be performed
	 * asynchronously by a lquota thread. */
	qsd_op_end(NULL, osd->od_quota_slave, &oh->ot_quota_trans);

	lu_device_put(lud);
	th->th_dev = NULL;
	lu_context_exit(&th->th_ctx);
	lu_context_fini(&th->th_ctx);
	OBD_FREE_PTR(oh);
}
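
/*
 * Register a commit callback on a transaction handle.  Callbacks flagged
 * DCB_TRANS_STOP are run from osd_trans_stop_cb() when the transaction is
 * stopped; all others are run from osd_trans_commit_cb() once the txg
 * actually commits.
 */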
static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
{
	struct osd_thandle *oh = container_of0(th, struct osd_thandle,
					       ot_super);

	LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
	LASSERT(dcb->dcb_func != NULL);
	if (dcb->dcb_flags & DCB_TRANS_STOP)
		list_add(&dcb->dcb_linkage, &oh->ot_stop_dcb_list);
	else
		list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);

	return 0;
}

/*
 * Concurrency: shouldn't matter.
 */
static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
			   struct thandle *th)
{
	struct osd_thandle	*oh;
	int			 rc;

	oh = container_of0(th, struct osd_thandle, ot_super);
	LASSERT(oh != NULL);
	LASSERT(oh->ot_tx != NULL);

	rc = dt_txn_hook_start(env, d, th);
	if (rc != 0)
		RETURN(rc);

	if (oh->ot_write_commit && OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC))
		/* Unlike ldiskfs, ZFS checks for available space and returns
		 * -ENOSPC when assigning txg */
		RETURN(-ENOSPC);

	rc = -dmu_tx_assign(oh->ot_tx, TXG_WAIT);
	if (unlikely(rc != 0)) {
		struct osd_device *osd = osd_dt_dev(d);
		/* dmu will call commit callback with error code during abort */
		if (!lu_device_is_md(&d->dd_lu_dev) && rc == -ENOSPC)
			CERROR("%s: failed to start transaction due to ENOSPC. "
			       "Metadata overhead is underestimated or "
			       "grant_ratio is too low.\n", osd->od_svname);
		else
			CERROR("%s: can't assign tx: rc = %d\n",
			       osd->od_svname, rc);
	} else {
		/* add commit callback */
		dmu_tx_callback_register(oh->ot_tx, osd_trans_commit_cb, oh);
		oh->ot_assigned = 1;
		lu_context_init(&th->th_ctx, th->th_tags);
		lu_context_enter(&th->th_ctx);
		lu_device_get(&d->dd_lu_dev);
	}

	RETURN(rc);
}

static int osd_unlinked_object_free(struct osd_device *osd, uint64_t oid);

static void osd_unlinked_list_emptify(struct osd_device *osd,
				      struct list_head *list, bool free)
{
	struct osd_object *obj;
	uint64_t	   oid;

	while (!list_empty(list)) {
		obj = list_entry(list->next,
				 struct osd_object, oo_unlinked_linkage);
		LASSERT(obj->oo_db != NULL);
		oid = obj->oo_db->db_object;

		list_del_init(&obj->oo_unlinked_linkage);
		if (free)
			(void)osd_unlinked_object_free(osd, oid);
	}
}

static void osd_trans_stop_cb(struct osd_thandle *oth, int result)
{
	struct dt_txn_commit_cb	*dcb;
	struct dt_txn_commit_cb	*tmp;

	/* call per-transaction stop callbacks if any */
	list_for_each_entry_safe(dcb, tmp, &oth->ot_stop_dcb_list,
				 dcb_linkage) {
		LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
			 "commit callback entry: magic=%x name='%s'\n",
			 dcb->dcb_magic, dcb->dcb_name);
		list_del_init(&dcb->dcb_linkage);
		dcb->dcb_func(NULL, &oth->ot_super, dcb, result);
	}
}

/*
 * Concurrency: shouldn't matter.
 */
static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
			  struct thandle *th)
{
	struct osd_device	*osd = osd_dt_dev(th->th_dev);
	bool			 sync = (th->th_sync != 0);
	struct osd_thandle	*oh;
	struct list_head	 unlinked;
	uint64_t		 txg;
	int			 rc;

	oh = container_of0(th, struct osd_thandle, ot_super);
	INIT_LIST_HEAD(&unlinked);
	list_splice_init(&oh->ot_unlinked_list, &unlinked);

	if (oh->ot_assigned == 0) {
		LASSERT(oh->ot_tx != NULL);
		dmu_tx_abort(oh->ot_tx);
		osd_object_sa_dirty_rele(oh);
		osd_unlinked_list_emptify(osd, &unlinked, false);
		/* there won't be any commit, release reserved quota space now,
		 * if any */
		qsd_op_end(env, osd->od_quota_slave, &oh->ot_quota_trans);
		OBD_FREE_PTR(oh);
		RETURN(0);
	}

	/* When doing our own inode accounting, the ZAPs storing per-uid/gid
	 * usage are updated at operation execution time, so we should call
	 * qsd_op_end() straight away. Otherwise (for blk accounting maintained
	 * by ZFS and when #inode is estimated from #blks) accounting is updated
	 * at commit time and the call to qsd_op_end() must be delayed */
	if (oh->ot_quota_trans.lqt_id_cnt > 0 &&
	    !oh->ot_quota_trans.lqt_ids[0].lqi_is_blk &&
	    !osd->od_quota_iused_est)
		qsd_op_end(env, osd->od_quota_slave, &oh->ot_quota_trans);

	rc = dt_txn_hook_stop(env, th);
	if (rc != 0)
		CDEBUG(D_OTHER, "%s: transaction hook failed: rc = %d\n",
		       osd->od_svname, rc);

	osd_trans_stop_cb(oh, rc);

	LASSERT(oh->ot_tx != NULL);
	txg = oh->ot_tx->tx_txg;

	osd_object_sa_dirty_rele(oh);
	/* XXX: Once dmu_tx_commit() called, oh/th could have been freed
	 * by osd_trans_commit_cb already. */
	dmu_tx_commit(oh->ot_tx);

	osd_unlinked_list_emptify(osd, &unlinked, true);

	if (sync)
		txg_wait_synced(dmu_objset_pool(osd->od_os), txg);

	RETURN(rc);
}

static struct thandle *osd_trans_create(const struct lu_env *env,
					struct dt_device *dt)
{
	struct osd_device	*osd = osd_dt_dev(dt);
	struct osd_thandle	*oh;
	struct thandle		*th;
	dmu_tx_t		*tx;

	tx = dmu_tx_create(osd->od_os);
	if (tx == NULL)
		RETURN(ERR_PTR(-ENOMEM));

	/* alloc callback data */
	OBD_ALLOC_PTR(oh);
	if (oh == NULL) {
		dmu_tx_abort(tx);
		RETURN(ERR_PTR(-ENOMEM));
	}

	oh->ot_tx = tx;
	INIT_LIST_HEAD(&oh->ot_dcb_list);
	INIT_LIST_HEAD(&oh->ot_stop_dcb_list);
	INIT_LIST_HEAD(&oh->ot_unlinked_list);
	INIT_LIST_HEAD(&oh->ot_sa_list);
	sema_init(&oh->ot_sa_lock, 1);
	memset(&oh->ot_quota_trans, 0, sizeof(oh->ot_quota_trans));
	th = &oh->ot_super;
	th->th_dev = dt;
	th->th_result = 0;
	th->th_tags = LCT_TX_HANDLE;
	RETURN(th);
}

/* Estimate the number of objects from a number of blocks */
uint64_t osd_objs_count_estimate(uint64_t refdbytes, uint64_t usedobjs,
				 uint64_t nrblocks, uint64_t est_maxblockshift)
{
	uint64_t est_objs, est_refdblocks, est_usedobjs;

	/* Compute an nrblocks estimate based on the actual number of
	 * dnodes that could fit in the space.  Since we don't know the
	 * overhead associated with each dnode (xattrs, SAs, VDEV overhead,
	 * etc) just using DNODE_SHIFT isn't going to give a good estimate.
	 * Instead, compute an estimate based on the average space usage per
	 * dnode, with an upper and lower cap.
	 *
	 * In case there aren't many dnodes or blocks used yet, add a small
	 * correction factor using OSD_DNODE_EST_SHIFT.  This correction
	 * factor gradually disappears as the number of real dnodes grows.
	 * This also avoids the need to check for divide-by-zero later. */
	CLASSERT(OSD_DNODE_MIN_BLKSHIFT > 0);
	CLASSERT(OSD_DNODE_EST_BLKSHIFT > 0);

	est_refdblocks = (refdbytes >> est_maxblockshift) +
			 (OSD_DNODE_EST_COUNT >> OSD_DNODE_EST_BLKSHIFT);
	est_usedobjs   = usedobjs + OSD_DNODE_EST_COUNT;

	/* Average space/dnode more than maximum dnode size, use max dnode
	 * size to estimate free dnodes from adjusted free blocks count.
	 * OSTs typically use more than one block dnode so this case applies. */
	if (est_usedobjs <= est_refdblocks * 2) {
		est_objs = nrblocks;

	/* Average space/dnode smaller than min dnode size (probably due to
	 * metadnode compression), use min dnode size to estimate the number of
	 * objects.
	 * An MDT typically uses below 512 bytes/dnode so this case applies. */
	} else if (est_usedobjs >= (est_refdblocks << OSD_DNODE_MIN_BLKSHIFT)) {
		est_objs = nrblocks << OSD_DNODE_MIN_BLKSHIFT;

	} else {
		/* Between the extremes, we try to use the average size of
		 * existing dnodes to compute the number of dnodes that fit
		 * into nrblocks:
		 *
		 *     est_objs = nrblocks * (est_usedobjs / est_refdblocks);
		 *
		 * but this may overflow 64 bits or become 0 if not handled
		 * well.
		 *
		 * We know nrblocks is below (64 - 17 = 47) bits from
		 * SPA_MAXBLKSHIFT, and est_usedobjs is under 48 bits due to
		 * DN_MAX_OBJECT_SHIFT, which means that multiplying them may
		 * get as large as 2 ^ 95.
		 *
		 * We also know (est_usedobjs / est_refdblocks) is between 2 and
		 * 256, due to the above checks, so we can safely compute this
		 * first.  We care more about accuracy on the MDT (many
		 * dnodes/block), which is good because this is where truncation
		 * errors are smallest.  This adds 8 bits to nrblocks so we can
		 * use 7 bits to compute a fixed-point fraction and nrblocks
		 * can still fit in 64 bits. */
		unsigned dnodes_per_block = (est_usedobjs << 7) / est_refdblocks;

		est_objs = (nrblocks * dnodes_per_block) >> 7;
	}

	return est_objs;
}
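
/*
 * Worked example for the middle case above (illustrative numbers only):
 * if on average 64 dnodes share each referenced block, i.e.
 * est_usedobjs == 64 * est_refdblocks, then
 *     dnodes_per_block = (est_usedobjs << 7) / est_refdblocks = 64 << 7 = 8192
 * and with nrblocks free blocks the estimate becomes
 *     est_objs = (nrblocks * 8192) >> 7 = nrblocks * 64,
 * i.e. roughly 64 additional dnodes can be created per free block.
 */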
static int osd_objset_statfs(struct osd_device *osd, struct obd_statfs *osfs)
{
	struct objset *os = osd->od_os;
	uint64_t refdbytes, availbytes, usedobjs, availobjs;
	uint64_t est_availobjs;
	uint64_t reserved;
	uint64_t bshift;

	dmu_objset_space(os, &refdbytes, &availbytes, &usedobjs, &availobjs);

	memset(osfs, 0, sizeof(*osfs));

	/* We're a zfs filesystem. */
	osfs->os_type = UBERBLOCK_MAGIC;

	/*
	 * ZFS allows multiple block sizes.  For statfs, Linux makes no
	 * proper distinction between bsize and frsize; its calculations
	 * of free and used blocks incorrectly use bsize instead of frsize,
	 * and bsize is also used as the optimal blocksize.  We return the
	 * largest possible block size as the IO size for optimum performance
	 * and scale the free and used block counts appropriately.
	 */
	osfs->os_bsize = osd->od_max_blksz;
	bshift = fls64(osfs->os_bsize) - 1;

	osfs->os_blocks = (refdbytes + availbytes) >> bshift;
	osfs->os_bfree = availbytes >> bshift;
	osfs->os_bavail = osfs->os_bfree; /* no extra root reservation */

	/* Take replication (i.e. number of copies) into account */
	osfs->os_bavail /= os->os_copies;

	/*
	 * Reserve some space so we don't run into ENOSPC due to grants not
	 * accounting for metadata overhead in ZFS, and to avoid fragmentation.
	 * Rather than report this via os_bavail (which makes users unhappy if
	 * they can't fill the filesystem 100%), reduce os_blocks as well.
	 *
	 * Reserve 0.78% of total space, at least 16MB for small filesystems,
	 * for internal files to be created/unlinked when space is tight.
	 */
	CLASSERT(OSD_STATFS_RESERVED_SIZE > 0);
	if (likely(osfs->os_blocks >= OSD_STATFS_RESERVED_SIZE))
		reserved = osfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
	else
		reserved = OSD_STATFS_RESERVED_SIZE >> bshift;

	osfs->os_blocks -= reserved;
	osfs->os_bfree  -= MIN(reserved, osfs->os_bfree);
	osfs->os_bavail -= MIN(reserved, osfs->os_bavail);

	/*
	 * The availobjs value returned from dmu_objset_space() is largely
	 * useless, since it reports the number of objects that might
	 * theoretically still fit into the dataset, independent of minor
	 * issues like how much space is actually available in the pool.
	 * Compute a better estimate in osd_objs_count_estimate().
	 */
	est_availobjs = osd_objs_count_estimate(refdbytes, usedobjs,
						osfs->os_bfree, bshift);

	osfs->os_ffree = min(availobjs, est_availobjs);
	osfs->os_files = osfs->os_ffree + usedobjs;

	/* ZFS XXX: fill in backing dataset FSID/UUID
	   memcpy(osfs->os_fsid, .... );*/

	osfs->os_namelen = MAXNAMELEN;
	osfs->os_maxbytes = OBD_OBJECT_EOF;

	/* ZFS XXX: fill in appropriate OS_STATE_{DEGRADED,READONLY} flags
	   osfs->os_state = vf_to_stf(vfsp->vfs_flag);
	   if (sb->s_flags & MS_RDONLY)
		osfs->os_state |= OS_STATE_READONLY;
	 */

	return 0;
}
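
/*
 * osd_statfs() below wraps osd_objset_statfs() and additionally withholds
 * OSD_GRANT_FOR_LOCAL_OIDS worth of blocks from os_bavail, presumably so
 * that objects created locally by the OSD (llogs and similar internal
 * files) are not starved by grant space handed out to clients.
 */
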
/*
 * Concurrency: shouldn't matter.
 */
int osd_statfs(const struct lu_env *env, struct dt_device *d,
	       struct obd_statfs *osfs)
{
	int rc;

	rc = osd_objset_statfs(osd_dt_dev(d), osfs);
	if (unlikely(rc != 0))
		RETURN(rc);

	osfs->os_bavail -= min_t(u64,
				 OSD_GRANT_FOR_LOCAL_OIDS / osfs->os_bsize,
				 osfs->os_bavail);
	RETURN(0);
}

static int osd_blk_insert_cost(struct osd_device *osd)
{
	int max_blockshift, nr_blkptrshift, bshift;

	/* max_blockshift is the log2 of the number of blocks needed to reach
	 * the maximum filesize (that's to say 2^64) */
	bshift = osd_spa_maxblockshift(dmu_objset_spa(osd->od_os));
	max_blockshift = DN_MAX_OFFSET_SHIFT - bshift;

	/* nr_blkptrshift is the log2 of the number of block pointers that can
	 * be stored in an indirect block */
	CLASSERT(DN_MAX_INDBLKSHIFT > SPA_BLKPTRSHIFT);
	nr_blkptrshift = DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT;

	/* max_blockshift / nr_blkptrshift is thus the maximum depth of the
	 * tree.  We add +1 for rounding purposes.
	 * The tree depth times the indirect block size gives us the maximum
	 * cost of inserting a block in the tree */
	return (max_blockshift / nr_blkptrshift + 1) * (1 << DN_MAX_INDBLKSHIFT);
}
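
/*
 * Worked example with illustrative values: if an indirect block holds
 * 2^7 = 128 block pointers (nr_blkptrshift = 7) and reaching the maximum
 * object size requires 2^47 data blocks (max_blockshift = 47), the block
 * tree is at most 47 / 7 + 1 = 7 levels deep, so inserting a single block
 * may dirty up to 7 indirect blocks of 2^DN_MAX_INDBLKSHIFT bytes each.
 */
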
/*
 * Concurrency: doesn't access mutable data.
 */
static void osd_conf_get(const struct lu_env *env,
			 const struct dt_device *dev,
			 struct dt_device_param *param)
{
	struct osd_device *osd = osd_dt_dev(dev);

	/*
	 * XXX should be taken from not-yet-existing fs abstraction layer.
	 */
	param->ddp_max_name_len	= MAXNAMELEN;
	param->ddp_max_nlink	= 1 << 31; /* it's 8 bytes on disk */
	param->ddp_block_shift	= 12; /* XXX */
	param->ddp_mount_type	= LDD_MT_ZFS;

	param->ddp_mntopts	= MNTOPT_USERXATTR;
	if (osd->od_posix_acl)
		param->ddp_mntopts |= MNTOPT_ACL;
	param->ddp_max_ea_size	= DXATTR_MAX_ENTRY_SIZE;

	/* for maxbytes, report same value as ZPL */
	param->ddp_maxbytes	= MAX_LFS_FILESIZE;

	/* Default reserved fraction of the available space that should be kept
	 * for error margin.  Unfortunately, there are many factors that can
	 * impact the overhead with zfs, so let's be very cautious for now and
	 * reserve 20% of the available space which is not given out as grant.
	 * This tunable can be changed on a live system via procfs if needed. */
	param->ddp_grant_reserved = 20;

	/* inodes are dynamically allocated, so we report the per-inode space
	 * consumption to upper layers.  This static value is not really
	 * accurate and we should use the same logic as in osd_objset_statfs()
	 * to estimate the real size consumed by an object */
	param->ddp_inodespace = OSD_DNODE_EST_COUNT;
	/* per-fragment overhead to be used by the client code */
	param->ddp_grant_frag = osd_blk_insert_cost(osd);
}

/*
 * Concurrency: shouldn't matter.
 */
static int osd_sync(const struct lu_env *env, struct dt_device *d)
{
	struct osd_device *osd = osd_dt_dev(d);

	CDEBUG(D_CACHE, "syncing OSD %s\n", LUSTRE_OSD_ZFS_NAME);
	txg_wait_synced(dmu_objset_pool(osd->od_os), 0ULL);
	CDEBUG(D_CACHE, "synced OSD %s\n", LUSTRE_OSD_ZFS_NAME);

	return 0;
}
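
/*
 * Trigger a txg commit without waiting for it to complete: under
 * tx_sync_lock, bump tx_quiesce_txg_waiting and wake the quiesce thread so
 * that the currently open txg is quiesced and handed to the sync thread.
 */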
static int osd_commit_async(const struct lu_env *env, struct dt_device *dev)
{
	struct osd_device *osd = osd_dt_dev(dev);
	tx_state_t	  *tx = &dmu_objset_pool(osd->od_os)->dp_tx;
	uint64_t	   txg;

	mutex_enter(&tx->tx_sync_lock);
	txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg) {
		tx->tx_quiesce_txg_waiting = txg;
		cv_broadcast(&tx->tx_quiesce_more_cv);
	}
	mutex_exit(&tx->tx_sync_lock);

	return 0;
}

/*
 * Concurrency: shouldn't matter.
 */
static int osd_ro(const struct lu_env *env, struct dt_device *d)
{
	struct osd_device *osd = osd_dt_dev(d);

	CERROR("%s: *** setting device %s read-only ***\n",
	       osd->od_svname, LUSTRE_OSD_ZFS_NAME);

	spa_freeze(dmu_objset_spa(osd->od_os));

	RETURN(0);
}

static struct dt_device_operations osd_dt_ops = {
	.dt_root_get		= osd_root_get,
	.dt_statfs		= osd_statfs,
	.dt_trans_create	= osd_trans_create,
	.dt_trans_start		= osd_trans_start,
	.dt_trans_stop		= osd_trans_stop,
	.dt_trans_cb_add	= osd_trans_cb_add,
	.dt_conf_get		= osd_conf_get,
	.dt_sync		= osd_sync,
	.dt_commit_async	= osd_commit_async,
	.dt_ro			= osd_ro,
};

/*
 * DMU OSD device type methods
 */
static int osd_type_init(struct lu_device_type *t)
{
	LU_CONTEXT_KEY_INIT(&osd_key);
	return lu_context_key_register(&osd_key);
}

static void osd_type_fini(struct lu_device_type *t)
{
	lu_context_key_degister(&osd_key);
}

static void *osd_key_init(const struct lu_context *ctx,
			  struct lu_context_key *key)
{
	struct osd_thread_info *info;

	OBD_ALLOC_PTR(info);
	if (info != NULL)
		info->oti_env = container_of(ctx, struct lu_env, le_ctx);
	else
		info = ERR_PTR(-ENOMEM);
	return info;
}

static void osd_key_fini(const struct lu_context *ctx,
			 struct lu_context_key *key, void *data)
{
	struct osd_thread_info *info = data;

	OBD_FREE_PTR(info);
}

static void osd_key_exit(const struct lu_context *ctx,
			 struct lu_context_key *key, void *data)
{
	struct osd_thread_info *info = data;

	memset(info, 0, sizeof(*info));
}

struct lu_context_key osd_key = {
	.lct_tags = LCT_DT_THREAD | LCT_MD_THREAD | LCT_MG_THREAD | LCT_LOCAL,
	.lct_init = osd_key_init,
	.lct_fini = osd_key_fini,
	.lct_exit = osd_key_exit
};

static void osd_fid_fini(const struct lu_env *env, struct osd_device *osd)
{
	if (osd->od_cl_seq == NULL)
		return;

	seq_client_fini(osd->od_cl_seq);
	OBD_FREE_PTR(osd->od_cl_seq);
	osd->od_cl_seq = NULL;
}

static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
{
	/* shutdown quota slave instance associated with the device */
	if (o->od_quota_slave != NULL) {
		qsd_fini(env, o->od_quota_slave);
		o->od_quota_slave = NULL;
	}

	osd_fid_fini(env, o);

	RETURN(0);
}
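
/*
 * Dataset property change callbacks: registered with the DSL in
 * osd_objset_register_callbacks() and invoked whenever the 'xattr' or
 * 'recordsize' properties of the backing dataset are changed (for example
 * via 'zfs set').
 */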
static void osd_xattr_changed_cb(void *arg, uint64_t newval)
{
	struct osd_device *osd = arg;

	osd->od_xattr_in_sa = (newval == ZFS_XATTR_SA);
}

static void osd_recordsize_changed_cb(void *arg, uint64_t newval)
{
	struct osd_device *osd = arg;

	LASSERT(newval <= osd_spa_maxblocksize(dmu_objset_spa(osd->od_os)));
	LASSERT(newval >= SPA_MINBLOCKSIZE);
	LASSERT(ISP2(newval));

	osd->od_max_blksz = newval;
}

/*
 * This function unregisters all registered callbacks.  It's harmless to
 * unregister callbacks that were never registered so it is used to safely
 * unwind a partially completed call to osd_objset_register_callbacks().
 */
static void osd_objset_unregister_callbacks(struct osd_device *o)
{
	struct dsl_dataset *ds = dmu_objset_ds(o->od_os);

	(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_XATTR),
				   osd_xattr_changed_cb, o);
	(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
				   osd_recordsize_changed_cb, o);

	if (o->arc_prune_cb != NULL) {
		arc_remove_prune_callback(o->arc_prune_cb);
		o->arc_prune_cb = NULL;
	}
}

/*
 * Register the required callbacks to be notified when zfs properties
 * are modified using the 'zfs(8)' command line utility.
 */
static int osd_objset_register_callbacks(struct osd_device *o)
{
	struct dsl_dataset	*ds = dmu_objset_ds(o->od_os);
	dsl_pool_t		*dp = dmu_objset_pool(o->od_os);
	int			 rc;

	LASSERT(ds != NULL);
	LASSERT(dp != NULL);

	dsl_pool_config_enter(dp, FTAG);
	rc = -dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_XATTR),
				osd_xattr_changed_cb, o);
	if (rc)
		GOTO(err, rc);

	rc = -dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
				osd_recordsize_changed_cb, o);
	if (rc)
		GOTO(err, rc);

	o->arc_prune_cb = arc_add_prune_callback(arc_prune_func, o);
err:
	dsl_pool_config_exit(dp, FTAG);
	if (rc)
		osd_objset_unregister_callbacks(o);

	RETURN(rc);
}

static int osd_objset_open(struct osd_device *o)
{
	uint64_t	version = ZPL_VERSION;
	uint64_t	sa_obj;
	int		rc;

	rc = -dmu_objset_own(o->od_mntdev, DMU_OST_ZFS, B_FALSE, o, &o->od_os);
	if (rc)
		GOTO(out, rc);

	/* Check ZFS version */
	rc = -zap_lookup(o->od_os, MASTER_NODE_OBJ,
			 ZPL_VERSION_STR, 8, 1, &version);
	if (rc) {
		CERROR("%s: Error looking up ZPL VERSION\n", o->od_mntdev);
		/*
		 * We can't return ENOENT because that would mean the objset
		 * does not exist.
		 */
		GOTO(out, rc = -EIO);
	}

	rc = -zap_lookup(o->od_os, MASTER_NODE_OBJ,
			 ZFS_SA_ATTRS, 8, 1, &sa_obj);
	if (rc)
		GOTO(out, rc);

	rc = -sa_setup(o->od_os, sa_obj, zfs_attr_table,
		       ZPL_END, &o->z_attr_table);
	if (rc)
		GOTO(out, rc);

	rc = -zap_lookup(o->od_os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ,
			 8, 1, &o->od_rootid);
	if (rc) {
		CERROR("%s: lookup for root failed: rc = %d\n",
		       o->od_svname, rc);
		GOTO(out, rc);
	}

	rc = -zap_lookup(o->od_os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET,
			 8, 1, &o->od_unlinkedid);
	if (rc) {
		CERROR("%s: lookup for %s failed: rc = %d\n",
		       o->od_svname, ZFS_UNLINKED_SET, rc);
		GOTO(out, rc);
	}

	/* Check that user/group usage tracking is supported */
	if (!dmu_objset_userused_enabled(o->od_os) ||
	    DMU_USERUSED_DNODE(o->od_os)->dn_type != DMU_OT_USERGROUP_USED ||
	    DMU_GROUPUSED_DNODE(o->od_os)->dn_type != DMU_OT_USERGROUP_USED) {
		CERROR("%s: Space accounting not supported by this target, "
		       "aborting\n", o->od_svname);
		GOTO(out, rc = -ENOTSUPP);
	}

out:
	if (rc != 0 && o->od_os != NULL) {
		dmu_objset_disown(o->od_os, o);
		o->od_os = NULL;
	}

	RETURN(rc);
}
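
/*
 * Free one object left on the unlinked set: truncate its data first (outside
 * of the transaction, to keep the transaction small), then remove its entry
 * from the unlinked ZAP and free the dnode within a single transaction.
 */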
static int
osd_unlinked_object_free(struct osd_device *osd, uint64_t oid)
{
	int	  rc;
	dmu_tx_t *tx;

	rc = -dmu_free_long_range(osd->od_os, oid, 0, DMU_OBJECT_END);
	if (rc != 0) {
		CWARN("%s: Cannot truncate "LPU64": rc = %d\n",
		      osd->od_svname, oid, rc);
		return rc;
	}

	tx = dmu_tx_create(osd->od_os);
	dmu_tx_hold_free(tx, oid, 0, DMU_OBJECT_END);
	dmu_tx_hold_zap(tx, osd->od_unlinkedid, FALSE, NULL);
	rc = -dmu_tx_assign(tx, TXG_WAIT);
	if (rc != 0) {
		CWARN("%s: Cannot assign tx for "LPU64": rc = %d\n",
		      osd->od_svname, oid, rc);
		goto failed;
	}

	rc = -zap_remove_int(osd->od_os, osd->od_unlinkedid, oid, tx);
	if (rc != 0) {
		CWARN("%s: Cannot remove "LPU64" from unlinked set: rc = %d\n",
		      osd->od_svname, oid, rc);
		goto failed;
	}

	rc = -dmu_object_free(osd->od_os, oid, tx);
	if (rc != 0) {
		CWARN("%s: Cannot free "LPU64": rc = %d\n",
		      osd->od_svname, oid, rc);
		goto failed;
	}

	dmu_tx_commit(tx);
	return 0;

failed:
	dmu_tx_abort(tx);
	return rc;
}

static void
osd_unlinked_drain(const struct lu_env *env, struct osd_device *osd)
{
	zap_cursor_t	 zc;
	zap_attribute_t	*za = &osd_oti_get(env)->oti_za;

	zap_cursor_init(&zc, osd->od_os, osd->od_unlinkedid);

	while (zap_cursor_retrieve(&zc, za) == 0) {
		/* If cannot free the object, leave it in the unlinked set,
		 * until the OSD is mounted again when osd_unlinked_drain()
		 * will be called again. */
		if (osd_unlinked_object_free(osd, za->za_first_integer) != 0)
			break;
		zap_cursor_advance(&zc);
	}

	zap_cursor_fini(&zc);
}
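
/*
 * Mount the backing dataset: open the objset, register property and ARC
 * callbacks, look up the root object, initialize the OI tables, the lu_site,
 * procfs entries and the quota slave, then drain any objects left on the
 * unlinked set by a previous mount.
 */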
static int osd_mount(const struct lu_env *env,
		     struct osd_device *o, struct lustre_cfg *cfg)
{
	char		*mntdev = lustre_cfg_string(cfg, 1);
	char		*svname = lustre_cfg_string(cfg, 4);
	dmu_buf_t	*rootdb;
	const char	*opts;
	int		 rc;

	if (o->od_os != NULL)
		RETURN(0);

	if (mntdev == NULL || svname == NULL)
		RETURN(-EINVAL);

	rc = strlcpy(o->od_mntdev, mntdev, sizeof(o->od_mntdev));
	if (rc >= sizeof(o->od_mntdev))
		RETURN(-E2BIG);

	rc = strlcpy(o->od_svname, svname, sizeof(o->od_svname));
	if (rc >= sizeof(o->od_svname))
		RETURN(-E2BIG);

	if (server_name_is_ost(o->od_svname))
		o->od_is_ost = 1;

	rc = osd_objset_open(o);
	if (rc)
		RETURN(rc);

	o->od_xattr_in_sa = B_TRUE;
	o->od_max_blksz = SPA_OLD_MAXBLOCKSIZE;

	rc = osd_objset_register_callbacks(o);
	if (rc)
		GOTO(err, rc);

	rc = __osd_obj2dbuf(env, o->od_os, o->od_rootid, &rootdb);
	if (rc)
		GOTO(err, rc);

	o->od_root = rootdb->db_object;
	sa_buf_rele(rootdb, osd_obj_tag);

	/* 1. initialize oi before any file create or file open */
	rc = osd_oi_init(env, o);
	if (rc)
		GOTO(err, rc);

	rc = lu_site_init(&o->od_site, osd2lu_dev(o));
	if (rc)
		GOTO(err, rc);
	o->od_site.ls_bottom_dev = osd2lu_dev(o);

	rc = lu_site_init_finish(&o->od_site);
	if (rc)
		GOTO(err, rc);

	/* Use our own ZAP for inode accounting by default, this can be changed
	 * via procfs to estimate the inode usage from the block usage */
	o->od_quota_iused_est = 0;

	rc = osd_procfs_init(o, o->od_svname);
	if (rc)
		GOTO(err, rc);

	/* initialize quota slave instance */
	o->od_quota_slave = qsd_init(env, o->od_svname, &o->od_dt_dev,
				     o->od_proc_entry);
	if (IS_ERR(o->od_quota_slave)) {
		rc = PTR_ERR(o->od_quota_slave);
		o->od_quota_slave = NULL;
		GOTO(err, rc);
	}

	/* parse mount option "noacl", and enable ACL by default */
	opts = lustre_cfg_string(cfg, 3);
	if (opts == NULL || strstr(opts, "noacl") == NULL)
		o->od_posix_acl = 1;

	osd_unlinked_drain(env, o);
err:
	if (rc) {
		dmu_objset_disown(o->od_os, o);
		o->od_os = NULL;
	}

	RETURN(rc);
}

static void osd_umount(const struct lu_env *env, struct osd_device *o)
{
	if (atomic_read(&o->od_zerocopy_alloc))
		CERROR("%s: lost %d allocated page(s)\n", o->od_svname,
		       atomic_read(&o->od_zerocopy_alloc));
	if (atomic_read(&o->od_zerocopy_loan))
		CERROR("%s: lost %d loaned abuf(s)\n", o->od_svname,
		       atomic_read(&o->od_zerocopy_loan));
	if (atomic_read(&o->od_zerocopy_pin))
		CERROR("%s: lost %d pinned dbuf(s)\n", o->od_svname,
		       atomic_read(&o->od_zerocopy_pin));

	if (o->od_os != NULL) {
		/* force a txg sync to get all commit callbacks */
		txg_wait_synced(dmu_objset_pool(o->od_os), 0ULL);

		/* close the object set */
		dmu_objset_disown(o->od_os, o);
		o->od_os = NULL;
	}
}

static int osd_device_init0(const struct lu_env *env,
			    struct osd_device *o,
			    struct lustre_cfg *cfg)
{
	struct lu_device	*l = osd2lu_dev(o);
	int			 rc;

	/* if the module was re-loaded, env can lose its keys */
	rc = lu_env_refill((struct lu_env *) env);
	if (rc)
		GOTO(out, rc);

	l->ld_ops = &osd_lu_ops;
	o->od_dt_dev.dd_ops = &osd_dt_ops;

out:
	RETURN(rc);
}

static struct lu_device *osd_device_fini(const struct lu_env *env,
					 struct lu_device *dev);

static struct lu_device *osd_device_alloc(const struct lu_env *env,
					  struct lu_device_type *type,
					  struct lustre_cfg *cfg)
{
	struct osd_device	*dev;
	int			 rc;

	OBD_ALLOC_PTR(dev);
	if (dev == NULL)
		return ERR_PTR(-ENOMEM);

	rc = dt_device_init(&dev->od_dt_dev, type);
	if (rc == 0) {
		rc = osd_device_init0(env, dev, cfg);
		if (rc == 0) {
			rc = osd_mount(env, dev, cfg);
			if (rc)
				osd_device_fini(env, osd2lu_dev(dev));
		}
		if (rc)
			dt_device_fini(&dev->od_dt_dev);
	}

	if (unlikely(rc != 0))
		OBD_FREE_PTR(dev);

	return rc == 0 ? osd2lu_dev(dev) : ERR_PTR(rc);
}

static struct lu_device *osd_device_free(const struct lu_env *env,
					 struct lu_device *d)
{
	struct osd_device *o = osd_dev(d);

	/* XXX: make osd top device in order to release reference */
	d->ld_site->ls_top_dev = d;
	lu_site_purge(env, d->ld_site, -1);
	if (!cfs_hash_is_empty(d->ld_site->ls_obj_hash)) {
		LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
		lu_site_print(env, d->ld_site, &msgdata, lu_cdebug_printer);
	}
	lu_site_fini(&o->od_site);
	dt_device_fini(&o->od_dt_dev);
	OBD_FREE_PTR(o);

	RETURN(NULL);
}

static struct lu_device *osd_device_fini(const struct lu_env *env,
					 struct lu_device *d)
{
	struct osd_device *o = osd_dev(d);
	int		   rc;

	osd_shutdown(env, o);
	osd_oi_fini(env, o);

	if (o->od_os) {
		osd_objset_unregister_callbacks(o);
		osd_sync(env, lu2dt_dev(d));
		txg_wait_callbacks(spa_get_dsl(dmu_objset_spa(o->od_os)));
	}

	rc = osd_procfs_fini(o);
	if (rc) {
		CERROR("proc fini error %d\n", rc);
		RETURN(ERR_PTR(rc));
	}

	if (o->od_os)
		osd_umount(env, o);

	RETURN(NULL);
}

static int osd_device_init(const struct lu_env *env, struct lu_device *d,
			   const char *name, struct lu_device *next)
{
	return 0;
}

/*
 * To be removed, setup is performed by osd_device_{init,alloc} and
 * cleanup is performed by osd_device_{fini,free}.
 */
static int osd_process_config(const struct lu_env *env,
			      struct lu_device *d, struct lustre_cfg *cfg)
{
	struct osd_device	*o = osd_dev(d);
	int			 rc;

	switch (cfg->lcfg_command) {
	case LCFG_SETUP:
		rc = osd_mount(env, o, cfg);
		break;
	case LCFG_CLEANUP:
		rc = osd_shutdown(env, o);
		break;
	case LCFG_PARAM: {
		LASSERT(&o->od_dt_dev);
		rc = class_process_proc_param(PARAM_OSD, lprocfs_osd_obd_vars,
					      cfg, &o->od_dt_dev);
		if (rc > 0 || rc == -ENOSYS)
			rc = class_process_proc_param(PARAM_OST,
						      lprocfs_osd_obd_vars,
						      cfg, &o->od_dt_dev);
		break;
	}
	default:
		rc = -ENOTTY;
	}

	RETURN(rc);
}

static int osd_recovery_complete(const struct lu_env *env, struct lu_device *d)
{
	struct osd_device	*osd = osd_dev(d);
	int			 rc = 0;

	if (osd->od_quota_slave == NULL)
		RETURN(0);

	/* start qsd instance on recovery completion, this notifies the quota
	 * slave code that we are about to process new requests now */
	rc = qsd_start(env, osd->od_quota_slave);
	RETURN(rc);
}

/*
 * we use exports to track all osd users
 */
static int osd_obd_connect(const struct lu_env *env, struct obd_export **exp,
			   struct obd_device *obd, struct obd_uuid *cluuid,
			   struct obd_connect_data *data, void *localdata)
{
	struct osd_device	*osd = osd_dev(obd->obd_lu_dev);
	struct lustre_handle	 conn;
	int			 rc;

	CDEBUG(D_CONFIG, "connect #%d\n", osd->od_connects);

	rc = class_connect(&conn, obd, cluuid);
	if (rc)
		RETURN(rc);

	*exp = class_conn2export(&conn);

	spin_lock(&obd->obd_dev_lock);
	osd->od_connects++;
	spin_unlock(&obd->obd_dev_lock);

	RETURN(0);
}

/*
 * once last export (we don't count self-export) disappeared
 * osd can be released
 */
static int osd_obd_disconnect(struct obd_export *exp)
{
	struct obd_device	*obd = exp->exp_obd;
	struct osd_device	*osd = osd_dev(obd->obd_lu_dev);
	int			 rc, release = 0;

	/* Only disconnect the underlying layers on the final disconnect. */
	spin_lock(&obd->obd_dev_lock);
	osd->od_connects--;
	if (osd->od_connects == 0)
		release = 1;
	spin_unlock(&obd->obd_dev_lock);

	rc = class_disconnect(exp); /* bz 9811 */

	if (rc == 0 && release)
		class_manual_cleanup(obd);
	RETURN(rc);
}
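
/*
 * Set up the sequence client used for FID allocation; skipped on OSTs
 * (od_is_ost set) and when the client has already been initialized.
 */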
static int osd_fid_init(const struct lu_env *env, struct osd_device *osd)
{
	struct seq_server_site	*ss = osd_seq_site(osd);
	int			 rc;

	if (osd->od_is_ost || osd->od_cl_seq != NULL)
		RETURN(0);

	if (unlikely(ss == NULL))
		RETURN(-ENODEV);

	OBD_ALLOC_PTR(osd->od_cl_seq);
	if (osd->od_cl_seq == NULL)
		RETURN(-ENOMEM);

	rc = seq_client_init(osd->od_cl_seq, NULL, LUSTRE_SEQ_METADATA,
			     osd->od_svname, ss->ss_server_seq);
	if (rc != 0) {
		OBD_FREE_PTR(osd->od_cl_seq);
		osd->od_cl_seq = NULL;
	}

	RETURN(rc);
}

static int osd_prepare(const struct lu_env *env, struct lu_device *pdev,
		       struct lu_device *dev)
{
	struct osd_device	*osd = osd_dev(dev);
	int			 rc = 0;

	if (osd->od_quota_slave != NULL) {
		/* set up quota slave objects */
		rc = qsd_prepare(env, osd->od_quota_slave);
		if (rc != 0)
			RETURN(rc);
	}

	rc = osd_fid_init(env, osd);

	RETURN(rc);
}

struct lu_device_operations osd_lu_ops = {
	.ldo_object_alloc	= osd_object_alloc,
	.ldo_process_config	= osd_process_config,
	.ldo_recovery_complete	= osd_recovery_complete,
	.ldo_prepare		= osd_prepare,
};

static void osd_type_start(struct lu_device_type *t)
{
}

static void osd_type_stop(struct lu_device_type *t)
{
}

int osd_fid_alloc(const struct lu_env *env, struct obd_export *exp,
		  struct lu_fid *fid, struct md_op_data *op_data)
{
	struct osd_device *osd = osd_dev(exp->exp_obd->obd_lu_dev);

	return seq_client_alloc_fid(env, osd->od_cl_seq, fid);
}

static struct lu_device_type_operations osd_device_type_ops = {
	.ldto_init		= osd_type_init,
	.ldto_fini		= osd_type_fini,

	.ldto_start		= osd_type_start,
	.ldto_stop		= osd_type_stop,

	.ldto_device_alloc	= osd_device_alloc,
	.ldto_device_free	= osd_device_free,

	.ldto_device_init	= osd_device_init,
	.ldto_device_fini	= osd_device_fini
};

static struct lu_device_type osd_device_type = {
	.ldt_tags     = LU_DEVICE_DT,
	.ldt_name     = LUSTRE_OSD_ZFS_NAME,
	.ldt_ops      = &osd_device_type_ops,
	.ldt_ctx_tags = LCT_LOCAL
};

static struct obd_ops osd_obd_device_ops = {
	.o_owner	= THIS_MODULE,
	.o_connect	= osd_obd_connect,
	.o_disconnect	= osd_obd_disconnect,
	.o_fid_alloc	= osd_fid_alloc
};

static int __init osd_init(void)
{
	int rc;

	rc = osd_options_init();
	if (rc)
		return rc;

	rc = lu_kmem_init(osd_caches);
	if (rc)
		return rc;

	rc = class_register_type(&osd_obd_device_ops, NULL, true, NULL,
				 LUSTRE_OSD_ZFS_NAME, &osd_device_type);
	if (rc)
		lu_kmem_fini(osd_caches);
	return rc;
}

static void __exit osd_exit(void)
{
	class_unregister_type(LUSTRE_OSD_ZFS_NAME);
	lu_kmem_fini(osd_caches);
}

extern unsigned int osd_oi_count;
CFS_MODULE_PARM(osd_oi_count, "i", int, 0444,
		"Number of Object Index containers to be created, "
		"it's only valid for new filesystem.");

MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_ZFS_NAME")");
MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");

module_init(osd_init);
module_exit(osd_exit);