LU-8147 osd-zfs: fix osd_mount error path
fs/lustre-release.git: lustre/osd-zfs/osd_handler.c

diff --git a/lustre/osd-zfs/osd_handler.c b/lustre/osd-zfs/osd_handler.c
index 62965a7..334310d 100644
--- a/lustre/osd-zfs/osd_handler.c
+++ b/lustre/osd-zfs/osd_handler.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2012, 2014, Intel Corporation.
+ * Copyright (c) 2012, 2015, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -74,6 +74,9 @@ struct lu_context_key osd_key;
 /* Slab for OSD object allocation */
 struct kmem_cache *osd_object_kmem;
 
+/* Slab to allocate osd_zap_it */
+struct kmem_cache *osd_zapit_cachep;
+
 static struct lu_kmem_descr osd_caches[] = {
        {
                .ckd_cache = &osd_object_kmem,
@@ -81,6 +84,11 @@ static struct lu_kmem_descr osd_caches[] = {
                .ckd_size  = sizeof(struct osd_object)
        },
        {
+               .ckd_cache = &osd_zapit_cachep,
+               .ckd_name  = "osd_zapit_cache",
+               .ckd_size  = sizeof(struct osd_zap_it)
+       },
+       {
                .ckd_cache = NULL
        }
 };
@@ -158,17 +166,22 @@ static void osd_trans_commit_cb(void *cb_data, int error)
        th->th_dev = NULL;
        lu_context_exit(&th->th_ctx);
        lu_context_fini(&th->th_ctx);
-       thandle_put(&oh->ot_super);
+       OBD_FREE_PTR(oh);
 
        EXIT;
 }
 
 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
 {
-       struct osd_thandle *oh;
+       struct osd_thandle *oh = container_of0(th, struct osd_thandle,
+                                              ot_super);
 
-       oh = container_of0(th, struct osd_thandle, ot_super);
-       list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
+       LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
+       LASSERT(dcb->dcb_func != NULL);
+       if (dcb->dcb_flags & DCB_TRANS_STOP)
+               list_add(&dcb->dcb_linkage, &oh->ot_stop_dcb_list);
+       else
+               list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
 
        return 0;
 }
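
Editor's note: the hunk above splits transaction callbacks into two lists. Entries flagged DCB_TRANS_STOP go to ot_stop_dcb_list and are run synchronously from osd_trans_stop() (via osd_trans_stop_cb() further down, before dmu_tx_commit()), while the rest stay on ot_dcb_list and fire later from the asynchronous DMU commit callback. Below is a minimal standalone sketch of that two-list dispatch; the types and helpers are simplified, hypothetical stand-ins for struct dt_txn_commit_cb and struct osd_thandle, not the real Lustre API.

#include <stdio.h>

/* Hypothetical, simplified stand-ins; only the two-list routing is modelled. */
#define DCB_TRANS_STOP 0x1

struct cb {
	struct cb *next;
	unsigned int flags;
	void (*func)(struct cb *cb, int err);
	const char *name;
};

struct handle {
	struct cb *dcb_list;      /* run from the DMU commit callback */
	struct cb *stop_dcb_list; /* run synchronously at trans stop  */
};

static void cb_add(struct handle *h, struct cb *cb)
{
	struct cb **list = (cb->flags & DCB_TRANS_STOP) ?
			   &h->stop_dcb_list : &h->dcb_list;

	cb->next = *list;
	*list = cb;
}

static void run_list(struct cb **list, int err)
{
	while (*list != NULL) {
		struct cb *cb = *list;

		*list = cb->next;        /* analogue of list_del_init() */
		cb->func(cb, err);
	}
}

static void show(struct cb *cb, int err)
{
	printf("%s: err=%d\n", cb->name, err);
}

int main(void)
{
	struct handle h = { NULL, NULL };
	struct cb at_stop   = { NULL, DCB_TRANS_STOP, show, "at-stop" };
	struct cb at_commit = { NULL, 0, show, "at-commit" };

	cb_add(&h, &at_stop);
	cb_add(&h, &at_commit);

	run_list(&h.stop_dcb_list, 0); /* like osd_trans_stop_cb()   */
	run_list(&h.dcb_list, 0);      /* like osd_trans_commit_cb() */
	return 0;
}

The point of the split is that stop-time callbacks run in the caller's context while the handle still exists, instead of later in the txg sync path.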
@@ -201,9 +214,8 @@ static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
                struct osd_device *osd = osd_dt_dev(d);
                /* dmu will call commit callback with error code during abort */
                if (!lu_device_is_md(&d->dd_lu_dev) && rc == -ENOSPC)
-                       CERROR("%s: failed to start transaction due to ENOSPC. "
-                              "Metadata overhead is underestimated or "
-                              "grant_ratio is too low.\n", osd->od_svname);
+                       CERROR("%s: failed to start transaction due to ENOSPC"
+                              "\n", osd->od_svname);
                else
                        CERROR("%s: can't assign tx: rc = %d\n",
                               osd->od_svname, rc);
@@ -219,6 +231,42 @@ static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
        RETURN(rc);
 }
 
+static int osd_unlinked_object_free(struct osd_device *osd, uint64_t oid);
+
+static void osd_unlinked_list_emptify(struct osd_device *osd,
+                                     struct list_head *list, bool free)
+{
+       struct osd_object *obj;
+       uint64_t           oid;
+
+       while (!list_empty(list)) {
+               obj = list_entry(list->next,
+                                struct osd_object, oo_unlinked_linkage);
+               LASSERT(obj->oo_db != NULL);
+               oid = obj->oo_db->db_object;
+
+               list_del_init(&obj->oo_unlinked_linkage);
+               if (free)
+                       (void)osd_unlinked_object_free(osd, oid);
+       }
+}
+
+static void osd_trans_stop_cb(struct osd_thandle *oth, int result)
+{
+       struct dt_txn_commit_cb *dcb;
+       struct dt_txn_commit_cb *tmp;
+
+       /* call per-transaction stop callbacks if any */
+       list_for_each_entry_safe(dcb, tmp, &oth->ot_stop_dcb_list,
+                                dcb_linkage) {
+               LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
+                        "commit callback entry: magic=%x name='%s'\n",
+                        dcb->dcb_magic, dcb->dcb_name);
+               list_del_init(&dcb->dcb_linkage);
+               dcb->dcb_func(NULL, &oth->ot_super, dcb, result);
+       }
+}
+
 /*
  * Concurrency: shouldn't matter.
  */
@@ -226,21 +274,26 @@ static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
                          struct thandle *th)
 {
        struct osd_device       *osd = osd_dt_dev(th->th_dev);
+       bool                     sync = (th->th_sync != 0);
        struct osd_thandle      *oh;
+       struct list_head         unlinked;
        uint64_t                 txg;
        int                      rc;
        ENTRY;
 
        oh = container_of0(th, struct osd_thandle, ot_super);
+       INIT_LIST_HEAD(&unlinked);
+       list_splice_init(&oh->ot_unlinked_list, &unlinked);
 
        if (oh->ot_assigned == 0) {
                LASSERT(oh->ot_tx);
                dmu_tx_abort(oh->ot_tx);
                osd_object_sa_dirty_rele(oh);
+               osd_unlinked_list_emptify(osd, &unlinked, false);
                /* there won't be any commit, release reserved quota space now,
                 * if any */
                qsd_op_end(env, osd->od_quota_slave, &oh->ot_quota_trans);
-               thandle_put(&oh->ot_super);
+               OBD_FREE_PTR(oh);
                RETURN(0);
        }
 
@@ -259,13 +312,19 @@ static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
                CDEBUG(D_OTHER, "%s: transaction hook failed: rc = %d\n",
                       osd->od_svname, rc);
 
+       osd_trans_stop_cb(oh, rc);
+
        LASSERT(oh->ot_tx);
        txg = oh->ot_tx->tx_txg;
 
        osd_object_sa_dirty_rele(oh);
+       /* XXX: Once dmu_tx_commit() is called, oh/th could have been freed
+        * by osd_trans_commit_cb() already. */
        dmu_tx_commit(oh->ot_tx);
 
-       if (th->th_sync)
+       osd_unlinked_list_emptify(osd, &unlinked, true);
+
+       if (sync)
                txg_wait_synced(dmu_objset_pool(osd->od_os), txg);
 
        RETURN(rc);
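
Editor's note: notice the ordering in osd_trans_stop() above. th_sync is copied into a local and the unlinked list is spliced off the handle before dmu_tx_commit(), because (as the XXX comment says) the commit callback may free oh/th before dmu_tx_commit() even returns. A small standalone sketch of that "copy out, commit, then finish from locals" pattern follows; all names are hypothetical stand-ins, not the real Lustre or DMU API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct osd_thandle. */
struct handle {
	bool    sync;         /* th_sync          */
	int    *unlinked;     /* ot_unlinked_list */
	size_t  nr_unlinked;
};

/* Stand-in for dmu_tx_commit(): the commit callback may free the handle. */
static void commit(struct handle *h)
{
	free(h);
}

static void trans_stop(struct handle *h)
{
	/* Copy everything still needed out of 'h' before committing. */
	bool    sync = h->sync;
	int    *unlinked = h->unlinked;
	size_t  nr = h->nr_unlinked;

	h->unlinked = NULL;
	h->nr_unlinked = 0;

	commit(h);                        /* 'h' may be gone after this */

	for (size_t i = 0; i < nr; i++)   /* osd_unlinked_list_emptify() analogue */
		printf("freeing object %d\n", unlinked[i]);
	free(unlinked);

	if (sync)
		printf("txg_wait_synced()\n");
}

int main(void)
{
	struct handle *h = malloc(sizeof(*h));
	int *objs = malloc(2 * sizeof(int));

	objs[0] = 42;
	objs[1] = 43;
	h->sync = true;
	h->unlinked = objs;
	h->nr_unlinked = 2;
	trans_stop(h);
	return 0;
}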
@@ -293,6 +352,8 @@ static struct thandle *osd_trans_create(const struct lu_env *env,
 
        oh->ot_tx = tx;
        INIT_LIST_HEAD(&oh->ot_dcb_list);
+       INIT_LIST_HEAD(&oh->ot_stop_dcb_list);
+       INIT_LIST_HEAD(&oh->ot_unlinked_list);
        INIT_LIST_HEAD(&oh->ot_sa_list);
        sema_init(&oh->ot_sa_lock, 1);
        memset(&oh->ot_quota_trans, 0, sizeof(oh->ot_quota_trans));
@@ -300,14 +361,12 @@ static struct thandle *osd_trans_create(const struct lu_env *env,
        th->th_dev = dt;
        th->th_result = 0;
        th->th_tags = LCT_TX_HANDLE;
-       atomic_set(&th->th_refc, 1);
-       th->th_alloc_size = sizeof(*oh);
        RETURN(th);
 }
 
 /* Estimate the number of objects from a number of blocks */
 uint64_t osd_objs_count_estimate(uint64_t refdbytes, uint64_t usedobjs,
-                                uint64_t nrblocks)
+                                uint64_t nrblocks, uint64_t est_maxblockshift)
 {
        uint64_t est_objs, est_refdblocks, est_usedobjs;
 
@@ -326,7 +385,7 @@ uint64_t osd_objs_count_estimate(uint64_t refdbytes, uint64_t usedobjs,
        CLASSERT(OSD_DNODE_MIN_BLKSHIFT > 0);
        CLASSERT(OSD_DNODE_EST_BLKSHIFT > 0);
 
-       est_refdblocks = (refdbytes >> SPA_MAXBLOCKSHIFT) +
+       est_refdblocks = (refdbytes >> est_maxblockshift) +
                         (OSD_DNODE_EST_COUNT >> OSD_DNODE_EST_BLKSHIFT);
        est_usedobjs   = usedobjs + OSD_DNODE_EST_COUNT;
 
@@ -371,14 +430,20 @@ uint64_t osd_objs_count_estimate(uint64_t refdbytes, uint64_t usedobjs,
        return est_objs;
 }
 
-static int osd_objset_statfs(struct objset *os, struct obd_statfs *osfs)
+static int osd_objset_statfs(struct osd_device *osd, struct obd_statfs *osfs)
 {
+       struct objset *os = osd->od_os;
        uint64_t refdbytes, availbytes, usedobjs, availobjs;
        uint64_t est_availobjs;
        uint64_t reserved;
+       uint64_t bshift;
 
-       dmu_objset_space(os, &refdbytes, &availbytes, &usedobjs,
-                        &availobjs);
+       dmu_objset_space(os, &refdbytes, &availbytes, &usedobjs, &availobjs);
+
+       memset(osfs, 0, sizeof(*osfs));
+
+       /* We're a zfs filesystem. */
+       osfs->os_type = UBERBLOCK_MAGIC;
 
        /*
         * ZFS allows multiple block sizes.  For statfs, Linux makes no
@@ -388,10 +453,11 @@ static int osd_objset_statfs(struct objset *os, struct obd_statfs *osfs)
         * largest possible block size as IO size for the optimum performance
         * and scale the free and used blocks count appropriately.
         */
-       osfs->os_bsize = 1ULL << SPA_MAXBLOCKSHIFT;
+       osfs->os_bsize = osd->od_max_blksz;
+       bshift = fls64(osfs->os_bsize) - 1;
 
-       osfs->os_blocks = (refdbytes + availbytes) >> SPA_MAXBLOCKSHIFT;
-       osfs->os_bfree = availbytes >> SPA_MAXBLOCKSHIFT;
+       osfs->os_blocks = (refdbytes + availbytes) >> bshift;
+       osfs->os_bfree = availbytes >> bshift;
        osfs->os_bavail = osfs->os_bfree; /* no extra root reservation */
 
        /* Take replication (i.e. number of copies) into account */
@@ -403,19 +469,17 @@ static int osd_objset_statfs(struct objset *os, struct obd_statfs *osfs)
         * Rather than report this via os_bavail (which makes users unhappy if
         * they can't fill the filesystem 100%), reduce os_blocks as well.
         *
-        * Reserve 0.78% of total space, at least 4MB for small filesystems,
+        * Reserve 0.78% of total space, at least 16MB for small filesystems,
         * for internal files to be created/unlinked when space is tight.
         */
-       CLASSERT(OSD_STATFS_RESERVED_BLKS > 0);
-       if (likely(osfs->os_blocks >=
-                       OSD_STATFS_RESERVED_BLKS << OSD_STATFS_RESERVED_SHIFT))
+       CLASSERT(OSD_STATFS_RESERVED_SIZE > 0);
+       reserved = OSD_STATFS_RESERVED_SIZE >> bshift;
+       if (likely(osfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
                reserved = osfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
-       else
-               reserved = OSD_STATFS_RESERVED_BLKS;
 
        osfs->os_blocks -= reserved;
-       osfs->os_bfree  -= MIN(reserved, osfs->os_bfree);
-       osfs->os_bavail -= MIN(reserved, osfs->os_bavail);
+       osfs->os_bfree  -= min(reserved, osfs->os_bfree);
+       osfs->os_bavail -= min(reserved, osfs->os_bavail);
 
        /*
         * The availobjs value returned from dmu_objset_space() is largely
@@ -425,7 +489,7 @@ static int osd_objset_statfs(struct objset *os, struct obd_statfs *osfs)
         * Compute a better estimate in udmu_objs_count_estimate().
         */
        est_availobjs = osd_objs_count_estimate(refdbytes, usedobjs,
-                                               osfs->os_bfree);
+                                               osfs->os_bfree, bshift);
 
        osfs->os_ffree = min(availobjs, est_availobjs);
        osfs->os_files = osfs->os_ffree + usedobjs;
@@ -433,18 +497,13 @@ static int osd_objset_statfs(struct objset *os, struct obd_statfs *osfs)
        /* ZFS XXX: fill in backing dataset FSID/UUID
           memcpy(osfs->os_fsid, .... );*/
 
-       /* We're a zfs filesystem. */
-       osfs->os_type = UBERBLOCK_MAGIC;
-
-       /* ZFS XXX: fill in appropriate OS_STATE_{DEGRADED,READONLY} flags
-          osfs->os_state = vf_to_stf(vfsp->vfs_flag);
-          if (sb->s_flags & MS_RDONLY)
-          osfs->os_state = OS_STATE_READONLY;
-        */
-
        osfs->os_namelen = MAXNAMELEN;
        osfs->os_maxbytes = OBD_OBJECT_EOF;
 
+       if (!spa_writeable(dmu_objset_spa(os)) ||
+           osd->od_dev_set_rdonly || osd->od_prop_rdonly)
+               osfs->os_state |= OS_STATE_READONLY;
+
        return 0;
 }
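
Editor's note: the statfs math above now scales by the dataset's actual largest block size (bshift = fls64(osfs->os_bsize) - 1) rather than the compile-time SPA_MAXBLOCKSHIFT, and the reservation floor is expressed in bytes (OSD_STATFS_RESERVED_SIZE) instead of a block count. The standalone worked example below mirrors that arithmetic; the constant values are assumptions for illustration only (the comment above says the floor is 16MB, and a reservation of ~0.78% corresponds to a shift of 7, i.e. 1/128).

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed values, for illustration only; the real constants live in the
 * osd-zfs headers (16MB floor, shift of 7 == 1/128 ~= 0.78%). */
#define OSD_STATFS_RESERVED_SIZE  (16ULL << 20)
#define OSD_STATFS_RESERVED_SHIFT 7

/* userspace stand-in for the kernel's fls64() */
static int fls64_(uint64_t v)
{
	return v ? 64 - __builtin_clzll(v) : 0;
}

int main(void)
{
	uint64_t bsize      = 128 * 1024;        /* od_max_blksz (recordsize) */
	uint64_t refdbytes  = 200ULL << 30;      /* 200 GiB referenced        */
	uint64_t availbytes = 800ULL << 30;      /* 800 GiB available         */
	uint64_t bshift     = fls64_(bsize) - 1; /* 17 for 128K blocks        */

	uint64_t blocks   = (refdbytes + availbytes) >> bshift;
	uint64_t bfree    = availbytes >> bshift;
	uint64_t reserved = OSD_STATFS_RESERVED_SIZE >> bshift;

	if (blocks >= reserved << OSD_STATFS_RESERVED_SHIFT)
		reserved = blocks >> OSD_STATFS_RESERVED_SHIFT; /* ~0.78% */

	blocks -= reserved;
	bfree  -= reserved < bfree ? reserved : bfree;

	printf("os_blocks=%" PRIu64 " os_bfree=%" PRIu64 " (reserved %" PRIu64
	       " blocks of %" PRIu64 " bytes)\n", blocks, bfree, reserved, bsize);
	return 0;
}

With a 128K recordsize and 1TiB of space this reserves 65536 blocks (8GiB, about 0.78% of the pool); on very small filesystems the same code falls back to the 16MB floor.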
 
@@ -454,11 +513,10 @@ static int osd_objset_statfs(struct objset *os, struct obd_statfs *osfs)
 int osd_statfs(const struct lu_env *env, struct dt_device *d,
               struct obd_statfs *osfs)
 {
-       struct osd_device *osd = osd_dt_dev(d);
        int                rc;
        ENTRY;
 
-       rc = osd_objset_statfs(osd->od_os, osfs);
+       rc = osd_objset_statfs(osd_dt_dev(d), osfs);
        if (unlikely(rc != 0))
                RETURN(rc);
 
@@ -468,13 +526,14 @@ int osd_statfs(const struct lu_env *env, struct dt_device *d,
        RETURN(0);
 }
 
-static int osd_blk_insert_cost(void)
+static int osd_blk_insert_cost(struct osd_device *osd)
 {
-       int max_blockshift, nr_blkptrshift;
+       int max_blockshift, nr_blkptrshift, bshift;
 
        /* max_blockshift is the log2 of the number of blocks needed to reach
         * the maximum filesize (that's to say 2^64) */
-       max_blockshift = DN_MAX_OFFSET_SHIFT - SPA_MAXBLOCKSHIFT;
+       bshift = osd_spa_maxblockshift(dmu_objset_spa(osd->od_os));
+       max_blockshift = DN_MAX_OFFSET_SHIFT - bshift;
 
        /* nr_blkptrshift is the log2 of the number of block pointers that can
         * be stored in an indirect block */
@@ -502,7 +561,7 @@ static void osd_conf_get(const struct lu_env *env,
         */
        param->ddp_max_name_len = MAXNAMELEN;
        param->ddp_max_nlink    = 1 << 31; /* it's 8byte on a disk */
-       param->ddp_block_shift  = 12; /* XXX */
+       param->ddp_symlink_max  = PATH_MAX;
        param->ddp_mount_type   = LDD_MT_ZFS;
 
        param->ddp_mntopts      = MNTOPT_USERXATTR;
@@ -513,20 +572,22 @@ static void osd_conf_get(const struct lu_env *env,
        /* for maxbytes, report same value as ZPL */
        param->ddp_maxbytes     = MAX_LFS_FILESIZE;
 
-       /* Default reserved fraction of the available space that should be kept
-        * for error margin. Unfortunately, there are many factors that can
-        * impact the overhead with zfs, so let's be very cautious for now and
-        * reserve 20% of the available space which is not given out as grant.
-        * This tunable can be changed on a live system via procfs if needed. */
-       param->ddp_grant_reserved = 20;
-
        /* inodes are dynamically allocated, so we report the per-inode space
         * consumption to upper layers. This static value is not really accurate
         * and we should use the same logic as in udmu_objset_statfs() to
         * estimate the real size consumed by an object */
        param->ddp_inodespace = OSD_DNODE_EST_COUNT;
-       /* per-fragment overhead to be used by the client code */
-       param->ddp_grant_frag = osd_blk_insert_cost();
+       /* Although ZFS isn't an extent-based filesystem, the metadata overhead
+        * (i.e. 7 levels of indirect blocks, see osd_blk_insert_cost()) should
+        * not be accounted for every single new block insertion.
+        * Instead, the maximum extent size is set to the number of blocks that
+        * can fit into a single contiguous indirect block. There would be some
+        * cases where this crosses indirect blocks, but it also won't have 7
+        * new levels of indirect blocks in that case either, so it will still
+        * have enough reserved space for the extra indirect block */
+       param->ddp_max_extent_blks =
+               (1 << (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT));
+       param->ddp_extent_tax = osd_blk_insert_cost(osd);
 }
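
Editor's note: ddp_max_extent_blks above is simply the number of block pointers one maximal indirect block can hold. With the usual ZFS values, assumed here for illustration (DN_MAX_INDBLKSHIFT = 17 for a 128KB indirect block, SPA_BLKPTRSHIFT = 7 for 128-byte block pointers), that is 1 << 10 = 1024, so the per-extent tax from osd_blk_insert_cost() is charged once per 1024 newly written blocks rather than once per block.

#include <stdio.h>

int main(void)
{
	/* Assumed ZFS constants, for illustration only: a 128KB maximum
	 * indirect block (DN_MAX_INDBLKSHIFT = 17) holding 128-byte block
	 * pointers (SPA_BLKPTRSHIFT = 7). */
	int dn_max_indblkshift = 17;
	int spa_blkptrshift = 7;
	int max_extent_blks = 1 << (dn_max_indblkshift - spa_blkptrshift);

	/* => 1024: the extent tax is charged once per 1024 new blocks. */
	printf("ddp_max_extent_blks = %d\n", max_extent_blks);
	return 0;
}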
 
 /*
@@ -568,30 +629,12 @@ static int osd_ro(const struct lu_env *env, struct dt_device *d)
 
        CERROR("%s: *** setting device %s read-only ***\n",
               osd->od_svname, LUSTRE_OSD_ZFS_NAME);
-       osd->od_rdonly = 1;
+       osd->od_dev_set_rdonly = 1;
        spa_freeze(dmu_objset_spa(osd->od_os));
 
        RETURN(0);
 }
 
-/*
- * Concurrency: serialization provided by callers.
- */
-static int osd_init_capa_ctxt(const struct lu_env *env, struct dt_device *d,
-                             int mode, unsigned long timeout, __u32 alg,
-                             struct lustre_capa_key *keys)
-{
-       struct osd_device *dev = osd_dt_dev(d);
-       ENTRY;
-
-       dev->od_fl_capa = mode;
-       dev->od_capa_timeout = timeout;
-       dev->od_capa_alg = alg;
-       dev->od_capa_keys = keys;
-
-       RETURN(0);
-}
-
 static struct dt_device_operations osd_dt_ops = {
        .dt_root_get            = osd_root_get,
        .dt_statfs              = osd_statfs,
@@ -603,7 +646,6 @@ static struct dt_device_operations osd_dt_ops = {
        .dt_sync                = osd_sync,
        .dt_commit_async        = osd_commit_async,
        .dt_ro                  = osd_ro,
-       .dt_init_capa_ctxt      = osd_init_capa_ctxt,
 };
 
 /*
@@ -656,6 +698,16 @@ struct lu_context_key osd_key = {
        .lct_exit = osd_key_exit
 };
 
+static void osd_fid_fini(const struct lu_env *env, struct osd_device *osd)
+{
+       if (osd->od_cl_seq == NULL)
+               return;
+
+       seq_client_fini(osd->od_cl_seq);
+       OBD_FREE_PTR(osd->od_cl_seq);
+       osd->od_cl_seq = NULL;
+}
+
 static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
 {
        ENTRY;
@@ -666,6 +718,8 @@ static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
                o->od_quota_slave = NULL;
        }
 
+       osd_fid_fini(env, o);
+
        RETURN(0);
 }
 
@@ -676,6 +730,84 @@ static void osd_xattr_changed_cb(void *arg, uint64_t newval)
        osd->od_xattr_in_sa = (newval == ZFS_XATTR_SA);
 }
 
+static void osd_recordsize_changed_cb(void *arg, uint64_t newval)
+{
+       struct osd_device *osd = arg;
+
+       LASSERT(newval <= osd_spa_maxblocksize(dmu_objset_spa(osd->od_os)));
+       LASSERT(newval >= SPA_MINBLOCKSIZE);
+       LASSERT(ISP2(newval));
+
+       osd->od_max_blksz = newval;
+}
+
+static void osd_readonly_changed_cb(void *arg, uint64_t newval)
+{
+       struct osd_device *osd = arg;
+
+       osd->od_prop_rdonly = !!newval;
+}
+
+/*
+ * This function unregisters all registered callbacks.  It's harmless to
+ * unregister callbacks that were never registered so it is used to safely
+ * unwind a partially completed call to osd_objset_register_callbacks().
+ */
+static void osd_objset_unregister_callbacks(struct osd_device *o)
+{
+       struct dsl_dataset      *ds = dmu_objset_ds(o->od_os);
+
+       (void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_XATTR),
+                                  osd_xattr_changed_cb, o);
+       (void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
+                                  osd_recordsize_changed_cb, o);
+       (void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_READONLY),
+                                  osd_readonly_changed_cb, o);
+
+       if (o->arc_prune_cb != NULL) {
+               arc_remove_prune_callback(o->arc_prune_cb);
+               o->arc_prune_cb = NULL;
+       }
+}
+
+/*
+ * Register the required callbacks to be notified when zfs properties
+ * are modified using the 'zfs(8)' command line utility.
+ */
+static int osd_objset_register_callbacks(struct osd_device *o)
+{
+       struct dsl_dataset      *ds = dmu_objset_ds(o->od_os);
+       dsl_pool_t              *dp = dmu_objset_pool(o->od_os);
+       int                     rc;
+
+       LASSERT(ds);
+       LASSERT(dp);
+
+       dsl_pool_config_enter(dp, FTAG);
+       rc = -dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_XATTR),
+                               osd_xattr_changed_cb, o);
+       if (rc)
+               GOTO(err, rc);
+
+       rc = -dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
+                               osd_recordsize_changed_cb, o);
+       if (rc)
+               GOTO(err, rc);
+
+       rc = -dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_READONLY),
+                               osd_readonly_changed_cb, o);
+       if (rc)
+               GOTO(err, rc);
+
+       o->arc_prune_cb = arc_add_prune_callback(arc_prune_func, o);
+err:
+       dsl_pool_config_exit(dp, FTAG);
+       if (rc)
+               osd_objset_unregister_callbacks(o);
+
+       RETURN(rc);
+}
+
 static int osd_objset_open(struct osd_device *o)
 {
        uint64_t        version = ZPL_VERSION;
@@ -719,6 +851,14 @@ static int osd_objset_open(struct osd_device *o)
                GOTO(out, rc);
        }
 
+       rc = -zap_lookup(o->od_os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET,
+                        8, 1, &o->od_unlinkedid);
+       if (rc) {
+               CERROR("%s: lookup for %s failed: rc = %d\n",
+                      o->od_svname, ZFS_UNLINKED_SET, rc);
+               GOTO(out, rc);
+       }
+
        /* Check that user/group usage tracking is supported */
        if (!dmu_objset_userused_enabled(o->od_os) ||
            DMU_USERUSED_DNODE(o->od_os)->dn_type != DMU_OT_USERGROUP_USED ||
@@ -729,20 +869,87 @@ static int osd_objset_open(struct osd_device *o)
        }
 
 out:
-       if (rc != 0 && o->od_os != NULL)
+       if (rc != 0 && o->od_os != NULL) {
                dmu_objset_disown(o->od_os, o);
+               o->od_os = NULL;
+       }
 
        RETURN(rc);
 }
 
+static int
+osd_unlinked_object_free(struct osd_device *osd, uint64_t oid)
+{
+       int       rc;
+       dmu_tx_t *tx;
+
+       rc = -dmu_free_long_range(osd->od_os, oid, 0, DMU_OBJECT_END);
+       if (rc != 0) {
+               CWARN("%s: Cannot truncate "LPU64": rc = %d\n",
+                     osd->od_svname, oid, rc);
+               return rc;
+       }
+
+       tx = dmu_tx_create(osd->od_os);
+       dmu_tx_hold_free(tx, oid, 0, DMU_OBJECT_END);
+       dmu_tx_hold_zap(tx, osd->od_unlinkedid, FALSE, NULL);
+       rc = -dmu_tx_assign(tx, TXG_WAIT);
+       if (rc != 0) {
+               CWARN("%s: Cannot assign tx for "LPU64": rc = %d\n",
+                     osd->od_svname, oid, rc);
+               goto failed;
+       }
+
+       rc = -zap_remove_int(osd->od_os, osd->od_unlinkedid, oid, tx);
+       if (rc != 0) {
+               CWARN("%s: Cannot remove "LPU64" from unlinked set: rc = %d\n",
+                     osd->od_svname, oid, rc);
+               goto failed;
+       }
+
+       rc = -dmu_object_free(osd->od_os, oid, tx);
+       if (rc != 0) {
+               CWARN("%s: Cannot free "LPU64": rc = %d\n",
+                     osd->od_svname, oid, rc);
+               goto failed;
+       }
+       dmu_tx_commit(tx);
+
+       return 0;
+
+failed:
+       LASSERT(rc != 0);
+       dmu_tx_abort(tx);
+
+       return rc;
+}
+
+static void
+osd_unlinked_drain(const struct lu_env *env, struct osd_device *osd)
+{
+       zap_cursor_t     zc;
+       zap_attribute_t *za = &osd_oti_get(env)->oti_za;
+
+       zap_cursor_init(&zc, osd->od_os, osd->od_unlinkedid);
+
+       while (zap_cursor_retrieve(&zc, za) == 0) {
+               /* If we cannot free the object, leave it in the unlinked
+                * set until the OSD is mounted again, when
+                * osd_unlinked_drain() will be called. */
+               if (osd_unlinked_object_free(osd, za->za_first_integer) != 0)
+                       break;
+               zap_cursor_advance(&zc);
+       }
+
+       zap_cursor_fini(&zc);
+}
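
Editor's note: osd_unlinked_object_free() above first truncates the object outside any transaction (dmu_free_long_range() may span several txgs), then removes the unlinked-set ZAP entry and frees the dnode inside one small assigned transaction. osd_unlinked_drain() walks the ZFS_UNLINKED_SET at mount and stops at the first failure, leaving the remaining entries for the next mount to retry. A trivial standalone sketch of that stop-at-first-failure drain loop, with a hypothetical helper instead of the real zap_cursor API:

#include <stdio.h>

/* Hypothetical stand-in for freeing one unlinked-set entry; the real work
 * is done by osd_unlinked_object_free() above. */
static int free_object(unsigned long long oid)
{
	if (oid == 1003)            /* pretend this one cannot be freed yet */
		return -1;
	printf("freed %llu\n", oid);
	return 0;
}

int main(void)
{
	unsigned long long unlinked_set[] = { 1001, 1002, 1003, 1004 };
	size_t i;

	/* Mirror of osd_unlinked_drain(): stop at the first failure and
	 * leave the remaining entries for the next mount to retry. */
	for (i = 0; i < sizeof(unlinked_set) / sizeof(unlinked_set[0]); i++)
		if (free_object(unlinked_set[i]) != 0)
			break;

	printf("%zu entries left in the unlinked set\n",
	       sizeof(unlinked_set) / sizeof(unlinked_set[0]) - i);
	return 0;
}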
+
 static int osd_mount(const struct lu_env *env,
                     struct osd_device *o, struct lustre_cfg *cfg)
 {
-       struct dsl_dataset      *ds;
        char                    *mntdev = lustre_cfg_string(cfg, 1);
        char                    *svname = lustre_cfg_string(cfg, 4);
        dmu_buf_t               *rootdb;
-       dsl_pool_t              *dp;
        const char              *opts;
        int                      rc;
        ENTRY;
@@ -765,30 +972,19 @@ static int osd_mount(const struct lu_env *env,
                o->od_is_ost = 1;
 
        rc = osd_objset_open(o);
-       if (rc) {
-               CERROR("%s: can't open objset %s: rc = %d\n", o->od_svname,
-                       o->od_mntdev, rc);
-               RETURN(rc);
-       }
+       if (rc)
+               GOTO(err, rc);
 
-       ds = dmu_objset_ds(o->od_os);
-       dp = dmu_objset_pool(o->od_os);
-       LASSERT(ds);
-       LASSERT(dp);
-       dsl_pool_config_enter(dp, FTAG);
-       rc = dsl_prop_register(ds, "xattr", osd_xattr_changed_cb, o);
-       dsl_pool_config_exit(dp, FTAG);
+       o->od_xattr_in_sa = B_TRUE;
+       o->od_max_blksz = SPA_OLD_MAXBLOCKSIZE;
+
+       rc = osd_objset_register_callbacks(o);
        if (rc)
-               CWARN("%s: can't register xattr callback, ignore: rc=%d\n",
-                     o->od_svname, rc);
+               GOTO(err, rc);
 
        rc = __osd_obj2dbuf(env, o->od_os, o->od_rootid, &rootdb);
-       if (rc) {
-               CERROR("%s: obj2dbuf() failed: rc = %d\n", o->od_svname, rc);
-               dmu_objset_disown(o->od_os, o);
-               o->od_os = NULL;
-               RETURN(rc);
-       }
+       if (rc)
+               GOTO(err, rc);
 
        o->od_root = rootdb->db_object;
        sa_buf_rele(rootdb, osd_obj_tag);
@@ -807,10 +1003,6 @@ static int osd_mount(const struct lu_env *env,
        if (rc)
                GOTO(err, rc);
 
-       rc = osd_convert_root_to_new_seq(env, o);
-       if (rc)
-               GOTO(err, rc);
-
        /* Use our own ZAP for inode accounting by default, this can be changed
         * via procfs to estimate the inode usage from the block usage */
        o->od_quota_iused_est = 0;
@@ -819,8 +1011,6 @@ static int osd_mount(const struct lu_env *env,
        if (rc)
                GOTO(err, rc);
 
-       o->arc_prune_cb = arc_add_prune_callback(arc_prune_func, o);
-
        /* initialize quota slave instance */
        o->od_quota_slave = qsd_init(env, o->od_svname, &o->od_dt_dev,
                                     o->od_proc_entry);
@@ -835,7 +1025,14 @@ static int osd_mount(const struct lu_env *env,
        if (opts == NULL || strstr(opts, "noacl") == NULL)
                o->od_posix_acl = 1;
 
+       osd_unlinked_drain(env, o);
 err:
+       if (rc) {
+               if (o->od_os)
+                       dmu_objset_disown(o->od_os, o);
+               o->od_os = NULL;
+       }
+
        RETURN(rc);
 }
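
Editor's note: this hunk is the heart of LU-8147. Every failure after osd_objset_open() now takes GOTO(err, rc) to a single label that disowns the objset (if it was opened) and clears od_os, instead of each call site carrying, or forgetting, its own cleanup. A compact standalone sketch of that single-exit shape follows; the names are hypothetical stand-ins for the objset and the later setup steps, not the real DMU calls.

#include <stdio.h>
#include <stdlib.h>

struct objset { int dummy; };

static struct objset *objset_open(void)          { return malloc(sizeof(struct objset)); }
static void objset_disown(struct objset *os)     { free(os); }
static int register_callbacks(struct objset *os) { (void)os; return 0; }
static int lookup_root(struct objset *os)        { (void)os; return -22; /* simulate failure */ }

static int mount_like(struct objset **osp)
{
	struct objset *os = NULL;
	int rc;

	os = objset_open();
	if (os == NULL) {
		rc = -12;                 /* -ENOMEM */
		goto err;
	}
	rc = register_callbacks(os);
	if (rc)
		goto err;
	rc = lookup_root(os);
	if (rc)
		goto err;

	*osp = os;
	return 0;

err:
	/* single cleanup point: disown the objset only if it was opened */
	if (os != NULL)
		objset_disown(os);
	*osp = NULL;
	return rc;
}

int main(void)
{
	struct objset *os;

	printf("mount rc = %d\n", mount_like(&os));
	return 0;
}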
 
@@ -881,10 +1078,6 @@ static int osd_device_init0(const struct lu_env *env,
        l->ld_ops = &osd_lu_ops;
        o->od_dt_dev.dd_ops = &osd_dt_ops;
 
-       o->od_capa_hash = init_capa_hash();
-       if (o->od_capa_hash == NULL)
-               GOTO(out, rc = -ENOMEM);
-
 out:
        RETURN(rc);
 }
@@ -896,13 +1089,19 @@ static struct lu_device *osd_device_alloc(const struct lu_env *env,
                                          struct lu_device_type *type,
                                          struct lustre_cfg *cfg)
 {
-       struct osd_device *dev;
-       int                rc;
+       struct osd_device       *dev;
+       struct osd_seq_list     *osl;
+       int                     rc;
 
        OBD_ALLOC_PTR(dev);
        if (dev == NULL)
                return ERR_PTR(-ENOMEM);
 
+       osl = &dev->od_seq_list;
+       INIT_LIST_HEAD(&osl->osl_seq_list);
+       rwlock_init(&osl->osl_seq_list_lock);
+       sema_init(&osl->osl_seq_init_sem, 1);
+
        rc = dt_device_init(&dev->od_dt_dev, type);
        if (rc == 0) {
                rc = osd_device_init0(env, dev, cfg);
@@ -927,7 +1126,6 @@ static struct lu_device *osd_device_free(const struct lu_env *env,
        struct osd_device *o = osd_dev(d);
        ENTRY;
 
-       cleanup_capa_hash(o->od_capa_hash);
        /* XXX: make osd top device in order to release reference */
        d->ld_site->ls_top_dev = d;
        lu_site_purge(env, d->ld_site, -1);
@@ -946,7 +1144,6 @@ static struct lu_device *osd_device_fini(const struct lu_env *env,
                                         struct lu_device *d)
 {
        struct osd_device *o = osd_dev(d);
-       struct dsl_dataset *ds;
        int                rc;
        ENTRY;
 
@@ -955,15 +1152,7 @@ static struct lu_device *osd_device_fini(const struct lu_env *env,
        osd_oi_fini(env, o);
 
        if (o->od_os) {
-               ds = dmu_objset_ds(o->od_os);
-               rc = dsl_prop_unregister(ds, "xattr", osd_xattr_changed_cb, o);
-               if (rc)
-                       CERROR("%s: dsl_prop_unregister xattr error %d\n",
-                               o->od_svname, rc);
-               if (o->arc_prune_cb != NULL) {
-                       arc_remove_prune_callback(o->arc_prune_cb);
-                       o->arc_prune_cb = NULL;
-               }
+               osd_objset_unregister_callbacks(o);
                osd_sync(env, lu2dt_dev(d));
                txg_wait_callbacks(spa_get_dsl(dmu_objset_spa(o->od_os)));
        }
@@ -1088,6 +1277,33 @@ static int osd_obd_disconnect(struct obd_export *exp)
        RETURN(rc);
 }
 
+static int osd_fid_init(const struct lu_env *env, struct osd_device *osd)
+{
+       struct seq_server_site  *ss = osd_seq_site(osd);
+       int                     rc;
+       ENTRY;
+
+       if (osd->od_is_ost || osd->od_cl_seq != NULL)
+               RETURN(0);
+
+       if (unlikely(ss == NULL))
+               RETURN(-ENODEV);
+
+       OBD_ALLOC_PTR(osd->od_cl_seq);
+       if (osd->od_cl_seq == NULL)
+               RETURN(-ENOMEM);
+
+       rc = seq_client_init(osd->od_cl_seq, NULL, LUSTRE_SEQ_METADATA,
+                            osd->od_svname, ss->ss_server_seq);
+
+       if (rc != 0) {
+               OBD_FREE_PTR(osd->od_cl_seq);
+               osd->od_cl_seq = NULL;
+       }
+
+       RETURN(rc);
+}
+
 static int osd_prepare(const struct lu_env *env, struct lu_device *pdev,
                       struct lu_device *dev)
 {
@@ -1095,9 +1311,14 @@ static int osd_prepare(const struct lu_env *env, struct lu_device *pdev,
        int                      rc = 0;
        ENTRY;
 
-       if (osd->od_quota_slave != NULL)
+       if (osd->od_quota_slave != NULL) {
                /* set up quota slave objects */
                rc = qsd_prepare(env, osd->od_quota_slave);
+               if (rc != 0)
+                       RETURN(rc);
+       }
+
+       rc = osd_fid_init(env, osd);
 
        RETURN(rc);
 }
@@ -1154,7 +1375,7 @@ static struct obd_ops osd_obd_device_ops = {
        .o_fid_alloc    = osd_fid_alloc
 };
 
-int __init osd_init(void)
+static int __init osd_init(void)
 {
        int rc;
 
@@ -1173,19 +1394,20 @@ int __init osd_init(void)
        return rc;
 }
 
-void __exit osd_exit(void)
+static void __exit osd_exit(void)
 {
        class_unregister_type(LUSTRE_OSD_ZFS_NAME);
        lu_kmem_fini(osd_caches);
 }
 
 extern unsigned int osd_oi_count;
-CFS_MODULE_PARM(osd_oi_count, "i", int, 0444,
-               "Number of Object Index containers to be created, "
-               "it's only valid for new filesystem.");
+module_param(osd_oi_count, int, 0444);
+MODULE_PARM_DESC(osd_oi_count, "Number of Object Index containers to be created; only valid for a new filesystem.");
 
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
 MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_ZFS_NAME")");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
 MODULE_LICENSE("GPL");
 
-cfs_module(osd, LUSTRE_VERSION_STRING, osd_init, osd_exit);
+module_init(osd_init);
+module_exit(osd_exit);