*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2014, Intel Corporation.
+ * Copyright (c) 2012, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_OSD
-#include <lustre_ver.h>
#include <libcfs/libcfs.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <obd_class.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
-#include <lustre_param.h>
+#include <uapi/linux/lustre/lustre_param.h>
#include <md_object.h>
#include "osd_internal.h"
struct lu_env env;
int rc;
+ LASSERT(site->ls_obj_hash);
+
rc = lu_env_init(&env, LCT_SHRINKER);
if (rc) {
CERROR("%s: can't initialize shrinker env: rc = %d\n",
dt_txn_hook_commit(th);
/* call per-transaction callbacks if any */
- list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage)
+ list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
+ LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
+ "commit callback entry: magic=%x name='%s'\n",
+ dcb->dcb_magic, dcb->dcb_name);
+ list_del_init(&dcb->dcb_linkage);
dcb->dcb_func(NULL, th, dcb, error);
+ }
/* Unlike ldiskfs, zfs updates space accounting at commit time.
* As a consequence, op_end is called only now to inform the quota slave
th->th_dev = NULL;
lu_context_exit(&th->th_ctx);
lu_context_fini(&th->th_ctx);
- thandle_put(&oh->ot_super);
+ OBD_FREE_PTR(oh);
EXIT;
}
static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
{
- struct osd_thandle *oh;
+ struct osd_thandle *oh = container_of0(th, struct osd_thandle,
+ ot_super);
- oh = container_of0(th, struct osd_thandle, ot_super);
- list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
+ LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
+ LASSERT(dcb->dcb_func != NULL);
+ if (dcb->dcb_flags & DCB_TRANS_STOP)
+ list_add(&dcb->dcb_linkage, &oh->ot_stop_dcb_list);
+ else
+ list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
return 0;
}
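+
+/*
+ * Usage sketch (illustrative, not part of this patch): a caller fills in
+ * a struct dt_txn_commit_cb and registers it before the transaction
+ * stops, e.g. via the generic dt_trans_cb_add() wrapper:
+ *
+ *	dcb->dcb_func  = my_commit_cb;	(my_commit_cb is hypothetical)
+ *	dcb->dcb_magic = TRANS_COMMIT_CB_MAGIC;
+ *	dcb->dcb_flags = 0;		(or DCB_TRANS_STOP for stop-time)
+ *	rc = dt_trans_cb_add(th, dcb);
+ *
+ * which dispatches to osd_trans_cb_add() above and queues the entry on
+ * the matching list.
+ */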
struct osd_device *osd = osd_dt_dev(d);
/* dmu will call commit callback with error code during abort */
if (!lu_device_is_md(&d->dd_lu_dev) && rc == -ENOSPC)
- CERROR("%s: failed to start transaction due to ENOSPC. "
- "Metadata overhead is underestimated or "
- "grant_ratio is too low.\n", osd->od_svname);
+ CERROR("%s: failed to start transaction due to ENOSPC"
+ "\n", osd->od_svname);
else
CERROR("%s: can't assign tx: rc = %d\n",
osd->od_svname, rc);
RETURN(rc);
}
+static void osd_unlinked_list_emptify(const struct lu_env *env,
+ struct osd_device *osd,
+ struct list_head *list, bool free)
+{
+ struct osd_object *obj;
+ uint64_t oid;
+
+ while (!list_empty(list)) {
+ obj = list_entry(list->next,
+ struct osd_object, oo_unlinked_linkage);
+ LASSERT(obj->oo_dn != NULL);
+ oid = obj->oo_dn->dn_object;
+
+ list_del_init(&obj->oo_unlinked_linkage);
+ if (free)
+ (void)osd_unlinked_object_free(env, osd, oid);
+ }
+}
+
+static void osd_trans_stop_cb(struct osd_thandle *oth, int result)
+{
+ struct dt_txn_commit_cb *dcb;
+ struct dt_txn_commit_cb *tmp;
+
+ /* call per-transaction stop callbacks if any */
+ list_for_each_entry_safe(dcb, tmp, &oth->ot_stop_dcb_list,
+ dcb_linkage) {
+ LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
+ "commit callback entry: magic=%x name='%s'\n",
+ dcb->dcb_magic, dcb->dcb_name);
+ list_del_init(&dcb->dcb_linkage);
+ dcb->dcb_func(NULL, &oth->ot_super, dcb, result);
+ }
+}
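+
+/*
+ * Note: unlike the commit callbacks on ot_dcb_list, which run later from
+ * ZFS commit (txg sync) context once the transaction is durable, these
+ * stop callbacks run synchronously from osd_trans_stop() before
+ * dmu_tx_commit() is called.
+ */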
+
/*
* Concurrency: shouldn't matter.
*/
struct thandle *th)
{
struct osd_device *osd = osd_dt_dev(th->th_dev);
+ bool sync = (th->th_sync != 0);
struct osd_thandle *oh;
+ struct list_head unlinked;
uint64_t txg;
int rc;
ENTRY;
oh = container_of0(th, struct osd_thandle, ot_super);
+ INIT_LIST_HEAD(&unlinked);
+ list_splice_init(&oh->ot_unlinked_list, &unlinked);
+ /* reset OI cache for safety */
+ osd_oti_get(env)->oti_ins_cache_used = 0;
if (oh->ot_assigned == 0) {
LASSERT(oh->ot_tx);
dmu_tx_abort(oh->ot_tx);
- osd_object_sa_dirty_rele(oh);
+ osd_object_sa_dirty_rele(env, oh);
+ osd_unlinked_list_emptify(env, osd, &unlinked, false);
/* there won't be any commit, release reserved quota space now,
* if any */
qsd_op_end(env, osd->od_quota_slave, &oh->ot_quota_trans);
- thandle_put(&oh->ot_super);
+ OBD_FREE_PTR(oh);
RETURN(0);
}
- /* When doing our own inode accounting, the ZAPs storing per-uid/gid
- * usage are updated at operation execution time, so we should call
- * qsd_op_end() straight away. Otherwise (for blk accounting maintained
- * by ZFS and when #inode is estimated from #blks) accounting is updated
- * at commit time and the call to qsd_op_end() must be delayed */
- if (oh->ot_quota_trans.lqt_id_cnt > 0 &&
- !oh->ot_quota_trans.lqt_ids[0].lqi_is_blk &&
- !osd->od_quota_iused_est)
- qsd_op_end(env, osd->od_quota_slave, &oh->ot_quota_trans);
-
rc = dt_txn_hook_stop(env, th);
if (rc != 0)
CDEBUG(D_OTHER, "%s: transaction hook failed: rc = %d\n",
osd->od_svname, rc);
+ osd_trans_stop_cb(oh, rc);
+
LASSERT(oh->ot_tx);
txg = oh->ot_tx->tx_txg;
- osd_object_sa_dirty_rele(oh);
+ osd_object_sa_dirty_rele(env, oh);
+ /* XXX: Once dmu_tx_commit() is called, oh/th may have been freed
+ * by osd_trans_commit_cb already. */
dmu_tx_commit(oh->ot_tx);
- if (th->th_sync)
+ osd_unlinked_list_emptify(env, osd, &unlinked, true);
+
+ if (sync)
txg_wait_synced(dmu_objset_pool(osd->od_os), txg);
RETURN(rc);
dmu_tx_t *tx;
ENTRY;
+ if (dt->dd_rdonly) {
+ CERROR("%s: someone try to start transaction under "
+ "readonly mode, should be disabled.\n",
+ osd_name(osd_dt_dev(dt)));
+ dump_stack();
+ RETURN(ERR_PTR(-EROFS));
+ }
+
tx = dmu_tx_create(osd->od_os);
if (tx == NULL)
RETURN(ERR_PTR(-ENOMEM));
oh->ot_tx = tx;
INIT_LIST_HEAD(&oh->ot_dcb_list);
+ INIT_LIST_HEAD(&oh->ot_stop_dcb_list);
+ INIT_LIST_HEAD(&oh->ot_unlinked_list);
INIT_LIST_HEAD(&oh->ot_sa_list);
- sema_init(&oh->ot_sa_lock, 1);
memset(&oh->ot_quota_trans, 0, sizeof(oh->ot_quota_trans));
th = &oh->ot_super;
th->th_dev = dt;
th->th_result = 0;
th->th_tags = LCT_TX_HANDLE;
- atomic_set(&th->th_refc, 1);
- th->th_alloc_size = sizeof(*oh);
RETURN(th);
}
-/* Estimate the number of objects from a number of blocks */
-uint64_t osd_objs_count_estimate(uint64_t refdbytes, uint64_t usedobjs,
- uint64_t nrblocks)
+/* Estimate the total number of objects from a number of blocks */
+uint64_t osd_objs_count_estimate(uint64_t usedbytes, uint64_t usedobjs,
+ uint64_t nrblocks, uint64_t est_maxblockshift)
{
- uint64_t est_objs, est_refdblocks, est_usedobjs;
-
- /* Compute an nrblocks estimate based on the actual number of
- * dnodes that could fit in the space. Since we don't know the
- * overhead associated with each dnode (xattrs, SAs, VDEV overhead,
- * etc) just using DNODE_SHIFT isn't going to give a good estimate.
- * Instead, compute an estimate based on the average space usage per
- * dnode, with an upper and lower cap.
+ uint64_t est_totobjs, est_usedblocks, est_usedobjs;
+
+ /*
+ * If blocksize is below 64KB (e.g. MDT with recordsize=4096) then
+ * bump the free dnode estimate to assume blocks are at least 64KB in
+ * case of a directory-heavy MDT (at 32KB/directory).
+ */
+ if (est_maxblockshift < 16) {
+ nrblocks >>= (16 - est_maxblockshift);
+ est_maxblockshift = 16;
+ }
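+
+ /*
+ * Worked example (illustrative): with recordsize=4096 this function
+ * is called with est_maxblockshift = 12, so nrblocks is scaled down
+ * by 2^(16 - 12) = 16 and the math below proceeds as if every block
+ * were 64KB.
+ */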
+
+ /*
+ * Estimate the total number of dnodes from the total blocks count
+ * and the space used per dnode. Since we don't know the overhead
+ * associated with each dnode (xattrs, SAs, VDEV overhead, etc.)
+ * just using DNODE_SHIFT isn't going to give a good estimate.
+ * Instead, compute the current average space usage per dnode, with
+ * an upper and lower cap to avoid unrealistic estimates.
*
* In case there aren't many dnodes or blocks used yet, add a small
- * correction factor using OSD_DNODE_EST_SHIFT. This correction
- * factor gradually disappears as the number of real dnodes grows.
- * This also avoids the need to check for divide-by-zero later.
+ * correction factor (OSD_DNODE_EST_{COUNT,BLKSHIFT}). This factor
+ * gradually disappears as the number of real dnodes grows. It also
+ * avoids the need to check for divide-by-zero when computing dn_per_block.
*/
CLASSERT(OSD_DNODE_MIN_BLKSHIFT > 0);
CLASSERT(OSD_DNODE_EST_BLKSHIFT > 0);
- est_refdblocks = (refdbytes >> SPA_MAXBLOCKSHIFT) +
- (OSD_DNODE_EST_COUNT >> OSD_DNODE_EST_BLKSHIFT);
- est_usedobjs = usedobjs + OSD_DNODE_EST_COUNT;
-
- /* Average space/dnode more than maximum dnode size, use max dnode
- * size to estimate free dnodes from adjusted free blocks count.
- * OSTs typically use more than one block dnode so this case applies. */
- if (est_usedobjs <= est_refdblocks * 2) {
- est_objs = nrblocks;
-
- /* Average space/dnode smaller than min dnode size (probably due to
- * metadnode compression), use min dnode size to estimate the number of
- * objects.
- * An MDT typically uses below 512 bytes/dnode so this case applies. */
- } else if (est_usedobjs >= (est_refdblocks << OSD_DNODE_MIN_BLKSHIFT)) {
- est_objs = nrblocks << OSD_DNODE_MIN_BLKSHIFT;
-
- /* Between the extremes, we try to use the average size of
- * existing dnodes to compute the number of dnodes that fit
- * into nrblocks:
+ est_usedblocks = ((OSD_DNODE_EST_COUNT << OSD_DNODE_EST_BLKSHIFT) +
+ usedbytes) >> est_maxblockshift;
+ est_usedobjs = OSD_DNODE_EST_COUNT + usedobjs;
+
+ if (est_usedobjs <= est_usedblocks) {
+ /*
+ * Average space/dnode more than maximum block size, use max
+ * block size to estimate free dnodes from adjusted free blocks
+ * count. OSTs typically use multiple blocks per dnode so this
+ * case applies.
+ */
+ est_totobjs = nrblocks;
+
+ } else if (est_usedobjs >= (est_usedblocks << OSD_DNODE_MIN_BLKSHIFT)) {
+ /*
+ * Average space/dnode smaller than min dnode size (probably
+ * due to metadnode compression), use min dnode size to
+ * estimate object count. MDTs may use only one block per dnode
+ * so this case applies.
+ */
+ est_totobjs = nrblocks << OSD_DNODE_MIN_BLKSHIFT;
+
+ } else {
+ /*
+ * Between the extremes, use average space per existing dnode
+ * to compute the number of dnodes that will fit into nrblocks:
*
- * est_objs = nrblocks * (est_usedobjs / est_refblocks);
+ * est_totobjs = nrblocks * (est_usedobjs / est_usedblocks)
*
- * but this may overflow 64 bits or become 0 if not handled well
+ * this may overflow 64 bits or become 0 if not handled well.
*
- * We know nrblocks is below (64 - 17 = 47) bits from
- * SPA_MAXBLKSHIFT, and est_usedobjs is under 48 bits due to
- * DN_MAX_OBJECT_SHIFT, which means that multiplying them may
- * get as large as 2 ^ 95.
+ * We know nrblocks fits in (64 - blkbits) bits, and
+ * est_usedobjs is under 48 bits due to DN_MAX_OBJECT_SHIFT,
+ * which means that multiplying them may get as large as
+ * 2 ^ 96 for the minimum blocksize of 64KB allowed above.
*
- * We also know (est_usedobjs / est_refdblocks) is between 2 and
- * 256, due to above checks, we can safely compute this first.
+ * The ratio of dnodes per block (est_usedobjs / est_usedblocks)
+ * is under 2^(blkbits - DNODE_SHIFT) = blocksize / 512 due to
+ * the limit checks above, so we can safely compute this first.
* We care more about accuracy on the MDT (many dnodes/block)
* which is good because this is where truncation errors are
- * smallest. This adds 8 bits to nrblocks so we can use 7 bits
- * to compute a fixed-point fraction and nrblocks can still fit
- * in 64 bits. */
- } else {
- unsigned dnodes_per_block = (est_usedobjs << 7)/est_refdblocks;
+ * smallest. Since both nrblocks and dn_per_block are a
+ * function of blkbits, their product is at most:
+ *
+ * 2^(64 - blkbits) * 2^(blkbits - DNODE_SHIFT) = 2^(64 - 9)
+ *
+ * so we can safely use 7 bits to compute a fixed-point
+ * fraction and est_totobjs can still fit in 64 bits.
+ */
+ unsigned dn_per_block = (est_usedobjs << 7) / est_usedblocks;
- est_objs = (nrblocks * dnodes_per_block) >> 7;
+ est_totobjs = (nrblocks * dn_per_block) >> 7;
}
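+
+ /*
+ * Worked example (illustrative), for the fixed-point branch above
+ * where the ratio falls between the two caps: est_usedblocks = 1000
+ * and est_usedobjs = 16000 give dn_per_block = (16000 << 7) / 1000
+ * = 2048, i.e. 16 dnodes/block in 7-bit fixed point; with
+ * nrblocks = 5000 free blocks, est_totobjs = (5000 * 2048) >> 7
+ * = 80000 objects.
+ */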
- return est_objs;
+ return est_totobjs;
}
-static int osd_objset_statfs(struct objset *os, struct obd_statfs *osfs)
+static int osd_objset_statfs(struct osd_device *osd, struct obd_statfs *osfs)
{
- uint64_t refdbytes, availbytes, usedobjs, availobjs;
+ struct objset *os = osd->od_os;
+ uint64_t usedbytes, availbytes, usedobjs, availobjs;
uint64_t est_availobjs;
uint64_t reserved;
+ uint64_t bshift;
+
+ dmu_objset_space(os, &usedbytes, &availbytes, &usedobjs, &availobjs);
- dmu_objset_space(os, &refdbytes, &availbytes, &usedobjs,
- &availobjs);
+ memset(osfs, 0, sizeof(*osfs));
+
+ /* We're a zfs filesystem. */
+ osfs->os_type = UBERBLOCK_MAGIC;
/*
* ZFS allows multiple block sizes. For statfs, Linux makes no
* largest possible block size as IO size for the optimum performance
* and scale the free and used blocks count appropriately.
*/
- osfs->os_bsize = 1ULL << SPA_MAXBLOCKSHIFT;
+ osfs->os_bsize = osd->od_max_blksz;
+ bshift = fls64(osfs->os_bsize) - 1;
- osfs->os_blocks = (refdbytes + availbytes) >> SPA_MAXBLOCKSHIFT;
- osfs->os_bfree = availbytes >> SPA_MAXBLOCKSHIFT;
+ osfs->os_blocks = (usedbytes + availbytes) >> bshift;
+ osfs->os_bfree = availbytes >> bshift;
osfs->os_bavail = osfs->os_bfree; /* no extra root reservation */
/* Take replication (i.e. number of copies) into account */
- osfs->os_bavail /= os->os_copies;
+ if (os->os_copies != 0)
+ osfs->os_bavail /= os->os_copies;
/*
* Reserve some space so we don't run into ENOSPC due to grants not
* Rather than report this via os_bavail (which makes users unhappy if
* they can't fill the filesystem 100%), reduce os_blocks as well.
*
- * Reserve 0.78% of total space, at least 4MB for small filesystems,
+ * Reserve 0.78% of total space, at least 16MB for small filesystems,
* for internal files to be created/unlinked when space is tight.
*/
- CLASSERT(OSD_STATFS_RESERVED_BLKS > 0);
- if (likely(osfs->os_blocks >=
- OSD_STATFS_RESERVED_BLKS << OSD_STATFS_RESERVED_SHIFT))
+ CLASSERT(OSD_STATFS_RESERVED_SIZE > 0);
+ reserved = OSD_STATFS_RESERVED_SIZE >> bshift;
+ if (likely(osfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
reserved = osfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
- else
- reserved = OSD_STATFS_RESERVED_BLKS;
osfs->os_blocks -= reserved;
- osfs->os_bfree -= MIN(reserved, osfs->os_bfree);
- osfs->os_bavail -= MIN(reserved, osfs->os_bavail);
+ osfs->os_bfree -= min(reserved, osfs->os_bfree);
+ osfs->os_bavail -= min(reserved, osfs->os_bavail);
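+
+ /*
+ * E.g. (illustrative): on a 1TB filesystem with 128KB blocks,
+ * os_blocks = 2^23, so reserved = os_blocks >> 7 = 65536 blocks
+ * (8GB, i.e. 1/128 = 0.78% of the total), assuming
+ * OSD_STATFS_RESERVED_SHIFT is 7.
+ */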
/*
* The availobjs value returned from dmu_objset_space() is largely
* issues like how much space is actually available in the pool.
* Compute a better estimate in osd_objs_count_estimate().
*/
- est_availobjs = osd_objs_count_estimate(refdbytes, usedobjs,
- osfs->os_bfree);
+ est_availobjs = osd_objs_count_estimate(usedbytes, usedobjs,
+ osfs->os_bfree, bshift);
osfs->os_ffree = min(availobjs, est_availobjs);
osfs->os_files = osfs->os_ffree + usedobjs;
/* ZFS XXX: fill in backing dataset FSID/UUID
memcpy(osfs->os_fsid, .... );*/
- /* We're a zfs filesystem. */
- osfs->os_type = UBERBLOCK_MAGIC;
-
- /* ZFS XXX: fill in appropriate OS_STATE_{DEGRADED,READONLY} flags
- osfs->os_state = vf_to_stf(vfsp->vfs_flag);
- if (sb->s_flags & MS_RDONLY)
- osfs->os_state = OS_STATE_READONLY;
- */
-
osfs->os_namelen = MAXNAMELEN;
osfs->os_maxbytes = OBD_OBJECT_EOF;
+ if (!spa_writeable(dmu_objset_spa(os)) ||
+ osd->od_dev_set_rdonly || osd->od_prop_rdonly)
+ osfs->os_state |= OS_STATE_READONLY;
+
return 0;
}
int osd_statfs(const struct lu_env *env, struct dt_device *d,
struct obd_statfs *osfs)
{
- struct osd_device *osd = osd_dt_dev(d);
int rc;
ENTRY;
- rc = osd_objset_statfs(osd->od_os, osfs);
+ rc = osd_objset_statfs(osd_dt_dev(d), osfs);
if (unlikely(rc != 0))
RETURN(rc);
RETURN(0);
}
-static int osd_blk_insert_cost(void)
+static int osd_blk_insert_cost(struct osd_device *osd)
{
- int max_blockshift, nr_blkptrshift;
+ int max_blockshift, nr_blkptrshift, bshift;
/* max_blockshift is the log2 of the number of blocks needed to reach
* the maximum filesize (that is to say, 2^64) */
- max_blockshift = DN_MAX_OFFSET_SHIFT - SPA_MAXBLOCKSHIFT;
+ bshift = osd_spa_maxblockshift(dmu_objset_spa(osd->od_os));
+ max_blockshift = DN_MAX_OFFSET_SHIFT - bshift;
/* nr_blkptrshift is the log2 of the number of block pointers that can
* be stored in an indirect block */
*/
param->ddp_max_name_len = MAXNAMELEN;
param->ddp_max_nlink = 1 << 31; /* it's 8 bytes on disk */
- param->ddp_block_shift = 12; /* XXX */
+ param->ddp_symlink_max = PATH_MAX;
param->ddp_mount_type = LDD_MT_ZFS;
param->ddp_mntopts = MNTOPT_USERXATTR;
/* for maxbytes, report same value as ZPL */
param->ddp_maxbytes = MAX_LFS_FILESIZE;
- /* Default reserved fraction of the available space that should be kept
- * for error margin. Unfortunately, there are many factors that can
- * impact the overhead with zfs, so let's be very cautious for now and
- * reserve 20% of the available space which is not given out as grant.
- * This tunable can be changed on a live system via procfs if needed. */
- param->ddp_grant_reserved = 20;
-
/* inodes are dynamically allocated, so we report the per-inode space
* consumption to upper layers. This static value is not really accurate
* and we should use the same logic as in osd_objset_statfs() to
* estimate the real size consumed by an object */
param->ddp_inodespace = OSD_DNODE_EST_COUNT;
- /* per-fragment overhead to be used by the client code */
- param->ddp_grant_frag = osd_blk_insert_cost();
+ /* Although ZFS isn't an extent-based filesystem, the metadata overhead
+ * (i.e. 7 levels of indirect blocks, see osd_blk_insert_cost()) should
+ * not be accounted for every single new block insertion.
+ * Instead, the maximum extent size is set to the number of blocks that
+ * can fit into a single contiguous indirect block. There will be some
+ * cases where an extent crosses an indirect block boundary, but such an
+ * extent won't need 7 new levels of indirect blocks either, so the
+ * reservation still leaves enough room for the extra indirect block */
+ param->ddp_max_extent_blks =
+ (1 << (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT));
+ param->ddp_extent_tax = osd_blk_insert_cost(osd);
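+
+ /*
+ * For example (illustrative): with the common ZFS values
+ * DN_MAX_INDBLKSHIFT = 17 and SPA_BLKPTRSHIFT = 7, one indirect
+ * block holds 2^10 block pointers, so ddp_max_extent_blks = 1024.
+ */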
}
/*
*/
static int osd_sync(const struct lu_env *env, struct dt_device *d)
{
- struct osd_device *osd = osd_dt_dev(d);
- CDEBUG(D_CACHE, "syncing OSD %s\n", LUSTRE_OSD_ZFS_NAME);
- txg_wait_synced(dmu_objset_pool(osd->od_os), 0ULL);
- CDEBUG(D_CACHE, "synced OSD %s\n", LUSTRE_OSD_ZFS_NAME);
+ if (!d->dd_rdonly) {
+ struct osd_device *osd = osd_dt_dev(d);
+
+ CDEBUG(D_CACHE, "syncing OSD %s\n", LUSTRE_OSD_ZFS_NAME);
+ txg_wait_synced(dmu_objset_pool(osd->od_os), 0ULL);
+ CDEBUG(D_CACHE, "synced OSD %s\n", LUSTRE_OSD_ZFS_NAME);
+ }
+
return 0;
}
CERROR("%s: *** setting device %s read-only ***\n",
osd->od_svname, LUSTRE_OSD_ZFS_NAME);
- osd->od_rdonly = 1;
+ osd->od_dev_set_rdonly = 1;
spa_freeze(dmu_objset_spa(osd->od_os));
RETURN(0);
struct lu_context_key *key, void *data)
{
struct osd_thread_info *info = data;
+ struct osd_idmap_cache *idc = info->oti_ins_cache;
+ if (idc != NULL) {
+ LASSERT(info->oti_ins_cache_size > 0);
+ OBD_FREE(idc, sizeof(*idc) * info->oti_ins_cache_size);
+ info->oti_ins_cache = NULL;
+ info->oti_ins_cache_size = 0;
+ }
+ lu_buf_free(&info->oti_xattr_lbuf);
OBD_FREE_PTR(info);
}
static void osd_key_exit(const struct lu_context *ctx,
struct lu_context_key *key, void *data)
{
- struct osd_thread_info *info = data;
-
- memset(info, 0, sizeof(*info));
}
struct lu_context_key osd_key = {
/* shutdown quota slave instance associated with the device */
if (o->od_quota_slave != NULL) {
+ /* complete all in-flight callbacks */
+ osd_sync(env, &o->od_dt_dev);
+ txg_wait_callbacks(spa_get_dsl(dmu_objset_spa(o->od_os)));
qsd_fini(env, o->od_quota_slave);
o->od_quota_slave = NULL;
}
osd->od_xattr_in_sa = (newval == ZFS_XATTR_SA);
}
+static void osd_recordsize_changed_cb(void *arg, uint64_t newval)
+{
+ struct osd_device *osd = arg;
+
+ LASSERT(newval <= osd_spa_maxblocksize(dmu_objset_spa(osd->od_os)));
+ LASSERT(newval >= SPA_MINBLOCKSIZE);
+ LASSERT(ISP2(newval));
+
+ osd->od_max_blksz = newval;
+}
+
+static void osd_readonly_changed_cb(void *arg, uint64_t newval)
+{
+ struct osd_device *osd = arg;
+
+ osd->od_prop_rdonly = !!newval;
+}
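+
+/*
+ * These callbacks fire when a property of the backing dataset changes on
+ * a live pool, e.g. (illustrative) "zfs set readonly=on <dataset>" flips
+ * od_prop_rdonly and "zfs set recordsize=128K <dataset>" updates
+ * od_max_blksz.
+ */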
+
+/*
+ * This function unregisters all registered callbacks. It's harmless to
+ * unregister callbacks that were never registered, so it is also used to
+ * safely unwind a partially completed call to
+ * osd_objset_register_callbacks().
+ */
+static void osd_objset_unregister_callbacks(struct osd_device *o)
+{
+ struct dsl_dataset *ds = dmu_objset_ds(o->od_os);
+
+ (void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_XATTR),
+ osd_xattr_changed_cb, o);
+ (void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
+ osd_recordsize_changed_cb, o);
+ (void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_READONLY),
+ osd_readonly_changed_cb, o);
+
+ if (o->arc_prune_cb != NULL) {
+ arc_remove_prune_callback(o->arc_prune_cb);
+ o->arc_prune_cb = NULL;
+ }
+}
+
+/*
+ * Register the required callbacks to be notified when zfs properties
+ * are modified using the 'zfs(8)' command line utility.
+ */
+static int osd_objset_register_callbacks(struct osd_device *o)
+{
+ struct dsl_dataset *ds = dmu_objset_ds(o->od_os);
+ dsl_pool_t *dp = dmu_objset_pool(o->od_os);
+ int rc;
+
+ LASSERT(ds);
+ LASSERT(dp);
+
+ dsl_pool_config_enter(dp, FTAG);
+ rc = -dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_XATTR),
+ osd_xattr_changed_cb, o);
+ if (rc)
+ GOTO(err, rc);
+
+ rc = -dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
+ osd_recordsize_changed_cb, o);
+ if (rc)
+ GOTO(err, rc);
+
+ rc = -dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_READONLY),
+ osd_readonly_changed_cb, o);
+ if (rc)
+ GOTO(err, rc);
+
+ o->arc_prune_cb = arc_add_prune_callback(arc_prune_func, o);
+err:
+ dsl_pool_config_exit(dp, FTAG);
+ if (rc)
+ osd_objset_unregister_callbacks(o);
+
+ RETURN(rc);
+}
+
static int osd_objset_open(struct osd_device *o)
{
uint64_t version = ZPL_VERSION;
- uint64_t sa_obj;
+ uint64_t sa_obj, unlink_obj;
int rc;
ENTRY;
- rc = -dmu_objset_own(o->od_mntdev, DMU_OST_ZFS, B_FALSE, o, &o->od_os);
+ rc = -osd_dmu_objset_own(o->od_mntdev, DMU_OST_ZFS,
+ o->od_dt_dev.dd_rdonly ? B_TRUE : B_FALSE,
+ B_FALSE, o, &o->od_os);
+
if (rc) {
+ CERROR("%s: can't open %s\n", o->od_svname, o->od_mntdev);
o->od_os = NULL;
- goto out;
+
+ GOTO(out, rc);
}
/* Check ZFS version */
GOTO(out, rc);
}
+ rc = -zap_lookup(o->od_os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET,
+ 8, 1, &unlink_obj);
+ if (rc) {
+ CERROR("%s: lookup for %s failed: rc = %d\n",
+ o->od_svname, ZFS_UNLINKED_SET, rc);
+ GOTO(out, rc);
+ }
+
/* Check that user/group usage tracking is supported */
if (!dmu_objset_userused_enabled(o->od_os) ||
DMU_USERUSED_DNODE(o->od_os)->dn_type != DMU_OT_USERGROUP_USED ||
DMU_GROUPUSED_DNODE(o->od_os)->dn_type != DMU_OT_USERGROUP_USED) {
CERROR("%s: Space accounting not supported by this target, "
"aborting\n", o->od_svname);
- GOTO(out, -ENOTSUPP);
+ GOTO(out, rc = -ENOTSUPP);
+ }
+
+ rc = __osd_obj2dnode(o->od_os, unlink_obj, &o->od_unlinked);
+ if (rc) {
+ CERROR("%s: can't get dnode for unlinked: rc = %d\n",
+ o->od_svname, rc);
+ GOTO(out, rc);
}
out:
- if (rc != 0 && o->od_os != NULL)
- dmu_objset_disown(o->od_os, o);
+ if (rc != 0 && o->od_os != NULL) {
+ osd_dmu_objset_disown(o->od_os, B_FALSE, o);
+ o->od_os = NULL;
+ }
RETURN(rc);
}
+int osd_unlinked_object_free(const struct lu_env *env, struct osd_device *osd,
+ uint64_t oid)
+{
+ char *key = osd_oti_get(env)->oti_str;
+ int rc;
+ dmu_tx_t *tx;
+
+ if (osd->od_dt_dev.dd_rdonly) {
+ CERROR("%s: someone try to free objects under "
+ "readonly mode, should be disabled.\n", osd_name(osd));
+ dump_stack();
+
+ return -EROFS;
+ }
+
+ rc = -dmu_free_long_range(osd->od_os, oid, 0, DMU_OBJECT_END);
+ if (rc != 0) {
+ CWARN("%s: Cannot truncate %llu: rc = %d\n",
+ osd->od_svname, oid, rc);
+ return rc;
+ }
+
+ tx = dmu_tx_create(osd->od_os);
+ dmu_tx_hold_free(tx, oid, 0, DMU_OBJECT_END);
+ osd_tx_hold_zap(tx, osd->od_unlinked->dn_object, osd->od_unlinked,
+ FALSE, NULL);
+ rc = -dmu_tx_assign(tx, TXG_WAIT);
+ if (rc != 0) {
+ CWARN("%s: Cannot assign tx for %llu: rc = %d\n",
+ osd->od_svname, oid, rc);
+ goto failed;
+ }
+
+ snprintf(key, sizeof(osd_oti_get(env)->oti_str), "%llx", oid);
+ rc = osd_zap_remove(osd, osd->od_unlinked->dn_object,
+ osd->od_unlinked, key, tx);
+ if (rc != 0) {
+ CWARN("%s: Cannot remove %llu from unlinked set: rc = %d\n",
+ osd->od_svname, oid, rc);
+ goto failed;
+ }
+
+ rc = -dmu_object_free(osd->od_os, oid, tx);
+ if (rc != 0) {
+ CWARN("%s: Cannot free %llu: rc = %d\n",
+ osd->od_svname, oid, rc);
+ goto failed;
+ }
+ dmu_tx_commit(tx);
+
+ return 0;
+
+failed:
+ LASSERT(rc != 0);
+ dmu_tx_abort(tx);
+
+ return rc;
+}
+
+static void
+osd_unlinked_drain(const struct lu_env *env, struct osd_device *osd)
+{
+ zap_cursor_t zc;
+ zap_attribute_t *za = &osd_oti_get(env)->oti_za;
+
+ zap_cursor_init(&zc, osd->od_os, osd->od_unlinked->dn_object);
+
+ while (zap_cursor_retrieve(&zc, za) == 0) {
+ /* If the object cannot be freed, leave it in the unlinked set
+ * until the OSD is mounted again, when osd_unlinked_drain()
+ * will be called. */
+ if (osd_unlinked_object_free(env, osd, za->za_first_integer))
+ break;
+ zap_cursor_advance(&zc);
+ }
+
+ zap_cursor_fini(&zc);
+}
+
static int osd_mount(const struct lu_env *env,
struct osd_device *o, struct lustre_cfg *cfg)
{
- struct dsl_dataset *ds;
char *mntdev = lustre_cfg_string(cfg, 1);
+ char *str = lustre_cfg_string(cfg, 2);
char *svname = lustre_cfg_string(cfg, 4);
- dmu_buf_t *rootdb;
- dsl_pool_t *dp;
+ dnode_t *rootdn;
const char *opts;
int rc;
ENTRY;
if (rc >= sizeof(o->od_svname))
RETURN(-E2BIG);
+ str = strstr(str, ":");
+ if (str) {
+ unsigned long flags;
+
+ rc = kstrtoul(str + 1, 10, &flags);
+ if (rc)
+ RETURN(-EINVAL);
+
+ if (flags & LMD_FLG_DEV_RDONLY) {
+ o->od_dt_dev.dd_rdonly = 1;
+ LCONSOLE_WARN("%s: set dev_rdonly on this device\n",
+ svname);
+ }
+ }
+
if (server_name_is_ost(o->od_svname))
o->od_is_ost = 1;
rc = osd_objset_open(o);
- if (rc) {
- CERROR("%s: can't open objset %s: rc = %d\n", o->od_svname,
- o->od_mntdev, rc);
+ if (rc)
RETURN(rc);
- }
- ds = dmu_objset_ds(o->od_os);
- dp = dmu_objset_pool(o->od_os);
- LASSERT(ds);
- LASSERT(dp);
- dsl_pool_config_enter(dp, FTAG);
- rc = dsl_prop_register(ds, "xattr", osd_xattr_changed_cb, o);
- dsl_pool_config_exit(dp, FTAG);
+ o->od_xattr_in_sa = B_TRUE;
+ o->od_max_blksz = osd_spa_maxblocksize(o->od_os->os_spa);
+
+ rc = __osd_obj2dnode(o->od_os, o->od_rootid, &rootdn);
if (rc)
- CWARN("%s: can't register xattr callback, ignore: rc=%d\n",
- o->od_svname, rc);
+ GOTO(err, rc);
+ o->od_root = rootdn->dn_object;
+ osd_dnode_rele(rootdn);
- rc = __osd_obj2dbuf(env, o->od_os, o->od_rootid, &rootdb);
- if (rc) {
- CERROR("%s: obj2dbuf() failed: rc = %d\n", o->od_svname, rc);
- dmu_objset_disown(o->od_os, o);
- o->od_os = NULL;
- RETURN(rc);
- }
+ rc = __osd_obj2dnode(o->od_os, DMU_USERUSED_OBJECT,
+ &o->od_userused_dn);
+ if (rc)
+ GOTO(err, rc);
- o->od_root = rootdb->db_object;
- sa_buf_rele(rootdb, osd_obj_tag);
+ rc = __osd_obj2dnode(o->od_os, DMU_GROUPUSED_OBJECT,
+ &o->od_groupused_dn);
+ if (rc)
+ GOTO(err, rc);
/* 1. initialize oi before any file create or file open */
rc = osd_oi_init(env, o);
if (rc)
GOTO(err, rc);
- rc = osd_convert_root_to_new_seq(env, o);
+ rc = osd_objset_register_callbacks(o);
if (rc)
GOTO(err, rc);
- /* Use our own ZAP for inode accounting by default, this can be changed
- * via procfs to estimate the inode usage from the block usage */
- o->od_quota_iused_est = 0;
-
rc = osd_procfs_init(o, o->od_svname);
if (rc)
GOTO(err, rc);
- o->arc_prune_cb = arc_add_prune_callback(arc_prune_func, o);
-
/* initialize quota slave instance */
o->od_quota_slave = qsd_init(env, o->od_svname, &o->od_dt_dev,
o->od_proc_entry);
GOTO(err, rc);
}
+#ifdef HAVE_DMU_USEROBJ_ACCOUNTING
+ if (!osd_dmu_userobj_accounting_available(o))
+ CWARN("%s: dnode accounting not enabled: "
+ "enable feature@userobj_accounting in pool\n",
+ o->od_mntdev);
+#endif
+
/* parse mount option "noacl", and enable ACL by default */
opts = lustre_cfg_string(cfg, 3);
if (opts == NULL || strstr(opts, "noacl") == NULL)
o->od_posix_acl = 1;
+ osd_unlinked_drain(env, o);
err:
+ if (rc && o->od_os) {
+ osd_dmu_objset_disown(o->od_os, B_FALSE, o);
+ o->od_os = NULL;
+ }
+
RETURN(rc);
}
CERROR("%s: lost %d pinned dbuf(s)\n", o->od_svname,
atomic_read(&o->od_zerocopy_pin));
+ if (o->od_unlinked) {
+ osd_dnode_rele(o->od_unlinked);
+ o->od_unlinked = NULL;
+ }
+ if (o->od_userused_dn) {
+ osd_dnode_rele(o->od_userused_dn);
+ o->od_userused_dn = NULL;
+ }
+ if (o->od_groupused_dn) {
+ osd_dnode_rele(o->od_groupused_dn);
+ o->od_groupused_dn = NULL;
+ }
+
if (o->od_os != NULL) {
- /* force a txg sync to get all commit callbacks */
- txg_wait_synced(dmu_objset_pool(o->od_os), 0ULL);
+ if (!o->od_dt_dev.dd_rdonly)
+ /* force a txg sync to get all commit callbacks */
+ txg_wait_synced(dmu_objset_pool(o->od_os), 0ULL);
/* close the object set */
- dmu_objset_disown(o->od_os, o);
-
+ osd_dmu_objset_disown(o->od_os, B_FALSE, o);
o->od_os = NULL;
}
struct lu_device_type *type,
struct lustre_cfg *cfg)
{
- struct osd_device *dev;
- int rc;
+ struct osd_device *dev;
+ struct osd_seq_list *osl;
+ int rc;
OBD_ALLOC_PTR(dev);
if (dev == NULL)
return ERR_PTR(-ENOMEM);
+ osl = &dev->od_seq_list;
+ INIT_LIST_HEAD(&osl->osl_seq_list);
+ rwlock_init(&osl->osl_seq_list_lock);
+ sema_init(&osl->osl_seq_init_sem, 1);
+
rc = dt_device_init(&dev->od_dt_dev, type);
if (rc == 0) {
rc = osd_device_init0(env, dev, cfg);
struct lu_device *d)
{
struct osd_device *o = osd_dev(d);
- struct dsl_dataset *ds;
int rc;
ENTRY;
- osd_shutdown(env, o);
- osd_oi_fini(env, o);
-
if (o->od_os) {
- ds = dmu_objset_ds(o->od_os);
- rc = dsl_prop_unregister(ds, "xattr", osd_xattr_changed_cb, o);
- if (rc)
- CERROR("%s: dsl_prop_unregister xattr error %d\n",
- o->od_svname, rc);
- if (o->arc_prune_cb != NULL) {
- arc_remove_prune_callback(o->arc_prune_cb);
- o->arc_prune_cb = NULL;
+ osd_objset_unregister_callbacks(o);
+ if (!o->od_dt_dev.dd_rdonly) {
+ osd_sync(env, lu2dt_dev(d));
+ txg_wait_callbacks(
+ spa_get_dsl(dmu_objset_spa(o->od_os)));
}
- osd_sync(env, lu2dt_dev(d));
- txg_wait_callbacks(spa_get_dsl(dmu_objset_spa(o->od_os)));
}
+ /* now that all callbacks have completed we can clean up the rest */
+ osd_shutdown(env, o);
+ osd_oi_fini(env, o);
+
rc = osd_procfs_fini(o);
if (rc) {
CERROR("proc fini error %d\n", rc);
LASSERT(&o->od_dt_dev);
rc = class_process_proc_param(PARAM_OSD, lprocfs_osd_obd_vars,
cfg, &o->od_dt_dev);
- if (rc > 0 || rc == -ENOSYS)
+ if (rc > 0 || rc == -ENOSYS) {
rc = class_process_proc_param(PARAM_OST,
lprocfs_osd_obd_vars,
cfg, &o->od_dt_dev);
+ if (rc > 0)
+ rc = 0;
+ }
break;
}
default:
.o_fid_alloc = osd_fid_alloc
};
-int __init osd_init(void)
+static int __init osd_init(void)
{
int rc;
return rc;
}
-void __exit osd_exit(void)
+static void __exit osd_exit(void)
{
class_unregister_type(LUSTRE_OSD_ZFS_NAME);
lu_kmem_fini(osd_caches);
}
extern unsigned int osd_oi_count;
-CFS_MODULE_PARM(osd_oi_count, "i", int, 0444,
- "Number of Object Index containers to be created, "
- "it's only valid for new filesystem.");
+module_param(osd_oi_count, int, 0444);
+MODULE_PARM_DESC(osd_oi_count, "Number of Object Index containers to be created; only valid for a new filesystem.");
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_ZFS_NAME")");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-cfs_module(osd, LUSTRE_VERSION_STRING, osd_init, osd_exit);
+module_init(osd_init);
+module_exit(osd_exit);