* Use is subject to license terms.
*/
/*
- * Copyright (c) 2011, 2012 Whamcloud, Inc.
+ * Copyright (c) 2012, 2013, Intel Corporation.
* Use is subject to license terms.
*
*/
/*
* Author: Johann Lombardi <johann@whamcloud.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_OSD
#include <lustre_ver.h>
#include <obd_class.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
+#include <lustre_param.h>
+#include <md_object.h>
#include "osd_internal.h"
static char *root_tag = "osd_mount, rootdb";
/* Slab for OSD object allocation */
-cfs_mem_cache_t *osd_object_kmem;
+struct kmem_cache *osd_object_kmem;
static struct lu_kmem_descr osd_caches[] = {
{
th->th_dev = NULL;
lu_context_exit(&th->th_ctx);
lu_context_fini(&th->th_ctx);
- OBD_FREE_PTR(oh);
+ thandle_put(&oh->ot_super);
EXIT;
}
/*
* Concurrency: shouldn't matter.
*/
-static int osd_trans_stop(const struct lu_env *env, struct thandle *th)
+static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
+ struct thandle *th)
{
struct osd_device *osd = osd_dt_dev(th->th_dev);
struct osd_thandle *oh;
/* there won't be any commit; release reserved quota space now,
* if any */
qsd_op_end(env, osd->od_quota_slave, &oh->ot_quota_trans);
- OBD_FREE_PTR(oh);
+ thandle_put(&oh->ot_super);
RETURN(0);
}
oh->ot_tx = tx;
CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
CFS_INIT_LIST_HEAD(&oh->ot_sa_list);
- cfs_sema_init(&oh->ot_sa_lock, 1);
+ sema_init(&oh->ot_sa_lock, 1);
memset(&oh->ot_quota_trans, 0, sizeof(oh->ot_quota_trans));
th = &oh->ot_super;
th->th_dev = dt;
th->th_result = 0;
th->th_tags = LCT_TX_HANDLE;
+ atomic_set(&th->th_refc, 1);
+ th->th_alloc_size = sizeof(*oh);
RETURN(th);
}
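/*
 * For illustration: the OBD_FREE_PTR() calls replaced above give way to
 * reference counting on the embedded thandle. A minimal sketch of the
 * get/put pair, assuming thandle_put() pairs with the
 * atomic_set(&th->th_refc, 1) done here and frees the handle via
 * th_alloc_size once the last reference drops (helper bodies are
 * illustrative, not the actual implementation):
 */
static inline void thandle_get_sketch(struct thandle *th)
{
	atomic_inc(&th->th_refc);
}

static inline void thandle_put_sketch(struct thandle *th)
{
	/* osd_thandle embeds the thandle as its first member, so freeing
	 * at th with th_alloc_size releases the whole allocation */
	if (atomic_dec_and_test(&th->th_refc))
		OBD_FREE(th, th->th_alloc_size);
}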
ENTRY;
rc = udmu_objset_statfs(&osd->od_objset, osfs);
- if (rc)
+ if (unlikely(rc))
RETURN(rc);
osfs->os_bavail -= min_t(obd_size,
OSD_GRANT_FOR_LOCAL_OIDS / osfs->os_bsize,
param->ddp_inodespace = OSD_DNODE_EST_COUNT;
/* per-fragment overhead to be used by the client code */
param->ddp_grant_frag = udmu_blk_insert_cost();
-
- param->ddp_mnt = NULL;
}
/*
tx_state_t *tx = &dmu_objset_pool(osd->od_objset.os)->dp_tx;
uint64_t txg;
+ mutex_enter(&tx->tx_sync_lock);
txg = tx->tx_open_txg + 1;
if (tx->tx_quiesce_txg_waiting < txg) {
tx->tx_quiesce_txg_waiting = txg;
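/*
 * For context, a sketch of the full txg-kick sequence this hunk takes
 * part in, following the standard ZFS tx_sync_lock protocol (the tail
 * is not shown in this hunk and is assumed here):
 *
 *	mutex_enter(&tx->tx_sync_lock);
 *	txg = tx->tx_open_txg + 1;
 *	if (tx->tx_quiesce_txg_waiting < txg) {
 *		tx->tx_quiesce_txg_waiting = txg;
 *		cv_broadcast(&tx->tx_quiesce_more_cv);
 *	}
 *	mutex_exit(&tx->tx_sync_lock);
 */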
static int osd_mount(const struct lu_env *env,
struct osd_device *o, struct lustre_cfg *cfg)
{
- struct dsl_dataset *ds;
- char *dev = lustre_cfg_string(cfg, 1);
- dmu_buf_t *rootdb;
- int rc;
+ struct dsl_dataset *ds;
+ char *dev = lustre_cfg_string(cfg, 1);
+ dmu_buf_t *rootdb;
+ dsl_pool_t *dp;
+ int rc;
ENTRY;
if (o->od_objset.os != NULL)
strncpy(o->od_svname, lustre_cfg_string(cfg, 4),
sizeof(o->od_svname) - 1);
+ rc = osd_zfs_acct_init(env, o);
+ if (rc)
+ RETURN(rc);
+
+ if (server_name_is_ost(o->od_svname))
+ o->od_is_ost = 1;
+
rc = -udmu_objset_open(o->od_mntdev, &o->od_objset);
if (rc) {
CERROR("can't open objset %s: %d\n", o->od_mntdev, rc);
}
ds = dmu_objset_ds(o->od_objset.os);
+ dp = dmu_objset_pool(o->od_objset.os);
LASSERT(ds);
+ LASSERT(dp);
+ dsl_pool_config_enter(dp, FTAG);
rc = dsl_prop_register(ds, "xattr", osd_xattr_changed_cb, o);
+ dsl_pool_config_exit(dp, FTAG);
if (rc)
CERROR("%s: cat not register xattr callback, ignore: %d\n",
o->od_svname, rc);
if (rc)
GOTO(err, rc);
+ rc = osd_convert_root_to_new_seq(env, o);
+ if (rc)
+ GOTO(err, rc);
+
/* Use our own ZAP for inode accounting by default; this can be changed
* via procfs to estimate inode usage from block usage instead */
o->od_quota_iused_est = 0;
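/*
 * A sketch of how the toggle might be consumed when reporting inode
 * usage for an id (osd_acct_iused_sketch, the "%llx" key format and
 * the estimation divisor are assumptions for illustration, not the
 * verified helpers):
 */
static int osd_acct_iused_sketch(struct osd_device *o, uint64_t zapobj,
				 uint64_t id, uint64_t *inodes,
				 uint64_t bytes_used)
{
	char buf[32];

	if (o->od_quota_iused_est) {
		/* estimate the inode count from block usage */
		*inodes = bytes_used >> 16; /* assumed avg object size */
		return 0;
	}
	/* exact count kept in the dedicated accounting ZAP */
	snprintf(buf, sizeof(buf), "%llx", (unsigned long long)id);
	return -zap_lookup(o->od_objset.os, zapobj, buf, sizeof(uint64_t),
			   1, inodes);
}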
{
ENTRY;
- if (cfs_atomic_read(&o->od_zerocopy_alloc))
+ if (atomic_read(&o->od_zerocopy_alloc))
CERROR("%s: lost %d allocated page(s)\n", o->od_svname,
- cfs_atomic_read(&o->od_zerocopy_alloc));
- if (cfs_atomic_read(&o->od_zerocopy_loan))
+ atomic_read(&o->od_zerocopy_alloc));
+ if (atomic_read(&o->od_zerocopy_loan))
CERROR("%s: lost %d loaned abuf(s)\n", o->od_svname,
- cfs_atomic_read(&o->od_zerocopy_loan));
- if (cfs_atomic_read(&o->od_zerocopy_pin))
+ atomic_read(&o->od_zerocopy_loan));
+ if (atomic_read(&o->od_zerocopy_pin))
CERROR("%s: lost %d pinned dbuf(s)\n", o->od_svname,
- cfs_atomic_read(&o->od_zerocopy_pin));
+ atomic_read(&o->od_zerocopy_pin));
+
+ osd_zfs_acct_fini(env, o);
if (o->od_objset.os != NULL)
udmu_objset_close(&o->od_objset);
ENTRY;
+ osd_shutdown(env, o);
osd_oi_fini(env, o);
if (o->od_objset.os) {
struct lu_device *d, struct lustre_cfg *cfg)
{
struct osd_device *o = osd_dev(d);
- int err;
+ int rc;
ENTRY;
switch(cfg->lcfg_command) {
case LCFG_SETUP:
- err = osd_mount(env, o, cfg);
+ rc = osd_mount(env, o, cfg);
break;
case LCFG_CLEANUP:
- err = osd_shutdown(env, o);
+ rc = osd_shutdown(env, o);
break;
+ case LCFG_PARAM: {
+ LASSERT(&o->od_dt_dev);
+ rc = class_process_proc_param(PARAM_OSD, lprocfs_osd_obd_vars,
+ cfg, &o->od_dt_dev);
+ if (rc > 0 || rc == -ENOSYS)
+ rc = class_process_proc_param(PARAM_OST,
+ lprocfs_osd_obd_vars,
+ cfg, &o->od_dt_dev);
+ break;
+ }
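/*
 * The PARAM_OSD pass runs first so "osd." tunables match; when it
 * reports the parameter as unmatched (rc > 0) or unsupported (-ENOSYS),
 * the legacy "ost." prefix is retried for compatibility. A hypothetical
 * invocation (target and tunable names made up for illustration):
 *
 *	lctl conf_param lustre-OST0000.ost.sometunable=val
 */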
default:
- err = -ENOTTY;
+ rc = -ENOTTY;
}
- RETURN(err);
+ RETURN(rc);
}
static int osd_recovery_complete(const struct lu_env *env, struct lu_device *d)
{
+ struct osd_device *osd = osd_dev(d);
+ int rc = 0;
ENTRY;
- RETURN(0);
+
+ if (osd->od_quota_slave == NULL)
+ RETURN(0);
+
+ /* start the qsd instance on recovery completion; this notifies the
+ * quota slave code that we are about to process new requests now */
+ rc = qsd_start(env, osd->od_quota_slave);
+ RETURN(rc);
}
/*
*exp = class_conn2export(&conn);
- cfs_spin_lock(&osd->od_objset.lock);
+ spin_lock(&osd->od_objset.lock);
osd->od_connects++;
- cfs_spin_unlock(&osd->od_objset.lock);
+ spin_unlock(&osd->od_objset.lock);
RETURN(0);
}
ENTRY;
/* Only disconnect the underlying layers on the final disconnect. */
- cfs_spin_lock(&osd->od_objset.lock);
+ spin_lock(&osd->od_objset.lock);
osd->od_connects--;
if (osd->od_connects == 0)
release = 1;
- cfs_spin_unlock(&osd->od_objset.lock);
+ spin_unlock(&osd->od_objset.lock);
rc = class_disconnect(exp); /* bz 9811 */
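/*
 * A sketch of the presumable tail of the disconnect path, assuming the
 * usual pattern of tearing the device down once the last export is
 * gone (class_manual_cleanup() is an assumption, not taken from this
 * patch):
 *
 *	if (rc == 0 && release)
 *		class_manual_cleanup(obd);
 *	RETURN(rc);
 */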
int rc = 0;
ENTRY;
- if (dev->ld_site && lu_device_is_md(dev->ld_site->ls_top_dev)) {
- /* MDT/MDD still use old infrastructure to create
- * special files */
- rc = llo_local_objects_setup(env, lu2md_dev(pdev),
- lu2dt_dev(dev));
- if (rc)
- RETURN(rc);
- }
-
if (osd->od_quota_slave != NULL)
/* set up quota slave objects */
rc = qsd_prepare(env, osd->od_quota_slave);
if (rc)
return rc;
- rc = class_register_type(&osd_obd_device_ops, NULL,
+ rc = class_register_type(&osd_obd_device_ops, NULL, true, NULL,
+#ifndef HAVE_ONLY_PROCFS_SEQ
lprocfs_osd_module_vars,
+#endif
LUSTRE_OSD_ZFS_NAME, &osd_device_type);
if (rc)
lu_kmem_fini(osd_caches);