* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
/********************** config llog list **********************/
static CFS_LIST_HEAD(config_llog_list);
-static spinlock_t config_list_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t config_list_lock = CFS_SPIN_LOCK_UNLOCKED;
/* Take a reference to a config log */
static int config_log_get(struct config_llog_data *cld)
ENTRY;
if (cld->cld_stopping)
RETURN(1);
- atomic_inc(&cld->cld_refcount);
+ cfs_atomic_inc(&cld->cld_refcount);
CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
- atomic_read(&cld->cld_refcount));
+ cfs_atomic_read(&cld->cld_refcount));
RETURN(0);
}
ENTRY;
CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
- atomic_read(&cld->cld_refcount));
- LASSERT(atomic_read(&cld->cld_refcount) > 0);
+ cfs_atomic_read(&cld->cld_refcount));
+ LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
/* spinlock to make sure no item with 0 refcount in the list */
- spin_lock(&config_list_lock);
- if (unlikely(atomic_dec_and_test(&cld->cld_refcount))) {
- list_del(&cld->cld_list_chain);
- spin_unlock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
+ if (unlikely(cfs_atomic_dec_and_test(&cld->cld_refcount))) {
+ cfs_list_del(&cld->cld_list_chain);
+ cfs_spin_unlock(&config_list_lock);
CDEBUG(D_MGC, "dropping config log %s\n", cld->cld_logname);
strlen(cld->cld_cfg.cfg_instance) + 1);
OBD_FREE(cld, sizeof(*cld));
} else {
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
}
EXIT;
RETURN(ERR_PTR(-EINVAL));
}
- spin_lock(&config_list_lock);
- list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
+ cfs_spin_lock(&config_list_lock);
+ cfs_list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
if (match_instance && cld->cld_cfg.cfg_instance &&
strcmp(logid, cld->cld_cfg.cfg_instance) == 0)
goto out_found;
strcmp(logid, cld->cld_logname) == 0)
goto out_found;
}
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
CDEBUG(D_CONFIG, "can't get log %s\n", logid);
RETURN(ERR_PTR(-ENOENT));
out_found:
- atomic_inc(&cld->cld_refcount);
- spin_unlock(&config_list_lock);
+ cfs_atomic_inc(&cld->cld_refcount);
+ cfs_spin_unlock(&config_list_lock);
LASSERT(cld->cld_stopping == 0 || cld->cld_is_sptlrpc == 0);
RETURN(cld);
}
strcpy(cld->cld_logname, logname);
if (cfg)
cld->cld_cfg = *cfg;
+ cfs_mutex_init(&cld->cld_lock);
cld->cld_cfg.cfg_last_idx = 0;
cld->cld_cfg.cfg_flags = 0;
cld->cld_cfg.cfg_sb = sb;
cld->cld_is_sptlrpc = is_sptlrpc;
- atomic_set(&cld->cld_refcount, 1);
+ cfs_atomic_set(&cld->cld_refcount, 1);
/* Keep the mgc around until we are done */
cld->cld_mgcexp = class_export_get(obd->obd_self_export);
rc = mgc_logname2resid(logname, &cld->cld_resid);
- spin_lock(&config_list_lock);
- list_add(&cld->cld_list_chain, &config_llog_list);
- spin_unlock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
+ cfs_list_add(&cld->cld_list_chain, &config_llog_list);
+ cfs_spin_unlock(&config_list_lock);
if (rc) {
config_log_put(cld);
RETURN(cld);
}
-/**
- * Add this log to our list of active logs.
+/** Add this log to the list of active logs watched by an MGC.
+ * Active means we're watching for updates.
* We have one active log per "mount" - client instance or servername.
* Each instance may be at a different point in the log.
*/
RETURN(0);
}
-DECLARE_MUTEX(llog_process_lock);
+CFS_DECLARE_MUTEX(llog_process_lock);
-/* Stop watching for updates on this log. */
+/** Stop watching for updates on this log.
+ */
static int config_log_end(char *logname, struct config_llog_instance *cfg)
{
struct config_llog_data *cld, *cld_sptlrpc = NULL;
if (IS_ERR(cld))
RETURN(PTR_ERR(cld));
- down(&llog_process_lock);
+ cfs_mutex_lock(&cld->cld_lock);
/*
* if cld_stopping is set, it means we didn't start the log thus
* not owning the start ref. this can happen after previous umount:
* calling start_log.
*/
if (unlikely(cld->cld_stopping)) {
- up(&llog_process_lock);
+ cfs_mutex_unlock(&cld->cld_lock);
/* drop the ref from the find */
config_log_put(cld);
RETURN(rc);
}
cld->cld_stopping = 1;
- up(&llog_process_lock);
+ cfs_mutex_unlock(&cld->cld_lock);
- spin_lock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
cld_sptlrpc = cld->cld_sptlrpc;
cld->cld_sptlrpc = NULL;
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
if (cld_sptlrpc)
config_log_put(cld_sptlrpc);
static void do_requeue(struct config_llog_data *cld)
{
- LASSERT(atomic_read(&cld->cld_refcount) > 0);
+ LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
/* Do not run mgc_process_log on a disconnected export or an
export which is being disconnected. Take the client
semaphore to make the check non-racy. */
- down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+ cfs_down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
CDEBUG(D_MGC, "disconnecting, won't update log %s\n",
cld->cld_logname);
}
- up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+ cfs_up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
/* Whether we enqueued again or not in mgc_process_log, we're done
* with the ref from the old enqueue */
CDEBUG(D_MGC, "Starting requeue thread\n");
- lwi_later = LWI_TIMEOUT(60 * HZ, NULL, NULL);
+ lwi_later = LWI_TIMEOUT(60 * CFS_HZ, NULL, NULL);
l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP), &lwi_later);
/* Keep trying failed locks periodically */
- spin_lock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
while (rq_state & (RQ_NOW | RQ_LATER)) {
/* Any new or requeued lostlocks will change the state */
rq_state &= ~(RQ_NOW | RQ_LATER);
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
/* Always wait a few seconds to allow the server who
caused the lock revocation to finish its setup, plus some
random so everyone doesn't try to reconnect at once. */
- lwi_now = LWI_TIMEOUT(3 * HZ + (ll_rand() & 0xff) * (HZ / 100),
+ lwi_now = LWI_TIMEOUT(3 * CFS_HZ + (cfs_rand() & 0xff) * \
+ (CFS_HZ / 100),
NULL, NULL);
l_wait_event(rq_waitq, rq_state & RQ_STOP, &lwi_now);
*/
cld_prev = NULL;
- spin_lock(&config_list_lock);
- list_for_each_entry_safe(cld, cld_next, &config_llog_list,
- cld_list_chain) {
+ cfs_spin_lock(&config_list_lock);
+ cfs_list_for_each_entry_safe(cld, cld_next, &config_llog_list,
+ cld_list_chain) {
if (cld->cld_list_chain.next != &config_llog_list)
- atomic_inc(&cld_next->cld_refcount);
+ cfs_atomic_inc(&cld_next->cld_refcount);
if (cld->cld_lostlock) {
if (cld->cld_sptlrpc &&
cld->cld_sptlrpc->cld_lostlock) {
cld->cld_sptlrpc->cld_lostlock = 0;
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
do_requeue(cld->cld_sptlrpc);
- spin_lock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
LASSERT(cld->cld_lostlock);
}
cld->cld_lostlock = 0;
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
do_requeue(cld);
- spin_lock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
}
if (cld_prev) {
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
config_log_put(cld_prev);
- spin_lock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
}
cld_prev = cld_next;
}
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
/* Wait a bit to see if anyone else needs a requeue */
l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP),
&lwi_later);
- spin_lock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
}
/* spinlock and while guarantee RQ_NOW and RQ_LATER are not set */
rq_state &= ~RQ_RUNNING;
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
CDEBUG(D_MGC, "Ending requeue thread\n");
RETURN(rc);
int rc = 0;
CDEBUG(D_INFO, "log %s: requeue (l=%d r=%d sp=%d st=%x)\n",
- cld->cld_logname, later, atomic_read(&cld->cld_refcount),
+ cld->cld_logname, later, cfs_atomic_read(&cld->cld_refcount),
cld->cld_stopping, rq_state);
- LASSERT(atomic_read(&cld->cld_refcount) > 0);
+ LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
/* Hold lock for rq_state */
- spin_lock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
if (cld->cld_stopping || (rq_state & RQ_STOP)) {
cld->cld_lostlock = 0;
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
config_log_put(cld);
RETURN(0);
}
if (!(rq_state & RQ_RUNNING)) {
LASSERT(rq_state == 0);
rq_state = RQ_RUNNING | (later ? RQ_LATER : RQ_NOW);
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
rc = cfs_kernel_thread(mgc_requeue_thread, 0,
CLONE_VM | CLONE_FILES);
if (rc < 0) {
}
} else {
rq_state |= later ? RQ_LATER : RQ_NOW;
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
cfs_waitq_signal(&rq_waitq);
}
LASSERT(lsi->lsi_srv_mnt == mnt);
/* The mgc fs exclusion sem. Only one fs can be setup at a time. */
- down(&cli->cl_mgc_sem);
+ cfs_down(&cli->cl_mgc_sem);
- cleanup_group_info();
+ cfs_cleanup_group_info();
obd->obd_fsops = fsfilt_get_ops(MT_STR(lsi->lsi_ldd));
if (IS_ERR(obd->obd_fsops)) {
- up(&cli->cl_mgc_sem);
+ cfs_up(&cli->cl_mgc_sem);
CERROR("No fstype %s rc=%ld\n", MT_STR(lsi->lsi_ldd),
PTR_ERR(obd->obd_fsops));
RETURN(PTR_ERR(obd->obd_fsops));
fsfilt_put_ops(obd->obd_fsops);
obd->obd_fsops = NULL;
cli->cl_mgc_vfsmnt = NULL;
- up(&cli->cl_mgc_sem);
+ cfs_up(&cli->cl_mgc_sem);
RETURN(err);
}
if (obd->obd_fsops)
fsfilt_put_ops(obd->obd_fsops);
- up(&cli->cl_mgc_sem);
+ cfs_up(&cli->cl_mgc_sem);
RETURN(rc);
}
-static atomic_t mgc_count = ATOMIC_INIT(0);
+static cfs_atomic_t mgc_count = CFS_ATOMIC_INIT(0);
static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
int rc = 0;
case OBD_CLEANUP_EARLY:
break;
case OBD_CLEANUP_EXPORTS:
- if (atomic_dec_and_test(&mgc_count)) {
+ if (cfs_atomic_dec_and_test(&mgc_count)) {
/* Kick the requeue waitq - cld's should all be
stopping */
- spin_lock(&config_list_lock);
+ cfs_spin_lock(&config_list_lock);
rq_state |= RQ_STOP;
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
cfs_waitq_signal(&rq_waitq);
}
rc = obd_llog_finish(obd, 0);
lprocfs_obd_setup(obd, lvars.obd_vars);
sptlrpc_lprocfs_cliobd_attach(obd);
- spin_lock(&config_list_lock);
- atomic_inc(&mgc_count);
- if (atomic_read(&mgc_count) == 1) {
+ cfs_spin_lock(&config_list_lock);
+ cfs_atomic_inc(&mgc_count);
+ if (cfs_atomic_read(&mgc_count) == 1) {
rq_state &= ~RQ_STOP;
cfs_waitq_init(&rq_waitq);
}
- spin_unlock(&config_list_lock);
+ cfs_spin_unlock(&config_list_lock);
RETURN(rc);
RETURN(rc);
}
+/* Timeout limits (via req->rq_delay_limit) for the MGS requests below. */
+#define MGC_ENQUEUE_LIMIT 50
+#define MGC_TARGET_REG_LIMIT 10
+#define MGC_SEND_PARAM_LIMIT 10
+
/* Send parameter to MGS*/
static int mgc_set_mgs_param(struct obd_export *exp,
struct mgs_send_param *msp)
memcpy(req_msp, msp, sizeof(*req_msp));
ptlrpc_request_set_replen(req);
+ /* Limit how long we will wait for the send-param request to complete */
+ req->rq_delay_limit = MGC_SEND_PARAM_LIMIT;
rc = ptlrpc_queue_wait(req);
if (!rc) {
rep_msp = req_capsule_server_get(&req->rq_pill, &RMF_MGS_SEND_PARAM);
struct config_llog_data *cld = (struct config_llog_data *)data;
struct ldlm_enqueue_info einfo = { type, mode, mgc_blocking_ast,
ldlm_completion_ast, NULL, NULL, data};
-
+ struct ptlrpc_request *req;
+ int short_limit = cld->cld_is_sptlrpc;
int rc;
ENTRY;
/* We need a callback for every lockholder, so don't try to
ldlm_lock_match (see rev 1.1.2.11.2.47) */
-
- rc = ldlm_cli_enqueue(exp, NULL, &einfo, &cld->cld_resid, NULL, flags,
+ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
+ &RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION,
+ LDLM_ENQUEUE);
+ if (req == NULL)
+ RETURN(-ENOMEM);
+ ptlrpc_request_set_replen(req);
+ /* check if this is server or client */
+ if (cld->cld_cfg.cfg_sb) {
+ struct lustre_sb_info *lsi = s2lsi(cld->cld_cfg.cfg_sb);
+ if (lsi && (lsi->lsi_flags & LSI_SERVER))
+ short_limit = 1;
+ }
+ /* Limit how long we will wait for the enqueue to complete */
+ req->rq_delay_limit = short_limit ? 5 : MGC_ENQUEUE_LIMIT;
+ rc = ldlm_cli_enqueue(exp, &req, &einfo, &cld->cld_resid, NULL, flags,
NULL, 0, lockh, 0);
/* A failed enqueue should still call the mgc_blocking_ast,
where it will be requeued if needed ("grant failed"). */
-
+ ptlrpc_req_finished(req);
RETURN(rc);
}
RETURN(0);
}
-#if 0
-static int mgc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
- void *karg, void *uarg)
-{
- struct obd_device *obd = exp->exp_obd;
- struct obd_ioctl_data *data = karg;
- struct llog_ctxt *ctxt;
- struct lvfs_run_ctxt saved;
- int rc;
- ENTRY;
-
- if (!try_module_get(THIS_MODULE)) {
- CERROR("Can't get module. Is it alive?");
- return -EINVAL;
- }
- switch (cmd) {
- /* REPLicator context */
- case OBD_IOC_PARSE: {
- CERROR("MGC parsing llog %s\n", data->ioc_inlbuf1);
- ctxt = llog_get_context(exp->exp_obd, LLOG_CONFIG_REPL_CTXT);
- rc = class_config_parse_llog(ctxt, data->ioc_inlbuf1, NULL);
- GOTO(out, rc);
- }
-#ifdef __KERNEL__
- case OBD_IOC_LLOG_INFO:
- case OBD_IOC_LLOG_PRINT: {
- ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
- rc = llog_ioctl(ctxt, cmd, data);
-
- GOTO(out, rc);
- }
-#endif
- /* ORIGinator context */
- case OBD_IOC_DUMP_LOG: {
- ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
- push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- rc = class_config_dump_llog(ctxt, data->ioc_inlbuf1, NULL);
- pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- if (rc)
- RETURN(rc);
-
- GOTO(out, rc);
- }
- default:
- CERROR("mgc_ioctl(): unrecognised ioctl %#x\n", cmd);
- GOTO(out, rc = -ENOTTY);
- }
-out:
- module_put(THIS_MODULE);
-
- return rc;
-}
-#endif
-
/* Send target_reg message to MGS */
static int mgc_target_register(struct obd_export *exp,
struct mgs_target_info *mti)
memcpy(req_mti, mti, sizeof(*req_mti));
ptlrpc_request_set_replen(req);
CDEBUG(D_MGC, "register %s\n", mti->mti_svname);
+ /* Limit how long we will wait for the target register request to complete */
+ req->rq_delay_limit = MGC_TARGET_REG_LIMIT;
rc = ptlrpc_queue_wait(req);
if (!rc) {
void *key, obd_count vallen, void *val,
struct ptlrpc_request_set *set)
{
- struct obd_import *imp = class_exp2cliimp(exp);
int rc = -EINVAL;
ENTRY;
- /* Try to "recover" the initial connection; i.e. retry */
- if (KEY_IS(KEY_INIT_RECOV)) {
- if (vallen != sizeof(int))
- RETURN(-EINVAL);
- spin_lock(&imp->imp_lock);
- imp->imp_initial_recov = *(int *)val;
- spin_unlock(&imp->imp_lock);
- CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
- exp->exp_obd->obd_name, imp->imp_initial_recov);
- RETURN(0);
- }
/* Turn off initial_recov after we try all backup servers once */
if (KEY_IS(KEY_INIT_RECOV_BACKUP)) {
+ struct obd_import *imp = class_exp2cliimp(exp);
int value;
if (vallen != sizeof(int))
RETURN(-EINVAL);
value = *(int *)val;
- spin_lock(&imp->imp_lock);
- imp->imp_initial_recov_bk = value > 0;
- /* Even after the initial connection, give up all comms if
- nobody answers the first time. */
- imp->imp_recon_bk = 1;
- spin_unlock(&imp->imp_lock);
- CDEBUG(D_MGC, "InitRecov %s %d/%d:d%d:i%d:r%d:or%d:%s\n",
- imp->imp_obd->obd_name, value, imp->imp_initial_recov,
+ CDEBUG(D_MGC, "InitRecov %s %d/d%d:i%d:r%d:or%d:%s\n",
+ imp->imp_obd->obd_name, value,
imp->imp_deactive, imp->imp_invalid,
imp->imp_replayable, imp->imp_obd->obd_replayable,
ptlrpc_import_state_name(imp->imp_state));
/* Resurrect if we previously died */
- if (imp->imp_invalid || value > 1)
+ if ((imp->imp_state != LUSTRE_IMP_FULL &&
+ imp->imp_state != LUSTRE_IMP_NEW) || value > 1)
ptlrpc_reconnect_import(imp);
RETURN(0);
}
RETURN(rc);
}
-/* Get a config log from the MGS and process it.
- This func is called for both clients and servers. */
+/** Get a config log from the MGS and process it.
+ * This func is called for both clients and servers.
+ * Copy the log locally before parsing it if appropriate (non-MGS server)
+ */
int mgc_process_log(struct obd_device *mgc,
struct config_llog_data *cld)
{
sounds like badness. It actually might be fine, as long as
we're not trying to update from the same log
simultaneously (in which case we should use a per-log sem.) */
- down(&llog_process_lock);
+ cfs_mutex_lock(&cld->cld_lock);
if (cld->cld_stopping) {
- up(&llog_process_lock);
+ cfs_mutex_unlock(&cld->cld_lock);
RETURN(0);
}
ctxt = llog_get_context(mgc, LLOG_CONFIG_REPL_CTXT);
if (!ctxt) {
CERROR("missing llog context\n");
- up(&llog_process_lock);
+ cfs_mutex_unlock(&cld->cld_lock);
RETURN(-EINVAL);
}
* read it up here.
*/
if (rcl && cld->cld_is_sptlrpc)
- goto out_pop;
+ GOTO(out_pop, rc);
/* Copy the setup log locally if we can. Don't mess around if we're
running an MGS though (logs are already local). */
/* logname and instance info should be the same, so use our
copy of the instance for the update. The cfg_last_idx will
be updated here. */
- rc = class_config_parse_llog(ctxt, cld->cld_logname, &cld->cld_cfg);
+ if (rcl == 0 || lctxt == ctxt)
+ rc = class_config_parse_llog(ctxt, cld->cld_logname, &cld->cld_cfg);
out_pop:
llog_ctxt_put(ctxt);
if (ctxt != lctxt)
CDEBUG(D_MGC, "%s: configuration from log '%s' %sed (%d).\n",
mgc->obd_name, cld->cld_logname, rc ? "fail" : "succeed", rc);
- up(&llog_process_lock);
+ cfs_mutex_unlock(&cld->cld_lock);
RETURN(rc);
}
+/** Called from lustre_process_log.
+ * LCFG_LOG_START gets the config log from the MGS, processes it to start
+ * any services, and adds it to the list logs to watch (follow).
+ */
static int mgc_process_config(struct obd_device *obd, obd_count len, void *buf)
{
struct lustre_cfg *lcfg = buf;
switch(cmd = lcfg->lcfg_command) {
case LCFG_LOV_ADD_OBD: {
- /* Add any new target, not just osts */
+ /* Overloading this cfg command: register a new target */
struct mgs_target_info *mti;
if (LUSTRE_CFG_BUFLEN(lcfg, 1) !=
break;
}
case LCFG_LOV_DEL_OBD:
- /* Remove target from the fs? */
- /* FIXME */
+ /* Unregister has no meaning at the moment. */
CERROR("lov_del_obd unimplemented\n");
rc = -ENOSYS;
break;