#include <lustre_disk.h>
#include "mgc_internal.h"
-static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id)
+static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id,
+ int type)
{
__u64 resname = 0;
}
memcpy(&resname, name, len);
- memset(res_id, 0, sizeof(*res_id));
-
/* Always use the same endianness for the resid */
+ memset(res_id, 0, sizeof(*res_id));
res_id->name[0] = cpu_to_le64(resname);
+ /* XXX: unfortunately, sptlprc and config llog share one lock */
+ switch(type) {
+ case CONFIG_T_CONFIG:
+ case CONFIG_T_SPTLRPC:
+ resname = 0;
+ break;
+ default:
+ /* unknown config log type: coding error in the caller */
+ LBUG();
+ }
+ res_id->name[1] = cpu_to_le64(resname);
CDEBUG(D_MGC, "log %s to resid "LPX64"/"LPX64" (%.8s)\n", name,
res_id->name[0], res_id->name[1], (char *)&res_id->name[0]);
return 0;
}
-int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id)
+int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type)
{
/* fsname is at most 8 chars long, maybe contain "-".
* e.g. "lustre", "SUN-000" */
+ /* 'type' must be CONFIG_T_CONFIG or CONFIG_T_SPTLRPC (see mgc_name2resid) */
- return mgc_name2resid(fsname, strlen(fsname), res_id);
+ return mgc_name2resid(fsname, strlen(fsname), res_id, type);
}
EXPORT_SYMBOL(mgc_fsname2resid);
-int mgc_logname2resid(char *logname, struct ldlm_res_id *res_id)
+int mgc_logname2resid(char *logname, struct ldlm_res_id *res_id, int type)
{
char *name_end;
int len;
name_end = strrchr(logname, '-');
LASSERT(name_end);
len = name_end - logname;
+ /* use only the fsname part before the last '-' for the resid */
- return mgc_name2resid(logname, len, res_id);
+ return mgc_name2resid(logname, len, res_id, type);
}
/********************** config llog list **********************/
static CFS_LIST_HEAD(config_llog_list);
-static cfs_spinlock_t config_list_lock = CFS_SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t config_list_lock = CFS_SPIN_LOCK_UNLOCKED;
/* Take a reference to a config log */
static int config_log_get(struct config_llog_data *cld)
{
ENTRY;
- if (cld->cld_stopping)
- RETURN(1);
cfs_atomic_inc(&cld->cld_refcount);
CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
cfs_atomic_read(&cld->cld_refcount));
LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
/* spinlock to make sure no item with 0 refcount in the list */
- cfs_spin_lock(&config_list_lock);
- if (unlikely(cfs_atomic_dec_and_test(&cld->cld_refcount))) {
+ /* take the list lock only when the refcount actually drops to 0 */
+ if (cfs_atomic_dec_and_lock(&cld->cld_refcount, &config_list_lock)) {
cfs_list_del(&cld->cld_list_chain);
cfs_spin_unlock(&config_list_lock);
if (cld->cld_sptlrpc)
config_log_put(cld->cld_sptlrpc);
- if (cld->cld_is_sptlrpc)
+ if (cld_is_sptlrpc(cld))
sptlrpc_conf_log_stop(cld->cld_logname);
class_export_put(cld->cld_mgcexp);
- OBD_FREE(cld->cld_logname, strlen(cld->cld_logname) + 1);
- OBD_FREE(cld, sizeof(*cld));
- } else {
- cfs_spin_unlock(&config_list_lock);
+ /* logname is allocated inline with the cld, freed in one shot */
+ OBD_FREE(cld, sizeof(*cld) + strlen(cld->cld_logname) + 1);
}
EXIT;
}
if (found) {
cfs_atomic_inc(&found->cld_refcount);
- LASSERT(found->cld_stopping == 0 || found->cld_is_sptlrpc == 0);
+ LASSERT(found->cld_stopping == 0 || cld_is_sptlrpc(found) == 0);
}
cfs_spin_unlock(&config_list_lock);
RETURN(found);
static
struct config_llog_data *do_config_log_add(struct obd_device *obd,
char *logname,
- unsigned int is_sptlrpc,
+ int type,
struct config_llog_instance *cfg,
struct super_block *sb)
{
CDEBUG(D_MGC, "do adding config log %s:%p\n", logname,
cfg ? cfg->cfg_instance : 0);
- OBD_ALLOC(cld, sizeof(*cld));
+ /* one allocation for the cld plus its trailing logname buffer */
+ OBD_ALLOC(cld, sizeof(*cld) + strlen(logname) + 1);
if (!cld)
RETURN(ERR_PTR(-ENOMEM));
- OBD_ALLOC(cld->cld_logname, strlen(logname) + 1);
- if (!cld->cld_logname) {
- OBD_FREE(cld, sizeof(*cld));
- RETURN(ERR_PTR(-ENOMEM));
- }
+
strcpy(cld->cld_logname, logname);
if (cfg)
cld->cld_cfg = *cfg;
cld->cld_cfg.cfg_last_idx = 0;
cld->cld_cfg.cfg_flags = 0;
cld->cld_cfg.cfg_sb = sb;
- cld->cld_is_sptlrpc = is_sptlrpc;
+ cld->cld_type = type;
cfs_atomic_set(&cld->cld_refcount, 1);
/* Keep the mgc around until we are done */
cld->cld_mgcexp = class_export_get(obd->obd_self_export);
- if (is_sptlrpc) {
+ if (cld_is_sptlrpc(cld)) {
sptlrpc_conf_log_start(logname);
cld->cld_cfg.cfg_obdname = obd->obd_name;
}
- rc = mgc_logname2resid(logname, &cld->cld_resid);
+ rc = mgc_logname2resid(logname, &cld->cld_resid, type);
cfs_spin_lock(&config_list_lock);
cfs_list_add(&cld->cld_list_chain, &config_llog_list);
RETURN(ERR_PTR(rc));
}
- if (is_sptlrpc) {
+ if (cld_is_sptlrpc(cld)) {
rc = mgc_process_log(obd, cld);
if (rc)
CERROR("failed processing sptlrpc log: %d\n", rc);
struct config_llog_instance *cfg,
struct super_block *sb)
{
- struct config_llog_data *cld, *sptlrpc_cld;
- char seclogname[20];
+ struct config_llog_data *cld;
+ struct config_llog_data *sptlrpc_cld;
+ /* NOTE(review): 32 assumed enough for "<fsname>-sptlrpc" — confirm */
+ char seclogname[32];
char *ptr;
ENTRY;
sptlrpc_cld = config_log_find(seclogname, NULL);
if (sptlrpc_cld == NULL) {
- sptlrpc_cld = do_config_log_add(obd, seclogname, 1, NULL, NULL);
+ sptlrpc_cld = do_config_log_add(obd, seclogname,
+ CONFIG_T_SPTLRPC, NULL, NULL);
if (IS_ERR(sptlrpc_cld)) {
CERROR("can't create sptlrpc log: %s\n", seclogname);
RETURN(PTR_ERR(sptlrpc_cld));
}
}
- cld = do_config_log_add(obd, logname, 0, cfg, sb);
+ cld = do_config_log_add(obd, logname, CONFIG_T_CONFIG, cfg, sb);
if (IS_ERR(cld)) {
CERROR("can't create log: %s\n", logname);
config_log_put(sptlrpc_cld);
#define RQ_NOW 0x2
#define RQ_LATER 0x4
#define RQ_STOP 0x8
-static int rq_state = 0;
-static cfs_waitq_t rq_waitq;
-
-static int mgc_requeue_add(struct config_llog_data *cld, int later);
+static int rq_state = 0;
+static cfs_waitq_t rq_waitq;
+/* completed by mgc_requeue_thread on exit; waited on during cleanup */
+static CFS_DECLARE_COMPLETION(rq_exit);
static void do_requeue(struct config_llog_data *cld)
{
+ ENTRY;
LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
/* Do not run mgc_process_log on a disconnected export or an
}
cfs_up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
- /* Whether we enqueued again or not in mgc_process_log, we're done
- * with the ref from the old enqueue */
- config_log_put(cld);
+ EXIT;
}
+/* this timeout represents how many seconds MGC should wait before
+ * requeue config and recover lock to the MGS. We need to randomize this
+ * in order to not flood the MGS.
+ */
+#define MGC_TIMEOUT_MIN_SECONDS 5
+#define MGC_TIMEOUT_RAND_CENTISEC 0x1ff /* ~500 */
+
static int mgc_requeue_thread(void *data)
{
- struct l_wait_info lwi_now, lwi_later;
- struct config_llog_data *cld, *cld_next, *cld_prev;
char name[] = "ll_cfg_requeue";
int rc = 0;
ENTRY;
CDEBUG(D_MGC, "Starting requeue thread\n");
- lwi_later = LWI_TIMEOUT(60 * CFS_HZ, NULL, NULL);
- l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP), &lwi_later);
-
/* Keep trying failed locks periodically */
cfs_spin_lock(&config_list_lock);
- while (rq_state & (RQ_NOW | RQ_LATER)) {
+ rq_state |= RQ_RUNNING;
+ while (1) {
+ struct l_wait_info lwi;
+ struct config_llog_data *cld, *cld_prev;
+ int rand = cfs_rand() & MGC_TIMEOUT_RAND_CENTISEC;
+ /* snapshot a pending stop; lost-lock refs are still dropped below */
+ int stopped = !!(rq_state & RQ_STOP);
+ int to;
+
/* Any new or requeued lostlocks will change the state */
rq_state &= ~(RQ_NOW | RQ_LATER);
cfs_spin_unlock(&config_list_lock);
/* Always wait a few seconds to allow the server who
caused the lock revocation to finish its setup, plus some
random so everyone doesn't try to reconnect at once. */
- lwi_now = LWI_TIMEOUT(3 * CFS_HZ + (cfs_rand() & 0xff) * \
- (CFS_HZ / 100),
- NULL, NULL);
- l_wait_event(rq_waitq, rq_state & RQ_STOP, &lwi_now);
+ to = MGC_TIMEOUT_MIN_SECONDS * CFS_HZ;
+ to += rand * CFS_HZ / 100; /* rand is centi-seconds */
+ lwi = LWI_TIMEOUT(to, NULL, NULL);
+ l_wait_event(rq_waitq, rq_state & RQ_STOP, &lwi);
/*
* iterate & processing through the list. for each cld, process
* it's guaranteed any item in the list must have
* reference > 0; and if cld_lostlock is set, at
* least one reference is taken by the previous enqueue.
- *
- * Note: releasing a cld might lead to itself and its depended
- * sptlrpc cld be unlinked from the list. to safely iterate
- * we need to take a reference on next cld before processing.
*/
cld_prev = NULL;
cfs_spin_lock(&config_list_lock);
- cfs_list_for_each_entry_safe(cld, cld_next, &config_llog_list,
- cld_list_chain) {
- if (cld->cld_list_chain.next != &config_llog_list)
- cfs_atomic_inc(&cld_next->cld_refcount);
+ cfs_list_for_each_entry(cld, &config_llog_list,
+ cld_list_chain) {
+ if (!cld->cld_lostlock)
+ continue;
- if (cld->cld_lostlock) {
- if (cld->cld_sptlrpc &&
- cld->cld_sptlrpc->cld_lostlock) {
- cld->cld_sptlrpc->cld_lostlock = 0;
+ cfs_spin_unlock(&config_list_lock);
- cfs_spin_unlock(&config_list_lock);
- do_requeue(cld->cld_sptlrpc);
- cfs_spin_lock(&config_list_lock);
- LASSERT(cld->cld_lostlock);
- }
+ LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
- cld->cld_lostlock = 0;
+ /* Whether we enqueued again or not in mgc_process_log,
+ * we're done with the ref from the old enqueue */
+ if (cld_prev)
+ config_log_put(cld_prev);
+ cld_prev = cld;
- cfs_spin_unlock(&config_list_lock);
+ cld->cld_lostlock = 0;
+ if (likely(!stopped))
do_requeue(cld);
- cfs_spin_lock(&config_list_lock);
- }
-
- if (cld_prev) {
- cfs_spin_unlock(&config_list_lock);
- config_log_put(cld_prev);
- cfs_spin_lock(&config_list_lock);
- }
-
- cld_prev = cld_next;
+ cfs_spin_lock(&config_list_lock);
}
cfs_spin_unlock(&config_list_lock);
+ if (cld_prev)
+ config_log_put(cld_prev);
+
+ /* break after scanning the list so that we can drop
+ * refcount to losing lock clds */
+ if (unlikely(stopped)) {
+ cfs_spin_lock(&config_list_lock);
+ break;
+ }
/* Wait a bit to see if anyone else needs a requeue */
+ lwi = (struct l_wait_info) { 0 };
l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP),
- &lwi_later);
+ &lwi);
cfs_spin_lock(&config_list_lock);
}
/* spinlock and while guarantee RQ_NOW and RQ_LATER are not set */
rq_state &= ~RQ_RUNNING;
cfs_spin_unlock(&config_list_lock);
+ /* let mgc_cleanup's wait_for_completion proceed */
+ cfs_complete(&rq_exit);
+
CDEBUG(D_MGC, "Ending requeue thread\n");
RETURN(rc);
}
/* Add a cld to the list to requeue. Start the requeue thread if needed.
We are responsible for dropping the config log reference from here on out. */
-static int mgc_requeue_add(struct config_llog_data *cld, int later)
+static void mgc_requeue_add(struct config_llog_data *cld)
{
- int rc = 0;
+ ENTRY;
- CDEBUG(D_INFO, "log %s: requeue (l=%d r=%d sp=%d st=%x)\n",
- cld->cld_logname, later, cfs_atomic_read(&cld->cld_refcount),
+ CDEBUG(D_INFO, "log %s: requeue (r=%d sp=%d st=%x)\n",
+ cld->cld_logname, cfs_atomic_read(&cld->cld_refcount),
cld->cld_stopping, rq_state);
LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
- /* Hold lock for rq_state */
- cfs_spin_lock(&config_list_lock);
-
- if (cld->cld_stopping || (rq_state & RQ_STOP)) {
- cld->cld_lostlock = 0;
- cfs_spin_unlock(&config_list_lock);
- config_log_put(cld);
- RETURN(0);
+ cfs_mutex_lock(&cld->cld_lock);
+ if (cld->cld_stopping || cld->cld_lostlock) {
+ cfs_mutex_unlock(&cld->cld_lock);
+ RETURN_EXIT;
}
-
+ /* this refcount will be released in mgc_requeue_thread. */
+ config_log_get(cld);
cld->cld_lostlock = 1;
+ cfs_mutex_unlock(&cld->cld_lock);
- if (!(rq_state & RQ_RUNNING)) {
- LASSERT(rq_state == 0);
- rq_state = RQ_RUNNING | (later ? RQ_LATER : RQ_NOW);
+ /* Hold lock for rq_state */
+ cfs_spin_lock(&config_list_lock);
+ if (rq_state & RQ_STOP) {
cfs_spin_unlock(&config_list_lock);
- rc = cfs_create_thread(mgc_requeue_thread, NULL,
- CFS_DAEMON_FLAGS);
- if (rc < 0) {
- CERROR("log %s: cannot start requeue thread (%d),"
- "no more log updates!\n", cld->cld_logname, rc);
- /* Drop the ref, since the rq thread won't */
- cld->cld_lostlock = 0;
- config_log_put(cld);
- rq_state = 0;
- RETURN(rc);
- }
+ /* requeue thread already stopping: undo the ref and the flag */
+ cld->cld_lostlock = 0;
+ config_log_put(cld);
} else {
- rq_state |= later ? RQ_LATER : RQ_NOW;
+ rq_state |= RQ_NOW;
cfs_spin_unlock(&config_list_lock);
cfs_waitq_signal(&rq_waitq);
}
-
- RETURN(0);
+ EXIT;
}
/********************** class fns **********************/
break;
case OBD_CLEANUP_EXPORTS:
if (cfs_atomic_dec_and_test(&mgc_count)) {
- /* Kick the requeue waitq - cld's should all be
- stopping */
+ int running;
+ /* stop requeue thread */
cfs_spin_lock(&config_list_lock);
- rq_state |= RQ_STOP;
+ running = rq_state & RQ_RUNNING;
+ if (running)
+ rq_state |= RQ_STOP;
cfs_spin_unlock(&config_list_lock);
- cfs_waitq_signal(&rq_waitq);
+ if (running) {
+ cfs_waitq_signal(&rq_waitq);
+ cfs_wait_for_completion(&rq_exit);
+ }
}
obd_cleanup_client_import(obd);
rc = obd_llog_finish(obd, 0);
lprocfs_obd_setup(obd, lvars.obd_vars);
sptlrpc_lprocfs_cliobd_attach(obd);
- cfs_spin_lock(&config_list_lock);
- cfs_atomic_inc(&mgc_count);
- if (cfs_atomic_read(&mgc_count) == 1) {
- rq_state &= ~RQ_STOP;
+ /* first MGC device: set up and start the requeue thread */
+ if (cfs_atomic_inc_return(&mgc_count) == 1) {
+ rq_state = 0;
cfs_waitq_init(&rq_waitq);
+
+ /* start requeue thread */
+ rc = cfs_create_thread(mgc_requeue_thread, NULL,
+ CFS_DAEMON_FLAGS);
+ if (rc < 0) {
+ CERROR("%s: Cannot start requeue thread (%d),"
+ "no more log updates!\n",
+ obd->obd_name, rc);
+ GOTO(err_cleanup, rc);
+ }
+ /* rc is the pid of mgc_requeue_thread. */
+ rc = 0;
}
- cfs_spin_unlock(&config_list_lock);
RETURN(rc);
ldlm_lock2handle(lock, &lockh);
rc = ldlm_cli_cancel(&lockh);
break;
- case LDLM_CB_CANCELING: {
+ case LDLM_CB_CANCELING:
/* We've given up the lock, prepare ourselves to update. */
LDLM_DEBUG(lock, "MGC cancel CB");
(char *)&lock->l_resource->lr_name.name[0]);
if (!cld) {
- CERROR("missing data, won't requeue\n");
+ CDEBUG(D_INFO, "missing data, won't requeue\n");
break;
}
+
+ /* held at mgc_process_log(). */
+ LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
/* Are we done with this log? */
if (cld->cld_stopping) {
CDEBUG(D_MGC, "log %s: stopping, won't requeue\n",
(we get called from client_disconnect_export) */
if (!lock->l_conn_export ||
!lock->l_conn_export->exp_obd->u.cli.cl_conn_count) {
- CDEBUG(D_MGC, "log %s: disconnecting, won't requeue\n",
+ CDEBUG(D_MGC, "log %.8s: disconnecting, won't requeue\n",
cld->cld_logname);
config_log_put(cld);
break;
}
- /* Did we fail to get the lock? */
- if (lock->l_req_mode != lock->l_granted_mode &&
- !cld->cld_is_sptlrpc) {
- CDEBUG(D_MGC, "log %s: original grant failed, will "
- "requeue later\n", cld->cld_logname);
- /* Try to re-enqueue later */
- rc = mgc_requeue_add(cld, 1);
- break;
- }
+
/* Re-enqueue now */
- rc = mgc_requeue_add(cld, 0);
+ mgc_requeue_add(cld);
+ /* balance the reference held since mgc_process_log() */
+ config_log_put(cld);
break;
- }
default:
LBUG();
}
-
- if (rc) {
- CERROR("%s CB failed %d:\n", flag == LDLM_CB_BLOCKING ?
- "blocking" : "cancel", rc);
- LDLM_ERROR(lock, "MGC ast");
- }
RETURN(rc);
}
{
struct config_llog_data *cld = (struct config_llog_data *)data;
struct ldlm_enqueue_info einfo = { type, mode, mgc_blocking_ast,
- ldlm_completion_ast, NULL, NULL, data};
+ /* lock data is attached later via ldlm_lock_set_data() */
+ ldlm_completion_ast, NULL, NULL, NULL };
struct ptlrpc_request *req;
- int short_limit = cld->cld_is_sptlrpc;
+ int short_limit = cld_is_sptlrpc(cld);
int rc;
ENTRY;
CDEBUG(D_MGC, "Enqueue for %s (res "LPX64")\n", cld->cld_logname,
cld->cld_resid.name[0]);
- /* We can only drop this config log ref when we drop the lock */
- if (config_log_get(cld))
- RETURN(ELDLM_LOCK_ABORTED);
-
/* We need a callback for every lockholder, so don't try to
ldlm_lock_match (see rev 1.1.2.11.2.47) */
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
if (req == NULL)
RETURN(-ENOMEM);
ptlrpc_request_set_replen(req);
+
/* check if this is server or client */
if (cld->cld_cfg.cfg_sb) {
struct lustre_sb_info *lsi = s2lsi(cld->cld_cfg.cfg_sb);
RETURN(0);
}
+static void mgc_notify_active(struct obd_device *unused)
+{
+ /* wakeup mgc_requeue_thread to requeue mgc lock */
+ cfs_spin_lock(&config_list_lock);
+ rq_state |= RQ_NOW;
+ cfs_spin_unlock(&config_list_lock);
+ cfs_waitq_signal(&rq_waitq);
+
+ /* TODO: Help the MGS rebuild nidtbl. -jay */
+}
+
/* Send target_reg message to MGS */
static int mgc_target_register(struct obd_export *exp,
struct mgs_target_info *mti)
LCONSOLE_WARN("%s: Reactivating import\n", obd->obd_name);
/* Clearing obd_no_recov allows us to continue pinging */
obd->obd_no_recov = 0;
+ /* import is active again: requeue any lost config locks */
+ mgc_notify_active(obd);
break;
case IMP_EVENT_OCD:
break;
RETURN(rc);
}
-/** Get a config log from the MGS and process it.
- * This func is called for both clients and servers.
- * Copy the log locally before parsing it if appropriate (non-MGS server)
- */
-int mgc_process_log(struct obd_device *mgc,
- struct config_llog_data *cld)
+/* local_only means it cannot get remote llogs */
+static int mgc_process_cfg_log(struct obd_device *mgc,
+ struct config_llog_data *cld,
+ int local_only)
{
- struct llog_ctxt *ctxt, *lctxt;
- struct lustre_handle lockh;
+ struct llog_ctxt *ctxt, *lctxt = NULL;
struct client_obd *cli = &mgc->u.cli;
struct lvfs_run_ctxt *saved_ctxt;
struct lustre_sb_info *lsi = NULL;
- int rc = 0, rcl, flags = 0, must_pop = 0;
+ int rc = 0, must_pop = 0;
+ /* set once sptlrpc_conf_log_update_begin() has been called */
+ bool sptlrpc_started = false;
+
ENTRY;
LASSERT(cld);
+ LASSERT(cfs_mutex_is_locked(&cld->cld_lock));
- /* I don't want multiple processes running process_log at once --
- sounds like badness. It actually might be fine, as long as
- we're not trying to update from the same log
- simultaneously (in which case we should use a per-log sem.) */
- cfs_mutex_lock(&cld->cld_lock);
-
- if (cld->cld_stopping) {
- cfs_mutex_unlock(&cld->cld_lock);
+ /*
+ * local copy of sptlrpc log is controlled elsewhere, don't try to
+ * read it up here.
+ */
+ if (cld_is_sptlrpc(cld) && local_only)
RETURN(0);
- }
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_MGC_PAUSE_PROCESS_LOG, 20);
if (cld->cld_cfg.cfg_sb)
lsi = s2lsi(cld->cld_cfg.cfg_sb);
- CDEBUG(D_MGC, "Process log %s:%p from %d\n", cld->cld_logname,
- cld->cld_cfg.cfg_instance, cld->cld_cfg.cfg_last_idx + 1);
-
ctxt = llog_get_context(mgc, LLOG_CONFIG_REPL_CTXT);
if (!ctxt) {
CERROR("missing llog context\n");
- cfs_mutex_unlock(&cld->cld_lock);
RETURN(-EINVAL);
}
OBD_ALLOC_PTR(saved_ctxt);
- if (saved_ctxt == NULL) {
- cfs_mutex_unlock(&cld->cld_lock);
+ if (saved_ctxt == NULL)
RETURN(-ENOMEM);
- }
-
- /* Get the cfg lock on the llog */
- rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, NULL, LDLM_PLAIN, NULL,
- LCK_CR, &flags, NULL, NULL, NULL,
- cld, 0, NULL, &lockh);
- if (rcl)
- CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl);
lctxt = llog_get_context(mgc, LLOG_CONFIG_ORIG_CTXT);
- /*
- * local copy of sptlrpc log is controlled elsewhere, don't try to
- * read it up here.
- */
- if (rcl && cld->cld_is_sptlrpc)
- GOTO(out_pop, rc);
-
/* Copy the setup log locally if we can. Don't mess around if we're
running an MGS though (logs are already local). */
if (lctxt && lsi && (lsi->lsi_flags & LSI_SERVER) &&
!IS_MGS(lsi->lsi_ldd)) {
push_ctxt(saved_ctxt, &mgc->obd_lvfs_ctxt, NULL);
must_pop++;
- if (rcl == 0)
+ if (!local_only)
/* Only try to copy log if we have the lock. */
rc = mgc_copy_llog(mgc, ctxt, lctxt, cld->cld_logname);
- if (rcl || rc) {
+ if (local_only || rc) {
if (mgc_llog_is_empty(mgc, lctxt, cld->cld_logname)) {
LCONSOLE_ERROR_MSG(0x13a, "Failed to get MGS "
"log %s and no local copy."
log has. */
llog_ctxt_put(ctxt);
ctxt = lctxt;
+ lctxt = NULL;
+ } else if (local_only) { /* no local log at client side */
+ GOTO(out_pop, rc = -EIO);
}
- if (cld->cld_is_sptlrpc)
+ if (cld_is_sptlrpc(cld)) {
sptlrpc_conf_log_update_begin(cld->cld_logname);
+ sptlrpc_started = true;
+ }
/* logname and instance info should be the same, so use our
copy of the instance for the update. The cfg_last_idx will
be updated here. */
- if (rcl == 0 || lctxt == ctxt)
- rc = class_config_parse_llog(ctxt, cld->cld_logname, &cld->cld_cfg);
+ rc = class_config_parse_llog(ctxt, cld->cld_logname, &cld->cld_cfg);
+ EXIT;
+
out_pop:
llog_ctxt_put(ctxt);
- if (ctxt != lctxt)
+ if (lctxt)
llog_ctxt_put(lctxt);
if (must_pop)
pop_ctxt(saved_ctxt, &mgc->obd_lvfs_ctxt, NULL);
* in parallel.
* the logname must be <fsname>-sptlrpc
*/
- if (cld->cld_is_sptlrpc && rcl == 0) {
+ if (sptlrpc_started) {
+ LASSERT(cld_is_sptlrpc(cld));
sptlrpc_conf_log_update_end(cld->cld_logname);
class_notify_sptlrpc_conf(cld->cld_logname,
strlen(cld->cld_logname) -
strlen("-sptlrpc"));
}
+ RETURN(rc);
+}
+
+/** Get a config log from the MGS and process it.
+ * This func is called for both clients and servers.
+ * Copy the log locally before parsing it if appropriate (non-MGS server)
+ */
+int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
+{
+ struct lustre_handle lockh = { 0 };
+ int rc = 0, rcl, flags = 0;
+ ENTRY;
+
+ LASSERT(cld);
+
+ /* I don't want multiple processes running process_log at once --
+ sounds like badness. It actually might be fine, as long as
+ we're not trying to update from the same log
+ simultaneously (in which case we should use a per-log sem.) */
+ cfs_mutex_lock(&cld->cld_lock);
+ if (cld->cld_stopping) {
+ cfs_mutex_unlock(&cld->cld_lock);
+ RETURN(0);
+ }
+
+ OBD_FAIL_TIMEOUT(OBD_FAIL_MGC_PAUSE_PROCESS_LOG, 20);
+
+ CDEBUG(D_MGC, "Process log %s:%p from %d\n", cld->cld_logname,
+ cld->cld_cfg.cfg_instance, cld->cld_cfg.cfg_last_idx + 1);
+
+ /* Get the cfg lock on the llog */
+ rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, NULL, LDLM_PLAIN, NULL,
+ LCK_CR, &flags, NULL, NULL, NULL,
+ cld, 0, NULL, &lockh);
+ if (rcl == 0) {
+ /* Get the cld, it will be released in mgc_blocking_ast. */
+ config_log_get(cld);
+ rc = ldlm_lock_set_data(&lockh, (void *)cld);
+ LASSERT(rc == 0);
+ } else {
+ CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl);
+
+ /* mark cld_lostlock so that it will requeue
+ * after MGC becomes available. */
+ cld->cld_lostlock = 1;
+ /* Get extra reference, it will be put in requeue thread */
+ config_log_get(cld);
+ }
+
+ /* rcl != 0: no config lock, so only the local copy may be read */
+ rc = mgc_process_cfg_log(mgc, cld, rcl != 0);
+
+ CDEBUG(D_MGC, "%s: configuration from log '%s' %sed (%d).\n",
+ mgc->obd_name, cld->cld_logname, rc ? "fail" : "succeed", rc);
+
+ cfs_mutex_unlock(&cld->cld_lock);
+
/* Now drop the lock so MGS can revoke it */
if (!rcl) {
rcl = mgc_cancel(mgc->u.cli.cl_mgc_mgsexp, NULL,
CERROR("Can't drop cfg lock: %d\n", rcl);
}
- CDEBUG(D_MGC, "%s: configuration from log '%s' %sed (%d).\n",
- mgc->obd_name, cld->cld_logname, rc ? "fail" : "succeed", rc);
-
- cfs_mutex_unlock(&cld->cld_lock);
-
RETURN(rc);
}
+
/** Called from lustre_process_log.
* LCFG_LOG_START gets the config log from the MGS, processes it to start
* any services, and adds it to the list logs to watch (follow).