*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <linux/module.h>
#include <linux/kthread.h>
+#include <linux/random.h>
#include <dt_object.h>
#include <lprocfs_status.h>
#include <lustre_nodemap.h>
#include <lustre_swab.h>
#include <obd_class.h>
+#include <lustre_barrier.h>
#include "mgc_internal.h"
case CONFIG_T_RECOVER:
case CONFIG_T_PARAMS:
case CONFIG_T_NODEMAP:
+ case CONFIG_T_BARRIER:
resname = type;
break;
default:
LBUG();
}
res_id->name[1] = cpu_to_le64(resname);
- CDEBUG(D_MGC, "log %s to resid "LPX64"/"LPX64" (%.8s)\n", name,
+ CDEBUG(D_MGC, "log %s to resid %#llx/%#llx (%.8s)\n", name,
res_id->name[0], res_id->name[1], (char *)&res_id->name[0]);
return 0;
}
}
EXPORT_SYMBOL(mgc_fsname2resid);
-static int mgc_logname2resid(char *logname, struct ldlm_res_id *res_id,
- int type)
+int mgc_logname2resid(char *logname, struct ldlm_res_id *res_id, int type)
{
char *name_end;
int len;
len = name_end - logname;
return mgc_name2resid(logname, len, res_id, type);
}
+EXPORT_SYMBOL(mgc_logname2resid);
/********************** config llog list **********************/
-static struct list_head config_llog_list = LIST_HEAD_INIT(config_llog_list);
-static DEFINE_SPINLOCK(config_list_lock);
+static LIST_HEAD(config_llog_list);
+static DEFINE_SPINLOCK(config_list_lock); /* protects config_llog_list */
/* Take a reference to a config log */
static int config_log_get(struct config_llog_data *cld)
{
ENTRY;
+ if (unlikely(!cld))
+ RETURN_EXIT;
+
CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
atomic_read(&cld->cld_refcount));
LASSERT(atomic_read(&cld->cld_refcount) > 0);
CDEBUG(D_MGC, "dropping config log %s\n", cld->cld_logname);
- if (cld->cld_recover)
- config_log_put(cld->cld_recover);
- if (cld->cld_sptlrpc)
- config_log_put(cld->cld_sptlrpc);
- if (cld->cld_params)
- config_log_put(cld->cld_params);
- if (cld->cld_nodemap)
- config_log_put(cld->cld_nodemap);
+ config_log_put(cld->cld_barrier);
+ config_log_put(cld->cld_recover);
+ config_log_put(cld->cld_params);
+ config_log_put(cld->cld_nodemap);
+ config_log_put(cld->cld_sptlrpc);
if (cld_is_sptlrpc(cld))
sptlrpc_conf_log_stop(cld->cld_logname);
struct config_llog_data *config_log_find(char *logname,
					 struct config_llog_instance *cfg)
{
-	struct config_llog_data *cld;
-	struct config_llog_data *found = NULL;
-	void *   instance;
-	ENTRY;
+	struct config_llog_data *cld;
+	struct config_llog_data *found = NULL;
+	unsigned long cfg_instance;
-	LASSERT(logname != NULL);
+	ENTRY;
+	LASSERT(logname != NULL);
+	/* cfg_instance is now an opaque integer token, not a raw pointer */
-	instance = cfg ? cfg->cfg_instance : NULL;
+	cfg_instance = cfg ? cfg->cfg_instance : 0;
	spin_lock(&config_list_lock);
	list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
-		/* check if instance equals */
-		if (instance != cld->cld_cfg.cfg_instance)
-			continue;
+		/* check if cfg_instance is the one we want */
+		if (cfg_instance != cld->cld_cfg.cfg_instance)
+			continue;
		/* instance may be NULL, should check name */
		if (strcmp(logname, cld->cld_logname) == 0) {
			found = cld;
+			/* take the reference while still inside the
+			 * config_list_lock critical section, so the cld
+			 * cannot be freed between find and get */
+			config_log_get(found);
			break;
		}
	}
-	if (found) {
-		atomic_inc(&found->cld_refcount);
-		LASSERT(found->cld_stopping == 0 || cld_is_sptlrpc(found) == 0);
-	}
	spin_unlock(&config_list_lock);
	RETURN(found);
}
ENTRY;
- CDEBUG(D_MGC, "do adding config log %s:%p\n", logname,
- cfg ? cfg->cfg_instance : NULL);
+ CDEBUG(D_MGC, "do adding config log %s-%016lx\n", logname,
+ cfg ? cfg->cfg_instance : 0);
OBD_ALLOC(cld, sizeof(*cld) + strlen(logname) + 1);
if (!cld)
RETURN(ERR_PTR(-ENOMEM));
+ rc = mgc_logname2resid(logname, &cld->cld_resid, type);
+ if (rc) {
+ OBD_FREE(cld, sizeof(*cld) + strlen(cld->cld_logname) + 1);
+ RETURN(ERR_PTR(rc));
+ }
+
strcpy(cld->cld_logname, logname);
if (cfg)
cld->cld_cfg = *cfg;
/* Keep the mgc around until we are done */
cld->cld_mgcexp = class_export_get(obd->obd_self_export);
- if (cld_is_sptlrpc(cld)) {
+ if (cld_is_sptlrpc(cld))
sptlrpc_conf_log_start(logname);
- cld->cld_cfg.cfg_obdname = obd->obd_name;
- }
-
- rc = mgc_logname2resid(logname, &cld->cld_resid, type);
spin_lock(&config_list_lock);
list_add(&cld->cld_list_chain, &config_llog_list);
spin_unlock(&config_list_lock);
- if (rc) {
- config_log_put(cld);
- RETURN(ERR_PTR(rc));
- }
-
- if (cld_is_sptlrpc(cld) || cld_is_nodemap(cld)) {
+ if (cld_is_sptlrpc(cld) || cld_is_nodemap(cld) || cld_is_barrier(cld)) {
rc = mgc_process_log(obd, cld);
if (rc && rc != -ENOENT)
CERROR("%s: failed processing log, type %d: rc = %d\n",
}
static struct config_llog_data *config_recover_log_add(struct obd_device *obd,
-	char *fsname,
-	struct config_llog_instance *cfg,
-	struct super_block *sb)
+	char *fsname,
+	struct config_llog_instance *cfg,
+	struct super_block *sb)
{
-	struct config_llog_instance lcfg = *cfg;
-	struct lustre_sb_info *lsi = s2lsi(sb);
-	struct config_llog_data *cld;
-	char logname[32];
+	struct config_llog_instance lcfg = *cfg;
+	struct lustre_sb_info *lsi = s2lsi(sb);
+	struct config_llog_data *cld;
+	char logname[32];
	if (IS_OST(lsi))
-		return NULL;
+		return NULL;
	/* for osp-on-ost, see lustre_start_osp() */
	if (IS_MDT(lsi) && lcfg.cfg_instance)
		return NULL;
-	/* we have to use different llog for clients and mdts for cmd
-	 * where only clients are notified if one of cmd server restarts */
-	LASSERT(strlen(fsname) < sizeof(logname) / 2);
-	strcpy(logname, fsname);
+	/* We have to use different llog for clients and MDTs for DNE,
+	 * where only clients are notified if one of DNE server restarts.
+	 */
+	LASSERT(strlen(fsname) < sizeof(logname) / 2);
+	/* NOTE(review): strncpy() does not NUL-terminate on truncation;
+	 * this is safe only because the LASSERT above guarantees fsname
+	 * is shorter than half the buffer -- TODO confirm intent */
+	strncpy(logname, fsname, sizeof(logname));
+	/* NOTE(review): strncat()'s size argument bounds the bytes to
+	 * APPEND, not the total buffer size, so sizeof(logname) does not
+	 * actually prevent overflow here.  The correct bound would be
+	 * sizeof(logname) - strlen(logname) - 1; currently safe only via
+	 * the strlen(fsname) < sizeof(logname) / 2 assertion above. */
	if (IS_SERVER(lsi)) { /* mdt */
-		LASSERT(lcfg.cfg_instance == NULL);
-		lcfg.cfg_instance = sb;
-		strcat(logname, "-mdtir");
-	} else {
-		LASSERT(lcfg.cfg_instance != NULL);
-		strcat(logname, "-cliir");
-	}
+		LASSERT(lcfg.cfg_instance == 0);
+		lcfg.cfg_instance = ll_get_cfg_instance(sb);
+		strncat(logname, "-mdtir", sizeof(logname));
+	} else {
+		LASSERT(lcfg.cfg_instance != 0);
+		strncat(logname, "-cliir", sizeof(logname));
+	}
-	cld = do_config_log_add(obd, logname, CONFIG_T_RECOVER, &lcfg, sb);
-	return cld;
+	cld = do_config_log_add(obd, logname, CONFIG_T_RECOVER, &lcfg, sb);
+	return cld;
}
-static struct config_llog_data *config_params_log_add(struct obd_device *obd,
-	struct config_llog_instance *cfg, struct super_block *sb)
+/* Find the config log of @type named @logname, or create it if it is not
+ * on the list yet.  Either way the caller ends up owning a reference on
+ * the returned cld: config_log_find() takes one under config_list_lock,
+ * and do_config_log_add() presumably returns with the allocation
+ * reference -- TODO confirm against do_config_log_add(). */
+static struct config_llog_data *config_log_find_or_add(struct obd_device *obd,
+	char *logname, struct super_block *sb, int type,
+	struct config_llog_instance *cfg)
{
-	struct config_llog_instance lcfg = *cfg;
-	struct config_llog_data *cld;
+	struct config_llog_instance lcfg = *cfg;
+	struct config_llog_data *cld;
-	lcfg.cfg_instance = sb;
+	/* Note class_config_llog_handler() depends on getting "obd" back */
+	lcfg.cfg_instance = sb ? ll_get_cfg_instance(sb) : (unsigned long)obd;
-	cld = do_config_log_add(obd, PARAMS_FILENAME, CONFIG_T_PARAMS,
-	&lcfg, sb);
+	cld = config_log_find(logname, &lcfg);
+	if (unlikely(cld != NULL))
+		return cld;
-	return cld;
+	return do_config_log_add(obd, logname, type, &lcfg, sb);
}
/** Add this log to the list of active logs watched by an MGC.
* We have one active log per "mount" - client instance or servername.
* Each instance may be at a different point in the log.
*/
-static int config_log_add(struct obd_device *obd, char *logname,
- struct config_llog_instance *cfg,
- struct super_block *sb)
+static struct config_llog_data *
+config_log_add(struct obd_device *obd, char *logname,
+ struct config_llog_instance *cfg, struct super_block *sb)
{
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct config_llog_data *cld;
- struct config_llog_data *sptlrpc_cld;
- struct config_llog_data *params_cld;
- struct config_llog_data *nodemap_cld;
- char seclogname[32];
- char *ptr;
- int rc;
+ struct lustre_sb_info *lsi = s2lsi(sb);
+ struct config_llog_data *cld = NULL;
+ struct config_llog_data *sptlrpc_cld = NULL;
+ struct config_llog_data *params_cld = NULL;
+ struct config_llog_data *nodemap_cld = NULL;
+ struct config_llog_data *barrier_cld = NULL;
+ char seclogname[32];
+ char *ptr;
+ int rc;
+ bool locked = false;
ENTRY;
- CDEBUG(D_MGC, "adding config log %s:%p\n", logname, cfg->cfg_instance);
+ CDEBUG(D_MGC, "add config log %s-%016lx\n", logname,
+ cfg->cfg_instance);
/*
* for each regular log, the depended sptlrpc log name is
ptr = strrchr(logname, '-');
if (ptr == NULL || ptr - logname > 8) {
CERROR("logname %s is too long\n", logname);
- RETURN(-EINVAL);
+ RETURN(ERR_PTR(-EINVAL));
}
memcpy(seclogname, logname, ptr - logname);
strcpy(seclogname + (ptr - logname), "-sptlrpc");
- sptlrpc_cld = config_log_find(seclogname, NULL);
- if (sptlrpc_cld == NULL) {
- sptlrpc_cld = do_config_log_add(obd, seclogname,
- CONFIG_T_SPTLRPC, NULL, NULL);
+ if (cfg->cfg_sub_clds & CONFIG_SUB_SPTLRPC) {
+ sptlrpc_cld = config_log_find_or_add(obd, seclogname, NULL,
+ CONFIG_T_SPTLRPC, cfg);
if (IS_ERR(sptlrpc_cld)) {
- CERROR("can't create sptlrpc log: %s\n", seclogname);
- GOTO(out, rc = PTR_ERR(sptlrpc_cld));
+ CERROR("%s: can't create sptlrpc log %s: rc = %ld\n",
+ obd->obd_name, seclogname, PTR_ERR(sptlrpc_cld));
+ RETURN(sptlrpc_cld);
}
}
- nodemap_cld = config_log_find(LUSTRE_NODEMAP_NAME, NULL);
- if (!nodemap_cld && IS_SERVER(lsi) && !IS_MGS(lsi)) {
- nodemap_cld = do_config_log_add(obd, LUSTRE_NODEMAP_NAME,
- CONFIG_T_NODEMAP, NULL, NULL);
+ if (!IS_MGS(lsi) && cfg->cfg_sub_clds & CONFIG_SUB_NODEMAP) {
+ nodemap_cld = config_log_find_or_add(obd, LUSTRE_NODEMAP_NAME,
+ NULL, CONFIG_T_NODEMAP,
+ cfg);
if (IS_ERR(nodemap_cld)) {
rc = PTR_ERR(nodemap_cld);
CERROR("%s: cannot create nodemap log: rc = %d\n",
}
}
- params_cld = config_params_log_add(obd, cfg, sb);
- if (IS_ERR(params_cld)) {
- rc = PTR_ERR(params_cld);
- CERROR("%s: can't create params log: rc = %d\n",
- obd->obd_name, rc);
- GOTO(out_nodemap, rc);
+ if (cfg->cfg_sub_clds & CONFIG_SUB_PARAMS) {
+ params_cld = config_log_find_or_add(obd, PARAMS_FILENAME, sb,
+ CONFIG_T_PARAMS, cfg);
+ if (IS_ERR(params_cld)) {
+ rc = PTR_ERR(params_cld);
+ CERROR("%s: can't create params log: rc = %d\n",
+ obd->obd_name, rc);
+ GOTO(out_nodemap, rc);
+ }
+ }
+
+ if (IS_MDT(s2lsi(sb)) && cfg->cfg_sub_clds & CONFIG_SUB_BARRIER) {
+ snprintf(seclogname + (ptr - logname), sizeof(seclogname) - 1,
+ "-%s", BARRIER_FILENAME);
+ barrier_cld = config_log_find_or_add(obd, seclogname, sb,
+ CONFIG_T_BARRIER, cfg);
+ if (IS_ERR(barrier_cld)) {
+ rc = PTR_ERR(barrier_cld);
+ CERROR("%s: can't create barrier log: rc = %d\n",
+ obd->obd_name, rc);
+ GOTO(out_params, rc);
+ }
}
cld = do_config_log_add(obd, logname, CONFIG_T_CONFIG, cfg, sb);
if (IS_ERR(cld)) {
- CERROR("can't create log: %s\n", logname);
- GOTO(out_params, rc = PTR_ERR(cld));
+ rc = PTR_ERR(cld);
+ CERROR("%s: can't create log: rc = %d\n",
+ obd->obd_name, rc);
+ GOTO(out_barrier, rc = PTR_ERR(cld));
}
LASSERT(lsi->lsi_lmd);
- if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)) {
+ if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR) &&
+ cfg->cfg_sub_clds & CONFIG_SUB_RECOVER) {
struct config_llog_data *recover_cld;
+
ptr = strrchr(seclogname, '-');
if (ptr != NULL) {
*ptr = 0;
- }
- else {
+ } else {
CERROR("%s: sptlrpc log name not correct, %s: "
"rc = %d\n", obd->obd_name, seclogname, -EINVAL);
- config_log_put(cld);
- RETURN(-EINVAL);
+ GOTO(out_cld, rc = -EINVAL);
}
+
recover_cld = config_recover_log_add(obd, seclogname, cfg, sb);
- if (IS_ERR(recover_cld))
- GOTO(out_cld, rc = PTR_ERR(recover_cld));
+ if (IS_ERR(recover_cld)) {
+ rc = PTR_ERR(recover_cld);
+ CERROR("%s: can't create recover log: rc = %d\n",
+ obd->obd_name, rc);
+ GOTO(out_cld, rc);
+ }
+
+ mutex_lock(&cld->cld_lock);
+ locked = true;
cld->cld_recover = recover_cld;
}
- cld->cld_sptlrpc = sptlrpc_cld;
+ if (!locked)
+ mutex_lock(&cld->cld_lock);
cld->cld_params = params_cld;
+ cld->cld_barrier = barrier_cld;
cld->cld_nodemap = nodemap_cld;
+ cld->cld_sptlrpc = sptlrpc_cld;
+ mutex_unlock(&cld->cld_lock);
- RETURN(0);
+ RETURN(cld);
out_cld:
config_log_put(cld);
-
+out_barrier:
+ config_log_put(barrier_cld);
out_params:
config_log_put(params_cld);
-
out_nodemap:
config_log_put(nodemap_cld);
-
out_sptlrpc:
config_log_put(sptlrpc_cld);
-out:
- return rc;
+ return ERR_PTR(rc);
}
-DEFINE_MUTEX(llog_process_lock);
+/* Mark a config log as stopping.
+ *
+ * cld_stopping is written while holding BOTH cld_lock and
+ * config_list_lock, so readers holding either lock observe a
+ * consistent value.  A NULL @cld is tolerated so callers can pass
+ * optional sub-logs (recover/params/barrier/...) without checking. */
+static inline void config_mark_cld_stop(struct config_llog_data *cld)
+{
+	if (cld) {
+		mutex_lock(&cld->cld_lock);
+		spin_lock(&config_list_lock);
+		cld->cld_stopping = 1;
+		spin_unlock(&config_list_lock);
+		mutex_unlock(&cld->cld_lock);
+	}
+}
/** Stop watching for updates on this log.
*/
struct config_llog_data *cld_params = NULL;
struct config_llog_data *cld_recover = NULL;
struct config_llog_data *cld_nodemap = NULL;
+ struct config_llog_data *cld_barrier = NULL;
int rc = 0;
ENTRY;
RETURN(rc);
}
+ spin_lock(&config_list_lock);
cld->cld_stopping = 1;
+ spin_unlock(&config_list_lock);
cld_recover = cld->cld_recover;
cld->cld_recover = NULL;
- mutex_unlock(&cld->cld_lock);
-
- if (cld_recover) {
- mutex_lock(&cld_recover->cld_lock);
- cld_recover->cld_stopping = 1;
- mutex_unlock(&cld_recover->cld_lock);
- config_log_put(cld_recover);
- }
-
- spin_lock(&config_list_lock);
- cld_sptlrpc = cld->cld_sptlrpc;
- cld->cld_sptlrpc = NULL;
cld_params = cld->cld_params;
cld->cld_params = NULL;
cld_nodemap = cld->cld_nodemap;
cld->cld_nodemap = NULL;
- spin_unlock(&config_list_lock);
+ cld_barrier = cld->cld_barrier;
+ cld->cld_barrier = NULL;
+ cld_sptlrpc = cld->cld_sptlrpc;
+ cld->cld_sptlrpc = NULL;
+ mutex_unlock(&cld->cld_lock);
- if (cld_sptlrpc)
- config_log_put(cld_sptlrpc);
+ config_mark_cld_stop(cld_recover);
+ config_log_put(cld_recover);
- if (cld_params) {
- mutex_lock(&cld_params->cld_lock);
- cld_params->cld_stopping = 1;
- mutex_unlock(&cld_params->cld_lock);
- config_log_put(cld_params);
- }
+ config_mark_cld_stop(cld_params);
+ config_log_put(cld_params);
- if (cld_nodemap) {
- mutex_lock(&cld_nodemap->cld_lock);
- cld_nodemap->cld_stopping = 1;
- mutex_unlock(&cld_nodemap->cld_lock);
- config_log_put(cld_nodemap);
+ /* don't set cld_stopping on nm lock as other targets may be active */
+ config_log_put(cld_nodemap);
+
+ if (cld_barrier) {
+ mutex_lock(&cld_barrier->cld_lock);
+ cld_barrier->cld_stopping = 1;
+ mutex_unlock(&cld_barrier->cld_lock);
+ config_log_put(cld_barrier);
}
+ config_log_put(cld_sptlrpc);
+
/* drop the ref from the find */
config_log_put(cld);
/* drop the start ref */
RETURN(rc);
}
-#ifdef CONFIG_PROC_FS
int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data)
{
struct obd_device *obd = data;
struct obd_import *imp;
struct obd_connect_data *ocd;
struct config_llog_data *cld;
- ENTRY;
- LASSERT(obd != NULL);
+ ENTRY;
+ LASSERT(obd);
LPROCFS_CLIMP_CHECK(obd);
imp = obd->u.cli.cl_import;
ocd = &imp->imp_connect_data;
LPROCFS_CLIMP_EXIT(obd);
RETURN(0);
}
-#endif
/* reenqueue any lost locks */
#define RQ_RUNNING 0x1
LASSERT(atomic_read(&cld->cld_refcount) > 0);
- /* Do not run mgc_process_log on a disconnected export or an
+ /*
+ * Do not run mgc_process_log on a disconnected export or an
* export which is being disconnected. Take the client
- * semaphore to make the check non-racy. */
- down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+ * semaphore to make the check non-racy.
+ */
+ down_read_nested(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem,
+ OBD_CLI_SEM_MGC);
if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
rc = mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
* in order to not flood the MGS.
*/
#define MGC_TIMEOUT_MIN_SECONDS 5
-#define MGC_TIMEOUT_RAND_CENTISEC 0x1ff /* ~500 */
+#define MGC_TIMEOUT_RAND_CENTISEC 500
static int mgc_requeue_thread(void *data)
{
/* Keep trying failed locks periodically */
spin_lock(&config_list_lock);
rq_state |= RQ_RUNNING;
- while (1) {
+ while (!(rq_state & RQ_STOP)) {
struct l_wait_info lwi;
struct config_llog_data *cld, *cld_prev;
- int rand = cfs_rand() & MGC_TIMEOUT_RAND_CENTISEC;
- int stopped = !!(rq_state & RQ_STOP);
+ int rand = prandom_u32_max(MGC_TIMEOUT_RAND_CENTISEC);
int to;
/* Any new or requeued lostlocks will change the state */
/* Always wait a few seconds to allow the server who
caused the lock revocation to finish its setup, plus some
random so everyone doesn't try to reconnect at once. */
- to = msecs_to_jiffies(MGC_TIMEOUT_MIN_SECONDS * MSEC_PER_SEC);
+ to = cfs_time_seconds(MGC_TIMEOUT_MIN_SECONDS * 100 + rand);
/* rand is centi-seconds */
- to += msecs_to_jiffies(rand * MSEC_PER_SEC / 100);
- lwi = LWI_TIMEOUT(to, NULL, NULL);
+ lwi = LWI_TIMEOUT(to / 100, NULL, NULL);
l_wait_event(rq_waitq, rq_state & (RQ_STOP | RQ_PRECLEANUP),
&lwi);
- /*
- * iterate & processing through the list. for each cld, process
- * its depending sptlrpc cld firstly (if any) and then itself.
- *
- * it's guaranteed any item in the list must have
- * reference > 0; and if cld_lostlock is set, at
- * least one reference is taken by the previous enqueue.
- */
- cld_prev = NULL;
+ /*
+ * iterate & processing through the list. for each cld, process
+ * its depending sptlrpc cld firstly (if any) and then itself.
+ *
+ * it's guaranteed any item in the list must have
+ * reference > 0; and if cld_lostlock is set, at
+ * least one reference is taken by the previous enqueue.
+ */
+ cld_prev = NULL;
spin_lock(&config_list_lock);
rq_state &= ~RQ_PRECLEANUP;
list_for_each_entry(cld, &config_llog_list,
- cld_list_chain) {
- if (!cld->cld_lostlock)
+ cld_list_chain) {
+ if (!cld->cld_lostlock || cld->cld_stopping)
continue;
+ /* hold reference to avoid being freed during
+ * subsequent processing. */
+ config_log_get(cld);
+ cld->cld_lostlock = 0;
spin_unlock(&config_list_lock);
- LASSERT(atomic_read(&cld->cld_refcount) > 0);
-
- /* Whether we enqueued again or not in mgc_process_log,
- * we're done with the ref from the old enqueue */
- if (cld_prev)
- config_log_put(cld_prev);
+ config_log_put(cld_prev);
cld_prev = cld;
- cld->cld_lostlock = 0;
- if (likely(!stopped))
+ if (likely(!(rq_state & RQ_STOP))) {
do_requeue(cld);
-
- spin_lock(&config_list_lock);
+ spin_lock(&config_list_lock);
+ } else {
+ spin_lock(&config_list_lock);
+ break;
+ }
}
spin_unlock(&config_list_lock);
- if (cld_prev)
- config_log_put(cld_prev);
-
- /* break after scanning the list so that we can drop
- * refcount to losing lock clds */
- if (unlikely(stopped)) {
- spin_lock(&config_list_lock);
- break;
- }
+ config_log_put(cld_prev);
/* Wait a bit to see if anyone else needs a requeue */
- lwi = (struct l_wait_info) { 0 };
- l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP),
- &lwi);
+ wait_event_idle(rq_waitq, rq_state & (RQ_NOW | RQ_STOP));
spin_lock(&config_list_lock);
}
+
/* spinlock and while guarantee RQ_NOW and RQ_LATER are not set */
rq_state &= ~RQ_RUNNING;
spin_unlock(&config_list_lock);
We are responsible for dropping the config log reference from here on out. */
static void mgc_requeue_add(struct config_llog_data *cld)
{
+	bool wakeup = false;
	ENTRY;
	CDEBUG(D_INFO, "log %s: requeue (r=%d sp=%d st=%x)\n",
	LASSERT(atomic_read(&cld->cld_refcount) > 0);
	mutex_lock(&cld->cld_lock);
-	if (cld->cld_stopping || cld->cld_lostlock) {
-		mutex_unlock(&cld->cld_lock);
-		RETURN_EXIT;
-	}
-	/* this refcount will be released in mgc_requeue_thread. */
-	config_log_get(cld);
-	cld->cld_lostlock = 1;
-	mutex_unlock(&cld->cld_lock);
-
-	/* Hold lock for rq_state */
	spin_lock(&config_list_lock);
-	if (rq_state & RQ_STOP) {
-		spin_unlock(&config_list_lock);
-		cld->cld_lostlock = 0;
-		config_log_put(cld);
-	} else {
+	/* check state and set cld_lostlock/RQ_NOW atomically under both
+	 * cld_lock and config_list_lock; the requeue thread now takes its
+	 * own reference, so no config_log_get() is needed here */
+	if (!(rq_state & RQ_STOP) && !cld->cld_stopping && !cld->cld_lostlock) {
+		cld->cld_lostlock = 1;
		rq_state |= RQ_NOW;
-		spin_unlock(&config_list_lock);
-		wake_up(&rq_waitq);
+		wakeup = true;
	}
+	spin_unlock(&config_list_lock);
+	mutex_unlock(&cld->cld_lock);
+	/* wake the requeue thread only after dropping both locks */
+	if (wakeup)
+		wake_up(&rq_waitq);
+
	EXIT;
}
dto = local_file_find_or_create(env, cli->cl_mgc_los, root,
MOUNT_CONFIGS_DIR,
S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO);
- lu_object_put_nocache(env, &root->do_lu);
+ dt_object_put_nocache(env, root);
if (IS_ERR(dto))
GOTO(out_los, rc = PTR_ERR(dto));
EXIT;
out_llog:
if (rc) {
- lu_object_put(env, &cli->cl_mgc_configs_dir->do_lu);
+ dt_object_put(env, cli->cl_mgc_configs_dir);
cli->cl_mgc_configs_dir = NULL;
}
out_los:
mgc_local_llog_fini(env, obd);
- lu_object_put_nocache(env, &cli->cl_mgc_configs_dir->do_lu);
+ dt_object_put_nocache(env, cli->cl_mgc_configs_dir);
cli->cl_mgc_configs_dir = NULL;
local_oid_storage_fini(env, cli->cl_mgc_los);
GOTO(err_cleanup, rc);
}
-#ifdef CONFIG_PROC_FS
- obd->obd_vars = lprocfs_mgc_obd_vars;
- lprocfs_obd_setup(obd);
-#endif
- sptlrpc_lprocfs_cliobd_attach(obd);
+ rc = mgc_tunables_init(obd);
+ if (rc)
+ GOTO(err_sysfs, rc);
if (atomic_inc_return(&mgc_count) == 1) {
rq_state = 0;
CERROR("%s: cannot start requeue thread: rc = %d; "
"no more log updates\n",
obd->obd_name, rc);
- GOTO(err_cleanup, rc);
+ GOTO(err_sysfs, rc);
}
/* rc is the task_struct pointer of mgc_requeue_thread. */
rc = 0;
RETURN(rc);
+err_sysfs:
+ lprocfs_obd_cleanup(obd);
err_cleanup:
client_obd_cleanup(obd);
err_decref:
/* held at mgc_process_log(). */
LASSERT(atomic_read(&cld->cld_refcount) > 0);
+
+ lock->l_ast_data = NULL;
/* Are we done with this log? */
if (cld->cld_stopping) {
CDEBUG(D_MGC, "log %s: stopping, won't requeue\n",
#define MGC_ENQUEUE_LIMIT (INITIAL_CONNECT_TIMEOUT + (AT_OFF ? 0 : at_min) \
+ PING_INTERVAL)
#define MGC_TARGET_REG_LIMIT 10
+#define MGC_TARGET_REG_LIMIT_MAX RECONNECT_DELAY_MAX
#define MGC_SEND_PARAM_LIMIT 10
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 13, 53, 0)
/* Send parameter to MGS*/
static int mgc_set_mgs_param(struct obd_export *exp,
struct mgs_send_param *msp)
RETURN(rc);
}
+#endif
/* Take a config lock so we can get cancel notifications */
static int mgc_enqueue(struct obd_export *exp, enum ldlm_type type,
union ldlm_policy_data *policy, enum ldlm_mode mode,
- __u64 *flags, void *bl_cb, void *cp_cb, void *gl_cb,
+ __u64 *flags, ldlm_glimpse_callback glimpse_callback,
void *data, __u32 lvb_len, void *lvb_swabber,
struct lustre_handle *lockh)
{
.ei_mode = mode,
.ei_cb_bl = mgc_blocking_ast,
.ei_cb_cp = ldlm_completion_ast,
+ .ei_cb_gl = glimpse_callback,
};
struct ptlrpc_request *req;
int short_limit = cld_is_sptlrpc(cld);
int rc;
ENTRY;
- CDEBUG(D_MGC, "Enqueue for %s (res "LPX64")\n", cld->cld_logname,
+ CDEBUG(D_MGC, "Enqueue for %s (res %#llx)\n", cld->cld_logname,
cld->cld_resid.name[0]);
/* We need a callback for every lockholder, so don't try to
RETURN(-ENOMEM);
}
- memcpy(req_mti, mti, sizeof(*req_mti));
- ptlrpc_request_set_replen(req);
- CDEBUG(D_MGC, "register %s\n", mti->mti_svname);
- /* Limit how long we will wait for the enqueue to complete */
- req->rq_delay_limit = MGC_TARGET_REG_LIMIT;
+ memcpy(req_mti, mti, sizeof(*req_mti));
+ ptlrpc_request_set_replen(req);
+ CDEBUG(D_MGC, "register %s\n", mti->mti_svname);
+ /* Limit how long we will wait for the enqueue to complete */
+ req->rq_delay_limit = MGC_TARGET_REG_LIMIT;
+
+ /* if the target needs to regenerate the config log in MGS, it's better
+ * to use some longer limit to let MGC have time to change connection to
+ * another MGS (or try again with the same MGS) for the target (server)
+ * will fail and exit if the request expired due to delay limit. */
+ if (mti->mti_flags & (LDD_F_UPDATE | LDD_F_NEED_INDEX))
+ req->rq_delay_limit = MGC_TARGET_REG_LIMIT_MAX;
rc = ptlrpc_queue_wait(req);
if (!rc) {
int rc = -EINVAL;
ENTRY;
- /* Turn off initial_recov after we try all backup servers once */
- if (KEY_IS(KEY_INIT_RECOV_BACKUP)) {
- struct obd_import *imp = class_exp2cliimp(exp);
- int value;
- if (vallen != sizeof(int))
- RETURN(-EINVAL);
- value = *(int *)val;
- CDEBUG(D_MGC, "InitRecov %s %d/d%d:i%d:r%d:or%d:%s\n",
- imp->imp_obd->obd_name, value,
- imp->imp_deactive, imp->imp_invalid,
- imp->imp_replayable, imp->imp_obd->obd_replayable,
- ptlrpc_import_state_name(imp->imp_state));
- /* Resurrect if we previously died */
- if ((imp->imp_state != LUSTRE_IMP_FULL &&
- imp->imp_state != LUSTRE_IMP_NEW) || value > 1)
- ptlrpc_reconnect_import(imp);
- RETURN(0);
- }
+ /* Turn off initial_recov after we try all backup servers once */
+ if (KEY_IS(KEY_INIT_RECOV_BACKUP)) {
+ struct obd_import *imp = class_exp2cliimp(exp);
+ int value;
+ if (vallen != sizeof(int))
+ RETURN(-EINVAL);
+ value = *(int *)val;
+ CDEBUG(D_MGC, "InitRecov %s %d/d%d:i%d:r%d:or%d:%s\n",
+ imp->imp_obd->obd_name, value,
+ imp->imp_deactive, imp->imp_invalid,
+ imp->imp_replayable, imp->imp_obd->obd_replayable,
+ ptlrpc_import_state_name(imp->imp_state));
+ /* Resurrect the import immediately if
+ * 1. we previously got disconnected,
+ * 2. value > 1 (at the same node with MGS)
+ * */
+ if (imp->imp_state == LUSTRE_IMP_DISCON || value > 1)
+ ptlrpc_reconnect_import(imp);
+
+ RETURN(0);
+ }
+
/* FIXME move this to mgc_process_config */
if (KEY_IS(KEY_REGISTER_TARGET)) {
struct mgs_target_info *mti;
rc = mgc_fs_cleanup(env, exp->exp_obd);
RETURN(rc);
}
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 13, 53, 0)
if (KEY_IS(KEY_SET_INFO)) {
struct mgs_send_param *msp;
rc = mgc_set_mgs_param(exp, msp);
RETURN(rc);
}
+#endif
if (KEY_IS(KEY_MGSSEC)) {
struct client_obd *cli = &exp->exp_obd->u.cli;
struct sptlrpc_flavor flvr;
}
enum {
- CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
+ CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT),
CONFIG_READ_NRPAGES = 4
};
__u64 max_version,
void *data, int datalen, bool mne_swab)
{
- struct config_llog_instance *cfg = &cld->cld_cfg;
- struct lustre_sb_info *lsi = s2lsi(cfg->cfg_sb);
- struct mgs_nidtbl_entry *entry;
- struct lustre_cfg *lcfg;
- struct lustre_cfg_bufs bufs;
- u64 prev_version = 0;
- char *inst;
- char *buf;
- int bufsz;
- int pos;
- int rc = 0;
- int off = 0;
- ENTRY;
+ struct config_llog_instance *cfg = &cld->cld_cfg;
+ struct lustre_sb_info *lsi = s2lsi(cfg->cfg_sb);
+ struct mgs_nidtbl_entry *entry;
+ struct lustre_cfg *lcfg;
+ struct lustre_cfg_bufs bufs;
+ u64 prev_version = 0;
+ char *inst;
+ char *buf;
+ int bufsz;
+ int pos = 0;
+ int rc = 0;
+ int off = 0;
- LASSERT(cfg->cfg_instance != NULL);
- LASSERT(cfg->cfg_sb == cfg->cfg_instance);
+ ENTRY;
+ LASSERT(cfg->cfg_instance != 0);
+ LASSERT(ll_get_cfg_instance(cfg->cfg_sb) == cfg->cfg_instance);
- OBD_ALLOC(inst, PAGE_CACHE_SIZE);
+ OBD_ALLOC(inst, PAGE_SIZE);
if (inst == NULL)
RETURN(-ENOMEM);
if (!IS_SERVER(lsi)) {
- pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance);
- if (pos >= PAGE_CACHE_SIZE) {
- OBD_FREE(inst, PAGE_CACHE_SIZE);
+ pos = snprintf(inst, PAGE_SIZE, "%016lx", cfg->cfg_instance);
+ if (pos >= PAGE_SIZE) {
+ OBD_FREE(inst, PAGE_SIZE);
return -E2BIG;
}
- } else {
+#ifdef HAVE_SERVER_SUPPORT
+ } else {
LASSERT(IS_MDT(lsi));
rc = server_name2svname(lsi->lsi_svname, inst, NULL,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
if (rc) {
- OBD_FREE(inst, PAGE_CACHE_SIZE);
+ OBD_FREE(inst, PAGE_SIZE);
RETURN(-EINVAL);
}
pos = strlen(inst);
- }
+#endif /* HAVE_SERVER_SUPPORT */
+ }
++pos;
buf = inst + pos;
- bufsz = PAGE_CACHE_SIZE - pos;
+ bufsz = PAGE_SIZE - pos;
while (datalen > 0) {
int entry_len = sizeof(*entry);
/* Keep this swab for normal mixed endian handling. LU-1644 */
if (mne_swab)
lustre_swab_mgs_nidtbl_entry(entry);
- if (entry->mne_length > PAGE_CACHE_SIZE) {
+ if (entry->mne_length > PAGE_SIZE) {
CERROR("MNE too large (%u)\n", entry->mne_length);
break;
}
lustre_cfg_bufs_set_string(&bufs, 1, params);
- lcfg = lustre_cfg_new(LCFG_PARAM, &bufs);
- if (lcfg == NULL) {
+ OBD_ALLOC(lcfg, lustre_cfg_len(bufs.lcfg_bufcount,
+ bufs.lcfg_buflen));
+ if (!lcfg) {
rc = -ENOMEM;
break;
}
+ lustre_cfg_init(lcfg, LCFG_PARAM, &bufs);
- CDEBUG(D_INFO, "ir apply logs "LPD64"/"LPD64" for %s -> %s\n",
+ CDEBUG(D_INFO, "ir apply logs %lld/%lld for %s -> %s\n",
prev_version, max_version, obdname, params);
rc = class_process_config(lcfg);
- lustre_cfg_free(lcfg);
+ OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount,
+ lcfg->lcfg_buflens));
if (rc)
CDEBUG(D_INFO, "process config for %s error %d\n",
obdname, rc);
/* continue, even one with error */
}
- OBD_FREE(inst, PAGE_CACHE_SIZE);
+ OBD_FREE(inst, PAGE_SIZE);
RETURN(rc);
}
static int mgc_process_recover_nodemap_log(struct obd_device *obd,
struct config_llog_data *cld)
{
+ struct ptlrpc_connection *mgc_conn;
struct ptlrpc_request *req = NULL;
struct config_llog_instance *cfg = &cld->cld_cfg;
struct mgs_config_body *body;
struct mgs_config_res *res;
-
- /* When a nodemap config is received, we build a new nodemap config,
- * with new nodemap structs. We keep track of the most recently added
- * nodemap since the config is read ordered by nodemap_id, and so it
- * is likely that the next record will be related. Because access to
- * the nodemaps is single threaded until the nodemap_config is active,
- * we don't need to reference count with recent_nodemap, though
- * recent_nodemap should be set to NULL when the nodemap_config
- * is either destroyed or set active.
- */
struct nodemap_config *new_config = NULL;
struct lu_nodemap *recent_nodemap = NULL;
-
struct ptlrpc_bulk_desc *desc;
- struct page **pages;
+ struct page **pages = NULL;
__u64 config_read_offset = 0;
- int nrpages;
+ __u8 nodemap_cur_pass = 0;
+ int nrpages = 0;
bool eof = true;
bool mne_swab = false;
int i;
int ealen;
int rc;
-
ENTRY;
+ mgc_conn = class_exp2cliimp(cld->cld_mgcexp)->imp_connection;
+
+ /* don't need to get local config */
+ if (cld_is_nodemap(cld) &&
+ (LNET_NETTYP(LNET_NIDNET(mgc_conn->c_peer.nid)) == LOLND))
+ GOTO(out, rc = 0);
+
/* allocate buffer for bulk transfer.
* if this is the first time for this mgs to read logs,
* CONFIG_READ_NRPAGES_INIT will be used since it will read all logs
GOTO(out, rc = -ENOMEM);
}
+again:
#ifdef HAVE_SERVER_SUPPORT
- if (cld_is_nodemap(cld)) {
+ if (cld_is_nodemap(cld) && config_read_offset == 0) {
new_config = nodemap_config_alloc();
if (IS_ERR(new_config)) {
rc = PTR_ERR(new_config);
}
}
#endif
-again:
LASSERT(cld_is_recover(cld) || cld_is_nodemap(cld));
LASSERT(mutex_is_locked(&cld->cld_lock));
req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp),
else
body->mcb_offset = cfg->cfg_last_idx + 1;
body->mcb_type = cld->cld_type;
- body->mcb_bits = PAGE_CACHE_SHIFT;
+ body->mcb_bits = PAGE_SHIFT;
body->mcb_units = nrpages;
+ body->mcb_nm_cur_pass = nodemap_cur_pass;
/* allocate bulk transfer descriptor */
desc = ptlrpc_prep_bulk_imp(req, nrpages, 1,
for (i = 0; i < nrpages; i++)
desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
if (cld_is_nodemap(cld)) {
config_read_offset = res->mcr_offset;
eof = config_read_offset == II_END_OFF;
+ nodemap_cur_pass = res->mcr_nm_cur_pass;
} else {
if (res->mcr_size < res->mcr_offset)
GOTO(out, rc = -EINVAL);
cfg->cfg_last_idx = res->mcr_offset;
eof = res->mcr_offset == res->mcr_size;
- CDEBUG(D_INFO, "Latest version "LPD64", more %d.\n",
+ CDEBUG(D_INFO, "Latest version %lld, more %d.\n",
res->mcr_offset, eof == false);
}
if (ealen < 0)
GOTO(out, rc = ealen);
- if (ealen > nrpages << PAGE_CACHE_SHIFT)
+ if (ealen > nrpages << PAGE_SHIFT)
GOTO(out, rc = -EINVAL);
if (ealen == 0) { /* no logs transferred */
+#ifdef HAVE_SERVER_SUPPORT
+ /* config changed since first read RPC */
+ if (cld_is_nodemap(cld) && config_read_offset == 0) {
+ CDEBUG(D_INFO, "nodemap config changed in transit, retrying\n");
+ GOTO(out, rc = -EAGAIN);
+ }
+#endif
if (!eof)
rc = -EINVAL;
GOTO(out, rc);
}
- mne_swab = !!ptlrpc_rep_need_swab(req);
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 0, 53, 0)
- /* This import flag means the server did an extra swab of IR MNE
- * records (fixed in LU-1252), reverse it here if needed. LU-1644 */
- if (unlikely(req->rq_import->imp_need_mne_swab))
- mne_swab = !mne_swab;
-#endif
+ mne_swab = ptlrpc_rep_need_swab(req);
+ /* When a nodemap config is received, we build a new nodemap config,
+ * with new nodemap structs. We keep track of the most recently added
+ * nodemap since the config is read ordered by nodemap_id, and so it
+ * is likely that the next record will be related. Because access to
+ * the nodemaps is single threaded until the nodemap_config is active,
+ * we don't need to reference count with recent_nodemap, though
+ * recent_nodemap should be set to NULL when the nodemap_config
+ * is either destroyed or set active.
+ */
for (i = 0; i < nrpages && ealen > 0; i++) {
int rc2;
union lu_page *ptr;
rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset,
ptr,
min_t(int, ealen,
- PAGE_CACHE_SIZE),
+ PAGE_SIZE),
mne_swab);
kunmap(pages[i]);
if (rc2 < 0) {
cld_is_nodemap(cld) ? "nodemap" : "recovery",
cld->cld_logname,
rc2);
- break;
+ GOTO(out, rc = rc2);
}
- ealen -= PAGE_CACHE_SIZE;
+ ealen -= PAGE_SIZE;
}
out:
- if (req)
+ if (req) {
ptlrpc_req_finished(req);
+ req = NULL;
+ }
if (rc == 0 && !eof)
goto again;
#ifdef HAVE_SERVER_SUPPORT
if (new_config != NULL) {
- recent_nodemap = NULL;
+ /* recent_nodemap cannot be used after set_active/dealloc */
if (rc == 0)
- nodemap_config_set_active(new_config);
+ nodemap_config_set_active_mgc(new_config);
else
nodemap_config_dealloc(new_config);
}
return rc;
}
+/* Glimpse AST for the CONFIG_T_BARRIER config lock.
+ *
+ * Called when the MGS glimpses the barrier lock held by this client;
+ * the incoming request is handed to barrier_handler() together with
+ * the local dt device so the barrier state can be processed there.
+ * NOTE(review): exact barrier_handler() semantics live in
+ * lustre_barrier.h -- confirm reply handling is done by the callee.
+ *
+ * \param[in] lock	config lock being glimpsed; l_ast_data carries the
+ *			barrier config_llog_data
+ * \param[in] data	the ptlrpc_request from the MGS
+ *
+ * \retval		result of barrier_handler() on success
+ * \retval		-ENODEV if this config log is being stopped
+ */
+static int mgc_barrier_glimpse_ast(struct ldlm_lock *lock, void *data)
+{
+ struct config_llog_data *cld = lock->l_ast_data;
+ int rc;
+ ENTRY;
+
+ /* Log is being torn down; nothing to do for the glimpse. */
+ if (cld->cld_stopping)
+ RETURN(-ENODEV);
+
+ rc = barrier_handler(s2lsi(cld->cld_cfg.cfg_sb)->lsi_dt_dev,
+ (struct ptlrpc_request *)data);
+
+ RETURN(rc);
+}
+
/* Copy a remote log locally */
static int mgc_llog_local_copy(const struct lu_env *env,
struct obd_device *obd,
struct client_obd *cli = &mgc->u.cli;
struct lustre_sb_info *lsi = NULL;
int rc = 0;
- bool sptlrpc_started = false;
struct lu_env *env;
ENTRY;
LASSERT(cld);
LASSERT(mutex_is_locked(&cld->cld_lock));
- /*
- * local copy of sptlrpc log is controlled elsewhere, don't try to
- * read it up here.
- */
- if (cld_is_sptlrpc(cld) && local_only)
- RETURN(0);
-
if (cld->cld_cfg.cfg_sb)
lsi = s2lsi(cld->cld_cfg.cfg_sb);
cli->cl_mgc_configs_dir != NULL &&
lu2dt_dev(cli->cl_mgc_configs_dir->do_lu.lo_dev) ==
lsi->lsi_dt_dev) {
- if (!local_only)
+ if (!local_only && !lsi->lsi_dt_dev->dd_rdonly)
/* Only try to copy log if we have the lock. */
rc = mgc_llog_local_copy(env, mgc, ctxt, lctxt,
cld->cld_logname);
GOTO(out_pop, rc = -EIO);
}
- if (cld_is_sptlrpc(cld)) {
- sptlrpc_conf_log_update_begin(cld->cld_logname);
- sptlrpc_started = true;
- }
+ rc = -EAGAIN;
+ if (lsi && IS_SERVER(lsi) && !IS_MGS(lsi) &&
+ lsi->lsi_dt_dev->dd_rdonly) {
+ struct llog_ctxt *rctxt;
- /* logname and instance info should be the same, so use our
- * copy of the instance for the update. The cfg_last_idx will
- * be updated here. */
- rc = class_config_parse_llog(env, ctxt, cld->cld_logname,
- &cld->cld_cfg);
- EXIT;
+ /* Under readonly mode we may have no local copy, or the local
+ * copy may be incomplete, so try the remote llog first. */
+ rctxt = llog_get_context(mgc, LLOG_CONFIG_REPL_CTXT);
+ LASSERT(rctxt);
-out_pop:
- __llog_ctxt_put(env, ctxt);
- if (lctxt)
- __llog_ctxt_put(env, lctxt);
+ rc = class_config_parse_llog(env, rctxt, cld->cld_logname,
+ &cld->cld_cfg);
+ llog_ctxt_put(rctxt);
+ }
+
+ if (rc && rc != -ENOENT)
+ rc = class_config_parse_llog(env, ctxt, cld->cld_logname,
+ &cld->cld_cfg);
/*
- * update settings on existing OBDs. doing it inside
- * of llog_process_lock so no device is attaching/detaching
- * in parallel.
+ * update settings on existing OBDs.
* the logname must be <fsname>-sptlrpc
*/
- if (sptlrpc_started) {
- LASSERT(cld_is_sptlrpc(cld));
- sptlrpc_conf_log_update_end(cld->cld_logname);
+ if (rc == 0 && cld_is_sptlrpc(cld))
class_notify_sptlrpc_conf(cld->cld_logname,
strlen(cld->cld_logname) -
strlen("-sptlrpc"));
- }
+ EXIT;
+
+out_pop:
+ __llog_ctxt_put(env, ctxt);
+ if (lctxt)
+ __llog_ctxt_put(env, lctxt);
lu_env_fini(env);
out_free:
mutex_lock(&cld->cld_lock);
if (cld->cld_stopping) {
mutex_unlock(&cld->cld_lock);
- RETURN(0);
- }
+ RETURN(0);
+ }
- OBD_FAIL_TIMEOUT(OBD_FAIL_MGC_PAUSE_PROCESS_LOG, 20);
+ OBD_FAIL_TIMEOUT(OBD_FAIL_MGC_PAUSE_PROCESS_LOG, 20);
- CDEBUG(D_MGC, "Process log %s:%p from %d\n", cld->cld_logname,
+ CDEBUG(D_MGC, "Process log %s-%016lx from %d\n", cld->cld_logname,
cld->cld_cfg.cfg_instance, cld->cld_cfg.cfg_last_idx + 1);
/* Get the cfg lock on the llog */
rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, LDLM_PLAIN, NULL,
- LCK_CR, &flags, NULL, NULL, NULL,
+ LCK_CR, &flags,
+ cld_is_barrier(cld) ? mgc_barrier_glimpse_ast : NULL,
cld, 0, NULL, &lockh);
if (rcl == 0) {
/* Get the cld, it will be released in mgc_blocking_ast. */
atomic_read(&mgc->u.cli.cl_mgc_refcount) > 0 && !retry) {
struct obd_import *imp;
struct l_wait_info lwi;
- int secs = cfs_time_seconds(obd_timeout);
+ long timeout = cfs_time_seconds(obd_timeout);
mutex_unlock(&cld->cld_lock);
imp = class_exp2cliimp(mgc->u.cli.cl_mgc_mgsexp);
* FULL or closed */
ptlrpc_pinger_force(imp);
- lwi = LWI_TIMEOUT(secs, NULL, NULL);
+ lwi = LWI_TIMEOUT(timeout, NULL, NULL);
l_wait_event(imp->imp_recovery_waitq,
!mgc_import_in_recovery(imp), &lwi);
goto restart;
} else {
mutex_lock(&cld->cld_lock);
+ /* unlock/lock mutex, so check stopping again */
+ if (cld->cld_stopping) {
+ mutex_unlock(&cld->cld_lock);
+ RETURN(0);
+ }
+ spin_lock(&config_list_lock);
cld->cld_lostlock = 1;
+ spin_unlock(&config_list_lock);
}
} else {
/* mark cld_lostlock so that it will requeue
* after MGC becomes available. */
+ spin_lock(&config_list_lock);
cld->cld_lostlock = 1;
+ spin_unlock(&config_list_lock);
}
- /* Get extra reference, it will be put in requeue thread */
- config_log_get(cld);
}
-
if (cld_is_recover(cld) || cld_is_nodemap(cld)) {
if (!rcl)
rc = mgc_process_recover_nodemap_log(mgc, cld);
if (!rcl) {
CERROR("%s: recover log %s failed, not fatal: rc = %d\n",
mgc->obd_name, cld->cld_logname, rc);
+ spin_lock(&config_list_lock);
cld->cld_lostlock = 1;
+ spin_unlock(&config_list_lock);
}
rc = 0; /* this is not a fatal error for recover log */
}
- } else {
+ } else if (!cld_is_barrier(cld)) {
rc = mgc_process_cfg_log(mgc, cld, rcl != 0);
}
CERROR("Can't drop cfg lock: %d\n", rcl);
}
+ /* requeue nodemap lock immediately if transfer was interrupted */
+ if (cld_is_nodemap(cld) && rc == -EAGAIN) {
+ mgc_requeue_add(cld);
+ rc = 0;
+ }
+
RETURN(rc);
}
cfg->cfg_last_idx);
/* We're only called through here on the initial mount */
- rc = config_log_add(obd, logname, cfg, sb);
- if (rc)
- break;
- cld = config_log_find(logname, cfg);
- if (cld == NULL) {
- rc = -ENOENT;
- break;
- }
+ cld = config_log_add(obd, logname, cfg, sb);
+ if (IS_ERR(cld)) {
+ rc = PTR_ERR(cld);
+ break;
+ }
- /* COMPAT_146 */
- /* FIXME only set this for old logs! Right now this forces
- us to always skip the "inside markers" check */
- cld->cld_cfg.cfg_flags |= CFG_F_COMPAT146;
-
- rc = mgc_process_log(obd, cld);
- if (rc == 0 && cld->cld_recover != NULL) {
- if (OCD_HAS_FLAG(&obd->u.cli.cl_import->
- imp_connect_data, IMP_RECOV)) {
- rc = mgc_process_log(obd, cld->cld_recover);
- } else {
- struct config_llog_data *cir = cld->cld_recover;
- cld->cld_recover = NULL;
- config_log_put(cir);
- }
- if (rc)
- CERROR("Cannot process recover llog %d\n", rc);
- }
+ rc = mgc_process_log(obd, cld);
+ if (rc == 0 && cld->cld_recover != NULL) {
+ if (OCD_HAS_FLAG(&obd->u.cli.cl_import->
+ imp_connect_data, IMP_RECOV)) {
+ rc = mgc_process_log(obd, cld->cld_recover);
+ } else {
+ struct config_llog_data *cir;
+
+ mutex_lock(&cld->cld_lock);
+ cir = cld->cld_recover;
+ cld->cld_recover = NULL;
+ mutex_unlock(&cld->cld_lock);
+ config_log_put(cir);
+ }
+
+ if (rc)
+ CERROR("Cannot process recover llog %d\n", rc);
+ }
if (rc == 0 && cld->cld_params != NULL) {
rc = mgc_process_log(obd, cld->cld_params);
CERROR("%s: can't process params llog: rc = %d\n",
obd->obd_name, rc);
}
- config_log_put(cld);
break;
}
static int __init mgc_init(void)
{
+ /* Third argument (enable_proc) changed true -> false: the MGC no
+ * longer registers legacy procfs entries at type registration --
+ * presumably stats moved to the newer sysfs/debugfs paths; TODO
+ * confirm against class_register_type() in obd_class.h. */
- return class_register_type(&mgc_obd_ops, NULL, true, NULL,
+ return class_register_type(&mgc_obd_ops, NULL, false, NULL,
LUSTRE_MGC_NAME, NULL);
}