 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
* This file is part of Lustre, http://www.lustre.org/
#define D_MGC D_CONFIG /*|D_WARNING*/

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/random.h>

#include <dt_object.h>
#include <lprocfs_status.h>
#include <lustre_dlm.h>
#include <lustre_disk.h>
#include <lustre_log.h>
#include <lustre_nodemap.h>
#include <lustre_swab.h>
#include <obd_class.h>
#include <lustre_barrier.h>

#include "mgc_internal.h"
break;
case CONFIG_T_RECOVER:
case CONFIG_T_PARAMS:
- resname = type;
- break;
+ case CONFIG_T_NODEMAP:
+ case CONFIG_T_BARRIER:
+ resname = type;
+ break;
default:
LBUG();
}
res_id->name[1] = cpu_to_le64(resname);
- CDEBUG(D_MGC, "log %s to resid "LPX64"/"LPX64" (%.8s)\n", name,
+ CDEBUG(D_MGC, "log %s to resid %#llx/%#llx (%.8s)\n", name,
res_id->name[0], res_id->name[1], (char *)&res_id->name[0]);
return 0;
}
len = name_end - logname;
return mgc_name2resid(logname, len, res_id, type);
}
+EXPORT_SYMBOL(mgc_logname2resid);
/********************** config llog list **********************/
/* All active config logs watched by this MGC, in no particular order. */
static LIST_HEAD(config_llog_list);
static DEFINE_SPINLOCK(config_list_lock); /* protects config_llog_list */
/* Take a reference to a config log.
 * The caller must already hold a reference (or the list lock), so the
 * refcount cannot be zero here. Always returns 0. */
static int config_log_get(struct config_llog_data *cld)
{
	ENTRY;
	atomic_inc(&cld->cld_refcount);
	CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
	       atomic_read(&cld->cld_refcount));
	RETURN(0);
}
/* Drop a reference to a config log. When no longer referenced,
we can free the config log data */
static void config_log_put(struct config_llog_data *cld)
{
- ENTRY;
+ ENTRY;
+
+ if (unlikely(!cld))
+ RETURN_EXIT;
- CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
- cfs_atomic_read(&cld->cld_refcount));
- LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
+ CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
+ atomic_read(&cld->cld_refcount));
+ LASSERT(atomic_read(&cld->cld_refcount) > 0);
- /* spinlock to make sure no item with 0 refcount in the list */
- if (cfs_atomic_dec_and_lock(&cld->cld_refcount, &config_list_lock)) {
- cfs_list_del(&cld->cld_list_chain);
+ /* spinlock to make sure no item with 0 refcount in the list */
+ if (atomic_dec_and_lock(&cld->cld_refcount, &config_list_lock)) {
+ list_del(&cld->cld_list_chain);
spin_unlock(&config_list_lock);
- CDEBUG(D_MGC, "dropping config log %s\n", cld->cld_logname);
+ CDEBUG(D_MGC, "dropping config log %s\n", cld->cld_logname);
- if (cld->cld_recover)
- config_log_put(cld->cld_recover);
- if (cld->cld_sptlrpc)
- config_log_put(cld->cld_sptlrpc);
- if (cld->cld_params)
- config_log_put(cld->cld_params);
- if (cld_is_sptlrpc(cld))
- sptlrpc_conf_log_stop(cld->cld_logname);
+ config_log_put(cld->cld_barrier);
+ config_log_put(cld->cld_recover);
+ config_log_put(cld->cld_params);
+ config_log_put(cld->cld_nodemap);
+ config_log_put(cld->cld_sptlrpc);
+ if (cld_is_sptlrpc(cld))
+ sptlrpc_conf_log_stop(cld->cld_logname);
- class_export_put(cld->cld_mgcexp);
- OBD_FREE(cld, sizeof(*cld) + strlen(cld->cld_logname) + 1);
- }
+ class_export_put(cld->cld_mgcexp);
+ OBD_FREE(cld, sizeof(*cld) + strlen(cld->cld_logname) + 1);
+ }
- EXIT;
+ EXIT;
}
/* Find a config log by name within the given config instance.
 * On success returns the log with an extra reference held (caller must
 * config_log_put() it); returns NULL when no matching log exists. */
struct config_llog_data *config_log_find(char *logname,
					 struct config_llog_instance *cfg)
{
	struct config_llog_data *cld;
	struct config_llog_data *found = NULL;
	unsigned long cfg_instance;

	ENTRY;
	LASSERT(logname != NULL);

	cfg_instance = cfg ? cfg->cfg_instance : 0;
	spin_lock(&config_list_lock);
	list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
		/* check if cfg_instance is the one we want */
		if (cfg_instance != cld->cld_cfg.cfg_instance)
			continue;

		/* instance may be NULL, should check name */
		if (strcmp(logname, cld->cld_logname) == 0) {
			found = cld;
			/* take the ref under config_list_lock so the entry
			 * cannot reach refcount 0 while on the list */
			config_log_get(found);
			break;
		}
	}
	spin_unlock(&config_list_lock);
	RETURN(found);
}
static
struct config_llog_data *do_config_log_add(struct obd_device *obd,
- char *logname,
- int type,
- struct config_llog_instance *cfg,
- struct super_block *sb)
+ char *logname,
+ int type,
+ struct config_llog_instance *cfg,
+ struct super_block *sb)
{
- struct config_llog_data *cld;
- int rc;
- ENTRY;
+ struct config_llog_data *cld;
+ int rc;
- CDEBUG(D_MGC, "do adding config log %s:%p\n", logname,
- cfg ? cfg->cfg_instance : 0);
+ ENTRY;
+
+ CDEBUG(D_MGC, "do adding config log %s-%016lx\n", logname,
+ cfg ? cfg->cfg_instance : 0);
+
+ OBD_ALLOC(cld, sizeof(*cld) + strlen(logname) + 1);
+ if (!cld)
+ RETURN(ERR_PTR(-ENOMEM));
- OBD_ALLOC(cld, sizeof(*cld) + strlen(logname) + 1);
- if (!cld)
- RETURN(ERR_PTR(-ENOMEM));
+ rc = mgc_logname2resid(logname, &cld->cld_resid, type);
+ if (rc) {
+ OBD_FREE(cld, sizeof(*cld) + strlen(cld->cld_logname) + 1);
+ RETURN(ERR_PTR(rc));
+ }
- strcpy(cld->cld_logname, logname);
- if (cfg)
- cld->cld_cfg = *cfg;
+ strcpy(cld->cld_logname, logname);
+ if (cfg)
+ cld->cld_cfg = *cfg;
else
cld->cld_cfg.cfg_callback = class_config_llog_handler;
mutex_init(&cld->cld_lock);
- cld->cld_cfg.cfg_last_idx = 0;
- cld->cld_cfg.cfg_flags = 0;
- cld->cld_cfg.cfg_sb = sb;
- cld->cld_type = type;
- cfs_atomic_set(&cld->cld_refcount, 1);
-
- /* Keep the mgc around until we are done */
- cld->cld_mgcexp = class_export_get(obd->obd_self_export);
-
- if (cld_is_sptlrpc(cld)) {
- sptlrpc_conf_log_start(logname);
- cld->cld_cfg.cfg_obdname = obd->obd_name;
- }
+ cld->cld_cfg.cfg_last_idx = 0;
+ cld->cld_cfg.cfg_flags = 0;
+ cld->cld_cfg.cfg_sb = sb;
+ cld->cld_type = type;
+ atomic_set(&cld->cld_refcount, 1);
- rc = mgc_logname2resid(logname, &cld->cld_resid, type);
+ /* Keep the mgc around until we are done */
+ cld->cld_mgcexp = class_export_get(obd->obd_self_export);
+
+ if (cld_is_sptlrpc(cld))
+ sptlrpc_conf_log_start(logname);
spin_lock(&config_list_lock);
- cfs_list_add(&cld->cld_list_chain, &config_llog_list);
+ list_add(&cld->cld_list_chain, &config_llog_list);
spin_unlock(&config_list_lock);
- if (rc) {
- config_log_put(cld);
- RETURN(ERR_PTR(rc));
- }
-
- if (cld_is_sptlrpc(cld)) {
- rc = mgc_process_log(obd, cld);
+ if (cld_is_sptlrpc(cld) || cld_is_nodemap(cld) || cld_is_barrier(cld)) {
+ rc = mgc_process_log(obd, cld);
if (rc && rc != -ENOENT)
- CERROR("failed processing sptlrpc log: %d\n", rc);
- }
+ CERROR("%s: failed processing log, type %d: rc = %d\n",
+ obd->obd_name, type, rc);
+ }
- RETURN(cld);
+ RETURN(cld);
}
static struct config_llog_data *config_recover_log_add(struct obd_device *obd,
- char *fsname,
- struct config_llog_instance *cfg,
- struct super_block *sb)
+ char *fsname,
+ struct config_llog_instance *cfg,
+ struct super_block *sb)
{
- struct config_llog_instance lcfg = *cfg;
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct config_llog_data *cld;
- char logname[32];
+ struct config_llog_instance lcfg = *cfg;
+ struct lustre_sb_info *lsi = s2lsi(sb);
+ struct config_llog_data *cld;
+ char logname[32];
if (IS_OST(lsi))
- return NULL;
+ return NULL;
/* for osp-on-ost, see lustre_start_osp() */
if (IS_MDT(lsi) && lcfg.cfg_instance)
return NULL;
- /* we have to use different llog for clients and mdts for cmd
- * where only clients are notified if one of cmd server restarts */
- LASSERT(strlen(fsname) < sizeof(logname) / 2);
- strcpy(logname, fsname);
+ /* We have to use different llog for clients and MDTs for DNE,
+ * where only clients are notified if one of DNE server restarts.
+ */
+ LASSERT(strlen(fsname) < sizeof(logname) / 2);
+ strncpy(logname, fsname, sizeof(logname));
if (IS_SERVER(lsi)) { /* mdt */
- LASSERT(lcfg.cfg_instance == NULL);
- lcfg.cfg_instance = sb;
- strcat(logname, "-mdtir");
- } else {
- LASSERT(lcfg.cfg_instance != NULL);
- strcat(logname, "-cliir");
- }
+ LASSERT(lcfg.cfg_instance == 0);
+ lcfg.cfg_instance = ll_get_cfg_instance(sb);
+ strncat(logname, "-mdtir", sizeof(logname));
+ } else {
+ LASSERT(lcfg.cfg_instance != 0);
+ strncat(logname, "-cliir", sizeof(logname));
+ }
- cld = do_config_log_add(obd, logname, CONFIG_T_RECOVER, &lcfg, sb);
- return cld;
+ cld = do_config_log_add(obd, logname, CONFIG_T_RECOVER, &lcfg, sb);
+ return cld;
}
-static struct config_llog_data *config_params_log_add(struct obd_device *obd,
- struct config_llog_instance *cfg, struct super_block *sb)
+static struct config_llog_data *config_log_find_or_add(struct obd_device *obd,
+ char *logname, struct super_block *sb, int type,
+ struct config_llog_instance *cfg)
{
- struct config_llog_instance lcfg = *cfg;
- struct config_llog_data *cld;
+ struct config_llog_instance lcfg = *cfg;
+ struct config_llog_data *cld;
- lcfg.cfg_instance = sb;
+ /* Note class_config_llog_handler() depends on getting "obd" back */
+ lcfg.cfg_instance = sb ? ll_get_cfg_instance(sb) : (unsigned long)obd;
- cld = do_config_log_add(obd, PARAMS_FILENAME, CONFIG_T_PARAMS,
- &lcfg, sb);
+ cld = config_log_find(logname, &lcfg);
+ if (unlikely(cld != NULL))
+ return cld;
- return cld;
+ return do_config_log_add(obd, logname, type, &lcfg, sb);
}
/** Add this log to the list of active logs watched by an MGC.
* We have one active log per "mount" - client instance or servername.
* Each instance may be at a different point in the log.
*/
-static int config_log_add(struct obd_device *obd, char *logname,
- struct config_llog_instance *cfg,
- struct super_block *sb)
+static struct config_llog_data *
+config_log_add(struct obd_device *obd, char *logname,
+ struct config_llog_instance *cfg, struct super_block *sb)
{
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct config_llog_data *cld;
- struct config_llog_data *sptlrpc_cld;
- struct config_llog_data *params_cld;
- char seclogname[32];
- char *ptr;
- int rc;
+ struct lustre_sb_info *lsi = s2lsi(sb);
+ struct config_llog_data *cld = NULL;
+ struct config_llog_data *sptlrpc_cld = NULL;
+ struct config_llog_data *params_cld = NULL;
+ struct config_llog_data *nodemap_cld = NULL;
+ struct config_llog_data *barrier_cld = NULL;
+ char seclogname[32];
+ char *ptr;
+ int rc;
+ bool locked = false;
ENTRY;
- CDEBUG(D_MGC, "adding config log %s:%p\n", logname, cfg->cfg_instance);
+ CDEBUG(D_MGC, "add config log %s-%016lx\n", logname,
+ cfg->cfg_instance);
- /*
- * for each regular log, the depended sptlrpc log name is
- * <fsname>-sptlrpc. multiple regular logs may share one sptlrpc log.
- */
- ptr = strrchr(logname, '-');
- if (ptr == NULL || ptr - logname > 8) {
- CERROR("logname %s is too long\n", logname);
- RETURN(-EINVAL);
- }
+ /*
+ * for each regular log, the depended sptlrpc log name is
+ * <fsname>-sptlrpc. multiple regular logs may share one sptlrpc log.
+ */
+ ptr = strrchr(logname, '-');
+ if (ptr == NULL || ptr - logname > 8) {
+ CERROR("logname %s is too long\n", logname);
+ RETURN(ERR_PTR(-EINVAL));
+ }
- memcpy(seclogname, logname, ptr - logname);
- strcpy(seclogname + (ptr - logname), "-sptlrpc");
+ memcpy(seclogname, logname, ptr - logname);
+ strcpy(seclogname + (ptr - logname), "-sptlrpc");
- sptlrpc_cld = config_log_find(seclogname, NULL);
- if (sptlrpc_cld == NULL) {
- sptlrpc_cld = do_config_log_add(obd, seclogname,
- CONFIG_T_SPTLRPC, NULL, NULL);
- if (IS_ERR(sptlrpc_cld)) {
- CERROR("can't create sptlrpc log: %s\n", seclogname);
- GOTO(out_err, rc = PTR_ERR(sptlrpc_cld));
- }
- }
- params_cld = config_params_log_add(obd, cfg, sb);
- if (IS_ERR(params_cld)) {
- rc = PTR_ERR(params_cld);
- CERROR("%s: can't create params log: rc = %d\n",
- obd->obd_name, rc);
- GOTO(out_err1, rc);
+ if (cfg->cfg_sub_clds & CONFIG_SUB_SPTLRPC) {
+ sptlrpc_cld = config_log_find_or_add(obd, seclogname, NULL,
+ CONFIG_T_SPTLRPC, cfg);
+ if (IS_ERR(sptlrpc_cld)) {
+ CERROR("%s: can't create sptlrpc log %s: rc = %ld\n",
+ obd->obd_name, seclogname, PTR_ERR(sptlrpc_cld));
+ RETURN(sptlrpc_cld);
+ }
+ }
+
+ if (!IS_MGS(lsi) && cfg->cfg_sub_clds & CONFIG_SUB_NODEMAP) {
+ nodemap_cld = config_log_find_or_add(obd, LUSTRE_NODEMAP_NAME,
+ NULL, CONFIG_T_NODEMAP,
+ cfg);
+ if (IS_ERR(nodemap_cld)) {
+ rc = PTR_ERR(nodemap_cld);
+ CERROR("%s: cannot create nodemap log: rc = %d\n",
+ obd->obd_name, rc);
+ GOTO(out_sptlrpc, rc);
+ }
+ }
+
+ if (cfg->cfg_sub_clds & CONFIG_SUB_PARAMS) {
+ params_cld = config_log_find_or_add(obd, PARAMS_FILENAME, sb,
+ CONFIG_T_PARAMS, cfg);
+ if (IS_ERR(params_cld)) {
+ rc = PTR_ERR(params_cld);
+ CERROR("%s: can't create params log: rc = %d\n",
+ obd->obd_name, rc);
+ GOTO(out_nodemap, rc);
+ }
+ }
+
+ if (IS_MDT(s2lsi(sb)) && cfg->cfg_sub_clds & CONFIG_SUB_BARRIER) {
+ snprintf(seclogname + (ptr - logname), sizeof(seclogname) - 1,
+ "-%s", BARRIER_FILENAME);
+ barrier_cld = config_log_find_or_add(obd, seclogname, sb,
+ CONFIG_T_BARRIER, cfg);
+ if (IS_ERR(barrier_cld)) {
+ rc = PTR_ERR(barrier_cld);
+ CERROR("%s: can't create barrier log: rc = %d\n",
+ obd->obd_name, rc);
+ GOTO(out_params, rc);
+ }
}
cld = do_config_log_add(obd, logname, CONFIG_T_CONFIG, cfg, sb);
if (IS_ERR(cld)) {
- CERROR("can't create log: %s\n", logname);
- GOTO(out_err2, rc = PTR_ERR(cld));
+ rc = PTR_ERR(cld);
+ CERROR("%s: can't create log: rc = %d\n",
+ obd->obd_name, rc);
+ GOTO(out_barrier, rc = PTR_ERR(cld));
}
- cld->cld_sptlrpc = sptlrpc_cld;
- cld->cld_params = params_cld;
+ LASSERT(lsi->lsi_lmd);
+ if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR) &&
+ cfg->cfg_sub_clds & CONFIG_SUB_RECOVER) {
+ struct config_llog_data *recover_cld;
+
+ ptr = strrchr(seclogname, '-');
+ if (ptr != NULL) {
+ *ptr = 0;
+ } else {
+ CERROR("%s: sptlrpc log name not correct, %s: "
+ "rc = %d\n", obd->obd_name, seclogname, -EINVAL);
+ GOTO(out_cld, rc = -EINVAL);
+ }
- LASSERT(lsi->lsi_lmd);
- if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)) {
- struct config_llog_data *recover_cld;
- *strrchr(seclogname, '-') = 0;
- recover_cld = config_recover_log_add(obd, seclogname, cfg, sb);
- if (IS_ERR(recover_cld))
- GOTO(out_err3, rc = PTR_ERR(recover_cld));
+ recover_cld = config_recover_log_add(obd, seclogname, cfg, sb);
+ if (IS_ERR(recover_cld)) {
+ rc = PTR_ERR(recover_cld);
+ CERROR("%s: can't create recover log: rc = %d\n",
+ obd->obd_name, rc);
+ GOTO(out_cld, rc);
+ }
+
+ mutex_lock(&cld->cld_lock);
+ locked = true;
cld->cld_recover = recover_cld;
}
- RETURN(0);
+ if (!locked)
+ mutex_lock(&cld->cld_lock);
+ cld->cld_params = params_cld;
+ cld->cld_barrier = barrier_cld;
+ cld->cld_nodemap = nodemap_cld;
+ cld->cld_sptlrpc = sptlrpc_cld;
+ mutex_unlock(&cld->cld_lock);
-out_err3:
- config_log_put(cld);
+ RETURN(cld);
-out_err2:
+out_cld:
+ config_log_put(cld);
+out_barrier:
+ config_log_put(barrier_cld);
+out_params:
config_log_put(params_cld);
-
-out_err1:
+out_nodemap:
+ config_log_put(nodemap_cld);
+out_sptlrpc:
config_log_put(sptlrpc_cld);
-out_err:
- RETURN(rc);
+ return ERR_PTR(rc);
}
-DEFINE_MUTEX(llog_process_lock);
+static inline void config_mark_cld_stop(struct config_llog_data *cld)
+{
+ if (cld) {
+ mutex_lock(&cld->cld_lock);
+ spin_lock(&config_list_lock);
+ cld->cld_stopping = 1;
+ spin_unlock(&config_list_lock);
+ mutex_unlock(&cld->cld_lock);
+ }
+}
/** Stop watching for updates on this log.
*/
static int config_log_end(char *logname, struct config_llog_instance *cfg)
{
- struct config_llog_data *cld;
- struct config_llog_data *cld_sptlrpc = NULL;
+ struct config_llog_data *cld;
+ struct config_llog_data *cld_sptlrpc = NULL;
struct config_llog_data *cld_params = NULL;
- struct config_llog_data *cld_recover = NULL;
- int rc = 0;
- ENTRY;
+ struct config_llog_data *cld_recover = NULL;
+ struct config_llog_data *cld_nodemap = NULL;
+ struct config_llog_data *cld_barrier = NULL;
+ int rc = 0;
- cld = config_log_find(logname, cfg);
- if (cld == NULL)
- RETURN(-ENOENT);
+ ENTRY;
+
+ cld = config_log_find(logname, cfg);
+ if (cld == NULL)
+ RETURN(-ENOENT);
mutex_lock(&cld->cld_lock);
- /*
- * if cld_stopping is set, it means we didn't start the log thus
- * not owning the start ref. this can happen after previous umount:
- * the cld still hanging there waiting for lock cancel, and we
- * remount again but failed in the middle and call log_end without
- * calling start_log.
- */
- if (unlikely(cld->cld_stopping)) {
+ /*
+ * if cld_stopping is set, it means we didn't start the log thus
+ * not owning the start ref. this can happen after previous umount:
+ * the cld still hanging there waiting for lock cancel, and we
+ * remount again but failed in the middle and call log_end without
+ * calling start_log.
+ */
+ if (unlikely(cld->cld_stopping)) {
mutex_unlock(&cld->cld_lock);
- /* drop the ref from the find */
- config_log_put(cld);
- RETURN(rc);
- }
-
- cld->cld_stopping = 1;
-
- cld_recover = cld->cld_recover;
- cld->cld_recover = NULL;
- mutex_unlock(&cld->cld_lock);
-
- if (cld_recover) {
- mutex_lock(&cld_recover->cld_lock);
- cld_recover->cld_stopping = 1;
- mutex_unlock(&cld_recover->cld_lock);
- config_log_put(cld_recover);
+ /* drop the ref from the find */
+ config_log_put(cld);
+ RETURN(rc);
}
spin_lock(&config_list_lock);
- cld_sptlrpc = cld->cld_sptlrpc;
- cld->cld_sptlrpc = NULL;
+ cld->cld_stopping = 1;
+ spin_unlock(&config_list_lock);
+
+ cld_recover = cld->cld_recover;
+ cld->cld_recover = NULL;
cld_params = cld->cld_params;
cld->cld_params = NULL;
- spin_unlock(&config_list_lock);
+ cld_nodemap = cld->cld_nodemap;
+ cld->cld_nodemap = NULL;
+ cld_barrier = cld->cld_barrier;
+ cld->cld_barrier = NULL;
+ cld_sptlrpc = cld->cld_sptlrpc;
+ cld->cld_sptlrpc = NULL;
+ mutex_unlock(&cld->cld_lock);
- if (cld_sptlrpc)
- config_log_put(cld_sptlrpc);
+ config_mark_cld_stop(cld_recover);
+ config_log_put(cld_recover);
- if (cld_params) {
- mutex_lock(&cld_params->cld_lock);
- cld_params->cld_stopping = 1;
- mutex_unlock(&cld_params->cld_lock);
- config_log_put(cld_params);
+ config_mark_cld_stop(cld_params);
+ config_log_put(cld_params);
+
+ /* don't set cld_stopping on nm lock as other targets may be active */
+ config_log_put(cld_nodemap);
+
+ if (cld_barrier) {
+ mutex_lock(&cld_barrier->cld_lock);
+ cld_barrier->cld_stopping = 1;
+ mutex_unlock(&cld_barrier->cld_lock);
+ config_log_put(cld_barrier);
}
- /* drop the ref from the find */
- config_log_put(cld);
- /* drop the start ref */
- config_log_put(cld);
+ config_log_put(cld_sptlrpc);
- CDEBUG(D_MGC, "end config log %s (%d)\n", logname ? logname : "client",
- rc);
- RETURN(rc);
+ /* drop the ref from the find */
+ config_log_put(cld);
+ /* drop the start ref */
+ config_log_put(cld);
+
+ CDEBUG(D_MGC, "end config log %s (%d)\n", logname ? logname : "client",
+ rc);
+ RETURN(rc);
}
-int lprocfs_mgc_rd_ir_state(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data)
{
- struct obd_device *obd = data;
- struct obd_import *imp = obd->u.cli.cl_import;
- struct obd_connect_data *ocd = &imp->imp_connect_data;
- struct config_llog_data *cld;
- int rc = 0;
- ENTRY;
+ struct obd_device *obd = data;
+ struct obd_import *imp;
+ struct obd_connect_data *ocd;
+ struct config_llog_data *cld;
+
+ ENTRY;
+ LASSERT(obd);
+ LPROCFS_CLIMP_CHECK(obd);
+ imp = obd->u.cli.cl_import;
+ ocd = &imp->imp_connect_data;
- rc = snprintf(page, count, "imperative_recovery: %s\n",
- OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED");
- rc += snprintf(page + rc, count - rc, "client_state:\n");
+ seq_printf(m, "imperative_recovery: %s\n",
+ OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED");
+ seq_printf(m, "client_state:\n");
spin_lock(&config_list_lock);
- cfs_list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
+ list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
if (cld->cld_recover == NULL)
continue;
- rc += snprintf(page + rc, count - rc,
- " - { client: %s, nidtbl_version: %u }\n",
- cld->cld_logname,
- cld->cld_recover->cld_cfg.cfg_last_idx);
+ seq_printf(m, " - { client: %s, nidtbl_version: %u }\n",
+ cld->cld_logname,
+ cld->cld_recover->cld_cfg.cfg_last_idx);
}
spin_unlock(&config_list_lock);
- RETURN(rc);
+ LPROCFS_CLIMP_EXIT(obd);
+ RETURN(0);
}
/* reenqueue any lost locks */
-#define RQ_RUNNING 0x1
-#define RQ_NOW 0x2
-#define RQ_LATER 0x4
-#define RQ_STOP 0x8
+#define RQ_RUNNING 0x1
+#define RQ_NOW 0x2
+#define RQ_LATER 0x4
+#define RQ_STOP 0x8
+#define RQ_PRECLEANUP 0x10
static int rq_state = 0;
-static cfs_waitq_t rq_waitq;
+static wait_queue_head_t rq_waitq;
static DECLARE_COMPLETION(rq_exit);
+static DECLARE_COMPLETION(rq_start);
static void do_requeue(struct config_llog_data *cld)
{
- ENTRY;
- LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
-
- /* Do not run mgc_process_log on a disconnected export or an
- export which is being disconnected. Take the client
- semaphore to make the check non-racy. */
- down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
- if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
- CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
- mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
- } else {
- CDEBUG(D_MGC, "disconnecting, won't update log %s\n",
- cld->cld_logname);
- }
+ int rc = 0;
+ ENTRY;
+
+ LASSERT(atomic_read(&cld->cld_refcount) > 0);
+
+ /*
+ * Do not run mgc_process_log on a disconnected export or an
+ * export which is being disconnected. Take the client
+ * semaphore to make the check non-racy.
+ */
+ down_read_nested(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem,
+ OBD_CLI_SEM_MGC);
+ if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
+ CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
+ rc = mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
+ if (rc && rc != -ENOENT)
+ CERROR("failed processing log: %d\n", rc);
+ } else {
+ CDEBUG(D_MGC, "disconnecting, won't update log %s\n",
+ cld->cld_logname);
+ }
up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
EXIT;
* in order to not flood the MGS.
*/
#define MGC_TIMEOUT_MIN_SECONDS		5
#define MGC_TIMEOUT_RAND_CENTISEC	500	/* jitter, in centi-seconds */
static int mgc_requeue_thread(void *data)
{
- int rc = 0;
- ENTRY;
+ int rc = 0;
+ bool first = true;
+ ENTRY;
- CDEBUG(D_MGC, "Starting requeue thread\n");
+ CDEBUG(D_MGC, "Starting requeue thread\n");
- /* Keep trying failed locks periodically */
+ /* Keep trying failed locks periodically */
spin_lock(&config_list_lock);
rq_state |= RQ_RUNNING;
- while (1) {
+ while (!(rq_state & RQ_STOP)) {
struct l_wait_info lwi;
struct config_llog_data *cld, *cld_prev;
- int rand = cfs_rand() & MGC_TIMEOUT_RAND_CENTISEC;
- int stopped = !!(rq_state & RQ_STOP);
+ int rand = prandom_u32_max(MGC_TIMEOUT_RAND_CENTISEC);
int to;
/* Any new or requeued lostlocks will change the state */
rq_state &= ~(RQ_NOW | RQ_LATER);
spin_unlock(&config_list_lock);
- /* Always wait a few seconds to allow the server who
- caused the lock revocation to finish its setup, plus some
- random so everyone doesn't try to reconnect at once. */
- to = MGC_TIMEOUT_MIN_SECONDS * CFS_HZ;
- to += rand * CFS_HZ / 100; /* rand is centi-seconds */
- lwi = LWI_TIMEOUT(to, NULL, NULL);
- l_wait_event(rq_waitq, rq_state & RQ_STOP, &lwi);
+ if (first) {
+ first = false;
+ complete(&rq_start);
+ }
- /*
- * iterate & processing through the list. for each cld, process
- * its depending sptlrpc cld firstly (if any) and then itself.
- *
- * it's guaranteed any item in the list must have
- * reference > 0; and if cld_lostlock is set, at
- * least one reference is taken by the previous enqueue.
- */
- cld_prev = NULL;
+ /* Always wait a few seconds to allow the server who
+ caused the lock revocation to finish its setup, plus some
+ random so everyone doesn't try to reconnect at once. */
+ to = cfs_time_seconds(MGC_TIMEOUT_MIN_SECONDS * 100 + rand);
+ /* rand is centi-seconds */
+ lwi = LWI_TIMEOUT(to / 100, NULL, NULL);
+ l_wait_event(rq_waitq, rq_state & (RQ_STOP | RQ_PRECLEANUP),
+ &lwi);
+
+ /*
+ * iterate & processing through the list. for each cld, process
+ * its depending sptlrpc cld firstly (if any) and then itself.
+ *
+ * it's guaranteed any item in the list must have
+ * reference > 0; and if cld_lostlock is set, at
+ * least one reference is taken by the previous enqueue.
+ */
+ cld_prev = NULL;
spin_lock(&config_list_lock);
- cfs_list_for_each_entry(cld, &config_llog_list,
- cld_list_chain) {
- if (!cld->cld_lostlock)
+ rq_state &= ~RQ_PRECLEANUP;
+ list_for_each_entry(cld, &config_llog_list,
+ cld_list_chain) {
+ if (!cld->cld_lostlock || cld->cld_stopping)
continue;
+ /* hold reference to avoid being freed during
+ * subsequent processing. */
+ config_log_get(cld);
+ cld->cld_lostlock = 0;
spin_unlock(&config_list_lock);
- LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
-
- /* Whether we enqueued again or not in mgc_process_log,
- * we're done with the ref from the old enqueue */
- if (cld_prev)
- config_log_put(cld_prev);
- cld_prev = cld;
+ config_log_put(cld_prev);
+ cld_prev = cld;
- cld->cld_lostlock = 0;
- if (likely(!stopped))
+ if (likely(!(rq_state & RQ_STOP))) {
do_requeue(cld);
-
- spin_lock(&config_list_lock);
+ spin_lock(&config_list_lock);
+ } else {
+ spin_lock(&config_list_lock);
+ break;
+ }
}
spin_unlock(&config_list_lock);
- if (cld_prev)
- config_log_put(cld_prev);
-
- /* break after scanning the list so that we can drop
- * refcount to losing lock clds */
- if (unlikely(stopped)) {
- spin_lock(&config_list_lock);
- break;
- }
+ config_log_put(cld_prev);
/* Wait a bit to see if anyone else needs a requeue */
- lwi = (struct l_wait_info) { 0 };
- l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP),
- &lwi);
+ wait_event_idle(rq_waitq, rq_state & (RQ_NOW | RQ_STOP));
spin_lock(&config_list_lock);
}
+
/* spinlock and while guarantee RQ_NOW and RQ_LATER are not set */
rq_state &= ~RQ_RUNNING;
spin_unlock(&config_list_lock);
We are responsible for dropping the config log reference from here on out. */
static void mgc_requeue_add(struct config_llog_data *cld)
{
- ENTRY;
+ bool wakeup = false;
+ ENTRY;
- CDEBUG(D_INFO, "log %s: requeue (r=%d sp=%d st=%x)\n",
- cld->cld_logname, cfs_atomic_read(&cld->cld_refcount),
- cld->cld_stopping, rq_state);
- LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
+ CDEBUG(D_INFO, "log %s: requeue (r=%d sp=%d st=%x)\n",
+ cld->cld_logname, atomic_read(&cld->cld_refcount),
+ cld->cld_stopping, rq_state);
+ LASSERT(atomic_read(&cld->cld_refcount) > 0);
mutex_lock(&cld->cld_lock);
- if (cld->cld_stopping || cld->cld_lostlock) {
- mutex_unlock(&cld->cld_lock);
- RETURN_EXIT;
- }
- /* this refcount will be released in mgc_requeue_thread. */
- config_log_get(cld);
- cld->cld_lostlock = 1;
- mutex_unlock(&cld->cld_lock);
-
- /* Hold lock for rq_state */
spin_lock(&config_list_lock);
- if (rq_state & RQ_STOP) {
- spin_unlock(&config_list_lock);
- cld->cld_lostlock = 0;
- config_log_put(cld);
- } else {
+ if (!(rq_state & RQ_STOP) && !cld->cld_stopping && !cld->cld_lostlock) {
+ cld->cld_lostlock = 1;
rq_state |= RQ_NOW;
- spin_unlock(&config_list_lock);
- cfs_waitq_signal(&rq_waitq);
+ wakeup = true;
}
+ spin_unlock(&config_list_lock);
+ mutex_unlock(&cld->cld_lock);
+ if (wakeup)
+ wake_up(&rq_waitq);
+
EXIT;
}
RETURN(0);
}
-static int mgc_fs_setup(struct obd_device *obd, struct super_block *sb)
+static int mgc_fs_setup(const struct lu_env *env, struct obd_device *obd,
+ struct super_block *sb)
{
struct lustre_sb_info *lsi = s2lsi(sb);
struct client_obd *cli = &obd->u.cli;
struct lu_fid rfid, fid;
struct dt_object *root, *dto;
- struct lu_env *env;
int rc = 0;
ENTRY;
LASSERT(lsi);
LASSERT(lsi->lsi_dt_dev);
- OBD_ALLOC_PTR(env);
- if (env == NULL)
- RETURN(-ENOMEM);
-
- /* The mgc fs exclusion sem. Only one fs can be setup at a time. */
- down(&cli->cl_mgc_sem);
-
- cfs_cleanup_group_info();
+ /* The mgc fs exclusion mutex. Only one fs can be setup at a time. */
+ mutex_lock(&cli->cl_mgc_mutex);
/* Setup the configs dir */
- rc = lu_env_init(env, LCT_MG_THREAD);
- if (rc)
- GOTO(out_err, rc);
-
fid.f_seq = FID_SEQ_LOCAL_NAME;
fid.f_oid = 1;
fid.f_ver = 0;
rc = local_oid_storage_init(env, lsi->lsi_dt_dev, &fid,
&cli->cl_mgc_los);
if (rc)
- GOTO(out_env, rc);
+ RETURN(rc);
rc = dt_root_get(env, lsi->lsi_dt_dev, &rfid);
if (rc)
- GOTO(out_env, rc);
+ GOTO(out_los, rc);
root = dt_locate_at(env, lsi->lsi_dt_dev, &rfid,
- &cli->cl_mgc_los->los_dev->dd_lu_dev);
+ &cli->cl_mgc_los->los_dev->dd_lu_dev, NULL);
if (unlikely(IS_ERR(root)))
GOTO(out_los, rc = PTR_ERR(root));
dto = local_file_find_or_create(env, cli->cl_mgc_los, root,
MOUNT_CONFIGS_DIR,
S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO);
- lu_object_put_nocache(env, &root->do_lu);
+ dt_object_put_nocache(env, root);
if (IS_ERR(dto))
GOTO(out_los, rc = PTR_ERR(dto));
EXIT;
out_llog:
if (rc) {
- lu_object_put(env, &cli->cl_mgc_configs_dir->do_lu);
+ dt_object_put(env, cli->cl_mgc_configs_dir);
cli->cl_mgc_configs_dir = NULL;
}
out_los:
if (rc < 0) {
local_oid_storage_fini(env, cli->cl_mgc_los);
cli->cl_mgc_los = NULL;
- up(&cli->cl_mgc_sem);
+ mutex_unlock(&cli->cl_mgc_mutex);
}
-out_env:
- lu_env_fini(env);
-out_err:
- OBD_FREE_PTR(env);
return rc;
}
-static int mgc_fs_cleanup(struct obd_device *obd)
+static int mgc_fs_cleanup(const struct lu_env *env, struct obd_device *obd)
{
- struct lu_env env;
struct client_obd *cli = &obd->u.cli;
- int rc;
-
ENTRY;
LASSERT(cli->cl_mgc_los != NULL);
- rc = lu_env_init(&env, LCT_MG_THREAD);
- if (rc)
- GOTO(unlock, rc);
-
- mgc_local_llog_fini(&env, obd);
+ mgc_local_llog_fini(env, obd);
- lu_object_put_nocache(&env, &cli->cl_mgc_configs_dir->do_lu);
+ dt_object_put_nocache(env, cli->cl_mgc_configs_dir);
cli->cl_mgc_configs_dir = NULL;
- local_oid_storage_fini(&env, cli->cl_mgc_los);
+ local_oid_storage_fini(env, cli->cl_mgc_los);
cli->cl_mgc_los = NULL;
- lu_env_fini(&env);
-unlock:
class_decref(obd, "mgc_fs", obd);
- up(&cli->cl_mgc_sem);
+ mutex_unlock(&cli->cl_mgc_mutex);
RETURN(0);
}
}
-static cfs_atomic_t mgc_count = CFS_ATOMIC_INIT(0);
-static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+static atomic_t mgc_count = ATOMIC_INIT(0);
+static int mgc_precleanup(struct obd_device *obd)
{
- int rc = 0;
- ENTRY;
+ int rc = 0;
+ int temp;
+ ENTRY;
- switch (stage) {
- case OBD_CLEANUP_EARLY:
- break;
- case OBD_CLEANUP_EXPORTS:
- if (cfs_atomic_dec_and_test(&mgc_count)) {
- int running;
- /* stop requeue thread */
- spin_lock(&config_list_lock);
- running = rq_state & RQ_RUNNING;
- if (running)
- rq_state |= RQ_STOP;
- spin_unlock(&config_list_lock);
- if (running) {
- cfs_waitq_signal(&rq_waitq);
- wait_for_completion(&rq_exit);
- }
- }
- obd_cleanup_client_import(obd);
- rc = mgc_llog_fini(NULL, obd);
- if (rc != 0)
- CERROR("failed to cleanup llogging subsystems\n");
- break;
- }
- RETURN(rc);
+ if (atomic_dec_and_test(&mgc_count)) {
+ LASSERT(rq_state & RQ_RUNNING);
+ /* stop requeue thread */
+ temp = RQ_STOP;
+ } else {
+ /* wakeup requeue thread to clean our cld */
+ temp = RQ_NOW | RQ_PRECLEANUP;
+ }
+
+ spin_lock(&config_list_lock);
+ rq_state |= temp;
+ spin_unlock(&config_list_lock);
+ wake_up(&rq_waitq);
+
+ if (temp & RQ_STOP)
+ wait_for_completion(&rq_exit);
+ obd_cleanup_client_import(obd);
+
+ rc = mgc_llog_fini(NULL, obd);
+ if (rc != 0)
+ CERROR("failed to cleanup llogging subsystems\n");
+
+ RETURN(rc);
}
static int mgc_cleanup(struct obd_device *obd)
/* COMPAT_146 - old config logs may have added profiles we don't
know about */
- if (obd->obd_type->typ_refcnt <= 1)
+ if (atomic_read(&obd->obd_type->typ_refcnt) <= 1)
/* Only for the last mgc */
class_del_profiles();
static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
{
- struct lprocfs_static_vars lvars;
- int rc;
- ENTRY;
+ struct task_struct *task;
+ int rc;
+ ENTRY;
- ptlrpcd_addref();
+ rc = ptlrpcd_addref();
+ if (rc < 0)
+ RETURN(rc);
- rc = client_obd_setup(obd, lcfg);
- if (rc)
- GOTO(err_decref, rc);
+ rc = client_obd_setup(obd, lcfg);
+ if (rc)
+ GOTO(err_decref, rc);
rc = mgc_llog_init(NULL, obd);
- if (rc) {
- CERROR("failed to setup llogging subsystems\n");
- GOTO(err_cleanup, rc);
- }
+ if (rc) {
+ CERROR("failed to setup llogging subsystems\n");
+ GOTO(err_cleanup, rc);
+ }
- lprocfs_mgc_init_vars(&lvars);
- lprocfs_obd_setup(obd, lvars.obd_vars);
- sptlrpc_lprocfs_cliobd_attach(obd);
+ rc = mgc_tunables_init(obd);
+ if (rc)
+ GOTO(err_sysfs, rc);
- if (cfs_atomic_inc_return(&mgc_count) == 1) {
+ if (atomic_inc_return(&mgc_count) == 1) {
rq_state = 0;
- cfs_waitq_init(&rq_waitq);
+ init_waitqueue_head(&rq_waitq);
/* start requeue thread */
- rc = PTR_ERR(kthread_run(mgc_requeue_thread, NULL,
- "ll_cfg_requeue"));
- if (IS_ERR_VALUE(rc)) {
- CERROR("%s: Cannot start requeue thread (%d),"
- "no more log updates!\n",
+ task = kthread_run(mgc_requeue_thread, NULL, "ll_cfg_requeue");
+ if (IS_ERR(task)) {
+ rc = PTR_ERR(task);
+ CERROR("%s: cannot start requeue thread: rc = %d; "
+ "no more log updates\n",
obd->obd_name, rc);
- GOTO(err_cleanup, rc);
+ GOTO(err_sysfs, rc);
}
/* rc is the task_struct pointer of mgc_requeue_thread. */
rc = 0;
- }
+ wait_for_completion(&rq_start);
+ }
- RETURN(rc);
+ RETURN(rc);
+err_sysfs:
+ lprocfs_obd_cleanup(obd);
err_cleanup:
- client_obd_cleanup(obd);
+ client_obd_cleanup(obd);
err_decref:
- ptlrpcd_decref();
- RETURN(rc);
+ ptlrpcd_decref();
+ RETURN(rc);
}
/* based on ll_mdc_blocking_ast */
PLDLMRES(lock->l_resource),
(char *)&lock->l_resource->lr_name.name[0]);
- if (!cld) {
- CDEBUG(D_INFO, "missing data, won't requeue\n");
- break;
- }
+ if (!cld) {
+ CDEBUG(D_INFO, "missing data, won't requeue\n");
+ break;
+ }
- /* held at mgc_process_log(). */
- LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
- /* Are we done with this log? */
- if (cld->cld_stopping) {
- CDEBUG(D_MGC, "log %s: stopping, won't requeue\n",
- cld->cld_logname);
- config_log_put(cld);
- break;
- }
- /* Make sure not to re-enqueue when the mgc is stopping
- (we get called from client_disconnect_export) */
- if (!lock->l_conn_export ||
- !lock->l_conn_export->exp_obd->u.cli.cl_conn_count) {
- CDEBUG(D_MGC, "log %.8s: disconnecting, won't requeue\n",
- cld->cld_logname);
- config_log_put(cld);
- break;
- }
+ /* held at mgc_process_log(). */
+ LASSERT(atomic_read(&cld->cld_refcount) > 0);
+
+ lock->l_ast_data = NULL;
+ /* Are we done with this log? */
+ if (cld->cld_stopping) {
+ CDEBUG(D_MGC, "log %s: stopping, won't requeue\n",
+ cld->cld_logname);
+ config_log_put(cld);
+ break;
+ }
+ /* Make sure not to re-enqueue when the mgc is stopping
+ (we get called from client_disconnect_export) */
+ if (lock->l_conn_export == NULL ||
+ lock->l_conn_export->exp_obd->u.cli.cl_conn_count == 0) {
+ CDEBUG(D_MGC, "log %.8s: disconnecting, won't requeue\n",
+ cld->cld_logname);
+ config_log_put(cld);
+ break;
+ }
/* Re-enqueue now */
mgc_requeue_add(cld);
}
/* Not sure where this should go... */
-#define MGC_ENQUEUE_LIMIT 50
+/* This is the timeout value for MGS_CONNECT request plus a ping interval, such
+ * that we can have a chance to try the secondary MGS if any. */
+#define MGC_ENQUEUE_LIMIT (INITIAL_CONNECT_TIMEOUT + (AT_OFF ? 0 : at_min) \
+ + PING_INTERVAL)
#define MGC_TARGET_REG_LIMIT 10
+#define MGC_TARGET_REG_LIMIT_MAX RECONNECT_DELAY_MAX
#define MGC_SEND_PARAM_LIMIT 10
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 13, 53, 0)
/* Send parameter to MGS*/
static int mgc_set_mgs_param(struct obd_export *exp,
struct mgs_send_param *msp)
RETURN(rc);
}
+#endif
/* Take a config lock so we can get cancel notifications */
-static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
- __u32 type, ldlm_policy_data_t *policy, __u32 mode,
- __u64 *flags, void *bl_cb, void *cp_cb, void *gl_cb,
+static int mgc_enqueue(struct obd_export *exp, enum ldlm_type type,
+ union ldlm_policy_data *policy, enum ldlm_mode mode,
+ __u64 *flags, ldlm_glimpse_callback glimpse_callback,
void *data, __u32 lvb_len, void *lvb_swabber,
struct lustre_handle *lockh)
{
.ei_mode = mode,
.ei_cb_bl = mgc_blocking_ast,
.ei_cb_cp = ldlm_completion_ast,
+ .ei_cb_gl = glimpse_callback,
};
struct ptlrpc_request *req;
int short_limit = cld_is_sptlrpc(cld);
int rc;
ENTRY;
- CDEBUG(D_MGC, "Enqueue for %s (res "LPX64")\n", cld->cld_logname,
+ CDEBUG(D_MGC, "Enqueue for %s (res %#llx)\n", cld->cld_logname,
cld->cld_resid.name[0]);
/* We need a callback for every lockholder, so don't try to
RETURN(rc);
}
-static int mgc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
- __u32 mode, struct lustre_handle *lockh)
+static int mgc_cancel(struct obd_export *exp, enum ldlm_mode mode,
+ struct lustre_handle *lockh)
{
- ENTRY;
+ ENTRY;
- ldlm_lock_decref(lockh, mode);
+ ldlm_lock_decref(lockh, mode);
- RETURN(0);
+ RETURN(0);
}
static void mgc_notify_active(struct obd_device *unused)
spin_lock(&config_list_lock);
rq_state |= RQ_NOW;
spin_unlock(&config_list_lock);
- cfs_waitq_signal(&rq_waitq);
+ wake_up(&rq_waitq);
/* TODO: Help the MGS rebuild nidtbl. -jay */
}
RETURN(-ENOMEM);
}
- memcpy(req_mti, mti, sizeof(*req_mti));
- ptlrpc_request_set_replen(req);
- CDEBUG(D_MGC, "register %s\n", mti->mti_svname);
- /* Limit how long we will wait for the enqueue to complete */
- req->rq_delay_limit = MGC_TARGET_REG_LIMIT;
+ memcpy(req_mti, mti, sizeof(*req_mti));
+ ptlrpc_request_set_replen(req);
+ CDEBUG(D_MGC, "register %s\n", mti->mti_svname);
+ /* Limit how long we will wait for the enqueue to complete */
+ req->rq_delay_limit = MGC_TARGET_REG_LIMIT;
+
+ /* if the target needs to regenerate the config log in MGS, it's better
+ * to use some longer limit to let MGC have time to change connection to
+ * another MGS (or try again with the same MGS) since the target (server)
+ * will fail and exit if the request expires due to the delay limit.
+ if (mti->mti_flags & (LDD_F_UPDATE | LDD_F_NEED_INDEX))
+ req->rq_delay_limit = MGC_TARGET_REG_LIMIT_MAX;
rc = ptlrpc_queue_wait(req);
if (!rc) {
RETURN(rc);
}
-int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
- obd_count keylen, void *key, obd_count vallen,
- void *val, struct ptlrpc_request_set *set)
+static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
+ u32 keylen, void *key,
+ u32 vallen, void *val,
+ struct ptlrpc_request_set *set)
{
int rc = -EINVAL;
ENTRY;
- /* Turn off initial_recov after we try all backup servers once */
- if (KEY_IS(KEY_INIT_RECOV_BACKUP)) {
- struct obd_import *imp = class_exp2cliimp(exp);
- int value;
- if (vallen != sizeof(int))
- RETURN(-EINVAL);
- value = *(int *)val;
- CDEBUG(D_MGC, "InitRecov %s %d/d%d:i%d:r%d:or%d:%s\n",
- imp->imp_obd->obd_name, value,
- imp->imp_deactive, imp->imp_invalid,
- imp->imp_replayable, imp->imp_obd->obd_replayable,
- ptlrpc_import_state_name(imp->imp_state));
- /* Resurrect if we previously died */
- if ((imp->imp_state != LUSTRE_IMP_FULL &&
- imp->imp_state != LUSTRE_IMP_NEW) || value > 1)
- ptlrpc_reconnect_import(imp);
- RETURN(0);
- }
+ /* Turn off initial_recov after we try all backup servers once */
+ if (KEY_IS(KEY_INIT_RECOV_BACKUP)) {
+ struct obd_import *imp = class_exp2cliimp(exp);
+ int value;
+ if (vallen != sizeof(int))
+ RETURN(-EINVAL);
+ value = *(int *)val;
+ CDEBUG(D_MGC, "InitRecov %s %d/d%d:i%d:r%d:or%d:%s\n",
+ imp->imp_obd->obd_name, value,
+ imp->imp_deactive, imp->imp_invalid,
+ imp->imp_replayable, imp->imp_obd->obd_replayable,
+ ptlrpc_import_state_name(imp->imp_state));
+ /* Resurrect the import immediately if
+ * 1. we previously got disconnected,
+ * 2. value > 1 (at the same node with MGS)
+ * */
+ if (imp->imp_state == LUSTRE_IMP_DISCON || value > 1)
+ ptlrpc_reconnect_import(imp);
+
+ RETURN(0);
+ }
+
/* FIXME move this to mgc_process_config */
if (KEY_IS(KEY_REGISTER_TARGET)) {
struct mgs_target_info *mti;
if (vallen != sizeof(struct super_block))
RETURN(-EINVAL);
- rc = mgc_fs_setup(exp->exp_obd, sb);
+ rc = mgc_fs_setup(env, exp->exp_obd, sb);
RETURN(rc);
}
if (KEY_IS(KEY_CLEAR_FS)) {
if (vallen != 0)
RETURN(-EINVAL);
- rc = mgc_fs_cleanup(exp->exp_obd);
+ rc = mgc_fs_cleanup(env, exp->exp_obd);
RETURN(rc);
}
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 13, 53, 0)
if (KEY_IS(KEY_SET_INFO)) {
struct mgs_send_param *msp;
rc = mgc_set_mgs_param(exp, msp);
RETURN(rc);
}
+#endif
if (KEY_IS(KEY_MGSSEC)) {
struct client_obd *cli = &exp->exp_obd->u.cli;
struct sptlrpc_flavor flvr;
}
static int mgc_get_info(const struct lu_env *env, struct obd_export *exp,
- __u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *unused)
+ __u32 keylen, void *key, __u32 *vallen, void *val)
{
int rc = -EINVAL;
}
enum {
- CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
+ CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT),
CONFIG_READ_NRPAGES = 4
};
__u64 max_version,
void *data, int datalen, bool mne_swab)
{
- struct config_llog_instance *cfg = &cld->cld_cfg;
- struct lustre_sb_info *lsi = s2lsi(cfg->cfg_sb);
- struct mgs_nidtbl_entry *entry;
- struct lustre_cfg *lcfg;
- struct lustre_cfg_bufs bufs;
- u64 prev_version = 0;
- char *inst;
- char *buf;
- int bufsz;
- int pos;
- int rc = 0;
- int off = 0;
- ENTRY;
+ struct config_llog_instance *cfg = &cld->cld_cfg;
+ struct lustre_sb_info *lsi = s2lsi(cfg->cfg_sb);
+ struct mgs_nidtbl_entry *entry;
+ struct lustre_cfg *lcfg;
+ struct lustre_cfg_bufs bufs;
+ u64 prev_version = 0;
+ char *inst;
+ char *buf;
+ int bufsz;
+ int pos = 0;
+ int rc = 0;
+ int off = 0;
- LASSERT(cfg->cfg_instance != NULL);
- LASSERT(cfg->cfg_sb == cfg->cfg_instance);
+ ENTRY;
+ LASSERT(cfg->cfg_instance != 0);
+ LASSERT(ll_get_cfg_instance(cfg->cfg_sb) == cfg->cfg_instance);
- OBD_ALLOC(inst, PAGE_CACHE_SIZE);
+ OBD_ALLOC(inst, PAGE_SIZE);
if (inst == NULL)
RETURN(-ENOMEM);
if (!IS_SERVER(lsi)) {
- pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance);
- if (pos >= PAGE_CACHE_SIZE) {
- OBD_FREE(inst, PAGE_CACHE_SIZE);
+ pos = snprintf(inst, PAGE_SIZE, "%016lx", cfg->cfg_instance);
+ if (pos >= PAGE_SIZE) {
+ OBD_FREE(inst, PAGE_SIZE);
return -E2BIG;
}
- } else {
+#ifdef HAVE_SERVER_SUPPORT
+ } else {
LASSERT(IS_MDT(lsi));
rc = server_name2svname(lsi->lsi_svname, inst, NULL,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
if (rc) {
- OBD_FREE(inst, PAGE_CACHE_SIZE);
+ OBD_FREE(inst, PAGE_SIZE);
RETURN(-EINVAL);
}
pos = strlen(inst);
- }
+#endif /* HAVE_SERVER_SUPPORT */
+ }
++pos;
buf = inst + pos;
- bufsz = PAGE_CACHE_SIZE - pos;
+ bufsz = PAGE_SIZE - pos;
while (datalen > 0) {
int entry_len = sizeof(*entry);
- int is_ost;
+ int is_ost, i;
struct obd_device *obd;
char *obdname;
char *cname;
/* Keep this swab for normal mixed endian handling. LU-1644 */
if (mne_swab)
lustre_swab_mgs_nidtbl_entry(entry);
- if (entry->mne_length > PAGE_CACHE_SIZE) {
+ if (entry->mne_length > PAGE_SIZE) {
CERROR("MNE too large (%u)\n", entry->mne_length);
break;
}
continue;
}
- /* TODO: iterate all nids to find one */
- /* find uuid by nid */
- rc = client_import_find_conn(obd->u.cli.cl_import,
- entry->u.nids[0],
- (struct obd_uuid *)uuid);
+ /* iterate all nids to find one */
+ /* find uuid by nid */
+ rc = -ENOENT;
+ for (i = 0; i < entry->mne_nid_count; i++) {
+ rc = client_import_find_conn(obd->u.cli.cl_import,
+ entry->u.nids[i],
+ (struct obd_uuid *)uuid);
+ if (rc == 0)
+ break;
+ }
+
up_read(&obd->u.cli.cl_sem);
if (rc < 0) {
CERROR("mgc: cannot find uuid by nid %s\n",
lustre_cfg_bufs_set_string(&bufs, 1, params);
- rc = -ENOMEM;
- lcfg = lustre_cfg_new(LCFG_PARAM, &bufs);
- if (lcfg == NULL) {
- CERROR("mgc: cannot allocate memory\n");
- break;
- }
+ OBD_ALLOC(lcfg, lustre_cfg_len(bufs.lcfg_bufcount,
+ bufs.lcfg_buflen));
+ if (!lcfg) {
+ rc = -ENOMEM;
+ break;
+ }
+ lustre_cfg_init(lcfg, LCFG_PARAM, &bufs);
- CDEBUG(D_INFO, "ir apply logs "LPD64"/"LPD64" for %s -> %s\n",
+ CDEBUG(D_INFO, "ir apply logs %lld/%lld for %s -> %s\n",
prev_version, max_version, obdname, params);
rc = class_process_config(lcfg);
- lustre_cfg_free(lcfg);
+ OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount,
+ lcfg->lcfg_buflens));
if (rc)
CDEBUG(D_INFO, "process config for %s error %d\n",
obdname, rc);
/* continue, even one with error */
}
- OBD_FREE(inst, PAGE_CACHE_SIZE);
+ OBD_FREE(inst, PAGE_SIZE);
RETURN(rc);
}
/**
* This function is called if this client was notified for target restarting
- * by the MGS. A CONFIG_READ RPC is going to send to fetch recovery logs.
+ * by the MGS. A CONFIG_READ RPC is going to send to fetch recovery or
+ * nodemap logs.
*/
-static int mgc_process_recover_log(struct obd_device *obd,
- struct config_llog_data *cld)
+static int mgc_process_recover_nodemap_log(struct obd_device *obd,
+ struct config_llog_data *cld)
{
- struct ptlrpc_request *req = NULL;
- struct config_llog_instance *cfg = &cld->cld_cfg;
- struct mgs_config_body *body;
- struct mgs_config_res *res;
- struct ptlrpc_bulk_desc *desc;
- struct page **pages;
- int nrpages;
- bool eof = true;
+ struct ptlrpc_connection *mgc_conn;
+ struct ptlrpc_request *req = NULL;
+ struct config_llog_instance *cfg = &cld->cld_cfg;
+ struct mgs_config_body *body;
+ struct mgs_config_res *res;
+ struct nodemap_config *new_config = NULL;
+ struct lu_nodemap *recent_nodemap = NULL;
+ struct ptlrpc_bulk_desc *desc;
+ struct page **pages = NULL;
+ __u64 config_read_offset = 0;
+ __u8 nodemap_cur_pass = 0;
+ int nrpages = 0;
+ bool eof = true;
bool mne_swab = false;
- int i;
- int ealen;
- int rc;
- ENTRY;
+ int i;
+ int ealen;
+ int rc;
+ ENTRY;
+
+ mgc_conn = class_exp2cliimp(cld->cld_mgcexp)->imp_connection;
+
+ /* don't need to get local config */
+ if (cld_is_nodemap(cld) &&
+ (LNET_NETTYP(LNET_NIDNET(mgc_conn->c_peer.nid)) == LOLND))
+ GOTO(out, rc = 0);
/* allocate buffer for bulk transfer.
* if this is the first time for this mgs to read logs,
* small and CONFIG_READ_NRPAGES will be used.
*/
nrpages = CONFIG_READ_NRPAGES;
- if (cfg->cfg_last_idx == 0) /* the first time */
+ if (cfg->cfg_last_idx == 0 || cld_is_nodemap(cld))
nrpages = CONFIG_READ_NRPAGES_INIT;
OBD_ALLOC(pages, sizeof(*pages) * nrpages);
GOTO(out, rc = -ENOMEM);
for (i = 0; i < nrpages; i++) {
- pages[i] = alloc_page(GFP_IOFS);
+ pages[i] = alloc_page(GFP_KERNEL);
if (pages[i] == NULL)
GOTO(out, rc = -ENOMEM);
}
again:
- LASSERT(cld_is_recover(cld));
+#ifdef HAVE_SERVER_SUPPORT
+ if (cld_is_nodemap(cld) && config_read_offset == 0) {
+ new_config = nodemap_config_alloc();
+ if (IS_ERR(new_config)) {
+ rc = PTR_ERR(new_config);
+ new_config = NULL;
+ GOTO(out, rc);
+ }
+ }
+#endif
+ LASSERT(cld_is_recover(cld) || cld_is_nodemap(cld));
LASSERT(mutex_is_locked(&cld->cld_lock));
- req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp),
- &RQF_MGS_CONFIG_READ);
- if (req == NULL)
- GOTO(out, rc = -ENOMEM);
+ req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp),
+ &RQF_MGS_CONFIG_READ);
+ if (req == NULL)
+ GOTO(out, rc = -ENOMEM);
- rc = ptlrpc_request_pack(req, LUSTRE_MGS_VERSION, MGS_CONFIG_READ);
- if (rc)
- GOTO(out, rc);
+ rc = ptlrpc_request_pack(req, LUSTRE_MGS_VERSION, MGS_CONFIG_READ);
+ if (rc)
+ GOTO(out, rc);
- /* pack request */
- body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY);
- LASSERT(body != NULL);
- LASSERT(sizeof(body->mcb_name) > strlen(cld->cld_logname));
+ /* pack request */
+ body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY);
+ LASSERT(body != NULL);
+ LASSERT(sizeof(body->mcb_name) > strlen(cld->cld_logname));
if (strlcpy(body->mcb_name, cld->cld_logname, sizeof(body->mcb_name))
>= sizeof(body->mcb_name))
GOTO(out, rc = -E2BIG);
- body->mcb_offset = cfg->cfg_last_idx + 1;
- body->mcb_type = cld->cld_type;
- body->mcb_bits = PAGE_CACHE_SHIFT;
- body->mcb_units = nrpages;
+ if (cld_is_nodemap(cld))
+ body->mcb_offset = config_read_offset;
+ else
+ body->mcb_offset = cfg->cfg_last_idx + 1;
+ body->mcb_type = cld->cld_type;
+ body->mcb_bits = PAGE_SHIFT;
+ body->mcb_units = nrpages;
+ body->mcb_nm_cur_pass = nodemap_cur_pass;
/* allocate bulk transfer descriptor */
- desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, BULK_PUT_SINK,
- MGS_BULK_PORTAL);
+ desc = ptlrpc_prep_bulk_imp(req, nrpages, 1,
+ PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+ MGS_BULK_PORTAL,
+ &ptlrpc_bulk_kiov_pin_ops);
if (desc == NULL)
GOTO(out, rc = -ENOMEM);
for (i = 0; i < nrpages; i++)
- ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
+ desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
+ PAGE_SIZE);
- ptlrpc_request_set_replen(req);
- rc = ptlrpc_queue_wait(req);
- if (rc)
- GOTO(out, rc);
+ ptlrpc_request_set_replen(req);
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ GOTO(out, rc);
- res = req_capsule_server_get(&req->rq_pill, &RMF_MGS_CONFIG_RES);
- if (res->mcr_size < res->mcr_offset)
- GOTO(out, rc = -EINVAL);
+ res = req_capsule_server_get(&req->rq_pill, &RMF_MGS_CONFIG_RES);
+ if (!res)
+ GOTO(out, rc = -EPROTO);
- /* always update the index even though it might have errors with
- * handling the recover logs */
- cfg->cfg_last_idx = res->mcr_offset;
- eof = res->mcr_offset == res->mcr_size;
+ if (cld_is_nodemap(cld)) {
+ config_read_offset = res->mcr_offset;
+ eof = config_read_offset == II_END_OFF;
+ nodemap_cur_pass = res->mcr_nm_cur_pass;
+ } else {
+ if (res->mcr_size < res->mcr_offset)
+ GOTO(out, rc = -EINVAL);
- CDEBUG(D_INFO, "Latest version "LPD64", more %d.\n",
- res->mcr_offset, eof == false);
+ /* always update the index even though it might have errors with
+ * handling the recover logs
+ */
+ cfg->cfg_last_idx = res->mcr_offset;
+ eof = res->mcr_offset == res->mcr_size;
- ealen = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, 0);
- if (ealen < 0)
- GOTO(out, rc = ealen);
+ CDEBUG(D_INFO, "Latest version %lld, more %d.\n",
+ res->mcr_offset, eof == false);
+ }
- if (ealen > nrpages << PAGE_CACHE_SHIFT)
- GOTO(out, rc = -EINVAL);
+ ealen = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, 0);
+ if (ealen < 0)
+ GOTO(out, rc = ealen);
- if (ealen == 0) { /* no logs transferred */
- if (!eof)
- rc = -EINVAL;
- GOTO(out, rc);
- }
+ if (ealen > nrpages << PAGE_SHIFT)
+ GOTO(out, rc = -EINVAL);
- mne_swab = !!ptlrpc_rep_need_swab(req);
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
- /* This import flag means the server did an extra swab of IR MNE
- * records (fixed in LU-1252), reverse it here if needed. LU-1644 */
- if (unlikely(req->rq_import->imp_need_mne_swab))
- mne_swab = !mne_swab;
-#else
-#warning "LU-1644: Remove old OBD_CONNECT_MNE_SWAB fixup and imp_need_mne_swab"
+ if (ealen == 0) { /* no logs transferred */
+#ifdef HAVE_SERVER_SUPPORT
+ /* config changed since first read RPC */
+ if (cld_is_nodemap(cld) && config_read_offset == 0) {
+ CDEBUG(D_INFO, "nodemap config changed in transit, retrying\n");
+ GOTO(out, rc = -EAGAIN);
+ }
#endif
+ if (!eof)
+ rc = -EINVAL;
+ GOTO(out, rc);
+ }
+
+ mne_swab = ptlrpc_rep_need_swab(req);
- for (i = 0; i < nrpages && ealen > 0; i++) {
- int rc2;
- void *ptr;
+ /* When a nodemap config is received, we build a new nodemap config,
+ * with new nodemap structs. We keep track of the most recently added
+ * nodemap since the config is read ordered by nodemap_id, and so it
+ * is likely that the next record will be related. Because access to
+ * the nodemaps is single threaded until the nodemap_config is active,
+ * we don't need to reference count with recent_nodemap, though
+ * recent_nodemap should be set to NULL when the nodemap_config
+ * is either destroyed or set active.
+ */
+ for (i = 0; i < nrpages && ealen > 0; i++) {
+ int rc2;
+ union lu_page *ptr;
ptr = kmap(pages[i]);
- rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
- min_t(int, ealen, PAGE_CACHE_SIZE),
- mne_swab);
+ if (cld_is_nodemap(cld))
+ rc2 = nodemap_process_idx_pages(new_config, ptr,
+ &recent_nodemap);
+ else
+ rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset,
+ ptr,
+ min_t(int, ealen,
+ PAGE_SIZE),
+ mne_swab);
kunmap(pages[i]);
if (rc2 < 0) {
- CWARN("Process recover log %s error %d\n",
- cld->cld_logname, rc2);
- break;
- }
+ CWARN("%s: error processing %s log %s: rc = %d\n",
+ obd->obd_name,
+ cld_is_nodemap(cld) ? "nodemap" : "recovery",
+ cld->cld_logname,
+ rc2);
+ GOTO(out, rc = rc2);
+ }
- ealen -= PAGE_CACHE_SIZE;
- }
+ ealen -= PAGE_SIZE;
+ }
out:
- if (req)
- ptlrpc_req_finished(req);
+ if (req) {
+ ptlrpc_req_finished(req);
+ req = NULL;
+ }
+
+ if (rc == 0 && !eof)
+ goto again;
- if (rc == 0 && !eof)
- goto again;
+#ifdef HAVE_SERVER_SUPPORT
+ if (new_config != NULL) {
+ /* recent_nodemap cannot be used after set_active/dealloc */
+ if (rc == 0)
+ nodemap_config_set_active_mgc(new_config);
+ else
+ nodemap_config_dealloc(new_config);
+ }
+#endif
if (pages) {
for (i = 0; i < nrpages; i++) {
return rc;
}
+static int mgc_barrier_glimpse_ast(struct ldlm_lock *lock, void *data)
+{
+ struct config_llog_data *cld = lock->l_ast_data;
+ int rc;
+ ENTRY;
+
+ if (cld->cld_stopping)
+ RETURN(-ENODEV);
+
+ rc = barrier_handler(s2lsi(cld->cld_cfg.cfg_sb)->lsi_dt_dev,
+ (struct ptlrpc_request *)data);
+
+ RETURN(rc);
+}
+
/* Copy a remote log locally */
static int mgc_llog_local_copy(const struct lu_env *env,
struct obd_device *obd,
* - if failed then move backup to logname again
*/
- OBD_ALLOC(temp_log, strlen(logname) + 1);
+ OBD_ALLOC(temp_log, strlen(logname) + 2);
if (!temp_log)
RETURN(-ENOMEM);
sprintf(temp_log, "%sT", logname);
obd->obd_name, logname, rc);
}
llog_erase(env, lctxt, NULL, temp_log);
- OBD_FREE(temp_log, strlen(logname) + 1);
+ OBD_FREE(temp_log, strlen(logname) + 2);
return rc;
}
struct client_obd *cli = &mgc->u.cli;
struct lustre_sb_info *lsi = NULL;
int rc = 0;
- bool sptlrpc_started = false;
struct lu_env *env;
ENTRY;
LASSERT(cld);
LASSERT(mutex_is_locked(&cld->cld_lock));
- /*
- * local copy of sptlrpc log is controlled elsewhere, don't try to
- * read it up here.
- */
- if (cld_is_sptlrpc(cld) && local_only)
- RETURN(0);
-
if (cld->cld_cfg.cfg_sb)
lsi = s2lsi(cld->cld_cfg.cfg_sb);
cli->cl_mgc_configs_dir != NULL &&
lu2dt_dev(cli->cl_mgc_configs_dir->do_lu.lo_dev) ==
lsi->lsi_dt_dev) {
- if (!local_only)
+ if (!local_only && !lsi->lsi_dt_dev->dd_rdonly)
/* Only try to copy log if we have the lock. */
rc = mgc_llog_local_copy(env, mgc, ctxt, lctxt,
cld->cld_logname);
if (local_only || rc) {
- if (llog_is_empty(env, lctxt, cld->cld_logname)) {
+ if (strcmp(cld->cld_logname, PARAMS_FILENAME) != 0 &&
+ llog_is_empty(env, lctxt, cld->cld_logname)) {
LCONSOLE_ERROR_MSG(0x13a, "Failed to get MGS "
"log %s and no local copy."
"\n", cld->cld_logname);
CDEBUG(D_MGC, "Failed to get MGS log %s, using local "
"copy for now, will try to update later.\n",
cld->cld_logname);
+ rc = 0;
}
/* Now, whether we copied or not, start using the local llog.
* If we failed to copy, we'll start using whatever the old
GOTO(out_pop, rc = -EIO);
}
- if (cld_is_sptlrpc(cld)) {
- sptlrpc_conf_log_update_begin(cld->cld_logname);
- sptlrpc_started = true;
- }
+ rc = -EAGAIN;
+ if (lsi && IS_SERVER(lsi) && !IS_MGS(lsi) &&
+ lsi->lsi_dt_dev->dd_rdonly) {
+ struct llog_ctxt *rctxt;
- /* logname and instance info should be the same, so use our
- * copy of the instance for the update. The cfg_last_idx will
- * be updated here. */
- rc = class_config_parse_llog(env, ctxt, cld->cld_logname,
- &cld->cld_cfg);
- EXIT;
+ /* Under readonly mode, we may have no local copy or local
+ * copy is incomplete, so try to use the remote llog first. */
+ rctxt = llog_get_context(mgc, LLOG_CONFIG_REPL_CTXT);
+ LASSERT(rctxt);
-out_pop:
- __llog_ctxt_put(env, ctxt);
- if (lctxt)
- __llog_ctxt_put(env, lctxt);
+ rc = class_config_parse_llog(env, rctxt, cld->cld_logname,
+ &cld->cld_cfg);
+ llog_ctxt_put(rctxt);
+ }
+
+ if (rc && rc != -ENOENT)
+ rc = class_config_parse_llog(env, ctxt, cld->cld_logname,
+ &cld->cld_cfg);
/*
- * update settings on existing OBDs. doing it inside
- * of llog_process_lock so no device is attaching/detaching
- * in parallel.
+ * update settings on existing OBDs.
* the logname must be <fsname>-sptlrpc
*/
- if (sptlrpc_started) {
- LASSERT(cld_is_sptlrpc(cld));
- sptlrpc_conf_log_update_end(cld->cld_logname);
+ if (rc == 0 && cld_is_sptlrpc(cld))
class_notify_sptlrpc_conf(cld->cld_logname,
strlen(cld->cld_logname) -
strlen("-sptlrpc"));
- }
+ EXIT;
+
+out_pop:
+ __llog_ctxt_put(env, ctxt);
+ if (lctxt)
+ __llog_ctxt_put(env, lctxt);
lu_env_fini(env);
out_free:
return rc;
}
-/** Get a config log from the MGS and process it.
- * This func is called for both clients and servers.
- * Copy the log locally before parsing it if appropriate (non-MGS server)
+static bool mgc_import_in_recovery(struct obd_import *imp)
+{
+ bool in_recovery = true;
+
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state == LUSTRE_IMP_FULL ||
+ imp->imp_state == LUSTRE_IMP_CLOSED)
+ in_recovery = false;
+ spin_unlock(&imp->imp_lock);
+
+ return in_recovery;
+}
+
+/**
+ * Get a configuration log from the MGS and process it.
+ *
+ * This function is called for both clients and servers to process the
+ * configuration log from the MGS. The MGC enqueues a DLM lock on the
+ * log from the MGS, and if the lock gets revoked the MGC will be notified
+ * by the lock cancellation callback that the config log has changed,
+ * and will enqueue another MGS lock on it, and then continue processing
+ * the new additions to the end of the log.
+ *
+ * Since the MGC import is not replayable, if the import is being evicted
+ * (rcl == -ESHUTDOWN, \see ptlrpc_import_delay_req()), retry to process
+ * the log until recovery is finished or the import is closed.
+ *
+ * Make a local copy of the log before parsing it if appropriate (non-MGS
+ * server) so that the server can start even when the MGS is down.
+ *
+ * There shouldn't be multiple processes running process_log at once --
+ * sounds like badness. It actually might be fine, as long as they're not
+ * trying to update from the same log simultaneously, in which case we
+ * should use a per-log semaphore instead of cld_lock.
+ *
+ * \param[in] mgc MGC device by which to fetch the configuration log
+ * \param[in] cld log processing state (stored in lock callback data)
+ *
+ * \retval 0 on success
+ * \retval negative errno on failure
*/
int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
{
struct lustre_handle lockh = { 0 };
__u64 flags = LDLM_FL_NO_LRU;
int rc = 0, rcl;
+ bool retry = false;
ENTRY;
- LASSERT(cld);
+ LASSERT(cld != NULL);
/* I don't want multiple processes running process_log at once --
sounds like badness. It actually might be fine, as long as
we're not trying to update from the same log
simultaneously (in which case we should use a per-log sem.) */
+restart:
mutex_lock(&cld->cld_lock);
if (cld->cld_stopping) {
mutex_unlock(&cld->cld_lock);
- RETURN(0);
- }
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_MGC_PAUSE_PROCESS_LOG, 20);
-
- CDEBUG(D_MGC, "Process log %s:%p from %d\n", cld->cld_logname,
- cld->cld_cfg.cfg_instance, cld->cld_cfg.cfg_last_idx + 1);
-
- /* Get the cfg lock on the llog */
- rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, NULL, LDLM_PLAIN, NULL,
- LCK_CR, &flags, NULL, NULL, NULL,
- cld, 0, NULL, &lockh);
- if (rcl == 0) {
- /* Get the cld, it will be released in mgc_blocking_ast. */
- config_log_get(cld);
- rc = ldlm_lock_set_data(&lockh, (void *)cld);
- LASSERT(rc == 0);
- } else {
- CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl);
-
- /* mark cld_lostlock so that it will requeue
- * after MGC becomes available. */
- cld->cld_lostlock = 1;
- /* Get extra reference, it will be put in requeue thread */
- config_log_get(cld);
- }
+ RETURN(0);
+ }
+ OBD_FAIL_TIMEOUT(OBD_FAIL_MGC_PAUSE_PROCESS_LOG, 20);
+
+ CDEBUG(D_MGC, "Process log %s-%016lx from %d\n", cld->cld_logname,
+ cld->cld_cfg.cfg_instance, cld->cld_cfg.cfg_last_idx + 1);
+
+ /* Get the cfg lock on the llog */
+ rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, LDLM_PLAIN, NULL,
+ LCK_CR, &flags,
+ cld_is_barrier(cld) ? mgc_barrier_glimpse_ast : NULL,
+ cld, 0, NULL, &lockh);
+ if (rcl == 0) {
+ /* Get the cld, it will be released in mgc_blocking_ast. */
+ config_log_get(cld);
+ rc = ldlm_lock_set_data(&lockh, (void *)cld);
+ LASSERT(rc == 0);
+ } else {
+ CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl);
+
+ if (rcl == -ESHUTDOWN &&
+ atomic_read(&mgc->u.cli.cl_mgc_refcount) > 0 && !retry) {
+ struct obd_import *imp;
+ struct l_wait_info lwi;
+ long timeout = cfs_time_seconds(obd_timeout);
+
+ mutex_unlock(&cld->cld_lock);
+ imp = class_exp2cliimp(mgc->u.cli.cl_mgc_mgsexp);
+
+ /* Let's force the pinger, and wait the import to be
+ * connected, note: since mgc import is non-replayable,
+ * and even the import state is disconnected, it does
+ * not mean the "recovery" is stopped, so we will keep
+ * waiting until timeout or the import state is
+ * FULL or closed */
+ ptlrpc_pinger_force(imp);
+
+ lwi = LWI_TIMEOUT(timeout, NULL, NULL);
+ l_wait_event(imp->imp_recovery_waitq,
+ !mgc_import_in_recovery(imp), &lwi);
+
+ if (imp->imp_state == LUSTRE_IMP_FULL) {
+ retry = true;
+ goto restart;
+ } else {
+ mutex_lock(&cld->cld_lock);
+ /* unlock/lock mutex, so check stopping again */
+ if (cld->cld_stopping) {
+ mutex_unlock(&cld->cld_lock);
+ RETURN(0);
+ }
+ spin_lock(&config_list_lock);
+ cld->cld_lostlock = 1;
+ spin_unlock(&config_list_lock);
+ }
+ } else {
+ /* mark cld_lostlock so that it will requeue
+ * after MGC becomes available. */
+ spin_lock(&config_list_lock);
+ cld->cld_lostlock = 1;
+ spin_unlock(&config_list_lock);
+ }
+ }
- if (cld_is_recover(cld)) {
- rc = 0; /* this is not a fatal error for recover log */
- if (rcl == 0)
- rc = mgc_process_recover_log(mgc, cld);
- } else {
- rc = mgc_process_cfg_log(mgc, cld, rcl != 0);
- }
+ if (cld_is_recover(cld) || cld_is_nodemap(cld)) {
+ if (!rcl)
+ rc = mgc_process_recover_nodemap_log(mgc, cld);
+ else if (cld_is_nodemap(cld))
+ rc = rcl;
+
+ if (cld_is_recover(cld) && rc) {
+ if (!rcl) {
+ CERROR("%s: recover log %s failed, not fatal: rc = %d\n",
+ mgc->obd_name, cld->cld_logname, rc);
+ spin_lock(&config_list_lock);
+ cld->cld_lostlock = 1;
+ spin_unlock(&config_list_lock);
+ }
+ rc = 0; /* this is not a fatal error for recover log */
+ }
+ } else if (!cld_is_barrier(cld)) {
+ rc = mgc_process_cfg_log(mgc, cld, rcl != 0);
+ }
- CDEBUG(D_MGC, "%s: configuration from log '%s' %sed (%d).\n",
- mgc->obd_name, cld->cld_logname, rc ? "fail" : "succeed", rc);
+ CDEBUG(D_MGC, "%s: configuration from log '%s' %sed (%d).\n",
+ mgc->obd_name, cld->cld_logname, rc ? "fail" : "succeed", rc);
mutex_unlock(&cld->cld_lock);
- /* Now drop the lock so MGS can revoke it */
- if (!rcl) {
- rcl = mgc_cancel(mgc->u.cli.cl_mgc_mgsexp, NULL,
- LCK_CR, &lockh);
- if (rcl)
- CERROR("Can't drop cfg lock: %d\n", rcl);
- }
+ /* Now drop the lock so MGS can revoke it */
+ if (!rcl) {
+ rcl = mgc_cancel(mgc->u.cli.cl_mgc_mgsexp, LCK_CR, &lockh);
+ if (rcl)
+ CERROR("Can't drop cfg lock: %d\n", rcl);
+ }
- RETURN(rc);
+ /* requeue nodemap lock immediately if transfer was interrupted */
+ if (cld_is_nodemap(cld) && rc == -EAGAIN) {
+ mgc_requeue_add(cld);
+ rc = 0;
+ }
+
+ RETURN(rc);
}
* LCFG_LOG_START gets the config log from the MGS, processes it to start
* any services, and adds it to the list logs to watch (follow).
*/
-static int mgc_process_config(struct obd_device *obd, obd_count len, void *buf)
+static int mgc_process_config(struct obd_device *obd, size_t len, void *buf)
{
struct lustre_cfg *lcfg = buf;
struct config_llog_instance *cfg = NULL;
cfg->cfg_last_idx);
/* We're only called through here on the initial mount */
- rc = config_log_add(obd, logname, cfg, sb);
- if (rc)
- break;
- cld = config_log_find(logname, cfg);
- if (cld == NULL) {
- rc = -ENOENT;
- break;
- }
+ cld = config_log_add(obd, logname, cfg, sb);
+ if (IS_ERR(cld)) {
+ rc = PTR_ERR(cld);
+ break;
+ }
- /* COMPAT_146 */
- /* FIXME only set this for old logs! Right now this forces
- us to always skip the "inside markers" check */
- cld->cld_cfg.cfg_flags |= CFG_F_COMPAT146;
-
- rc = mgc_process_log(obd, cld);
- if (rc == 0 && cld->cld_recover != NULL) {
- if (OCD_HAS_FLAG(&obd->u.cli.cl_import->
- imp_connect_data, IMP_RECOV)) {
- rc = mgc_process_log(obd, cld->cld_recover);
- } else {
- struct config_llog_data *cir = cld->cld_recover;
- cld->cld_recover = NULL;
- config_log_put(cir);
- }
- if (rc)
- CERROR("Cannot process recover llog %d\n", rc);
- }
+ rc = mgc_process_log(obd, cld);
+ if (rc == 0 && cld->cld_recover != NULL) {
+ if (OCD_HAS_FLAG(&obd->u.cli.cl_import->
+ imp_connect_data, IMP_RECOV)) {
+ rc = mgc_process_log(obd, cld->cld_recover);
+ } else {
+ struct config_llog_data *cir;
+
+ mutex_lock(&cld->cld_lock);
+ cir = cld->cld_recover;
+ cld->cld_recover = NULL;
+ mutex_unlock(&cld->cld_lock);
+ config_log_put(cir);
+ }
+
+ if (rc)
+ CERROR("Cannot process recover llog %d\n", rc);
+ }
if (rc == 0 && cld->cld_params != NULL) {
rc = mgc_process_log(obd, cld->cld_params);
if (rc == -ENOENT) {
-			CDEBUG(D_MGC, "There is no params"
-				"config file yet\n");
+			CDEBUG(D_MGC,
+			       "There is no params config file yet\n");
rc = 0;
}
CERROR("%s: can't process params llog: rc = %d\n",
obd->obd_name, rc);
}
- config_log_put(cld);
break;
}
RETURN(rc);
}
-struct obd_ops mgc_obd_ops = {
+static struct obd_ops mgc_obd_ops = {
.o_owner = THIS_MODULE,
.o_setup = mgc_setup,
.o_precleanup = mgc_precleanup,
.o_del_conn = client_import_del_conn,
.o_connect = client_connect_import,
.o_disconnect = client_disconnect_export,
- //.o_enqueue = mgc_enqueue,
- .o_cancel = mgc_cancel,
- //.o_iocontrol = mgc_iocontrol,
.o_set_info_async = mgc_set_info_async,
.o_get_info = mgc_get_info,
.o_import_event = mgc_import_event,
.o_process_config = mgc_process_config,
};
-int __init mgc_init(void)
+static int __init mgc_init(void)
{
- return class_register_type(&mgc_obd_ops, NULL, NULL,
- LUSTRE_MGC_NAME, NULL);
+ return class_register_type(&mgc_obd_ops, NULL, false, NULL,
+ LUSTRE_MGC_NAME, NULL);
}
-#ifdef __KERNEL__
-static void /*__exit*/ mgc_exit(void)
+static void __exit mgc_exit(void)
{
class_unregister_type(LUSTRE_MGC_NAME);
}
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Management Client");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
module_init(mgc_init);
module_exit(mgc_exit);
-#endif