X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fmgc%2Fmgc_request.c;h=456891a657ab9d7af9af8d6922de46b52ebe71ea;hp=32b000e39e097af3e1727df5665ab778a01d319a;hb=546993d587c5fc380e9745eae98f863e02e68575;hpb=8931d9070415e808e09bb4befd7cd38ef2431149 diff --git a/lustre/mgc/mgc_request.c b/lustre/mgc/mgc_request.c index 32b000e..456891a 100644 --- a/lustre/mgc/mgc_request.c +++ b/lustre/mgc/mgc_request.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -27,7 +23,7 @@ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2013, Intel Corporation. + * Copyright (c) 2011, 2017, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -42,12 +38,18 @@ #define D_MGC D_CONFIG /*|D_WARNING*/ #include -#include -#include +#include +#include + +#include #include -#include +#include #include -#include +#include +#include +#include +#include +#include #include "mgc_internal.h" @@ -77,13 +79,15 @@ static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id, break; case CONFIG_T_RECOVER: case CONFIG_T_PARAMS: - resname = type; - break; + case CONFIG_T_NODEMAP: + case CONFIG_T_BARRIER: + resname = type; + break; default: LBUG(); } res_id->name[1] = cpu_to_le64(resname); - CDEBUG(D_MGC, "log %s to resid "LPX64"/"LPX64" (%.8s)\n", name, + CDEBUG(D_MGC, "log %s to resid %#llx/%#llx (%.8s)\n", name, res_id->name[0], res_id->name[1], (char *)&res_id->name[0]); return 0; } @@ -111,10 +115,11 @@ int mgc_logname2resid(char *logname, struct ldlm_res_id *res_id, int type) len = name_end - logname; return mgc_name2resid(logname, len, res_id, type); } +EXPORT_SYMBOL(mgc_logname2resid); /********************** config llog list **********************/ -static CFS_LIST_HEAD(config_llog_list); -static DEFINE_SPINLOCK(config_list_lock); +static LIST_HEAD(config_llog_list); +static DEFINE_SPINLOCK(config_list_lock); /* protects config_llog_list */ /* Take a reference to a config log */ static int config_log_get(struct config_llog_data *cld) @@ -132,31 +137,33 @@ static void config_log_put(struct config_llog_data *cld) { ENTRY; + if (unlikely(!cld)) + RETURN_EXIT; + CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname, atomic_read(&cld->cld_refcount)); LASSERT(atomic_read(&cld->cld_refcount) > 0); /* spinlock to make sure no item with 0 refcount in the list */ if (atomic_dec_and_lock(&cld->cld_refcount, &config_list_lock)) { - cfs_list_del(&cld->cld_list_chain); + list_del(&cld->cld_list_chain); spin_unlock(&config_list_lock); - CDEBUG(D_MGC, "dropping config log %s\n", cld->cld_logname); + CDEBUG(D_MGC, "dropping config log %s\n", cld->cld_logname); - if (cld->cld_recover) - config_log_put(cld->cld_recover); - if (cld->cld_sptlrpc) - config_log_put(cld->cld_sptlrpc); - if (cld->cld_params) - config_log_put(cld->cld_params); - if (cld_is_sptlrpc(cld)) - sptlrpc_conf_log_stop(cld->cld_logname); + config_log_put(cld->cld_barrier); + config_log_put(cld->cld_recover); + config_log_put(cld->cld_params); + config_log_put(cld->cld_nodemap); + config_log_put(cld->cld_sptlrpc); 
+ if (cld_is_sptlrpc(cld)) + sptlrpc_conf_log_stop(cld->cld_logname); - class_export_put(cld->cld_mgcexp); - OBD_FREE(cld, sizeof(*cld) + strlen(cld->cld_logname) + 1); - } + class_export_put(cld->cld_mgcexp); + OBD_FREE(cld, sizeof(*cld) + strlen(cld->cld_logname) + 1); + } - EXIT; + EXIT; } /* Find a config log by name */ @@ -164,51 +171,55 @@ static struct config_llog_data *config_log_find(char *logname, struct config_llog_instance *cfg) { - struct config_llog_data *cld; - struct config_llog_data *found = NULL; - void * instance; - ENTRY; + struct config_llog_data *cld; + struct config_llog_data *found = NULL; + unsigned long cfg_instance; - LASSERT(logname != NULL); + ENTRY; + LASSERT(logname != NULL); - instance = cfg ? cfg->cfg_instance : NULL; + cfg_instance = cfg ? cfg->cfg_instance : 0; spin_lock(&config_list_lock); - cfs_list_for_each_entry(cld, &config_llog_list, cld_list_chain) { - /* check if instance equals */ - if (instance != cld->cld_cfg.cfg_instance) - continue; + list_for_each_entry(cld, &config_llog_list, cld_list_chain) { + /* check if cfg_instance is the one we want */ + if (cfg_instance != cld->cld_cfg.cfg_instance) + continue; /* instance may be NULL, should check name */ if (strcmp(logname, cld->cld_logname) == 0) { found = cld; + config_log_get(found); break; } } - if (found) { - atomic_inc(&found->cld_refcount); - LASSERT(found->cld_stopping == 0 || cld_is_sptlrpc(found) == 0); - } spin_unlock(&config_list_lock); RETURN(found); } static struct config_llog_data *do_config_log_add(struct obd_device *obd, - char *logname, - int type, - struct config_llog_instance *cfg, - struct super_block *sb) + char *logname, + int type, + struct config_llog_instance *cfg, + struct super_block *sb) { - struct config_llog_data *cld; - int rc; - ENTRY; + struct config_llog_data *cld; + int rc; - CDEBUG(D_MGC, "do adding config log %s:%p\n", logname, - cfg ? cfg->cfg_instance : 0); + ENTRY; - OBD_ALLOC(cld, sizeof(*cld) + strlen(logname) + 1); - if (!cld) - RETURN(ERR_PTR(-ENOMEM)); + CDEBUG(D_MGC, "do adding config log %s-%016lx\n", logname, + cfg ? 
cfg->cfg_instance : 0); + + OBD_ALLOC(cld, sizeof(*cld) + strlen(logname) + 1); + if (!cld) + RETURN(ERR_PTR(-ENOMEM)); + + rc = mgc_logname2resid(logname, &cld->cld_resid, type); + if (rc) { + OBD_FREE(cld, sizeof(*cld) + strlen(cld->cld_logname) + 1); + RETURN(ERR_PTR(rc)); + } strcpy(cld->cld_logname, logname); if (cfg) @@ -222,80 +233,76 @@ struct config_llog_data *do_config_log_add(struct obd_device *obd, cld->cld_type = type; atomic_set(&cld->cld_refcount, 1); - /* Keep the mgc around until we are done */ - cld->cld_mgcexp = class_export_get(obd->obd_self_export); - - if (cld_is_sptlrpc(cld)) { - sptlrpc_conf_log_start(logname); - cld->cld_cfg.cfg_obdname = obd->obd_name; - } + /* Keep the mgc around until we are done */ + cld->cld_mgcexp = class_export_get(obd->obd_self_export); - rc = mgc_logname2resid(logname, &cld->cld_resid, type); + if (cld_is_sptlrpc(cld)) + sptlrpc_conf_log_start(logname); spin_lock(&config_list_lock); - cfs_list_add(&cld->cld_list_chain, &config_llog_list); + list_add(&cld->cld_list_chain, &config_llog_list); spin_unlock(&config_list_lock); - if (rc) { - config_log_put(cld); - RETURN(ERR_PTR(rc)); - } - - if (cld_is_sptlrpc(cld)) { - rc = mgc_process_log(obd, cld); + if (cld_is_sptlrpc(cld) || cld_is_nodemap(cld) || cld_is_barrier(cld)) { + rc = mgc_process_log(obd, cld); if (rc && rc != -ENOENT) - CERROR("failed processing sptlrpc log: %d\n", rc); - } + CERROR("%s: failed processing log, type %d: rc = %d\n", + obd->obd_name, type, rc); + } - RETURN(cld); + RETURN(cld); } static struct config_llog_data *config_recover_log_add(struct obd_device *obd, - char *fsname, - struct config_llog_instance *cfg, - struct super_block *sb) + char *fsname, + struct config_llog_instance *cfg, + struct super_block *sb) { - struct config_llog_instance lcfg = *cfg; - struct lustre_sb_info *lsi = s2lsi(sb); - struct config_llog_data *cld; - char logname[32]; + struct config_llog_instance lcfg = *cfg; + struct lustre_sb_info *lsi = s2lsi(sb); + struct config_llog_data *cld; + char logname[32]; if (IS_OST(lsi)) - return NULL; + return NULL; /* for osp-on-ost, see lustre_start_osp() */ if (IS_MDT(lsi) && lcfg.cfg_instance) return NULL; - /* we have to use different llog for clients and mdts for cmd - * where only clients are notified if one of cmd server restarts */ - LASSERT(strlen(fsname) < sizeof(logname) / 2); - strcpy(logname, fsname); + /* We have to use different llog for clients and MDTs for DNE, + * where only clients are notified if one of DNE server restarts. 
+ */ + LASSERT(strlen(fsname) < sizeof(logname) / 2); + strncpy(logname, fsname, sizeof(logname)); if (IS_SERVER(lsi)) { /* mdt */ - LASSERT(lcfg.cfg_instance == NULL); - lcfg.cfg_instance = sb; - strcat(logname, "-mdtir"); - } else { - LASSERT(lcfg.cfg_instance != NULL); - strcat(logname, "-cliir"); - } + LASSERT(lcfg.cfg_instance == 0); + lcfg.cfg_instance = ll_get_cfg_instance(sb); + strncat(logname, "-mdtir", sizeof(logname)); + } else { + LASSERT(lcfg.cfg_instance != 0); + strncat(logname, "-cliir", sizeof(logname)); + } - cld = do_config_log_add(obd, logname, CONFIG_T_RECOVER, &lcfg, sb); - return cld; + cld = do_config_log_add(obd, logname, CONFIG_T_RECOVER, &lcfg, sb); + return cld; } -static struct config_llog_data *config_params_log_add(struct obd_device *obd, - struct config_llog_instance *cfg, struct super_block *sb) +static struct config_llog_data *config_log_find_or_add(struct obd_device *obd, + char *logname, struct super_block *sb, int type, + struct config_llog_instance *cfg) { - struct config_llog_instance lcfg = *cfg; - struct config_llog_data *cld; + struct config_llog_instance lcfg = *cfg; + struct config_llog_data *cld; - lcfg.cfg_instance = sb; + /* Note class_config_llog_handler() depends on getting "obd" back */ + lcfg.cfg_instance = sb ? ll_get_cfg_instance(sb) : (unsigned long)obd; - cld = do_config_log_add(obd, PARAMS_FILENAME, CONFIG_T_PARAMS, - &lcfg, sb); + cld = config_log_find(logname, &lcfg); + if (unlikely(cld != NULL)) + return cld; - return cld; + return do_config_log_add(obd, logname, type, &lcfg, sb); } /** Add this log to the list of active logs watched by an MGC. @@ -303,200 +310,272 @@ static struct config_llog_data *config_params_log_add(struct obd_device *obd, * We have one active log per "mount" - client instance or servername. * Each instance may be at a different point in the log. */ -static int config_log_add(struct obd_device *obd, char *logname, - struct config_llog_instance *cfg, - struct super_block *sb) +static struct config_llog_data * +config_log_add(struct obd_device *obd, char *logname, + struct config_llog_instance *cfg, struct super_block *sb) { - struct lustre_sb_info *lsi = s2lsi(sb); - struct config_llog_data *cld; - struct config_llog_data *sptlrpc_cld; - struct config_llog_data *params_cld; - char seclogname[32]; - char *ptr; - int rc; + struct lustre_sb_info *lsi = s2lsi(sb); + struct config_llog_data *cld = NULL; + struct config_llog_data *sptlrpc_cld = NULL; + struct config_llog_data *params_cld = NULL; + struct config_llog_data *nodemap_cld = NULL; + struct config_llog_data *barrier_cld = NULL; + char seclogname[32]; + char *ptr; + int rc; + bool locked = false; ENTRY; - CDEBUG(D_MGC, "adding config log %s:%p\n", logname, cfg->cfg_instance); + CDEBUG(D_MGC, "add config log %s-%016lx\n", logname, + cfg->cfg_instance); - /* - * for each regular log, the depended sptlrpc log name is - * -sptlrpc. multiple regular logs may share one sptlrpc log. - */ - ptr = strrchr(logname, '-'); - if (ptr == NULL || ptr - logname > 8) { - CERROR("logname %s is too long\n", logname); - RETURN(-EINVAL); - } + /* + * for each regular log, the depended sptlrpc log name is + * -sptlrpc. multiple regular logs may share one sptlrpc log. 
+ */ + ptr = strrchr(logname, '-'); + if (ptr == NULL || ptr - logname > 8) { + CERROR("logname %s is too long\n", logname); + RETURN(ERR_PTR(-EINVAL)); + } - memcpy(seclogname, logname, ptr - logname); - strcpy(seclogname + (ptr - logname), "-sptlrpc"); + memcpy(seclogname, logname, ptr - logname); + strcpy(seclogname + (ptr - logname), "-sptlrpc"); - sptlrpc_cld = config_log_find(seclogname, NULL); - if (sptlrpc_cld == NULL) { - sptlrpc_cld = do_config_log_add(obd, seclogname, - CONFIG_T_SPTLRPC, NULL, NULL); - if (IS_ERR(sptlrpc_cld)) { - CERROR("can't create sptlrpc log: %s\n", seclogname); - GOTO(out_err, rc = PTR_ERR(sptlrpc_cld)); - } - } - params_cld = config_params_log_add(obd, cfg, sb); - if (IS_ERR(params_cld)) { - rc = PTR_ERR(params_cld); - CERROR("%s: can't create params log: rc = %d\n", - obd->obd_name, rc); - GOTO(out_err1, rc); + if (cfg->cfg_sub_clds & CONFIG_SUB_SPTLRPC) { + sptlrpc_cld = config_log_find_or_add(obd, seclogname, NULL, + CONFIG_T_SPTLRPC, cfg); + if (IS_ERR(sptlrpc_cld)) { + CERROR("%s: can't create sptlrpc log %s: rc = %ld\n", + obd->obd_name, seclogname, PTR_ERR(sptlrpc_cld)); + RETURN(sptlrpc_cld); + } + } + + if (!IS_MGS(lsi) && cfg->cfg_sub_clds & CONFIG_SUB_NODEMAP) { + nodemap_cld = config_log_find_or_add(obd, LUSTRE_NODEMAP_NAME, + NULL, CONFIG_T_NODEMAP, + cfg); + if (IS_ERR(nodemap_cld)) { + rc = PTR_ERR(nodemap_cld); + CERROR("%s: cannot create nodemap log: rc = %d\n", + obd->obd_name, rc); + GOTO(out_sptlrpc, rc); + } + } + + if (cfg->cfg_sub_clds & CONFIG_SUB_PARAMS) { + params_cld = config_log_find_or_add(obd, PARAMS_FILENAME, sb, + CONFIG_T_PARAMS, cfg); + if (IS_ERR(params_cld)) { + rc = PTR_ERR(params_cld); + CERROR("%s: can't create params log: rc = %d\n", + obd->obd_name, rc); + GOTO(out_nodemap, rc); + } + } + + if (IS_MDT(s2lsi(sb)) && cfg->cfg_sub_clds & CONFIG_SUB_BARRIER) { + snprintf(seclogname + (ptr - logname), sizeof(seclogname) - 1, + "-%s", BARRIER_FILENAME); + barrier_cld = config_log_find_or_add(obd, seclogname, sb, + CONFIG_T_BARRIER, cfg); + if (IS_ERR(barrier_cld)) { + rc = PTR_ERR(barrier_cld); + CERROR("%s: can't create barrier log: rc = %d\n", + obd->obd_name, rc); + GOTO(out_params, rc); + } } cld = do_config_log_add(obd, logname, CONFIG_T_CONFIG, cfg, sb); if (IS_ERR(cld)) { - CERROR("can't create log: %s\n", logname); - GOTO(out_err2, rc = PTR_ERR(cld)); + rc = PTR_ERR(cld); + CERROR("%s: can't create log: rc = %d\n", + obd->obd_name, rc); + GOTO(out_barrier, rc = PTR_ERR(cld)); } - cld->cld_sptlrpc = sptlrpc_cld; - cld->cld_params = params_cld; + LASSERT(lsi->lsi_lmd); + if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR) && + cfg->cfg_sub_clds & CONFIG_SUB_RECOVER) { + struct config_llog_data *recover_cld; - LASSERT(lsi->lsi_lmd); - if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)) { - struct config_llog_data *recover_cld; ptr = strrchr(seclogname, '-'); if (ptr != NULL) { *ptr = 0; + } else { + CERROR("%s: sptlrpc log name not correct, %s: " + "rc = %d\n", obd->obd_name, seclogname, -EINVAL); + GOTO(out_cld, rc = -EINVAL); } - else { - CERROR("sptlrpc log name not correct: %s", seclogname); - config_log_put(cld); - RETURN(-EINVAL); + + recover_cld = config_recover_log_add(obd, seclogname, cfg, sb); + if (IS_ERR(recover_cld)) { + rc = PTR_ERR(recover_cld); + CERROR("%s: can't create recover log: rc = %d\n", + obd->obd_name, rc); + GOTO(out_cld, rc); } - recover_cld = config_recover_log_add(obd, seclogname, cfg, sb); - if (IS_ERR(recover_cld)) - GOTO(out_err3, rc = PTR_ERR(recover_cld)); + + mutex_lock(&cld->cld_lock); + 
locked = true; cld->cld_recover = recover_cld; } - RETURN(0); + if (!locked) + mutex_lock(&cld->cld_lock); + cld->cld_params = params_cld; + cld->cld_barrier = barrier_cld; + cld->cld_nodemap = nodemap_cld; + cld->cld_sptlrpc = sptlrpc_cld; + mutex_unlock(&cld->cld_lock); -out_err3: - config_log_put(cld); + RETURN(cld); -out_err2: +out_cld: + config_log_put(cld); +out_barrier: + config_log_put(barrier_cld); +out_params: config_log_put(params_cld); - -out_err1: +out_nodemap: + config_log_put(nodemap_cld); +out_sptlrpc: config_log_put(sptlrpc_cld); -out_err: - RETURN(rc); + return ERR_PTR(rc); } -DEFINE_MUTEX(llog_process_lock); +static inline void config_mark_cld_stop(struct config_llog_data *cld) +{ + if (cld) { + mutex_lock(&cld->cld_lock); + spin_lock(&config_list_lock); + cld->cld_stopping = 1; + spin_unlock(&config_list_lock); + mutex_unlock(&cld->cld_lock); + } +} /** Stop watching for updates on this log. */ static int config_log_end(char *logname, struct config_llog_instance *cfg) { - struct config_llog_data *cld; - struct config_llog_data *cld_sptlrpc = NULL; + struct config_llog_data *cld; + struct config_llog_data *cld_sptlrpc = NULL; struct config_llog_data *cld_params = NULL; - struct config_llog_data *cld_recover = NULL; - int rc = 0; - ENTRY; + struct config_llog_data *cld_recover = NULL; + struct config_llog_data *cld_nodemap = NULL; + struct config_llog_data *cld_barrier = NULL; + int rc = 0; + + ENTRY; - cld = config_log_find(logname, cfg); - if (cld == NULL) - RETURN(-ENOENT); + cld = config_log_find(logname, cfg); + if (cld == NULL) + RETURN(-ENOENT); mutex_lock(&cld->cld_lock); - /* - * if cld_stopping is set, it means we didn't start the log thus - * not owning the start ref. this can happen after previous umount: - * the cld still hanging there waiting for lock cancel, and we - * remount again but failed in the middle and call log_end without - * calling start_log. - */ - if (unlikely(cld->cld_stopping)) { + /* + * if cld_stopping is set, it means we didn't start the log thus + * not owning the start ref. this can happen after previous umount: + * the cld still hanging there waiting for lock cancel, and we + * remount again but failed in the middle and call log_end without + * calling start_log. 
+ */ + if (unlikely(cld->cld_stopping)) { mutex_unlock(&cld->cld_lock); - /* drop the ref from the find */ - config_log_put(cld); - RETURN(rc); - } - - cld->cld_stopping = 1; - - cld_recover = cld->cld_recover; - cld->cld_recover = NULL; - mutex_unlock(&cld->cld_lock); - - if (cld_recover) { - mutex_lock(&cld_recover->cld_lock); - cld_recover->cld_stopping = 1; - mutex_unlock(&cld_recover->cld_lock); - config_log_put(cld_recover); + /* drop the ref from the find */ + config_log_put(cld); + RETURN(rc); } spin_lock(&config_list_lock); - cld_sptlrpc = cld->cld_sptlrpc; - cld->cld_sptlrpc = NULL; + cld->cld_stopping = 1; + spin_unlock(&config_list_lock); + + cld_recover = cld->cld_recover; + cld->cld_recover = NULL; cld_params = cld->cld_params; cld->cld_params = NULL; - spin_unlock(&config_list_lock); + cld_nodemap = cld->cld_nodemap; + cld->cld_nodemap = NULL; + cld_barrier = cld->cld_barrier; + cld->cld_barrier = NULL; + cld_sptlrpc = cld->cld_sptlrpc; + cld->cld_sptlrpc = NULL; + mutex_unlock(&cld->cld_lock); + + config_mark_cld_stop(cld_recover); + config_log_put(cld_recover); - if (cld_sptlrpc) - config_log_put(cld_sptlrpc); + config_mark_cld_stop(cld_params); + config_log_put(cld_params); - if (cld_params) { - mutex_lock(&cld_params->cld_lock); - cld_params->cld_stopping = 1; - mutex_unlock(&cld_params->cld_lock); - config_log_put(cld_params); + /* don't set cld_stopping on nm lock as other targets may be active */ + config_log_put(cld_nodemap); + + if (cld_barrier) { + mutex_lock(&cld_barrier->cld_lock); + cld_barrier->cld_stopping = 1; + mutex_unlock(&cld_barrier->cld_lock); + config_log_put(cld_barrier); } - /* drop the ref from the find */ - config_log_put(cld); - /* drop the start ref */ - config_log_put(cld); + config_log_put(cld_sptlrpc); - CDEBUG(D_MGC, "end config log %s (%d)\n", logname ? logname : "client", - rc); - RETURN(rc); + /* drop the ref from the find */ + config_log_put(cld); + /* drop the start ref */ + config_log_put(cld); + + CDEBUG(D_MGC, "end config log %s (%d)\n", logname ? logname : "client", + rc); + RETURN(rc); } -#ifdef LPROCFS int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data) { - struct obd_device *obd = data; - struct obd_import *imp = obd->u.cli.cl_import; - struct obd_connect_data *ocd = &imp->imp_connect_data; - struct config_llog_data *cld; - ENTRY; + struct obd_device *obd = data; + struct obd_import *imp; + struct obd_connect_data *ocd; + struct config_llog_data *cld; + + ENTRY; + LASSERT(obd); + LPROCFS_CLIMP_CHECK(obd); + imp = obd->u.cli.cl_import; + ocd = &imp->imp_connect_data; seq_printf(m, "imperative_recovery: %s\n", OCD_HAS_FLAG(ocd, IMP_RECOV) ? 
"ENABLED" : "DISABLED"); seq_printf(m, "client_state:\n"); spin_lock(&config_list_lock); - cfs_list_for_each_entry(cld, &config_llog_list, cld_list_chain) { + list_for_each_entry(cld, &config_llog_list, cld_list_chain) { if (cld->cld_recover == NULL) continue; seq_printf(m, " - { client: %s, nidtbl_version: %u }\n", - cld->cld_logname, - cld->cld_recover->cld_cfg.cfg_last_idx); + cld->cld_logname, + cld->cld_recover->cld_cfg.cfg_last_idx); } spin_unlock(&config_list_lock); + LPROCFS_CLIMP_EXIT(obd); RETURN(0); } -#endif /* reenqueue any lost locks */ -#define RQ_RUNNING 0x1 -#define RQ_NOW 0x2 -#define RQ_LATER 0x4 -#define RQ_STOP 0x8 +#define RQ_RUNNING 0x1 +#define RQ_NOW 0x2 +#define RQ_LATER 0x4 +#define RQ_STOP 0x8 +#define RQ_PRECLEANUP 0x10 static int rq_state = 0; static wait_queue_head_t rq_waitq; static DECLARE_COMPLETION(rq_exit); +static DECLARE_COMPLETION(rq_start); static void do_requeue(struct config_llog_data *cld) { @@ -505,10 +584,13 @@ static void do_requeue(struct config_llog_data *cld) LASSERT(atomic_read(&cld->cld_refcount) > 0); - /* Do not run mgc_process_log on a disconnected export or an + /* + * Do not run mgc_process_log on a disconnected export or an * export which is being disconnected. Take the client - * semaphore to make the check non-racy. */ - down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem); + * semaphore to make the check non-racy. + */ + down_read_nested(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem, + OBD_CLI_SEM_MGC); if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) { CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname); rc = mgc_process_log(cld->cld_mgcexp->exp_obd, cld); @@ -528,86 +610,85 @@ static void do_requeue(struct config_llog_data *cld) * in order to not flood the MGS. */ #define MGC_TIMEOUT_MIN_SECONDS 5 -#define MGC_TIMEOUT_RAND_CENTISEC 0x1ff /* ~500 */ +#define MGC_TIMEOUT_RAND_CENTISEC 500 static int mgc_requeue_thread(void *data) { - int rc = 0; - ENTRY; + int rc = 0; + bool first = true; + ENTRY; - CDEBUG(D_MGC, "Starting requeue thread\n"); + CDEBUG(D_MGC, "Starting requeue thread\n"); - /* Keep trying failed locks periodically */ + /* Keep trying failed locks periodically */ spin_lock(&config_list_lock); rq_state |= RQ_RUNNING; - while (1) { + while (!(rq_state & RQ_STOP)) { struct l_wait_info lwi; struct config_llog_data *cld, *cld_prev; - int rand = cfs_rand() & MGC_TIMEOUT_RAND_CENTISEC; - int stopped = !!(rq_state & RQ_STOP); + int rand = prandom_u32_max(MGC_TIMEOUT_RAND_CENTISEC); int to; /* Any new or requeued lostlocks will change the state */ rq_state &= ~(RQ_NOW | RQ_LATER); spin_unlock(&config_list_lock); + if (first) { + first = false; + complete(&rq_start); + } + /* Always wait a few seconds to allow the server who caused the lock revocation to finish its setup, plus some random so everyone doesn't try to reconnect at once. */ - to = MGC_TIMEOUT_MIN_SECONDS * HZ; - to += rand * HZ / 100; /* rand is centi-seconds */ - lwi = LWI_TIMEOUT(to, NULL, NULL); - l_wait_event(rq_waitq, rq_state & RQ_STOP, &lwi); + to = cfs_time_seconds(MGC_TIMEOUT_MIN_SECONDS * 100 + rand); + /* rand is centi-seconds */ + lwi = LWI_TIMEOUT(to / 100, NULL, NULL); + l_wait_event(rq_waitq, rq_state & (RQ_STOP | RQ_PRECLEANUP), + &lwi); - /* - * iterate & processing through the list. for each cld, process - * its depending sptlrpc cld firstly (if any) and then itself. - * - * it's guaranteed any item in the list must have - * reference > 0; and if cld_lostlock is set, at - * least one reference is taken by the previous enqueue. 
- */ - cld_prev = NULL; + /* + * iterate & processing through the list. for each cld, process + * its depending sptlrpc cld firstly (if any) and then itself. + * + * it's guaranteed any item in the list must have + * reference > 0; and if cld_lostlock is set, at + * least one reference is taken by the previous enqueue. + */ + cld_prev = NULL; spin_lock(&config_list_lock); - cfs_list_for_each_entry(cld, &config_llog_list, - cld_list_chain) { - if (!cld->cld_lostlock) + rq_state &= ~RQ_PRECLEANUP; + list_for_each_entry(cld, &config_llog_list, + cld_list_chain) { + if (!cld->cld_lostlock || cld->cld_stopping) continue; + /* hold reference to avoid being freed during + * subsequent processing. */ + config_log_get(cld); + cld->cld_lostlock = 0; spin_unlock(&config_list_lock); - LASSERT(atomic_read(&cld->cld_refcount) > 0); - - /* Whether we enqueued again or not in mgc_process_log, - * we're done with the ref from the old enqueue */ - if (cld_prev) - config_log_put(cld_prev); + config_log_put(cld_prev); cld_prev = cld; - cld->cld_lostlock = 0; - if (likely(!stopped)) + if (likely(!(rq_state & RQ_STOP))) { do_requeue(cld); - - spin_lock(&config_list_lock); + spin_lock(&config_list_lock); + } else { + spin_lock(&config_list_lock); + break; + } } spin_unlock(&config_list_lock); - if (cld_prev) - config_log_put(cld_prev); - - /* break after scanning the list so that we can drop - * refcount to losing lock clds */ - if (unlikely(stopped)) { - spin_lock(&config_list_lock); - break; - } + config_log_put(cld_prev); /* Wait a bit to see if anyone else needs a requeue */ - lwi = (struct l_wait_info) { 0 }; - l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP), - &lwi); + wait_event_idle(rq_waitq, rq_state & (RQ_NOW | RQ_STOP)); spin_lock(&config_list_lock); } + /* spinlock and while guarantee RQ_NOW and RQ_LATER are not set */ rq_state &= ~RQ_RUNNING; spin_unlock(&config_list_lock); @@ -622,6 +703,7 @@ static int mgc_requeue_thread(void *data) We are responsible for dropping the config log reference from here on out. */ static void mgc_requeue_add(struct config_llog_data *cld) { + bool wakeup = false; ENTRY; CDEBUG(D_INFO, "log %s: requeue (r=%d sp=%d st=%x)\n", @@ -630,26 +712,17 @@ static void mgc_requeue_add(struct config_llog_data *cld) LASSERT(atomic_read(&cld->cld_refcount) > 0); mutex_lock(&cld->cld_lock); - if (cld->cld_stopping || cld->cld_lostlock) { - mutex_unlock(&cld->cld_lock); - RETURN_EXIT; - } - /* this refcount will be released in mgc_requeue_thread. 
*/ - config_log_get(cld); - cld->cld_lostlock = 1; - mutex_unlock(&cld->cld_lock); - - /* Hold lock for rq_state */ spin_lock(&config_list_lock); - if (rq_state & RQ_STOP) { - spin_unlock(&config_list_lock); - cld->cld_lostlock = 0; - config_log_put(cld); - } else { + if (!(rq_state & RQ_STOP) && !cld->cld_stopping && !cld->cld_lostlock) { + cld->cld_lostlock = 1; rq_state |= RQ_NOW; - spin_unlock(&config_list_lock); - wake_up(&rq_waitq); + wakeup = true; } + spin_unlock(&config_list_lock); + mutex_unlock(&cld->cld_lock); + if (wakeup) + wake_up(&rq_waitq); + EXIT; } @@ -689,13 +762,13 @@ static int mgc_local_llog_fini(const struct lu_env *env, RETURN(0); } -static int mgc_fs_setup(struct obd_device *obd, struct super_block *sb) +static int mgc_fs_setup(const struct lu_env *env, struct obd_device *obd, + struct super_block *sb) { struct lustre_sb_info *lsi = s2lsi(sb); struct client_obd *cli = &obd->u.cli; struct lu_fid rfid, fid; struct dt_object *root, *dto; - struct lu_env *env; int rc = 0; ENTRY; @@ -703,31 +776,21 @@ static int mgc_fs_setup(struct obd_device *obd, struct super_block *sb) LASSERT(lsi); LASSERT(lsi->lsi_dt_dev); - OBD_ALLOC_PTR(env); - if (env == NULL) - RETURN(-ENOMEM); - - /* The mgc fs exclusion sem. Only one fs can be setup at a time. */ - down(&cli->cl_mgc_sem); - - cfs_cleanup_group_info(); + /* The mgc fs exclusion mutex. Only one fs can be setup at a time. */ + mutex_lock(&cli->cl_mgc_mutex); /* Setup the configs dir */ - rc = lu_env_init(env, LCT_MG_THREAD); - if (rc) - GOTO(out_err, rc); - fid.f_seq = FID_SEQ_LOCAL_NAME; fid.f_oid = 1; fid.f_ver = 0; rc = local_oid_storage_init(env, lsi->lsi_dt_dev, &fid, &cli->cl_mgc_los); if (rc) - GOTO(out_env, rc); + RETURN(rc); rc = dt_root_get(env, lsi->lsi_dt_dev, &rfid); if (rc) - GOTO(out_env, rc); + GOTO(out_los, rc); root = dt_locate_at(env, lsi->lsi_dt_dev, &rfid, &cli->cl_mgc_los->los_dev->dd_lu_dev, NULL); @@ -737,7 +800,7 @@ static int mgc_fs_setup(struct obd_device *obd, struct super_block *sb) dto = local_file_find_or_create(env, cli->cl_mgc_los, root, MOUNT_CONFIGS_DIR, S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO); - lu_object_put_nocache(env, &root->do_lu); + dt_object_put_nocache(env, root); if (IS_ERR(dto)) GOTO(out_los, rc = PTR_ERR(dto)); @@ -756,48 +819,35 @@ static int mgc_fs_setup(struct obd_device *obd, struct super_block *sb) EXIT; out_llog: if (rc) { - lu_object_put(env, &cli->cl_mgc_configs_dir->do_lu); + dt_object_put(env, cli->cl_mgc_configs_dir); cli->cl_mgc_configs_dir = NULL; } out_los: if (rc < 0) { local_oid_storage_fini(env, cli->cl_mgc_los); cli->cl_mgc_los = NULL; - up(&cli->cl_mgc_sem); + mutex_unlock(&cli->cl_mgc_mutex); } -out_env: - lu_env_fini(env); -out_err: - OBD_FREE_PTR(env); return rc; } -static int mgc_fs_cleanup(struct obd_device *obd) +static int mgc_fs_cleanup(const struct lu_env *env, struct obd_device *obd) { - struct lu_env env; struct client_obd *cli = &obd->u.cli; - int rc; - ENTRY; LASSERT(cli->cl_mgc_los != NULL); - rc = lu_env_init(&env, LCT_MG_THREAD); - if (rc) - GOTO(unlock, rc); - - mgc_local_llog_fini(&env, obd); + mgc_local_llog_fini(env, obd); - lu_object_put_nocache(&env, &cli->cl_mgc_configs_dir->do_lu); + dt_object_put_nocache(env, cli->cl_mgc_configs_dir); cli->cl_mgc_configs_dir = NULL; - local_oid_storage_fini(&env, cli->cl_mgc_los); + local_oid_storage_fini(env, cli->cl_mgc_los); cli->cl_mgc_los = NULL; - lu_env_fini(&env); -unlock: class_decref(obd, "mgc_fs", obd); - up(&cli->cl_mgc_sem); + mutex_unlock(&cli->cl_mgc_mutex); RETURN(0); } @@ -840,34 +890,34 
@@ static int mgc_llog_fini(const struct lu_env *env, struct obd_device *obd) static atomic_t mgc_count = ATOMIC_INIT(0); -static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) +static int mgc_precleanup(struct obd_device *obd) { - int rc = 0; + int rc = 0; + int temp; ENTRY; - switch (stage) { - case OBD_CLEANUP_EARLY: - break; - case OBD_CLEANUP_EXPORTS: - if (atomic_dec_and_test(&mgc_count)) { - int running; - /* stop requeue thread */ - spin_lock(&config_list_lock); - running = rq_state & RQ_RUNNING; - if (running) - rq_state |= RQ_STOP; - spin_unlock(&config_list_lock); - if (running) { - wake_up(&rq_waitq); - wait_for_completion(&rq_exit); - } - } - obd_cleanup_client_import(obd); - rc = mgc_llog_fini(NULL, obd); - if (rc != 0) - CERROR("failed to cleanup llogging subsystems\n"); - break; + if (atomic_dec_and_test(&mgc_count)) { + LASSERT(rq_state & RQ_RUNNING); + /* stop requeue thread */ + temp = RQ_STOP; + } else { + /* wakeup requeue thread to clean our cld */ + temp = RQ_NOW | RQ_PRECLEANUP; } + + spin_lock(&config_list_lock); + rq_state |= temp; + spin_unlock(&config_list_lock); + wake_up(&rq_waitq); + + if (temp & RQ_STOP) + wait_for_completion(&rq_exit); + obd_cleanup_client_import(obd); + + rc = mgc_llog_fini(NULL, obd); + if (rc != 0) + CERROR("failed to cleanup llogging subsystems\n"); + RETURN(rc); } @@ -891,7 +941,8 @@ static int mgc_cleanup(struct obd_device *obd) static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg) { - int rc; + struct task_struct *task; + int rc; ENTRY; rc = ptlrpcd_addref(); @@ -908,31 +959,32 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg) GOTO(err_cleanup, rc); } -#ifdef LPROCFS - obd->obd_vars = lprocfs_mgc_obd_vars; - lprocfs_seq_obd_setup(obd); -#endif - sptlrpc_lprocfs_cliobd_attach(obd); + rc = mgc_tunables_init(obd); + if (rc) + GOTO(err_sysfs, rc); if (atomic_inc_return(&mgc_count) == 1) { rq_state = 0; init_waitqueue_head(&rq_waitq); /* start requeue thread */ - rc = PTR_ERR(kthread_run(mgc_requeue_thread, NULL, - "ll_cfg_requeue")); - if (IS_ERR_VALUE(rc)) { - CERROR("%s: Cannot start requeue thread (%d)," - "no more log updates!\n", + task = kthread_run(mgc_requeue_thread, NULL, "ll_cfg_requeue"); + if (IS_ERR(task)) { + rc = PTR_ERR(task); + CERROR("%s: cannot start requeue thread: rc = %d; " + "no more log updates\n", obd->obd_name, rc); - GOTO(err_cleanup, rc); + GOTO(err_sysfs, rc); } /* rc is the task_struct pointer of mgc_requeue_thread. */ rc = 0; + wait_for_completion(&rq_start); } RETURN(rc); +err_sysfs: + lprocfs_obd_cleanup(obd); err_cleanup: client_obd_cleanup(obd); err_decref: @@ -971,6 +1023,8 @@ static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, /* held at mgc_process_log(). */ LASSERT(atomic_read(&cld->cld_refcount) > 0); + + lock->l_ast_data = NULL; /* Are we done with this log? 
*/ if (cld->cld_stopping) { CDEBUG(D_MGC, "log %s: stopping, won't requeue\n", @@ -980,8 +1034,8 @@ static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, } /* Make sure not to re-enqueue when the mgc is stopping (we get called from client_disconnect_export) */ - if (!lock->l_conn_export || - !lock->l_conn_export->exp_obd->u.cli.cl_conn_count) { + if (lock->l_conn_export == NULL || + lock->l_conn_export->exp_obd->u.cli.cl_conn_count == 0) { CDEBUG(D_MGC, "log %.8s: disconnecting, won't requeue\n", cld->cld_logname); config_log_put(cld); @@ -1000,10 +1054,15 @@ static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, } /* Not sure where this should go... */ -#define MGC_ENQUEUE_LIMIT 50 +/* This is the timeout value for MGS_CONNECT request plus a ping interval, such + * that we can have a chance to try the secondary MGS if any. */ +#define MGC_ENQUEUE_LIMIT (INITIAL_CONNECT_TIMEOUT + (AT_OFF ? 0 : at_min) \ + + PING_INTERVAL) #define MGC_TARGET_REG_LIMIT 10 +#define MGC_TARGET_REG_LIMIT_MAX RECONNECT_DELAY_MAX #define MGC_SEND_PARAM_LIMIT 10 +#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 13, 53, 0) /* Send parameter to MGS*/ static int mgc_set_mgs_param(struct obd_export *exp, struct mgs_send_param *msp) @@ -1040,11 +1099,12 @@ static int mgc_set_mgs_param(struct obd_export *exp, RETURN(rc); } +#endif /* Take a config lock so we can get cancel notifications */ -static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm, - __u32 type, ldlm_policy_data_t *policy, __u32 mode, - __u64 *flags, void *bl_cb, void *cp_cb, void *gl_cb, +static int mgc_enqueue(struct obd_export *exp, enum ldlm_type type, + union ldlm_policy_data *policy, enum ldlm_mode mode, + __u64 *flags, ldlm_glimpse_callback glimpse_callback, void *data, __u32 lvb_len, void *lvb_swabber, struct lustre_handle *lockh) { @@ -1054,13 +1114,14 @@ static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm, .ei_mode = mode, .ei_cb_bl = mgc_blocking_ast, .ei_cb_cp = ldlm_completion_ast, + .ei_cb_gl = glimpse_callback, }; struct ptlrpc_request *req; int short_limit = cld_is_sptlrpc(cld); int rc; ENTRY; - CDEBUG(D_MGC, "Enqueue for %s (res "LPX64")\n", cld->cld_logname, + CDEBUG(D_MGC, "Enqueue for %s (res %#llx)\n", cld->cld_logname, cld->cld_resid.name[0]); /* We need a callback for every lockholder, so don't try to @@ -1090,14 +1151,14 @@ static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm, RETURN(rc); } -static int mgc_cancel(struct obd_export *exp, struct lov_stripe_md *md, - __u32 mode, struct lustre_handle *lockh) +static int mgc_cancel(struct obd_export *exp, enum ldlm_mode mode, + struct lustre_handle *lockh) { - ENTRY; + ENTRY; - ldlm_lock_decref(lockh, mode); + ldlm_lock_decref(lockh, mode); - RETURN(0); + RETURN(0); } static void mgc_notify_active(struct obd_device *unused) @@ -1132,11 +1193,18 @@ static int mgc_target_register(struct obd_export *exp, RETURN(-ENOMEM); } - memcpy(req_mti, mti, sizeof(*req_mti)); - ptlrpc_request_set_replen(req); - CDEBUG(D_MGC, "register %s\n", mti->mti_svname); - /* Limit how long we will wait for the enqueue to complete */ - req->rq_delay_limit = MGC_TARGET_REG_LIMIT; + memcpy(req_mti, mti, sizeof(*req_mti)); + ptlrpc_request_set_replen(req); + CDEBUG(D_MGC, "register %s\n", mti->mti_svname); + /* Limit how long we will wait for the enqueue to complete */ + req->rq_delay_limit = MGC_TARGET_REG_LIMIT; + + /* if the target needs to regenerate the config log in MGS, it's better + * to use some longer 
limit to let MGC have time to change connection to + * another MGS (or try again with the same MGS) for the target (server) + * will fail and exit if the request expired due to delay limit. */ + if (mti->mti_flags & (LDD_F_UPDATE | LDD_F_NEED_INDEX)) + req->rq_delay_limit = MGC_TARGET_REG_LIMIT_MAX; rc = ptlrpc_queue_wait(req); if (!rc) { @@ -1151,31 +1219,36 @@ static int mgc_target_register(struct obd_export *exp, RETURN(rc); } -int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp, - obd_count keylen, void *key, obd_count vallen, - void *val, struct ptlrpc_request_set *set) +static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp, + u32 keylen, void *key, + u32 vallen, void *val, + struct ptlrpc_request_set *set) { int rc = -EINVAL; ENTRY; - /* Turn off initial_recov after we try all backup servers once */ - if (KEY_IS(KEY_INIT_RECOV_BACKUP)) { - struct obd_import *imp = class_exp2cliimp(exp); - int value; - if (vallen != sizeof(int)) - RETURN(-EINVAL); - value = *(int *)val; - CDEBUG(D_MGC, "InitRecov %s %d/d%d:i%d:r%d:or%d:%s\n", - imp->imp_obd->obd_name, value, - imp->imp_deactive, imp->imp_invalid, - imp->imp_replayable, imp->imp_obd->obd_replayable, - ptlrpc_import_state_name(imp->imp_state)); - /* Resurrect if we previously died */ - if ((imp->imp_state != LUSTRE_IMP_FULL && - imp->imp_state != LUSTRE_IMP_NEW) || value > 1) - ptlrpc_reconnect_import(imp); - RETURN(0); - } + /* Turn off initial_recov after we try all backup servers once */ + if (KEY_IS(KEY_INIT_RECOV_BACKUP)) { + struct obd_import *imp = class_exp2cliimp(exp); + int value; + if (vallen != sizeof(int)) + RETURN(-EINVAL); + value = *(int *)val; + CDEBUG(D_MGC, "InitRecov %s %d/d%d:i%d:r%d:or%d:%s\n", + imp->imp_obd->obd_name, value, + imp->imp_deactive, imp->imp_invalid, + imp->imp_replayable, imp->imp_obd->obd_replayable, + ptlrpc_import_state_name(imp->imp_state)); + /* Resurrect the import immediately if + * 1. we previously got disconnected, + * 2. 
value > 1 (at the same node with MGS) + * */ + if (imp->imp_state == LUSTRE_IMP_DISCON || value > 1) + ptlrpc_reconnect_import(imp); + + RETURN(0); + } + /* FIXME move this to mgc_process_config */ if (KEY_IS(KEY_REGISTER_TARGET)) { struct mgs_target_info *mti; @@ -1193,15 +1266,16 @@ int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp, if (vallen != sizeof(struct super_block)) RETURN(-EINVAL); - rc = mgc_fs_setup(exp->exp_obd, sb); + rc = mgc_fs_setup(env, exp->exp_obd, sb); RETURN(rc); } if (KEY_IS(KEY_CLEAR_FS)) { if (vallen != 0) RETURN(-EINVAL); - rc = mgc_fs_cleanup(exp->exp_obd); + rc = mgc_fs_cleanup(env, exp->exp_obd); RETURN(rc); } +#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 13, 53, 0) if (KEY_IS(KEY_SET_INFO)) { struct mgs_send_param *msp; @@ -1209,6 +1283,7 @@ int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp, rc = mgc_set_mgs_param(exp, msp); RETURN(rc); } +#endif if (KEY_IS(KEY_MGSSEC)) { struct client_obd *cli = &exp->exp_obd->u.cli; struct sptlrpc_flavor flvr; @@ -1257,8 +1332,7 @@ int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp, } static int mgc_get_info(const struct lu_env *env, struct obd_export *exp, - __u32 keylen, void *key, __u32 *vallen, void *val, - struct lov_stripe_md *unused) + __u32 keylen, void *key, __u32 *vallen, void *val) { int rc = -EINVAL; @@ -1318,7 +1392,7 @@ static int mgc_import_event(struct obd_device *obd, } enum { - CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT), + CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT), CONFIG_READ_NRPAGES = 4 }; @@ -1327,51 +1401,53 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, __u64 max_version, void *data, int datalen, bool mne_swab) { - struct config_llog_instance *cfg = &cld->cld_cfg; - struct lustre_sb_info *lsi = s2lsi(cfg->cfg_sb); - struct mgs_nidtbl_entry *entry; - struct lustre_cfg *lcfg; - struct lustre_cfg_bufs bufs; - u64 prev_version = 0; - char *inst; - char *buf; - int bufsz; - int pos; - int rc = 0; - int off = 0; - ENTRY; + struct config_llog_instance *cfg = &cld->cld_cfg; + struct lustre_sb_info *lsi = s2lsi(cfg->cfg_sb); + struct mgs_nidtbl_entry *entry; + struct lustre_cfg *lcfg; + struct lustre_cfg_bufs bufs; + u64 prev_version = 0; + char *inst; + char *buf; + int bufsz; + int pos = 0; + int rc = 0; + int off = 0; - LASSERT(cfg->cfg_instance != NULL); - LASSERT(cfg->cfg_sb == cfg->cfg_instance); + ENTRY; + LASSERT(cfg->cfg_instance != 0); + LASSERT(ll_get_cfg_instance(cfg->cfg_sb) == cfg->cfg_instance); - OBD_ALLOC(inst, PAGE_CACHE_SIZE); + OBD_ALLOC(inst, PAGE_SIZE); if (inst == NULL) RETURN(-ENOMEM); if (!IS_SERVER(lsi)) { - pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance); - if (pos >= PAGE_CACHE_SIZE) { - OBD_FREE(inst, PAGE_CACHE_SIZE); + pos = snprintf(inst, PAGE_SIZE, "%016lx", cfg->cfg_instance); + if (pos >= PAGE_SIZE) { + OBD_FREE(inst, PAGE_SIZE); return -E2BIG; } - } else { +#ifdef HAVE_SERVER_SUPPORT + } else { LASSERT(IS_MDT(lsi)); rc = server_name2svname(lsi->lsi_svname, inst, NULL, - PAGE_CACHE_SIZE); + PAGE_SIZE); if (rc) { - OBD_FREE(inst, PAGE_CACHE_SIZE); + OBD_FREE(inst, PAGE_SIZE); RETURN(-EINVAL); } pos = strlen(inst); - } +#endif /* HAVE_SERVER_SUPPORT */ + } ++pos; buf = inst + pos; - bufsz = PAGE_CACHE_SIZE - pos; + bufsz = PAGE_SIZE - pos; while (datalen > 0) { int entry_len = sizeof(*entry); - int is_ost; + int is_ost, i; struct obd_device *obd; char *obdname; char *cname; @@ -1399,7 +1475,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, /* Keep this 
swab for normal mixed endian handling. LU-1644 */ if (mne_swab) lustre_swab_mgs_nidtbl_entry(entry); - if (entry->mne_length > PAGE_CACHE_SIZE) { + if (entry->mne_length > PAGE_SIZE) { CERROR("MNE too large (%u)\n", entry->mne_length); break; } @@ -1477,11 +1553,17 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, continue; } - /* TODO: iterate all nids to find one */ - /* find uuid by nid */ - rc = client_import_find_conn(obd->u.cli.cl_import, - entry->u.nids[0], - (struct obd_uuid *)uuid); + /* iterate all nids to find one */ + /* find uuid by nid */ + rc = -ENOENT; + for (i = 0; i < entry->mne_nid_count; i++) { + rc = client_import_find_conn(obd->u.cli.cl_import, + entry->u.nids[i], + (struct obd_uuid *)uuid); + if (rc == 0) + break; + } + up_read(&obd->u.cli.cl_sem); if (rc < 0) { CERROR("mgc: cannot find uuid by nid %s\n", @@ -1498,18 +1580,20 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, lustre_cfg_bufs_set_string(&bufs, 1, params); - rc = -ENOMEM; - lcfg = lustre_cfg_new(LCFG_PARAM, &bufs); - if (lcfg == NULL) { - CERROR("mgc: cannot allocate memory\n"); - break; - } + OBD_ALLOC(lcfg, lustre_cfg_len(bufs.lcfg_bufcount, + bufs.lcfg_buflen)); + if (!lcfg) { + rc = -ENOMEM; + break; + } + lustre_cfg_init(lcfg, LCFG_PARAM, &bufs); - CDEBUG(D_INFO, "ir apply logs "LPD64"/"LPD64" for %s -> %s\n", + CDEBUG(D_INFO, "ir apply logs %lld/%lld for %s -> %s\n", prev_version, max_version, obdname, params); rc = class_process_config(lcfg); - lustre_cfg_free(lcfg); + OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount, + lcfg->lcfg_buflens)); if (rc) CDEBUG(D_INFO, "process config for %s error %d\n", obdname, rc); @@ -1517,30 +1601,43 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, /* continue, even one with error */ } - OBD_FREE(inst, PAGE_CACHE_SIZE); + OBD_FREE(inst, PAGE_SIZE); RETURN(rc); } /** * This function is called if this client was notified for target restarting - * by the MGS. A CONFIG_READ RPC is going to send to fetch recovery logs. + * by the MGS. A CONFIG_READ RPC is going to send to fetch recovery or + * nodemap logs. */ -static int mgc_process_recover_log(struct obd_device *obd, - struct config_llog_data *cld) +static int mgc_process_recover_nodemap_log(struct obd_device *obd, + struct config_llog_data *cld) { - struct ptlrpc_request *req = NULL; - struct config_llog_instance *cfg = &cld->cld_cfg; - struct mgs_config_body *body; - struct mgs_config_res *res; - struct ptlrpc_bulk_desc *desc; - struct page **pages; - int nrpages; - bool eof = true; + struct ptlrpc_connection *mgc_conn; + struct ptlrpc_request *req = NULL; + struct config_llog_instance *cfg = &cld->cld_cfg; + struct mgs_config_body *body; + struct mgs_config_res *res; + struct nodemap_config *new_config = NULL; + struct lu_nodemap *recent_nodemap = NULL; + struct ptlrpc_bulk_desc *desc; + struct page **pages = NULL; + __u64 config_read_offset = 0; + __u8 nodemap_cur_pass = 0; + int nrpages = 0; + bool eof = true; bool mne_swab = false; - int i; - int ealen; - int rc; - ENTRY; + int i; + int ealen; + int rc; + ENTRY; + + mgc_conn = class_exp2cliimp(cld->cld_mgcexp)->imp_connection; + + /* don't need to get local config */ + if (cld_is_nodemap(cld) && + (LNET_NETTYP(LNET_NIDNET(mgc_conn->c_peer.nid)) == LOLND)) + GOTO(out, rc = 0); /* allocate buffer for bulk transfer. * if this is the first time for this mgs to read logs, @@ -1549,7 +1646,7 @@ static int mgc_process_recover_log(struct obd_device *obd, * small and CONFIG_READ_NRPAGES will be used. 
*/ nrpages = CONFIG_READ_NRPAGES; - if (cfg->cfg_last_idx == 0) /* the first time */ + if (cfg->cfg_last_idx == 0 || cld_is_nodemap(cld)) nrpages = CONFIG_READ_NRPAGES_INIT; OBD_ALLOC(pages, sizeof(*pages) * nrpages); @@ -1557,108 +1654,164 @@ static int mgc_process_recover_log(struct obd_device *obd, GOTO(out, rc = -ENOMEM); for (i = 0; i < nrpages; i++) { - pages[i] = alloc_page(GFP_IOFS); + pages[i] = alloc_page(GFP_KERNEL); if (pages[i] == NULL) GOTO(out, rc = -ENOMEM); } again: - LASSERT(cld_is_recover(cld)); +#ifdef HAVE_SERVER_SUPPORT + if (cld_is_nodemap(cld) && config_read_offset == 0) { + new_config = nodemap_config_alloc(); + if (IS_ERR(new_config)) { + rc = PTR_ERR(new_config); + new_config = NULL; + GOTO(out, rc); + } + } +#endif + LASSERT(cld_is_recover(cld) || cld_is_nodemap(cld)); LASSERT(mutex_is_locked(&cld->cld_lock)); - req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp), - &RQF_MGS_CONFIG_READ); - if (req == NULL) - GOTO(out, rc = -ENOMEM); + req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp), + &RQF_MGS_CONFIG_READ); + if (req == NULL) + GOTO(out, rc = -ENOMEM); - rc = ptlrpc_request_pack(req, LUSTRE_MGS_VERSION, MGS_CONFIG_READ); - if (rc) - GOTO(out, rc); + rc = ptlrpc_request_pack(req, LUSTRE_MGS_VERSION, MGS_CONFIG_READ); + if (rc) + GOTO(out, rc); - /* pack request */ - body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY); - LASSERT(body != NULL); - LASSERT(sizeof(body->mcb_name) > strlen(cld->cld_logname)); + /* pack request */ + body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY); + LASSERT(body != NULL); + LASSERT(sizeof(body->mcb_name) > strlen(cld->cld_logname)); if (strlcpy(body->mcb_name, cld->cld_logname, sizeof(body->mcb_name)) >= sizeof(body->mcb_name)) GOTO(out, rc = -E2BIG); - body->mcb_offset = cfg->cfg_last_idx + 1; - body->mcb_type = cld->cld_type; - body->mcb_bits = PAGE_CACHE_SHIFT; - body->mcb_units = nrpages; + if (cld_is_nodemap(cld)) + body->mcb_offset = config_read_offset; + else + body->mcb_offset = cfg->cfg_last_idx + 1; + body->mcb_type = cld->cld_type; + body->mcb_bits = PAGE_SHIFT; + body->mcb_units = nrpages; + body->mcb_nm_cur_pass = nodemap_cur_pass; /* allocate bulk transfer descriptor */ - desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, BULK_PUT_SINK, - MGS_BULK_PORTAL); + desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, + PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV, + MGS_BULK_PORTAL, + &ptlrpc_bulk_kiov_pin_ops); if (desc == NULL) GOTO(out, rc = -ENOMEM); for (i = 0; i < nrpages; i++) - ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE); + desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, + PAGE_SIZE); - ptlrpc_request_set_replen(req); - rc = ptlrpc_queue_wait(req); - if (rc) - GOTO(out, rc); + ptlrpc_request_set_replen(req); + rc = ptlrpc_queue_wait(req); + if (rc) + GOTO(out, rc); - res = req_capsule_server_get(&req->rq_pill, &RMF_MGS_CONFIG_RES); - if (res->mcr_size < res->mcr_offset) - GOTO(out, rc = -EINVAL); + res = req_capsule_server_get(&req->rq_pill, &RMF_MGS_CONFIG_RES); + if (!res) + GOTO(out, rc = -EPROTO); - /* always update the index even though it might have errors with - * handling the recover logs */ - cfg->cfg_last_idx = res->mcr_offset; - eof = res->mcr_offset == res->mcr_size; + if (cld_is_nodemap(cld)) { + config_read_offset = res->mcr_offset; + eof = config_read_offset == II_END_OFF; + nodemap_cur_pass = res->mcr_nm_cur_pass; + } else { + if (res->mcr_size < res->mcr_offset) + GOTO(out, rc = -EINVAL); - CDEBUG(D_INFO, "Latest version "LPD64", more %d.\n", 
- res->mcr_offset, eof == false); + /* always update the index even though it might have errors with + * handling the recover logs + */ + cfg->cfg_last_idx = res->mcr_offset; + eof = res->mcr_offset == res->mcr_size; - ealen = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, 0); - if (ealen < 0) - GOTO(out, rc = ealen); + CDEBUG(D_INFO, "Latest version %lld, more %d.\n", + res->mcr_offset, eof == false); + } - if (ealen > nrpages << PAGE_CACHE_SHIFT) - GOTO(out, rc = -EINVAL); + ealen = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, 0); + if (ealen < 0) + GOTO(out, rc = ealen); - if (ealen == 0) { /* no logs transferred */ - if (!eof) - rc = -EINVAL; - GOTO(out, rc); - } + if (ealen > nrpages << PAGE_SHIFT) + GOTO(out, rc = -EINVAL); - mne_swab = !!ptlrpc_rep_need_swab(req); -#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0) - /* This import flag means the server did an extra swab of IR MNE - * records (fixed in LU-1252), reverse it here if needed. LU-1644 */ - if (unlikely(req->rq_import->imp_need_mne_swab)) - mne_swab = !mne_swab; -#else -#warning "LU-1644: Remove old OBD_CONNECT_MNE_SWAB fixup and imp_need_mne_swab" + if (ealen == 0) { /* no logs transferred */ +#ifdef HAVE_SERVER_SUPPORT + /* config changed since first read RPC */ + if (cld_is_nodemap(cld) && config_read_offset == 0) { + CDEBUG(D_INFO, "nodemap config changed in transit, retrying\n"); + GOTO(out, rc = -EAGAIN); + } #endif + if (!eof) + rc = -EINVAL; + GOTO(out, rc); + } - for (i = 0; i < nrpages && ealen > 0; i++) { - int rc2; - void *ptr; + mne_swab = ptlrpc_rep_need_swab(req); + + /* When a nodemap config is received, we build a new nodemap config, + * with new nodemap structs. We keep track of the most recently added + * nodemap since the config is read ordered by nodemap_id, and so it + * is likely that the next record will be related. Because access to + * the nodemaps is single threaded until the nodemap_config is active, + * we don't need to reference count with recent_nodemap, though + * recent_nodemap should be set to NULL when the nodemap_config + * is either destroyed or set active. + */ + for (i = 0; i < nrpages && ealen > 0; i++) { + int rc2; + union lu_page *ptr; ptr = kmap(pages[i]); - rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr, - min_t(int, ealen, PAGE_CACHE_SIZE), - mne_swab); + if (cld_is_nodemap(cld)) + rc2 = nodemap_process_idx_pages(new_config, ptr, + &recent_nodemap); + else + rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, + ptr, + min_t(int, ealen, + PAGE_SIZE), + mne_swab); kunmap(pages[i]); if (rc2 < 0) { - CWARN("Process recover log %s error %d\n", - cld->cld_logname, rc2); - break; - } + CWARN("%s: error processing %s log %s: rc = %d\n", + obd->obd_name, + cld_is_nodemap(cld) ? 
"nodemap" : "recovery", + cld->cld_logname, + rc2); + GOTO(out, rc = rc2); + } - ealen -= PAGE_CACHE_SIZE; - } + ealen -= PAGE_SIZE; + } out: - if (req) - ptlrpc_req_finished(req); + if (req) { + ptlrpc_req_finished(req); + req = NULL; + } - if (rc == 0 && !eof) - goto again; + if (rc == 0 && !eof) + goto again; + +#ifdef HAVE_SERVER_SUPPORT + if (new_config != NULL) { + /* recent_nodemap cannot be used after set_active/dealloc */ + if (rc == 0) + nodemap_config_set_active_mgc(new_config); + else + nodemap_config_dealloc(new_config); + } +#endif if (pages) { for (i = 0; i < nrpages; i++) { @@ -1671,6 +1824,21 @@ out: return rc; } +static int mgc_barrier_glimpse_ast(struct ldlm_lock *lock, void *data) +{ + struct config_llog_data *cld = lock->l_ast_data; + int rc; + ENTRY; + + if (cld->cld_stopping) + RETURN(-ENODEV); + + rc = barrier_handler(s2lsi(cld->cld_cfg.cfg_sb)->lsi_dt_dev, + (struct ptlrpc_request *)data); + + RETURN(rc); +} + /* Copy a remote log locally */ static int mgc_llog_local_copy(const struct lu_env *env, struct obd_device *obd, @@ -1688,7 +1856,7 @@ static int mgc_llog_local_copy(const struct lu_env *env, * - if failed then move bakup to logname again */ - OBD_ALLOC(temp_log, strlen(logname) + 1); + OBD_ALLOC(temp_log, strlen(logname) + 2); if (!temp_log) RETURN(-ENOMEM); sprintf(temp_log, "%sT", logname); @@ -1710,7 +1878,7 @@ out: obd->obd_name, logname, rc); } llog_erase(env, lctxt, NULL, temp_log); - OBD_FREE(temp_log, strlen(logname) + 1); + OBD_FREE(temp_log, strlen(logname) + 2); return rc; } @@ -1722,7 +1890,6 @@ static int mgc_process_cfg_log(struct obd_device *mgc, struct client_obd *cli = &mgc->u.cli; struct lustre_sb_info *lsi = NULL; int rc = 0; - bool sptlrpc_started = false; struct lu_env *env; ENTRY; @@ -1730,13 +1897,6 @@ static int mgc_process_cfg_log(struct obd_device *mgc, LASSERT(cld); LASSERT(mutex_is_locked(&cld->cld_lock)); - /* - * local copy of sptlrpc log is controlled elsewhere, don't try to - * read it up here. - */ - if (cld_is_sptlrpc(cld) && local_only) - RETURN(0); - if (cld->cld_cfg.cfg_sb) lsi = s2lsi(cld->cld_cfg.cfg_sb); @@ -1759,12 +1919,13 @@ static int mgc_process_cfg_log(struct obd_device *mgc, cli->cl_mgc_configs_dir != NULL && lu2dt_dev(cli->cl_mgc_configs_dir->do_lu.lo_dev) == lsi->lsi_dt_dev) { - if (!local_only) + if (!local_only && !lsi->lsi_dt_dev->dd_rdonly) /* Only try to copy log if we have the lock. */ rc = mgc_llog_local_copy(env, mgc, ctxt, lctxt, cld->cld_logname); if (local_only || rc) { - if (llog_is_empty(env, lctxt, cld->cld_logname)) { + if (strcmp(cld->cld_logname, PARAMS_FILENAME) != 0 && + llog_is_empty(env, lctxt, cld->cld_logname)) { LCONSOLE_ERROR_MSG(0x13a, "Failed to get MGS " "log %s and no local copy." "\n", cld->cld_logname); @@ -1773,6 +1934,7 @@ static int mgc_process_cfg_log(struct obd_device *mgc, CDEBUG(D_MGC, "Failed to get MGS log %s, using local " "copy for now, will try to update later.\n", cld->cld_logname); + rc = 0; } /* Now, whether we copied or not, start using the local llog. * If we failed to copy, we'll start using whatever the old @@ -1785,36 +1947,39 @@ static int mgc_process_cfg_log(struct obd_device *mgc, GOTO(out_pop, rc = -EIO); } - if (cld_is_sptlrpc(cld)) { - sptlrpc_conf_log_update_begin(cld->cld_logname); - sptlrpc_started = true; - } + rc = -EAGAIN; + if (lsi && IS_SERVER(lsi) && !IS_MGS(lsi) && + lsi->lsi_dt_dev->dd_rdonly) { + struct llog_ctxt *rctxt; - /* logname and instance info should be the same, so use our - * copy of the instance for the update. 
The cfg_last_idx will - * be updated here. */ - rc = class_config_parse_llog(env, ctxt, cld->cld_logname, - &cld->cld_cfg); - EXIT; + /* Under readonly mode, we may have no local copy or local + * copy is incomplete, so try to use remote llog firstly. */ + rctxt = llog_get_context(mgc, LLOG_CONFIG_REPL_CTXT); + LASSERT(rctxt); -out_pop: - __llog_ctxt_put(env, ctxt); - if (lctxt) - __llog_ctxt_put(env, lctxt); + rc = class_config_parse_llog(env, rctxt, cld->cld_logname, + &cld->cld_cfg); + llog_ctxt_put(rctxt); + } + + if (rc && rc != -ENOENT) + rc = class_config_parse_llog(env, ctxt, cld->cld_logname, + &cld->cld_cfg); /* - * update settings on existing OBDs. doing it inside - * of llog_process_lock so no device is attaching/detaching - * in parallel. + * update settings on existing OBDs. * the logname must be -sptlrpc */ - if (sptlrpc_started) { - LASSERT(cld_is_sptlrpc(cld)); - sptlrpc_conf_log_update_end(cld->cld_logname); + if (rc == 0 && cld_is_sptlrpc(cld)) class_notify_sptlrpc_conf(cld->cld_logname, strlen(cld->cld_logname) - strlen("-sptlrpc")); - } + EXIT; + +out_pop: + __llog_ctxt_put(env, ctxt); + if (lctxt) + __llog_ctxt_put(env, lctxt); lu_env_fini(env); out_free: @@ -1822,76 +1987,169 @@ out_free: return rc; } -/** Get a config log from the MGS and process it. - * This func is called for both clients and servers. - * Copy the log locally before parsing it if appropriate (non-MGS server) +static bool mgc_import_in_recovery(struct obd_import *imp) +{ + bool in_recovery = true; + + spin_lock(&imp->imp_lock); + if (imp->imp_state == LUSTRE_IMP_FULL || + imp->imp_state == LUSTRE_IMP_CLOSED) + in_recovery = false; + spin_unlock(&imp->imp_lock); + + return in_recovery; +} + +/** + * Get a configuration log from the MGS and process it. + * + * This function is called for both clients and servers to process the + * configuration log from the MGS. The MGC enqueues a DLM lock on the + * log from the MGS, and if the lock gets revoked the MGC will be notified + * by the lock cancellation callback that the config log has changed, + * and will enqueue another MGS lock on it, and then continue processing + * the new additions to the end of the log. + * + * Since the MGC import is not replayable, if the import is being evicted + * (rcl == -ESHUTDOWN, \see ptlrpc_import_delay_req()), retry to process + * the log until recovery is finished or the import is closed. + * + * Make a local copy of the log before parsing it if appropriate (non-MGS + * server) so that the server can start even when the MGS is down. + * + * There shouldn't be multiple processes running process_log at once -- + * sounds like badness. It actually might be fine, as long as they're not + * trying to update from the same log simultaneously, in which case we + * should use a per-log semaphore instead of cld_lock. + * + * \param[in] mgc MGC device by which to fetch the configuration log + * \param[in] cld log processing state (stored in lock callback data) + * + * \retval 0 on success + * \retval negative errno on failure */ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld) { struct lustre_handle lockh = { 0 }; __u64 flags = LDLM_FL_NO_LRU; int rc = 0, rcl; + bool retry = false; ENTRY; - LASSERT(cld); + LASSERT(cld != NULL); /* I don't want multiple processes running process_log at once -- sounds like badness. It actually might be fine, as long as we're not trying to update from the same log simultaneously (in which case we should use a per-log sem.) 
@@ -1822,76 +1987,169 @@ out_free:
 	return rc;
 }
 
-/** Get a config log from the MGS and process it.
- * This func is called for both clients and servers.
- * Copy the log locally before parsing it if appropriate (non-MGS server)
+static bool mgc_import_in_recovery(struct obd_import *imp)
+{
+	bool in_recovery = true;
+
+	spin_lock(&imp->imp_lock);
+	if (imp->imp_state == LUSTRE_IMP_FULL ||
+	    imp->imp_state == LUSTRE_IMP_CLOSED)
+		in_recovery = false;
+	spin_unlock(&imp->imp_lock);
+
+	return in_recovery;
+}
+
+/**
+ * Get a configuration log from the MGS and process it.
+ *
+ * This function is called for both clients and servers to process the
+ * configuration log from the MGS. The MGC enqueues a DLM lock on the
+ * log from the MGS, and if the lock gets revoked the MGC will be notified
+ * by the lock cancellation callback that the config log has changed,
+ * and will enqueue another MGS lock on it, and then continue processing
+ * the new additions to the end of the log.
+ *
+ * Since the MGC import is not replayable, if the import is being evicted
+ * (rcl == -ESHUTDOWN, \see ptlrpc_import_delay_req()), retry to process
+ * the log until recovery is finished or the import is closed.
+ *
+ * Make a local copy of the log before parsing it if appropriate (non-MGS
+ * server) so that the server can start even when the MGS is down.
+ *
+ * There shouldn't be multiple processes running process_log at once --
+ * sounds like badness. It actually might be fine, as long as they're not
+ * trying to update from the same log simultaneously, in which case we
+ * should use a per-log semaphore instead of cld_lock.
+ *
+ * \param[in] mgc	MGC device by which to fetch the configuration log
+ * \param[in] cld	log processing state (stored in lock callback data)
+ *
+ * \retval		0 on success
+ * \retval		negative errno on failure
  */
 int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
 {
 	struct lustre_handle lockh = { 0 };
 	__u64 flags = LDLM_FL_NO_LRU;
 	int rc = 0, rcl;
+	bool retry = false;
 	ENTRY;
 
-	LASSERT(cld);
+	LASSERT(cld != NULL);
 
 	/* I don't want multiple processes running process_log at once --
 	   sounds like badness.  It actually might be fine, as long as
 	   we're not trying to update from the same log
 	   simultaneously (in which case we should use a per-log sem.) */
+restart:
 	mutex_lock(&cld->cld_lock);
 	if (cld->cld_stopping) {
 		mutex_unlock(&cld->cld_lock);
-		RETURN(0);
-	}
-
-	OBD_FAIL_TIMEOUT(OBD_FAIL_MGC_PAUSE_PROCESS_LOG, 20);
-
-	CDEBUG(D_MGC, "Process log %s:%p from %d\n", cld->cld_logname,
-	       cld->cld_cfg.cfg_instance, cld->cld_cfg.cfg_last_idx + 1);
-
-	/* Get the cfg lock on the llog */
-	rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, NULL, LDLM_PLAIN, NULL,
-			  LCK_CR, &flags, NULL, NULL, NULL,
-			  cld, 0, NULL, &lockh);
-	if (rcl == 0) {
-		/* Get the cld, it will be released in mgc_blocking_ast. */
-		config_log_get(cld);
-		rc = ldlm_lock_set_data(&lockh, (void *)cld);
-		LASSERT(rc == 0);
-	} else {
-		CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl);
-
-		/* mark cld_lostlock so that it will requeue
-		 * after MGC becomes available. */
-		cld->cld_lostlock = 1;
-		/* Get extra reference, it will be put in requeue thread */
-		config_log_get(cld);
-	}
+		RETURN(0);
+	}
+	OBD_FAIL_TIMEOUT(OBD_FAIL_MGC_PAUSE_PROCESS_LOG, 20);
+
+	CDEBUG(D_MGC, "Process log %s-%016lx from %d\n", cld->cld_logname,
+	       cld->cld_cfg.cfg_instance, cld->cld_cfg.cfg_last_idx + 1);
+
+	/* Get the cfg lock on the llog */
+	rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, LDLM_PLAIN, NULL,
+			  LCK_CR, &flags,
+			  cld_is_barrier(cld) ? mgc_barrier_glimpse_ast : NULL,
+			  cld, 0, NULL, &lockh);
+	if (rcl == 0) {
+		/* Get the cld, it will be released in mgc_blocking_ast. */
+		config_log_get(cld);
+		rc = ldlm_lock_set_data(&lockh, (void *)cld);
+		LASSERT(rc == 0);
+	} else {
+		CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl);
+
+		if (rcl == -ESHUTDOWN &&
+		    atomic_read(&mgc->u.cli.cl_mgc_refcount) > 0 && !retry) {
+			struct obd_import *imp;
+			struct l_wait_info lwi;
+			long timeout = cfs_time_seconds(obd_timeout);
+
+			mutex_unlock(&cld->cld_lock);
+			imp = class_exp2cliimp(mgc->u.cli.cl_mgc_mgsexp);
+
+			/* Let's force the pinger, and wait for the import to
+			 * be connected, note: since mgc import is
+			 * non-replayable, and even the import state is
+			 * disconnected, it does not mean the "recovery" is
+			 * stopped, so we will keep waiting until timeout or
+			 * the import state is FULL or closed */
+			ptlrpc_pinger_force(imp);
+
+			lwi = LWI_TIMEOUT(timeout, NULL, NULL);
+			l_wait_event(imp->imp_recovery_waitq,
+				     !mgc_import_in_recovery(imp), &lwi);
+
+			if (imp->imp_state == LUSTRE_IMP_FULL) {
+				retry = true;
+				goto restart;
+			} else {
+				mutex_lock(&cld->cld_lock);
+				/* unlock/lock mutex, so check stopping again */
+				if (cld->cld_stopping) {
+					mutex_unlock(&cld->cld_lock);
+					RETURN(0);
+				}
+				spin_lock(&config_list_lock);
+				cld->cld_lostlock = 1;
+				spin_unlock(&config_list_lock);
+			}
+		} else {
+			/* mark cld_lostlock so that it will requeue
+			 * after MGC becomes available. */
+			spin_lock(&config_list_lock);
+			cld->cld_lostlock = 1;
+			spin_unlock(&config_list_lock);
+		}
+	}
 
-	if (cld_is_recover(cld)) {
-		rc = 0; /* this is not a fatal error for recover log */
-		if (rcl == 0)
-			rc = mgc_process_recover_log(mgc, cld);
-	} else {
-		rc = mgc_process_cfg_log(mgc, cld, rcl != 0);
-	}
+	if (cld_is_recover(cld) || cld_is_nodemap(cld)) {
+		if (!rcl)
+			rc = mgc_process_recover_nodemap_log(mgc, cld);
+		else if (cld_is_nodemap(cld))
+			rc = rcl;
+
+		if (cld_is_recover(cld) && rc) {
+			if (!rcl) {
+				CERROR("%s: recover log %s failed, not fatal: rc = %d\n",
+				       mgc->obd_name, cld->cld_logname, rc);
+				spin_lock(&config_list_lock);
+				cld->cld_lostlock = 1;
+				spin_unlock(&config_list_lock);
+			}
+			rc = 0; /* this is not a fatal error for recover log */
+		}
+	} else if (!cld_is_barrier(cld)) {
+		rc = mgc_process_cfg_log(mgc, cld, rcl != 0);
+	}
 
-	CDEBUG(D_MGC, "%s: configuration from log '%s' %sed (%d).\n",
-	       mgc->obd_name, cld->cld_logname, rc ? "fail" : "succeed", rc);
+	CDEBUG(D_MGC, "%s: configuration from log '%s' %sed (%d).\n",
+	       mgc->obd_name, cld->cld_logname, rc ? "fail" : "succeed", rc);
 
 	mutex_unlock(&cld->cld_lock);
 
-	/* Now drop the lock so MGS can revoke it */
-	if (!rcl) {
-		rcl = mgc_cancel(mgc->u.cli.cl_mgc_mgsexp, NULL,
-				 LCK_CR, &lockh);
-		if (rcl)
-			CERROR("Can't drop cfg lock: %d\n", rcl);
-	}
+	/* Now drop the lock so MGS can revoke it */
+	if (!rcl) {
+		rcl = mgc_cancel(mgc->u.cli.cl_mgc_mgsexp, LCK_CR, &lockh);
+		if (rcl)
+			CERROR("Can't drop cfg lock: %d\n", rcl);
+	}
 
-	RETURN(rc);
+	/* requeue nodemap lock immediately if transfer was interrupted */
+	if (cld_is_nodemap(cld) && rc == -EAGAIN) {
+		mgc_requeue_add(cld);
+		rc = 0;
+	}
+
+	RETURN(rc);
 }
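As a reading aid, the protocol described in the new comment block above (enqueue a DLM lock on the config log, parse the new records, then drop the lock so the MGS can revoke it) boils down to the following condensed sketch. The names are the ones used in this file, but this is only an illustration: the -ESHUTDOWN retry, reference counting, and the recover/nodemap/barrier special cases handled by the real mgc_process_log() are deliberately omitted.

	/* Condensed sketch only -- not a replacement for mgc_process_log(). */
	static int mgc_process_log_sketch(struct obd_device *mgc,
					  struct config_llog_data *cld)
	{
		struct lustre_handle lockh = { 0 };
		__u64 flags = LDLM_FL_NO_LRU;
		int rc, rcl;

		mutex_lock(&cld->cld_lock);

		/* 1. Take a CR lock on the config log so the MGS can notify us. */
		rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, LDLM_PLAIN, NULL,
				  LCK_CR, &flags, NULL, cld, 0, NULL, &lockh);

		/* 2. Parse any records newer than cfg_last_idx; if the lock could
		 *    not be taken, fall back to the local copy (local_only). */
		rc = mgc_process_cfg_log(mgc, cld, rcl != 0);

		mutex_unlock(&cld->cld_lock);

		/* 3. Drop the lock again so the MGS is free to revoke it when the
		 *    log grows, which re-triggers this whole sequence. */
		if (rcl == 0)
			mgc_cancel(mgc->u.cli.cl_mgc_mgsexp, LCK_CR, &lockh);

		return rc;
	}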
@@ -1899,7 +2157,7 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
  * LCFG_LOG_START gets the config log from the MGS, processes it to start
  * any services, and adds it to the list logs to watch (follow).
  */
-static int mgc_process_config(struct obd_device *obd, obd_count len, void *buf)
+static int mgc_process_config(struct obd_device *obd, size_t len, void *buf)
 {
 	struct lustre_cfg *lcfg = buf;
 	struct config_llog_instance *cfg = NULL;
@@ -1943,38 +2201,35 @@ static int mgc_process_config(struct obd_device *obd, obd_count len, void *buf)
 			 cfg->cfg_last_idx);
 
 		/* We're only called through here on the initial mount */
-		rc = config_log_add(obd, logname, cfg, sb);
-		if (rc)
-			break;
-		cld = config_log_find(logname, cfg);
-		if (cld == NULL) {
-			rc = -ENOENT;
-			break;
-		}
+		cld = config_log_add(obd, logname, cfg, sb);
+		if (IS_ERR(cld)) {
+			rc = PTR_ERR(cld);
+			break;
+		}
 
-		/* COMPAT_146 */
-		/* FIXME only set this for old logs!  Right now this forces
-		   us to always skip the "inside markers" check */
-		cld->cld_cfg.cfg_flags |= CFG_F_COMPAT146;
-
-		rc = mgc_process_log(obd, cld);
-		if (rc == 0 && cld->cld_recover != NULL) {
-			if (OCD_HAS_FLAG(&obd->u.cli.cl_import->
-					 imp_connect_data, IMP_RECOV)) {
-				rc = mgc_process_log(obd, cld->cld_recover);
-			} else {
-				struct config_llog_data *cir = cld->cld_recover;
-				cld->cld_recover = NULL;
-				config_log_put(cir);
-			}
-			if (rc)
-				CERROR("Cannot process recover llog %d\n", rc);
-		}
+		rc = mgc_process_log(obd, cld);
+		if (rc == 0 && cld->cld_recover != NULL) {
+			if (OCD_HAS_FLAG(&obd->u.cli.cl_import->
+					 imp_connect_data, IMP_RECOV)) {
+				rc = mgc_process_log(obd, cld->cld_recover);
+			} else {
+				struct config_llog_data *cir;
+
+				mutex_lock(&cld->cld_lock);
+				cir = cld->cld_recover;
+				cld->cld_recover = NULL;
+				mutex_unlock(&cld->cld_lock);
+				config_log_put(cir);
+			}
+
+			if (rc)
+				CERROR("Cannot process recover llog %d\n", rc);
+		}
 
 		if (rc == 0 && cld->cld_params != NULL) {
 			rc = mgc_process_log(obd, cld->cld_params);
 			if (rc == -ENOENT) {
-				CDEBUG(D_MGC, "There is no params"
+				CDEBUG(D_MGC, "There is no params "
 				       "config file yet\n");
 				rc = 0;
 			}
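In the hunk above, config_log_add() now returns the new config_llog_data directly and reports failure through the usual kernel ERR_PTR() convention, which is what the IS_ERR()/PTR_ERR() checks rely on. A small generic illustration of that convention; lookup_thing(), use_thing() and struct thing are made-up placeholders, not part of this file:

	#include <linux/err.h>
	#include <linux/errno.h>
	#include <linux/slab.h>

	struct thing { int id; };

	/* Hypothetical allocator following the ERR_PTR() convention. */
	static struct thing *lookup_thing(int id)
	{
		struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

		if (!t)
			return ERR_PTR(-ENOMEM);	/* encode errno in the pointer */
		t->id = id;
		return t;
	}

	static int use_thing(int id)
	{
		struct thing *t = lookup_thing(id);

		if (IS_ERR(t))
			return PTR_ERR(t);		/* decode errno back out */
		/* ... use t ... */
		kfree(t);
		return 0;
	}

The benefit, as in the patch, is that the caller gets both the object and the error code from a single return value, without a separate lookup step such as the old config_log_find() call.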
@@ -1983,7 +2238,6 @@ static int mgc_process_config(struct obd_device *obd, obd_count len, void *buf)
 				CERROR("%s: can't process params llog: rc = %d\n",
 				       obd->obd_name, rc);
 		}
-		config_log_put(cld);
 		break;
 	}
 
@@ -2006,7 +2260,7 @@ out:
 	RETURN(rc);
 }
 
-struct obd_ops mgc_obd_ops = {
+static struct obd_ops mgc_obd_ops = {
 	.o_owner        = THIS_MODULE,
 	.o_setup        = mgc_setup,
 	.o_precleanup   = mgc_precleanup,
@@ -2015,34 +2269,27 @@ struct obd_ops mgc_obd_ops = {
 	.o_del_conn     = client_import_del_conn,
 	.o_connect      = client_connect_import,
 	.o_disconnect   = client_disconnect_export,
-	//.o_enqueue      = mgc_enqueue,
-	.o_cancel       = mgc_cancel,
-	//.o_iocontrol    = mgc_iocontrol,
 	.o_set_info_async = mgc_set_info_async,
 	.o_get_info       = mgc_get_info,
 	.o_import_event   = mgc_import_event,
 	.o_process_config = mgc_process_config,
 };
 
-int __init mgc_init(void)
+static int __init mgc_init(void)
 {
-	return class_register_type(&mgc_obd_ops, NULL, NULL,
-#ifndef HAVE_ONLY_PROCFS_SEQ
-				   NULL,
-#endif
-				   LUSTRE_MGC_NAME, NULL);
+	return class_register_type(&mgc_obd_ops, NULL, false, NULL,
+				   LUSTRE_MGC_NAME, NULL);
 }
 
-#ifdef __KERNEL__
-static void /*__exit*/ mgc_exit(void)
+static void __exit mgc_exit(void)
 {
 	class_unregister_type(LUSTRE_MGC_NAME);
 }
 
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
 MODULE_DESCRIPTION("Lustre Management Client");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
 MODULE_LICENSE("GPL");
 
 module_init(mgc_init);
 module_exit(mgc_exit);
-#endif
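The final hunk above follows the standard kernel module pattern: the entry points are static, annotated __init/__exit, and referenced only through module_init()/module_exit(). A generic skeleton of that pattern, purely illustrative and unrelated to the MGC itself:

	#include <linux/init.h>
	#include <linux/module.h>

	static int __init demo_init(void)
	{
		pr_info("demo: loaded\n");
		return 0;	/* a non-zero return would abort module load */
	}

	static void __exit demo_exit(void)
	{
		pr_info("demo: unloaded\n");
	}

	MODULE_AUTHOR("example");
	MODULE_DESCRIPTION("Minimal module_init/module_exit skeleton");
	MODULE_LICENSE("GPL");

	module_init(demo_init);
	module_exit(demo_exit);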