If Lustre identifies the same peer by multiple NIDs,
peer discovery may find that the discovered peer
contains a NID which is already locked as primary by a
different existing peer record. In this case it is safe
to merge the peer records, but the NID which was locked
the earliest should be kept as primary. This allows the
first of the two locked NIDs to stay primary, as
intended for communicating with Lustre, even if peer
discovery succeeded using a different NID of the MR
peer.

This patch also updates the original port, because the
master version of this change has evolved since it was
landed.
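
For illustration only, here is a minimal sketch of that
selection rule, assuming (as in the change below) that
lp_prim_lock_ts holds the ktime_get_ns() value taken when
LNET_PEER_LOCK_PRIMARY was set. The helper name is
hypothetical and not part of the patch:

    /* Hypothetical helper: decide whether the merged record should
     * adopt peer2's locked primary NID. peer2 wins only if it locked
     * its primary NID and either the local peer never locked one or
     * peer2 locked its NID earlier.
     */
    static bool keep_peer2_primary(unsigned int peer2_state,
                                   __u64 peer2_prim_lock_ts,
                                   unsigned int lp_state,
                                   __u64 lp_prim_lock_ts)
    {
            if (!(peer2_state & LNET_PEER_LOCK_PRIMARY))
                    return false;
            if (!(lp_state & LNET_PEER_LOCK_PRIMARY))
                    return true;
            return peer2_prim_lock_ts < lp_prim_lock_ts;
    }
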
Lustre-change: https://review.whamcloud.com/50530
Lustre-commit: 3b7a02ee4d656b7b3e044713681da2f56dddb152
Test-parameters: trivial testlist=sanity-lnet
Fixes: 1a2db3e14b78 ("EX-7251 lnet: fix locking multiple NIDs")
Signed-off-by: Serguei Smirnov <ssmirnov@whamcloud.com>
Change-Id: I3303e618b37a76c30be6426972e7853bb31ae497
Reviewed-on: https://review.whamcloud.com/c/ex/lustre-release/+/51384
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Cyril Bordage <cbordage@whamcloud.com>
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
#define IOC_LIBCFS_GET_LOCAL_HSTATS _IOWR(IOC_LIBCFS_TYPE, 103, IOCTL_CONFIG_SIZE)
#define IOC_LIBCFS_GET_RECOVERY_QUEUE _IOWR(IOC_LIBCFS_TYPE, 104, IOCTL_CONFIG_SIZE)
#define IOC_LIBCFS_SET_CONNS_PER_PEER _IOWR(IOC_LIBCFS_TYPE, 105, IOCTL_CONFIG_SIZE)
-#define IOC_LIBCFS_MAX_NR 105
+#define IOC_LIBCFS_SET_PEER _IOWR(IOC_LIBCFS_TYPE, 106, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_MAX_NR 106
extern int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data);
return 0;
}
+ case IOC_LIBCFS_SET_PEER: {
+ struct lnet_ioctl_peer_cfg *cfg = arg;
+ struct lnet_peer *lp;
+
+ if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
+ return -EINVAL;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ lp = lnet_find_peer(cfg->prcfg_prim_nid);
+ if (!lp) {
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return -ENOENT;
+ }
+ spin_lock(&lp->lp_lock);
+ lp->lp_state = cfg->prcfg_state;
+ spin_unlock(&lp->lp_lock);
+ lnet_peer_decref_locked(lp);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ CDEBUG(D_NET, "Set peer %s state to %u\n",
+ libcfs_nid2str(cfg->prcfg_prim_nid), cfg->prcfg_state);
+ return 0;
+ }
+
case IOC_LIBCFS_SET_CONNS_PER_PEER: {
struct lnet_ioctl_reset_conns_per_peer_cfg *cfg = arg;
int value;
*/
again:
spin_lock(&lp->lp_lock);
- if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY) && lock_prim_nid)
+ if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY) && lock_prim_nid) {
lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
+ lp->lp_prim_lock_ts = ktime_get_ns();
+ }
/* DD disabled, nothing to do */
if (lnet_peer_discovery_disabled) {
struct lnet_peer *lp2 =
lpni->lpni_peer_net->lpn_peer;
int rtr_refcount = lp2->lp_rtr_refcount;
- unsigned peer2_state;
+ unsigned int peer2_state;
__u64 peer2_prim_lock_ts;
/* If there's another peer that this NID belongs to
peer2_prim_lock_ts = lp2->lp_prim_lock_ts;
spin_unlock(&lp2->lp_lock);
- /* If both peers have their primary NIDs locked,
- * the NID which got locked the earliest should be
+ /* NID which got locked the earliest should be
* kept as primary. In case if the peers were
* created by Lustre, this allows the
* first listed NID to stay primary as intended
*/
spin_lock(&lp->lp_lock);
if (peer2_state & LNET_PEER_LOCK_PRIMARY &&
- lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
- peer2_prim_lock_ts < lp->lp_prim_lock_ts) {
+ ((lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
+ peer2_prim_lock_ts < lp->lp_prim_lock_ts) ||
+ !(lp->lp_state & LNET_PEER_LOCK_PRIMARY))) {
lp->lp_prim_lock_ts = peer2_prim_lock_ts;
lp->lp_primary_nid = nid;
+ lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
}
spin_unlock(&lp->lp_lock);
-
/*
* if we're trying to delete a router it means
* we're moving this peer NI to a new peer so must
return rc;
}
+static int
+lustre_lnet_config_peer(int state, lnet_nid_t nid, char *name,
+ int seq_no, struct cYAML **err_rc)
+{
+ struct lnet_ioctl_peer_cfg data;
+ int rc = LUSTRE_CFG_RC_NO_ERR;
+ char err_str[LNET_MAX_STR_LEN] = "\"success\"";
+
+ LIBCFS_IOC_INIT_V2(data, prcfg_hdr);
+ data.prcfg_state = state;
+ data.prcfg_prim_nid = nid;
+ data.prcfg_cfg_nid = LNET_NID_ANY;
+
+ rc = l_ioctl(LNET_DEV_ID, IOC_LIBCFS_SET_PEER, &data);
+ if (rc != 0) {
+ rc = -errno;
+ snprintf(err_str,
+ sizeof(err_str), "Can not set peer property: %s",
+ strerror(errno));
+ }
+
+ cYAML_build_error(rc, seq_no, ADD_CMD, name, err_str, err_rc);
+
+ return rc;
+}
static int
lustre_lnet_config_conns_per_peer(int value, bool all, lnet_nid_t nid,
"peer_ni healthv", seq_no, err_rc);
}
+int lustre_lnet_set_peer_state(int state, char *lpni_nid, int seq_no,
+ struct cYAML **err_rc)
+{
+ lnet_nid_t nid;
+
+ if (lpni_nid)
+ nid = libcfs_str2nid(lpni_nid);
+ else
+ nid = LNET_NID_ANY;
+ return lustre_lnet_config_peer(state, nid, "peer state", seq_no,
+ err_rc);
+}
+
int lustre_lnet_config_ni_conns_per_peer(int value, bool all, char *ni_nid,
int seq_no, struct cYAML **err_rc)
{
*/
int lustre_lnet_show_peer_debug_info(char *peer_nid, int seq_no,
struct cYAML **err_rc);
+
+/* lustre_lnet_set_peer_state
+ * set peer state
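+ * state - peer state value to set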
+ * lpni_nid - primary nid of the peer
+ * seq_no - sequence number of the request
+ * err_rc - [OUT] struct cYAML tree describing the error. Freed by
+ * caller
+ */
+int lustre_lnet_set_peer_state(int state, char *lpni_nid, int seq_no,
+ struct cYAML **err_rc);
+
#endif /* LIB_LNET_CONFIG_API_H */
{"set", jt_set_peer_ni_value, 0, "set peer ni specific parameter\n"
"\t--nid: Peer NI NID to set the\n"
"\t--health: specify health value to set\n"
- "\t--all: set all peer_nis values to the one specified\n"},
+ "\t--all: set all peer_nis values to the one specified\n"
+ "\t--state: set peer state (DANGEROUS: for test/debug only)"},
{ 0, 0, 0, NULL }
};
char *nid = NULL;
long int healthv = -1;
bool all = false;
+ long int state = -1;
int rc, opt;
struct cYAML *err_rc = NULL;
- const char *const short_options = "t:n:a";
+ const char *const short_options = "t:n:s:a";
static const struct option long_options[] = {
{ .name = "nid", .has_arg = required_argument, .val = 'n' },
{ .name = "health", .has_arg = required_argument, .val = 't' },
+ { .name = "state", .has_arg = required_argument, .val = 's' },
{ .name = "all", .has_arg = no_argument, .val = 'a' },
{ .name = NULL } };
if (parse_long(optarg, &healthv) != 0)
healthv = -1;
break;
+ case 's':
+ if (parse_long(optarg, &state) != 0)
+ state = -1;
+ break;
case 'a':
all = true;
break;
}
}
- rc = cb(healthv, all, nid, -1, &err_rc);
+ if (state > -1)
+ rc = lustre_lnet_set_peer_state(state, nid, -1, &err_rc);
+ else
+ rc = cb(healthv, all, nid, -1, &err_rc);
if (rc != LUSTRE_CFG_RC_NO_ERR)
cYAML_print_tree2file(stderr, err_rc);
}
run_test 302 "Check that peer debug info can be dumped"
+test_304() {
+ [[ ${NETTYPE} == tcp* ]] || skip "Need tcp NETTYPE"
+
+ cleanup_netns || error "Failed to cleanup netns before test execution"
+ cleanup_lnet || error "Failed to unload modules before test execution"
+
+ setup_fakeif || error "Failed to add fake IF"
+ have_interface "$FAKE_IF" ||
+ error "Expect $FAKE_IF configured but not found"
+
+ reinit_dlc || return $?
+
+ add_net "tcp" "${INTERFACES[0]}" || return $?
+ add_net "tcp" "$FAKE_IF" || return $?
+
+ local nid1=$(lctl list_nids | head -n 1)
+ local nid2=$(lctl list_nids | tail --lines 1)
+
+ check_ni_status "$nid1" up
+ check_ni_status "$nid2" up
+
+ do_lnetctl peer add --prim_nid ${nid2} --lock_prim ||
+ error "peer add failed $?"
+ local locked_peer_state=($(do_lnetctl peer show -v 4 --nid ${nid2} |
+ awk '/peer state/{print $NF}'))
+
+ # Expect peer state bits:
+ # LNET_PEER_MULTI_RAIL(0) | LNET_PEER_CONFIGURED(3) |
+ # LNET_PEER_LOCK_PRIMARY(20)
+ (( $locked_peer_state != "1048585")) &&
+ error "Wrong peer state \"$locked_peer_state\" expected 1048585"
+
+ # Clear LNET_PEER_CONFIGURED bit and verify
+ do_lnetctl peer set --nid ${nid2} --state 1048577 ||
+ error "peer add failed $?"
+ locked_peer_state=($(do_lnetctl peer show -v 4 --nid ${nid2} |
+ awk '/peer state/{print $NF}'))
+ (( $locked_peer_state != "1048577")) &&
+ error "Wrong peer state \"$locked_peer_state\" expected 1048577"
+ do_lnetctl discover ${nid1} ||
+ error "Failed to discover peer"
+
+ # Expect nid2 and nid1 peer entries to be consolidated,
+ # nid2 to stay primary
+ cat <<EOF >> $TMP/sanity-lnet-$testnum-expected.yaml
+peer:
+ - primary nid: ${nid2}
+ Multi-Rail: True
+ peer ni:
+ - nid: ${nid1}
+ state: NA
+ - nid: ${nid2}
+ state: NA
+EOF
+ $LNETCTL peer show > $TMP/sanity-lnet-$testnum-actual.yaml
+ compare_yaml_files ||
+ error "Unexpected peer configuration"
+
+ locked_peer_state=($(do_lnetctl peer show -v 4 --nid ${nid2} |
+ awk '/peer state/{print $NF}'))
+ # Expect peer state bits to be added:
+ # LNET_PEER_DISCOVERED(4) | LNET_PEER_NIDS_UPTODATE(8)
+ (( $locked_peer_state != "1048849")) &&
+ error "Wrong peer state \"$locked_peer_state\" expected 1048849"
+ return 0
+}
+run_test 304 "Check locked primary peer nid consolidation"
+
complete_test $SECONDS
+
cleanup_testsuite
exit_status