return rc;
}
-/* NB: caller shall hold a ref on 'lp' as I'd drop lnet_net_lock */
+/*
+ * This function can be called from two paths:
+ * 1. when sending a message
+ * 2. when decommitting a message (lnet_msg_decommit_tx())
+ * In both cases the caller holds a reference on the peer_ni,
+ * so it is safe to drop the spin lock before calling
+ * lnd_query()
+ */
static void
lnet_ni_query_locked(lnet_ni_t *ni, struct lnet_peer_ni *lp)
{
cfs_time_t last_alive = 0;
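+	/*
+	 * the caller holds the net lock on the CPT derived from the NID
+	 * relative to this NI, which can differ from lp->lpni_cpt when
+	 * the NI is bound to a subset of CPTs, so drop and retake the
+	 * lock on that CPT
+	 */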
+ int cpt = lnet_cpt_of_nid_locked(lp->lpni_nid, ni);
LASSERT(lnet_peer_aliveness_enabled(lp));
LASSERT(ni->ni_net->net_lnd->lnd_query != NULL);
- lnet_net_unlock(lp->lpni_cpt);
+ lnet_net_unlock(cpt);
(ni->ni_net->net_lnd->lnd_query)(ni, lp->lpni_nid, &last_alive);
- lnet_net_lock(lp->lpni_cpt);
+ lnet_net_lock(cpt);
lp->lpni_last_query = cfs_time_current();
* Trust lnet_notify() if it has more recent aliveness news, but
* ignore the initial assumed death (see lnet_peers_start_down()).
*/
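+	/* lpni_lock protects the peer_ni aliveness state */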
+ spin_lock(&lp->lpni_lock);
if (!lp->lpni_alive && lp->lpni_alive_count > 0 &&
- cfs_time_aftereq(lp->lpni_timestamp, lp->lpni_last_alive))
+ cfs_time_aftereq(lp->lpni_timestamp, lp->lpni_last_alive)) {
+ spin_unlock(&lp->lpni_lock);
return 0;
+ }
deadline =
cfs_time_add(lp->lpni_last_alive,
* case, and moreover lpni_last_alive at peer creation is assumed.
*/
if (alive && !lp->lpni_alive &&
- !(lnet_isrouter(lp) && lp->lpni_alive_count == 0))
+ !(lnet_isrouter(lp) && lp->lpni_alive_count == 0)) {
+ spin_unlock(&lp->lpni_lock);
lnet_notify_locked(lp, 0, 1, lp->lpni_last_alive);
+ } else {
+ spin_unlock(&lp->lpni_lock);
+ }
return alive;
}
the_lnet.ln_counters[cpt]->drop_count++;
the_lnet.ln_counters[cpt]->drop_length += msg->msg_len;
lnet_net_unlock(cpt);
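+	/* per-peer and per-NI drop stats are atomic counters, so no
+	 * lock is needed to update them here */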
+ if (msg->msg_txpeer)
+ atomic_inc(&msg->msg_txpeer->lpni_stats.drop_count);
+ if (msg->msg_txni)
+ atomic_inc(&msg->msg_txni->ni_stats.drop_count);
CNETERR("Dropping message for %s: peer not alive\n",
libcfs_id2str(msg->msg_target));
}
if (!msg->msg_peertxcredit) {
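+		/* lpni_lock serializes lpni_txcredits and lpni_txq */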
+ spin_lock(&lp->lpni_lock);
LASSERT((lp->lpni_txcredits < 0) ==
!list_empty(&lp->lpni_txq));
if (lp->lpni_txcredits < 0) {
msg->msg_tx_delayed = 1;
list_add_tail(&msg->msg_list, &lp->lpni_txq);
+ spin_unlock(&lp->lpni_lock);
return LNET_CREDIT_WAIT;
}
+ spin_unlock(&lp->lpni_lock);
}
if (!msg->msg_txcredit) {
msg->msg_txcredit = 1;
tq->tq_credits--;
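+		/* mirror the per-CPT credit in the NI-wide atomic total,
+		 * which NI selection reads without the net lock held */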
+ atomic_dec(&ni->ni_tx_credits);
if (tq->tq_credits < tq->tq_credits_min)
tq->tq_credits_min = tq->tq_credits;
LASSERT(!do_recv || msg->msg_rx_delayed);
if (!msg->msg_peerrtrcredit) {
+ spin_lock(&lp->lpni_lock);
LASSERT((lp->lpni_rtrcredits < 0) ==
!list_empty(&lp->lpni_rtrq));
LASSERT(msg->msg_rx_ready_delay);
msg->msg_rx_delayed = 1;
list_add_tail(&msg->msg_list, &lp->lpni_rtrq);
+ spin_unlock(&lp->lpni_lock);
return LNET_CREDIT_WAIT;
}
+ spin_unlock(&lp->lpni_lock);
}
rbp = lnet_msg2bufpool(msg);
!list_empty(&tq->tq_delayed));
tq->tq_credits++;
+ atomic_inc(&ni->ni_tx_credits);
if (tq->tq_credits <= 0) {
msg2 = list_entry(tq->tq_delayed.next,
lnet_msg_t, msg_list);
LASSERT(msg2->msg_txni == ni);
LASSERT(msg2->msg_tx_delayed);
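+		/* tq_delayed is a per-CPT queue, so any message found on
+		 * it must have been committed on our CPT */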
+ LASSERT(msg2->msg_tx_cpt == msg->msg_tx_cpt);
(void) lnet_post_send_locked(msg2, 1);
}
/* give back peer txcredits */
msg->msg_peertxcredit = 0;
+ spin_lock(&txpeer->lpni_lock);
LASSERT((txpeer->lpni_txcredits < 0) ==
!list_empty(&txpeer->lpni_txq));
txpeer->lpni_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
- LASSERT (txpeer->lpni_txqnob >= 0);
+ LASSERT(txpeer->lpni_txqnob >= 0);
txpeer->lpni_txcredits++;
if (txpeer->lpni_txcredits <= 0) {
msg2 = list_entry(txpeer->lpni_txq.next,
lnet_msg_t, msg_list);
list_del(&msg2->msg_list);
+ spin_unlock(&txpeer->lpni_lock);
LASSERT(msg2->msg_txpeer == txpeer);
LASSERT(msg2->msg_tx_delayed);
- (void) lnet_post_send_locked(msg2, 1);
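+		/*
+		 * the delayed message may have been committed on a
+		 * different CPT; take its net lock before posting it,
+		 * then restore our own
+		 */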
+ if (msg2->msg_tx_cpt != msg->msg_tx_cpt) {
+ lnet_net_unlock(msg->msg_tx_cpt);
+ lnet_net_lock(msg2->msg_tx_cpt);
+ }
+ (void) lnet_post_send_locked(msg2, 1);
+ if (msg2->msg_tx_cpt != msg->msg_tx_cpt) {
+ lnet_net_unlock(msg2->msg_tx_cpt);
+ lnet_net_lock(msg->msg_tx_cpt);
+ }
+ } else {
+ spin_unlock(&txpeer->lpni_lock);
}
- }
+ }
if (txni != NULL) {
msg->msg_txni = NULL;
void
lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
{
- lnet_msg_t *msg;
- lnet_msg_t *tmp;
- struct list_head drop;
-
- INIT_LIST_HEAD(&drop);
-
- list_splice_init(list, &drop);
+ lnet_msg_t *msg;
+ lnet_msg_t *tmp;
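+
+	/* the caller has already detached these messages from the peer's
+	 * rtrq under lpni_lock, so the list is private to this function */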
lnet_net_unlock(cpt);
- list_for_each_entry_safe(msg, tmp, &drop, msg_list) {
+ list_for_each_entry_safe(msg, tmp, list, msg_list) {
lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
0, 0, 0, msg->msg_hdr.payload_length);
list_del_init(&msg->msg_list);
/* give back peer router credits */
msg->msg_peerrtrcredit = 0;
+ spin_lock(&rxpeer->lpni_lock);
LASSERT((rxpeer->lpni_rtrcredits < 0) ==
!list_empty(&rxpeer->lpni_rtrq));
/* drop all messages which are queued to be routed on that
* peer. */
if (!the_lnet.ln_routing) {
- lnet_drop_routed_msgs_locked(&rxpeer->lpni_rtrq,
- msg->msg_rx_cpt);
+ struct list_head drop;
+ INIT_LIST_HEAD(&drop);
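+			/* detach the queue under lpni_lock, then drop the
+			 * messages with the lock released */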
+ list_splice_init(&rxpeer->lpni_rtrq, &drop);
+ spin_unlock(&rxpeer->lpni_lock);
+ lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
} else if (rxpeer->lpni_rtrcredits <= 0) {
msg2 = list_entry(rxpeer->lpni_rtrq.next,
lnet_msg_t, msg_list);
list_del(&msg2->msg_list);
-
+ spin_unlock(&rxpeer->lpni_lock);
(void) lnet_post_routed_recv_locked(msg2, 1);
+ } else {
+ spin_unlock(&rxpeer->lpni_lock);
}
}
if (rxni != NULL) {
static int
lnet_select_pathway(lnet_nid_t src_nid, lnet_nid_t dst_nid,
- struct lnet_msg *msg, lnet_nid_t rtr_nid, bool *lo_sent)
+ struct lnet_msg *msg, lnet_nid_t rtr_nid)
{
- struct lnet_ni *best_ni = NULL;
- struct lnet_peer_ni *best_lpni = NULL;
- struct lnet_peer_ni *net_gw = NULL;
- struct lnet_peer_ni *best_gw = NULL;
+ struct lnet_ni *best_ni;
+ struct lnet_peer_ni *best_lpni;
+ struct lnet_peer_ni *best_gw;
struct lnet_peer_ni *lpni;
- struct lnet_peer *peer = NULL;
+ struct lnet_peer *peer;
struct lnet_peer_net *peer_net;
struct lnet_net *local_net;
- struct lnet_ni *ni = NULL;
+ struct lnet_ni *ni;
+ __u32 seq;
int cpt, cpt2, rc;
- bool routing = false;
- bool ni_is_pref = false;
- bool preferred = false;
- int best_credits = 0;
- __u32 seq, seq2;
- int best_lpni_credits = INT_MIN;
- int md_cpt = 0;
- int shortest_distance = INT_MAX;
- int distance = 0;
- bool found_ir = false;
+ bool routing;
+ bool ni_is_pref;
+ bool preferred;
+ int best_credits;
+ int best_lpni_credits;
+ int md_cpt;
+ int shortest_distance;
-again:
/*
* get an initial CPT to use for locking. The idea here is not to
* serialize the calls to select_pathway, so that as many
* operations can run concurrently as possible. To do that we use
* the CPT where this call is being executed. Later on when we
* determine the CPT to use in lnet_message_commit, we switch the
- * lock and check if there was any configuration changes, if none,
- * then we proceed, if there is, then we'll need to update the cpt
- * and redo the operation.
+ * lock and check if there was any configuration change. If none,
+ * then we proceed, if there is, then we restart the operation.
*/
cpt = lnet_net_lock_current();
-
+again:
+ best_ni = NULL;
+ best_lpni = NULL;
best_gw = NULL;
- routing = false;
local_net = NULL;
- best_ni = NULL;
- shortest_distance = INT_MAX;
- found_ir = false;
+ routing = false;
+
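+	/*
+	 * sample the DLC sequence so a configuration change can be
+	 * detected after any unlock/relock below, in which case the
+	 * selection restarts from "again"
+	 */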
+ seq = lnet_get_dlc_seq_locked();
if (the_lnet.ln_shutdown) {
lnet_net_unlock(cpt);
else
md_cpt = CFS_CPT_ANY;
- /*
- * initialize the variables which could be reused if we go to
- * again
- */
- lpni = NULL;
- seq = lnet_get_dlc_seq_locked();
-
- rc = lnet_find_or_create_peer_locked(dst_nid, cpt, &peer);
- if (rc != 0) {
+ peer = lnet_find_or_create_peer_locked(dst_nid, cpt);
+ if (IS_ERR(peer)) {
lnet_net_unlock(cpt);
- return rc;
+ return PTR_ERR(peer);
}
/* If peer is not healthy then can not send anything to it */
return -EHOSTUNREACH;
}
+	if (!peer->lp_multi_rail && lnet_get_num_peer_nis(peer) > 1) {
+		CERROR("peer %s is declared to be non-MR capable, "
+		       "yet configured with more than one NID\n",
+		       libcfs_nid2str(dst_nid));
+		lnet_net_unlock(cpt);
+		return -EINVAL;
+	}
+
/*
-	 * STEP 1: first jab at determineing best_ni
+	 * STEP 1: first jab at determining best_ni
* if src_nid is explicitly specified, then best_ni is already
libcfs_nid2str(src_nid));
return -EINVAL;
}
- }
-
- if (best_ni)
goto pick_peer;
+ }
/*
* Decide whether we need to route to peer_ni.
local_net = lnet_get_net_locked(peer_net->lpn_net_id);
if (!local_net) {
+ struct lnet_peer_ni *net_gw;
/*
* go through each peer_ni on that peer_net and
* determine the best possible gw to go through
* 2. NI available credits
* 3. Round Robin
*/
+ shortest_distance = INT_MAX;
+ best_credits = INT_MIN;
+ ni = NULL;
while ((ni = lnet_get_next_ni_locked(local_net, ni))) {
+ int ni_credits;
+ int distance;
+
if (!lnet_is_ni_healthy_locked(ni))
continue;
+ ni_credits = atomic_read(&ni->ni_tx_credits);
+
/*
- * calculate the distance from the cpt on which
+ * calculate the distance from the CPT on which
* the message memory is allocated to the CPT of
* the NI's physical device
*/
ni->dev_cpt);
/*
- * If we already have a closer NI within the NUMA
- * range provided, then there is no need to
- * consider the current NI. Move on to the next
- * one.
+ * All distances smaller than the NUMA range
+ * are treated equally.
*/
- if (distance > shortest_distance &&
- distance > lnet_get_numa_range())
- continue;
+ if (distance < lnet_get_numa_range())
+ distance = lnet_get_numa_range();
- if (distance < shortest_distance &&
- distance > lnet_get_numa_range()) {
- /*
- * The current NI is the closest one that we
- * have found, even though it's not in the
- * NUMA range specified. This occurs if
- * the NUMA range is less than the least
- * of the distances in the system.
- * In effect NUMA range consideration is
- * turned off.
- */
+ /*
+ * Select on shorter distance, then available
+ * credits, then round-robin.
+ */
+ if (distance > shortest_distance) {
+ continue;
+ } else if (distance < shortest_distance) {
shortest_distance = distance;
- } else if ((distance <= shortest_distance &&
- distance < lnet_get_numa_range()) ||
- distance == shortest_distance) {
- /*
- * This NI is either within range or it's
- * equidistant. In both of these cases we
- * would want to select the NI based on
- * its available credits first, and then
- * via Round Robin.
- */
- if (distance <= shortest_distance &&
- distance < lnet_get_numa_range()) {
- /*
- * If this is the first NI that's
- * within range, then set the
- * shortest distance to the range
- * specified by the user. In
- * effect we're saying that all
- * NIs that fall within this NUMA
- * range shall be dealt with as
- * having equal NUMA weight. Which
- * will mean that we should select
- * through that set by their
- * available credits first
- * followed by Round Robin.
- *
- * And since this is the first NI
- * in the range, let's just set it
- * as our best_ni for now. The
- * following NIs found in the
- * range will be dealt with as
- * mentioned previously.
- */
- shortest_distance = lnet_get_numa_range();
- if (!found_ir) {
- found_ir = true;
- goto set_ni;
- }
- }
- /*
- * This NI is NUMA equidistant let's
- * select using credits followed by Round
- * Robin.
- */
- if (ni->ni_tx_queues[cpt]->tq_credits <
- best_credits) {
+ } else if (ni_credits < best_credits) {
+ continue;
+ } else if (ni_credits == best_credits) {
+ if (best_ni && best_ni->ni_seq <= ni->ni_seq)
continue;
- } else if (ni->ni_tx_queues[cpt]->tq_credits ==
- best_credits) {
- if (best_ni) {
- if (best_ni->ni_seq <= ni->ni_seq)
- continue;
- }
- }
}
-set_ni:
best_ni = ni;
- best_credits = ni->ni_tx_queues[cpt]->tq_credits;
+ best_credits = ni_credits;
}
}
+
/*
- * Now that we selected the NI to use increment its sequence
- * number so the Round Robin algorithm will detect that it has
- * been used and pick the next NI.
+ * if the peer is not MR capable, then we should always send to it
+ * using the first NI in the NET we determined.
*/
- best_ni->ni_seq++;
+ if (!peer->lp_multi_rail && local_net != NULL)
+ best_ni = lnet_net2ni_locked(local_net->net_id, cpt);
if (!best_ni) {
lnet_net_unlock(cpt);
return -EINVAL;
}
+ /*
+ * Now that we selected the NI to use increment its sequence
+ * number so the Round Robin algorithm will detect that it has
+ * been used and pick the next NI.
+ */
+ best_ni->ni_seq++;
+
if (routing)
goto send;
msg->msg_hdr.src_nid = cpu_to_le64(best_ni->ni_nid);
msg->msg_target.nid = best_ni->ni_nid;
lnet_msg_commit(msg, cpt);
-
- lnet_net_unlock(cpt);
msg->msg_txni = best_ni;
- lnet_ni_send(best_ni, msg);
+ lnet_net_unlock(cpt);
- *lo_sent = true;
- return 0;
+ return LNET_CREDIT_OK;
}
- lpni = NULL;
-
if (msg->msg_type == LNET_MSG_REPLY ||
msg->msg_type == LNET_MSG_ACK) {
/*
}
CDEBUG(D_NET, "Best route to %s via %s for %s %d\n",
- libcfs_nid2str(lpni->lpni_nid),
+ libcfs_nid2str(dst_nid),
libcfs_nid2str(best_gw->lpni_nid),
lnet_msgtyp2str(msg->msg_type), msg->msg_len);
* to find another peer_net that we can use
*/
__u32 net_id = peer_net->lpn_net_id;
- lnet_net_unlock(cpt);
- if (!best_lpni)
- LCONSOLE_WARN("peer net %s unhealthy\n",
- libcfs_net2str(net_id));
+ LCONSOLE_WARN("peer net %s unhealthy\n",
+ libcfs_net2str(net_id));
goto again;
}
- best_lpni = NULL;
+ /*
+ * Look at the peer NIs for the destination peer that connect
+ * to the chosen net. If a peer_ni is preferred when using the
+ * best_ni to communicate, we use that one. If there is no
+ * preferred peer_ni, or there are multiple preferred peer_ni,
+ * the available transmit credits are used. If the transmit
+ * credits are equal, we round-robin over the peer_ni.
+ */
+ lpni = NULL;
+ best_lpni_credits = INT_MIN;
+ preferred = false;
while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
/*
* if this peer ni is not healthy just skip it, no point in
* it.
*/
continue;
- } if (lpni->lpni_txcredits < best_lpni_credits)
+ } else if (lpni->lpni_txcredits < best_lpni_credits) {
/*
* We already have a peer that has more credits
* available than this one. No need to consider
* this peer further.
*/
continue;
- else if (lpni->lpni_txcredits == best_lpni_credits) {
+ } else if (lpni->lpni_txcredits == best_lpni_credits) {
/*
* The best peer found so far and the current peer
* have the same number of available credits let's
best_lpni_credits = lpni->lpni_txcredits;
}
- /*
- * Increment sequence number of the peer selected so that we can
- * pick the next one in Round Robin.
- */
- best_lpni->lpni_seq++;
-
/* if we still can't find a peer ni then we can't reach it */
if (!best_lpni) {
__u32 net_id = peer_net->lpn_net_id;
send:
/*
- * determine the cpt to use and if it has changed then
- * lock the new cpt and check if the config has changed.
- * If it has changed then repeat the algorithm since the
- * ni or peer list could have changed and the algorithm
- * would endup picking a different ni/peer_ni pair.
+ * Increment sequence number of the peer selected so that we
+ * pick the next one in Round Robin.
+ */
+ best_lpni->lpni_seq++;
+
+ /*
+	 * When routing, the best gateway found acts as the best peer
+ * NI to send to.
*/
- cpt2 = best_lpni->lpni_cpt;
+ if (routing)
+ best_lpni = best_gw;
+
+ /*
+ * grab a reference on the peer_ni so it sticks around even if
+ * we need to drop and relock the lnet_net_lock below.
+ */
+ lnet_peer_ni_addref_locked(best_lpni);
+
+ /*
+ * Use lnet_cpt_of_nid() to determine the CPT used to commit the
+ * message. This ensures that we get a CPT that is correct for
+ * the NI when the NI has been restricted to a subset of all CPTs.
+ * If the selected CPT differs from the one currently locked, we
+ * must unlock and relock the lnet_net_lock(), and then check whether
+ * the configuration has changed. We don't have a hold on the best_ni
+ * yet, and it may have vanished.
+ */
+ cpt2 = lnet_cpt_of_nid_locked(best_lpni->lpni_nid, best_ni);
if (cpt != cpt2) {
lnet_net_unlock(cpt);
cpt = cpt2;
lnet_net_lock(cpt);
- seq2 = lnet_get_dlc_seq_locked();
- if (seq2 != seq) {
- lnet_net_unlock(cpt);
+ if (seq != lnet_get_dlc_seq_locked()) {
+ lnet_peer_ni_decref_locked(best_lpni);
goto again;
}
}
* store the best_lpni in the message right away to avoid having
* to do the same operation under different conditions
*/
- msg->msg_txpeer = (routing) ? best_gw : best_lpni;
+ msg->msg_txpeer = best_lpni;
msg->msg_txni = best_ni;
+
/*
* grab a reference for the best_ni since now it's in use in this
* send. the reference will need to be dropped when the message is
* finished in lnet_finalize()
*/
lnet_ni_addref_locked(msg->msg_txni, cpt);
- lnet_peer_ni_addref_locked(msg->msg_txpeer);
/*
* set the destination nid in the message here because it's
{
lnet_nid_t dst_nid = msg->msg_target.nid;
int rc;
- bool lo_sent = false;
/*
* NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
LASSERT(!msg->msg_tx_committed);
- rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid, &lo_sent);
- if (rc < 0 || lo_sent)
+ rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
+ if (rc < 0)
return rc;
if (rc == LNET_CREDIT_OK)
info.mi_rlength = hdr->payload_length;
info.mi_roffset = hdr->msg.put.offset;
info.mi_mbits = hdr->msg.put.match_bits;
- info.mi_cpt = msg->msg_rxpeer->lpni_cpt;
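+	/* derive the match-table CPT from the sender NID relative to
+	 * the receiving NI */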
+ info.mi_cpt = lnet_cpt_of_nid(msg->msg_rxpeer->lpni_nid, ni);
msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
ready_delay = msg->msg_rx_ready_delay;
lnet_pid_t dest_pid;
lnet_nid_t dest_nid;
lnet_nid_t src_nid;
- __u32 payload_length;
- __u32 type;
+ struct lnet_peer_ni *lpni;
+ __u32 payload_length;
+ __u32 type;
-	LASSERT (!in_interrupt ());
+	LASSERT(!in_interrupt());
msg->msg_initiator = lnet_peer_primary_nid(src_nid);
lnet_net_lock(cpt);
- rc = lnet_nid2peerni_locked(&msg->msg_rxpeer, from_nid, cpt);
- if (rc != 0) {
+ lpni = lnet_nid2peerni_locked(from_nid, cpt);
+ if (IS_ERR(lpni)) {
lnet_net_unlock(cpt);
CERROR("%s, src %s: Dropping %s "
- "(error %d looking up sender)\n",
+ "(error %ld looking up sender)\n",
libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
- lnet_msgtyp2str(type), rc);
+ lnet_msgtyp2str(type), PTR_ERR(lpni));
lnet_msg_free(msg);
-		if (rc == -ESHUTDOWN)
+		if (PTR_ERR(lpni) == -ESHUTDOWN)
/* We are shutting down. Don't do anything more */
return 0;
goto drop;
}
+ msg->msg_rxpeer = lpni;
msg->msg_rxni = ni;
lnet_ni_addref_locked(ni, cpt);
* called lnet_drop_message(), so I just hang onto msg as well
* until that's done */
- lnet_drop_message(msg->msg_rxni,
- msg->msg_rxpeer->lpni_cpt,
+ lnet_drop_message(msg->msg_rxni, msg->msg_rx_cpt,
msg->msg_private, msg->msg_len);
/*
* NB: message will not generate event because w/o attached MD,