if (rc != 0) {
CERROR("recv from %s / send to %s aborted: "
"eager_recv failed %d\n",
- libcfs_nid2str(msg->msg_rxpeer->lp_nid),
+ libcfs_nid2str(msg->msg_rxpeer->lpni_nid),
libcfs_id2str(msg->msg_target), rc);
LASSERT(rc < 0); /* required by my callers */
}
/* NB: caller shall hold a ref on 'lp' as I'd drop lnet_net_lock */
static void
-lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
+lnet_ni_query_locked(lnet_ni_t *ni, struct lnet_peer_ni *lp)
{
cfs_time_t last_alive = 0;
LASSERT(lnet_peer_aliveness_enabled(lp));
LASSERT(ni->ni_net->net_lnd->lnd_query != NULL);
- lnet_net_unlock(lp->lp_cpt);
- (ni->ni_net->net_lnd->lnd_query)(ni, lp->lp_nid, &last_alive);
- lnet_net_lock(lp->lp_cpt);
+ lnet_net_unlock(lp->lpni_cpt);
+ (ni->ni_net->net_lnd->lnd_query)(ni, lp->lpni_nid, &last_alive);
+ lnet_net_lock(lp->lpni_cpt);
- lp->lp_last_query = cfs_time_current();
+ lp->lpni_last_query = cfs_time_current();
if (last_alive != 0) /* NI has updated timestamp */
- lp->lp_last_alive = last_alive;
+ lp->lpni_last_alive = last_alive;
}
/* NB: always called with lnet_net_lock held */
static inline int
-lnet_peer_is_alive (lnet_peer_t *lp, cfs_time_t now)
+lnet_peer_is_alive(struct lnet_peer_ni *lp, cfs_time_t now)
{
int alive;
cfs_time_t deadline;
* Trust lnet_notify() if it has more recent aliveness news, but
* ignore the initial assumed death (see lnet_peers_start_down()).
*/
- if (!lp->lp_alive && lp->lp_alive_count > 0 &&
- cfs_time_aftereq(lp->lp_timestamp, lp->lp_last_alive))
+ if (!lp->lpni_alive && lp->lpni_alive_count > 0 &&
+ cfs_time_aftereq(lp->lpni_timestamp, lp->lpni_last_alive))
return 0;
deadline =
- cfs_time_add(lp->lp_last_alive,
- cfs_time_seconds(lp->lp_net->net_tunables.
+ cfs_time_add(lp->lpni_last_alive,
+ cfs_time_seconds(lp->lpni_net->net_tunables.
lct_peer_timeout));
alive = cfs_time_after(deadline, now);
/*
- * Update obsolete lp_alive except for routers assumed to be dead
+ * Update obsolete lpni_alive except for routers assumed to be dead
* initially, because router checker would update aliveness in this
- * case, and moreover lp_last_alive at peer creation is assumed.
+ * case, and moreover lpni_last_alive at peer creation is assumed.
*/
- if (alive && !lp->lp_alive &&
- !(lnet_isrouter(lp) && lp->lp_alive_count == 0))
- lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
+ if (alive && !lp->lpni_alive &&
+ !(lnet_isrouter(lp) && lp->lpni_alive_count == 0))
+ lnet_notify_locked(lp, 0, 1, lp->lpni_last_alive);
return alive;
}
/* NB: returns 1 when alive, 0 when dead, negative when error;
* may drop the lnet_net_lock */
static int
-lnet_peer_alive_locked (struct lnet_ni *ni, lnet_peer_t *lp)
+lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lp)
{
cfs_time_t now = cfs_time_current();
* Peer appears dead, but we should avoid frequent NI queries (at
* most once per lnet_queryinterval seconds).
*/
- if (lp->lp_last_query != 0) {
+ if (lp->lpni_last_query != 0) {
static const int lnet_queryinterval = 1;
cfs_time_t next_query =
- cfs_time_add(lp->lp_last_query,
+ cfs_time_add(lp->lpni_last_query,
cfs_time_seconds(lnet_queryinterval));
if (cfs_time_before(now, next_query)) {
- if (lp->lp_alive)
+ if (lp->lpni_alive)
CWARN("Unexpected aliveness of peer %s: "
"%d < %d (%d/%d)\n",
- libcfs_nid2str(lp->lp_nid),
+ libcfs_nid2str(lp->lpni_nid),
(int)now, (int)next_query,
lnet_queryinterval,
- lp->lp_net->net_tunables.lct_peer_timeout);
+ lp->lpni_net->net_tunables.lct_peer_timeout);
return 0;
}
}
if (lnet_peer_is_alive(lp, now))
return 1;
- lnet_notify_locked(lp, 0, 0, lp->lp_last_alive);
+ lnet_notify_locked(lp, 0, 0, lp->lpni_last_alive);
return 0;
}
static int
lnet_post_send_locked(lnet_msg_t *msg, int do_send)
{
- lnet_peer_t *lp = msg->msg_txpeer;
- lnet_ni_t *ni = msg->msg_txni;
+ struct lnet_peer_ni *lp = msg->msg_txpeer;
+ struct lnet_ni *ni = msg->msg_txni;
int cpt = msg->msg_tx_cpt;
struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt];
}
if (!msg->msg_peertxcredit) {
- LASSERT((lp->lp_txcredits < 0) ==
- !list_empty(&lp->lp_txq));
+ LASSERT((lp->lpni_txcredits < 0) ==
+ !list_empty(&lp->lpni_txq));
msg->msg_peertxcredit = 1;
- lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
- lp->lp_txcredits--;
+ lp->lpni_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
+ lp->lpni_txcredits--;
- if (lp->lp_txcredits < lp->lp_mintxcredits)
- lp->lp_mintxcredits = lp->lp_txcredits;
+ if (lp->lpni_txcredits < lp->lpni_mintxcredits)
+ lp->lpni_mintxcredits = lp->lpni_txcredits;
- if (lp->lp_txcredits < 0) {
+ if (lp->lpni_txcredits < 0) {
msg->msg_tx_delayed = 1;
- list_add_tail(&msg->msg_list, &lp->lp_txq);
+ list_add_tail(&msg->msg_list, &lp->lpni_txq);
return LNET_CREDIT_WAIT;
}
}
* sets do_recv FALSE and I don't do the unlock/send/lock bit.
* I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
* received or OK to receive */
- lnet_peer_t *lp = msg->msg_rxpeer;
+ struct lnet_peer_ni *lp = msg->msg_rxpeer;
lnet_rtrbufpool_t *rbp;
- lnet_rtrbuf_t *rb;
+ lnet_rtrbuf_t *rb;
LASSERT(msg->msg_iov == NULL);
LASSERT(msg->msg_kiov == NULL);
LASSERT(msg->msg_niov == 0);
LASSERT(msg->msg_routing);
LASSERT(msg->msg_receiving);
LASSERT(!msg->msg_sending);
/* non-lnet_parse callers only receive delayed messages */
LASSERT(!do_recv || msg->msg_rx_delayed);
if (!msg->msg_peerrtrcredit) {
- LASSERT((lp->lp_rtrcredits < 0) ==
- !list_empty(&lp->lp_rtrq));
+ LASSERT((lp->lpni_rtrcredits < 0) ==
+ !list_empty(&lp->lpni_rtrq));
msg->msg_peerrtrcredit = 1;
- lp->lp_rtrcredits--;
- if (lp->lp_rtrcredits < lp->lp_minrtrcredits)
- lp->lp_minrtrcredits = lp->lp_rtrcredits;
+ lp->lpni_rtrcredits--;
+ if (lp->lpni_rtrcredits < lp->lpni_minrtrcredits)
+ lp->lpni_minrtrcredits = lp->lpni_rtrcredits;
- if (lp->lp_rtrcredits < 0) {
+ if (lp->lpni_rtrcredits < 0) {
/* must have checked eager_recv before here */
LASSERT(msg->msg_rx_ready_delay);
msg->msg_rx_delayed = 1;
- list_add_tail(&msg->msg_list, &lp->lp_rtrq);
+ list_add_tail(&msg->msg_list, &lp->lpni_rtrq);
return LNET_CREDIT_WAIT;
}
}
void
lnet_return_tx_credits_locked(lnet_msg_t *msg)
{
- lnet_peer_t *txpeer = msg->msg_txpeer;
- lnet_msg_t *msg2;
- struct lnet_ni *txni = msg->msg_txni;
+ struct lnet_peer_ni *txpeer = msg->msg_txpeer;
+ struct lnet_ni *txni = msg->msg_txni;
+ lnet_msg_t *msg2;
if (msg->msg_txcredit) {
struct lnet_ni *ni = msg->msg_txni;
/* give back peer txcredits */
msg->msg_peertxcredit = 0;
- LASSERT((txpeer->lp_txcredits < 0) ==
- !list_empty(&txpeer->lp_txq));
+ LASSERT((txpeer->lpni_txcredits < 0) ==
+ !list_empty(&txpeer->lpni_txq));
- txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
- LASSERT(txpeer->lp_txqnob >= 0);
+ txpeer->lpni_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
+ LASSERT(txpeer->lpni_txqnob >= 0);
- txpeer->lp_txcredits++;
- if (txpeer->lp_txcredits <= 0) {
- msg2 = list_entry(txpeer->lp_txq.next,
- lnet_msg_t, msg_list);
+ txpeer->lpni_txcredits++;
+ if (txpeer->lpni_txcredits <= 0) {
+ msg2 = list_entry(txpeer->lpni_txq.next,
+ lnet_msg_t, msg_list);
list_del(&msg2->msg_list);
LASSERT(msg2->msg_txpeer == txpeer);
if (txpeer != NULL) {
msg->msg_txpeer = NULL;
- lnet_peer_decref_locked(txpeer);
+ lnet_peer_ni_decref_locked(txpeer);
}
}
void
lnet_return_rx_credits_locked(lnet_msg_t *msg)
{
- lnet_peer_t *rxpeer = msg->msg_rxpeer;
- struct lnet_ni *rxni = msg->msg_rxni;
- lnet_msg_t *msg2;
+ struct lnet_peer_ni *rxpeer = msg->msg_rxpeer;
+ struct lnet_ni *rxni = msg->msg_rxni;
+ lnet_msg_t *msg2;
if (msg->msg_rtrcredit) {
/* give back global router credits */
/* give back peer router credits */
msg->msg_peerrtrcredit = 0;
- LASSERT((rxpeer->lp_rtrcredits < 0) ==
- !list_empty(&rxpeer->lp_rtrq));
+ LASSERT((rxpeer->lpni_rtrcredits < 0) ==
+ !list_empty(&rxpeer->lpni_rtrq));
- rxpeer->lp_rtrcredits++;
+ rxpeer->lpni_rtrcredits++;
/* drop all messages which are queued to be routed on that
* peer. */
if (!the_lnet.ln_routing) {
- lnet_drop_routed_msgs_locked(&rxpeer->lp_rtrq,
+ lnet_drop_routed_msgs_locked(&rxpeer->lpni_rtrq,
msg->msg_rx_cpt);
- } else if (rxpeer->lp_rtrcredits <= 0) {
- msg2 = list_entry(rxpeer->lp_rtrq.next,
+ } else if (rxpeer->lpni_rtrcredits <= 0) {
+ msg2 = list_entry(rxpeer->lpni_rtrq.next,
lnet_msg_t, msg_list);
list_del(&msg2->msg_list);
}
if (rxpeer != NULL) {
msg->msg_rxpeer = NULL;
- lnet_peer_decref_locked(rxpeer);
+ lnet_peer_ni_decref_locked(rxpeer);
}
}
static int
lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
{
- lnet_peer_t *p1 = r1->lr_gateway;
- lnet_peer_t *p2 = r2->lr_gateway;
+ struct lnet_peer_ni *p1 = r1->lr_gateway;
+ struct lnet_peer_ni *p2 = r2->lr_gateway;
int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
if (r1_hops > r2_hops)
return -ERANGE;
- if (p1->lp_txqnob < p2->lp_txqnob)
+ if (p1->lpni_txqnob < p2->lpni_txqnob)
return 1;
- if (p1->lp_txqnob > p2->lp_txqnob)
+ if (p1->lpni_txqnob > p2->lpni_txqnob)
return -ERANGE;
- if (p1->lp_txcredits > p2->lp_txcredits)
+ if (p1->lpni_txcredits > p2->lpni_txcredits)
return 1;
- if (p1->lp_txcredits < p2->lp_txcredits)
+ if (p1->lpni_txcredits < p2->lpni_txcredits)
return -ERANGE;
if (r1->lr_seq - r2->lr_seq <= 0)
return -ERANGE;
}
-static lnet_peer_t *
+static struct lnet_peer_ni *
lnet_find_route_locked(struct lnet_net *net, lnet_nid_t target,
lnet_nid_t rtr_nid)
{
lnet_route_t *route;
lnet_route_t *best_route;
lnet_route_t *last_route;
- struct lnet_peer *lp_best;
- struct lnet_peer *lp;
+ struct lnet_peer_ni *lpni_best;
+ struct lnet_peer_ni *lp;
int rc;
/* If @rtr_nid is not LNET_NID_ANY, return the gateway with
if (rnet == NULL)
return NULL;
- lp_best = NULL;
+ lpni_best = NULL;
best_route = last_route = NULL;
list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
lp = route->lr_gateway;
if (!lnet_is_route_alive(route))
continue;
- if (net != NULL && lp->lp_net != net)
+ if (net != NULL && lp->lpni_net != net)
continue;
- if (lp->lp_nid == rtr_nid) /* it's pre-determined router */
+ if (lp->lpni_nid == rtr_nid) /* it's pre-determined router */
return lp;
- if (lp_best == NULL) {
+ if (lpni_best == NULL) {
best_route = last_route = route;
- lp_best = lp;
+ lpni_best = lp;
continue;
}
continue;
best_route = route;
- lp_best = lp;
+ lpni_best = lp;
}
/* set sequence number on the best router to the latest sequence + 1
* harmless and functional */
if (best_route != NULL)
best_route->lr_seq = last_route->lr_seq + 1;
- return lp_best;
+ return lpni_best;
}
int
lnet_nid_t dst_nid = msg->msg_target.nid;
struct lnet_ni *src_ni;
struct lnet_ni *local_ni;
- struct lnet_peer *lp;
+ struct lnet_peer_ni *lp;
int cpt;
int cpt2;
int rc;
return 0;
}
- rc = lnet_nid2peer_locked(&lp, dst_nid, cpt);
+ rc = lnet_nid2peerni_locked(&lp, dst_nid, cpt);
if (rc != 0) {
lnet_net_unlock(cpt);
LCONSOLE_WARN("Error %d finding peer %s\n", rc,
/* ENOMEM or shutting down */
return rc;
}
- LASSERT (lp->lp_net == src_ni->ni_net);
+ LASSERT(lp->lpni_net == src_ni->ni_net);
} else {
/* sending to a remote network */
lp = lnet_find_route_locked(src_ni != NULL ?
* it's possible that rtr_nid isn't LNET_NID_ANY and lp isn't
* pre-determined router, this can happen if router table
* was changed when we release the lock */
- if (rtr_nid != lp->lp_nid) {
- cpt2 = lp->lp_cpt;
+ if (rtr_nid != lp->lpni_nid) {
+ cpt2 = lp->lpni_cpt;
if (cpt2 != cpt) {
lnet_net_unlock(cpt);
- rtr_nid = lp->lp_nid;
+ rtr_nid = lp->lpni_nid;
cpt = cpt2;
goto again;
}
}
CDEBUG(D_NET, "Best route to %s via %s for %s %d\n",
- libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lp_nid),
+ libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lpni_nid),
lnet_msgtyp2str(msg->msg_type), msg->msg_len);
if (src_ni == NULL) {
- src_ni = lnet_get_next_ni_locked(lp->lp_net, NULL);
+ src_ni = lnet_get_next_ni_locked(lp->lpni_net, NULL);
LASSERT(src_ni != NULL);
src_nid = src_ni->ni_nid;
} else {
- LASSERT (src_ni->ni_net == lp->lp_net);
+ LASSERT(src_ni->ni_net == lp->lpni_net);
}
- lnet_peer_addref_locked(lp);
+ lnet_peer_ni_addref_locked(lp);
LASSERT(src_nid != LNET_NID_ANY);
lnet_msg_commit(msg, cpt);
}
msg->msg_target_is_router = 1;
- msg->msg_target.nid = lp->lp_nid;
+ msg->msg_target.nid = lp->lpni_nid;
msg->msg_target.pid = LNET_PID_LUSTRE;
}
info.mi_rlength = hdr->payload_length;
info.mi_roffset = hdr->msg.put.offset;
info.mi_mbits = hdr->msg.put.match_bits;
- info.mi_cpt = msg->msg_rxpeer->lp_cpt;
+ info.mi_cpt = msg->msg_rxpeer->lpni_cpt;
msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL;
ready_delay = msg->msg_rx_ready_delay;
if (!the_lnet.ln_routing)
return -ECANCELED;
- if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
+ if (msg->msg_rxpeer->lpni_rtrcredits <= 0 ||
lnet_msg2bufpool(msg)->rbp_credits <= 0) {
if (ni->ni_net->net_lnd->lnd_eager_recv == NULL) {
msg->msg_rx_ready_delay = 1;
}
lnet_net_lock(cpt);
- rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt);
+ rc = lnet_nid2peerni_locked(&msg->msg_rxpeer, from_nid, cpt);
if (rc != 0) {
lnet_net_unlock(cpt);
CERROR("%s, src %s: Dropping %s "
* until that's done */
lnet_drop_message(msg->msg_rxni,
- msg->msg_rxpeer->lp_cpt,
+ msg->msg_rxpeer->lpni_cpt,
msg->msg_private, msg->msg_len);
/*
* NB: message will not generate event because w/o attached MD,
hops = shortest_hops;
if (srcnidp != NULL) {
ni = lnet_get_next_ni_locked(
- shortest->lr_gateway->lp_net,
+ shortest->lr_gateway->lpni_net,
NULL);
*srcnidp = ni->ni_nid;
}