* has come around and set ready to zero */
already_live = cmpxchg(&dev->gnd_ready, GNILND_DEV_IDLE, GNILND_DEV_IRQ);
- if (!already_live) {
- wake_up_all(&dev->gnd_waitq);
- }
+ if (!already_live)
+ wake_up(&dev->gnd_waitq);
}
void kgnilnd_schedule_device_timer(cfs_timer_cb_arg_t data)
if (tx->tx_phys != NULL) {
kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
- LNET_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
+ GNILND_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
}
/* Only free the buffer if we used it */
niov = DIV_ROUND_UP(nob + offset + kiov->bv_offset,
PAGE_SIZE);
- LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
- "bad niov %d msg %p kiov %p offset %d nob%d\n",
- niov, msg, kiov, offset, nob);
-
while (offset >= kiov->bv_len) {
offset -= kiov->bv_len;
niov--;
kiov++;
LASSERT(niov > 0);
}
+
+ LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
+ "bad niov %d msg %p kiov %p offset %d nob%d\n",
+ niov, msg, kiov, offset, nob);
+
for (i = 0; i < niov; i++) {
/* We can't have a bv_offset on anything but the first
* entry, otherwise we'll have a hole at the end of the
}
CDEBUG(D_MALLOC, "slab-alloced 'tx->tx_phys': %lu at %p.\n",
- LNET_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
+ GNILND_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
/* if loops changes, please change kgnilnd_cksum_kiov
* and kgnilnd_setup_immediate_buffer */
GOTO(error, rc);
}
- if ((phys - tx->tx_phys) == LNET_MAX_IOV) {
+ if ((phys - tx->tx_phys) == GNILND_MAX_IOV) {
CERROR ("payload too big (%d)\n", (int)(phys - tx->tx_phys));
rc = -EMSGSIZE;
GOTO(error, rc);
kgnilnd_dump_blob(D_BUFFS, "RDMA payload",
tx->tx_buffer, nob);
}
- /* fallthrough */
+ fallthrough;
case 1:
libcfs_debug_dumplog();
break;
* that we fill up our mailbox, we'll keep trying to resend that msg
* until we exceed the max_retrans _or_ gnc_last_rx expires, indicating
 * that he hasn't sent us any traffic in return */
-
+
/* some reasonable throttling of the debug message */
if (log_retrans) {
unsigned long now = jiffies;
break;
}
/* needs to queue to try again, so... */
- /* fall through... */
+ fallthrough;
case GNILND_MSG_NOOP:
/* Just make sure this goes out first for this conn */
add_tail = 0;
- /* fall through... */
+ fallthrough;
default:
spin_lock(&conn->gnc_list_lock);
kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_FMAQ, add_tail);
}
void
-kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, struct lnet_process_id *target)
+kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, struct lnet_processid *target)
{
kgn_peer_t *peer;
kgn_peer_t *new_peer = NULL;
/* I expect to find him, so only take a read lock */
read_lock(&kgnilnd_data.kgn_peer_conn_lock);
- peer = kgnilnd_find_peer_locked(target->nid);
+ peer = kgnilnd_find_peer_locked(lnet_nid_to_nid4(&target->nid));
if (peer != NULL) {
conn = kgnilnd_find_conn_locked(peer);
/* this could be NULL during quiesce */
CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
- node_state = kgnilnd_get_node_state(LNET_NIDADDR(target->nid));
+ node_state = kgnilnd_get_node_state(ntohl(target->nid.nid_addr[0]));
/* NB - this will not block during normal operations -
* the only writer of this is in the startup/shutdown path. */
/* ignore previous peer entirely - we cycled the lock, so we
* will create new peer and at worst drop it if peer is still
* in the tables */
- rc = kgnilnd_create_peer_safe(&new_peer, target->nid, net, node_state);
+ rc = kgnilnd_create_peer_safe(&new_peer, lnet_nid_to_nid4(&target->nid),
+ net, node_state);
if (rc != 0) {
up_read(&kgnilnd_data.kgn_net_rw_sem);
GOTO(no_peer, rc);
/* search for peer again now that we have the lock
* if we don't find it, add our new one to the list */
- kgnilnd_add_peer_locked(target->nid, new_peer, &peer);
+ kgnilnd_add_peer_locked(lnet_nid_to_nid4(&target->nid), new_peer,
+ &peer);
/* don't create a connection if the peer is not up */
if (peer->gnp_state != GNILND_PEER_UP) {
{
struct lnet_hdr *hdr = &lntmsg->msg_hdr;
int type = lntmsg->msg_type;
- struct lnet_process_id target = lntmsg->msg_target;
+ struct lnet_processid *target = &lntmsg->msg_target;
int target_is_router = lntmsg->msg_target_is_router;
int routing = lntmsg->msg_routing;
unsigned int niov = lntmsg->msg_niov;
LASSERT(!in_interrupt());
CDEBUG(D_NET, "sending msg type %d with %d bytes in %d frags to %s\n",
- type, nob, niov, libcfs_id2str(target));
+ type, nob, niov, libcfs_idstr(target));
LASSERTF(nob == 0 || niov > 0,
"lntmsg %p nob %d niov %d\n", lntmsg, nob, niov);
- LASSERTF(niov <= LNET_MAX_IOV,
- "lntmsg %p niov %d\n", lntmsg, niov);
if (msg_vmflush)
mpflag = memalloc_noreclaim_save();
break;
if ((reverse_rdma_flag & GNILND_REVERSE_GET) == 0)
- tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ,
+ lnet_nid_to_nid4(&ni->ni_nid));
else
- tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ_REV, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ_REV,
+ lnet_nid_to_nid4(&ni->ni_nid));
if (tx == NULL) {
rc = -ENOMEM;
tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
if (tx->tx_lntmsg[1] == NULL) {
CERROR("Can't create reply for GET to %s\n",
- libcfs_nid2str(target.nid));
+ libcfs_nidstr(&target->nid));
kgnilnd_tx_done(tx, rc);
rc = -EIO;
goto out;
tx->tx_lntmsg[0] = lntmsg;
if ((reverse_rdma_flag & GNILND_REVERSE_GET) == 0)
- tx->tx_msg.gnm_u.get.gngm_hdr = *hdr;
+ lnet_hdr_to_nid4(hdr, &tx->tx_msg.gnm_u.get.gngm_hdr);
else
- tx->tx_msg.gnm_u.putreq.gnprm_hdr = *hdr;
+ lnet_hdr_to_nid4(hdr,
+ &tx->tx_msg.gnm_u.putreq.gnprm_hdr);
/* rest of tx_msg is setup just before it is sent */
- kgnilnd_launch_tx(tx, net, &target);
+ kgnilnd_launch_tx(tx, net, target);
goto out;
case LNET_MSG_REPLY:
case LNET_MSG_PUT:
break;
if ((reverse_rdma_flag & GNILND_REVERSE_PUT) == 0)
- tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ,
+ lnet_nid_to_nid4(&ni->ni_nid));
else
- tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ_REV, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ_REV,
+ lnet_nid_to_nid4(&ni->ni_nid));
if (tx == NULL) {
rc = -ENOMEM;
tx->tx_lntmsg[0] = lntmsg;
if ((reverse_rdma_flag & GNILND_REVERSE_PUT) == 0)
- tx->tx_msg.gnm_u.putreq.gnprm_hdr = *hdr;
+ lnet_hdr_to_nid4(hdr,
+ &tx->tx_msg.gnm_u.putreq.gnprm_hdr);
else
- tx->tx_msg.gnm_u.get.gngm_hdr = *hdr;
+ lnet_hdr_to_nid4(hdr, &tx->tx_msg.gnm_u.get.gngm_hdr);
/* rest of tx_msg is setup just before it is sent */
- kgnilnd_launch_tx(tx, net, &target);
+ kgnilnd_launch_tx(tx, net, target);
goto out;
}
LASSERTF(nob <= *kgnilnd_tunables.kgn_max_immediate,
"lntmsg 0x%p too large %d\n", lntmsg, nob);
- tx = kgnilnd_new_tx_msg(GNILND_MSG_IMMEDIATE, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_IMMEDIATE,
+ lnet_nid_to_nid4(&ni->ni_nid));
if (tx == NULL) {
rc = -ENOMEM;
goto out;
goto out;
}
- tx->tx_msg.gnm_u.immediate.gnim_hdr = *hdr;
+ lnet_hdr_to_nid4(hdr, &tx->tx_msg.gnm_u.immediate.gnim_hdr);
tx->tx_lntmsg[0] = lntmsg;
- kgnilnd_launch_tx(tx, net, &target);
+ kgnilnd_launch_tx(tx, net, target);
out:
/* use stored value as we could have already finalized lntmsg here from a failed launch */
LBUG();
}
- tx = kgnilnd_new_tx_msg(done_type, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(done_type, lnet_nid_to_nid4(&ni->ni_nid));
if (tx == NULL)
goto failed_0;
failed_1:
kgnilnd_tx_done(tx, rc);
- kgnilnd_nak_rdma(conn, done_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
+ kgnilnd_nak_rdma(conn, done_type, rc, rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
failed_0:
lnet_finalize(lntmsg, rc);
}
CERROR("Couldnt find matching peer %p or conn %p / %p\n",
peer, conn, found_conn);
if (found_conn) {
- CERROR("Unexpected connstamp %#llx(%#llx expected)"
- " from %s", rxmsg->gnm_connstamp,
+ CERROR("Unexpected connstamp %#llx(%#llx expected) from %s\n",
+ rxmsg->gnm_connstamp,
found_conn->gnc_peer_connstamp,
libcfs_nid2str(peer->gnp_nid));
}
case 2:
kgnilnd_dump_blob(D_BUFFS, "bad payload checksum",
&rxmsg[1], rxmsg->gnm_payload_len);
- /* fallthrough */
+ fallthrough;
case 1:
libcfs_debug_dumplog();
break;
/* only error if lntmsg == NULL, otherwise we are just
* short circuiting the rdma process of 0 bytes */
kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
- lntmsg == NULL ? -ENOENT : 0,
- rxmsg->gnm_u.get.gngm_cookie,
- ni->ni_nid);
+ lntmsg == NULL ? -ENOENT : 0,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
RETURN(0);
}
/* sending ACK with sink buff. info */
- tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_ACK, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_ACK,
+ lnet_nid_to_nid4(&ni->ni_nid));
if (tx == NULL) {
kgnilnd_consume_rx(rx);
RETURN(-ENOMEM);
nak_put_req:
/* make sure we send an error back when the PUT fails */
- kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
+ kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
kgnilnd_tx_done(tx, rc);
kgnilnd_consume_rx(rx);
/* only error if lntmsg == NULL, otherwise we are just
* short circuiting the rdma process of 0 bytes */
kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
- lntmsg == NULL ? -ENOENT : 0,
- rxmsg->gnm_u.get.gngm_cookie,
- ni->ni_nid);
+ lntmsg == NULL ? -ENOENT : 0,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
RETURN(0);
}
/* lntmsg can be null when parsing a LNET_GET */
if (lntmsg != NULL) {
/* sending ACK with sink buff. info */
- tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_ACK_REV, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_ACK_REV,
+ lnet_nid_to_nid4(&ni->ni_nid));
if (tx == NULL) {
kgnilnd_consume_rx(rx);
RETURN(-ENOMEM);
} else {
/* No match */
kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
- -ENOENT,
- rxmsg->gnm_u.get.gngm_cookie,
- ni->ni_nid);
+ -ENOENT,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
}
kgnilnd_consume_rx(rx);
nak_get_req_rev:
/* make sure we send an error back when the GET fails */
- kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
+ kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
kgnilnd_tx_done(tx, rc);
kgnilnd_consume_rx(rx);
/* only error if lntmsg == NULL, otherwise we are just
* short circuiting the rdma process of 0 bytes */
kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
- lntmsg == NULL ? -ENOENT : 0,
- rxmsg->gnm_u.get.gngm_cookie,
- ni->ni_nid);
+ lntmsg == NULL ? -ENOENT : 0,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
RETURN(0);
}
} else {
/* No match */
kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
- -ENOENT,
- rxmsg->gnm_u.get.gngm_cookie,
- ni->ni_nid);
+ -ENOENT,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
}
kgnilnd_consume_rx(rx);
RETURN(0);
} else {
/* No match */
kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
- -ENOENT,
- rxmsg->gnm_u.get.gngm_cookie,
- ni->ni_nid);
+ -ENOENT,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
}
kgnilnd_consume_rx(rx);
RETURN(0);
if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
return 0;
- tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP,
+ lnet_nid_to_nid4(&conn->gnc_peer->gnp_net->gnn_ni->ni_nid));
if (tx == NULL)
return 0;
kgnilnd_queue_tx(conn, tx);
if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
return;
- tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP,
+ lnet_nid_to_nid4(&conn->gnc_peer->gnp_net->gnn_ni->ni_nid));
if (tx != NULL) {
int rc;
case GNILND_MSG_PUT_REQ:
case GNILND_MSG_GET_REQ_REV:
tx->tx_msg.gnm_u.putreq.gnprm_cookie = tx->tx_id.txe_cookie;
- /* fallthrough */
+ fallthrough;
case GNILND_MSG_PUT_ACK:
case GNILND_MSG_PUT_REQ_REV:
case GNILND_MSG_GET_ACK_REV:
int repost = 1, saw_complete;
unsigned long timestamp, newest_last_rx, timeout;
int last_seq;
+ struct lnet_hdr hdr;
+ struct lnet_nid srcnid;
ENTRY;
/* Short circuit if the ep_handle is null.
case GNILND_MSG_IMMEDIATE:
/* only get SMSG payload for IMMEDIATE */
atomic64_add(msg->gnm_payload_len, &conn->gnc_device->gnd_short_rxbytes);
- rc = lnet_parse(net->gnn_ni, &msg->gnm_u.immediate.gnim_hdr,
- msg->gnm_srcnid, rx, 0);
+ lnet_hdr_from_nid4(&hdr, &msg->gnm_u.immediate.gnim_hdr);
+ lnet_nid4_to_nid(msg->gnm_srcnid, &srcnid);
+ rc = lnet_parse(net->gnn_ni, &hdr, &srcnid, rx, 0);
repost = rc < 0;
break;
case GNILND_MSG_GET_REQ_REV:
case GNILND_MSG_PUT_REQ:
- rc = lnet_parse(net->gnn_ni, &msg->gnm_u.putreq.gnprm_hdr,
- msg->gnm_srcnid, rx, 1);
+ lnet_hdr_from_nid4(&hdr, &msg->gnm_u.putreq.gnprm_hdr);
+ lnet_nid4_to_nid(msg->gnm_srcnid, &srcnid);
+ rc = lnet_parse(net->gnn_ni, &hdr, &srcnid, rx, 1);
repost = rc < 0;
break;
case GNILND_MSG_GET_NAK_REV:
break;
case GNILND_MSG_PUT_REQ_REV:
case GNILND_MSG_GET_REQ:
- rc = lnet_parse(net->gnn_ni, &msg->gnm_u.get.gngm_hdr,
- msg->gnm_srcnid, rx, 1);
+ lnet_hdr_from_nid4(&hdr, &msg->gnm_u.get.gngm_hdr);
+ lnet_nid4_to_nid(msg->gnm_srcnid, &srcnid);
+ rc = lnet_parse(net->gnn_ni, &hdr, &srcnid, rx, 1);
repost = rc < 0;
break;
if (conn->gnc_ephandle != NULL) {
int rc = 0;
- tx = kgnilnd_new_tx_msg(GNILND_MSG_CLOSE, conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_CLOSE,
+ lnet_nid_to_nid4(&conn->gnc_peer->gnp_net->gnn_ni->ni_nid));
if (tx != NULL) {
tx->tx_msg.gnm_u.completion.gncm_retval = conn->gnc_error;
tx->tx_state = GNILND_TX_WAITING_COMPLETION;
conn = list_first_entry(&dev->gnd_ready_conns, kgn_conn_t, gnc_schedlist);
list_del_init(&conn->gnc_schedlist);
- /*
+ /*
* Since we are processing conn now, we don't need to be on the delaylist any longer.
*/