* Copyright (C) 2009-2012 Cray, Inc.
*
* Derived from work by Eric Barton <eric@bartonsoftware.com>
+ * Author: James Shimek <jshimek@cray.com>
* Author: Nic Henke <nic@cray.com>
*
* This file is part of Lustre, http://www.lustre.org.
*
*/
+#include <asm/page.h>	/* page_to_phys() */
#include <linux/nmi.h>
#include "gnilnd.h"
* == 0: reschedule if someone marked it WANTS_SCHED
* > 0: force a reschedule */
/* Return code 0 means it did not schedule the conn, 1
- * means it succesfully scheduled the conn.
+ * means it successfully scheduled the conn.
*/
int
#if 0
KGNILND_POISON(tx, 0x5a, sizeof(kgn_tx_t));
#endif
+ CDEBUG(D_MALLOC, "slab-freed 'tx': %lu at %p.\n", sizeof(*tx), tx);
kmem_cache_free(kgnilnd_data.kgn_tx_cache, tx);
- CDEBUG(D_MALLOC, "slab-freed 'tx': %lu at %p.\n",
- sizeof(*tx), tx);
}
kgn_tx_t *
* gni_smsg_send to send that as the payload */
LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
- LASSERT(nob >= 0);
if (nob == 0) {
tx->tx_buffer = NULL;
"nkiov %u offset %u\n",
kiov->kiov_page, kiov->kiov_offset, kiov->kiov_len, nob, nkiov, offset);
- phys->address = lnet_page2phys(kiov->kiov_page);
+ phys->address = page_to_phys(kiov->kiov_page);
phys++;
kiov++;
nkiov--;
if (tx->tx_buffer_copy)
gmp->gmp_map_key = tx->tx_buffer_copy_map_key;
else
- gmp->gmp_map_key = tx->tx_map_key;
+ gmp->gmp_map_key = tx->tx_map_key;
atomic_inc(&conn->gnc_device->gnd_n_mdd_held);
rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, 0);
LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
} else {
- rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, hold_timeout);
- LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
+ rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, hold_timeout);
+ LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
}
tx->tx_buftype--;
libcfs_nid2str(conn->gnc_peer->gnp_nid) : "<?>",
tx->tx_id.txe_smsg_id, tx->tx_id.txe_idx,
kgnilnd_tx_state2str(tx->tx_list_state),
- cfs_duration_sec((long)jiffies - tx->tx_qtime));
+ cfs_duration_sec((unsigned long)jiffies - tx->tx_qtime));
}
/* The error codes determine if we hold onto the MDD */
}
if (time_after_eq(now, newest_last_rx + GNILND_TIMEOUTRX(timeout))) {
- GNIDBG_CONN(D_NETERROR|D_CONSOLE, conn, "Cant send to %s after timeout lapse of %lu; TO %lu",
+ GNIDBG_CONN(D_NETERROR|D_CONSOLE, conn,
+ "Cant send to %s after timeout lapse of %lu; TO %lu\n",
libcfs_nid2str(conn->gnc_peer->gnp_nid),
cfs_duration_sec(now - newest_last_rx),
cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
if (unlikely(tx->tx_state & GNILND_TX_FAIL_SMSG)) {
rrc = cfs_fail_val ? cfs_fail_val : GNI_RC_NOT_DONE;
} else {
- rrc = kgnilnd_smsg_send(conn->gnc_ephandle,
- msg, sizeof(*msg), immediate, immediatenob,
- tx->tx_id.txe_smsg_id);
+ rrc = kgnilnd_smsg_send(conn->gnc_ephandle,
+ msg, sizeof(*msg), immediate,
+ immediatenob,
+ tx->tx_id.txe_smsg_id);
}
switch (rrc) {
void
kgnilnd_queue_tx(kgn_conn_t *conn, kgn_tx_t *tx)
{
- int rc;
+ int rc = 0;
int add_tail = 1;
/* set the tx_id here, we delay it until we have an actual conn
kgn_peer_t *new_peer = NULL;
kgn_conn_t *conn = NULL;
int rc;
+ int node_state;
ENTRY;
CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
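+ /* look up the node state reported by RCA now; it is handed to
+ * kgnilnd_create_peer_safe() below so the new peer starts with the
+ * correct up/down status */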
+ node_state = kgnilnd_get_node_state(LNET_NIDADDR(target->nid));
+
/* NB - this will not block during normal operations -
* the only writer of this is in the startup/shutdown path. */
rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
/* ignore previous peer entirely - we cycled the lock, so we
* will create new peer and at worst drop it if peer is still
* in the tables */
- rc = kgnilnd_create_peer_safe(&new_peer, target->nid, net);
+ rc = kgnilnd_create_peer_safe(&new_peer, target->nid, net, node_state);
if (rc != 0) {
up_read(&kgnilnd_data.kgn_net_rw_sem);
GOTO(no_peer, rc);
* if we don't find it, add our new one to the list */
kgnilnd_add_peer_locked(target->nid, new_peer, &peer);
+ /* don't create a connection if the peer is not up */
+ if (peer->gnp_down != GNILND_RCA_NODE_UP) {
+ write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
+ rc = -ENETRESET;
+ GOTO(no_peer, rc);
+ }
+
conn = kgnilnd_find_or_create_conn_locked(peer);
+
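+ /* fault injection point - simulate a dropped connection dgram by
+ * bailing out before the tx is ever queued */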
+ if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DGRAM_DROP_TX)) {
+ write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
+ GOTO(no_peer, rc);
+ }
+
if (conn != NULL) {
/* oh hey, found a conn now... magical */
kgnilnd_queue_tx(conn, tx);
RETURN_EXIT;
}
-void
+int
kgnilnd_rdma(kgn_tx_t *tx, int type,
kgn_rdma_desc_t *sink, unsigned int nob, __u64 cookie)
{
/* allocation of buffer failed nak the rdma */
kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
kgnilnd_tx_done(tx, -EFAULT);
- return;
+ return 0;
}
kgnilnd_admin_addref(kgnilnd_data.kgn_rev_copy_buff);
rc = kgnilnd_mem_register(conn->gnc_device->gnd_handle, (__u64)tx->tx_buffer_copy, desc_nob, NULL, GNI_MEM_READWRITE, &tx->tx_buffer_copy_map_key);
tx->tx_buffer_copy = NULL;
kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
kgnilnd_tx_done(tx, -EFAULT);
- return;
+ return 0;
}
}
desc_map_key = tx->tx_buffer_copy_map_key;
if (nob == 0) {
kgnilnd_queue_tx(conn, tx);
- return;
+ return 0;
}
/* Don't lie (CLOSE == RDMA idle) */
LASSERTF(!conn->gnc_close_sent, "tx %p on conn %p after close sent %d\n",
tx, conn, conn->gnc_close_sent);
- GNIDBG_TX(D_NET, tx, "Post RDMA type 0x%02x dlvr_mode 0x%x cookie:"LPX64,
- type, tx->tx_rdma_desc.dlvr_mode, cookie);
+ GNIDBG_TX(D_NET, tx, "Post RDMA type 0x%02x conn %p dlvr_mode "
+ "0x%x cookie:"LPX64,
+ type, conn, tx->tx_rdma_desc.dlvr_mode, cookie);
/* set CQ dedicated for RDMA */
tx->tx_rdma_desc.src_cq_hndl = conn->gnc_device->gnd_snd_rdma_cqh;
rrc = kgnilnd_post_rdma(conn->gnc_ephandle, &tx->tx_rdma_desc);
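+ /* no GNI resources to post the RDMA - unmap the buffer and park the
+ * tx back on the MAPQ so the device scheduler retries it later */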
+ if (rrc == GNI_RC_ERROR_RESOURCE) {
+ mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_unmap_buffer(tx, 0);
+
+ if (tx->tx_buffer_copy != NULL) {
+ vfree(tx->tx_buffer_copy);
+ tx->tx_buffer_copy = NULL;
+ }
+
+ spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
+ kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn,
+ GNILND_TX_MAPQ, 0);
+ spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
+ kgnilnd_schedule_device(tx->tx_conn->gnc_device);
+ return -EAGAIN;
+ }
+
spin_lock(&conn->gnc_list_lock);
kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_LIVE_RDMAQ, 1);
tx->tx_qtime = jiffies;
/* XXX Nic: is this a place we should handle more errors for
* robustness sake */
LASSERT(rrc == GNI_RC_SUCCESS);
-
+ return 0;
}
kgn_rx_t *
/* if we are eager, free the cache alloc'd msg */
if (unlikely(rx->grx_eager)) {
LIBCFS_FREE(rxmsg, sizeof(*rxmsg) + *kgnilnd_tunables.kgn_max_immediate);
+ atomic_dec(&kgnilnd_data.kgn_neager_allocs);
/* release ref from eager_recv */
kgnilnd_conn_decref(conn);
/* we have no credits or buffers for this message, so copy it
* somewhere for a later kgnilnd_recv */
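+ /* cap the number of outstanding eager buffers; once the tunable
+ * credit limit is hit, fail the rx rather than allocate more */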
+ if (atomic_read(&kgnilnd_data.kgn_neager_allocs) >=
+ *kgnilnd_tunables.kgn_eager_credits) {
+ CERROR("Out of eager credits to %s\n",
+ libcfs_nid2str(conn->gnc_peer->gnp_nid));
+ return -ENOMEM;
+ }
+
+ atomic_inc(&kgnilnd_data.kgn_neager_allocs);
+
LIBCFS_ALLOC(eagermsg, sizeof(*eagermsg) + *kgnilnd_tunables.kgn_max_immediate);
if (eagermsg == NULL) {
kgnilnd_conn_decref(conn);
tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_nob = mlen;
tx->tx_lntmsg[0] = lntmsg; /* finalize this on RDMA_DONE */
-
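+ /* stamp the queue time so this tx ages correctly in the timeout
+ * and debug paths */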
+ tx->tx_qtime = jiffies;
/* we only queue from kgnilnd_recv - we might get called from other contexts
* and we don't want to block the mutex in those cases */
int rc = 0;
int count = 0;
int reconnect;
+ int to_reconn;
short releaseconn = 0;
unsigned long first_rx = 0;
+ int purgatory_conn_cnt = 0;
CDEBUG(D_NET, "checking peer 0x%p->%s for timeouts; interval %lus\n",
peer, libcfs_nid2str(peer->gnp_nid),
reconnect = (peer->gnp_down == GNILND_RCA_NODE_UP) &&
(atomic_read(&peer->gnp_dirty_eps) == 0);
+ /* fast reconnect after a timeout */
+ to_reconn = !conn &&
+ (peer->gnp_last_errno == -ETIMEDOUT) &&
+ *kgnilnd_tunables.kgn_fast_reconn;
+
/* if we are not connected and there are tx on the gnp_tx_queue waiting
* to be sent, we'll check the reconnect interval and fire up a new
* connection request */
- if ((peer->gnp_connecting == GNILND_PEER_IDLE) &&
+ if (reconnect &&
+ (peer->gnp_connecting == GNILND_PEER_IDLE) &&
(time_after_eq(jiffies, peer->gnp_reconnect_time)) &&
- !list_empty(&peer->gnp_tx_queue) && reconnect) {
+ (!list_empty(&peer->gnp_tx_queue) || to_reconn)) {
CDEBUG(D_NET, "starting connect to %s\n",
libcfs_nid2str(peer->gnp_nid));
cfs_duration_sec(waiting));
kgnilnd_detach_purgatory_locked(conn, souls);
+ } else {
+ purgatory_conn_cnt++;
+ }
+ }
+ }
+
+ /* If we have too many connections in purgatory we could run out of
+ * resources. Limit the number of connections to a tunable number,
+ * clean up to the minimum all in one fell swoop. There are
+ * situations where DVS will retry tx's and we can eat up several
+ * hundred connection requests at once.
+ */
+ if (purgatory_conn_cnt > *kgnilnd_tunables.kgn_max_purgatory) {
+ list_for_each_entry_safe(conn, connN, &peer->gnp_conns,
+ gnc_list) {
+ if (conn->gnc_in_purgatory &&
+ conn->gnc_state == GNILND_CONN_DONE) {
+ CDEBUG(D_NET, "Dropping held resource due to"
+ " resource limits being hit\n");
+ kgnilnd_detach_purgatory_locked(conn, souls);
+
+ if (purgatory_conn_cnt-- <
+ *kgnilnd_tunables.kgn_max_purgatory)
+ break;
}
}
}
lnet_copy_flat2kiov(
niov, kiov, offset,
nob,
- tx->tx_buffer_copy, tx->tx_offset, nob);
+ tx->tx_buffer_copy + tx->tx_offset, 0, nob);
} else {
memcpy(tx->tx_buffer, tx->tx_buffer_copy + tx->tx_offset, nob);
}
long num_processed = 0;
kgn_conn_t *conn = NULL;
kgn_tx_t *tx = NULL;
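+ /* RDMA descriptor, length and cookie are saved off below so the
+ * retry and NAK paths can share a single call site */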
+ kgn_rdma_desc_t *rdesc;
+ unsigned int rnob;
+ __u64 rcookie;
for (;;) {
/* make sure we don't keep looping if we need to reset */
/* drop ref from kgnilnd_validate_tx_ev_id */
kgnilnd_admin_decref(conn->gnc_tx_in_use);
kgnilnd_conn_decref(conn);
+
continue;
}
if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE ||
tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) {
- if (should_retry) {
- kgnilnd_rdma(tx, tx->tx_msg.gnm_type,
- &tx->tx_putinfo.gnpam_desc,
- tx->tx_putinfo.gnpam_desc.gnrd_nob,
- tx->tx_putinfo.gnpam_dst_cookie);
- } else {
- kgnilnd_nak_rdma(conn, tx->tx_msg.gnm_type,
- -EFAULT,
- tx->tx_putinfo.gnpam_dst_cookie,
- tx->tx_msg.gnm_srcnid);
- kgnilnd_tx_done(tx, -EFAULT);
- }
+ rdesc = &tx->tx_putinfo.gnpam_desc;
+ rnob = tx->tx_putinfo.gnpam_desc.gnrd_nob;
+ rcookie = tx->tx_putinfo.gnpam_dst_cookie;
} else {
- if (should_retry) {
- kgnilnd_rdma(tx, tx->tx_msg.gnm_type,
- &tx->tx_getinfo.gngm_desc,
- tx->tx_lntmsg[0]->msg_len,
- tx->tx_getinfo.gngm_cookie);
- } else {
- kgnilnd_nak_rdma(conn, tx->tx_msg.gnm_type,
- -EFAULT,
- tx->tx_getinfo.gngm_cookie,
- tx->tx_msg.gnm_srcnid);
- kgnilnd_tx_done(tx, -EFAULT);
- }
+ rdesc = &tx->tx_getinfo.gngm_desc;
+ rnob = tx->tx_lntmsg[0]->msg_len;
+ rcookie = tx->tx_getinfo.gngm_cookie;
+ }
+
+ if (should_retry) {
+ kgnilnd_rdma(tx,
+ tx->tx_msg.gnm_type,
+ rdesc,
+ rnob, rcookie);
+ } else {
+ kgnilnd_nak_rdma(conn,
+ tx->tx_msg.gnm_type,
+ -EFAULT,
+ rcookie,
+ tx->tx_msg.gnm_srcnid);
+ kgnilnd_tx_done(tx, -EFAULT);
+ kgnilnd_close_conn(conn, -ECOMM);
}
/* drop ref from kgnilnd_validate_tx_ev_id */
rc = kgnilnd_map_buffer(tx);
}
- /* rc should be 0 if we mapped succesfully here, if non-zero we are queueing */
+ /* rc should be 0 if we mapped successfully here, if non-zero
+ * we are queueing */
if (rc != 0) {
/* if try_map_if_full set, they handle requeuing */
if (unlikely(try_map_if_full)) {
* remote node where the RDMA will be started
* Special case -EAGAIN logic - this should just be queued as if the mapping couldn't
* be satisfied. The rest of the errors are "hard" errors that require
- * upper layers to handle themselves */
+ * upper layers to handle themselves.
+ * If kgnilnd_post_rdma returns a resource error, kgnilnd_rdma will put
+ * the tx back on the TX_MAPQ. When this tx is pulled back off the MAPQ,
+ * its gnm_type will now be GNILND_MSG_PUT_DONE or
+ * GNILND_MSG_GET_DONE_REV.
+ */
case GNILND_MSG_GET_REQ:
tx->tx_msg.gnm_u.get.gngm_desc.gnrd_key = tx->tx_map_key;
tx->tx_msg.gnm_u.get.gngm_cookie = tx->tx_id.txe_cookie;
break;
/* PUT_REQ and GET_DONE are where we do the actual RDMA */
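+ /* PUT_DONE falls through to PUT_REQ: a tx requeued by the -EAGAIN
+ * path comes back off the MAPQ with gnm_type already set to
+ * GNILND_MSG_PUT_DONE */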
+ case GNILND_MSG_PUT_DONE:
case GNILND_MSG_PUT_REQ:
- kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE,
+ rc = kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE,
&tx->tx_putinfo.gnpam_desc,
tx->tx_putinfo.gnpam_desc.gnrd_nob,
tx->tx_putinfo.gnpam_dst_cookie);
+ RETURN(try_map_if_full ? rc : 0);
break;
case GNILND_MSG_GET_DONE:
- kgnilnd_rdma(tx, GNILND_MSG_GET_DONE,
+ rc = kgnilnd_rdma(tx, GNILND_MSG_GET_DONE,
&tx->tx_getinfo.gngm_desc,
tx->tx_lntmsg[0]->msg_len,
tx->tx_getinfo.gngm_cookie);
-
+ RETURN(try_map_if_full ? rc : 0);
break;
case GNILND_MSG_PUT_REQ_REV:
tx->tx_msg.gnm_u.get.gngm_desc.gnrd_key = tx->tx_map_key;
rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
break;
case GNILND_MSG_PUT_DONE_REV:
- kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE_REV,
+ rc = kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE_REV,
&tx->tx_getinfo.gngm_desc,
- tx->tx_lntmsg[0]->msg_len,
+ tx->tx_nob,
tx->tx_getinfo.gngm_cookie);
+ RETURN(try_map_if_full ? rc : 0);
break;
case GNILND_MSG_GET_ACK_REV:
tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_key = tx->tx_map_key;
/* redirect to FMAQ on failure, no need to infinite loop here in MAPQ */
rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
break;
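+ /* GET_DONE_REV falls through for the same -EAGAIN requeue case as
+ * PUT_DONE above */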
+ case GNILND_MSG_GET_DONE_REV:
case GNILND_MSG_GET_REQ_REV:
- kgnilnd_rdma(tx, GNILND_MSG_GET_DONE_REV,
+ rc = kgnilnd_rdma(tx, GNILND_MSG_GET_DONE_REV,
&tx->tx_putinfo.gnpam_desc,
tx->tx_putinfo.gnpam_desc.gnrd_nob,
tx->tx_putinfo.gnpam_dst_cookie);
-
+ RETURN(try_map_if_full ? rc : 0);
break;
}
{
int complete = 0;
kgn_conn_t *conn = tx->tx_conn;
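+ /* snapshot the mapping details up front for the EFAULT diagnostics
+ * below */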
+ __u64 nob = tx->tx_nob;
+ __u32 physnop = tx->tx_phys_npages;
+ int id = tx->tx_id.txe_smsg_id;
+ int buftype = tx->tx_buftype;
+ gni_mem_handle_t hndl;
+ hndl.qword1 = tx->tx_map_key.qword1;
+ hndl.qword2 = tx->tx_map_key.qword2;
spin_lock(&conn->gnc_list_lock);
tx->tx_rc = rc;
tx->tx_state &= ~GNILND_TX_WAITING_REPLY;
+ if (rc == -EFAULT) {
+ CDEBUG(D_NETERROR, "Error %d TX data: TX %p tx_id %x "
+ "nob %16"LPF64"u physnop %8d buffertype %#8x "
+ "MemHandle "LPX64"."LPX64"\n",
+ rc, tx, id, nob, physnop, buftype,
+ hndl.qword1, hndl.qword2);
+
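+ /* the efault_lbug tunable escalates an EFAULT into an LBUG so the
+ * failure state can be captured for debugging */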
+ if (*kgnilnd_tunables.kgn_efault_lbug) {
+ GNIDBG_TOMSG(D_NETERROR, &tx->tx_msg,
+ "error %d on tx 0x%p->%s id %u/%d state %s age %ds",
+ rc, tx, conn ?
+ libcfs_nid2str(conn->gnc_peer->gnp_nid) : "<?>",
+ tx->tx_id.txe_smsg_id, tx->tx_id.txe_idx,
+ kgnilnd_tx_state2str(tx->tx_list_state),
+ cfs_duration_sec((unsigned long) jiffies - tx->tx_qtime));
+ LBUG();
+ }
+ }
+
if (!(tx->tx_state & GNILND_TX_WAITING_COMPLETION)) {
kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
/* sample under lock as follow on steps require gnc_list_lock
if (rrc == GNI_RC_NOT_DONE) {
mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
- CDEBUG(D_INFO, "SMSG RX empty\n");
+ CDEBUG(D_INFO, "SMSG RX empty conn 0x%p\n", conn);
RETURN_EXIT;
}
RETURN_EXIT;
}
- GNIDBG_MSG(D_INFO, msg, "SMSG RX on %p from %s",
- conn, libcfs_nid2str(peer->gnp_nid));
+ GNIDBG_MSG(D_INFO, msg, "SMSG RX on %p", conn);
timestamp = conn->gnc_last_rx;
last_seq = conn->gnc_rx_seq;
* mapped so we can reset our timers */
dev->gnd_map_attempt = 0;
continue;
+ } else if (rc == -EAGAIN) {
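+ /* -EAGAIN means the RDMA post ran out of resources; arm the map
+ * timer and come back to this queue later */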
+ spin_lock(&dev->gnd_lock);
+ mod_timer(&dev->gnd_map_timer, dev->gnd_next_map);
+ spin_unlock(&dev->gnd_lock);
+ GOTO(get_out_mapped, rc);
} else if (rc != -ENOMEM) {
/* carp, failure we can't handle */
kgnilnd_tx_done(tx, rc);
* yet. Cycle this conn back through
* the scheduler. */
kgnilnd_schedule_conn(conn);
- } else
- kgnilnd_complete_closed_conn(conn);
-
+ } else {
+ kgnilnd_complete_closed_conn(conn);
+ }
up_write(&dev->gnd_conn_sem);
} else if (unlikely(conn->gnc_state == GNILND_CONN_DESTROY_EP)) {
/* DESTROY_EP set in kgnilnd_conn_decref on gnc_refcount = 1 */
DEFINE_WAIT(wait);
dev = &kgnilnd_data.kgn_devices[(threadno + 1) % kgnilnd_data.kgn_ndevs];
+
cfs_block_allsigs();
/* all gnilnd threads need to run fairly urgently */