conn->ibc_reserved_credits--;
}
+ conn->ibc_retry_noop = 0;
if (kiblnd_send_noop(conn)) {
cfs_spin_unlock(&conn->ibc_lock);
cfs_spin_lock(&conn->ibc_lock);
if (tx != NULL)
kiblnd_queue_tx_locked(tx, conn);
+ else if (kiblnd_send_noop(conn)) /* still... */
+ conn->ibc_retry_noop = 1;
}
kiblnd_conn_addref(conn); /* 1 ref for me.... (see b21911) */
}
}
-int
-kiblnd_check_txs (kib_conn_t *conn, cfs_list_t *txs)
+static int
+kiblnd_check_txs_locked (kib_conn_t *conn, cfs_list_t *txs)
{
kib_tx_t *tx;
cfs_list_t *ttmp;
- int timed_out = 0;
-
- cfs_spin_lock(&conn->ibc_lock);
 	cfs_list_for_each (ttmp, txs) {
 		tx = cfs_list_entry (ttmp, kib_tx_t, tx_list);
 
 		if (cfs_time_aftereq (jiffies, tx->tx_deadline)) {
- timed_out = 1;
CERROR("Timed out tx: %s, %lu seconds\n",
kiblnd_queue2str(conn, txs),
cfs_duration_sec(jiffies - tx->tx_deadline));
- break;
+ return 1;
}
}
- cfs_spin_unlock(&conn->ibc_lock);
- return timed_out;
+ return 0;
}
-int
-kiblnd_conn_timed_out (kib_conn_t *conn)
+static int
+kiblnd_conn_timed_out_locked (kib_conn_t *conn)
{
- return kiblnd_check_txs(conn, &conn->ibc_tx_queue) ||
- kiblnd_check_txs(conn, &conn->ibc_tx_noops) ||
- kiblnd_check_txs(conn, &conn->ibc_tx_queue_rsrvd) ||
- kiblnd_check_txs(conn, &conn->ibc_tx_queue_nocred) ||
- kiblnd_check_txs(conn, &conn->ibc_active_txs);
+ return kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
+ kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
+ kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) ||
+ kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) ||
+ kiblnd_check_txs_locked(conn, &conn->ibc_active_txs);
}
void
kiblnd_check_conns (int idx)
{
- cfs_list_t *peers = &kiblnd_data.kib_peers[idx];
- cfs_list_t *ptmp;
- kib_peer_t *peer;
- kib_conn_t *conn;
- cfs_list_t *ctmp;
- unsigned long flags;
+ CFS_LIST_HEAD (closes);
+ CFS_LIST_HEAD (checksends);
+ cfs_list_t *peers = &kiblnd_data.kib_peers[idx];
+ cfs_list_t *ptmp;
+ kib_peer_t *peer;
+ kib_conn_t *conn;
+ cfs_list_t *ctmp;
+ unsigned long flags;
- again:
/* NB. We expect to have a look at all the peers and not find any
- * rdmas to time out, so we just use a shared lock while we
+ * RDMAs to time out, so we just use a shared lock while we
* take a look... */
 	cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
 	cfs_list_for_each (ptmp, peers) {
 		peer = cfs_list_entry (ptmp, kib_peer_t, ibp_list);
cfs_list_for_each (ctmp, &peer->ibp_conns) {
- conn = cfs_list_entry (ctmp, kib_conn_t, ibc_list);
+ int timedout;
+ int retry_noop;
+
+ conn = cfs_list_entry(ctmp, kib_conn_t, ibc_list);
LASSERT (conn->ibc_state == IBLND_CONN_ESTABLISHED);
- /* In case we have enough credits to return via a
- * NOOP, but there were no non-blocking tx descs
- * free to do it last time... */
- kiblnd_check_sends(conn);
+ cfs_spin_lock(&conn->ibc_lock);
- if (!kiblnd_conn_timed_out(conn))
+ retry_noop = conn->ibc_retry_noop;
+ timedout = kiblnd_conn_timed_out_locked(conn);
+ if (!retry_noop && !timedout) {
+ cfs_spin_unlock(&conn->ibc_lock);
continue;
+ }
- /* Handle timeout by closing the whole connection. We
- * can only be sure RDMA activity has ceased once the
- * QP has been modified. */
-
- kiblnd_conn_addref(conn); /* 1 ref for me... */
-
- cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
- flags);
-
- CERROR("Timed out RDMA with %s (%lu)\n",
- libcfs_nid2str(peer->ibp_nid),
- cfs_duration_sec(cfs_time_current() -
- peer->ibp_last_alive));
-
- kiblnd_close_conn(conn, -ETIMEDOUT);
- kiblnd_conn_decref(conn); /* ...until here */
+ if (timedout) {
+ CERROR("Timed out RDMA with %s (%lu): "
+ "c: %u, oc: %u, rc: %u\n",
+ libcfs_nid2str(peer->ibp_nid),
+ cfs_duration_sec(cfs_time_current() -
+ peer->ibp_last_alive),
+ conn->ibc_credits,
+ conn->ibc_outstanding_credits,
+ conn->ibc_reserved_credits);
+ cfs_list_add(&conn->ibc_connd_list, &closes);
+ } else {
+ cfs_list_add(&conn->ibc_connd_list, &checksends);
+ }
+ /* +ref for 'closes' or 'checksends' */
+ kiblnd_conn_addref(conn);
- /* start again now I've dropped the lock */
- goto again;
+ cfs_spin_unlock(&conn->ibc_lock);
}
}
cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ /* Handle timeout by closing the whole
+ * connection. We can only be sure RDMA activity
+ * has ceased once the QP has been modified. */
+ while (!cfs_list_empty(&closes)) {
+ conn = cfs_list_entry(closes.next, kib_conn_t, ibc_connd_list);
+ cfs_list_del(&conn->ibc_connd_list);
+ kiblnd_close_conn(conn, -ETIMEDOUT);
+ kiblnd_conn_decref(conn);
+ }
+
+ /* In case we have enough credits to return via a
+ * NOOP, but there were no non-blocking tx descs
+ * free to do it last time... */
+ while (!cfs_list_empty(&checksends)) {
+ conn = cfs_list_entry(checksends.next, kib_conn_t, ibc_connd_list);
+ cfs_list_del(&conn->ibc_connd_list);
+ kiblnd_check_sends(conn);
+ kiblnd_conn_decref(conn);
+ }
}
void