tx->tx_queued = 0;
if (msg->ibm_type == IBLND_MSG_NOOP &&
- (!kiblnd_send_noop(conn) || /* redundant NOOP */
+ (!kiblnd_need_noop(conn) || /* redundant NOOP */
(IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
/* OK to drop when posted enough NOOPs, since
conn->ibc_reserved_credits--;
}
- if (kiblnd_send_noop(conn)) {
+ if (kiblnd_need_noop(conn)) {
cfs_spin_unlock(&conn->ibc_lock);
tx = kiblnd_get_idle_tx(ni);
kiblnd_check_sends(conn);
}
+/* Bind @cmid to a free privileged local port (< PROT_SOCK) and kick off
+ * RDMA address resolution towards @dstaddr.  Scans the reserved port
+ * range downwards from PROT_SOCK-1, retrying on -EADDRINUSE /
+ * -EADDRNOTAVAIL and failing fast on any other error.
+ * Returns 0 on success (resolution started, bound port logged), or the
+ * last error from rdma_set_reuseaddr()/rdma_resolve_addr() on failure.
+ * NOTE(review): assumes srcaddr is pre-filled except sin_port — confirm
+ * at the caller. */
+static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
+ struct sockaddr_in *srcaddr,
+ struct sockaddr_in *dstaddr,
+ int timeout_ms)
+{
+ unsigned short port;
+ int rc;
+
+#ifdef HAVE_OFED_RDMA_SET_REUSEADDR
+ /* allow the port to be reused */
+ rc = rdma_set_reuseaddr(cmid, 1);
+ if (rc != 0) {
+ CERROR("Unable to set reuse on cmid: %d\n", rc);
+ return rc;
+ }
+#endif
+
+ /* look for a free privileged port */
+ for (port = PROT_SOCK-1; port > 0; port--) {
+ srcaddr->sin_port = htons(port);
+ rc = rdma_resolve_addr(cmid,
+ (struct sockaddr *)srcaddr,
+ (struct sockaddr *)dstaddr,
+ timeout_ms);
+ if (rc == 0) {
+ CDEBUG(D_NET, "bound to port %hu\n", port);
+ return 0;
+ } else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) {
+ /* port taken/unusable: keep walking down the range */
+ CDEBUG(D_NET, "bind to port %hu failed: %d\n",
+ port, rc);
+ } else {
+ /* any other failure is fatal for this attempt */
+ return rc;
+ }
+ }
+
+ /* every privileged port refused the bind; report last rc */
+ CERROR("Failed to bind to a free privileged port\n")ERR;
+#ifndef HAVE_OFED_RDMA_SET_REUSEADDR
+ CERROR("You may need IB verbs that supports rdma_set_reuseaddr()\n");
+#endif
+ return rc;
+}
+
void
kiblnd_connect_peer (kib_peer_t *peer)
{
LASSERT (net != NULL);
LASSERT (peer->ibp_connecting > 0);
- cmid = rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP);
+ cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP,
+ IB_QPT_RC);
+
if (IS_ERR(cmid)) {
CERROR("Can't create CMID for %s: %ld\n",
libcfs_nid2str(peer->ibp_nid), PTR_ERR(cmid));
kiblnd_peer_addref(peer); /* cmid's ref */
- rc = rdma_resolve_addr(cmid,
- (struct sockaddr *)&srcaddr,
- (struct sockaddr *)&dstaddr,
- *kiblnd_tunables.kib_timeout * 1000);
- if (rc == 0) {
- LASSERT (cmid->device != NULL);
- CDEBUG(D_NET, "%s: connection bound to %s:%u.%u.%u.%u:%s\n",
- libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
- HIPQUAD(dev->ibd_ifip), cmid->device->name);
- return;
+ if (*kiblnd_tunables.kib_use_priv_port) {
+ rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
+ *kiblnd_tunables.kib_timeout * 1000);
+ } else {
+ rc = rdma_resolve_addr(cmid,
+ (struct sockaddr *)&srcaddr,
+ (struct sockaddr *)&dstaddr,
+ *kiblnd_tunables.kib_timeout * 1000);
+ }
+ if (rc != 0) {
+ /* Can't initiate address resolution: */
+ CERROR("Can't resolve addr for %s: %d\n",
+ libcfs_nid2str(peer->ibp_nid), rc);
+ goto failed2;
}
- /* Can't initiate address resolution: */
- CERROR("Can't resolve addr for %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), rc);
+ LASSERT (cmid->device != NULL);
+ CDEBUG(D_NET, "%s: connection bound to %s:%u.%u.%u.%u:%s\n",
+ libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
+ HIPQUAD(dev->ibd_ifip), cmid->device->name);
+
+ return;
+ failed2:
kiblnd_peer_decref(peer); /* cmid's ref */
rdma_destroy_id(cmid);
failed:
int
kiblnd_thread_start (int (*fn)(void *arg), void *arg)
{
- long pid = cfs_kernel_thread (fn, arg, 0);
+ long pid = cfs_create_thread (fn, arg, 0);
if (pid < 0)
return ((int)pid);
int version = IBLND_MSG_VERSION;
unsigned long flags;
int rc;
-
+ struct sockaddr_in *peer_addr;
LASSERT (!cfs_in_interrupt());
/* cmid inherits 'context' from the corresponding listener id */
rej.ibr_why = IBLND_REJECT_FATAL;
rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
+ peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
+ if (*kiblnd_tunables.kib_require_priv_port &&
+ ntohs(peer_addr->sin_port) >= PROT_SOCK) {
+ __u32 ip = ntohl(peer_addr->sin_addr.s_addr);
+ CERROR("Peer's port (%u.%u.%u.%u:%hu) is not privileged\n",
+ HIPQUAD(ip), ntohs(peer_addr->sin_port));
+ goto failed;
+ }
+
if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
CERROR("Short connection request\n");
goto failed;
}
}
-int
-kiblnd_check_txs (kib_conn_t *conn, cfs_list_t *txs)
+static int
+kiblnd_check_txs_locked(kib_conn_t *conn, cfs_list_t *txs)
{
kib_tx_t *tx;
cfs_list_t *ttmp;
- int timed_out = 0;
-
- cfs_spin_lock(&conn->ibc_lock);
cfs_list_for_each (ttmp, txs) {
tx = cfs_list_entry (ttmp, kib_tx_t, tx_list);
}
if (cfs_time_aftereq (jiffies, tx->tx_deadline)) {
- timed_out = 1;
CERROR("Timed out tx: %s, %lu seconds\n",
kiblnd_queue2str(conn, txs),
cfs_duration_sec(jiffies - tx->tx_deadline));
- break;
+ return 1;
}
}
- cfs_spin_unlock(&conn->ibc_lock);
- return timed_out;
+ return 0;
}
-int
-kiblnd_conn_timed_out (kib_conn_t *conn)
+/* NOTE(review): renamed with a _locked suffix because the per-queue
+ * cfs_spin_lock(&conn->ibc_lock) was hoisted out of
+ * kiblnd_check_txs_locked(); the caller must now hold ibc_lock across
+ * this whole scan — confirm at every call site. */
+static int
+kiblnd_conn_timed_out_locked(kib_conn_t *conn)
 {
- return kiblnd_check_txs(conn, &conn->ibc_tx_queue) ||
- kiblnd_check_txs(conn, &conn->ibc_tx_noops) ||
- kiblnd_check_txs(conn, &conn->ibc_tx_queue_rsrvd) ||
- kiblnd_check_txs(conn, &conn->ibc_tx_queue_nocred) ||
- kiblnd_check_txs(conn, &conn->ibc_active_txs);
+ return kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
+ kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
+ kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) ||
+ kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) ||
+ kiblnd_check_txs_locked(conn, &conn->ibc_active_txs);
 }
void
kiblnd_check_conns (int idx)
{
- cfs_list_t *peers = &kiblnd_data.kib_peers[idx];
- cfs_list_t *ptmp;
- kib_peer_t *peer;
- kib_conn_t *conn;
- cfs_list_t *ctmp;
- unsigned long flags;
+ CFS_LIST_HEAD (closes);
+ CFS_LIST_HEAD (checksends);
+ cfs_list_t *peers = &kiblnd_data.kib_peers[idx];
+ cfs_list_t *ptmp;
+ kib_peer_t *peer;
+ kib_conn_t *conn;
+ cfs_list_t *ctmp;
+ unsigned long flags;
- again:
/* NB. We expect to have a look at all the peers and not find any
- * rdmas to time out, so we just use a shared lock while we
+ * RDMAs to time out, so we just use a shared lock while we
* take a look... */
cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
peer = cfs_list_entry (ptmp, kib_peer_t, ibp_list);
cfs_list_for_each (ctmp, &peer->ibp_conns) {
- conn = cfs_list_entry (ctmp, kib_conn_t, ibc_list);
+ int timedout;
+ int sendnoop;
+
+ conn = cfs_list_entry(ctmp, kib_conn_t, ibc_list);
LASSERT (conn->ibc_state == IBLND_CONN_ESTABLISHED);
- /* In case we have enough credits to return via a
- * NOOP, but there were no non-blocking tx descs
- * free to do it last time... */
- kiblnd_check_sends(conn);
+ cfs_spin_lock(&conn->ibc_lock);
- if (!kiblnd_conn_timed_out(conn))
+ sendnoop = kiblnd_need_noop(conn);
+ timedout = kiblnd_conn_timed_out_locked(conn);
+ if (!sendnoop && !timedout) {
+ cfs_spin_unlock(&conn->ibc_lock);
continue;
+ }
- /* Handle timeout by closing the whole connection. We
- * can only be sure RDMA activity has ceased once the
- * QP has been modified. */
-
- kiblnd_conn_addref(conn); /* 1 ref for me... */
-
- cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
- flags);
-
- CERROR("Timed out RDMA with %s (%lu)\n",
- libcfs_nid2str(peer->ibp_nid),
- cfs_duration_sec(cfs_time_current() -
- peer->ibp_last_alive));
-
- kiblnd_close_conn(conn, -ETIMEDOUT);
- kiblnd_conn_decref(conn); /* ...until here */
+ if (timedout) {
+ CERROR("Timed out RDMA with %s (%lu): "
+ "c: %u, oc: %u, rc: %u\n",
+ libcfs_nid2str(peer->ibp_nid),
+ cfs_duration_sec(cfs_time_current() -
+ peer->ibp_last_alive),
+ conn->ibc_credits,
+ conn->ibc_outstanding_credits,
+ conn->ibc_reserved_credits);
+ cfs_list_add(&conn->ibc_connd_list, &closes);
+ } else {
+ cfs_list_add(&conn->ibc_connd_list,
+ &checksends);
+ }
+ /* +ref for 'closes' or 'checksends' */
+ kiblnd_conn_addref(conn);
- /* start again now I've dropped the lock */
- goto again;
+ cfs_spin_unlock(&conn->ibc_lock);
}
}
cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ /* Handle timeout by closing the whole
+ * connection. We can only be sure RDMA activity
+ * has ceased once the QP has been modified. */
+ while (!cfs_list_empty(&closes)) {
+ conn = cfs_list_entry(closes.next,
+ kib_conn_t, ibc_connd_list);
+ cfs_list_del(&conn->ibc_connd_list);
+ kiblnd_close_conn(conn, -ETIMEDOUT);
+ kiblnd_conn_decref(conn);
+ }
+
+ /* In case we have enough credits to return via a
+ * NOOP, but there were no non-blocking tx descs
+ * free to do it last time... */
+ while (!cfs_list_empty(&checksends)) {
+ conn = cfs_list_entry(checksends.next,
+ kib_conn_t, ibc_connd_list);
+ cfs_list_del(&conn->ibc_connd_list);
+ kiblnd_check_sends(conn);
+ kiblnd_conn_decref(conn);
+ }
}
void