fps = net->ibn_fmr_ps[cpt];
rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->tx_fmr);
if (rc != 0) {
- CERROR("Can't map %u pages: %d\n", nob, rc);
+ CERROR("Can't map %u bytes (%u/%u)s: %d\n", nob,
+ tx->tx_nfrags, rd->rd_nfrags, rc);
return rc;
}
* from the first send; hence the ++ rather than = below. */
tx->tx_sending++;
list_add(&tx->tx_list, &conn->ibc_active_txs);
- tx->tx_on_activeq = ktime_get();
-
- /* I'm still holding ibc_lock! */
- if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
- rc = -ECONNABORTED;
- } else if (tx->tx_pool->tpo_pool.po_failed ||
- conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
- /* close_conn will launch failover */
- rc = -ENETDOWN;
- } else {
+
+ /* I'm still holding ibc_lock! */
+ if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
+ rc = -ECONNABORTED;
+ } else if (tx->tx_pool->tpo_pool.po_failed ||
+ conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
+ /* close_conn will launch failover */
+ rc = -ENETDOWN;
+ } else {
struct kib_fast_reg_descriptor *frd = tx->tx_fmr.fmr_frd;
struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
struct ib_send_wr *wr = &tx->tx_wrq[0].wr;
spin_unlock(&conn->ibc_lock);
}
-static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
- struct sockaddr_in *srcaddr,
- struct sockaddr_in *dstaddr,
- int timeout_ms)
+static int
+kiblnd_resolve_addr_cap(struct rdma_cm_id *cmid,
+ struct sockaddr_in *srcaddr,
+ struct sockaddr_in *dstaddr,
+ int timeout_ms)
{
unsigned short port;
int rc;
+ LASSERT(capable(CAP_NET_BIND_SERVICE));
+
/* allow the port to be reused */
rc = rdma_set_reuseaddr(cmid, 1);
if (rc != 0) {
return rc;
}
+static int
+kiblnd_resolve_addr(struct rdma_cm_id *cmid,
+		    struct sockaddr_in *srcaddr,
+		    struct sockaddr_in *dstaddr,
+		    int timeout_ms)
+{
+	/* Binding a privileged (<1024) source port needs CAP_NET_BIND_SERVICE.
+	 * If the current task lacks it, temporarily override the task creds
+	 * with a copy that has the capability raised, perform the address
+	 * resolution, then restore the original credentials.
+	 */
+	const struct cred *old_creds = NULL;
+	struct cred *new_creds = NULL;
+	int rc;
+
+	if (!capable(CAP_NET_BIND_SERVICE)) {
+		new_creds = prepare_creds();
+		if (!new_creds)
+			return -ENOMEM;
+
+		cap_raise(new_creds->cap_effective, CAP_NET_BIND_SERVICE);
+		old_creds = override_creds(new_creds);
+	}
+
+	rc = kiblnd_resolve_addr_cap(cmid, srcaddr, dstaddr, timeout_ms);
+
+	if (old_creds) {
+		revert_creds(old_creds);
+		/* revert_creds() only drops the reference override_creds()
+		 * took on new_creds; the prepare_creds() reference must be
+		 * released explicitly or the cred struct is leaked on every
+		 * unprivileged call.
+		 */
+		put_cred(new_creds);
+	}
+
+	return rc;
+}
+
static void
kiblnd_connect_peer(struct kib_peer_ni *peer_ni)
{
kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs)
{
LIST_HEAD(zombies);
- struct list_head *tmp;
- struct list_head *nxt;
+ struct kib_tx *nxt;
struct kib_tx *tx;
spin_lock(&conn->ibc_lock);
- list_for_each_safe(tmp, nxt, txs) {
- tx = list_entry(tmp, struct kib_tx, tx_list);
-
+ list_for_each_entry_safe(tx, nxt, txs, tx_list) {
if (txs == &conn->ibc_active_txs) {
LASSERT(!tx->tx_queued);
LASSERT(tx->tx_waiting ||
(conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
peer_ni->ibp_accepting > 0));
- LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
- conn->ibc_connvars = NULL;
+ LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
+ conn->ibc_connvars = NULL;
- if (status != 0) {
- /* failed to establish connection */
- kiblnd_peer_connect_failed(peer_ni, active, status);
- kiblnd_finalise_conn(conn);
- return;
- }
+ if (status != 0) {
+ /* failed to establish connection */
+ kiblnd_peer_connect_failed(peer_ni, active, status);
+ kiblnd_finalise_conn(conn);
+ return;
+ }
- /* connection established */
+ /* connection established */
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ /* reset retry count */
+ peer_ni->ibp_retries = 0;
+
conn->ibc_last_send = ktime_get();
- kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
- kiblnd_peer_alive(peer_ni);
+ kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
+ kiblnd_peer_alive(peer_ni);
/* Add conn to peer_ni's list and nuke any dangling conns from a different
* peer_ni instance... */
{
int rc;
+#ifdef HAVE_RDMA_REJECT_4ARGS
+ rc = rdma_reject(cmid, rej, sizeof(*rej), IB_CM_REJ_CONSUMER_DEFINED);
+#else
rc = rdma_reject(cmid, rej, sizeof(*rej));
+#endif
if (rc != 0)
CWARN("Error %d sending reject\n", rc);
goto out;
}
- switch (why) {
- default:
- reason = "Unknown";
- break;
+ if (peer_ni->ibp_retries > *kiblnd_tunables.kib_retry_count) {
+ reason = "retry count exceeded due to no listener";
+ goto out;
+ }
+
+ switch (why) {
+ default:
+ reason = "Unknown";
+ break;
case IBLND_REJECT_RDMA_FRAGS: {
struct lnet_ioctl_config_o2iblnd_tunables *tunables;
IBLND_REJECT_CONN_STALE, NULL);
break;
- case IB_CM_REJ_INVALID_SERVICE_ID:
+ case IB_CM_REJ_INVALID_SERVICE_ID:
+ peer_ni->ibp_retries++;
kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
IBLND_REJECT_INVALID_SRV_ID, NULL);
- CNETERR("%s rejected: no listener at %d\n",
- libcfs_nid2str(peer_ni->ibp_nid),
- *kiblnd_tunables.kib_service);
- break;
+ CNETERR("%s rejected: no listener at %d\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
+ *kiblnd_tunables.kib_service);
+ break;
case IB_CM_REJ_CONSUMER_DEFINED:
if (priv_nob >= offsetof(struct kib_rej, ibr_padding)) {
{
struct kib_tx *tx;
struct list_head *ttmp;
- bool active_txs = strcmp(kiblnd_queue2str(conn, txs),
- "active_txs") == 0;
list_for_each(ttmp, txs) {
tx = list_entry(ttmp, struct kib_tx, tx_list);
LASSERT(tx->tx_waiting || tx->tx_sending != 0);
}
- if (ktime_compare(ktime_get(), tx->tx_deadline) < 0)
- continue;
-
- if (!active_txs) {
- CERROR("Timed out tx: %s, "
- "outstanding RDMA time: %lld sec\n",
+ if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
+ CERROR("Timed out tx: %s, %lld seconds\n",
kiblnd_queue2str(conn, txs),
- *kiblnd_tunables.kib_timeout +
- (ktime_ms_delta(ktime_get(),
- tx->tx_deadline) / MSEC_PER_SEC));
- } else {
- CERROR("Timed out tx: %s, time in internal queue: %lld "
- "sec, time in active queue: %lld sec,"
- " outstanding RDMA time: %lld sec\n",
- kiblnd_queue2str(conn, txs),
- ktime_ms_delta(tx->tx_deadline,
- tx->tx_on_activeq) / MSEC_PER_SEC,
ktime_ms_delta(ktime_get(),
- tx->tx_on_activeq) / MSEC_PER_SEC,
- *kiblnd_tunables.kib_timeout +
- (ktime_ms_delta(ktime_get(),
- tx->tx_deadline) / MSEC_PER_SEC));
+ tx->tx_deadline) / MSEC_PER_SEC);
+ return 1;
}
-
- return 1;
}
return 0;
unsigned long flags;
struct ib_wc wc;
int did_something;
- int busy_loops = 0;
int rc;
init_waitqueue_entry(&wait, current);
spin_lock_irqsave(&sched->ibs_lock, flags);
while (!kiblnd_data.kib_shutdown) {
- if (busy_loops++ >= IBLND_RESCHED) {
+ if (need_resched()) {
spin_unlock_irqrestore(&sched->ibs_lock, flags);
cond_resched();
- busy_loops = 0;
spin_lock_irqsave(&sched->ibs_lock, flags);
}
spin_unlock_irqrestore(&sched->ibs_lock, flags);
schedule();
- busy_loops = 0;
remove_wait_queue(&sched->ibs_waitq, &wait);
set_current_state(TASK_RUNNING);