return -EPROTONOSUPPORT;
}
+#ifdef HAVE_FMR_POOL_API
/*
* FMR does not support gaps but the tx has gaps then
* we should make sure that the number of fragments we'll be sending
return -EFBIG;
}
}
+#endif
fps = net->ibn_fmr_ps[cpt];
rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->tx_fmr);
* for FastReg or FMR with no gaps we can accumulate all
* the fragments in one FastReg or FMR fragment.
*/
- if (((dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED) && !tx->tx_gaps) ||
+ if (
+#ifdef HAVE_FMR_POOL_API
+ ((dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
+ && !tx->tx_gaps) ||
+#endif
(dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED)) {
/* FMR requires zero based address */
+#ifdef HAVE_FMR_POOL_API
if (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
+#endif
rd->rd_frags[0].rf_nob = nob;
rd->rd_nfrags = 1;
} else {
static void
kiblnd_unmap_tx(struct kib_tx *tx)
{
- if (tx->tx_fmr.fmr_pfmr || tx->tx_fmr.fmr_frd)
+ if (
+#ifdef HAVE_FMR_POOL_API
+ tx->tx_fmr.fmr_pfmr ||
+#endif
+ tx->tx_fmr.fmr_frd)
kiblnd_fmr_pool_unmap(&tx->tx_fmr, tx->tx_status);
if (tx->tx_nfrags != 0) {
* dead in the water and fail the operation.
*/
if (tunables->lnd_map_on_demand &&
- (net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED ||
- net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED))
+ (net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED
+#ifdef HAVE_FMR_POOL_API
+ || net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED
+#endif
+ ))
return NULL;
/*
unsigned short port;
int rc;
- LASSERT(capable(CAP_NET_BIND_SERVICE));
-
/* allow the port to be reused */
rc = rdma_set_reuseaddr(cmid, 1);
if (rc != 0) {
}
}
- CERROR("Failed to bind to a free privileged port\n");
- return rc;
+ CERROR("cannot bind to a free privileged port: rc = %d\n", rc);
+
+ return rc;
}
static int
int rc;
if (!capable(CAP_NET_BIND_SERVICE)) {
- new_creds = prepare_creds();
+ new_creds = prepare_kernel_cred(NULL);
if (!new_creds)
return -ENOMEM;
int
kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- struct task_struct *task = kthread_run(fn, arg, name);
+ struct task_struct *task = kthread_run(fn, arg, "%s", name);
if (IS_ERR(task))
return PTR_ERR(task);
if (tx->tx_sending == 0) {
tx->tx_queued = 0;
list_move(&tx->tx_list, &zombies);
+ } else {
+ /* keep tx until cq destroy */
+ list_move(&tx->tx_list, &conn->ibc_zombie_txs);
+ conn->ibc_waits ++;
}
}
kiblnd_txlist_done(&zombies, -ECONNABORTED, LNET_MSG_STATUS_OK);
}
+/* Scan the zombie txs parked on a closing connection and discard any
+ * whose LNet MD has already been discarded by the upper layer: such a
+ * tx will never see its send completion, so drop one pending send and,
+ * when its send count reaches zero, release the tx's connection ref.
+ *
+ * Locking: takes/releases conn->ibc_lock around the list walk.
+ *
+ * Returns 1 if at least one tx finished (its conn ref was dropped) --
+ * the caller may then release its own ref on @conn; 0 if the
+ * connection must keep waiting.
+ */
+static int
+kiblnd_tx_may_discard(struct kib_conn *conn)
+{
+	int rc = 0;
+	struct kib_tx *nxt;
+	struct kib_tx *tx;
+
+	spin_lock(&conn->ibc_lock);
+
+	list_for_each_entry_safe(tx, nxt, &conn->ibc_zombie_txs, tx_list) {
+		if (tx->tx_sending > 0 && tx->tx_lntmsg[0] &&
+		    lnet_md_discarded(tx->tx_lntmsg[0]->msg_md)) {
+			/* checkpatch: no space before the '--' operator */
+			tx->tx_sending--;
+			if (tx->tx_sending == 0) {
+				kiblnd_conn_decref(tx->tx_conn);
+				tx->tx_conn = NULL;
+				rc = 1;
+			}
+		}
+	}
+
+	spin_unlock(&conn->ibc_lock);
+	return rc;
+}
+
static void
kiblnd_finalise_conn(struct kib_conn *conn)
{
CNETERR("Deleting messages for %s: connection failed\n",
libcfs_nid2str(peer_ni->ibp_nid));
- kiblnd_txlist_done(&zombies, error,
- LNET_MSG_STATUS_LOCAL_DROPPED);
+ if (error == -EHOSTUNREACH || error == -ETIMEDOUT)
+ kiblnd_txlist_done(&zombies, error,
+ LNET_MSG_STATUS_NETWORK_TIMEOUT);
+ else
+ kiblnd_txlist_done(&zombies, error,
+ LNET_MSG_STATUS_LOCAL_DROPPED);
}
static void
}
if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
- CERROR("Timed out tx: %s, %lld seconds\n",
+ CERROR("Timed out tx: %s(WSQ:%d%d%d), %lld seconds\n",
kiblnd_queue2str(conn, txs),
+ tx->tx_waiting, tx->tx_sending, tx->tx_queued,
kiblnd_timeout() +
ktime_ms_delta(ktime_get(),
tx->tx_deadline) / MSEC_PER_SEC);
}
if (!list_empty(&kiblnd_data.kib_connd_conns)) {
+ int wait;
conn = list_entry(kiblnd_data.kib_connd_conns.next,
struct kib_conn, ibc_list);
list_del(&conn->ibc_list);
dropped_lock = 1;
kiblnd_disconnect_conn(conn);
- kiblnd_conn_decref(conn);
+ wait = conn->ibc_waits;
+ if (wait == 0) /* keep ref for connd_wait, see below */
+ kiblnd_conn_decref(conn);
spin_lock_irqsave(lock, flags);
+
+ if (wait)
+ list_add_tail(&conn->ibc_list,
+ &kiblnd_data.kib_connd_waits);
}
while (reconn < KIB_RECONN_BREAK) {
spin_lock_irqsave(lock, flags);
}
+ if (!list_empty(&kiblnd_data.kib_connd_waits)) {
+ conn = list_entry(kiblnd_data.kib_connd_waits.next,
+ struct kib_conn, ibc_list);
+ list_del(&conn->ibc_list);
+ spin_unlock_irqrestore(lock, flags);
+
+ dropped_lock = kiblnd_tx_may_discard(conn);
+ if (dropped_lock)
+ kiblnd_conn_decref(conn);
+
+ spin_lock_irqsave(lock, flags);
+ if (dropped_lock == 0)
+ list_add_tail(&conn->ibc_list,
+ &kiblnd_data.kib_connd_waits);
+ }
+
/* careful with the jiffy wrap... */
timeout = (int)(deadline - jiffies);
if (timeout <= 0) {