route->ksnr_deleted = 0;
route->ksnr_conn_count = 0;
route->ksnr_share_count = 0;
+ route->ksnr_ctrl_conn_count = 0;
+ route->ksnr_blki_conn_count = 0;
+ route->ksnr_blko_conn_count = 0;
return (route);
}
return rc;
}
+/* Return the number of connections of the given @type currently
+ * established on @route.  SOCKLND_CONN_ANY reports the overall
+ * ksnr_conn_count (all types combined); any other unknown type is a
+ * fatal logic error (LBUG).
+ */
+static unsigned int
+ksocknal_get_conn_count_by_type(struct ksock_route *route,
+ int type)
+{
+ unsigned int count = 0;
+
+ switch (type) {
+ case SOCKLND_CONN_CONTROL:
+ count = route->ksnr_ctrl_conn_count;
+ break;
+ case SOCKLND_CONN_BULK_IN:
+ count = route->ksnr_blki_conn_count;
+ break;
+ case SOCKLND_CONN_BULK_OUT:
+ count = route->ksnr_blko_conn_count;
+ break;
+ case SOCKLND_CONN_ANY:
+ count = route->ksnr_conn_count;
+ break;
+ default:
+ LBUG();
+ break;
+ }
+
+ return count;
+}
+
+/* Account a newly established connection of @type against @route:
+ * bump the overall ksnr_conn_count and the per-type counter, and set
+ * the type's bit in ksnr_connected once the wanted number of
+ * connections of that type exists — one for control connections,
+ * conns_per_peer for bulk-in/bulk-out/untyped.  Once the bit is set
+ * the connd stops scheduling further connects of that type.
+ */
+static void
+ksocknal_incr_conn_count(struct ksock_route *route,
+ int type)
+{
+ route->ksnr_conn_count++;
+
+ /* check if all connections of the given type got created */
+ switch (type) {
+ case SOCKLND_CONN_CONTROL:
+ route->ksnr_ctrl_conn_count++;
+ /* there's a single control connection per peer */
+ route->ksnr_connected |= BIT(type);
+ break;
+ case SOCKLND_CONN_BULK_IN:
+ route->ksnr_blki_conn_count++;
+ if (route->ksnr_blki_conn_count >=
+ *ksocknal_tunables.ksnd_conns_per_peer)
+ route->ksnr_connected |= BIT(type);
+ break;
+ case SOCKLND_CONN_BULK_OUT:
+ route->ksnr_blko_conn_count++;
+ if (route->ksnr_blko_conn_count >=
+ *ksocknal_tunables.ksnd_conns_per_peer)
+ route->ksnr_connected |= BIT(type);
+ break;
+ case SOCKLND_CONN_ANY:
+ if (route->ksnr_conn_count >=
+ *ksocknal_tunables.ksnd_conns_per_peer)
+ route->ksnr_connected |= BIT(type);
+ break;
+ default:
+ LBUG();
+ break;
+
+ }
+
+ CDEBUG(D_NET, "Add conn type %d, ksnr_connected %x conns_per_peer %d\n",
+ type,
+ route->ksnr_connected,
+ *ksocknal_tunables.ksnd_conns_per_peer);
+}
+
static void
ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
{
iface->ksni_nroutes++;
}
- route->ksnr_connected |= (1<<type);
- route->ksnr_conn_count++;
+ ksocknal_incr_conn_count(route, type);
/* Successful connection => further attempts can
* proceed immediately */
int rc;
int rc2;
int active;
+ int num_dup = 0;
char *warn = NULL;
active = (route != NULL);
conn2->ksnc_type != conn->ksnc_type)
continue;
+ num_dup++;
+ if (num_dup < *ksocknal_tunables.ksnd_conns_per_peer)
+ continue;
+
/* Reply on a passive connection attempt so the peer_ni
* realises we're connected. */
LASSERT (rc == 0);
if (route != NULL) {
/* dissociate conn from route... */
LASSERT(!route->ksnr_deleted);
- LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
+ /* connected bit is set only if all connections
+ * of the given type got created
+ */
+ if (ksocknal_get_conn_count_by_type(route, conn->ksnc_type) ==
+ *ksocknal_tunables.ksnd_conns_per_peer)
+ LASSERT((route->ksnr_connected &
+ BIT(conn->ksnc_type)) != 0);
conn2 = NULL;
list_for_each(tmp, &peer_ni->ksnp_conns) {
conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
#if SOCKNAL_VERSION_DEBUG
int *ksnd_protocol; /* protocol version */
#endif
+ int *ksnd_conns_per_peer; /* for typed mode, yields:
+ * 1 + 2*conns_per_peer total
+ * for untyped:
+ * conns_per_peer total
+ */
};
struct ksock_net {
time64_t ksnc_tx_last_post;
};
+#define SOCKNAL_CONN_COUNT_MAX_BITS 8 /* max conn count bits */
+
struct ksock_route {
struct list_head ksnr_list; /* chain on peer_ni route list */
struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */
unsigned int ksnr_connecting:1;/* connection establishment in progress */
unsigned int ksnr_connected:4; /* connections established by type */
unsigned int ksnr_deleted:1; /* been removed from peer_ni? */
+ unsigned int ksnr_ctrl_conn_count:1; /* # conns by type */
+ unsigned int ksnr_blki_conn_count:8;
+ unsigned int ksnr_blko_conn_count:8;
unsigned int ksnr_share_count; /* created explicitly? */
int ksnr_conn_count; /* # conns established by this route */
};
ksocknal_destroy_peer(peer_ni);
}
+/* Effective conns_per_peer value: a configured value of 0 means the
+ * default of one connection per peer (per type).  Uses the GNU "?:"
+ * (elvis) extension, which is standard practice in kernel code.
+ */
+static inline int ksocknal_conns_per_peer(void)
+{
+ return *ksocknal_tunables.ksnd_conns_per_peer ?: 1;
+}
+
int ksocknal_startup(struct lnet_ni *ni);
void ksocknal_shutdown(struct lnet_ni *ni);
int ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg);
if (retry_later) /* needs reschedule */
break;
- if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
+ if ((wanted & BIT(SOCKLND_CONN_ANY)) != 0) {
type = SOCKLND_CONN_ANY;
- } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
+ } else if ((wanted & BIT(SOCKLND_CONN_CONTROL)) != 0) {
type = SOCKLND_CONN_CONTROL;
- } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
+ } else if ((wanted & BIT(SOCKLND_CONN_BULK_IN)) != 0 &&
+ route->ksnr_blki_conn_count <= route->ksnr_blko_conn_count) {
type = SOCKLND_CONN_BULK_IN;
} else {
- LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
+ LASSERT ((wanted & BIT(SOCKLND_CONN_BULK_OUT)) != 0);
type = SOCKLND_CONN_BULK_OUT;
}
module_param(zc_recv_min_nfrags, int, 0644);
MODULE_PARM_DESC(zc_recv_min_nfrags, "minimum # of fragments to enable ZC recv");
+static unsigned int conns_per_peer = 1;
+module_param(conns_per_peer, uint, 0444);
+MODULE_PARM_DESC(conns_per_peer, "number of connections per peer");
+
#ifdef SOCKNAL_BACKOFF
static int backoff_init = 3;
module_param(backoff_init, int, 0644);
ksocknal_tunables.ksnd_zc_min_payload = &zc_min_payload;
ksocknal_tunables.ksnd_zc_recv = &zc_recv;
ksocknal_tunables.ksnd_zc_recv_min_nfrags = &zc_recv_min_nfrags;
+ ksocknal_tunables.ksnd_conns_per_peer = &conns_per_peer;
+ if (conns_per_peer > ((1 << SOCKNAL_CONN_COUNT_MAX_BITS) - 1))
+ CWARN("socklnd conns_per_peer is capped at %u.\n",
+ (1 << SOCKNAL_CONN_COUNT_MAX_BITS) - 1);
+ ksocknal_tunables.ksnd_conns_per_peer = &conns_per_peer;
#ifdef CPU_AFFINITY
if (enable_irq_affinity) {