peer_ni->ibp_queue_depth = ni->ni_net->net_tunables.lct_peer_tx_credits;
peer_ni->ibp_queue_depth_mod = 0; /* try to use the default */
kref_init(&peer_ni->ibp_kref);
+ atomic_set(&peer_ni->ibp_nconns, 0);
INIT_HLIST_NODE(&peer_ni->ibp_list);
INIT_LIST_HEAD(&peer_ni->ibp_conns);
mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
- /* hash NID to CPU id in this partition... */
- ibp_nid = conn->ibc_peer->ibp_nid;
+ /* hash NID to CPU id in this partition... when targeting a single peer
+ * with multiple QPs, use ibp_nconns to salt the comp_vector value so
+ * that more cores are engaged in CQ processing for that peer
+ */
+ ibp_nid = conn->ibc_peer->ibp_nid +
+ atomic_read(&conn->ibc_peer->ibp_nconns);
off = do_div(ibp_nid, cpumask_weight(*mask));
for_each_cpu(i, *mask) {
if (off-- == 0)
conn->ibc_state = state;
/* 1 more conn */
+ atomic_inc(&peer_ni->ibp_nconns);
atomic_inc(&net->ibn_nconns);
return conn;
kiblnd_peer_decref(peer_ni);
rdma_destroy_id(cmid);
+ atomic_dec(&peer_ni->ibp_nconns);
atomic_dec(&net->ibn_nconns);
}
}
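
For illustration only, a minimal userspace sketch of the idea behind this patch: hash the peer NID salted by a per-peer connection count so that successive connections (QPs) to the same peer land on different CPUs of the partition. The struct, the fixed CPU set, and pick_cpu_in_partition() below are hypothetical stand-ins, not kiblnd code; the real change uses do_div() over cpumask_weight() and an atomic ibp_nconns counter.

/*
 * Standalone sketch (hypothetical names): without the salt, every
 * connection to the same peer hashes to the same CPU; adding the
 * connection count walks new connections across the partition.
 */
#include <stdio.h>
#include <stdint.h>

struct fake_peer {
	uint64_t nid;		/* peer NID, the hash input */
	int	 nconns;	/* connections already open to this peer */
};

/* CPUs belonging to one partition (CPT); hypothetical fixed set */
static const int part_cpus[] = { 2, 3, 10, 11 };
#define PART_NCPUS (sizeof(part_cpus) / sizeof(part_cpus[0]))

/* hash the salted NID to one CPU id within the partition */
static int pick_cpu_in_partition(const struct fake_peer *peer)
{
	uint64_t key = peer->nid + peer->nconns;

	return part_cpus[key % PART_NCPUS];
}

int main(void)
{
	struct fake_peer peer = { .nid = 0x12345ULL, .nconns = 0 };
	int i;

	/* open several QPs to the same peer: each picks a different CPU */
	for (i = 0; i < 4; i++) {
		printf("conn %d -> CPU %d\n", i, pick_cpu_in_partition(&peer));
		peer.nconns++;	/* mirrors atomic_inc(&peer_ni->ibp_nconns) */
	}
	return 0;
}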