LASSERT(net->ksnn_ninterfaces <= LNET_INTERFACES_NUM);
/* Only match interfaces for additional connections
- * if I have > 1 interface */
- n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
- MIN(n_peerips, net->ksnn_ninterfaces);
+ * if I have > 1 interface
+ */
+ n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
+ min(n_peerips, net->ksnn_ninterfaces);
for (i = 0; peer_ni->ksnp_n_passive_ips < n_ips; i++) {
/* ^ yes really... */
	 * if we have auto routes, and these connect on demand. */
}
-void
-ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, time64_t *when)
-{
- int connect = 1;
- time64_t last_alive = 0;
- time64_t now = ktime_get_seconds();
- struct ksock_peer_ni *peer_ni = NULL;
- rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
- struct lnet_process_id id = {
- .nid = nid,
- .pid = LNET_PID_LUSTRE,
- };
-
- read_lock(glock);
-
- peer_ni = ksocknal_find_peer_locked(ni, id);
- if (peer_ni != NULL) {
- struct list_head *tmp;
- struct ksock_conn *conn;
- int bufnob;
-
- list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn = list_entry(tmp, struct ksock_conn, ksnc_list);
- bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
-
- if (bufnob < conn->ksnc_tx_bufnob) {
- /* something got ACKed */
- conn->ksnc_tx_deadline = ktime_get_seconds() +
- lnet_get_lnd_timeout();
- peer_ni->ksnp_last_alive = now;
- conn->ksnc_tx_bufnob = bufnob;
- }
- }
-
- last_alive = peer_ni->ksnp_last_alive;
- if (ksocknal_find_connectable_route_locked(peer_ni) == NULL)
- connect = 0;
- }
-
- read_unlock(glock);
-
- if (last_alive != 0)
- *when = last_alive;
-
- CDEBUG(D_NET, "peer_ni %s %p, alive %lld secs ago, connect %d\n",
- libcfs_nid2str(nid), peer_ni,
- last_alive ? now - last_alive : -1,
- connect);
-
- if (!connect)
- return;
-
- ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
-
- write_lock_bh(glock);
-
- peer_ni = ksocknal_find_peer_locked(ni, id);
- if (peer_ni != NULL)
- ksocknal_launch_all_connections_locked(peer_ni);
-
- write_unlock_bh(glock);
-}
-
static void
ksocknal_push_peer(struct ksock_peer_ni *peer_ni)
{
"waiting for %d threads to terminate\n",
ksocknal_data.ksnd_nthreads);
read_unlock(&ksocknal_data.ksnd_global_lock);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
read_lock(&ksocknal_data.ksnd_global_lock);
}
read_unlock(&ksocknal_data.ksnd_global_lock);
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"waiting for %d peers to disconnect\n",
atomic_read(&net->ksnn_npeers) - SOCKNAL_SHUTDOWN_BIAS);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
ksocknal_debug_peerhash(ni);
}
.lnd_send = ksocknal_send,
.lnd_recv = ksocknal_recv,
.lnd_notify_peer_down = ksocknal_notify_gw_down,
- .lnd_query = ksocknal_query,
.lnd_accept = ksocknal_accept,
};