*/
#include "socknal.h"
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
-# include <linux/syscalls.h>
-#endif
/*
* LIB functions follow
LASSERT (tx->tx_nkiov > 0);
#if SOCKNAL_ZC
- if (fragsize >= ksocknal_tunables.ksnd_zc_min_frag &&
+ if (fragsize >= ksocknal_data.ksnd_zc_min_frag &&
(sock->sk->route_caps & NETIF_F_SG) &&
(sock->sk->route_caps & (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM))) {
* is set. Instead, we presume peer death has occurred if
* the socket doesn't drain within a timeout */
conn->ksnc_tx_deadline = jiffies +
- ksocknal_tunables.ksnd_io_timeout * HZ;
+ ksocknal_data.ksnd_io_timeout * HZ;
conn->ksnc_peer->ksnp_last_alive = jiffies;
} while (tx->tx_resid != 0);
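The deadline refresh above only records progress; the check that actually presumes peer death lives elsewhere. As a minimal sketch (not part of this patch, and using a hypothetical helper name), a periodic checker could flag the connection like this, assuming ksnc_tx_deadline is advanced whenever data moves, exactly as in the hunk above:

        /* Hypothetical helper, for illustration only: unsent bytes remain
         * queued and the refreshed deadline has passed, so presume the
         * peer is dead rather than waiting on SO_SNDTIMEO. */
        static int
        ksocknal_conn_send_timed_out (ksock_conn_t *conn)
        {
                return (!list_empty (&conn->ksnc_tx_queue) &&
                        time_after_eq (jiffies, conn->ksnc_tx_deadline));
        }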
/* received something... */
conn->ksnc_peer->ksnp_last_alive = jiffies;
conn->ksnc_rx_deadline = jiffies +
- ksocknal_tunables.ksnd_io_timeout * HZ;
+ ksocknal_data.ksnd_io_timeout * HZ;
mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
/* received something... */
conn->ksnc_peer->ksnp_last_alive = jiffies;
conn->ksnc_rx_deadline = jiffies +
- ksocknal_tunables.ksnd_io_timeout * HZ;
+ ksocknal_data.ksnd_io_timeout * HZ;
mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
if (conn->ksnc_rx_nob_wanted == 0) {
/* Completed a message segment (header or payload) */
- if ((ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0 &&
+ if ((ksocknal_data.ksnd_eager_ack & conn->ksnc_type) != 0 &&
(conn->ksnc_rx_state == SOCKNAL_RX_BODY ||
conn->ksnc_rx_state == SOCKNAL_RX_BODY_FWD)) {
/* Remind the socket to ack eagerly... */
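The eager-ack reminder referred to here is issued in code this hunk does not show; a minimal sketch of that pattern, assuming TCP_QUICKACK and the same setsockopt-under-KERNEL_DS convention used elsewhere in this file:

        /* Sketch only: remind the socket to ACK now, instead of waiting to
         * piggy-back the ACK on data we may not send for a while. */
        {
                int           opt   = 1;
                mm_segment_t  oldmm = get_fs ();

                set_fs (KERNEL_DS);
                conn->ksnc_sock->ops->setsockopt (conn->ksnc_sock, SOL_TCP,
                                                  TCP_QUICKACK,
                                                  (char *)&opt, sizeof (opt));
                set_fs (oldmm);
        }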
LASSERT ((route->ksnr_connected & KSNR_TYPED_ROUTES) != KSNR_TYPED_ROUTES);
LASSERT (!route->ksnr_connecting);
- if (ksocknal_tunables.ksnd_typed_conns)
+ if (ksocknal_data.ksnd_typed_conns)
route->ksnr_connecting =
KSNR_TYPED_ROUTES & ~route->ksnr_connected;
else
fnob = nob;
}
- if (!ksocknal_tunables.ksnd_typed_conns)
+ if (!ksocknal_data.ksnd_typed_conns)
continue;
switch (c->ksnc_type) {
case SOCKNAL_CONN_BULK_IN:
continue;
case SOCKNAL_CONN_BULK_OUT:
- if (tx->tx_nob < ksocknal_tunables.ksnd_min_bulk)
+ if (tx->tx_nob < ksocknal_data.ksnd_min_bulk)
continue;
break;
case SOCKNAL_CONN_CONTROL:
- if (tx->tx_nob >= ksocknal_tunables.ksnd_min_bulk)
+ if (tx->tx_nob >= ksocknal_data.ksnd_min_bulk)
continue;
break;
}
spin_lock_irqsave (&sched->kss_lock, flags);
conn->ksnc_tx_deadline = jiffies +
- ksocknal_tunables.ksnd_io_timeout * HZ;
+ ksocknal_data.ksnd_io_timeout * HZ;
mb(); /* order with list_add_tail */
list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue);
/* Keepalives: If 3/4 of the timeout elapses, start probing every
* second until the timeout elapses. */
- option = (ksocknal_tunables.ksnd_io_timeout * 3) / 4;
+ option = (ksocknal_data.ksnd_io_timeout * 3) / 4;
set_fs (KERNEL_DS);
rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPIDLE,
(char *)&option, sizeof (option));
return (rc);
}
- option = ksocknal_tunables.ksnd_io_timeout / 4;
+ option = ksocknal_data.ksnd_io_timeout / 4;
set_fs (KERNEL_DS);
rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPCNT,
(char *)&option, sizeof (option));
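To make the keepalive arithmetic concrete (the TCP_KEEPINTVL setting sits in a hunk not shown here; the comment above implies it is one second): with ksnd_io_timeout = 60, TCP_KEEPIDLE = (60 * 3) / 4 = 45 seconds of silence before probing starts, and TCP_KEEPCNT = 60 / 4 = 15 one-second probes, so an unresponsive peer is declared dead after roughly 45 + 15 = 60 seconds, i.e. within the configured I/O timeout.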
/* Set the socket timeouts, so our connection attempt completes in
* finite time */
- tv.tv_sec = ksocknal_tunables.ksnd_io_timeout;
+ tv.tv_sec = ksocknal_data.ksnd_io_timeout;
tv.tv_usec = 0;
set_fs (KERNEL_DS);
set_fs (oldmm);
if (rc != 0) {
CERROR ("Can't set send timeout %d: %d\n",
- ksocknal_tunables.ksnd_io_timeout, rc);
+ ksocknal_data.ksnd_io_timeout, rc);
goto out;
}
set_fs (oldmm);
if (rc != 0) {
CERROR ("Can't set receive timeout %d: %d\n",
- ksocknal_tunables.ksnd_io_timeout, rc);
+ ksocknal_data.ksnd_io_timeout, rc);
goto out;
}
* timeout on any connection within (n+1)/n times the
* timeout interval. */
- if (ksocknal_tunables.ksnd_io_timeout > n * p)
+ if (ksocknal_data.ksnd_io_timeout > n * p)
chunk = (chunk * n * p) /
- ksocknal_tunables.ksnd_io_timeout;
+ ksocknal_data.ksnd_io_timeout;
if (chunk == 0)
chunk = 1;
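To see why this scaling bounds detection latency at (n+1)/n times the timeout, take illustrative values not shown in this excerpt: n = 4 scans per timeout, p = 1 second between scans, a 128-bucket peer hash, and ksnd_io_timeout = 64. Then chunk = (128 * 4 * 1) / 64 = 8 buckets per scan, so the whole table is covered every 128 / 8 = 16 s = timeout / n, and a connection whose deadline has just expired is noticed within timeout/n seconds of expiry, i.e. within (n + 1)/n times the timeout of its last activity.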