#define LOWEST_BIT_SET(x) ((x) & ~((x) - 1))
-#ifndef CONFIG_SMP
-# define smp_processor_id() 0
-#endif
-
/*
* Debugging
*/
} while (0)
#else
#define CHECK_STACK(stack) do{}while(0)
-#define CDEBUG_STACK(var) (0)
+#define CDEBUG_STACK(var) (0L)
#endif
#if 1
struct socket *sock = NULL;
ksock_sched_t *sched = NULL;
unsigned int irq = 0;
- struct net_device *dev = NULL;
+ struct dst_entry *dst;
int ret;
int idx;
ENTRY;
conn->ksnc_file = file;
conn->ksnc_sock = sock;
- conn->ksnc_saved_data_ready = sock->sk->data_ready;
- conn->ksnc_saved_write_space = sock->sk->write_space;
+ conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
+ conn->ksnc_saved_write_space = sock->sk->sk_write_space;
conn->ksnc_peernid = nid;
atomic_set (&conn->ksnc_refcount, 1); /* 1 ref for socklist */
conn->ksnc_tx_ready = 0;
conn->ksnc_tx_scheduled = 0;
-#warning check it is OK to derefence sk->dst_cache->dev like this...
- lock_sock (conn->ksnc_sock->sk);
-
- if (conn->ksnc_sock->sk->dst_cache != NULL) {
- dev = conn->ksnc_sock->sk->dst_cache->dev;
- if (dev != NULL) {
- irq = dev->irq;
+ dst = sk_dst_get (conn->ksnc_sock->sk);
+ if (dst != NULL) {
+ if (dst->dev != NULL) {
+ irq = dst->dev->irq;
if (irq >= NR_IRQS) {
CERROR ("Unexpected IRQ %x\n", irq);
irq = 0;
}
}
+ dst_release (dst);
}
- release_sock (conn->ksnc_sock->sk);
-
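For reference, the pattern the patch switches to here is the refcounted route-cache accessor: sk_dst_get() takes a reference on the socket's cached dst_entry without needing lock_sock(), and dst_release() drops it. A minimal sketch of the same logic (my_sock_irq() is a hypothetical name, not a function in the patch):

static unsigned int my_sock_irq (struct sock *sk)
{
        /* Take a counted reference on the cached route; no
         * lock_sock() needed, unlike the old dst_cache access. */
        struct dst_entry *dst = sk_dst_get (sk);
        unsigned int      irq = 0;

        if (dst != NULL) {
                if (dst->dev != NULL)
                        irq = dst->dev->irq;
                dst_release (dst);              /* balance sk_dst_get() */
        }

        /* mirror the patch's sanity check on the IRQ number */
        return (irq < NR_IRQS) ? irq : 0;
}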
write_lock_irqsave (&ksocknal_data.ksnd_socklist_lock, flags);
if (irq == 0 ||
ksocknal_bind_irq (irq, sched - ksocknal_data.ksnd_schedulers);
/* NOW it's safe to get called back when socket is ready... */
- sock->sk->user_data = conn;
- sock->sk->data_ready = ksocknal_data_ready;
- sock->sk->write_space = ksocknal_write_space;
+ sock->sk->sk_user_data = conn;
+ sock->sk->sk_data_ready = ksocknal_data_ready;
+ sock->sk->sk_write_space = ksocknal_write_space;
/* ...which I call right now to get things going */
ksocknal_data_ready (sock->sk, 0);
/* NB I _have_ to restore the callback, rather than storing
* a noop, since the socket could survive past this module
* being unloaded!! */
- conn->ksnc_sock->sk->data_ready = conn->ksnc_saved_data_ready;
- conn->ksnc_sock->sk->write_space = conn->ksnc_saved_write_space;
+ conn->ksnc_sock->sk->sk_data_ready = conn->ksnc_saved_data_ready;
+ conn->ksnc_sock->sk->sk_write_space = conn->ksnc_saved_write_space;
/* OK; no more callbacks, but they could be in progress now,
* so wait for them to complete... */
/* ...however if I get the lock before a callback gets it,
* this will make them noop
*/
- conn->ksnc_sock->sk->user_data = NULL;
+ conn->ksnc_sock->sk->sk_user_data = NULL;
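Condensed, the callback takeover protocol these hunks implement is: save the socket's original callbacks, publish the conn via sk_user_data, and point the callbacks at ours; on teardown, restore the originals (never noop stubs, since the socket can outlive the module) and clear sk_user_data under the lock so racing callbacks fall through harmlessly. A sketch with illustrative function names (the ksnc_* fields match the patch):

static void my_install_callbacks (struct sock *sk, ksock_conn_t *conn)
{
        conn->ksnc_saved_data_ready  = sk->sk_data_ready;
        conn->ksnc_saved_write_space = sk->sk_write_space;

        sk->sk_user_data   = conn;              /* publish conn first */
        sk->sk_data_ready  = ksocknal_data_ready;
        sk->sk_write_space = ksocknal_write_space;
}

static void my_teardown_callbacks (struct sock *sk, ksock_conn_t *conn)
{
        /* Restore the ORIGINAL callbacks; a noop stub could be
         * called after this module is unloaded. */
        sk->sk_data_ready  = conn->ksnc_saved_data_ready;
        sk->sk_write_space = conn->ksnc_saved_write_space;

        /* Racing callbacks that grab the lock after us see
         * sk_user_data == NULL and invoke the saved handler. */
        sk->sk_user_data = NULL;
}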
/* And drop the scheduler's connection count while I've got
* the exclusive lock */
oldmm = get_fs ();
set_fs (KERNEL_DS);
- rc = sk->prot->setsockopt (sk, SOL_TCP, TCP_NODELAY,
- (char *)&val, sizeof (val));
+ rc = sk->sk_prot->setsockopt (sk, SOL_TCP, TCP_NODELAY,
+ (char *)&val, sizeof (val));
LASSERT (rc == 0);
set_fs (oldmm);
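The set_fs() bracket above is needed because sk->sk_prot->setsockopt() expects a user-space pointer; widening the address limit to KERNEL_DS lets it accept the on-stack kernel buffer. A self-contained sketch of the same call (my_set_nodelay() is a hypothetical name; error handling is elided as in the patch):

#include <asm/uaccess.h>
#include <net/sock.h>
#include <net/tcp.h>

static int my_set_nodelay (struct sock *sk)
{
        mm_segment_t oldmm = get_fs ();
        int          val   = 1;
        int          rc;

        set_fs (KERNEL_DS);             /* permit a kernel pointer */
        rc = sk->sk_prot->setsockopt (sk, SOL_TCP, TCP_NODELAY,
                                      (char *)&val, sizeof (val));
        set_fs (oldmm);                 /* restore the old limit */
        return rc;
}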
* "That's a summons, mate..." */
LASSERT (atomic_read (&conn->ksnc_refcount) == 0);
- LASSERT (conn->ksnc_sock->sk->data_ready != ksocknal_data_ready);
- LASSERT (conn->ksnc_sock->sk->write_space != ksocknal_write_space);
- LASSERT (conn->ksnc_sock->sk->user_data == NULL);
+ LASSERT (conn->ksnc_sock->sk->sk_data_ready != ksocknal_data_ready);
+ LASSERT (conn->ksnc_sock->sk->sk_write_space != ksocknal_write_space);
+ LASSERT (conn->ksnc_sock->sk->sk_user_data == NULL);
LASSERT (!conn->ksnc_rx_scheduled);
spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);
#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */
-#define SOCKNAL_TX_LOW_WATER(sk) (((sk)->sndbuf*8)/10)
+#define SOCKNAL_TX_LOW_WATER(sk) (((sk)->sk_sndbuf*8)/10)
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,72))
+# define sk_data_ready data_ready
+# define sk_write_space write_space
+# define sk_user_data user_data
+# define sk_prot prot
+# define sk_sndbuf sndbuf
+# define sk_socket socket
+#endif
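These compatibility defines let the rest of the file be written once against the 2.5.72+ sk_* field names; on older kernels the preprocessor maps each access back to the old name. For example (illustrative):

/* On a pre-2.5.72 kernel this compiles as "conn = sk->user_data;"
 * thanks to the defines above; on 2.5.72+ it is used verbatim. */
conn = sk->sk_user_data;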
typedef struct /* pool of forwarding buffers */
{
/* interleave correctly with closing sockets... */
read_lock (&ksocknal_data.ksnd_socklist_lock);
- conn = sk->user_data;
+ conn = sk->sk_user_data;
if (conn == NULL) { /* raced with ksocknal_close_sock */
- LASSERT (sk->data_ready != &ksocknal_data_ready);
- sk->data_ready (sk, n);
+ LASSERT (sk->sk_data_ready != &ksocknal_data_ready);
+ sk->sk_data_ready (sk, n);
} else if (!conn->ksnc_rx_ready) { /* new news */
/* Set ASAP in case of concurrent calls to me */
conn->ksnc_rx_ready = 1;
/* interleave correctly with closing sockets... */
read_lock (&ksocknal_data.ksnd_socklist_lock);
- conn = sk->user_data;
+ conn = sk->sk_user_data;
CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
sk, tcp_wspace(sk), SOCKNAL_TX_LOW_WATER(sk), conn,
" empty" : " queued"));
if (conn == NULL) { /* raced with ksocknal_close_sock */
- LASSERT (sk->write_space != &ksocknal_write_space);
- sk->write_space (sk);
+ LASSERT (sk->sk_write_space != &ksocknal_write_space);
+ sk->sk_write_space (sk);
} else if (tcp_wspace(sk) >= SOCKNAL_TX_LOW_WATER(sk)) { /* got enough space */
- clear_bit (SOCK_NOSPACE, &sk->socket->flags);
+ clear_bit (SOCK_NOSPACE, &sk->sk_socket->flags);
if (!conn->ksnc_tx_ready) { /* new news */
/* Set ASAP in case of concurrent calls to me */
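For clarity, the write-space gate exercised just above, isolated into a helper (my_tx_space_ok() is a hypothetical name; tcp_wspace() and SOCKNAL_TX_LOW_WATER() are as used in the patch):

static int my_tx_space_ok (struct sock *sk)
{
        /* Only treat the socket as writable again once free send
         * buffer space climbs back to 80% of sk_sndbuf, so small
         * windows don't thrash the scheduler. */
        return tcp_wspace (sk) >= SOCKNAL_TX_LOW_WATER (sk);
}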