* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
 * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
/**
 * Report whether zero-copy (ZC) sends may be used on this connection.
 *
 * \param conn  connection whose underlying kernel socket is examined
 *
 * \return non-zero if the connection can use zero-copy, 0 otherwise
 */
int
ksocknal_lib_zc_capable(ksock_conn_t *conn)
{
	int caps = conn->ksnc_sock->sk->sk_route_caps;

	/* The V1 wire protocol never uses zero-copy. */
	if (conn->ksnc_proto == &ksocknal_protocol_v1x)
		return 0;

	/* ZC if the socket supports scatter/gather and doesn't need software
	 * checksums */
	return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_ALL_CSUM) != 0);
}
int
for (nob = i = 0; i < niov; i++) {
if ((kiov[i].kiov_offset != 0 && i > 0) ||
- (kiov[i].kiov_offset + kiov[i].kiov_len != CFS_PAGE_SIZE && i < niov - 1))
+ (kiov[i].kiov_offset + kiov[i].kiov_len !=
+ PAGE_CACHE_SIZE && i < niov - 1))
return NULL;
pages[i] = kiov[i].kiov_page;
return (0);
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10))
-#define sock2tcp_opt(sk) tcp_sk(sk)
-#else
-struct tcp_opt *sock2tcp_opt(struct sock *sk)
-{
- struct tcp_sock *s = (struct tcp_sock *)sk;
- return &s->tcp;
-}
-#endif
-
void
ksocknal_lib_push_conn (ksock_conn_t *conn)
{
struct sock *sk;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11))
- struct tcp_opt *tp;
-#else
struct tcp_sock *tp;
-#endif
int nonagle;
int val = 1;
int rc;
if (rc != 0) /* being shut down */
return;
- sk = conn->ksnc_sock->sk;
- tp = sock2tcp_opt(sk);
+ sk = conn->ksnc_sock->sk;
+ tp = tcp_sk(sk);
lock_sock (sk);
nonagle = tp->nonagle;
/* interleave correctly with closing sockets... */
LASSERT(!in_irq());
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
conn = sk->sk_user_data;
if (conn == NULL) { /* raced with ksocknal_terminate_conn */
} else
ksocknal_read_callback(conn);
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
EXIT;
}
/* interleave correctly with closing sockets... */
LASSERT(!in_irq());
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
conn = sk->sk_user_data;
wspace = SOCKNAL_WSPACE(sk);
LASSERT (sk->sk_write_space != &ksocknal_write_space);
sk->sk_write_space (sk);
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
return;
}
clear_bit (SOCK_NOSPACE, &sk->sk_socket->flags);
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
}
void
/**
 * Probe the connection's scheduler state for memory pressure.
 *
 * \param conn  connection to check
 *
 * \return -ENOMEM when the socket is not flagged NOSPACE and yet the
 *         connection is not ready to transmit (presumably indicating
 *         allocation pressure rather than flow control — NOTE(review):
 *         confirm against the write_space callback elsewhere in this
 *         file); 0 otherwise
 */
int
ksocknal_lib_memory_pressure(ksock_conn_t *conn)
{
	int		rc = 0;
	ksock_sched_t	*sched;

	sched = conn->ksnc_scheduler;

	/* BH-safe lock: this state is also touched from socket callbacks. */
	spin_lock_bh(&sched->kss_lock);

	if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
	    !conn->ksnc_tx_ready) {
		rc = -ENOMEM;
	}

	spin_unlock_bh(&sched->kss_lock);

	return rc;
}