/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
# if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
static cfs_sysctl_table_t ksocknal_ctl_table[21];

cfs_sysctl_table_t ksocknal_top_ctl_table[] = {
        {
                .procname = "socknal",
                .child    = ksocknal_ctl_table
        },
        { 0 }
};
int
ksocknal_lib_tunables_init ()
{
        int i = 0;

        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                .procname     = "timeout",
                .data         = ksocknal_tunables.ksnd_timeout,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                .procname     = "credits",
                .data         = ksocknal_tunables.ksnd_credits,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                .procname     = "peer_credits",
                .data         = ksocknal_tunables.ksnd_peercredits,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                .procname     = "nconnds",
                .data         = ksocknal_tunables.ksnd_nconnds,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                .procname     = "min_reconnectms",
                .data         = ksocknal_tunables.ksnd_min_reconnectms,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                .procname     = "max_reconnectms",
                .data         = ksocknal_tunables.ksnd_max_reconnectms,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                .procname     = "eager_ack",
                .data         = ksocknal_tunables.ksnd_eager_ack,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                .procname     = "zero_copy",
                .data         = ksocknal_tunables.ksnd_zc_min_frag,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                /* ... */
                .data         = ksocknal_tunables.ksnd_typed_conns,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                .procname     = "min_bulk",
                .data         = ksocknal_tunables.ksnd_min_bulk,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                .procname     = "rx_buffer_size",
                .data         = ksocknal_tunables.ksnd_rx_buffer_size,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                .procname     = "tx_buffer_size",
                .data         = ksocknal_tunables.ksnd_tx_buffer_size,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                /* ... */
                .data         = ksocknal_tunables.ksnd_nagle,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                .procname     = "irq_affinity",
                .data         = ksocknal_tunables.ksnd_irq_affinity,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                .procname     = "keepalive_idle",
                .data         = ksocknal_tunables.ksnd_keepalive_idle,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                .procname     = "keepalive_count",
                .data         = ksocknal_tunables.ksnd_keepalive_count,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                .procname     = "keepalive_intvl",
                .data         = ksocknal_tunables.ksnd_keepalive_intvl,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
#ifdef SOCKNAL_BACKOFF
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                .procname     = "backoff_init",
                .data         = ksocknal_tunables.ksnd_backoff_init,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                .procname     = "backoff_max",
                .data         = ksocknal_tunables.ksnd_backoff_max,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
#endif
#if SOCKNAL_VERSION_DEBUG
        ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
                .procname     = "protocol",
                .data         = ksocknal_tunables.ksnd_protocol,
                .maxlen       = sizeof (int),
                .proc_handler = &proc_dointvec
        };
#endif
        LASSERT (i < sizeof(ksocknal_ctl_table)/sizeof(ksocknal_ctl_table[0]));

        ksocknal_tunables.ksnd_sysctl =
                cfs_register_sysctl_table(ksocknal_top_ctl_table, 0);

        if (ksocknal_tunables.ksnd_sysctl == NULL)
                CWARN("Can't setup /proc tunables\n");

        return 0;
}
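/* With the tables above registered, each tunable shows up as an integer
 * file under /proc/sys/socknal/; proc_dointvec copies values written there
 * straight into the int that .data points at.  For example (illustrative
 * values):
 *
 *      # cat /proc/sys/socknal/timeout
 *      50
 *      # echo 100 > /proc/sys/socknal/timeout
 *
 * takes effect immediately via *ksocknal_tunables.ksnd_timeout. */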
void
ksocknal_lib_tunables_fini ()
{
        if (ksocknal_tunables.ksnd_sysctl != NULL)
                cfs_unregister_sysctl_table(ksocknal_tunables.ksnd_sysctl);
}
#else
int
ksocknal_lib_tunables_init ()
{
        return 0;
}

void
ksocknal_lib_tunables_fini ()
{
}
#endif /* # if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM */
void
ksocknal_lib_bind_irq (unsigned int irq)
{
#if (defined(CONFIG_SMP) && CPU_AFFINITY)
        int              bind;
        int              cpu;
        char             cmdline[64];
        ksock_irqinfo_t *info;
        char            *argv[] = {"/bin/sh",
                                   "-c",
                                   cmdline,
                                   NULL};
        char            *envp[] = {"HOME=/",
                                   "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
                                   NULL};

        LASSERT (irq < NR_IRQS);
        if (irq == 0)                   /* software NIC or affinity disabled */
                return;

        info = &ksocknal_data.ksnd_irqinfo[irq];

        write_lock_bh (&ksocknal_data.ksnd_global_lock);

        LASSERT (info->ksni_valid);
        bind = !info->ksni_bound;
        info->ksni_bound = 1;

        write_unlock_bh (&ksocknal_data.ksnd_global_lock);

        if (!bind)                      /* bound already */
                return;

        cpu = ksocknal_irqsched2cpu(info->ksni_sched);
        snprintf (cmdline, sizeof (cmdline),
                  "echo %d > /proc/irq/%u/smp_affinity", 1 << cpu, irq);

        LCONSOLE_INFO("Binding irq %u to CPU %d with cmd: %s\n",
                      irq, cpu, cmdline);

        /* FIXME: Find a better method of setting IRQ affinity...
         */

        USERMODEHELPER(argv[0], argv, envp);
#endif
}
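/* A sketch of the direct alternative the FIXME above asks for (not part of
 * the original code): newer kernels export irq_set_affinity(), so the shell
 * round trip could be replaced with an in-kernel call.  Assumes a kernel
 * providing irq_set_affinity() and cpumask_of(); compiled out here. */
#if 0
static void
ksocknal_lib_bind_irq_direct (unsigned int irq, int cpu)
{
        /* equivalent of "echo (1 << cpu) > /proc/irq/<irq>/smp_affinity" */
        irq_set_affinity(irq, cpumask_of(cpu));
}
#endif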
int
ksocknal_lib_get_conn_addrs (ksock_conn_t *conn)
{
        int rc = libcfs_sock_getaddr(conn->ksnc_sock, 1,
                                     &conn->ksnc_ipaddr, &conn->ksnc_port);

        /* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
        LASSERT (!conn->ksnc_closing);

        if (rc != 0) {
                CERROR ("Error %d getting sock peer IP\n", rc);
                return rc;
        }

        rc = libcfs_sock_getaddr(conn->ksnc_sock, 0,
                                 &conn->ksnc_myipaddr, NULL);
        if (rc != 0) {
                CERROR ("Error %d getting sock local IP\n", rc);
                return rc;
        }

        return 0;
}
unsigned int
ksocknal_lib_sock_irq (struct socket *sock)
{
        int                irq = 0;
        struct dst_entry  *dst;

        if (!*ksocknal_tunables.ksnd_irq_affinity)
                return 0;

        dst = sk_dst_get (sock->sk);
        if (dst != NULL) {
                if (dst->dev != NULL) {
                        /* ... derive irq from dst->dev ... */
                        if (irq >= NR_IRQS) {
                                CERROR ("Unexpected IRQ %x\n", irq);
                                irq = 0;
                        }
                }
                dst_release (dst);
        }

        return irq;
}
int
ksocknal_lib_zc_capable(struct socket *sock)
{
        int  caps = sock->sk->sk_route_caps;

        /* ZC if the socket supports scatter/gather and doesn't need software
         * checksums */
        return ((caps & NETIF_F_SG) != 0 &&
                (caps & (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)) != 0);
}
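/* Example: a NIC advertising NETIF_F_SG | NETIF_F_HW_CSUM in sk_route_caps
 * passes the test above, while one without NETIF_F_SG fails it: lacking
 * scatter/gather, the stack would have to copy each fragment into a linear
 * buffer anyway, so zero-copy would buy nothing. */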
int
ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
{
        struct socket *sock = conn->ksnc_sock;
        int            nob;
        int            rc;

        if (*ksocknal_tunables.ksnd_enable_csum        && /* checksum enabled */
            conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection  */
            tx->tx_nob == tx->tx_resid                 && /* first sending    */
            tx->tx_msg.ksm_csum == 0)                     /* not checksummed  */
                ksocknal_lib_csum_tx(tx);

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        {
#if SOCKNAL_SINGLE_FRAG_TX
                struct iovec  scratch;
                struct iovec *scratchiov = &scratch;
                unsigned int  niov = 1;
#else
                struct iovec *scratchiov = conn->ksnc_tx_scratch_iov;
                unsigned int  niov = tx->tx_niov;
#endif
                struct msghdr msg = {
                        .msg_iov    = scratchiov,
                        .msg_iovlen = niov,
                        .msg_flags  = MSG_DONTWAIT
                };
                mm_segment_t oldmm = get_fs();
                int          i;

                for (nob = i = 0; i < niov; i++) {
                        scratchiov[i] = tx->tx_iov[i];
                        nob += scratchiov[i].iov_len;
                }

                if (!list_empty(&conn->ksnc_tx_queue) ||
                    nob < tx->tx_resid)
                        msg.msg_flags |= MSG_MORE;

                set_fs (KERNEL_DS);
                rc = sock_sendmsg(sock, &msg, nob);
                set_fs (oldmm);
        }
        return rc;
}
int
ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
{
        struct socket *sock = conn->ksnc_sock;
        lnet_kiov_t   *kiov = tx->tx_kiov;
        int            rc;
        int            nob;

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        if (kiov->kiov_len >= *ksocknal_tunables.ksnd_zc_min_frag &&
            tx->tx_msg.ksm_zc_req_cookie != 0) {
                /* Zero copy is enabled */
                struct page *page = kiov->kiov_page;
                int          offset = kiov->kiov_offset;
                int          fragsize = kiov->kiov_len;
                int          msgflg = MSG_DONTWAIT;

                CDEBUG(D_NET, "page %p + offset %x for %d\n",
                       page, offset, kiov->kiov_len);

                if (!list_empty(&conn->ksnc_tx_queue) ||
                    fragsize < tx->tx_resid)
                        msgflg |= MSG_MORE;

                rc = tcp_sendpage(sock, page, offset, fragsize, msgflg);
        } else {
#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK
                struct iovec  scratch;
                struct iovec *scratchiov = &scratch;
                unsigned int  niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
                struct iovec *scratchiov = conn->ksnc_tx_scratch_iov;
                unsigned int  niov = tx->tx_nkiov;
#endif
                struct msghdr msg = {
                        .msg_iov    = scratchiov,
                        .msg_iovlen = niov,
                        .msg_flags  = MSG_DONTWAIT
                };
                mm_segment_t oldmm = get_fs();
                int          i;

                for (nob = i = 0; i < niov; i++) {
                        scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
                                                 kiov[i].kiov_offset;
                        nob += scratchiov[i].iov_len = kiov[i].kiov_len;
                }

                if (!list_empty(&conn->ksnc_tx_queue) ||
                    nob < tx->tx_resid)
                        msg.msg_flags |= MSG_MORE;

                set_fs (KERNEL_DS);
                rc = sock_sendmsg(sock, &msg, nob);
                set_fs (oldmm);

                for (i = 0; i < niov; i++)
                        kunmap(kiov[i].kiov_page);
        }
        return rc;
}
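/* Design note: the tcp_sendpage() path above hands TCP a reference to the
 * payload page instead of copying it, so the page must stay intact until
 * the stack is done with it.  That is why the path is only taken when
 * tx_msg.ksm_zc_req_cookie is set: the V2.x protocol uses the cookie to
 * have the peer acknowledge the zero-copy send before the buffer is
 * released. */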
void
ksocknal_lib_eager_ack (ksock_conn_t *conn)
{
        int            opt = 1;
        mm_segment_t   oldmm = get_fs();
        struct socket *sock = conn->ksnc_sock;

        /* Remind the socket to ACK eagerly.  If I don't, the socket might
         * think I'm about to send something it could piggy-back the ACK
         * on, introducing delay in completing zero-copy sends in my
         * peer. */

        set_fs(KERNEL_DS);
        sock->ops->setsockopt (sock, SOL_TCP, TCP_QUICKACK,
                               (char *)&opt, sizeof (opt));
        set_fs(oldmm);
}
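/* NB TCP_QUICKACK is not a permanent setting: the kernel falls back to
 * delayed ACKs on its own, which is why it has to be re-armed like this
 * around receives rather than set once at socket setup. */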
int
ksocknal_lib_recv_iov (ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX
        struct iovec  scratch;
        struct iovec *scratchiov = &scratch;
        unsigned int  niov = 1;
#else
        struct iovec *scratchiov = conn->ksnc_rx_scratch_iov;
        unsigned int  niov = conn->ksnc_rx_niov;
#endif
        struct iovec *iov = conn->ksnc_rx_iov;
        struct msghdr msg = {
                .msg_iov    = scratchiov,
                .msg_iovlen = niov,
                .msg_flags  = 0
        };
        mm_segment_t oldmm = get_fs();
        int          nob;
        int          i;
        int          rc;
        int          fragnob;
        int          sum;
        __u32        saved_csum;

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        for (nob = i = 0; i < niov; i++) {
                scratchiov[i] = iov[i];
                nob += scratchiov[i].iov_len;
        }
        LASSERT (nob <= conn->ksnc_rx_nob_wanted);

        set_fs (KERNEL_DS);
        rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT);
        /* NB this is just a boolean..........................^ */
        set_fs (oldmm);

        saved_csum = 0;
        if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
                saved_csum = conn->ksnc_msg.ksm_csum;
                conn->ksnc_msg.ksm_csum = 0;
        }

        if (saved_csum != 0) {
                /* accumulate checksum */
                for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
                        LASSERT (i < niov);

                        fragnob = iov[i].iov_len;
                        if (fragnob > sum)
                                fragnob = sum;

                        conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
                                                           iov[i].iov_base, fragnob);
                }
                conn->ksnc_msg.ksm_csum = saved_csum;
        }

        return rc;
}
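/* Worked example of the accumulation loop above (illustrative numbers):
 * with two iovs of 100 and 50 bytes and a partial receive of rc = 120,
 * the first pass checksums fragnob = 100 bytes, the second clamps
 * fragnob = min(50, 120 - 100) = 20, and the loop exits with sum = 0.
 * The remaining 30 bytes are checksummed when the next receive lands. */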
int
ksocknal_lib_recv_kiov (ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
        struct iovec  scratch;
        struct iovec *scratchiov = &scratch;
        unsigned int  niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
        struct iovec *scratchiov = conn->ksnc_rx_scratch_iov;
        unsigned int  niov = conn->ksnc_rx_nkiov;
#endif
        lnet_kiov_t  *kiov = conn->ksnc_rx_kiov;
        struct msghdr msg = {
                .msg_iov    = scratchiov,
                .msg_iovlen = niov,
                .msg_flags  = 0
        };
        mm_segment_t oldmm = get_fs();
        int          nob;
        int          i;
        int          rc;
        void        *base;
        int          sum;
        int          fragnob;

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        for (nob = i = 0; i < niov; i++) {
                scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
                nob += scratchiov[i].iov_len = kiov[i].kiov_len;
        }
        LASSERT (nob <= conn->ksnc_rx_nob_wanted);

        set_fs (KERNEL_DS);
        rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT);
        /* NB this is just a boolean.......................^ */
        set_fs (oldmm);

        if (conn->ksnc_msg.ksm_csum != 0) {
                for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
                        LASSERT (i < niov);

                        /* Dang! have to kmap again because I have nowhere to
                         * stash the mapped address.  But by doing it while the
                         * page is still mapped, the kernel just bumps the map
                         * count and returns me the address it stashed. */
                        base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
                        fragnob = kiov[i].kiov_len;
                        if (fragnob > sum)
                                fragnob = sum;

                        conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
                                                           base, fragnob);

                        kunmap(kiov[i].kiov_page);
                }
        }

        for (i = 0; i < niov; i++)
                kunmap(kiov[i].kiov_page);

        return rc;
}
void ksocknal_lib_csum_tx(ksock_tx_t *tx)
{
        int    i;
        __u32  csum;
        void  *base;

        LASSERT(tx->tx_iov[0].iov_base == (void *)&tx->tx_msg);
        LASSERT(tx->tx_conn != NULL);
        LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);

        tx->tx_msg.ksm_csum = 0;

        csum = ksocknal_csum(~0, (void *)tx->tx_iov[0].iov_base,
                             tx->tx_iov[0].iov_len);

        if (tx->tx_kiov != NULL) {
                for (i = 0; i < tx->tx_nkiov; i++) {
                        base = kmap(tx->tx_kiov[i].kiov_page) +
                               tx->tx_kiov[i].kiov_offset;

                        csum = ksocknal_csum(csum, base, tx->tx_kiov[i].kiov_len);

                        kunmap(tx->tx_kiov[i].kiov_page);
                }
        } else {
                for (i = 1; i < tx->tx_niov; i++)
                        csum = ksocknal_csum(csum, tx->tx_iov[i].iov_base,
                                             tx->tx_iov[i].iov_len);
        }

        if (*ksocknal_tunables.ksnd_inject_csum_error) {
                csum++;
                *ksocknal_tunables.ksnd_inject_csum_error = 0;
        }

        tx->tx_msg.ksm_csum = csum;
}
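/* NB the receive side mirrors this: recv_iov()/recv_kiov() above accumulate
 * ksnc_rx_csum over the same bytes (message header with ksm_csum zeroed
 * first, then the payload frags), so a mismatch on either end indicates
 * corruption on the wire. */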
int
ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
{
        mm_segment_t   oldmm = get_fs ();
        struct socket *sock = conn->ksnc_sock;
        int            len;
        int            rc;

        rc = ksocknal_connsock_addref(conn);
        if (rc != 0) {
                LASSERT (conn->ksnc_closing);
                *txmem = *rxmem = *nagle = 0;
                return rc;
        }

        rc = libcfs_sock_getbuf(sock, txmem, rxmem);
        if (rc == 0) {
                len = sizeof(*nagle);
                set_fs(KERNEL_DS);
                rc = sock->ops->getsockopt(sock, SOL_TCP, TCP_NODELAY,
                                           (char *)nagle, &len);
                set_fs(oldmm);
        }

        ksocknal_connsock_decref(conn);

        if (rc == 0)
                *nagle = !*nagle;       /* TCP_NODELAY set means Nagle is off */
        else
                *txmem = *rxmem = *nagle = 0;

        return rc;
}
int
ksocknal_lib_setup_sock (struct socket *sock)
{
        mm_segment_t  oldmm = get_fs ();
        int           rc;
        int           option;
        int           keep_idle;
        int           keep_intvl;
        int           keep_count;
        int           do_keepalive;
        struct linger linger;

        sock->sk->sk_allocation = GFP_NOFS;

        /* Ensure this socket aborts active sends immediately when we close
         * it. */
        linger.l_onoff = 0;
        linger.l_linger = 0;

        set_fs (KERNEL_DS);
        rc = sock_setsockopt (sock, SOL_SOCKET, SO_LINGER,
                              (char *)&linger, sizeof (linger));
        set_fs (oldmm);
        if (rc != 0) {
                CERROR ("Can't set SO_LINGER: %d\n", rc);
                return rc;
        }

        option = -1;
        set_fs (KERNEL_DS);
        rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_LINGER2,
                                    (char *)&option, sizeof (option));
        set_fs (oldmm);
        if (rc != 0) {
                CERROR ("Can't set TCP_LINGER2: %d\n", rc);
                return rc;
        }

        if (!*ksocknal_tunables.ksnd_nagle) {
                option = 1;

                set_fs (KERNEL_DS);
                rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_NODELAY,
                                            (char *)&option, sizeof (option));
                set_fs (oldmm);
                if (rc != 0) {
                        CERROR ("Can't disable nagle: %d\n", rc);
                        return rc;
                }
        }
        rc = libcfs_sock_setbuf(sock,
                                *ksocknal_tunables.ksnd_tx_buffer_size,
                                *ksocknal_tunables.ksnd_rx_buffer_size);
        if (rc != 0) {
                CERROR ("Can't set buffer tx %d, rx %d buffers: %d\n",
                        *ksocknal_tunables.ksnd_tx_buffer_size,
                        *ksocknal_tunables.ksnd_rx_buffer_size, rc);
                return rc;
        }
/* TCP_BACKOFF_* sockopt tunables unsupported in stock kernels */
#ifdef SOCKNAL_BACKOFF
        if (*ksocknal_tunables.ksnd_backoff_init > 0) {
                option = *ksocknal_tunables.ksnd_backoff_init;

                set_fs (KERNEL_DS);
                rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_BACKOFF_INIT,
                                            (char *)&option, sizeof (option));
                set_fs (oldmm);
                if (rc != 0) {
                        CERROR ("Can't set initial tcp backoff %d: %d\n",
                                option, rc);
                        return rc;
                }
        }

        if (*ksocknal_tunables.ksnd_backoff_max > 0) {
                option = *ksocknal_tunables.ksnd_backoff_max;

                set_fs (KERNEL_DS);
                rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_BACKOFF_MAX,
                                            (char *)&option, sizeof (option));
                set_fs (oldmm);
                if (rc != 0) {
                        CERROR ("Can't set maximum tcp backoff %d: %d\n",
                                option, rc);
                        return rc;
                }
        }
#endif
        /* snapshot tunables */
        keep_idle  = *ksocknal_tunables.ksnd_keepalive_idle;
        keep_count = *ksocknal_tunables.ksnd_keepalive_count;
        keep_intvl = *ksocknal_tunables.ksnd_keepalive_intvl;

        do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);

        option = (do_keepalive ? 1 : 0);

        set_fs (KERNEL_DS);
        rc = sock_setsockopt (sock, SOL_SOCKET, SO_KEEPALIVE,
                              (char *)&option, sizeof (option));
        set_fs (oldmm);
        if (rc != 0) {
                CERROR ("Can't set SO_KEEPALIVE: %d\n", rc);
                return rc;
        }

        if (!do_keepalive)
                return 0;

        set_fs (KERNEL_DS);
        rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPIDLE,
                                    (char *)&keep_idle, sizeof (keep_idle));
        set_fs (oldmm);
        if (rc != 0) {
                CERROR ("Can't set TCP_KEEPIDLE: %d\n", rc);
                return rc;
        }

        set_fs (KERNEL_DS);
        rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPINTVL,
                                    (char *)&keep_intvl, sizeof (keep_intvl));
        set_fs (oldmm);
        if (rc != 0) {
                CERROR ("Can't set TCP_KEEPINTVL: %d\n", rc);
                return rc;
        }

        set_fs (KERNEL_DS);
        rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPCNT,
                                    (char *)&keep_count, sizeof (keep_count));
        set_fs (oldmm);
        if (rc != 0) {
                CERROR ("Can't set TCP_KEEPCNT: %d\n", rc);
                return rc;
        }

        return 0;
}
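/* Illustrative keepalive arithmetic: with keepalive_idle=30,
 * keepalive_intvl=2 and keepalive_count=10, a dead peer is detected
 * roughly 30 + 10 * 2 = 50 seconds after the connection goes quiet:
 * 30s of idle before the first probe, then 10 unanswered probes 2s
 * apart. */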
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
struct tcp_opt *sock2tcp_opt(struct sock *sk)
{
        return &(sk->tp_pinfo.af_tcp);
}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10))
#define sock2tcp_opt(sk) tcp_sk(sk)
#else
struct tcp_opt *sock2tcp_opt(struct sock *sk)
{
        struct tcp_sock *s = (struct tcp_sock *)sk;
        return &s->tcp;
}
#endif
void
ksocknal_lib_push_conn (ksock_conn_t *conn)
{
        struct sock     *sk;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11))
        struct tcp_opt  *tp;
#else
        struct tcp_sock *tp;
#endif
        int              nonagle;
        int              val = 1;
        int              rc;
        mm_segment_t     oldmm;

        rc = ksocknal_connsock_addref(conn);
        if (rc != 0)                            /* being shut down */
                return;

        sk = conn->ksnc_sock->sk;
        tp = sock2tcp_opt(sk);

        lock_sock (sk);
        nonagle = tp->nonagle;
        tp->nonagle = 1;
        release_sock (sk);

        oldmm = get_fs ();
        set_fs (KERNEL_DS);

        rc = sk->sk_prot->setsockopt (sk, SOL_TCP, TCP_NODELAY,
                                      (char *)&val, sizeof (val));
        LASSERT (rc == 0);
        set_fs (oldmm);

        lock_sock (sk);
        tp->nonagle = nonagle;
        release_sock (sk);

        ksocknal_connsock_decref(conn);
}
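/* Design note: "pushing" works by flipping TCP_NODELAY on and then
 * restoring the saved nonagle state.  Enabling TCP_NODELAY makes TCP
 * transmit whatever it has queued immediately, so this flushes data that
 * Nagle would otherwise hold back, without permanently changing the
 * socket's configuration. */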
extern void ksocknal_read_callback (ksock_conn_t *conn);
extern void ksocknal_write_callback (ksock_conn_t *conn);

/*
 * socket callbacks in Linux
 */
static void
ksocknal_data_ready (struct sock *sk, int n)
{
        ksock_conn_t *conn;

        /* interleave correctly with closing sockets... */
        read_lock (&ksocknal_data.ksnd_global_lock);

        conn = sk->sk_user_data;
        if (conn == NULL) {             /* raced with ksocknal_terminate_conn */
                LASSERT (sk->sk_data_ready != &ksocknal_data_ready);
                sk->sk_data_ready (sk, n);
        } else
                ksocknal_read_callback(conn);

        read_unlock (&ksocknal_data.ksnd_global_lock);
}
static void
ksocknal_write_space (struct sock *sk)
{
        ksock_conn_t *conn;
        int           wspace;
        int           min_wspace;

        /* interleave correctly with closing sockets... */
        read_lock (&ksocknal_data.ksnd_global_lock);

        conn = sk->sk_user_data;
        wspace = SOCKNAL_WSPACE(sk);
        min_wspace = SOCKNAL_MIN_WSPACE(sk);

        CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
               sk, wspace, min_wspace, conn,
               (conn == NULL) ? "" : (conn->ksnc_tx_ready ?
                                      " ready" : " blocked"),
               (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ?
                                      " scheduled" : " idle"),
               (conn == NULL) ? "" : (list_empty (&conn->ksnc_tx_queue) ?
                                      " empty" : " queued"));

        if (conn == NULL) {             /* raced with ksocknal_terminate_conn */
                LASSERT (sk->sk_write_space != &ksocknal_write_space);
                sk->sk_write_space (sk);

                read_unlock (&ksocknal_data.ksnd_global_lock);
                return;
        }

        if (wspace >= min_wspace) {             /* got enough space */
                ksocknal_write_callback(conn);

                /* Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
                 * ENOMEM check in ksocknal_transmit is race-free (think about
                 * it). */
                clear_bit (SOCK_NOSPACE, &sk->sk_socket->flags);
        }

        read_unlock (&ksocknal_data.ksnd_global_lock);
}
void
ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn)
{
        conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
        conn->ksnc_saved_write_space = sock->sk->sk_write_space;
}
void
ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
{
        sock->sk->sk_user_data = conn;
        sock->sk->sk_data_ready = ksocknal_data_ready;
        sock->sk->sk_write_space = ksocknal_write_space;
}
void
ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
{
        /* Remove conn's network callbacks.
         * NB I _have_ to restore the callback, rather than storing a noop,
         * since the socket could survive past this module being unloaded!! */
        sock->sk->sk_data_ready = conn->ksnc_saved_data_ready;
        sock->sk->sk_write_space = conn->ksnc_saved_write_space;

        /* A callback could be in progress already; they hold a read lock
         * on ksnd_global_lock (to serialise with me) and NOOP if
         * sk_user_data is NULL. */
        sock->sk->sk_user_data = NULL;
}
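/* Lifecycle of the callback hooks above: at connection setup,
 * ksocknal_lib_save_callback() stashes the socket's original
 * sk_data_ready/sk_write_space and ksocknal_lib_set_callback() installs
 * ours along with sk_user_data = conn; at termination,
 * ksocknal_lib_reset_callback() restores the originals and clears
 * sk_user_data, which is the NULL that in-flight callbacks check under
 * ksnd_global_lock. */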