/* Binary sysctl identifiers for the socknal tunables exposed under
 * /proc/sys.  SOCKNAL_SYSCTL is the id of the "socknal" directory
 * itself; the remaining ids index the individual integer entries in
 * ksocknal_ctl_table below. */
4 #define SOCKNAL_SYSCTL 200
6 #define SOCKNAL_SYSCTL_TIMEOUT 1
7 #define SOCKNAL_SYSCTL_EAGER_ACK 2
8 #define SOCKNAL_SYSCTL_ZERO_COPY 3
9 #define SOCKNAL_SYSCTL_TYPED 4
10 #define SOCKNAL_SYSCTL_MIN_BULK 5
11 #define SOCKNAL_SYSCTL_BUFFER_SIZE 6
12 #define SOCKNAL_SYSCTL_NAGLE 7
13 #define SOCKNAL_SYSCTL_IRQ_AFFINITY 8
14 #define SOCKNAL_SYSCTL_KEEPALIVE_IDLE 9
15 #define SOCKNAL_SYSCTL_KEEPALIVE_COUNT 10
16 #define SOCKNAL_SYSCTL_KEEPALIVE_INTVL 11
/* sysctl table: maps each socknal tunable to an int field of
 * ksocknal_tunables.  Every entry is mode 0644 and handled by the
 * generic proc_dointvec, so values may be read/written at runtime. */
18 static ctl_table ksocknal_ctl_table[] = {
19 {SOCKNAL_SYSCTL_TIMEOUT, "timeout",
20 &ksocknal_tunables.ksnd_io_timeout, sizeof (int),
21 0644, NULL, &proc_dointvec},
22 {SOCKNAL_SYSCTL_EAGER_ACK, "eager_ack",
23 &ksocknal_tunables.ksnd_eager_ack, sizeof (int),
24 0644, NULL, &proc_dointvec},
/* NB "zero_copy" is backed by ksnd_zc_min_frag: the minimum fragment
 * size for which a zero-copy send is attempted (see send paths). */
26 {SOCKNAL_SYSCTL_ZERO_COPY, "zero_copy",
27 &ksocknal_tunables.ksnd_zc_min_frag, sizeof (int),
28 0644, NULL, &proc_dointvec},
30 {SOCKNAL_SYSCTL_TYPED, "typed",
31 &ksocknal_tunables.ksnd_typed_conns, sizeof (int),
32 0644, NULL, &proc_dointvec},
33 {SOCKNAL_SYSCTL_MIN_BULK, "min_bulk",
34 &ksocknal_tunables.ksnd_min_bulk, sizeof (int),
35 0644, NULL, &proc_dointvec},
36 {SOCKNAL_SYSCTL_BUFFER_SIZE, "buffer_size",
37 &ksocknal_tunables.ksnd_buffer_size, sizeof(int),
38 0644, NULL, &proc_dointvec},
39 {SOCKNAL_SYSCTL_NAGLE, "nagle",
40 &ksocknal_tunables.ksnd_nagle, sizeof(int),
41 0644, NULL, &proc_dointvec},
43 {SOCKNAL_SYSCTL_IRQ_AFFINITY, "irq_affinity",
44 &ksocknal_tunables.ksnd_irq_affinity, sizeof(int),
45 0644, NULL, &proc_dointvec},
/* keepalive_{idle,count,intvl}: TCP keepalive knobs applied to new
 * sockets in ksocknal_lib_setup_sock(). */
47 {SOCKNAL_SYSCTL_KEEPALIVE_IDLE, "keepalive_idle",
48 &ksocknal_tunables.ksnd_keepalive_idle, sizeof(int),
49 0644, NULL, &proc_dointvec},
50 {SOCKNAL_SYSCTL_KEEPALIVE_COUNT, "keepalive_count",
51 &ksocknal_tunables.ksnd_keepalive_count, sizeof(int),
52 0644, NULL, &proc_dointvec},
53 {SOCKNAL_SYSCTL_KEEPALIVE_INTVL, "keepalive_intvl",
54 &ksocknal_tunables.ksnd_keepalive_intvl, sizeof(int),
55 0644, NULL, &proc_dointvec},
/* Top-level sysctl table: creates the read-only (0555) "socknal"
 * directory containing the tunables table above. */
59 ctl_table ksocknal_top_ctl_table[] = {
60 {SOCKNAL_SYSCTL, "socknal", NULL, 0, 0555, ksocknal_ctl_table},
/* Bind @irq to the CPU of its assigned scheduler, once per IRQ.
 * Compiled in only for SMP kernels with CPU_AFFINITY enabled.
 * The bound/unbound state (ksni_bound) is checked and flipped under
 * the global write lock so only the first caller performs the bind;
 * the bind itself shells out (usermodehelper) to write the CPU mask
 * into /proc/irq/<irq>/smp_affinity. */
66 ksocknal_lib_bind_irq (unsigned int irq)
68 #if (defined(CONFIG_SMP) && CPU_AFFINITY)
73 ksock_irqinfo_t *info;
74 char *argv[] = {"/bin/sh",
78 char *envp[] = {"HOME=/",
79 "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
82 LASSERT (irq < NR_IRQS);
83 if (irq == 0) /* software NIC or affinity disabled */
86 info = &ksocknal_data.ksnd_irqinfo[irq];
88 write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags);
90 LASSERT (info->ksni_valid);
91 bind = !info->ksni_bound;
/* NOTE(review): ksni_bound is presumably set here before dropping the
 * lock (line elided in this view) -- confirm against full source. */
94 write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags);
96 if (!bind) /* bound already */
99 cpu = ksocknal_irqsched2cpu(info->ksni_sched);
100 snprintf (cmdline, sizeof (cmdline),
101 "echo %d > /proc/irq/%u/smp_affinity", 1 << cpu, irq);
103 printk (KERN_INFO "Lustre: Binding irq %u to CPU %d with cmd: %s\n",
106 /* FIXME: Find a better method of setting IRQ affinity...
109 USERMODEHELPER(argv[0], argv, envp);
/* Fill in conn's peer and local IPv4 address/port by querying the
 * socket via ops->getname (last arg 2 = peer, 0 = local).  Addresses
 * and port are stored in host byte order (ntohl/ntohs). */
114 ksocknal_lib_get_conn_addrs (ksock_conn_t *conn)
116 struct sockaddr_in sin;
117 int len = sizeof (sin);
120 rc = conn->ksnc_sock->ops->getname (conn->ksnc_sock,
121 (struct sockaddr *)&sin, &len, 2);
122 /* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
123 LASSERT (!conn->ksnc_closing);
126 CERROR ("Error %d getting sock peer IP\n", rc);
130 conn->ksnc_ipaddr = ntohl (sin.sin_addr.s_addr);
131 conn->ksnc_port = ntohs (sin.sin_port);
/* Second getname (peer==0) retrieves the local end of the socket. */
133 rc = conn->ksnc_sock->ops->getname (conn->ksnc_sock,
134 (struct sockaddr *)&sin, &len, 0);
136 CERROR ("Error %d getting sock local IP\n", rc);
140 conn->ksnc_myipaddr = ntohl (sin.sin_addr.s_addr);
/* Determine the IRQ of the network device currently routing @sock's
 * traffic (via the socket's cached dst_entry).  Returns early when
 * irq_affinity is disabled; an out-of-range IRQ is reported as an
 * error.  NOTE(review): interior lines elided in this view. */
146 ksocknal_lib_sock_irq (struct socket *sock)
149 struct dst_entry *dst;
151 if (!ksocknal_tunables.ksnd_irq_affinity)
154 dst = sk_dst_get (sock->sk);
156 if (dst->dev != NULL) {
158 if (irq >= NR_IRQS) {
159 CERROR ("Unexpected IRQ %x\n", irq);
/* Translate a kernel virtual address to its struct page, for the
 * virtual-address zero-copy send path.  Handles the three kernel
 * address regions: vmalloc area, persistent kmap (PKMAP) area, and
 * the direct (lowmem) mapping. */
169 #if (SOCKNAL_ZC && SOCKNAL_VADDR_ZC)
171 ksocknal_kvaddr_to_page (unsigned long vaddr)
175 if (vaddr >= VMALLOC_START &&
177 page = vmalloc_to_page ((void *)vaddr);
179 else if (vaddr >= PKMAP_BASE &&
180 vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE))
181 page = vmalloc_to_page ((void *)vaddr);
182 /* in 2.4 ^ just walks the page tables */
185 page = virt_to_page (vaddr);
/* Send tx's plain iovec fragments on conn's socket.
 *
 * Fast path (SOCKNAL_ZC && SOCKNAL_VADDR_ZC): if the first fragment is
 * large enough (>= zc_min_frag), the NIC advertises scatter/gather and
 * hardware checksum, and the vaddr maps to a page, send it zero-copy
 * with tcp_sendpage_zccd().
 *
 * Fallback: copy the iovs into a scratch array (socket ops may consume
 * or modify the iovec we pass) and do a non-blocking sock_sendmsg().
 * MSG_MORE is set when more data is queued behind this send so TCP can
 * coalesce. */
196 ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
198 struct socket *sock = conn->ksnc_sock;
199 #if (SOCKNAL_ZC && SOCKNAL_VADDR_ZC)
200 unsigned long vaddr = (unsigned long)iov->iov_base
201 int offset = vaddr & (PAGE_SIZE - 1);
202 int zcsize = MIN (iov->iov_len, PAGE_SIZE - offset);
208 /* NB we can't trust socket ops to either consume our iovs
209 * or leave them alone. */
211 #if (SOCKNAL_ZC && SOCKNAL_VADDR_ZC)
212 if (zcsize >= ksocknal_data.ksnd_zc_min_frag &&
213 (sock->sk->route_caps & NETIF_F_SG) &&
214 (sock->sk->route_caps & (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)) &&
215 (page = ksocknal_kvaddr_to_page (vaddr)) != NULL) {
216 int msgflg = MSG_DONTWAIT;
218 CDEBUG(D_NET, "vaddr %p, page %p->%p + offset %x for %d\n",
219 (void *)vaddr, page, page_address(page), offset, zcsize);
/* More queued, or this frag doesn't finish the tx: tell TCP more
 * data follows (flag added on the elided line below, presumably). */
221 if (!list_empty (&conn->ksnc_tx_queue) ||
222 zcsize < tx->tx_resid)
225 rc = tcp_sendpage_zccd(sock, page, offset, zcsize, msgflg, &tx->tx_zccd);
229 #if SOCKNAL_SINGLE_FRAG_TX
230 struct iovec scratch;
231 struct iovec *scratchiov = &scratch;
234 struct iovec *scratchiov = conn->ksnc_tx_scratch_iov;
235 int niov = tx->tx_niov;
237 struct msghdr msg = {
240 .msg_iov = scratchiov,
244 .msg_flags = MSG_DONTWAIT
246 mm_segment_t oldmm = get_fs();
249 for (nob = i = 0; i < niov; i++) {
250 scratchiov[i] = tx->tx_iov[i];
251 nob += scratchiov[i].iov_len;
254 if (!list_empty(&conn->ksnc_tx_queue) ||
256 msg.msg_flags |= MSG_MORE;
259 rc = sock_sendmsg(sock, &msg, nob);
/* Send tx's page (kiov) fragments on conn's socket.
 *
 * Fast path: if the first page fragment is large enough and the NIC
 * supports scatter/gather + hardware checksum, send the page directly
 * with tcp_sendpage_zccd() (true zero-copy).
 *
 * Fallback: kmap each page into a scratch iovec (socket ops may
 * consume or modify the iovec we pass), sock_sendmsg() the lot
 * non-blocking, then kunmap.  With CONFIG_HIGHMEM and multiple frags
 * this risks kmap deadlock, hence the #warning / single-frag option. */
266 ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
268 struct socket *sock = conn->ksnc_sock;
269 ptl_kiov_t *kiov = tx->tx_kiov;
273 /* NB we can't trust socket ops to either consume our iovs
274 * or leave them alone. */
277 if (kiov->kiov_len >= ksocknal_tunables.ksnd_zc_min_frag &&
278 (sock->sk->route_caps & NETIF_F_SG) &&
279 (sock->sk->route_caps & (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM))) {
280 struct page *page = kiov->kiov_page;
281 int offset = kiov->kiov_offset;
282 int fragsize = kiov->kiov_len;
283 int msgflg = MSG_DONTWAIT;
285 CDEBUG(D_NET, "page %p + offset %x for %d\n",
286 page, offset, kiov->kiov_len);
288 if (!list_empty(&conn->ksnc_tx_queue) ||
289 fragsize < tx->tx_resid)
292 rc = tcp_sendpage_zccd(sock, page, offset, fragsize, msgflg,
297 #if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK
298 struct iovec scratch;
299 struct iovec *scratchiov = &scratch;
302 #ifdef CONFIG_HIGHMEM
303 #warning "XXX risk of kmap deadlock on multiple frags..."
305 struct iovec *scratchiov = conn->ksnc_tx_scratch_iov;
306 int niov = tx->tx_nkiov;
308 struct msghdr msg = {
311 .msg_iov = scratchiov,
315 .msg_flags = MSG_DONTWAIT
317 mm_segment_t oldmm = get_fs();
320 for (nob = i = 0; i < niov; i++) {
321 scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
323 nob += scratchiov[i].iov_len = kiov[i].kiov_len;
326 if (!list_empty(&conn->ksnc_tx_queue) ||
/* BUGFIX: was MSG_DONTWAIT, which is already set in msg_flags above
 * and so was a no-op.  The intent -- matching the guard here and the
 * identical code path in ksocknal_lib_send_iov() -- is MSG_MORE:
 * tell TCP more data follows so it can coalesce segments. */
328 msg.msg_flags |= MSG_MORE;
331 rc = sock_sendmsg(sock, &msg, nob);
334 for (i = 0; i < niov; i++)
335 kunmap(kiov[i].kiov_page);
/* Ask TCP to ACK immediately (TCP_QUICKACK) on conn's socket, so a
 * pending zero-copy send completion isn't delayed by the peer waiting
 * to piggy-back its ACK on response data. */
341 ksocknal_lib_eager_ack (ksock_conn_t *conn)
344 mm_segment_t oldmm = get_fs();
345 struct socket *sock = conn->ksnc_sock;
347 /* Remind the socket to ACK eagerly. If I don't, the socket might
348 * think I'm about to send something it could piggy-back the ACK
349 * on, introducing delay in completing zero-copy sends in my
353 sock->ops->setsockopt (sock, SOL_TCP, TCP_QUICKACK,
354 (char *)&opt, sizeof (opt));
/* Receive into conn's plain iovec fragments.  The rx iovs are copied
 * to a scratch array first (socket ops may consume or modify the
 * iovec we pass), then a non-blocking sock_recvmsg() fills them. */
359 ksocknal_lib_recv_iov (ksock_conn_t *conn)
361 #if SOCKNAL_SINGLE_FRAG_RX
362 struct iovec scratch;
363 struct iovec *scratchiov = &scratch;
366 struct iovec *scratchiov = conn->ksnc_rx_scratch_iov;
367 int niov = conn->ksnc_rx_niov;
369 struct iovec *iov = conn->ksnc_rx_iov;
370 struct msghdr msg = {
373 .msg_iov = scratchiov,
379 mm_segment_t oldmm = get_fs();
384 /* NB we can't trust socket ops to either consume our iovs
385 * or leave them alone. */
388 for (nob = i = 0; i < niov; i++) {
389 scratchiov[i] = iov[i];
390 nob += scratchiov[i].iov_len;
392 LASSERT (nob <= conn->ksnc_rx_nob_wanted);
395 rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT);
396 /* NB this is just a boolean..........................^ */
/* Receive into conn's page (kiov) fragments: kmap each page into a
 * scratch iovec, sock_recvmsg() non-blocking, then kunmap.  As with
 * the kiov send path, multiple frags under CONFIG_HIGHMEM risk kmap
 * deadlock, hence the #warning / single-frag option. */
403 ksocknal_lib_recv_kiov (ksock_conn_t *conn)
405 #if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
406 struct iovec scratch;
407 struct iovec *scratchiov = &scratch;
410 #ifdef CONFIG_HIGHMEM
411 #warning "XXX risk of kmap deadlock on multiple frags..."
413 struct iovec *scratchiov = conn->ksnc_rx_scratch_iov;
414 int niov = conn->ksnc_rx_nkiov;
416 ptl_kiov_t *kiov = conn->ksnc_rx_kiov;
417 struct msghdr msg = {
420 .msg_iov = scratchiov,
426 mm_segment_t oldmm = get_fs();
431 /* NB we can't trust socket ops to either consume our iovs
432 * or leave them alone. */
433 for (nob = i = 0; i < niov; i++) {
434 scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
435 nob += scratchiov[i].iov_len = kiov[i].kiov_len;
437 LASSERT (nob <= conn->ksnc_rx_nob_wanted);
440 rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT);
441 /* NB this is just a boolean.......................^ */
444 for (i = 0; i < niov; i++)
445 kunmap(kiov[i].kiov_page);
/* Write exactly @nob bytes from @buffer to @sock, looping on partial
 * sends (buffer is advanced by rc each pass).  A zero return from
 * sock_sendmsg is unexpected and treated as a dead connection
 * (-ECONNABORTED). */
451 ksocknal_lib_sock_write (struct socket *sock, void *buffer, int nob)
454 mm_segment_t oldmm = get_fs();
461 struct msghdr msg = {
472 rc = sock_sendmsg (sock, &msg, iov.iov_len);
479 CERROR ("Unexpected zero rc\n");
480 return (-ECONNABORTED);
483 buffer = ((char *)buffer) + rc;
/* Read exactly @nob bytes from @sock into @buffer, looping on partial
 * reads (buffer is advanced by rc each pass).  A zero return means
 * EOF -- the peer closed -- reported as -ECONNABORTED. */
491 ksocknal_lib_sock_read (struct socket *sock, void *buffer, int nob)
494 mm_segment_t oldmm = get_fs();
501 struct msghdr msg = {
512 rc = sock_recvmsg (sock, &msg, iov.iov_len, 0);
519 return (-ECONNABORTED);
521 buffer = ((char *)buffer) + rc;
/* Report conn's current socket settings: send buffer (*txmem, via
 * SO_SNDBUF), receive buffer (*rxmem, via SO_RCVBUF) and nagle state
 * (*nagle, via TCP_NODELAY).  Takes a connsock ref for the duration;
 * if the conn is already closing, all three outputs are zeroed. */
529 ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
531 mm_segment_t oldmm = get_fs ();
532 struct socket *sock = conn->ksnc_sock;
536 rc = ksocknal_getconnsock (conn);
538 LASSERT (conn->ksnc_closing);
539 *txmem = *rxmem = *nagle = 0;
545 len = sizeof(*txmem);
546 rc = sock_getsockopt(sock, SOL_SOCKET, SO_SNDBUF,
547 (char *)txmem, &len);
549 len = sizeof(*rxmem);
550 rc = sock_getsockopt(sock, SOL_SOCKET, SO_RCVBUF,
551 (char *)rxmem, &len);
554 len = sizeof(*nagle);
555 rc = sock->ops->getsockopt(sock, SOL_TCP, TCP_NODELAY,
556 (char *)nagle, &len);
560 ksocknal_putconnsock (conn);
/* On any getsockopt failure, don't return stale values. */
565 *txmem = *rxmem = *nagle = 0;
/* Apply socknal policy to a freshly-created/accepted socket:
 *  - GFP_NOFS allocation (avoid fs recursion under memory pressure)
 *  - SO_LINGER / TCP_LINGER2 so closes abort sends promptly
 *  - TCP_NODELAY when the "nagle" tunable disables nagling
 *  - SO_SNDBUF/SO_RCVBUF when "buffer_size" is set
 *  - TCP keepalive (idle/count/intvl) from the keepalive tunables,
 *    enabled only when all three are > 0. */
571 ksocknal_lib_setup_sock (struct socket *sock)
573 mm_segment_t oldmm = get_fs ();
580 struct linger linger;
582 sock->sk->sk_allocation = GFP_NOFS;
584 /* Ensure this socket aborts active sends immediately when we close
591 rc = sock_setsockopt (sock, SOL_SOCKET, SO_LINGER,
592 (char *)&linger, sizeof (linger));
595 CERROR ("Can't set SO_LINGER: %d\n", rc);
601 rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_LINGER2,
602 (char *)&option, sizeof (option));
605 CERROR ("Can't set SO_LINGER2: %d\n", rc);
609 if (!ksocknal_tunables.ksnd_nagle) {
613 rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_NODELAY,
614 (char *)&option, sizeof (option));
617 CERROR ("Can't disable nagle: %d\n", rc);
622 if (ksocknal_tunables.ksnd_buffer_size > 0) {
623 option = ksocknal_tunables.ksnd_buffer_size;
626 rc = sock_setsockopt (sock, SOL_SOCKET, SO_SNDBUF,
627 (char *)&option, sizeof (option));
630 CERROR ("Can't set send buffer %d: %d\n",
636 rc = sock_setsockopt (sock, SOL_SOCKET, SO_RCVBUF,
637 (char *)&option, sizeof (option));
640 CERROR ("Can't set receive buffer %d: %d\n",
646 /* snapshot tunables */
647 keep_idle = ksocknal_tunables.ksnd_keepalive_idle;
648 keep_count = ksocknal_tunables.ksnd_keepalive_count;
649 keep_intvl = ksocknal_tunables.ksnd_keepalive_intvl;
651 do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);
653 option = (do_keepalive ? 1 : 0);
655 rc = sock_setsockopt (sock, SOL_SOCKET, SO_KEEPALIVE,
656 (char *)&option, sizeof (option));
659 CERROR ("Can't set SO_KEEPALIVE: %d\n", rc);
667 rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPIDLE,
668 (char *)&keep_idle, sizeof (keep_idle));
671 CERROR ("Can't set TCP_KEEPIDLE: %d\n", rc);
676 rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPINTVL,
677 (char *)&keep_intvl, sizeof (keep_intvl));
680 CERROR ("Can't set TCP_KEEPINTVL: %d\n", rc);
685 rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPCNT,
686 (char *)&keep_count, sizeof (keep_count));
689 CERROR ("Can't set TCP_KEEPCNT: %d\n", rc);
/* Create a TCP socket bound to @local_port (and the route's local IP
 * if configured), then connect it to the route's peer address.
 *
 * Steps: sock_create -> sock_map_fd (to get sock->file refcounting
 * compatible with userspace-passed sockets; the fd itself is closed
 * again after taking an extra file ref) -> SO_SNDTIMEO/SO_RCVTIMEO
 * from the io_timeout tunable -> SO_REUSEADDR -> bind -> connect.
 *
 * On connect failure, *may_retry is set when rc == -EADDRNOTAVAIL:
 * the caller should retry with a different local port (we are likely
 * already connected to this peer/port from this local port on a
 * different-typed connection).  -EADDRINUSE on bind likewise means
 * "try another port" (debug-logged, not an error). */
697 ksocknal_lib_connect_sock(struct socket **sockp, int *may_retry,
698 ksock_route_t *route, int local_port)
700 struct sockaddr_in locaddr;
701 struct sockaddr_in srvaddr;
705 mm_segment_t oldmm = get_fs();
708 memset(&locaddr, 0, sizeof(locaddr));
709 locaddr.sin_family = AF_INET;
710 locaddr.sin_port = htons(local_port);
711 locaddr.sin_addr.s_addr =
712 (route->ksnr_myipaddr != 0) ? htonl(route->ksnr_myipaddr)
715 memset (&srvaddr, 0, sizeof (srvaddr));
716 srvaddr.sin_family = AF_INET;
717 srvaddr.sin_port = htons (route->ksnr_port);
718 srvaddr.sin_addr.s_addr = htonl (route->ksnr_ipaddr);
722 rc = sock_create (PF_INET, SOCK_STREAM, 0, &sock);
725 CERROR ("Can't create autoconnect socket: %d\n", rc);
729 /* Ugh; have to map_fd for compatibility with sockets passed in
730 * from userspace. And we actually need the sock->file refcounting
731 * that this gives you :) */
733 rc = sock_map_fd (sock);
736 CERROR ("sock_map_fd error %d\n", rc);
740 /* NB the file descriptor (rc) now owns the ref on sock->file */
741 LASSERT (sock->file != NULL);
742 LASSERT (file_count(sock->file) == 1);
744 get_file(sock->file); /* extra ref makes sock->file */
745 sys_close(rc); /* survive this close */
747 /* Still got a single ref on sock->file */
748 LASSERT (file_count(sock->file) == 1);
750 /* Set the socket timeouts, so our connection attempt completes in
752 tv.tv_sec = ksocknal_tunables.ksnd_io_timeout;
756 rc = sock_setsockopt (sock, SOL_SOCKET, SO_SNDTIMEO,
757 (char *)&tv, sizeof (tv));
760 CERROR ("Can't set send timeout %d: %d\n",
761 ksocknal_tunables.ksnd_io_timeout, rc);
766 rc = sock_setsockopt (sock, SOL_SOCKET, SO_RCVTIMEO,
767 (char *)&tv, sizeof (tv));
770 CERROR ("Can't set receive timeout %d: %d\n",
771 ksocknal_tunables.ksnd_io_timeout, rc);
777 rc = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
778 (char *)&option, sizeof (option));
781 CERROR("Can't set SO_REUSEADDR for socket: %d\n", rc);
785 rc = sock->ops->bind(sock,
786 (struct sockaddr *)&locaddr, sizeof(locaddr));
787 if (rc == -EADDRINUSE) {
788 CDEBUG(D_NET, "Port %d already in use\n", local_port);
793 CERROR("Error trying to bind to reserved port %d: %d\n",
798 rc = sock->ops->connect(sock,
799 (struct sockaddr *)&srvaddr, sizeof(srvaddr),
800 sock->file->f_flags);
804 /* EADDRNOTAVAIL probably means we're already connected to the same
805 * peer/port on the same local port on a differently typed
806 * connection. Let our caller retry with a different local
808 *may_retry = (rc == -EADDRNOTAVAIL);
810 CDEBUG(*may_retry ? D_NET : D_ERROR,
811 "Error %d connecting %u.%u.%u.%u/%d -> %u.%u.%u.%u/%d\n", rc,
812 HIPQUAD(route->ksnr_myipaddr), local_port,
813 HIPQUAD(route->ksnr_ipaddr), route->ksnr_port);
/* Kernel-version compatibility shim: return the TCP options struct
 * embedded in a struct sock.  2.4 kernels keep it in
 * sk->tp_pinfo.af_tcp; 2.6 kernels embed it in struct tcp_sock. */
820 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
821 struct tcp_opt *sock2tcp_opt(struct sock *sk)
823 return &(sk->tp_pinfo.af_tcp);
826 struct tcp_opt *sock2tcp_opt(struct sock *sk)
828 struct tcp_sock *s = (struct tcp_sock *)sk;
/* Push any data TCP is holding back on conn's socket: save the current
 * nonagle setting, force TCP_NODELAY on (which flushes queued data),
 * then restore the saved nonagle state.  Holds a connsock ref; NOOPs
 * if the conn is being shut down. */
834 ksocknal_lib_push_conn (ksock_conn_t *conn)
843 rc = ksocknal_getconnsock (conn);
844 if (rc != 0) /* being shut down */
847 sk = conn->ksnc_sock->sk;
848 tp = sock2tcp_opt(sk);
851 nonagle = tp->nonagle;
858 rc = sk->sk_prot->setsockopt (sk, SOL_TCP, TCP_NODELAY,
859 (char *)&val, sizeof (val));
865 tp->nonagle = nonagle;
868 ksocknal_putconnsock (conn);
871 extern void ksocknal_read_callback (ksock_conn_t *conn);
872 extern void ksocknal_write_callback (ksock_conn_t *conn);
874 * socket call back in Linux
/* sk_data_ready callback installed on socknal sockets.  Under the
 * global read lock (to interleave safely with socket teardown):
 * if sk_user_data is NULL we raced with ksocknal_terminate_conn and
 * forward to the (restored) original callback; otherwise notify the
 * scheduler via ksocknal_read_callback(). */
877 ksocknal_data_ready (struct sock *sk, int n)
882 /* interleave correctly with closing sockets... */
883 read_lock (&ksocknal_data.ksnd_global_lock);
885 conn = sk->sk_user_data;
886 if (conn == NULL) { /* raced with ksocknal_terminate_conn */
887 LASSERT (sk->sk_data_ready != &ksocknal_data_ready);
888 sk->sk_data_ready (sk, n);
890 ksocknal_read_callback(conn);
892 read_unlock (&ksocknal_data.ksnd_global_lock);
/* sk_write_space callback installed on socknal sockets.  Under the
 * global read lock: forward to the original callback if we raced with
 * conn teardown (sk_user_data NULL); otherwise, once the free space
 * reaches the low-water mark, wake the writer via
 * ksocknal_write_callback() and clear SOCK_NOSPACE. */
898 ksocknal_write_space (struct sock *sk)
902 /* interleave correctly with closing sockets... */
903 read_lock (&ksocknal_data.ksnd_global_lock);
905 conn = sk->sk_user_data;
907 CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
908 sk, tcp_wspace(sk), SOCKNAL_TX_LOW_WATER(sk), conn,
909 (conn == NULL) ? "" : (conn->ksnc_tx_ready ?
910 " ready" : " blocked"),
911 (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ?
912 " scheduled" : " idle"),
913 (conn == NULL) ? "" : (list_empty (&conn->ksnc_tx_queue) ?
914 " empty" : " queued"));
916 if (conn == NULL) { /* raced with ksocknal_terminate_conn */
917 LASSERT (sk->sk_write_space != &ksocknal_write_space);
918 sk->sk_write_space (sk);
920 read_unlock (&ksocknal_data.ksnd_global_lock);
924 if (tcp_wspace(sk) >= SOCKNAL_TX_LOW_WATER(sk)) { /* got enough space */
925 ksocknal_write_callback(conn);
927 /* Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
928 * ENOMEM check in ksocknal_transmit is race-free (think about
931 clear_bit (SOCK_NOSPACE, &sk->sk_socket->flags);
934 read_unlock (&ksocknal_data.ksnd_global_lock);
/* Remember the socket's original data_ready/write_space callbacks so
 * ksocknal_lib_reset_callback() can restore them at teardown. */
938 ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn)
940 conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
941 conn->ksnc_saved_write_space = sock->sk->sk_write_space;
/* Install the socknal callbacks on the socket and point sk_user_data
 * at the conn (the callbacks use it to find the conn, and NULL means
 * "terminating" -- see ksocknal_data_ready/ksocknal_write_space). */
945 ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
947 sock->sk->sk_user_data = conn;
948 sock->sk->sk_data_ready = ksocknal_data_ready;
949 sock->sk->sk_write_space = ksocknal_write_space;
/* Fire both callbacks by hand, to pick up any events (readable data,
 * writable space) that occurred before the callbacks were installed. */
954 ksocknal_lib_act_callback(struct socket *sock, ksock_conn_t *conn)
956 ksocknal_data_ready (sock->sk, 0);
957 ksocknal_write_space (sock->sk);
/* Detach socknal from the socket: restore the saved callbacks and
 * NULL sk_user_data so in-flight callbacks become NOOPs. */
962 ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
964 /* Remove conn's network callbacks.
965 * NB I _have_ to restore the callback, rather than storing a noop,
966 * since the socket could survive past this module being unloaded!! */
967 sock->sk->sk_data_ready = conn->ksnc_saved_data_ready;
968 sock->sk->sk_write_space = conn->ksnc_saved_write_space;
970 /* A callback could be in progress already; they hold a read lock
971 * on ksnd_global_lock (to serialise with me) and NOOP if
972 * sk_user_data is NULL. */
973 sock->sk->sk_user_data = NULL;