1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002 Cluster File Systems, Inc.
5 * Author: Phil Schwan <phil@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 * Darwin porting library
23 * Make things easy to port
25 #include <mach/mach_types.h>
27 #include <netinet/in.h>
28 #include <netinet/tcp.h>
/* Force the single-fragment send/receive paths on Darwin: the iov/kiov
 * send and recv routines below then pass exactly one iovec per
 * sosend()/soreceive() call instead of the full scatter-gather list. */
34 #undef SOCKNAL_SINGLE_FRAG_TX
35 #define SOCKNAL_SINGLE_FRAG_TX 1
36 #undef SOCKNAL_SINGLE_FRAG_RX
37 #define SOCKNAL_SINGLE_FRAG_RX 1
/* BSD sysctl plumbing: expose the ksocknal tunables under the
 * "portals.ksocknal" node so they can be read/written at runtime.
 * NOTE(review): intervening lines (final SYSCTL_INT arguments, table
 * terminator) appear elided in this view — verify against full source. */
40 SYSCTL_DECL(_portals);
42 SYSCTL_NODE (_portals, OID_AUTO, ksocknal, CTLFLAG_RW,
43 0, "ksocknal_sysctl");
/* Each SYSCTL_INT binds one integer tunable from ksocknal_tunables. */
45 SYSCTL_INT(_portals_ksocknal, OID_AUTO, timeout,
46 CTLTYPE_INT | CTLFLAG_RW , &ksocknal_tunables.ksnd_io_timeout,
48 SYSCTL_INT(_portals_ksocknal, OID_AUTO, eager_ack,
49 CTLTYPE_INT | CTLFLAG_RW , &ksocknal_tunables.ksnd_eager_ack,
51 SYSCTL_INT(_portals_ksocknal, OID_AUTO, typed,
52 CTLTYPE_INT | CTLFLAG_RW , &ksocknal_tunables.ksnd_typed_conns,
54 SYSCTL_INT(_portals_ksocknal, OID_AUTO, min_bulk,
55 CTLTYPE_INT | CTLFLAG_RW , &ksocknal_tunables.ksnd_min_bulk,
57 SYSCTL_INT(_portals_ksocknal, OID_AUTO, buffer_size,
58 CTLTYPE_INT | CTLFLAG_RW , &ksocknal_tunables.ksnd_buffer_size,
60 SYSCTL_INT(_portals_ksocknal, OID_AUTO, nagle,
61 CTLTYPE_INT | CTLFLAG_RW , &ksocknal_tunables.ksnd_nagle,
/* Table of all sysctl entries above; presumably registered/unregistered
 * by the module init/fini code elsewhere in the file — TODO confirm. */
64 cfs_sysctl_table_t ksocknal_top_ctl_table [] = {
65 &sysctl__portals_ksocknal,
66 &sysctl__portals_ksocknal_timeout,
67 &sysctl__portals_ksocknal_eager_ack,
68 &sysctl__portals_ksocknal_typed,
69 &sysctl__portals_ksocknal_min_bulk,
70 &sysctl__portals_ksocknal_buffer_size,
71 &sysctl__portals_ksocknal_nagle,
75 static unsigned long ksocknal_mbuf_size = (u_quad_t)SB_MAX * MCLBYTES / (MSIZE + MCLBYTES);
/* Resolve a file descriptor to its struct socket via the current
 * process's fd table (Linux-compat shim; 'foo' is unused here).
 * NOTE(review): the visible lines do not check getsock()'s result —
 * confirm error handling in the elided body before relying on it. */
78 sockfd_lookup(int fd, void *foo)
85 getsock(current_proc()->p_fd, fd, &fp);
87 so = (struct socket *)fp->f_data;
/* Socket file-operations vector supplied by the Darwin kernel. */
95 extern struct fileops socketops;
/* Wrap an in-kernel socket in a file descriptor for the current process
 * (Linux sock_map_fd() equivalent): allocate a struct file, mark it as
 * a read/write socket, point it at 'so', then clear UF_RESERVED so the
 * fd becomes visible.  Presumably returns the new fd — body elided. */
98 sock_map_fd (struct socket *so)
102 CFS_DECL_FUNNEL_DATA;
105 falloc(current_proc(), &fp, &fd);
106 fp->f_flag = FREAD|FWRITE;
107 fp->f_type = DTYPE_SOCKET;
108 fp->f_ops = &socketops;
109 fp->f_data = (caddr_t)so;
111 *fdflags(current_proc(), fd) &= ~UF_RESERVED;
/* Release a socket previously wrapped by sock_map_fd().  The backing
 * struct file was stashed in so->reserved4 (see set-up code elsewhere);
 * detach it here before teardown. */
118 sock_release(struct socket *so)
121 CFS_DECL_FUNNEL_DATA;
123 fp = (struct file *)so->reserved4;
124 so->reserved4 = NULL;
/* NOTE(review): the two lines below (orig 137/140) appear to belong to
 * a separate fd-release helper whose signature is elided in this view;
 * fdrelse() returns the descriptor slot to the process fd table. */
137 CFS_DECL_FUNNEL_DATA;
140 fdrelse(current_proc(), fd);
145 ksocknal_lib_bind_irq (unsigned int irq)
151 ksocknal_lib_sock_irq (struct socket *sock)
/* Fill in the connection's peer (ksnc_ipaddr/ksnc_port) and local
 * (ksnc_myipaddr) IPv4 addresses by querying the socket's protocol
 * usrreqs.  Addresses are stored in host byte order (ntohl/ntohs).
 * pru_peeraddr/pru_sockaddr allocate the sockaddr with M_SONAME, hence
 * the FREE(sa, M_SONAME) on both success and error paths. */
157 ksocknal_lib_get_conn_addrs (ksock_conn_t *conn)
159 struct sockaddr_in *sin;
165 rc = conn->ksnc_sock->so_proto->pr_usrreqs->pru_peeraddr(conn->ksnc_sock, &sa);
166 LASSERT (!conn->ksnc_closing);
169 if (sa) FREE(sa, M_SONAME);
170 CERROR ("Error %d getting sock peer IP\n", rc);
173 sin = (struct sockaddr_in *)sa;
174 conn->ksnc_ipaddr = ntohl (sin->sin_addr.s_addr);
175 conn->ksnc_port = ntohs (sin->sin_port);
176 if (sa) FREE(sa, M_SONAME);
/* Second query: the socket's own (local) address. */
177 rc = conn->ksnc_sock->so_proto->pr_usrreqs->pru_sockaddr(conn->ksnc_sock, &sa);
180 if (sa) FREE(sa, M_SONAME);
181 CERROR ("Error %d getting sock local IP\n", rc);
184 conn->ksnc_myipaddr = ntohl (sin->sin_addr.s_addr);
/* Send the plain-iovec portion of a tx on the connection's socket via
 * a non-blocking sosend().  Returns bytes sent (derived from the uio
 * residual, since sosend() does not report a partial-send count) or a
 * negative/zero outcome on failure — exact error mapping elided. */
190 ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
192 #if SOCKNAL_SINGLE_FRAG_TX
193 struct iovec scratch;
194 struct iovec *scratchiov = &scratch;
/* Multi-fragment path: use the per-connection scratch iovec array. */
197 struct iovec *scratchiov = conn->ksnc_tx_scratch_iov;
198 int niov = tx->tx_niov;
200 struct socket *sock = conn->ksnc_sock;
205 .uio_iov = scratchiov,
208 .uio_resid = 0, /* This will be valued after a while */
209 .uio_segflg = UIO_SYSSPACE,
213 int flags = MSG_DONTWAIT;
/* Copy the tx iovecs into scratch and total up 'nob' to send. */
216 for (nob = i = 0; i < niov; i++) {
217 scratchiov[i] = tx->tx_iov[i];
218 nob += scratchiov[i].iov_len;
220 suio.uio_resid = nob;
223 rc = sosend(sock, NULL, &suio, (struct mbuf *)0, (struct mbuf *)0, flags);
226 /* NB there is no return value can indicate how many
227 * have been sent and how many resid, we have to get
228 * sent bytes from suio. */
230 if (suio.uio_resid != nob &&\
231 (rc == ERESTART || rc == EINTR || rc == EWOULDBLOCK))
232 /* We have sent something */
233 rc = nob - suio.uio_resid;
234 else if ( rc == EWOULDBLOCK )
235 /* Actually, EAGAIN and EWOULDBLOCK have same value in OSX */
/* Success: report the number of bytes actually consumed by sosend(). */
240 rc = nob - suio.uio_resid;
/* Send the page (kiov) portion of a tx: kmap each page into a scratch
 * iovec, sosend() non-blocking, then kunmap all pages regardless of
 * outcome.  Returns bytes sent computed from the uio residual, as in
 * ksocknal_lib_send_iov() above. */
246 ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
248 #if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK
249 struct iovec scratch;
250 struct iovec *scratchiov = &scratch;
/* Multi-fragment path (only when kmap deadlock is deemed acceptable). */
253 struct iovec *scratchiov = conn->ksnc_tx_scratch_iov;
254 int niov = tx->tx_nkiov;
256 struct socket *sock = conn->ksnc_sock;
257 ptl_kiov_t *kiov = tx->tx_kiov;
262 .uio_iov = scratchiov,
265 .uio_resid = 0, /* It should be valued after a while */
266 .uio_segflg = UIO_SYSSPACE,
270 int flags = MSG_DONTWAIT;
/* Map each kiov page and build the scratch iovec; 'nob' totals bytes. */
273 for (nob = i = 0; i < niov; i++) {
274 scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) +
276 nob += scratchiov[i].iov_len = kiov[i].kiov_len;
278 suio.uio_resid = nob;
281 rc = sosend(sock, NULL, &suio, (struct mbuf *)0, (struct mbuf *)0, flags);
/* Unmap every page before interpreting the result. */
284 for (i = 0; i < niov; i++)
285 cfs_kunmap(kiov[i].kiov_page);
288 if (suio.uio_resid != nob &&\
289 (rc == ERESTART || rc == EINTR || rc == EWOULDBLOCK))
290 /* We have sent something */
291 rc = nob - suio.uio_resid;
292 else if ( rc == EWOULDBLOCK )
293 /* EAGAIN and EWOULDBLOCK have same value in OSX */
/* Success: bytes actually sent. */
298 rc = nob - suio.uio_resid;
304 * liang: Hack of inpcb and tcpcb.
305 * To get tcpcb of a socket, and call tcp_output
/* NOTE(review): these ks_* shadow structs mirror the layout prefixes of
 * the kernel's private struct tcpcb / struct inpcb so their fields can
 * be reached from so->so_pcb.  This is fragile: any kernel layout change
 * silently breaks it.  Only the leading fields used below are declared. */
316 LIST_HEAD(ks_tsegqe_head, ks_tseg_qent);
319 struct ks_tsegqe_head t_segq;
321 struct ks_tcptemp *unused;
323 struct inpcb *t_inpcb;
327 * There are more fields but we dont need
/* t_flags bits used by ksocknal_lib_eager_ack() below. */
332 #define TF_ACKNOW 0x00001
333 #define TF_DELACK 0x00002
336 LIST_ENTRY(ks_inpcb) inp_hash;
337 struct in_addr reserved1;
338 struct in_addr reserved2;
341 LIST_ENTRY(inpcb) inp_list;
344 * There are more fields but we dont need
/* Accessors mimicking the kernel's sotoinpcb()/intotcpcb() macros. */
349 #define ks_sotoinpcb(so) ((struct ks_inpcb *)(so)->so_pcb)
350 #define ks_intotcpcb(ip) ((struct ks_tcpcb *)(ip)->inp_ppcb)
351 #define ks_sototcpcb(so) (intotcpcb(sotoinpcb(so)))
/* Force an immediate TCP ACK on this connection (TCP_QUICKACK
 * equivalent).  Reaches into the kernel tcpcb via the ks_* layout hack
 * above: clear TF_DELACK, set TF_ACKNOW, then call tcp_output(). */
354 ksocknal_lib_eager_ack (ksock_conn_t *conn)
356 struct socket *sock = conn->ksnc_sock;
357 struct ks_inpcb *inp = ks_sotoinpcb(sock);
358 struct ks_tcpcb *tp = ks_intotcpcb(inp);
/* tcp_output is not exported via headers; declare it locally. */
362 extern int tcp_output(register struct ks_tcpcb *tp);
367 if (tp && tp->t_flags & TF_DELACK){
368 tp->t_flags &= ~TF_DELACK;
369 tp->t_flags |= TF_ACKNOW;
370 (void) tcp_output(tp);
375 * No TCP_QUICKACK supported in BSD, so I have to call tcp_fasttimo
376 * to send immediate ACK. It's not the best resolution because
377 * tcp_fasttimo will send out ACK for all delayed-ack tcp socket.
378 * Anyway, it's working now.
379 * extern void tcp_fasttimo();
/* Receive into the connection's plain-iovec rx descriptors via a
 * non-blocking soreceive().  Returns bytes received (from the uio
 * residual) on full or partial success; error mapping elided. */
388 ksocknal_lib_recv_iov (ksock_conn_t *conn)
390 #if SOCKNAL_SINGLE_FRAG_RX
391 struct iovec scratch;
392 struct iovec *scratchiov = &scratch;
/* Multi-fragment path: per-connection scratch iovec array. */
395 struct iovec *scratchiov = conn->ksnc_rx_scratch_iov;
396 int niov = conn->ksnc_rx_niov;
398 struct iovec *iov = conn->ksnc_rx_iov;
403 .uio_iov = scratchiov,
406 .uio_resid = 0, /* It should be valued after a while */
407 .uio_segflg = UIO_SYSSPACE,
411 int flags = MSG_DONTWAIT;
/* Copy rx iovecs into scratch; 'nob' totals the bytes requested. */
414 for (nob = i = 0; i < niov; i++) {
415 scratchiov[i] = iov[i];
416 nob += scratchiov[i].iov_len;
418 LASSERT (nob <= conn->ksnc_rx_nob_wanted);
420 ruio.uio_resid = nob;
423 rc = soreceive(conn->ksnc_sock, (struct sockaddr **)0, &ruio, (struct mbuf **)0, (struct mbuf **)0, &flags);
426 if (ruio.uio_resid != nob && \
427 (rc == ERESTART || rc == EINTR || rc == EWOULDBLOCK || rc == EAGAIN))
428 /* data partially received */
429 rc = nob - ruio.uio_resid;
430 else if (rc == EWOULDBLOCK)
431 /* EAGAIN and EWOULDBLOCK have same value in OSX */
/* Success: bytes actually received. */
436 rc = nob - ruio.uio_resid;
/* Receive into the connection's page (kiov) rx descriptors: kmap each
 * page, soreceive() non-blocking, kunmap all pages, then derive the
 * byte count from the uio residual as in ksocknal_lib_recv_iov(). */
442 ksocknal_lib_recv_kiov (ksock_conn_t *conn)
444 #if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
445 struct iovec scratch;
446 struct iovec *scratchiov = &scratch;
/* Multi-fragment path (kmap deadlock risk accepted). */
449 struct iovec *scratchiov = conn->ksnc_rx_scratch_iov;
450 int niov = conn->ksnc_rx_nkiov;
452 ptl_kiov_t *kiov = conn->ksnc_rx_kiov;
457 .uio_iov = scratchiov,
461 .uio_segflg = UIO_SYSSPACE,
465 int flags = MSG_DONTWAIT;
/* Map each kiov page into the scratch iovec; 'nob' totals bytes. */
468 for (nob = i = 0; i < niov; i++) {
469 scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
470 nob += scratchiov[i].iov_len = kiov[i].kiov_len;
472 LASSERT (nob <= conn->ksnc_rx_nob_wanted);
474 ruio.uio_resid = nob;
477 rc = soreceive(conn->ksnc_sock, (struct sockaddr **)0, &ruio, (struct mbuf **)0, NULL, &flags);
/* Unmap every page before interpreting the result. */
480 for (i = 0; i < niov; i++)
481 cfs_kunmap(kiov[i].kiov_page);
484 if (ruio.uio_resid != nob && \
485 (rc == ERESTART || rc == EINTR || rc == EWOULDBLOCK))
486 /* data partially received */
487 rc = nob - ruio.uio_resid;
488 else if (rc == EWOULDBLOCK)
489 /* receive blocked, EWOULDBLOCK == EAGAIN */
/* Success: bytes actually received. */
494 rc = nob - ruio.uio_resid;
/* Blocking write of exactly 'nob' bytes from 'buffer' to 'sock':
 * loop (loop structure elided in this view) advancing 'buffer' by the
 * bytes each sosend() consumed until the residual reaches zero, and
 * tolerating ERESTART/EINTR by retrying. */
500 ksocknal_lib_sock_write (struct socket *sock, void *buffer, int nob)
515 .uio_segflg = UIO_SYSSPACE,
521 rc = sosend(sock, NULL, &suio, (struct mbuf *)0, (struct mbuf *)0, 0);
525 if ( suio.uio_resid != nob && ( rc == ERESTART || rc == EINTR ||\
/* Partial send: advance past what was written and retry the rest. */
530 rc = nob - suio.uio_resid;
531 buffer = ((char *)buffer) + rc;
532 nob = suio.uio_resid;
/* Blocking read of exactly 'nob' bytes from 'sock' into 'buffer':
 * mirror of ksocknal_lib_sock_write() above — retries soreceive() on
 * ERESTART/EINTR, advancing the buffer by the bytes received. */
542 ksocknal_lib_sock_read (struct socket *sock, void *buffer, int nob)
557 .uio_segflg = UIO_SYSSPACE,
563 rc = soreceive(sock, (struct sockaddr **)0, &ruio, (struct mbuf **)0, (struct mbuf **)0, (int *)0);
567 if ( ruio.uio_resid != nob && ( rc == ERESTART || rc == EINTR ||\
/* Partial read: advance past what was received and retry the rest. */
572 rc = nob - ruio.uio_resid;
573 buffer = ((char *)buffer) + rc;
574 nob = ruio.uio_resid;
/* Query the connection's current socket settings: send buffer (*txmem,
 * SO_SNDBUF), receive buffer (*rxmem, SO_RCVBUF) and Nagle state
 * (*nagle, TCP_NODELAY) via sogetopt().  Takes/releases a connection
 * reference with ksocknal_getconnsock/putconnsock; if the connection is
 * already closing, all three outputs are zeroed. */
584 ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
587 struct socket *sock = conn->ksnc_sock;
592 rc = ksocknal_getconnsock (conn);
594 LASSERT (conn->ksnc_closing);
595 *txmem = *rxmem = *nagle = 0;
/* SO_SNDBUF -> *txmem */
599 len = sizeof(*txmem);
600 bzero(&sopt, sizeof sopt);
601 sopt.sopt_dir = SOPT_GET;
602 sopt.sopt_level = SOL_SOCKET;
603 sopt.sopt_name = SO_SNDBUF;
604 sopt.sopt_val = txmem;
605 sopt.sopt_valsize = len;
608 rc = sogetopt(sock, &sopt);
/* SO_RCVBUF -> *rxmem (reuses the sopt set up above) */
610 len = sizeof(*rxmem);
611 sopt.sopt_name = SO_RCVBUF;
612 sopt.sopt_val = rxmem;
613 rc = sogetopt(sock, &sopt);
/* TCP_NODELAY -> *nagle */
616 len = sizeof(*nagle);
617 sopt.sopt_level = IPPROTO_TCP;
618 sopt.sopt_name = TCP_NODELAY;
619 sopt.sopt_val = nagle;
620 rc = sogetopt(sock, &sopt);
624 ksocknal_putconnsock (conn);
/* Error path: report zeros rather than stale values. */
629 *txmem = *rxmem = *nagle = 0;
/* Apply the socknal tunables to a freshly created socket:
 *   - SO_LINGER so close() aborts active sends promptly,
 *   - TCP_NODELAY when nagle is disabled in the tunables,
 *   - SO_SNDBUF/SO_RCVBUF when buffer_size > 0 (clamped to
 *     ksocknal_mbuf_size),
 *   - SO_KEEPALIVE plus TCP_KEEPALIVE idle time when all three
 *     keepalive tunables are positive.
 * Errors from individual sosetopt() calls are reported via CERROR;
 * overall return behavior is elided in this view. */
635 ksocknal_lib_setup_sock (struct socket *so)
644 struct linger linger;
647 /* Ensure this socket aborts active sends immediately when we close
650 bzero(&sopt, sizeof sopt);
654 sopt.sopt_dir = SOPT_SET;
655 sopt.sopt_level = SOL_SOCKET;
656 sopt.sopt_name = SO_LINGER;
657 sopt.sopt_val = &linger;
658 sopt.sopt_valsize = sizeof(linger);
661 rc = sosetopt(so, &sopt);
663 CERROR ("Can't set SO_LINGER: %d\n", rc);
/* Disable Nagle when the tunable asks for low-latency sends. */
668 if (!ksocknal_tunables.ksnd_nagle) {
670 bzero(&sopt, sizeof sopt);
671 sopt.sopt_dir = SOPT_SET;
672 sopt.sopt_level = IPPROTO_TCP;
673 sopt.sopt_name = TCP_NODELAY;
674 sopt.sopt_val = &option;
675 sopt.sopt_valsize = sizeof(option);
676 rc = sosetopt(so, &sopt);
678 CERROR ("Can't disable nagle: %d\n", rc);
/* Optional explicit socket buffer sizing, bounded by mbuf limits. */
682 if (ksocknal_tunables.ksnd_buffer_size > 0) {
683 option = ksocknal_tunables.ksnd_buffer_size;
684 if (option > ksocknal_mbuf_size)
685 option = ksocknal_mbuf_size;
687 sopt.sopt_dir = SOPT_SET;
688 sopt.sopt_level = SOL_SOCKET;
689 sopt.sopt_name = SO_SNDBUF;
690 sopt.sopt_val = &option;
691 sopt.sopt_valsize = sizeof(option);
692 rc = sosetopt(so, &sopt);
694 CERROR ("Can't set send buffer %d: %d\n",
/* Same size for the receive buffer (reuses the sopt above). */
699 sopt.sopt_name = SO_RCVBUF;
700 rc = sosetopt(so, &sopt);
702 CERROR ("Can't set receive buffer %d: %d\n",
707 /* snapshot tunables */
708 keep_idle = ksocknal_tunables.ksnd_keepalive_idle;
709 keep_count = ksocknal_tunables.ksnd_keepalive_count;
710 keep_intvl = ksocknal_tunables.ksnd_keepalive_intvl;
/* Keepalive is enabled only when every related tunable is positive. */
712 do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);
713 option = (do_keepalive ? 1 : 0);
714 bzero(&sopt, sizeof sopt);
715 sopt.sopt_dir = SOPT_SET;
716 sopt.sopt_level = SOL_SOCKET;
717 sopt.sopt_name = SO_KEEPALIVE;
718 sopt.sopt_val = &option;
719 sopt.sopt_valsize = sizeof(option);
720 rc = sosetopt(so, &sopt);
722 CERROR ("Can't set SO_KEEPALIVE: %d\n", rc);
727 /* no more setting, just return */
/* Darwin exposes only the idle time (TCP_KEEPALIVE); count/interval
 * tunables cannot be applied here. */
732 bzero(&sopt, sizeof sopt);
733 sopt.sopt_dir = SOPT_SET;
734 sopt.sopt_level = IPPROTO_TCP;
735 sopt.sopt_name = TCP_KEEPALIVE;
736 sopt.sopt_val = &keep_idle;
737 sopt.sopt_valsize = sizeof(keep_idle);
738 rc = sosetopt(so, &sopt);
740 CERROR ("Can't set TCP_KEEPALIVE : %d\n", rc);
/* Create and connect a socket for an autoconnect route:
 *   1. build local (bound to route->ksnr_myipaddr / local_port) and
 *      server sockaddr_in structures,
 *   2. socreate() a TCP socket and wrap it in an fd (sock_map_fd),
 *   3. set SO_SNDTIMEO/SO_RCVTIMEO from ksnd_io_timeout and
 *      SO_REUSEADDR, then sobind() and soconnect(),
 *   4. sleep on so_timeo until the non-blocking connect resolves.
 * *may_retry is set when the failure is transient (EADDRNOTAVAIL /
 * EADDRINUSE) so the caller can try another local port.  Returns the
 * socket via *sockp on success — exact cleanup paths elided. */
749 ksocknal_lib_connect_sock (struct socket **sockp, int *may_retry,
750 ksock_route_t *route, int local_port)
752 struct sockaddr_in locaddr;
753 struct sockaddr_in srvaddr;
761 CFS_DECL_FUNNEL_DATA;
/* Local address: requested port, and the route's interface IP if set
 * (otherwise presumably INADDR_ANY — the fallback line is elided). */
764 bzero (&locaddr, sizeof (locaddr));
765 locaddr.sin_len = sizeof(struct sockaddr_in);
766 locaddr.sin_family = AF_INET;
767 locaddr.sin_port = htons (local_port);
768 locaddr.sin_addr.s_addr =
769 (route->ksnr_myipaddr != 0) ? htonl(route->ksnr_myipaddr)
/* Server address from the route's peer IP/port. */
771 bzero(&srvaddr, sizeof(srvaddr));
772 srvaddr.sin_len = sizeof(struct sockaddr_in);
773 srvaddr.sin_family = AF_INET;
774 srvaddr.sin_port = htons (route->ksnr_port);
775 srvaddr.sin_addr.s_addr = htonl (route->ksnr_ipaddr);
780 rc = socreate(PF_INET, &so, SOCK_STREAM, 0);
784 CERROR ("Can't create autoconnect socket: %d\n", rc);
790 * Liang: what do we need here?
792 fd = sock_map_fd (so);
795 CERROR ("sock_map_fd error %d\n", fd);
800 /* Set the socket timeouts, so our connection attempt completes in
802 tv.tv_sec = ksocknal_tunables.ksnd_io_timeout;
804 bzero(&sopt, sizeof sopt);
805 sopt.sopt_dir = SOPT_SET;
806 sopt.sopt_level = SOL_SOCKET;
807 sopt.sopt_name = SO_SNDTIMEO;
809 sopt.sopt_valsize = sizeof(tv);
812 rc = sosetopt(so, &sopt);
815 CERROR ("Can't set send timeout %d: %d\n",
816 ksocknal_tunables.ksnd_io_timeout, rc);
/* Same timeout on the receive side. */
819 sopt.sopt_level = SOL_SOCKET;
820 sopt.sopt_name = SO_RCVTIMEO;
821 rc = sosetopt(so, &sopt);
824 CERROR ("Can't set receive timeout %d: %d\n",
825 ksocknal_tunables.ksnd_io_timeout, rc);
/* Allow quick rebinding of the chosen local port. */
829 sopt.sopt_level = SOL_SOCKET;
830 sopt.sopt_name = SO_REUSEADDR;
831 sopt.sopt_val = &option;
832 sopt.sopt_valsize = sizeof(option);
833 rc = sosetopt(so, &sopt);
836 CERROR ("Can't set sock reuse address: %d\n", rc);
839 rc = sobind(so, (struct sockaddr *)&locaddr);
840 if (rc == EADDRINUSE) {
842 CDEBUG(D_NET, "Port %d already in use\n", local_port);
848 CERROR ("Can't bind to local IP Address %u.%u.%u.%u: %d\n",
849 HIPQUAD(route->ksnr_myipaddr), rc);
852 rc = soconnect(so, (struct sockaddr *)&srvaddr);
/* Transient address errors: tell the caller to retry elsewhere. */
853 *may_retry = (rc == EADDRNOTAVAIL || rc == EADDRINUSE);
856 if (rc != EADDRNOTAVAIL && rc != EADDRINUSE)
857 CERROR ("Can't connect to nid "LPX64
858 " local IP: %u.%u.%u.%u,"
859 " remote IP: %u.%u.%u.%u/%d: %d\n",
860 route->ksnr_peer->ksnp_nid,
861 HIPQUAD(route->ksnr_myipaddr),
862 HIPQUAD(route->ksnr_ipaddr),
863 route->ksnr_port, rc);
/* Wait (1s ticks) for the in-progress connect to succeed or fail. */
868 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
869 CDEBUG(D_NET, "ksocknal sleep for waiting auto_connect.\n");
870 (void) tsleep((caddr_t)&so->so_timeo, PSOCK, "ksocknal_conn", hz);
872 LASSERT((so->so_state & SS_ISCONNECTED));
878 CERROR ("Error %d waiting for connection to nid "LPX64
879 " local IP: %u.%u.%u.%u,"
880 " remote IP: %u.%u.%u.%u/%d: %d\n", rc,
881 route->ksnr_peer->ksnp_nid,
882 HIPQUAD(route->ksnr_myipaddr),
883 HIPQUAD(route->ksnr_ipaddr),
884 route->ksnr_port, rc);
/* Failure cleanup: drop the file wrapping the socket. */
890 rele_file(KSN_SOCK2FILE(so));
/* "Push" any queued data on the connection by poking TCP_NODELAY via
 * sosetopt() (presumably setting then restoring it — the value writes
 * are elided in this view; confirm against full source).  Holds a
 * connection reference for the duration; silently returns if the
 * connection is already being shut down. */
896 ksocknal_lib_push_conn(ksock_conn_t *conn)
904 rc = ksocknal_getconnsock (conn);
905 if (rc != 0) /* being shut down */
907 sock = conn->ksnc_sock;
908 bzero(&sopt, sizeof sopt);
909 sopt.sopt_dir = SOPT_SET;
910 sopt.sopt_level = IPPROTO_TCP;
911 sopt.sopt_name = TCP_NODELAY;
912 sopt.sopt_val = &val;
913 sopt.sopt_valsize = sizeof val;
916 sosetopt(sock, &sopt);
919 ksocknal_putconnsock (conn);
/* Scheduler callbacks implemented in the portable socknal core. */
923 extern void ksocknal_read_callback (ksock_conn_t *conn);
924 extern void ksocknal_write_callback (ksock_conn_t *conn);
/* Socket upcall installed by ksocknal_lib_set_callback(): invoked by
 * the kernel when socket state changes.  Under the global read lock it
 * looks up the conn stashed in so->reserved3 and dispatches to the
 * read/write callbacks when the socket is actually readable/writable.
 * A NULL 'arg' (see ksocknal_lib_act_callback) forces both checks. */
927 ksocknal_upcall(struct socket *so, caddr_t arg, int waitf)
933 read_lock (&ksocknal_data.ksnd_global_lock);
934 conn = so->reserved3;
937 /* More processing is needed? */
940 if ((so->so_rcv.sb_flags & SB_UPCALL) || !arg ) {
941 extern int soreadable(struct socket *so);
943 if (conn->ksnc_rx_nob_wanted && soreadable(so)){
944 /* To verify whether the upcall is for receive */
946 ksocknal_read_callback (conn);
/* Write side: only fire when the send buffer has room. */
951 if ((so->so_snd.sb_flags & SB_UPCALL) || !arg){
952 extern int sowriteable(struct socket *so);
954 if (sowriteable(so)){
955 /* socket is writable */
957 ksocknal_write_callback(conn);
962 read_unlock (&ksocknal_data.ksnd_global_lock);
/* Portable-interface hook to save a socket's original callbacks before
 * overriding them; nothing to save on Darwin, so this is a no-op. */
968 ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn)
970 /* No callback need to save in osx */
/* Wire ksocknal_upcall into the socket: install the upcall with a
 * non-NULL arg (the socket itself, used only as a flag — see the !arg
 * checks in ksocknal_upcall), enable SB_UPCALL on both buffers, give
 * the receive side a 2*HZ timeout, and stash the conn in reserved3 so
 * the upcall can find it. */
975 ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
980 sock->so_upcallarg = (void *)sock; /* anything not NULL */
981 sock->so_upcall = ksocknal_upcall;
982 sock->so_snd.sb_timeo = 0;
983 sock->so_rcv.sb_timeo = 2 * HZ;
984 sock->so_rcv.sb_flags |= SB_UPCALL;
985 sock->so_snd.sb_flags |= SB_UPCALL;
986 sock->reserved3 = conn;
/* Manually trigger the upcall with arg == NULL, which makes
 * ksocknal_upcall() check both the read and write sides regardless of
 * the SB_UPCALL flags. */
992 ksocknal_lib_act_callback(struct socket *sock)
994 /* upcall will take the network funnel */
995 ksocknal_upcall (sock, 0, 0);
/* Undo ksocknal_lib_set_callback(): remove the upcall and clear the
 * SB_UPCALL flags on both socket buffers.  NOTE(review): reserved3 is
 * not cleared in the visible lines — the conn pointer may linger;
 * confirm against the full source. */
999 ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
1004 sock->so_upcall = NULL;
1005 sock->so_upcallarg = NULL;
1006 sock->so_rcv.sb_flags &= ~SB_UPCALL;
1007 sock->so_snd.sb_flags &= ~SB_UPCALL;