1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
5 * Author: Zach Brown <zab@zabbo.net>
6 * Author: Peter J. Braam <braam@clusterfs.com>
7 * Author: Phil Schwan <phil@clusterfs.com>
8 * Author: Eric Barton <eric@bartonsoftware.com>
10 * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
12 * Portals is free software; you can redistribute it and/or
13 * modify it under the terms of version 2 of the GNU General Public
14 * License as published by the Free Software Foundation.
16 * Portals is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with Portals; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
/* Module-wide state: the Portals NI handle, the api-level nal_t wrapper,
 * the NAL's private data, and the router interface descriptor.
 * NOTE(review): the #else/#endif of the kernel-version conditional and the
 * initialiser's closing "};" are not visible in this extraction. */
28 ptl_handle_ni_t ksocknal_ni;
29 static nal_t ksocknal_api;
30 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
31 ksock_nal_data_t ksocknal_data;
/* pre-2.5 kernels: NAL data is static to this file */
33 static ksock_nal_data_t ksocknal_data;
/* lets the portals router hand packets to this NAL for forwarding */
36 kpr_nal_interface_t ksocknal_router_interface = {
38 kprni_arg: &ksocknal_data,
39 kprni_fwd: ksocknal_fwd_packet,
/* nal_t.forward method: hand an API-level request to the library dispatcher.
 * 'k' (presumably the NAL data recovered from 'nal' on a line not visible
 * here — confirm) is passed through so the send path can reach module state. */
44 ksocknal_api_forward(nal_t *nal, int id, void *args, size_t args_len,
45 void *ret, size_t ret_len)
51 nal_cb = k->ksnd_nal_cb;
53 lib_dispatch(nal_cb, k, id, args, ret); /* ksocknal_send needs k */
/* nal_t.shutdown method: tear down all autoconnect routes and close every
 * connection.  PTL_NID_ANY acts as a wildcard matching all peers. */
58 ksocknal_api_shutdown(nal_t *nal, int ni)
60 CDEBUG (D_NET, "closing all connections\n");
62 ksocknal_del_route (PTL_NID_ANY, 0, 0, 0);
63 ksocknal_close_conn (PTL_NID_ANY, 0);
/* nal_t.yield method (body not visible in this extraction). */
68 ksocknal_api_yield(nal_t *nal)
/* nal_t.lock method: enter the lib NAL critical section via the cb_cli
 * callback; 'flags' receives saved irq state for the matching unlock. */
75 ksocknal_api_lock(nal_t *nal, unsigned long *flags)
81 nal_cb = k->ksnd_nal_cb;
82 nal_cb->cb_cli(nal_cb,flags);
/* nal_t.unlock method: leave the lib NAL critical section via cb_sti,
 * restoring the irq state saved by ksocknal_api_lock(). */
86 ksocknal_api_unlock(nal_t *nal, unsigned long *flags)
92 nal_cb = k->ksnd_nal_cb;
93 nal_cb->cb_sti(nal_cb,flags);
/* Bootstrap callback passed to PtlNIInit(): initialise the library NI with
 * a placeholder nid of 0 (the real nid is installed later by
 * ksocknal_set_mynid()) and return the api-level nal_t. */
97 ksocknal_init(int interface, ptl_pt_index_t ptl_size,
98 ptl_ac_index_t ac_size, ptl_pid_t requested_pid)
100 CDEBUG(D_NET, "calling lib_init with nid "LPX64"\n", (ptl_nid_t)0);
101 lib_init(&ksocknal_lib, (ptl_nid_t)0, 0, 10, ptl_size, ac_size);
102 return (&ksocknal_api);
106 * EXTRA functions follow
/* Install the node's real NID into the library NI after module load
 * (lib_init() ran with a placeholder nid of 0 — see FIXME below). */
110 ksocknal_set_mynid(ptl_nid_t nid)
112 lib_ni_t *ni = &ksocknal_lib.ni;
114 /* FIXME: we have to do this because we call lib_init() at module
115 * insertion time, which is before we have 'mynid' available. lib_init
116 * sets the NAL's nid, which it uses to tell other nodes where packets
117 * are coming from. This is not a very graceful solution to this
120 CDEBUG(D_IOCTL, "setting mynid to "LPX64" (old nid="LPX64")\n",
/* Bind a hardware IRQ to the CPU of its assigned scheduler by spawning a
 * shell that writes the CPU mask into /proc/irq/<irq>/smp_affinity.
 * Compiled in only on SMP kernels with CPU_AFFINITY; irq 0 denotes a
 * software NIC and is skipped. */
128 ksocknal_bind_irq (unsigned int irq)
130 #if (defined(CONFIG_SMP) && CPU_AFFINITY)
134 ksock_irqinfo_t *info;
135 char *argv[] = {"/bin/sh",
139 char *envp[] = {"HOME=/",
140 "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
143 LASSERT (irq < NR_IRQS);
144 if (irq == 0) /* software NIC */
147 info = &ksocknal_data.ksnd_irqinfo[irq];
149 write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags);
/* only the first caller for a given irq performs the actual bind */
151 LASSERT (info->ksni_valid);
152 bind = !info->ksni_bound;
153 info->ksni_bound = 1;
155 write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags);
157 if (!bind) /* bound already */
160 snprintf (cmdline, sizeof (cmdline),
161 "echo %d > /proc/irq/%u/smp_affinity", 1 << info->ksni_sched, irq);
163 printk (KERN_INFO "Binding irq %u to CPU %d with cmd: %s\n",
164 irq, info->ksni_sched, cmdline);
166 /* FIXME: Find a better method of setting IRQ affinity...
169 call_usermodehelper (argv[0], argv, envp);
/* Allocate and initialise a new autoconnect route descriptor.
 * Returned with refcount 1 for the caller; ksnr_peer is attached later
 * (by ksocknal_add_route).  Allocation-failure path not visible here. */
174 ksocknal_create_route (__u32 ipaddr, int port, int buffer_size,
175 int irq_affinity, int xchange_nids, int nonagel)
177 ksock_route_t *route;
179 PORTAL_ALLOC (route, sizeof (*route));
183 atomic_set (&route->ksnr_refcount, 1);
184 route->ksnr_sharecount = 0;
185 route->ksnr_peer = NULL;
/* eligible to (re)connect immediately, with the minimum backoff */
186 route->ksnr_timeout = jiffies_64;
187 route->ksnr_retry_interval = SOCKNAL_MIN_RECONNECT_INTERVAL;
188 route->ksnr_ipaddr = ipaddr;
189 route->ksnr_port = port;
190 route->ksnr_buffer_size = buffer_size;
191 route->ksnr_irq_affinity = irq_affinity;
192 route->ksnr_xchange_nids = xchange_nids;
193 route->ksnr_nonagel = nonagel;
194 route->ksnr_connecting = 0;
195 route->ksnr_deleted = 0;
196 route->ksnr_generation = 0;
197 route->ksnr_conn = NULL;
203 ksocknal_destroy_route (ksock_route_t *route)
205 LASSERT (route->ksnr_sharecount == 0);
206 LASSERT (route->ksnr_conn == NULL);
208 if (route->ksnr_peer != NULL)
209 ksocknal_put_peer (route->ksnr_peer);
211 PORTAL_FREE (route, sizeof (*route));
/* Drop one reference on a route; destroys it when the count hits zero. */
215 ksocknal_put_route (ksock_route_t *route)
217 CDEBUG (D_OTHER, "putting route[%p] -> "LPX64" (%d)\n",
218 route, route->ksnr_peer->ksnp_nid,
219 atomic_read (&route->ksnr_refcount));
221 LASSERT (atomic_read (&route->ksnr_refcount) > 0);
222 if (!atomic_dec_and_test (&route->ksnr_refcount))
225 ksocknal_destroy_route (route);
/* Allocate and initialise a peer descriptor for 'nid' (must be a real nid,
 * not the wildcard).  Returned with 1 ref for the caller; also bumps the
 * global peer count which gates module unload. */
229 ksocknal_create_peer (ptl_nid_t nid)
233 LASSERT (nid != PTL_NID_ANY);
235 PORTAL_ALLOC (peer, sizeof (*peer));
239 memset (peer, 0, sizeof (*peer));
241 peer->ksnp_nid = nid;
242 atomic_set (&peer->ksnp_refcount, 1); /* 1 ref for caller */
243 peer->ksnp_closing = 0;
244 INIT_LIST_HEAD (&peer->ksnp_conns);
245 INIT_LIST_HEAD (&peer->ksnp_routes);
246 INIT_LIST_HEAD (&peer->ksnp_tx_queue);
248 /* Can't unload while peers exist; ensures all I/O has terminated
249 * before unload attempts */
251 atomic_inc (&ksocknal_data.ksnd_npeers);
256 ksocknal_destroy_peer (ksock_peer_t *peer)
258 CDEBUG (D_NET, "peer "LPX64" %p deleted\n", peer->ksnp_nid, peer);
260 LASSERT (atomic_read (&peer->ksnp_refcount) == 0);
261 LASSERT (list_empty (&peer->ksnp_conns));
262 LASSERT (list_empty (&peer->ksnp_routes));
263 LASSERT (list_empty (&peer->ksnp_tx_queue));
265 PORTAL_FREE (peer, sizeof (*peer));
267 /* NB a peer's connections and autoconnect routes keep a reference
268 * on their peer until they are destroyed, so we can be assured
269 * that _all_ state to do with this peer has been cleaned up when
270 * its refcount drops to zero. */
271 atomic_dec (&ksocknal_data.ksnd_npeers);
/* Drop one reference on a peer; destroys it when the count hits zero. */
276 ksocknal_put_peer (ksock_peer_t *peer)
278 CDEBUG (D_OTHER, "putting peer[%p] -> "LPX64" (%d)\n",
279 peer, peer->ksnp_nid,
280 atomic_read (&peer->ksnp_refcount));
282 LASSERT (atomic_read (&peer->ksnp_refcount) > 0);
283 if (!atomic_dec_and_test (&peer->ksnp_refcount))
286 ksocknal_destroy_peer (peer);
/* Look up a peer by nid in its hash bucket.  Caller must hold
 * ksnd_global_lock; no reference is taken for the caller. */
290 ksocknal_find_peer_locked (ptl_nid_t nid)
292 struct list_head *peer_list = ksocknal_nid2peerlist (nid);
293 struct list_head *tmp;
296 list_for_each (tmp, peer_list) {
298 peer = list_entry (tmp, ksock_peer_t, ksnp_list);
/* a hashed peer is live and must have at least one route or conn */
300 LASSERT (!peer->ksnp_closing);
301 LASSERT (!(list_empty (&peer->ksnp_routes) &&
302 list_empty (&peer->ksnp_conns)));
304 if (peer->ksnp_nid != nid)
307 CDEBUG(D_NET, "got peer [%p] -> "LPX64" (%d)\n",
308 peer, nid, atomic_read (&peer->ksnp_refcount));
/* Locked wrapper around ksocknal_find_peer_locked() that takes an extra
 * reference for the caller when the peer is found. */
315 ksocknal_get_peer (ptl_nid_t nid)
319 read_lock (&ksocknal_data.ksnd_global_lock);
320 peer = ksocknal_find_peer_locked (nid);
321 if (peer != NULL) /* +1 ref for caller? */
322 atomic_inc (&peer->ksnp_refcount);
323 read_unlock (&ksocknal_data.ksnd_global_lock);
/* Remove a peer from the hash table and drop the table's reference.
 * Caller holds ksnd_global_lock exclusively; peer must not already be
 * marked closing. */
329 ksocknal_unlink_peer_locked (ksock_peer_t *peer)
331 LASSERT (!peer->ksnp_closing);
332 peer->ksnp_closing = 1;
333 list_del (&peer->ksnp_list);
334 /* lose peerlist's ref */
335 ksocknal_put_peer (peer);
/* Walk every peer hash bucket and every peer's route list to find the
 * route at position 'index' (used by the GET_AUTOCONN ioctl to iterate).
 * Returns the route with a reference held, or falls through to the
 * unlock/NULL path when the index is out of range. */
339 ksocknal_get_route_by_idx (int index)
342 struct list_head *ptmp;
343 ksock_route_t *route;
344 struct list_head *rtmp;
347 read_lock (&ksocknal_data.ksnd_global_lock);
349 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
350 list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
351 peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
353 LASSERT (!(list_empty (&peer->ksnp_routes) &&
354 list_empty (&peer->ksnp_conns)));
356 list_for_each (rtmp, &peer->ksnp_routes) {
/* take a ref before dropping the lock so the route stays valid */
360 route = list_entry (rtmp, ksock_route_t, ksnr_list);
361 atomic_inc (&route->ksnr_refcount);
362 read_unlock (&ksocknal_data.ksnd_global_lock);
368 read_unlock (&ksocknal_data.ksnd_global_lock);
/* Create (or share) an autoconnect route to 'nid' at ipaddr:port.
 * Optimistically allocates a fresh peer and route before taking the lock,
 * then discards the new peer if one already exists, and reuses an existing
 * route to the same ipaddr if present (bumping its share count). */
373 ksocknal_add_route (ptl_nid_t nid, __u32 ipaddr, int port, int bufnob,
374 int nonagle, int xchange_nids, int bind_irq, int share)
379 ksock_route_t *route;
380 struct list_head *rtmp;
381 ksock_route_t *route2;
383 if (nid == PTL_NID_ANY)
386 /* Have a brand new peer ready... */
387 peer = ksocknal_create_peer (nid);
391 route = ksocknal_create_route (ipaddr, port, bufnob,
392 nonagle, xchange_nids, bind_irq);
/* route creation failed: give back the optimistic peer ref */
394 ksocknal_put_peer (peer);
398 write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags);
400 peer2 = ksocknal_find_peer_locked (nid);
/* peer already hashed: drop the one we pre-allocated */
402 ksocknal_put_peer (peer);
405 /* peer table takes existing ref on peer */
406 list_add (&peer->ksnp_list,
407 ksocknal_nid2peerlist (nid));
412 /* check for existing route to this NID via this ipaddr */
413 list_for_each (rtmp, &peer->ksnp_routes) {
414 route2 = list_entry (rtmp, ksock_route_t, ksnr_list);
416 if (route2->ksnr_ipaddr == ipaddr)
423 if (route2 != NULL) {
/* duplicate: discard the new route, share the existing one */
424 ksocknal_put_route (route);
427 /* route takes a ref on peer */
428 route->ksnr_peer = peer;
429 atomic_inc (&peer->ksnp_refcount);
430 /* peer's route list takes existing ref on route */
431 list_add (&route->ksnr_list, &peer->ksnp_routes);
434 route->ksnr_sharecount++;
436 write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags);
/* Delete one route (caller holds ksnd_global_lock exclusively).
 * 'share' controls share-count semantics: a shared route is only removed
 * when its share count reaches zero.  'keep_conn' keeps any attached
 * connection alive, merely dissociating it from the route; otherwise the
 * connection is closed too.  Unlinks the peer if this was its last
 * route/conn. */
442 ksocknal_del_route_locked (ksock_route_t *route, int share, int keep_conn)
444 ksock_peer_t *peer = route->ksnr_peer;
445 ksock_conn_t *conn = route->ksnr_conn;
448 route->ksnr_sharecount = 0;
450 route->ksnr_sharecount--;
451 if (route->ksnr_sharecount != 0)
457 ksocknal_close_conn_locked (conn);
459 /* keeping the conn; just dissociate it and route... */
460 conn->ksnc_route = NULL;
461 route->ksnr_conn = NULL;
462 ksocknal_put_route (route); /* drop conn's ref on route */
463 ksocknal_put_conn (conn); /* drop route's ref on conn */
467 route->ksnr_deleted = 1;
468 list_del (&route->ksnr_list);
469 ksocknal_put_route (route); /* drop peer's ref */
471 if (list_empty (&peer->ksnp_routes) &&
472 list_empty (&peer->ksnp_conns)) {
473 /* I've just removed the last autoconnect route of a peer
474 * with no active connections */
475 ksocknal_unlink_peer_locked (peer);
/* Delete matching route(s): nid may be PTL_NID_ANY (all peers) and
 * ipaddr 0 matches any address.  Scans either one hash bucket or all of
 * them, deleting via ksocknal_del_route_locked(); rc records whether
 * anything matched. */
480 ksocknal_del_route (ptl_nid_t nid, __u32 ipaddr, int share, int keep_conn)
483 struct list_head *ptmp;
484 struct list_head *pnxt;
486 struct list_head *rtmp;
487 struct list_head *rnxt;
488 ksock_route_t *route;
494 write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags);
/* restrict the scan to a single bucket when the nid is explicit */
496 if (nid != PTL_NID_ANY)
497 lo = hi = ksocknal_nid2peerlist(nid) - ksocknal_data.ksnd_peers;
500 hi = ksocknal_data.ksnd_peer_hash_size - 1;
503 for (i = lo; i <= hi; i++) {
504 list_for_each_safe (ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
505 peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
507 if (!(nid == PTL_NID_ANY || peer->ksnp_nid == nid))
510 list_for_each_safe (rtmp, rnxt, &peer->ksnp_routes) {
511 route = list_entry (rtmp, ksock_route_t,
515 route->ksnr_ipaddr == ipaddr))
518 ksocknal_del_route_locked (route, share, keep_conn);
519 rc = 0; /* matched something */
526 write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags);
/* Walk every peer's connection list to find the connection at position
 * 'index' (used by the GET_CONN ioctl to iterate).  Returns the conn with
 * a reference held, or falls through to the unlock/NULL path. */
532 ksocknal_get_conn_by_idx (int index)
535 struct list_head *ptmp;
537 struct list_head *ctmp;
540 read_lock (&ksocknal_data.ksnd_global_lock);
542 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
543 list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
544 peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
546 LASSERT (!(list_empty (&peer->ksnp_routes) &&
547 list_empty (&peer->ksnp_conns)));
549 list_for_each (ctmp, &peer->ksnp_conns) {
/* take a ref before dropping the lock so the conn stays valid */
553 conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
554 atomic_inc (&conn->ksnc_refcount);
555 read_unlock (&ksocknal_data.ksnd_global_lock);
561 read_unlock (&ksocknal_data.ksnd_global_lock);
/* Query the socket for its remote address (getname with peer=2) and cache
 * the host-order ip/port in the conn.  Holds the conn's socket via
 * getconnsock/putconnsock across the call. */
566 ksocknal_get_peer_addr (ksock_conn_t *conn)
568 struct sockaddr_in sin;
569 int len = sizeof (sin);
572 rc = ksocknal_getconnsock (conn);
575 rc = conn->ksnc_sock->ops->getname (conn->ksnc_sock,
576 (struct sockaddr *)&sin, &len, 2);
577 LASSERT (len <= sizeof (sin));
578 ksocknal_putconnsock (conn);
581 CERROR ("Error %d getting sock peer IP\n", rc);
585 conn->ksnc_ipaddr = ntohl (sin.sin_addr.s_addr);
586 conn->ksnc_port = ntohs (sin.sin_port);
/* Determine the IRQ serviced by the NIC this connection's route goes out
 * of, via the socket's cached dst_entry and its device.  An irq >= NR_IRQS
 * is rejected as unexpected. */
590 ksocknal_conn_irq (ksock_conn_t *conn)
594 struct dst_entry *dst;
596 rc = ksocknal_getconnsock (conn);
599 dst = sk_dst_get (conn->ksnc_sock->sk);
601 if (dst->dev != NULL) {
603 if (irq >= NR_IRQS) {
604 CERROR ("Unexpected IRQ %x\n", irq);
611 ksocknal_putconnsock (conn);
/* Pick the scheduler for a new connection.  A hardware IRQ that already
 * has a scheduler keeps it (so all conns on one NIC share a CPU);
 * otherwise choose the scheduler with the fewest connections, and record
 * the choice for hardware IRQs.  Caller holds ksnd_global_lock. */
616 ksocknal_choose_scheduler_locked (unsigned int irq)
618 ksock_sched_t *sched;
619 ksock_irqinfo_t *info;
622 LASSERT (irq < NR_IRQS);
623 info = &ksocknal_data.ksnd_irqinfo[irq];
625 if (irq != 0 && /* hardware NIC */
626 info->ksni_valid) { /* already set up */
627 return (&ksocknal_data.ksnd_schedulers[info->ksni_sched]);
630 /* software NIC (irq == 0) || not associated with a scheduler yet.
631 * Choose the CPU with the fewest connections... */
632 sched = &ksocknal_data.ksnd_schedulers[0];
633 for (i = 1; i < SOCKNAL_N_SCHED; i++)
634 if (sched->kss_nconns >
635 ksocknal_data.ksnd_schedulers[i].kss_nconns)
636 sched = &ksocknal_data.ksnd_schedulers[i];
638 if (irq != 0) { /* Hardware NIC */
639 info->ksni_valid = 1;
640 info->ksni_sched = sched - ksocknal_data.ksnd_schedulers;
/* if already valid, the recorded choice must agree with ours */
643 LASSERT (info->ksni_sched == sched - ksocknal_data.ksnd_schedulers);
/* Wrap an established socket in a ksock_conn_t and wire it into the NAL:
 * attach it to its peer (creating one for non-autoconnect sockets), hook
 * the socket's data_ready/write_space callbacks, pick a scheduler, drain
 * the peer's blocked tx queue onto the new conn, then optionally bind the
 * NIC's IRQ and kick the callbacks once to start I/O.
 * 'route' is non-NULL only for autoconnect-established sockets. */
650 ksocknal_create_conn (ptl_nid_t nid, ksock_route_t *route,
651 struct socket *sock, int bind_irq)
657 ksock_sched_t *sched;
662 /* NB, sock has an associated file since (a) this connection might
663 * have been created in userland and (b) we need the refcounting so
664 * that we don't close the socket while I/O is being done on it. */
665 LASSERT (sock->file != NULL);
667 rc = ksocknal_set_linger (sock);
672 if (route == NULL) { /* not autoconnect */
673 /* Assume this socket connects to a brand new peer */
674 peer = ksocknal_create_peer (nid);
679 PORTAL_ALLOC(conn, sizeof(*conn));
/* conn allocation failed: release the optimistic peer */
682 ksocknal_put_peer (peer);
686 memset (conn, 0, sizeof (*conn));
687 conn->ksnc_peer = NULL;
688 conn->ksnc_route = NULL;
689 conn->ksnc_sock = sock;
/* save original socket callbacks so they can be restored at teardown */
690 conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
691 conn->ksnc_saved_write_space = sock->sk->sk_write_space;
692 atomic_set (&conn->ksnc_refcount, 1); /* 1 ref for me */
694 conn->ksnc_rx_ready = 0;
695 conn->ksnc_rx_scheduled = 0;
696 ksocknal_new_packet (conn, 0);
698 INIT_LIST_HEAD (&conn->ksnc_tx_queue);
700 INIT_LIST_HEAD (&conn->ksnc_tx_pending);
702 conn->ksnc_tx_ready = 0;
703 conn->ksnc_tx_scheduled = 0;
704 atomic_set (&conn->ksnc_tx_nob, 0);
706 ksocknal_get_peer_addr (conn);
708 irq = ksocknal_conn_irq (conn);
710 write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags);
714 LASSERT (route->ksnr_conn == NULL && route->ksnr_connecting);
716 if (route->ksnr_deleted) {
717 /* This conn was autoconnected, but the autoconnect
718 * route got deleted while it was being
720 write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock,
722 PORTAL_FREE (conn, sizeof (*conn));
727 /* associate conn/route for auto-reconnect */
728 route->ksnr_conn = conn;
729 atomic_inc (&conn->ksnc_refcount);
730 conn->ksnc_route = route;
731 atomic_inc (&route->ksnr_refcount);
732 route->ksnr_connecting = 0;
/* successful connect: new generation, reset reconnect backoff */
734 route->ksnr_generation++;
735 route->ksnr_retry_interval = SOCKNAL_MIN_RECONNECT_INTERVAL;
737 peer = route->ksnr_peer;
739 /* Not an autoconnected connection; see if there is an
740 * existing peer for this NID */
741 peer2 = ksocknal_find_peer_locked (nid);
743 ksocknal_put_peer (peer);
746 list_add (&peer->ksnp_list,
747 ksocknal_nid2peerlist (nid));
748 /* peer list takes over existing ref */
752 LASSERT (!peer->ksnp_closing);
754 conn->ksnc_peer = peer;
755 atomic_inc (&peer->ksnp_refcount);
757 list_add (&conn->ksnc_list, &peer->ksnp_conns);
758 atomic_inc (&conn->ksnc_refcount);
760 sched = ksocknal_choose_scheduler_locked (irq);
762 conn->ksnc_scheduler = sched;
764 /* NB my callbacks block while I hold ksnd_global_lock */
765 sock->sk->sk_user_data = conn;
766 sock->sk->sk_data_ready = ksocknal_data_ready;
767 sock->sk->sk_write_space = ksocknal_write_space;
769 /* Take all the packets blocking for a connection.
770 * NB, it might be nicer to share these blocked packets among any
771 * other connections that are becoming established, however that
772 * confuses the normal packet launching operation, which selects a
773 * connection and queues the packet on it without needing an
774 * exclusive lock on ksnd_global_lock. */
775 while (!list_empty (&peer->ksnp_tx_queue)) {
776 tx = list_entry (peer->ksnp_tx_queue.next,
777 ksock_tx_t, tx_list);
779 list_del (&tx->tx_list);
780 ksocknal_queue_tx_locked (tx, conn);
783 write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags);
785 if (bind_irq) /* irq binding required */
786 ksocknal_bind_irq (irq);
788 /* Call the callbacks right now to get things going. */
789 ksocknal_data_ready (sock->sk, 0);
790 ksocknal_write_space (sock->sk);
792 CDEBUG(D_IOCTL, "conn [%p] registered for nid "LPX64"\n",
793 conn, conn->ksnc_peer->ksnp_nid);
/* drop my initial ref; the peer's conn list keeps the conn alive */
795 ksocknal_put_conn (conn);
800 ksocknal_close_conn_locked (ksock_conn_t *conn)
802 /* This just does the immediate housekeeping, and queues the
803 * connection for the reaper to terminate.
804 * Caller holds ksnd_global_lock exclusively in irq context */
805 ksock_peer_t *peer = conn->ksnc_peer;
806 ksock_route_t *route;
808 LASSERT (!conn->ksnc_closing);
809 conn->ksnc_closing = 1;
810 atomic_inc (&ksocknal_data.ksnd_nclosing_conns);
812 route = conn->ksnc_route;
814 /* dissociate conn from route... */
815 LASSERT (!route->ksnr_connecting &&
816 !route->ksnr_deleted);
818 route->ksnr_conn = NULL;
819 conn->ksnc_route = NULL;
821 ksocknal_put_route (route); /* drop conn's ref on route */
822 ksocknal_put_conn (conn); /* drop route's ref on conn */
825 /* ksnd_deathrow_conns takes over peer's ref */
826 list_del (&conn->ksnc_list);
828 if (list_empty (&peer->ksnp_conns) &&
829 list_empty (&peer->ksnp_routes)) {
830 /* I've just closed last conn belonging to a
831 * non-autoconnecting peer */
832 ksocknal_unlink_peer_locked (peer);
/* hand the conn to the reaper and wake it if it's asleep */
835 spin_lock (&ksocknal_data.ksnd_reaper_lock);
837 list_add_tail (&conn->ksnc_list, &ksocknal_data.ksnd_deathrow_conns);
838 if (waitqueue_active (&ksocknal_data.ksnd_reaper_waitq))
839 wake_up (&ksocknal_data.ksnd_reaper_waitq);
841 spin_unlock (&ksocknal_data.ksnd_reaper_lock);
/* Lock-taking wrapper: close a connection unless it is already closing.
 * Safe to call from contexts not holding ksnd_global_lock. */
845 ksocknal_close_conn_unlocked (ksock_conn_t *conn)
850 write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags);
852 if (!conn->ksnc_closing) {
854 ksocknal_close_conn_locked (conn);
857 write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags);
863 ksocknal_terminate_conn (ksock_conn_t *conn)
865 /* This gets called by the reaper (guaranteed thread context) to
866 * disengage the socket from its callbacks and close it.
867 * ksnc_refcount will eventually hit zero, and then the reaper will
871 /* serialise with callbacks */
872 write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags);
874 LASSERT (conn->ksnc_closing);
876 /* Remove conn's network callbacks.
877 * NB I _have_ to restore the callback, rather than storing a noop,
878 * since the socket could survive past this module being unloaded!! */
879 conn->ksnc_sock->sk->sk_data_ready = conn->ksnc_saved_data_ready;
880 conn->ksnc_sock->sk->sk_write_space = conn->ksnc_saved_write_space;
882 /* A callback could be in progress already; they hold a read lock
883 * on ksnd_global_lock (to serialise with me) and NOOP if
884 * sk_user_data is NULL. */
885 conn->ksnc_sock->sk->sk_user_data = NULL;
/* this conn no longer counts against its scheduler's load */
887 conn->ksnc_scheduler->kss_nconns--;
889 write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags);
891 /* The socket is closed on the final put; either here, or in
892 * ksocknal_{send,recv}msg(). Since we set up the linger2 option
893 * when the connection was established, this will close the socket
894 * immediately, aborting anything buffered in it. Any hung
895 * zero-copy transmits will therefore complete in finite time. */
896 ksocknal_putconnsock (conn);
/* Final teardown once the conn's refcount is zero (reaper context):
 * discard any still-queued tx packets with failure notifications,
 * complete a partially-received message if one was in flight, drop the
 * peer ref and free the conn. */
900 ksocknal_destroy_conn (ksock_conn_t *conn)
902 /* Final coup-de-grace of the reaper */
903 CDEBUG (D_NET, "connection %p\n", conn);
905 LASSERT (atomic_read (&conn->ksnc_refcount) == 0);
906 LASSERT (conn->ksnc_route == NULL);
907 LASSERT (!conn->ksnc_tx_scheduled);
908 LASSERT (!conn->ksnc_rx_scheduled);
910 LASSERT (list_empty (&conn->ksnc_tx_pending));
912 /* complete queued packets */
913 while (!list_empty (&conn->ksnc_tx_queue)) {
914 ksock_tx_t *tx = list_entry (conn->ksnc_tx_queue.next,
915 ksock_tx_t, tx_list);
917 CERROR ("Deleting packet type %d len %d ("LPX64"->"LPX64")\n",
918 NTOH__u32 (tx->tx_hdr->type),
919 NTOH__u32 (PTL_HDR_LENGTH(tx->tx_hdr)),
920 NTOH__u64 (tx->tx_hdr->src_nid),
921 NTOH__u64 (tx->tx_hdr->dest_nid));
923 list_del (&tx->tx_list);
924 ksocknal_tx_done (tx, 0);
927 /* complete current receive if any */
928 switch (conn->ksnc_rx_state) {
929 case SOCKNAL_RX_BODY:
930 lib_finalize (&ksocknal_lib, NULL, conn->ksnc_cookie);
932 case SOCKNAL_RX_BODY_FWD:
/* abort the in-progress forwarded-message receive */
933 ksocknal_fmb_callback (conn->ksnc_cookie, -ECONNABORTED);
935 case SOCKNAL_RX_HEADER:
936 case SOCKNAL_RX_SLOP:
943 ksocknal_put_peer (conn->ksnc_peer);
945 PORTAL_FREE (conn, sizeof (*conn));
946 atomic_dec (&ksocknal_data.ksnd_nclosing_conns);
/* Drop one reference on a conn; on the last put, hand it to the reaper's
 * zombie list (the reaper does the actual destruction in thread context). */
950 ksocknal_put_conn (ksock_conn_t *conn)
954 CDEBUG (D_OTHER, "putting conn[%p] -> "LPX64" (%d)\n",
955 conn, conn->ksnc_peer->ksnp_nid,
956 atomic_read (&conn->ksnc_refcount));
958 LASSERT (atomic_read (&conn->ksnc_refcount) > 0);
959 if (!atomic_dec_and_test (&conn->ksnc_refcount))
962 spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);
964 list_add (&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
965 if (waitqueue_active (&ksocknal_data.ksnd_reaper_waitq))
966 wake_up (&ksocknal_data.ksnd_reaper_waitq);
968 spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);
/* Close matching connection(s): nid may be PTL_NID_ANY (all peers) and
 * ipaddr 0 matches any address.  Scans one hash bucket or all of them
 * and closes matches under the exclusive global lock. */
972 ksocknal_close_conn (ptl_nid_t nid, __u32 ipaddr)
976 struct list_head *ctmp;
977 struct list_head *cnxt;
979 struct list_head *ptmp;
980 struct list_head *pnxt;
986 write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags);
/* restrict the scan to a single bucket when the nid is explicit */
988 if (nid != PTL_NID_ANY)
989 lo = hi = ksocknal_nid2peerlist(nid) - ksocknal_data.ksnd_peers;
992 hi = ksocknal_data.ksnd_peer_hash_size - 1;
995 for (i = lo; i <= hi; i++) {
996 list_for_each_safe (ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
998 peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
1000 if (!(nid == PTL_NID_ANY || nid == peer->ksnp_nid))
1003 list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) {
1005 conn = list_entry (ctmp, ksock_conn_t,
1008 if (!(ipaddr == 0 ||
1009 conn->ksnc_ipaddr == ipaddr))
1013 ksocknal_close_conn_locked (conn);
1018 write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags);
/* Kernel-version shim: locate the TCP private state inside a struct sock.
 * Pre-2.5 kernels embed it in sk->tp_pinfo; later kernels derive the
 * tcp_sock from the sock itself. */
1023 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1024 struct tcp_opt *sock2tcp_opt(struct sock *sk)
1026 return &(sk->tp_pinfo.af_tcp);
1029 struct tcp_opt *sock2tcp_opt(struct sock *sk)
1031 struct tcp_sock *s = (struct tcp_sock *)sk;
/* Force any data buffered in the socket out onto the wire: temporarily
 * set TCP_NODELAY via setsockopt, then restore the saved nonagle setting.
 * NOOPs (via the early return on a line not visible here) when the conn
 * is being shut down. */
1037 ksocknal_push_conn (ksock_conn_t *conn)
1046 rc = ksocknal_getconnsock (conn);
1047 if (rc != 0) /* being shut down */
1050 sk = conn->ksnc_sock->sk;
1051 tp = sock2tcp_opt(sk);
/* remember the current Nagle setting so it can be restored below */
1054 nonagle = tp->nonagle;
1061 rc = sk->sk_prot->setsockopt (sk, SOL_TCP, TCP_NODELAY,
1062 (char *)&val, sizeof (val));
1068 tp->nonagle = nonagle;
1071 ksocknal_putconnsock (conn);
/* Push every connection of one peer.  Iterates by index, re-taking the
 * read lock each pass, because ksocknal_push_conn() must be called
 * without the lock held (it can sleep). */
1075 ksocknal_push_peer (ksock_peer_t *peer)
1079 struct list_head *tmp;
1082 for (index = 0; ; index++) {
1083 read_lock (&ksocknal_data.ksnd_global_lock);
1088 list_for_each (tmp, &peer->ksnp_conns) {
/* take a ref so the conn survives the unlock below */
1090 conn = list_entry (tmp, ksock_conn_t, ksnc_list);
1091 atomic_inc (&conn->ksnc_refcount);
1096 read_unlock (&ksocknal_data.ksnd_global_lock);
1101 ksocknal_push_conn (conn);
1102 ksocknal_put_conn (conn);
/* Push one peer (explicit nid) or every peer in the hash table
 * (PTL_NID_ANY).  The all-peers walk re-acquires the lock per iteration
 * and refcounts each peer before pushing, since pushing can sleep. */
1107 ksocknal_push (ptl_nid_t nid)
1110 struct list_head *tmp;
1116 if (nid != PTL_NID_ANY) {
1117 peer = ksocknal_get_peer (nid);
1121 ksocknal_push_peer (peer);
1122 ksocknal_put_peer (peer);
1127 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
1128 for (j = 0; ; j++) {
1129 read_lock (&ksocknal_data.ksnd_global_lock);
1134 list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
1136 peer = list_entry(tmp, ksock_peer_t,
/* hold a ref across the unlock while we push this peer */
1138 atomic_inc (&peer->ksnp_refcount);
1143 read_unlock (&ksocknal_data.ksnd_global_lock);
1147 ksocknal_push_peer (peer);
1148 ksocknal_put_peer (peer);
/* ioctl dispatcher for the NAL: decodes portal_ioctl_data and routes to
 * the route/conn management entry points.  ioc_flags packs the boolean
 * route options as bits: 1=nonagle, 2=xchange_nids, 4=irq_affinity,
 * 8=share (see ADD_AUTOCONN below and the GET_AUTOCONN encoding). */
1158 ksocknal_cmd(struct portal_ioctl_data * data, void * private)
1162 LASSERT (data != NULL);
1164 switch(data->ioc_nal_cmd) {
1165 case NAL_CMD_GET_AUTOCONN: {
1166 ksock_route_t *route = ksocknal_get_route_by_idx (data->ioc_count);
/* copy the route's attributes out to the ioctl reply */
1172 data->ioc_nid = route->ksnr_peer->ksnp_nid;
1173 data->ioc_id = route->ksnr_ipaddr;
1174 data->ioc_misc = route->ksnr_port;
1175 data->ioc_count = route->ksnr_generation;
1176 data->ioc_size = route->ksnr_buffer_size;
1177 data->ioc_wait = route->ksnr_sharecount;
1178 data->ioc_flags = (route->ksnr_nonagel ? 1 : 0) |
1179 (route->ksnr_xchange_nids ? 2 : 0) |
1180 (route->ksnr_irq_affinity ? 4 : 0);
1181 ksocknal_put_route (route);
1185 case NAL_CMD_ADD_AUTOCONN: {
1186 rc = ksocknal_add_route (data->ioc_nid, data->ioc_id,
1187 data->ioc_misc, data->ioc_size,
1188 (data->ioc_flags & 1) != 0,
1189 (data->ioc_flags & 2) != 0,
1190 (data->ioc_flags & 4) != 0,
1191 (data->ioc_flags & 8) != 0);
1194 case NAL_CMD_DEL_AUTOCONN: {
1195 rc = ksocknal_del_route (data->ioc_nid, data->ioc_id,
1196 (data->ioc_flags & 1) != 0,
1197 (data->ioc_flags & 2) != 0);
1200 case NAL_CMD_GET_CONN: {
1201 ksock_conn_t *conn = ksocknal_get_conn_by_idx (data->ioc_count);
1207 data->ioc_nid = conn->ksnc_peer->ksnp_nid;
1208 data->ioc_id = conn->ksnc_ipaddr;
1209 data->ioc_misc = conn->ksnc_port;
1210 ksocknal_put_conn (conn);
1214 case NAL_CMD_REGISTER_PEER_FD: {
/* adopt a userland-connected socket as a NAL connection */
1215 struct socket *sock = sockfd_lookup (data->ioc_fd, &rc);
1218 rc = ksocknal_create_conn (data->ioc_nid, NULL,
1219 sock, data->ioc_flags);
1225 case NAL_CMD_CLOSE_CONNECTION: {
1226 rc = ksocknal_close_conn (data->ioc_nid, data->ioc_id);
1229 case NAL_CMD_REGISTER_MYNID: {
1230 rc = ksocknal_set_mynid (data->ioc_nid);
1233 case NAL_CMD_PUSH_CONNECTION: {
1234 rc = ksocknal_push (data->ioc_nid);
/* Release all bulk allocations made at module init: the forwarding
 * message buffers (and their pages), the ltx descriptor pool, the
 * scheduler array and the peer hash table.  Each is freed only if it was
 * actually allocated, so this is safe on partial-init error paths. */
1243 ksocknal_free_buffers (void)
1245 if (ksocknal_data.ksnd_fmbs != NULL) {
1246 ksock_fmb_t *fmb = (ksock_fmb_t *)ksocknal_data.ksnd_fmbs;
1251 i < (SOCKNAL_SMALL_FWD_NMSGS + SOCKNAL_LARGE_FWD_NMSGS);
1253 for (j = 0; j < fmb->fmb_npages; j++)
1254 if (fmb->fmb_pages[j] != NULL)
1255 __free_page (fmb->fmb_pages[j]);
1257 PORTAL_FREE (ksocknal_data.ksnd_fmbs,
1258 sizeof (ksock_fmb_t) * (SOCKNAL_SMALL_FWD_NMSGS +
1259 SOCKNAL_LARGE_FWD_NMSGS));
/* no ltx descriptors may still be in flight at unload time */
1262 LASSERT (ksocknal_data.ksnd_active_ltxs == 0);
1263 if (ksocknal_data.ksnd_ltxs != NULL)
1264 PORTAL_FREE (ksocknal_data.ksnd_ltxs,
1265 sizeof (ksock_ltx_t) * (SOCKNAL_NLTXS +
1266 SOCKNAL_NNBLK_LTXS));
1268 if (ksocknal_data.ksnd_schedulers != NULL)
1269 PORTAL_FREE (ksocknal_data.ksnd_schedulers,
1270 sizeof (ksock_sched_t) * SOCKNAL_N_SCHED);
1272 PORTAL_FREE (ksocknal_data.ksnd_peers,
1273 sizeof (struct list_head) *
1274 ksocknal_data.ksnd_peer_hash_size);
/* Module unload: tear down in reverse order of initialisation.  The
 * switch on ksnd_init falls through from the most-complete state down to
 * SOCKNAL_INIT_NOTHING, so this is also the error-unwind path for a
 * partially-completed ksocknal_module_init(). */
1278 ksocknal_module_fini (void)
1282 CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
1283 atomic_read (&portal_kmemory));
1285 switch (ksocknal_data.ksnd_init) {
1289 case SOCKNAL_INIT_ALL:
1290 kportal_nal_unregister(SOCKNAL);
1291 PORTAL_SYMBOL_UNREGISTER (ksocknal_ni);
/* fall through to undo the earlier init stages */
1294 case SOCKNAL_INIT_PTL:
1295 PtlNIFini(ksocknal_ni);
1296 lib_fini(&ksocknal_lib);
1299 case SOCKNAL_INIT_DATA:
1300 /* Module refcount only gets to zero when all peers
1301 * have been closed so all lists must be empty */
1302 LASSERT (atomic_read (&ksocknal_data.ksnd_npeers) == 0);
1303 LASSERT (ksocknal_data.ksnd_peers != NULL);
1304 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
1305 LASSERT (list_empty (&ksocknal_data.ksnd_peers[i]));
1307 LASSERT (list_empty (&ksocknal_data.ksnd_zombie_conns));
1308 LASSERT (list_empty (&ksocknal_data.ksnd_autoconnectd_routes));
1309 LASSERT (list_empty (&ksocknal_data.ksnd_small_fmp.fmp_blocked_conns));
1310 LASSERT (list_empty (&ksocknal_data.ksnd_large_fmp.fmp_blocked_conns));
1312 if (ksocknal_data.ksnd_schedulers != NULL)
1313 for (i = 0; i < SOCKNAL_N_SCHED; i++) {
1314 ksock_sched_t *kss =
1315 &ksocknal_data.ksnd_schedulers[i];
1317 LASSERT (list_empty (&kss->kss_tx_conns));
1318 LASSERT (list_empty (&kss->kss_rx_conns));
1319 LASSERT (kss->kss_nconns == 0);
1322 /* stop router calling me */
1323 kpr_shutdown (&ksocknal_data.ksnd_router);
1325 /* flag threads to terminate; wake and wait for them to die */
1326 ksocknal_data.ksnd_shuttingdown = 1;
1327 wake_up_all (&ksocknal_data.ksnd_autoconnectd_waitq);
1328 wake_up_all (&ksocknal_data.ksnd_reaper_waitq);
1330 for (i = 0; i < SOCKNAL_N_SCHED; i++)
1331 wake_up_all(&ksocknal_data.ksnd_schedulers[i].kss_waitq);
/* NOTE(review): "waitinf" below is a typo for "waiting" in the
 * debug message text; left as-is in this comment-only pass. */
1333 while (atomic_read (&ksocknal_data.ksnd_nthreads) != 0) {
1334 CDEBUG (D_NET, "waitinf for %d threads to terminate\n",
1335 atomic_read (&ksocknal_data.ksnd_nthreads));
1336 set_current_state (TASK_UNINTERRUPTIBLE);
1337 schedule_timeout (HZ);
1340 kpr_deregister (&ksocknal_data.ksnd_router);
1342 ksocknal_free_buffers();
1345 case SOCKNAL_INIT_NOTHING:
1349 CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
1350 atomic_read (&portal_kmemory));
1352 printk(KERN_INFO "Routing socket NAL unloaded (final mem %d)\n",
1353 atomic_read(&portal_kmemory));
1358 ksocknal_module_init (void)
1360 int pkmem = atomic_read(&portal_kmemory);
1365 /* packet descriptor must fit in a router descriptor's scratchpad */
1366 LASSERT(sizeof (ksock_tx_t) <= sizeof (kprfd_scratch_t));
1368 LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
1370 ksocknal_api.forward = ksocknal_api_forward;
1371 ksocknal_api.shutdown = ksocknal_api_shutdown;
1372 ksocknal_api.yield = ksocknal_api_yield;
1373 ksocknal_api.validate = NULL; /* our api validate is a NOOP */
1374 ksocknal_api.lock = ksocknal_api_lock;
1375 ksocknal_api.unlock = ksocknal_api_unlock;
1376 ksocknal_api.nal_data = &ksocknal_data;
1378 ksocknal_lib.nal_data = &ksocknal_data;
1380 memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */
1382 ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
1383 PORTAL_ALLOC (ksocknal_data.ksnd_peers,
1384 sizeof (struct list_head) * ksocknal_data.ksnd_peer_hash_size);
1385 if (ksocknal_data.ksnd_peers == NULL)
1388 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
1389 INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
1391 rwlock_init(&ksocknal_data.ksnd_global_lock);
1393 ksocknal_data.ksnd_nal_cb = &ksocknal_lib;
1394 spin_lock_init (&ksocknal_data.ksnd_nal_cb_lock);
1396 spin_lock_init(&ksocknal_data.ksnd_small_fmp.fmp_lock);
1397 INIT_LIST_HEAD(&ksocknal_data.ksnd_small_fmp.fmp_idle_fmbs);
1398 INIT_LIST_HEAD(&ksocknal_data.ksnd_small_fmp.fmp_blocked_conns);
1400 spin_lock_init(&ksocknal_data.ksnd_large_fmp.fmp_lock);
1401 INIT_LIST_HEAD(&ksocknal_data.ksnd_large_fmp.fmp_idle_fmbs);
1402 INIT_LIST_HEAD(&ksocknal_data.ksnd_large_fmp.fmp_blocked_conns);
1404 spin_lock_init(&ksocknal_data.ksnd_idle_ltx_lock);
1405 INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_nblk_ltx_list);
1406 INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_ltx_list);
1407 init_waitqueue_head(&ksocknal_data.ksnd_idle_ltx_waitq);
1409 spin_lock_init (&ksocknal_data.ksnd_reaper_lock);
1410 INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
1411 INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
1412 init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
1414 spin_lock_init (&ksocknal_data.ksnd_autoconnectd_lock);
1415 INIT_LIST_HEAD (&ksocknal_data.ksnd_autoconnectd_routes);
1416 init_waitqueue_head(&ksocknal_data.ksnd_autoconnectd_waitq);
1418 /* NB memset above zeros whole of ksocknal_data, including
1419 * ksocknal_data.ksnd_irqinfo[all].ksni_valid */
1421 /* flag lists/ptrs/locks initialised */
1422 ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
1424 PORTAL_ALLOC(ksocknal_data.ksnd_schedulers,
1425 sizeof(ksock_sched_t) * SOCKNAL_N_SCHED);
1426 if (ksocknal_data.ksnd_schedulers == NULL) {
1427 ksocknal_module_fini ();
1431 for (i = 0; i < SOCKNAL_N_SCHED; i++) {
1432 ksock_sched_t *kss = &ksocknal_data.ksnd_schedulers[i];
1434 spin_lock_init (&kss->kss_lock);
1435 INIT_LIST_HEAD (&kss->kss_rx_conns);
1436 INIT_LIST_HEAD (&kss->kss_tx_conns);
1438 INIT_LIST_HEAD (&kss->kss_zctxdone_list);
1440 init_waitqueue_head (&kss->kss_waitq);
1443 CDEBUG (D_MALLOC, "ltx "LPSZ", total "LPSZ"\n", sizeof (ksock_ltx_t),
1444 sizeof (ksock_ltx_t) * (SOCKNAL_NLTXS + SOCKNAL_NNBLK_LTXS));
1446 PORTAL_ALLOC(ksocknal_data.ksnd_ltxs,
1447 sizeof(ksock_ltx_t) * (SOCKNAL_NLTXS +SOCKNAL_NNBLK_LTXS));
1448 if (ksocknal_data.ksnd_ltxs == NULL) {
1449 ksocknal_module_fini ();
1453 /* Deterministic bugs please */
1454 memset (ksocknal_data.ksnd_ltxs, 0xeb,
1455 sizeof (ksock_ltx_t) * (SOCKNAL_NLTXS + SOCKNAL_NNBLK_LTXS));
1457 for (i = 0; i < SOCKNAL_NLTXS + SOCKNAL_NNBLK_LTXS; i++) {
1458 ksock_ltx_t *ltx = &((ksock_ltx_t *)ksocknal_data.ksnd_ltxs)[i];
1460 ltx->ltx_tx.tx_hdr = <x->ltx_hdr;
1461 ltx->ltx_idle = i < SOCKNAL_NLTXS ?
1462 &ksocknal_data.ksnd_idle_ltx_list :
1463 &ksocknal_data.ksnd_idle_nblk_ltx_list;
1464 list_add (<x->ltx_tx.tx_list, ltx->ltx_idle);
1467 rc = PtlNIInit(ksocknal_init, 32, 4, 0, &ksocknal_ni);
1469 CERROR("ksocknal: PtlNIInit failed: error %d\n", rc);
1470 ksocknal_module_fini ();
1473 PtlNIDebug(ksocknal_ni, ~0);
1475 ksocknal_data.ksnd_init = SOCKNAL_INIT_PTL; // flag PtlNIInit() called
1477 for (i = 0; i < SOCKNAL_N_SCHED; i++) {
1478 rc = ksocknal_thread_start (ksocknal_scheduler,
1479 &ksocknal_data.ksnd_schedulers[i]);
1481 CERROR("Can't spawn socknal scheduler[%d]: %d\n",
1483 ksocknal_module_fini ();
1488 for (i = 0; i < SOCKNAL_N_AUTOCONNECTD; i++) {
1489 rc = ksocknal_thread_start (ksocknal_autoconnectd, (void *)((long)i));
1491 CERROR("Can't spawn socknal autoconnectd: %d\n", rc);
1492 ksocknal_module_fini ();
1497 rc = ksocknal_thread_start (ksocknal_reaper, NULL);
1499 CERROR ("Can't spawn socknal reaper: %d\n", rc);
1500 ksocknal_module_fini ();
1504 rc = kpr_register(&ksocknal_data.ksnd_router,
1505 &ksocknal_router_interface);
1507 CDEBUG(D_NET, "Can't initialise routing interface "
1508 "(rc = %d): not routing\n", rc);
1510 /* Only allocate forwarding buffers if I'm on a gateway */
1512 PORTAL_ALLOC(ksocknal_data.ksnd_fmbs,
1513 sizeof(ksock_fmb_t) * (SOCKNAL_SMALL_FWD_NMSGS +
1514 SOCKNAL_LARGE_FWD_NMSGS));
1515 if (ksocknal_data.ksnd_fmbs == NULL) {
1516 ksocknal_module_fini ();
1520 /* NULL out buffer pointers etc */
1521 memset(ksocknal_data.ksnd_fmbs, 0,
1522 sizeof(ksock_fmb_t) * (SOCKNAL_SMALL_FWD_NMSGS +
1523 SOCKNAL_LARGE_FWD_NMSGS));
1525 for (i = 0; i < (SOCKNAL_SMALL_FWD_NMSGS +
1526 SOCKNAL_LARGE_FWD_NMSGS); i++) {
1528 &((ksock_fmb_t *)ksocknal_data.ksnd_fmbs)[i];
1530 if (i < SOCKNAL_SMALL_FWD_NMSGS) {
1531 fmb->fmb_npages = SOCKNAL_SMALL_FWD_PAGES;
1532 fmb->fmb_pool = &ksocknal_data.ksnd_small_fmp;
1534 fmb->fmb_npages = SOCKNAL_LARGE_FWD_PAGES;
1535 fmb->fmb_pool = &ksocknal_data.ksnd_large_fmp;
1538 LASSERT (fmb->fmb_npages > 0);
1539 for (j = 0; j < fmb->fmb_npages; j++) {
1540 fmb->fmb_pages[j] = alloc_page(GFP_KERNEL);
1542 if (fmb->fmb_pages[j] == NULL) {
1543 ksocknal_module_fini ();
1547 LASSERT(page_address (fmb->fmb_pages[j]) !=
1551 list_add(&fmb->fmb_list, &fmb->fmb_pool->fmp_idle_fmbs);
1555 rc = kportal_nal_register(SOCKNAL, &ksocknal_cmd, NULL);
1557 CERROR ("Can't initialise command interface (rc = %d)\n", rc);
1558 ksocknal_module_fini ();
1562 PORTAL_SYMBOL_REGISTER(ksocknal_ni);
1564 /* flag everything initialised */
1565 ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
1567 printk(KERN_INFO "Routing socket NAL loaded (Routing %s, initial "
1569 kpr_routing (&ksocknal_data.ksnd_router) ?
1570 "enabled" : "disabled", pkmem);
/* Module metadata recorded in the .modinfo section for the kernel's
 * module loader (visible via modinfo).  The GPL license declaration is
 * required for this module to link against GPL-only kernel symbols. */
1575 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
1576 MODULE_DESCRIPTION("Kernel TCP Socket NAL v0.01");
1577 MODULE_LICENSE("GPL");

/* Lifecycle hooks: ksocknal_module_init() runs at insmod/modprobe time;
 * ksocknal_module_fini() runs at rmmod time and also doubles as the
 * common error-path teardown inside the init function above. */
1579 module_init(ksocknal_module_init);
1580 module_exit(ksocknal_module_fini);

/* Export the socknal's Portals NI handle so other kernel modules
 * (e.g. Lustre/Portals clients of this NAL) can reference it. */
1582 EXPORT_SYMBOL (ksocknal_ni);