/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2004 Cluster File Systems, Inc.
 *   Author: Eric Barton <eric@bartonsoftware.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
#include "ranal.h"


nal_t                   kranal_api;
ptl_handle_ni_t         kranal_ni;
kra_data_t              kranal_data;
kra_tunables_t          kranal_tunables;

#define RANAL_SYSCTL_TIMEOUT           1
#define RANAL_SYSCTL_LISTENER_TIMEOUT  2
#define RANAL_SYSCTL_BACKLOG           3
#define RANAL_SYSCTL_PORT              4
#define RANAL_SYSCTL_MAX_IMMEDIATE     5

#define RANAL_SYSCTL                   202

static ctl_table kranal_ctl_table[] = {
        {RANAL_SYSCTL_TIMEOUT, "timeout",
         &kranal_tunables.kra_timeout, sizeof(int),
         0644, NULL, &proc_dointvec},
        {RANAL_SYSCTL_LISTENER_TIMEOUT, "listener_timeout",
         &kranal_tunables.kra_listener_timeout, sizeof(int),
         0644, NULL, &proc_dointvec},
        {RANAL_SYSCTL_BACKLOG, "backlog",
         &kranal_tunables.kra_backlog, sizeof(int),
         0644, NULL, kranal_listener_procint},
        {RANAL_SYSCTL_PORT, "port",
         &kranal_tunables.kra_port, sizeof(int),
         0644, NULL, kranal_listener_procint},
        {RANAL_SYSCTL_MAX_IMMEDIATE, "max_immediate",
         &kranal_tunables.kra_max_immediate, sizeof(int),
         0644, NULL, &proc_dointvec},
        { 0 }
};

static ctl_table kranal_top_ctl_table[] = {
        {RANAL_SYSCTL, "ranal", NULL, 0, 0555, kranal_ctl_table},
        { 0 }
};

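/* Write 'nob' bytes from 'buffer' to 'sock' in a single non-blocking send.
 * Returns 0 if everything went, -EAGAIN on a partial send, or the error
 * from sock_sendmsg(). */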
int
kranal_sock_write (struct socket *sock, void *buffer, int nob)
{
        int           rc;
        mm_segment_t  oldmm = get_fs();
        struct iovec  iov = {
                .iov_base = buffer,
                .iov_len  = nob
        };
        struct msghdr msg = {
                .msg_name       = NULL,
                .msg_namelen    = 0,
                .msg_iov        = &iov,
                .msg_iovlen     = 1,
                .msg_control    = NULL,
                .msg_controllen = 0,
                .msg_flags      = MSG_DONTWAIT
        };

        /* We've set up the socket's send buffer to be large enough for
         * everything we send, so a single non-blocking send should
         * complete without error. */

        set_fs(KERNEL_DS);
        rc = sock_sendmsg(sock, &msg, iov.iov_len);
        set_fs(oldmm);

        if (rc == nob)
                return 0;

        if (rc >= 0)
                return -EAGAIN;

        return rc;
}

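/* Read exactly 'nob' bytes from 'sock' into 'buffer', allowing at most
 * 'timeout' seconds overall.  Returns 0 on success, -ETIMEDOUT if time
 * runs out, -ECONNABORTED on EOF, or the socket error. */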
int
kranal_sock_read (struct socket *sock, void *buffer, int nob, int timeout)
{
        int            rc;
        mm_segment_t   oldmm = get_fs();
        long           ticks = timeout * HZ;
        unsigned long  then;
        struct timeval tv;

        LASSERT (nob > 0);
        LASSERT (ticks > 0);

        for (;;) {
                struct iovec  iov = {
                        .iov_base = buffer,
                        .iov_len  = nob
                };
                struct msghdr msg = {
                        .msg_name       = NULL,
                        .msg_namelen    = 0,
                        .msg_iov        = &iov,
                        .msg_iovlen     = 1,
                        .msg_control    = NULL,
                        .msg_controllen = 0,
                        .msg_flags      = 0
                };

                /* Set receive timeout to remaining time */
                tv = (struct timeval) {
                        .tv_sec = ticks / HZ,
                        .tv_usec = ((ticks % HZ) * 1000000) / HZ
                };
                set_fs(KERNEL_DS);
                rc = sock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
                                     (char *)&tv, sizeof(tv));
                set_fs(oldmm);
                if (rc != 0) {
                        CERROR("Can't set socket recv timeout %d: %d\n",
                               timeout, rc);
                        return rc;
                }

                set_fs(KERNEL_DS);
                then = jiffies;
                rc = sock_recvmsg(sock, &msg, iov.iov_len, 0);
                ticks -= jiffies - then;
                set_fs(oldmm);

                if (rc < 0)
                        return rc;

                if (rc == 0)
                        return -ECONNABORTED;

                buffer = ((char *)buffer) + rc;
                nob -= rc;

                if (nob == 0)
                        return 0;

                if (ticks <= 0)
                        return -ETIMEDOUT;
        }
}

int
kranal_create_sock(struct socket **sockp)
{
        struct socket       *sock;
        int                  rc;
        int                  option;
        mm_segment_t         oldmm = get_fs();

        rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
        if (rc != 0) {
                CERROR("Can't create socket: %d\n", rc);
                return rc;
        }

        /* Ensure sending connection info doesn't block */
        option = 2 * sizeof(kra_connreq_t);
        set_fs(KERNEL_DS);
        rc = sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
                             (char *)&option, sizeof(option));
        set_fs(oldmm);
        if (rc != 0) {
                CERROR("Can't set send buffer %d: %d\n", option, rc);
                goto failed;
        }

        option = 1;
        set_fs(KERNEL_DS);
        rc = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
                             (char *)&option, sizeof(option));
        set_fs(oldmm);
        if (rc != 0) {
                CERROR("Can't set SO_REUSEADDR: %d\n", rc);
                goto failed;
        }

        *sockp = sock;
        return 0;

 failed:
        sock_release(sock);
        return rc;
}

void
kranal_pause(int ticks)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(ticks);
}

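/* Fill in 'connreq' for transmission: protocol magic/version, device id,
 * NIDs, stamps and timeout, plus the RI parameters (from RapkGetRiParams())
 * the peer needs to reach us. */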
void
kranal_pack_connreq(kra_connreq_t *connreq, kra_conn_t *conn, ptl_nid_t dstnid)
{
        RAP_RETURN   rrc;

        memset(connreq, 0, sizeof(*connreq));

        connreq->racr_magic     = RANAL_MSG_MAGIC;
        connreq->racr_version   = RANAL_MSG_VERSION;
        connreq->racr_devid     = conn->rac_device->rad_id;
        connreq->racr_srcnid    = kranal_lib.libnal_ni.ni_pid.nid;
        connreq->racr_dstnid    = dstnid;
        connreq->racr_peerstamp = kranal_data.kra_peerstamp;
        connreq->racr_connstamp = conn->rac_my_connstamp;
        connreq->racr_timeout   = conn->rac_timeout;

        rrc = RapkGetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
        LASSERT(rrc == RAP_SUCCESS);
}

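/* Receive a connreq and fix its byte order if the peer is opposite-endian,
 * then sanity-check version, NIDs and timeout. */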
int
kranal_recv_connreq(struct socket *sock, kra_connreq_t *connreq, int timeout)
{
        int         rc;

        rc = kranal_sock_read(sock, connreq, sizeof(*connreq), timeout);
        if (rc != 0) {
                CERROR("Read failed: %d\n", rc);
                return rc;
        }

        if (connreq->racr_magic != RANAL_MSG_MAGIC) {
                if (__swab32(connreq->racr_magic) != RANAL_MSG_MAGIC) {
                        CERROR("Unexpected magic %08x\n", connreq->racr_magic);
                        return -EPROTO;
                }

                __swab32s(&connreq->racr_magic);
                __swab16s(&connreq->racr_version);
                __swab16s(&connreq->racr_devid);
                __swab64s(&connreq->racr_srcnid);
                __swab64s(&connreq->racr_dstnid);
                __swab64s(&connreq->racr_peerstamp);
                __swab64s(&connreq->racr_connstamp);
                __swab32s(&connreq->racr_timeout);

                __swab32s(&connreq->racr_riparams.HostId);
                __swab32s(&connreq->racr_riparams.FmaDomainHndl);
                __swab32s(&connreq->racr_riparams.PTag);
                __swab32s(&connreq->racr_riparams.CompletionCookie);
        }

        if (connreq->racr_version != RANAL_MSG_VERSION) {
                CERROR("Unexpected version %d\n", connreq->racr_version);
                return -EPROTO;
        }

        if (connreq->racr_srcnid == PTL_NID_ANY ||
            connreq->racr_dstnid == PTL_NID_ANY) {
                CERROR("Received PTL_NID_ANY\n");
                return -EPROTO;
        }

        if (connreq->racr_timeout < RANAL_MIN_TIMEOUT) {
                CERROR("Received timeout %d < MIN %d\n",
                       connreq->racr_timeout, RANAL_MIN_TIMEOUT);
                return -EPROTO;
        }

        return 0;
}

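/* Close all conns to 'peer' that predate 'newconn' (older peerstamp or
 * connstamp).  Returns the number closed; called with the global lock held
 * for writing. */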
int
kranal_close_stale_conns_locked (kra_peer_t *peer, kra_conn_t *newconn)
{
        kra_conn_t         *conn;
        struct list_head   *ctmp;
        struct list_head   *cnxt;
        int                 loopback;
        int                 count = 0;

        loopback = peer->rap_nid == kranal_lib.libnal_ni.ni_pid.nid;

        list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
                conn = list_entry(ctmp, kra_conn_t, rac_list);

                if (conn == newconn)
                        continue;

                if (conn->rac_peerstamp != newconn->rac_peerstamp) {
                        CDEBUG(D_NET, "Closing stale conn nid:"LPX64
                               " peerstamp:"LPX64"("LPX64")\n", peer->rap_nid,
                               conn->rac_peerstamp, newconn->rac_peerstamp);
                        LASSERT (conn->rac_peerstamp < newconn->rac_peerstamp);
                        count++;
                        kranal_close_conn_locked(conn, -ESTALE);
                        continue;
                }

                if (conn->rac_device != newconn->rac_device)
                        continue;

                if (loopback &&
                    newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
                    newconn->rac_peer_connstamp == conn->rac_my_connstamp)
                        continue;

                LASSERT (conn->rac_peer_connstamp < newconn->rac_peer_connstamp);

                CDEBUG(D_NET, "Closing stale conn nid:"LPX64
                       " connstamp:"LPX64"("LPX64")\n", peer->rap_nid,
                       conn->rac_peer_connstamp, newconn->rac_peer_connstamp);

                count++;
                kranal_close_conn_locked(conn, -ESTALE);
        }

        return count;
}

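/* Decide whether 'newconn' duplicates or predates an existing conn to
 * 'peer': returns 0 if it is genuinely new, or a non-zero reason code if
 * it should be refused. */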
int
kranal_conn_isdup_locked(kra_peer_t *peer, kra_conn_t *newconn)
{
        kra_conn_t       *conn;
        struct list_head *tmp;
        int               loopback;

        loopback = peer->rap_nid == kranal_lib.libnal_ni.ni_pid.nid;

        list_for_each(tmp, &peer->rap_conns) {
                conn = list_entry(tmp, kra_conn_t, rac_list);

                /* 'newconn' is from an earlier version of 'peer'!!! */
                if (newconn->rac_peerstamp < conn->rac_peerstamp)
                        return 1;

                /* 'conn' is from an earlier version of 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->rac_peerstamp > conn->rac_peerstamp)
                        continue;

                /* Different devices are OK */
                if (conn->rac_device != newconn->rac_device)
                        continue;

                /* It's me connecting to myself */
                if (loopback &&
                    newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
                    newconn->rac_peer_connstamp == conn->rac_my_connstamp)
                        continue;

                /* 'newconn' is an earlier connection from 'peer'!!! */
                if (newconn->rac_peer_connstamp < conn->rac_peer_connstamp)
                        return 2;

                /* 'conn' is an earlier connection from 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->rac_peer_connstamp > conn->rac_peer_connstamp)
                        continue;

                /* 'newconn' has the SAME connection stamp; 'peer' isn't
                 * playing the game... */
                return 3;
        }

        return 0;
}

void
kranal_set_conn_uniqueness (kra_conn_t *conn)
{
        unsigned long  flags;

        write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        conn->rac_my_connstamp = kranal_data.kra_connstamp++;

        do {    /* allocate a unique cqid */
                conn->rac_cqid = kranal_data.kra_next_cqid++;
        } while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL);

        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
}

int
kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
{
        kra_conn_t    *conn;
        RAP_RETURN     rrc;

        LASSERT (!in_interrupt());
        PORTAL_ALLOC(conn, sizeof(*conn));

        if (conn == NULL)
                return -ENOMEM;

        memset(conn, 0, sizeof(*conn));
        atomic_set(&conn->rac_refcount, 1);
        INIT_LIST_HEAD(&conn->rac_list);
        INIT_LIST_HEAD(&conn->rac_hashlist);
        INIT_LIST_HEAD(&conn->rac_schedlist);
        INIT_LIST_HEAD(&conn->rac_fmaq);
        INIT_LIST_HEAD(&conn->rac_rdmaq);
        INIT_LIST_HEAD(&conn->rac_replyq);
        spin_lock_init(&conn->rac_lock);

        kranal_set_conn_uniqueness(conn);

        conn->rac_device = dev;
        conn->rac_timeout = MAX(kranal_tunables.kra_timeout, RANAL_MIN_TIMEOUT);
        kranal_update_reaper_timeout(conn->rac_timeout);

        rrc = RapkCreateRi(dev->rad_handle, conn->rac_cqid,
                           &conn->rac_rihandle);
        if (rrc != RAP_SUCCESS) {
                CERROR("RapkCreateRi failed: %d\n", rrc);
                PORTAL_FREE(conn, sizeof(*conn));
                return -ENETDOWN;
        }

        atomic_inc(&kranal_data.kra_nconns);
        *connp = conn;
        return 0;
}

void
kranal_destroy_conn(kra_conn_t *conn)
{
        RAP_RETURN         rrc;

        LASSERT (!in_interrupt());
        LASSERT (!conn->rac_scheduled);
        LASSERT (list_empty(&conn->rac_list));
        LASSERT (list_empty(&conn->rac_hashlist));
        LASSERT (list_empty(&conn->rac_schedlist));
        LASSERT (atomic_read(&conn->rac_refcount) == 0);
        LASSERT (list_empty(&conn->rac_fmaq));
        LASSERT (list_empty(&conn->rac_rdmaq));
        LASSERT (list_empty(&conn->rac_replyq));

        rrc = RapkDestroyRi(conn->rac_device->rad_handle,
                            conn->rac_rihandle);
        LASSERT (rrc == RAP_SUCCESS);

        if (conn->rac_peer != NULL)
                kranal_peer_decref(conn->rac_peer);

        PORTAL_FREE(conn, sizeof(*conn));
        atomic_dec(&kranal_data.kra_nconns);
}

void
kranal_terminate_conn_locked (kra_conn_t *conn)
{
        LASSERT (!in_interrupt());
        LASSERT (conn->rac_state == RANAL_CONN_CLOSING);
        LASSERT (!list_empty(&conn->rac_hashlist));
        LASSERT (list_empty(&conn->rac_list));

        /* Remove from conn hash table: no new callbacks */
        list_del_init(&conn->rac_hashlist);
        kranal_conn_decref(conn);

        conn->rac_state = RANAL_CONN_CLOSED;

        /* schedule to clear out all uncompleted comms in context of dev's
         * scheduler */
        kranal_schedule_conn(conn);
}

void
kranal_close_conn_locked (kra_conn_t *conn, int error)
{
        kra_peer_t        *peer = conn->rac_peer;

        CDEBUG(error == 0 ? D_NET : D_ERROR,
               "closing conn to "LPX64": error %d\n", peer->rap_nid, error);

        LASSERT (!in_interrupt());
        LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED);
        LASSERT (!list_empty(&conn->rac_hashlist));
        LASSERT (!list_empty(&conn->rac_list));

        list_del_init(&conn->rac_list);

        if (list_empty(&peer->rap_conns) &&
            peer->rap_persistence == 0) {
                /* Non-persistent peer with no more conns... */
                kranal_unlink_peer_locked(peer);
        }

        /* Reset RX timeout to ensure we wait for an incoming CLOSE for the
         * full timeout.  If we get a CLOSE we know the peer has stopped all
         * RDMA.  Otherwise if we wait for the full timeout we can also be sure
         * all RDMA has stopped. */
        conn->rac_last_rx = jiffies;
        mb();

        conn->rac_state = RANAL_CONN_CLOSING;
        kranal_schedule_conn(conn);             /* schedule sending CLOSE */

        kranal_conn_decref(conn);               /* lose peer's ref */
}

void
kranal_close_conn (kra_conn_t *conn, int error)
{
        unsigned long    flags;

        write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        if (conn->rac_state == RANAL_CONN_ESTABLISHED)
                kranal_close_conn_locked(conn, error);

        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
}

int
kranal_set_conn_params(kra_conn_t *conn, kra_connreq_t *connreq,
                       __u32 peer_ip, int peer_port)
{
        RAP_RETURN    rrc;

        rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
        if (rrc != RAP_SUCCESS) {
                CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer_ip), peer_port, rrc);
                return -EPROTO;
        }

        conn->rac_peerstamp = connreq->racr_peerstamp;
        conn->rac_peer_connstamp = connreq->racr_connstamp;
        conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout);
        kranal_update_reaper_timeout(conn->rac_keepalive);
        return 0;
}

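/* Passive side of the handshake: check the peer connected from a
 * privileged port, read its connreq, match the requested device, reply
 * with our own connreq and create the conn. */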
int
kranal_passive_conn_handshake (struct socket *sock, ptl_nid_t *src_nidp,
                               ptl_nid_t *dst_nidp, kra_conn_t **connp)
{
        struct sockaddr_in   addr;
        __u32                peer_ip;
        unsigned int         peer_port;
        kra_connreq_t        rx_connreq;
        kra_connreq_t        tx_connreq;
        kra_conn_t          *conn;
        kra_device_t        *dev;
        int                  rc;
        int                  len;
        int                  i;

        len = sizeof(addr);
        rc = sock->ops->getname(sock, (struct sockaddr *)&addr, &len, 2);
        if (rc != 0) {
                CERROR("Can't get peer's IP: %d\n", rc);
                return rc;
        }

        peer_ip = ntohl(addr.sin_addr.s_addr);
        peer_port = ntohs(addr.sin_port);

        if (peer_port >= 1024) {
                CERROR("Refusing unprivileged connection from %u.%u.%u.%u/%d\n",
                       HIPQUAD(peer_ip), peer_port);
                return -ECONNREFUSED;
        }

        rc = kranal_recv_connreq(sock, &rx_connreq,
                                 kranal_tunables.kra_listener_timeout);
        if (rc != 0) {
                CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer_ip), peer_port, rc);
                return rc;
        }

        for (i = 0; ; i++) {
                if (i == kranal_data.kra_ndevs) {
                        CERROR("Can't match dev %d from %u.%u.%u.%u/%d\n",
                               rx_connreq.racr_devid, HIPQUAD(peer_ip), peer_port);
                        return -ENODEV;
                }
                dev = &kranal_data.kra_devices[i];
                if (dev->rad_id == rx_connreq.racr_devid)
                        break;
        }

        rc = kranal_create_conn(&conn, dev);
        if (rc != 0)
                return rc;

        kranal_pack_connreq(&tx_connreq, conn, rx_connreq.racr_srcnid);

        rc = kranal_sock_write(sock, &tx_connreq, sizeof(tx_connreq));
        if (rc != 0) {
                CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer_ip), peer_port, rc);
                kranal_conn_decref(conn);
                return rc;
        }

        rc = kranal_set_conn_params(conn, &rx_connreq, peer_ip, peer_port);
        if (rc != 0) {
                kranal_conn_decref(conn);
                return rc;
        }

        *connp = conn;
        *src_nidp = rx_connreq.racr_srcnid;
        *dst_nidp = rx_connreq.racr_dstnid;
        return 0;
}

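/* Bind a reserved port (1023 down to 512) and connect to 'peer'; the
 * passive side refuses connections from unprivileged ports, so walk down
 * the reserved range until a free port is found. */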
int
ranal_connect_sock(kra_peer_t *peer, struct socket **sockp)
{
        struct sockaddr_in  locaddr;
        struct sockaddr_in  srvaddr;
        struct socket      *sock;
        unsigned int        port;
        int                 rc;

        for (port = 1023; port >= 512; port--) {

                memset(&locaddr, 0, sizeof(locaddr));
                locaddr.sin_family      = AF_INET;
                locaddr.sin_port        = htons(port);
                locaddr.sin_addr.s_addr = htonl(INADDR_ANY);

                memset(&srvaddr, 0, sizeof(srvaddr));
                srvaddr.sin_family      = AF_INET;
                srvaddr.sin_port        = htons(peer->rap_port);
                srvaddr.sin_addr.s_addr = htonl(peer->rap_ip);

                rc = kranal_create_sock(&sock);
                if (rc != 0)
                        return rc;

                rc = sock->ops->bind(sock,
                                     (struct sockaddr *)&locaddr, sizeof(locaddr));
                if (rc != 0) {
                        sock_release(sock);

                        if (rc == -EADDRINUSE) {
                                CDEBUG(D_NET, "Port %d already in use\n", port);
                                continue;
                        }

                        CERROR("Can't bind to reserved port %d: %d\n", port, rc);
                        return rc;
                }

                rc = sock->ops->connect(sock,
                                        (struct sockaddr *)&srvaddr, sizeof(srvaddr),
                                        0);
                if (rc == 0) {
                        *sockp = sock;
                        return 0;
                }

                sock_release(sock);

                if (rc != -EADDRNOTAVAIL) {
                        CERROR("Can't connect port %d to %u.%u.%u.%u/%d: %d\n",
                               port, HIPQUAD(peer->rap_ip), peer->rap_port, rc);
                        return rc;
                }

                CDEBUG(D_NET, "Port %d not available for %u.%u.%u.%u/%d\n",
                       port, HIPQUAD(peer->rap_ip), peer->rap_port);
        }

        /* all ports busy */
        return -EHOSTUNREACH;
}

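/* Active side of the handshake: pick a device from both NIDs, connect from
 * a reserved port, send our connreq and validate the peer's reply. */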
int
kranal_active_conn_handshake(kra_peer_t *peer,
                             ptl_nid_t *dst_nidp, kra_conn_t **connp)
{
        kra_connreq_t       connreq;
        kra_conn_t         *conn;
        kra_device_t       *dev;
        struct socket      *sock;
        int                 rc;
        unsigned int        idx;

        /* spread connections over all devices using both peer NIDs to ensure
         * all nids use all devices */
        idx = peer->rap_nid + kranal_lib.libnal_ni.ni_pid.nid;
        dev = &kranal_data.kra_devices[idx % kranal_data.kra_ndevs];

        rc = kranal_create_conn(&conn, dev);
        if (rc != 0)
                return rc;

        kranal_pack_connreq(&connreq, conn, peer->rap_nid);

        rc = ranal_connect_sock(peer, &sock);
        if (rc != 0)
                goto failed_0;

        /* CAVEAT EMPTOR: the passive side receives with a SHORT rx timeout
         * immediately after accepting a connection, so we connect and then
         * send immediately. */

        rc = kranal_sock_write(sock, &connreq, sizeof(connreq));
        if (rc != 0) {
                CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer->rap_ip), peer->rap_port, rc);
                goto failed_1;
        }

        rc = kranal_recv_connreq(sock, &connreq, kranal_tunables.kra_timeout);
        if (rc != 0) {
                CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer->rap_ip), peer->rap_port, rc);
                goto failed_1;
        }

        sock_release(sock);
        rc = -EPROTO;

        if (connreq.racr_srcnid != peer->rap_nid) {
                CERROR("Unexpected srcnid from %u.%u.%u.%u/%d: "
                       "received "LPX64" expected "LPX64"\n",
                       HIPQUAD(peer->rap_ip), peer->rap_port,
                       connreq.racr_srcnid, peer->rap_nid);
                goto failed_0;
        }

        if (connreq.racr_devid != dev->rad_id) {
                CERROR("Unexpected device id from %u.%u.%u.%u/%d: "
                       "received %d expected %d\n",
                       HIPQUAD(peer->rap_ip), peer->rap_port,
                       connreq.racr_devid, dev->rad_id);
                goto failed_0;
        }

        rc = kranal_set_conn_params(conn, &connreq,
                                    peer->rap_ip, peer->rap_port);
        if (rc != 0)
                goto failed_0;

        *connp = conn;
        *dst_nidp = connreq.racr_dstnid;
        return 0;

 failed_1:
        sock_release(sock);
 failed_0:
        kranal_conn_decref(conn);
        return rc;
}

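/* Complete a handshake and install the new conn: active if 'sock' is NULL
 * (connd connecting out to 'peer'), passive otherwise (listener accepted
 * 'sock' and the peer is looked up or created here).  On success the conn
 * is hashed, blocked transmits are posted on it and stale conns culled. */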
int
kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
{
        kra_peer_t        *peer2;
        kra_tx_t          *tx;
        ptl_nid_t          peer_nid;
        ptl_nid_t          dst_nid;
        unsigned long      flags;
        kra_conn_t        *conn;
        int                rc;
        int                nstale;
        int                new_peer = 0;

        if (sock == NULL) {
                /* active: connd wants to connect to 'peer' */
                LASSERT (peer != NULL);
                LASSERT (peer->rap_connecting);

                rc = kranal_active_conn_handshake(peer, &dst_nid, &conn);
                if (rc != 0)
                        return rc;

                write_lock_irqsave(&kranal_data.kra_global_lock, flags);

                if (!kranal_peer_active(peer)) {
                        /* raced with peer getting unlinked */
                        write_unlock_irqrestore(&kranal_data.kra_global_lock,
                                                flags);
                        kranal_conn_decref(conn);
                        return -ESTALE;
                }

                peer_nid = peer->rap_nid;
        } else {
                /* passive: listener accepted 'sock' */
                LASSERT (peer == NULL);

                rc = kranal_passive_conn_handshake(sock, &peer_nid,
                                                   &dst_nid, &conn);
                if (rc != 0)
                        return rc;

                /* assume this is a new peer */
                peer = kranal_create_peer(peer_nid);
                if (peer == NULL) {
                        CERROR("Can't allocate peer for "LPX64"\n", peer_nid);
                        kranal_conn_decref(conn);
                        return -ENOMEM;
                }

                write_lock_irqsave(&kranal_data.kra_global_lock, flags);

                peer2 = kranal_find_peer_locked(peer_nid);
                if (peer2 == NULL) {
                        new_peer = 1;
                } else {
                        /* peer_nid already in the peer table */
                        kranal_peer_decref(peer);
                        peer = peer2;
                }
        }

        LASSERT ((!new_peer) != (!kranal_peer_active(peer)));

        /* Refuse connection if peer thinks we are a different NID.  We check
         * this while holding the global lock, to synch with connection
         * destruction on NID change. */
        if (dst_nid != kranal_lib.libnal_ni.ni_pid.nid) {
                write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

                CERROR("Stale/bad connection with "LPX64
                       ": dst_nid "LPX64", expected "LPX64"\n",
                       peer_nid, dst_nid, kranal_lib.libnal_ni.ni_pid.nid);
                rc = -ESTALE;
                goto failed;
        }

        /* Refuse to duplicate an existing connection (both sides might try to
         * connect at once).  NB we return success!  We _are_ connected so we
         * _don't_ have any blocked txs to complete with failure. */
        rc = kranal_conn_isdup_locked(peer, conn);
        if (rc != 0) {
                LASSERT (!list_empty(&peer->rap_conns));
                LASSERT (list_empty(&peer->rap_tx_queue));
                write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
                CWARN("Not creating duplicate connection to "LPX64": %d\n",
                      peer_nid, rc);
                rc = 0;
                goto failed;
        }

        if (new_peer) {
                /* peer table takes my ref on the new peer */
                list_add_tail(&peer->rap_list,
                              kranal_nid2peerlist(peer_nid));
        }

        /* initialise timestamps before reaper looks at them */
        conn->rac_last_tx = conn->rac_last_rx = jiffies;

        kranal_peer_addref(peer);               /* +1 ref for conn */
        conn->rac_peer = peer;
        list_add_tail(&conn->rac_list, &peer->rap_conns);

        kranal_conn_addref(conn);               /* +1 ref for conn table */
        list_add_tail(&conn->rac_hashlist,
                      kranal_cqid2connlist(conn->rac_cqid));

        /* Schedule all packets blocking for a connection */
        while (!list_empty(&peer->rap_tx_queue)) {
                tx = list_entry(peer->rap_tx_queue.next,
                                kra_tx_t, tx_list);

                list_del(&tx->tx_list);
                kranal_post_fma(conn, tx);
        }

        nstale = kranal_close_stale_conns_locked(peer, conn);

        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

        /* CAVEAT EMPTOR: passive peer can disappear NOW */

        if (nstale != 0)
                CWARN("Closed %d stale conns to "LPX64"\n", nstale, peer_nid);

        /* Ensure conn gets checked.  Transmits may have been queued and an
         * FMA event may have happened before it got in the cq hash table */
        kranal_schedule_conn(conn);
        return 0;

 failed:
        if (new_peer)
                kranal_peer_decref(peer);
        kranal_conn_decref(conn);
        return rc;
}

void
kranal_connect (kra_peer_t *peer)
{
        kra_tx_t          *tx;
        unsigned long      flags;
        struct list_head   zombies;
        int                rc;

        LASSERT (peer->rap_connecting);

        CDEBUG(D_NET, "About to handshake "LPX64"\n", peer->rap_nid);

        rc = kranal_conn_handshake(NULL, peer);

        CDEBUG(D_NET, "Done handshake "LPX64":%d\n", peer->rap_nid, rc);

        write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        LASSERT (peer->rap_connecting);
        peer->rap_connecting = 0;

        if (rc == 0) {
                /* kranal_conn_handshake() queues blocked txs immediately on
                 * success to avoid messages jumping the queue */
                LASSERT (list_empty(&peer->rap_tx_queue));

                /* reset reconnection timeouts */
                peer->rap_reconnect_interval = RANAL_MIN_RECONNECT_INTERVAL;
                peer->rap_reconnect_time = CURRENT_SECONDS;

                write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
                return;
        }

        /* back off: double the retry interval, capped at the maximum */
        LASSERT (peer->rap_reconnect_interval != 0);
        peer->rap_reconnect_time = CURRENT_SECONDS + peer->rap_reconnect_interval;
        peer->rap_reconnect_interval = MIN(RANAL_MAX_RECONNECT_INTERVAL,
                                           2 * peer->rap_reconnect_interval);

        /* Grab all blocked packets while we have the global lock */
        list_add(&zombies, &peer->rap_tx_queue);
        list_del_init(&peer->rap_tx_queue);

        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

        if (list_empty(&zombies))
                return;

        CWARN("Dropping packets for "LPX64": connection failed\n",
              peer->rap_nid);

        do {
                tx = list_entry(zombies.next, kra_tx_t, tx_list);

                list_del(&tx->tx_list);
                kranal_tx_done(tx, -EHOSTUNREACH);

        } while (!list_empty(&zombies));
}

void
kranal_free_acceptsock (kra_acceptsock_t *ras)
{
        sock_release(ras->ras_sock);
        PORTAL_FREE(ras, sizeof(*ras));
}

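/* Listener thread: accept connections on the configured port and queue
 * them on kra_connd_acceptq for the connection daemons; runs until
 * kra_listener_shutdown is set. */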
int
kranal_listener (void *arg)
{
        struct sockaddr_in addr;
        wait_queue_t       wait;
        struct socket     *sock;
        kra_acceptsock_t  *ras;
        int                port;
        char               name[16];
        int                rc;
        unsigned long      flags;

        /* Parent thread holds kra_nid_mutex, and is, or is about to
         * block on kra_listener_signal */

        port = kranal_tunables.kra_port;
        snprintf(name, sizeof(name), "kranal_lstn%03d", port);
        kportal_daemonize(name);
        kportal_blockallsigs();

        init_waitqueue_entry(&wait, current);

        rc = kranal_create_sock(&sock);
        if (rc != 0)
                goto out_0;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family      = AF_INET;
        addr.sin_port        = htons(port);
        addr.sin_addr.s_addr = htonl(INADDR_ANY);

        rc = sock->ops->bind(sock, (struct sockaddr *)&addr, sizeof(addr));
        if (rc != 0) {
                CERROR("Can't bind to port %d\n", port);
                goto out_1;
        }

        rc = sock->ops->listen(sock, kranal_tunables.kra_backlog);
        if (rc != 0) {
                CERROR("Can't set listen backlog %d: %d\n",
                       kranal_tunables.kra_backlog, rc);
                goto out_1;
        }

        LASSERT (kranal_data.kra_listener_sock == NULL);
        kranal_data.kra_listener_sock = sock;

        /* unblock waiting parent */
        LASSERT (kranal_data.kra_listener_shutdown == 0);
        up(&kranal_data.kra_listener_signal);

        /* Wake me any time something happens on my socket */
        add_wait_queue(sock->sk->sk_sleep, &wait);
        ras = NULL;

        while (kranal_data.kra_listener_shutdown == 0) {

                if (ras == NULL) {
                        PORTAL_ALLOC(ras, sizeof(*ras));
                        if (ras == NULL) {
                                CERROR("Out of memory: pausing...\n");
                                kranal_pause(HZ);
                                continue;
                        }
                        ras->ras_sock = NULL;
                }

                if (ras->ras_sock == NULL) {
                        ras->ras_sock = sock_alloc();
                        if (ras->ras_sock == NULL) {
                                CERROR("Can't allocate socket: pausing...\n");
                                kranal_pause(HZ);
                                continue;
                        }
                        /* XXX this should add a ref to sock->ops->owner, if
                         * TCP could be a module */
                        ras->ras_sock->type = sock->type;
                        ras->ras_sock->ops = sock->ops;
                }

                set_current_state(TASK_INTERRUPTIBLE);

                rc = sock->ops->accept(sock, ras->ras_sock, O_NONBLOCK);

                /* Sleep for socket activity? */
                if (rc == -EAGAIN &&
                    kranal_data.kra_listener_shutdown == 0)
                        schedule();

                set_current_state(TASK_RUNNING);

                if (rc == 0) {
                        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);

                        list_add_tail(&ras->ras_list,
                                      &kranal_data.kra_connd_acceptq);

                        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
                        wake_up(&kranal_data.kra_connd_waitq);

                        ras = NULL;
                        continue;
                }

                if (rc != -EAGAIN) {
                        CERROR("Accept failed: %d, pausing...\n", rc);
                        kranal_pause(HZ);
                }
        }

        if (ras != NULL) {
                if (ras->ras_sock != NULL)
                        sock_release(ras->ras_sock);
                PORTAL_FREE(ras, sizeof(*ras));
        }

        rc = 0;
        remove_wait_queue(sock->sk->sk_sleep, &wait);
 out_1:
        sock_release(sock);
        kranal_data.kra_listener_sock = NULL;
 out_0:
        /* set completion status and unblock thread waiting for me
         * (parent on startup failure, executioner on normal shutdown) */
        kranal_data.kra_listener_shutdown = rc;
        up(&kranal_data.kra_listener_signal);

        return 0;
}

int
kranal_start_listener (void)
{
        long           pid;
        int            rc;

        CDEBUG(D_NET, "Starting listener\n");

        /* Called holding kra_nid_mutex: listener stopped */
        LASSERT (kranal_data.kra_listener_sock == NULL);

        kranal_data.kra_listener_shutdown = 0;
        pid = kernel_thread(kranal_listener, NULL, 0);
        if (pid < 0) {
                CERROR("Can't spawn listener: %ld\n", pid);
                return (int)pid;
        }

        /* Block until listener has started up. */
        down(&kranal_data.kra_listener_signal);

        rc = kranal_data.kra_listener_shutdown;
        LASSERT ((rc != 0) == (kranal_data.kra_listener_sock == NULL));

        if (rc == 0)
                CDEBUG(D_NET, "Listener %ld started OK\n", pid);
        return rc;
}

void
kranal_stop_listener(int clear_acceptq)
{
        struct list_head  zombie_accepts;
        unsigned long     flags;
        kra_acceptsock_t *ras;

        CDEBUG(D_NET, "Stopping listener\n");

        /* Called holding kra_nid_mutex: listener running */
        LASSERT (kranal_data.kra_listener_sock != NULL);

        kranal_data.kra_listener_shutdown = 1;
        wake_up_all(kranal_data.kra_listener_sock->sk->sk_sleep);

        /* Block until listener has torn down. */
        down(&kranal_data.kra_listener_signal);

        LASSERT (kranal_data.kra_listener_sock == NULL);
        CDEBUG(D_NET, "Listener stopped\n");

        if (!clear_acceptq)
                return;

        /* Close any unhandled accepts */
        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);

        list_add(&zombie_accepts, &kranal_data.kra_connd_acceptq);
        list_del_init(&kranal_data.kra_connd_acceptq);

        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);

        while (!list_empty(&zombie_accepts)) {
                ras = list_entry(zombie_accepts.next,
                                 kra_acceptsock_t, ras_list);
                list_del(&ras->ras_list);
                kranal_free_acceptsock(ras);
        }
}

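/* sysctl handler for the 'port' and 'backlog' tunables: on a write that
 * changes the value (or if the listener is down), restart the listener,
 * reverting to the old value if the restart fails. */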
int
kranal_listener_procint(ctl_table *table, int write, struct file *filp,
                        void *buffer, size_t *lenp)
{
        int   *tunable = (int *)table->data;
        int    old_val;
        int    rc;

        /* No race with nal initialisation since the nal is setup all the time
         * it's loaded.  When that changes, change this! */
        LASSERT (kranal_data.kra_init == RANAL_INIT_ALL);

        down(&kranal_data.kra_nid_mutex);

        LASSERT (tunable == &kranal_tunables.kra_port ||
                 tunable == &kranal_tunables.kra_backlog);
        old_val = *tunable;

        rc = proc_dointvec(table, write, filp, buffer, lenp);

        if (write &&
            (*tunable != old_val ||
             kranal_data.kra_listener_sock == NULL)) {

                if (kranal_data.kra_listener_sock != NULL)
                        kranal_stop_listener(0);

                rc = kranal_start_listener();

                if (rc != 0) {
                        CWARN("Unable to start listener with new tunable:"
                              " reverting to old value\n");
                        *tunable = old_val;
                        kranal_start_listener();
                }
        }

        up(&kranal_data.kra_nid_mutex);

        LASSERT (kranal_data.kra_init == RANAL_INIT_ALL);
        return rc;
}

int
kranal_set_mynid(ptl_nid_t nid)
{
        unsigned long    flags;
        lib_ni_t        *ni = &kranal_lib.libnal_ni;
        int              rc = 0;

        CDEBUG(D_NET, "setting mynid to "LPX64" (old nid="LPX64")\n",
               nid, ni->ni_pid.nid);

        down(&kranal_data.kra_nid_mutex);

        if (nid == ni->ni_pid.nid) {
                /* no change of NID */
                up(&kranal_data.kra_nid_mutex);
                return 0;
        }

        if (kranal_data.kra_listener_sock != NULL)
                kranal_stop_listener(1);

        write_lock_irqsave(&kranal_data.kra_global_lock, flags);
        kranal_data.kra_peerstamp++;
        ni->ni_pid.nid = nid;
        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

        /* Delete all existing peers and their connections after new
         * NID/connstamp set to ensure no old connections in our brave
         * new world. */
        kranal_del_peer(PTL_NID_ANY, 0);

        if (nid != PTL_NID_ANY)
                rc = kranal_start_listener();

        up(&kranal_data.kra_nid_mutex);
        return rc;
}

kra_peer_t *
kranal_create_peer (ptl_nid_t nid)
{
        kra_peer_t *peer;

        LASSERT (nid != PTL_NID_ANY);

        PORTAL_ALLOC(peer, sizeof(*peer));
        if (peer == NULL)
                return NULL;

        memset(peer, 0, sizeof(*peer));         /* zero flags etc */

        peer->rap_nid = nid;
        atomic_set(&peer->rap_refcount, 1);     /* 1 ref for caller */

        INIT_LIST_HEAD(&peer->rap_list);
        INIT_LIST_HEAD(&peer->rap_connd_list);
        INIT_LIST_HEAD(&peer->rap_conns);
        INIT_LIST_HEAD(&peer->rap_tx_queue);

        peer->rap_reconnect_time = CURRENT_SECONDS;
        peer->rap_reconnect_interval = RANAL_MIN_RECONNECT_INTERVAL;

        atomic_inc(&kranal_data.kra_npeers);
        return peer;
}

void
kranal_destroy_peer (kra_peer_t *peer)
{
        CDEBUG(D_NET, "peer "LPX64" %p deleted\n", peer->rap_nid, peer);

        LASSERT (atomic_read(&peer->rap_refcount) == 0);
        LASSERT (peer->rap_persistence == 0);
        LASSERT (!kranal_peer_active(peer));
        LASSERT (!peer->rap_connecting);
        LASSERT (list_empty(&peer->rap_conns));
        LASSERT (list_empty(&peer->rap_tx_queue));
        LASSERT (list_empty(&peer->rap_connd_list));

        PORTAL_FREE(peer, sizeof(*peer));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */
        atomic_dec(&kranal_data.kra_npeers);
}

kra_peer_t *
kranal_find_peer_locked (ptl_nid_t nid)
{
        struct list_head *peer_list = kranal_nid2peerlist(nid);
        struct list_head *tmp;
        kra_peer_t       *peer;

        list_for_each (tmp, peer_list) {

                peer = list_entry(tmp, kra_peer_t, rap_list);

                LASSERT (peer->rap_persistence > 0 ||     /* persistent peer */
                         !list_empty(&peer->rap_conns));  /* active conn */

                if (peer->rap_nid != nid)
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> "LPX64" (%d)\n",
                       peer, nid, atomic_read(&peer->rap_refcount));
                return peer;
        }
        return NULL;
}

kra_peer_t *
kranal_find_peer (ptl_nid_t nid)
{
        kra_peer_t     *peer;

        read_lock(&kranal_data.kra_global_lock);
        peer = kranal_find_peer_locked(nid);
        if (peer != NULL)                       /* +1 ref for caller */
                kranal_peer_addref(peer);
        read_unlock(&kranal_data.kra_global_lock);

        return peer;
}

void
kranal_unlink_peer_locked (kra_peer_t *peer)
{
        LASSERT (peer->rap_persistence == 0);
        LASSERT (list_empty(&peer->rap_conns));

        LASSERT (kranal_peer_active(peer));
        list_del_init(&peer->rap_list);

        /* lose peerlist's ref */
        kranal_peer_decref(peer);
}

int
kranal_get_peer_info (int index, ptl_nid_t *nidp, __u32 *ipp, int *portp,
                      int *persistencep)
{
        kra_peer_t        *peer;
        struct list_head  *ptmp;
        int                i;

        read_lock(&kranal_data.kra_global_lock);

        for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {

                list_for_each(ptmp, &kranal_data.kra_peers[i]) {

                        peer = list_entry(ptmp, kra_peer_t, rap_list);
                        LASSERT (peer->rap_persistence > 0 ||
                                 !list_empty(&peer->rap_conns));

                        if (index-- > 0)
                                continue;

                        *nidp = peer->rap_nid;
                        *ipp = peer->rap_ip;
                        *portp = peer->rap_port;
                        *persistencep = peer->rap_persistence;

                        read_unlock(&kranal_data.kra_global_lock);
                        return 0;
                }
        }

        read_unlock(&kranal_data.kra_global_lock);
        return -ENOENT;
}

int
kranal_add_persistent_peer (ptl_nid_t nid, __u32 ip, int port)
{
        unsigned long      flags;
        kra_peer_t        *peer;
        kra_peer_t        *peer2;

        if (nid == PTL_NID_ANY)
                return -EINVAL;

        peer = kranal_create_peer(nid);
        if (peer == NULL)
                return -ENOMEM;

        write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        peer2 = kranal_find_peer_locked(nid);
        if (peer2 != NULL) {
                kranal_peer_decref(peer);
                peer = peer2;
        } else {
                /* peer table takes existing ref on peer */
                list_add_tail(&peer->rap_list,
                              kranal_nid2peerlist(nid));
        }

        peer->rap_ip = ip;
        peer->rap_port = port;
        peer->rap_persistence++;

        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
        return 0;
}

void
kranal_del_peer_locked (kra_peer_t *peer, int single_share)
{
        struct list_head *ctmp;
        struct list_head *cnxt;
        kra_conn_t       *conn;

        if (!single_share)
                peer->rap_persistence = 0;
        else if (peer->rap_persistence > 0)
                peer->rap_persistence--;

        if (peer->rap_persistence != 0)
                return;

        if (list_empty(&peer->rap_conns)) {
                kranal_unlink_peer_locked(peer);
        } else {
                list_for_each_safe(ctmp, cnxt, &peer->rap_conns) {
                        conn = list_entry(ctmp, kra_conn_t, rac_list);

                        kranal_close_conn_locked(conn, 0);
                }
                /* peer unlinks itself when last conn is closed */
        }
}

int
kranal_del_peer (ptl_nid_t nid, int single_share)
{
        unsigned long      flags;
        struct list_head  *ptmp;
        struct list_head  *pnxt;
        kra_peer_t        *peer;
        int                lo;
        int                hi;
        int                i;
        int                rc = -ENOENT;

        write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        if (nid != PTL_NID_ANY)
                lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
        else {
                lo = 0;
                hi = kranal_data.kra_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
                        peer = list_entry(ptmp, kra_peer_t, rap_list);
                        LASSERT (peer->rap_persistence > 0 ||
                                 !list_empty(&peer->rap_conns));

                        if (!(nid == PTL_NID_ANY || peer->rap_nid == nid))
                                continue;

                        kranal_del_peer_locked(peer, single_share);
                        rc = 0;         /* matched something */

                        if (single_share)
                                goto out;
                }
        }
 out:
        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

        return rc;
}

kra_conn_t *
kranal_get_conn_by_idx (int index)
{
        kra_peer_t        *peer;
        struct list_head  *ptmp;
        kra_conn_t        *conn;
        struct list_head  *ctmp;
        int                i;

        read_lock (&kranal_data.kra_global_lock);

        for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
                list_for_each (ptmp, &kranal_data.kra_peers[i]) {

                        peer = list_entry(ptmp, kra_peer_t, rap_list);
                        LASSERT (peer->rap_persistence > 0 ||
                                 !list_empty(&peer->rap_conns));

                        list_for_each (ctmp, &peer->rap_conns) {
                                if (index-- > 0)
                                        continue;

                                conn = list_entry(ctmp, kra_conn_t, rac_list);
                                CDEBUG(D_NET, "++conn[%p] -> "LPX64" (%d)\n",
                                       conn, conn->rac_peer->rap_nid,
                                       atomic_read(&conn->rac_refcount));
                                atomic_inc(&conn->rac_refcount);
                                read_unlock(&kranal_data.kra_global_lock);
                                return conn;
                        }
                }
        }

        read_unlock(&kranal_data.kra_global_lock);
        return NULL;
}

int
kranal_close_peer_conns_locked (kra_peer_t *peer, int why)
{
        kra_conn_t         *conn;
        struct list_head   *ctmp;
        struct list_head   *cnxt;
        int                 count = 0;

        list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
                conn = list_entry(ctmp, kra_conn_t, rac_list);

                count++;
                kranal_close_conn_locked(conn, why);
        }

        return count;
}

1550 int
1551 kranal_close_matching_conns (ptl_nid_t nid)
1552 {
1553         unsigned long       flags;
1554         kra_peer_t         *peer;
1555         struct list_head   *ptmp;
1556         struct list_head   *pnxt;
1557         int                 lo;
1558         int                 hi;
1559         int                 i;
1560         int                 count = 0;
1561
1562         write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1563
1564         if (nid != PTL_NID_ANY)
1565                 lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
1566         else {
1567                 lo = 0;
1568                 hi = kranal_data.kra_peer_hash_size - 1;
1569         }
1570
1571         for (i = lo; i <= hi; i++) {
1572                 list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
1573
1574                         peer = list_entry(ptmp, kra_peer_t, rap_list);
1575                         LASSERT (peer->rap_persistence > 0 ||
1576                                  !list_empty(&peer->rap_conns));
1577
1578                         if (!(nid == PTL_NID_ANY || nid == peer->rap_nid))
1579                                 continue;
1580
1581                         count += kranal_close_peer_conns_locked(peer, 0);
1582                 }
1583         }
1584
1585         write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
1586
1587         /* wildcards always succeed */
1588         if (nid == PTL_NID_ANY)
1589                 return 0;
1590
1591         return (count == 0) ? -ENOENT : 0;
1592 }
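/* Return-value contract illustrated (a sketch; 'my_nid' is a
 * hypothetical variable): a wildcard close always reports success,
 * while a specific NID reports -ENOENT when nothing matched. */
#if 0
static void
example_close_conns (ptl_nid_t my_nid)
{
        int rc;

        rc = kranal_close_matching_conns(PTL_NID_ANY);  /* always 0 */
        rc = kranal_close_matching_conns(my_nid);       /* 0, or -ENOENT
                                                         * if no conns */
}
#endif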
1593
1594 int
1595 kranal_cmd(struct portals_cfg *pcfg, void * private)
1596 {
1597         int rc = -EINVAL;
1598
1599         LASSERT (pcfg != NULL);
1600
1601         switch (pcfg->pcfg_command) {
1602         case NAL_CMD_GET_PEER: {
1603                 ptl_nid_t   nid = 0;
1604                 __u32       ip = 0;
1605                 int         port = 0;
1606                 int         share_count = 0;
1607
1608                 rc = kranal_get_peer_info(pcfg->pcfg_count,
1609                                           &nid, &ip, &port, &share_count);
1610                 pcfg->pcfg_nid   = nid;
1611                 pcfg->pcfg_size  = 0;
1612                 pcfg->pcfg_id    = ip;
1613                 pcfg->pcfg_misc  = port;
1614                 pcfg->pcfg_count = 0;
1615                 pcfg->pcfg_wait  = share_count;
1616                 break;
1617         }
1618         case NAL_CMD_ADD_PEER: {
1619                 rc = kranal_add_persistent_peer(pcfg->pcfg_nid,
1620                                                 pcfg->pcfg_id, /* IP */
1621                                                 pcfg->pcfg_misc); /* port */
1622                 break;
1623         }
1624         case NAL_CMD_DEL_PEER: {
1625                 rc = kranal_del_peer(pcfg->pcfg_nid,
1626                                      /* flags == single_share */
1627                                      pcfg->pcfg_flags != 0);
1628                 break;
1629         }
1630         case NAL_CMD_GET_CONN: {
1631                 kra_conn_t *conn = kranal_get_conn_by_idx(pcfg->pcfg_count);
1632
1633                 if (conn == NULL)
1634                         rc = -ENOENT;
1635                 else {
1636                         rc = 0;
1637                         pcfg->pcfg_nid   = conn->rac_peer->rap_nid;
1638                         pcfg->pcfg_id    = conn->rac_device->rad_id;
1639                         pcfg->pcfg_misc  = 0;
1640                         pcfg->pcfg_flags = 0;
1641                         kranal_conn_decref(conn);
1642                 }
1643                 break;
1644         }
1645         case NAL_CMD_CLOSE_CONNECTION: {
1646                 rc = kranal_close_matching_conns(pcfg->pcfg_nid);
1647                 break;
1648         }
1649         case NAL_CMD_REGISTER_MYNID: {
1650                 if (pcfg->pcfg_nid == PTL_NID_ANY)
1651                         rc = -EINVAL;
1652                 else
1653                         rc = kranal_set_mynid(pcfg->pcfg_nid);
1654                 break;
1655         }
1656         }
1657
1658         return rc;
1659 }
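/* Sketch of driving the dispatcher above (an illustration only;
 * in-tree users normally reach kranal_cmd() via libcfs_nal_cmd(), and
 * example_add_peer() is hypothetical).  Note the field overloading:
 * pcfg_id carries the IP address and pcfg_misc the port, mirroring the
 * NAL_CMD_ADD_PEER case above. */
#if 0
static int
example_add_peer (ptl_nid_t nid, __u32 ip, int port)
{
        struct portals_cfg pcfg;

        memset(&pcfg, 0, sizeof(pcfg));
        pcfg.pcfg_command = NAL_CMD_ADD_PEER;
        pcfg.pcfg_nid     = nid;
        pcfg.pcfg_id      = ip;                 /* IP address */
        pcfg.pcfg_misc    = port;               /* TCP port */

        return kranal_cmd(&pcfg, NULL);
}
#endif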
1660
1661 void
1662 kranal_free_txdescs(struct list_head *freelist)
1663 {
1664         kra_tx_t    *tx;
1665
1666         while (!list_empty(freelist)) {
1667                 tx = list_entry(freelist->next, kra_tx_t, tx_list);
1668
1669                 list_del(&tx->tx_list);
1670                 PORTAL_FREE(tx->tx_phys, PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
1671                 PORTAL_FREE(tx, sizeof(*tx));
1672         }
1673 }
1674
1675 int
1676 kranal_alloc_txdescs(struct list_head *freelist, int n)
1677 {
1678         int            isnblk = (freelist == &kranal_data.kra_idle_nblk_txs);
1679         int            i;
1680         kra_tx_t      *tx;
1681
1682         LASSERT (freelist == &kranal_data.kra_idle_txs ||
1683                  freelist == &kranal_data.kra_idle_nblk_txs);
1684         LASSERT (list_empty(freelist));
1685
1686         for (i = 0; i < n; i++) {
1687
1688                 PORTAL_ALLOC(tx, sizeof(*tx));
1689                 if (tx == NULL) {
1690                         CERROR("Can't allocate %stx[%d]\n",
1691                                isnblk ? "nblk " : "", i);
1692                         kranal_free_txdescs(freelist);
1693                         return -ENOMEM;
1694                 }
1695
1696                 PORTAL_ALLOC(tx->tx_phys,
1697                              PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
1698                 if (tx->tx_phys == NULL) {
1699                         CERROR("Can't allocate %stx[%d]->tx_phys\n",
1700                                isnblk ? "nblk " : "", i);
1701
1702                         PORTAL_FREE(tx, sizeof(*tx));
1703                         kranal_free_txdescs(freelist);
1704                         return -ENOMEM;
1705                 }
1706
1707                 tx->tx_isnblk = isnblk;
1708                 tx->tx_buftype = RANAL_BUF_NONE;
1709                 tx->tx_msg.ram_type = RANAL_MSG_NONE;
1710
1711                 list_add(&tx->tx_list, freelist);
1712         }
1713
1714         return 0;
1715 }
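/* Sketch (an assumption: the real consumers of these freelists live in
 * the companion send/receive source file, not shown here): how a
 * descriptor would come off one of the pools under kra_tx_lock. */
#if 0
static kra_tx_t *
example_get_idle_tx (void)
{
        unsigned long  flags;
        kra_tx_t      *tx = NULL;

        spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);

        if (!list_empty(&kranal_data.kra_idle_txs)) {
                tx = list_entry(kranal_data.kra_idle_txs.next,
                                kra_tx_t, tx_list);
                list_del(&tx->tx_list);
        }

        spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
        return tx;                      /* NULL if the pool is empty */
}
#endif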
1716
1717 int
1718 kranal_device_init(int id, kra_device_t *dev)
1719 {
1720         const int         total_ntx = RANAL_NTX + RANAL_NTX_NBLK;
1721         RAP_RETURN        rrc;
1722
1723         dev->rad_id = id;
1724         rrc = RapkGetDeviceByIndex(id, kranal_device_callback,
1725                                    &dev->rad_handle);
1726         if (rrc != RAP_SUCCESS) {
1727                 CERROR("Can't get Rapidarray Device %d: %d\n", id, rrc);
1728                 goto failed_0;
1729         }
1730
1731         rrc = RapkReserveRdma(dev->rad_handle, total_ntx);
1732         if (rrc != RAP_SUCCESS) {
1733                 CERROR("Can't reserve %d RDMA descriptors"
1734                        " for device %d: %d\n", total_ntx, id, rrc);
1735                 goto failed_1;
1736         }
1737
1738         rrc = RapkCreateCQ(dev->rad_handle, total_ntx, RAP_CQTYPE_SEND,
1739                            &dev->rad_rdma_cqh);
1740         if (rrc != RAP_SUCCESS) {
1741                 CERROR("Can't create rdma cq size %d"
1742                        " for device %d: %d\n", total_ntx, id, rrc);
1743                 goto failed_1;
1744         }
1745
1746         rrc = RapkCreateCQ(dev->rad_handle, RANAL_FMA_CQ_SIZE, RAP_CQTYPE_RECV,
1747                            &dev->rad_fma_cqh);
1748         if (rrc != RAP_SUCCESS) {
1749                 CERROR("Can't create fma cq size %d"
1750                        " for device %d: %d\n", RANAL_FMA_CQ_SIZE, id, rrc);
1751                 goto failed_2;
1752         }
1753
1754         return 0;
1755
1756  failed_2:
1757         RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
1758  failed_1:
1759         RapkReleaseDevice(dev->rad_handle);
1760  failed_0:
1761         return -ENODEV;
1762 }
1763
1764 void
1765 kranal_device_fini(kra_device_t *dev)
1766 {
1767         LASSERT(dev->rad_scheduler == NULL);
1768         RapkDestroyCQ(dev->rad_handle, dev->rad_fma_cqh);
1769         RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
1770         RapkReleaseDevice(dev->rad_handle);
1771 }
1772
1773 void
1774 kranal_api_shutdown (nal_t *nal)
1775 {
1776         int           i;
1777         unsigned long flags;
1778
1779         if (nal->nal_refct != 0) {
1780                 /* not the last NI reference; just drop this caller's module ref */
1781                 PORTAL_MODULE_UNUSE;
1782                 return;
1783         }
1784
1785         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
1786                atomic_read(&portal_kmemory));
1787
1788         LASSERT (nal == &kranal_api);
1789
1790         switch (kranal_data.kra_init) {
1791         default:
1792                 CERROR("Unexpected state %d\n", kranal_data.kra_init);
1793                 LBUG();
1794
1795         case RANAL_INIT_ALL:
1796                 /* stop calls to nal_cmd */
1797                 libcfs_nal_cmd_unregister(RANAL);
1798                 /* No new persistent peers */
1799
1800                 /* resetting my NID to PTL_NID_ANY unadvertises me, removes my
1801                  * listener and nukes all current peers */
1802                 kranal_set_mynid(PTL_NID_ANY);
1803                 /* no new peers or conns */
1804
1805                 /* Wait for all peer/conn state to clean up */
1806                 i = 2;
1807                 while (atomic_read(&kranal_data.kra_nconns) != 0 ||
1808                        atomic_read(&kranal_data.kra_npeers) != 0) {
1809                         i++;
1810                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
1811                                "waiting for %d peers and %d conns to close down\n",
1812                                atomic_read(&kranal_data.kra_npeers),
1813                                atomic_read(&kranal_data.kra_nconns));
1814                         kranal_pause(HZ);
1815                 }
1816                 /* fall through */
1817
1818         case RANAL_INIT_LIB:
1819                 lib_fini(&kranal_lib);
1820                 /* fall through */
1821
1822         case RANAL_INIT_DATA:
1823                 break;
1824         }
1825
1826         /* flag threads to terminate; wake and wait for them to die */
1827         kranal_data.kra_shutdown = 1;
1828
1829         for (i = 0; i < kranal_data.kra_ndevs; i++) {
1830                 kra_device_t *dev = &kranal_data.kra_devices[i];
1831
1832                 LASSERT (list_empty(&dev->rad_connq));
1833
1834                 spin_lock_irqsave(&dev->rad_lock, flags);
1835                 wake_up(&dev->rad_waitq);
1836                 spin_unlock_irqrestore(&dev->rad_lock, flags);
1837         }
1838
1839         spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
1840         wake_up_all(&kranal_data.kra_reaper_waitq);
1841         spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
1842
1843         LASSERT (list_empty(&kranal_data.kra_connd_peers));
1844         spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
1845         wake_up_all(&kranal_data.kra_connd_waitq);
1846         spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
1847
1848         i = 2;
1849         while (atomic_read(&kranal_data.kra_nthreads) != 0) {
1850                 i++;
1851                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
1852                        "Waiting for %d threads to terminate\n",
1853                        atomic_read(&kranal_data.kra_nthreads));
1854                 kranal_pause(HZ);
1855         }
1856
1857         LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
1858         if (kranal_data.kra_peers != NULL) {
1859                 for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
1860                         LASSERT (list_empty(&kranal_data.kra_peers[i]));
1861
1862                 PORTAL_FREE(kranal_data.kra_peers,
1863                             sizeof (struct list_head) *
1864                             kranal_data.kra_peer_hash_size);
1865         }
1866
1867         LASSERT (atomic_read(&kranal_data.kra_nconns) == 0);
1868         if (kranal_data.kra_conns != NULL) {
1869                 for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
1870                         LASSERT (list_empty(&kranal_data.kra_conns[i]));
1871
1872                 PORTAL_FREE(kranal_data.kra_conns,
1873                             sizeof (struct list_head) *
1874                             kranal_data.kra_conn_hash_size);
1875         }
1876
1877         for (i = 0; i < kranal_data.kra_ndevs; i++)
1878                 kranal_device_fini(&kranal_data.kra_devices[i]);
1879
1880         kranal_free_txdescs(&kranal_data.kra_idle_txs);
1881         kranal_free_txdescs(&kranal_data.kra_idle_nblk_txs);
1882
1883         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
1884                atomic_read(&portal_kmemory));
1885         printk(KERN_INFO "Lustre: RapidArray NAL unloaded (final mem %d)\n",
1886                atomic_read(&portal_kmemory));
1887
1888         kranal_data.kra_init = RANAL_INIT_NOTHING;
1889 }
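/* Aside: the '(i & (-i)) == i' tests above throttle the "waiting"
 * messages, promoting them to D_WARNING only when the iteration count
 * is a power of 2, i.e. at exponentially spaced intervals.  A minimal
 * illustration: */
#if 0
static int
example_is_power_of_2 (int i)
{
        /* i & -i isolates the lowest set bit of i (two's complement);
         * for i > 0 it equals i only when i has exactly one bit set */
        return (i & (-i)) == i;
}
#endif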
1890
1891 int
1892 kranal_api_startup (nal_t *nal, ptl_pid_t requested_pid,
1893                     ptl_ni_limits_t *requested_limits,
1894                     ptl_ni_limits_t *actual_limits)
1895 {
1896         static int        device_ids[] = {RAPK_MAIN_DEVICE_ID,
1897                                           RAPK_EXPANSION_DEVICE_ID};
1898         struct timeval    tv;
1899         ptl_process_id_t  process_id;
1900         int               pkmem = atomic_read(&portal_kmemory);
1901         int               rc;
1902         int               i;
1903         kra_device_t     *dev;
1904
1905         LASSERT (nal == &kranal_api);
1906
1907         if (nal->nal_refct != 0) {
1908                 if (actual_limits != NULL)
1909                         *actual_limits = kranal_lib.libnal_ni.ni_actual_limits;
1910                 /* NI already up; just take another module ref */
1911                 PORTAL_MODULE_USE;
1912                 return PTL_OK;
1913         }
1914
1915         LASSERT (kranal_data.kra_init == RANAL_INIT_NOTHING);
1916
1917         memset(&kranal_data, 0, sizeof(kranal_data)); /* zero pointers, flags etc */
1918
1919         /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
1920          * a unique (for all time) connstamp so we can uniquely identify
1921          * the sender.  The connstamp is an incrementing counter
1922          * initialised to (seconds * 1000000) + microseconds at startup.  So we
1923          * rely on NOT creating connections more frequently on average than
1924          * 1MHz to ensure we don't use old connstamps when we reboot. */
1925         do_gettimeofday(&tv);
1926         kranal_data.kra_connstamp =
1927         kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
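        /* e.g. booting at tv = { .tv_sec = 1000, .tv_usec = 500 } seeds
         * the stamps with 1000000500; consumed slower than 1MHz they
         * never catch up with real time, so the seed taken at the next
         * boot exceeds any stamp already used */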
1928
1929         init_MUTEX(&kranal_data.kra_nid_mutex);
1930         init_MUTEX_LOCKED(&kranal_data.kra_listener_signal);
1931
1932         rwlock_init(&kranal_data.kra_global_lock);
1933
1934         for (i = 0; i < RANAL_MAXDEVS; i++) {
1935                 kra_device_t  *dev = &kranal_data.kra_devices[i];
1936
1937                 dev->rad_idx = i;
1938                 INIT_LIST_HEAD(&dev->rad_connq);
1939                 init_waitqueue_head(&dev->rad_waitq);
1940                 spin_lock_init(&dev->rad_lock);
1941         }
1942
1943         kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
1944         init_waitqueue_head(&kranal_data.kra_reaper_waitq);
1945         spin_lock_init(&kranal_data.kra_reaper_lock);
1946
1947         INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
1948         INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
1949         init_waitqueue_head(&kranal_data.kra_connd_waitq);
1950         spin_lock_init(&kranal_data.kra_connd_lock);
1951
1952         INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
1953         INIT_LIST_HEAD(&kranal_data.kra_idle_nblk_txs);
1954         init_waitqueue_head(&kranal_data.kra_idle_tx_waitq);
1955         spin_lock_init(&kranal_data.kra_tx_lock);
1956
1957         /* OK to call kranal_api_shutdown() to cleanup now */
1958         kranal_data.kra_init = RANAL_INIT_DATA;
1959
1960         kranal_data.kra_peer_hash_size = RANAL_PEER_HASH_SIZE;
1961         PORTAL_ALLOC(kranal_data.kra_peers,
1962                      sizeof(struct list_head) * kranal_data.kra_peer_hash_size);
1963         if (kranal_data.kra_peers == NULL)
1964                 goto failed;
1965
1966         for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
1967                 INIT_LIST_HEAD(&kranal_data.kra_peers[i]);
1968
1969         kranal_data.kra_conn_hash_size = RANAL_PEER_HASH_SIZE;
1970         PORTAL_ALLOC(kranal_data.kra_conns,
1971                      sizeof(struct list_head) * kranal_data.kra_conn_hash_size);
1972         if (kranal_data.kra_conns == NULL)
1973                 goto failed;
1974
1975         for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
1976                 INIT_LIST_HEAD(&kranal_data.kra_conns[i]);
1977
1978         rc = kranal_alloc_txdescs(&kranal_data.kra_idle_txs, RANAL_NTX);
1979         if (rc != 0)
1980                 goto failed;
1981
1982         rc = kranal_alloc_txdescs(&kranal_data.kra_idle_nblk_txs, RANAL_NTX_NBLK);
1983         if (rc != 0)
1984                 goto failed;
1985
1986         process_id.pid = requested_pid;
1987         process_id.nid = PTL_NID_ANY;           /* don't know my NID yet */
1988
1989         rc = lib_init(&kranal_lib, nal, process_id,
1990                       requested_limits, actual_limits);
1991         if (rc != PTL_OK) {
1992                 CERROR("lib_init failed: error %d\n", rc);
1993                 goto failed;
1994         }
1995
1996         /* lib interface initialised */
1997         kranal_data.kra_init = RANAL_INIT_LIB;
1998         /*****************************************************/
1999
2000         rc = kranal_thread_start(kranal_reaper, NULL);
2001         if (rc != 0) {
2002                 CERROR("Can't spawn ranal reaper: %d\n", rc);
2003                 goto failed;
2004         }
2005
2006         for (i = 0; i < RANAL_N_CONND; i++) {
2007                 rc = kranal_thread_start(kranal_connd, (void *)(unsigned long)i);
2008                 if (rc != 0) {
2009                         CERROR("Can't spawn ranal connd[%d]: %d\n",
2010                                i, rc);
2011                         goto failed;
2012                 }
2013         }
2014
2015         LASSERT(kranal_data.kra_ndevs == 0);
2016         for (i = 0; i < sizeof(device_ids)/sizeof(device_ids[0]); i++) {
2017                 dev = &kranal_data.kra_devices[kranal_data.kra_ndevs];
2018
2019                 rc = kranal_device_init(device_ids[i], dev);
2020                 if (rc != 0)            /* init failed: skip this device */
2021                         continue;       /* and don't spawn its scheduler */
2022                 kranal_data.kra_ndevs++;
2023                 rc = kranal_thread_start(kranal_scheduler, dev);
2024                 if (rc != 0) {
2025                         CERROR("Can't spawn ranal scheduler[%d]: %d\n",
2026                                i, rc);
2027                         goto failed;
2028                 }
2029         }
2030
2031         if (kranal_data.kra_ndevs == 0)
2032                 goto failed;
2033
2034         rc = libcfs_nal_cmd_register(RANAL, &kranal_cmd, NULL);
2035         if (rc != 0) {
2036                 CERROR("Can't initialise command interface (rc = %d)\n", rc);
2037                 goto failed;
2038         }
2039
2040         /* flag everything initialised */
2041         kranal_data.kra_init = RANAL_INIT_ALL;
2042         /*****************************************************/
2043
2044         CDEBUG(D_MALLOC, "initial kmem %d\n", atomic_read(&portal_kmemory));
2045         printk(KERN_INFO "Lustre: RapidArray NAL loaded "
2046                "(initial mem %d)\n", pkmem);
2047
2048         return PTL_OK;
2049
2050  failed:
2051         kranal_api_shutdown(&kranal_api);
2052         return PTL_FAIL;
2053 }
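/* Note on the staged bring-up above: kra_init records how far startup
 * got so kranal_api_shutdown() can unwind exactly that much by falling
 * through its switch.  Assumed ordering of the stages (the definitions
 * themselves live in ranal.h, not shown here): */
#if 0
enum kranal_init_stage {
        RANAL_INIT_NOTHING,     /* nothing to undo */
        RANAL_INIT_DATA,        /* static tables/locks initialised */
        RANAL_INIT_LIB,         /* lib_init() completed */
        RANAL_INIT_ALL          /* NAL command handler registered */
};
#endif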
2054
2055 void __exit
2056 kranal_module_fini (void)
2057 {
2058         if (kranal_tunables.kra_sysctl != NULL)
2059                 unregister_sysctl_table(kranal_tunables.kra_sysctl);
2060
2061         PtlNIFini(kranal_ni);
2062
2063         ptl_unregister_nal(RANAL);
2064 }
2065
2066 int __init
2067 kranal_module_init (void)
2068 {
2069         int    rc;
2070
2071         /* the following must be sizeof(int) for
2072          * proc_dointvec/kranal_listener_procint() */
2073         LASSERT (sizeof(kranal_tunables.kra_timeout) == sizeof(int));
2074         LASSERT (sizeof(kranal_tunables.kra_listener_timeout) == sizeof(int));
2075         LASSERT (sizeof(kranal_tunables.kra_backlog) == sizeof(int));
2076         LASSERT (sizeof(kranal_tunables.kra_port) == sizeof(int));
2077         LASSERT (sizeof(kranal_tunables.kra_max_immediate) == sizeof(int));
2078
2079         kranal_api.nal_ni_init = kranal_api_startup;
2080         kranal_api.nal_ni_fini = kranal_api_shutdown;
2081
2082         /* Initialise dynamic tunables to defaults once only */
2083         kranal_tunables.kra_timeout = RANAL_TIMEOUT;
2084         kranal_tunables.kra_listener_timeout = RANAL_LISTENER_TIMEOUT;
2085         kranal_tunables.kra_backlog = RANAL_BACKLOG;
2086         kranal_tunables.kra_port = RANAL_PORT;
2087         kranal_tunables.kra_max_immediate = RANAL_MAX_IMMEDIATE;
2088
2089         rc = ptl_register_nal(RANAL, &kranal_api);
2090         if (rc != PTL_OK) {
2091                 CERROR("Can't register RANAL: %d\n", rc);
2092                 return -ENOMEM;               /* no errno maps cleanly here */
2093         }
2094
2095         /* Pure gateways want the NAL started up at module load time... */
2096         rc = PtlNIInit(RANAL, LUSTRE_SRV_PTL_PID, NULL, NULL, &kranal_ni);
2097         if (rc != PTL_OK && rc != PTL_IFACE_DUP) {
2098                 ptl_unregister_nal(RANAL);
2099                 return -ENODEV;
2100         }
2101
2102         kranal_tunables.kra_sysctl =
2103                 register_sysctl_table(kranal_top_ctl_table, 0);
2104         if (kranal_tunables.kra_sysctl == NULL) {
2105                 CERROR("Can't register sysctl table\n");
2106                 PtlNIFini(kranal_ni);
2107                 ptl_unregister_nal(RANAL);
2108                 return -ENOMEM;
2109         }
2110
2111         return 0;
2112 }
2113
2114 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
2115 MODULE_DESCRIPTION("Kernel RapidArray NAL v0.01");
2116 MODULE_LICENSE("GPL");
2117
2118 module_init(kranal_module_init);
2119 module_exit(kranal_module_fini);