/*
 * Source: fs/lustre-release.git — lnet/klnds/ralnd/ralnd.c
 * (gitweb extract, commit 2fe1aa0f1c69b5c6d40c22dcb1a6527d45f10680)
 */
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Copyright (C) 2004 Cluster File Systems, Inc.
5  *   Author: Eric Barton <eric@bartonsoftware.com>
6  *
7  *   This file is part of Lustre, http://www.lustre.org.
8  *
9  *   Lustre is free software; you can redistribute it and/or
10  *   modify it under the terms of version 2 of the GNU General Public
11  *   License as published by the Free Software Foundation.
12  *
13  *   Lustre is distributed in the hope that it will be useful,
14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *   GNU General Public License for more details.
17  *
18  *   You should have received a copy of the GNU General Public License
19  *   along with Lustre; if not, write to the Free Software
20  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  */
23 #include "ranal.h"
24
25
nal_t                   kranal_api;       /* NAL interface table registered with portals */
ptl_handle_ni_t         kranal_ni;        /* network interface handle */
kra_data_t              kranal_data;      /* global state for this NAL */
kra_tunables_t          kranal_tunables;  /* runtime tunables, exposed via sysctl below */

/* sysctl ids for each entry in kranal_ctl_table */
#define RANAL_SYSCTL_TIMEOUT           1
#define RANAL_SYSCTL_LISTENER_TIMEOUT  2
#define RANAL_SYSCTL_BACKLOG           3
#define RANAL_SYSCTL_PORT              4
#define RANAL_SYSCTL_MAX_IMMEDIATE     5

/* id of the top-level "ranal" sysctl directory */
#define RANAL_SYSCTL                   202
38
/* sysctl entries for the tunables.  Old-style ctl_table layout:
 * {ctl_name, procname, data, maxlen, mode, child, proc_handler}.
 * "backlog" and "port" go through kranal_listener_procint rather than
 * plain proc_dointvec — presumably so the listener reacts to changes;
 * TODO confirm against kranal_listener_procint's implementation. */
static ctl_table kranal_ctl_table[] = {
        {RANAL_SYSCTL_TIMEOUT, "timeout",
         &kranal_tunables.kra_timeout, sizeof(int),
         0644, NULL, &proc_dointvec},
        {RANAL_SYSCTL_LISTENER_TIMEOUT, "listener_timeout",
         &kranal_tunables.kra_listener_timeout, sizeof(int),
         0644, NULL, &proc_dointvec},
        {RANAL_SYSCTL_BACKLOG, "backlog",
         &kranal_tunables.kra_backlog, sizeof(int),
         0644, NULL, kranal_listener_procint},
        {RANAL_SYSCTL_PORT, "port",
         &kranal_tunables.kra_port, sizeof(int),
         0644, NULL, kranal_listener_procint},
        {RANAL_SYSCTL_MAX_IMMEDIATE, "max_immediate",
         &kranal_tunables.kra_max_immediate, sizeof(int),
         0644, NULL, &proc_dointvec},
        { 0 }                           /* terminator */
};
57
/* top-level sysctl directory (/proc/sys/ranal) holding the table above */
static ctl_table kranal_top_ctl_table[] = {
        {RANAL_SYSCTL, "ranal", NULL, 0, 0555, kranal_ctl_table},
        { 0 }                           /* terminator */
};
62
63 int
64 kranal_sock_write (struct socket *sock, void *buffer, int nob)
65 {
66         int           rc;
67         mm_segment_t  oldmm = get_fs();
68         struct iovec  iov = {
69                 .iov_base = buffer,
70                 .iov_len  = nob
71         };
72         struct msghdr msg = {
73                 .msg_name       = NULL,
74                 .msg_namelen    = 0,
75                 .msg_iov        = &iov,
76                 .msg_iovlen     = 1,
77                 .msg_control    = NULL,
78                 .msg_controllen = 0,
79                 .msg_flags      = MSG_DONTWAIT
80         };
81
82         /* We've set up the socket's send buffer to be large enough for
83          * everything we send, so a single non-blocking send should
84          * complete without error. */
85
86         set_fs(KERNEL_DS);
87         rc = sock_sendmsg(sock, &msg, iov.iov_len);
88         set_fs(oldmm);
89
90         if (rc == nob)
91                 return 0;
92         
93         if (rc >= 0)
94                 return -EAGAIN;
95
96         return rc;
97 }
98
/* Read exactly 'nob' bytes from 'sock' into 'buffer' within 'timeout'
 * seconds.  Returns 0 on success, -ETIMEDOUT when the time budget is
 * exhausted, -ECONNABORTED if the peer closes the connection (EOF), or
 * another -ve errno from the socket layer. */
int
kranal_sock_read (struct socket *sock, void *buffer, int nob, int timeout)
{
        int            rc;
        mm_segment_t   oldmm = get_fs();
        long           ticks = timeout * HZ;    /* remaining budget (jiffies) */
        int            wanted = nob;            /* original request, debug only */
        unsigned long  then;
        struct timeval tv;

        LASSERT (nob > 0);
        LASSERT (ticks > 0);

        for (;;) {
                /* Rebuild iov/msg each pass: buffer/nob advance below */
                struct iovec  iov = {
                        .iov_base = buffer,
                        .iov_len  = nob
                };
                struct msghdr msg = {
                        .msg_name       = NULL,
                        .msg_namelen    = 0,
                        .msg_iov        = &iov,
                        .msg_iovlen     = 1,
                        .msg_control    = NULL,
                        .msg_controllen = 0,
                        .msg_flags      = 0
                };

                /* Set receive timeout to remaining time, so one recvmsg
                 * can never overrun the overall budget */
                tv = (struct timeval) {
                        .tv_sec = ticks / HZ,
                        .tv_usec = ((ticks % HZ) * 1000000) / HZ
                };
                set_fs(KERNEL_DS);
                rc = sock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
                                     (char *)&tv, sizeof(tv));
                set_fs(oldmm);
                if (rc != 0) {
                        CERROR("Can't set socket recv timeout %d: %d\n",
                               timeout, rc);
                        return rc;
                }

                set_fs(KERNEL_DS);
                then = jiffies;
                rc = sock_recvmsg(sock, &msg, iov.iov_len, 0);
                ticks -= jiffies - then;        /* charge elapsed time */
                set_fs(oldmm);

                /* NB D_WARNING: looks like leftover debugging trace */
                CDEBUG(D_WARNING, "rc %d at %d/%d bytes %d/%d secs\n",
                       rc, wanted - nob, wanted, timeout - (int)(ticks/HZ), timeout);

                if (rc < 0)
                        return rc;              /* socket error */

                if (rc == 0)
                        return -ECONNABORTED;   /* EOF: peer closed */

                buffer = ((char *)buffer) + rc; /* consume received bytes */
                nob -= rc;

                if (nob == 0)
                        return 0;               /* got everything */

                if (ticks <= 0)
                        return -ETIMEDOUT;      /* budget exhausted */
        }
}
167
168 int
169 kranal_create_sock(struct socket **sockp)
170 {
171         struct socket       *sock;
172         int                  rc;
173         int                  option;
174         mm_segment_t         oldmm = get_fs();
175
176         rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
177         if (rc != 0) {
178                 CERROR("Can't create socket: %d\n", rc);
179                 return rc;
180         }
181
182         /* Ensure sending connection info doesn't block */
183         option = 2 * sizeof(kra_connreq_t);
184         set_fs(KERNEL_DS);
185         rc = sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
186                              (char *)&option, sizeof(option));
187         set_fs(oldmm);
188         if (rc != 0) {
189                 CERROR("Can't set send buffer %d: %d\n", option, rc);
190                 goto failed;
191         }
192
193         option = 1;
194         set_fs(KERNEL_DS);
195         rc = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
196                              (char *)&option, sizeof(option));
197         set_fs(oldmm);
198         if (rc != 0) {
199                 CERROR("Can't set SO_REUSEADDR: %d\n", rc);
200                 goto failed;
201         }
202
203         *sockp = sock;
204         return 0;
205
206  failed:
207         sock_release(sock);
208         return rc;
209 }
210
/* Block the current task uninterruptibly for 'ticks' jiffies */
void
kranal_pause(int ticks)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(ticks);
}
217
/* Fill in *connreq describing 'conn', addressed to 'dstnid': our NID,
 * device id, stamps and timeout, plus the RapidArray endpoint params
 * obtained from RapkGetRiParams.  Always sent in host byte order; the
 * receiver detects endianness from racr_magic. */
void
kranal_pack_connreq(kra_connreq_t *connreq, kra_conn_t *conn, ptl_nid_t dstnid)
{
        RAP_RETURN   rrc;

        memset(connreq, 0, sizeof(*connreq));

        connreq->racr_magic     = RANAL_MSG_MAGIC;
        connreq->racr_version   = RANAL_MSG_VERSION;
        connreq->racr_devid     = conn->rac_device->rad_id;
        connreq->racr_srcnid    = kranal_lib.libnal_ni.ni_pid.nid;
        connreq->racr_dstnid    = dstnid;
        connreq->racr_peerstamp = kranal_data.kra_peerstamp;
        connreq->racr_connstamp = conn->rac_my_connstamp;
        connreq->racr_timeout   = conn->rac_timeout;

        rrc = RapkGetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
        LASSERT(rrc == RAP_SUCCESS);

        /* NB D_WARNING: looks like leftover handshake-debugging trace */
        CDEBUG(D_WARNING,"devid %d, riparams: HID %08x FDH %08x PT %08x CC %08x\n",
               connreq->racr_devid,
               connreq->racr_riparams.HostId,
               connreq->racr_riparams.FmaDomainHndl,
               connreq->racr_riparams.PTag,
               connreq->racr_riparams.CompletionCookie);
}
244
/* Receive a connection request on 'sock' within 'timeout' seconds and
 * validate it.  If the peer is opposite-endian (detected via the magic)
 * every field is byte-swapped in place first.  Returns 0 on success,
 * -EPROTO on any protocol violation, or the socket read's -ve errno. */
int
kranal_recv_connreq(struct socket *sock, kra_connreq_t *connreq, int timeout)
{
        int         rc;

        rc = kranal_sock_read(sock, connreq, sizeof(*connreq), timeout);
        if (rc != 0) {
                CERROR("Read failed: %d\n", rc);
                return rc;
        }

        if (connreq->racr_magic != RANAL_MSG_MAGIC) {
                /* not our magic as-is: either opposite-endian, or garbage */
                if (__swab32(connreq->racr_magic) != RANAL_MSG_MAGIC) {
                        CERROR("Unexpected magic %08x\n", connreq->racr_magic);
                        return -EPROTO;
                }

                /* opposite-endian peer: fix up every field */
                __swab32s(&connreq->racr_magic);
                __swab16s(&connreq->racr_version);
                __swab16s(&connreq->racr_devid);
                __swab64s(&connreq->racr_srcnid);
                __swab64s(&connreq->racr_dstnid);
                __swab64s(&connreq->racr_peerstamp);
                __swab64s(&connreq->racr_connstamp);
                __swab32s(&connreq->racr_timeout);

                __swab32s(&connreq->racr_riparams.HostId);
                __swab32s(&connreq->racr_riparams.FmaDomainHndl);
                __swab32s(&connreq->racr_riparams.PTag);
                __swab32s(&connreq->racr_riparams.CompletionCookie);
        }

        if (connreq->racr_version != RANAL_MSG_VERSION) {
                CERROR("Unexpected version %d\n", connreq->racr_version);
                return -EPROTO;
        }

        if (connreq->racr_srcnid == PTL_NID_ANY ||
            connreq->racr_dstnid == PTL_NID_ANY) {
                CERROR("Received PTL_NID_ANY\n");
                return -EPROTO;
        }

        if (connreq->racr_timeout < RANAL_MIN_TIMEOUT) {
                CERROR("Received timeout %d < MIN %d\n",
                       connreq->racr_timeout, RANAL_MIN_TIMEOUT);
                return -EPROTO;
        }

        return 0;
}
296
/* Close every conn to 'peer' made obsolete by 'newconn'; returns the
 * number closed.  "_locked" suffix: the caller holds kra_global_lock
 * for writing (the convention used by kranal_close_conn in this file). */
int
kranal_close_stale_conns_locked (kra_peer_t *peer, kra_conn_t *newconn)
{
        kra_conn_t         *conn;
        struct list_head   *ctmp;
        struct list_head   *cnxt;
        int                 loopback;
        int                 count = 0;

        /* connecting to myself: both halves of a loopback conn coexist */
        loopback = peer->rap_nid == kranal_lib.libnal_ni.ni_pid.nid;

        list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
                conn = list_entry(ctmp, kra_conn_t, rac_list);

                if (conn == newconn)
                        continue;

                if (conn->rac_peerstamp != newconn->rac_peerstamp) {
                        /* conn belongs to an earlier incarnation of the peer */
                        CDEBUG(D_NET, "Closing stale conn nid:"LPX64
                               " peerstamp:"LPX64"("LPX64")\n", peer->rap_nid,
                               conn->rac_peerstamp, newconn->rac_peerstamp);
                        LASSERT (conn->rac_peerstamp < newconn->rac_peerstamp);
                        count++;
                        kranal_close_conn_locked(conn, -ESTALE);
                        continue;
                }

                /* conns on other devices are independent */
                if (conn->rac_device != newconn->rac_device)
                        continue;

                /* loopback: don't close newconn's own other half */
                if (loopback &&
                    newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
                    newconn->rac_peer_connstamp == conn->rac_my_connstamp)
                        continue;

                LASSERT (conn->rac_peer_connstamp < newconn->rac_peer_connstamp);

                CDEBUG(D_NET, "Closing stale conn nid:"LPX64
                       " connstamp:"LPX64"("LPX64")\n", peer->rap_nid,
                       conn->rac_peer_connstamp, newconn->rac_peer_connstamp);

                count++;
                kranal_close_conn_locked(conn, -ESTALE);
        }

        return count;
}
344
/* Decide whether 'newconn' duplicates (or is older than) an existing
 * conn to 'peer'.  Returns 0 if 'newconn' is genuinely new, else:
 *   1 => newconn is from a stale incarnation of the peer
 *   2 => newconn is an older connection attempt than an existing conn
 *   3 => identical connstamp: the peer is violating the protocol
 * Caller holds kra_global_lock. */
int
kranal_conn_isdup_locked(kra_peer_t *peer, kra_conn_t *newconn)
{
        kra_conn_t       *conn;
        struct list_head *tmp;
        int               loopback;

        loopback = peer->rap_nid == kranal_lib.libnal_ni.ni_pid.nid;

        list_for_each(tmp, &peer->rap_conns) {
                conn = list_entry(tmp, kra_conn_t, rac_list);

                /* 'newconn' is from an earlier version of 'peer'!!! */
                if (newconn->rac_peerstamp < conn->rac_peerstamp)
                        return 1;

                /* 'conn' is from an earlier version of 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->rac_peerstamp > conn->rac_peerstamp)
                        continue;

                /* Different devices are OK */
                if (conn->rac_device != newconn->rac_device)
                        continue;

                /* It's me connecting to myself */
                if (loopback &&
                    newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
                    newconn->rac_peer_connstamp == conn->rac_my_connstamp)
                        continue;

                /* 'newconn' is an earlier connection from 'peer'!!! */
                if (newconn->rac_peer_connstamp < conn->rac_peer_connstamp)
                        return 2;

                /* 'conn' is an earlier connection from 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->rac_peer_connstamp > conn->rac_peer_connstamp)
                        continue;

                /* 'newconn' has the SAME connection stamp; 'peer' isn't
                 * playing the game... */
                return 3;
        }

        return 0;
}
392
/* Stamp 'conn' with a fresh connstamp and a cqid that no current conn
 * is using.  Takes kra_global_lock itself (no "_locked" suffix), so the
 * caller must NOT already hold it. */
void
kranal_set_conn_uniqueness (kra_conn_t *conn)
{
        unsigned long  flags;

        write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        conn->rac_my_connstamp = kranal_data.kra_connstamp++;

        do {    /* allocate a unique cqid */
                conn->rac_cqid = kranal_data.kra_next_cqid++;
        } while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL);

        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
}
408
/* Allocate and initialise a new conn on device 'dev'.  On success sets
 * *connp and returns 0; the conn carries one reference for the caller.
 * Returns -ENOMEM on allocation failure or -ENETDOWN if the RapidArray
 * endpoint (RI) can't be created. */
int
kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
{
        kra_conn_t    *conn;
        RAP_RETURN     rrc;

        LASSERT (!in_interrupt());
        PORTAL_ALLOC(conn, sizeof(*conn));

        if (conn == NULL)
                return -ENOMEM;

        memset(conn, 0, sizeof(*conn));
        atomic_set(&conn->rac_refcount, 1);     /* caller's ref */
        INIT_LIST_HEAD(&conn->rac_list);
        INIT_LIST_HEAD(&conn->rac_hashlist);
        INIT_LIST_HEAD(&conn->rac_schedlist);
        INIT_LIST_HEAD(&conn->rac_fmaq);
        INIT_LIST_HEAD(&conn->rac_rdmaq);
        INIT_LIST_HEAD(&conn->rac_replyq);
        spin_lock_init(&conn->rac_lock);

        /* unique connstamp + cqid (takes the global lock itself) */
        kranal_set_conn_uniqueness(conn);

        conn->rac_device = dev;
        /* never advertise a timeout below the protocol minimum */
        conn->rac_timeout = MAX(kranal_tunables.kra_timeout, RANAL_MIN_TIMEOUT);
        kranal_update_reaper_timeout(conn->rac_timeout);

        rrc = RapkCreateRi(dev->rad_handle, conn->rac_cqid,
                           &conn->rac_rihandle);
        if (rrc != RAP_SUCCESS) {
                CERROR("RapkCreateRi failed: %d\n", rrc);
                PORTAL_FREE(conn, sizeof(*conn));
                return -ENETDOWN;
        }

        atomic_inc(&kranal_data.kra_nconns);
        *connp = conn;
        return 0;
}
449
/* Final teardown, called when the last reference is dropped: the conn
 * must be off every list with all message queues empty.  Destroys the
 * RapidArray endpoint, releases the peer ref (if any) and frees. */
void
kranal_destroy_conn(kra_conn_t *conn)
{
        RAP_RETURN         rrc;

        LASSERT (!in_interrupt());
        LASSERT (!conn->rac_scheduled);
        LASSERT (list_empty(&conn->rac_list));
        LASSERT (list_empty(&conn->rac_hashlist));
        LASSERT (list_empty(&conn->rac_schedlist));
        LASSERT (atomic_read(&conn->rac_refcount) == 0);
        LASSERT (list_empty(&conn->rac_fmaq));
        LASSERT (list_empty(&conn->rac_rdmaq));
        LASSERT (list_empty(&conn->rac_replyq));

        rrc = RapkDestroyRi(conn->rac_device->rad_handle,
                            conn->rac_rihandle);
        LASSERT (rrc == RAP_SUCCESS);

        /* rac_peer may be NULL if the handshake never completed */
        if (conn->rac_peer != NULL)
                kranal_peer_decref(conn->rac_peer);

        PORTAL_FREE(conn, sizeof(*conn));
        atomic_dec(&kranal_data.kra_nconns);
}
475
/* Move a CLOSING conn to CLOSED: unhash it so no new callbacks can find
 * it, drop the hash table's reference, and hand it to the device
 * scheduler to fail any uncompleted comms.  Caller holds
 * kra_global_lock for writing. */
void
kranal_terminate_conn_locked (kra_conn_t *conn)
{
        LASSERT (!in_interrupt());
        LASSERT (conn->rac_state == RANAL_CONN_CLOSING);
        LASSERT (!list_empty(&conn->rac_hashlist));
        LASSERT (list_empty(&conn->rac_list));

        /* Remove from conn hash table: no new callbacks */
        list_del_init(&conn->rac_hashlist);
        kranal_conn_decref(conn);       /* drop the hash table's ref */

        conn->rac_state = RANAL_CONN_CLOSED;

        /* schedule to clear out all uncompleted comms in context of dev's
         * scheduler */
        kranal_schedule_conn(conn);
}
494
/* Begin closing an ESTABLISHED conn: unlink it from its peer (possibly
 * unlinking a non-persistent peer too), mark it CLOSING, and schedule
 * it so the device scheduler sends the CLOSE.  Caller holds
 * kra_global_lock for writing. */
void
kranal_close_conn_locked (kra_conn_t *conn, int error)
{
        kra_peer_t        *peer = conn->rac_peer;

        CDEBUG(error == 0 ? D_NET : D_ERROR,
               "closing conn to "LPX64": error %d\n", peer->rap_nid, error);

        LASSERT (!in_interrupt());
        LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED);
        LASSERT (!list_empty(&conn->rac_hashlist));
        LASSERT (!list_empty(&conn->rac_list));

        list_del_init(&conn->rac_list);

        if (list_empty(&peer->rap_conns) &&
            peer->rap_persistence == 0) {
                /* Non-persistent peer with no more conns... */
                kranal_unlink_peer_locked(peer);
        }

        /* Reset RX timeout to ensure we wait for an incoming CLOSE for the
         * full timeout */
        conn->rac_last_rx = jiffies;
        mb();                           /* timestamp visible before state change */

        conn->rac_state = RANAL_CONN_CLOSING;
        kranal_schedule_conn(conn);             /* schedule sending CLOSE */

        kranal_conn_decref(conn);               /* lose peer's ref */
}
526
/* Locking wrapper for kranal_close_conn_locked: closes 'conn' with
 * 'error' only if it is still ESTABLISHED (a racing closer may have
 * got there first). */
void
kranal_close_conn (kra_conn_t *conn, int error)
{
        unsigned long    flags;


        write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        if (conn->rac_state == RANAL_CONN_ESTABLISHED)
                kranal_close_conn_locked(conn, error);

        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
}
540
/* Apply the peer's handshake data to 'conn': install its RapidArray
 * endpoint params via RapkSetRiParams, record its stamps, and derive
 * our keepalive interval from its advertised timeout.  peer_ip and
 * peer_port are used for error reporting only.  Returns 0 or -EPROTO. */
int
kranal_set_conn_params(kra_conn_t *conn, kra_connreq_t *connreq,
                       __u32 peer_ip, int peer_port)
{
        RAP_RETURN    rrc;

        /* NB D_WARNING: looks like leftover handshake-debugging trace */
        CDEBUG(D_WARNING,"devid %d, riparams: HID %08x FDH %08x PT %08x CC %08x\n",
               conn->rac_device->rad_id,
               connreq->racr_riparams.HostId,
               connreq->racr_riparams.FmaDomainHndl,
               connreq->racr_riparams.PTag,
               connreq->racr_riparams.CompletionCookie);

        rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
        if (rrc != RAP_SUCCESS) {
                CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer_ip), peer_port, rrc);
                return -EPROTO;
        }

        conn->rac_peerstamp = connreq->racr_peerstamp;
        conn->rac_peer_connstamp = connreq->racr_connstamp;
        conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout);
        kranal_update_reaper_timeout(conn->rac_keepalive);
        return 0;
}
567
/* Passive-side handshake: a peer has connected to our listener on
 * 'sock'.  Verify it comes from a privileged port, read its connreq,
 * create a conn on the matching device, reply with our own connreq and
 * apply the peer's params.  On success returns 0 with *connp (caller
 * owns the initial ref), *src_nidp and *dst_nidp filled in.
 * NB the D_WARNING CDEBUGs throughout look like leftover debug trace. */
int
kranal_passive_conn_handshake (struct socket *sock, ptl_nid_t *src_nidp,
                               ptl_nid_t *dst_nidp, kra_conn_t **connp)
{
        struct sockaddr_in   addr;
        __u32                peer_ip;
        unsigned int         peer_port;
        kra_connreq_t        rx_connreq;
        kra_connreq_t        tx_connreq;
        kra_conn_t          *conn;
        kra_device_t        *dev;
        int                  rc;
        int                  len;
        int                  i;

        CDEBUG(D_WARNING,"!!\n");

        len = sizeof(addr);
        rc = sock->ops->getname(sock, (struct sockaddr *)&addr, &len, 2);
        if (rc != 0) {
                CERROR("Can't get peer's IP: %d\n", rc);
                return rc;
        }

        peer_ip = ntohl(addr.sin_addr.s_addr);
        peer_port = ntohs(addr.sin_port);

        CDEBUG(D_WARNING,"%u.%u.%u.%u\n", HIPQUAD(peer_ip));

        /* peer must bind a reserved (privileged) port — see
         * ranal_connect_sock on the active side */
        if (peer_port >= 1024) {
                CERROR("Refusing unprivileged connection from %u.%u.%u.%u/%d\n",
                       HIPQUAD(peer_ip), peer_port);
                return -ECONNREFUSED;
        }

        CDEBUG(D_WARNING,"%u.%u.%u.%u\n", HIPQUAD(peer_ip));

        rc = kranal_recv_connreq(sock, &rx_connreq,
                                 kranal_tunables.kra_listener_timeout);
        if (rc != 0) {
                CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer_ip), peer_port, rc);
                return rc;
        }

        CDEBUG(D_WARNING,"%u.%u.%u.%u\n", HIPQUAD(peer_ip));

        /* find the local device the peer wants to talk to */
        for (i = 0;;i++) {
                if (i == kranal_data.kra_ndevs) {
                        CERROR("Can't match dev %d from %u.%u.%u.%u/%d\n",
                               rx_connreq.racr_devid, HIPQUAD(peer_ip), peer_port);
                        return -ENODEV;
                }
                dev = &kranal_data.kra_devices[i];
                if (dev->rad_id == rx_connreq.racr_devid)
                        break;
        }

        CDEBUG(D_WARNING,"%u.%u.%u.%u\n", HIPQUAD(peer_ip));

        rc = kranal_create_conn(&conn, dev);
        if (rc != 0)
                return rc;

        CDEBUG(D_WARNING,"%u.%u.%u.%u\n", HIPQUAD(peer_ip));

        kranal_pack_connreq(&tx_connreq, conn, rx_connreq.racr_srcnid);

        rc = kranal_sock_write(sock, &tx_connreq, sizeof(tx_connreq));
        if (rc != 0) {
                CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer_ip), peer_port, rc);
                kranal_conn_decref(conn);
                return rc;
        }

        CDEBUG(D_WARNING,"%u.%u.%u.%u\n", HIPQUAD(peer_ip));

        rc = kranal_set_conn_params(conn, &rx_connreq, peer_ip, peer_port);
        if (rc != 0) {
                kranal_conn_decref(conn);
                return rc;
        }

        CDEBUG(D_WARNING,"%u.%u.%u.%u\n", HIPQUAD(peer_ip));

        *connp = conn;
        *src_nidp = rx_connreq.racr_srcnid;
        *dst_nidp = rx_connreq.racr_dstnid;
        return 0;
}
659
/* Active-side connect: create a socket bound to a reserved local port
 * (1023 down to 512, so the passive side's privileged-port check
 * passes) and connect it to 'peer'.  Returns 0 and sets *sockp, or
 * -EHOSTUNREACH if every reserved port is busy, or another -ve errno.
 * NOTE(review): name lacks the usual "kranal_" prefix; callers use this
 * spelling, so it is left as-is. */
int
ranal_connect_sock(kra_peer_t *peer, struct socket **sockp)
{
        struct sockaddr_in  locaddr;
        struct sockaddr_in  srvaddr;
        struct socket      *sock;
        unsigned int        port;
        int                 rc;

        for (port = 1023; port >= 512; port--) {

                memset(&locaddr, 0, sizeof(locaddr));
                locaddr.sin_family      = AF_INET;
                locaddr.sin_port        = htons(port);
                locaddr.sin_addr.s_addr = htonl(INADDR_ANY);

                memset (&srvaddr, 0, sizeof (srvaddr));
                srvaddr.sin_family      = AF_INET;
                srvaddr.sin_port        = htons (peer->rap_port);
                srvaddr.sin_addr.s_addr = htonl (peer->rap_ip);

                rc = kranal_create_sock(&sock);
                if (rc != 0)
                        return rc;

                rc = sock->ops->bind(sock,
                                     (struct sockaddr *)&locaddr, sizeof(locaddr));
                if (rc != 0) {
                        sock_release(sock);

                        /* port taken: try the next one down */
                        if (rc == -EADDRINUSE) {
                                CDEBUG(D_NET, "Port %d already in use\n", port);
                                continue;
                        }

                        CERROR("Can't bind to reserved port %d: %d\n", port, rc);
                        return rc;
                }

                rc = sock->ops->connect(sock,
                                        (struct sockaddr *)&srvaddr, sizeof(srvaddr),
                                        0);
                if (rc == 0) {
                        *sockp = sock;
                        return 0;
                }

                sock_release(sock);

                /* -EADDRNOTAVAIL means this local port can't reach the
                 * peer (e.g. a lingering old connection); retry on the
                 * next port.  Anything else is fatal. */
                if (rc != -EADDRNOTAVAIL) {
                        CERROR("Can't connect port %d to %u.%u.%u.%u/%d: %d\n",
                               port, HIPQUAD(peer->rap_ip), peer->rap_port, rc);
                        return rc;
                }

                CDEBUG(D_NET, "Port %d not available for %u.%u.%u.%u/%d\n",
                       port, HIPQUAD(peer->rap_ip), peer->rap_port);
        }

        /* all ports busy */
        return -EHOSTUNREACH;
}
722
723
/* Active-side handshake: pick a device for 'peer', create a conn,
 * connect a socket, exchange connreqs and validate the reply.  On
 * success returns 0 with *connp (caller owns the initial ref) and
 * *dst_nidp set to the NID the peer thinks it is talking to.
 * NB the D_WARNING CDEBUGs throughout look like leftover debug trace. */
int
kranal_active_conn_handshake(kra_peer_t *peer,
                             ptl_nid_t *dst_nidp, kra_conn_t **connp)
{
        kra_connreq_t       connreq;
        kra_conn_t         *conn;
        kra_device_t       *dev;
        struct socket      *sock;
        int                 rc;
        unsigned int        idx;

        CDEBUG(D_WARNING,LPX64"\n", peer->rap_nid);

        /* spread connections over all devices using both peer NIDs to ensure
         * all nids use all devices */
        idx = peer->rap_nid + kranal_lib.libnal_ni.ni_pid.nid;
        dev = &kranal_data.kra_devices[idx % kranal_data.kra_ndevs];

        rc = kranal_create_conn(&conn, dev);
        if (rc != 0)
                return rc;

        CDEBUG(D_WARNING,LPX64"\n", peer->rap_nid);

        kranal_pack_connreq(&connreq, conn, peer->rap_nid);

        rc = ranal_connect_sock(peer, &sock);
        if (rc != 0)
                goto failed_0;

        CDEBUG(D_WARNING,LPX64"\n", peer->rap_nid);

        /* CAVEAT EMPTOR: the passive side receives with a SHORT rx timeout
         * immediately after accepting a connection, so we connect and then
         * send immediately. */

        rc = kranal_sock_write(sock, &connreq, sizeof(connreq));
        if (rc != 0) {
                CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer->rap_ip), peer->rap_port, rc);
                goto failed_1;
        }

        CDEBUG(D_WARNING,LPX64"\n", peer->rap_nid);

        rc = kranal_recv_connreq(sock, &connreq, kranal_tunables.kra_timeout);
        if (rc != 0) {
                CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer->rap_ip), peer->rap_port, rc);
                goto failed_1;
        }

        CDEBUG(D_WARNING,LPX64"\n", peer->rap_nid);

        /* socket done with: release here, so later failures must use
         * failed_0 (conn only), not failed_1 */
        sock_release(sock);
        rc = -EPROTO;

        if (connreq.racr_srcnid != peer->rap_nid) {
                CERROR("Unexpected srcnid from %u.%u.%u.%u/%d: "
                       "received "LPX64" expected "LPX64"\n",
                       HIPQUAD(peer->rap_ip), peer->rap_port,
                       connreq.racr_srcnid, peer->rap_nid);
                goto failed_0;
        }

        if (connreq.racr_devid != dev->rad_id) {
                CERROR("Unexpected device id from %u.%u.%u.%u/%d: "
                       "received %d expected %d\n",
                       HIPQUAD(peer->rap_ip), peer->rap_port,
                       connreq.racr_devid, dev->rad_id);
                goto failed_0;
        }

        CDEBUG(D_WARNING,LPX64"\n", peer->rap_nid);

        rc = kranal_set_conn_params(conn, &connreq,
                                    peer->rap_ip, peer->rap_port);
        if (rc != 0)
                goto failed_0;

        *connp = conn;
        *dst_nidp = connreq.racr_dstnid;
        CDEBUG(D_WARNING,LPX64"\n", peer->rap_nid);
        return 0;

 failed_1:
        sock_release(sock);
 failed_0:
        kranal_conn_decref(conn);
        CDEBUG(D_WARNING,LPX64": %d\n", peer->rap_nid, rc);
        return rc;
}
816
int
kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
{
        /* Complete a connection handshake and install the new conn.
         *
         * Active case (sock == NULL): the connd is connecting out to
         * 'peer' (which must be non-NULL and marked connecting).
         * Passive case (sock != NULL): the listener accepted 'sock' and
         * 'peer' must be NULL; a peer is created (or an existing one
         * found) for the NID learned during the handshake.
         *
         * On success the conn is linked into the peer's conn list and
         * the cqid hash table, all txs blocked on the peer are queued,
         * and stale conns to the same peer are closed.  Returns 0 or a
         * negative errno. */
        kra_peer_t        *peer2;
        kra_tx_t          *tx;
        ptl_nid_t          peer_nid;
        ptl_nid_t          dst_nid;
        unsigned long      flags;
        kra_conn_t        *conn;
        int                rc;
        int                nstale;
        int                new_peer = 0;

        if (sock == NULL) {
                /* active: connd wants to connect to 'peer' */
                LASSERT (peer != NULL);
                LASSERT (peer->rap_connecting);

                rc = kranal_active_conn_handshake(peer, &dst_nid, &conn);
                if (rc != 0)
                        return rc;

                write_lock_irqsave(&kranal_data.kra_global_lock, flags);

                if (!kranal_peer_active(peer)) {
                        /* raced with peer getting unlinked */
                        write_unlock_irqrestore(&kranal_data.kra_global_lock,
                                                flags);
                        kranal_conn_decref(conn);
                        return -ESTALE;
                }

                peer_nid = peer->rap_nid;
        } else {
                /* passive: listener accepted 'sock' */
                LASSERT (peer == NULL);

                rc = kranal_passive_conn_handshake(sock, &peer_nid,
                                                   &dst_nid, &conn);
                if (rc != 0)
                        return rc;

                /* assume this is a new peer */
                peer = kranal_create_peer(peer_nid);
                if (peer == NULL) {
                        CERROR("Can't allocate peer for "LPX64"\n", peer_nid);
                        kranal_conn_decref(conn);
                        return -ENOMEM;
                }

                write_lock_irqsave(&kranal_data.kra_global_lock, flags);

                peer2 = kranal_find_peer_locked(peer_nid);
                if (peer2 == NULL) {
                        new_peer = 1;
                } else {
                        /* peer_nid already in the peer table */
                        kranal_peer_decref(peer);
                        peer = peer2;
                }
        }

        /* a brand-new peer is (not yet) in the table; an existing one is */
        LASSERT (!new_peer == !kranal_peer_active(peer));

        /* Refuse connection if peer thinks we are a different NID.  We check
         * this while holding the global lock, to synch with connection
         * destruction on NID change. */
        if (dst_nid != kranal_lib.libnal_ni.ni_pid.nid) {
                write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

                CERROR("Stale/bad connection with "LPX64
                       ": dst_nid "LPX64", expected "LPX64"\n",
                       peer_nid, dst_nid, kranal_lib.libnal_ni.ni_pid.nid);
                rc = -ESTALE;
                goto failed;
        }

        /* Refuse to duplicate an existing connection (both sides might try to
         * connect at once).  NB we return success!  We _are_ connected so we
         * _don't_ have any blocked txs to complete with failure. */
        rc = kranal_conn_isdup_locked(peer, conn);
        if (rc != 0) {
                LASSERT (!list_empty(&peer->rap_conns));
                LASSERT (list_empty(&peer->rap_tx_queue));
                write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
                CWARN("Not creating duplicate connection to "LPX64": %d\n",
                      peer_nid, rc);
                rc = 0;
                goto failed;
        }

        if (new_peer) {
                /* peer table takes my ref on the new peer */
                list_add_tail(&peer->rap_list,
                              kranal_nid2peerlist(peer_nid));
        }

        kranal_peer_addref(peer);               /* +1 ref for conn */
        conn->rac_peer = peer;
        list_add_tail(&conn->rac_list, &peer->rap_conns);

        kranal_conn_addref(conn);               /* +1 ref for conn table */
        list_add_tail(&conn->rac_hashlist,
                      kranal_cqid2connlist(conn->rac_cqid));

        /* Schedule all packets blocking for a connection */
        while (!list_empty(&peer->rap_tx_queue)) {
                tx = list_entry(peer->rap_tx_queue.next,
                                kra_tx_t, tx_list);

                list_del(&tx->tx_list);
                kranal_post_fma(conn, tx);
        }

        nstale = kranal_close_stale_conns_locked(peer, conn);

        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

        /* CAVEAT EMPTOR: passive peer can disappear NOW */

        if (nstale != 0)
                CWARN("Closed %d stale conns to "LPX64"\n", nstale, peer_nid);

        /* Ensure conn gets checked.  Transmits may have been queued and an
         * FMA event may have happened before it got in the cq hash table */
        kranal_schedule_conn(conn);
        return 0;

 failed:
        /* drop the ref the passive path took on a brand-new peer; the
         * caller's conn ref goes too */
        if (new_peer)
                kranal_peer_decref(peer);
        kranal_conn_decref(conn);
        return rc;
}
951
952 void
953 kranal_connect (kra_peer_t *peer)
954 {
955         kra_tx_t          *tx;
956         unsigned long      flags;
957         struct list_head   zombies;
958         int                rc;
959
960         LASSERT (peer->rap_connecting);
961
962         CDEBUG(D_WARNING,"About to handshake "LPX64"\n", peer->rap_nid);
963
964         rc = kranal_conn_handshake(NULL, peer);
965
966         CDEBUG(D_WARNING,"Done handshake "LPX64":%d \n", peer->rap_nid, rc);
967
968         write_lock_irqsave(&kranal_data.kra_global_lock, flags);
969
970         LASSERT (peer->rap_connecting);
971         peer->rap_connecting = 0;
972
973         if (rc == 0) {
974                 /* kranal_conn_handshake() queues blocked txs immediately on
975                  * success to avoid messages jumping the queue */
976                 LASSERT (list_empty(&peer->rap_tx_queue));
977
978                 /* reset reconnection timeouts */
979                 peer->rap_reconnect_interval = RANAL_MIN_RECONNECT_INTERVAL;
980                 peer->rap_reconnect_time = CURRENT_SECONDS;
981
982                 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
983                 return;
984         }
985
986         LASSERT (peer->rap_reconnect_interval != 0);
987         peer->rap_reconnect_time = CURRENT_SECONDS + peer->rap_reconnect_interval;
988         peer->rap_reconnect_interval = MAX(RANAL_MAX_RECONNECT_INTERVAL,
989                                            1 * peer->rap_reconnect_interval);
990
991         /* Grab all blocked packets while we have the global lock */
992         list_add(&zombies, &peer->rap_tx_queue);
993         list_del_init(&peer->rap_tx_queue);
994
995         write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
996
997         if (list_empty(&zombies))
998                 return;
999
1000         CWARN("Dropping packets for "LPX64": connection failed\n",
1001               peer->rap_nid);
1002
1003         do {
1004                 tx = list_entry(zombies.next, kra_tx_t, tx_list);
1005
1006                 list_del(&tx->tx_list);
1007                 kranal_tx_done(tx, -EHOSTUNREACH);
1008
1009         } while (!list_empty(&zombies));
1010 }
1011
1012 void
1013 kranal_free_acceptsock (kra_acceptsock_t *ras)
1014 {
1015         sock_release(ras->ras_sock);
1016         PORTAL_FREE(ras, sizeof(*ras));
1017 }
1018
int
kranal_listener (void *arg)
{
        /* Listener thread.  Creates a socket bound to kra_port, listens
         * with kra_backlog, then loops accepting connections and
         * queueing them on kra_connd_acceptq for the connd, until
         * kra_listener_shutdown is set.  Startup and shutdown are
         * synchronised with the parent via kra_listener_signal;
         * kra_listener_shutdown doubles as the exit status. */
        struct sockaddr_in addr;
        wait_queue_t       wait;
        struct socket     *sock;
        kra_acceptsock_t  *ras;
        int                port;
        char               name[16];
        int                rc;
        unsigned long      flags;

        /* Parent thread holds kra_nid_mutex, and is, or is about to
         * block on kra_listener_signal */

        port = kranal_tunables.kra_port;
        snprintf(name, sizeof(name), "kranal_lstn%03d", port);
        kportal_daemonize(name);
        kportal_blockallsigs();

        init_waitqueue_entry(&wait, current);

        rc = kranal_create_sock(&sock);
        if (rc != 0)
                goto out_0;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family      = AF_INET;
        addr.sin_port        = htons(port);
        addr.sin_addr.s_addr = INADDR_ANY;

        rc = sock->ops->bind(sock, (struct sockaddr *)&addr, sizeof(addr));
        if (rc != 0) {
                CERROR("Can't bind to port %d\n", port);
                goto out_1;
        }

        rc = sock->ops->listen(sock, kranal_tunables.kra_backlog);
        if (rc != 0) {
                CERROR("Can't set listen backlog %d: %d\n",
                       kranal_tunables.kra_backlog, rc);
                goto out_1;
        }

        LASSERT (kranal_data.kra_listener_sock == NULL);
        kranal_data.kra_listener_sock = sock;

        /* unblock waiting parent */
        LASSERT (kranal_data.kra_listener_shutdown == 0);
        up(&kranal_data.kra_listener_signal);

        /* Wake me any time something happens on my socket */
        add_wait_queue(sock->sk->sk_sleep, &wait);
        ras = NULL;

        while (kranal_data.kra_listener_shutdown == 0) {

                /* (re)allocate an accept descriptor; the previous one
                 * was handed off to the connd (or this is the first pass) */
                if (ras == NULL) {
                        PORTAL_ALLOC(ras, sizeof(*ras));
                        if (ras == NULL) {
                                CERROR("Out of Memory: pausing...\n");
                                kranal_pause(HZ);
                                continue;
                        }
                        ras->ras_sock = NULL;
                }

                if (ras->ras_sock == NULL) {
                        ras->ras_sock = sock_alloc();
                        if (ras->ras_sock == NULL) {
                                CERROR("Can't allocate socket: pausing...\n");
                                kranal_pause(HZ);
                                continue;
                        }
                        /* XXX this should add a ref to sock->ops->owner, if
                         * TCP could be a module */
                        ras->ras_sock->type = sock->type;
                        ras->ras_sock->ops = sock->ops;
                }

                /* set state before the non-blocking accept so a wakeup
                 * between accept and schedule() isn't lost */
                set_current_state(TASK_INTERRUPTIBLE);

                rc = sock->ops->accept(sock, ras->ras_sock, O_NONBLOCK);

                /* Sleep for socket activity? */
                if (rc == -EAGAIN &&
                    kranal_data.kra_listener_shutdown == 0)
                        schedule();

                set_current_state(TASK_RUNNING);

                if (rc == 0) {
                        /* hand the accepted socket to the connd */
                        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);

                        list_add_tail(&ras->ras_list,
                                      &kranal_data.kra_connd_acceptq);

                        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
                        wake_up(&kranal_data.kra_connd_waitq);

                        ras = NULL;
                        continue;
                }

                if (rc != -EAGAIN) {
                        CERROR("Accept failed: %d, pausing...\n", rc);
                        kranal_pause(HZ);
                }
        }

        /* free the spare accept descriptor, if any */
        if (ras != NULL) {
                if (ras->ras_sock != NULL)
                        sock_release(ras->ras_sock);
                PORTAL_FREE(ras, sizeof(*ras));
        }

        rc = 0;
        remove_wait_queue(sock->sk->sk_sleep, &wait);
 out_1:
        sock_release(sock);
        kranal_data.kra_listener_sock = NULL;
 out_0:
        /* set completion status and unblock thread waiting for me
         * (parent on startup failure, executioner on normal shutdown) */
        kranal_data.kra_listener_shutdown = rc;
        up(&kranal_data.kra_listener_signal);

        return 0;
}
1148
1149 int
1150 kranal_start_listener (void)
1151 {
1152         long           pid;
1153         int            rc;
1154
1155         CDEBUG(D_WARNING, "Starting listener\n");
1156
1157         /* Called holding kra_nid_mutex: listener stopped */
1158         LASSERT (kranal_data.kra_listener_sock == NULL);
1159
1160         kranal_data.kra_listener_shutdown = 0;
1161         pid = kernel_thread(kranal_listener, NULL, 0);
1162         if (pid < 0) {
1163                 CERROR("Can't spawn listener: %ld\n", pid);
1164                 return (int)pid;
1165         }
1166
1167         /* Block until listener has started up. */
1168         down(&kranal_data.kra_listener_signal);
1169
1170         rc = kranal_data.kra_listener_shutdown;
1171         LASSERT ((rc != 0) == (kranal_data.kra_listener_sock == NULL));
1172
1173         CDEBUG(D_WARNING, "Listener %ld started OK\n", pid);
1174         return rc;
1175 }
1176
void
kranal_stop_listener(int clear_acceptq)
{
        /* Shut the listener thread down and wait for it to exit.  If
         * 'clear_acceptq' is set, also free any accepted sockets the
         * connd hasn't picked up yet.  Called holding kra_nid_mutex. */
        struct list_head  zombie_accepts;
        unsigned long     flags;
        kra_acceptsock_t *ras;

        CDEBUG(D_WARNING, "Stopping listener\n");

        /* Called holding kra_nid_mutex: listener running */
        LASSERT (kranal_data.kra_listener_sock != NULL);

        /* tell the listener to quit and kick it out of schedule() */
        kranal_data.kra_listener_shutdown = 1;
        wake_up_all(kranal_data.kra_listener_sock->sk->sk_sleep);

        /* Block until listener has torn down. */
        down(&kranal_data.kra_listener_signal);

        LASSERT (kranal_data.kra_listener_sock == NULL);
        CDEBUG(D_WARNING, "Listener stopped\n");

        if (!clear_acceptq)
                return;

        /* Close any unhandled accepts */
        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);

        /* splice the whole acceptq onto a private list under the lock */
        list_add(&zombie_accepts, &kranal_data.kra_connd_acceptq);
        list_del_init(&kranal_data.kra_connd_acceptq);

        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);

        while (!list_empty(&zombie_accepts)) {
                ras = list_entry(zombie_accepts.next,
                                 kra_acceptsock_t, ras_list);
                list_del(&ras->ras_list);
                kranal_free_acceptsock(ras);
        }
}
1216
int
kranal_listener_procint(ctl_table *table, int write, struct file *filp,
                        void *buffer, size_t *lenp)
{
        /* sysctl handler for the listener tunables (port/backlog): do
         * the normal integer read/write, then restart the listener if a
         * write changed the value (or the listener isn't running).  On
         * restart failure the old value is restored and retried. */
        int   *tunable = (int *)table->data;
        int    old_val;
        int    rc;

        /* No race with nal initialisation since the nal is setup all the time
         * it's loaded.  When that changes, change this! */
        LASSERT (kranal_data.kra_init == RANAL_INIT_ALL);

        down(&kranal_data.kra_nid_mutex);

        LASSERT (tunable == &kranal_tunables.kra_port ||
                 tunable == &kranal_tunables.kra_backlog);
        old_val = *tunable;

        rc = proc_dointvec(table, write, filp, buffer, lenp);

        if (write &&
            (*tunable != old_val ||
             kranal_data.kra_listener_sock == NULL)) {

                if (kranal_data.kra_listener_sock != NULL)
                        kranal_stop_listener(0);

                /* NB overwrites proc_dointvec's rc with the restart status */
                rc = kranal_start_listener();

                if (rc != 0) {
                        /* couldn't start with the new value: put the old
                         * one back and retry with that */
                        CWARN("Unable to start listener with new tunable:"
                              " reverting to old value\n");
                        *tunable = old_val;
                        kranal_start_listener();
                }
        }

        up(&kranal_data.kra_nid_mutex);

        LASSERT (kranal_data.kra_init == RANAL_INIT_ALL);
        return rc;
}
1259
int
kranal_set_mynid(ptl_nid_t nid)
{
        /* Change the local NID.  Stops the listener (clearing its
         * accept queue), bumps kra_peerstamp and installs the new NID
         * under the global lock, deletes every peer (and hence every
         * conn), then restarts the listener unless the new NID is the
         * wildcard.  Returns 0 or the listener restart status. */
        unsigned long    flags;
        lib_ni_t        *ni = &kranal_lib.libnal_ni;
        int              rc = 0;

        CDEBUG(D_NET, "setting mynid to "LPX64" (old nid="LPX64")\n",
               nid, ni->ni_pid.nid);

        down(&kranal_data.kra_nid_mutex);

        if (nid == ni->ni_pid.nid) {
                /* no change of NID */
                up(&kranal_data.kra_nid_mutex);
                return 0;
        }

        if (kranal_data.kra_listener_sock != NULL)
                kranal_stop_listener(1);

        write_lock_irqsave(&kranal_data.kra_global_lock, flags);
        kranal_data.kra_peerstamp++;
        ni->ni_pid.nid = nid;
        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

        /* Delete all existing peers and their connections after new
         * NID/connstamp set to ensure no old connections in our brave
         * new world. */
        kranal_del_peer(PTL_NID_ANY, 0);

        if (nid != PTL_NID_ANY)
                rc = kranal_start_listener();

        up(&kranal_data.kra_nid_mutex);
        return rc;
}
1297
1298 kra_peer_t *
1299 kranal_create_peer (ptl_nid_t nid)
1300 {
1301         kra_peer_t *peer;
1302
1303         LASSERT (nid != PTL_NID_ANY);
1304
1305         PORTAL_ALLOC(peer, sizeof(*peer));
1306         if (peer == NULL)
1307                 return NULL;
1308
1309         memset(peer, 0, sizeof(*peer));         /* zero flags etc */
1310
1311         peer->rap_nid = nid;
1312         atomic_set(&peer->rap_refcount, 1);     /* 1 ref for caller */
1313
1314         INIT_LIST_HEAD(&peer->rap_list);
1315         INIT_LIST_HEAD(&peer->rap_connd_list);
1316         INIT_LIST_HEAD(&peer->rap_conns);
1317         INIT_LIST_HEAD(&peer->rap_tx_queue);
1318
1319         peer->rap_reconnect_time = CURRENT_SECONDS;
1320         peer->rap_reconnect_interval = RANAL_MIN_RECONNECT_INTERVAL;
1321
1322         atomic_inc(&kranal_data.kra_npeers);
1323         return peer;
1324 }
1325
void
kranal_destroy_peer (kra_peer_t *peer)
{
        /* Free a peer whose last reference has gone.  It must already be
         * out of the peer table with all of its lists empty. */
        CDEBUG(D_NET, "peer "LPX64" %p deleted\n", peer->rap_nid, peer);

        LASSERT (atomic_read(&peer->rap_refcount) == 0);
        LASSERT (peer->rap_persistence == 0);
        LASSERT (!kranal_peer_active(peer));
        LASSERT (!peer->rap_connecting);
        LASSERT (list_empty(&peer->rap_conns));
        LASSERT (list_empty(&peer->rap_tx_queue));
        LASSERT (list_empty(&peer->rap_connd_list));

        PORTAL_FREE(peer, sizeof(*peer));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */
        atomic_dec(&kranal_data.kra_npeers);
}
1347
1348 kra_peer_t *
1349 kranal_find_peer_locked (ptl_nid_t nid)
1350 {
1351         struct list_head *peer_list = kranal_nid2peerlist(nid);
1352         struct list_head *tmp;
1353         kra_peer_t       *peer;
1354
1355         list_for_each (tmp, peer_list) {
1356
1357                 peer = list_entry(tmp, kra_peer_t, rap_list);
1358
1359                 LASSERT (peer->rap_persistence > 0 ||     /* persistent peer */
1360                          !list_empty(&peer->rap_conns));  /* active conn */
1361
1362                 if (peer->rap_nid != nid)
1363                         continue;
1364
1365                 CDEBUG(D_NET, "got peer [%p] -> "LPX64" (%d)\n",
1366                        peer, nid, atomic_read(&peer->rap_refcount));
1367                 return peer;
1368         }
1369         return NULL;
1370 }
1371
1372 kra_peer_t *
1373 kranal_find_peer (ptl_nid_t nid)
1374 {
1375         kra_peer_t     *peer;
1376
1377         read_lock(&kranal_data.kra_global_lock);
1378         peer = kranal_find_peer_locked(nid);
1379         if (peer != NULL)                       /* +1 ref for caller? */
1380                 kranal_peer_addref(peer);
1381         read_unlock(&kranal_data.kra_global_lock);
1382
1383         return peer;
1384 }
1385
void
kranal_unlink_peer_locked (kra_peer_t *peer)
{
        /* Remove 'peer' from the peer table and drop the table's
         * reference.  Caller holds the global write lock; the peer must
         * be in the table with no persistence and no conns. */
        LASSERT (peer->rap_persistence == 0);
        LASSERT (list_empty(&peer->rap_conns));

        LASSERT (kranal_peer_active(peer));
        list_del_init(&peer->rap_list);

        /* lose peerlist's ref */
        kranal_peer_decref(peer);
}
1398
1399 int
1400 kranal_get_peer_info (int index, ptl_nid_t *nidp, __u32 *ipp, int *portp, 
1401                       int *persistencep)
1402 {
1403         kra_peer_t        *peer;
1404         struct list_head  *ptmp;
1405         int                i;
1406
1407         read_lock(&kranal_data.kra_global_lock);
1408
1409         for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
1410
1411                 list_for_each(ptmp, &kranal_data.kra_peers[i]) {
1412
1413                         peer = list_entry(ptmp, kra_peer_t, rap_list);
1414                         LASSERT (peer->rap_persistence > 0 ||
1415                                  !list_empty(&peer->rap_conns));
1416
1417                         if (index-- > 0)
1418                                 continue;
1419
1420                         *nidp = peer->rap_nid;
1421                         *ipp = peer->rap_ip;
1422                         *portp = peer->rap_port;
1423                         *persistencep = peer->rap_persistence;
1424
1425                         read_unlock(&kranal_data.kra_global_lock);
1426                         return 0;
1427                 }
1428         }
1429
1430         read_unlock(&kranal_data.kra_global_lock);
1431         return -ENOENT;
1432 }
1433
1434 int
1435 kranal_add_persistent_peer (ptl_nid_t nid, __u32 ip, int port)
1436 {
1437         unsigned long      flags;
1438         kra_peer_t        *peer;
1439         kra_peer_t        *peer2;
1440
1441         if (nid == PTL_NID_ANY)
1442                 return -EINVAL;
1443
1444         peer = kranal_create_peer(nid);
1445         if (peer == NULL)
1446                 return -ENOMEM;
1447
1448         write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1449
1450         peer2 = kranal_find_peer_locked(nid);
1451         if (peer2 != NULL) {
1452                 kranal_peer_decref(peer);
1453                 peer = peer2;
1454         } else {
1455                 /* peer table takes existing ref on peer */
1456                 list_add_tail(&peer->rap_list,
1457                               kranal_nid2peerlist(nid));
1458         }
1459
1460         peer->rap_ip = ip;
1461         peer->rap_port = port;
1462         peer->rap_persistence++;
1463
1464         write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
1465         return 0;
1466 }
1467
void
kranal_del_peer_locked (kra_peer_t *peer, int single_share)
{
        /* Drop one persistence share (or all of them if !single_share).
         * When persistence reaches zero: unlink the peer immediately if
         * it has no conns, otherwise close its conns and let the last
         * conn closure unlink it.  Caller holds the global write lock. */
        struct list_head *ctmp;
        struct list_head *cnxt;
        kra_conn_t       *conn;

        if (!single_share)
                peer->rap_persistence = 0;
        else if (peer->rap_persistence > 0)
                peer->rap_persistence--;

        if (peer->rap_persistence != 0)
                return;

        if (list_empty(&peer->rap_conns)) {
                kranal_unlink_peer_locked(peer);
        } else {
                list_for_each_safe(ctmp, cnxt, &peer->rap_conns) {
                        conn = list_entry(ctmp, kra_conn_t, rac_list);

                        kranal_close_conn_locked(conn, 0);
                }
                /* peer unlinks itself when last conn is closed */
        }
}
1494
int
kranal_del_peer (ptl_nid_t nid, int single_share)
{
        /* Delete the peer(s) matching 'nid' (every peer if
         * PTL_NID_ANY).  With 'single_share' only one persistence share
         * of the first match is dropped.  Returns 0 if anything
         * matched, -ENOENT otherwise. */
        unsigned long      flags;
        struct list_head  *ptmp;
        struct list_head  *pnxt;
        kra_peer_t        *peer;
        int                lo;
        int                hi;
        int                i;
        int                rc = -ENOENT;

        write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        /* a specific NID restricts the scan to one hash bucket */
        if (nid != PTL_NID_ANY)
                lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
        else {
                lo = 0;
                hi = kranal_data.kra_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
                        peer = list_entry(ptmp, kra_peer_t, rap_list);
                        LASSERT (peer->rap_persistence > 0 ||
                                 !list_empty(&peer->rap_conns));

                        if (!(nid == PTL_NID_ANY || peer->rap_nid == nid))
                                continue;

                        kranal_del_peer_locked(peer, single_share);
                        rc = 0;         /* matched something */

                        if (single_share)
                                goto out;
                }
        }
 out:
        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

        return rc;
}
1537
kra_conn_t *
kranal_get_conn_by_idx (int index)
{
        /* Return the index'th connection across all peers (hash-bucket
         * order) with a reference held for the caller, or NULL if there
         * are fewer than index+1 connections. */
        kra_peer_t        *peer;
        struct list_head  *ptmp;
        kra_conn_t        *conn;
        struct list_head  *ctmp;
        int                i;

        read_lock (&kranal_data.kra_global_lock);

        for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
                list_for_each (ptmp, &kranal_data.kra_peers[i]) {

                        peer = list_entry(ptmp, kra_peer_t, rap_list);
                        LASSERT (peer->rap_persistence > 0 ||
                                 !list_empty(&peer->rap_conns));

                        list_for_each (ctmp, &peer->rap_conns) {
                                /* skip until the requested index */
                                if (index-- > 0)
                                        continue;

                                conn = list_entry(ctmp, kra_conn_t, rac_list);
                                CDEBUG(D_NET, "++conn[%p] -> "LPX64" (%d)\n",
                                       conn, conn->rac_peer->rap_nid,
                                       atomic_read(&conn->rac_refcount));
                                /* +1 ref for caller */
                                atomic_inc(&conn->rac_refcount);
                                read_unlock(&kranal_data.kra_global_lock);
                                return conn;
                        }
                }
        }

        read_unlock(&kranal_data.kra_global_lock);
        return NULL;
}
1574
1575 int
1576 kranal_close_peer_conns_locked (kra_peer_t *peer, int why)
1577 {
1578         kra_conn_t         *conn;
1579         struct list_head   *ctmp;
1580         struct list_head   *cnxt;
1581         int                 count = 0;
1582
1583         list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
1584                 conn = list_entry(ctmp, kra_conn_t, rac_list);
1585
1586                 count++;
1587                 kranal_close_conn_locked(conn, why);
1588         }
1589
1590         return count;
1591 }
1592
int
kranal_close_matching_conns (ptl_nid_t nid)
{
        /* Close all connections to 'nid' (or to every peer if
         * PTL_NID_ANY).  Returns 0 for wildcards or when a specific NID
         * matched at least one conn, -ENOENT otherwise. */
        unsigned long       flags;
        kra_peer_t         *peer;
        struct list_head   *ptmp;
        struct list_head   *pnxt;
        int                 lo;
        int                 hi;
        int                 i;
        int                 count = 0;

        write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        /* a specific NID restricts the scan to one hash bucket */
        if (nid != PTL_NID_ANY)
                lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
        else {
                lo = 0;
                hi = kranal_data.kra_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {

                        peer = list_entry(ptmp, kra_peer_t, rap_list);
                        LASSERT (peer->rap_persistence > 0 ||
                                 !list_empty(&peer->rap_conns));

                        if (!(nid == PTL_NID_ANY || nid == peer->rap_nid))
                                continue;

                        count += kranal_close_peer_conns_locked(peer, 0);
                }
        }

        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

        /* wildcards always succeed */
        if (nid == PTL_NID_ANY)
                return 0;

        return (count == 0) ? -ENOENT : 0;
}
1636
int
kranal_cmd(struct portals_cfg *pcfg, void * private)
{
        /* Dispatch a portals NAL control command.  Returns 0 on
         * success, -EINVAL for unknown commands/bad arguments, or the
         * handler's error code. */
        int rc = -EINVAL;

        LASSERT (pcfg != NULL);

        switch(pcfg->pcfg_command) {
        case NAL_CMD_GET_PEER: {
                /* report the pcfg_count'th peer's NID/IP/port/shares */
                ptl_nid_t   nid = 0;
                __u32       ip = 0;
                int         port = 0;
                int         share_count = 0;

                rc = kranal_get_peer_info(pcfg->pcfg_count,
                                          &nid, &ip, &port, &share_count);
                pcfg->pcfg_nid   = nid;
                pcfg->pcfg_size  = 0;
                pcfg->pcfg_id    = ip;
                pcfg->pcfg_misc  = port;
                pcfg->pcfg_count = 0;
                pcfg->pcfg_wait  = share_count;
                break;
        }
        case NAL_CMD_ADD_PEER: {
                rc = kranal_add_persistent_peer(pcfg->pcfg_nid,
                                                pcfg->pcfg_id, /* IP */
                                                pcfg->pcfg_misc); /* port */
                break;
        }
        case NAL_CMD_DEL_PEER: {
                rc = kranal_del_peer(pcfg->pcfg_nid,
                                     /* flags == single_share */
                                     pcfg->pcfg_flags != 0);
                break;
        }
        case NAL_CMD_GET_CONN: {
                /* report the pcfg_count'th connection */
                kra_conn_t *conn = kranal_get_conn_by_idx(pcfg->pcfg_count);

                if (conn == NULL)
                        rc = -ENOENT;
                else {
                        rc = 0;
                        pcfg->pcfg_nid   = conn->rac_peer->rap_nid;
                        pcfg->pcfg_id    = conn->rac_device->rad_id;
                        pcfg->pcfg_misc  = 0;
                        pcfg->pcfg_flags = 0;
                        /* drop the ref kranal_get_conn_by_idx() took */
                        kranal_conn_decref(conn);
                }
                break;
        }
        case NAL_CMD_CLOSE_CONNECTION: {
                rc = kranal_close_matching_conns(pcfg->pcfg_nid);
                break;
        }
        case NAL_CMD_REGISTER_MYNID: {
                if (pcfg->pcfg_nid == PTL_NID_ANY)
                        rc = -EINVAL;
                else
                        rc = kranal_set_mynid(pcfg->pcfg_nid);
                break;
        }
        }

        return rc;
}
1703
1704 void
1705 kranal_free_txdescs(struct list_head *freelist)
1706 {
1707         kra_tx_t    *tx;
1708
1709         while (!list_empty(freelist)) {
1710                 tx = list_entry(freelist->next, kra_tx_t, tx_list);
1711
1712                 list_del(&tx->tx_list);
1713                 PORTAL_FREE(tx->tx_phys, PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
1714                 PORTAL_FREE(tx, sizeof(*tx));
1715         }
1716 }
1717
int
kranal_alloc_txdescs(struct list_head *freelist, int n)
{
        /* Allocate 'n' tx descriptors onto 'freelist', each with a
         * PTL_MD_MAX_IOV-entry physical-page array.  'freelist' must be
         * one of the two idle-tx lists and must be empty.  On failure
         * everything allocated so far is freed and -ENOMEM returned. */
        int            isnblk = (freelist == &kranal_data.kra_idle_nblk_txs);
        int            i;
        kra_tx_t      *tx;

        LASSERT (freelist == &kranal_data.kra_idle_txs ||
                 freelist == &kranal_data.kra_idle_nblk_txs);
        LASSERT (list_empty(freelist));

        for (i = 0; i < n; i++) {

                PORTAL_ALLOC(tx, sizeof(*tx));
                if (tx == NULL) {
                        CERROR("Can't allocate %stx[%d]\n",
                               isnblk ? "nblk " : "", i);
                        /* free the descriptors already on the list */
                        kranal_free_txdescs(freelist);
                        return -ENOMEM;
                }

                PORTAL_ALLOC(tx->tx_phys,
                             PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
                if (tx->tx_phys == NULL) {
                        CERROR("Can't allocate %stx[%d]->tx_phys\n",
                               isnblk ? "nblk " : "", i);

                        /* this tx isn't on the list yet; free it directly */
                        PORTAL_FREE(tx, sizeof(*tx));
                        kranal_free_txdescs(freelist);
                        return -ENOMEM;
                }

                tx->tx_isnblk = isnblk;
                tx->tx_buftype = RANAL_BUF_NONE;
                tx->tx_msg.ram_type = RANAL_MSG_NONE;

                list_add(&tx->tx_list, freelist);
        }

        return 0;
}
1759
1760 int
1761 kranal_device_init(int id, kra_device_t *dev)
1762 {
1763         const int         total_ntx = RANAL_NTX + RANAL_NTX_NBLK;
1764         RAP_RETURN        rrc;
1765
1766         dev->rad_id = id;
1767         rrc = RapkGetDeviceByIndex(id, kranal_device_callback,
1768                                    &dev->rad_handle);
1769         if (rrc != RAP_SUCCESS) {
1770                 CERROR("Can't get Rapidarray Device %d: %d\n", id, rrc);
1771                 goto failed_0;
1772         }
1773
1774         rrc = RapkReserveRdma(dev->rad_handle, total_ntx);
1775         if (rrc != RAP_SUCCESS) {
1776                 CERROR("Can't reserve %d RDMA descriptors"
1777                        " for device %d: %d\n", total_ntx, id, rrc);
1778                 goto failed_1;
1779         }
1780
1781         rrc = RapkCreateCQ(dev->rad_handle, total_ntx, RAP_CQTYPE_SEND,
1782                            &dev->rad_rdma_cqh);
1783         if (rrc != RAP_SUCCESS) {
1784                 CERROR("Can't create rdma cq size %d"
1785                        " for device %d: %d\n", total_ntx, id, rrc);
1786                 goto failed_1;
1787         }
1788
1789         rrc = RapkCreateCQ(dev->rad_handle, RANAL_FMA_CQ_SIZE, RAP_CQTYPE_RECV,
1790                            &dev->rad_fma_cqh);
1791         if (rrc != RAP_SUCCESS) {
1792                 CERROR("Can't create fma cq size %d"
1793                        " for device %d: %d\n", RANAL_FMA_CQ_SIZE, id, rrc);
1794                 goto failed_2;
1795         }
1796
1797         return 0;
1798
1799  failed_2:
1800         RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
1801  failed_1:
1802         RapkReleaseDevice(dev->rad_handle);
1803  failed_0:
1804         return -ENODEV;
1805 }
1806
/* Release all RapidArray resources held by 'dev': both completion queues,
 * then the device handle itself.  The device's scheduler thread must
 * already have exited (rad_scheduler cleared). */
void
kranal_device_fini(kra_device_t *dev)
{
        LASSERT(dev->rad_scheduler == NULL);
        RapkDestroyCQ(dev->rad_handle, dev->rad_fma_cqh);
        RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
        RapkReleaseDevice(dev->rad_handle);
}
1815
/* Tear the NAL down.  If other modules still reference the NI this just
 * drops the module ref; otherwise it unwinds everything
 * kranal_api_startup() set up, in reverse order of initialisation
 * (tracked by kranal_data.kra_init). */
void
kranal_api_shutdown (nal_t *nal)
{
        int           i;
        unsigned long flags;
        
        if (nal->nal_refct != 0) {
                /* This module got the first ref */
                PORTAL_MODULE_UNUSE;
                return;
        }

        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
               atomic_read(&portal_kmemory));

        LASSERT (nal == &kranal_api);

        /* Unwind from however far startup got; each case falls through to
         * undo the earlier stages as well. */
        switch (kranal_data.kra_init) {
        default:
                CERROR("Unexpected state %d\n", kranal_data.kra_init);
                LBUG();

        case RANAL_INIT_ALL:
                /* stop calls to nal_cmd */
                libcfs_nal_cmd_unregister(RANAL);
                /* No new persistent peers */

                /* resetting my NID to unadvertises me, removes my
                 * listener and nukes all current peers */
                kranal_set_mynid(PTL_NID_ANY);
                /* no new peers or conns */

                /* Wait for all peer/conn state to clean up */
                i = 2;
                while (atomic_read(&kranal_data.kra_nconns) != 0 ||
                       atomic_read(&kranal_data.kra_npeers) != 0) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                               "waiting for %d peers and %d conns to close down\n",
                               atomic_read(&kranal_data.kra_npeers),
                               atomic_read(&kranal_data.kra_nconns));
                        kranal_pause(HZ);
                }
                /* fall through */

        case RANAL_INIT_LIB:
                lib_fini(&kranal_lib);
                /* fall through */

        case RANAL_INIT_DATA:
                break;
        }

        /* flag threads to terminate; wake and wait for them to die */
        kranal_data.kra_shutdown = 1;

        /* wake the scheduler of every device that initialised */
        for (i = 0; i < kranal_data.kra_ndevs; i++) {
                kra_device_t *dev = &kranal_data.kra_devices[i];

                LASSERT (list_empty(&dev->rad_connq));

                spin_lock_irqsave(&dev->rad_lock, flags);
                wake_up(&dev->rad_waitq);
                spin_unlock_irqrestore(&dev->rad_lock, flags);
        }

        /* wake the reaper */
        spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
        wake_up_all(&kranal_data.kra_reaper_waitq);
        spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);

        /* wake all connection daemons */
        LASSERT (list_empty(&kranal_data.kra_connd_peers));
        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags); 
        wake_up_all(&kranal_data.kra_connd_waitq);
        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags); 

        /* poll (1s intervals) until every thread has exited */
        i = 2;
        while (atomic_read(&kranal_data.kra_nthreads) != 0) {
                i++;
                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                       "Waiting for %d threads to terminate\n",
                       atomic_read(&kranal_data.kra_nthreads));
                kranal_pause(HZ);
        }

        /* all peers are gone: free the (now empty) peer hash table */
        LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
        if (kranal_data.kra_peers != NULL) {
                for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
                        LASSERT (list_empty(&kranal_data.kra_peers[i]));

                PORTAL_FREE(kranal_data.kra_peers,
                            sizeof (struct list_head) * 
                            kranal_data.kra_peer_hash_size);
        }

        /* likewise for the connection hash table */
        LASSERT (atomic_read(&kranal_data.kra_nconns) == 0);
        if (kranal_data.kra_conns != NULL) {
                for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
                        LASSERT (list_empty(&kranal_data.kra_conns[i]));

                PORTAL_FREE(kranal_data.kra_conns,
                            sizeof (struct list_head) * 
                            kranal_data.kra_conn_hash_size);
        }

        /* release the RapidArray devices, then the idle tx descriptors */
        for (i = 0; i < kranal_data.kra_ndevs; i++)
                kranal_device_fini(&kranal_data.kra_devices[i]);

        kranal_free_txdescs(&kranal_data.kra_idle_txs);
        kranal_free_txdescs(&kranal_data.kra_idle_nblk_txs);

        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
               atomic_read(&portal_kmemory));
        printk(KERN_INFO "Lustre: RapidArray NAL unloaded (final mem %d)\n",
               atomic_read(&portal_kmemory));

        kranal_data.kra_init = RANAL_INIT_NOTHING;
}
1933
1934 int
1935 kranal_api_startup (nal_t *nal, ptl_pid_t requested_pid,
1936                     ptl_ni_limits_t *requested_limits,
1937                     ptl_ni_limits_t *actual_limits)
1938 {
1939         static int        device_ids[] = {RAPK_MAIN_DEVICE_ID,
1940                                           RAPK_EXPANSION_DEVICE_ID};
1941         struct timeval    tv;
1942         ptl_process_id_t  process_id;
1943         int               pkmem = atomic_read(&portal_kmemory);
1944         int               rc;
1945         int               i;
1946         kra_device_t     *dev;
1947
1948         LASSERT (nal == &kranal_api);
1949
1950         if (nal->nal_refct != 0) {
1951                 if (actual_limits != NULL)
1952                         *actual_limits = kranal_lib.libnal_ni.ni_actual_limits;
1953                 /* This module got the first ref */
1954                 PORTAL_MODULE_USE;
1955                 return PTL_OK;
1956         }
1957
1958         LASSERT (kranal_data.kra_init == RANAL_INIT_NOTHING);
1959
1960         memset(&kranal_data, 0, sizeof(kranal_data)); /* zero pointers, flags etc */
1961
1962         /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
1963          * a unique (for all time) connstamp so we can uniquely identify
1964          * the sender.  The connstamp is an incrementing counter
1965          * initialised with seconds + microseconds at startup time.  So we
1966          * rely on NOT creating connections more frequently on average than
1967          * 1MHz to ensure we don't use old connstamps when we reboot. */
1968         do_gettimeofday(&tv);
1969         kranal_data.kra_connstamp =
1970         kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
1971
1972         init_MUTEX(&kranal_data.kra_nid_mutex);
1973         init_MUTEX_LOCKED(&kranal_data.kra_listener_signal);
1974
1975         rwlock_init(&kranal_data.kra_global_lock);
1976
1977         for (i = 0; i < RANAL_MAXDEVS; i++ ) {
1978                 kra_device_t  *dev = &kranal_data.kra_devices[i];
1979
1980                 dev->rad_idx = i;
1981                 INIT_LIST_HEAD(&dev->rad_connq);
1982                 init_waitqueue_head(&dev->rad_waitq);
1983                 spin_lock_init(&dev->rad_lock);
1984         }
1985
1986         kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
1987         init_waitqueue_head(&kranal_data.kra_reaper_waitq);
1988         spin_lock_init(&kranal_data.kra_reaper_lock);
1989
1990         INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
1991         INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
1992         init_waitqueue_head(&kranal_data.kra_connd_waitq);
1993         spin_lock_init(&kranal_data.kra_connd_lock);
1994
1995         INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
1996         INIT_LIST_HEAD(&kranal_data.kra_idle_nblk_txs);
1997         init_waitqueue_head(&kranal_data.kra_idle_tx_waitq);
1998         spin_lock_init(&kranal_data.kra_tx_lock);
1999
2000         /* OK to call kranal_api_shutdown() to cleanup now */
2001         kranal_data.kra_init = RANAL_INIT_DATA;
2002         
2003         kranal_data.kra_peer_hash_size = RANAL_PEER_HASH_SIZE;
2004         PORTAL_ALLOC(kranal_data.kra_peers,
2005                      sizeof(struct list_head) * kranal_data.kra_peer_hash_size);
2006         if (kranal_data.kra_peers == NULL)
2007                 goto failed;
2008
2009         for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
2010                 INIT_LIST_HEAD(&kranal_data.kra_peers[i]);
2011
2012         kranal_data.kra_conn_hash_size = RANAL_PEER_HASH_SIZE;
2013         PORTAL_ALLOC(kranal_data.kra_conns,
2014                      sizeof(struct list_head) * kranal_data.kra_conn_hash_size);
2015         if (kranal_data.kra_conns == NULL)
2016                 goto failed;
2017
2018         for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
2019                 INIT_LIST_HEAD(&kranal_data.kra_conns[i]);
2020
2021         rc = kranal_alloc_txdescs(&kranal_data.kra_idle_txs, RANAL_NTX);
2022         if (rc != 0)
2023                 goto failed;
2024
2025         rc = kranal_alloc_txdescs(&kranal_data.kra_idle_nblk_txs,RANAL_NTX_NBLK);
2026         if (rc != 0)
2027                 goto failed;
2028
2029         process_id.pid = requested_pid;
2030         process_id.nid = PTL_NID_ANY;           /* don't know my NID yet */
2031
2032         rc = lib_init(&kranal_lib, nal, process_id,
2033                       requested_limits, actual_limits);
2034         if (rc != PTL_OK) {
2035                 CERROR("lib_init failed: error %d\n", rc);
2036                 goto failed;
2037         }
2038
2039         /* lib interface initialised */
2040         kranal_data.kra_init = RANAL_INIT_LIB;
2041         /*****************************************************/
2042
2043         rc = kranal_thread_start(kranal_reaper, NULL);
2044         if (rc != 0) {
2045                 CERROR("Can't spawn ranal reaper: %d\n", rc);
2046                 goto failed;
2047         }
2048
2049         for (i = 0; i < RANAL_N_CONND; i++) {
2050                 rc = kranal_thread_start(kranal_connd, (void *)(unsigned long)i);
2051                 if (rc != 0) {
2052                         CERROR("Can't spawn ranal connd[%d]: %d\n",
2053                                i, rc);
2054                         goto failed;
2055                 }
2056         }
2057
2058         LASSERT(kranal_data.kra_ndevs == 0);
2059         for (i = 0; i < sizeof(device_ids)/sizeof(device_ids[0]); i++) {
2060                 dev = &kranal_data.kra_devices[kranal_data.kra_ndevs];
2061
2062                 rc = kranal_device_init(device_ids[i], dev);
2063                 if (rc == 0)
2064                         kranal_data.kra_ndevs++;
2065
2066                 rc = kranal_thread_start(kranal_scheduler, dev);
2067                 if (rc != 0) {
2068                         CERROR("Can't spawn ranal scheduler[%d]: %d\n",
2069                                i, rc);
2070                         goto failed;
2071                 }
2072         }
2073
2074         if (kranal_data.kra_ndevs == 0)
2075                 goto failed;
2076
2077         rc = libcfs_nal_cmd_register(RANAL, &kranal_cmd, NULL);
2078         if (rc != 0) {
2079                 CERROR("Can't initialise command interface (rc = %d)\n", rc);
2080                 goto failed;
2081         }
2082
2083         /* flag everything initialised */
2084         kranal_data.kra_init = RANAL_INIT_ALL;
2085         /*****************************************************/
2086
2087         CDEBUG(D_MALLOC, "initial kmem %d\n", atomic_read(&portal_kmemory));
2088         printk(KERN_INFO "Lustre: RapidArray NAL loaded "
2089                "(initial mem %d)\n", pkmem);
2090
2091         return PTL_OK;
2092
2093  failed:
2094         kranal_api_shutdown(&kranal_api);    
2095         return PTL_FAIL;
2096 }
2097
/* Module unload: remove the sysctl entries (if they were registered),
 * shut down the network interface brought up at load time, then
 * deregister the NAL.  Teardown order is the reverse of
 * kranal_module_init(). */
void __exit
kranal_module_fini (void)
{
        if (kranal_tunables.kra_sysctl != NULL)
                unregister_sysctl_table(kranal_tunables.kra_sysctl);

        PtlNIFini(kranal_ni);

        ptl_unregister_nal(RANAL);
}
2108
2109 int __init
2110 kranal_module_init (void)
2111 {
2112         int    rc;
2113
2114         /* the following must be sizeof(int) for
2115          * proc_dointvec/kranal_listener_procint() */
2116         LASSERT (sizeof(kranal_tunables.kra_timeout) == sizeof(int));
2117         LASSERT (sizeof(kranal_tunables.kra_listener_timeout) == sizeof(int));
2118         LASSERT (sizeof(kranal_tunables.kra_backlog) == sizeof(int));
2119         LASSERT (sizeof(kranal_tunables.kra_port) == sizeof(int));
2120         LASSERT (sizeof(kranal_tunables.kra_max_immediate) == sizeof(int));
2121
2122         kranal_api.nal_ni_init = kranal_api_startup;
2123         kranal_api.nal_ni_fini = kranal_api_shutdown;
2124
2125         /* Initialise dynamic tunables to defaults once only */
2126         kranal_tunables.kra_timeout = RANAL_TIMEOUT;
2127         kranal_tunables.kra_listener_timeout = RANAL_LISTENER_TIMEOUT;
2128         kranal_tunables.kra_backlog = RANAL_BACKLOG;
2129         kranal_tunables.kra_port = RANAL_PORT;
2130         kranal_tunables.kra_max_immediate = RANAL_MAX_IMMEDIATE;
2131
2132         rc = ptl_register_nal(RANAL, &kranal_api);
2133         if (rc != PTL_OK) {
2134                 CERROR("Can't register RANAL: %d\n", rc);
2135                 return -ENOMEM;               /* or something... */
2136         }
2137
2138         /* Pure gateways want the NAL started up at module load time... */
2139         rc = PtlNIInit(RANAL, LUSTRE_SRV_PTL_PID, NULL, NULL, &kranal_ni);
2140         if (rc != PTL_OK && rc != PTL_IFACE_DUP) {
2141                 ptl_unregister_nal(RANAL);
2142                 return -ENODEV;
2143         }
2144
2145         kranal_tunables.kra_sysctl = 
2146                 register_sysctl_table(kranal_top_ctl_table, 0);
2147         if (kranal_tunables.kra_sysctl == NULL) {
2148                 CERROR("Can't register sysctl table\n");
2149                 PtlNIFini(kranal_ni);
2150                 ptl_unregister_nal(RANAL);
2151                 return -ENOMEM;
2152         }
2153
2154         return 0;
2155 }
2156
MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
MODULE_DESCRIPTION("Kernel RapidArray NAL v0.01");
MODULE_LICENSE("GPL");

/* hook module load/unload into NAL registration/teardown */
module_init(kranal_module_init);
module_exit(kranal_module_fini);