lnet/klnds/openiblnd/openiblnd.c (fs/lustre-release.git)
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Copyright (C) 2004 Cluster File Systems, Inc.
5  *   Author: Eric Barton <eric@bartonsoftware.com>
6  *
7  *   This file is part of Lustre, http://www.lustre.org.
8  *
9  *   Lustre is free software; you can redistribute it and/or
10  *   modify it under the terms of version 2 of the GNU General Public
11  *   License as published by the Free Software Foundation.
12  *
13  *   Lustre is distributed in the hope that it will be useful,
14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *   GNU General Public License for more details.
17  *
18  *   You should have received a copy of the GNU General Public License
19  *   along with Lustre; if not, write to the Free Software
20  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  */
23
24 #include "openibnal.h"
25
26 nal_t                   kibnal_api;
27 ptl_handle_ni_t         kibnal_ni;
28 kib_data_t              kibnal_data;
29 kib_tunables_t          kibnal_tunables;
30
31 #define IBNAL_SYSCTL             202
32
33 enum {
34         IBNAL_SYSCTL_TIMEOUT=1,
35         IBNAL_SYSCTL_LISTENER_TIMEOUT,
36         IBNAL_SYSCTL_BACKLOG,
37         IBNAL_SYSCTL_PORT
38 };
39
40 static ctl_table kibnal_ctl_table[] = {
41         {IBNAL_SYSCTL_TIMEOUT, "timeout", 
42          &kibnal_tunables.kib_io_timeout, sizeof (int),
43          0644, NULL, &proc_dointvec},
44         {IBNAL_SYSCTL_LISTENER_TIMEOUT, "listener_timeout", 
45          &kibnal_tunables.kib_listener_timeout, sizeof(int),
46          0644, NULL, &proc_dointvec},
47         {IBNAL_SYSCTL_BACKLOG, "backlog",
48          &kibnal_tunables.kib_backlog, sizeof(int),
49          0644, NULL, kibnal_listener_procint},
50         {IBNAL_SYSCTL_PORT, "port",
51          &kibnal_tunables.kib_port, sizeof(int),
52          0644, NULL, kibnal_listener_procint},
53         { 0 }
54 };
55
56 static ctl_table kibnal_top_ctl_table[] = {
57         {IBNAL_SYSCTL, "openibnal", NULL, 0, 0555, kibnal_ctl_table},
58         { 0 }
59 };
60
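/* Simple rotate-and-add checksum; returns 1 rather than 0 so that a zero
 * checksum on the wire can mean "no checksum". */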
61 __u32 
62 kibnal_cksum (void *ptr, int nob)
63 {
64         char  *c  = ptr;
65         __u32  sum = 0;
66
67         while (nob-- > 0)
68                 sum = ((sum << 1) | (sum >> 31)) + *c++;
69
70         /* ensure I don't return 0 (== no checksum) */
71         return (sum == 0) ? 1 : sum;
72 }
73
74 void
75 kibnal_init_msg(kib_msg_t *msg, int type, int body_nob)
76 {
77         msg->ibm_type = type;
78         msg->ibm_nob  = offsetof(kib_msg_t, ibm_u) + body_nob;
79 }
80
81 void
82 kibnal_pack_msg(kib_msg_t *msg, int credits, ptl_nid_t dstnid, __u64 dststamp)
83 {
84         /* CAVEAT EMPTOR! all message fields not set here should have been
85          * initialised previously. */
86         msg->ibm_magic    = IBNAL_MSG_MAGIC;
87         msg->ibm_version  = IBNAL_MSG_VERSION;
88         /*   ibm_type */
89         msg->ibm_credits  = credits;
90         /*   ibm_nob */
91         msg->ibm_cksum    = 0;
92         msg->ibm_srcnid   = kibnal_lib.libnal_ni.ni_pid.nid;
93         msg->ibm_srcstamp = kibnal_data.kib_incarnation;
94         msg->ibm_dstnid   = dstnid;
95         msg->ibm_dststamp = dststamp;
96 #if IBNAL_CKSUM
97         /* NB ibm_cksum zero while computing cksum */
98         msg->ibm_cksum    = kibnal_cksum(msg, msg->ibm_nob);
99 #endif
100 }
101
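/* Validate an incoming message header: check magic, version, length and
 * (optional) checksum, then byte-swap the header and the type-specific
 * payload fields if the sender's endianness differs (detected via the
 * swabbed magic).  Returns 0 on success or -EPROTO. */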
102 int
103 kibnal_unpack_msg(kib_msg_t *msg, int nob)
104 {
105         const int hdr_size = offsetof(kib_msg_t, ibm_u);
106         __u32     msg_cksum;
107         int       flip;
108         int       msg_nob;
109
110         if (nob < 6) {                  /* need magic + version at least */
111                 CERROR("Short message: %d\n", nob);
112                 return -EPROTO;
113         }
114
115         if (msg->ibm_magic == IBNAL_MSG_MAGIC) {
116                 flip = 0;
117         } else if (msg->ibm_magic == __swab32(IBNAL_MSG_MAGIC)) {
118                 flip = 1;
119         } else {
120                 CERROR("Bad magic: %08x\n", msg->ibm_magic);
121                 return -EPROTO;
122         }
123
124         if (msg->ibm_version != 
125             (flip ? __swab16(IBNAL_MSG_VERSION) : IBNAL_MSG_VERSION)) {
126                 CERROR("Bad version: %d\n", msg->ibm_version);
127                 return -EPROTO;
128         }
129
130         if (nob < hdr_size) {
131                 CERROR("Short message: %d\n", nob);
132                 return -EPROTO;
133         }
134
135         msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
136         if (msg_nob > nob) {
137                 CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
138                 return -EPROTO;
139         }
140
141         /* checksum must be computed with ibm_cksum zero and BEFORE anything
142          * gets flipped */
143         msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
144         msg->ibm_cksum = 0;
145         if (msg_cksum != 0 &&
146             msg_cksum != kibnal_cksum(msg, msg_nob)) {
147                 CERROR("Bad checksum\n");
148                 return -EPROTO;
149         }
150         msg->ibm_cksum = msg_cksum;
151         
152         if (flip) {
153                 /* leave magic unflipped as a clue to peer endianness */
154                 __swab16s(&msg->ibm_version);
155                 LASSERT (sizeof(msg->ibm_type) == 1);
156                 LASSERT (sizeof(msg->ibm_credits) == 1);
157                 msg->ibm_nob = msg_nob;
158                 __swab64s(&msg->ibm_srcnid);
159                 __swab64s(&msg->ibm_srcstamp);
160                 __swab64s(&msg->ibm_dstnid);
161                 __swab64s(&msg->ibm_dststamp);
162         }
163         
164         if (msg->ibm_srcnid == PTL_NID_ANY) {
165                 CERROR("Bad src nid: "LPX64"\n", msg->ibm_srcnid);
166                 return -EPROTO;
167         }
168
169         switch (msg->ibm_type) {
170         default:
171                 CERROR("Unknown message type %x\n", msg->ibm_type);
172                 return -EPROTO;
173                 
174         case IBNAL_MSG_SVCQRY:
175         case IBNAL_MSG_NOOP:
176                 break;
177
178         case IBNAL_MSG_SVCRSP:
179                 if (msg_nob < hdr_size + sizeof(msg->ibm_u.svcrsp)) {
180                         CERROR("Short SVCRSP: %d(%d)\n", msg_nob,
181                                (int)(hdr_size + sizeof(msg->ibm_u.svcrsp)));
182                         return -EPROTO;
183                 }
184                 if (flip) {
185                         __swab64s(&msg->ibm_u.svcrsp.ibsr_svc_id);
186                         __swab16s(&msg->ibm_u.svcrsp.ibsr_svc_pkey);
187                 }
188                 break;
189
190         case IBNAL_MSG_CONNREQ:
191         case IBNAL_MSG_CONNACK:
192                 if (msg_nob < hdr_size + sizeof(msg->ibm_u.connparams)) {
193                         CERROR("Short CONNREQ: %d(%d)\n", msg_nob,
194                                (int)(hdr_size + sizeof(msg->ibm_u.connparams)));
195                         return -EPROTO;
196                 }
197                 if (flip)
198                         __swab32s(&msg->ibm_u.connparams.ibcp_queue_depth);
199                 break;
200
201         case IBNAL_MSG_IMMEDIATE:
202                 if (msg_nob < offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0])) {
203                         CERROR("Short IMMEDIATE: %d(%d)\n", msg_nob,
204                                (int)offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]));
205                         return -EPROTO;
206                 }
207                 break;
208
209         case IBNAL_MSG_PUT_RDMA:
210         case IBNAL_MSG_GET_RDMA:
211                 if (msg_nob < hdr_size + sizeof(msg->ibm_u.rdma)) {
212                         CERROR("Short RDMA req: %d(%d)\n", msg_nob,
213                                (int)(hdr_size + sizeof(msg->ibm_u.rdma)));
214                         return -EPROTO;
215                 }
216                 if (flip) {
217                         __swab32s(&msg->ibm_u.rdma.ibrm_desc.rd_key);
218                         __swab32s(&msg->ibm_u.rdma.ibrm_desc.rd_nob);
219                         __swab64s(&msg->ibm_u.rdma.ibrm_desc.rd_addr);
220                 }
221                 break;
222
223         case IBNAL_MSG_PUT_DONE:
224         case IBNAL_MSG_GET_DONE:
225                 if (msg_nob < hdr_size + sizeof(msg->ibm_u.completion)) {
226                         CERROR("Short RDMA completion: %d(%d)\n", msg_nob,
227                                (int)(hdr_size + sizeof(msg->ibm_u.completion)));
228                         return -EPROTO;
229                 }
230                 if (flip)
231                         __swab32s(&msg->ibm_u.completion.ibcm_status);
232                 break;
233         }
234         return 0;
235 }
236
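/* Write 'nob' bytes with a single non-blocking sendmsg(); returns 0 on a
 * complete send, -EAGAIN on a partial one, or the socket error. */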
237 int
238 kibnal_sock_write (struct socket *sock, void *buffer, int nob)
239 {
240         int           rc;
241         mm_segment_t  oldmm = get_fs();
242         struct iovec  iov = {
243                 .iov_base = buffer,
244                 .iov_len  = nob
245         };
246         struct msghdr msg = {
247                 .msg_name       = NULL,
248                 .msg_namelen    = 0,
249                 .msg_iov        = &iov,
250                 .msg_iovlen     = 1,
251                 .msg_control    = NULL,
252                 .msg_controllen = 0,
253                 .msg_flags      = MSG_DONTWAIT
254         };
255
256         /* We've set up the socket's send buffer to be large enough for
257          * everything we send, so a single non-blocking send should
258          * complete without error. */
259
260         set_fs(KERNEL_DS);
261         rc = sock_sendmsg(sock, &msg, iov.iov_len);
262         set_fs(oldmm);
263
264         if (rc == nob)
265                 return 0;
266
267         if (rc >= 0)
268                 return -EAGAIN;
269
270         return rc;
271 }
272
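/* Read exactly 'nob' bytes, looping on partial receives and using
 * SO_RCVTIMEO to bound the total wait to 'timeout' seconds; returns 0 on
 * success, -ECONNABORTED on EOF, or -ETIMEDOUT when time runs out. */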
273 int
274 kibnal_sock_read (struct socket *sock, void *buffer, int nob, int timeout)
275 {
276         int            rc;
277         mm_segment_t   oldmm = get_fs();
278         long           ticks = timeout * HZ;
279         unsigned long  then;
280         struct timeval tv;
281
282         LASSERT (nob > 0);
283         LASSERT (ticks > 0);
284
285         for (;;) {
286                 struct iovec  iov = {
287                         .iov_base = buffer,
288                         .iov_len  = nob
289                 };
290                 struct msghdr msg = {
291                         .msg_name       = NULL,
292                         .msg_namelen    = 0,
293                         .msg_iov        = &iov,
294                         .msg_iovlen     = 1,
295                         .msg_control    = NULL,
296                         .msg_controllen = 0,
297                         .msg_flags      = 0
298                 };
299
300                 /* Set receive timeout to remaining time */
301                 tv = (struct timeval) {
302                         .tv_sec = ticks / HZ,
303                         .tv_usec = ((ticks % HZ) * 1000000) / HZ
304                 };
305                 set_fs(KERNEL_DS);
306                 rc = sock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
307                                      (char *)&tv, sizeof(tv));
308                 set_fs(oldmm);
309                 if (rc != 0) {
310                         CERROR("Can't set socket recv timeout %d: %d\n",
311                                timeout, rc);
312                         return rc;
313                 }
314
315                 set_fs(KERNEL_DS);
316                 then = jiffies;
317                 rc = sock_recvmsg(sock, &msg, iov.iov_len, 0);
318                 ticks -= jiffies - then;
319                 set_fs(oldmm);
320
321                 if (rc < 0)
322                         return rc;
323
324                 if (rc == 0)
325                         return -ECONNABORTED;
326
327                 buffer = ((char *)buffer) + rc;
328                 nob -= rc;
329
330                 if (nob == 0)
331                         return 0;
332
333                 if (ticks <= 0)
334                         return -ETIMEDOUT;
335         }
336 }
337
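/* Create a TCP socket with SO_REUSEADDR set and a send buffer big enough
 * for two messages, so kibnal_sock_write() never has to block. */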
338 int
339 kibnal_create_sock(struct socket **sockp)
340 {
341         struct socket       *sock;
342         int                  rc;
343         int                  option;
344         mm_segment_t         oldmm = get_fs();
345
346         rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
347         if (rc != 0) {
348                 CERROR("Can't create socket: %d\n", rc);
349                 return rc;
350         }
351
352         /* Ensure sends will not block */
353         option = 2 * sizeof(kib_msg_t);
354         set_fs(KERNEL_DS);
355         rc = sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
356                              (char *)&option, sizeof(option));
357         set_fs(oldmm);
358         if (rc != 0) {
359                 CERROR("Can't set send buffer %d: %d\n", option, rc);
360                 goto failed;
361         }
362
363         option = 1;
364         set_fs(KERNEL_DS);
365         rc = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
366                              (char *)&option, sizeof(option));
367         set_fs(oldmm);
368         if (rc != 0) {
369                 CERROR("Can't set SO_REUSEADDR: %d\n", rc);
370                 goto failed;
371         }
372
373         *sockp = sock;
374         return 0;
375
376  failed:
377         sock_release(sock);
378         return rc;
379 }
380
381 void
382 kibnal_pause(int ticks)
383 {
384         set_current_state(TASK_UNINTERRUPTIBLE);
385         schedule_timeout(ticks);
386 }
387
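/* Connect to the peer's listener, binding the local end to a reserved
 * port (1023 down to 512) so the peer can verify the connection comes
 * from a privileged process. */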
388 int
389 kibnal_connect_sock(kib_peer_t *peer, struct socket **sockp)
390 {
391         struct sockaddr_in  locaddr;
392         struct sockaddr_in  srvaddr;
393         struct socket      *sock;
394         unsigned int        port;
395         int                 rc;
396
397         for (port = 1023; port >= 512; port--) {
398
399                 memset(&locaddr, 0, sizeof(locaddr)); 
400                 locaddr.sin_family      = AF_INET; 
401                 locaddr.sin_port        = htons(port);
402                 locaddr.sin_addr.s_addr = htonl(INADDR_ANY);
403
404                 memset (&srvaddr, 0, sizeof (srvaddr));
405                 srvaddr.sin_family      = AF_INET;
406                 srvaddr.sin_port        = htons (peer->ibp_port);
407                 srvaddr.sin_addr.s_addr = htonl (peer->ibp_ip);
408
409                 rc = kibnal_create_sock(&sock);
410                 if (rc != 0)
411                         return rc;
412
413                 rc = sock->ops->bind(sock,
414                                      (struct sockaddr *)&locaddr, sizeof(locaddr));
415                 if (rc != 0) {
416                         sock_release(sock);
417                         
418                         if (rc == -EADDRINUSE) {
419                                 CDEBUG(D_NET, "Port %d already in use\n", port);
420                                 continue;
421                         }
422
423                         CERROR("Can't bind to reserved port %d: %d\n", port, rc);
424                         return rc;
425                 }
426
427                 rc = sock->ops->connect(sock,
428                                         (struct sockaddr *)&srvaddr, sizeof(srvaddr),
429                                         0);
430                 if (rc == 0) {
431                         *sockp = sock;
432                         return 0;
433                 }
434                 
435                 sock_release(sock);
436
437                 if (rc != -EADDRNOTAVAIL) {
438                         CERROR("Can't connect port %d to %u.%u.%u.%u/%d: %d\n",
439                                port, HIPQUAD(peer->ibp_ip), peer->ibp_port, rc);
440                         return rc;
441                 }
442                 
443                 CDEBUG(D_NET, "Port %d not available for %u.%u.%u.%u/%d\n", 
444                        port, HIPQUAD(peer->ibp_ip), peer->ibp_port);
445         }
446
447         /* all ports busy */
448         return -EHOSTUNREACH;
449 }
450
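/* Active side of the service query: connect to the peer over TCP, send a
 * SVCQRY and read back the SVCRSP carrying the IB service id/gid/pkey,
 * verifying the peer's NID and our own NID/incarnation stamp. */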
451 int
452 kibnal_make_svcqry (kib_conn_t *conn) 
453 {
454         kib_peer_t    *peer = conn->ibc_peer;
455         kib_msg_t     *msg;
456         struct socket *sock;
457         int            rc;
458         int            nob;
459
460         LASSERT (conn->ibc_connreq != NULL);
461         msg = &conn->ibc_connreq->cr_msg;
462
463         kibnal_init_msg(msg, IBNAL_MSG_SVCQRY, 0);
464         kibnal_pack_msg(msg, 0, peer->ibp_nid, 0);
465
466         rc = kibnal_connect_sock(peer, &sock);
467         if (rc != 0)
468                 return rc;
469         
470         rc = kibnal_sock_write(sock, msg, msg->ibm_nob);
471         if (rc != 0) {
472                 CERROR("Error %d sending svcqry to "
473                        LPX64"@%u.%u.%u.%u/%d\n", rc, 
474                        peer->ibp_nid, HIPQUAD(peer->ibp_ip), peer->ibp_port);
475                 goto out;
476         }
477
478         nob = offsetof(kib_msg_t, ibm_u) + sizeof(msg->ibm_u.svcrsp);
479         rc = kibnal_sock_read(sock, msg, nob, kibnal_tunables.kib_io_timeout);
480         if (rc != 0) {
481                 CERROR("Error %d receiving svcrsp from "
482                        LPX64"@%u.%u.%u.%u/%d\n", rc, 
483                        peer->ibp_nid, HIPQUAD(peer->ibp_ip), peer->ibp_port);
484                 goto out;
485         }
486
487         rc = kibnal_unpack_msg(msg, nob);
488         if (rc != 0) {
489                 CERROR("Error %d unpacking svcrsp from "
490                        LPX64"@%u.%u.%u.%u/%d\n", rc,
491                        peer->ibp_nid, HIPQUAD(peer->ibp_ip), peer->ibp_port);
492                 goto out;
493         }
494                        
495         if (msg->ibm_type != IBNAL_MSG_SVCRSP) {
496                 CERROR("Unexpected response type %d from "
497                        LPX64"@%u.%u.%u.%u/%d\n", msg->ibm_type, 
498                        peer->ibp_nid, HIPQUAD(peer->ibp_ip), peer->ibp_port);
499                 rc = -EPROTO;
500                 goto out;
501         }
502         
503         if (msg->ibm_dstnid != kibnal_lib.libnal_ni.ni_pid.nid ||
504             msg->ibm_dststamp != kibnal_data.kib_incarnation) {
505                 CERROR("Unexpected dst NID/stamp "LPX64"/"LPX64" from "
506                        LPX64"@%u.%u.%u.%u/%d\n", 
507                        msg->ibm_dstnid, msg->ibm_dststamp,
508                        peer->ibp_nid, HIPQUAD(peer->ibp_ip), peer->ibp_port);
509                 rc = -EPROTO;
510                 goto out;
511         }
512
513         if (msg->ibm_srcnid != peer->ibp_nid) {
514                 CERROR("Unexpected src NID "LPX64" from "
515                        LPX64"@%u.%u.%u.%u/%d\n", msg->ibm_srcnid,
516                        peer->ibp_nid, HIPQUAD(peer->ibp_ip), peer->ibp_port);
517                 rc = -EPROTO;
518                 goto out;
519         }
520
521         conn->ibc_incarnation = msg->ibm_srcstamp;
522         conn->ibc_connreq->cr_svcrsp = msg->ibm_u.svcrsp;
523  out:
524         sock_release(sock);
525         return rc;
526 }
527
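/* Passive side of the service query: validate a SVCQRY received on an
 * accepted socket (privileged source port, correct dstnid) and reply
 * with this node's IB service id, gid and pkey. */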
528 void
529 kibnal_handle_svcqry (struct socket *sock)
530 {
531         struct sockaddr_in   addr;
532         __u32                peer_ip;
533         unsigned int         peer_port;
534         kib_msg_t           *msg;
535         __u64                srcnid;
536         __u64                srcstamp;
537         int                  len;
538         int                  rc;
539
540         len = sizeof(addr);
541         rc = sock->ops->getname(sock, (struct sockaddr *)&addr, &len, 2);
542         if (rc != 0) {
543                 CERROR("Can't get peer's IP: %d\n", rc);
544                 return;
545         }
546
547         peer_ip = ntohl(addr.sin_addr.s_addr);
548         peer_port = ntohs(addr.sin_port);
549
550         if (peer_port >= 1024) {
551                 CERROR("Refusing unprivileged connection from %u.%u.%u.%u/%d\n",
552                        HIPQUAD(peer_ip), peer_port);
553                 return;
554         }
555
556         PORTAL_ALLOC(msg, sizeof(*msg));
557         if (msg == NULL) {
558                 CERROR("Can't allocate msgs for %u.%u.%u.%u/%d\n",
559                        HIPQUAD(peer_ip), peer_port);
560                 return;                 /* nothing allocated to free */
561         }
562         
563         rc = kibnal_sock_read(sock, msg, offsetof(kib_msg_t, ibm_u),
564                               kibnal_tunables.kib_listener_timeout);
565         if (rc != 0) {
566                 CERROR("Error %d receiving svcqry from %u.%u.%u.%u/%d\n",
567                        rc, HIPQUAD(peer_ip), peer_port);
568                 goto out;
569         }
570         
571         rc = kibnal_unpack_msg(msg, offsetof(kib_msg_t, ibm_u));
572         if (rc != 0) {
573                 CERROR("Error %d unpacking svcqry from %u.%u.%u.%u/%d\n",
574                        rc, HIPQUAD(peer_ip), peer_port);
575                 goto out;
576         }
577         
578         if (msg->ibm_type != IBNAL_MSG_SVCQRY) {
579                 CERROR("Unexpected message %d from %u.%u.%u.%u/%d\n",
580                        msg->ibm_type, HIPQUAD(peer_ip), peer_port);
581                 goto out;
582         }
583         
584         if (msg->ibm_dstnid != kibnal_lib.libnal_ni.ni_pid.nid) {
585                 CERROR("Unexpected dstnid "LPX64" (expected "LPX64") "
586                        "from %u.%u.%u.%u/%d\n", msg->ibm_dstnid,
587                        kibnal_lib.libnal_ni.ni_pid.nid,
588                        HIPQUAD(peer_ip), peer_port);
589                 goto out;
590         }
591
592         srcnid = msg->ibm_srcnid;
593         srcstamp = msg->ibm_srcstamp;
594         
595         kibnal_init_msg(msg, IBNAL_MSG_SVCRSP, sizeof(msg->ibm_u.svcrsp));
596
597         msg->ibm_u.svcrsp.ibsr_svc_id = kibnal_data.kib_svc_id;
598         memcpy(msg->ibm_u.svcrsp.ibsr_svc_gid, kibnal_data.kib_svc_gid,
599                sizeof(kibnal_data.kib_svc_gid));
600         msg->ibm_u.svcrsp.ibsr_svc_pkey = kibnal_data.kib_svc_pkey;
601
602         kibnal_pack_msg(msg, 0, srcnid, srcstamp);
603         
604         rc = kibnal_sock_write (sock, msg, msg->ibm_nob);
605         if (rc != 0) {
606                 CERROR("Error %d replying to svcqry from %u.%u.%u.%u/%d\n",
607                        rc, HIPQUAD(peer_ip), peer_port);
608                 goto out;
609         }
610         
611  out:
612         PORTAL_FREE(msg, sizeof(*msg));
613 }
614
615 void
616 kibnal_free_acceptsock (kib_acceptsock_t *as)
617 {
618         sock_release(as->ibas_sock);
619         PORTAL_FREE(as, sizeof(*as));
620 }
621
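/* Listener thread: accept TCP connections on the configured port and
 * queue them on kib_connd_acceptq for the connection daemon; startup and
 * shutdown are synchronised with the parent via kib_listener_signal. */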
622 int
623 kibnal_ip_listener(void *arg)
624 {
625         struct sockaddr_in addr;
626         wait_queue_t       wait;
627         struct socket     *sock;
628         kib_acceptsock_t  *as;
629         int                port;
630         char               name[16];
631         int                rc;
632         unsigned long      flags;
633
634         /* Parent thread holds kib_nid_mutex, and is, or is about to
635          * block on kib_listener_signal */
636
637         port = kibnal_tunables.kib_port;
638         snprintf(name, sizeof(name), "kibnal_lstn%03d", port);
639         kportal_daemonize(name);
640         kportal_blockallsigs();
641
642         init_waitqueue_entry(&wait, current);
643
644         rc = kibnal_create_sock(&sock);
645         if (rc != 0)
646                 goto out_0;
647
648         memset(&addr, 0, sizeof(addr));
649         addr.sin_family      = AF_INET;
650         addr.sin_port        = htons(port);
651         addr.sin_addr.s_addr = INADDR_ANY;
652
653         rc = sock->ops->bind(sock, (struct sockaddr *)&addr, sizeof(addr));
654         if (rc != 0) {
655                 CERROR("Can't bind to port %d\n", port);
656                 goto out_1;
657         }
658
659         rc = sock->ops->listen(sock, kibnal_tunables.kib_backlog);
660         if (rc != 0) {
661                 CERROR("Can't set listen backlog %d: %d\n", 
662                        kibnal_tunables.kib_backlog, rc);
663                 goto out_1;
664         }
665
666         LASSERT (kibnal_data.kib_listener_sock == NULL);
667         kibnal_data.kib_listener_sock = sock;
668
669         /* unblock waiting parent */
670         LASSERT (kibnal_data.kib_listener_shutdown == 0);
671         up(&kibnal_data.kib_listener_signal);
672
673         /* Wake me any time something happens on my socket */
674         add_wait_queue(sock->sk->sk_sleep, &wait);
675         as = NULL;
676
677         while (kibnal_data.kib_listener_shutdown == 0) {
678
679                 if (as == NULL) {
680                         PORTAL_ALLOC(as, sizeof(*as));
681                         if (as == NULL) {
682                                 CERROR("Out of Memory: pausing...\n");
683                                 kibnal_pause(HZ);
684                                 continue;
685                         }
686                         as->ibas_sock = NULL;
687                 }
688
689                 if (as->ibas_sock == NULL) {
690                         as->ibas_sock = sock_alloc();
691                         if (as->ibas_sock == NULL) {
692                                 CERROR("Can't allocate socket: pausing...\n");
693                                 kibnal_pause(HZ);
694                                 continue;
695                         }
696                         /* XXX this should add a ref to sock->ops->owner, if
697                          * TCP could be a module */
698                         as->ibas_sock->type = sock->type;
699                         as->ibas_sock->ops = sock->ops;
700                 }
701                 
702                 set_current_state(TASK_INTERRUPTIBLE);
703
704                 rc = sock->ops->accept(sock, as->ibas_sock, O_NONBLOCK);
705
706                 /* Sleep for socket activity? */
707                 if (rc == -EAGAIN &&
708                     kibnal_data.kib_listener_shutdown == 0)
709                         schedule();
710
711                 set_current_state(TASK_RUNNING);
712
713                 if (rc == 0) {
714                         spin_lock_irqsave(&kibnal_data.kib_connd_lock, flags);
715                         
716                         list_add_tail(&as->ibas_list, 
717                                       &kibnal_data.kib_connd_acceptq);
718
719                         spin_unlock_irqrestore(&kibnal_data.kib_connd_lock, flags);
720                         wake_up(&kibnal_data.kib_connd_waitq);
721
722                         as = NULL;
723                         continue;
724                 }
725                 
726                 if (rc != -EAGAIN) {
727                         CERROR("Accept failed: %d, pausing...\n", rc);
728                         kibnal_pause(HZ);
729                 }
730         }
731
732         if (as != NULL) {
733                 if (as->ibas_sock != NULL)
734                         sock_release(as->ibas_sock);
735                 PORTAL_FREE(as, sizeof(*as));
736         }
737
738         rc = 0;
739         remove_wait_queue(sock->sk->sk_sleep, &wait);
740  out_1:
741         sock_release(sock);
742         kibnal_data.kib_listener_sock = NULL;
743  out_0:
744         /* set completion status and unblock thread waiting for me 
745          * (parent on startup failure, executioner on normal shutdown) */
746         kibnal_data.kib_listener_shutdown = rc;
747         up(&kibnal_data.kib_listener_signal);
748
749         return 0;
750 }
751
752 int
753 kibnal_start_ip_listener (void)
754 {
755         long           pid;
756         int            rc;
757
758         CDEBUG(D_NET, "Starting listener\n");
759
760         /* Called holding kib_nid_mutex: listener stopped */
761         LASSERT (kibnal_data.kib_listener_sock == NULL);
762
763         kibnal_data.kib_listener_shutdown = 0;
764         pid = kernel_thread(kibnal_ip_listener, NULL, 0);
765         if (pid < 0) {
766                 CERROR("Can't spawn listener: %ld\n", pid);
767                 return (int)pid;
768         }
769
770         /* Block until listener has started up. */
771         down(&kibnal_data.kib_listener_signal);
772
773         rc = kibnal_data.kib_listener_shutdown;
774         LASSERT ((rc != 0) == (kibnal_data.kib_listener_sock == NULL));
775
776         CDEBUG((rc == 0) ? D_WARNING : D_ERROR, 
777                "Listener %s: pid:%ld port:%d backlog:%d\n", 
778                (rc == 0) ? "started OK" : "startup failed",
779                pid, kibnal_tunables.kib_port, kibnal_tunables.kib_backlog);
780
781         return rc;
782 }
783
784 void
785 kibnal_stop_ip_listener(int clear_acceptq)
786 {
787         struct list_head  zombie_accepts;
788         kib_acceptsock_t *as;
789         unsigned long     flags;
790
791         CDEBUG(D_NET, "Stopping listener\n");
792
793         /* Called holding kib_nid_mutex: listener running */
794         LASSERT (kibnal_data.kib_listener_sock != NULL);
795
796         kibnal_data.kib_listener_shutdown = 1;
797         wake_up_all(kibnal_data.kib_listener_sock->sk->sk_sleep);
798
799         /* Block until listener has torn down. */
800         down(&kibnal_data.kib_listener_signal);
801
802         LASSERT (kibnal_data.kib_listener_sock == NULL);
803         CDEBUG(D_WARNING, "Listener stopped\n");
804
805         if (!clear_acceptq)
806                 return;
807
808         /* Close any unhandled accepts */
809         spin_lock_irqsave(&kibnal_data.kib_connd_lock, flags);
810
811         list_add(&zombie_accepts, &kibnal_data.kib_connd_acceptq);
812         list_del_init(&kibnal_data.kib_connd_acceptq);
813
814         spin_unlock_irqrestore(&kibnal_data.kib_connd_lock, flags);
815         
816         while (!list_empty(&zombie_accepts)) {
817                 as = list_entry(zombie_accepts.next,
818                                 kib_acceptsock_t, ibas_list);
819                 list_del(&as->ibas_list);
820                 kibnal_free_acceptsock(as);
821         }
822 }
823
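/* sysctl write handler for the "port" and "backlog" tunables: update the
 * value via proc_dointvec() and restart the IP listener so a changed
 * value takes effect, reverting to the old value if the restart fails. */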
824 int 
825 kibnal_listener_procint(ctl_table *table, int write, struct file *filp,
826                         void *buffer, size_t *lenp)
827 {
828         int   *tunable = (int *)table->data;
829         int    old_val;
830         int    rc;
831
832         /* No race with NAL initialisation since the NAL is set up for the
833          * whole time it's loaded.  When that changes, change this! */
834         LASSERT (kibnal_data.kib_init == IBNAL_INIT_ALL);
835
836         down(&kibnal_data.kib_nid_mutex);
837
838         LASSERT (tunable == &kibnal_tunables.kib_port ||
839                  tunable == &kibnal_tunables.kib_backlog);
840         old_val = *tunable;
841
842         rc = proc_dointvec(table, write, filp, buffer, lenp);
843
844         if (write &&
845             (*tunable != old_val ||
846              kibnal_data.kib_listener_sock == NULL)) {
847
848                 if (kibnal_data.kib_listener_sock != NULL)
849                         kibnal_stop_ip_listener(0);
850
851                 rc = kibnal_start_ip_listener();
852                 if (rc != 0) {
853                         CERROR("Unable to restart listener with new tunable:"
854                                " reverting to old value\n");
855                         *tunable = old_val;
856                         kibnal_start_ip_listener();
857                 }
858         }
859
860         up(&kibnal_data.kib_nid_mutex);
861
862         LASSERT (kibnal_data.kib_init == IBNAL_INIT_ALL);
863         return rc;
864 }
865
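/* Register the IB CM listener: assign a service id, cache the local
 * gid/pkey returned in SVCRSP replies, and listen for connection
 * requests via kibnal_passive_conn_callback(). */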
866 int
867 kibnal_start_ib_listener (void) 
868 {
869         int    rc;
870
871         LASSERT (kibnal_data.kib_listen_handle == NULL);
872
873         kibnal_data.kib_svc_id = ib_cm_service_assign();
874         CDEBUG(D_NET, "svc id "LPX64"\n", kibnal_data.kib_svc_id);
875
876         rc = ib_cached_gid_get(kibnal_data.kib_device,
877                                kibnal_data.kib_port, 0,
878                                kibnal_data.kib_svc_gid);
879         if (rc != 0) {
880                 CERROR("Can't get port %d GID: %d\n",
881                        kibnal_data.kib_port, rc);
882                 return rc;
883         }
884         
885         rc = ib_cached_pkey_get(kibnal_data.kib_device,
886                                 kibnal_data.kib_port, 0,
887                                 &kibnal_data.kib_svc_pkey);
888         if (rc != 0) {
889                 CERROR ("Can't get port %d PKEY: %d\n",
890                         kibnal_data.kib_port, rc);
891                 return rc;
892         }
893
894         rc = ib_cm_listen(kibnal_data.kib_svc_id,
895                           TS_IB_CM_SERVICE_EXACT_MASK,
896                           kibnal_passive_conn_callback, NULL,
897                           &kibnal_data.kib_listen_handle);
898         if (rc != 0) {
899                 kibnal_data.kib_listen_handle = NULL;
900                 CERROR ("Can't create IB listener: %d\n", rc);
901                 return rc;
902         }
903         
904         LASSERT (kibnal_data.kib_listen_handle != NULL);
905         return 0;
906 }
907
908 void
909 kibnal_stop_ib_listener (void) 
910 {
911         int    rc;
912         
913         LASSERT (kibnal_data.kib_listen_handle != NULL);
914
915         rc = ib_cm_listen_stop (kibnal_data.kib_listen_handle);
916         if (rc != 0)
917                 CERROR("Error stopping IB listener: %d\n", rc);
918                 
919         kibnal_data.kib_listen_handle = NULL;
920 }
921
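/* Install a new local NID: stop the IP and IB listeners, bump the
 * incarnation and delete all peers so no stale connections survive, then
 * restart both listeners if the new NID is valid. */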
922 int
923 kibnal_set_mynid (ptl_nid_t nid)
924 {
925         lib_ni_t         *ni = &kibnal_lib.libnal_ni;
926         int               rc;
927
928         CDEBUG(D_IOCTL, "setting mynid to "LPX64" (old nid="LPX64")\n",
929                nid, ni->ni_pid.nid);
930
931         down (&kibnal_data.kib_nid_mutex);
932
933         if (nid == kibnal_data.kib_nid) {
934                 /* no change of NID */
935                 up (&kibnal_data.kib_nid_mutex);
936                 return (0);
937         }
938
939         CDEBUG(D_NET, "NID "LPX64"("LPX64")\n",
940                kibnal_data.kib_nid, nid);
941
942         if (kibnal_data.kib_listener_sock != NULL)
943                 kibnal_stop_ip_listener(1);
944         
945         if (kibnal_data.kib_listen_handle != NULL)
946                 kibnal_stop_ib_listener();
947
948         ni->ni_pid.nid = nid;
949         kibnal_data.kib_incarnation++;
950         mb();
951         /* Delete all existing peers and their connections after new
952          * NID/incarnation set to ensure no old connections in our brave new
953          * world. */
954         kibnal_del_peer (PTL_NID_ANY, 0);
955
956         if (ni->ni_pid.nid != PTL_NID_ANY) {
957                 /* got a new NID to install */
958                 rc = kibnal_start_ib_listener();
959                 if (rc != 0) {
960                         CERROR("Can't start IB listener: %d\n", rc);
961                         goto failed_0;
962                 }
963         
964                 rc = kibnal_start_ip_listener();
965                 if (rc != 0) {
966                         CERROR("Can't start IP listener: %d\n", rc);
967                         goto failed_1;
968                 }
969         }
970         
971         up(&kibnal_data.kib_nid_mutex);
972         return 0;
973
974  failed_1:
975         kibnal_stop_ib_listener();
976  failed_0:
977         ni->ni_pid.nid = PTL_NID_ANY;
978         kibnal_data.kib_incarnation++;
979         mb();
980         kibnal_del_peer (PTL_NID_ANY, 0);
981         up(&kibnal_data.kib_nid_mutex);
982         return rc;
983 }
984
985 kib_peer_t *
986 kibnal_create_peer (ptl_nid_t nid)
987 {
988         kib_peer_t *peer;
989
990         LASSERT (nid != PTL_NID_ANY);
991
992         PORTAL_ALLOC (peer, sizeof (*peer));
993         if (peer == NULL)
994                 return (NULL);
995
996         memset(peer, 0, sizeof(*peer));         /* zero flags etc */
997
998         peer->ibp_nid = nid;
999         atomic_set (&peer->ibp_refcount, 1);    /* 1 ref for caller */
1000
1001         INIT_LIST_HEAD (&peer->ibp_list);       /* not in the peer table yet */
1002         INIT_LIST_HEAD (&peer->ibp_conns);
1003         INIT_LIST_HEAD (&peer->ibp_tx_queue);
1004         INIT_LIST_HEAD (&peer->ibp_connd_list); /* not queued for connecting */
1005
1006         peer->ibp_reconnect_time = jiffies;
1007         peer->ibp_reconnect_interval = IBNAL_MIN_RECONNECT_INTERVAL;
1008
1009         atomic_inc (&kibnal_data.kib_npeers);
1010         CDEBUG(D_NET, "peer %p "LPX64"\n", peer, nid);
1011
1012         return (peer);
1013 }
1014
1015 void
1016 kibnal_destroy_peer (kib_peer_t *peer)
1017 {
1018         CDEBUG (D_NET, "peer "LPX64" %p deleted\n", peer->ibp_nid, peer);
1019
1020         LASSERT (atomic_read (&peer->ibp_refcount) == 0);
1021         LASSERT (peer->ibp_persistence == 0);
1022         LASSERT (!kibnal_peer_active(peer));
1023         LASSERT (peer->ibp_connecting == 0);
1024         LASSERT (list_empty (&peer->ibp_connd_list));
1025         LASSERT (list_empty (&peer->ibp_conns));
1026         LASSERT (list_empty (&peer->ibp_tx_queue));
1027
1028         PORTAL_FREE (peer, sizeof (*peer));
1029
1030         /* NB a peer's connections keep a reference on their peer until
1031          * they are destroyed, so we can be assured that _all_ state to do
1032          * with this peer has been cleaned up when its refcount drops to
1033          * zero. */
1034         atomic_dec (&kibnal_data.kib_npeers);
1035 }
1036
1037 void
1038 kibnal_put_peer (kib_peer_t *peer)
1039 {
1040         CDEBUG (D_OTHER, "putting peer[%p] -> "LPX64" (%d)\n",
1041                 peer, peer->ibp_nid,
1042                 atomic_read (&peer->ibp_refcount));
1043
1044         LASSERT (atomic_read (&peer->ibp_refcount) > 0);
1045         if (!atomic_dec_and_test (&peer->ibp_refcount))
1046                 return;
1047
1048         kibnal_destroy_peer (peer);
1049 }
1050
1051 kib_peer_t *
1052 kibnal_find_peer_locked (ptl_nid_t nid)
1053 {
1054         struct list_head *peer_list = kibnal_nid2peerlist (nid);
1055         struct list_head *tmp;
1056         kib_peer_t       *peer;
1057
1058         list_for_each (tmp, peer_list) {
1059
1060                 peer = list_entry (tmp, kib_peer_t, ibp_list);
1061
1062                 LASSERT (peer->ibp_persistence != 0 || /* persistent peer */
1063                          peer->ibp_connecting != 0 || /* creating conns */
1064                          !list_empty (&peer->ibp_conns));  /* active conn */
1065
1066                 if (peer->ibp_nid != nid)
1067                         continue;
1068
1069                 CDEBUG(D_NET, "got peer [%p] -> "LPX64" (%d)\n",
1070                        peer, nid, atomic_read (&peer->ibp_refcount));
1071                 return (peer);
1072         }
1073         return (NULL);
1074 }
1075
1076 kib_peer_t *
1077 kibnal_get_peer (ptl_nid_t nid)
1078 {
1079         kib_peer_t     *peer;
1080         unsigned long   flags;
1081
1082         read_lock_irqsave(&kibnal_data.kib_global_lock, flags);
1083         peer = kibnal_find_peer_locked (nid);
1084         if (peer != NULL)                       /* +1 ref for caller */
1085                 atomic_inc (&peer->ibp_refcount);
1086         read_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
1087
1088         return (peer);
1089 }
1090
1091 void
1092 kibnal_unlink_peer_locked (kib_peer_t *peer)
1093 {
1094         LASSERT (peer->ibp_persistence == 0);
1095         LASSERT (list_empty(&peer->ibp_conns));
1096
1097         LASSERT (kibnal_peer_active(peer));
1098         list_del_init (&peer->ibp_list);
1099         /* lose peerlist's ref */
1100         kibnal_put_peer (peer);
1101 }
1102
1103 int
1104 kibnal_get_peer_info (int index, ptl_nid_t *nidp, __u32 *ipp, int *portp,
1105                       int *persistencep)
1106 {
1107         kib_peer_t        *peer;
1108         struct list_head  *ptmp;
1109         unsigned long      flags;
1110         int                i;
1111
1112         read_lock_irqsave(&kibnal_data.kib_global_lock, flags);
1113
1114         for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {
1115
1116                 list_for_each (ptmp, &kibnal_data.kib_peers[i]) {
1117                         
1118                         peer = list_entry (ptmp, kib_peer_t, ibp_list);
1119                         LASSERT (peer->ibp_persistence != 0 ||
1120                                  peer->ibp_connecting != 0 ||
1121                                  !list_empty (&peer->ibp_conns));
1122
1123                         if (index-- > 0)
1124                                 continue;
1125
1126                         *nidp = peer->ibp_nid;
1127                         *ipp = peer->ibp_ip;
1128                         *portp = peer->ibp_port;
1129                         *persistencep = peer->ibp_persistence;
1130                         
1131                         read_unlock_irqrestore(&kibnal_data.kib_global_lock,
1132                                                flags);
1133                         return (0);
1134                 }
1135         }
1136
1137         read_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
1138         return (-ENOENT);
1139 }
1140
1141 int
1142 kibnal_add_persistent_peer (ptl_nid_t nid, __u32 ip, int port)
1143 {
1144         unsigned long      flags;
1145         kib_peer_t        *peer;
1146         kib_peer_t        *peer2;
1147         
1148         if (nid == PTL_NID_ANY)
1149                 return (-EINVAL);
1150
1151         peer = kibnal_create_peer (nid);
1152         if (peer == NULL)
1153                 return (-ENOMEM);
1154
1155         write_lock_irqsave (&kibnal_data.kib_global_lock, flags);
1156
1157         peer2 = kibnal_find_peer_locked (nid);
1158         if (peer2 != NULL) {
1159                 kibnal_put_peer (peer);
1160                 peer = peer2;
1161         } else {
1162                 /* peer table takes existing ref on peer */
1163                 list_add_tail (&peer->ibp_list,
1164                                kibnal_nid2peerlist (nid));
1165         }
1166
1167         peer->ibp_ip = ip;
1168         peer->ibp_port = port;
1169         peer->ibp_persistence++;
1170         
1171         write_unlock_irqrestore (&kibnal_data.kib_global_lock, flags);
1172         return (0);
1173 }
1174
1175 void
1176 kibnal_del_peer_locked (kib_peer_t *peer, int single_share)
1177 {
1178         struct list_head *ctmp;
1179         struct list_head *cnxt;
1180         kib_conn_t       *conn;
1181
1182         if (!single_share)
1183                 peer->ibp_persistence = 0;
1184         else if (peer->ibp_persistence > 0)
1185                 peer->ibp_persistence--;
1186
1187         if (peer->ibp_persistence != 0)
1188                 return;
1189
1190         if (list_empty(&peer->ibp_conns)) {
1191                 kibnal_unlink_peer_locked(peer);
1192         } else {
1193                 list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
1194                         conn = list_entry(ctmp, kib_conn_t, ibc_list);
1195
1196                         kibnal_close_conn_locked (conn, 0);
1197                 }
1198                 /* NB peer is no longer persistent; closing its last conn
1199                  * unlinked it. */
1200         }
1201         /* NB peer now unlinked; might even be freed if the peer table had the
1202          * last ref on it. */
1203 }
1204
1205 int
1206 kibnal_del_peer (ptl_nid_t nid, int single_share)
1207 {
1208         unsigned long      flags;
1209         struct list_head  *ptmp;
1210         struct list_head  *pnxt;
1211         kib_peer_t        *peer;
1212         int                lo;
1213         int                hi;
1214         int                i;
1215         int                rc = -ENOENT;
1216
1217         write_lock_irqsave (&kibnal_data.kib_global_lock, flags);
1218
1219         if (nid != PTL_NID_ANY)
1220                 lo = hi = kibnal_nid2peerlist(nid) - kibnal_data.kib_peers;
1221         else {
1222                 lo = 0;
1223                 hi = kibnal_data.kib_peer_hash_size - 1;
1224         }
1225
1226         for (i = lo; i <= hi; i++) {
1227                 list_for_each_safe (ptmp, pnxt, &kibnal_data.kib_peers[i]) {
1228                         peer = list_entry (ptmp, kib_peer_t, ibp_list);
1229                         LASSERT (peer->ibp_persistence != 0 ||
1230                                  peer->ibp_connecting != 0 ||
1231                                  !list_empty (&peer->ibp_conns));
1232
1233                         if (!(nid == PTL_NID_ANY || peer->ibp_nid == nid))
1234                                 continue;
1235
1236                         kibnal_del_peer_locked (peer, single_share);
1237                         rc = 0;         /* matched something */
1238
1239                         if (single_share)
1240                                 goto out;
1241                 }
1242         }
1243  out:
1244         write_unlock_irqrestore (&kibnal_data.kib_global_lock, flags);
1245
1246         return (rc);
1247 }
1248
1249 kib_conn_t *
1250 kibnal_get_conn_by_idx (int index)
1251 {
1252         kib_peer_t        *peer;
1253         struct list_head  *ptmp;
1254         kib_conn_t        *conn;
1255         struct list_head  *ctmp;
1256         unsigned long      flags;
1257         int                i;
1258
1259         read_lock_irqsave(&kibnal_data.kib_global_lock, flags);
1260
1261         for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {
1262                 list_for_each (ptmp, &kibnal_data.kib_peers[i]) {
1263
1264                         peer = list_entry (ptmp, kib_peer_t, ibp_list);
1265                         LASSERT (peer->ibp_persistence > 0 ||
1266                                  peer->ibp_connecting != 0 ||
1267                                  !list_empty (&peer->ibp_conns));
1268
1269                         list_for_each (ctmp, &peer->ibp_conns) {
1270                                 if (index-- > 0)
1271                                         continue;
1272
1273                                 conn = list_entry (ctmp, kib_conn_t, ibc_list);
1274                                 CDEBUG(D_NET, "++conn[%p] state %d -> "LPX64" (%d)\n",
1275                                        conn, conn->ibc_state, conn->ibc_peer->ibp_nid,
1276                                        atomic_read (&conn->ibc_refcount));
1277                                 atomic_inc (&conn->ibc_refcount);
1278                                 read_unlock_irqrestore(&kibnal_data.kib_global_lock,
1279                                                        flags);
1280                                 return (conn);
1281                         }
1282                 }
1283         }
1284
1285         read_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
1286         return (NULL);
1287 }
1288
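/* Allocate and initialise a connection: receive buffers for IBNAL_RX_MSGS
 * messages, a queue pair on the shared CQ and PD, and the QP moved to the
 * INIT state with RDMA read/write enabled.  Returns NULL on failure after
 * cleaning up via kibnal_destroy_conn(). */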
1289 kib_conn_t *
1290 kibnal_create_conn (void)
1291 {
1292         kib_conn_t  *conn;
1293         int          i;
1294         __u64        vaddr = 0;
1295         __u64        vaddr_base;
1296         int          page_offset;
1297         int          ipage;
1298         int          rc;
1299         union {
1300                 struct ib_qp_create_param  qp_create;
1301                 struct ib_qp_attribute     qp_attr;
1302         } params;
1303         
1304         PORTAL_ALLOC (conn, sizeof (*conn));
1305         if (conn == NULL) {
1306                 CERROR ("Can't allocate connection\n");
1307                 return (NULL);
1308         }
1309
1310         /* zero flags, NULL pointers etc... */
1311         memset (conn, 0, sizeof (*conn));
1312
1313         INIT_LIST_HEAD (&conn->ibc_tx_queue);
1314         INIT_LIST_HEAD (&conn->ibc_active_txs);
1315         spin_lock_init (&conn->ibc_lock);
1316         
1317         atomic_inc (&kibnal_data.kib_nconns);
1318         /* well not really, but I call destroy() on failure, which decrements */
1319
1320         PORTAL_ALLOC (conn->ibc_rxs, IBNAL_RX_MSGS * sizeof (kib_rx_t));
1321         if (conn->ibc_rxs == NULL)
1322                 goto failed;
1323         memset (conn->ibc_rxs, 0, IBNAL_RX_MSGS * sizeof(kib_rx_t));
1324
1325         rc = kibnal_alloc_pages(&conn->ibc_rx_pages,
1326                                 IBNAL_RX_MSG_PAGES,
1327                                 IB_ACCESS_LOCAL_WRITE);
1328         if (rc != 0)
1329                 goto failed;
1330
1331         vaddr_base = vaddr = conn->ibc_rx_pages->ibp_vaddr;
1332
1333         for (i = ipage = page_offset = 0; i < IBNAL_RX_MSGS; i++) {
1334                 struct page *page = conn->ibc_rx_pages->ibp_pages[ipage];
1335                 kib_rx_t   *rx = &conn->ibc_rxs[i];
1336
1337                 rx->rx_conn = conn;
1338                 rx->rx_vaddr = vaddr;
1339                 rx->rx_msg = (kib_msg_t *)(((char *)page_address(page)) + page_offset);
1340                 
1341                 vaddr += IBNAL_MSG_SIZE;
1342                 LASSERT (vaddr <= vaddr_base + IBNAL_RX_MSG_BYTES);
1343                 
1344                 page_offset += IBNAL_MSG_SIZE;
1345                 LASSERT (page_offset <= PAGE_SIZE);
1346
1347                 if (page_offset == PAGE_SIZE) {
1348                         page_offset = 0;
1349                         ipage++;
1350                         LASSERT (ipage <= IBNAL_RX_MSG_PAGES);
1351                 }
1352         }
1353
1354         params.qp_create = (struct ib_qp_create_param) {
1355                 .limit = {
1356                         /* Sends have an optional RDMA */
1357                         .max_outstanding_send_request    = 2 * IBNAL_MSG_QUEUE_SIZE,
1358                         .max_outstanding_receive_request = IBNAL_MSG_QUEUE_SIZE,
1359                         .max_send_gather_element         = 1,
1360                         .max_receive_scatter_element     = 1,
1361                 },
1362                 .pd              = kibnal_data.kib_pd,
1363                 .send_queue      = kibnal_data.kib_cq,
1364                 .receive_queue   = kibnal_data.kib_cq,
1365                 .send_policy     = IB_WQ_SIGNAL_SELECTABLE,
1366                 .receive_policy  = IB_WQ_SIGNAL_SELECTABLE,
1367                 .rd_domain       = 0,
1368                 .transport       = IB_TRANSPORT_RC,
1369                 .device_specific = NULL,
1370         };
1371         
1372         rc = ib_qp_create (&params.qp_create, &conn->ibc_qp, &conn->ibc_qpn);
1373         if (rc != 0) {
1374                 CERROR ("Failed to create queue pair: %d\n", rc);
1375                 goto failed;
1376         }
1377         
1378         /* Mark QP created */
1379         conn->ibc_state = IBNAL_CONN_INIT_QP;
1380
1381         params.qp_attr = (struct ib_qp_attribute) {
1382                 .state             = IB_QP_STATE_INIT,
1383                 .port              = kibnal_data.kib_port,
1384                 .enable_rdma_read  = 1,
1385                 .enable_rdma_write = 1,
1386                 .valid_fields      = (IB_QP_ATTRIBUTE_STATE |
1387                                       IB_QP_ATTRIBUTE_PORT |
1388                                       IB_QP_ATTRIBUTE_PKEY_INDEX |
1389                                       IB_QP_ATTRIBUTE_RDMA_ATOMIC_ENABLE),
1390         };
1391         rc = ib_qp_modify(conn->ibc_qp, &params.qp_attr);
1392         if (rc != 0) {
1393                 CERROR ("Failed to modify queue pair: %d\n", rc);
1394                 goto failed;
1395         }
1396
1397         /* 1 ref for caller */
1398         atomic_set (&conn->ibc_refcount, 1);
1399         return (conn);
1400         
1401  failed:
1402         kibnal_destroy_conn (conn);
1403         return (NULL);
1404 }
1405
1406 void
1407 kibnal_destroy_conn (kib_conn_t *conn)
1408 {
1409         int    rc;
1410         
1411         CDEBUG (D_NET, "connection %p\n", conn);
1412
1413         LASSERT (atomic_read (&conn->ibc_refcount) == 0);
1414         LASSERT (list_empty(&conn->ibc_tx_queue));
1415         LASSERT (list_empty(&conn->ibc_active_txs));
1416         LASSERT (conn->ibc_nsends_posted == 0);
1417         LASSERT (conn->ibc_connreq == NULL);
1418
1419         switch (conn->ibc_state) {
1420         case IBNAL_CONN_ZOMBIE:
1421                 /* called after connection sequence initiated */
1422
1423         case IBNAL_CONN_INIT_QP:
1424                 rc = ib_qp_destroy(conn->ibc_qp);
1425                 if (rc != 0)
1426                         CERROR("Can't destroy QP: %d\n", rc);
1427                 /* fall through */
1428                 
1429         case IBNAL_CONN_INIT_NOTHING:
1430                 break;
1431
1432         default:
1433                 LASSERT (0);
1434         }
1435
1436         if (conn->ibc_rx_pages != NULL) 
1437                 kibnal_free_pages(conn->ibc_rx_pages);
1438         
1439         if (conn->ibc_rxs != NULL)
1440                 PORTAL_FREE(conn->ibc_rxs, 
1441                             IBNAL_RX_MSGS * sizeof(kib_rx_t));
1442
1443         if (conn->ibc_peer != NULL)
1444                 kibnal_put_peer(conn->ibc_peer);
1445
1446         PORTAL_FREE(conn, sizeof (*conn));
1447
1448         atomic_dec(&kibnal_data.kib_nconns);
1449         
1450         if (atomic_read (&kibnal_data.kib_nconns) == 0 &&
1451             kibnal_data.kib_shutdown) {
1452                 /* I just nuked the last connection on shutdown; wake up
1453                  * everyone so they can exit. */
1454                 wake_up_all(&kibnal_data.kib_sched_waitq);
1455                 wake_up_all(&kibnal_data.kib_reaper_waitq);
1456         }
1457 }
1458
1459 void
1460 kibnal_put_conn (kib_conn_t *conn)
1461 {
1462         unsigned long flags;
1463
1464         CDEBUG (D_NET, "putting conn[%p] state %d -> "LPX64" (%d)\n",
1465                 conn, conn->ibc_state, conn->ibc_peer->ibp_nid,
1466                 atomic_read (&conn->ibc_refcount));
1467
1468         LASSERT (atomic_read (&conn->ibc_refcount) > 0);
1469         if (!atomic_dec_and_test (&conn->ibc_refcount))
1470                 return;
1471
1472         /* last ref only goes on zombies */
1473         LASSERT (conn->ibc_state == IBNAL_CONN_ZOMBIE);
1474
1475         spin_lock_irqsave (&kibnal_data.kib_reaper_lock, flags);
1476
1477         list_add (&conn->ibc_list, &kibnal_data.kib_reaper_conns);
1478         wake_up (&kibnal_data.kib_reaper_waitq);
1479
1480         spin_unlock_irqrestore (&kibnal_data.kib_reaper_lock, flags);
1481 }
1482
1483 int
1484 kibnal_close_peer_conns_locked (kib_peer_t *peer, int why)
1485 {
1486         kib_conn_t         *conn;
1487         struct list_head   *ctmp;
1488         struct list_head   *cnxt;
1489         int                 count = 0;
1490
1491         list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
1492                 conn = list_entry (ctmp, kib_conn_t, ibc_list);
1493
1494                 count++;
1495                 kibnal_close_conn_locked (conn, why);
1496         }
1497
1498         return (count);
1499 }
1500
1501 int
1502 kibnal_close_stale_conns_locked (kib_peer_t *peer, __u64 incarnation)
1503 {
1504         kib_conn_t         *conn;
1505         struct list_head   *ctmp;
1506         struct list_head   *cnxt;
1507         int                 count = 0;
1508
1509         list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
1510                 conn = list_entry (ctmp, kib_conn_t, ibc_list);
1511
1512                 if (conn->ibc_incarnation == incarnation)
1513                         continue;
1514
1515                 CDEBUG(D_NET, "Closing stale conn %p nid:"LPX64
1516                        " incarnation:"LPX64"("LPX64")\n", conn,
1517                        peer->ibp_nid, conn->ibc_incarnation, incarnation);
1518                 
1519                 count++;
1520                 kibnal_close_conn_locked (conn, -ESTALE);
1521         }
1522
1523         return (count);
1524 }
1525
1526 int
1527 kibnal_close_matching_conns (ptl_nid_t nid)
1528 {
1529         unsigned long       flags;
1530         kib_peer_t         *peer;
1531         struct list_head   *ptmp;
1532         struct list_head   *pnxt;
1533         int                 lo;
1534         int                 hi;
1535         int                 i;
1536         int                 count = 0;
1537
1538         write_lock_irqsave (&kibnal_data.kib_global_lock, flags);
1539
1540         if (nid != PTL_NID_ANY)
1541                 lo = hi = kibnal_nid2peerlist(nid) - kibnal_data.kib_peers;
1542         else {
1543                 lo = 0;
1544                 hi = kibnal_data.kib_peer_hash_size - 1;
1545         }
1546
1547         for (i = lo; i <= hi; i++) {
1548                 list_for_each_safe (ptmp, pnxt, &kibnal_data.kib_peers[i]) {
1549
1550                         peer = list_entry (ptmp, kib_peer_t, ibp_list);
1551                         LASSERT (peer->ibp_persistence != 0 ||
1552                                  peer->ibp_connecting != 0 ||
1553                                  !list_empty (&peer->ibp_conns));
1554
1555                         if (!(nid == PTL_NID_ANY || nid == peer->ibp_nid))
1556                                 continue;
1557
1558                         count += kibnal_close_peer_conns_locked (peer, 0);
1559                 }
1560         }
1561
1562         write_unlock_irqrestore (&kibnal_data.kib_global_lock, flags);
1563
1564         /* wildcards always succeed */
1565         if (nid == PTL_NID_ANY)
1566                 return (0);
1567         
1568         return (count == 0 ? -ENOENT : 0);
1569 }
1570
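/* Handler for portals configuration commands, registered via
 * libcfs_nal_cmd_register() in kibnal_api_startup().  Each command is
 * translated into the corresponding peer/connection operation;
 * unrecognised commands fall out with the default -EINVAL. */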
1571 int
1572 kibnal_cmd(struct portals_cfg *pcfg, void * private)
1573 {
1574         int rc = -EINVAL;
1575
1576         LASSERT (pcfg != NULL);
1577
1578         switch(pcfg->pcfg_command) {
1579         case NAL_CMD_GET_PEER: {
1580                 ptl_nid_t   nid = 0;
1581                 __u32       ip = 0;
1582                 int         port = 0;
1583                 int         share_count = 0;
1584
1585                 rc = kibnal_get_peer_info(pcfg->pcfg_count,
1586                                           &nid, &ip, &port, &share_count);
1587                 pcfg->pcfg_nid   = nid;
1588                 pcfg->pcfg_size  = 0;
1589                 pcfg->pcfg_id    = ip;
1590                 pcfg->pcfg_misc  = port;
1591                 pcfg->pcfg_count = 0;
1592                 pcfg->pcfg_wait  = share_count;
1593                 break;
1594         }
1595         case NAL_CMD_ADD_PEER: {
1596                 rc = kibnal_add_persistent_peer (pcfg->pcfg_nid,
1597                                                  pcfg->pcfg_id, /* IP */
1598                                                  pcfg->pcfg_misc); /* port */
1599                 break;
1600         }
1601         case NAL_CMD_DEL_PEER: {
1602                 rc = kibnal_del_peer (pcfg->pcfg_nid, 
1603                                        /* flags == single_share */
1604                                        pcfg->pcfg_flags != 0);
1605                 break;
1606         }
1607         case NAL_CMD_GET_CONN: {
1608                 kib_conn_t *conn = kibnal_get_conn_by_idx (pcfg->pcfg_count);
1609
1610                 if (conn == NULL)
1611                         rc = -ENOENT;
1612                 else {
1613                         rc = 0;
1614                         pcfg->pcfg_nid   = conn->ibc_peer->ibp_nid;
1615                         pcfg->pcfg_id    = 0;
1616                         pcfg->pcfg_misc  = 0;
1617                         pcfg->pcfg_flags = 0;
1618                         kibnal_put_conn (conn);
1619                 }
1620                 break;
1621         }
1622         case NAL_CMD_CLOSE_CONNECTION: {
1623                 rc = kibnal_close_matching_conns (pcfg->pcfg_nid);
1624                 break;
1625         }
1626         case NAL_CMD_REGISTER_MYNID: {
1627                 if (pcfg->pcfg_nid == PTL_NID_ANY)
1628                         rc = -EINVAL;
1629                 else
1630                         rc = kibnal_set_mynid (pcfg->pcfg_nid);
1631                 break;
1632         }
1633         }
1634
1635         return rc;
1636 }
1637
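/* Undo kibnal_alloc_pages(): deregister the memory region if it was
 * mapped, release every page that was allocated, then free the
 * descriptor itself. */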
1638 void
1639 kibnal_free_pages (kib_pages_t *p)
1640 {
1641         int     npages = p->ibp_npages;
1642         int     rc;
1643         int     i;
1644         
1645         if (p->ibp_mapped) {
1646                 rc = ib_memory_deregister(p->ibp_handle);
1647                 if (rc != 0)
1648                         CERROR ("Deregister error: %d\n", rc);
1649         }
1650         
1651         for (i = 0; i < npages; i++)
1652                 if (p->ibp_pages[i] != NULL)
1653                         __free_page(p->ibp_pages[i]);
1654         
1655         PORTAL_FREE (p, offsetof(kib_pages_t, ibp_pages[npages]));
1656 }
1657
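/* Allocate 'npages' kernel pages and register them with the protection
 * domain as a single physically-addressed memory region with the given
 * access rights.  On success *pp describes the pages and carries the
 * region's vaddr/lkey/rkey; on any failure everything allocated so far
 * is cleaned up before returning. */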
1658 int
1659 kibnal_alloc_pages (kib_pages_t **pp, int npages, int access)
1660 {
1661         kib_pages_t                *p;
1662         struct ib_physical_buffer  *phys_pages;
1663         int                         i;
1664         int                         rc;
1665
1666         PORTAL_ALLOC(p, offsetof(kib_pages_t, ibp_pages[npages]));
1667         if (p == NULL) {
1668                 CERROR ("Can't allocate page descriptor for %d pages\n", npages);
1669                 return (-ENOMEM);
1670         }
1671
1672         memset (p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
1673         p->ibp_npages = npages;
1674         
1675         for (i = 0; i < npages; i++) {
1676                 p->ibp_pages[i] = alloc_page (GFP_KERNEL);
1677                 if (p->ibp_pages[i] == NULL) {
1678                         CERROR ("Can't allocate page %d of %d\n", i, npages);
1679                         kibnal_free_pages(p);
1680                         return (-ENOMEM);
1681                 }
1682         }
1683
1684         PORTAL_ALLOC(phys_pages, npages * sizeof(*phys_pages));
1685         if (phys_pages == NULL) {
1686                 CERROR ("Can't allocate physarray for %d pages\n", npages);
1687                 kibnal_free_pages(p);
1688                 return (-ENOMEM);
1689         }
1690
1691         for (i = 0; i < npages; i++) {
1692                 phys_pages[i].size = PAGE_SIZE;
1693                 phys_pages[i].address =
1694                         kibnal_page2phys(p->ibp_pages[i]);
1695         }
1696
1697         p->ibp_vaddr = 0;
1698         rc = ib_memory_register_physical(kibnal_data.kib_pd,
1699                                          phys_pages, npages,
1700                                          &p->ibp_vaddr,
1701                                          npages * PAGE_SIZE, 0,
1702                                          access,
1703                                          &p->ibp_handle,
1704                                          &p->ibp_lkey,
1705                                          &p->ibp_rkey);
1706         
1707         PORTAL_FREE(phys_pages, npages * sizeof(*phys_pages));
1708         
1709         if (rc != 0) {
1710                 CERROR ("Error %d mapping %d pages\n", rc, npages);
1711                 kibnal_free_pages(p);
1712                 return (rc);
1713         }
1714         
1715         p->ibp_mapped = 1;
1716         *pp = p;
1717         return (0);
1718 }
1719
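/* Carve the pre-registered TX message pages into IBNAL_MSG_SIZE slots:
 * each tx descriptor gets the kernel address and RDMA vaddr of its
 * slot and is parked on the appropriate idle list (descriptors at
 * index IBNAL_NTX and above go on the non-blocking list). */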
1720 int
1721 kibnal_setup_tx_descs (void)
1722 {
1723         int           ipage = 0;
1724         int           page_offset = 0;
1725         __u64         vaddr;
1726         __u64         vaddr_base;
1727         struct page  *page;
1728         kib_tx_t     *tx;
1729         int           i;
1730         int           rc;
1731
1732         /* pre-mapped messages are not bigger than 1 page */
1733         LASSERT (IBNAL_MSG_SIZE <= PAGE_SIZE);
1734
1735         /* No fancy arithmetic when we do the buffer calculations */
1736         LASSERT (PAGE_SIZE % IBNAL_MSG_SIZE == 0);
1737
1738         rc = kibnal_alloc_pages(&kibnal_data.kib_tx_pages,
1739                                 IBNAL_TX_MSG_PAGES, 
1740                                 0);            /* local read access only */
1741         if (rc != 0)
1742                 return (rc);
1743
1744         vaddr = vaddr_base = kibnal_data.kib_tx_pages->ibp_vaddr;
1745
1746         for (i = 0; i < IBNAL_TX_MSGS; i++) {
1747                 page = kibnal_data.kib_tx_pages->ibp_pages[ipage];
1748                 tx = &kibnal_data.kib_tx_descs[i];
1749
1750                 memset (tx, 0, sizeof(*tx));    /* zero flags etc */
1751                 
1752                 tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) + page_offset);
1753                 tx->tx_vaddr = vaddr;
1754                 tx->tx_isnblk = (i >= IBNAL_NTX);
1755                 tx->tx_mapped = KIB_TX_UNMAPPED;
1756
1757                 CDEBUG(D_NET, "Tx[%d] %p->%p - "LPX64"\n", 
1758                        i, tx, tx->tx_msg, tx->tx_vaddr);
1759
1760                 if (tx->tx_isnblk)
1761                         list_add (&tx->tx_list, 
1762                                   &kibnal_data.kib_idle_nblk_txs);
1763                 else
1764                         list_add (&tx->tx_list, 
1765                                   &kibnal_data.kib_idle_txs);
1766
1767                 vaddr += IBNAL_MSG_SIZE;
1768                 LASSERT (vaddr <= vaddr_base + IBNAL_TX_MSG_BYTES);
1769
1770                 page_offset += IBNAL_MSG_SIZE;
1771                 LASSERT (page_offset <= PAGE_SIZE);
1772
1773                 if (page_offset == PAGE_SIZE) {
1774                         page_offset = 0;
1775                         ipage++;
1776                         LASSERT (ipage <= IBNAL_TX_MSG_PAGES);
1777                 }
1778         }
1779         
1780         return (0);
1781 }
1782
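/* Tear down in reverse order of startup.  kib_init records how far
 * kibnal_api_startup() got, and the switch below falls through from
 * that level all the way down to IBNAL_INIT_NOTHING, so this routine
 * also serves as the error-unwind path for a partial startup. */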
1783 void
1784 kibnal_api_shutdown (nal_t *nal)
1785 {
1786         int   i;
1787         int   rc;
1788
1789         if (nal->nal_refct != 0) {
1790                 /* this module took the first ref at load time; just drop the module pin taken for this later ref */
1791                 PORTAL_MODULE_UNUSE;
1792                 return;
1793         }
1794
1795         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
1796                atomic_read (&portal_kmemory));
1797
1798         LASSERT(nal == &kibnal_api);
1799
1800         switch (kibnal_data.kib_init) {
1801         default:
1802                 CERROR ("Unexpected state %d\n", kibnal_data.kib_init);
1803                 LBUG();
1804
1805         case IBNAL_INIT_ALL:
1806                 /* stop calls to nal_cmd */
1807                 libcfs_nal_cmd_unregister(OPENIBNAL);
1808                 /* No new peers */
1809
1810                 /* resetting my NID unadvertises me, removes my
1811                  * listener and nukes all current peers */
1812                 kibnal_set_mynid (PTL_NID_ANY);
1813
1814                 /* Wait for all peer state to clean up */
1815                 i = 2;
1816                 while (atomic_read (&kibnal_data.kib_npeers) != 0) {
1817                         i++;
1818                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
1819                                "waiting for %d peers to close down\n",
1820                                atomic_read (&kibnal_data.kib_npeers));
1821                         set_current_state (TASK_INTERRUPTIBLE);
1822                         schedule_timeout (HZ);
1823                 }
1824                 /* fall through */
1825
1826         case IBNAL_INIT_CQ:
1827                 rc = ib_cq_destroy (kibnal_data.kib_cq);
1828                 if (rc != 0)
1829                         CERROR ("Destroy CQ error: %d\n", rc);
1830                 /* fall through */
1831
1832         case IBNAL_INIT_TXD:
1833                 kibnal_free_pages (kibnal_data.kib_tx_pages);
1834                 /* fall through */
1835 #if IBNAL_FMR
1836         case IBNAL_INIT_FMR:
1837                 rc = ib_fmr_pool_destroy (kibnal_data.kib_fmr_pool);
1838                 if (rc != 0)
1839                         CERROR ("Destroy FMR pool error: %d\n", rc);
1840                 /* fall through */
1841 #endif
1842         case IBNAL_INIT_PD:
1843                 rc = ib_pd_destroy(kibnal_data.kib_pd);
1844                 if (rc != 0)
1845                         CERROR ("Destroy PD error: %d\n", rc);
1846                 /* fall through */
1847
1848         case IBNAL_INIT_LIB:
1849                 lib_fini(&kibnal_lib);
1850                 /* fall through */
1851
1852         case IBNAL_INIT_DATA:
1853                 /* Module refcount only gets to zero when all peers
1854                  * have been closed so all lists must be empty */
1855                 LASSERT (atomic_read (&kibnal_data.kib_npeers) == 0);
1856                 LASSERT (kibnal_data.kib_peers != NULL);
1857                 for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {
1858                         LASSERT (list_empty (&kibnal_data.kib_peers[i]));
1859                 }
1860                 LASSERT (atomic_read (&kibnal_data.kib_nconns) == 0);
1861                 LASSERT (list_empty (&kibnal_data.kib_sched_rxq));
1862                 LASSERT (list_empty (&kibnal_data.kib_sched_txq));
1863                 LASSERT (list_empty (&kibnal_data.kib_reaper_conns));
1864                 LASSERT (list_empty (&kibnal_data.kib_connd_peers));
1865                 LASSERT (list_empty (&kibnal_data.kib_connd_acceptq));
1866
1867                 /* flag threads to terminate; wake and wait for them to die */
1868                 kibnal_data.kib_shutdown = 1;
1869                 wake_up_all (&kibnal_data.kib_sched_waitq);
1870                 wake_up_all (&kibnal_data.kib_reaper_waitq);
1871                 wake_up_all (&kibnal_data.kib_connd_waitq);
1872
1873                 i = 2;
1874                 while (atomic_read (&kibnal_data.kib_nthreads) != 0) {
1875                         i++;
1876                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
1877                                "Waiting for %d threads to terminate\n",
1878                                atomic_read (&kibnal_data.kib_nthreads));
1879                         set_current_state (TASK_INTERRUPTIBLE);
1880                         schedule_timeout (HZ);
1881                 }
1882                 /* fall through */
1883                 
1884         case IBNAL_INIT_NOTHING:
1885                 break;
1886         }
1887
1888         if (kibnal_data.kib_tx_descs != NULL)
1889                 PORTAL_FREE (kibnal_data.kib_tx_descs,
1890                              IBNAL_TX_MSGS * sizeof(kib_tx_t));
1891
1892         if (kibnal_data.kib_peers != NULL)
1893                 PORTAL_FREE (kibnal_data.kib_peers,
1894                              sizeof (struct list_head) * 
1895                              kibnal_data.kib_peer_hash_size);
1896
1897         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
1898                atomic_read (&portal_kmemory));
1899         printk(KERN_INFO "Lustre: OpenIB NAL unloaded (final mem %d)\n",
1900                atomic_read(&portal_kmemory));
1901
1902         kibnal_data.kib_init = IBNAL_INIT_NOTHING;
1903 }
1904
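/* Bring the NAL up in stages.  After each stage completes, kib_init is
 * advanced so that a failure at any later point can simply call
 * kibnal_api_shutdown() to unwind whatever has been set up so far. */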
1905 int
1906 kibnal_api_startup (nal_t *nal, ptl_pid_t requested_pid,
1907                      ptl_ni_limits_t *requested_limits,
1908                      ptl_ni_limits_t *actual_limits)
1909 {
1910         struct timeval    tv;
1911         ptl_process_id_t  process_id;
1912         int               pkmem = atomic_read(&portal_kmemory);
1913         int               rc;
1914         int               i;
1915
1916         LASSERT (nal == &kibnal_api);
1917
1918         if (nal->nal_refct != 0) {
1919                 if (actual_limits != NULL)
1920                         *actual_limits = kibnal_lib.libnal_ni.ni_actual_limits;
1921                 /* this module took the first ref at load time; later refs just pin the module */
1922                 PORTAL_MODULE_USE;
1923                 return (PTL_OK);
1924         }
1925
1926         LASSERT (kibnal_data.kib_init == IBNAL_INIT_NOTHING);
1927
1928         memset (&kibnal_data, 0, sizeof (kibnal_data)); /* zero pointers, flags etc */
1929
1930         do_gettimeofday(&tv);
1931         kibnal_data.kib_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
1932
1933         init_MUTEX (&kibnal_data.kib_nid_mutex);
1934         init_MUTEX_LOCKED (&kibnal_data.kib_listener_signal);
1935
1936         rwlock_init(&kibnal_data.kib_global_lock);
1937
1938         kibnal_data.kib_peer_hash_size = IBNAL_PEER_HASH_SIZE;
1939         PORTAL_ALLOC (kibnal_data.kib_peers,
1940                       sizeof (struct list_head) * kibnal_data.kib_peer_hash_size);
1941         if (kibnal_data.kib_peers == NULL) {
1942                 goto failed;
1943         }
1944         for (i = 0; i < kibnal_data.kib_peer_hash_size; i++)
1945                 INIT_LIST_HEAD(&kibnal_data.kib_peers[i]);
1946
1947         spin_lock_init (&kibnal_data.kib_reaper_lock);
1948         INIT_LIST_HEAD (&kibnal_data.kib_reaper_conns);
1949         init_waitqueue_head (&kibnal_data.kib_reaper_waitq);
1950
1951         spin_lock_init (&kibnal_data.kib_connd_lock);
1952         INIT_LIST_HEAD (&kibnal_data.kib_connd_acceptq);
1953         INIT_LIST_HEAD (&kibnal_data.kib_connd_peers);
1954         init_waitqueue_head (&kibnal_data.kib_connd_waitq);
1955
1956         spin_lock_init (&kibnal_data.kib_sched_lock);
1957         INIT_LIST_HEAD (&kibnal_data.kib_sched_txq);
1958         INIT_LIST_HEAD (&kibnal_data.kib_sched_rxq);
1959         init_waitqueue_head (&kibnal_data.kib_sched_waitq);
1960
1961         spin_lock_init (&kibnal_data.kib_tx_lock);
1962         INIT_LIST_HEAD (&kibnal_data.kib_idle_txs);
1963         INIT_LIST_HEAD (&kibnal_data.kib_idle_nblk_txs);
1964         init_waitqueue_head(&kibnal_data.kib_idle_tx_waitq);
1965
1966         PORTAL_ALLOC (kibnal_data.kib_tx_descs,
1967                       IBNAL_TX_MSGS * sizeof(kib_tx_t));
1968         if (kibnal_data.kib_tx_descs == NULL) {
1969                 CERROR ("Can't allocate tx descs\n");
1970                 goto failed;
1971         }
1972
1973         /* lists/ptrs/locks initialised */
1974         kibnal_data.kib_init = IBNAL_INIT_DATA;
1975         /*****************************************************/
1976
1977
1978         process_id.pid = requested_pid;
1979         process_id.nid = PTL_NID_ANY;           /* don't know my NID yet */
1980         
1981         rc = lib_init(&kibnal_lib, nal, process_id,
1982                       requested_limits, actual_limits);
1983         if (rc != PTL_OK) {
1984                 CERROR("lib_init failed: error %d\n", rc);
1985                 goto failed;
1986         }
1987
1988         /* lib interface initialised */
1989         kibnal_data.kib_init = IBNAL_INIT_LIB;
1990         /*****************************************************/
1991
1992         for (i = 0; i < IBNAL_N_SCHED; i++) {
1993                 rc = kibnal_thread_start (kibnal_scheduler,
1994                                           (void *)((unsigned long)i));
1995                 if (rc != 0) {
1996                         CERROR("Can't spawn openibnal scheduler[%d]: %d\n",
1997                                i, rc);
1998                         goto failed;
1999                 }
2000         }
2001
2002         for (i = 0; i < IBNAL_N_CONND; i++) {
2003                 rc = kibnal_thread_start (kibnal_connd,
2004                                           (void *)((unsigned long)i));
2005                 if (rc != 0) {
2006                         CERROR("Can't spawn openibnal connd[%d]: %d\n",
2007                                i, rc);
2008                         goto failed;
2009                 }
2010         }
2011
2012         rc = kibnal_thread_start (kibnal_reaper, NULL);
2013         if (rc != 0) {
2014                 CERROR ("Can't spawn openibnal reaper: %d\n", rc);
2015                 goto failed;
2016         }
2017
2018         kibnal_data.kib_device = ib_device_get_by_index(0);
2019         if (kibnal_data.kib_device == NULL) {
2020                 CERROR ("Can't open ib device 0\n");
2021                 goto failed;
2022         }
2023         
2024         rc = ib_device_properties_get(kibnal_data.kib_device,
2025                                       &kibnal_data.kib_device_props);
2026         if (rc != 0) {
2027                 CERROR ("Can't get device props: %d\n", rc);
2028                 goto failed;
2029         }
2030
2031         CDEBUG(D_NET, "Max Initiator: %d Max Responder: %d\n",
2032                kibnal_data.kib_device_props.max_initiator_per_qp,
2033                kibnal_data.kib_device_props.max_responder_per_qp);
2034
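        /* Probe the HCA's ports (IB ports are numbered from 1) and use
         * the first one that reports its properties; only ports 1 and 2
         * are tried here. */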
2035         kibnal_data.kib_port = 0;
2036         for (i = 1; i <= 2; i++) {
2037                 rc = ib_port_properties_get(kibnal_data.kib_device, i,
2038                                             &kibnal_data.kib_port_props);
2039                 if (rc == 0) {
2040                         kibnal_data.kib_port = i;
2041                         break;
2042                 }
2043         }
2044         if (kibnal_data.kib_port == 0) {
2045                 CERROR ("Can't find a port\n");
2046                 goto failed;
2047         }
2048
2049         rc = ib_pd_create(kibnal_data.kib_device,
2050                           NULL, &kibnal_data.kib_pd);
2051         if (rc != 0) {
2052                 CERROR ("Can't create PD: %d\n", rc);
2053                 goto failed;
2054         }
2055         
2056         /* flag PD initialised */
2057         kibnal_data.kib_init = IBNAL_INIT_PD;
2058         /*****************************************************/
2059 #if IBNAL_FMR
2060         {
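                /* FMR pool sized so every tx descriptor (blocking and
                 * non-blocking) can hold a mapping, with enough pages
                 * per FMR for a full PTL_MTU payload; the pool starts
                 * flushing once 3/4 of the entries are dirty. */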
2061                 const int pool_size = IBNAL_NTX + IBNAL_NTX_NBLK;
2062                 struct ib_fmr_pool_param params = {
2063                         .max_pages_per_fmr = PTL_MTU/PAGE_SIZE,
2064                         .access            = (IB_ACCESS_LOCAL_WRITE |
2065                                               IB_ACCESS_REMOTE_WRITE |
2066                                               IB_ACCESS_REMOTE_READ),
2067                         .pool_size         = pool_size,
2068                         .dirty_watermark   = (pool_size * 3)/4,
2069                         .flush_function    = NULL,
2070                         .flush_arg         = NULL,
2071                         .cache             = 1,
2072                 };
2073                 rc = ib_fmr_pool_create(kibnal_data.kib_pd, &params,
2074                                         &kibnal_data.kib_fmr_pool);
2075                 if (rc != 0) {
2076                         CERROR ("Can't create FMR pool size %d: %d\n", 
2077                                 pool_size, rc);
2078                         goto failed;
2079                 }
2080         }
2081
2082         /* flag FMR pool initialised */
2083         kibnal_data.kib_init = IBNAL_INIT_FMR;
2084 #endif
2085         /*****************************************************/
2086
2087         rc = kibnal_setup_tx_descs();
2088         if (rc != 0) {
2089                 CERROR ("Can't register tx descs: %d\n", rc);
2090                 goto failed;
2091         }
2092         
2093         /* flag TX descs initialised */
2094         kibnal_data.kib_init = IBNAL_INIT_TXD;
2095         /*****************************************************/
2096         
2097         {
2098                 struct ib_cq_callback callback = {
2099                         .context        = IBNAL_CALLBACK_CTXT,
2100                         .policy         = IB_CQ_PROVIDER_REARM,
2101                         .function       = {
2102                                 .entry  = kibnal_callback,
2103                         },
2104                         .arg            = NULL,
2105                 };
2106                 int  nentries = IBNAL_CQ_ENTRIES;
2107                 
2108                 rc = ib_cq_create (kibnal_data.kib_device, 
2109                                    &nentries, &callback, NULL,
2110                                    &kibnal_data.kib_cq);
2111                 if (rc != 0) {
2112                         CERROR ("Can't create CQ: %d\n", rc);
2113                         goto failed;
2114                 }
2115
2116                 /* I only want solicited events */
2117                 rc = ib_cq_request_notification(kibnal_data.kib_cq, 1);
2118                 LASSERT (rc == 0);
2119         }
2120         
2121         /* flag CQ initialised */
2122         kibnal_data.kib_init = IBNAL_INIT_CQ;
2123         /*****************************************************/
2124         
2125         rc = libcfs_nal_cmd_register(OPENIBNAL, &kibnal_cmd, NULL);
2126         if (rc != 0) {
2127                 CERROR ("Can't initialise command interface (rc = %d)\n", rc);
2128                 goto failed;
2129         }
2130
2131         /* flag everything initialised */
2132         kibnal_data.kib_init = IBNAL_INIT_ALL;
2133         /*****************************************************/
2134
2135         printk(KERN_INFO "Lustre: OpenIB NAL loaded "
2136                "(initial mem %d)\n", pkmem);
2137
2138         return (PTL_OK);
2139
2140  failed:
2141         kibnal_api_shutdown (&kibnal_api);    
2142         return (PTL_FAIL);
2143 }
2144
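/* Module unload: drop the sysctl entries, release the network
 * interface reference taken at load time (the final reference triggers
 * the full shutdown), and unregister the NAL. */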
2145 void __exit
2146 kibnal_module_fini (void)
2147 {
2148         if (kibnal_tunables.kib_sysctl != NULL)
2149                 unregister_sysctl_table (kibnal_tunables.kib_sysctl);
2150         PtlNIFini(kibnal_ni);
2151
2152         ptl_unregister_nal(OPENIBNAL);
2153 }
2154
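/* Module load: assert the tunables are plain ints (as proc_dointvec()
 * requires), register the NAL, bring the network interface up
 * immediately so router-only nodes need no later PtlNIInit, and hook
 * the tunables up to sysctl. */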
2155 int __init
2156 kibnal_module_init (void)
2157 {
2158         int    rc;
2159
2160         /* the following must be sizeof(int) for proc_dointvec() */
2161         LASSERT (sizeof(kibnal_tunables.kib_io_timeout) == sizeof(int));
2162         LASSERT (sizeof(kibnal_tunables.kib_listener_timeout) == sizeof(int));
2163         LASSERT (sizeof(kibnal_tunables.kib_backlog) == sizeof(int));
2164         LASSERT (sizeof(kibnal_tunables.kib_port) == sizeof(int));
2165
2166         kibnal_api.nal_ni_init = kibnal_api_startup;
2167         kibnal_api.nal_ni_fini = kibnal_api_shutdown;
2168
2169         /* Initialise dynamic tunables to defaults once only */
2170         kibnal_tunables.kib_io_timeout = IBNAL_IO_TIMEOUT;
2171         kibnal_tunables.kib_listener_timeout = IBNAL_LISTENER_TIMEOUT;
2172         kibnal_tunables.kib_backlog = IBNAL_BACKLOG;
2173         kibnal_tunables.kib_port = IBNAL_PORT;
2174
2175         rc = ptl_register_nal(OPENIBNAL, &kibnal_api);
2176         if (rc != PTL_OK) {
2177                 CERROR("Can't register IBNAL: %d\n", rc);
2178                 return (-ENOMEM);               /* or something... */
2179         }
2180
2181         /* Pure gateways want the NAL started up at module load time... */
2182         rc = PtlNIInit(OPENIBNAL, LUSTRE_SRV_PTL_PID, NULL, NULL, &kibnal_ni);
2183         if (rc != PTL_OK && rc != PTL_IFACE_DUP) {
2184                 ptl_unregister_nal(OPENIBNAL);
2185                 return (-ENODEV);
2186         }
2187         
2188         kibnal_tunables.kib_sysctl = 
2189                 register_sysctl_table (kibnal_top_ctl_table, 0);
2190         if (kibnal_tunables.kib_sysctl == NULL) {
2191                 CERROR("Can't register sysctl table\n");
2192                 PtlNIFini(kibnal_ni);
2193                 ptl_unregister_nal(OPENIBNAL);
2194                 return (-ENOMEM);
2195         }
2196
2197         return (0);
2198 }
2199
2200 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
2201 MODULE_DESCRIPTION("Kernel OpenIB NAL v0.01");
2202 MODULE_LICENSE("GPL");
2203
2204 module_init(kibnal_module_init);
2205 module_exit(kibnal_module_fini);
2206