Whamcloud - gitweb
* Made openib not use the subnet manager to discover connection parameters
[fs/lustre-release.git] / lnet / klnds / openiblnd / openiblnd.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Copyright (C) 2004 Cluster File Systems, Inc.
5  *   Author: Eric Barton <eric@bartonsoftware.com>
6  *
7  *   This file is part of Lustre, http://www.lustre.org.
8  *
9  *   Lustre is free software; you can redistribute it and/or
10  *   modify it under the terms of version 2 of the GNU General Public
11  *   License as published by the Free Software Foundation.
12  *
13  *   Lustre is distributed in the hope that it will be useful,
14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *   GNU General Public License for more details.
17  *
18  *   You should have received a copy of the GNU General Public License
19  *   along with Lustre; if not, write to the Free Software
20  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  */
23
24 #include "openibnal.h"
25
/* Global state for the OpenIB LND: NAL/API handles, network-interface
 * handle, per-module data and the sysctl-visible tunables. */
nal_t                   kibnal_api;
ptl_handle_ni_t         kibnal_ni;
kib_data_t              kibnal_data;
kib_tunables_t          kibnal_tunables;
30
/* sysctl root ctl_name for the "openibnal" directory (see
 * kibnal_top_ctl_table below) */
#define IBNAL_SYSCTL             202

/* ctl_name values for the individual entries in kibnal_ctl_table */
enum {
        IBNAL_SYSCTL_TIMEOUT=1,
        IBNAL_SYSCTL_LISTENER_TIMEOUT,
        IBNAL_SYSCTL_BACKLOG,
        IBNAL_SYSCTL_PORT
};
39
/* /proc/sys tunables (old positional ctl_table layout: {ctl_name,
 * procname, data, maxlen, mode, child, proc_handler}).  "timeout" and
 * "listener_timeout" are plain integers; "backlog" and "port" go through
 * kibnal_listener_procint() so the IP listener is restarted when they
 * change. */
static ctl_table kibnal_ctl_table[] = {
        {IBNAL_SYSCTL_TIMEOUT, "timeout", 
         &kibnal_tunables.kib_io_timeout, sizeof (int),
         0644, NULL, &proc_dointvec},
        {IBNAL_SYSCTL_LISTENER_TIMEOUT, "listener_timeout", 
         &kibnal_tunables.kib_listener_timeout, sizeof(int),
         0644, NULL, &proc_dointvec},
        {IBNAL_SYSCTL_BACKLOG, "backlog",
         &kibnal_tunables.kib_backlog, sizeof(int),
         0644, NULL, kibnal_listener_procint},
        {IBNAL_SYSCTL_PORT, "port",
         &kibnal_tunables.kib_port, sizeof(int),
         0644, NULL, kibnal_listener_procint},
        { 0 }
};
55
/* Root of the sysctl tree: /proc/sys/openibnal -> kibnal_ctl_table */
static ctl_table kibnal_top_ctl_table[] = {
        {IBNAL_SYSCTL, "openibnal", NULL, 0, 0555, kibnal_ctl_table},
        { 0 }
};
60
61 __u32 
62 kibnal_cksum (void *ptr, int nob)
63 {
64         char  *c  = ptr;
65         __u32  sum = 0;
66
67         while (nob-- > 0)
68                 sum = ((sum << 1) | (sum >> 31)) + *c++;
69
70         /* ensure I don't return 0 (== no checksum) */
71         return (sum == 0) ? 1 : sum;
72 }
73
74 void
75 kibnal_init_msg(kib_msg_t *msg, int type, int body_nob)
76 {
77         msg->ibm_type = type;
78         msg->ibm_nob  = offsetof(kib_msg_t, ibm_u) + body_nob;
79 }
80
/* Fill in the header fields common to every message type and, when
 * IBNAL_CKSUM is configured, checksum the whole message ready for the
 * wire.  NB ibm_type and ibm_nob must already have been set by
 * kibnal_init_msg(). */
void
kibnal_pack_msg(kib_msg_t *msg, int credits, ptl_nid_t dstnid, __u64 dststamp)
{
        /* CAVEAT EMPTOR! all message fields not set here should have been
         * initialised previously. */
        msg->ibm_magic    = IBNAL_MSG_MAGIC;
        msg->ibm_version  = IBNAL_MSG_VERSION;
        /*   ibm_type */
        msg->ibm_credits  = credits;
        /*   ibm_nob */
        msg->ibm_cksum    = 0;
        msg->ibm_srcnid   = kibnal_lib.libnal_ni.ni_pid.nid;
        msg->ibm_srcstamp = kibnal_data.kib_incarnation;
        msg->ibm_dstnid   = dstnid;
        msg->ibm_dststamp = dststamp;
#if IBNAL_CKSUM
        /* NB ibm_cksum zero while computing cksum */
        msg->ibm_cksum    = kibnal_cksum(msg, msg->ibm_nob);
#endif
}
101
/* Validate an incoming message of 'nob' received bytes and convert it to
 * host byte order if the sender was opposite-endian.  Checks magic,
 * version, length, optional checksum, then per-type payload size/fields.
 * Returns 0 on success or -EPROTO on any protocol violation. */
int
kibnal_unpack_msg(kib_msg_t *msg, int nob)
{
        const int hdr_size = offsetof(kib_msg_t, ibm_u);
        __u32     msg_cksum;
        int       flip;
        int       msg_nob;

        /* 6 bytes are enough to sniff endianness: assumed to cover
         * ibm_magic + ibm_version at the head of the struct — confirm
         * against the kib_msg_t layout in the header */
        if (nob < 6) {
                CERROR("Short message: %d\n", nob);
                return -EPROTO;
        }

        /* determine sender endianness from the magic */
        if (msg->ibm_magic == IBNAL_MSG_MAGIC) {
                flip = 0;
        } else if (msg->ibm_magic == __swab32(IBNAL_MSG_MAGIC)) {
                flip = 1;
        } else {
                CERROR("Bad magic: %08x\n", msg->ibm_magic);
                return -EPROTO;
        }

        if (msg->ibm_version != 
            (flip ? __swab16(IBNAL_MSG_VERSION) : IBNAL_MSG_VERSION)) {
                CERROR("Bad version: %d\n", msg->ibm_version);
                return -EPROTO;
        }

        if (nob < hdr_size) {
                CERROR("Short message: %d\n", nob);
                return -EPROTO;
        }

        msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
        if (msg_nob > nob) {
                CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
                return -EPROTO;
        }

        /* checksum must be computed with ibm_cksum zero and BEFORE anything
         * gets flipped */
        msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
        msg->ibm_cksum = 0;
        /* cksum == 0 means the sender didn't checksum (see kibnal_cksum) */
        if (msg_cksum != 0 &&
            msg_cksum != kibnal_cksum(msg, msg_nob)) {
                CERROR("Bad checksum\n");
                return -EPROTO;
        }
        msg->ibm_cksum = msg_cksum;
        
        if (flip) {
                /* leave magic unflipped as a clue to peer endianness */
                __swab16s(&msg->ibm_version);
                LASSERT (sizeof(msg->ibm_type) == 1);
                LASSERT (sizeof(msg->ibm_credits) == 1);
                msg->ibm_nob = msg_nob;
                __swab64s(&msg->ibm_srcnid);
                __swab64s(&msg->ibm_srcstamp);
                __swab64s(&msg->ibm_dstnid);
                __swab64s(&msg->ibm_dststamp);
        }
        
        /* per-type payload validation and byte-swapping */
        switch (msg->ibm_type) {
        default:
                CERROR("Unknown message type %x\n", msg->ibm_type);
                return -EPROTO;
                
        case IBNAL_MSG_SVCQRY:
        case IBNAL_MSG_NOOP:
                /* header only; nothing more to check */
                break;

        case IBNAL_MSG_SVCRSP:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.svcrsp)) {
                        CERROR("Short SVCRSP: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.svcrsp)));
                        return -EPROTO;
                }
                if (flip) {
                        __swab64s(&msg->ibm_u.svcrsp.ibsr_svc_id);
                        __swab16s(&msg->ibm_u.svcrsp.ibsr_svc_pkey);
                }
                break;

        case IBNAL_MSG_CONNREQ:
        case IBNAL_MSG_CONNACK:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.connparams)) {
                        CERROR("Short CONNREQ: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.connparams)));
                        return -EPROTO;
                }
                if (flip)
                        __swab32s(&msg->ibm_u.connparams.ibcp_queue_depth);
                break;

        case IBNAL_MSG_IMMEDIATE:
                if (msg_nob < offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0])) {
                        CERROR("Short IMMEDIATE: %d(%d)\n", msg_nob,
                               (int)offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]));
                        return -EPROTO;
                }
                break;

        case IBNAL_MSG_PUT_RDMA:
        case IBNAL_MSG_GET_RDMA:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.rdma)) {
                        CERROR("Short RDMA req: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.rdma)));
                        return -EPROTO;
                }
                if (flip) {
                        __swab32s(&msg->ibm_u.rdma.ibrm_desc.rd_key);
                        __swab32s(&msg->ibm_u.rdma.ibrm_desc.rd_nob);
                        __swab64s(&msg->ibm_u.rdma.ibrm_desc.rd_addr);
                }
                break;

        case IBNAL_MSG_PUT_DONE:
        case IBNAL_MSG_GET_DONE:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.completion)) {
                        CERROR("Short RDMA completion: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.completion)));
                        return -EPROTO;
                }
                if (flip)
                        __swab32s(&msg->ibm_u.completion.ibcm_status);
                break;
        }
        return 0;
}
231
/* Send 'nob' bytes from 'buffer' with a single non-blocking sendmsg.
 * Returns 0 when everything was sent, -EAGAIN on a partial/zero send,
 * or the negative socket error. */
int
kibnal_sock_write (struct socket *sock, void *buffer, int nob)
{
        int           rc;
        mm_segment_t  oldmm = get_fs();
        struct iovec  iov = {
                .iov_base = buffer,
                .iov_len  = nob
        };
        struct msghdr msg = {
                .msg_name       = NULL,
                .msg_namelen    = 0,
                .msg_iov        = &iov,
                .msg_iovlen     = 1,
                .msg_control    = NULL,
                .msg_controllen = 0,
                .msg_flags      = MSG_DONTWAIT
        };

        /* We've set up the socket's send buffer to be large enough for
         * everything we send, so a single non-blocking send should
         * complete without error. */

        /* KERNEL_DS lets sock_sendmsg accept a kernel-space iovec */
        set_fs(KERNEL_DS);
        rc = sock_sendmsg(sock, &msg, iov.iov_len);
        set_fs(oldmm);

        if (rc == nob)
                return 0;

        if (rc >= 0)
                return -EAGAIN;

        return rc;
}
267
/* Read exactly 'nob' bytes into 'buffer', allowing up to 'timeout'
 * seconds in total across however many recvs it takes.  Returns 0 on
 * success, -ETIMEDOUT when the time budget is exhausted, -ECONNABORTED
 * if the peer closed, or the negative socket error. */
int
kibnal_sock_read (struct socket *sock, void *buffer, int nob, int timeout)
{
        int            rc;
        mm_segment_t   oldmm = get_fs();
        long           ticks = timeout * HZ;   /* remaining budget in jiffies */
        unsigned long  then;
        struct timeval tv;

        LASSERT (nob > 0);
        LASSERT (ticks > 0);

        for (;;) {
                struct iovec  iov = {
                        .iov_base = buffer,
                        .iov_len  = nob
                };
                struct msghdr msg = {
                        .msg_name       = NULL,
                        .msg_namelen    = 0,
                        .msg_iov        = &iov,
                        .msg_iovlen     = 1,
                        .msg_control    = NULL,
                        .msg_controllen = 0,
                        .msg_flags      = 0
                };

                /* Set receive timeout to remaining time */
                tv = (struct timeval) {
                        .tv_sec = ticks / HZ,
                        .tv_usec = ((ticks % HZ) * 1000000) / HZ
                };
                set_fs(KERNEL_DS);
                rc = sock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
                                     (char *)&tv, sizeof(tv));
                set_fs(oldmm);
                if (rc != 0) {
                        CERROR("Can't set socket recv timeout %d: %d\n",
                               timeout, rc);
                        return rc;
                }

                /* charge the time spent in recvmsg against the budget */
                set_fs(KERNEL_DS);
                then = jiffies;
                rc = sock_recvmsg(sock, &msg, iov.iov_len, 0);
                ticks -= jiffies - then;
                set_fs(oldmm);

                if (rc < 0)
                        return rc;

                if (rc == 0)
                        return -ECONNABORTED;

                buffer = ((char *)buffer) + rc;
                nob -= rc;

                if (nob == 0)
                        return 0;

                if (ticks <= 0)
                        return -ETIMEDOUT;
        }
}
332
/* Create a TCP socket configured for the svcqry protocol: the send
 * buffer is sized so kibnal_sock_write()'s single non-blocking send
 * can't fail for lack of space, and SO_REUSEADDR is set so reserved
 * ports can be rebound promptly.  On success *sockp is set and 0
 * returned; on failure the socket is released and -ve errno returned. */
int
kibnal_create_sock(struct socket **sockp)
{
        struct socket       *sock;
        int                  rc;
        int                  option;
        mm_segment_t         oldmm = get_fs();

        rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
        if (rc != 0) {
                CERROR("Can't create socket: %d\n", rc);
                return rc;
        }

        /* Ensure sends will not block */
        option = 2 * sizeof(kib_msg_t);
        set_fs(KERNEL_DS);
        rc = sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
                             (char *)&option, sizeof(option));
        set_fs(oldmm);
        if (rc != 0) {
                CERROR("Can't set send buffer %d: %d\n", option, rc);
                goto failed;
        }

        option = 1;
        set_fs(KERNEL_DS);
        rc = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
                             (char *)&option, sizeof(option));
        set_fs(oldmm);
        if (rc != 0) {
                CERROR("Can't set SO_REUSEADDR: %d\n", rc);
                goto failed;
        }

        *sockp = sock;
        return 0;

 failed:
        sock_release(sock);
        return rc;
}
375
/* Sleep uninterruptibly for 'ticks' jiffies */
void
kibnal_pause(int ticks)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(ticks);
}
382
/* Open a TCP connection to 'peer''s svcqry listener, binding the local
 * end to a privileged port (walking 1023 down to 512) so the peer can
 * trust the connection.  On success *sockp is set and 0 returned;
 * -EHOSTUNREACH means every reserved port was busy; other -ve errnos
 * are bind/connect failures. */
int
kibnal_connect_sock(kib_peer_t *peer, struct socket **sockp)
{
        struct sockaddr_in  locaddr;
        struct sockaddr_in  srvaddr;
        struct socket      *sock;
        unsigned int        port;
        int                 rc;

        for (port = 1023; port >= 512; port--) {

                memset(&locaddr, 0, sizeof(locaddr)); 
                locaddr.sin_family      = AF_INET; 
                locaddr.sin_port        = htons(port);
                locaddr.sin_addr.s_addr = htonl(INADDR_ANY);

                memset (&srvaddr, 0, sizeof (srvaddr));
                srvaddr.sin_family      = AF_INET;
                srvaddr.sin_port        = htons (peer->ibp_port);
                srvaddr.sin_addr.s_addr = htonl (peer->ibp_ip);

                /* fresh socket per attempt: a failed bind/connect leaves
                 * the old one unusable */
                rc = kibnal_create_sock(&sock);
                if (rc != 0)
                        return rc;

                rc = sock->ops->bind(sock,
                                     (struct sockaddr *)&locaddr, sizeof(locaddr));
                if (rc != 0) {
                        sock_release(sock);
                        
                        if (rc == -EADDRINUSE) {
                                CDEBUG(D_NET, "Port %d already in use\n", port);
                                continue;
                        }

                        CERROR("Can't bind to reserved port %d: %d\n", port, rc);
                        return rc;
                }

                rc = sock->ops->connect(sock,
                                        (struct sockaddr *)&srvaddr, sizeof(srvaddr),
                                        0);
                if (rc == 0) {
                        *sockp = sock;
                        return 0;
                }
                
                sock_release(sock);

                /* -EADDRNOTAVAIL: this local port can't reach the peer;
                 * try the next one down */
                if (rc != -EADDRNOTAVAIL) {
                        CERROR("Can't connect port %d to %u.%u.%u.%u/%d: %d\n",
                               port, HIPQUAD(peer->ibp_ip), peer->ibp_port, rc);
                        return rc;
                }
                
                CDEBUG(D_NET, "Port %d not available for %u.%u.%u.%u/%d\n", 
                       port, HIPQUAD(peer->ibp_ip), peer->ibp_port);
        }

        /* all ports busy */
        return -EHOSTUNREACH;
}
445
/* Ask the peer for its IB connection parameters over TCP (instead of
 * using the subnet manager): connect, send a SVCQRY, read back and
 * validate the SVCRSP, then record the peer's incarnation and service
 * response in conn->ibc_connreq.  Returns 0 or -ve errno. */
int
kibnal_make_svcqry (kib_conn_t *conn) 
{
        kib_peer_t    *peer = conn->ibc_peer;
        kib_msg_t     *msg;
        struct socket *sock;
        int            rc;
        int            nob;

        LASSERT (conn->ibc_connreq != NULL);
        /* cr_msg doubles as both the outgoing query and the reply buffer */
        msg = &conn->ibc_connreq->cr_msg;

        kibnal_init_msg(msg, IBNAL_MSG_SVCQRY, 0);
        kibnal_pack_msg(msg, 0, peer->ibp_nid, 0);

        rc = kibnal_connect_sock(peer, &sock);
        if (rc != 0)
                return rc;
        
        rc = kibnal_sock_write(sock, msg, msg->ibm_nob);
        if (rc != 0) {
                CERROR("Error %d sending svcqry to "
                       LPX64"@%u.%u.%u.%u/%d\n", rc, 
                       peer->ibp_nid, HIPQUAD(peer->ibp_ip), peer->ibp_port);
                goto out;
        }

        nob = offsetof(kib_msg_t, ibm_u) + sizeof(msg->ibm_u.svcrsp);
        rc = kibnal_sock_read(sock, msg, nob, kibnal_tunables.kib_io_timeout);
        if (rc != 0) {
                CERROR("Error %d receiving svcrsp from "
                       LPX64"@%u.%u.%u.%u/%d\n", rc, 
                       peer->ibp_nid, HIPQUAD(peer->ibp_ip), peer->ibp_port);
                goto out;
        }

        rc = kibnal_unpack_msg(msg, nob);
        if (rc != 0) {
                CERROR("Error %d unpacking svcrsp from "
                       LPX64"@%u.%u.%u.%u/%d\n", rc,
                       peer->ibp_nid, HIPQUAD(peer->ibp_ip), peer->ibp_port);
                goto out;
        }
                       
        if (msg->ibm_type != IBNAL_MSG_SVCRSP) {
                CERROR("Unexpected response type %d from "
                       LPX64"@%u.%u.%u.%u/%d\n", msg->ibm_type, 
                       peer->ibp_nid, HIPQUAD(peer->ibp_ip), peer->ibp_port);
                rc = -EPROTO;
                goto out;
        }
        
        /* reply must be addressed to me, this incarnation */
        if (msg->ibm_dstnid != kibnal_lib.libnal_ni.ni_pid.nid ||
            msg->ibm_dststamp != kibnal_data.kib_incarnation) {
                CERROR("Unexpected dst NID/stamp "LPX64"/"LPX64" from "
                       LPX64"@%u.%u.%u.%u/%d\n", 
                       msg->ibm_dstnid, msg->ibm_dststamp,
                       peer->ibp_nid, HIPQUAD(peer->ibp_ip), peer->ibp_port);
                rc = -EPROTO;
                goto out;
        }

        if (msg->ibm_srcnid != peer->ibp_nid) {
                CERROR("Unexpected src NID "LPX64" from "
                       LPX64"@%u.%u.%u.%u/%d\n", msg->ibm_srcnid,
                       peer->ibp_nid, HIPQUAD(peer->ibp_ip), peer->ibp_port);
                rc = -EPROTO;
                goto out;
        }

        conn->ibc_incarnation = msg->ibm_srcstamp;
        conn->ibc_connreq->cr_svcrsp = msg->ibm_u.svcrsp;
 out:
        sock_release(sock);
        return rc;
}
522
523 void
524 kibnal_handle_svcqry (struct socket *sock)
525 {
526         struct sockaddr_in   addr;
527         __u32                peer_ip;
528         unsigned int         peer_port;
529         kib_msg_t           *msg;
530         __u64                srcnid;
531         __u64                srcstamp;
532         int                  len;
533         int                  rc;
534
535         len = sizeof(addr);
536         rc = sock->ops->getname(sock, (struct sockaddr *)&addr, &len, 2);
537         if (rc != 0) {
538                 CERROR("Can't get peer's IP: %d\n", rc);
539                 return;
540         }
541
542         peer_ip = ntohl(addr.sin_addr.s_addr);
543         peer_port = ntohs(addr.sin_port);
544
545         if (peer_port >= 1024) {
546                 CERROR("Refusing unprivileged connection from %u.%u.%u.%u/%d\n",
547                        HIPQUAD(peer_ip), peer_port);
548                 return;
549         }
550
551         PORTAL_ALLOC(msg, sizeof(*msg));
552         if (msg == NULL) {
553                 CERROR("Can't allocate msgs for %u.%u.%u.%u/%d\n",
554                        HIPQUAD(peer_ip), peer_port);
555                 goto out;
556         }
557         
558         rc = kibnal_sock_read(sock, msg, offsetof(kib_msg_t, ibm_u),
559                               kibnal_tunables.kib_listener_timeout);
560         if (rc != 0) {
561                 CERROR("Error %d receiving svcqry from %u.%u.%u.%u/%d\n",
562                        rc, HIPQUAD(peer_ip), peer_port);
563                 goto out;
564         }
565         
566         rc = kibnal_unpack_msg(msg, offsetof(kib_msg_t, ibm_u));
567         if (rc != 0) {
568                 CERROR("Error %d unpacking svcqry from %u.%u.%u.%u/%d\n",
569                        rc, HIPQUAD(peer_ip), peer_port);
570                 goto out;
571         }
572         
573         if (msg->ibm_type != IBNAL_MSG_SVCQRY) {
574                 CERROR("Unexpected message %d from %u.%u.%u.%u/%d\n",
575                        msg->ibm_type, HIPQUAD(peer_ip), peer_port);
576                 goto out;
577         }
578         
579         if (msg->ibm_dstnid != kibnal_lib.libnal_ni.ni_pid.nid) {
580                 CERROR("Unexpected dstnid "LPX64"(expected "LPX64" "
581                        "from %u.%u.%u.%u/%d\n", msg->ibm_dstnid,
582                        kibnal_lib.libnal_ni.ni_pid.nid,
583                        HIPQUAD(peer_ip), peer_port);
584                 goto out;
585         }
586
587         srcnid = msg->ibm_srcnid;
588         srcstamp = msg->ibm_srcstamp;
589         
590         kibnal_init_msg(msg, IBNAL_MSG_SVCRSP, sizeof(msg->ibm_u.svcrsp));
591
592         msg->ibm_u.svcrsp.ibsr_svc_id = kibnal_data.kib_svc_id;
593         memcpy(msg->ibm_u.svcrsp.ibsr_svc_gid, kibnal_data.kib_svc_gid,
594                sizeof(kibnal_data.kib_svc_gid));
595         msg->ibm_u.svcrsp.ibsr_svc_pkey = kibnal_data.kib_svc_pkey;
596
597         kibnal_pack_msg(msg, 0, srcnid, srcstamp);
598         
599         rc = kibnal_sock_write (sock, msg, msg->ibm_nob);
600         if (rc != 0) {
601                 CERROR("Error %d replying to svcqry from %u.%u.%u.%u/%d\n",
602                        rc, HIPQUAD(peer_ip), peer_port);
603                 goto out;
604         }
605         
606  out:
607         PORTAL_FREE(msg, sizeof(*msg));
608 }
609
/* Release the socket held by an accepted-connection descriptor and free
 * the descriptor itself */
void
kibnal_free_acceptsock (kib_acceptsock_t *as)
{
        sock_release(as->ibas_sock);
        PORTAL_FREE(as, sizeof(*as));
}
616
/* Listener thread: binds and listens on kib_port, then loops accepting
 * TCP connections and queueing them on kib_connd_acceptq for the connd
 * thread to service (svcqry handling).  Signals the parent via
 * kib_listener_signal once startup has succeeded or failed, and exits
 * when kib_listener_shutdown is set, reporting final status the same
 * way. */
int
kibnal_ip_listener(void *arg)
{
        struct sockaddr_in addr;
        wait_queue_t       wait;
        struct socket     *sock;
        kib_acceptsock_t  *as;
        int                port;
        char               name[16];
        int                rc;
        unsigned long      flags;

        /* Parent thread holds kib_nid_mutex, and is, or is about to
         * block on kib_listener_signal */

        port = kibnal_tunables.kib_port;
        snprintf(name, sizeof(name), "kibnal_lstn%03d", port);
        kportal_daemonize(name);
        kportal_blockallsigs();

        init_waitqueue_entry(&wait, current);

        rc = kibnal_create_sock(&sock);
        if (rc != 0)
                goto out_0;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family      = AF_INET;
        addr.sin_port        = htons(port);
        addr.sin_addr.s_addr = INADDR_ANY;

        rc = sock->ops->bind(sock, (struct sockaddr *)&addr, sizeof(addr));
        if (rc != 0) {
                CERROR("Can't bind to port %d\n", port);
                goto out_1;
        }

        rc = sock->ops->listen(sock, kibnal_tunables.kib_backlog);
        if (rc != 0) {
                CERROR("Can't set listen backlog %d: %d\n", 
                       kibnal_tunables.kib_backlog, rc);
                goto out_1;
        }

        /* publish the listening socket so the stopper can find it */
        LASSERT (kibnal_data.kib_listener_sock == NULL);
        kibnal_data.kib_listener_sock = sock;

        /* unblock waiting parent */
        LASSERT (kibnal_data.kib_listener_shutdown == 0);
        up(&kibnal_data.kib_listener_signal);

        /* Wake me any time something happens on my socket */
        add_wait_queue(sock->sk->sk_sleep, &wait);
        as = NULL;

        while (kibnal_data.kib_listener_shutdown == 0) {

                /* 'as' persists across failed iterations so a prepared
                 * descriptor/socket is reused rather than reallocated */
                if (as == NULL) {
                        PORTAL_ALLOC(as, sizeof(*as));
                        if (as == NULL) {
                                CERROR("Out of Memory: pausing...\n");
                                kibnal_pause(HZ);
                                continue;
                        }
                        as->ibas_sock = NULL;
                }

                if (as->ibas_sock == NULL) {
                        as->ibas_sock = sock_alloc();
                        if (as->ibas_sock == NULL) {
                                CERROR("Can't allocate socket: pausing...\n");
                                kibnal_pause(HZ);
                                continue;
                        }
                        /* XXX this should add a ref to sock->ops->owner, if
                         * TCP could be a module */
                        as->ibas_sock->type = sock->type;
                        as->ibas_sock->ops = sock->ops;
                }
                
                /* INTERRUPTIBLE before accept so a wakeup between the
                 * failed accept and schedule() isn't lost */
                set_current_state(TASK_INTERRUPTIBLE);

                rc = sock->ops->accept(sock, as->ibas_sock, O_NONBLOCK);

                /* Sleep for socket activity? */
                if (rc == -EAGAIN &&
                    kibnal_data.kib_listener_shutdown == 0)
                        schedule();

                set_current_state(TASK_RUNNING);

                if (rc == 0) {
                        /* hand the accepted connection to connd */
                        spin_lock_irqsave(&kibnal_data.kib_connd_lock, flags);
                        
                        list_add_tail(&as->ibas_list, 
                                      &kibnal_data.kib_connd_acceptq);

                        spin_unlock_irqrestore(&kibnal_data.kib_connd_lock, flags);
                        wake_up(&kibnal_data.kib_connd_waitq);

                        as = NULL;
                        continue;
                }
                
                if (rc != -EAGAIN) {
                        CERROR("Accept failed: %d, pausing...\n", rc);
                        kibnal_pause(HZ);
                }
        }

        /* free any descriptor prepared but never used */
        if (as != NULL) {
                if (as->ibas_sock != NULL)
                        sock_release(as->ibas_sock);
                PORTAL_FREE(as, sizeof(*as));
        }

        rc = 0;
        remove_wait_queue(sock->sk->sk_sleep, &wait);
 out_1:
        sock_release(sock);
        kibnal_data.kib_listener_sock = NULL;
 out_0:
        /* set completion status and unblock thread waiting for me 
         * (parent on startup failure, executioner on normal shutdown) */
        kibnal_data.kib_listener_shutdown = rc;
        up(&kibnal_data.kib_listener_signal);

        return 0;
}
746
747 int
748 kibnal_start_ip_listener (void)
749 {
750         long           pid;
751         int            rc;
752
753         CDEBUG(D_WARNING, "Starting listener\n");
754
755         /* Called holding kib_nid_mutex: listener stopped */
756         LASSERT (kibnal_data.kib_listener_sock == NULL);
757
758         kibnal_data.kib_listener_shutdown = 0;
759         pid = kernel_thread(kibnal_ip_listener, NULL, 0);
760         if (pid < 0) {
761                 CERROR("Can't spawn listener: %ld\n", pid);
762                 return (int)pid;
763         }
764
765         /* Block until listener has started up. */
766         down(&kibnal_data.kib_listener_signal);
767
768         rc = kibnal_data.kib_listener_shutdown;
769         LASSERT ((rc != 0) == (kibnal_data.kib_listener_sock == NULL));
770
771         CDEBUG(D_WARNING, "Listener %ld started OK\n", pid);
772         return rc;
773 }
774
/* Shut down the IP listener thread and wait for it to exit.  Must be
 * called holding kib_nid_mutex with the listener running.  If
 * 'clear_acceptq', also drains and frees any accepted connections connd
 * has not yet serviced. */
void
kibnal_stop_ip_listener(int clear_acceptq)
{
        struct list_head  zombie_accepts;
        kib_acceptsock_t *as;
        unsigned long     flags;

        CDEBUG(D_WARNING, "Stopping listener\n");

        /* Called holding kib_nid_mutex: listener running */
        LASSERT (kibnal_data.kib_listener_sock != NULL);

        kibnal_data.kib_listener_shutdown = 1;
        wake_up_all(kibnal_data.kib_listener_sock->sk->sk_sleep);

        /* Block until listener has torn down. */
        down(&kibnal_data.kib_listener_signal);

        LASSERT (kibnal_data.kib_listener_sock == NULL);
        CDEBUG(D_WARNING, "Listener stopped\n");

        if (!clear_acceptq)
                return;

        /* Close any unhandled accepts */
        spin_lock_irqsave(&kibnal_data.kib_connd_lock, flags);

        /* steal the whole acceptq onto a private list under the lock */
        list_add(&zombie_accepts, &kibnal_data.kib_connd_acceptq);
        list_del_init(&kibnal_data.kib_connd_acceptq);

        spin_unlock_irqrestore(&kibnal_data.kib_connd_lock, flags);
        
        while (!list_empty(&zombie_accepts)) {
                as = list_entry(zombie_accepts.next,
                                kib_acceptsock_t, ibas_list);
                list_del(&as->ibas_list);
                kibnal_free_acceptsock(as);
        }
}
814
/* sysctl handler for the "port" and "backlog" tunables: applies the
 * write via proc_dointvec under kib_nid_mutex and restarts the IP
 * listener when the value changed (or no listener was running).  If the
 * restart fails, reverts to the old value and restarts with that. */
int 
kibnal_listener_procint(ctl_table *table, int write, struct file *filp,
                        void *buffer, size_t *lenp)
{
        int   *tunable = (int *)table->data;
        int    old_val;
        int    rc;

        /* No race with nal initialisation since the nal is setup all the time
         * it's loaded.  When that changes, change this! */
        LASSERT (kibnal_data.kib_init == IBNAL_INIT_ALL);

        down(&kibnal_data.kib_nid_mutex);

        LASSERT (tunable == &kibnal_tunables.kib_port ||
                 tunable == &kibnal_tunables.kib_backlog);
        old_val = *tunable;

        rc = proc_dointvec(table, write, filp, buffer, lenp);

        if (write &&
            (*tunable != old_val ||
             kibnal_data.kib_listener_sock == NULL)) {

                if (kibnal_data.kib_listener_sock != NULL)
                        kibnal_stop_ip_listener(0);

                rc = kibnal_start_ip_listener();

                if (rc != 0) {
                        CERROR("Unable to restart listener with new tunable:"
                               " reverting to old value\n");
                        *tunable = old_val;
                        /* best-effort: result of the revert-restart ignored */
                        kibnal_start_ip_listener();
                }
        }

        up(&kibnal_data.kib_nid_mutex);

        LASSERT (kibnal_data.kib_init == IBNAL_INIT_ALL);
        return rc;
}
857
/* Start listening for IB connection requests: assign a CM service id,
 * cache this port's GID and PKEY (served to peers via svcqry replies),
 * and register kibnal_passive_conn_callback with the CM.  Returns 0 or
 * -ve errno. */
int
kibnal_start_ib_listener (void) 
{
        int    rc;

        LASSERT (kibnal_data.kib_listen_handle == NULL);

        kibnal_data.kib_svc_id = ib_cm_service_assign();
        CDEBUG(D_NET, "svc id "LPX64"\n", kibnal_data.kib_svc_id);

        rc = ib_cached_gid_get(kibnal_data.kib_device,
                               kibnal_data.kib_port, 0,
                               kibnal_data.kib_svc_gid);
        if (rc != 0) {
                CERROR("Can't get port %d GID: %d\n",
                       kibnal_data.kib_port, rc);
                return rc;
        }
        
        rc = ib_cached_pkey_get(kibnal_data.kib_device,
                                kibnal_data.kib_port, 0,
                                &kibnal_data.kib_svc_pkey);
        if (rc != 0) {
                CERROR ("Can't get port %d PKEY: %d\n",
                        kibnal_data.kib_port, rc);
                return rc;
        }

        rc = ib_cm_listen(kibnal_data.kib_svc_id,
                          TS_IB_CM_SERVICE_EXACT_MASK,
                          kibnal_passive_conn_callback, NULL,
                          &kibnal_data.kib_listen_handle);
        if (rc != 0) {
                kibnal_data.kib_listen_handle = NULL;
                CERROR ("Can't create IB listener: %d\n", rc);
                return rc;
        }
        
        LASSERT (kibnal_data.kib_listen_handle != NULL);
        return 0;
}
899
900 void
901 kibnal_stop_ib_listener (void) 
902 {
903         int    rc;
904         
905         LASSERT (kibnal_data.kib_listen_handle != NULL);
906
907         rc = ib_cm_listen_stop (kibnal_data.kib_listen_handle);
908         if (rc != 0)
909                 CERROR("Error stopping IB listener: %d\n", rc);
910                 
911         kibnal_data.kib_listen_handle = NULL;
912 }
913
int
kibnal_set_mynid (ptl_nid_t nid)
{
        /* Install a new NID: stop both listeners, bump the incarnation,
         * nuke every peer/connection belonging to the old identity, then
         * (unless the new NID is PTL_NID_ANY) restart the listeners.  On
         * listener failure the NID is reset to PTL_NID_ANY and the world
         * is nuked again.  Serialised by kib_nid_mutex. */
        lib_ni_t         *ni = &kibnal_lib.libnal_ni;
        int               rc;

        CDEBUG(D_IOCTL, "setting mynid to "LPX64" (old nid="LPX64")\n",
               nid, ni->ni_pid.nid);

        down (&kibnal_data.kib_nid_mutex);

        /* NOTE(review): the no-op check compares against kib_nid but the
         * value installed below is ni->ni_pid.nid — presumably the two are
         * kept in sync elsewhere; verify. */
        if (nid == kibnal_data.kib_nid) {
                /* no change of NID */
                up (&kibnal_data.kib_nid_mutex);
                return (0);
        }

        CDEBUG(D_NET, "NID "LPX64"("LPX64")\n",
               kibnal_data.kib_nid, nid);

        /* stop accepting new connections under the old identity */
        if (kibnal_data.kib_listener_sock != NULL)
                kibnal_stop_ip_listener(1);
        
        if (kibnal_data.kib_listen_handle != NULL)
                kibnal_stop_ib_listener();

        ni->ni_pid.nid = nid;
        kibnal_data.kib_incarnation++;
        mb();                   /* ensure new NID/incarnation visible first */
        /* Delete all existing peers and their connections after new
         * NID/incarnation set to ensure no old connections in our brave new
         * world. */
        kibnal_del_peer (PTL_NID_ANY, 0);

        if (ni->ni_pid.nid != PTL_NID_ANY) {
                /* got a new NID to install */
                rc = kibnal_start_ib_listener();
                if (rc != 0) {
                        CERROR("Can't start IB listener: %d\n", rc);
                        goto failed_0;
                }
        
                rc = kibnal_start_ip_listener();
                if (rc != 0) {
                        CERROR("Can't start IP listener: %d\n", rc);
                        goto failed_1;
                }
        }
        
        up(&kibnal_data.kib_nid_mutex);
        return 0;

        /* failure unwind: drop back to "no NID" and purge again so no
         * state from the half-installed identity survives */
 failed_1:
        kibnal_stop_ib_listener();
 failed_0:
        ni->ni_pid.nid = PTL_NID_ANY;
        kibnal_data.kib_incarnation++;
        mb();
        kibnal_del_peer (PTL_NID_ANY, 0);
        up(&kibnal_data.kib_nid_mutex);
        return rc;
}
976
977 kib_peer_t *
978 kibnal_create_peer (ptl_nid_t nid)
979 {
980         kib_peer_t *peer;
981
982         LASSERT (nid != PTL_NID_ANY);
983
984         PORTAL_ALLOC (peer, sizeof (*peer));
985         if (peer == NULL)
986                 return (NULL);
987
988         memset(peer, 0, sizeof(*peer));         /* zero flags etc */
989
990         peer->ibp_nid = nid;
991         atomic_set (&peer->ibp_refcount, 1);    /* 1 ref for caller */
992
993         INIT_LIST_HEAD (&peer->ibp_list);       /* not in the peer table yet */
994         INIT_LIST_HEAD (&peer->ibp_conns);
995         INIT_LIST_HEAD (&peer->ibp_tx_queue);
996
997         peer->ibp_reconnect_time = jiffies;
998         peer->ibp_reconnect_interval = IBNAL_MIN_RECONNECT_INTERVAL;
999
1000         atomic_inc (&kibnal_data.kib_npeers);
1001         CDEBUG(D_NET, "peer %p "LPX64"\n", peer, nid);
1002
1003         return (peer);
1004 }
1005
1006 void
1007 kibnal_destroy_peer (kib_peer_t *peer)
1008 {
1009         CDEBUG (D_NET, "peer "LPX64" %p deleted\n", peer->ibp_nid, peer);
1010
1011         LASSERT (atomic_read (&peer->ibp_refcount) == 0);
1012         LASSERT (peer->ibp_persistence == 0);
1013         LASSERT (!kibnal_peer_active(peer));
1014         LASSERT (peer->ibp_connecting == 0);
1015         LASSERT (list_empty (&peer->ibp_conns));
1016         LASSERT (list_empty (&peer->ibp_tx_queue));
1017
1018         PORTAL_FREE (peer, sizeof (*peer));
1019
1020         /* NB a peer's connections keep a reference on their peer until
1021          * they are destroyed, so we can be assured that _all_ state to do
1022          * with this peer has been cleaned up when its refcount drops to
1023          * zero. */
1024         atomic_dec (&kibnal_data.kib_npeers);
1025 }
1026
1027 void
1028 kibnal_put_peer (kib_peer_t *peer)
1029 {
1030         CDEBUG (D_OTHER, "putting peer[%p] -> "LPX64" (%d)\n",
1031                 peer, peer->ibp_nid,
1032                 atomic_read (&peer->ibp_refcount));
1033
1034         LASSERT (atomic_read (&peer->ibp_refcount) > 0);
1035         if (!atomic_dec_and_test (&peer->ibp_refcount))
1036                 return;
1037
1038         kibnal_destroy_peer (peer);
1039 }
1040
1041 kib_peer_t *
1042 kibnal_find_peer_locked (ptl_nid_t nid)
1043 {
1044         struct list_head *peer_list = kibnal_nid2peerlist (nid);
1045         struct list_head *tmp;
1046         kib_peer_t       *peer;
1047
1048         list_for_each (tmp, peer_list) {
1049
1050                 peer = list_entry (tmp, kib_peer_t, ibp_list);
1051
1052                 LASSERT (peer->ibp_persistence != 0 || /* persistent peer */
1053                          peer->ibp_connecting != 0 || /* creating conns */
1054                          !list_empty (&peer->ibp_conns));  /* active conn */
1055
1056                 if (peer->ibp_nid != nid)
1057                         continue;
1058
1059                 CDEBUG(D_NET, "got peer [%p] -> "LPX64" (%d)\n",
1060                        peer, nid, atomic_read (&peer->ibp_refcount));
1061                 return (peer);
1062         }
1063         return (NULL);
1064 }
1065
1066 kib_peer_t *
1067 kibnal_get_peer (ptl_nid_t nid)
1068 {
1069         kib_peer_t     *peer;
1070
1071         read_lock (&kibnal_data.kib_global_lock);
1072         peer = kibnal_find_peer_locked (nid);
1073         if (peer != NULL)                       /* +1 ref for caller? */
1074                 atomic_inc (&peer->ibp_refcount);
1075         read_unlock (&kibnal_data.kib_global_lock);
1076
1077         return (peer);
1078 }
1079
1080 void
1081 kibnal_unlink_peer_locked (kib_peer_t *peer)
1082 {
1083         LASSERT (peer->ibp_persistence == 0);
1084         LASSERT (list_empty(&peer->ibp_conns));
1085
1086         LASSERT (kibnal_peer_active(peer));
1087         list_del_init (&peer->ibp_list);
1088         /* lose peerlist's ref */
1089         kibnal_put_peer (peer);
1090 }
1091
1092 int
1093 kibnal_get_peer_info (int index, ptl_nid_t *nidp, __u32 *ipp, int *portp,
1094                       int *persistencep)
1095 {
1096         kib_peer_t        *peer;
1097         struct list_head  *ptmp;
1098         int                i;
1099
1100         read_lock (&kibnal_data.kib_global_lock);
1101
1102         for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {
1103
1104                 list_for_each (ptmp, &kibnal_data.kib_peers[i]) {
1105                         
1106                         peer = list_entry (ptmp, kib_peer_t, ibp_list);
1107                         LASSERT (peer->ibp_persistence != 0 ||
1108                                  peer->ibp_connecting != 0 ||
1109                                  !list_empty (&peer->ibp_conns));
1110
1111                         if (index-- > 0)
1112                                 continue;
1113
1114                         *nidp = peer->ibp_nid;
1115                         *ipp = peer->ibp_ip;
1116                         *portp = peer->ibp_port;
1117                         *persistencep = peer->ibp_persistence;
1118                         
1119                         read_unlock (&kibnal_data.kib_global_lock);
1120                         return (0);
1121                 }
1122         }
1123
1124         read_unlock (&kibnal_data.kib_global_lock);
1125         return (-ENOENT);
1126 }
1127
1128 int
1129 kibnal_add_persistent_peer (ptl_nid_t nid, __u32 ip, int port)
1130 {
1131         unsigned long      flags;
1132         kib_peer_t        *peer;
1133         kib_peer_t        *peer2;
1134         
1135         if (nid == PTL_NID_ANY)
1136                 return (-EINVAL);
1137
1138         peer = kibnal_create_peer (nid);
1139         if (peer == NULL)
1140                 return (-ENOMEM);
1141
1142         write_lock_irqsave (&kibnal_data.kib_global_lock, flags);
1143
1144         peer2 = kibnal_find_peer_locked (nid);
1145         if (peer2 != NULL) {
1146                 kibnal_put_peer (peer);
1147                 peer = peer2;
1148         } else {
1149                 /* peer table takes existing ref on peer */
1150                 list_add_tail (&peer->ibp_list,
1151                                kibnal_nid2peerlist (nid));
1152         }
1153
1154         peer->ibp_ip = ip;
1155         peer->ibp_port = port;
1156         peer->ibp_persistence++;
1157         
1158         write_unlock_irqrestore (&kibnal_data.kib_global_lock, flags);
1159         return (0);
1160 }
1161
void
kibnal_del_peer_locked (kib_peer_t *peer, int single_share)
{
        /* Remove one persistence 'share' (or all of them if !single_share)
         * from a peer.  When no persistence remains, the peer is taken out
         * of the table: directly if it has no connections, otherwise by
         * closing them all (closing the last conn does the unlink).
         * Caller holds kib_global_lock for writing. */
        struct list_head *ctmp;
        struct list_head *cnxt;
        kib_conn_t       *conn;

        if (!single_share)
                peer->ibp_persistence = 0;
        else if (peer->ibp_persistence > 0)
                peer->ibp_persistence--;

        /* still persistent: it stays in the table */
        if (peer->ibp_persistence != 0)
                return;

        if (list_empty(&peer->ibp_conns)) {
                kibnal_unlink_peer_locked(peer);
        } else {
                /* _safe: closing a conn removes it from ibp_conns */
                list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                        conn = list_entry(ctmp, kib_conn_t, ibc_list);

                        kibnal_close_conn_locked (conn, 0);
                }
                /* NB peer is no longer persistent; closing its last conn
                 * unlinked it. */
        }
        /* NB peer now unlinked; might even be freed if the peer table had the
         * last ref on it. */
}
1191
int
kibnal_del_peer (ptl_nid_t nid, int single_share)
{
        /* Delete the peer matching nid, or every peer if nid is
         * PTL_NID_ANY.  Returns 0 if anything matched, -ENOENT
         * otherwise. */
        unsigned long      flags;
        struct list_head  *ptmp;
        struct list_head  *pnxt;
        kib_peer_t        *peer;
        int                lo;
        int                hi;
        int                i;
        int                rc = -ENOENT;

        write_lock_irqsave (&kibnal_data.kib_global_lock, flags);

        /* a specific NID need only scan its own hash chain */
        if (nid != PTL_NID_ANY)
                lo = hi = kibnal_nid2peerlist(nid) - kibnal_data.kib_peers;
        else {
                lo = 0;
                hi = kibnal_data.kib_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                /* _safe: kibnal_del_peer_locked() may unlink peer */
                list_for_each_safe (ptmp, pnxt, &kibnal_data.kib_peers[i]) {
                        peer = list_entry (ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_persistence != 0 ||
                                 peer->ibp_connecting != 0 ||
                                 !list_empty (&peer->ibp_conns));

                        if (!(nid == PTL_NID_ANY || peer->ibp_nid == nid))
                                continue;

                        kibnal_del_peer_locked (peer, single_share);
                        rc = 0;         /* matched something */

                        /* a single share can only come off one peer */
                        if (single_share)
                                goto out;
                }
        }
 out:
        write_unlock_irqrestore (&kibnal_data.kib_global_lock, flags);

        return (rc);
}
1235
1236 kib_conn_t *
1237 kibnal_get_conn_by_idx (int index)
1238 {
1239         kib_peer_t        *peer;
1240         struct list_head  *ptmp;
1241         kib_conn_t        *conn;
1242         struct list_head  *ctmp;
1243         int                i;
1244
1245         read_lock (&kibnal_data.kib_global_lock);
1246
1247         for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {
1248                 list_for_each (ptmp, &kibnal_data.kib_peers[i]) {
1249
1250                         peer = list_entry (ptmp, kib_peer_t, ibp_list);
1251                         LASSERT (peer->ibp_persistence > 0 ||
1252                                  peer->ibp_connecting != 0 ||
1253                                  !list_empty (&peer->ibp_conns));
1254
1255                         list_for_each (ctmp, &peer->ibp_conns) {
1256                                 if (index-- > 0)
1257                                         continue;
1258
1259                                 conn = list_entry (ctmp, kib_conn_t, ibc_list);
1260                                 CDEBUG(D_NET, "++conn[%p] state %d -> "LPX64" (%d)\n",
1261                                        conn, conn->ibc_state, conn->ibc_peer->ibp_nid,
1262                                        atomic_read (&conn->ibc_refcount));
1263                                 atomic_inc (&conn->ibc_refcount);
1264                                 read_unlock (&kibnal_data.kib_global_lock);
1265                                 return (conn);
1266                         }
1267                 }
1268         }
1269
1270         read_unlock (&kibnal_data.kib_global_lock);
1271         return (NULL);
1272 }
1273
kib_conn_t *
kibnal_create_conn (void)
{
        /* Allocate a connection and everything it needs to exchange
         * messages: receive descriptors, mapped receive buffer pages, and
         * a queue pair advanced to the INIT state.  Returns the conn with
         * one ref for the caller, or NULL on failure (partially built
         * state is torn down via kibnal_destroy_conn()). */
        kib_conn_t  *conn;
        int          i;
        __u64        vaddr = 0;
        __u64        vaddr_base;
        int          page_offset;
        int          ipage;
        int          rc;
        union {
                struct ib_qp_create_param  qp_create;
                struct ib_qp_attribute     qp_attr;
        } params;
        
        PORTAL_ALLOC (conn, sizeof (*conn));
        if (conn == NULL) {
                CERROR ("Can't allocate connection\n");
                return (NULL);
        }

        /* zero flags, NULL pointers etc... */
        memset (conn, 0, sizeof (*conn));

        INIT_LIST_HEAD (&conn->ibc_tx_queue);
        INIT_LIST_HEAD (&conn->ibc_active_txs);
        spin_lock_init (&conn->ibc_lock);
        
        atomic_inc (&kibnal_data.kib_nconns);
        /* well not really, but I call destroy() on failure, which decrements */

        PORTAL_ALLOC (conn->ibc_rxs, IBNAL_RX_MSGS * sizeof (kib_rx_t));
        if (conn->ibc_rxs == NULL)
                goto failed;
        memset (conn->ibc_rxs, 0, IBNAL_RX_MSGS * sizeof(kib_rx_t));

        /* receive buffers must be writable by the HCA */
        rc = kibnal_alloc_pages(&conn->ibc_rx_pages,
                                IBNAL_RX_MSG_PAGES,
                                IB_ACCESS_LOCAL_WRITE);
        if (rc != 0)
                goto failed;

        vaddr_base = vaddr = conn->ibc_rx_pages->ibp_vaddr;

        /* carve the mapped pages into IBNAL_RX_MSGS fixed-size message
         * slots, pointing each rx descriptor at its slot's mapped vaddr
         * and kernel address */
        for (i = ipage = page_offset = 0; i < IBNAL_RX_MSGS; i++) {
                struct page *page = conn->ibc_rx_pages->ibp_pages[ipage];
                kib_rx_t   *rx = &conn->ibc_rxs[i];

                rx->rx_conn = conn;
                rx->rx_vaddr = vaddr;
                rx->rx_msg = (kib_msg_t *)(((char *)page_address(page)) + page_offset);
                
                vaddr += IBNAL_MSG_SIZE;
                LASSERT (vaddr <= vaddr_base + IBNAL_RX_MSG_BYTES);
                
                page_offset += IBNAL_MSG_SIZE;
                LASSERT (page_offset <= PAGE_SIZE);

                if (page_offset == PAGE_SIZE) {
                        page_offset = 0;
                        ipage++;
                        LASSERT (ipage <= IBNAL_RX_MSG_PAGES);
                }
        }

        params.qp_create = (struct ib_qp_create_param) {
                .limit = {
                        /* Sends have an optional RDMA */
                        .max_outstanding_send_request    = 2 * IBNAL_MSG_QUEUE_SIZE,
                        .max_outstanding_receive_request = IBNAL_MSG_QUEUE_SIZE,
                        .max_send_gather_element         = 1,
                        .max_receive_scatter_element     = 1,
                },
                .pd              = kibnal_data.kib_pd,
                .send_queue      = kibnal_data.kib_cq,
                .receive_queue   = kibnal_data.kib_cq,
                .send_policy     = IB_WQ_SIGNAL_SELECTABLE,
                .receive_policy  = IB_WQ_SIGNAL_SELECTABLE,
                .rd_domain       = 0,
                .transport       = IB_TRANSPORT_RC,
                .device_specific = NULL,
        };
        
        rc = ib_qp_create (&params.qp_create, &conn->ibc_qp, &conn->ibc_qpn);
        if (rc != 0) {
                CERROR ("Failed to create queue pair: %d\n", rc);
                goto failed;
        }
        
        /* Mark QP created */
        conn->ibc_state = IBNAL_CONN_INIT_QP;

        /* advance the QP to INIT with RDMA read/write enabled */
        params.qp_attr = (struct ib_qp_attribute) {
                .state             = IB_QP_STATE_INIT,
                .port              = kibnal_data.kib_port,
                .enable_rdma_read  = 1,
                .enable_rdma_write = 1,
                .valid_fields      = (IB_QP_ATTRIBUTE_STATE |
                                      IB_QP_ATTRIBUTE_PORT |
                                      IB_QP_ATTRIBUTE_PKEY_INDEX |
                                      IB_QP_ATTRIBUTE_RDMA_ATOMIC_ENABLE),
        };
        rc = ib_qp_modify(conn->ibc_qp, &params.qp_attr);
        if (rc != 0) {
                CERROR ("Failed to modify queue pair: %d\n", rc);
                goto failed;
        }

        /* 1 ref for caller */
        atomic_set (&conn->ibc_refcount, 1);
        return (conn);
        
 failed:
        /* destroy() handles every partial-construction state */
        kibnal_destroy_conn (conn);
        return (NULL);
}
1390
void
kibnal_destroy_conn (kib_conn_t *conn)
{
        /* Free a connection (refcount already zero, all tx/rx drained).
         * Also called on partially constructed conns from
         * kibnal_create_conn()'s failure path, so each resource is only
         * released if it was actually acquired. */
        int    rc;
        
        CDEBUG (D_NET, "connection %p\n", conn);

        LASSERT (atomic_read (&conn->ibc_refcount) == 0);
        LASSERT (list_empty(&conn->ibc_tx_queue));
        LASSERT (list_empty(&conn->ibc_active_txs));
        LASSERT (conn->ibc_nsends_posted == 0);
        LASSERT (conn->ibc_connreq == NULL);

        /* teardown cascades down the construction states */
        switch (conn->ibc_state) {
        case IBNAL_CONN_ZOMBIE:
                /* called after connection sequence initiated */
                /* fall through */

        case IBNAL_CONN_INIT_QP:
                rc = ib_qp_destroy(conn->ibc_qp);
                if (rc != 0)
                        CERROR("Can't destroy QP: %d\n", rc);
                /* fall through */
                
        case IBNAL_CONN_INIT_NOTHING:
                break;

        default:
                LASSERT (0);
        }

        if (conn->ibc_rx_pages != NULL) 
                kibnal_free_pages(conn->ibc_rx_pages);
        
        if (conn->ibc_rxs != NULL)
                PORTAL_FREE(conn->ibc_rxs, 
                            IBNAL_RX_MSGS * sizeof(kib_rx_t));

        /* conns hold a ref on their peer (NULL if construction failed
         * before the peer was attached) */
        if (conn->ibc_peer != NULL)
                kibnal_put_peer(conn->ibc_peer);

        PORTAL_FREE(conn, sizeof (*conn));

        atomic_dec(&kibnal_data.kib_nconns);
        
        if (atomic_read (&kibnal_data.kib_nconns) == 0 &&
            kibnal_data.kib_shutdown) {
                /* I just nuked the last connection on shutdown; wake up
                 * everyone so they can exit. */
                wake_up_all(&kibnal_data.kib_sched_waitq);
                wake_up_all(&kibnal_data.kib_reaper_waitq);
        }
}
1443
1444 void
1445 kibnal_put_conn (kib_conn_t *conn)
1446 {
1447         unsigned long flags;
1448
1449         CDEBUG (D_NET, "putting conn[%p] state %d -> "LPX64" (%d)\n",
1450                 conn, conn->ibc_state, conn->ibc_peer->ibp_nid,
1451                 atomic_read (&conn->ibc_refcount));
1452
1453         LASSERT (atomic_read (&conn->ibc_refcount) > 0);
1454         if (!atomic_dec_and_test (&conn->ibc_refcount))
1455                 return;
1456
1457         /* last ref only goes on zombies */
1458         LASSERT (conn->ibc_state == IBNAL_CONN_ZOMBIE);
1459
1460         spin_lock_irqsave (&kibnal_data.kib_reaper_lock, flags);
1461
1462         list_add (&conn->ibc_list, &kibnal_data.kib_reaper_conns);
1463         wake_up (&kibnal_data.kib_reaper_waitq);
1464
1465         spin_unlock_irqrestore (&kibnal_data.kib_reaper_lock, flags);
1466 }
1467
1468 int
1469 kibnal_close_peer_conns_locked (kib_peer_t *peer, int why)
1470 {
1471         kib_conn_t         *conn;
1472         struct list_head   *ctmp;
1473         struct list_head   *cnxt;
1474         int                 count = 0;
1475
1476         list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
1477                 conn = list_entry (ctmp, kib_conn_t, ibc_list);
1478
1479                 count++;
1480                 kibnal_close_conn_locked (conn, why);
1481         }
1482
1483         return (count);
1484 }
1485
1486 int
1487 kibnal_close_stale_conns_locked (kib_peer_t *peer, __u64 incarnation)
1488 {
1489         kib_conn_t         *conn;
1490         struct list_head   *ctmp;
1491         struct list_head   *cnxt;
1492         int                 count = 0;
1493
1494         list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
1495                 conn = list_entry (ctmp, kib_conn_t, ibc_list);
1496
1497                 if (conn->ibc_incarnation == incarnation)
1498                         continue;
1499
1500                 CDEBUG(D_NET, "Closing stale conn nid:"LPX64" incarnation:"LPX64"("LPX64")\n",
1501                        peer->ibp_nid, conn->ibc_incarnation, incarnation);
1502                 
1503                 count++;
1504                 kibnal_close_conn_locked (conn, -ESTALE);
1505         }
1506
1507         return (count);
1508 }
1509
1510 int
1511 kibnal_close_matching_conns (ptl_nid_t nid)
1512 {
1513         unsigned long       flags;
1514         kib_peer_t         *peer;
1515         struct list_head   *ptmp;
1516         struct list_head   *pnxt;
1517         int                 lo;
1518         int                 hi;
1519         int                 i;
1520         int                 count = 0;
1521
1522         write_lock_irqsave (&kibnal_data.kib_global_lock, flags);
1523
1524         if (nid != PTL_NID_ANY)
1525                 lo = hi = kibnal_nid2peerlist(nid) - kibnal_data.kib_peers;
1526         else {
1527                 lo = 0;
1528                 hi = kibnal_data.kib_peer_hash_size - 1;
1529         }
1530
1531         for (i = lo; i <= hi; i++) {
1532                 list_for_each_safe (ptmp, pnxt, &kibnal_data.kib_peers[i]) {
1533
1534                         peer = list_entry (ptmp, kib_peer_t, ibp_list);
1535                         LASSERT (peer->ibp_persistence != 0 ||
1536                                  peer->ibp_connecting != 0 ||
1537                                  !list_empty (&peer->ibp_conns));
1538
1539                         if (!(nid == PTL_NID_ANY || nid == peer->ibp_nid))
1540                                 continue;
1541
1542                         count += kibnal_close_peer_conns_locked (peer, 0);
1543                 }
1544         }
1545
1546         write_unlock_irqrestore (&kibnal_data.kib_global_lock, flags);
1547
1548         /* wildcards always succeed */
1549         if (nid == PTL_NID_ANY)
1550                 return (0);
1551         
1552         return (count == 0 ? -ENOENT : 0);
1553 }
1554
1555 int
1556 kibnal_cmd(struct portals_cfg *pcfg, void * private)
1557 {
1558         int rc = -EINVAL;
1559
1560         LASSERT (pcfg != NULL);
1561
1562         switch(pcfg->pcfg_command) {
1563         case NAL_CMD_GET_PEER: {
1564                 ptl_nid_t   nid = 0;
1565                 __u32       ip = 0;
1566                 int         port = 0;
1567                 int         share_count = 0;
1568
1569                 rc = kibnal_get_peer_info(pcfg->pcfg_count,
1570                                           &nid, &ip, &port, &share_count);
1571                 pcfg->pcfg_nid   = nid;
1572                 pcfg->pcfg_size  = 0;
1573                 pcfg->pcfg_id    = ip;
1574                 pcfg->pcfg_misc  = port;
1575                 pcfg->pcfg_count = 0;
1576                 pcfg->pcfg_wait  = share_count;
1577                 break;
1578         }
1579         case NAL_CMD_ADD_PEER: {
1580                 rc = kibnal_add_persistent_peer (pcfg->pcfg_nid,
1581                                                  pcfg->pcfg_id, /* IP */
1582                                                  pcfg->pcfg_misc); /* port */
1583                 break;
1584         }
1585         case NAL_CMD_DEL_PEER: {
1586                 rc = kibnal_del_peer (pcfg->pcfg_nid, 
1587                                        /* flags == single_share */
1588                                        pcfg->pcfg_flags != 0);
1589                 break;
1590         }
1591         case NAL_CMD_GET_CONN: {
1592                 kib_conn_t *conn = kibnal_get_conn_by_idx (pcfg->pcfg_count);
1593
1594                 if (conn == NULL)
1595                         rc = -ENOENT;
1596                 else {
1597                         rc = 0;
1598                         pcfg->pcfg_nid   = conn->ibc_peer->ibp_nid;
1599                         pcfg->pcfg_id    = 0;
1600                         pcfg->pcfg_misc  = 0;
1601                         pcfg->pcfg_flags = 0;
1602                         kibnal_put_conn (conn);
1603                 }
1604                 break;
1605         }
1606         case NAL_CMD_CLOSE_CONNECTION: {
1607                 rc = kibnal_close_matching_conns (pcfg->pcfg_nid);
1608                 break;
1609         }
1610         case NAL_CMD_REGISTER_MYNID: {
1611                 if (pcfg->pcfg_nid == PTL_NID_ANY)
1612                         rc = -EINVAL;
1613                 else
1614                         rc = kibnal_set_mynid (pcfg->pcfg_nid);
1615                 break;
1616         }
1617         }
1618
1619         return rc;
1620 }
1621
1622 void
1623 kibnal_free_pages (kib_pages_t *p)
1624 {
1625         int     npages = p->ibp_npages;
1626         int     rc;
1627         int     i;
1628         
1629         if (p->ibp_mapped) {
1630                 rc = ib_memory_deregister(p->ibp_handle);
1631                 if (rc != 0)
1632                         CERROR ("Deregister error: %d\n", rc);
1633         }
1634         
1635         for (i = 0; i < npages; i++)
1636                 if (p->ibp_pages[i] != NULL)
1637                         __free_page(p->ibp_pages[i]);
1638         
1639         PORTAL_FREE (p, offsetof(kib_pages_t, ibp_pages[npages]));
1640 }
1641
int
kibnal_alloc_pages (kib_pages_t **pp, int npages, int access)
{
        /* Allocate npages kernel pages and register them with the HCA as
         * one contiguous region with the given access rights.  On success
         * *pp holds the descriptor (vaddr/lkey/rkey filled in); any
         * failure cleans up completely and returns -ve rc. */
        kib_pages_t                *p;
        struct ib_physical_buffer  *phys_pages;
        int                         i;
        int                         rc;

        PORTAL_ALLOC(p, offsetof(kib_pages_t, ibp_pages[npages]));
        if (p == NULL) {
                CERROR ("Can't allocate buffer %d\n", npages);
                return (-ENOMEM);
        }

        memset (p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
        p->ibp_npages = npages;
        
        for (i = 0; i < npages; i++) {
                p->ibp_pages[i] = alloc_page (GFP_KERNEL);
                if (p->ibp_pages[i] == NULL) {
                        CERROR ("Can't allocate page %d of %d\n", i, npages);
                        /* frees the pages allocated so far too */
                        kibnal_free_pages(p);
                        return (-ENOMEM);
                }
        }

        /* temporary table of physical addresses for registration */
        PORTAL_ALLOC(phys_pages, npages * sizeof(*phys_pages));
        if (phys_pages == NULL) {
                CERROR ("Can't allocate physarray for %d pages\n", npages);
                kibnal_free_pages(p);
                return (-ENOMEM);
        }

        for (i = 0; i < npages; i++) {
                phys_pages[i].size = PAGE_SIZE;
                phys_pages[i].address =
                        kibnal_page2phys(p->ibp_pages[i]);
        }

        /* let the stack choose the IO vaddr */
        p->ibp_vaddr = 0;
        rc = ib_memory_register_physical(kibnal_data.kib_pd,
                                         phys_pages, npages,
                                         &p->ibp_vaddr,
                                         npages * PAGE_SIZE, 0,
                                         access,
                                         &p->ibp_handle,
                                         &p->ibp_lkey,
                                         &p->ibp_rkey);
        
        PORTAL_FREE(phys_pages, npages * sizeof(*phys_pages));
        
        if (rc != 0) {
                CERROR ("Error %d mapping %d pages\n", rc, npages);
                /* ibp_mapped still 0: free_pages won't deregister */
                kibnal_free_pages(p);
                return (rc);
        }
        
        p->ibp_mapped = 1;
        *pp = p;
        return (0);
}
1703
int
kibnal_setup_tx_descs (void)
{
        /* Allocate and map the transmit message buffers, then carve them
         * into IBNAL_TX_MSGS fixed-size slots, initialising one tx
         * descriptor per slot and parking each on the appropriate idle
         * list (blocking vs non-blocking). */
        int           ipage = 0;
        int           page_offset = 0;
        __u64         vaddr;
        __u64         vaddr_base;
        struct page  *page;
        kib_tx_t     *tx;
        int           i;
        int           rc;

        /* pre-mapped messages are not bigger than 1 page */
        LASSERT (IBNAL_MSG_SIZE <= PAGE_SIZE);

        /* No fancy arithmetic when we do the buffer calculations */
        LASSERT (PAGE_SIZE % IBNAL_MSG_SIZE == 0);

        rc = kibnal_alloc_pages(&kibnal_data.kib_tx_pages,
                                IBNAL_TX_MSG_PAGES, 
                                0);            /* local read access only */
        if (rc != 0)
                return (rc);

        vaddr = vaddr_base = kibnal_data.kib_tx_pages->ibp_vaddr;

        for (i = 0; i < IBNAL_TX_MSGS; i++) {
                page = kibnal_data.kib_tx_pages->ibp_pages[ipage];
                tx = &kibnal_data.kib_tx_descs[i];

                memset (tx, 0, sizeof(*tx));    /* zero flags etc */
                
                /* slot's kernel address and mapped IO vaddr */
                tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) + page_offset);
                tx->tx_vaddr = vaddr;
                /* descriptors beyond IBNAL_NTX serve non-blocking sends */
                tx->tx_isnblk = (i >= IBNAL_NTX);
                tx->tx_mapped = KIB_TX_UNMAPPED;

                CDEBUG(D_NET, "Tx[%d] %p->%p - "LPX64"\n", 
                       i, tx, tx->tx_msg, tx->tx_vaddr);

                if (tx->tx_isnblk)
                        list_add (&tx->tx_list, 
                                  &kibnal_data.kib_idle_nblk_txs);
                else
                        list_add (&tx->tx_list, 
                                  &kibnal_data.kib_idle_txs);

                vaddr += IBNAL_MSG_SIZE;
                LASSERT (vaddr <= vaddr_base + IBNAL_TX_MSG_BYTES);

                page_offset += IBNAL_MSG_SIZE;
                LASSERT (page_offset <= PAGE_SIZE);

                /* advance to the next mapped page when this one is full */
                if (page_offset == PAGE_SIZE) {
                        page_offset = 0;
                        ipage++;
                        LASSERT (ipage <= IBNAL_TX_MSG_PAGES);
                }
        }
        
        return (0);
}
1766
/* Tear the NAL down.  The switch below is a shutdown ladder: it enters at
 * whatever stage kibnal_api_startup() last recorded in kib_init and falls
 * through every case beneath it, undoing the stages in reverse order of
 * initialisation.  This makes it safe to call from startup's failure path
 * at any intermediate stage, as well as at normal unload. */
void
kibnal_api_shutdown (nal_t *nal)
{
        int   i;
        int   rc;

        /* Only the final reference actually shuts down; earlier callers
         * just drop their module ref */
        if (nal->nal_refct != 0) {
                /* This module got the first ref */
                PORTAL_MODULE_UNUSE;
                return;
        }

        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
               atomic_read (&portal_kmemory));

        LASSERT(nal == &kibnal_api);

        switch (kibnal_data.kib_init) {
        default:
                CERROR ("Unexpected state %d\n", kibnal_data.kib_init);
                LBUG();

        case IBNAL_INIT_ALL:
                /* stop calls to nal_cmd */
                libcfs_nal_cmd_unregister(OPENIBNAL);
                /* No new peers */

                /* resetting my NID unadvertises me, removes my
                 * listener and nukes all current peers */
                kibnal_set_mynid (PTL_NID_ANY);

                /* Wait for all peer state to clean up; poll once a second,
                 * warning only at power-of-2 iterations to avoid log spam */
                i = 2;
                while (atomic_read (&kibnal_data.kib_npeers) != 0) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                               "waiting for %d peers to close down\n",
                               atomic_read (&kibnal_data.kib_npeers));
                        set_current_state (TASK_INTERRUPTIBLE);
                        schedule_timeout (HZ);
                }
                /* fall through */

        case IBNAL_INIT_CQ:
                rc = ib_cq_destroy (kibnal_data.kib_cq);
                if (rc != 0)
                        CERROR ("Destroy CQ error: %d\n", rc);
                /* fall through */

        case IBNAL_INIT_TXD:
                kibnal_free_pages (kibnal_data.kib_tx_pages);
                /* fall through */
#if IBNAL_FMR
        case IBNAL_INIT_FMR:
                rc = ib_fmr_pool_destroy (kibnal_data.kib_fmr_pool);
                if (rc != 0)
                        CERROR ("Destroy FMR pool error: %d\n", rc);
                /* fall through */
#endif
        case IBNAL_INIT_PD:
                rc = ib_pd_destroy(kibnal_data.kib_pd);
                if (rc != 0)
                        CERROR ("Destroy PD error: %d\n", rc);
                /* fall through */

        case IBNAL_INIT_LIB:
                lib_fini(&kibnal_lib);
                /* fall through */

        case IBNAL_INIT_DATA:
                /* Module refcount only gets to zero when all peers
                 * have been closed so all lists must be empty */
                LASSERT (atomic_read (&kibnal_data.kib_npeers) == 0);
                LASSERT (kibnal_data.kib_peers != NULL);
                for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {
                        LASSERT (list_empty (&kibnal_data.kib_peers[i]));
                }
                LASSERT (atomic_read (&kibnal_data.kib_nconns) == 0);
                LASSERT (list_empty (&kibnal_data.kib_sched_rxq));
                LASSERT (list_empty (&kibnal_data.kib_sched_txq));
                LASSERT (list_empty (&kibnal_data.kib_reaper_conns));
                LASSERT (list_empty (&kibnal_data.kib_connd_peers));
                LASSERT (list_empty (&kibnal_data.kib_connd_acceptq));

                /* flag threads to terminate; wake and wait for them to die */
                kibnal_data.kib_shutdown = 1;
                wake_up_all (&kibnal_data.kib_sched_waitq);
                wake_up_all (&kibnal_data.kib_reaper_waitq);
                wake_up_all (&kibnal_data.kib_connd_waitq);

                /* same 1-second poll / power-of-2 warning scheme as the
                 * peer wait above */
                i = 2;
                while (atomic_read (&kibnal_data.kib_nthreads) != 0) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                               "Waiting for %d threads to terminate\n",
                               atomic_read (&kibnal_data.kib_nthreads));
                        set_current_state (TASK_INTERRUPTIBLE);
                        schedule_timeout (HZ);
                }
                /* fall through */

        case IBNAL_INIT_NOTHING:
                break;
        }

        /* These two are allocated before kib_init reaches IBNAL_INIT_DATA,
         * so they are freed outside the ladder; the NULL checks cover the
         * case where startup failed before allocating them */
        if (kibnal_data.kib_tx_descs != NULL)
                PORTAL_FREE (kibnal_data.kib_tx_descs,
                             IBNAL_TX_MSGS * sizeof(kib_tx_t));

        if (kibnal_data.kib_peers != NULL)
                PORTAL_FREE (kibnal_data.kib_peers,
                             sizeof (struct list_head) * 
                             kibnal_data.kib_peer_hash_size);

        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
               atomic_read (&portal_kmemory));
        printk(KERN_INFO "Lustre: OpenIB NAL unloaded (final mem %d)\n",
               atomic_read(&portal_kmemory));

        /* arm for a possible future startup */
        kibnal_data.kib_init = IBNAL_INIT_NOTHING;
}
1888
/* Bring the NAL up: initialise private state, the portals library, the
 * worker threads and all IB resources (device, PD, optional FMR pool,
 * pre-mapped TX descriptors, CQ) and finally register the nal_cmd
 * interface.  kib_init is advanced after each stage completes so that the
 * failure path — and normal unload — can hand kibnal_api_shutdown() an
 * exact record of what needs unwinding. */
int
kibnal_api_startup (nal_t *nal, ptl_pid_t requested_pid,
                     ptl_ni_limits_t *requested_limits,
                     ptl_ni_limits_t *actual_limits)
{
        struct timeval    tv;
        ptl_process_id_t  process_id;
        int               pkmem = atomic_read(&portal_kmemory); /* baseline for load-time mem report */
        int               rc;
        int               i;

        LASSERT (nal == &kibnal_api);

        /* Already initialised by an earlier caller: just report limits
         * and take a module ref */
        if (nal->nal_refct != 0) {
                if (actual_limits != NULL)
                        *actual_limits = kibnal_lib.libnal_ni.ni_actual_limits;
                /* This module got the first ref */
                PORTAL_MODULE_USE;
                return (PTL_OK);
        }

        LASSERT (kibnal_data.kib_init == IBNAL_INIT_NOTHING);

        memset (&kibnal_data, 0, sizeof (kibnal_data)); /* zero pointers, flags etc */

        /* incarnation = wallclock in usecs; presumably lets peers tell a
         * restarted NAL from the previous instance — TODO confirm use */
        do_gettimeofday(&tv);
        kibnal_data.kib_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;

        init_MUTEX (&kibnal_data.kib_nid_mutex);
        init_MUTEX_LOCKED (&kibnal_data.kib_listener_signal);

        rwlock_init(&kibnal_data.kib_global_lock);

        /* peer hash table */
        kibnal_data.kib_peer_hash_size = IBNAL_PEER_HASH_SIZE;
        PORTAL_ALLOC (kibnal_data.kib_peers,
                      sizeof (struct list_head) * kibnal_data.kib_peer_hash_size);
        if (kibnal_data.kib_peers == NULL) {
                goto failed;
        }
        for (i = 0; i < kibnal_data.kib_peer_hash_size; i++)
                INIT_LIST_HEAD(&kibnal_data.kib_peers[i]);

        /* reaper, connd and scheduler thread work queues */
        spin_lock_init (&kibnal_data.kib_reaper_lock);
        INIT_LIST_HEAD (&kibnal_data.kib_reaper_conns);
        init_waitqueue_head (&kibnal_data.kib_reaper_waitq);

        spin_lock_init (&kibnal_data.kib_connd_lock);
        INIT_LIST_HEAD (&kibnal_data.kib_connd_acceptq);
        INIT_LIST_HEAD (&kibnal_data.kib_connd_peers);
        init_waitqueue_head (&kibnal_data.kib_connd_waitq);

        spin_lock_init (&kibnal_data.kib_sched_lock);
        INIT_LIST_HEAD (&kibnal_data.kib_sched_txq);
        INIT_LIST_HEAD (&kibnal_data.kib_sched_rxq);
        init_waitqueue_head (&kibnal_data.kib_sched_waitq);

        /* idle tx descriptor pools (blocking and non-blocking) */
        spin_lock_init (&kibnal_data.kib_tx_lock);
        INIT_LIST_HEAD (&kibnal_data.kib_idle_txs);
        INIT_LIST_HEAD (&kibnal_data.kib_idle_nblk_txs);
        init_waitqueue_head(&kibnal_data.kib_idle_tx_waitq);

        PORTAL_ALLOC (kibnal_data.kib_tx_descs,
                      IBNAL_TX_MSGS * sizeof(kib_tx_t));
        if (kibnal_data.kib_tx_descs == NULL) {
                CERROR ("Can't allocate tx descs\n");
                goto failed;
        }

        /* lists/ptrs/locks initialised */
        kibnal_data.kib_init = IBNAL_INIT_DATA;
        /*****************************************************/


        process_id.pid = requested_pid;
        process_id.nid = PTL_NID_ANY;           /* don't know my NID yet */
        
        rc = lib_init(&kibnal_lib, nal, process_id,
                      requested_limits, actual_limits);
        if (rc != PTL_OK) {
                CERROR("lib_init failed: error %d\n", rc);
                goto failed;
        }

        /* lib interface initialised */
        kibnal_data.kib_init = IBNAL_INIT_LIB;
        /*****************************************************/

        /* spawn the worker threads; each bumps kib_nthreads, which
         * shutdown waits on */
        for (i = 0; i < IBNAL_N_SCHED; i++) {
                rc = kibnal_thread_start (kibnal_scheduler,
                                          (void *)((unsigned long)i));
                if (rc != 0) {
                        CERROR("Can't spawn openibnal scheduler[%d]: %d\n",
                               i, rc);
                        goto failed;
                }
        }

        for (i = 0; i < IBNAL_N_CONND; i++) {
                rc = kibnal_thread_start (kibnal_connd,
                                          (void *)((unsigned long)i));
                if (rc != 0) {
                        CERROR("Can't spawn openibnal connd[%d]: %d\n",
                               i, rc);
                        goto failed;
                }
        }

        rc = kibnal_thread_start (kibnal_reaper, NULL);
        if (rc != 0) {
                CERROR ("Can't spawn openibnal reaper: %d\n", rc);
                goto failed;
        }

        /* open the first (only supported) IB device */
        kibnal_data.kib_device = ib_device_get_by_index(0);
        if (kibnal_data.kib_device == NULL) {
                CERROR ("Can't open ib device 0\n");
                goto failed;
        }
        
        rc = ib_device_properties_get(kibnal_data.kib_device,
                                      &kibnal_data.kib_device_props);
        if (rc != 0) {
                CERROR ("Can't get device props: %d\n", rc);
                goto failed;
        }

        CDEBUG(D_NET, "Max Initiator: %d Max Responder %d\n", 
               kibnal_data.kib_device_props.max_initiator_per_qp,
               kibnal_data.kib_device_props.max_responder_per_qp);

        /* probe ports 1 and 2; use the first that reports properties */
        kibnal_data.kib_port = 0;
        for (i = 1; i <= 2; i++) {
                rc = ib_port_properties_get(kibnal_data.kib_device, i,
                                            &kibnal_data.kib_port_props);
                if (rc == 0) {
                        kibnal_data.kib_port = i;
                        break;
                }
        }
        if (kibnal_data.kib_port == 0) {
                CERROR ("Can't find a port\n");
                goto failed;
        }

        rc = ib_pd_create(kibnal_data.kib_device,
                          NULL, &kibnal_data.kib_pd);
        if (rc != 0) {
                CERROR ("Can't create PD: %d\n", rc);
                goto failed;
        }
        
        /* flag PD initialised */
        kibnal_data.kib_init = IBNAL_INIT_PD;
        /*****************************************************/
#if IBNAL_FMR
        {
                /* one FMR per tx descriptor (blocking + non-blocking) */
                const int pool_size = IBNAL_NTX + IBNAL_NTX_NBLK;
                struct ib_fmr_pool_param params = {
                        .max_pages_per_fmr = PTL_MTU/PAGE_SIZE,
                        .access            = (IB_ACCESS_LOCAL_WRITE |
                                              IB_ACCESS_REMOTE_WRITE |
                                              IB_ACCESS_REMOTE_READ),
                        .pool_size         = pool_size,
                        .dirty_watermark   = (pool_size * 3)/4,
                        .flush_function    = NULL,
                        .flush_arg         = NULL,
                        .cache             = 1,
                };
                rc = ib_fmr_pool_create(kibnal_data.kib_pd, &params,
                                        &kibnal_data.kib_fmr_pool);
                if (rc != 0) {
                        CERROR ("Can't create FMR pool size %d: %d\n", 
                                pool_size, rc);
                        goto failed;
                }
        }

        /* flag FMR pool initialised */
        kibnal_data.kib_init = IBNAL_INIT_FMR;
#endif
        /*****************************************************/

        rc = kibnal_setup_tx_descs();
        if (rc != 0) {
                CERROR ("Can't register tx descs: %d\n", rc);
                goto failed;
        }
        
        /* flag TX descs initialised */
        kibnal_data.kib_init = IBNAL_INIT_TXD;
        /*****************************************************/
        
        {
                struct ib_cq_callback callback = {
                        .context        = IBNAL_CALLBACK_CTXT,
                        .policy         = IB_CQ_PROVIDER_REARM,
                        .function       = {
                                .entry  = kibnal_callback,
                        },
                        .arg            = NULL,
                };
                int  nentries = IBNAL_CQ_ENTRIES;
                
                rc = ib_cq_create (kibnal_data.kib_device, 
                                   &nentries, &callback, NULL,
                                   &kibnal_data.kib_cq);
                if (rc != 0) {
                        CERROR ("Can't create CQ: %d\n", rc);
                        goto failed;
                }

                /* I only want solicited events */
                rc = ib_cq_request_notification(kibnal_data.kib_cq, 1);
                LASSERT (rc == 0);
        }
        
        /* flag CQ initialised */
        kibnal_data.kib_init = IBNAL_INIT_CQ;
        /*****************************************************/
        
        rc = libcfs_nal_cmd_register(OPENIBNAL, &kibnal_cmd, NULL);
        if (rc != 0) {
                CERROR ("Can't initialise command interface (rc = %d)\n", rc);
                goto failed;
        }

        /* flag everything initialised */
        kibnal_data.kib_init = IBNAL_INIT_ALL;
        /*****************************************************/

        printk(KERN_INFO "Lustre: OpenIB NAL loaded "
               "(initial mem %d)\n", pkmem);

        return (PTL_OK);

 failed:
        /* shutdown unwinds exactly the stages recorded in kib_init */
        kibnal_api_shutdown (&kibnal_api);    
        return (PTL_FAIL);
}
2128
/* Module unload hook: undo kibnal_module_init() in reverse order. */
void __exit
kibnal_module_fini (void)
{
        /* remove the /proc/sys tunables registered at load time */
        if (kibnal_tunables.kib_sysctl != NULL)
                unregister_sysctl_table (kibnal_tunables.kib_sysctl);
        /* drop the interface ref taken by PtlNIInit() at load;
         * kibnal_api_shutdown is installed as the nal_ni_fini hook
         * (see kibnal_module_init), so presumably runs when the last
         * ref goes — confirm against the portals library */
        PtlNIFini(kibnal_ni);

        ptl_unregister_nal(OPENIBNAL);
}
2138
2139 int __init
2140 kibnal_module_init (void)
2141 {
2142         int    rc;
2143
2144         /* the following must be sizeof(int) for proc_dointvec() */
2145         LASSERT (sizeof(kibnal_tunables.kib_io_timeout) == sizeof(int));
2146         LASSERT (sizeof(kibnal_tunables.kib_listener_timeout) == sizeof(int));
2147         LASSERT (sizeof(kibnal_tunables.kib_backlog) == sizeof(int));
2148         LASSERT (sizeof(kibnal_tunables.kib_port) == sizeof(int));
2149
2150         kibnal_api.nal_ni_init = kibnal_api_startup;
2151         kibnal_api.nal_ni_fini = kibnal_api_shutdown;
2152
2153         /* Initialise dynamic tunables to defaults once only */
2154         kibnal_tunables.kib_io_timeout = IBNAL_IO_TIMEOUT;
2155         kibnal_tunables.kib_listener_timeout = IBNAL_LISTENER_TIMEOUT;
2156         kibnal_tunables.kib_backlog = IBNAL_BACKLOG;
2157         kibnal_tunables.kib_port = IBNAL_PORT;
2158
2159         rc = ptl_register_nal(OPENIBNAL, &kibnal_api);
2160         if (rc != PTL_OK) {
2161                 CERROR("Can't register IBNAL: %d\n", rc);
2162                 return (-ENOMEM);               /* or something... */
2163         }
2164
2165         /* Pure gateways want the NAL started up at module load time... */
2166         rc = PtlNIInit(OPENIBNAL, LUSTRE_SRV_PTL_PID, NULL, NULL, &kibnal_ni);
2167         if (rc != PTL_OK && rc != PTL_IFACE_DUP) {
2168                 ptl_unregister_nal(OPENIBNAL);
2169                 return (-ENODEV);
2170         }
2171         
2172         kibnal_tunables.kib_sysctl = 
2173                 register_sysctl_table (kibnal_top_ctl_table, 0);
2174         if (kibnal_tunables.kib_sysctl == NULL) {
2175                 CERROR("Can't register sysctl table\n");
2176                 PtlNIFini(kibnal_ni);
2177                 ptl_unregister_nal(OPENIBNAL);
2178                 return (-ENOMEM);
2179         }
2180
2181         return (0);
2182 }
2183
/* Module metadata and load/unload entry points */
MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
MODULE_DESCRIPTION("Kernel OpenIB NAL v0.01");
MODULE_LICENSE("GPL");

module_init(kibnal_module_init);
module_exit(kibnal_module_fini);
2190