/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2004 Cluster File Systems, Inc.
 *   Author: Eric Barton <eric@bartonsoftware.com>
 *   Author: Frank Zago <fzago@systemfabricworks.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
ptl_handle_ni_t  kibnal_ni;
kib_data_t       kibnal_data;
kib_tunables_t   kibnal_tunables;
#define IBNAL_SYSCTL            202

#define IBNAL_SYSCTL_TIMEOUT    1

static ctl_table kibnal_ctl_table[] = {
        {IBNAL_SYSCTL_TIMEOUT, "timeout",
         &kibnal_tunables.kib_io_timeout, sizeof (int),
         0644, NULL, &proc_dointvec},
        {0}
};

static ctl_table kibnal_top_ctl_table[] = {
        {IBNAL_SYSCTL, "vibnal", NULL, 0, 0555, kibnal_ctl_table},
        {0}
};
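
/* The two-level table above publishes the NAL's one dynamic tunable via
 * sysctl: /proc/sys/vibnal/timeout reads and writes
 * kibnal_tunables.kib_io_timeout through proc_dointvec(), under the private
 * binary sysctl id 202 ("vibnal"). */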
void vibnal_assert_wire_constants (void)
{
        /* Wire protocol assertions generated by 'wirecheck'
         * running on Linux robert.bartonsoftware.com 2.6.5-1.358 #1 Sat May 8 09:04:50 EDT 2004 i686
         * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */

        CLASSERT (IBNAL_MSG_MAGIC == 0x0be91b91);
        CLASSERT (IBNAL_MSG_VERSION == 6);
        CLASSERT (IBNAL_MSG_CONNREQ == 0xc0);
        CLASSERT (IBNAL_MSG_CONNACK == 0xc1);
        CLASSERT (IBNAL_MSG_NOOP == 0xd0);
        CLASSERT (IBNAL_MSG_IMMEDIATE == 0xd1);
        CLASSERT (IBNAL_MSG_PUT_REQ == 0xd2);
        CLASSERT (IBNAL_MSG_PUT_NAK == 0xd3);
        CLASSERT (IBNAL_MSG_PUT_ACK == 0xd4);
        CLASSERT (IBNAL_MSG_PUT_DONE == 0xd5);
        CLASSERT (IBNAL_MSG_GET_REQ == 0xd6);
        CLASSERT (IBNAL_MSG_GET_DONE == 0xd7);
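
        /* The CLASSERTs in this function pin the version 6 wire protocol:
         * message type values and on-the-wire struct sizes/offsets are
         * checked at compile time, so a change that shifts the wire format
         * breaks the build instead of silently breaking interoperability.
         * The assertions are regenerated with the 'wirecheck' tool noted
         * above. */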
        /* Checks for struct kib_connparams_t */
        CLASSERT ((int)sizeof(kib_connparams_t) == 12);
        CLASSERT ((int)offsetof(kib_connparams_t, ibcp_queue_depth) == 0);
        CLASSERT ((int)sizeof(((kib_connparams_t *)0)->ibcp_queue_depth) == 4);
        CLASSERT ((int)offsetof(kib_connparams_t, ibcp_max_msg_size) == 4);
        CLASSERT ((int)sizeof(((kib_connparams_t *)0)->ibcp_max_msg_size) == 4);
        CLASSERT ((int)offsetof(kib_connparams_t, ibcp_max_frags) == 8);
        CLASSERT ((int)sizeof(((kib_connparams_t *)0)->ibcp_max_frags) == 4);

        /* Checks for struct kib_immediate_msg_t */
        CLASSERT ((int)sizeof(kib_immediate_msg_t) == 72);
        CLASSERT ((int)offsetof(kib_immediate_msg_t, ibim_hdr) == 0);
        CLASSERT ((int)sizeof(((kib_immediate_msg_t *)0)->ibim_hdr) == 72);
        CLASSERT ((int)offsetof(kib_immediate_msg_t, ibim_payload[13]) == 85);
        CLASSERT ((int)sizeof(((kib_immediate_msg_t *)0)->ibim_payload[13]) == 1);

        /* Checks for struct kib_rdma_frag_t */
        CLASSERT ((int)sizeof(kib_rdma_frag_t) == 12);
        CLASSERT ((int)offsetof(kib_rdma_frag_t, rf_nob) == 0);
        CLASSERT ((int)sizeof(((kib_rdma_frag_t *)0)->rf_nob) == 4);
        CLASSERT ((int)offsetof(kib_rdma_frag_t, rf_addr_lo) == 4);
        CLASSERT ((int)sizeof(((kib_rdma_frag_t *)0)->rf_addr_lo) == 4);
        CLASSERT ((int)offsetof(kib_rdma_frag_t, rf_addr_hi) == 8);
        CLASSERT ((int)sizeof(((kib_rdma_frag_t *)0)->rf_addr_hi) == 4);

        /* Checks for struct kib_rdma_desc_t */
        CLASSERT ((int)sizeof(kib_rdma_desc_t) == 8);
        CLASSERT ((int)offsetof(kib_rdma_desc_t, rd_key) == 0);
        CLASSERT ((int)sizeof(((kib_rdma_desc_t *)0)->rd_key) == 4);
        CLASSERT ((int)offsetof(kib_rdma_desc_t, rd_nfrag) == 4);
        CLASSERT ((int)sizeof(((kib_rdma_desc_t *)0)->rd_nfrag) == 4);
        CLASSERT ((int)offsetof(kib_rdma_desc_t, rd_frags[13]) == 164);
        CLASSERT ((int)sizeof(((kib_rdma_desc_t *)0)->rd_frags[13]) == 12);

        /* Checks for struct kib_putreq_msg_t */
        CLASSERT ((int)sizeof(kib_putreq_msg_t) == 80);
        CLASSERT ((int)offsetof(kib_putreq_msg_t, ibprm_hdr) == 0);
        CLASSERT ((int)sizeof(((kib_putreq_msg_t *)0)->ibprm_hdr) == 72);
        CLASSERT ((int)offsetof(kib_putreq_msg_t, ibprm_cookie) == 72);
        CLASSERT ((int)sizeof(((kib_putreq_msg_t *)0)->ibprm_cookie) == 8);

        /* Checks for struct kib_putack_msg_t */
        CLASSERT ((int)sizeof(kib_putack_msg_t) == 24);
        CLASSERT ((int)offsetof(kib_putack_msg_t, ibpam_src_cookie) == 0);
        CLASSERT ((int)sizeof(((kib_putack_msg_t *)0)->ibpam_src_cookie) == 8);
        CLASSERT ((int)offsetof(kib_putack_msg_t, ibpam_dst_cookie) == 8);
        CLASSERT ((int)sizeof(((kib_putack_msg_t *)0)->ibpam_dst_cookie) == 8);
        CLASSERT ((int)offsetof(kib_putack_msg_t, ibpam_rd) == 16);
        CLASSERT ((int)sizeof(((kib_putack_msg_t *)0)->ibpam_rd) == 8);

        /* Checks for struct kib_get_msg_t */
        CLASSERT ((int)sizeof(kib_get_msg_t) == 88);
        CLASSERT ((int)offsetof(kib_get_msg_t, ibgm_hdr) == 0);
        CLASSERT ((int)sizeof(((kib_get_msg_t *)0)->ibgm_hdr) == 72);
        CLASSERT ((int)offsetof(kib_get_msg_t, ibgm_cookie) == 72);
        CLASSERT ((int)sizeof(((kib_get_msg_t *)0)->ibgm_cookie) == 8);
        CLASSERT ((int)offsetof(kib_get_msg_t, ibgm_rd) == 80);
        CLASSERT ((int)sizeof(((kib_get_msg_t *)0)->ibgm_rd) == 8);

        /* Checks for struct kib_completion_msg_t */
        CLASSERT ((int)sizeof(kib_completion_msg_t) == 12);
        CLASSERT ((int)offsetof(kib_completion_msg_t, ibcm_cookie) == 0);
        CLASSERT ((int)sizeof(((kib_completion_msg_t *)0)->ibcm_cookie) == 8);
        CLASSERT ((int)offsetof(kib_completion_msg_t, ibcm_status) == 8);
        CLASSERT ((int)sizeof(((kib_completion_msg_t *)0)->ibcm_status) == 4);

        /* Checks for struct kib_msg_t */
        CLASSERT ((int)sizeof(kib_msg_t) == 144);
        CLASSERT ((int)offsetof(kib_msg_t, ibm_magic) == 0);
        CLASSERT ((int)sizeof(((kib_msg_t *)0)->ibm_magic) == 4);
        CLASSERT ((int)offsetof(kib_msg_t, ibm_version) == 4);
        CLASSERT ((int)sizeof(((kib_msg_t *)0)->ibm_version) == 2);
        CLASSERT ((int)offsetof(kib_msg_t, ibm_type) == 6);
        CLASSERT ((int)sizeof(((kib_msg_t *)0)->ibm_type) == 1);
        CLASSERT ((int)offsetof(kib_msg_t, ibm_credits) == 7);
        CLASSERT ((int)sizeof(((kib_msg_t *)0)->ibm_credits) == 1);
        CLASSERT ((int)offsetof(kib_msg_t, ibm_nob) == 8);
        CLASSERT ((int)sizeof(((kib_msg_t *)0)->ibm_nob) == 4);
        CLASSERT ((int)offsetof(kib_msg_t, ibm_cksum) == 12);
        CLASSERT ((int)sizeof(((kib_msg_t *)0)->ibm_cksum) == 4);
        CLASSERT ((int)offsetof(kib_msg_t, ibm_srcnid) == 16);
        CLASSERT ((int)sizeof(((kib_msg_t *)0)->ibm_srcnid) == 8);
        CLASSERT ((int)offsetof(kib_msg_t, ibm_srcstamp) == 24);
        CLASSERT ((int)sizeof(((kib_msg_t *)0)->ibm_srcstamp) == 8);
        CLASSERT ((int)offsetof(kib_msg_t, ibm_dstnid) == 32);
        CLASSERT ((int)sizeof(((kib_msg_t *)0)->ibm_dstnid) == 8);
        CLASSERT ((int)offsetof(kib_msg_t, ibm_dststamp) == 40);
        CLASSERT ((int)sizeof(((kib_msg_t *)0)->ibm_dststamp) == 8);
        CLASSERT ((int)offsetof(kib_msg_t, ibm_seq) == 48);
        CLASSERT ((int)sizeof(((kib_msg_t *)0)->ibm_seq) == 8);
        CLASSERT ((int)offsetof(kib_msg_t, ibm_u.connparams) == 56);
        CLASSERT ((int)sizeof(((kib_msg_t *)0)->ibm_u.connparams) == 12);
        CLASSERT ((int)offsetof(kib_msg_t, ibm_u.immediate) == 56);
        CLASSERT ((int)sizeof(((kib_msg_t *)0)->ibm_u.immediate) == 72);
        CLASSERT ((int)offsetof(kib_msg_t, ibm_u.putreq) == 56);
        CLASSERT ((int)sizeof(((kib_msg_t *)0)->ibm_u.putreq) == 80);
        CLASSERT ((int)offsetof(kib_msg_t, ibm_u.putack) == 56);
        CLASSERT ((int)sizeof(((kib_msg_t *)0)->ibm_u.putack) == 24);
        CLASSERT ((int)offsetof(kib_msg_t, ibm_u.get) == 56);
        CLASSERT ((int)sizeof(((kib_msg_t *)0)->ibm_u.get) == 88);
        CLASSERT ((int)offsetof(kib_msg_t, ibm_u.completion) == 56);
        CLASSERT ((int)sizeof(((kib_msg_t *)0)->ibm_u.completion) == 12);
}
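
/* Layout summary of the assertions above: every message carries a 56-byte
 * common header (magic, version, type, credits, nob, cksum, srcnid,
 * srcstamp, dstnid, dststamp, seq) followed at offset 56 by a per-type
 * union; the largest member (kib_get_msg_t, 88 bytes) makes the whole
 * kib_msg_t 144 bytes. */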
void
kibnal_pause(int ticks)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(ticks);
}
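
/* kibnal_cksum() below folds each byte into a running 32-bit sum that is
 * rotated left one bit per byte: sum = rol32(sum, 1) + *c++.  A tiny worked
 * example over the bytes {0x01, 0x02}:
 *
 *      sum = rol32(0x00, 1) + 0x01 = 0x01
 *      sum = rol32(0x01, 1) + 0x02 = 0x04
 *
 * Zero is reserved to mean "no checksum", so a computed 0 is returned as 1. */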
__u32
kibnal_cksum (void *ptr, int nob)
{
        char  *c  = ptr;
        __u32  sum = 0;

        while (nob-- > 0)
                sum = ((sum << 1) | (sum >> 31)) + *c++;

        /* ensure I don't return 0 (== no checksum) */
        return (sum == 0) ? 1 : sum;
}
void
kibnal_init_msg(kib_msg_t *msg, int type, int body_nob)
{
        msg->ibm_type = type;
        msg->ibm_nob  = offsetof(kib_msg_t, ibm_u) + body_nob;
}
void
kibnal_pack_msg(kib_msg_t *msg, int credits, ptl_nid_t dstnid,
                __u64 dststamp, __u64 seq)
{
        /* CAVEAT EMPTOR! all message fields not set here should have been
         * initialised previously. */
        msg->ibm_magic    = IBNAL_MSG_MAGIC;
        msg->ibm_version  = IBNAL_MSG_VERSION;
        msg->ibm_credits  = credits;
        msg->ibm_cksum    = 0;
        msg->ibm_srcnid   = kibnal_lib.libnal_ni.ni_pid.nid;
        msg->ibm_srcstamp = kibnal_data.kib_incarnation;
        msg->ibm_dstnid   = dstnid;
        msg->ibm_dststamp = dststamp;
        msg->ibm_seq      = seq;

        /* NB ibm_cksum zero while computing cksum */
        msg->ibm_cksum    = kibnal_cksum(msg, msg->ibm_nob);
}
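
/* kibnal_unpack_msg() below validates an incoming message in a strict
 * order: minimum length for magic + version, magic (possibly byte-swapped,
 * which reveals the peer's endianness), version, header length, ibm_nob
 * against the bytes actually received, then the checksum (computed with
 * ibm_cksum zeroed, exactly as the sender computed it), and only then
 * byte-swaps the header and applies per-type size and fragment-count
 * checks. */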
int
kibnal_unpack_msg(kib_msg_t *msg, int nob)
{
        const int hdr_size = offsetof(kib_msg_t, ibm_u);
        __u32     msg_cksum;
        __u32     msg_nob;
        int       flip;
        int       n;
        int       i;

        /* 6 bytes are enough to have received magic + version */
        if (nob < 6) {
                CERROR("Short message: %d\n", nob);
                return -EPROTO;
        }

        if (msg->ibm_magic == IBNAL_MSG_MAGIC) {
                flip = 0;
        } else if (msg->ibm_magic == __swab32(IBNAL_MSG_MAGIC)) {
                flip = 1;
        } else {
                CERROR("Bad magic: %08x\n", msg->ibm_magic);
                return -EPROTO;
        }

        if (msg->ibm_version !=
            (flip ? __swab16(IBNAL_MSG_VERSION) : IBNAL_MSG_VERSION)) {
                CERROR("Bad version: %d\n", msg->ibm_version);
                return -EPROTO;
        }

        if (nob < hdr_size) {
                CERROR("Short message: %d\n", nob);
                return -EPROTO;
        }

        msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
        if (msg_nob > nob) {
                CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
                return -EPROTO;
        }

        /* checksum must be computed with ibm_cksum zero and BEFORE anything
         * gets flipped */
        msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
        msg->ibm_cksum = 0;
        if (msg_cksum != 0 &&
            msg_cksum != kibnal_cksum(msg, msg_nob)) {
                CERROR("Bad checksum\n");
                return -EPROTO;
        }
        msg->ibm_cksum = msg_cksum;

        if (flip) {
                /* leave magic unflipped as a clue to peer endianness */
                __swab16s(&msg->ibm_version);
                CLASSERT (sizeof(msg->ibm_type) == 1);
                CLASSERT (sizeof(msg->ibm_credits) == 1);
                msg->ibm_nob = msg_nob;
                __swab64s(&msg->ibm_srcnid);
                __swab64s(&msg->ibm_srcstamp);
                __swab64s(&msg->ibm_dstnid);
                __swab64s(&msg->ibm_dststamp);
                __swab64s(&msg->ibm_seq);
        }

        if (msg->ibm_srcnid == PTL_NID_ANY) {
                CERROR("Bad src nid: "LPX64"\n", msg->ibm_srcnid);
                return -EPROTO;
        }

        switch (msg->ibm_type) {
        default:
                CERROR("Unknown message type %x\n", msg->ibm_type);
                return -EPROTO;

        case IBNAL_MSG_NOOP:
                break;

        case IBNAL_MSG_IMMEDIATE:
                if (msg_nob < offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0])) {
                        CERROR("Short IMMEDIATE: %d(%d)\n", msg_nob,
                               (int)offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]));
                        return -EPROTO;
                }
                break;

        case IBNAL_MSG_PUT_REQ:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.putreq)) {
                        CERROR("Short PUT_REQ: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.putreq)));
                        return -EPROTO;
                }
                break;

        case IBNAL_MSG_PUT_ACK:
                if (msg_nob < offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[0])) {
                        CERROR("Short PUT_ACK: %d(%d)\n", msg_nob,
                               (int)offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[0]));
                        return -EPROTO;
                }

                if (flip) {
                        __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_key);
                        __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_nfrag);
                }

                n = msg->ibm_u.putack.ibpam_rd.rd_nfrag;
                if (n <= 0 || n > IBNAL_MAX_RDMA_FRAGS) {
                        CERROR("Bad PUT_ACK nfrags: %d, should be 0 < n <= %d\n",
                               n, IBNAL_MAX_RDMA_FRAGS);
                        return -EPROTO;
                }

                if (msg_nob < offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[n])) {
                        CERROR("Short PUT_ACK: %d(%d)\n", msg_nob,
                               (int)offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[n]));
                        return -EPROTO;
                }

                if (flip)
                        for (i = 0; i < n; i++) {
                                __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_frags[i].rf_nob);
                                __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_frags[i].rf_addr_lo);
                                __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_frags[i].rf_addr_hi);
                        }
                break;

        case IBNAL_MSG_GET_REQ:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.get)) {
                        CERROR("Short GET_REQ: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.get)));
                        return -EPROTO;
                }

                if (flip) {
                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_key);
                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_nfrag);
                }

                n = msg->ibm_u.get.ibgm_rd.rd_nfrag;
                if (n <= 0 || n > IBNAL_MAX_RDMA_FRAGS) {
                        CERROR("Bad GET_REQ nfrags: %d, should be 0 < n <= %d\n",
                               n, IBNAL_MAX_RDMA_FRAGS);
                        return -EPROTO;
                }

                if (msg_nob < offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[n])) {
                        CERROR("Short GET_REQ: %d(%d)\n", msg_nob,
                               (int)offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[n]));
                        return -EPROTO;
                }

                if (flip)
                        for (i = 0; i < msg->ibm_u.get.ibgm_rd.rd_nfrag; i++) {
                                __swab32s(&msg->ibm_u.get.ibgm_rd.rd_frags[i].rf_nob);
                                __swab32s(&msg->ibm_u.get.ibgm_rd.rd_frags[i].rf_addr_lo);
                                __swab32s(&msg->ibm_u.get.ibgm_rd.rd_frags[i].rf_addr_hi);
                        }
                break;

        case IBNAL_MSG_PUT_NAK:
        case IBNAL_MSG_PUT_DONE:
        case IBNAL_MSG_GET_DONE:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.completion)) {
                        CERROR("Short RDMA completion: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.completion)));
                        return -EPROTO;
                }
                if (flip)
                        __swab32s(&msg->ibm_u.completion.ibcm_status);
                break;

        case IBNAL_MSG_CONNREQ:
        case IBNAL_MSG_CONNACK:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.connparams)) {
                        CERROR("Short connreq/ack: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.connparams)));
                        return -EPROTO;
                }
                if (flip) {
                        __swab32s(&msg->ibm_u.connparams.ibcp_queue_depth);
                        __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
                        __swab32s(&msg->ibm_u.connparams.ibcp_max_frags);
                }
                break;
        }

        return 0;
}
int
kibnal_set_mynid(ptl_nid_t nid)
{
        static cm_listen_data_t info;   /* protected by kib_nid_mutex */

        lib_ni_t    *ni = &kibnal_lib.libnal_ni;
        cm_return_t  cmrc;

        CDEBUG(D_IOCTL, "setting mynid to "LPX64" (old nid="LPX64")\n",
               nid, ni->ni_pid.nid);

        down (&kibnal_data.kib_nid_mutex);

        if (nid == ni->ni_pid.nid) {
                /* no change of NID */
                up (&kibnal_data.kib_nid_mutex);
                return 0;
        }

        CDEBUG(D_NET, "NID "LPX64"("LPX64")\n", ni->ni_pid.nid, nid);

        if (kibnal_data.kib_listen_handle != NULL) {
                cmrc = cm_cancel(kibnal_data.kib_listen_handle);
                if (cmrc != cm_stat_success)
                        CERROR ("Error %d stopping listener\n", cmrc);

                kibnal_pause(HZ/10);    /* ensure no more callbacks */

                cmrc = cm_destroy_cep(kibnal_data.kib_listen_handle);
                if (cmrc != vv_return_ok)
                        CERROR ("Error %d destroying CEP\n", cmrc);

                kibnal_data.kib_listen_handle = NULL;
        }

        /* Change NID.  NB queued passive connection requests (if any) will be
         * rejected with an incorrect destination NID */
        ni->ni_pid.nid = nid;
        kibnal_data.kib_incarnation++;

        /* Delete all existing peers and their connections after new
         * NID/incarnation set to ensure no old connections in our brave
         * new world */
        kibnal_del_peer (PTL_NID_ANY, 0);

        if (ni->ni_pid.nid != PTL_NID_ANY) {    /* got a new NID to install */
                kibnal_data.kib_listen_handle =
                        cm_create_cep(cm_cep_transp_rc);
                if (kibnal_data.kib_listen_handle == NULL) {
                        CERROR ("Can't create listen CEP\n");
                        goto failed_0;
                }

                CDEBUG(D_NET, "Created CEP %p for listening\n",
                       kibnal_data.kib_listen_handle);

                memset(&info, 0, sizeof(info));
                info.listen_addr.end_pt.sid = kibnal_data.kib_svc_id;

                cmrc = cm_listen(kibnal_data.kib_listen_handle, &info,
                                 kibnal_listen_callback, NULL);
                if (cmrc != cm_stat_success) {
                        CERROR ("cm_listen error: %d\n", cmrc);
                        goto failed_1;
                }
        }

        up (&kibnal_data.kib_nid_mutex);
        return 0;

 failed_1:
        cmrc = cm_destroy_cep(kibnal_data.kib_listen_handle);
        LASSERT (cmrc == cm_stat_success);
        kibnal_data.kib_listen_handle = NULL;

 failed_0:
        ni->ni_pid.nid = PTL_NID_ANY;
        kibnal_data.kib_incarnation++;
        kibnal_del_peer (PTL_NID_ANY, 0);
        up (&kibnal_data.kib_nid_mutex);
        return -EINVAL;
}
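
/* Note on the NID change above: bumping kib_incarnation means every message
 * packed from now on carries a new ibm_srcstamp, so peers can distinguish
 * connections from our previous identity; deleting all peers afterwards
 * guarantees nothing survives from the old one. */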
kib_peer_t *
kibnal_create_peer (ptl_nid_t nid)
{
        kib_peer_t *peer;

        LASSERT (nid != PTL_NID_ANY);

        PORTAL_ALLOC(peer, sizeof (*peer));
        if (peer == NULL) {
                CERROR("Cannot allocate peer\n");
                return (NULL);
        }

        memset(peer, 0, sizeof(*peer));         /* zero flags etc */

        peer->ibp_nid = nid;
        atomic_set (&peer->ibp_refcount, 1);    /* 1 ref for caller */

        INIT_LIST_HEAD (&peer->ibp_list);       /* not in the peer table yet */
        INIT_LIST_HEAD (&peer->ibp_conns);
        INIT_LIST_HEAD (&peer->ibp_tx_queue);

        peer->ibp_reconnect_time = jiffies;
        peer->ibp_reconnect_interval = IBNAL_MIN_RECONNECT_INTERVAL;

        atomic_inc (&kibnal_data.kib_npeers);
        if (atomic_read(&kibnal_data.kib_npeers) <= IBNAL_CONCURRENT_PEERS)
                return peer;

        CERROR("Too many peers: CQ will overflow\n");
        kibnal_peer_decref(peer);
        return NULL;
}
void
kibnal_destroy_peer (kib_peer_t *peer)
{
        LASSERT (atomic_read (&peer->ibp_refcount) == 0);
        LASSERT (peer->ibp_persistence == 0);
        LASSERT (!kibnal_peer_active(peer));
        LASSERT (peer->ibp_connecting == 0);
        LASSERT (list_empty (&peer->ibp_conns));
        LASSERT (list_empty (&peer->ibp_tx_queue));

        PORTAL_FREE (peer, sizeof (*peer));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */
        atomic_dec (&kibnal_data.kib_npeers);
}
/* the caller is responsible for accounting for the additional reference
 * that this creates */
kib_peer_t *
kibnal_find_peer_locked (ptl_nid_t nid)
{
        struct list_head *peer_list = kibnal_nid2peerlist (nid);
        struct list_head *tmp;
        kib_peer_t       *peer;

        list_for_each (tmp, peer_list) {

                peer = list_entry (tmp, kib_peer_t, ibp_list);

                LASSERT (peer->ibp_persistence != 0 ||  /* persistent peer */
                         peer->ibp_connecting != 0 ||   /* creating conns */
                         !list_empty (&peer->ibp_conns)); /* active conn */

                if (peer->ibp_nid != nid)
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> "LPX64" (%d)\n",
                       peer, nid, atomic_read (&peer->ibp_refcount));
                return peer;
        }

        return NULL;
}
void
kibnal_unlink_peer_locked (kib_peer_t *peer)
{
        LASSERT (peer->ibp_persistence == 0);
        LASSERT (list_empty(&peer->ibp_conns));

        LASSERT (kibnal_peer_active(peer));
        list_del_init (&peer->ibp_list);
        /* lose peerlist's ref */
        kibnal_peer_decref(peer);
}
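
/* Peers live in a hash table keyed by NID (kibnal_nid2peerlist()); the
 * table holds one reference, dropped by the unlink above.  A peer must
 * always be persistent, connecting, or have a live connection while it is
 * in the table - that invariant is what the LASSERTs in the lookup loops
 * below re-check. */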
int
kibnal_get_peer_info (int index, ptl_nid_t *nidp, __u32 *ipp,
                      int *persistencep)
{
        kib_peer_t       *peer;
        struct list_head *ptmp;
        int               i;
        unsigned long     flags;

        read_lock_irqsave(&kibnal_data.kib_global_lock, flags);

        for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {

                list_for_each (ptmp, &kibnal_data.kib_peers[i]) {

                        peer = list_entry (ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_persistence != 0 ||
                                 peer->ibp_connecting != 0 ||
                                 !list_empty (&peer->ibp_conns));

                        if (index-- > 0)
                                continue;

                        *nidp = peer->ibp_nid;
                        *ipp = peer->ibp_ip;
                        *persistencep = peer->ibp_persistence;

                        read_unlock_irqrestore(&kibnal_data.kib_global_lock,
                                               flags);
                        return 0;
                }
        }

        read_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
        return -ENOENT;
}
int
kibnal_add_persistent_peer (ptl_nid_t nid, __u32 ip)
{
        kib_peer_t   *peer;
        kib_peer_t   *peer2;
        unsigned long flags;

        CDEBUG(D_NET, LPX64"@%08x\n", nid, ip);

        if (nid == PTL_NID_ANY)
                return (-EINVAL);

        peer = kibnal_create_peer (nid);
        if (peer == NULL)
                return (-ENOMEM);

        write_lock_irqsave(&kibnal_data.kib_global_lock, flags);

        peer2 = kibnal_find_peer_locked (nid);
        if (peer2 != NULL) {
                kibnal_peer_decref (peer);
                peer = peer2;
        } else {
                /* peer table takes existing ref on peer */
                list_add_tail (&peer->ibp_list,
                               kibnal_nid2peerlist (nid));
        }

        peer->ibp_ip = ip;
        peer->ibp_persistence++;

        write_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
        return (0);
}
void
kibnal_del_peer_locked (kib_peer_t *peer, int single_share)
{
        struct list_head *ctmp;
        struct list_head *cnxt;
        kib_conn_t       *conn;

        if (!single_share)
                peer->ibp_persistence = 0;
        else if (peer->ibp_persistence > 0)
                peer->ibp_persistence--;

        if (peer->ibp_persistence != 0)
                return;

        if (list_empty(&peer->ibp_conns)) {
                kibnal_unlink_peer_locked(peer);
        } else {
                list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                        conn = list_entry(ctmp, kib_conn_t, ibc_list);

                        kibnal_close_conn_locked (conn, 0);
                }
                /* NB peer is no longer persistent; closing its last conn
                 * unlinked it. */
        }
        /* NB peer now unlinked; might even be freed if the peer table
         * had the last ref on it */
}
int
kibnal_del_peer (ptl_nid_t nid, int single_share)
{
        struct list_head *ptmp;
        struct list_head *pnxt;
        kib_peer_t       *peer;
        int               lo;
        int               hi;
        int               i;
        unsigned long     flags;
        int               rc = -ENOENT;

        write_lock_irqsave(&kibnal_data.kib_global_lock, flags);

        if (nid != PTL_NID_ANY)
                lo = hi = kibnal_nid2peerlist(nid) - kibnal_data.kib_peers;
        else {
                lo = 0;
                hi = kibnal_data.kib_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe (ptmp, pnxt, &kibnal_data.kib_peers[i]) {
                        peer = list_entry (ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_persistence != 0 ||
                                 peer->ibp_connecting != 0 ||
                                 !list_empty (&peer->ibp_conns));

                        if (!(nid == PTL_NID_ANY || peer->ibp_nid == nid))
                                continue;

                        kibnal_del_peer_locked (peer, single_share);
                        rc = 0;         /* matched something */

                        if (single_share)
                                goto out;
                }
        }
 out:
        write_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
        return (rc);
}
kib_conn_t *
kibnal_get_conn_by_idx (int index)
{
        kib_peer_t       *peer;
        struct list_head *ptmp;
        kib_conn_t       *conn;
        struct list_head *ctmp;
        int               i;
        unsigned long     flags;

        read_lock_irqsave(&kibnal_data.kib_global_lock, flags);

        for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {
                list_for_each (ptmp, &kibnal_data.kib_peers[i]) {

                        peer = list_entry (ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_persistence > 0 ||
                                 peer->ibp_connecting != 0 ||
                                 !list_empty (&peer->ibp_conns));

                        list_for_each (ctmp, &peer->ibp_conns) {
                                if (index-- > 0)
                                        continue;

                                conn = list_entry (ctmp, kib_conn_t, ibc_list);
                                kibnal_conn_addref(conn);
                                read_unlock_irqrestore(&kibnal_data.kib_global_lock,
                                                       flags);
                                return conn;
                        }
                }
        }

        read_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
        return NULL;
}
int
kibnal_set_qp_state (kib_conn_t *conn, vv_qp_state_t new_state)
{
        static vv_qp_attr_t attr;

        kib_connvars_t *cv = conn->ibc_connvars;
        vv_return_t     vvrc;

        /* Only called by connd => static OK */
        LASSERT (!in_interrupt());
        LASSERT (current == kibnal_data.kib_connd);

        memset(&attr, 0, sizeof(attr));

        switch (new_state) {
        default:
                LBUG();

        case vv_qp_state_init: {
                struct vv_qp_modify_init_st *init = &attr.modify.params.init;

                init->p_key_indx     = cv->cv_pkey_index;
                init->phy_port_num   = cv->cv_port;
                init->q_key          = IBNAL_QKEY; /* XXX but VV_QP_AT_Q_KEY not set! */
                init->access_control = vv_acc_r_mem_read |
                                       vv_acc_r_mem_write; /* XXX vv_acc_l_mem_write ? */

                attr.modify.vv_qp_attr_mask = VV_QP_AT_P_KEY_IX |
                                              VV_QP_AT_PHY_PORT_NUM |
                                              VV_QP_AT_ACCESS_CON_F;
                break;
        }
        case vv_qp_state_rtr: {
                struct vv_qp_modify_rtr_st *rtr = &attr.modify.params.rtr;
                vv_add_vec_t               *av  = &rtr->remote_add_vec;

                av->dlid                      = cv->cv_path.dlid;
                av->grh_flag                  = (!IBNAL_LOCAL_SUB);
                av->max_static_rate           = IBNAL_R_2_STATIC_RATE(cv->cv_path.rate);
                av->service_level             = cv->cv_path.sl;
                av->source_path_bit           = IBNAL_SOURCE_PATH_BIT;
                av->pmtu                      = cv->cv_path.mtu;
                av->rnr_retry_count           = cv->cv_rnr_count;
                av->global_dest.traffic_class = cv->cv_path.traffic_class;
                av->global_dest.hope_limit    = cv->cv_path.hop_limut;
                av->global_dest.flow_lable    = cv->cv_path.flow_label;
                av->global_dest.s_gid_index   = cv->cv_sgid_index;
                // XXX other av fields zero?

                rtr->destanation_qp            = cv->cv_remote_qpn;
                rtr->receive_psn               = cv->cv_rxpsn;
                rtr->responder_rdma_r_atom_num = IBNAL_OUS_DST_RD;
                // XXX ? rtr->opt_min_rnr_nak_timer = 16;

                // XXX sdp sets VV_QP_AT_OP_F but no actual optional options
                attr.modify.vv_qp_attr_mask = VV_QP_AT_ADD_VEC |
                                              /* ... mask bits elided ... */
                                              VV_QP_AT_MIN_RNR_NAK_T |
                                              VV_QP_AT_RESP_RDMA_ATOM_OUT_NUM
                                              /* ... */;
                break;
        }
        case vv_qp_state_rts: {
                struct vv_qp_modify_rts_st *rts = &attr.modify.params.rts;

                rts->send_psn                 = cv->cv_txpsn;
                rts->local_ack_timeout        = IBNAL_LOCAL_ACK_TIMEOUT;
                rts->retry_num                = IBNAL_RETRY_CNT;
                rts->rnr_num                  = IBNAL_RNR_CNT;
                rts->dest_out_rdma_r_atom_num = IBNAL_OUS_DST_RD;

                attr.modify.vv_qp_attr_mask = VV_QP_AT_S_PSN |
                                              /* ... mask bits elided ... */
                                              VV_QP_AT_DEST_RDMA_ATOM_OUT_NUM;
                break;
        }
        case vv_qp_state_error:
        case vv_qp_state_reset:
                attr.modify.vv_qp_attr_mask = 0;
                break;
        }

        attr.modify.qp_modify_into_state = new_state;
        attr.modify.vv_qp_attr_mask     |= VV_QP_AT_STATE;

        vvrc = vv_qp_modify(kibnal_data.kib_hca, conn->ibc_qp, &attr, NULL);
        if (vvrc != vv_return_ok) {
                CERROR("Can't modify qp -> "LPX64" state to %d: %d\n",
                       conn->ibc_peer->ibp_nid, new_state, vvrc);
                return -EIO;
        }

        return 0;
}
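
/* The connd drives each QP through reset -> init -> rtr -> rts using the
 * switch arms above, filling vv_qp_attr_t plus the matching attribute mask
 * for every transition.  NB the odd spellings above (destanation_qp,
 * hope_limit, flow_lable) and below (vv_state_linkDoun) appear to be the
 * Voltaire verbs headers' own identifiers, not typos in this file. */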
kib_conn_t *
kibnal_create_conn (cm_cep_handle_t cep)
{
        kib_conn_t  *conn;
        int          i;
        int          page_offset;
        int          ipage;
        vv_return_t  vvrc;
        int          rc;
        __u64        vaddr;
        __u64        vaddr_base;

        static vv_qp_attr_t  reqattr;
        static vv_qp_attr_t  rspattr;

        /* Only the connd creates conns => single threaded */
        LASSERT(!in_interrupt());
        LASSERT(current == kibnal_data.kib_connd);

        PORTAL_ALLOC(conn, sizeof (*conn));
        if (conn == NULL) {
                CERROR ("Can't allocate connection\n");
                return (NULL);
        }

        /* zero flags, NULL pointers etc... */
        memset (conn, 0, sizeof (*conn));

        INIT_LIST_HEAD (&conn->ibc_early_rxs);
        INIT_LIST_HEAD (&conn->ibc_tx_queue);
        INIT_LIST_HEAD (&conn->ibc_active_txs);
        spin_lock_init (&conn->ibc_lock);

        atomic_inc (&kibnal_data.kib_nconns);
        /* well not really, but I call destroy() on failure, which decrements */

        conn->ibc_cep = cep;

        PORTAL_ALLOC(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
        if (conn->ibc_connvars == NULL) {
                CERROR("Can't allocate in-progress connection state\n");
                goto failed;
        }
        memset (conn->ibc_connvars, 0, sizeof(*conn->ibc_connvars));
        /* Random seed for QP sequence number */
        get_random_bytes(&conn->ibc_connvars->cv_rxpsn,
                         sizeof(conn->ibc_connvars->cv_rxpsn));

        PORTAL_ALLOC(conn->ibc_rxs, IBNAL_RX_MSGS * sizeof (kib_rx_t));
        if (conn->ibc_rxs == NULL) {
                CERROR("Cannot allocate RX buffers\n");
                goto failed;
        }
        memset (conn->ibc_rxs, 0, IBNAL_RX_MSGS * sizeof(kib_rx_t));

        rc = kibnal_alloc_pages(&conn->ibc_rx_pages, IBNAL_RX_MSG_PAGES, 1);
        if (rc != 0)
                goto failed;

        vaddr_base = vaddr = conn->ibc_rx_pages->ibp_vaddr;

        for (i = ipage = page_offset = 0; i < IBNAL_RX_MSGS; i++) {
                struct page *page = conn->ibc_rx_pages->ibp_pages[ipage];
                kib_rx_t    *rx = &conn->ibc_rxs[i];

                rx->rx_msg = (kib_msg_t *)(((char *)page_address(page)) +
                                           page_offset);

#if IBNAL_WHOLE_MEM
                {
                        vv_mem_reg_h_t  mem_h;
                        /* ... */

                        /* Voltaire stack already registers the whole
                         * memory, so use that API. */
                        vvrc = vv_get_gen_mr_attrib(kibnal_data.kib_hca,
                                                    /* ... args elided ... */);
                        LASSERT (vvrc == vv_return_ok);
                }
#else
                rx->rx_vaddr = vaddr;
#endif
                CDEBUG(D_NET, "Rx[%d] %p->%p[%x:"LPX64"]\n", i, rx,
                       rx->rx_msg, KIBNAL_RX_LKEY(rx), KIBNAL_RX_VADDR(rx));

                vaddr += IBNAL_MSG_SIZE;
                LASSERT (vaddr <= vaddr_base + IBNAL_RX_MSG_BYTES);

                page_offset += IBNAL_MSG_SIZE;
                LASSERT (page_offset <= PAGE_SIZE);

                if (page_offset == PAGE_SIZE) {
                        page_offset = 0;
                        ipage++;
                        LASSERT (ipage <= IBNAL_RX_MSG_PAGES);
                }
        }

        memset(&reqattr, 0, sizeof(reqattr));

        reqattr.create.qp_type                    = vv_qp_type_r_conn;
        reqattr.create.cq_send_h                  = kibnal_data.kib_cq;
        reqattr.create.cq_receive_h               = kibnal_data.kib_cq;
        reqattr.create.send_max_outstand_wr       = (1 + IBNAL_MAX_RDMA_FRAGS) *
                                                    IBNAL_MSG_QUEUE_SIZE;
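
        /* Send queue sizing, as the parameters above imply: each of the
         * IBNAL_MSG_QUEUE_SIZE in-flight messages may queue one send WR for
         * the message itself plus up to IBNAL_MAX_RDMA_FRAGS RDMA WRs,
         * hence (1 + IBNAL_MAX_RDMA_FRAGS) * IBNAL_MSG_QUEUE_SIZE; the
         * receive side only ever posts the IBNAL_RX_MSGS message buffers. */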
        reqattr.create.receive_max_outstand_wr    = IBNAL_RX_MSGS;
        reqattr.create.max_scatgat_per_send_wr    = 1;
        reqattr.create.max_scatgat_per_receive_wr = 1;
        reqattr.create.signaling_type             = vv_selectable_signaling;
        reqattr.create.pd_h                       = kibnal_data.kib_pd;
        reqattr.create.recv_solicited_events      = vv_selectable_signaling; // vv_signal_all;

        vvrc = vv_qp_create(kibnal_data.kib_hca, &reqattr, NULL,
                            &conn->ibc_qp, &rspattr);
        if (vvrc != vv_return_ok) {
                CERROR ("Failed to create queue pair: %d\n", vvrc);
                goto failed;
        }

        /* Mark QP created */
        conn->ibc_state = IBNAL_CONN_INIT;
        conn->ibc_connvars->cv_local_qpn = rspattr.create_return.qp_num;

        if (rspattr.create_return.receive_max_outstand_wr <
            IBNAL_MSG_QUEUE_SIZE ||
            rspattr.create_return.send_max_outstand_wr <
            (1 + IBNAL_MAX_RDMA_FRAGS) * IBNAL_MSG_QUEUE_SIZE) {
                CERROR("Insufficient rx/tx work items: wanted %d/%d got %d/%d\n",
                       IBNAL_MSG_QUEUE_SIZE,
                       (1 + IBNAL_MAX_RDMA_FRAGS) * IBNAL_MSG_QUEUE_SIZE,
                       rspattr.create_return.receive_max_outstand_wr,
                       rspattr.create_return.send_max_outstand_wr);
                goto failed;
        }

        /* 1 ref for caller */
        atomic_set (&conn->ibc_refcount, 1);
        return conn;

 failed:
        kibnal_destroy_conn (conn);
        return NULL;
}
void
kibnal_destroy_conn (kib_conn_t *conn)
{
        vv_return_t vvrc;

        /* Only the connd does this (i.e. single threaded) */
        LASSERT (!in_interrupt());
        LASSERT (current == kibnal_data.kib_connd);

        CDEBUG (D_NET, "connection %p\n", conn);

        LASSERT (atomic_read (&conn->ibc_refcount) == 0);
        LASSERT (list_empty(&conn->ibc_early_rxs));
        LASSERT (list_empty(&conn->ibc_tx_queue));
        LASSERT (list_empty(&conn->ibc_active_txs));
        LASSERT (conn->ibc_nsends_posted == 0);

        switch (conn->ibc_state) {
        default:
                /* conn must be completely disengaged from the network */
                LBUG();

        case IBNAL_CONN_DISCONNECTED:
                /* connvars should have been freed already */
                LASSERT (conn->ibc_connvars == NULL);
                /* fall through */

        case IBNAL_CONN_INIT:
                kibnal_set_qp_state(conn, vv_qp_state_reset);
                vvrc = vv_qp_destroy(kibnal_data.kib_hca, conn->ibc_qp);
                if (vvrc != vv_return_ok)
                        CERROR("Can't destroy QP: %d\n", vvrc);
                /* fall through */

        case IBNAL_CONN_INIT_NOTHING:
                break;
        }

        if (conn->ibc_rx_pages != NULL)
                kibnal_free_pages(conn->ibc_rx_pages);

        if (conn->ibc_rxs != NULL)
                PORTAL_FREE(conn->ibc_rxs,
                            IBNAL_RX_MSGS * sizeof(kib_rx_t));

        if (conn->ibc_connvars != NULL)
                PORTAL_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));

        if (conn->ibc_peer != NULL)
                kibnal_peer_decref(conn->ibc_peer);

        vvrc = cm_destroy_cep(conn->ibc_cep);
        LASSERT (vvrc == vv_return_ok);

        PORTAL_FREE(conn, sizeof (*conn));
        atomic_dec(&kibnal_data.kib_nconns);
}
int
kibnal_close_peer_conns_locked (kib_peer_t *peer, int why)
{
        kib_conn_t       *conn;
        struct list_head *ctmp;
        struct list_head *cnxt;
        int               count = 0;

        list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                conn = list_entry (ctmp, kib_conn_t, ibc_list);

                count++;
                kibnal_close_conn_locked (conn, why);
        }

        return (count);
}
int
kibnal_close_stale_conns_locked (kib_peer_t *peer, __u64 incarnation)
{
        kib_conn_t       *conn;
        struct list_head *ctmp;
        struct list_head *cnxt;
        int               count = 0;

        list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                conn = list_entry (ctmp, kib_conn_t, ibc_list);

                if (conn->ibc_incarnation == incarnation)
                        continue;

                CDEBUG(D_NET, "Closing stale conn nid:"LPX64" incarnation:"LPX64"("LPX64")\n",
                       peer->ibp_nid, conn->ibc_incarnation, incarnation);

                count++;
                kibnal_close_conn_locked (conn, -ESTALE);
        }

        return (count);
}
int
kibnal_close_matching_conns (ptl_nid_t nid)
{
        kib_peer_t       *peer;
        struct list_head *ptmp;
        struct list_head *pnxt;
        int               lo;
        int               hi;
        int               i;
        int               count = 0;
        unsigned long     flags;

        write_lock_irqsave(&kibnal_data.kib_global_lock, flags);

        if (nid != PTL_NID_ANY)
                lo = hi = kibnal_nid2peerlist(nid) - kibnal_data.kib_peers;
        else {
                lo = 0;
                hi = kibnal_data.kib_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe (ptmp, pnxt, &kibnal_data.kib_peers[i]) {

                        peer = list_entry (ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_persistence != 0 ||
                                 peer->ibp_connecting != 0 ||
                                 !list_empty (&peer->ibp_conns));

                        if (!(nid == PTL_NID_ANY || nid == peer->ibp_nid))
                                continue;

                        count += kibnal_close_peer_conns_locked (peer, 0);
                }
        }

        write_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);

        /* wildcards always succeed */
        if (nid == PTL_NID_ANY)
                return (0);

        return (count == 0 ? -ENOENT : 0);
}
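
/* kibnal_cmd() below is the ioctl-style dispatcher for the NAL control
 * plane: userland configuration drives peer add/delete, connection
 * listing/teardown and NID registration through NAL_CMD_* requests carried
 * in struct portals_cfg. */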
int
kibnal_cmd(struct portals_cfg *pcfg, void * private)
{
        int rc = -EINVAL;

        LASSERT (pcfg != NULL);

        switch(pcfg->pcfg_command) {
        case NAL_CMD_GET_PEER: {
                ptl_nid_t   nid = 0;
                __u32       ip = 0;
                int         share_count = 0;

                rc = kibnal_get_peer_info(pcfg->pcfg_count,
                                          &nid, &ip, &share_count);
                pcfg->pcfg_nid   = nid;
                pcfg->pcfg_size  = 0;
                pcfg->pcfg_id    = ip;
                pcfg->pcfg_misc  = IBNAL_SERVICE_NUMBER; /* port */
                pcfg->pcfg_count = 0;
                pcfg->pcfg_wait  = share_count;
                break;
        }
        case NAL_CMD_ADD_PEER: {
                rc = kibnal_add_persistent_peer (pcfg->pcfg_nid,
                                                 pcfg->pcfg_id); /* IP */
                break;
        }
        case NAL_CMD_DEL_PEER: {
                rc = kibnal_del_peer (pcfg->pcfg_nid,
                                      /* flags == single_share */
                                      pcfg->pcfg_flags != 0);
                break;
        }
        case NAL_CMD_GET_CONN: {
                kib_conn_t *conn = kibnal_get_conn_by_idx (pcfg->pcfg_count);

                if (conn == NULL)
                        rc = -ENOENT;
                else {
                        rc = 0;
                        pcfg->pcfg_nid   = conn->ibc_peer->ibp_nid;
                        pcfg->pcfg_id    = 0;
                        pcfg->pcfg_misc  = 0;
                        pcfg->pcfg_flags = 0;
                        kibnal_conn_decref(conn);
                }
                break;
        }
        case NAL_CMD_CLOSE_CONNECTION: {
                rc = kibnal_close_matching_conns (pcfg->pcfg_nid);
                break;
        }
        case NAL_CMD_REGISTER_MYNID: {
                if (pcfg->pcfg_nid == PTL_NID_ANY)
                        rc = -EINVAL;
                else
                        rc = kibnal_set_mynid (pcfg->pcfg_nid);
                break;
        }
        }

        return rc;
}
void
kibnal_free_pages (kib_pages_t *p)
{
        int         npages = p->ibp_npages;
        vv_return_t vvrc;
        int         i;

        if (p->ibp_mapped) {
                vvrc = vv_mem_region_destroy(kibnal_data.kib_hca,
                                             p->ibp_handle);
                if (vvrc != vv_return_ok)
                        CERROR ("Deregister error: %d\n", vvrc);
        }

        for (i = 0; i < npages; i++)
                if (p->ibp_pages[i] != NULL)
                        __free_page(p->ibp_pages[i]);

        PORTAL_FREE (p, offsetof(kib_pages_t, ibp_pages[npages]));
}
int
kibnal_alloc_pages (kib_pages_t **pp, int npages, int allow_write)
{
        kib_pages_t *p;
        int          i;
#if !IBNAL_WHOLE_MEM
        vv_phy_list_t            vv_phys;
        vv_phy_buf_t            *phys_pages;
        vv_return_t              vvrc;
        vv_access_con_bit_mask_t access;
#endif

        PORTAL_ALLOC(p, offsetof(kib_pages_t, ibp_pages[npages]));
        if (p == NULL) {
                CERROR ("Can't allocate buffer %d\n", npages);
                return (-ENOMEM);
        }

        memset (p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
        p->ibp_npages = npages;

        for (i = 0; i < npages; i++) {
                p->ibp_pages[i] = alloc_page (GFP_KERNEL);
                if (p->ibp_pages[i] == NULL) {
                        CERROR ("Can't allocate page %d of %d\n", i, npages);
                        kibnal_free_pages(p);
                        return (-ENOMEM);
                }
        }

#if !IBNAL_WHOLE_MEM
        PORTAL_ALLOC(phys_pages, npages * sizeof(*phys_pages));
        if (phys_pages == NULL) {
                CERROR ("Can't allocate physarray for %d pages\n", npages);
                kibnal_free_pages(p);
                return (-ENOMEM);
        }

        vv_phys.number_of_buff = npages;
        vv_phys.phy_list = phys_pages;

        for (i = 0; i < npages; i++) {
                phys_pages[i].size = PAGE_SIZE;
                phys_pages[i].start = page_to_phys(p->ibp_pages[i]);
        }

        VV_ACCESS_CONTROL_MASK_SET_ALL(access);

        vvrc = vv_phy_mem_region_register(kibnal_data.kib_hca,
                                          &vv_phys,
                                          0,                     /* requested vaddr */
                                          npages * PAGE_SIZE, 0, /* offset */
                                          /* ... remaining args elided (PD,
                                           * access flags and the returned
                                           * handle/vaddr/lkey/rkey) ... */);

        PORTAL_FREE(phys_pages, npages * sizeof(*phys_pages));

        if (vvrc != vv_return_ok) {
                CERROR ("Error %d mapping %d pages\n", vvrc, npages);
                kibnal_free_pages(p);
                return (-ENOMEM);
        }

        CDEBUG(D_NET, "registered %d pages; handle: %x vaddr "LPX64" "
               "lkey %x rkey %x\n", npages, p->ibp_handle,
               p->ibp_vaddr, p->ibp_lkey, p->ibp_rkey);

        p->ibp_mapped = 1;
#endif

        *pp = p;
        return (0);
}
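
/* Two buffer registration strategies coexist in this file: with
 * IBNAL_WHOLE_MEM the Voltaire stack has already registered all of memory,
 * so buffers only need to look up their keys via vv_get_gen_mr_attrib();
 * without it, each buffer set is registered page-by-page as above and must
 * be deregistered again in kibnal_free_pages(). */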
int
kibnal_alloc_tx_descs (void)
{
        int    i;

        PORTAL_ALLOC (kibnal_data.kib_tx_descs,
                      IBNAL_TX_MSGS * sizeof(kib_tx_t));
        if (kibnal_data.kib_tx_descs == NULL)
                return -ENOMEM;

        memset(kibnal_data.kib_tx_descs, 0,
               IBNAL_TX_MSGS * sizeof(kib_tx_t));

        for (i = 0; i < IBNAL_TX_MSGS; i++) {
                kib_tx_t *tx = &kibnal_data.kib_tx_descs[i];

                PORTAL_ALLOC(tx->tx_wrq,
                             (1 + IBNAL_MAX_RDMA_FRAGS) *
                             sizeof(*tx->tx_wrq));
                if (tx->tx_wrq == NULL)
                        return -ENOMEM;

                PORTAL_ALLOC(tx->tx_gl,
                             (1 + IBNAL_MAX_RDMA_FRAGS) *
                             sizeof(*tx->tx_gl));
                if (tx->tx_gl == NULL)
                        return -ENOMEM;

                PORTAL_ALLOC(tx->tx_rd,
                             offsetof(kib_rdma_desc_t,
                                      rd_frags[IBNAL_MAX_RDMA_FRAGS]));
                if (tx->tx_rd == NULL)
                        return -ENOMEM;
        }

        return 0;
}
void
kibnal_free_tx_descs (void)
{
        int    i;

        if (kibnal_data.kib_tx_descs == NULL)
                return;

        for (i = 0; i < IBNAL_TX_MSGS; i++) {
                kib_tx_t *tx = &kibnal_data.kib_tx_descs[i];

                if (tx->tx_wrq != NULL)
                        PORTAL_FREE(tx->tx_wrq,
                                    (1 + IBNAL_MAX_RDMA_FRAGS) *
                                    sizeof(*tx->tx_wrq));

                if (tx->tx_gl != NULL)
                        PORTAL_FREE(tx->tx_gl,
                                    (1 + IBNAL_MAX_RDMA_FRAGS) *
                                    sizeof(*tx->tx_gl));

                if (tx->tx_rd != NULL)
                        PORTAL_FREE(tx->tx_rd,
                                    offsetof(kib_rdma_desc_t,
                                             rd_frags[IBNAL_MAX_RDMA_FRAGS]));
        }

        PORTAL_FREE(kibnal_data.kib_tx_descs,
                    IBNAL_TX_MSGS * sizeof(kib_tx_t));
}
int
kibnal_setup_tx_descs (void)
{
        int           ipage = 0;
        int           page_offset = 0;
        __u64         vaddr;
        __u64         vaddr_base;
        struct page  *page;
        kib_tx_t     *tx;
        vv_return_t   vvrc;
        int           i;
        int           rc;

        /* pre-mapped messages are not bigger than 1 page */
        CLASSERT (IBNAL_MSG_SIZE <= PAGE_SIZE);

        /* No fancy arithmetic when we do the buffer calculations */
        CLASSERT (PAGE_SIZE % IBNAL_MSG_SIZE == 0);

        rc = kibnal_alloc_pages(&kibnal_data.kib_tx_pages, IBNAL_TX_MSG_PAGES,
                                0);
        if (rc != 0)
                return (rc);

        /* ignored for the whole_mem case */
        vaddr = vaddr_base = kibnal_data.kib_tx_pages->ibp_vaddr;

        for (i = 0; i < IBNAL_TX_MSGS; i++) {
                page = kibnal_data.kib_tx_pages->ibp_pages[ipage];
                tx = &kibnal_data.kib_tx_descs[i];

                tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
                                           page_offset);
#if IBNAL_WHOLE_MEM
                {
                        vv_mem_reg_h_t  mem_h;
                        /* ... */

                        /* Voltaire stack already registers the whole
                         * memory, so use that API. */
                        vvrc = vv_get_gen_mr_attrib(kibnal_data.kib_hca,
                                                    /* ... args elided ... */);
                        LASSERT (vvrc == vv_return_ok);
                }
#else
                tx->tx_vaddr = vaddr;
#endif
                tx->tx_isnblk = (i >= IBNAL_NTX);
                tx->tx_mapped = KIB_TX_UNMAPPED;

                CDEBUG(D_NET, "Tx[%d] %p->%p[%x:"LPX64"]\n", i, tx,
                       tx->tx_msg, KIBNAL_TX_LKEY(tx), KIBNAL_TX_VADDR(tx));

                if (tx->tx_isnblk)
                        list_add (&tx->tx_list,
                                  &kibnal_data.kib_idle_nblk_txs);
                else
                        list_add (&tx->tx_list,
                                  &kibnal_data.kib_idle_txs);

                vaddr += IBNAL_MSG_SIZE;
                LASSERT (vaddr <= vaddr_base + IBNAL_TX_MSG_BYTES);

                page_offset += IBNAL_MSG_SIZE;
                LASSERT (page_offset <= PAGE_SIZE);

                if (page_offset == PAGE_SIZE) {
                        page_offset = 0;
                        ipage++;
                        LASSERT (ipage <= IBNAL_TX_MSG_PAGES);
                }
        }

        return (0);
}
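
/* The TX descriptor pool above is carved out of pre-mapped pages at
 * IBNAL_MSG_SIZE granularity.  Descriptors with index >= IBNAL_NTX are kept
 * on the separate kib_idle_nblk_txs list, presumably reserved for
 * allocations that must not block; the rest go on kib_idle_txs. */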
void
kibnal_api_shutdown (nal_t *nal)
{
        int          i;
        vv_return_t  vvrc;

        if (nal->nal_refct != 0) {
                /* This module got the first ref */
                PORTAL_MODULE_UNUSE;
                return;
        }

        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
               atomic_read (&portal_kmemory));

        LASSERT(nal == &kibnal_api);

        switch (kibnal_data.kib_init) {

        case IBNAL_INIT_ALL:
                /* stop calls to nal_cmd */
                libcfs_nal_cmd_unregister(VIBNAL);

                /* resetting my NID removes my listener and nukes all current
                 * peers and their connections */
                kibnal_set_mynid (PTL_NID_ANY);

                /* Wait for all peer state to clean up */
                i = 2;
                while (atomic_read (&kibnal_data.kib_npeers) != 0) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                               "waiting for %d peers to disconnect\n",
                               atomic_read (&kibnal_data.kib_npeers));
                        set_current_state (TASK_UNINTERRUPTIBLE);
                        schedule_timeout (HZ);
                }
                /* fall through */

        case IBNAL_INIT_CQ:
                vvrc = vv_cq_destroy(kibnal_data.kib_hca, kibnal_data.kib_cq);
                if (vvrc != vv_return_ok)
                        CERROR ("Destroy CQ error: %d\n", vvrc);
                /* fall through */

        case IBNAL_INIT_TXD:
                kibnal_free_pages (kibnal_data.kib_tx_pages);
                /* fall through */

        case IBNAL_INIT_PD:
#if !IBNAL_WHOLE_MEM
                vvrc = vv_pd_deallocate(kibnal_data.kib_hca,
                                        kibnal_data.kib_pd);
                if (vvrc != vv_return_ok)
                        CERROR ("Destroy PD error: %d\n", vvrc);
#endif
                /* fall through */

        case IBNAL_INIT_ASYNC:
                vvrc = vv_dell_async_event_cb (kibnal_data.kib_hca,
                                               kibnal_async_callback);
                if (vvrc != vv_return_ok)
                        CERROR("vv_dell_async_event_cb error: %d\n", vvrc);
                /* fall through */

        case IBNAL_INIT_HCA:
                vvrc = vv_hca_close(kibnal_data.kib_hca);
                if (vvrc != vv_return_ok)
                        CERROR ("Close HCA error: %d\n", vvrc);
                /* fall through */

        case IBNAL_INIT_LIB:
                lib_fini(&kibnal_lib);
                /* fall through */

        case IBNAL_INIT_DATA:
                LASSERT (atomic_read (&kibnal_data.kib_npeers) == 0);
                LASSERT (kibnal_data.kib_peers != NULL);
                for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {
                        LASSERT (list_empty (&kibnal_data.kib_peers[i]));
                }
                LASSERT (atomic_read (&kibnal_data.kib_nconns) == 0);
                LASSERT (list_empty (&kibnal_data.kib_sched_rxq));
                LASSERT (list_empty (&kibnal_data.kib_sched_txq));
                LASSERT (list_empty (&kibnal_data.kib_connd_zombies));
                LASSERT (list_empty (&kibnal_data.kib_connd_conns));
                LASSERT (list_empty (&kibnal_data.kib_connd_pcreqs));
                LASSERT (list_empty (&kibnal_data.kib_connd_peers));

                /* flag threads to terminate; wake and wait for them to die */
                kibnal_data.kib_shutdown = 1;
                wake_up_all (&kibnal_data.kib_sched_waitq);
                wake_up_all (&kibnal_data.kib_connd_waitq);

                i = 2;
                while (atomic_read (&kibnal_data.kib_nthreads) != 0) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                               "Waiting for %d threads to terminate\n",
                               atomic_read (&kibnal_data.kib_nthreads));
                        set_current_state (TASK_INTERRUPTIBLE);
                        schedule_timeout (HZ);
                }
                /* fall through */

        case IBNAL_INIT_NOTHING:
                break;
        }

        kibnal_free_tx_descs();

        if (kibnal_data.kib_peers != NULL)
                PORTAL_FREE (kibnal_data.kib_peers,
                             sizeof (struct list_head) *
                             kibnal_data.kib_peer_hash_size);

        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
               atomic_read (&portal_kmemory));
        printk(KERN_INFO "Lustre: Voltaire IB NAL unloaded (final mem %d)\n",
               atomic_read(&portal_kmemory));

        kibnal_data.kib_init = IBNAL_INIT_NOTHING;
}
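
/* Teardown above relies on the switch falling through from the
 * highest-reached init level (kib_init) down to IBNAL_INIT_NOTHING, so a
 * partially completed kibnal_api_startup() can always be unwound by calling
 * kibnal_api_shutdown() from its failure path. */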
int
kibnal_api_startup (nal_t *nal, ptl_pid_t requested_pid,
                    ptl_ni_limits_t *requested_limits,
                    ptl_ni_limits_t *actual_limits)
{
        struct timeval            tv;
        ptl_process_id_t          process_id;
        int                       pkmem = atomic_read(&portal_kmemory);
        int                       rc;
        int                       i;
        vv_request_event_record_t req_er;
        vv_return_t               vvrc;
        u_int32_t                 nentries;

        LASSERT (nal == &kibnal_api);

        if (nal->nal_refct != 0) {
                if (actual_limits != NULL)
                        *actual_limits = kibnal_lib.libnal_ni.ni_actual_limits;
                /* This module got the first ref */
                PORTAL_MODULE_USE;
                return (PTL_OK);
        }

        LASSERT (kibnal_data.kib_init == IBNAL_INIT_NOTHING);
        memset (&kibnal_data, 0, sizeof (kibnal_data)); /* zero pointers, flags etc */

        do_gettimeofday(&tv);
        kibnal_data.kib_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
        kibnal_data.kib_svc_id = IBNAL_SERVICE_NUMBER;

        init_MUTEX (&kibnal_data.kib_nid_mutex);

        rwlock_init(&kibnal_data.kib_global_lock);

        kibnal_data.kib_peer_hash_size = IBNAL_PEER_HASH_SIZE;
        PORTAL_ALLOC (kibnal_data.kib_peers,
                      sizeof (struct list_head) * kibnal_data.kib_peer_hash_size);
        if (kibnal_data.kib_peers == NULL)
                goto failed;
        for (i = 0; i < kibnal_data.kib_peer_hash_size; i++)
                INIT_LIST_HEAD(&kibnal_data.kib_peers[i]);

        spin_lock_init (&kibnal_data.kib_connd_lock);
        INIT_LIST_HEAD (&kibnal_data.kib_connd_peers);
        INIT_LIST_HEAD (&kibnal_data.kib_connd_pcreqs);
        INIT_LIST_HEAD (&kibnal_data.kib_connd_conns);
        INIT_LIST_HEAD (&kibnal_data.kib_connd_zombies);
        init_waitqueue_head (&kibnal_data.kib_connd_waitq);

        spin_lock_init (&kibnal_data.kib_sched_lock);
        INIT_LIST_HEAD (&kibnal_data.kib_sched_txq);
        INIT_LIST_HEAD (&kibnal_data.kib_sched_rxq);
        init_waitqueue_head (&kibnal_data.kib_sched_waitq);

        spin_lock_init (&kibnal_data.kib_tx_lock);
        INIT_LIST_HEAD (&kibnal_data.kib_idle_txs);
        INIT_LIST_HEAD (&kibnal_data.kib_idle_nblk_txs);
        init_waitqueue_head(&kibnal_data.kib_idle_tx_waitq);

        rc = kibnal_alloc_tx_descs();
        if (rc != 0) {
                CERROR("Can't allocate tx descs\n");
                goto failed;
        }

        /* lists/ptrs/locks initialised */
        kibnal_data.kib_init = IBNAL_INIT_DATA;
        /*****************************************************/

        process_id.pid = requested_pid;
        process_id.nid = PTL_NID_ANY;

        rc = lib_init(&kibnal_lib, nal, process_id,
                      requested_limits, actual_limits);
        if (rc != PTL_OK) {
                CERROR("lib_init failed: error %d\n", rc);
                goto failed;
        }

        /* lib interface initialised */
        kibnal_data.kib_init = IBNAL_INIT_LIB;
        /*****************************************************/

        for (i = 0; i < IBNAL_N_SCHED; i++) {
                rc = kibnal_thread_start (kibnal_scheduler, (void *)((long)i));
                if (rc != 0) {
                        CERROR("Can't spawn vibnal scheduler[%d]: %d\n",
                               i, rc);
                        goto failed;
                }
        }

        rc = kibnal_thread_start (kibnal_connd, NULL);
        if (rc != 0) {
                CERROR ("Can't spawn vibnal connd: %d\n", rc);
                goto failed;
        }

        /* TODO: apparently only one adapter is supported */
        vvrc = vv_hca_open("ANY_HCA", NULL, &kibnal_data.kib_hca);
        if (vvrc != vv_return_ok) {
                CERROR ("Can't open CA: %d\n", vvrc);
                goto failed;
        }

        /* Channel Adapter opened */
        kibnal_data.kib_init = IBNAL_INIT_HCA;
        /*****************************************************/

        /* register to get HCA's asynchronous events. */
        req_er.req_event_type = VV_ASYNC_EVENT_ALL_MASK;
        vvrc = vv_set_async_event_cb (kibnal_data.kib_hca, req_er,
                                      kibnal_async_callback);
        if (vvrc != vv_return_ok) {
                CERROR ("Can't set HCA async event callback: %d\n", vvrc);
                goto failed;
        }

        kibnal_data.kib_init = IBNAL_INIT_ASYNC;
        /*****************************************************/

        vvrc = vv_hca_query(kibnal_data.kib_hca, &kibnal_data.kib_hca_attrs);
        if (vvrc != vv_return_ok) {
                CERROR ("Can't size port attrs: %d\n", vvrc);
                goto failed;
        }

        kibnal_data.kib_port = -1;

        for (i = 0; i < kibnal_data.kib_hca_attrs.port_num; i++) {
                int               port_num = i + 1;
                u_int32_t         tbl_count;
                vv_port_attrib_t *pattr = &kibnal_data.kib_port_attr;

                vvrc = vv_port_query(kibnal_data.kib_hca, port_num, pattr);
                if (vvrc != vv_return_ok) {
                        CERROR("vv_port_query failed for port %d: %d\n",
                               port_num, vvrc);
                        continue;
                }

                switch (pattr->port_state) {
                case vv_state_linkDoun:
                        CDEBUG(D_NET, "port[%d] Down\n", port_num);
                        continue;

                case vv_state_linkInit:
                        CDEBUG(D_NET, "port[%d] Init\n", port_num);
                        continue;

                case vv_state_linkArm:
                        CDEBUG(D_NET, "port[%d] Armed\n", port_num);
                        continue;

                case vv_state_linkActive:
                        CDEBUG(D_NET, "port[%d] Active\n", port_num);

                        /* Found a suitable port. Get its GUID and PKEY. */
                        kibnal_data.kib_port = port_num;

                        tbl_count = 1;
                        vvrc = vv_get_port_gid_tbl(kibnal_data.kib_hca,
                                                   port_num, &tbl_count,
                                                   &kibnal_data.kib_port_gid);
                        if (vvrc != vv_return_ok) {
                                CERROR("vv_get_port_gid_tbl failed "
                                       "for port %d: %d\n", port_num, vvrc);
                                continue;
                        }

                        tbl_count = 1;
                        vvrc = vv_get_port_partition_tbl(kibnal_data.kib_hca,
                                                         port_num, &tbl_count,
                                                         &kibnal_data.kib_port_pkey);
                        if (vvrc != vv_return_ok) {
                                CERROR("vv_get_port_partition_tbl failed "
                                       "for port %d: %d\n", port_num, vvrc);
                                continue;
                        }
                        break;

                case vv_state_linkActDefer: /* TODO: correct? */
                case vv_state_linkNoChange:
                        CERROR("Unexpected port[%d] state %d\n",
                               i, pattr->port_state);
                        continue;
                }

                break;          /* got a suitable port */
        }

        if (kibnal_data.kib_port == -1) {
                CERROR ("Can't find an active port\n");
                goto failed;
        }

        CDEBUG(D_NET, "Using port %d - GID="LPX64":"LPX64"\n",
               kibnal_data.kib_port,
               kibnal_data.kib_port_gid.scope.g.subnet,
               kibnal_data.kib_port_gid.scope.g.eui64);

        /*****************************************************/

#if !IBNAL_WHOLE_MEM
        vvrc = vv_pd_allocate(kibnal_data.kib_hca, &kibnal_data.kib_pd);
#else
        vvrc = vv_get_gen_pd_h(kibnal_data.kib_hca, &kibnal_data.kib_pd);
#endif
        if (vvrc != vv_return_ok) {
                CERROR ("Can't create PD: %d\n", vvrc);
                goto failed;
        }

        /* flag PD initialised */
        kibnal_data.kib_init = IBNAL_INIT_PD;
        /*****************************************************/

        rc = kibnal_setup_tx_descs();
        if (rc != 0) {
                CERROR ("Can't register tx descs: %d\n", rc);
                goto failed;
        }

        /* flag TX descs initialised */
        kibnal_data.kib_init = IBNAL_INIT_TXD;
        /*****************************************************/

        vvrc = vv_cq_create(kibnal_data.kib_hca, IBNAL_CQ_ENTRIES,
                            /* ... completion callback args elided ... */
                            &kibnal_data.kib_cq, &nentries);
        if (vvrc != vv_return_ok) {
                CERROR ("Can't create RX CQ: %d\n", vvrc);
                goto failed;
        }

        /* flag CQ initialised */
        kibnal_data.kib_init = IBNAL_INIT_CQ;

        if (nentries < IBNAL_CQ_ENTRIES) {
                CERROR ("CQ only has %d entries, need %d\n",
                        nentries, IBNAL_CQ_ENTRIES);
                goto failed;
        }

        vvrc = vv_request_completion_notification(kibnal_data.kib_hca,
                                                  kibnal_data.kib_cq,
                                                  vv_next_solicit_unsolicit_event);
        if (vvrc != vv_return_ok) {
                CERROR ("Failed to re-arm completion queue: %d\n", vvrc);
                goto failed;
        }

        /*****************************************************/

        rc = libcfs_nal_cmd_register(VIBNAL, &kibnal_cmd, NULL);
        if (rc != 0) {
                CERROR ("Can't initialise command interface (rc = %d)\n", rc);
                goto failed;
        }

        /* flag everything initialised */
        kibnal_data.kib_init = IBNAL_INIT_ALL;
        /*****************************************************/

        printk(KERN_INFO "Lustre: Voltaire IB NAL loaded "
               "(initial mem %d)\n", pkmem);

        return (PTL_OK);

 failed:
        CDEBUG(D_NET, "kibnal_api_startup failed\n");
        kibnal_api_shutdown (&kibnal_api);
        return (PTL_FAIL);
}
void __exit
kibnal_module_fini (void)
{
#ifdef CONFIG_SYSCTL
        if (kibnal_tunables.kib_sysctl != NULL)
                unregister_sysctl_table (kibnal_tunables.kib_sysctl);
#endif
        PtlNIFini(kibnal_ni);

        ptl_unregister_nal(VIBNAL);
}
int __init
kibnal_module_init (void)
{
        int    rc;

        vibnal_assert_wire_constants();

        CLASSERT (offsetof(kib_msg_t, ibm_u) + sizeof(kib_connparams_t)
                  <= cm_REQ_priv_data_len);
        CLASSERT (offsetof(kib_msg_t, ibm_u) + sizeof(kib_connparams_t)
                  <= cm_REP_priv_data_len);
        CLASSERT (offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[IBNAL_MAX_RDMA_FRAGS])
                  <= IBNAL_MSG_SIZE);
        CLASSERT (offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[IBNAL_MAX_RDMA_FRAGS])
                  <= IBNAL_MSG_SIZE);

        /* the following must be sizeof(int) for proc_dointvec() */
        CLASSERT (sizeof (kibnal_tunables.kib_io_timeout) == sizeof (int));

        kibnal_api.nal_ni_init = kibnal_api_startup;
        kibnal_api.nal_ni_fini = kibnal_api_shutdown;

        /* Initialise dynamic tunables to defaults once only */
        kibnal_tunables.kib_io_timeout = IBNAL_IO_TIMEOUT;

        rc = ptl_register_nal(VIBNAL, &kibnal_api);
        if (rc != PTL_OK) {
                CERROR("Can't register IBNAL: %d\n", rc);
                return (-ENOMEM);               /* or something... */
        }

        /* Pure gateways want the NAL started up at module load time... */
        rc = PtlNIInit(VIBNAL, LUSTRE_SRV_PTL_PID, NULL, NULL, &kibnal_ni);
        if (rc != PTL_OK && rc != PTL_IFACE_DUP) {
                ptl_unregister_nal(VIBNAL);
                return (-ENODEV);
        }

#ifdef CONFIG_SYSCTL
        /* Press on regardless even if registering sysctl doesn't work */
        kibnal_tunables.kib_sysctl =
                register_sysctl_table (kibnal_top_ctl_table, 0);
#endif
        return (0);
}
MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
MODULE_DESCRIPTION("Kernel Voltaire IB NAL v0.01");
MODULE_LICENSE("GPL");

module_init(kibnal_module_init);
module_exit(kibnal_module_fini);