/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2004 Cluster File Systems, Inc.
 * Author: Eric Barton <eric@bartonsoftware.com>
 * Author: Frank Zago <fzago@systemfabricworks.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include "vibnal.h"

nal_t                   kibnal_api;
ptl_handle_ni_t         kibnal_ni;
kib_data_t              kibnal_data;
kib_tunables_t          kibnal_tunables;
#define IBNAL_SYSCTL             202

#define IBNAL_SYSCTL_TIMEOUT     1

static ctl_table kibnal_ctl_table[] = {
        {IBNAL_SYSCTL_TIMEOUT, "timeout",
         &kibnal_tunables.kib_io_timeout, sizeof (int),
         0644, NULL, &proc_dointvec},
        {0}
};

static ctl_table kibnal_top_ctl_table[] = {
        {IBNAL_SYSCTL, "vibnal", NULL, 0, 0555, kibnal_ctl_table},
        {0}
};
void
kibnal_pause(int ticks)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(ticks);
}
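/* Rotate-and-add checksum over the raw message bytes: rotate the running
 * 32-bit sum left by one and add each byte.  This is cheap corruption
 * detection, not a CRC.  Worked example over the two bytes {0x01, 0x02}:
 *     sum = ((0x00 << 1) | (0x00 >> 31)) + 0x01 = 0x01
 *     sum = ((0x01 << 1) | (0x01 >> 31)) + 0x02 = 0x04
 * A zero result is remapped to 1 so that 0 can mean "no checksum". */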
__u32
kibnal_cksum (void *ptr, int nob)
{
        char  *c  = ptr;
        __u32  sum = 0;

        while (nob-- > 0)
                sum = ((sum << 1) | (sum >> 31)) + *c++;

        /* ensure I don't return 0 (== no checksum) */
        return (sum == 0) ? 1 : sum;
}
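/* A wire message is the fixed header (everything up to the ibm_u union)
 * followed by a type-specific body; ibm_nob is the total number of bytes
 * that actually travels on the wire. */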
void
kibnal_init_msg(kib_msg_t *msg, int type, int body_nob)
{
        msg->ibm_type = type;
        msg->ibm_nob  = offsetof(kib_msg_t, ibm_u) + body_nob;
}
void
kibnal_pack_msg(kib_msg_t *msg, int credits, ptl_nid_t dstnid, __u64 dststamp)
{
        /* CAVEAT EMPTOR! all message fields not set here should have been
         * initialised previously. */
        msg->ibm_magic    = IBNAL_MSG_MAGIC;
        msg->ibm_version  = IBNAL_MSG_VERSION;
        /*   ibm_type */
        msg->ibm_credits  = credits;
        /*   ibm_nob */
        msg->ibm_cksum    = 0;
        msg->ibm_srcnid   = kibnal_lib.libnal_ni.ni_pid.nid;
        msg->ibm_srcstamp = kibnal_data.kib_incarnation;
        msg->ibm_dstnid   = dstnid;
        msg->ibm_dststamp = dststamp;

        /* NB ibm_cksum zero while computing cksum */
        msg->ibm_cksum    = kibnal_cksum(msg, msg->ibm_nob);
}
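/* Typical send-side usage (a sketch; the real callers live in vibnal_cb.c):
 *
 *      kibnal_init_msg(msg, IBNAL_MSG_IMMEDIATE, payload_nob);
 *      kibnal_pack_msg(msg, credits, dstnid, dststamp);
 *      ... post a send of msg->ibm_nob bytes ...
 *
 * kibnal_unpack_msg() below validates in an order that trusts nothing
 * before it has been checked: magic and version first (6 bytes suffice),
 * then the header, the length, the checksum (computed with ibm_cksum
 * zeroed, before any byte-swapping), and finally the type-specific body.
 * A byte-swapped magic tells us the peer has opposite endianness. */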
int
kibnal_unpack_msg(kib_msg_t *msg, int nob)
{
        const int hdr_size = offsetof(kib_msg_t, ibm_u);
        __u32     msg_cksum;
        __u32     msg_nob;
        int       flip;
        int       i;
        int       n;

        /* 6 bytes are enough to have received magic + version */
        if (nob < 6) {
                CERROR("Short message: %d\n", nob);
                return (-EPROTO);
        }

        if (msg->ibm_magic == IBNAL_MSG_MAGIC) {
                flip = 0;
        } else if (msg->ibm_magic == __swab32(IBNAL_MSG_MAGIC)) {
                flip = 1;
        } else {
                CERROR("Bad magic: %08x\n", msg->ibm_magic);
                return (-EPROTO);
        }

        if (msg->ibm_version !=
            (flip ? __swab16(IBNAL_MSG_VERSION) : IBNAL_MSG_VERSION)) {
                CERROR("Bad version: %d\n", msg->ibm_version);
                return (-EPROTO);
        }

        if (nob < hdr_size) {
                CERROR("Short message: %d\n", nob);
                return (-EPROTO);
        }

        msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
        if (msg_nob > nob) {
                CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
                return (-EPROTO);
        }

        /* checksum must be computed with ibm_cksum zero and BEFORE anything
         * gets flipped */
        msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
        msg->ibm_cksum = 0;
        if (msg_cksum != 0 &&
            msg_cksum != kibnal_cksum(msg, msg_nob)) {
                CERROR("Bad checksum\n");
                return (-EPROTO);
        }
        msg->ibm_cksum = msg_cksum;

        if (flip) {
                /* leave magic unflipped as a clue to peer endianness */
                __swab16s(&msg->ibm_version);
                CLASSERT (sizeof(msg->ibm_type) == 1);
                CLASSERT (sizeof(msg->ibm_credits) == 1);
                msg->ibm_nob = msg_nob;
                __swab64s(&msg->ibm_srcnid);
                __swab64s(&msg->ibm_srcstamp);
                __swab64s(&msg->ibm_dstnid);
                __swab64s(&msg->ibm_dststamp);
        }

        if (msg->ibm_srcnid == PTL_NID_ANY) {
                CERROR("Bad src nid: "LPX64"\n", msg->ibm_srcnid);
                return (-EPROTO);
        }

        switch (msg->ibm_type) {
        default:
                CERROR("Unknown message type %x\n", msg->ibm_type);
                return (-EPROTO);

        case IBNAL_MSG_NOOP:
                break;

        case IBNAL_MSG_IMMEDIATE:
                if (msg_nob < offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0])) {
                        CERROR("Short IMMEDIATE: %d(%d)\n", msg_nob,
                               (int)offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]));
                        return (-EPROTO);
                }
                break;

        case IBNAL_MSG_PUT_REQ:
                /* CAVEAT EMPTOR!  We don't actually put ibprm_rd on the wire;
                 * it's just there to remember the source buffers while we wait
                 * for the PUT_ACK */
                if (msg_nob < offsetof(kib_msg_t, ibm_u.putreq.ibprm_rd)) {
                        CERROR("Short PUT_REQ: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.putreq)));
                        return (-EPROTO);
                }
                break;

        case IBNAL_MSG_PUT_ACK:
                if (msg_nob < offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[0])) {
                        CERROR("Short PUT_ACK: %d(%d)\n", msg_nob,
                               (int)offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[0]));
                        return (-EPROTO);
                }

                if (flip) {
                        __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_key);
                        __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_nfrag);
                }

                n = msg->ibm_u.putack.ibpam_rd.rd_nfrag;
                if (n <= 0 || n > IBNAL_MAX_RDMA_FRAGS) {
                        CERROR("Bad PUT_ACK nfrags: %d, should be 0 < n <= %d\n",
                               n, IBNAL_MAX_RDMA_FRAGS);
                        return (-EPROTO);
                }

                if (msg_nob < offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[n])) {
                        CERROR("Short PUT_ACK: %d(%d)\n", msg_nob,
                               (int)offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[n]));
                        return (-EPROTO);
                }

                if (flip)
                        for (i = 0; i < n; i++) {
                                __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_frags[i].rf_nob);
                                __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_frags[i].rf_addr_lo);
                                __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_frags[i].rf_addr_hi);
                        }
                break;

        case IBNAL_MSG_GET_REQ:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.get)) {
                        CERROR("Short GET_REQ: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.get)));
                        return (-EPROTO);
                }

                if (flip) {
                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_key);
                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_nfrag);
                }

                n = msg->ibm_u.get.ibgm_rd.rd_nfrag;
                if (n <= 0 || n > IBNAL_MAX_RDMA_FRAGS) {
                        CERROR("Bad GET_REQ nfrags: %d, should be 0 < n <= %d\n",
                               n, IBNAL_MAX_RDMA_FRAGS);
                        return (-EPROTO);
                }

                if (msg_nob < offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[n])) {
                        CERROR("Short GET_REQ: %d(%d)\n", msg_nob,
                               (int)offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[n]));
                        return (-EPROTO);
                }

                if (flip)
                        for (i = 0; i < msg->ibm_u.get.ibgm_rd.rd_nfrag; i++) {
                                __swab32s(&msg->ibm_u.get.ibgm_rd.rd_frags[i].rf_nob);
                                __swab32s(&msg->ibm_u.get.ibgm_rd.rd_frags[i].rf_addr_lo);
                                __swab32s(&msg->ibm_u.get.ibgm_rd.rd_frags[i].rf_addr_hi);
                        }
                break;

        case IBNAL_MSG_PUT_NAK:
        case IBNAL_MSG_PUT_DONE:
        case IBNAL_MSG_GET_DONE:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.completion)) {
                        CERROR("Short RDMA completion: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.completion)));
                        return (-EPROTO);
                }
                if (flip)
                        __swab32s(&msg->ibm_u.completion.ibcm_status);
                break;

        case IBNAL_MSG_CONNREQ:
        case IBNAL_MSG_CONNACK:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.connparams)) {
                        CERROR("Short connreq/ack: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.connparams)));
                        return (-EPROTO);
                }
                if (flip) {
                        __swab32s(&msg->ibm_u.connparams.ibcp_queue_depth);
                        __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
                        __swab32s(&msg->ibm_u.connparams.ibcp_max_frags);
                }
                break;
        }
        return (0);
}
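/* Changing the local NID tears down the CM listener, bumps the incarnation
 * stamp and deletes every peer, so connections established under the old
 * identity can never be confused with new ones; only then is a fresh
 * listener created for the new NID. */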
int
kibnal_set_mynid(ptl_nid_t nid)
{
        static cm_listen_data_t info;           /* protected by kib_nid_mutex */

        lib_ni_t        *ni = &kibnal_lib.libnal_ni;
        cm_return_t      cmrc;

        CDEBUG(D_IOCTL, "setting mynid to "LPX64" (old nid="LPX64")\n",
               nid, ni->ni_pid.nid);

        down (&kibnal_data.kib_nid_mutex);

        if (nid == ni->ni_pid.nid) {
                /* no change of NID */
                up (&kibnal_data.kib_nid_mutex);
                return (0);
        }

        CDEBUG(D_NET, "NID "LPX64"("LPX64")\n", ni->ni_pid.nid, nid);

        if (kibnal_data.kib_listen_handle != NULL) {
                cmrc = cm_cancel(kibnal_data.kib_listen_handle);
                if (cmrc != cm_stat_success)
                        CERROR ("Error %d stopping listener\n", cmrc);

                kibnal_pause(HZ/10);            /* ensure no more callbacks */

                cmrc = cm_destroy_cep(kibnal_data.kib_listen_handle);
                if (cmrc != vv_return_ok)
                        CERROR ("Error %d destroying CEP\n", cmrc);

                kibnal_data.kib_listen_handle = NULL;
        }

        /* Change NID.  NB queued passive connection requests (if any) will be
         * rejected with an incorrect destination NID */
        ni->ni_pid.nid = nid;
        kibnal_data.kib_incarnation++;
        mb();

        /* Delete all existing peers and their connections after new
         * NID/incarnation set to ensure no old connections in our brave
         * new world. */
        kibnal_del_peer (PTL_NID_ANY, 0);

        if (ni->ni_pid.nid != PTL_NID_ANY) {    /* got a new NID to install */
                kibnal_data.kib_listen_handle =
                        cm_create_cep(cm_cep_transp_rc);
                if (kibnal_data.kib_listen_handle == NULL) {
                        CERROR ("Can't create listen CEP\n");
                        goto failed_0;
                }

                CDEBUG(D_NET, "Created CEP %p for listening\n",
                       kibnal_data.kib_listen_handle);

                memset(&info, 0, sizeof(info));
                info.listen_addr.end_pt.sid = kibnal_data.kib_svc_id;

                cmrc = cm_listen(kibnal_data.kib_listen_handle, &info,
                                 kibnal_listen_callback, NULL);
                if (cmrc != cm_stat_success) {
                        CERROR ("cm_listen error: %d\n", cmrc);
                        goto failed_1;
                }
        }

        up (&kibnal_data.kib_nid_mutex);
        return (0);

 failed_1:
        cmrc = cm_destroy_cep(kibnal_data.kib_listen_handle);
        LASSERT (cmrc == cm_stat_success);
        kibnal_data.kib_listen_handle = NULL;
 failed_0:
        ni->ni_pid.nid = PTL_NID_ANY;
        kibnal_data.kib_incarnation++;
        mb();
        kibnal_del_peer (PTL_NID_ANY, 0);
        up (&kibnal_data.kib_nid_mutex);
        return (-EINVAL);
}
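/* Peers are reference counted: the caller gets one ref, the peer table
 * takes another while the peer is listed, and every connection holds one
 * for its lifetime.  kib_npeers is capped so the completion queue sized
 * at startup cannot overflow. */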
kib_peer_t *
kibnal_create_peer (ptl_nid_t nid)
{
        kib_peer_t *peer;

        LASSERT (nid != PTL_NID_ANY);

        PORTAL_ALLOC(peer, sizeof (*peer));
        if (peer == NULL) {
                CERROR("Cannot allocate peer\n");
                return (NULL);
        }

        memset(peer, 0, sizeof(*peer));         /* zero flags etc */

        peer->ibp_nid = nid;
        atomic_set (&peer->ibp_refcount, 1);    /* 1 ref for caller */

        INIT_LIST_HEAD (&peer->ibp_list);       /* not in the peer table yet */
        INIT_LIST_HEAD (&peer->ibp_conns);
        INIT_LIST_HEAD (&peer->ibp_tx_queue);

        peer->ibp_reconnect_time = jiffies;
        peer->ibp_reconnect_interval = IBNAL_MIN_RECONNECT_INTERVAL;

        atomic_inc (&kibnal_data.kib_npeers);
        if (atomic_read(&kibnal_data.kib_npeers) <= IBNAL_CONCURRENT_PEERS)
                return (peer);

        CERROR("Too many peers: CQ will overflow\n");
        kibnal_peer_decref(peer);
        return (NULL);
}
void
kibnal_destroy_peer (kib_peer_t *peer)
{
        LASSERT (atomic_read (&peer->ibp_refcount) == 0);
        LASSERT (peer->ibp_persistence == 0);
        LASSERT (!kibnal_peer_active(peer));
        LASSERT (peer->ibp_connecting == 0);
        LASSERT (list_empty (&peer->ibp_conns));
        LASSERT (list_empty (&peer->ibp_tx_queue));

        PORTAL_FREE (peer, sizeof (*peer));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */
        atomic_dec (&kibnal_data.kib_npeers);
}
/* the caller is responsible for accounting for the additional reference
 * that this creates */
kib_peer_t *
kibnal_find_peer_locked (ptl_nid_t nid)
{
        struct list_head *peer_list = kibnal_nid2peerlist (nid);
        struct list_head *tmp;
        kib_peer_t       *peer;

        list_for_each (tmp, peer_list) {

                peer = list_entry (tmp, kib_peer_t, ibp_list);

                LASSERT (peer->ibp_persistence != 0 ||  /* persistent peer */
                         peer->ibp_connecting != 0 ||   /* creating conns */
                         !list_empty (&peer->ibp_conns));  /* active conn */

                if (peer->ibp_nid != nid)
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> "LPX64" (%d)\n",
                       peer, nid, atomic_read (&peer->ibp_refcount));
                return (peer);
        }
        return (NULL);
}
void
kibnal_unlink_peer_locked (kib_peer_t *peer)
{
        LASSERT (peer->ibp_persistence == 0);
        LASSERT (list_empty(&peer->ibp_conns));

        LASSERT (kibnal_peer_active(peer));
        list_del_init (&peer->ibp_list);
        /* lose peerlist's ref */
        kibnal_peer_decref(peer);
}
int
kibnal_get_peer_info (int index, ptl_nid_t *nidp, __u32 *ipp,
                      int *persistencep)
{
        kib_peer_t        *peer;
        struct list_head  *ptmp;
        int                i;
        unsigned long      flags;

        read_lock_irqsave(&kibnal_data.kib_global_lock, flags);

        for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {

                list_for_each (ptmp, &kibnal_data.kib_peers[i]) {

                        peer = list_entry (ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_persistence != 0 ||
                                 peer->ibp_connecting != 0 ||
                                 !list_empty (&peer->ibp_conns));

                        if (index-- > 0)
                                continue;

                        *nidp = peer->ibp_nid;
                        *ipp = peer->ibp_ip;
                        *persistencep = peer->ibp_persistence;

                        read_unlock_irqrestore(&kibnal_data.kib_global_lock,
                                               flags);
                        return (0);
                }
        }

        read_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
        return (-ENOENT);
}
int
kibnal_add_persistent_peer (ptl_nid_t nid, __u32 ip)
{
        kib_peer_t        *peer;
        kib_peer_t        *peer2;
        unsigned long      flags;

        CDEBUG(D_NET, LPX64"@%08x\n", nid, ip);

        if (nid == PTL_NID_ANY)
                return (-EINVAL);

        peer = kibnal_create_peer (nid);
        if (peer == NULL)
                return (-ENOMEM);

        write_lock_irqsave(&kibnal_data.kib_global_lock, flags);

        peer2 = kibnal_find_peer_locked (nid);
        if (peer2 != NULL) {
                kibnal_peer_decref (peer);
                peer = peer2;
        } else {
                /* peer table takes existing ref on peer */
                list_add_tail (&peer->ibp_list,
                               kibnal_nid2peerlist (nid));
        }

        peer->ibp_ip = ip;
        peer->ibp_persistence++;

        write_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
        return (0);
}
void
kibnal_del_peer_locked (kib_peer_t *peer, int single_share)
{
        struct list_head *ctmp;
        struct list_head *cnxt;
        kib_conn_t       *conn;

        if (!single_share)
                peer->ibp_persistence = 0;
        else if (peer->ibp_persistence > 0)
                peer->ibp_persistence--;

        if (peer->ibp_persistence != 0)
                return;

        if (list_empty(&peer->ibp_conns)) {
                kibnal_unlink_peer_locked(peer);
        } else {
                list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                        conn = list_entry(ctmp, kib_conn_t, ibc_list);

                        kibnal_close_conn_locked (conn, 0);
                }
                /* NB peer is no longer persistent; closing its last conn
                 * unlinked it. */
        }
        /* NB peer now unlinked; might even be freed if the peer table had the
         * last ref on it. */
}
int
kibnal_del_peer (ptl_nid_t nid, int single_share)
{
        struct list_head  *ptmp;
        struct list_head  *pnxt;
        kib_peer_t        *peer;
        int                lo;
        int                hi;
        int                i;
        unsigned long      flags;
        int                rc = -ENOENT;

        write_lock_irqsave(&kibnal_data.kib_global_lock, flags);

        if (nid != PTL_NID_ANY)
                lo = hi = kibnal_nid2peerlist(nid) - kibnal_data.kib_peers;
        else {
                lo = 0;
                hi = kibnal_data.kib_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe (ptmp, pnxt, &kibnal_data.kib_peers[i]) {
                        peer = list_entry (ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_persistence != 0 ||
                                 peer->ibp_connecting != 0 ||
                                 !list_empty (&peer->ibp_conns));

                        if (!(nid == PTL_NID_ANY || peer->ibp_nid == nid))
                                continue;

                        kibnal_del_peer_locked (peer, single_share);
                        rc = 0;         /* matched something */

                        if (single_share)
                                goto out;
                }
        }
 out:
        write_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);

        return (rc);
}
kib_conn_t *
kibnal_get_conn_by_idx (int index)
{
        kib_peer_t        *peer;
        struct list_head  *ptmp;
        kib_conn_t        *conn;
        struct list_head  *ctmp;
        int                i;
        unsigned long      flags;

        read_lock_irqsave(&kibnal_data.kib_global_lock, flags);

        for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {
                list_for_each (ptmp, &kibnal_data.kib_peers[i]) {

                        peer = list_entry (ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_persistence > 0 ||
                                 peer->ibp_connecting != 0 ||
                                 !list_empty (&peer->ibp_conns));

                        list_for_each (ctmp, &peer->ibp_conns) {
                                if (index-- > 0)
                                        continue;

                                conn = list_entry (ctmp, kib_conn_t, ibc_list);
                                kibnal_conn_addref(conn);
                                read_unlock_irqrestore(&kibnal_data.kib_global_lock,
                                                       flags);
                                return (conn);
                        }
                }
        }

        read_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
        return (NULL);
}
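/* Walk a QP through the standard IB state machine (RESET -> INIT -> RTR ->
 * RTS): INIT fixes the port, pkey index and access rights; RTR supplies
 * the remote address vector, destination QPN and receive PSN; RTS supplies
 * the send PSN and the retry/timeout budget.  The attribute block can be
 * static because only the single connd thread ever calls this. */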
int
kibnal_set_qp_state (kib_conn_t *conn, vv_qp_state_t new_state)
{
        static vv_qp_attr_t attr;

        kib_connvars_t   *cv = conn->ibc_connvars;
        vv_return_t       vvrc;

        /* Only called by connd => static OK */
        LASSERT (!in_interrupt());
        LASSERT (current == kibnal_data.kib_connd);

        memset(&attr, 0, sizeof(attr));

        switch (new_state) {
        default:
                LBUG();

        case vv_qp_state_init: {
                struct vv_qp_modify_init_st *init = &attr.modify.params.init;

                init->p_key_indx     = cv->cv_pkey_index;
                init->phy_port_num   = cv->cv_port;
                init->q_key          = IBNAL_QKEY; /* XXX but VV_QP_AT_Q_KEY not set! */
                init->access_control = vv_acc_r_mem_read |
                                       vv_acc_r_mem_write; /* XXX vv_acc_l_mem_write ? */

                attr.modify.vv_qp_attr_mask = VV_QP_AT_P_KEY_IX |
                                              VV_QP_AT_PHY_PORT_NUM |
                                              VV_QP_AT_ACCESS_CON_F;
                break;
        }
        case vv_qp_state_rtr: {
                struct vv_qp_modify_rtr_st *rtr = &attr.modify.params.rtr;
                vv_add_vec_t               *av  = &rtr->remote_add_vec;

                av->dlid                      = cv->cv_path.dlid;
                av->grh_flag                  = (!IBNAL_LOCAL_SUB);
                av->max_static_rate           = IBNAL_R_2_STATIC_RATE(cv->cv_path.rate);
                av->service_level             = cv->cv_path.sl;
                av->source_path_bit           = IBNAL_SOURCE_PATH_BIT;
                av->pmtu                      = cv->cv_path.mtu;
                av->rnr_retry_count           = cv->cv_rnr_count;
                av->global_dest.traffic_class = cv->cv_path.traffic_class;
                av->global_dest.hope_limit    = cv->cv_path.hop_limut;
                av->global_dest.flow_lable    = cv->cv_path.flow_label;
                av->global_dest.s_gid_index   = cv->cv_sgid_index;
                /* NB "hope_limit", "flow_lable", "hop_limut" and
                 * "destanation_qp" are spelled that way in the Voltaire
                 * API headers; don't "fix" them. */
                // XXX other av fields zero?

                rtr->destanation_qp            = cv->cv_remote_qpn;
                rtr->receive_psn               = cv->cv_rxpsn;
                rtr->responder_rdma_r_atom_num = IBNAL_OUS_DST_RD;

                // XXX ? rtr->opt_min_rnr_nak_timer = 16;

                // XXX sdp sets VV_QP_AT_OP_F but no actual optional options
                attr.modify.vv_qp_attr_mask = VV_QP_AT_ADD_VEC |
                                              VV_QP_AT_DEST_QP |
                                              VV_QP_AT_R_PSN |
                                              VV_QP_AT_MIN_RNR_NAK_T |
                                              VV_QP_AT_RESP_RDMA_ATOM_OUT_NUM |
                                              VV_QP_AT_OP_F;
                break;
        }
        case vv_qp_state_rts: {
                struct vv_qp_modify_rts_st *rts = &attr.modify.params.rts;

                rts->send_psn                 = cv->cv_txpsn;
                rts->local_ack_timeout        = IBNAL_LOCAL_ACK_TIMEOUT;
                rts->retry_num                = IBNAL_RETRY_CNT;
                rts->rnr_num                  = IBNAL_RNR_CNT;
                rts->dest_out_rdma_r_atom_num = IBNAL_OUS_DST_RD;

                attr.modify.vv_qp_attr_mask = VV_QP_AT_S_PSN |
                                              VV_QP_AT_L_ACK_T |
                                              VV_QP_AT_RETRY_NUM |
                                              VV_QP_AT_RNR_NUM |
                                              VV_QP_AT_DEST_RDMA_ATOM_OUT_NUM;
                break;
        }
        case vv_qp_state_error:
        case vv_qp_state_reset:
                attr.modify.vv_qp_attr_mask = 0;
                break;
        }

        attr.modify.qp_modify_into_state = new_state;
        attr.modify.vv_qp_attr_mask     |= VV_QP_AT_STATE;

        vvrc = vv_qp_modify(kibnal_data.kib_hca, conn->ibc_qp, &attr, NULL);
        if (vvrc != vv_return_ok) {
                CERROR("Can't modify qp -> "LPX64" state to %d: %d\n",
                       conn->ibc_peer->ibp_nid, new_state, vvrc);
                return (-EIO);
        }

        return (0);
}
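/* Create a connection on the given CM endpoint: allocate the conn and its
 * in-progress connection state, carve IBNAL_RX_MSGS message slots out of
 * the pre-mapped rx pages, then create a QP sized for the message queue
 * plus RDMA fragments.  Any failure unwinds via kibnal_destroy_conn(). */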
kib_conn_t *
kibnal_create_conn (cm_cep_handle_t cep)
{
        kib_conn_t   *conn;
        int           i;
        int           page_offset;
        int           ipage;
        vv_return_t   vvrc;
        int           rc;
        __u64         vaddr;
        __u64         vaddr_base;

        static vv_qp_attr_t  reqattr;
        static vv_qp_attr_t  rspattr;

        /* Only the connd creates conns => single threaded */
        LASSERT(!in_interrupt());
        LASSERT(current == kibnal_data.kib_connd);

        PORTAL_ALLOC(conn, sizeof (*conn));
        if (conn == NULL) {
                CERROR ("Can't allocate connection\n");
                return (NULL);
        }

        /* zero flags, NULL pointers etc... */
        memset (conn, 0, sizeof (*conn));

        INIT_LIST_HEAD (&conn->ibc_early_rxs);
        INIT_LIST_HEAD (&conn->ibc_tx_queue);
        INIT_LIST_HEAD (&conn->ibc_active_txs);
        spin_lock_init (&conn->ibc_lock);

        atomic_inc (&kibnal_data.kib_nconns);
        /* well not really, but I call destroy() on failure, which decrements */

        conn->ibc_cep = cep;

        PORTAL_ALLOC(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
        if (conn->ibc_connvars == NULL) {
                CERROR("Can't allocate in-progress connection state\n");
                goto failed;
        }
        memset (conn->ibc_connvars, 0, sizeof(*conn->ibc_connvars));
        /* Random seed for QP sequence number */
        get_random_bytes(&conn->ibc_connvars->cv_rxpsn,
                         sizeof(conn->ibc_connvars->cv_rxpsn));

        PORTAL_ALLOC(conn->ibc_rxs, IBNAL_RX_MSGS * sizeof (kib_rx_t));
        if (conn->ibc_rxs == NULL) {
                CERROR("Cannot allocate RX buffers\n");
                goto failed;
        }
        memset (conn->ibc_rxs, 0, IBNAL_RX_MSGS * sizeof(kib_rx_t));

        rc = kibnal_alloc_pages(&conn->ibc_rx_pages, IBNAL_RX_MSG_PAGES, 1);
        if (rc != 0)
                goto failed;

        vaddr_base = vaddr = conn->ibc_rx_pages->ibp_vaddr;

        for (i = ipage = page_offset = 0; i < IBNAL_RX_MSGS; i++) {
                struct page *page = conn->ibc_rx_pages->ibp_pages[ipage];
                kib_rx_t    *rx = &conn->ibc_rxs[i];

                rx->rx_conn = conn;
                rx->rx_msg = (kib_msg_t *)(((char *)page_address(page)) +
                                           page_offset);

#if IBNAL_WHOLE_MEM
                {
                        vv_mem_reg_h_t  mem_h;
                        vv_r_key_t      rkey;

                        /* Voltaire stack already registers the whole
                         * memory, so use that API. */
                        vvrc = vv_get_gen_mr_attrib(kibnal_data.kib_hca,
                                                    rx->rx_msg,
                                                    IBNAL_MSG_SIZE,
                                                    &mem_h,
                                                    &rx->rx_lkey,
                                                    &rkey);
                        LASSERT (vvrc == vv_return_ok);
                }
#else
                rx->rx_vaddr = vaddr;
#endif
                CDEBUG(D_NET, "Rx[%d] %p->%p[%x:"LPX64"]\n", i, rx,
                       rx->rx_msg, KIBNAL_RX_LKEY(rx), KIBNAL_RX_VADDR(rx));

                vaddr += IBNAL_MSG_SIZE;
                LASSERT (vaddr <= vaddr_base + IBNAL_RX_MSG_BYTES);

                page_offset += IBNAL_MSG_SIZE;
                LASSERT (page_offset <= PAGE_SIZE);

                if (page_offset == PAGE_SIZE) {
                        page_offset = 0;
                        ipage++;
                        LASSERT (ipage <= IBNAL_RX_MSG_PAGES);
                }
        }
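        /* Each queued message may be followed by up to IBNAL_MAX_RDMA_FRAGS
         * RDMA work requests, hence (1 + max frags) * queue size send WRs;
         * the receive side only ever posts the message buffers. */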

        memset(&reqattr, 0, sizeof(reqattr));

        reqattr.create.qp_type                    = vv_qp_type_r_conn;
        reqattr.create.cq_send_h                  = kibnal_data.kib_cq;
        reqattr.create.cq_receive_h               = kibnal_data.kib_cq;
        reqattr.create.send_max_outstand_wr       = (1 + IBNAL_MAX_RDMA_FRAGS) *
                                                    IBNAL_MSG_QUEUE_SIZE;
        reqattr.create.receive_max_outstand_wr    = IBNAL_RX_MSGS;
        reqattr.create.max_scatgat_per_send_wr    = 1;
        reqattr.create.max_scatgat_per_receive_wr = 1;
        reqattr.create.signaling_type             = vv_selectable_signaling;
        reqattr.create.pd_h                       = kibnal_data.kib_pd;
        reqattr.create.recv_solicited_events      = vv_selectable_signaling; // vv_signal_all;

        vvrc = vv_qp_create(kibnal_data.kib_hca, &reqattr, NULL,
                            &conn->ibc_qp, &rspattr);
        if (vvrc != vv_return_ok) {
                CERROR ("Failed to create queue pair: %d\n", vvrc);
                goto failed;
        }

        /* Mark QP created */
        conn->ibc_state = IBNAL_CONN_INIT;
        conn->ibc_connvars->cv_local_qpn = rspattr.create_return.qp_num;

        if (rspattr.create_return.receive_max_outstand_wr <
            IBNAL_MSG_QUEUE_SIZE ||
            rspattr.create_return.send_max_outstand_wr <
            (1 + IBNAL_MAX_RDMA_FRAGS) * IBNAL_MSG_QUEUE_SIZE) {
                CERROR("Insufficient rx/tx work items: wanted %d/%d got %d/%d\n",
                       IBNAL_MSG_QUEUE_SIZE,
                       (1 + IBNAL_MAX_RDMA_FRAGS) * IBNAL_MSG_QUEUE_SIZE,
                       rspattr.create_return.receive_max_outstand_wr,
                       rspattr.create_return.send_max_outstand_wr);
                goto failed;
        }

        /* 1 ref for caller */
        atomic_set (&conn->ibc_refcount, 1);
        return (conn);

 failed:
        kibnal_destroy_conn (conn);
        return (NULL);
}
void
kibnal_destroy_conn (kib_conn_t *conn)
{
        vv_return_t vvrc;

        /* Only the connd does this (i.e. single threaded) */
        LASSERT (!in_interrupt());
        LASSERT (current == kibnal_data.kib_connd);

        CDEBUG (D_NET, "connection %p\n", conn);

        LASSERT (atomic_read (&conn->ibc_refcount) == 0);
        LASSERT (list_empty(&conn->ibc_early_rxs));
        LASSERT (list_empty(&conn->ibc_tx_queue));
        LASSERT (list_empty(&conn->ibc_active_txs));
        LASSERT (conn->ibc_nsends_posted == 0);

        switch (conn->ibc_state) {
        default:
                /* conn must be completely disengaged from the network */
                LBUG();

        case IBNAL_CONN_DISCONNECTED:
                /* connvars should have been freed already */
                LASSERT (conn->ibc_connvars == NULL);

        case IBNAL_CONN_INIT:
                kibnal_set_qp_state(conn, vv_qp_state_reset);
                vvrc = vv_qp_destroy(kibnal_data.kib_hca, conn->ibc_qp);
                if (vvrc != vv_return_ok)
                        CERROR("Can't destroy QP: %d\n", vvrc);

        case IBNAL_CONN_INIT_NOTHING:
                break;
        }

        if (conn->ibc_rx_pages != NULL)
                kibnal_free_pages(conn->ibc_rx_pages);

        if (conn->ibc_rxs != NULL)
                PORTAL_FREE(conn->ibc_rxs,
                            IBNAL_RX_MSGS * sizeof(kib_rx_t));

        if (conn->ibc_connvars != NULL)
                PORTAL_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));

        if (conn->ibc_peer != NULL)
                kibnal_peer_decref(conn->ibc_peer);

        vvrc = cm_destroy_cep(conn->ibc_cep);
        LASSERT (vvrc == vv_return_ok);

        PORTAL_FREE(conn, sizeof (*conn));

        atomic_dec(&kibnal_data.kib_nconns);
}
int
kibnal_close_peer_conns_locked (kib_peer_t *peer, int why)
{
        kib_conn_t       *conn;
        struct list_head *ctmp;
        struct list_head *cnxt;
        int               count = 0;

        list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                conn = list_entry (ctmp, kib_conn_t, ibc_list);

                count++;
                kibnal_close_conn_locked (conn, why);
        }

        return (count);
}
int
kibnal_close_stale_conns_locked (kib_peer_t *peer, __u64 incarnation)
{
        kib_conn_t       *conn;
        struct list_head *ctmp;
        struct list_head *cnxt;
        int               count = 0;

        list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                conn = list_entry (ctmp, kib_conn_t, ibc_list);

                if (conn->ibc_incarnation == incarnation)
                        continue;

                CDEBUG(D_NET, "Closing stale conn nid:"LPX64" incarnation:"LPX64"("LPX64")\n",
                       peer->ibp_nid, conn->ibc_incarnation, incarnation);

                count++;
                kibnal_close_conn_locked (conn, -ESTALE);
        }

        return (count);
}
int
kibnal_close_matching_conns (ptl_nid_t nid)
{
        kib_peer_t       *peer;
        struct list_head *ptmp;
        struct list_head *pnxt;
        int               lo;
        int               hi;
        int               i;
        int               count = 0;
        unsigned long     flags;

        write_lock_irqsave(&kibnal_data.kib_global_lock, flags);

        if (nid != PTL_NID_ANY)
                lo = hi = kibnal_nid2peerlist(nid) - kibnal_data.kib_peers;
        else {
                lo = 0;
                hi = kibnal_data.kib_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe (ptmp, pnxt, &kibnal_data.kib_peers[i]) {

                        peer = list_entry (ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_persistence != 0 ||
                                 peer->ibp_connecting != 0 ||
                                 !list_empty (&peer->ibp_conns));

                        if (!(nid == PTL_NID_ANY || nid == peer->ibp_nid))
                                continue;

                        count += kibnal_close_peer_conns_locked (peer, 0);
                }
        }

        write_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);

        /* wildcards always succeed */
        if (nid == PTL_NID_ANY)
                return (0);

        return (count == 0 ? -ENOENT : 0);
}
int
kibnal_cmd(struct portals_cfg *pcfg, void * private)
{
        int rc = -EINVAL;

        LASSERT (pcfg != NULL);

        switch(pcfg->pcfg_command) {
        case NAL_CMD_GET_PEER: {
                ptl_nid_t   nid = 0;
                __u32       ip = 0;
                int         share_count = 0;

                rc = kibnal_get_peer_info(pcfg->pcfg_count,
                                          &nid, &ip, &share_count);
                pcfg->pcfg_nid   = nid;
                pcfg->pcfg_size  = 0;
                pcfg->pcfg_id    = ip;
                pcfg->pcfg_misc  = IBNAL_SERVICE_NUMBER; /* port */
                pcfg->pcfg_count = 0;
                pcfg->pcfg_wait  = share_count;
                break;
        }
        case NAL_CMD_ADD_PEER: {
                rc = kibnal_add_persistent_peer (pcfg->pcfg_nid,
                                                 pcfg->pcfg_id); /* IP */
                break;
        }
        case NAL_CMD_DEL_PEER: {
                rc = kibnal_del_peer (pcfg->pcfg_nid,
                                      /* flags == single_share */
                                      pcfg->pcfg_flags != 0);
                break;
        }
        case NAL_CMD_GET_CONN: {
                kib_conn_t *conn = kibnal_get_conn_by_idx (pcfg->pcfg_count);

                if (conn == NULL)
                        rc = -ENOENT;
                else {
                        rc = 0;
                        pcfg->pcfg_nid   = conn->ibc_peer->ibp_nid;
                        pcfg->pcfg_id    = 0;
                        pcfg->pcfg_misc  = 0;
                        pcfg->pcfg_flags = 0;
                        kibnal_conn_decref(conn);
                }
                break;
        }
        case NAL_CMD_CLOSE_CONNECTION: {
                rc = kibnal_close_matching_conns (pcfg->pcfg_nid);
                break;
        }
        case NAL_CMD_REGISTER_MYNID: {
                if (pcfg->pcfg_nid == PTL_NID_ANY)
                        rc = -EINVAL;
                else
                        rc = kibnal_set_mynid (pcfg->pcfg_nid);
                break;
        }
        }

        return rc;
}
void
kibnal_free_pages (kib_pages_t *p)
{
        int         npages = p->ibp_npages;
        vv_return_t vvrc;
        int         i;

        if (p->ibp_mapped) {
                vvrc = vv_mem_region_destroy(kibnal_data.kib_hca,
                                             p->ibp_handle);
                if (vvrc != vv_return_ok)
                        CERROR ("Deregister error: %d\n", vvrc);
        }

        for (i = 0; i < npages; i++)
                if (p->ibp_pages[i] != NULL)
                        __free_page(p->ibp_pages[i]);

        PORTAL_FREE (p, offsetof(kib_pages_t, ibp_pages[npages]));
}
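/* Allocate npages pages for message buffers.  With IBNAL_WHOLE_MEM the
 * Voltaire stack has already registered all of memory and no explicit
 * mapping is needed; otherwise the pages are registered as one physical
 * memory region and the handle and keys are recorded for later use. */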
int
kibnal_alloc_pages (kib_pages_t **pp, int npages, int allow_write)
{
        kib_pages_t *p;
        int          i;
#if !IBNAL_WHOLE_MEM
        vv_phy_list_t            vv_phys;
        vv_phy_buf_t            *phys_pages;
        vv_return_t              vvrc;
        vv_access_con_bit_mask_t access;
#endif

        PORTAL_ALLOC(p, offsetof(kib_pages_t, ibp_pages[npages]));
        if (p == NULL) {
                CERROR ("Can't allocate buffer %d\n", npages);
                return (-ENOMEM);
        }

        memset (p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
        p->ibp_npages = npages;

        for (i = 0; i < npages; i++) {
                p->ibp_pages[i] = alloc_page (GFP_KERNEL);
                if (p->ibp_pages[i] == NULL) {
                        CERROR ("Can't allocate page %d of %d\n", i, npages);
                        kibnal_free_pages(p);
                        return (-ENOMEM);
                }
        }

#if !IBNAL_WHOLE_MEM
        PORTAL_ALLOC(phys_pages, npages * sizeof(*phys_pages));
        if (phys_pages == NULL) {
                CERROR ("Can't allocate physarray for %d pages\n", npages);
                kibnal_free_pages(p);
                return (-ENOMEM);
        }

        vv_phys.number_of_buff = npages;
        vv_phys.phy_list       = phys_pages;

        for (i = 0; i < npages; i++) {
                phys_pages[i].size  = PAGE_SIZE;
                phys_pages[i].start = kibnal_page2phys(p->ibp_pages[i]);
        }

        VV_ACCESS_CONTROL_MASK_SET_ALL(access);

        vvrc = vv_phy_mem_region_register(kibnal_data.kib_hca,
                                          &vv_phys,
                                          0,                     /* requested vaddr */
                                          npages * PAGE_SIZE, 0, /* offset */
                                          kibnal_data.kib_pd,
                                          access,
                                          &p->ibp_handle,
                                          &p->ibp_vaddr,
                                          &p->ibp_lkey,
                                          &p->ibp_rkey);

        PORTAL_FREE(phys_pages, npages * sizeof(*phys_pages));

        if (vvrc != vv_return_ok) {
                CERROR ("Error %d mapping %d pages\n", vvrc, npages);
                kibnal_free_pages(p);
                return (-EFAULT);
        }

        CDEBUG(D_NET, "registered %d pages; handle: %x vaddr "LPX64" "
               "lkey %x rkey %x\n", npages, p->ibp_handle,
               p->ibp_vaddr, p->ibp_lkey, p->ibp_rkey);

        p->ibp_mapped = 1;
#endif

        *pp = p;
        return (0);
}
int
kibnal_alloc_tx_descs (void)
{
        int i;

        PORTAL_ALLOC (kibnal_data.kib_tx_descs,
                      IBNAL_TX_MSGS * sizeof(kib_tx_t));
        if (kibnal_data.kib_tx_descs == NULL)
                return -ENOMEM;

        memset(kibnal_data.kib_tx_descs, 0,
               IBNAL_TX_MSGS * sizeof(kib_tx_t));

        for (i = 0; i < IBNAL_TX_MSGS; i++) {
                kib_tx_t *tx = &kibnal_data.kib_tx_descs[i];

                PORTAL_ALLOC(tx->tx_wrq,
                             (1 + IBNAL_MAX_RDMA_FRAGS) *
                             sizeof(*tx->tx_wrq));
                if (tx->tx_wrq == NULL)
                        return -ENOMEM;

                PORTAL_ALLOC(tx->tx_gl,
                             (1 + IBNAL_MAX_RDMA_FRAGS) *
                             sizeof(*tx->tx_gl));
                if (tx->tx_gl == NULL)
                        return -ENOMEM;

                PORTAL_ALLOC(tx->tx_rd,
                             offsetof(kib_rdma_desc_t,
                                      rd_frags[IBNAL_MAX_RDMA_FRAGS]));
                if (tx->tx_rd == NULL)
                        return -ENOMEM;
        }

        return 0;
}
void
kibnal_free_tx_descs (void)
{
        int i;

        if (kibnal_data.kib_tx_descs == NULL)
                return;

        for (i = 0; i < IBNAL_TX_MSGS; i++) {
                kib_tx_t *tx = &kibnal_data.kib_tx_descs[i];

                if (tx->tx_wrq != NULL)
                        PORTAL_FREE(tx->tx_wrq,
                                    (1 + IBNAL_MAX_RDMA_FRAGS) *
                                    sizeof(*tx->tx_wrq));

                if (tx->tx_gl != NULL)
                        PORTAL_FREE(tx->tx_gl,
                                    (1 + IBNAL_MAX_RDMA_FRAGS) *
                                    sizeof(*tx->tx_gl));

                if (tx->tx_rd != NULL)
                        PORTAL_FREE(tx->tx_rd,
                                    offsetof(kib_rdma_desc_t,
                                             rd_frags[IBNAL_MAX_RDMA_FRAGS]));
        }

        PORTAL_FREE(kibnal_data.kib_tx_descs,
                    IBNAL_TX_MSGS * sizeof(kib_tx_t));
}
int
kibnal_setup_tx_descs (void)
{
        int           ipage = 0;
        int           page_offset = 0;
        __u64         vaddr;
        __u64         vaddr_base;
        struct page  *page;
        kib_tx_t     *tx;
        int           i;
        int           rc;

        /* pre-mapped messages are not bigger than 1 page */
        CLASSERT (IBNAL_MSG_SIZE <= PAGE_SIZE);

        /* No fancy arithmetic when we do the buffer calculations */
        CLASSERT (PAGE_SIZE % IBNAL_MSG_SIZE == 0);

        rc = kibnal_alloc_pages(&kibnal_data.kib_tx_pages, IBNAL_TX_MSG_PAGES,
                                0);
        if (rc != 0)
                return (rc);

        /* ignored for the whole_mem case */
        vaddr = vaddr_base = kibnal_data.kib_tx_pages->ibp_vaddr;

        for (i = 0; i < IBNAL_TX_MSGS; i++) {
                page = kibnal_data.kib_tx_pages->ibp_pages[ipage];
                tx = &kibnal_data.kib_tx_descs[i];

                tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
                                           page_offset);
#if IBNAL_WHOLE_MEM
                {
                        vv_mem_reg_h_t  mem_h;
                        vv_r_key_t      rkey;
                        vv_return_t     vvrc;

                        /* Voltaire stack already registers the whole
                         * memory, so use that API. */
                        vvrc = vv_get_gen_mr_attrib(kibnal_data.kib_hca,
                                                    tx->tx_msg,
                                                    IBNAL_MSG_SIZE,
                                                    &mem_h,
                                                    &tx->tx_lkey,
                                                    &rkey);
                        LASSERT (vvrc == vv_return_ok);
                }
#else
                tx->tx_vaddr = vaddr;
#endif
                tx->tx_isnblk = (i >= IBNAL_NTX);
                tx->tx_mapped = KIB_TX_UNMAPPED;

                CDEBUG(D_NET, "Tx[%d] %p->%p[%x:"LPX64"]\n", i, tx,
                       tx->tx_msg, KIBNAL_TX_LKEY(tx), KIBNAL_TX_VADDR(tx));

                if (tx->tx_isnblk)
                        list_add (&tx->tx_list,
                                  &kibnal_data.kib_idle_nblk_txs);
                else
                        list_add (&tx->tx_list,
                                  &kibnal_data.kib_idle_txs);

                vaddr += IBNAL_MSG_SIZE;
                LASSERT (vaddr <= vaddr_base + IBNAL_TX_MSG_BYTES);

                page_offset += IBNAL_MSG_SIZE;
                LASSERT (page_offset <= PAGE_SIZE);

                if (page_offset == PAGE_SIZE) {
                        page_offset = 0;
                        ipage++;
                        LASSERT (ipage <= IBNAL_TX_MSG_PAGES);
                }
        }

        return (0);
}
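/* Teardown runs startup in reverse: kib_init records how far startup got,
 * and each case below falls through to undo the stages beneath it, waiting
 * for peers and then threads to drain along the way. */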
void
kibnal_api_shutdown (nal_t *nal)
{
        int         i;
        vv_return_t vvrc;

        if (nal->nal_refct != 0) {
                /* This module got the first ref */
                PORTAL_MODULE_UNUSE;
                return;
        }

        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
               atomic_read (&portal_kmemory));

        LASSERT(nal == &kibnal_api);

        switch (kibnal_data.kib_init) {

        case IBNAL_INIT_ALL:
                /* stop calls to nal_cmd */
                libcfs_nal_cmd_unregister(VIBNAL);

        case IBNAL_INIT_CQ:
                /* resetting my NID removes my listener and nukes all current
                 * peers and their connections */
                kibnal_set_mynid (PTL_NID_ANY);

                /* Wait for all peer state to clean up */
                i = 2;
                while (atomic_read (&kibnal_data.kib_npeers) != 0) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                               "waiting for %d peers to disconnect\n",
                               atomic_read (&kibnal_data.kib_npeers));
                        set_current_state (TASK_UNINTERRUPTIBLE);
                        schedule_timeout (HZ);
                }

                vvrc = vv_cq_destroy(kibnal_data.kib_hca, kibnal_data.kib_cq);
                if (vvrc != vv_return_ok)
                        CERROR ("Destroy CQ error: %d\n", vvrc);

        case IBNAL_INIT_TXD:
                kibnal_free_pages (kibnal_data.kib_tx_pages);

        case IBNAL_INIT_PD:
#if !IBNAL_WHOLE_MEM
                vvrc = vv_pd_deallocate(kibnal_data.kib_hca,
                                        kibnal_data.kib_pd);
                if (vvrc != vv_return_ok)
                        CERROR ("Destroy PD error: %d\n", vvrc);
#endif

        case IBNAL_INIT_ASYNC:
                vvrc = vv_dell_async_event_cb (kibnal_data.kib_hca,
                                               kibnal_async_callback);
                if (vvrc != vv_return_ok)
                        CERROR("vv_dell_async_event_cb error: %d\n", vvrc);

        case IBNAL_INIT_HCA:
                vvrc = vv_hca_close(kibnal_data.kib_hca);
                if (vvrc != vv_return_ok)
                        CERROR ("Close HCA error: %d\n", vvrc);

        case IBNAL_INIT_LIB:
                lib_fini(&kibnal_lib);

        case IBNAL_INIT_DATA:
                LASSERT (atomic_read (&kibnal_data.kib_npeers) == 0);
                LASSERT (kibnal_data.kib_peers != NULL);
                for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {
                        LASSERT (list_empty (&kibnal_data.kib_peers[i]));
                }
                LASSERT (atomic_read (&kibnal_data.kib_nconns) == 0);
                LASSERT (list_empty (&kibnal_data.kib_sched_rxq));
                LASSERT (list_empty (&kibnal_data.kib_sched_txq));
                LASSERT (list_empty (&kibnal_data.kib_connd_zombies));
                LASSERT (list_empty (&kibnal_data.kib_connd_conns));
                LASSERT (list_empty (&kibnal_data.kib_connd_pcreqs));
                LASSERT (list_empty (&kibnal_data.kib_connd_peers));

                /* flag threads to terminate; wake and wait for them to die */
                kibnal_data.kib_shutdown = 1;
                wake_up_all (&kibnal_data.kib_sched_waitq);
                wake_up_all (&kibnal_data.kib_connd_waitq);

                i = 2;
                while (atomic_read (&kibnal_data.kib_nthreads) != 0) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                               "Waiting for %d threads to terminate\n",
                               atomic_read (&kibnal_data.kib_nthreads));
                        set_current_state (TASK_INTERRUPTIBLE);
                        schedule_timeout (HZ);
                }

        case IBNAL_INIT_NOTHING:
                break;
        }

        kibnal_free_tx_descs();

        if (kibnal_data.kib_peers != NULL)
                PORTAL_FREE (kibnal_data.kib_peers,
                             sizeof (struct list_head) *
                             kibnal_data.kib_peer_hash_size);

        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
               atomic_read (&portal_kmemory));
        printk(KERN_INFO "Lustre: Voltaire IB NAL unloaded (final mem %d)\n",
               atomic_read(&portal_kmemory));

        kibnal_data.kib_init = IBNAL_INIT_NOTHING;
}
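/* Startup is staged: each stage bumps kib_init once its resources exist,
 * so kibnal_api_shutdown() can be invoked from any failure point and will
 * unwind exactly what was created. */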
int
kibnal_api_startup (nal_t *nal, ptl_pid_t requested_pid,
                    ptl_ni_limits_t *requested_limits,
                    ptl_ni_limits_t *actual_limits)
{
        struct timeval            tv;
        ptl_process_id_t          process_id;
        int                       pkmem = atomic_read(&portal_kmemory);
        int                       rc;
        int                       i;
        vv_request_event_record_t req_er;
        vv_return_t               vvrc;

        LASSERT (nal == &kibnal_api);

        if (nal->nal_refct != 0) {
                if (actual_limits != NULL)
                        *actual_limits = kibnal_lib.libnal_ni.ni_actual_limits;
                /* This module got the first ref */
                PORTAL_MODULE_USE;
                return (PTL_OK);
        }

        LASSERT (kibnal_data.kib_init == IBNAL_INIT_NOTHING);
        memset (&kibnal_data, 0, sizeof (kibnal_data)); /* zero pointers, flags etc */

        do_gettimeofday(&tv);
        kibnal_data.kib_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
        kibnal_data.kib_svc_id = IBNAL_SERVICE_NUMBER;

        init_MUTEX (&kibnal_data.kib_nid_mutex);

        rwlock_init(&kibnal_data.kib_global_lock);

        kibnal_data.kib_peer_hash_size = IBNAL_PEER_HASH_SIZE;
        PORTAL_ALLOC (kibnal_data.kib_peers,
                      sizeof (struct list_head) * kibnal_data.kib_peer_hash_size);
        if (kibnal_data.kib_peers == NULL) {
                goto failed;
        }
        for (i = 0; i < kibnal_data.kib_peer_hash_size; i++)
                INIT_LIST_HEAD(&kibnal_data.kib_peers[i]);

        spin_lock_init (&kibnal_data.kib_connd_lock);
        INIT_LIST_HEAD (&kibnal_data.kib_connd_peers);
        INIT_LIST_HEAD (&kibnal_data.kib_connd_pcreqs);
        INIT_LIST_HEAD (&kibnal_data.kib_connd_conns);
        INIT_LIST_HEAD (&kibnal_data.kib_connd_zombies);
        init_waitqueue_head (&kibnal_data.kib_connd_waitq);

        spin_lock_init (&kibnal_data.kib_sched_lock);
        INIT_LIST_HEAD (&kibnal_data.kib_sched_txq);
        INIT_LIST_HEAD (&kibnal_data.kib_sched_rxq);
        init_waitqueue_head (&kibnal_data.kib_sched_waitq);

        spin_lock_init (&kibnal_data.kib_tx_lock);
        INIT_LIST_HEAD (&kibnal_data.kib_idle_txs);
        INIT_LIST_HEAD (&kibnal_data.kib_idle_nblk_txs);
        init_waitqueue_head(&kibnal_data.kib_idle_tx_waitq);

        rc = kibnal_alloc_tx_descs();
        if (rc != 0) {
                CERROR("Can't allocate tx descs\n");
                goto failed;
        }

        /* lists/ptrs/locks initialised */
        kibnal_data.kib_init = IBNAL_INIT_DATA;
        /*****************************************************/

        process_id.pid = requested_pid;
        process_id.nid = PTL_NID_ANY;

        rc = lib_init(&kibnal_lib, nal, process_id,
                      requested_limits, actual_limits);
        if (rc != PTL_OK) {
                CERROR("lib_init failed: error %d\n", rc);
                goto failed;
        }

        /* lib interface initialised */
        kibnal_data.kib_init = IBNAL_INIT_LIB;
        /*****************************************************/

        for (i = 0; i < IBNAL_N_SCHED; i++) {
                rc = kibnal_thread_start (kibnal_scheduler, (void *)((long)i));
                if (rc != 0) {
                        CERROR("Can't spawn vibnal scheduler[%d]: %d\n",
                               i, rc);
                        goto failed;
                }
        }

        rc = kibnal_thread_start (kibnal_connd, NULL);
        if (rc != 0) {
                CERROR ("Can't spawn vibnal connd: %d\n", rc);
                goto failed;
        }

        /* TODO: apparently only one adapter is supported */
        vvrc = vv_hca_open("ANY_HCA", NULL, &kibnal_data.kib_hca);
        if (vvrc != vv_return_ok) {
                CERROR ("Can't open CA: %d\n", vvrc);
                goto failed;
        }

        /* Channel Adapter opened */
        kibnal_data.kib_init = IBNAL_INIT_HCA;

        /* register to get HCA's asynchronous events. */
        req_er.req_event_type = VV_ASYNC_EVENT_ALL_MASK;
        vvrc = vv_set_async_event_cb (kibnal_data.kib_hca, req_er,
                                      kibnal_async_callback);
        if (vvrc != vv_return_ok) {
                CERROR ("Can't set HCA async event callback: %d\n", vvrc);
                goto failed;
        }

        kibnal_data.kib_init = IBNAL_INIT_ASYNC;
        /*****************************************************/

        vvrc = vv_hca_query(kibnal_data.kib_hca, &kibnal_data.kib_hca_attrs);
        if (vvrc != vv_return_ok) {
                CERROR ("Can't size port attrs: %d\n", vvrc);
                goto failed;
        }

        kibnal_data.kib_port = -1;

        for (i = 0; i < kibnal_data.kib_hca_attrs.port_num; i++) {

                int               port_num = i + 1;
                u_int32_t         tbl_count;
                vv_port_attrib_t *pattr = &kibnal_data.kib_port_attr;

                vvrc = vv_port_query(kibnal_data.kib_hca, port_num, pattr);
                if (vvrc != vv_return_ok) {
                        CERROR("vv_port_query failed for port %d: %d\n",
                               port_num, vvrc);
                        continue;
                }

                switch (pattr->port_state) {
                case vv_state_linkDoun:
                        CDEBUG(D_NET, "port[%d] Down\n", port_num);
                        continue;
                case vv_state_linkInit:
                        CDEBUG(D_NET, "port[%d] Init\n", port_num);
                        continue;
                case vv_state_linkArm:
                        CDEBUG(D_NET, "port[%d] Armed\n", port_num);
                        continue;
                case vv_state_linkActive:
                        CDEBUG(D_NET, "port[%d] Active\n", port_num);

                        /* Found a suitable port. Get its GUID and PKEY. */
                        kibnal_data.kib_port = port_num;

                        tbl_count = 1;
                        vvrc = vv_get_port_gid_tbl(kibnal_data.kib_hca,
                                                   port_num, &tbl_count,
                                                   &kibnal_data.kib_port_gid);
                        if (vvrc != vv_return_ok) {
                                CERROR("vv_get_port_gid_tbl failed "
                                       "for port %d: %d\n", port_num, vvrc);
                                continue;
                        }

                        tbl_count = 1;
                        vvrc = vv_get_port_partition_tbl(kibnal_data.kib_hca,
                                                         port_num, &tbl_count,
                                                         &kibnal_data.kib_port_pkey);
                        if (vvrc != vv_return_ok) {
                                CERROR("vv_get_port_partition_tbl failed "
                                       "for port %d: %d\n", port_num, vvrc);
                                continue;
                        }

                        break;
                case vv_state_linkActDefer: /* TODO: correct? */
                case vv_state_linkNoChange:
                        CERROR("Unexpected port[%d] state %d\n",
                               i, pattr->port_state);
                        continue;
                }
                break;
        }

        if (kibnal_data.kib_port == -1) {
                CERROR ("Can't find an active port\n");
                goto failed;
        }

        CDEBUG(D_NET, "Using port %d - GID="LPX64":"LPX64"\n",
               kibnal_data.kib_port,
               kibnal_data.kib_port_gid.scope.g.subnet,
               kibnal_data.kib_port_gid.scope.g.eui64);

        /*****************************************************/

#if !IBNAL_WHOLE_MEM
        vvrc = vv_pd_allocate(kibnal_data.kib_hca, &kibnal_data.kib_pd);
#else
        vvrc = vv_get_gen_pd_h(kibnal_data.kib_hca, &kibnal_data.kib_pd);
#endif
        if (vvrc != vv_return_ok) {
                CERROR ("Can't create PD: %d\n", vvrc);
                goto failed;
        }

        /* flag PD initialised */
        kibnal_data.kib_init = IBNAL_INIT_PD;
        /*****************************************************/

        rc = kibnal_setup_tx_descs();
        if (rc != 0) {
                CERROR ("Can't register tx descs: %d\n", rc);
                goto failed;
        }

        /* flag TX descs initialised */
        kibnal_data.kib_init = IBNAL_INIT_TXD;
        /*****************************************************/

        {
                uint32_t nentries;

                vvrc = vv_cq_create(kibnal_data.kib_hca, IBNAL_CQ_ENTRIES,
                                    kibnal_cq_callback,
                                    NULL, /* context */
                                    &kibnal_data.kib_cq, &nentries);
                if (vvrc != vv_return_ok) {
                        CERROR ("Can't create RX CQ: %d\n", vvrc);
                        goto failed;
                }

                /* flag CQ initialised */
                kibnal_data.kib_init = IBNAL_INIT_CQ;

                if (nentries < IBNAL_CQ_ENTRIES) {
                        CERROR ("CQ only has %d entries, need %d\n",
                                nentries, IBNAL_CQ_ENTRIES);
                        goto failed;
                }

                vvrc = vv_request_completion_notification(kibnal_data.kib_hca,
                                                          kibnal_data.kib_cq,
                                                          vv_next_solicit_unsolicit_event);
                if (vvrc != vv_return_ok) {
                        CERROR ("Failed to re-arm completion queue: %d\n", vvrc);
                        goto failed;
                }
        }

        /*****************************************************/

        rc = libcfs_nal_cmd_register(VIBNAL, &kibnal_cmd, NULL);
        if (rc != 0) {
                CERROR ("Can't initialise command interface (rc = %d)\n", rc);
                goto failed;
        }

        /* flag everything initialised */
        kibnal_data.kib_init = IBNAL_INIT_ALL;
        /*****************************************************/

        printk(KERN_INFO "Lustre: Voltaire IB NAL loaded "
               "(initial mem %d)\n", pkmem);

        return (PTL_OK);

 failed:
        CDEBUG(D_NET, "kibnal_api_startup failed\n");
        kibnal_api_shutdown (&kibnal_api);
        return (PTL_FAIL);
}
void __exit
kibnal_module_fini (void)
{
#ifdef CONFIG_SYSCTL
        if (kibnal_tunables.kib_sysctl != NULL)
                unregister_sysctl_table (kibnal_tunables.kib_sysctl);
#endif
        PtlNIFini(kibnal_ni);

        ptl_unregister_nal(VIBNAL);
}
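/* The CLASSERTs below pin the wire protocol: connection parameters must fit
 * in the CM REQ/REP private data, and a maximally-fragmented RDMA
 * descriptor must still fit in a message buffer. */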
int __init
kibnal_module_init (void)
{
        int rc;

        CLASSERT (offsetof(kib_msg_t, ibm_u) + sizeof(kib_connparams_t)
                  <= cm_REQ_priv_data_len);
        CLASSERT (offsetof(kib_msg_t, ibm_u) + sizeof(kib_connparams_t)
                  <= cm_REP_priv_data_len);
        CLASSERT (offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[IBNAL_MAX_RDMA_FRAGS])
                  <= IBNAL_MSG_SIZE);
        CLASSERT (offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[IBNAL_MAX_RDMA_FRAGS])
                  <= IBNAL_MSG_SIZE);

        /* the following must be sizeof(int) for proc_dointvec() */
        CLASSERT (sizeof (kibnal_tunables.kib_io_timeout) == sizeof (int));

        kibnal_api.nal_ni_init = kibnal_api_startup;
        kibnal_api.nal_ni_fini = kibnal_api_shutdown;

        /* Initialise dynamic tunables to defaults once only */
        kibnal_tunables.kib_io_timeout = IBNAL_IO_TIMEOUT;

        rc = ptl_register_nal(VIBNAL, &kibnal_api);
        if (rc != PTL_OK) {
                CERROR("Can't register IBNAL: %d\n", rc);
                return (-ENOMEM);               /* or something... */
        }

        /* Pure gateways want the NAL started up at module load time... */
        rc = PtlNIInit(VIBNAL, LUSTRE_SRV_PTL_PID, NULL, NULL, &kibnal_ni);
        if (rc != PTL_OK && rc != PTL_IFACE_DUP) {
                ptl_unregister_nal(VIBNAL);
                return (-ENODEV);
        }

#ifdef CONFIG_SYSCTL
        /* Press on regardless even if registering sysctl doesn't work */
        kibnal_tunables.kib_sysctl =
                register_sysctl_table (kibnal_top_ctl_table, 0);
#endif
        return (0);
}
MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
MODULE_DESCRIPTION("Kernel Voltaire IB NAL v0.01");
MODULE_LICENSE("GPL");

module_init(kibnal_module_init);
module_exit(kibnal_module_fini);