1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2004 Cluster File Systems, Inc.
5 * Author: Eric Barton <eric@bartonsoftware.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 #include "openibnal.h"
/* Module-wide state for the OpenIB NAL:
 *  - kibnal_ni:       Portals network-interface handle (from PtlNIInit)
 *  - kibnal_data:     all runtime state (peers, conns, threads, IB handles)
 *  - kibnal_tunables: user-settable knobs exposed via sysctl below */
27 ptl_handle_ni_t kibnal_ni;
28 kib_data_t kibnal_data;
29 kib_tunables_t kibnal_tunables;
/* sysctl binary ids: root directory for this NAL and its entries.
 * NOTE(review): 202 looks hand-picked — confirm it does not clash with
 * other modules' top-level sysctl ids. */
32 #define IBNAL_SYSCTL 202
34 #define IBNAL_SYSCTL_TIMEOUT 1
/* Leaf sysctl entries: a single integer "timeout" mapped directly onto
 * kibnal_tunables.kib_io_timeout (seconds), world-readable/root-writable
 * (0644), parsed with proc_dointvec. */
36 static ctl_table kibnal_ctl_table[] = {
37 {IBNAL_SYSCTL_TIMEOUT, "timeout",
38 &kibnal_tunables.kib_io_timeout, sizeof (int),
39 0644, NULL, &proc_dointvec},
/* Top-level sysctl directory "openibnal" (mode 0555) containing the
 * entries above; registered in kibnal_module_init. */
43 static ctl_table kibnal_top_ctl_table[] = {
44 {IBNAL_SYSCTL, "openibnal", NULL, 0, 0555, kibnal_ctl_table},
/* Debug helper: dump an IB common-attribute service record.
 * @service: record to print (a NULL record is reported as such)
 * @tag:     caller-supplied label prefixed to the output
 * @rc:      status code to report alongside the record
 * Copies the service name into a local, NUL-terminated buffer before
 * printing, then logs service id, name and the NID key field. */
50 print_service(struct ib_common_attrib_service *service, char *tag, int rc)
57 "status : %d (NULL)\n", tag, rc);
/* bounded copy; explicit NUL since strncpy may not terminate */
60 strncpy (name, service->service_name, sizeof(name)-1);
61 name[sizeof(name)-1] = 0;
65 "service id: "LPX64"\n"
67 "NID : "LPX64"\n", tag, rc,
68 service->service_id, name,
69 *kibnal_service_nid_field(service));
/* Completion callback shared by service set/get/delete requests: records
 * the status (via *arg — set up by the callers; elided here) and signals
 * kib_nid_signal so the waiting thread's down() returns. */
73 kibnal_service_setunset_done (tTS_IB_CLIENT_QUERY_TID tid, int status,
74 struct ib_common_attrib_service *service, void *arg)
77 up (&kibnal_data.kib_nid_signal);
80 #if IBNAL_CHECK_ADVERT
/* Debug-only (IBNAL_CHECK_ADVERT): query the subnet manager for our own
 * service record to verify the advert made by kibnal_advertise() is
 * visible.  Blocks on kib_nid_signal until the async query completes. */
82 kibnal_check_advert (void)
84 struct ib_common_attrib_service *svc;
89 PORTAL_ALLOC(svc, sizeof(*svc));
93 memset (svc, 0, sizeof (*svc));
/* key the lookup on our current NID */
94 kibnal_set_service_keys(svc, kibnal_data.kib_nid);
96 rc = ib_service_get (kibnal_data.kib_device,
99 KIBNAL_SERVICE_KEY_MASK,
100 kibnal_tunables.kib_io_timeout * HZ,
101 kibnal_service_setunset_done, &rc2,
105 CERROR ("Immediate error %d checking SM service\n", rc);
/* wait for kibnal_service_setunset_done to fire */
107 down (&kibnal_data.kib_nid_signal);
111 CERROR ("Error %d checking SM service\n", rc);
114 PORTAL_FREE(svc, sizeof(*svc));
/* Register our service record with the IB subnet manager so passive
 * peers can look up our connection parameters by NID.
 * Builds a record containing service id, port GID, PKEY, an infinite
 * lease and the NID key, submits it asynchronously via ib_service_set()
 * and blocks on kib_nid_signal until the callback reports completion.
 * Returns 0 on success or a negative errno (error paths elided in this
 * view presumably return rc). */
119 kibnal_advertise (void)
121 struct ib_common_attrib_service *svc;
126 LASSERT (kibnal_data.kib_nid != PTL_NID_ANY);
128 PORTAL_ALLOC(svc, sizeof(*svc));
132 memset (svc, 0, sizeof (*svc));
134 svc->service_id = kibnal_data.kib_service_id;
/* fetch this port's GID from the cached port attributes */
136 rc = ib_cached_gid_get(kibnal_data.kib_device,
137 kibnal_data.kib_port,
141 CERROR ("Can't get port %d GID: %d\n",
142 kibnal_data.kib_port, rc);
/* and the partition key for the same port */
146 rc = ib_cached_pkey_get(kibnal_data.kib_device,
147 kibnal_data.kib_port,
151 CERROR ("Can't get port %d PKEY: %d\n",
152 kibnal_data.kib_port, rc);
/* "forever" lease — explicitly removed by kibnal_unadvertise() */
156 svc->service_lease = 0xffffffff;
158 kibnal_set_service_keys(svc, kibnal_data.kib_nid);
160 CDEBUG(D_NET, "Advertising service id "LPX64" %s:"LPX64"\n",
162 svc->service_name, *kibnal_service_nid_field(svc));
164 rc = ib_service_set (kibnal_data.kib_device,
165 kibnal_data.kib_port,
167 IB_SA_SERVICE_COMP_MASK_ID |
168 IB_SA_SERVICE_COMP_MASK_GID |
169 IB_SA_SERVICE_COMP_MASK_PKEY |
170 IB_SA_SERVICE_COMP_MASK_LEASE |
171 KIBNAL_SERVICE_KEY_MASK,
172 kibnal_tunables.kib_io_timeout * HZ,
173 kibnal_service_setunset_done, &rc2, &tid);
176 CERROR ("Immediate error %d advertising NID "LPX64"\n",
177 rc, kibnal_data.kib_nid);
/* wait for the async set to complete; rc2 carries its status */
181 down (&kibnal_data.kib_nid_signal);
185 CERROR ("Error %d advertising NID "LPX64"\n",
186 rc, kibnal_data.kib_nid);
188 PORTAL_FREE(svc, sizeof(*svc));
/* Remove our service record from the subnet manager.
 * @expect_success: non-zero when a record must exist (normal shutdown of
 *   an advertised NID); zero when we are clearing a possible stale advert
 *   (e.g. left over from a crash) and "not found" is the expected result.
 * Blocks on kib_nid_signal for the async delete to complete, then checks
 * the callback status (rc2) against the caller's expectation. */
193 kibnal_unadvertise (int expect_success)
195 struct ib_common_attrib_service *svc;
200 LASSERT (kibnal_data.kib_nid != PTL_NID_ANY);
202 PORTAL_ALLOC(svc, sizeof(*svc));
206 memset (svc, 0, sizeof(*svc));
/* only the NID key is needed to identify the record to delete */
208 kibnal_set_service_keys(svc, kibnal_data.kib_nid);
210 CDEBUG(D_NET, "Unadvertising service %s:"LPX64"\n",
211 svc->service_name, *kibnal_service_nid_field(svc));
213 rc = ib_service_delete (kibnal_data.kib_device,
214 kibnal_data.kib_port,
216 KIBNAL_SERVICE_KEY_MASK,
217 kibnal_tunables.kib_io_timeout * HZ,
218 kibnal_service_setunset_done, &rc2, &tid);
220 CERROR ("Immediate error %d unadvertising NID "LPX64"\n",
221 rc, kibnal_data.kib_nid);
225 down (&kibnal_data.kib_nid_signal);
/* outcome matches expectation: deleted when expected to exist, or
 * missing when we were only clearing a possible stale entry */
227 if ((rc2 == 0) == !!expect_success)
228 goto out; /* success: rc == 0 */
231 CERROR("Error %d unadvertising NID "LPX64"\n",
232 rc, kibnal_data.kib_nid);
/* !expect_success but a record WAS there: a conflicting stale advert */
234 CWARN("Removed conflicting NID "LPX64"\n",
235 kibnal_data.kib_nid);
237 PORTAL_FREE(svc, sizeof(*svc));
/* Change this node's NID (serialised by kib_nid_mutex).
 * Sequence: if an old NID was active, unadvertise it and stop the CM
 * listener; install the new NID and a fresh incarnation (usec timestamp);
 * nuke all existing peers/conns so nothing from the old identity
 * survives; then for a real new NID clear any stale advert, assign a new
 * CM service id, start listening and re-advertise.  On failure the
 * listener is stopped, late-arriving peers are deleted and the NID is
 * reset to PTL_NID_ANY. */
241 kibnal_set_mynid(ptl_nid_t nid)
244 lib_ni_t *ni = &kibnal_lib.libnal_ni;
247 CDEBUG(D_IOCTL, "setting mynid to "LPX64" (old nid="LPX64")\n",
248 nid, ni->ni_pid.nid);
250 do_gettimeofday(&tv);
252 down (&kibnal_data.kib_nid_mutex);
254 if (nid == kibnal_data.kib_nid) {
255 /* no change of NID */
256 up (&kibnal_data.kib_nid_mutex);
260 CDEBUG(D_NET, "NID "LPX64"("LPX64")\n",
261 kibnal_data.kib_nid, nid);
263 if (kibnal_data.kib_nid != PTL_NID_ANY) {
/* retire the old identity: advert must exist, so expect success */
265 kibnal_unadvertise (1);
267 rc = ib_cm_listen_stop (kibnal_data.kib_listen_handle);
269 CERROR ("Error %d stopping listener\n", rc);
/* new identity: NID plus a unique incarnation for stale-conn detection */
272 kibnal_data.kib_nid = ni->ni_pid.nid = nid;
273 kibnal_data.kib_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
275 /* Delete all existing peers and their connections after new
276 * NID/incarnation set to ensure no old connections in our brave
278 kibnal_del_peer (PTL_NID_ANY, 0);
280 if (kibnal_data.kib_nid == PTL_NID_ANY) {
281 /* No new NID to install */
282 up (&kibnal_data.kib_nid_mutex);
286 /* remove any previous advert (crashed node etc) */
287 kibnal_unadvertise(0);
289 /* Assign new service number */
290 kibnal_data.kib_service_id = ib_cm_service_assign();
291 CDEBUG(D_NET, "service_id "LPX64"\n", kibnal_data.kib_service_id);
/* accept incoming connections before advertising our presence */
293 rc = ib_cm_listen(kibnal_data.kib_service_id,
294 TS_IB_CM_SERVICE_EXACT_MASK,
295 kibnal_passive_conn_callback, NULL,
296 &kibnal_data.kib_listen_handle);
298 rc = kibnal_advertise();
300 #if IBNAL_CHECK_ADVERT
301 kibnal_check_advert();
303 up (&kibnal_data.kib_nid_mutex);
/* failure path: undo the listener and any peers that connected while
 * we were only partially set up, then fall back to no identity */
307 ib_cm_listen_stop(kibnal_data.kib_listen_handle);
308 /* remove any peers that sprung up while I failed to
309 * advertise myself */
310 kibnal_del_peer (PTL_NID_ANY, 0);
313 kibnal_data.kib_nid = PTL_NID_ANY;
314 up (&kibnal_data.kib_nid_mutex);
/* Allocate and initialise a peer descriptor for @nid.
 * Returns the new peer holding one reference for the caller (NULL return
 * on allocation failure — elided in this view).  The peer starts outside
 * the peer table with empty conn/tx lists and reconnect throttling reset.
 * Bumps kib_npeers; kibnal_destroy_peer() decrements it. */
319 kibnal_create_peer (ptl_nid_t nid)
323 LASSERT (nid != PTL_NID_ANY);
325 PORTAL_ALLOC (peer, sizeof (*peer));
329 memset(peer, 0, sizeof(*peer)); /* zero flags etc */
332 atomic_set (&peer->ibp_refcount, 1); /* 1 ref for caller */
334 INIT_LIST_HEAD (&peer->ibp_list); /* not in the peer table yet */
335 INIT_LIST_HEAD (&peer->ibp_conns);
336 INIT_LIST_HEAD (&peer->ibp_tx_queue);
/* allow an immediate first connect attempt */
338 peer->ibp_reconnect_time = jiffies;
339 peer->ibp_reconnect_interval = IBNAL_MIN_RECONNECT_INTERVAL;
341 atomic_inc (&kibnal_data.kib_npeers);
/* Final teardown of a peer whose refcount has dropped to zero.
 * Asserts the peer is fully quiescent (no persistence, unhashed, not
 * connecting, no conns, no queued txs) before freeing it and decrementing
 * the module peer count. */
346 kibnal_destroy_peer (kib_peer_t *peer)
348 CDEBUG (D_NET, "peer "LPX64" %p deleted\n", peer->ibp_nid, peer);
350 LASSERT (atomic_read (&peer->ibp_refcount) == 0);
351 LASSERT (peer->ibp_persistence == 0);
352 LASSERT (!kibnal_peer_active(peer));
353 LASSERT (peer->ibp_connecting == 0);
354 LASSERT (list_empty (&peer->ibp_conns));
355 LASSERT (list_empty (&peer->ibp_tx_queue));
357 PORTAL_FREE (peer, sizeof (*peer));
359 /* NB a peer's connections keep a reference on their peer until
360 * they are destroyed, so we can be assured that _all_ state to do
361 * with this peer has been cleaned up when its refcount drops to
363 atomic_dec (&kibnal_data.kib_npeers);
/* Drop one reference on @peer; the thread that drops the last reference
 * destroys it. */
367 kibnal_put_peer (kib_peer_t *peer)
369 CDEBUG (D_OTHER, "putting peer[%p] -> "LPX64" (%d)\n",
371 atomic_read (&peer->ibp_refcount));
373 LASSERT (atomic_read (&peer->ibp_refcount) > 0);
374 if (!atomic_dec_and_test (&peer->ibp_refcount))
/* other references remain — nothing more to do (return elided) */
377 kibnal_destroy_peer (peer);
/* Look up @nid in its peer hash chain.  Caller must hold
 * kib_global_lock (read or write).  Returns the peer WITHOUT taking a
 * new reference (NULL if absent — elided in this view); callers that
 * keep the pointer past the lock must inc the refcount themselves
 * (see kibnal_get_peer). */
381 kibnal_find_peer_locked (ptl_nid_t nid)
383 struct list_head *peer_list = kibnal_nid2peerlist (nid);
384 struct list_head *tmp;
387 list_for_each (tmp, peer_list) {
389 peer = list_entry (tmp, kib_peer_t, ibp_list);
/* table invariant: a hashed peer is persistent, connecting or connected */
391 LASSERT (peer->ibp_persistence != 0 || /* persistent peer */
392 peer->ibp_connecting != 0 || /* creating conns */
393 !list_empty (&peer->ibp_conns)); /* active conn */
395 if (peer->ibp_nid != nid)
398 CDEBUG(D_NET, "got peer [%p] -> "LPX64" (%d)\n",
399 peer, nid, atomic_read (&peer->ibp_refcount));
/* Locked wrapper around kibnal_find_peer_locked(): takes the global read
 * lock, and on a hit adds a reference for the caller (who must
 * kibnal_put_peer() it).  Returns NULL when the NID is unknown. */
406 kibnal_get_peer (ptl_nid_t nid)
410 read_lock (&kibnal_data.kib_global_lock);
411 peer = kibnal_find_peer_locked (nid);
412 if (peer != NULL) /* +1 ref for caller? */
413 atomic_inc (&peer->ibp_refcount);
414 read_unlock (&kibnal_data.kib_global_lock);
/* Remove @peer from the peer hash table and drop the table's reference.
 * Caller must hold kib_global_lock for writing; the peer must already be
 * non-persistent and connectionless. */
420 kibnal_unlink_peer_locked (kib_peer_t *peer)
422 LASSERT (peer->ibp_persistence == 0);
423 LASSERT (list_empty(&peer->ibp_conns));
425 LASSERT (kibnal_peer_active(peer));
426 list_del_init (&peer->ibp_list);
427 /* lose peerlist's ref */
428 kibnal_put_peer (peer);
/* Enumerate the peer table: return the NID and persistence count of the
 * @index'th peer (walk order: hash bucket then chain position).  Used by
 * the NAL_CMD_GET_PEER ioctl.  Returns 0 on a hit; -ENOENT when @index
 * is past the end (elided in this view). */
432 kibnal_get_peer_info (int index, ptl_nid_t *nidp, int *persistencep)
435 struct list_head *ptmp;
438 read_lock (&kibnal_data.kib_global_lock);
440 for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {
442 list_for_each (ptmp, &kibnal_data.kib_peers[i]) {
444 peer = list_entry (ptmp, kib_peer_t, ibp_list);
445 LASSERT (peer->ibp_persistence != 0 ||
446 peer->ibp_connecting != 0 ||
447 !list_empty (&peer->ibp_conns));
/* found the index'th peer: copy out under the lock */
452 *nidp = peer->ibp_nid;
453 *persistencep = peer->ibp_persistence;
455 read_unlock (&kibnal_data.kib_global_lock);
460 read_unlock (&kibnal_data.kib_global_lock);
/* Add (or bump persistence on) a peer for @nid; backs NAL_CMD_ADD_PEER.
 * The peer is created outside the lock, then under the write lock we
 * either discover a concurrently-added peer (drop ours, use theirs) or
 * install the new one; in both cases the surviving peer's persistence
 * count is incremented. */
465 kibnal_add_persistent_peer (ptl_nid_t nid)
471 if (nid == PTL_NID_ANY)
474 peer = kibnal_create_peer (nid);
478 write_lock_irqsave (&kibnal_data.kib_global_lock, flags);
480 peer2 = kibnal_find_peer_locked (nid);
/* lost the race — discard our fresh peer, keep the existing one */
482 kibnal_put_peer (peer);
485 /* peer table takes existing ref on peer */
486 list_add_tail (&peer->ibp_list,
487 kibnal_nid2peerlist (nid));
490 peer->ibp_persistence++;
492 write_unlock_irqrestore (&kibnal_data.kib_global_lock, flags);
/* Delete a peer under the write lock.
 * @single_share: when zero, wipe all persistence; otherwise decrement
 * one share.  If persistence remains the peer survives; otherwise close
 * every connection — the peer unlinks itself when the last conn dies. */
497 kibnal_del_peer_locked (kib_peer_t *peer, int single_share)
499 struct list_head *ctmp;
500 struct list_head *cnxt;
504 peer->ibp_persistence = 0;
505 else if (peer->ibp_persistence > 0)
506 peer->ibp_persistence--;
508 if (peer->ibp_persistence != 0)
/* still persistent — keep the peer (return elided) */
511 list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
512 conn = list_entry(ctmp, kib_conn_t, ibc_list);
514 kibnal_close_conn_locked (conn, 0);
517 /* NB peer unlinks itself when last conn is closed */
/* Delete the peer with @nid, or every peer when @nid == PTL_NID_ANY.
 * Walks either the single hash bucket for @nid or all buckets, applying
 * kibnal_del_peer_locked() to each match.  Returns 0 if anything
 * matched, else an error (-ENOENT; elided in this view). */
521 kibnal_del_peer (ptl_nid_t nid, int single_share)
524 struct list_head *ptmp;
525 struct list_head *pnxt;
532 write_lock_irqsave (&kibnal_data.kib_global_lock, flags);
/* restrict the scan to one bucket for a specific NID */
534 if (nid != PTL_NID_ANY)
535 lo = hi = kibnal_nid2peerlist(nid) - kibnal_data.kib_peers;
538 hi = kibnal_data.kib_peer_hash_size - 1;
541 for (i = lo; i <= hi; i++) {
542 list_for_each_safe (ptmp, pnxt, &kibnal_data.kib_peers[i]) {
543 peer = list_entry (ptmp, kib_peer_t, ibp_list);
544 LASSERT (peer->ibp_persistence != 0 ||
545 peer->ibp_connecting != 0 ||
546 !list_empty (&peer->ibp_conns));
548 if (!(nid == PTL_NID_ANY || peer->ibp_nid == nid))
551 kibnal_del_peer_locked (peer, single_share);
552 rc = 0; /* matched something */
559 write_unlock_irqrestore (&kibnal_data.kib_global_lock, flags);
/* Return the @index'th connection across all peers (NAL_CMD_GET_CONN).
 * On a hit the conn's refcount is bumped under the read lock before it
 * is returned, so the caller must kibnal_put_conn().  Returns NULL when
 * @index is out of range (elided in this view). */
565 kibnal_get_conn_by_idx (int index)
568 struct list_head *ptmp;
570 struct list_head *ctmp;
573 read_lock (&kibnal_data.kib_global_lock);
575 for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {
576 list_for_each (ptmp, &kibnal_data.kib_peers[i]) {
578 peer = list_entry (ptmp, kib_peer_t, ibp_list);
579 LASSERT (peer->ibp_persistence > 0 ||
580 peer->ibp_connecting != 0 ||
581 !list_empty (&peer->ibp_conns));
583 list_for_each (ctmp, &peer->ibp_conns) {
/* found it: take a ref while still holding the lock */
587 conn = list_entry (ctmp, kib_conn_t, ibc_list);
588 CDEBUG(D_NET, "++conn[%p] state %d -> "LPX64" (%d)\n",
589 conn, conn->ibc_state, conn->ibc_peer->ibp_nid,
590 atomic_read (&conn->ibc_refcount));
591 atomic_inc (&conn->ibc_refcount);
592 read_unlock (&kibnal_data.kib_global_lock);
598 read_unlock (&kibnal_data.kib_global_lock);
/* Allocate and initialise a connection: conn struct, RX descriptor
 * array, pre-mapped RX message pages, and an RC queue pair moved to the
 * INIT state.  Returns the conn with one caller reference, or NULL on
 * failure (failure paths funnel into kibnal_destroy_conn below — note
 * kib_nconns is pre-incremented because destroy() decrements it). */
603 kibnal_create_conn (void)
613 struct ib_qp_create_param qp_create;
614 struct ib_qp_attribute qp_attr;
617 PORTAL_ALLOC (conn, sizeof (*conn));
619 CERROR ("Can't allocate connection\n");
623 /* zero flags, NULL pointers etc... */
624 memset (conn, 0, sizeof (*conn));
626 INIT_LIST_HEAD (&conn->ibc_tx_queue);
627 INIT_LIST_HEAD (&conn->ibc_active_txs);
628 spin_lock_init (&conn->ibc_lock);
630 atomic_inc (&kibnal_data.kib_nconns);
631 /* well not really, but I call destroy() on failure, which decrements */
633 PORTAL_ALLOC (conn->ibc_rxs, IBNAL_RX_MSGS * sizeof (kib_rx_t));
634 if (conn->ibc_rxs == NULL)
636 memset (conn->ibc_rxs, 0, IBNAL_RX_MSGS * sizeof(kib_rx_t));
/* allocate + register the pages backing the RX message buffers */
638 rc = kibnal_alloc_pages(&conn->ibc_rx_pages,
640 IB_ACCESS_LOCAL_WRITE);
644 vaddr_base = vaddr = conn->ibc_rx_pages->ibp_vaddr;
/* carve the registered pages into fixed-size RX message slots, tracking
 * both the IB virtual address and the kernel address of each slot */
646 for (i = ipage = page_offset = 0; i < IBNAL_RX_MSGS; i++) {
647 struct page *page = conn->ibc_rx_pages->ibp_pages[ipage];
648 kib_rx_t *rx = &conn->ibc_rxs[i];
651 rx->rx_vaddr = vaddr;
652 rx->rx_msg = (kib_msg_t *)(((char *)page_address(page)) + page_offset);
654 vaddr += IBNAL_MSG_SIZE;
655 LASSERT (vaddr <= vaddr_base + IBNAL_RX_MSG_BYTES);
657 page_offset += IBNAL_MSG_SIZE;
658 LASSERT (page_offset <= PAGE_SIZE);
/* page exhausted: advance to the next one */
660 if (page_offset == PAGE_SIZE) {
663 LASSERT (ipage <= IBNAL_RX_MSG_PAGES);
667 params.qp_create = (struct ib_qp_create_param) {
669 /* Sends have an optional RDMA */
670 .max_outstanding_send_request = 2 * IBNAL_MSG_QUEUE_SIZE,
671 .max_outstanding_receive_request = IBNAL_MSG_QUEUE_SIZE,
672 .max_send_gather_element = 1,
673 .max_receive_scatter_element = 1,
675 .pd = kibnal_data.kib_pd,
676 .send_queue = kibnal_data.kib_cq,
677 .receive_queue = kibnal_data.kib_cq,
678 .send_policy = IB_WQ_SIGNAL_SELECTABLE,
679 .receive_policy = IB_WQ_SIGNAL_SELECTABLE,
681 .transport = IB_TRANSPORT_RC,
682 .device_specific = NULL,
685 rc = ib_qp_create (&params.qp_create, &conn->ibc_qp, &conn->ibc_qpn);
687 CERROR ("Failed to create queue pair: %d\n", rc);
691 /* Mark QP created */
692 conn->ibc_state = IBNAL_CONN_INIT_QP;
/* RESET -> INIT transition with RDMA read/write enabled */
694 params.qp_attr = (struct ib_qp_attribute) {
695 .state = IB_QP_STATE_INIT,
696 .port = kibnal_data.kib_port,
697 .enable_rdma_read = 1,
698 .enable_rdma_write = 1,
699 .valid_fields = (IB_QP_ATTRIBUTE_STATE |
700 IB_QP_ATTRIBUTE_PORT |
701 IB_QP_ATTRIBUTE_PKEY_INDEX |
702 IB_QP_ATTRIBUTE_RDMA_ATOMIC_ENABLE),
704 rc = ib_qp_modify(conn->ibc_qp, &params.qp_attr);
706 CERROR ("Failed to modify queue pair: %d\n", rc);
710 /* 1 ref for caller */
711 atomic_set (&conn->ibc_refcount, 1);
/* failure path: destroy() cleans up whatever was set up so far */
715 kibnal_destroy_conn (conn);
/* Final teardown of a connection with zero refcount and no outstanding
 * work.  The switch on ibc_state falls through so each setup stage's
 * resources are released; RX pages/descs and the peer ref are freed
 * unconditionally (NULL-checked).  Also wakes the scheduler and connd
 * when the last conn dies during shutdown so they can exit. */
720 kibnal_destroy_conn (kib_conn_t *conn)
724 CDEBUG (D_NET, "connection %p\n", conn);
726 LASSERT (atomic_read (&conn->ibc_refcount) == 0);
727 LASSERT (list_empty(&conn->ibc_tx_queue));
728 LASSERT (list_empty(&conn->ibc_active_txs));
729 LASSERT (conn->ibc_nsends_posted == 0);
730 LASSERT (conn->ibc_connreq == NULL);
732 switch (conn->ibc_state) {
733 case IBNAL_CONN_ZOMBIE:
734 /* called after connection sequence initiated */
/* fallthrough */
736 case IBNAL_CONN_INIT_QP:
737 rc = ib_qp_destroy(conn->ibc_qp);
739 CERROR("Can't destroy QP: %d\n", rc);
/* fallthrough */
742 case IBNAL_CONN_INIT_NOTHING:
749 if (conn->ibc_rx_pages != NULL)
750 kibnal_free_pages(conn->ibc_rx_pages);
752 if (conn->ibc_rxs != NULL)
753 PORTAL_FREE(conn->ibc_rxs,
754 IBNAL_RX_MSGS * sizeof(kib_rx_t));
756 if (conn->ibc_peer != NULL)
757 kibnal_put_peer(conn->ibc_peer);
759 PORTAL_FREE(conn, sizeof (*conn));
761 atomic_dec(&kibnal_data.kib_nconns);
763 if (atomic_read (&kibnal_data.kib_nconns) == 0 &&
764 kibnal_data.kib_shutdown) {
765 /* I just nuked the last connection on shutdown; wake up
766 * everyone so they can exit. */
767 wake_up_all(&kibnal_data.kib_sched_waitq);
768 wake_up_all(&kibnal_data.kib_connd_waitq);
/* Drop one reference on @conn.  The last reference is only ever dropped
 * on a ZOMBIE conn; rather than destroying it here (possibly in IRQ
 * context), queue it for the connd thread and wake it. */
773 kibnal_put_conn (kib_conn_t *conn)
777 CDEBUG (D_NET, "putting conn[%p] state %d -> "LPX64" (%d)\n",
778 conn, conn->ibc_state, conn->ibc_peer->ibp_nid,
779 atomic_read (&conn->ibc_refcount));
781 LASSERT (atomic_read (&conn->ibc_refcount) > 0);
782 if (!atomic_dec_and_test (&conn->ibc_refcount))
785 /* last ref only goes on zombies */
786 LASSERT (conn->ibc_state == IBNAL_CONN_ZOMBIE);
788 spin_lock_irqsave (&kibnal_data.kib_connd_lock, flags);
790 list_add (&conn->ibc_list, &kibnal_data.kib_connd_conns);
791 wake_up (&kibnal_data.kib_connd_waitq);
793 spin_unlock_irqrestore (&kibnal_data.kib_connd_lock, flags);
/* Close every connection of @peer with reason @why.  Caller holds the
 * global write lock.  Returns the number of conns closed (count updates
 * elided in this view). */
797 kibnal_close_peer_conns_locked (kib_peer_t *peer, int why)
800 struct list_head *ctmp;
801 struct list_head *cnxt;
804 list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
805 conn = list_entry (ctmp, kib_conn_t, ibc_list);
808 kibnal_close_conn_locked (conn, why);
/* Close @peer's connections whose incarnation differs from the current
 * @incarnation (i.e. conns established before the peer rebooted or
 * changed identity).  Caller holds the global write lock; returns the
 * count of conns closed (elided in this view). */
815 kibnal_close_stale_conns_locked (kib_peer_t *peer, __u64 incarnation)
818 struct list_head *ctmp;
819 struct list_head *cnxt;
822 list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
823 conn = list_entry (ctmp, kib_conn_t, ibc_list);
/* current-incarnation conns are fine; skip them */
825 if (conn->ibc_incarnation == incarnation)
828 CDEBUG(D_NET, "Closing stale conn nid:"LPX64" incarnation:"LPX64"("LPX64")\n",
829 peer->ibp_nid, conn->ibc_incarnation, incarnation);
832 kibnal_close_conn_locked (conn, -ESTALE);
/* Close all connections to @nid (or to everyone for PTL_NID_ANY);
 * backs NAL_CMD_CLOSE_CONNECTION.  Same bucket-range walk as
 * kibnal_del_peer().  Wildcard always succeeds; a specific NID returns
 * -ENOENT when nothing was closed. */
839 kibnal_close_matching_conns (ptl_nid_t nid)
843 struct list_head *ptmp;
844 struct list_head *pnxt;
850 write_lock_irqsave (&kibnal_data.kib_global_lock, flags);
852 if (nid != PTL_NID_ANY)
853 lo = hi = kibnal_nid2peerlist(nid) - kibnal_data.kib_peers;
856 hi = kibnal_data.kib_peer_hash_size - 1;
859 for (i = lo; i <= hi; i++) {
860 list_for_each_safe (ptmp, pnxt, &kibnal_data.kib_peers[i]) {
862 peer = list_entry (ptmp, kib_peer_t, ibp_list);
863 LASSERT (peer->ibp_persistence != 0 ||
864 peer->ibp_connecting != 0 ||
865 !list_empty (&peer->ibp_conns));
867 if (!(nid == PTL_NID_ANY || nid == peer->ibp_nid))
870 count += kibnal_close_peer_conns_locked (peer, 0);
874 write_unlock_irqrestore (&kibnal_data.kib_global_lock, flags);
876 /* wildcards always succeed */
877 if (nid == PTL_NID_ANY)
880 return (count == 0 ? -ENOENT : 0);
/* NAL command dispatcher registered with libcfs_nal_cmd_register():
 * handles the peer/connection management ioctls, filling the portals_cfg
 * reply fields in place.  Returns 0 or a negative errno from the
 * individual handlers. */
884 kibnal_cmd(struct portals_cfg *pcfg, void * private)
888 LASSERT (pcfg != NULL);
890 switch(pcfg->pcfg_command) {
891 case NAL_CMD_GET_PEER: {
895 rc = kibnal_get_peer_info(pcfg->pcfg_count,
897 pcfg->pcfg_nid = nid;
901 pcfg->pcfg_count = 0;
/* persistence count is reported in the "wait" field */
902 pcfg->pcfg_wait = share_count;
905 case NAL_CMD_ADD_PEER: {
906 rc = kibnal_add_persistent_peer (pcfg->pcfg_nid);
909 case NAL_CMD_DEL_PEER: {
910 rc = kibnal_del_peer (pcfg->pcfg_nid,
911 /* flags == single_share */
912 pcfg->pcfg_flags != 0);
915 case NAL_CMD_GET_CONN: {
916 kib_conn_t *conn = kibnal_get_conn_by_idx (pcfg->pcfg_count);
922 pcfg->pcfg_nid = conn->ibc_peer->ibp_nid;
925 pcfg->pcfg_flags = 0;
/* drop the ref kibnal_get_conn_by_idx took for us */
926 kibnal_put_conn (conn);
930 case NAL_CMD_CLOSE_CONNECTION: {
931 rc = kibnal_close_matching_conns (pcfg->pcfg_nid);
934 case NAL_CMD_REGISTER_MYNID: {
935 if (pcfg->pcfg_nid == PTL_NID_ANY)
938 rc = kibnal_set_mynid (pcfg->pcfg_nid);
/* Undo kibnal_alloc_pages(): deregister the IB memory region (if one
 * was registered — mapping check elided in this view), free each
 * allocated page, then free the descriptor itself. */
947 kibnal_free_pages (kib_pages_t *p)
949 int npages = p->ibp_npages;
954 rc = ib_memory_deregister(p->ibp_handle);
956 CERROR ("Deregister error: %d\n", rc);
/* tolerate a partially-populated array (alloc failure path) */
959 for (i = 0; i < npages; i++)
960 if (p->ibp_pages[i] != NULL)
961 __free_page(p->ibp_pages[i]);
963 PORTAL_FREE (p, offsetof(kib_pages_t, ibp_pages[npages]));
/* Allocate @npages kernel pages and register them as one physically-
 * addressed IB memory region with @access rights.
 * @pp: receives the new kib_pages_t on success.
 * On any failure everything allocated so far is released via
 * kibnal_free_pages() and an errno is returned (elided in this view). */
967 kibnal_alloc_pages (kib_pages_t **pp, int npages, int access)
970 struct ib_physical_buffer *phys_pages;
/* descriptor uses a trailing flexible page array */
974 PORTAL_ALLOC(p, offsetof(kib_pages_t, ibp_pages[npages]));
976 CERROR ("Can't allocate buffer %d\n", npages);
980 memset (p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
981 p->ibp_npages = npages;
983 for (i = 0; i < npages; i++) {
984 p->ibp_pages[i] = alloc_page (GFP_KERNEL);
985 if (p->ibp_pages[i] == NULL) {
986 CERROR ("Can't allocate page %d of %d\n", i, npages);
987 kibnal_free_pages(p);
/* temporary scatter list describing the pages for registration */
992 PORTAL_ALLOC(phys_pages, npages * sizeof(*phys_pages));
993 if (phys_pages == NULL) {
994 CERROR ("Can't allocate physarray for %d pages\n", npages);
995 kibnal_free_pages(p);
999 for (i = 0; i < npages; i++) {
1000 phys_pages[i].size = PAGE_SIZE;
1001 phys_pages[i].address =
1002 kibnal_page2phys(p->ibp_pages[i]);
1006 rc = ib_memory_register_physical(kibnal_data.kib_pd,
1009 npages * PAGE_SIZE, 0,
/* scatter list no longer needed once the region is registered */
1015 PORTAL_FREE(phys_pages, npages * sizeof(*phys_pages));
1018 CERROR ("Error %d mapping %d pages\n", rc, npages);
1019 kibnal_free_pages(p);
/* Initialise all TX descriptors: allocate + register the TX message
 * pages, then carve them into IBNAL_TX_MSGS fixed-size slots, wiring
 * each descriptor's kernel address and IB virtual address and parking it
 * on the appropriate idle list (blocking vs non-blocking pool). */
1029 kibnal_setup_tx_descs (void)
1032 int page_offset = 0;
1040 /* pre-mapped messages are not bigger than 1 page */
1041 LASSERT (IBNAL_MSG_SIZE <= PAGE_SIZE);
1043 /* No fancy arithmetic when we do the buffer calculations */
1044 LASSERT (PAGE_SIZE % IBNAL_MSG_SIZE == 0);
1046 rc = kibnal_alloc_pages(&kibnal_data.kib_tx_pages,
1048 0); /* local read access only */
1052 vaddr = vaddr_base = kibnal_data.kib_tx_pages->ibp_vaddr;
1054 for (i = 0; i < IBNAL_TX_MSGS; i++) {
1055 page = kibnal_data.kib_tx_pages->ibp_pages[ipage];
1056 tx = &kibnal_data.kib_tx_descs[i];
1058 memset (tx, 0, sizeof(*tx)); /* zero flags etc */
1060 tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) + page_offset);
1061 tx->tx_vaddr = vaddr;
/* descriptors beyond IBNAL_NTX form the non-blocking pool */
1062 tx->tx_isnblk = (i >= IBNAL_NTX);
1063 tx->tx_mapped = KIB_TX_UNMAPPED;
1065 CDEBUG(D_NET, "Tx[%d] %p->%p - "LPX64"\n",
1066 i, tx, tx->tx_msg, tx->tx_vaddr);
1069 list_add (&tx->tx_list,
1070 &kibnal_data.kib_idle_nblk_txs);
1072 list_add (&tx->tx_list,
1073 &kibnal_data.kib_idle_txs);
1075 vaddr += IBNAL_MSG_SIZE;
1076 LASSERT (vaddr <= vaddr_base + IBNAL_TX_MSG_BYTES);
1078 page_offset += IBNAL_MSG_SIZE;
1079 LASSERT (page_offset <= PAGE_SIZE);
/* page exhausted: advance to the next one */
1081 if (page_offset == PAGE_SIZE) {
1084 LASSERT (ipage <= IBNAL_TX_MSG_PAGES);
/* Tear down the NAL.  The switch on kib_init falls through from the
 * highest initialisation stage reached down to NOTHING, undoing each
 * stage of kibnal_api_startup() in reverse: nal_cmd unregister, NID
 * reset (which unadvertises, stops the listener and nukes peers), CQ,
 * TX descs, FMR pool, PD, lib_fini, then thread shutdown and final
 * frees.  Also used as the error-unwind path by startup itself. */
1092 kibnal_api_shutdown (nal_t *nal)
1097 if (nal->nal_refct != 0) {
1098 /* This module got the first ref */
1099 PORTAL_MODULE_UNUSE;
1103 CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
1104 atomic_read (&portal_kmemory));
1106 LASSERT(nal == &kibnal_api);
1108 switch (kibnal_data.kib_init) {
1110 CERROR ("Unexpected state %d\n", kibnal_data.kib_init);
1113 case IBNAL_INIT_ALL:
1114 /* stop calls to nal_cmd */
1115 libcfs_nal_cmd_unregister(OPENIBNAL);
1118 /* resetting my NID to unadvertises me, removes my
1119 * listener and nukes all current peers */
1120 kibnal_set_mynid (PTL_NID_ANY);
1122 /* Wait for all peer state to clean up */
1124 while (atomic_read (&kibnal_data.kib_npeers) != 0) {
1126 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
1127 "waiting for %d peers to close down\n",
1128 atomic_read (&kibnal_data.kib_npeers));
1129 set_current_state (TASK_INTERRUPTIBLE);
1130 schedule_timeout (HZ);
/* fallthrough: undo CQ stage */
1135 rc = ib_cq_destroy (kibnal_data.kib_cq);
1137 CERROR ("Destroy CQ error: %d\n", rc);
1140 case IBNAL_INIT_TXD:
1141 kibnal_free_pages (kibnal_data.kib_tx_pages);
1144 case IBNAL_INIT_FMR:
1145 rc = ib_fmr_pool_destroy (kibnal_data.kib_fmr_pool);
1147 CERROR ("Destroy FMR pool error: %d\n", rc);
/* fallthrough: undo PD stage */
1151 rc = ib_pd_destroy(kibnal_data.kib_pd);
1153 CERROR ("Destroy PD error: %d\n", rc);
1156 case IBNAL_INIT_LIB:
1157 lib_fini(&kibnal_lib);
1160 case IBNAL_INIT_DATA:
1161 /* Module refcount only gets to zero when all peers
1162 * have been closed so all lists must be empty */
1163 LASSERT (atomic_read (&kibnal_data.kib_npeers) == 0);
1164 LASSERT (kibnal_data.kib_peers != NULL);
1165 for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {
1166 LASSERT (list_empty (&kibnal_data.kib_peers[i]));
1168 LASSERT (atomic_read (&kibnal_data.kib_nconns) == 0);
1169 LASSERT (list_empty (&kibnal_data.kib_sched_rxq));
1170 LASSERT (list_empty (&kibnal_data.kib_sched_txq));
1171 LASSERT (list_empty (&kibnal_data.kib_connd_conns));
1172 LASSERT (list_empty (&kibnal_data.kib_connd_peers));
1174 /* flag threads to terminate; wake and wait for them to die */
1175 kibnal_data.kib_shutdown = 1;
1176 wake_up_all (&kibnal_data.kib_sched_waitq);
1177 wake_up_all (&kibnal_data.kib_connd_waitq);
1180 while (atomic_read (&kibnal_data.kib_nthreads) != 0) {
1182 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
1183 "Waiting for %d threads to terminate\n",
1184 atomic_read (&kibnal_data.kib_nthreads));
1185 set_current_state (TASK_INTERRUPTIBLE);
1186 schedule_timeout (HZ);
1190 case IBNAL_INIT_NOTHING:
/* unconditional frees: safe on partially-initialised state */
1194 if (kibnal_data.kib_tx_descs != NULL)
1195 PORTAL_FREE (kibnal_data.kib_tx_descs,
1196 IBNAL_TX_MSGS * sizeof(kib_tx_t));
1198 if (kibnal_data.kib_peers != NULL)
1199 PORTAL_FREE (kibnal_data.kib_peers,
1200 sizeof (struct list_head) *
1201 kibnal_data.kib_peer_hash_size);
1203 CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
1204 atomic_read (&portal_kmemory));
1205 printk(KERN_INFO "Lustre: OpenIB NAL unloaded (final mem %d)\n",
1206 atomic_read(&portal_kmemory));
1208 kibnal_data.kib_init = IBNAL_INIT_NOTHING;
/* Bring the NAL up, advancing kib_init through the stage machine:
 * DATA (locks/lists/allocs) -> LIB (lib_init) -> threads -> device/port
 * discovery -> PD -> FMR pool -> TXD (tx descs) -> CQ -> ALL (nal_cmd
 * registered).  Any failure unwinds via kibnal_api_shutdown(), which
 * keys its cleanup off the stage reached.  Subsequent callers (nal_refct
 * != 0) just get the existing limits back. */
1212 kibnal_api_startup (nal_t *nal, ptl_pid_t requested_pid,
1213 ptl_ni_limits_t *requested_limits,
1214 ptl_ni_limits_t *actual_limits)
1216 ptl_process_id_t process_id;
1217 int pkmem = atomic_read(&portal_kmemory);
1221 LASSERT (nal == &kibnal_api);
1223 if (nal->nal_refct != 0) {
1224 if (actual_limits != NULL)
1225 *actual_limits = kibnal_lib.libnal_ni.ni_actual_limits;
1226 /* This module got the first ref */
1231 LASSERT (kibnal_data.kib_init == IBNAL_INIT_NOTHING);
1233 memset (&kibnal_data, 0, sizeof (kibnal_data)); /* zero pointers, flags etc */
/* kib_nid_signal starts locked: down() blocks until a callback up()s it */
1235 init_MUTEX (&kibnal_data.kib_nid_mutex);
1236 init_MUTEX_LOCKED (&kibnal_data.kib_nid_signal);
1237 kibnal_data.kib_nid = PTL_NID_ANY;
1239 rwlock_init(&kibnal_data.kib_global_lock);
1241 kibnal_data.kib_peer_hash_size = IBNAL_PEER_HASH_SIZE;
1242 PORTAL_ALLOC (kibnal_data.kib_peers,
1243 sizeof (struct list_head) * kibnal_data.kib_peer_hash_size);
1244 if (kibnal_data.kib_peers == NULL) {
1247 for (i = 0; i < kibnal_data.kib_peer_hash_size; i++)
1248 INIT_LIST_HEAD(&kibnal_data.kib_peers[i]);
1250 spin_lock_init (&kibnal_data.kib_connd_lock);
1251 INIT_LIST_HEAD (&kibnal_data.kib_connd_peers);
1252 INIT_LIST_HEAD (&kibnal_data.kib_connd_conns);
1253 init_waitqueue_head (&kibnal_data.kib_connd_waitq);
1255 spin_lock_init (&kibnal_data.kib_sched_lock);
1256 INIT_LIST_HEAD (&kibnal_data.kib_sched_txq);
1257 INIT_LIST_HEAD (&kibnal_data.kib_sched_rxq);
1258 init_waitqueue_head (&kibnal_data.kib_sched_waitq);
1260 spin_lock_init (&kibnal_data.kib_tx_lock);
1261 INIT_LIST_HEAD (&kibnal_data.kib_idle_txs);
1262 INIT_LIST_HEAD (&kibnal_data.kib_idle_nblk_txs);
1263 init_waitqueue_head(&kibnal_data.kib_idle_tx_waitq);
1265 PORTAL_ALLOC (kibnal_data.kib_tx_descs,
1266 IBNAL_TX_MSGS * sizeof(kib_tx_t));
1267 if (kibnal_data.kib_tx_descs == NULL) {
1268 CERROR ("Can't allocate tx descs\n");
1272 /* lists/ptrs/locks initialised */
1273 kibnal_data.kib_init = IBNAL_INIT_DATA;
1274 /*****************************************************/
1277 process_id.pid = requested_pid;
1278 process_id.nid = kibnal_data.kib_nid;
1280 rc = lib_init(&kibnal_lib, nal, process_id,
1281 requested_limits, actual_limits);
1283 CERROR("lib_init failed: error %d\n", rc);
1287 /* lib interface initialised */
1288 kibnal_data.kib_init = IBNAL_INIT_LIB;
1289 /*****************************************************/
/* spawn the scheduler pool and the single connection daemon */
1291 for (i = 0; i < IBNAL_N_SCHED; i++) {
1292 rc = kibnal_thread_start (kibnal_scheduler, (void *)i);
1294 CERROR("Can't spawn openibnal scheduler[%d]: %d\n",
1300 rc = kibnal_thread_start (kibnal_connd, NULL);
1302 CERROR ("Can't spawn openibnal connd: %d\n", rc);
/* NOTE(review): only IB device index 0 is ever considered */
1306 kibnal_data.kib_device = ib_device_get_by_index(0);
1307 if (kibnal_data.kib_device == NULL) {
1308 CERROR ("Can't open ib device 0\n");
1312 rc = ib_device_properties_get(kibnal_data.kib_device,
1313 &kibnal_data.kib_device_props);
1315 CERROR ("Can't get device props: %d\n", rc);
1319 CDEBUG(D_NET, "Max Initiator: %d Max Responder %d\n",
1320 kibnal_data.kib_device_props.max_initiator_per_qp,
1321 kibnal_data.kib_device_props.max_responder_per_qp);
/* use the first usable port (1 or 2) */
1323 kibnal_data.kib_port = 0;
1324 for (i = 1; i <= 2; i++) {
1325 rc = ib_port_properties_get(kibnal_data.kib_device, i,
1326 &kibnal_data.kib_port_props);
1328 kibnal_data.kib_port = i;
1332 if (kibnal_data.kib_port == 0) {
1333 CERROR ("Can't find a port\n");
1337 rc = ib_pd_create(kibnal_data.kib_device,
1338 NULL, &kibnal_data.kib_pd);
1340 CERROR ("Can't create PD: %d\n", rc);
1344 /* flag PD initialised */
1345 kibnal_data.kib_init = IBNAL_INIT_PD;
1346 /*****************************************************/
1349 const int pool_size = IBNAL_NTX + IBNAL_NTX_NBLK;
1350 struct ib_fmr_pool_param params = {
1351 .max_pages_per_fmr = PTL_MTU/PAGE_SIZE,
1352 .access = (IB_ACCESS_LOCAL_WRITE |
1353 IB_ACCESS_REMOTE_WRITE |
1354 IB_ACCESS_REMOTE_READ),
1355 .pool_size = pool_size,
1356 .dirty_watermark = (pool_size * 3)/4,
1357 .flush_function = NULL,
1361 rc = ib_fmr_pool_create(kibnal_data.kib_pd, &params,
1362 &kibnal_data.kib_fmr_pool);
1364 CERROR ("Can't create FMR pool size %d: %d\n",
1370 /* flag FMR pool initialised */
1371 kibnal_data.kib_init = IBNAL_INIT_FMR;
1373 /*****************************************************/
1375 rc = kibnal_setup_tx_descs();
1377 CERROR ("Can't register tx descs: %d\n", rc);
1381 /* flag TX descs initialised */
1382 kibnal_data.kib_init = IBNAL_INIT_TXD;
1383 /*****************************************************/
1386 struct ib_cq_callback callback = {
1387 .context = IBNAL_CALLBACK_CTXT,
1388 .policy = IB_CQ_PROVIDER_REARM,
1390 .entry = kibnal_callback,
1394 int nentries = IBNAL_CQ_ENTRIES;
1396 rc = ib_cq_create (kibnal_data.kib_device,
1397 &nentries, &callback, NULL,
1398 &kibnal_data.kib_cq);
1400 CERROR ("Can't create CQ: %d\n", rc);
1404 /* I only want solicited events */
1405 rc = ib_cq_request_notification(kibnal_data.kib_cq, 1);
1409 /* flag CQ initialised */
1410 kibnal_data.kib_init = IBNAL_INIT_CQ;
1411 /*****************************************************/
1413 rc = libcfs_nal_cmd_register(OPENIBNAL, &kibnal_cmd, NULL);
1415 CERROR ("Can't initialise command interface (rc = %d)\n", rc);
1419 /* flag everything initialised */
1420 kibnal_data.kib_init = IBNAL_INIT_ALL;
1421 /*****************************************************/
1423 printk(KERN_INFO "Lustre: OpenIB NAL loaded "
1424 "(initial mem %d)\n", pkmem);
/* common failure exit: shutdown unwinds whatever stage we reached */
1429 kibnal_api_shutdown (&kibnal_api);
/* Module unload: drop the sysctl table (if registered), release the
 * module's Portals NI reference and unregister the NAL. */
1434 kibnal_module_fini (void)
1436 #ifdef CONFIG_SYSCTL
1437 if (kibnal_tunables.kib_sysctl != NULL)
1438 unregister_sysctl_table (kibnal_tunables.kib_sysctl);
1440 PtlNIFini(kibnal_ni);
1442 ptl_unregister_nal(OPENIBNAL);
/* Module load: wire the startup/shutdown entry points into the nal_t,
 * set tunable defaults, register the NAL with Portals, take the
 * module's own NI reference (so gateways work without a client), and
 * best-effort register the sysctl tree. */
1446 kibnal_module_init (void)
1450 /* the following must be sizeof(int) for proc_dointvec() */
1451 LASSERT(sizeof (kibnal_tunables.kib_io_timeout) == sizeof (int));
1453 kibnal_api.nal_ni_init = kibnal_api_startup;
1454 kibnal_api.nal_ni_fini = kibnal_api_shutdown;
1456 /* Initialise dynamic tunables to defaults once only */
1457 kibnal_tunables.kib_io_timeout = IBNAL_IO_TIMEOUT;
1459 rc = ptl_register_nal(OPENIBNAL, &kibnal_api);
1461 CERROR("Can't register IBNAL: %d\n", rc);
1462 return (-ENOMEM); /* or something... */
1465 /* Pure gateways want the NAL started up at module load time... */
1466 rc = PtlNIInit(OPENIBNAL, LUSTRE_SRV_PTL_PID, NULL, NULL, &kibnal_ni);
1467 if (rc != PTL_OK && rc != PTL_IFACE_DUP) {
1468 ptl_unregister_nal(OPENIBNAL);
1472 #ifdef CONFIG_SYSCTL
1473 /* Press on regardless even if registering sysctl doesn't work */
1474 kibnal_tunables.kib_sysctl =
1475 register_sysctl_table (kibnal_top_ctl_table, 0);
/* Standard kernel module metadata and entry/exit point registration. */
1480 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
1481 MODULE_DESCRIPTION("Kernel OpenIB NAL v0.01");
1482 MODULE_LICENSE("GPL");
1484 module_init(kibnal_module_init);
1485 module_exit(kibnal_module_fini);