/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2004 Cluster File Systems, Inc.
 * Author: Eric Barton <eric@bartonsoftware.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

static int        kranal_devids[] = {RAPK_MAIN_DEVICE_ID,
                                     RAPK_EXPANSION_DEVICE_ID};

ptl_handle_ni_t   kranal_ni;
kra_data_t        kranal_data;
kra_tunables_t    kranal_tunables;

#define RANAL_SYSCTL_TIMEOUT           1
#define RANAL_SYSCTL_LISTENER_TIMEOUT  2
#define RANAL_SYSCTL_BACKLOG           3
#define RANAL_SYSCTL_PORT              4
#define RANAL_SYSCTL_MAX_IMMEDIATE     5

#define RANAL_SYSCTL                   202

static ctl_table kranal_ctl_table[] = {
        {RANAL_SYSCTL_TIMEOUT, "timeout",
         &kranal_tunables.kra_timeout, sizeof(int),
         0644, NULL, &proc_dointvec},
        {RANAL_SYSCTL_LISTENER_TIMEOUT, "listener_timeout",
         &kranal_tunables.kra_listener_timeout, sizeof(int),
         0644, NULL, &proc_dointvec},
        {RANAL_SYSCTL_BACKLOG, "backlog",
         &kranal_tunables.kra_backlog, sizeof(int),
         0644, NULL, &kranal_listener_procint},
        {RANAL_SYSCTL_PORT, "port",
         &kranal_tunables.kra_port, sizeof(int),
         0644, NULL, &kranal_listener_procint},
        {RANAL_SYSCTL_MAX_IMMEDIATE, "max_immediate",
         &kranal_tunables.kra_max_immediate, sizeof(int),
         0644, NULL, &proc_dointvec},
        {0}
};

static ctl_table kranal_top_ctl_table[] = {
        {RANAL_SYSCTL, "ranal", NULL, 0, 0555, kranal_ctl_table},
        {0}
};
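
/* Illustrative note (not in the original source): once
 * kranal_top_ctl_table is registered, these tunables appear under
 * /proc/sys/ranal/.  A sketch of typical usage, assuming the entry names
 * above:
 *
 *     cat /proc/sys/ranal/timeout
 *     echo 120 > /proc/sys/ranal/timeout
 *
 * "timeout", "listener_timeout" and "max_immediate" use plain
 * proc_dointvec; "backlog" and "port" go through
 * kranal_listener_procint() below, which restarts the listener so the
 * new value actually takes effect. */
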
kranal_sock_write (struct socket *sock, void *buffer, int nob)

        mm_segment_t  oldmm = get_fs();

                .msg_flags  = MSG_DONTWAIT

        /* We've set up the socket's send buffer to be large enough for
         * everything we send, so a single non-blocking send should
         * complete without error. */

        rc = sock_sendmsg(sock, &msg, iov.iov_len);

kranal_sock_read (struct socket *sock, void *buffer, int nob, int timeout)

        mm_segment_t  oldmm = get_fs();
        long          ticks = timeout * HZ;

                struct msghdr msg = {

                /* Set receive timeout to remaining time */
                tv = (struct timeval) {
                        .tv_sec  = ticks / HZ,
                        .tv_usec = ((ticks % HZ) * 1000000) / HZ
                };

                rc = sock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
                                     (char *)&tv, sizeof(tv));
                        CERROR("Can't set socket recv timeout %d: %d\n",

                rc = sock_recvmsg(sock, &msg, iov.iov_len, 0);
                ticks -= jiffies - then;

                        return -ECONNABORTED;

                buffer = ((char *)buffer) + rc;
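
        /* Worked example (illustrative, not in the original source): with
         * HZ == 100 and timeout == 5 seconds, ticks starts at 500.  If the
         * first sock_recvmsg() takes 150 jiffies, ticks drops to 350 and
         * the next SO_RCVTIMEO becomes { .tv_sec = 3, .tv_usec = 500000 },
         * i.e. the 3.5 seconds remaining, so the whole read completes (or
         * times out) within the caller's original budget. */
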
kranal_create_sock(struct socket **sockp)

        mm_segment_t oldmm = get_fs();

        rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
                CERROR("Can't create socket: %d\n", rc);

        /* Ensure sending connection info doesn't block */
        option = 2 * sizeof(kra_connreq_t);

        rc = sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
                             (char *)&option, sizeof(option));
                CERROR("Can't set send buffer %d: %d\n", option, rc);

        rc = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
                             (char *)&option, sizeof(option));
                CERROR("Can't set SO_REUSEADDR: %d\n", rc);

kranal_pause(int ticks)

        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(ticks);
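
/* Usage sketch (illustrative): callers typically back off with
 * kranal_pause(HZ) to sleep about a second.  TASK_UNINTERRUPTIBLE means
 * the sleep ignores signals, which is deliberate: these are short,
 * bounded retry delays (e.g. the listener's allocation-failure paths
 * below). */
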
kranal_pack_connreq(kra_connreq_t *connreq, kra_conn_t *conn, ptl_nid_t dstnid)

        memset(connreq, 0, sizeof(*connreq));

        connreq->racr_magic     = RANAL_MSG_MAGIC;
        connreq->racr_version   = RANAL_MSG_VERSION;
        connreq->racr_devid     = conn->rac_device->rad_id;
        connreq->racr_srcnid    = kranal_lib.libnal_ni.ni_pid.nid;
        connreq->racr_dstnid    = dstnid;
        connreq->racr_peerstamp = kranal_data.kra_peerstamp;
        connreq->racr_connstamp = conn->rac_my_connstamp;
        connreq->racr_timeout   = conn->rac_timeout;

        rrc = RapkGetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
        LASSERT(rrc == RAP_SUCCESS);
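
/* Handshake sketch (illustrative summary): each side sends the peer a
 * kra_connreq_t carrying (magic, version, devid, srcnid, dstnid,
 * peerstamp, connstamp, timeout, riparams) and expects one back.  The
 * riparams obtained from RapkGetRiParams() above let the peer program
 * its RapidArray endpoint via RapkSetRiParams() (see
 * kranal_set_conn_params() below) before any FMA/RDMA traffic flows. */
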
kranal_recv_connreq(struct socket *sock, kra_connreq_t *connreq, int timeout)

        rc = kranal_sock_read(sock, connreq, sizeof(*connreq), timeout);
                CERROR("Read failed: %d\n", rc);

        if (connreq->racr_magic != RANAL_MSG_MAGIC) {
                if (__swab32(connreq->racr_magic) != RANAL_MSG_MAGIC) {
                        CERROR("Unexpected magic %08x\n", connreq->racr_magic);

                __swab32s(&connreq->racr_magic);
                __swab16s(&connreq->racr_version);
                __swab16s(&connreq->racr_devid);
                __swab64s(&connreq->racr_srcnid);
                __swab64s(&connreq->racr_dstnid);
                __swab64s(&connreq->racr_peerstamp);
                __swab64s(&connreq->racr_connstamp);
                __swab32s(&connreq->racr_timeout);

                __swab32s(&connreq->racr_riparams.HostId);
                __swab32s(&connreq->racr_riparams.FmaDomainHndl);
                __swab32s(&connreq->racr_riparams.PTag);
                __swab32s(&connreq->racr_riparams.CompletionCookie);
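
        /* Illustrative note: endianness is detected from the magic alone.
         * If racr_magic matches RANAL_MSG_MAGIC the connreq is already in
         * host byte order; if only __swab32(racr_magic) matches, the peer
         * is opposite-endian and every multi-byte field (including the
         * riparams words above) is byte-swapped in place before the
         * validation below. */
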
        if (connreq->racr_version != RANAL_MSG_VERSION) {
                CERROR("Unexpected version %d\n", connreq->racr_version);

        if (connreq->racr_srcnid == PTL_NID_ANY ||
            connreq->racr_dstnid == PTL_NID_ANY) {
                CERROR("Received PTL_NID_ANY\n");

        if (connreq->racr_timeout < RANAL_MIN_TIMEOUT) {
                CERROR("Received timeout %d < MIN %d\n",
                       connreq->racr_timeout, RANAL_MIN_TIMEOUT);

kranal_close_stale_conns_locked (kra_peer_t *peer, kra_conn_t *newconn)

        struct list_head *ctmp;
        struct list_head *cnxt;

        loopback = peer->rap_nid == kranal_lib.libnal_ni.ni_pid.nid;

        list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
                conn = list_entry(ctmp, kra_conn_t, rac_list);

                if (conn->rac_peerstamp != newconn->rac_peerstamp) {
                        CDEBUG(D_NET, "Closing stale conn nid:"LPX64
                               " peerstamp:"LPX64"("LPX64")\n", peer->rap_nid,
                               conn->rac_peerstamp, newconn->rac_peerstamp);
                        LASSERT (conn->rac_peerstamp < newconn->rac_peerstamp);

                        kranal_close_conn_locked(conn, -ESTALE);

                if (conn->rac_device != newconn->rac_device)

                    newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
                    newconn->rac_peer_connstamp == conn->rac_my_connstamp)

                LASSERT (conn->rac_peer_connstamp < newconn->rac_peer_connstamp);

                CDEBUG(D_NET, "Closing stale conn nid:"LPX64
                       " connstamp:"LPX64"("LPX64")\n", peer->rap_nid,
                       conn->rac_peer_connstamp, newconn->rac_peer_connstamp);

                kranal_close_conn_locked(conn, -ESTALE);

kranal_conn_isdup_locked(kra_peer_t *peer, kra_conn_t *newconn)

        struct list_head *tmp;

        loopback = peer->rap_nid == kranal_lib.libnal_ni.ni_pid.nid;

        list_for_each(tmp, &peer->rap_conns) {
                conn = list_entry(tmp, kra_conn_t, rac_list);

                /* 'newconn' is from an earlier version of 'peer'!!! */
                if (newconn->rac_peerstamp < conn->rac_peerstamp)

                /* 'conn' is from an earlier version of 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->rac_peerstamp > conn->rac_peerstamp)

                /* Different devices are OK */
                if (conn->rac_device != newconn->rac_device)

                /* It's me connecting to myself */
                    newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
                    newconn->rac_peer_connstamp == conn->rac_my_connstamp)

                /* 'newconn' is an earlier connection from 'peer'!!! */
                if (newconn->rac_peer_connstamp < conn->rac_peer_connstamp)

                /* 'conn' is an earlier connection from 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->rac_peer_connstamp > conn->rac_peer_connstamp)

                /* 'newconn' has the SAME connection stamp; 'peer' isn't
                 * playing the game... */
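
/* Decision summary (illustrative): the stamps give a total order on
 * connection "generations".  racr_peerstamp orders boots of the peer;
 * connstamps order connections within one boot.  A newconn older than an
 * existing conn is stale and refused; a newer one is accepted (the older
 * conn is culled by kranal_close_stale_conns_locked()); equal connstamps
 * on the same device mean a duplicate of a live connection, which is
 * also refused. */
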
kranal_set_conn_uniqueness (kra_conn_t *conn)

        write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        conn->rac_my_connstamp = kranal_data.kra_connstamp++;

        do {    /* allocate a unique cqid */
                conn->rac_cqid = kranal_data.kra_next_cqid++;
        } while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL);

        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
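
/* Illustrative note: kra_next_cqid is a bare counter, so after enough
 * connections it can wrap and collide with a cqid still live in the
 * conn hash; the do/while above simply keeps incrementing until
 * kranal_cqid2conn_locked() reports the candidate cqid free. */
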
kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)

        LASSERT (!in_interrupt());
        PORTAL_ALLOC(conn, sizeof(*conn));

        memset(conn, 0, sizeof(*conn));
        atomic_set(&conn->rac_refcount, 1);
        INIT_LIST_HEAD(&conn->rac_list);
        INIT_LIST_HEAD(&conn->rac_hashlist);
        INIT_LIST_HEAD(&conn->rac_schedlist);
        INIT_LIST_HEAD(&conn->rac_fmaq);
        INIT_LIST_HEAD(&conn->rac_rdmaq);
        INIT_LIST_HEAD(&conn->rac_replyq);
        spin_lock_init(&conn->rac_lock);

        kranal_set_conn_uniqueness(conn);

        conn->rac_device = dev;
        conn->rac_timeout = MAX(kranal_tunables.kra_timeout, RANAL_MIN_TIMEOUT);
        kranal_update_reaper_timeout(conn->rac_timeout);

        rrc = RapkCreateRi(dev->rad_handle, conn->rac_cqid,
                           &conn->rac_rihandle);
        if (rrc != RAP_SUCCESS) {
                CERROR("RapkCreateRi failed: %d\n", rrc);
                PORTAL_FREE(conn, sizeof(*conn));

        atomic_inc(&kranal_data.kra_nconns);

kranal_destroy_conn(kra_conn_t *conn)

        LASSERT (!in_interrupt());
        LASSERT (!conn->rac_scheduled);
        LASSERT (list_empty(&conn->rac_list));
        LASSERT (list_empty(&conn->rac_hashlist));
        LASSERT (list_empty(&conn->rac_schedlist));
        LASSERT (atomic_read(&conn->rac_refcount) == 0);
        LASSERT (list_empty(&conn->rac_fmaq));
        LASSERT (list_empty(&conn->rac_rdmaq));
        LASSERT (list_empty(&conn->rac_replyq));

        rrc = RapkDestroyRi(conn->rac_device->rad_handle,
                            conn->rac_rihandle);
        LASSERT (rrc == RAP_SUCCESS);

        if (conn->rac_peer != NULL)
                kranal_peer_decref(conn->rac_peer);

        PORTAL_FREE(conn, sizeof(*conn));
        atomic_dec(&kranal_data.kra_nconns);

kranal_terminate_conn_locked (kra_conn_t *conn)

        LASSERT (!in_interrupt());
        LASSERT (conn->rac_state == RANAL_CONN_CLOSING);
        LASSERT (!list_empty(&conn->rac_hashlist));
        LASSERT (list_empty(&conn->rac_list));

        /* Remove from conn hash table: no new callbacks */
        list_del_init(&conn->rac_hashlist);
        kranal_conn_decref(conn);

        conn->rac_state = RANAL_CONN_CLOSED;

        /* schedule to clear out all uncompleted comms in context of dev's
         * scheduler */
        kranal_schedule_conn(conn);

kranal_close_conn_locked (kra_conn_t *conn, int error)

        kra_peer_t *peer = conn->rac_peer;

        CDEBUG(error == 0 ? D_NET : D_ERROR,
               "closing conn to "LPX64": error %d\n", peer->rap_nid, error);

        LASSERT (!in_interrupt());
        LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED);
        LASSERT (!list_empty(&conn->rac_hashlist));
        LASSERT (!list_empty(&conn->rac_list));

        list_del_init(&conn->rac_list);

        if (list_empty(&peer->rap_conns) &&
            peer->rap_persistence == 0) {
                /* Non-persistent peer with no more conns... */
                kranal_unlink_peer_locked(peer);

        /* Reset RX timeout to ensure we wait for an incoming CLOSE for the
         * full timeout.  If we get a CLOSE we know the peer has stopped all
         * RDMA.  Otherwise if we wait for the full timeout we can also be
         * sure all RDMA has stopped. */
        conn->rac_last_rx = jiffies;

        conn->rac_state = RANAL_CONN_CLOSING;
        kranal_schedule_conn(conn);             /* schedule sending CLOSE */

        kranal_conn_decref(conn);               /* lose peer's ref */
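
/* State machine sketch (illustrative): a conn goes ESTABLISHED ->
 * CLOSING (above: unlinked from its peer's conn list, CLOSE scheduled)
 * -> CLOSED (kranal_terminate_conn_locked(): unlinked from the cqid hash
 * so no new callbacks arrive), and is finally freed by
 * kranal_destroy_conn() when the last reference is dropped. */
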
kranal_close_conn (kra_conn_t *conn, int error)

        write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        if (conn->rac_state == RANAL_CONN_ESTABLISHED)
                kranal_close_conn_locked(conn, error);

        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

kranal_set_conn_params(kra_conn_t *conn, kra_connreq_t *connreq,
                       __u32 peer_ip, int peer_port)

        kra_device_t *dev = conn->rac_device;

        /* CAVEAT EMPTOR: we're really overloading rac_last_tx + rac_keepalive
         * to do RapkCompleteSync() timekeeping (see kranal_scheduler). */
        conn->rac_last_tx = jiffies;
        conn->rac_keepalive = 0;

        rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
        if (rrc != RAP_SUCCESS) {
                CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer_ip), peer_port, rrc);
                return -ECONNABORTED;

        /* Schedule conn on rad_new_conns */
        kranal_conn_addref(conn);
        spin_lock_irqsave(&dev->rad_lock, flags);
        list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
        wake_up(&dev->rad_waitq);
        spin_unlock_irqrestore(&dev->rad_lock, flags);

        rrc = RapkWaitToConnect(conn->rac_rihandle);
        if (rrc != RAP_SUCCESS) {
                CERROR("Error waiting to connect to %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer_ip), peer_port, rrc);
                return -ECONNABORTED;

        /* Scheduler doesn't touch conn apart from to deschedule and decref it
         * after RapkCompleteSync() returns success, so conn is all mine */

        conn->rac_peerstamp = connreq->racr_peerstamp;
        conn->rac_peer_connstamp = connreq->racr_connstamp;
        conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout);
        kranal_update_reaper_timeout(conn->rac_keepalive);

kranal_passive_conn_handshake (struct socket *sock, ptl_nid_t *src_nidp,
                               ptl_nid_t *dst_nidp, kra_conn_t **connp)

        struct sockaddr_in   addr;
        unsigned int         peer_port;
        kra_connreq_t        rx_connreq;
        kra_connreq_t        tx_connreq;

        rc = sock->ops->getname(sock, (struct sockaddr *)&addr, &len, 2);
                CERROR("Can't get peer's IP: %d\n", rc);

        peer_ip = ntohl(addr.sin_addr.s_addr);
        peer_port = ntohs(addr.sin_port);

        if (peer_port >= 1024) {
                CERROR("Refusing unprivileged connection from %u.%u.%u.%u/%d\n",
                       HIPQUAD(peer_ip), peer_port);
                return -ECONNREFUSED;

        rc = kranal_recv_connreq(sock, &rx_connreq,
                                 kranal_tunables.kra_listener_timeout);
                CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer_ip), peer_port, rc);

                if (i == kranal_data.kra_ndevs) {
                        CERROR("Can't match dev %d from %u.%u.%u.%u/%d\n",
                               rx_connreq.racr_devid, HIPQUAD(peer_ip), peer_port);

                dev = &kranal_data.kra_devices[i];
                if (dev->rad_id == rx_connreq.racr_devid)

        rc = kranal_create_conn(&conn, dev);

        kranal_pack_connreq(&tx_connreq, conn, rx_connreq.racr_srcnid);

        rc = kranal_sock_write(sock, &tx_connreq, sizeof(tx_connreq));
                CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer_ip), peer_port, rc);
                kranal_conn_decref(conn);

        rc = kranal_set_conn_params(conn, &rx_connreq, peer_ip, peer_port);
                kranal_conn_decref(conn);

        *src_nidp = rx_connreq.racr_srcnid;
        *dst_nidp = rx_connreq.racr_dstnid;

ranal_connect_sock(kra_peer_t *peer, struct socket **sockp)

        struct sockaddr_in locaddr;
        struct sockaddr_in srvaddr;

        for (port = 1023; port >= 512; port--) {

                memset(&locaddr, 0, sizeof(locaddr));
                locaddr.sin_family      = AF_INET;
                locaddr.sin_port        = htons(port);
                locaddr.sin_addr.s_addr = htonl(INADDR_ANY);

                memset(&srvaddr, 0, sizeof(srvaddr));
                srvaddr.sin_family      = AF_INET;
                srvaddr.sin_port        = htons(peer->rap_port);
                srvaddr.sin_addr.s_addr = htonl(peer->rap_ip);

                rc = kranal_create_sock(&sock);

                rc = sock->ops->bind(sock,
                                     (struct sockaddr *)&locaddr, sizeof(locaddr));

                        if (rc == -EADDRINUSE) {
                                CDEBUG(D_NET, "Port %d already in use\n", port);

                        CERROR("Can't bind to reserved port %d: %d\n", port, rc);

                rc = sock->ops->connect(sock,
                                        (struct sockaddr *)&srvaddr, sizeof(srvaddr),

                if (rc != -EADDRNOTAVAIL) {
                        CERROR("Can't connect port %d to %u.%u.%u.%u/%d: %d\n",
                               port, HIPQUAD(peer->rap_ip), peer->rap_port, rc);

                CDEBUG(D_NET, "Port %d not available for %u.%u.%u.%u/%d\n",
                       port, HIPQUAD(peer->rap_ip), peer->rap_port);

        return -EHOSTUNREACH;
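
/* Illustrative note: binding the outgoing socket to a reserved port
 * (1023 down to 512) is the same weak authentication rsh used: only root
 * may bind those ports, and the passive side above refuses peers
 * connecting from port >= 1024.  Any given port may be -EADDRINUSE, so
 * the loop walks downwards until bind+connect succeeds or the range is
 * exhausted (-EHOSTUNREACH). */
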
kranal_active_conn_handshake(kra_peer_t *peer,
                             ptl_nid_t *dst_nidp, kra_conn_t **connp)

        kra_connreq_t connreq;

        /* spread connections over all devices using both peer NIDs to ensure
         * all nids use all devices */
        idx = peer->rap_nid + kranal_lib.libnal_ni.ni_pid.nid;
        dev = &kranal_data.kra_devices[idx % kranal_data.kra_ndevs];

        rc = kranal_create_conn(&conn, dev);

        kranal_pack_connreq(&connreq, conn, peer->rap_nid);

        rc = ranal_connect_sock(peer, &sock);

        /* CAVEAT EMPTOR: the passive side receives with a SHORT rx timeout
         * immediately after accepting a connection, so we connect and then
         * send immediately. */

        rc = kranal_sock_write(sock, &connreq, sizeof(connreq));
                CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer->rap_ip), peer->rap_port, rc);

        rc = kranal_recv_connreq(sock, &connreq, kranal_tunables.kra_timeout);
                CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer->rap_ip), peer->rap_port, rc);

        if (connreq.racr_srcnid != peer->rap_nid) {
                CERROR("Unexpected srcnid from %u.%u.%u.%u/%d: "
                       "received "LPX64" expected "LPX64"\n",
                       HIPQUAD(peer->rap_ip), peer->rap_port,
                       connreq.racr_srcnid, peer->rap_nid);

        if (connreq.racr_devid != dev->rad_id) {
                CERROR("Unexpected device id from %u.%u.%u.%u/%d: "
                       "received %d expected %d\n",
                       HIPQUAD(peer->rap_ip), peer->rap_port,
                       connreq.racr_devid, dev->rad_id);

        rc = kranal_set_conn_params(conn, &connreq,
                                    peer->rap_ip, peer->rap_port);

        *dst_nidp = connreq.racr_dstnid;

        kranal_conn_decref(conn);

kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)

                /* active: connd wants to connect to 'peer' */
                LASSERT (peer != NULL);
                LASSERT (peer->rap_connecting);

                rc = kranal_active_conn_handshake(peer, &dst_nid, &conn);

                write_lock_irqsave(&kranal_data.kra_global_lock, flags);

                if (!kranal_peer_active(peer)) {
                        /* raced with peer getting unlinked */
                        write_unlock_irqrestore(&kranal_data.kra_global_lock,
                        kranal_conn_decref(conn);

                peer_nid = peer->rap_nid;

                /* passive: listener accepted 'sock' */
                LASSERT (peer == NULL);

                rc = kranal_passive_conn_handshake(sock, &peer_nid,

                /* assume this is a new peer */
                peer = kranal_create_peer(peer_nid);
                        CERROR("Can't allocate peer for "LPX64"\n", peer_nid);
                        kranal_conn_decref(conn);

                write_lock_irqsave(&kranal_data.kra_global_lock, flags);

                peer2 = kranal_find_peer_locked(peer_nid);

                        /* peer_nid already in the peer table */
                        kranal_peer_decref(peer);

        LASSERT ((!new_peer) != (!kranal_peer_active(peer)));

        /* Refuse connection if peer thinks we are a different NID.  We check
         * this while holding the global lock, to synch with connection
         * destruction on NID change. */
        if (dst_nid != kranal_lib.libnal_ni.ni_pid.nid) {
                write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

                CERROR("Stale/bad connection with "LPX64
                       ": dst_nid "LPX64", expected "LPX64"\n",
                       peer_nid, dst_nid, kranal_lib.libnal_ni.ni_pid.nid);

        /* Refuse to duplicate an existing connection (both sides might try to
         * connect at once).  NB we return success!  We _are_ connected so we
         * _don't_ have any blocked txs to complete with failure. */
        rc = kranal_conn_isdup_locked(peer, conn);
                LASSERT (!list_empty(&peer->rap_conns));
                LASSERT (list_empty(&peer->rap_tx_queue));
                write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
                CWARN("Not creating duplicate connection to "LPX64": %d\n",

                /* peer table takes my ref on the new peer */
                list_add_tail(&peer->rap_list,
                              kranal_nid2peerlist(peer_nid));

        /* initialise timestamps before reaper looks at them */
        conn->rac_last_tx = conn->rac_last_rx = jiffies;

        kranal_peer_addref(peer);               /* +1 ref for conn */
        conn->rac_peer = peer;
        list_add_tail(&conn->rac_list, &peer->rap_conns);

        kranal_conn_addref(conn);               /* +1 ref for conn table */
        list_add_tail(&conn->rac_hashlist,
                      kranal_cqid2connlist(conn->rac_cqid));

        /* Schedule all packets blocking for a connection */
        while (!list_empty(&peer->rap_tx_queue)) {
                tx = list_entry(peer->rap_tx_queue.next,

                list_del(&tx->tx_list);
                kranal_post_fma(conn, tx);

        nstale = kranal_close_stale_conns_locked(peer, conn);

        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

        /* CAVEAT EMPTOR: passive peer can disappear NOW */

                CWARN("Closed %d stale conns to "LPX64"\n", nstale, peer_nid);

        CWARN("New connection to "LPX64" on devid[%d] = %d\n",
              peer_nid, conn->rac_device->rad_idx, conn->rac_device->rad_id);

        /* Ensure conn gets checked.  Transmits may have been queued and an
         * FMA event may have happened before it got in the cq hash table */
        kranal_schedule_conn(conn);

        kranal_peer_decref(peer);
        kranal_conn_decref(conn);

kranal_connect (kra_peer_t *peer)

        struct list_head zombies;

        LASSERT (peer->rap_connecting);

        CDEBUG(D_NET, "About to handshake "LPX64"\n", peer->rap_nid);

        rc = kranal_conn_handshake(NULL, peer);

        CDEBUG(D_NET, "Done handshake "LPX64": %d\n", peer->rap_nid, rc);

        write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        LASSERT (peer->rap_connecting);
        peer->rap_connecting = 0;

                /* kranal_conn_handshake() queues blocked txs immediately on
                 * success to avoid messages jumping the queue */
                LASSERT (list_empty(&peer->rap_tx_queue));

                /* reset reconnection timeouts */
                peer->rap_reconnect_interval = RANAL_MIN_RECONNECT_INTERVAL;
                peer->rap_reconnect_time = CURRENT_SECONDS;

                write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

        LASSERT (peer->rap_reconnect_interval != 0);
        peer->rap_reconnect_time = CURRENT_SECONDS + peer->rap_reconnect_interval;
        peer->rap_reconnect_interval = MIN(RANAL_MAX_RECONNECT_INTERVAL,
                                           2 * peer->rap_reconnect_interval);

        /* Grab all blocked packets while we have the global lock */
        list_add(&zombies, &peer->rap_tx_queue);
        list_del_init(&peer->rap_tx_queue);

        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

        if (list_empty(&zombies))

        CWARN("Dropping packets for "LPX64": connection failed\n",

                tx = list_entry(zombies.next, kra_tx_t, tx_list);

                list_del(&tx->tx_list);
                kranal_tx_done(tx, -EHOSTUNREACH);

        } while (!list_empty(&zombies));
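
/* Backoff sketch (illustrative, following the MIN/doubling above): after
 * successive failed handshakes a peer is retried after
 * RANAL_MIN_RECONNECT_INTERVAL, then 2x, 4x, ... that interval, capped
 * at RANAL_MAX_RECONNECT_INTERVAL; a successful handshake resets the
 * interval to the minimum. */
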
kranal_free_acceptsock (kra_acceptsock_t *ras)

        sock_release(ras->ras_sock);
        PORTAL_FREE(ras, sizeof(*ras));

kranal_listener (void *arg)

        struct sockaddr_in addr;
        struct socket     *sock;
        kra_acceptsock_t  *ras;
        unsigned long      flags;

        /* Parent thread holds kra_nid_mutex, and is blocked, or is about
         * to block, on kra_listener_signal */

        port = kranal_tunables.kra_port;
        snprintf(name, sizeof(name), "kranal_lstn%03d", port);
        kportal_daemonize(name);
        kportal_blockallsigs();

        init_waitqueue_entry(&wait, current);

        rc = kranal_create_sock(&sock);

        memset(&addr, 0, sizeof(addr));
        addr.sin_family      = AF_INET;
        addr.sin_port        = htons(port);
        addr.sin_addr.s_addr = INADDR_ANY;

        rc = sock->ops->bind(sock, (struct sockaddr *)&addr, sizeof(addr));
                CERROR("Can't bind to port %d\n", port);

        rc = sock->ops->listen(sock, kranal_tunables.kra_backlog);
                CERROR("Can't set listen backlog %d: %d\n",
                       kranal_tunables.kra_backlog, rc);

        LASSERT (kranal_data.kra_listener_sock == NULL);
        kranal_data.kra_listener_sock = sock;

        /* unblock waiting parent */
        LASSERT (kranal_data.kra_listener_shutdown == 0);
        up(&kranal_data.kra_listener_signal);

        /* Wake me any time something happens on my socket */
        add_wait_queue(sock->sk->sk_sleep, &wait);

        while (kranal_data.kra_listener_shutdown == 0) {

                PORTAL_ALLOC(ras, sizeof(*ras));
                        CERROR("Out of Memory: pausing...\n");

                ras->ras_sock = NULL;

                if (ras->ras_sock == NULL) {
                        ras->ras_sock = sock_alloc();
                        if (ras->ras_sock == NULL) {
                                CERROR("Can't allocate socket: pausing...\n");

                        /* XXX this should add a ref to sock->ops->owner, if
                         * TCP could be a module */
                        ras->ras_sock->type = sock->type;
                        ras->ras_sock->ops  = sock->ops;

                set_current_state(TASK_INTERRUPTIBLE);

                rc = sock->ops->accept(sock, ras->ras_sock, O_NONBLOCK);

                /* Sleep for socket activity? */
                if (rc == -EAGAIN &&
                    kranal_data.kra_listener_shutdown == 0)

                set_current_state(TASK_RUNNING);

                        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);

                        list_add_tail(&ras->ras_list,
                                      &kranal_data.kra_connd_acceptq);

                        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
                        wake_up(&kranal_data.kra_connd_waitq);

                if (rc != -EAGAIN) {
                        CERROR("Accept failed: %d, pausing...\n", rc);

        if (ras->ras_sock != NULL)
                sock_release(ras->ras_sock);
        PORTAL_FREE(ras, sizeof(*ras));

        remove_wait_queue(sock->sk->sk_sleep, &wait);

        kranal_data.kra_listener_sock = NULL;

        /* set completion status and unblock thread waiting for me
         * (parent on startup failure, executioner on normal shutdown) */
        kranal_data.kra_listener_shutdown = rc;
        up(&kranal_data.kra_listener_signal);
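
/* Illustrative note: kra_listener_signal acts as a one-shot completion
 * in both directions.  kranal_start_listener() spawns this thread and
 * down()s until it has bound and listened (or failed) and up()ed;
 * kranal_stop_listener() sets kra_listener_shutdown, wakes the socket
 * sleepers and down()s until this thread has released the socket and
 * up()ed on its way out. */
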
kranal_start_listener (void)

        CDEBUG(D_NET, "Starting listener\n");

        /* Called holding kra_nid_mutex: listener stopped */
        LASSERT (kranal_data.kra_listener_sock == NULL);

        kranal_data.kra_listener_shutdown = 0;
        pid = kernel_thread(kranal_listener, NULL, 0);
                CERROR("Can't spawn listener: %ld\n", pid);

        /* Block until listener has started up. */
        down(&kranal_data.kra_listener_signal);

        rc = kranal_data.kra_listener_shutdown;
        LASSERT ((rc != 0) == (kranal_data.kra_listener_sock == NULL));

        CDEBUG(D_NET, "Listener %ld started OK\n", pid);

kranal_stop_listener(int clear_acceptq)

        struct list_head  zombie_accepts;
        unsigned long     flags;
        kra_acceptsock_t *ras;

        CDEBUG(D_NET, "Stopping listener\n");

        /* Called holding kra_nid_mutex: listener running */
        LASSERT (kranal_data.kra_listener_sock != NULL);

        kranal_data.kra_listener_shutdown = 1;
        wake_up_all(kranal_data.kra_listener_sock->sk->sk_sleep);

        /* Block until listener has torn down. */
        down(&kranal_data.kra_listener_signal);

        LASSERT (kranal_data.kra_listener_sock == NULL);
        CDEBUG(D_NET, "Listener stopped\n");

        /* Close any unhandled accepts */
        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);

        list_add(&zombie_accepts, &kranal_data.kra_connd_acceptq);
        list_del_init(&kranal_data.kra_connd_acceptq);

        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);

        while (!list_empty(&zombie_accepts)) {
                ras = list_entry(zombie_accepts.next,
                                 kra_acceptsock_t, ras_list);
                list_del(&ras->ras_list);
                kranal_free_acceptsock(ras);

kranal_listener_procint(ctl_table *table, int write, struct file *filp,
                        void *buffer, size_t *lenp)

        int *tunable = (int *)table->data;

        /* No race with nal initialisation since the nal is set up all the
         * time it's loaded.  When that changes, change this! */
        LASSERT (kranal_data.kra_init == RANAL_INIT_ALL);

        down(&kranal_data.kra_nid_mutex);

        LASSERT (tunable == &kranal_tunables.kra_port ||
                 tunable == &kranal_tunables.kra_backlog);

        rc = proc_dointvec(table, write, filp, buffer, lenp);

            (*tunable != old_val ||
             kranal_data.kra_listener_sock == NULL)) {

                if (kranal_data.kra_listener_sock != NULL)
                        kranal_stop_listener(0);

                rc = kranal_start_listener();

                        CWARN("Unable to start listener with new tunable:"
                              " reverting to old value\n");

                        kranal_start_listener();

        up(&kranal_data.kra_nid_mutex);

        LASSERT (kranal_data.kra_init == RANAL_INIT_ALL);
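
/* Usage sketch (illustrative): writing one of the listener tunables
 * restarts the listener so the change takes effect immediately, e.g.
 * (proc path assumed from the sysctl table above):
 *
 *     echo 3001 > /proc/sys/ranal/port
 *
 * If the restart fails, the CWARN path above reverts to the old value
 * and restarts the old listener, so the NAL is never left silently
 * unreachable. */
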
kranal_set_mynid(ptl_nid_t nid)

        unsigned long flags;
        lib_ni_t     *ni = &kranal_lib.libnal_ni;

        CDEBUG(D_NET, "setting mynid to "LPX64" (old nid="LPX64")\n",
               nid, ni->ni_pid.nid);

        down(&kranal_data.kra_nid_mutex);

        if (nid == ni->ni_pid.nid) {
                /* no change of NID */
                up(&kranal_data.kra_nid_mutex);

        if (kranal_data.kra_listener_sock != NULL)
                kranal_stop_listener(1);

        write_lock_irqsave(&kranal_data.kra_global_lock, flags);
        kranal_data.kra_peerstamp++;
        ni->ni_pid.nid = nid;
        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

        /* Delete all existing peers and their connections after new
         * NID/connstamp set to ensure no old connections in our brave
         * new world */
        kranal_del_peer(PTL_NID_ANY, 0);

        if (nid != PTL_NID_ANY)
                rc = kranal_start_listener();

        up(&kranal_data.kra_nid_mutex);

kranal_create_peer (ptl_nid_t nid)

        LASSERT (nid != PTL_NID_ANY);

        PORTAL_ALLOC(peer, sizeof(*peer));

        memset(peer, 0, sizeof(*peer));         /* zero flags etc */

        peer->rap_nid = nid;
        atomic_set(&peer->rap_refcount, 1);     /* 1 ref for caller */

        INIT_LIST_HEAD(&peer->rap_list);
        INIT_LIST_HEAD(&peer->rap_connd_list);
        INIT_LIST_HEAD(&peer->rap_conns);
        INIT_LIST_HEAD(&peer->rap_tx_queue);

        peer->rap_reconnect_time = CURRENT_SECONDS;
        peer->rap_reconnect_interval = RANAL_MIN_RECONNECT_INTERVAL;

        atomic_inc(&kranal_data.kra_npeers);

kranal_destroy_peer (kra_peer_t *peer)

        CDEBUG(D_NET, "peer "LPX64" %p deleted\n", peer->rap_nid, peer);

        LASSERT (atomic_read(&peer->rap_refcount) == 0);
        LASSERT (peer->rap_persistence == 0);
        LASSERT (!kranal_peer_active(peer));
        LASSERT (!peer->rap_connecting);
        LASSERT (list_empty(&peer->rap_conns));
        LASSERT (list_empty(&peer->rap_tx_queue));
        LASSERT (list_empty(&peer->rap_connd_list));

        PORTAL_FREE(peer, sizeof(*peer));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */
        atomic_dec(&kranal_data.kra_npeers);

kranal_find_peer_locked (ptl_nid_t nid)

        struct list_head *peer_list = kranal_nid2peerlist(nid);
        struct list_head *tmp;

        list_for_each (tmp, peer_list) {

                peer = list_entry(tmp, kra_peer_t, rap_list);

                LASSERT (peer->rap_persistence > 0 ||     /* persistent peer */
                         !list_empty(&peer->rap_conns));  /* active conn */

                if (peer->rap_nid != nid)

                CDEBUG(D_NET, "got peer [%p] -> "LPX64" (%d)\n",
                       peer, nid, atomic_read(&peer->rap_refcount));

kranal_find_peer (ptl_nid_t nid)

        read_lock(&kranal_data.kra_global_lock);
        peer = kranal_find_peer_locked(nid);
        if (peer != NULL)                       /* +1 ref for caller? */
                kranal_peer_addref(peer);
        read_unlock(&kranal_data.kra_global_lock);

kranal_unlink_peer_locked (kra_peer_t *peer)

        LASSERT (peer->rap_persistence == 0);
        LASSERT (list_empty(&peer->rap_conns));

        LASSERT (kranal_peer_active(peer));
        list_del_init(&peer->rap_list);

        /* lose peerlist's ref */
        kranal_peer_decref(peer);

kranal_get_peer_info (int index, ptl_nid_t *nidp, __u32 *ipp, int *portp,

        struct list_head *ptmp;

        read_lock(&kranal_data.kra_global_lock);

        for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {

                list_for_each(ptmp, &kranal_data.kra_peers[i]) {

                        peer = list_entry(ptmp, kra_peer_t, rap_list);
                        LASSERT (peer->rap_persistence > 0 ||
                                 !list_empty(&peer->rap_conns));

                        *nidp = peer->rap_nid;
                        *ipp = peer->rap_ip;
                        *portp = peer->rap_port;
                        *persistencep = peer->rap_persistence;

                        read_unlock(&kranal_data.kra_global_lock);

        read_unlock(&kranal_data.kra_global_lock);

kranal_add_persistent_peer (ptl_nid_t nid, __u32 ip, int port)

        unsigned long flags;

        if (nid == PTL_NID_ANY)

        peer = kranal_create_peer(nid);

        write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        peer2 = kranal_find_peer_locked(nid);
        if (peer2 != NULL) {
                kranal_peer_decref(peer);

                /* peer table takes existing ref on peer */
                list_add_tail(&peer->rap_list,
                              kranal_nid2peerlist(nid));

        peer->rap_port = port;
        peer->rap_persistence++;

        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

kranal_del_peer_locked (kra_peer_t *peer, int single_share)

        struct list_head *ctmp;
        struct list_head *cnxt;

                peer->rap_persistence = 0;
        else if (peer->rap_persistence > 0)
                peer->rap_persistence--;

        if (peer->rap_persistence != 0)

        if (list_empty(&peer->rap_conns)) {
                kranal_unlink_peer_locked(peer);

                list_for_each_safe(ctmp, cnxt, &peer->rap_conns) {
                        conn = list_entry(ctmp, kra_conn_t, rac_list);

                        kranal_close_conn_locked(conn, 0);

                /* peer unlinks itself when last conn is closed */

kranal_del_peer (ptl_nid_t nid, int single_share)

        unsigned long     flags;
        struct list_head *ptmp;
        struct list_head *pnxt;

        write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        if (nid != PTL_NID_ANY)
                lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;

                hi = kranal_data.kra_peer_hash_size - 1;

        for (i = lo; i <= hi; i++) {
                list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
                        peer = list_entry(ptmp, kra_peer_t, rap_list);
                        LASSERT (peer->rap_persistence > 0 ||
                                 !list_empty(&peer->rap_conns));

                        if (!(nid == PTL_NID_ANY || peer->rap_nid == nid))

                        kranal_del_peer_locked(peer, single_share);
                        rc = 0;         /* matched something */

        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

kranal_get_conn_by_idx (int index)

        struct list_head *ptmp;
        struct list_head *ctmp;

        read_lock (&kranal_data.kra_global_lock);

        for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
                list_for_each (ptmp, &kranal_data.kra_peers[i]) {

                        peer = list_entry(ptmp, kra_peer_t, rap_list);
                        LASSERT (peer->rap_persistence > 0 ||
                                 !list_empty(&peer->rap_conns));

                        list_for_each (ctmp, &peer->rap_conns) {

                                conn = list_entry(ctmp, kra_conn_t, rac_list);
                                CDEBUG(D_NET, "++conn[%p] -> "LPX64" (%d)\n",
                                       conn, conn->rac_peer->rap_nid,
                                       atomic_read(&conn->rac_refcount));
                                atomic_inc(&conn->rac_refcount);
                                read_unlock(&kranal_data.kra_global_lock);

        read_unlock(&kranal_data.kra_global_lock);

kranal_close_peer_conns_locked (kra_peer_t *peer, int why)

        struct list_head *ctmp;
        struct list_head *cnxt;

        list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
                conn = list_entry(ctmp, kra_conn_t, rac_list);

                kranal_close_conn_locked(conn, why);

kranal_close_matching_conns (ptl_nid_t nid)

        unsigned long     flags;
        struct list_head *ptmp;
        struct list_head *pnxt;

        write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        if (nid != PTL_NID_ANY)
                lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;

                hi = kranal_data.kra_peer_hash_size - 1;

        for (i = lo; i <= hi; i++) {
                list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {

                        peer = list_entry(ptmp, kra_peer_t, rap_list);
                        LASSERT (peer->rap_persistence > 0 ||
                                 !list_empty(&peer->rap_conns));

                        if (!(nid == PTL_NID_ANY || nid == peer->rap_nid))

                        count += kranal_close_peer_conns_locked(peer, 0);

        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

        /* wildcards always succeed */
        if (nid == PTL_NID_ANY)

        return (count == 0) ? -ENOENT : 0;

kranal_cmd(struct portals_cfg *pcfg, void *private)

        LASSERT (pcfg != NULL);

        switch(pcfg->pcfg_command) {
        case NAL_CMD_GET_PEER: {
                int share_count = 0;

                rc = kranal_get_peer_info(pcfg->pcfg_count,
                                          &nid, &ip, &port, &share_count);
                pcfg->pcfg_nid   = nid;
                pcfg->pcfg_size  = 0;
                pcfg->pcfg_misc  = port;
                pcfg->pcfg_count = 0;
                pcfg->pcfg_wait  = share_count;

        case NAL_CMD_ADD_PEER: {
                rc = kranal_add_persistent_peer(pcfg->pcfg_nid,
                                                pcfg->pcfg_id,    /* IP */
                                                pcfg->pcfg_misc); /* port */

        case NAL_CMD_DEL_PEER: {
                rc = kranal_del_peer(pcfg->pcfg_nid,
                                     /* flags == single_share */
                                     pcfg->pcfg_flags != 0);

        case NAL_CMD_GET_CONN: {
                kra_conn_t *conn = kranal_get_conn_by_idx(pcfg->pcfg_count);

                pcfg->pcfg_nid   = conn->rac_peer->rap_nid;
                pcfg->pcfg_id    = conn->rac_device->rad_id;
                pcfg->pcfg_misc  = 0;
                pcfg->pcfg_flags = 0;
                kranal_conn_decref(conn);

        case NAL_CMD_CLOSE_CONNECTION: {
                rc = kranal_close_matching_conns(pcfg->pcfg_nid);

        case NAL_CMD_REGISTER_MYNID: {
                if (pcfg->pcfg_nid == PTL_NID_ANY)

                rc = kranal_set_mynid(pcfg->pcfg_nid);

kranal_free_txdescs(struct list_head *freelist)

        while (!list_empty(freelist)) {
                tx = list_entry(freelist->next, kra_tx_t, tx_list);

                list_del(&tx->tx_list);
                PORTAL_FREE(tx->tx_phys, PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
                PORTAL_FREE(tx, sizeof(*tx));

kranal_alloc_txdescs(struct list_head *freelist, int n)

        int isnblk = (freelist == &kranal_data.kra_idle_nblk_txs);

        LASSERT (freelist == &kranal_data.kra_idle_txs ||
                 freelist == &kranal_data.kra_idle_nblk_txs);
        LASSERT (list_empty(freelist));

        for (i = 0; i < n; i++) {

                PORTAL_ALLOC(tx, sizeof(*tx));
                        CERROR("Can't allocate %stx[%d]\n",
                               isnblk ? "nblk " : "", i);
                        kranal_free_txdescs(freelist);

                PORTAL_ALLOC(tx->tx_phys,
                             PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
                if (tx->tx_phys == NULL) {
                        CERROR("Can't allocate %stx[%d]->tx_phys\n",
                               isnblk ? "nblk " : "", i);

                        PORTAL_FREE(tx, sizeof(*tx));
                        kranal_free_txdescs(freelist);

                tx->tx_isnblk = isnblk;
                tx->tx_buftype = RANAL_BUF_NONE;
                tx->tx_msg.ram_type = RANAL_MSG_NONE;

                list_add(&tx->tx_list, freelist);

kranal_device_init(int id, kra_device_t *dev)

        const int total_ntx = RANAL_NTX + RANAL_NTX_NBLK;

        rrc = RapkGetDeviceByIndex(id, kranal_device_callback,

        if (rrc != RAP_SUCCESS) {
                CERROR("Can't get Rapidarray Device %d: %d\n", id, rrc);

        rrc = RapkReserveRdma(dev->rad_handle, total_ntx);
        if (rrc != RAP_SUCCESS) {
                CERROR("Can't reserve %d RDMA descriptors"
                       " for device %d: %d\n", total_ntx, id, rrc);

        rrc = RapkCreateCQ(dev->rad_handle, total_ntx, RAP_CQTYPE_SEND,
                           &dev->rad_rdma_cqh);
        if (rrc != RAP_SUCCESS) {
                CERROR("Can't create rdma cq size %d"
                       " for device %d: %d\n", total_ntx, id, rrc);

        rrc = RapkCreateCQ(dev->rad_handle, RANAL_FMA_CQ_SIZE, RAP_CQTYPE_RECV,

        if (rrc != RAP_SUCCESS) {
                CERROR("Can't create fma cq size %d"
                       " for device %d: %d\n", RANAL_FMA_CQ_SIZE, id, rrc);

        RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);

        RapkReleaseDevice(dev->rad_handle);

kranal_device_fini(kra_device_t *dev)

        LASSERT(dev->rad_scheduler == NULL);
        RapkDestroyCQ(dev->rad_handle, dev->rad_fma_cqh);
        RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
        RapkReleaseDevice(dev->rad_handle);

kranal_api_shutdown (nal_t *nal)

        unsigned long flags;

        if (nal->nal_refct != 0) {
                /* This module got the first ref */
                PORTAL_MODULE_UNUSE;

        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
               atomic_read(&portal_kmemory));

        LASSERT (nal == &kranal_api);

        switch (kranal_data.kra_init) {

                CERROR("Unexpected state %d\n", kranal_data.kra_init);

        case RANAL_INIT_ALL:
                /* stop calls to nal_cmd */
                libcfs_nal_cmd_unregister(RANAL);
                /* No new persistent peers */

                /* resetting my NID unadvertises me, removes my listener
                 * and nukes all current peers */
                kranal_set_mynid(PTL_NID_ANY);

                /* no new peers or conns */

                /* Wait for all peer/conn state to clean up */

                while (atomic_read(&kranal_data.kra_nconns) != 0 ||
                       atomic_read(&kranal_data.kra_npeers) != 0) {

                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                               "waiting for %d peers and %d conns to close down\n",
                               atomic_read(&kranal_data.kra_npeers),
                               atomic_read(&kranal_data.kra_nconns));

        case RANAL_INIT_LIB:
                lib_fini(&kranal_lib);

        case RANAL_INIT_DATA:

        /* Conn/Peer state all cleaned up BEFORE setting shutdown, so threads
         * don't have to worry about shutdown races */
        LASSERT (atomic_read(&kranal_data.kra_nconns) == 0);
        LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);

        /* flag threads to terminate; wake and wait for them to die */
        kranal_data.kra_shutdown = 1;

        for (i = 0; i < kranal_data.kra_ndevs; i++) {
                kra_device_t *dev = &kranal_data.kra_devices[i];

                LASSERT (list_empty(&dev->rad_ready_conns));
                LASSERT (list_empty(&dev->rad_new_conns));
                LASSERT (dev->rad_nphysmap == 0);
                LASSERT (dev->rad_nppphysmap == 0);
                LASSERT (dev->rad_nvirtmap == 0);
                LASSERT (dev->rad_nobvirtmap == 0);

                spin_lock_irqsave(&dev->rad_lock, flags);
                wake_up(&dev->rad_waitq);
                spin_unlock_irqrestore(&dev->rad_lock, flags);

        spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
        wake_up_all(&kranal_data.kra_reaper_waitq);
        spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);

        LASSERT (list_empty(&kranal_data.kra_connd_peers));
        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
        wake_up_all(&kranal_data.kra_connd_waitq);
        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);

        while (atomic_read(&kranal_data.kra_nthreads) != 0) {

                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                       "Waiting for %d threads to terminate\n",
                       atomic_read(&kranal_data.kra_nthreads));

        LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
        if (kranal_data.kra_peers != NULL) {
                for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
                        LASSERT (list_empty(&kranal_data.kra_peers[i]));

                PORTAL_FREE(kranal_data.kra_peers,
                            sizeof (struct list_head) *
                            kranal_data.kra_peer_hash_size);

        LASSERT (atomic_read(&kranal_data.kra_nconns) == 0);
        if (kranal_data.kra_conns != NULL) {
                for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
                        LASSERT (list_empty(&kranal_data.kra_conns[i]));

                PORTAL_FREE(kranal_data.kra_conns,
                            sizeof (struct list_head) *
                            kranal_data.kra_conn_hash_size);

        for (i = 0; i < kranal_data.kra_ndevs; i++)
                kranal_device_fini(&kranal_data.kra_devices[i]);

        kranal_free_txdescs(&kranal_data.kra_idle_txs);
        kranal_free_txdescs(&kranal_data.kra_idle_nblk_txs);

        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
               atomic_read(&portal_kmemory));
        printk(KERN_INFO "Lustre: RapidArray NAL unloaded (final mem %d)\n",
               atomic_read(&portal_kmemory));

        kranal_data.kra_init = RANAL_INIT_NOTHING;

kranal_api_startup (nal_t *nal, ptl_pid_t requested_pid,
                    ptl_ni_limits_t *requested_limits,
                    ptl_ni_limits_t *actual_limits)

        ptl_process_id_t process_id;
        int              pkmem = atomic_read(&portal_kmemory);

        LASSERT (nal == &kranal_api);

        if (nal->nal_refct != 0) {
                if (actual_limits != NULL)
                        *actual_limits = kranal_lib.libnal_ni.ni_actual_limits;
                /* This module got the first ref */

        LASSERT (kranal_data.kra_init == RANAL_INIT_NOTHING);

        memset(&kranal_data, 0, sizeof(kranal_data)); /* zero pointers, flags etc */

        /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
         * a unique (for all time) connstamp so we can uniquely identify
         * the sender.  The connstamp is an incrementing counter
         * initialised with seconds + microseconds at startup time.  So we
         * rely on NOT creating connections more frequently on average than
         * 1MHz to ensure we don't use old connstamps when we reboot. */
        do_gettimeofday(&tv);
        kranal_data.kra_connstamp =
        kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
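
        /* Worked example (illustrative): at tv_sec == 1100000000,
         * tv_usec == 123456 both stamps start at 1100000000123456.  Since
         * kra_connstamp increments once per connection, averaging fewer
         * than a million connections per second guarantees stamps from a
         * later boot never collide with stamps from an earlier one. */
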
        init_MUTEX(&kranal_data.kra_nid_mutex);
        init_MUTEX_LOCKED(&kranal_data.kra_listener_signal);

        rwlock_init(&kranal_data.kra_global_lock);

        for (i = 0; i < RANAL_MAXDEVS; i++ ) {
                kra_device_t *dev = &kranal_data.kra_devices[i];

                INIT_LIST_HEAD(&dev->rad_ready_conns);
                INIT_LIST_HEAD(&dev->rad_new_conns);
                init_waitqueue_head(&dev->rad_waitq);
                spin_lock_init(&dev->rad_lock);

        kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
        init_waitqueue_head(&kranal_data.kra_reaper_waitq);
        spin_lock_init(&kranal_data.kra_reaper_lock);

        INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
        INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
        init_waitqueue_head(&kranal_data.kra_connd_waitq);
        spin_lock_init(&kranal_data.kra_connd_lock);

        INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
        INIT_LIST_HEAD(&kranal_data.kra_idle_nblk_txs);
        init_waitqueue_head(&kranal_data.kra_idle_tx_waitq);
        spin_lock_init(&kranal_data.kra_tx_lock);

        /* OK to call kranal_api_shutdown() to cleanup now */
        kranal_data.kra_init = RANAL_INIT_DATA;

        kranal_data.kra_peer_hash_size = RANAL_PEER_HASH_SIZE;
        PORTAL_ALLOC(kranal_data.kra_peers,
                     sizeof(struct list_head) * kranal_data.kra_peer_hash_size);
        if (kranal_data.kra_peers == NULL)

        for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
                INIT_LIST_HEAD(&kranal_data.kra_peers[i]);

        kranal_data.kra_conn_hash_size = RANAL_PEER_HASH_SIZE;
        PORTAL_ALLOC(kranal_data.kra_conns,
                     sizeof(struct list_head) * kranal_data.kra_conn_hash_size);
        if (kranal_data.kra_conns == NULL)

        for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
                INIT_LIST_HEAD(&kranal_data.kra_conns[i]);

        rc = kranal_alloc_txdescs(&kranal_data.kra_idle_txs, RANAL_NTX);

        rc = kranal_alloc_txdescs(&kranal_data.kra_idle_nblk_txs, RANAL_NTX_NBLK);

        process_id.pid = requested_pid;
        process_id.nid = PTL_NID_ANY;           /* don't know my NID yet */

        rc = lib_init(&kranal_lib, nal, process_id,
                      requested_limits, actual_limits);
                CERROR("lib_init failed: error %d\n", rc);

        /* lib interface initialised */
        kranal_data.kra_init = RANAL_INIT_LIB;
        /*****************************************************/

        rc = kranal_thread_start(kranal_reaper, NULL);
                CERROR("Can't spawn ranal reaper: %d\n", rc);

        for (i = 0; i < RANAL_N_CONND; i++) {
                rc = kranal_thread_start(kranal_connd, (void *)(unsigned long)i);
                        CERROR("Can't spawn ranal connd[%d]: %d\n",

        LASSERT (kranal_data.kra_ndevs == 0);

        for (i = 0; i < sizeof(kranal_devids)/sizeof(kranal_devids[0]); i++) {
                LASSERT (i < RANAL_MAXDEVS);

                dev = &kranal_data.kra_devices[kranal_data.kra_ndevs];

                rc = kranal_device_init(kranal_devids[i], dev);
                        kranal_data.kra_ndevs++;

        if (kranal_data.kra_ndevs == 0) {
                CERROR("Can't initialise any RapidArray devices\n");

        for (i = 0; i < kranal_data.kra_ndevs; i++) {
                dev = &kranal_data.kra_devices[i];
                rc = kranal_thread_start(kranal_scheduler, dev);
                        CERROR("Can't spawn ranal scheduler[%d]: %d\n",

        rc = libcfs_nal_cmd_register(RANAL, &kranal_cmd, NULL);
                CERROR("Can't initialise command interface (rc = %d)\n", rc);

        /* flag everything initialised */
        kranal_data.kra_init = RANAL_INIT_ALL;
        /*****************************************************/

        CDEBUG(D_MALLOC, "initial kmem %d\n", atomic_read(&portal_kmemory));
        printk(KERN_INFO "Lustre: RapidArray NAL loaded "
               "(initial mem %d)\n", pkmem);

        kranal_api_shutdown(&kranal_api);

kranal_module_fini (void)

        if (kranal_tunables.kra_sysctl != NULL)
                unregister_sysctl_table(kranal_tunables.kra_sysctl);

        PtlNIFini(kranal_ni);

        ptl_unregister_nal(RANAL);

kranal_module_init (void)

        /* the following must be sizeof(int) for
         * proc_dointvec/kranal_listener_procint() */
        LASSERT (sizeof(kranal_tunables.kra_timeout) == sizeof(int));
        LASSERT (sizeof(kranal_tunables.kra_listener_timeout) == sizeof(int));
        LASSERT (sizeof(kranal_tunables.kra_backlog) == sizeof(int));
        LASSERT (sizeof(kranal_tunables.kra_port) == sizeof(int));
        LASSERT (sizeof(kranal_tunables.kra_max_immediate) == sizeof(int));

        kranal_api.nal_ni_init = kranal_api_startup;
        kranal_api.nal_ni_fini = kranal_api_shutdown;

        /* Initialise dynamic tunables to defaults once only */
        kranal_tunables.kra_timeout          = RANAL_TIMEOUT;
        kranal_tunables.kra_listener_timeout = RANAL_LISTENER_TIMEOUT;
        kranal_tunables.kra_backlog          = RANAL_BACKLOG;
        kranal_tunables.kra_port             = RANAL_PORT;
        kranal_tunables.kra_max_immediate    = RANAL_MAX_IMMEDIATE;

        rc = ptl_register_nal(RANAL, &kranal_api);
                CERROR("Can't register RANAL: %d\n", rc);
                return -ENOMEM; /* or something... */

        /* Pure gateways want the NAL started up at module load time... */
        rc = PtlNIInit(RANAL, LUSTRE_SRV_PTL_PID, NULL, NULL, &kranal_ni);
        if (rc != PTL_OK && rc != PTL_IFACE_DUP) {
                ptl_unregister_nal(RANAL);

        kranal_tunables.kra_sysctl =
                register_sysctl_table(kranal_top_ctl_table, 0);
        if (kranal_tunables.kra_sysctl == NULL) {
                CERROR("Can't register sysctl table\n");
                PtlNIFini(kranal_ni);
                ptl_unregister_nal(RANAL);

MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
MODULE_DESCRIPTION("Kernel RapidArray NAL v0.01");
MODULE_LICENSE("GPL");

module_init(kranal_module_init);
module_exit(kranal_module_fini);