1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2004 Cluster File Systems, Inc.
5 * Author: Eric Barton <eric@bartonsoftware.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
/* Module-wide globals: the portals network-interface handle, the shared
 * NAL state, and the sysctl-visible tunables used throughout this file. */
27 ptl_handle_ni_t kranal_ni;
28 kra_data_t kranal_data;
29 kra_tunables_t kranal_tunables;
/* Binary-sysctl IDs for the per-tunable entries in kranal_ctl_table. */
32 #define RANAL_SYSCTL_TIMEOUT 1
33 #define RANAL_SYSCTL_LISTENER_TIMEOUT 2
34 #define RANAL_SYSCTL_BACKLOG 3
35 #define RANAL_SYSCTL_PORT 4
36 #define RANAL_SYSCTL_MAX_IMMEDIATE 5
/* ID of the top-level "ranal" sysctl directory. */
38 #define RANAL_SYSCTL 202
/* Per-tunable sysctl entries ({id, name, data, maxlen, mode, ..., handler}).
 * "backlog" and "port" are handled by kranal_listener_procint (defined later
 * in this file) so the listener is restarted when they change; the others are
 * plain integers serviced by proc_dointvec. */
40 static ctl_table kranal_ctl_table[] = {
41 {RANAL_SYSCTL_TIMEOUT, "timeout",
42 &kranal_tunables.kra_timeout, sizeof(int),
43 0644, NULL, &proc_dointvec},
44 {RANAL_SYSCTL_LISTENER_TIMEOUT, "listener_timeout",
45 &kranal_tunables.kra_listener_timeout, sizeof(int),
46 0644, NULL, &proc_dointvec},
47 {RANAL_SYSCTL_BACKLOG, "backlog",
48 &kranal_tunables.kra_backlog, sizeof(int),
49 0644, NULL, kranal_listener_procint},
50 {RANAL_SYSCTL_PORT, "port",
51 &kranal_tunables.kra_port, sizeof(int),
52 0644, NULL, kranal_listener_procint},
53 {RANAL_SYSCTL_MAX_IMMEDIATE, "max_immediate",
54 &kranal_tunables.kra_max_immediate, sizeof(int),
55 0644, NULL, &proc_dointvec},
/* Top-level table hooking the read-only (0555) "ranal" directory into sysctl. */
59 static ctl_table kranal_top_ctl_table[] = {
60 {RANAL_SYSCTL, "ranal", NULL, 0, 0555, kranal_ctl_table},
/* Send 'nob' bytes from 'buffer' on 'sock' in a single non-blocking
 * (MSG_DONTWAIT) sendmsg.  Relies on the send buffer sized in
 * kranal_create_sock() being large enough that the send completes fully. */
66 kranal_sock_write (struct socket *sock, void *buffer, int nob)
69 mm_segment_t oldmm = get_fs();
81 .msg_flags = MSG_DONTWAIT
84 /* We've set up the socket's send buffer to be large enough for
85 * everything we send, so a single non-blocking send should
86 * complete without error. */
89 rc = sock_sendmsg(sock, &msg, iov.iov_len);
/* Receive exactly 'nob' bytes into 'buffer', giving up after 'timeout'
 * seconds overall.  Before each recv the remaining time ('ticks') is
 * re-armed as the socket's SO_RCVTIMEO; 'ticks' is decremented by the
 * jiffies each recv actually took.  Peer closing early (rc == 0,
 * presumably -- the test is elided here) yields -ECONNABORTED. */
96 kranal_sock_read (struct socket *sock, void *buffer, int nob, int timeout)
99 mm_segment_t oldmm = get_fs();
100 long ticks = timeout * HZ;
112 struct msghdr msg = {
122 /* Set receive timeout to remaining time */
123 tv = (struct timeval) {
124 .tv_sec = ticks / HZ,
125 .tv_usec = ((ticks % HZ) * 1000000) / HZ
128 rc = sock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
129 (char *)&tv, sizeof(tv));
132 CERROR("Can't set socket recv timeout %d: %d\n",
139 rc = sock_recvmsg(sock, &msg, iov.iov_len, 0);
140 ticks -= jiffies - then;
147 return -ECONNABORTED;
149 buffer = ((char *)buffer) + rc;
/* Create a TCP socket set up for connreq handshakes: the send buffer is
 * sized for two connreqs so connreq sends never block (see
 * kranal_sock_write), and SO_REUSEADDR is set so ports can be rebound
 * promptly. */
161 kranal_create_sock(struct socket **sockp)
166 mm_segment_t oldmm = get_fs();
168 rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
170 CERROR("Can't create socket: %d\n", rc);
174 /* Ensure sending connection info doesn't block */
175 option = 2 * sizeof(kra_connreq_t);
177 rc = sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
178 (char *)&option, sizeof(option));
181 CERROR("Can't set send buffer %d: %d\n", option, rc);
187 rc = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
188 (char *)&option, sizeof(option));
191 CERROR("Can't set SO_REUSEADDR: %d\n", rc);
/* Sleep uninterruptibly for 'ticks' jiffies. */
204 kranal_pause(int ticks)
206 set_current_state(TASK_UNINTERRUPTIBLE);
207 schedule_timeout(ticks);
/* Fill in the wire connection request for 'conn': magic/version for
 * validation on the far side, source/destination NIDs, the peer/conn
 * stamps used for stale- and duplicate-connection detection, our
 * timeout, and the RapidArray RI parameters the peer needs to set up
 * its end (RapkGetRiParams must succeed). */
211 kranal_pack_connreq(kra_connreq_t *connreq, kra_conn_t *conn)
215 memset(connreq, 0, sizeof(*connreq));
217 connreq->racr_magic = RANAL_MSG_MAGIC;
218 connreq->racr_version = RANAL_MSG_VERSION;
219 connreq->racr_devid = conn->rac_device->rad_id;
220 connreq->racr_srcnid = kranal_lib.libnal_ni.ni_pid.nid;
221 connreq->racr_dstnid = conn->rac_peer->rap_nid;
222 connreq->racr_peerstamp = kranal_data.kra_peerstamp;
223 connreq->racr_connstamp = conn->rac_my_connstamp;
224 connreq->racr_timeout = conn->rac_timeout;
226 rrc = RapkGetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
227 LASSERT(rrc == RAP_SUCCESS);
/* Read a connreq from 'sock' (bounded by 'timeout' seconds) and
 * validate it.  If the magic only matches after byte-swapping, the
 * peer is opposite-endian and every field is swabbed in place.
 * Rejects: bad magic, wrong RANAL_MSG_VERSION, wildcard NIDs, and
 * timeouts below RANAL_MIN_TIMEOUT. */
231 kranal_recv_connreq(struct socket *sock, kra_connreq_t *connreq, int timeout)
235 rc = kranal_sock_read(sock, connreq, sizeof(*connreq), timeout);
237 CERROR("Read failed: %d\n", rc);
241 if (connreq->racr_magic != RANAL_MSG_MAGIC) {
242 if (__swab32(connreq->racr_magic) != RANAL_MSG_MAGIC) {
243 CERROR("Unexpected magic %08x\n", connreq->racr_magic);
        /* opposite-endian peer: fix up every field */
247 __swab32s(&connreq->racr_magic);
248 __swab16s(&connreq->racr_version);
249 __swab16s(&connreq->racr_devid);
250 __swab64s(&connreq->racr_srcnid);
251 __swab64s(&connreq->racr_dstnid);
252 __swab64s(&connreq->racr_peerstamp);
253 __swab64s(&connreq->racr_connstamp);
254 __swab32s(&connreq->racr_timeout);
256 __swab32s(&connreq->racr_riparams.HostId);
257 __swab32s(&connreq->racr_riparams.FmaDomainHndl);
258 __swab32s(&connreq->racr_riparams.PTag);
259 __swab32s(&connreq->racr_riparams.CompletionCookie);
262 if (connreq->racr_version != RANAL_MSG_VERSION) {
263 CERROR("Unexpected version %d\n", connreq->racr_version);
267 if (connreq->racr_srcnid == PTL_NID_ANY ||
268 connreq->racr_dstnid == PTL_NID_ANY) {
269 CERROR("Received PTL_NID_ANY\n");
273 if (connreq->racr_timeout < RANAL_MIN_TIMEOUT) {
274 CERROR("Received timeout %d < MIN %d\n",
275 connreq->racr_timeout, RANAL_MIN_TIMEOUT);
/* Close every conn of 'peer' made obsolete by 'newconn': first any conn
 * with an older peerstamp (peer rebooted), then same-device conns with
 * older connstamps.  A loopback conn to ourselves is recognised by
 * matching both connstamps and left alone.  Caller must hold
 * kra_global_lock (per the _locked naming convention). */
283 kranal_close_stale_conns_locked (kra_peer_t *peer, kra_conn_t *newconn)
286 struct list_head *ctmp;
287 struct list_head *cnxt;
291 loopback = peer->rap_nid == kranal_lib.libnal_ni.ni_pid.nid;
293 list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
294 conn = list_entry(ctmp, kra_conn_t, rac_list);
        /* peer has rebooted since 'conn' was made: it must be stale */
299 if (conn->rac_peerstamp != newconn->rac_peerstamp) {
300 CDEBUG(D_NET, "Closing stale conn nid:"LPX64
301 " peerstamp:"LPX64"("LPX64")\n", peer->rap_nid,
302 conn->rac_peerstamp, newconn->rac_peerstamp);
303 LASSERT (conn->rac_peerstamp < newconn->rac_peerstamp);
305 kranal_close_conn_locked(conn, -ESTALE);
309 if (conn->rac_device != newconn->rac_device)
        /* both connstamps match ours: me connecting to myself */
313 newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
314 newconn->rac_peer_connstamp == conn->rac_my_connstamp)
317 LASSERT (conn->rac_peer_connstamp < newconn->rac_peer_connstamp);
319 CDEBUG(D_NET, "Closing stale conn nid:"LPX64
320 " connstamp:"LPX64"("LPX64")\n", peer->rap_nid,
321 conn->rac_peer_connstamp, newconn->rac_peer_connstamp);
324 kranal_close_conn_locked(conn, -ESTALE);
/* Decide whether 'newconn' duplicates an existing conn of 'peer',
 * comparing peerstamps, devices and connstamps in turn; loopback
 * conns (both connstamps mirrored) are excluded.  Caller must hold
 * kra_global_lock (per the _locked naming convention). */
331 kranal_conn_isdup_locked(kra_peer_t *peer, kra_conn_t *newconn)
334 struct list_head *tmp;
337 loopback = peer->rap_nid == kranal_lib.libnal_ni.ni_pid.nid;
339 list_for_each(tmp, &peer->rap_conns) {
340 conn = list_entry(tmp, kra_conn_t, rac_list);
342 /* 'newconn' is from an earlier version of 'peer'!!! */
343 if (newconn->rac_peerstamp < conn->rac_peerstamp)
346 /* 'conn' is from an earlier version of 'peer': it will be
347 * removed when we cull stale conns later on... */
348 if (newconn->rac_peerstamp > conn->rac_peerstamp)
351 /* Different devices are OK */
352 if (conn->rac_device != newconn->rac_device)
355 /* It's me connecting to myself */
357 newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
358 newconn->rac_peer_connstamp == conn->rac_my_connstamp)
361 /* 'newconn' is an earlier connection from 'peer'!!! */
362 if (newconn->rac_peer_connstamp < conn->rac_peer_connstamp)
365 /* 'conn' is an earlier connection from 'peer': it will be
366 * removed when we cull stale conns later on... */
367 if (newconn->rac_peer_connstamp > conn->rac_peer_connstamp)
370 /* 'newconn' has the SAME connection stamp; 'peer' isn't
371 * playing the game... */
/* Under kra_global_lock, stamp 'conn' with the next connstamp and
 * allocate a cqid not currently present in the conn hash table. */
379 kranal_set_conn_uniqueness (kra_conn_t *conn)
383 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
385 conn->rac_my_connstamp = kranal_data.kra_connstamp++;
387 do { /* allocate a unique cqid */
388 conn->rac_cqid = kranal_data.kra_next_cqid++;
389 } while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL);
392 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* Allocate and initialise a connection on device 'dev': zeroed, one
 * ref for the caller, empty queues, a unique connstamp/cqid, timeout
 * clamped up to RANAL_MIN_TIMEOUT (also pushed to the reaper), and a
 * RapidArray RI handle keyed by the cqid.  Frees the conn and fails
 * if RapkCreateRi does. */
396 kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
401 LASSERT (!in_interrupt());
402 PORTAL_ALLOC(conn, sizeof(*conn));
407 memset(conn, 0, sizeof(*conn));
408 atomic_set(&conn->rac_refcount, 1);
409 INIT_LIST_HEAD(&conn->rac_list);
410 INIT_LIST_HEAD(&conn->rac_hashlist);
411 INIT_LIST_HEAD(&conn->rac_schedlist);
412 INIT_LIST_HEAD(&conn->rac_fmaq);
413 INIT_LIST_HEAD(&conn->rac_rdmaq);
414 INIT_LIST_HEAD(&conn->rac_replyq);
415 spin_lock_init(&conn->rac_lock);
417 kranal_set_conn_uniqueness(conn);
419 conn->rac_timeout = MAX(kranal_tunables.kra_timeout, RANAL_MIN_TIMEOUT);
420 kranal_update_reaper_timeout(conn->rac_timeout);
422 rrc = RapkCreateRi(dev->rad_handle, conn->rac_cqid,
423 &conn->rac_rihandle);
424 if (rrc != RAP_SUCCESS) {
425 CERROR("RapkCreateRi failed: %d\n", rrc);
426 PORTAL_FREE(conn, sizeof(*conn));
430 atomic_inc(&kranal_data.kra_nconns);
/* Final teardown once the last ref is dropped: the conn must be
 * unscheduled, off every list, with empty queues.  Destroys the RI
 * handle, drops the peer ref (if any) and frees the memory. */
436 kranal_destroy_conn(kra_conn_t *conn)
440 LASSERT (!in_interrupt());
441 LASSERT (!conn->rac_scheduled);
442 LASSERT (list_empty(&conn->rac_list));
443 LASSERT (list_empty(&conn->rac_hashlist));
444 LASSERT (list_empty(&conn->rac_schedlist));
445 LASSERT (atomic_read(&conn->rac_refcount) == 0);
446 LASSERT (list_empty(&conn->rac_fmaq));
447 LASSERT (list_empty(&conn->rac_rdmaq));
448 LASSERT (list_empty(&conn->rac_replyq));
450 rrc = RapkDestroyRi(conn->rac_device->rad_handle,
452 LASSERT (rrc == RAP_SUCCESS);
454 if (conn->rac_peer != NULL)
455 kranal_peer_decref(conn->rac_peer);
457 PORTAL_FREE(conn, sizeof(*conn));
458 atomic_dec(&kranal_data.kra_nconns);
/* Move a CLOSING conn to CLOSED: unhash it so no new callbacks can
 * find it, drop the hash table's ref, then schedule the conn so its
 * device thread can clean out uncompleted comms.  Caller must hold
 * kra_global_lock (per the _locked naming convention). */
462 kranal_terminate_conn_locked (kra_conn_t *conn)
464 LASSERT (!in_interrupt());
465 LASSERT (conn->rac_state == RANAL_CONN_CLOSING);
466 LASSERT (!list_empty(&conn->rac_hashlist));
467 LASSERT (list_empty(&conn->rac_list));
469 /* Remove from conn hash table: no new callbacks */
470 list_del_init(&conn->rac_hashlist);
471 kranal_conn_decref(conn);
473 conn->rac_state = RANAL_CONN_CLOSED;
475 /* schedule to clear out all uncompleted comms in context of dev's
477 kranal_schedule_conn(conn);
/* Move an ESTABLISHED conn to CLOSING: unlink it from the peer's conn
 * list (unlinking a non-persistent peer entirely when that was its
 * last conn), reset the RX timestamp so we wait for the peer's CLOSE,
 * schedule the conn to send our CLOSE, and drop the peer's ref.
 * 'error' 0 logs at D_NET, non-zero at D_ERROR.  Caller must hold
 * kra_global_lock (per the _locked naming convention). */
481 kranal_close_conn_locked (kra_conn_t *conn, int error)
483 kra_peer_t *peer = conn->rac_peer;
485 CDEBUG(error == 0 ? D_NET : D_ERROR,
486 "closing conn to "LPX64": error %d\n", peer->rap_nid, error);
488 LASSERT (!in_interrupt());
489 LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED);
490 LASSERT (!list_empty(&conn->rac_hashlist));
491 LASSERT (!list_empty(&conn->rac_list));
493 list_del_init(&conn->rac_list);
495 if (list_empty(&peer->rap_conns) &&
496 peer->rap_persistence == 0) {
497 /* Non-persistent peer with no more conns... */
498 kranal_unlink_peer_locked(peer);
501 /* Reset RX timeout to ensure we wait for an incoming CLOSE for the
503 conn->rac_last_rx = jiffies;
506 conn->rac_state = RANAL_CONN_CLOSING;
507 kranal_schedule_conn(conn); /* schedule sending CLOSE */
509 kranal_conn_decref(conn); /* lose peer's ref */
/* Locked wrapper for kranal_close_conn_locked(): takes kra_global_lock
 * and closes 'conn' only if it is still ESTABLISHED. */
513 kranal_close_conn (kra_conn_t *conn, int error)
518 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
520 if (conn->rac_state == RANAL_CONN_ESTABLISHED)
521 kranal_close_conn_locked(conn, error);
523 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* Apply the peer's connreq to 'conn': install its RI parameters (fails
 * if RapkSetRiParams does; peer_ip/peer_port are only for the error
 * message), record its stamps, and derive our keepalive from its
 * advertised timeout (also pushed to the reaper). */
527 kranal_set_conn_params(kra_conn_t *conn, kra_connreq_t *connreq,
528 __u32 peer_ip, int peer_port)
532 rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
533 if (rrc != RAP_SUCCESS) {
534 CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n",
535 HIPQUAD(peer_ip), peer_port, rrc);
539 conn->rac_peerstamp = connreq->racr_peerstamp;
540 conn->rac_peer_connstamp = connreq->racr_connstamp;
541 conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout);
542 kranal_update_reaper_timeout(conn->rac_keepalive);
/* Passive (listener) side of the handshake on an accepted 'sock':
 * verify the peer connected from a privileged port (< 1024), receive
 * and validate its connreq, match its device id against our devices,
 * create a conn, apply the peer's parameters, and reply with our own
 * connreq.  On success returns the peer's src/dst NIDs and the new
 * conn via the out parameters. */
547 kranal_passive_conn_handshake (struct socket *sock, ptl_nid_t *src_nidp,
548 ptl_nid_t *dst_nidp, kra_conn_t **connp)
550 struct sockaddr_in addr;
552 unsigned int peer_port;
553 kra_connreq_t connreq;
563 rc = sock->ops->getname(sock, (struct sockaddr *)&addr, &len, 2);
565 CERROR("Can't get peer's IP: %d\n", rc);
569 peer_ip = ntohl(addr.sin_addr.s_addr);
570 peer_port = ntohs(addr.sin_port);
572 if (peer_port >= 1024) {
573 CERROR("Refusing unprivileged connection from %u.%u.%u.%u/%d\n",
574 HIPQUAD(peer_ip), peer_port);
575 return -ECONNREFUSED;
578 rc = kranal_recv_connreq(sock, &connreq,
579 kranal_tunables.kra_listener_timeout);
581 CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
582 HIPQUAD(peer_ip), peer_port, rc);
586 src_nid = connreq.racr_srcnid;
587 dst_nid = connreq.racr_dstnid;
        /* scanned all devices without matching racr_devid */
590 if (i == kranal_data.kra_ndevs) {
591 CERROR("Can't match dev %d from %u.%u.%u.%u/%d\n",
592 connreq.racr_devid, HIPQUAD(peer_ip), peer_port);
595 dev = &kranal_data.kra_devices[i];
596 if (dev->rad_id == connreq.racr_devid)
600 rc = kranal_create_conn(&conn, dev);
604 rc = kranal_set_conn_params(conn, &connreq, peer_ip, peer_port);
606 kranal_conn_decref(conn);
610 kranal_pack_connreq(&connreq, conn);
612 rc = kranal_sock_write(sock, &connreq, sizeof(connreq));
614 CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
615 HIPQUAD(peer_ip), peer_port, rc);
616 kranal_conn_decref(conn);
/* Actively connect a socket to 'peer', binding to a privileged local
 * port (1023 down to 512) so the far side's privileged-port check
 * passes.  -EADDRINUSE/-EADDRNOTAVAIL just move on to the next port;
 * any other failure aborts.  Exhausting all ports is -EHOSTUNREACH.
 * NOTE(review): name lacks the "kranal_" prefix used by every other
 * function here -- presumably a typo; renaming would touch callers. */
627 ranal_connect_sock(kra_peer_t *peer, struct socket **sockp)
629 struct sockaddr_in locaddr;
630 struct sockaddr_in srvaddr;
635 for (port = 1023; port >= 512; port--) {
637 memset(&locaddr, 0, sizeof(locaddr));
638 locaddr.sin_family = AF_INET;
639 locaddr.sin_port = htons(port);
640 locaddr.sin_addr.s_addr = htonl(INADDR_ANY);
642 memset (&srvaddr, 0, sizeof (srvaddr));
643 srvaddr.sin_family = AF_INET;
644 srvaddr.sin_port = htons (peer->rap_port);
645 srvaddr.sin_addr.s_addr = htonl (peer->rap_ip);
647 rc = kranal_create_sock(&sock);
651 rc = sock->ops->bind(sock,
652 (struct sockaddr *)&locaddr, sizeof(locaddr));
656 if (rc == -EADDRINUSE) {
657 CDEBUG(D_NET, "Port %d already in use\n", port);
661 CERROR("Can't bind to reserved port %d: %d\n", port, rc);
665 rc = sock->ops->connect(sock,
666 (struct sockaddr *)&srvaddr, sizeof(srvaddr),
675 if (rc != -EADDRNOTAVAIL) {
676 CERROR("Can't connect port %d to %u.%u.%u.%u/%d: %d\n",
677 port, HIPQUAD(peer->rap_ip), peer->rap_port, rc);
681 CDEBUG(D_NET, "Port %d not available for %u.%u.%u.%u/%d\n",
682 port, HIPQUAD(peer->rap_ip), peer->rap_port);
686 return -EHOSTUNREACH;
/* Active (connd) side of the handshake with 'peer': pick a device by
 * hashing both NIDs (spreads conns over devices), create a conn,
 * connect a socket, send our connreq immediately (the passive side
 * recvs with a short timeout right after accept), then receive and
 * validate the peer's reply -- its srcnid and device id must match
 * what we dialled.  On success returns the peer's dst NID and the
 * conn via the out parameters. */
691 kranal_active_conn_handshake(kra_peer_t *peer,
692 ptl_nid_t *dst_nidp, kra_conn_t **connp)
694 kra_connreq_t connreq;
701 /* spread connections over all devices using both peer NIDs to ensure
702 * all nids use all devices */
703 idx = peer->rap_nid + kranal_lib.libnal_ni.ni_pid.nid;
704 dev = &kranal_data.kra_devices[idx % kranal_data.kra_ndevs];
706 rc = kranal_create_conn(&conn, dev);
710 kranal_pack_connreq(&connreq, conn);
712 rc = ranal_connect_sock(peer, &sock);
716 /* CAVEAT EMPTOR: the passive side receives with a SHORT rx timeout
717 * immediately after accepting a connection, so we connect and then
718 * send immediately. */
720 rc = kranal_sock_write(sock, &connreq, sizeof(connreq));
722 CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
723 HIPQUAD(peer->rap_ip), peer->rap_port, rc);
727 rc = kranal_recv_connreq(sock, &connreq, kranal_tunables.kra_timeout);
729 CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
730 HIPQUAD(peer->rap_ip), peer->rap_port, rc);
737 if (connreq.racr_srcnid != peer->rap_nid) {
738 CERROR("Unexpected srcnid from %u.%u.%u.%u/%d: "
739 "received "LPX64" expected "LPX64"\n",
740 HIPQUAD(peer->rap_ip), peer->rap_port,
741 connreq.racr_srcnid, peer->rap_nid);
745 if (connreq.racr_devid != dev->rad_id) {
746 CERROR("Unexpected device id from %u.%u.%u.%u/%d: "
747 "received %d expected %d\n",
748 HIPQUAD(peer->rap_ip), peer->rap_port,
749 connreq.racr_devid, dev->rad_id);
753 rc = kranal_set_conn_params(conn, &connreq,
754 peer->rap_ip, peer->rap_port);
759 *dst_nidp = connreq.racr_dstnid;
765 kranal_conn_decref(conn);
/* Complete a handshake and install the resulting conn.  Active path
 * ('peer' != NULL, connd dialling out) or passive path ('sock' from
 * the listener; the peer is looked up or created from the received
 * src NID).  Under kra_global_lock: refuse NID mismatches and
 * duplicate conns (dup returns success -- we ARE connected), link a
 * new peer into the peer table, link the conn into the peer's list
 * and the cqid hash, flush the peer's blocked tx queue onto the new
 * conn, and cull now-stale conns.  Finally schedule the conn in case
 * FMA events arrived before it was hashed. */
770 kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
783 /* active: connd wants to connect to 'peer' */
784 LASSERT (peer != NULL);
785 LASSERT (peer->rap_connecting);
787 rc = kranal_active_conn_handshake(peer, &dst_nid, &conn);
791 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
793 if (!kranal_peer_active(peer)) {
794 /* raced with peer getting unlinked */
795 write_unlock_irqrestore(&kranal_data.kra_global_lock,
797 kranal_conn_decref(conn);
801 peer_nid = peer->rap_nid;
803 /* passive: listener accepted 'sock' */
804 LASSERT (peer == NULL);
806 rc = kranal_passive_conn_handshake(sock, &peer_nid,
811 /* assume this is a new peer */
812 peer = kranal_create_peer(peer_nid);
814 CERROR("Can't allocate peer for "LPX64"\n", peer_nid);
815 kranal_conn_decref(conn);
819 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
821 peer2 = kranal_find_peer_locked(peer_nid);
825 /* peer_nid already in the peer table */
826 kranal_peer_decref(peer);
831 LASSERT (!new_peer == !kranal_peer_active(peer));
833 /* Refuse connection if peer thinks we are a different NID. We check
834 * this while holding the global lock, to synch with connection
835 * destruction on NID change. */
836 if (dst_nid != kranal_lib.libnal_ni.ni_pid.nid) {
837 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
839 CERROR("Stale/bad connection with "LPX64
840 ": dst_nid "LPX64", expected "LPX64"\n",
841 peer_nid, dst_nid, kranal_lib.libnal_ni.ni_pid.nid);
846 /* Refuse to duplicate an existing connection (both sides might try to
847 * connect at once). NB we return success! We _are_ connected so we
848 * _don't_ have any blocked txs to complete with failure. */
849 rc = kranal_conn_isdup_locked(peer, conn);
851 LASSERT (!list_empty(&peer->rap_conns));
852 LASSERT (list_empty(&peer->rap_tx_queue));
853 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
854 CWARN("Not creating duplicate connection to "LPX64": %d\n",
861 /* peer table takes my ref on the new peer */
862 list_add_tail(&peer->rap_list,
863 kranal_nid2peerlist(peer_nid));
866 kranal_peer_addref(peer); /* +1 ref for conn */
867 conn->rac_peer = peer;
868 list_add_tail(&conn->rac_list, &peer->rap_conns);
870 kranal_conn_addref(conn); /* +1 ref for conn table */
871 list_add_tail(&conn->rac_hashlist,
872 kranal_cqid2connlist(conn->rac_cqid));
874 /* Schedule all packets blocking for a connection */
875 while (!list_empty(&peer->rap_tx_queue)) {
        /* FIX: list_entry() takes the embedded list_head POINTER, not
         * the address of the head's 'next' field; the old
         * '&peer->rap_tx_queue.next' made container_of compute a
         * bogus kra_tx_t* (cf. kranal_free_txdescs, which uses the
         * correct 'freelist->next' form). */
876 tx = list_entry(peer->rap_tx_queue.next,
879 list_del(&tx->tx_list);
880 kranal_post_fma(conn, tx);
883 nstale = kranal_close_stale_conns_locked(peer, conn);
885 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
887 /* CAVEAT EMPTOR: passive peer can disappear NOW */
890 CWARN("Closed %d stale conns to "LPX64"\n", nstale, peer_nid);
892 /* Ensure conn gets checked. Transmits may have been queued and an
893 * FMA event may have happened before it got in the cq hash table */
894 kranal_schedule_conn(conn);
899 kranal_peer_decref(peer);
900 kranal_conn_decref(conn);
/* Connd entry point: run an active handshake to 'peer' and clear its
 * 'connecting' flag.  On success the handshake has already flushed the
 * blocked tx queue, so just reset the reconnect backoff.  On failure,
 * push the next attempt out with capped exponential backoff, steal the
 * blocked txs onto a local 'zombies' list under the lock, and complete
 * them with -EHOSTUNREACH once the lock is dropped. */
905 kranal_connect (kra_peer_t *peer)
909 struct list_head zombies;
912 LASSERT (peer->rap_connecting);
914 rc = kranal_conn_handshake(NULL, peer);
916 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
918 LASSERT (peer->rap_connecting);
919 peer->rap_connecting = 0;
922 /* kranal_conn_handshake() queues blocked txs immediately on
923 * success to avoid messages jumping the queue */
924 LASSERT (list_empty(&peer->rap_tx_queue));
926 /* reset reconnection timeouts */
927 peer->rap_reconnect_interval = RANAL_MIN_RECONNECT_INTERVAL;
928 peer->rap_reconnect_time = CURRENT_TIME;
930 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
934 LASSERT (peer->rap_reconnect_interval != 0);
935 peer->rap_reconnect_time = CURRENT_TIME + peer->rap_reconnect_interval;
        /* FIX: exponential backoff capped at the maximum.  The old
         * 'MAX(RANAL_MAX_RECONNECT_INTERVAL, 1 * interval)' pinned the
         * interval to at least the maximum on the first failure and
         * never doubled it; MIN(max, 2 * interval) is the intended
         * clamp (interval starts at RANAL_MIN_RECONNECT_INTERVAL). */
936 peer->rap_reconnect_interval = MIN(RANAL_MAX_RECONNECT_INTERVAL,
937 2 * peer->rap_reconnect_interval);
939 /* Grab all blocked packets while we have the global lock */
940 list_add(&zombies, &peer->rap_tx_queue);
941 list_del_init(&peer->rap_tx_queue);
943 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
945 if (list_empty(&zombies))
948 CWARN("Dropping packets for "LPX64": connection failed\n",
952 tx = list_entry(zombies.next, kra_tx_t, tx_list);
954 list_del(&tx->tx_list);
955 kranal_tx_done(tx, -EHOSTUNREACH);
957 } while (!list_empty(&zombies));
/* Listener kernel thread: bind and listen on the configured port, then
 * loop doing non-blocking accepts, queueing each accepted socket (as a
 * kra_acceptsock_t) on kra_connd_acceptq for the connd threads.  The
 * parent holds kra_nid_mutex and blocks on kra_listener_signal until we
 * up() it (once the socket is installed, and again at exit with the
 * final status in kra_listener_shutdown).  Sleeps on the listen
 * socket's wait queue between accepts; allocation failures just pause
 * and retry. */
961 kranal_listener(void *arg)
963 struct sockaddr_in addr;
966 kra_acceptsock_t *ras;
972 /* Parent thread holds kra_nid_mutex, and is, or is about to
973 * block on kra_listener_signal */
975 port = kranal_tunables.kra_port;
976 snprintf(name, sizeof(name), "kranal_lstn%03d", port);
977 kportal_daemonize(name);
978 kportal_blockallsigs();
980 init_waitqueue_entry(&wait, current);
982 rc = kranal_create_sock(&sock);
986 memset(&addr, 0, sizeof(addr));
987 addr.sin_family = AF_INET;
988 addr.sin_port = htons(port);
989 addr.sin_addr.s_addr = INADDR_ANY;
991 rc = sock->ops->bind(sock, (struct sockaddr *)&addr, sizeof(addr));
993 CERROR("Can't bind to port %d\n", port);
997 rc = sock->ops->listen(sock, kranal_tunables.kra_backlog);
999 CERROR("Can't set listen backlog %d: %d\n",
1000 kranal_tunables.kra_backlog, rc);
1004 LASSERT (kranal_data.kra_listener_sock == NULL);
1005 kranal_data.kra_listener_sock = sock;
1007 /* unblock waiting parent */
1008 LASSERT (kranal_data.kra_listener_shutdown == 0);
1009 up(&kranal_data.kra_listener_signal);
1011 /* Wake me any time something happens on my socket */
1012 add_wait_queue(sock->sk->sk_sleep, &wait);
1015 while (kranal_data.kra_listener_shutdown == 0) {
1018 PORTAL_ALLOC(ras, sizeof(*ras));
1020 CERROR("Out of Memory: pausing...\n");
1024 ras->ras_sock = NULL;
1027 if (ras->ras_sock == NULL) {
1028 ras->ras_sock = sock_alloc();
1029 if (ras->ras_sock == NULL) {
1030 CERROR("Can't allocate socket: pausing...\n");
1036 set_current_state(TASK_INTERRUPTIBLE);
1038 rc = sock->ops->accept(sock, ras->ras_sock, O_NONBLOCK);
1040 /* Sleep for socket activity? */
1041 if (rc == -EAGAIN &&
1042 kranal_data.kra_listener_shutdown == 0)
1045 set_current_state(TASK_RUNNING);
        /* hand the accepted socket to the connd threads */
1048 spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
1050 list_add_tail(&ras->ras_list,
1051 &kranal_data.kra_connd_acceptq);
1053 spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
1054 wake_up(&kranal_data.kra_connd_waitq);
1060 if (rc != -EAGAIN) {
1061 CERROR("Accept failed: %d, pausing...\n", rc);
1067 if (ras->ras_sock != NULL)
1068 sock_release(ras->ras_sock);
1069 PORTAL_FREE(ras, sizeof(*ras));
1073 remove_wait_queue(sock->sk->sk_sleep, &wait);
1076 kranal_data.kra_listener_sock = NULL;
1078 /* set completion status and unblock thread waiting for me
1079 * (parent on startup failure, executioner on normal shutdown) */
1080 kranal_data.kra_listener_shutdown = rc;
1081 up(&kranal_data.kra_listener_signal);
/* Spawn the listener thread and block on kra_listener_signal until it
 * reports startup status via kra_listener_shutdown (0 on success, else
 * the errno it failed with).  Caller holds kra_nid_mutex with the
 * listener stopped. */
1087 kranal_start_listener (void)
1092 CDEBUG(D_WARNING, "Starting listener\n");
1094 /* Called holding kra_nid_mutex: listener stopped */
1095 LASSERT (kranal_data.kra_listener_sock == NULL);
1097 kranal_data.kra_listener_shutdown = 0;
1098 pid = kernel_thread(kranal_listener, NULL, 0);
1100 CERROR("Can't spawn listener: %ld\n", pid);
1104 /* Block until listener has started up. */
1105 down(&kranal_data.kra_listener_signal);
1107 rc = kranal_data.kra_listener_shutdown;
1108 LASSERT ((rc != 0) == (kranal_data.kra_listener_sock == NULL));
1110 CDEBUG(D_WARNING, "Listener %ld started OK\n", pid);
/* Ask the running listener to exit (set the shutdown flag and wake it
 * via its socket's wait queue) and block on kra_listener_signal until
 * it has torn down.  Caller holds kra_nid_mutex with the listener
 * running. */
1117 kranal_stop_listener(void)
1119 CDEBUG(D_WARNING, "Stopping listener\n");
1121 /* Called holding kra_nid_mutex: listener running */
1122 LASSERT (kranal_data.kra_listener_sock != NULL);
1124 kranal_data.kra_listener_shutdown = 1;
1125 wake_up_all(kranal_data.kra_listener_sock->sk->sk_sleep);
1127 /* Block until listener has torn down. */
1128 down(&kranal_data.kra_listener_signal);
1130 LASSERT (kranal_data.kra_listener_sock == NULL);
1131 CDEBUG(D_WARNING, "Listener stopped\n");
/* sysctl handler for the "port" and "backlog" tunables: run the normal
 * proc_dointvec, then (under kra_nid_mutex) restart the listener if the
 * value changed or the listener isn't up; on restart failure revert to
 * the old value and restart with that. */
1133 kranal_listener_procint(ctl_table *table, int write, struct file *filp,
1134 void *buffer, size_t *lenp)
1136 int *tunable = (int *)table->data;
1140 /* No race with nal initialisation since the nal is setup all the time
1141 * it's loaded. When that changes, change this! */
1142 LASSERT (kranal_data.kra_init == RANAL_INIT_ALL);
1144 down(&kranal_data.kra_nid_mutex);
1146 LASSERT (tunable == &kranal_tunables.kra_port ||
1147 tunable == &kranal_tunables.kra_backlog);
1150 rc = proc_dointvec(table, write, filp, buffer, lenp);
1153 (*tunable != old_val ||
1154 kranal_data.kra_listener_sock == NULL)) {
1156 if (kranal_data.kra_listener_sock != NULL)
1157 kranal_stop_listener();
1159 rc = kranal_start_listener();
1162 CWARN("Unable to start listener with new tunable:"
1163 " reverting to old value\n");
        /* restore the previous value and bring the listener back */
1165 kranal_start_listener();
1169 up(&kranal_data.kra_nid_mutex);
1171 LASSERT (kranal_data.kra_init == RANAL_INIT_ALL);
/* Change our NID: under kra_nid_mutex, stop the listener, bump the
 * peerstamp (so old connections are recognised as stale), install the
 * new NID, delete all peers/conns, and restart the listener unless the
 * new NID is the wildcard.  A no-op if the NID is unchanged. */
1176 kranal_set_mynid(ptl_nid_t nid)
1178 unsigned long flags;
1179 lib_ni_t *ni = &kranal_lib.libnal_ni;
1182 CDEBUG(D_NET, "setting mynid to "LPX64" (old nid="LPX64")\n",
1183 nid, ni->ni_pid.nid);
1185 down(&kranal_data.kra_nid_mutex);
1187 if (nid == ni->ni_pid.nid) {
1188 /* no change of NID */
1189 up(&kranal_data.kra_nid_mutex);
1193 if (kranal_data.kra_listener_sock != NULL)
1194 kranal_stop_listener();
1196 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1197 kranal_data.kra_peerstamp++;
1198 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
1200 ni->ni_pid.nid = nid;
1202 /* Delete all existing peers and their connections after new
1203 * NID/connstamp set to ensure no old connections in our brave
1205 kranal_del_peer(PTL_NID_ANY, 0);
1207 if (nid != PTL_NID_ANY)
1208 rc = kranal_start_listener();
1210 up(&kranal_data.kra_nid_mutex);
/* Allocate a peer for 'nid' (must not be the wildcard): zeroed, one ref
 * for the caller, empty lists, reconnect state primed with the minimum
 * interval.  Bumps the global peer count. */
1215 kranal_create_peer (ptl_nid_t nid)
1219 LASSERT (nid != PTL_NID_ANY);
1221 PORTAL_ALLOC(peer, sizeof(*peer));
1225 memset(peer, 0, sizeof(*peer)); /* zero flags etc */
1227 peer->rap_nid = nid;
1228 atomic_set(&peer->rap_refcount, 1); /* 1 ref for caller */
1230 INIT_LIST_HEAD(&peer->rap_list);
1231 INIT_LIST_HEAD(&peer->rap_connd_list);
1232 INIT_LIST_HEAD(&peer->rap_conns);
1233 INIT_LIST_HEAD(&peer->rap_tx_queue);
1235 peer->rap_reconnect_time = CURRENT_TIME;
1236 peer->rap_reconnect_interval = RANAL_MIN_RECONNECT_INTERVAL;
1238 atomic_inc(&kranal_data.kra_npeers);
/* Final teardown once the last peer ref is dropped: the peer must be
 * unlinked, non-persistent, not connecting, with every list empty.
 * Frees the memory and drops the global peer count. */
1245 kranal_destroy_peer (kra_peer_t *peer)
1247 CDEBUG(D_NET, "peer "LPX64" %p deleted\n", peer->rap_nid, peer);
1249 LASSERT (atomic_read(&peer->rap_refcount) == 0);
1250 LASSERT (peer->rap_persistence == 0);
1251 LASSERT (!kranal_peer_active(peer));
1252 LASSERT (!peer->rap_connecting);
1253 LASSERT (list_empty(&peer->rap_conns));
1254 LASSERT (list_empty(&peer->rap_tx_queue));
1255 LASSERT (list_empty(&peer->rap_connd_list));
1257 PORTAL_FREE(peer, sizeof(*peer));
1259 /* NB a peer's connections keep a reference on their peer until
1260 * they are destroyed, so we can be assured that _all_ state to do
1261 * with this peer has been cleaned up when its refcount drops to
1263 atomic_dec(&kranal_data.kra_npeers);
/* Look up 'nid' in its peer hash chain; every peer in the table must be
 * persistent or have an active conn.  No ref is taken.  Caller must
 * hold kra_global_lock (per the _locked naming convention). */
1265 kranal_find_peer_locked (ptl_nid_t nid)
1267 struct list_head *peer_list = kranal_nid2peerlist(nid);
1268 struct list_head *tmp;
1271 list_for_each (tmp, peer_list) {
1273 peer = list_entry(tmp, kra_peer_t, rap_list);
1275 LASSERT (peer->rap_persistence > 0 || /* persistent peer */
1276 !list_empty(&peer->rap_conns)); /* active conn */
1278 if (peer->rap_nid != nid)
1281 CDEBUG(D_NET, "got peer [%p] -> "LPX64" (%d)\n",
1282 peer, nid, atomic_read(&peer->rap_refcount));
/* Locked wrapper for kranal_find_peer_locked(); a found peer carries
 * an extra ref which the caller must drop. */
1289 kranal_find_peer (ptl_nid_t nid)
1293 read_lock(&kranal_data.kra_global_lock);
1294 peer = kranal_find_peer_locked(nid);
1295 if (peer != NULL) /* +1 ref for caller */
1296 kranal_peer_addref(peer);
1297 read_unlock(&kranal_data.kra_global_lock);
/* Remove a non-persistent, conn-less peer from the peer table and drop
 * the table's ref.  Caller must hold kra_global_lock (per the _locked
 * naming convention). */
1303 kranal_unlink_peer_locked (kra_peer_t *peer)
1305 LASSERT (peer->rap_persistence == 0);
1306 LASSERT (list_empty(&peer->rap_conns));
1308 LASSERT (kranal_peer_active(peer));
1309 list_del_init(&peer->rap_list);
1311 /* lose peerlist's ref */
1312 kranal_peer_decref(peer);
/* Enumerate the peer table: under the read lock, walk all hash chains
 * until the 'index'th peer (count elided here) and return its NID,
 * IP, port and persistence via the out parameters. */
1316 kranal_get_peer_info (int index, ptl_nid_t *nidp, __u32 *ipp, int *portp,
1320 struct list_head *ptmp;
1323 read_lock(&kranal_data.kra_global_lock);
1325 for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
1327 list_for_each(ptmp, &kranal_data.kra_peers[i]) {
1329 peer = list_entry(ptmp, kra_peer_t, rap_list);
1330 LASSERT (peer->rap_persistence > 0 ||
1331 !list_empty(&peer->rap_conns));
1336 *nidp = peer->rap_nid;
1337 *ipp = peer->rap_ip;
1338 *portp = peer->rap_port;
1339 *persistencep = peer->rap_persistence;
1341 read_unlock(&kranal_data.kra_global_lock);
1346 read_unlock(&kranal_data.kra_global_lock);
/* Add (or bump the persistence of) a configured peer at nid/ip:port.
 * A fresh peer is created optimistically; if the NID is already in the
 * table the new one is dropped and the existing peer is used.  The
 * wildcard NID is rejected. */
1351 kranal_add_persistent_peer (ptl_nid_t nid, __u32 ip, int port)
1353 unsigned long flags;
1357 if (nid == PTL_NID_ANY)
1360 peer = kranal_create_peer(nid);
1364 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1366 peer2 = kranal_find_peer_locked(nid);
1367 if (peer2 != NULL) {
1368 kranal_peer_decref(peer);
1371 /* peer table takes existing ref on peer */
1372 list_add_tail(&peer->rap_list,
1373 kranal_nid2peerlist(nid));
1377 peer->rap_port = port;
1378 peer->rap_persistence++;
1380 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* Drop persistence on 'peer' (all of it, or one share if single_share)
 * and, once persistence reaches zero, either unlink a conn-less peer or
 * close all its conns (the peer unlinks itself when the last closes).
 * Caller must hold kra_global_lock (per the _locked naming
 * convention). */
1385 kranal_del_peer_locked (kra_peer_t *peer, int single_share)
1387 struct list_head *ctmp;
1388 struct list_head *cnxt;
1392 peer->rap_persistence = 0;
1393 else if (peer->rap_persistence > 0)
1394 peer->rap_persistence--;
1396 if (peer->rap_persistence != 0)
1399 if (list_empty(&peer->rap_conns)) {
1400 kranal_unlink_peer_locked(peer);
1402 list_for_each_safe(ctmp, cnxt, &peer->rap_conns) {
1403 conn = list_entry(ctmp, kra_conn_t, rac_list);
1405 kranal_close_conn_locked(conn, 0);
1407 /* peer unlinks itself when last conn is closed */
/* Delete peers matching 'nid' (or every peer if PTL_NID_ANY) under the
 * write lock, via kranal_del_peer_locked().  Matching a specific NID
 * narrows the scan to one hash chain; rc records whether anything
 * matched. */
1412 kranal_del_peer (ptl_nid_t nid, int single_share)
1414 unsigned long flags;
1415 struct list_head *ptmp;
1416 struct list_head *pnxt;
1423 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1425 if (nid != PTL_NID_ANY)
1426 lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
1429 hi = kranal_data.kra_peer_hash_size - 1;
1432 for (i = lo; i <= hi; i++) {
1433 list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
1434 peer = list_entry(ptmp, kra_peer_t, rap_list);
1435 LASSERT (peer->rap_persistence > 0 ||
1436 !list_empty(&peer->rap_conns));
1438 if (!(nid == PTL_NID_ANY || peer->rap_nid == nid))
1441 kranal_del_peer_locked(peer, single_share);
1442 rc = 0; /* matched something */
1449 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* Enumerate connections: under the read lock, walk every peer's conn
 * list until the 'index'th conn (count elided here), take a ref on it
 * for the caller (who must decref), and return it. */
1455 kranal_get_conn_by_idx (int index)
1458 struct list_head *ptmp;
1460 struct list_head *ctmp;
1463 read_lock (&kranal_data.kra_global_lock);
1465 for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
1466 list_for_each (ptmp, &kranal_data.kra_peers[i]) {
1468 peer = list_entry(ptmp, kra_peer_t, rap_list);
1469 LASSERT (peer->rap_persistence > 0 ||
1470 !list_empty(&peer->rap_conns));
1472 list_for_each (ctmp, &peer->rap_conns) {
1476 conn = list_entry(ctmp, kra_conn_t, rac_list);
1477 CDEBUG(D_NET, "++conn[%p] -> "LPX64" (%d)\n",
1478 conn, conn->rac_peer->rap_nid,
1479 atomic_read(&conn->rac_refcount));
1480 atomic_inc(&conn->rac_refcount);
1481 read_unlock(&kranal_data.kra_global_lock);
1487 read_unlock(&kranal_data.kra_global_lock);
/* Close every conn of 'peer' with error 'why'.  Caller must hold
 * kra_global_lock (per the _locked naming convention); the count
 * returned is accumulated in code elided from this view. */
1492 kranal_close_peer_conns_locked (kra_peer_t *peer, int why)
1495 struct list_head *ctmp;
1496 struct list_head *cnxt;
1499 list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
1500 conn = list_entry(ctmp, kra_conn_t, rac_list);
1503 kranal_close_conn_locked(conn, why);
/* Close all conns of peers matching 'nid' (or every peer for
 * PTL_NID_ANY) under the write lock.  Wildcards always succeed;
 * otherwise -ENOENT if nothing was closed. */
1510 kranal_close_matching_conns (ptl_nid_t nid)
1512 unsigned long flags;
1514 struct list_head *ptmp;
1515 struct list_head *pnxt;
1521 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1523 if (nid != PTL_NID_ANY)
1524 lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
1527 hi = kranal_data.kra_peer_hash_size - 1;
1530 for (i = lo; i <= hi; i++) {
1531 list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
1533 peer = list_entry(ptmp, kra_peer_t, rap_list);
1534 LASSERT (peer->rap_persistence > 0 ||
1535 !list_empty(&peer->rap_conns));
1537 if (!(nid == PTL_NID_ANY || nid == peer->rap_nid))
1540 count += kranal_close_peer_conns_locked(peer, 0);
1544 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
1546 /* wildcards always succeed */
1547 if (nid == PTL_NID_ANY)
1550 return (count == 0) ? -ENOENT : 0;
/* NAL command dispatcher registered with libcfs_nal_cmd_register().
 * Translates portals_cfg requests (peer/conn add, delete, query, NID
 * registration) into kranal operations and fills the pcfg reply fields.
 * NOTE(review): decimated fragment -- the 'rc' declaration, 'break'
 * statements, default case and final return are not visible here. */
1554 kranal_cmd(struct portals_cfg *pcfg, void * private)
1558 LASSERT (pcfg != NULL);
1560 switch(pcfg->pcfg_command) {
1561 case NAL_CMD_GET_PEER: {
1565 int share_count = 0;
 /* query the pcfg_count'th peer; reply fields repurposed as below */
1567 rc = kranal_get_peer_info(pcfg->pcfg_count,
1568 &nid, &ip, &port, &share_count);
1569 pcfg->pcfg_nid = nid;
1570 pcfg->pcfg_size = 0;
1572 pcfg->pcfg_misc = port;
1573 pcfg->pcfg_count = 0;
1574 pcfg->pcfg_wait = share_count;
1577 case NAL_CMD_ADD_PEER: {
1578 rc = kranal_add_persistent_peer(pcfg->pcfg_nid,
1579 pcfg->pcfg_id, /* IP */
1580 pcfg->pcfg_misc); /* port */
1583 case NAL_CMD_DEL_PEER: {
1584 rc = kranal_del_peer(pcfg->pcfg_nid,
1585 /* flags == single_share */
1586 pcfg->pcfg_flags != 0);
1589 case NAL_CMD_GET_CONN: {
 /* ref taken by kranal_get_conn_by_idx(); dropped below */
1590 kra_conn_t *conn = kranal_get_conn_by_idx(pcfg->pcfg_count);
1596 pcfg->pcfg_nid = conn->rac_peer->rap_nid;
1597 pcfg->pcfg_id = conn->rac_device->rad_id;
1598 pcfg->pcfg_misc = 0;
1599 pcfg->pcfg_flags = 0;
1600 kranal_conn_decref(conn);
1604 case NAL_CMD_CLOSE_CONNECTION: {
1605 rc = kranal_close_matching_conns(pcfg->pcfg_nid);
1608 case NAL_CMD_REGISTER_MYNID: {
 /* refuse to advertise the wildcard NID as our identity */
1609 if (pcfg->pcfg_nid == PTL_NID_ANY)
1612 rc = kranal_set_mynid(pcfg->pcfg_nid);
/* Drain 'freelist', freeing each tx descriptor and its preallocated
 * tx_phys page-fragment array (sized for PTL_MD_MAX_IOV entries).
 * Used both on normal shutdown and on allocation-failure cleanup. */
1621 kranal_free_txdescs(struct list_head *freelist)
1625 while (!list_empty(freelist)) {
1626 tx = list_entry(freelist->next, kra_tx_t, tx_list);
1628 list_del(&tx->tx_list);
 /* free the companion buffer first, then the descriptor itself */
1629 PORTAL_FREE(tx->tx_phys, PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
1630 PORTAL_FREE(tx, sizeof(*tx));
/* Populate 'freelist' with 'n' freshly allocated tx descriptors, each
 * with a PTL_MD_MAX_IOV-entry tx_phys array. 'freelist' must be one of
 * the two global idle-tx lists (blocking or non-blocking), and empty.
 * On any allocation failure, everything allocated so far is released
 * via kranal_free_txdescs().
 * NOTE(review): decimated fragment -- the NULL check after the first
 * PORTAL_ALLOC, the -ENOMEM returns and the final return are missing. */
1635 kranal_alloc_txdescs(struct list_head *freelist, int n)
 /* which of the two lists we're filling, used only for log messages
 * and the tx_isnblk flag */
1637 int isnblk = (freelist == &kranal_data.kra_idle_nblk_txs);
1641 LASSERT (freelist == &kranal_data.kra_idle_txs ||
1642 freelist == &kranal_data.kra_idle_nblk_txs);
1643 LASSERT (list_empty(freelist));
1645 for (i = 0; i < n; i++) {
1647 PORTAL_ALLOC(tx, sizeof(*tx));
1649 CERROR("Can't allocate %stx[%d]\n",
1650 isnblk ? "nblk " : "", i);
 /* unwind: release the i descriptors already on the list */
1651 kranal_free_txdescs(freelist);
1655 PORTAL_ALLOC(tx->tx_phys,
1656 PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
1657 if (tx->tx_phys == NULL) {
1658 CERROR("Can't allocate %stx[%d]->tx_phys\n",
1659 isnblk ? "nblk " : "", i);
 /* current tx is not yet on the list; free it by hand */
1661 PORTAL_FREE(tx, sizeof(*tx));
1662 kranal_free_txdescs(freelist);
1666 tx->tx_isnblk = isnblk;
1667 tx->tx_buftype = RANAL_BUF_NONE;
1668 tx->tx_msg.ram_type = RANAL_MSG_NONE;
1670 list_add(&tx->tx_list, freelist);
/* Bring up one RapidArray device: open it by index, reserve RDMA
 * descriptors for every possible tx, and create the send (rdma) and
 * receive (fma) completion queues. On failure, tears down whatever
 * was set up, in reverse order (goto-unwind labels partially visible).
 * NOTE(review): decimated fragment -- dev field assignments, return
 * statements and the failure-label structure are only partly visible. */
1677 kranal_device_init(int id, kra_device_t *dev)
 /* enough RDMA slots for all blocking + non-blocking tx descriptors */
1679 const int total_ntx = RANAL_NTX + RANAL_NTX_NBLK;
1683 rrc = RapkGetDeviceByIndex(id, kranal_device_callback,
1685 if (rrc != RAP_SUCCESS) {
1686 CERROR("Can't get Rapidarray Device %d: %d\n", id, rrc);
1690 rrc = RapkReserveRdma(dev->rad_handle, total_ntx);
1691 if (rrc != RAP_SUCCESS) {
1692 CERROR("Can't reserve %d RDMA descriptors"
1693 " for device %d: %d\n", total_ntx, id, rrc);
1697 rrc = RapkCreateCQ(dev->rad_handle, total_ntx, RAP_CQTYPE_SEND,
1698 &dev->rad_rdma_cqh);
1699 if (rrc != RAP_SUCCESS) {
1700 CERROR("Can't create rdma cq size %d"
1701 " for device %d: %d\n", total_ntx, id, rrc);
1705 rrc = RapkCreateCQ(dev->rad_handle, RANAL_FMA_CQ_SIZE, RAP_CQTYPE_RECV,
1707 if (rrc != RAP_SUCCESS) {
1708 CERROR("Can't create fma cq size %d"
1709 " for device %d: %d\n", RANAL_FMA_CQ_SIZE, id, rrc);
 /* failure unwind: destroy what was created, in reverse order */
1716 RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
1718 RapkReleaseDevice(dev->rad_handle);
/* Tear down one RapidArray device: both completion queues, then the
 * device handle. The scheduler thread must already have exited. */
1724 kranal_device_fini(kra_device_t *dev)
1726 LASSERT(dev->rad_scheduler == NULL);
1727 RapkDestroyCQ(dev->rad_handle, dev->rad_fma_cqh);
1728 RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
1729 RapkReleaseDevice(dev->rad_handle);
/* Shut the NAL down, unwinding exactly as far as kra_init says we got
 * during startup (the switch cases fall through from RANAL_INIT_ALL
 * down to RANAL_INIT_DATA). Safe to call from the startup failure
 * path as well as from final module unload.
 * NOTE(review): decimated fragment -- the decrement of nal_refct,
 * 'default:' label, set_current_state/schedule_timeout waits inside
 * the polling loops, and several braces are not visible here. */
1733 kranal_api_shutdown (nal_t *nal)
1736 unsigned long flags;
 /* other modules still hold refs: just drop ours and keep running */
1738 if (nal->nal_refct != 0) {
1739 /* This module got the first ref */
1740 PORTAL_MODULE_UNUSE;
1744 CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
1745 atomic_read(&portal_kmemory));
1747 LASSERT (nal == &kranal_api);
1749 switch (kranal_data.kra_init) {
1751 CERROR("Unexpected state %d\n", kranal_data.kra_init);
1754 case RANAL_INIT_ALL:
1755 /* stop calls to nal_cmd */
1756 libcfs_nal_cmd_unregister(RANAL);
1757 /* No new persistent peers */
1759 /* resetting my NID to unadvertises me, removes my
1760 * listener and nukes all current peers */
1761 kranal_set_mynid(PTL_NID_ANY);
1762 /* no new peers or conns */
1764 /* Wait for all peer/conn state to clean up */
1766 while (atomic_read(&kranal_data.kra_nconns) != 0 ||
1767 atomic_read(&kranal_data.kra_npeers) != 0) {
 /* log at D_WARNING only on power-of-2 iterations to avoid spam */
1769 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
1770 "waiting for %d peers and %d conns to close down\n",
1771 atomic_read(&kranal_data.kra_npeers),
1772 atomic_read(&kranal_data.kra_nconns));
 /* fall through */
1777 case RANAL_INIT_LIB:
1778 lib_fini(&kranal_lib);
 /* fall through */
1781 case RANAL_INIT_DATA:
1785 /* flag threads to terminate; wake and wait for them to die */
1786 kranal_data.kra_shutdown = 1;
1788 for (i = 0; i < kranal_data.kra_ndevs; i++) {
1789 kra_device_t *dev = &kranal_data.kra_devices[i];
1791 LASSERT (list_empty(&dev->rad_connq));
1793 spin_lock_irqsave(&dev->rad_lock, flags);
1794 wake_up(&dev->rad_waitq);
1795 spin_unlock_irqrestore(&dev->rad_lock, flags);
 /* wake the reaper and connd threads so they see kra_shutdown */
1798 spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
1799 wake_up_all(&kranal_data.kra_reaper_waitq);
1800 spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
1802 LASSERT (list_empty(&kranal_data.kra_connd_peers));
1803 spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
1804 wake_up_all(&kranal_data.kra_connd_waitq);
1805 spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
1808 while (atomic_read(&kranal_data.kra_nthreads) != 0) {
1810 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
1811 "Waiting for %d threads to terminate\n",
1812 atomic_read(&kranal_data.kra_nthreads));
 /* all peers must be gone before the hash table can be freed */
1816 LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
1817 if (kranal_data.kra_peers != NULL) {
1818 for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
1819 LASSERT (list_empty(&kranal_data.kra_peers[i]));
1821 PORTAL_FREE(kranal_data.kra_peers,
1822 sizeof (struct list_head) *
1823 kranal_data.kra_peer_hash_size);
1826 LASSERT (atomic_read(&kranal_data.kra_nconns) == 0);
1827 if (kranal_data.kra_conns != NULL) {
1828 for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
1829 LASSERT (list_empty(&kranal_data.kra_conns[i]));
1831 PORTAL_FREE(kranal_data.kra_conns,
1832 sizeof (struct list_head) *
1833 kranal_data.kra_conn_hash_size);
1836 for (i = 0; i < kranal_data.kra_ndevs; i++)
1837 kranal_device_fini(&kranal_data.kra_devices[i]);
1839 kranal_free_txdescs(&kranal_data.kra_idle_txs);
1840 kranal_free_txdescs(&kranal_data.kra_idle_nblk_txs);
1842 CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
1843 atomic_read(&portal_kmemory));
1844 printk(KERN_INFO "Lustre: RapidArray NAL unloaded (final mem %d)\n",
1845 atomic_read(&portal_kmemory));
1847 kranal_data.kra_init = RANAL_INIT_NOTHING;
/* Bring the NAL up: initialise all kranal_data lists/locks, allocate
 * the peer and conn hash tables and tx descriptor pools, init the lib
 * layer, spawn reaper/connd/scheduler threads, probe the RapidArray
 * devices and register the nal_cmd handler. kra_init tracks progress
 * so the failure path (kranal_api_shutdown at the bottom) unwinds only
 * what succeeded.
 * NOTE(review): decimated fragment -- local declarations (tv, i, rc,
 * dev), 'return PTL_OK/PTL_FAIL' statements, goto failure labels and
 * several braces are not visible here. */
1851 kranal_api_startup (nal_t *nal, ptl_pid_t requested_pid,
1852 ptl_ni_limits_t *requested_limits,
1853 ptl_ni_limits_t *actual_limits)
1855 static int device_ids[] = {RAPK_MAIN_DEVICE_ID,
1856 RAPK_EXPANSION_DEVICE_ID};
1858 ptl_process_id_t process_id;
 /* remember baseline kmem so the load banner can report our footprint */
1859 int pkmem = atomic_read(&portal_kmemory);
1864 LASSERT (nal == &kranal_api);
 /* already initialised by an earlier ref: just report limits */
1866 if (nal->nal_refct != 0) {
1867 if (actual_limits != NULL)
1868 *actual_limits = kranal_lib.libnal_ni.ni_actual_limits;
1869 /* This module got the first ref */
1874 LASSERT (kranal_data.kra_init == RANAL_INIT_NOTHING);
1876 memset(&kranal_data, 0, sizeof(kranal_data)); /* zero pointers, flags etc */
1878 /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
1879 * a unique (for all time) connstamp so we can uniquely identify
1880 * the sender. The connstamp is an incrementing counter
1881 * initialised with seconds + microseconds at startup time. So we
1882 * rely on NOT creating connections more frequently on average than
1883 * 1MHz to ensure we don't use old connstamps when we reboot. */
1884 do_gettimeofday(&tv);
1885 kranal_data.kra_connstamp =
1886 kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
1888 init_MUTEX(&kranal_data.kra_nid_mutex);
 /* starts locked: listener signals it when it comes up/down */
1889 init_MUTEX_LOCKED(&kranal_data.kra_listener_signal);
1891 rwlock_init(&kranal_data.kra_global_lock);
1893 for (i = 0; i < RANAL_MAXDEVS; i++ ) {
1894 kra_device_t *dev = &kranal_data.kra_devices[i];
1897 INIT_LIST_HEAD(&dev->rad_connq);
1898 init_waitqueue_head(&dev->rad_waitq);
1899 spin_lock_init(&dev->rad_lock);
1902 kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
1903 init_waitqueue_head(&kranal_data.kra_reaper_waitq);
1904 spin_lock_init(&kranal_data.kra_reaper_lock);
1906 INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
1907 init_waitqueue_head(&kranal_data.kra_connd_waitq);
1908 spin_lock_init(&kranal_data.kra_connd_lock);
1910 INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
1911 INIT_LIST_HEAD(&kranal_data.kra_idle_nblk_txs);
1912 init_waitqueue_head(&kranal_data.kra_idle_tx_waitq);
1913 spin_lock_init(&kranal_data.kra_tx_lock);
1915 /* OK to call kranal_api_shutdown() to cleanup now */
1916 kranal_data.kra_init = RANAL_INIT_DATA;
1918 kranal_data.kra_peer_hash_size = RANAL_PEER_HASH_SIZE;
1919 PORTAL_ALLOC(kranal_data.kra_peers,
1920 sizeof(struct list_head) * kranal_data.kra_peer_hash_size);
1921 if (kranal_data.kra_peers == NULL)
1924 for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
1925 INIT_LIST_HEAD(&kranal_data.kra_peers[i]);
 /* conn hash reuses the peer hash size constant */
1927 kranal_data.kra_conn_hash_size = RANAL_PEER_HASH_SIZE;
1928 PORTAL_ALLOC(kranal_data.kra_conns,
1929 sizeof(struct list_head) * kranal_data.kra_conn_hash_size);
1930 if (kranal_data.kra_conns == NULL)
1933 for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
1934 INIT_LIST_HEAD(&kranal_data.kra_conns[i]);
1936 rc = kranal_alloc_txdescs(&kranal_data.kra_idle_txs, RANAL_NTX);
1940 rc = kranal_alloc_txdescs(&kranal_data.kra_idle_nblk_txs,RANAL_NTX_NBLK);
1944 process_id.pid = requested_pid;
1945 process_id.nid = PTL_NID_ANY; /* don't know my NID yet */
1947 rc = lib_init(&kranal_lib, nal, process_id,
1948 requested_limits, actual_limits);
1950 CERROR("lib_init failed: error %d\n", rc);
1954 /* lib interface initialised */
1955 kranal_data.kra_init = RANAL_INIT_LIB;
1956 /*****************************************************/
1958 rc = kranal_thread_start(kranal_reaper, NULL);
1960 CERROR("Can't spawn ranal reaper: %d\n", rc);
1964 for (i = 0; i < RANAL_N_CONND; i++) {
 /* connd index smuggled through the void* arg */
1965 rc = kranal_thread_start(kranal_connd, (void *)i);
1967 CERROR("Can't spawn ranal connd[%d]: %d\n",
1973 LASSERT(kranal_data.kra_ndevs == 0);
 /* probe both possible devices; only working ones are counted */
1974 for (i = 0; i < sizeof(device_ids)/sizeof(device_ids[0]); i++) {
1975 dev = &kranal_data.kra_devices[kranal_data.kra_ndevs];
1977 rc = kranal_device_init(device_ids[i], dev);
1979 kranal_data.kra_ndevs++;
1981 rc = kranal_thread_start(kranal_scheduler, dev);
1983 CERROR("Can't spawn ranal scheduler[%d]: %d\n",
 /* no usable hardware at all is fatal */
1989 if (kranal_data.kra_ndevs == 0)
1992 rc = libcfs_nal_cmd_register(RANAL, &kranal_cmd, NULL);
1994 CERROR("Can't initialise command interface (rc = %d)\n", rc);
1998 /* flag everything initialised */
1999 kranal_data.kra_init = RANAL_INIT_ALL;
2000 /*****************************************************/
2002 CDEBUG(D_MALLOC, "initial kmem %d\n", atomic_read(&portal_kmemory));
2003 printk(KERN_INFO "Lustre: RapidArray NAL loaded "
2004 "(initial mem %d)\n", pkmem);
 /* failure path: unwind everything done so far */
2009 kranal_api_shutdown(&kranal_api);
/* Module unload: drop the sysctl table (if registered), shut down the
 * network interface taken at load time, and unregister the NAL. */
2014 kranal_module_fini (void)
2016 #ifdef CONFIG_SYSCTL
2017 if (kranal_tunables.kra_sysctl != NULL)
2018 unregister_sysctl_table(kranal_tunables.kra_sysctl);
2020 PtlNIFini(kranal_ni);
2022 ptl_unregister_nal(RANAL);
/* Module load: sanity-check tunable field sizes (the sysctl handlers
 * assume sizeof(int)), hook up the API entry points, set tunable
 * defaults, register the NAL, bring up the NI for pure gateways, and
 * register the sysctl table.
 * NOTE(review): decimated fragment -- defaults for tunables other than
 * kra_timeout, the '#endif' and the final 'return 0' are not visible. */
2026 kranal_module_init (void)
2030 /* the following must be sizeof(int) for
2031 * proc_dointvec/kranal_listener_procint() */
2032 LASSERT (sizeof(kranal_tunables.kra_timeout) == sizeof(int));
2033 LASSERT (sizeof(kranal_tunables.kra_listener_timeout) == sizeof(int));
2034 LASSERT (sizeof(kranal_tunables.kra_backlog) == sizeof(int));
2035 LASSERT (sizeof(kranal_tunables.kra_port) == sizeof(int));
2036 LASSERT (sizeof(kranal_tunables.kra_max_immediate) == sizeof(int));
2038 kranal_api.nal_ni_init = kranal_api_startup;
2039 kranal_api.nal_ni_fini = kranal_api_shutdown;
2041 /* Initialise dynamic tunables to defaults once only */
2042 kranal_tunables.kra_timeout = RANAL_TIMEOUT;
2044 rc = ptl_register_nal(RANAL, &kranal_api);
2046 CERROR("Can't register RANAL: %d\n", rc);
2047 return -ENOMEM; /* or something... */
2050 /* Pure gateways want the NAL started up at module load time... */
2051 rc = PtlNIInit(RANAL, LUSTRE_SRV_PTL_PID, NULL, NULL, &kranal_ni);
 /* PTL_IFACE_DUP just means someone beat us to it -- not an error */
2052 if (rc != PTL_OK && rc != PTL_IFACE_DUP) {
2053 ptl_unregister_nal(RANAL);
2057 #ifdef CONFIG_SYSCTL
2058 /* Press on regardless even if registering sysctl doesn't work */
2059 kranal_tunables.kra_sysctl =
2060 register_sysctl_table(kranal_top_ctl_table, 0);
/* Standard kernel module metadata and entry/exit hooks. */
2065 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
2066 MODULE_DESCRIPTION("Kernel RapidArray NAL v0.01");
2067 MODULE_LICENSE("GPL");
2069 module_init(kranal_module_init);
2070 module_exit(kranal_module_fini);