1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2004 Cluster File Systems, Inc.
5 * Author: Eric Barton <eric@bartonsoftware.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 ptl_handle_ni_t kranal_ni;
28 kra_data_t kranal_data;
29 kra_tunables_t kranal_tunables;
31 #define RANAL_SYSCTL_TIMEOUT 1
32 #define RANAL_SYSCTL_LISTENER_TIMEOUT 2
33 #define RANAL_SYSCTL_BACKLOG 3
34 #define RANAL_SYSCTL_PORT 4
35 #define RANAL_SYSCTL_MAX_IMMEDIATE 5
37 #define RANAL_SYSCTL 202
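/* The tables below hang a "ranal" directory off the sysctl root with the
 * five tunables above.  "timeout", "listener_timeout" and "max_immediate"
 * go straight through proc_dointvec(); "backlog" and "port" go through
 * kranal_listener_procint() so the listener socket can be restarted with
 * the new value (roughly: echo <port> > /proc/sys/ranal/port, assuming the
 * table is registered at the sysctl root as in kranal_module_init()). */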
39 static ctl_table kranal_ctl_table[] = {
40 {RANAL_SYSCTL_TIMEOUT, "timeout",
41 &kranal_tunables.kra_timeout, sizeof(int),
42 0644, NULL, &proc_dointvec},
43 {RANAL_SYSCTL_LISTENER_TIMEOUT, "listener_timeout",
44 &kranal_tunables.kra_listener_timeout, sizeof(int),
45 0644, NULL, &proc_dointvec},
46 {RANAL_SYSCTL_BACKLOG, "backlog",
47 &kranal_tunables.kra_backlog, sizeof(int),
48 0644, NULL, kranal_listener_procint},
49 {RANAL_SYSCTL_PORT, "port",
50 &kranal_tunables.kra_port, sizeof(int),
51 0644, NULL, kranal_listener_procint},
52 {RANAL_SYSCTL_MAX_IMMEDIATE, "max_immediate",
53 &kranal_tunables.kra_max_immediate, sizeof(int),
54 0644, NULL, &proc_dointvec},
58 static ctl_table kranal_top_ctl_table[] = {
59 {RANAL_SYSCTL, "ranal", NULL, 0, 0555, kranal_ctl_table},
64 kranal_sock_write (struct socket *sock, void *buffer, int nob)
67 mm_segment_t oldmm = get_fs();
79 .msg_flags = MSG_DONTWAIT
82 /* We've set up the socket's send buffer to be large enough for
83 * everything we send, so a single non-blocking send should
84 * complete without error. */
87 rc = sock_sendmsg(sock, &msg, iov.iov_len);
100 kranal_sock_read (struct socket *sock, void *buffer, int nob, int timeout)
103 mm_segment_t oldmm = get_fs();
104 long ticks = timeout * HZ;
117 struct msghdr msg = {
127 /* Set receive timeout to remaining time */
128 tv = (struct timeval) {
129 .tv_sec = ticks / HZ,
130 .tv_usec = ((ticks % HZ) * 1000000) / HZ
133 rc = sock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
134 (char *)&tv, sizeof(tv));
137 CERROR("Can't set socket recv timeout %d: %d\n",
144 rc = sock_recvmsg(sock, &msg, iov.iov_len, 0);
145 ticks -= jiffies - then;
148 CDEBUG(D_WARNING, "rc %d at %d/%d bytes %d/%d secs\n",
149 rc, wanted - nob, wanted, timeout - (int)(ticks/HZ), timeout);
155 return -ECONNABORTED;
157 buffer = ((char *)buffer) + rc;
169 kranal_create_sock(struct socket **sockp)
174 mm_segment_t oldmm = get_fs();
176 rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
178 CERROR("Can't create socket: %d\n", rc);
182 /* Ensure sending connection info doesn't block */
183 option = 2 * sizeof(kra_connreq_t);
185 rc = sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
186 (char *)&option, sizeof(option));
189 CERROR("Can't set send buffer %d: %d\n", option, rc);
195 rc = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
196 (char *)&option, sizeof(option));
199 CERROR("Can't set SO_REUSEADDR: %d\n", rc);
212 kranal_pause(int ticks)
214 set_current_state(TASK_UNINTERRUPTIBLE);
215 schedule_timeout(ticks);
219 kranal_pack_connreq(kra_connreq_t *connreq, kra_conn_t *conn, ptl_nid_t dstnid)
223 memset(connreq, 0, sizeof(*connreq));
225 connreq->racr_magic = RANAL_MSG_MAGIC;
226 connreq->racr_version = RANAL_MSG_VERSION;
227 connreq->racr_devid = conn->rac_device->rad_id;
228 connreq->racr_srcnid = kranal_lib.libnal_ni.ni_pid.nid;
229 connreq->racr_dstnid = dstnid;
230 connreq->racr_peerstamp = kranal_data.kra_peerstamp;
231 connreq->racr_connstamp = conn->rac_my_connstamp;
232 connreq->racr_timeout = conn->rac_timeout;
234 rrc = RapkGetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
235 LASSERT(rrc == RAP_SUCCESS);
237 CDEBUG(D_WARNING,"devid %d, riparams: HID %08x FDH %08x PT %08x CC %08x\n",
239 connreq->racr_riparams.HostId,
240 connreq->racr_riparams.FmaDomainHndl,
241 connreq->racr_riparams.PTag,
242 connreq->racr_riparams.CompletionCookie);
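/* Read a connreq from 'sock' within 'timeout' seconds and sanity check it:
 * byte-swap every field if the peer is opposite-endian (detected from the
 * swabbed magic), then reject bad magic, version mismatches, wildcard NIDs
 * and timeouts below RANAL_MIN_TIMEOUT. */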
246 kranal_recv_connreq(struct socket *sock, kra_connreq_t *connreq, int timeout)
250 rc = kranal_sock_read(sock, connreq, sizeof(*connreq), timeout);
252 CERROR("Read failed: %d\n", rc);
256 if (connreq->racr_magic != RANAL_MSG_MAGIC) {
257 if (__swab32(connreq->racr_magic) != RANAL_MSG_MAGIC) {
258 CERROR("Unexpected magic %08x\n", connreq->racr_magic);
262 __swab32s(&connreq->racr_magic);
263 __swab16s(&connreq->racr_version);
264 __swab16s(&connreq->racr_devid);
265 __swab64s(&connreq->racr_srcnid);
266 __swab64s(&connreq->racr_dstnid);
267 __swab64s(&connreq->racr_peerstamp);
268 __swab64s(&connreq->racr_connstamp);
269 __swab32s(&connreq->racr_timeout);
271 __swab32s(&connreq->racr_riparams.HostId);
272 __swab32s(&connreq->racr_riparams.FmaDomainHndl);
273 __swab32s(&connreq->racr_riparams.PTag);
274 __swab32s(&connreq->racr_riparams.CompletionCookie);
277 if (connreq->racr_version != RANAL_MSG_VERSION) {
278 CERROR("Unexpected version %d\n", connreq->racr_version);
282 if (connreq->racr_srcnid == PTL_NID_ANY ||
283 connreq->racr_dstnid == PTL_NID_ANY) {
284 CERROR("Received PTL_NID_ANY\n");
288 if (connreq->racr_timeout < RANAL_MIN_TIMEOUT) {
289 CERROR("Received timeout %d < MIN %d\n",
290 connreq->racr_timeout, RANAL_MIN_TIMEOUT);
298 kranal_close_stale_conns_locked (kra_peer_t *peer, kra_conn_t *newconn)
301 struct list_head *ctmp;
302 struct list_head *cnxt;
306 loopback = peer->rap_nid == kranal_lib.libnal_ni.ni_pid.nid;
308 list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
309 conn = list_entry(ctmp, kra_conn_t, rac_list);
314 if (conn->rac_peerstamp != newconn->rac_peerstamp) {
315 CDEBUG(D_NET, "Closing stale conn nid:"LPX64
316 " peerstamp:"LPX64"("LPX64")\n", peer->rap_nid,
317 conn->rac_peerstamp, newconn->rac_peerstamp);
318 LASSERT (conn->rac_peerstamp < newconn->rac_peerstamp);
320 kranal_close_conn_locked(conn, -ESTALE);
324 if (conn->rac_device != newconn->rac_device)
328 newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
329 newconn->rac_peer_connstamp == conn->rac_my_connstamp)
332 LASSERT (conn->rac_peer_connstamp < newconn->rac_peer_connstamp);
334 CDEBUG(D_NET, "Closing stale conn nid:"LPX64
335 " connstamp:"LPX64"("LPX64")\n", peer->rap_nid,
336 conn->rac_peer_connstamp, newconn->rac_peer_connstamp);
339 kranal_close_conn_locked(conn, -ESTALE);
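/* Decide whether 'newconn' duplicates a connection 'peer' already has.
 * Connections are compared by peerstamp (peer reboot/instance) and by
 * connstamp (individual connection instance); a loopback connection, where
 * both ends are this NID, matches with my/peer connstamps crossed over. */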
346 kranal_conn_isdup_locked(kra_peer_t *peer, kra_conn_t *newconn)
349 struct list_head *tmp;
352 loopback = peer->rap_nid == kranal_lib.libnal_ni.ni_pid.nid;
354 list_for_each(tmp, &peer->rap_conns) {
355 conn = list_entry(tmp, kra_conn_t, rac_list);
357 /* 'newconn' is from an earlier version of 'peer'!!! */
358 if (newconn->rac_peerstamp < conn->rac_peerstamp)
361 /* 'conn' is from an earlier version of 'peer': it will be
362 * removed when we cull stale conns later on... */
363 if (newconn->rac_peerstamp > conn->rac_peerstamp)
366 /* Different devices are OK */
367 if (conn->rac_device != newconn->rac_device)
370 /* It's me connecting to myself */
372 newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
373 newconn->rac_peer_connstamp == conn->rac_my_connstamp)
376 /* 'newconn' is an earlier connection from 'peer'!!! */
377 if (newconn->rac_peer_connstamp < conn->rac_peer_connstamp)
380 /* 'conn' is an earlier connection from 'peer': it will be
381 * removed when we cull stale conns later on... */
382 if (newconn->rac_peer_connstamp > conn->rac_peer_connstamp)
385 /* 'newconn' has the SAME connection stamp; 'peer' isn't
386 * playing the game... */
394 kranal_set_conn_uniqueness (kra_conn_t *conn)
398 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
400 conn->rac_my_connstamp = kranal_data.kra_connstamp++;
402 do { /* allocate a unique cqid */
403 conn->rac_cqid = kranal_data.kra_next_cqid++;
404 } while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL);
406 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
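/* Allocate and initialise a connection on device 'dev': one reference for
 * the caller, empty queues, a unique connstamp and cqid, a timeout clamped
 * to at least RANAL_MIN_TIMEOUT, and an RI handle from RapkCreateRi(). */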
410 kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
415 LASSERT (!in_interrupt());
416 PORTAL_ALLOC(conn, sizeof(*conn));
421 memset(conn, 0, sizeof(*conn));
422 atomic_set(&conn->rac_refcount, 1);
423 INIT_LIST_HEAD(&conn->rac_list);
424 INIT_LIST_HEAD(&conn->rac_hashlist);
425 INIT_LIST_HEAD(&conn->rac_schedlist);
426 INIT_LIST_HEAD(&conn->rac_fmaq);
427 INIT_LIST_HEAD(&conn->rac_rdmaq);
428 INIT_LIST_HEAD(&conn->rac_replyq);
429 spin_lock_init(&conn->rac_lock);
431 kranal_set_conn_uniqueness(conn);
433 conn->rac_device = dev;
434 conn->rac_timeout = MAX(kranal_tunables.kra_timeout, RANAL_MIN_TIMEOUT);
435 kranal_update_reaper_timeout(conn->rac_timeout);
437 rrc = RapkCreateRi(dev->rad_handle, conn->rac_cqid,
438 &conn->rac_rihandle);
439 if (rrc != RAP_SUCCESS) {
440 CERROR("RapkCreateRi failed: %d\n", rrc);
441 PORTAL_FREE(conn, sizeof(*conn));
445 atomic_inc(&kranal_data.kra_nconns);
451 kranal_destroy_conn(kra_conn_t *conn)
455 LASSERT (!in_interrupt());
456 LASSERT (!conn->rac_scheduled);
457 LASSERT (list_empty(&conn->rac_list));
458 LASSERT (list_empty(&conn->rac_hashlist));
459 LASSERT (list_empty(&conn->rac_schedlist));
460 LASSERT (atomic_read(&conn->rac_refcount) == 0);
461 LASSERT (list_empty(&conn->rac_fmaq));
462 LASSERT (list_empty(&conn->rac_rdmaq));
463 LASSERT (list_empty(&conn->rac_replyq));
465 rrc = RapkDestroyRi(conn->rac_device->rad_handle,
467 LASSERT (rrc == RAP_SUCCESS);
469 if (conn->rac_peer != NULL)
470 kranal_peer_decref(conn->rac_peer);
472 PORTAL_FREE(conn, sizeof(*conn));
473 atomic_dec(&kranal_data.kra_nconns);
477 kranal_terminate_conn_locked (kra_conn_t *conn)
479 LASSERT (!in_interrupt());
480 LASSERT (conn->rac_state == RANAL_CONN_CLOSING);
481 LASSERT (!list_empty(&conn->rac_hashlist));
482 LASSERT (list_empty(&conn->rac_list));
484 /* Remove from conn hash table: no new callbacks */
485 list_del_init(&conn->rac_hashlist);
486 kranal_conn_decref(conn);
488 conn->rac_state = RANAL_CONN_CLOSED;
490 /* schedule to clear out all uncompleted comms in context of dev's
491  * scheduler */
492 kranal_schedule_conn(conn);
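/* Demote an ESTABLISHED connection to CLOSING while holding the global
 * lock: unlink it from its peer (unlinking a non-persistent peer that has
 * no other conns), reset the RX timestamp so we wait for the peer's CLOSE,
 * and schedule the conn so the device scheduler sends our CLOSE. */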
496 kranal_close_conn_locked (kra_conn_t *conn, int error)
498 kra_peer_t *peer = conn->rac_peer;
500 CDEBUG(error == 0 ? D_NET : D_ERROR,
501 "closing conn to "LPX64": error %d\n", peer->rap_nid, error);
503 LASSERT (!in_interrupt());
504 LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED);
505 LASSERT (!list_empty(&conn->rac_hashlist));
506 LASSERT (!list_empty(&conn->rac_list));
508 list_del_init(&conn->rac_list);
510 if (list_empty(&peer->rap_conns) &&
511 peer->rap_persistence == 0) {
512 /* Non-persistent peer with no more conns... */
513 kranal_unlink_peer_locked(peer);
516 /* Reset RX timeout to ensure we wait for an incoming CLOSE for the
517  * full timeout */
518 conn->rac_last_rx = jiffies;
521 conn->rac_state = RANAL_CONN_CLOSING;
522 kranal_schedule_conn(conn); /* schedule sending CLOSE */
524 kranal_conn_decref(conn); /* lose peer's ref */
528 kranal_close_conn (kra_conn_t *conn, int error)
533 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
535 if (conn->rac_state == RANAL_CONN_ESTABLISHED)
536 kranal_close_conn_locked(conn, error);
538 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
542 kranal_set_conn_params(kra_conn_t *conn, kra_connreq_t *connreq,
543 __u32 peer_ip, int peer_port)
547 CDEBUG(D_WARNING,"devid %d, riparams: HID %08x FDH %08x PT %08x CC %08x\n",
548 conn->rac_device->rad_id,
549 connreq->racr_riparams.HostId,
550 connreq->racr_riparams.FmaDomainHndl,
551 connreq->racr_riparams.PTag,
552 connreq->racr_riparams.CompletionCookie);
554 rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
555 if (rrc != RAP_SUCCESS) {
556 CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n",
557 HIPQUAD(peer_ip), peer_port, rrc);
561 conn->rac_peerstamp = connreq->racr_peerstamp;
562 conn->rac_peer_connstamp = connreq->racr_connstamp;
563 conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout);
564 kranal_update_reaper_timeout(conn->rac_keepalive);
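/* Passive connection handshake on a socket the listener accepted: identify
 * the peer (privileged source ports only), read its connreq, match the
 * requested RapidArray device, create a conn, reply with our own connreq
 * and adopt the peer's riparams and stamps. */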
569 kranal_passive_conn_handshake (struct socket *sock, ptl_nid_t *src_nidp,
570 ptl_nid_t *dst_nidp, kra_conn_t **connp)
572 struct sockaddr_in addr;
574 unsigned int peer_port;
575 kra_connreq_t rx_connreq;
576 kra_connreq_t tx_connreq;
583 CDEBUG(D_WARNING,"!!\n");
586 rc = sock->ops->getname(sock, (struct sockaddr *)&addr, &len, 2);
588 CERROR("Can't get peer's IP: %d\n", rc);
592 peer_ip = ntohl(addr.sin_addr.s_addr);
593 peer_port = ntohs(addr.sin_port);
595 CDEBUG(D_WARNING,"%u.%u.%u.%u\n", HIPQUAD(peer_ip));
597 if (peer_port >= 1024) {
598 CERROR("Refusing unprivileged connection from %u.%u.%u.%u/%d\n",
599 HIPQUAD(peer_ip), peer_port);
600 return -ECONNREFUSED;
603 CDEBUG(D_WARNING,"%u.%u.%u.%u\n", HIPQUAD(peer_ip));
605 rc = kranal_recv_connreq(sock, &rx_connreq,
606 kranal_tunables.kra_listener_timeout);
608 CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
609 HIPQUAD(peer_ip), peer_port, rc);
613 CDEBUG(D_WARNING,"%u.%u.%u.%u\n", HIPQUAD(peer_ip));
616 if (i == kranal_data.kra_ndevs) {
617 CERROR("Can't match dev %d from %u.%u.%u.%u/%d\n",
618 rx_connreq.racr_devid, HIPQUAD(peer_ip), peer_port);
621 dev = &kranal_data.kra_devices[i];
622 if (dev->rad_id == rx_connreq.racr_devid)
626 CDEBUG(D_WARNING,"%u.%u.%u.%u\n", HIPQUAD(peer_ip));
628 rc = kranal_create_conn(&conn, dev);
632 CDEBUG(D_WARNING,"%u.%u.%u.%u\n", HIPQUAD(peer_ip));
634 kranal_pack_connreq(&tx_connreq, conn, rx_connreq.racr_srcnid);
636 rc = kranal_sock_write(sock, &tx_connreq, sizeof(tx_connreq));
638 CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
639 HIPQUAD(peer_ip), peer_port, rc);
640 kranal_conn_decref(conn);
644 CDEBUG(D_WARNING,"%u.%u.%u.%u\n", HIPQUAD(peer_ip));
646 rc = kranal_set_conn_params(conn, &rx_connreq, peer_ip, peer_port);
648 kranal_conn_decref(conn);
652 CDEBUG(D_WARNING,"%u.%u.%u.%u\n", HIPQUAD(peer_ip));
655 *src_nidp = rx_connreq.racr_srcnid;
656 *dst_nidp = rx_connreq.racr_dstnid;
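/* Connect a TCP socket to 'peer''s listener, binding the local end to a
 * reserved port (1023 down to 512) since the passive side refuses
 * connections from unprivileged ports. */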
661 ranal_connect_sock(kra_peer_t *peer, struct socket **sockp)
663 struct sockaddr_in locaddr;
664 struct sockaddr_in srvaddr;
669 for (port = 1023; port >= 512; port--) {
671 memset(&locaddr, 0, sizeof(locaddr));
672 locaddr.sin_family = AF_INET;
673 locaddr.sin_port = htons(port);
674 locaddr.sin_addr.s_addr = htonl(INADDR_ANY);
676 memset (&srvaddr, 0, sizeof (srvaddr));
677 srvaddr.sin_family = AF_INET;
678 srvaddr.sin_port = htons (peer->rap_port);
679 srvaddr.sin_addr.s_addr = htonl (peer->rap_ip);
681 rc = kranal_create_sock(&sock);
685 rc = sock->ops->bind(sock,
686 (struct sockaddr *)&locaddr, sizeof(locaddr));
690 if (rc == -EADDRINUSE) {
691 CDEBUG(D_NET, "Port %d already in use\n", port);
695 CERROR("Can't bind to reserved port %d: %d\n", port, rc);
699 rc = sock->ops->connect(sock,
700 (struct sockaddr *)&srvaddr, sizeof(srvaddr),
709 if (rc != -EADDRNOTAVAIL) {
710 CERROR("Can't connect port %d to %u.%u.%u.%u/%d: %d\n",
711 port, HIPQUAD(peer->rap_ip), peer->rap_port, rc);
715 CDEBUG(D_NET, "Port %d not available for %u.%u.%u.%u/%d\n",
716 port, HIPQUAD(peer->rap_ip), peer->rap_port);
720 return -EHOSTUNREACH;
725 kranal_active_conn_handshake(kra_peer_t *peer,
726 ptl_nid_t *dst_nidp, kra_conn_t **connp)
728 kra_connreq_t connreq;
735 CDEBUG(D_WARNING,LPX64"\n", peer->rap_nid);
737 /* spread connections over all devices using both peer NIDs to ensure
738 * all nids use all devices */
739 idx = peer->rap_nid + kranal_lib.libnal_ni.ni_pid.nid;
740 dev = &kranal_data.kra_devices[idx % kranal_data.kra_ndevs];
742 rc = kranal_create_conn(&conn, dev);
746 CDEBUG(D_WARNING,LPX64"\n", peer->rap_nid);
748 kranal_pack_connreq(&connreq, conn, peer->rap_nid);
750 rc = ranal_connect_sock(peer, &sock);
754 CDEBUG(D_WARNING,LPX64"\n", peer->rap_nid);
756 /* CAVEAT EMPTOR: the passive side receives with a SHORT rx timeout
757 * immediately after accepting a connection, so we connect and then
758 * send immediately. */
760 rc = kranal_sock_write(sock, &connreq, sizeof(connreq));
762 CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
763 HIPQUAD(peer->rap_ip), peer->rap_port, rc);
767 CDEBUG(D_WARNING,LPX64"\n", peer->rap_nid);
769 rc = kranal_recv_connreq(sock, &connreq, kranal_tunables.kra_timeout);
771 CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
772 HIPQUAD(peer->rap_ip), peer->rap_port, rc);
776 CDEBUG(D_WARNING,LPX64"\n", peer->rap_nid);
781 if (connreq.racr_srcnid != peer->rap_nid) {
782 CERROR("Unexpected srcnid from %u.%u.%u.%u/%d: "
783 "received "LPX64" expected "LPX64"\n",
784 HIPQUAD(peer->rap_ip), peer->rap_port,
785 connreq.racr_srcnid, peer->rap_nid);
789 if (connreq.racr_devid != dev->rad_id) {
790 CERROR("Unexpected device id from %u.%u.%u.%u/%d: "
791 "received %d expected %d\n",
792 HIPQUAD(peer->rap_ip), peer->rap_port,
793 connreq.racr_devid, dev->rad_id);
797 CDEBUG(D_WARNING,LPX64"\n", peer->rap_nid);
799 rc = kranal_set_conn_params(conn, &connreq,
800 peer->rap_ip, peer->rap_port);
805 *dst_nidp = connreq.racr_dstnid;
806 CDEBUG(D_WARNING,LPX64"\n", peer->rap_nid);
812 kranal_conn_decref(conn);
813 CDEBUG(D_WARNING,LPX64": %d\n", peer->rap_nid, rc);
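/* Do the connection handshake with a peer, either actively (peer != NULL:
 * initiated by connd) or passively ('sock' was accepted by the listener),
 * then install the new conn in the peer table and cqid hash, queue any
 * blocked transmits on it and cull the stale conns it supersedes. */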
818 kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
831 /* active: connd wants to connect to 'peer' */
832 LASSERT (peer != NULL);
833 LASSERT (peer->rap_connecting);
835 rc = kranal_active_conn_handshake(peer, &dst_nid, &conn);
839 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
841 if (!kranal_peer_active(peer)) {
842 /* raced with peer getting unlinked */
843 write_unlock_irqrestore(&kranal_data.kra_global_lock,
845 kranal_conn_decref(conn);
849 peer_nid = peer->rap_nid;
851 /* passive: listener accepted 'sock' */
852 LASSERT (peer == NULL);
854 rc = kranal_passive_conn_handshake(sock, &peer_nid,
859 /* assume this is a new peer */
860 peer = kranal_create_peer(peer_nid);
862 CERROR("Can't allocate peer for "LPX64"\n", peer_nid);
863 kranal_conn_decref(conn);
867 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
869 peer2 = kranal_find_peer_locked(peer_nid);
873 /* peer_nid already in the peer table */
874 kranal_peer_decref(peer);
879 LASSERT ((!new_peer) != (!kranal_peer_active(peer)));
881 /* Refuse connection if peer thinks we are a different NID. We check
882 * this while holding the global lock, to synch with connection
883 * destruction on NID change. */
884 if (dst_nid != kranal_lib.libnal_ni.ni_pid.nid) {
885 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
887 CERROR("Stale/bad connection with "LPX64
888 ": dst_nid "LPX64", expected "LPX64"\n",
889 peer_nid, dst_nid, kranal_lib.libnal_ni.ni_pid.nid);
894 /* Refuse to duplicate an existing connection (both sides might try to
895 * connect at once). NB we return success! We _are_ connected so we
896 * _don't_ have any blocked txs to complete with failure. */
897 rc = kranal_conn_isdup_locked(peer, conn);
899 LASSERT (!list_empty(&peer->rap_conns));
900 LASSERT (list_empty(&peer->rap_tx_queue));
901 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
902 CWARN("Not creating duplicate connection to "LPX64": %d\n",
909 /* peer table takes my ref on the new peer */
910 list_add_tail(&peer->rap_list,
911 kranal_nid2peerlist(peer_nid));
914 kranal_peer_addref(peer); /* +1 ref for conn */
915 conn->rac_peer = peer;
916 list_add_tail(&conn->rac_list, &peer->rap_conns);
918 kranal_conn_addref(conn); /* +1 ref for conn table */
919 list_add_tail(&conn->rac_hashlist,
920 kranal_cqid2connlist(conn->rac_cqid));
922 /* Schedule all packets blocking for a connection */
923 while (!list_empty(&peer->rap_tx_queue)) {
924 tx = list_entry(peer->rap_tx_queue.next,
927 list_del(&tx->tx_list);
928 kranal_post_fma(conn, tx);
931 nstale = kranal_close_stale_conns_locked(peer, conn);
933 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
935 /* CAVEAT EMPTOR: passive peer can disappear NOW */
938 CWARN("Closed %d stale conns to "LPX64"\n", nstale, peer_nid);
940 /* Ensure conn gets checked. Transmits may have been queued and an
941 * FMA event may have happened before it got in the cq hash table */
942 kranal_schedule_conn(conn);
947 kranal_peer_decref(peer);
948 kranal_conn_decref(conn);
953 kranal_connect (kra_peer_t *peer)
957 struct list_head zombies;
960 LASSERT (peer->rap_connecting);
962 CDEBUG(D_WARNING,"About to handshake "LPX64"\n", peer->rap_nid);
964 rc = kranal_conn_handshake(NULL, peer);
966 CDEBUG(D_WARNING,"Done handshake "LPX64":%d \n", peer->rap_nid, rc);
968 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
970 LASSERT (peer->rap_connecting);
971 peer->rap_connecting = 0;
974 /* kranal_conn_handshake() queues blocked txs immediately on
975 * success to avoid messages jumping the queue */
976 LASSERT (list_empty(&peer->rap_tx_queue));
978 /* reset reconnection timeouts */
979 peer->rap_reconnect_interval = RANAL_MIN_RECONNECT_INTERVAL;
980 peer->rap_reconnect_time = CURRENT_SECONDS;
982 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
986 LASSERT (peer->rap_reconnect_interval != 0);
987 peer->rap_reconnect_time = CURRENT_SECONDS + peer->rap_reconnect_interval;
988 peer->rap_reconnect_interval = MIN(RANAL_MAX_RECONNECT_INTERVAL,
989 2 * peer->rap_reconnect_interval);
991 /* Grab all blocked packets while we have the global lock */
992 list_add(&zombies, &peer->rap_tx_queue);
993 list_del_init(&peer->rap_tx_queue);
995 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
997 if (list_empty(&zombies))
1000 CWARN("Dropping packets for "LPX64": connection failed\n",
1004 tx = list_entry(zombies.next, kra_tx_t, tx_list);
1006 list_del(&tx->tx_list);
1007 kranal_tx_done(tx, -EHOSTUNREACH);
1009 } while (!list_empty(&zombies));
1013 kranal_free_acceptsock (kra_acceptsock_t *ras)
1015 sock_release(ras->ras_sock);
1016 PORTAL_FREE(ras, sizeof(*ras));
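/* Listener thread: bind and listen on the configured port, then loop
 * accepting connections non-blockingly and queueing each accepted socket
 * on the connd acceptq for a connd thread to handshake.  The parent blocks
 * on kra_listener_signal until the listener socket is up (or setup fails). */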
1020 kranal_listener (void *arg)
1022 struct sockaddr_in addr;
1024 struct socket *sock;
1025 kra_acceptsock_t *ras;
1029 unsigned long flags;
1031 /* Parent thread holds kra_nid_mutex, and is, or is about to
1032 * block on kra_listener_signal */
1034 port = kranal_tunables.kra_port;
1035 snprintf(name, sizeof(name), "kranal_lstn%03d", port);
1036 kportal_daemonize(name);
1037 kportal_blockallsigs();
1039 init_waitqueue_entry(&wait, current);
1041 rc = kranal_create_sock(&sock);
1045 memset(&addr, 0, sizeof(addr));
1046 addr.sin_family = AF_INET;
1047 addr.sin_port = htons(port);
1048 addr.sin_addr.s_addr = INADDR_ANY;
1050 rc = sock->ops->bind(sock, (struct sockaddr *)&addr, sizeof(addr));
1052 CERROR("Can't bind to port %d\n", port);
1056 rc = sock->ops->listen(sock, kranal_tunables.kra_backlog);
1058 CERROR("Can't set listen backlog %d: %d\n",
1059 kranal_tunables.kra_backlog, rc);
1063 LASSERT (kranal_data.kra_listener_sock == NULL);
1064 kranal_data.kra_listener_sock = sock;
1066 /* unblock waiting parent */
1067 LASSERT (kranal_data.kra_listener_shutdown == 0);
1068 up(&kranal_data.kra_listener_signal);
1070 /* Wake me any time something happens on my socket */
1071 add_wait_queue(sock->sk->sk_sleep, &wait);
1074 while (kranal_data.kra_listener_shutdown == 0) {
1077 PORTAL_ALLOC(ras, sizeof(*ras));
1079 CERROR("Out of Memory: pausing...\n");
1083 ras->ras_sock = NULL;
1086 if (ras->ras_sock == NULL) {
1087 ras->ras_sock = sock_alloc();
1088 if (ras->ras_sock == NULL) {
1089 CERROR("Can't allocate socket: pausing...\n");
1093 /* XXX this should add a ref to sock->ops->owner, if
1094 * TCP could be a module */
1095 ras->ras_sock->type = sock->type;
1096 ras->ras_sock->ops = sock->ops;
1099 set_current_state(TASK_INTERRUPTIBLE);
1101 rc = sock->ops->accept(sock, ras->ras_sock, O_NONBLOCK);
1103 /* Sleep for socket activity? */
1104 if (rc == -EAGAIN &&
1105 kranal_data.kra_listener_shutdown == 0)
1108 set_current_state(TASK_RUNNING);
1111 spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
1113 list_add_tail(&ras->ras_list,
1114 &kranal_data.kra_connd_acceptq);
1116 spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
1117 wake_up(&kranal_data.kra_connd_waitq);
1123 if (rc != -EAGAIN) {
1124 CERROR("Accept failed: %d, pausing...\n", rc);
1130 if (ras->ras_sock != NULL)
1131 sock_release(ras->ras_sock);
1132 PORTAL_FREE(ras, sizeof(*ras));
1136 remove_wait_queue(sock->sk->sk_sleep, &wait);
1139 kranal_data.kra_listener_sock = NULL;
1141 /* set completion status and unblock thread waiting for me
1142 * (parent on startup failure, executioner on normal shutdown) */
1143 kranal_data.kra_listener_shutdown = rc;
1144 up(&kranal_data.kra_listener_signal);
1150 kranal_start_listener (void)
1155 CDEBUG(D_WARNING, "Starting listener\n");
1157 /* Called holding kra_nid_mutex: listener stopped */
1158 LASSERT (kranal_data.kra_listener_sock == NULL);
1160 kranal_data.kra_listener_shutdown = 0;
1161 pid = kernel_thread(kranal_listener, NULL, 0);
1163 CERROR("Can't spawn listener: %ld\n", pid);
1167 /* Block until listener has started up. */
1168 down(&kranal_data.kra_listener_signal);
1170 rc = kranal_data.kra_listener_shutdown;
1171 LASSERT ((rc != 0) == (kranal_data.kra_listener_sock == NULL));
1173 CDEBUG(D_WARNING, "Listener %ld started OK\n", pid);
1178 kranal_stop_listener(int clear_acceptq)
1180 struct list_head zombie_accepts;
1181 unsigned long flags;
1182 kra_acceptsock_t *ras;
1184 CDEBUG(D_WARNING, "Stopping listener\n");
1186 /* Called holding kra_nid_mutex: listener running */
1187 LASSERT (kranal_data.kra_listener_sock != NULL);
1189 kranal_data.kra_listener_shutdown = 1;
1190 wake_up_all(kranal_data.kra_listener_sock->sk->sk_sleep);
1192 /* Block until listener has torn down. */
1193 down(&kranal_data.kra_listener_signal);
1195 LASSERT (kranal_data.kra_listener_sock == NULL);
1196 CDEBUG(D_WARNING, "Listener stopped\n");
1201 /* Close any unhandled accepts */
1202 spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
1204 list_add(&zombie_accepts, &kranal_data.kra_connd_acceptq);
1205 list_del_init(&kranal_data.kra_connd_acceptq);
1207 spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
1209 while (!list_empty(&zombie_accepts)) {
1210 ras = list_entry(zombie_accepts.next,
1211 kra_acceptsock_t, ras_list);
1212 list_del(&ras->ras_list);
1213 kranal_free_acceptsock(ras);
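/* sysctl write handler for "port" and "backlog": run proc_dointvec() under
 * kra_nid_mutex and, if the value changed (or the listener is down),
 * restart the listener; on failure revert to the old value and try to
 * bring the listener back with it. */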
1218 kranal_listener_procint(ctl_table *table, int write, struct file *filp,
1219 void *buffer, size_t *lenp)
1221 int *tunable = (int *)table->data;
1225 /* No race with nal initialisation since the nal is set up all the time
1226 * it's loaded. When that changes, change this! */
1227 LASSERT (kranal_data.kra_init == RANAL_INIT_ALL);
1229 down(&kranal_data.kra_nid_mutex);
1231 LASSERT (tunable == &kranal_tunables.kra_port ||
1232 tunable == &kranal_tunables.kra_backlog);
1235 rc = proc_dointvec(table, write, filp, buffer, lenp);
1238 (*tunable != old_val ||
1239 kranal_data.kra_listener_sock == NULL)) {
1241 if (kranal_data.kra_listener_sock != NULL)
1242 kranal_stop_listener(0);
1244 rc = kranal_start_listener();
1247 CWARN("Unable to start listener with new tunable:"
1248 " reverting to old value\n");
1250 kranal_start_listener();
1254 up(&kranal_data.kra_nid_mutex);
1256 LASSERT (kranal_data.kra_init == RANAL_INIT_ALL);
1261 kranal_set_mynid(ptl_nid_t nid)
1263 unsigned long flags;
1264 lib_ni_t *ni = &kranal_lib.libnal_ni;
1267 CDEBUG(D_NET, "setting mynid to "LPX64" (old nid="LPX64")\n",
1268 nid, ni->ni_pid.nid);
1270 down(&kranal_data.kra_nid_mutex);
1272 if (nid == ni->ni_pid.nid) {
1273 /* no change of NID */
1274 up(&kranal_data.kra_nid_mutex);
1278 if (kranal_data.kra_listener_sock != NULL)
1279 kranal_stop_listener(1);
1281 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1282 kranal_data.kra_peerstamp++;
1283 ni->ni_pid.nid = nid;
1284 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
1286 /* Delete all existing peers and their connections after new
1287 * NID/connstamp set to ensure no old connections in our brave
1288  * new world */
1289 kranal_del_peer(PTL_NID_ANY, 0);
1291 if (nid != PTL_NID_ANY)
1292 rc = kranal_start_listener();
1294 up(&kranal_data.kra_nid_mutex);
1299 kranal_create_peer (ptl_nid_t nid)
1303 LASSERT (nid != PTL_NID_ANY);
1305 PORTAL_ALLOC(peer, sizeof(*peer));
1309 memset(peer, 0, sizeof(*peer)); /* zero flags etc */
1311 peer->rap_nid = nid;
1312 atomic_set(&peer->rap_refcount, 1); /* 1 ref for caller */
1314 INIT_LIST_HEAD(&peer->rap_list);
1315 INIT_LIST_HEAD(&peer->rap_connd_list);
1316 INIT_LIST_HEAD(&peer->rap_conns);
1317 INIT_LIST_HEAD(&peer->rap_tx_queue);
1319 peer->rap_reconnect_time = CURRENT_SECONDS;
1320 peer->rap_reconnect_interval = RANAL_MIN_RECONNECT_INTERVAL;
1322 atomic_inc(&kranal_data.kra_npeers);
1327 kranal_destroy_peer (kra_peer_t *peer)
1329 CDEBUG(D_NET, "peer "LPX64" %p deleted\n", peer->rap_nid, peer);
1331 LASSERT (atomic_read(&peer->rap_refcount) == 0);
1332 LASSERT (peer->rap_persistence == 0);
1333 LASSERT (!kranal_peer_active(peer));
1334 LASSERT (!peer->rap_connecting);
1335 LASSERT (list_empty(&peer->rap_conns));
1336 LASSERT (list_empty(&peer->rap_tx_queue));
1337 LASSERT (list_empty(&peer->rap_connd_list));
1339 PORTAL_FREE(peer, sizeof(*peer));
1341 /* NB a peer's connections keep a reference on their peer until
1342 * they are destroyed, so we can be assured that _all_ state to do
1343 * with this peer has been cleaned up when its refcount drops to
1344  * zero */
1345 atomic_dec(&kranal_data.kra_npeers);
1349 kranal_find_peer_locked (ptl_nid_t nid)
1351 struct list_head *peer_list = kranal_nid2peerlist(nid);
1352 struct list_head *tmp;
1355 list_for_each (tmp, peer_list) {
1357 peer = list_entry(tmp, kra_peer_t, rap_list);
1359 LASSERT (peer->rap_persistence > 0 || /* persistent peer */
1360 !list_empty(&peer->rap_conns)); /* active conn */
1362 if (peer->rap_nid != nid)
1365 CDEBUG(D_NET, "got peer [%p] -> "LPX64" (%d)\n",
1366 peer, nid, atomic_read(&peer->rap_refcount));
1373 kranal_find_peer (ptl_nid_t nid)
1377 read_lock(&kranal_data.kra_global_lock);
1378 peer = kranal_find_peer_locked(nid);
1379 if (peer != NULL) /* +1 ref for caller? */
1380 kranal_peer_addref(peer);
1381 read_unlock(&kranal_data.kra_global_lock);
1387 kranal_unlink_peer_locked (kra_peer_t *peer)
1389 LASSERT (peer->rap_persistence == 0);
1390 LASSERT (list_empty(&peer->rap_conns));
1392 LASSERT (kranal_peer_active(peer));
1393 list_del_init(&peer->rap_list);
1395 /* lose peerlist's ref */
1396 kranal_peer_decref(peer);
1400 kranal_get_peer_info (int index, ptl_nid_t *nidp, __u32 *ipp, int *portp,
1404 struct list_head *ptmp;
1407 read_lock(&kranal_data.kra_global_lock);
1409 for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
1411 list_for_each(ptmp, &kranal_data.kra_peers[i]) {
1413 peer = list_entry(ptmp, kra_peer_t, rap_list);
1414 LASSERT (peer->rap_persistence > 0 ||
1415 !list_empty(&peer->rap_conns));
1420 *nidp = peer->rap_nid;
1421 *ipp = peer->rap_ip;
1422 *portp = peer->rap_port;
1423 *persistencep = peer->rap_persistence;
1425 read_unlock(&kranal_data.kra_global_lock);
1430 read_unlock(&kranal_data.kra_global_lock);
1435 kranal_add_persistent_peer (ptl_nid_t nid, __u32 ip, int port)
1437 unsigned long flags;
1441 if (nid == PTL_NID_ANY)
1444 peer = kranal_create_peer(nid);
1448 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1450 peer2 = kranal_find_peer_locked(nid);
1451 if (peer2 != NULL) {
1452 kranal_peer_decref(peer);
1455 /* peer table takes existing ref on peer */
1456 list_add_tail(&peer->rap_list,
1457 kranal_nid2peerlist(nid));
1461 peer->rap_port = port;
1462 peer->rap_persistence++;
1464 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
1469 kranal_del_peer_locked (kra_peer_t *peer, int single_share)
1471 struct list_head *ctmp;
1472 struct list_head *cnxt;
1476 peer->rap_persistence = 0;
1477 else if (peer->rap_persistence > 0)
1478 peer->rap_persistence--;
1480 if (peer->rap_persistence != 0)
1483 if (list_empty(&peer->rap_conns)) {
1484 kranal_unlink_peer_locked(peer);
1486 list_for_each_safe(ctmp, cnxt, &peer->rap_conns) {
1487 conn = list_entry(ctmp, kra_conn_t, rac_list);
1489 kranal_close_conn_locked(conn, 0);
1491 /* peer unlinks itself when last conn is closed */
1496 kranal_del_peer (ptl_nid_t nid, int single_share)
1498 unsigned long flags;
1499 struct list_head *ptmp;
1500 struct list_head *pnxt;
1507 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1509 if (nid != PTL_NID_ANY)
1510 lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
1513 hi = kranal_data.kra_peer_hash_size - 1;
1516 for (i = lo; i <= hi; i++) {
1517 list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
1518 peer = list_entry(ptmp, kra_peer_t, rap_list);
1519 LASSERT (peer->rap_persistence > 0 ||
1520 !list_empty(&peer->rap_conns));
1522 if (!(nid == PTL_NID_ANY || peer->rap_nid == nid))
1525 kranal_del_peer_locked(peer, single_share);
1526 rc = 0; /* matched something */
1533 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
1539 kranal_get_conn_by_idx (int index)
1542 struct list_head *ptmp;
1544 struct list_head *ctmp;
1547 read_lock (&kranal_data.kra_global_lock);
1549 for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
1550 list_for_each (ptmp, &kranal_data.kra_peers[i]) {
1552 peer = list_entry(ptmp, kra_peer_t, rap_list);
1553 LASSERT (peer->rap_persistence > 0 ||
1554 !list_empty(&peer->rap_conns));
1556 list_for_each (ctmp, &peer->rap_conns) {
1560 conn = list_entry(ctmp, kra_conn_t, rac_list);
1561 CDEBUG(D_NET, "++conn[%p] -> "LPX64" (%d)\n",
1562 conn, conn->rac_peer->rap_nid,
1563 atomic_read(&conn->rac_refcount));
1564 atomic_inc(&conn->rac_refcount);
1565 read_unlock(&kranal_data.kra_global_lock);
1571 read_unlock(&kranal_data.kra_global_lock);
1576 kranal_close_peer_conns_locked (kra_peer_t *peer, int why)
1579 struct list_head *ctmp;
1580 struct list_head *cnxt;
1583 list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
1584 conn = list_entry(ctmp, kra_conn_t, rac_list);
1587 kranal_close_conn_locked(conn, why);
1594 kranal_close_matching_conns (ptl_nid_t nid)
1596 unsigned long flags;
1598 struct list_head *ptmp;
1599 struct list_head *pnxt;
1605 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1607 if (nid != PTL_NID_ANY)
1608 lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
1611 hi = kranal_data.kra_peer_hash_size - 1;
1614 for (i = lo; i <= hi; i++) {
1615 list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
1617 peer = list_entry(ptmp, kra_peer_t, rap_list);
1618 LASSERT (peer->rap_persistence > 0 ||
1619 !list_empty(&peer->rap_conns));
1621 if (!(nid == PTL_NID_ANY || nid == peer->rap_nid))
1624 count += kranal_close_peer_conns_locked(peer, 0);
1628 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
1630 /* wildcards always succeed */
1631 if (nid == PTL_NID_ANY)
1634 return (count == 0) ? -ENOENT : 0;
1638 kranal_cmd(struct portals_cfg *pcfg, void * private)
1642 LASSERT (pcfg != NULL);
1644 switch(pcfg->pcfg_command) {
1645 case NAL_CMD_GET_PEER: {
1649 int share_count = 0;
1651 rc = kranal_get_peer_info(pcfg->pcfg_count,
1652 &nid, &ip, &port, &share_count);
1653 pcfg->pcfg_nid = nid;
1654 pcfg->pcfg_size = 0;
1656 pcfg->pcfg_misc = port;
1657 pcfg->pcfg_count = 0;
1658 pcfg->pcfg_wait = share_count;
1661 case NAL_CMD_ADD_PEER: {
1662 rc = kranal_add_persistent_peer(pcfg->pcfg_nid,
1663 pcfg->pcfg_id, /* IP */
1664 pcfg->pcfg_misc); /* port */
1667 case NAL_CMD_DEL_PEER: {
1668 rc = kranal_del_peer(pcfg->pcfg_nid,
1669 /* flags == single_share */
1670 pcfg->pcfg_flags != 0);
1673 case NAL_CMD_GET_CONN: {
1674 kra_conn_t *conn = kranal_get_conn_by_idx(pcfg->pcfg_count);
1680 pcfg->pcfg_nid = conn->rac_peer->rap_nid;
1681 pcfg->pcfg_id = conn->rac_device->rad_id;
1682 pcfg->pcfg_misc = 0;
1683 pcfg->pcfg_flags = 0;
1684 kranal_conn_decref(conn);
1688 case NAL_CMD_CLOSE_CONNECTION: {
1689 rc = kranal_close_matching_conns(pcfg->pcfg_nid);
1692 case NAL_CMD_REGISTER_MYNID: {
1693 if (pcfg->pcfg_nid == PTL_NID_ANY)
1696 rc = kranal_set_mynid(pcfg->pcfg_nid);
1705 kranal_free_txdescs(struct list_head *freelist)
1709 while (!list_empty(freelist)) {
1710 tx = list_entry(freelist->next, kra_tx_t, tx_list);
1712 list_del(&tx->tx_list);
1713 PORTAL_FREE(tx->tx_phys, PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
1714 PORTAL_FREE(tx, sizeof(*tx));
1719 kranal_alloc_txdescs(struct list_head *freelist, int n)
1721 int isnblk = (freelist == &kranal_data.kra_idle_nblk_txs);
1725 LASSERT (freelist == &kranal_data.kra_idle_txs ||
1726 freelist == &kranal_data.kra_idle_nblk_txs);
1727 LASSERT (list_empty(freelist));
1729 for (i = 0; i < n; i++) {
1731 PORTAL_ALLOC(tx, sizeof(*tx));
1733 CERROR("Can't allocate %stx[%d]\n",
1734 isnblk ? "nblk " : "", i);
1735 kranal_free_txdescs(freelist);
1739 PORTAL_ALLOC(tx->tx_phys,
1740 PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
1741 if (tx->tx_phys == NULL) {
1742 CERROR("Can't allocate %stx[%d]->tx_phys\n",
1743 isnblk ? "nblk " : "", i);
1745 PORTAL_FREE(tx, sizeof(*tx));
1746 kranal_free_txdescs(freelist);
1750 tx->tx_isnblk = isnblk;
1751 tx->tx_buftype = RANAL_BUF_NONE;
1752 tx->tx_msg.ram_type = RANAL_MSG_NONE;
1754 list_add(&tx->tx_list, freelist);
1761 kranal_device_init(int id, kra_device_t *dev)
1763 const int total_ntx = RANAL_NTX + RANAL_NTX_NBLK;
1767 rrc = RapkGetDeviceByIndex(id, kranal_device_callback,
1769 if (rrc != RAP_SUCCESS) {
1770 CERROR("Can't get Rapidarray Device %d: %d\n", id, rrc);
1774 rrc = RapkReserveRdma(dev->rad_handle, total_ntx);
1775 if (rrc != RAP_SUCCESS) {
1776 CERROR("Can't reserve %d RDMA descriptors"
1777 " for device %d: %d\n", total_ntx, id, rrc);
1781 rrc = RapkCreateCQ(dev->rad_handle, total_ntx, RAP_CQTYPE_SEND,
1782 &dev->rad_rdma_cqh);
1783 if (rrc != RAP_SUCCESS) {
1784 CERROR("Can't create rdma cq size %d"
1785 " for device %d: %d\n", total_ntx, id, rrc);
1789 rrc = RapkCreateCQ(dev->rad_handle, RANAL_FMA_CQ_SIZE, RAP_CQTYPE_RECV,
1791 if (rrc != RAP_SUCCESS) {
1792 CERROR("Can't create fma cq size %d"
1793 " for device %d: %d\n", RANAL_FMA_CQ_SIZE, id, rrc);
1800 RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
1802 RapkReleaseDevice(dev->rad_handle);
1808 kranal_device_fini(kra_device_t *dev)
1810 LASSERT(dev->rad_scheduler == NULL);
1811 RapkDestroyCQ(dev->rad_handle, dev->rad_fma_cqh);
1812 RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
1813 RapkReleaseDevice(dev->rad_handle);
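/* Tear the NAL down in reverse order of startup: unregister the command
 * interface, reset the NID (which stops the listener and deletes all
 * peers), wait for peers and conns to drain, shut down the library, flag
 * the threads to exit and wait for them, then free the peer/conn hash
 * tables, the tx descriptors and the devices. */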
1817 kranal_api_shutdown (nal_t *nal)
1820 unsigned long flags;
1822 if (nal->nal_refct != 0) {
1823 /* This module got the first ref */
1824 PORTAL_MODULE_UNUSE;
1828 CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
1829 atomic_read(&portal_kmemory));
1831 LASSERT (nal == &kranal_api);
1833 switch (kranal_data.kra_init) {
1835 CERROR("Unexpected state %d\n", kranal_data.kra_init);
1838 case RANAL_INIT_ALL:
1839 /* stop calls to nal_cmd */
1840 libcfs_nal_cmd_unregister(RANAL);
1841 /* No new persistent peers */
1843 /* resetting my NID unadvertises me, removes my
1844 * listener and nukes all current peers */
1845 kranal_set_mynid(PTL_NID_ANY);
1846 /* no new peers or conns */
1848 /* Wait for all peer/conn state to clean up */
1850 while (atomic_read(&kranal_data.kra_nconns) != 0 ||
1851 atomic_read(&kranal_data.kra_npeers) != 0) {
1853 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
1854 "waiting for %d peers and %d conns to close down\n",
1855 atomic_read(&kranal_data.kra_npeers),
1856 atomic_read(&kranal_data.kra_nconns));
1861 case RANAL_INIT_LIB:
1862 lib_fini(&kranal_lib);
1865 case RANAL_INIT_DATA:
1869 /* flag threads to terminate; wake and wait for them to die */
1870 kranal_data.kra_shutdown = 1;
1872 for (i = 0; i < kranal_data.kra_ndevs; i++) {
1873 kra_device_t *dev = &kranal_data.kra_devices[i];
1875 LASSERT (list_empty(&dev->rad_connq));
1877 spin_lock_irqsave(&dev->rad_lock, flags);
1878 wake_up(&dev->rad_waitq);
1879 spin_unlock_irqrestore(&dev->rad_lock, flags);
1882 spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
1883 wake_up_all(&kranal_data.kra_reaper_waitq);
1884 spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
1886 LASSERT (list_empty(&kranal_data.kra_connd_peers));
1887 spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
1888 wake_up_all(&kranal_data.kra_connd_waitq);
1889 spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
1892 while (atomic_read(&kranal_data.kra_nthreads) != 0) {
1894 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
1895 "Waiting for %d threads to terminate\n",
1896 atomic_read(&kranal_data.kra_nthreads));
1900 LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
1901 if (kranal_data.kra_peers != NULL) {
1902 for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
1903 LASSERT (list_empty(&kranal_data.kra_peers[i]));
1905 PORTAL_FREE(kranal_data.kra_peers,
1906 sizeof (struct list_head) *
1907 kranal_data.kra_peer_hash_size);
1910 LASSERT (atomic_read(&kranal_data.kra_nconns) == 0);
1911 if (kranal_data.kra_conns != NULL) {
1912 for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
1913 LASSERT (list_empty(&kranal_data.kra_conns[i]));
1915 PORTAL_FREE(kranal_data.kra_conns,
1916 sizeof (struct list_head) *
1917 kranal_data.kra_conn_hash_size);
1920 for (i = 0; i < kranal_data.kra_ndevs; i++)
1921 kranal_device_fini(&kranal_data.kra_devices[i]);
1923 kranal_free_txdescs(&kranal_data.kra_idle_txs);
1924 kranal_free_txdescs(&kranal_data.kra_idle_nblk_txs);
1926 CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
1927 atomic_read(&portal_kmemory));
1928 printk(KERN_INFO "Lustre: RapidArray NAL unloaded (final mem %d)\n",
1929 atomic_read(&portal_kmemory));
1931 kranal_data.kra_init = RANAL_INIT_NOTHING;
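/* Bring the NAL up: initialise the data structures and connstamps, allocate
 * the peer/conn hash tables and tx descriptors, initialise the library,
 * start the reaper, connd and per-device scheduler threads, probe the
 * RapidArray devices and finally register the command interface.  Any
 * failure unwinds through kranal_api_shutdown(). */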
1935 kranal_api_startup (nal_t *nal, ptl_pid_t requested_pid,
1936 ptl_ni_limits_t *requested_limits,
1937 ptl_ni_limits_t *actual_limits)
1939 static int device_ids[] = {RAPK_MAIN_DEVICE_ID,
1940 RAPK_EXPANSION_DEVICE_ID};
1942 ptl_process_id_t process_id;
1943 int pkmem = atomic_read(&portal_kmemory);
1948 LASSERT (nal == &kranal_api);
1950 if (nal->nal_refct != 0) {
1951 if (actual_limits != NULL)
1952 *actual_limits = kranal_lib.libnal_ni.ni_actual_limits;
1953 /* This module got the first ref */
1958 LASSERT (kranal_data.kra_init == RANAL_INIT_NOTHING);
1960 memset(&kranal_data, 0, sizeof(kranal_data)); /* zero pointers, flags etc */
1962 /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
1963 * a unique (for all time) connstamp so we can uniquely identify
1964 * the sender. The connstamp is an incrementing counter
1965 * initialised with seconds + microseconds at startup time. So we
1966 * rely on NOT creating connections more frequently on average than
1967 * 1MHz to ensure we don't use old connstamps when we reboot. */
1968 do_gettimeofday(&tv);
1969 kranal_data.kra_connstamp =
1970 kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
1972 init_MUTEX(&kranal_data.kra_nid_mutex);
1973 init_MUTEX_LOCKED(&kranal_data.kra_listener_signal);
1975 rwlock_init(&kranal_data.kra_global_lock);
1977 for (i = 0; i < RANAL_MAXDEVS; i++ ) {
1978 kra_device_t *dev = &kranal_data.kra_devices[i];
1981 INIT_LIST_HEAD(&dev->rad_connq);
1982 init_waitqueue_head(&dev->rad_waitq);
1983 spin_lock_init(&dev->rad_lock);
1986 kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
1987 init_waitqueue_head(&kranal_data.kra_reaper_waitq);
1988 spin_lock_init(&kranal_data.kra_reaper_lock);
1990 INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
1991 INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
1992 init_waitqueue_head(&kranal_data.kra_connd_waitq);
1993 spin_lock_init(&kranal_data.kra_connd_lock);
1995 INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
1996 INIT_LIST_HEAD(&kranal_data.kra_idle_nblk_txs);
1997 init_waitqueue_head(&kranal_data.kra_idle_tx_waitq);
1998 spin_lock_init(&kranal_data.kra_tx_lock);
2000 /* OK to call kranal_api_shutdown() to cleanup now */
2001 kranal_data.kra_init = RANAL_INIT_DATA;
2003 kranal_data.kra_peer_hash_size = RANAL_PEER_HASH_SIZE;
2004 PORTAL_ALLOC(kranal_data.kra_peers,
2005 sizeof(struct list_head) * kranal_data.kra_peer_hash_size);
2006 if (kranal_data.kra_peers == NULL)
2009 for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
2010 INIT_LIST_HEAD(&kranal_data.kra_peers[i]);
2012 kranal_data.kra_conn_hash_size = RANAL_PEER_HASH_SIZE;
2013 PORTAL_ALLOC(kranal_data.kra_conns,
2014 sizeof(struct list_head) * kranal_data.kra_conn_hash_size);
2015 if (kranal_data.kra_conns == NULL)
2018 for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
2019 INIT_LIST_HEAD(&kranal_data.kra_conns[i]);
2021 rc = kranal_alloc_txdescs(&kranal_data.kra_idle_txs, RANAL_NTX);
2025 rc = kranal_alloc_txdescs(&kranal_data.kra_idle_nblk_txs,RANAL_NTX_NBLK);
2029 process_id.pid = requested_pid;
2030 process_id.nid = PTL_NID_ANY; /* don't know my NID yet */
2032 rc = lib_init(&kranal_lib, nal, process_id,
2033 requested_limits, actual_limits);
2035 CERROR("lib_init failed: error %d\n", rc);
2039 /* lib interface initialised */
2040 kranal_data.kra_init = RANAL_INIT_LIB;
2041 /*****************************************************/
2043 rc = kranal_thread_start(kranal_reaper, NULL);
2045 CERROR("Can't spawn ranal reaper: %d\n", rc);
2049 for (i = 0; i < RANAL_N_CONND; i++) {
2050 rc = kranal_thread_start(kranal_connd, (void *)(unsigned long)i);
2052 CERROR("Can't spawn ranal connd[%d]: %d\n",
2058 LASSERT(kranal_data.kra_ndevs == 0);
2059 for (i = 0; i < sizeof(device_ids)/sizeof(device_ids[0]); i++) {
2060 dev = &kranal_data.kra_devices[kranal_data.kra_ndevs];
2062 rc = kranal_device_init(device_ids[i], dev);
2064 kranal_data.kra_ndevs++;
2066 rc = kranal_thread_start(kranal_scheduler, dev);
2068 CERROR("Can't spawn ranal scheduler[%d]: %d\n",
2074 if (kranal_data.kra_ndevs == 0)
2077 rc = libcfs_nal_cmd_register(RANAL, &kranal_cmd, NULL);
2079 CERROR("Can't initialise command interface (rc = %d)\n", rc);
2083 /* flag everything initialised */
2084 kranal_data.kra_init = RANAL_INIT_ALL;
2085 /*****************************************************/
2087 CDEBUG(D_MALLOC, "initial kmem %d\n", atomic_read(&portal_kmemory));
2088 printk(KERN_INFO "Lustre: RapidArray NAL loaded "
2089 "(initial mem %d)\n", pkmem);
2094 kranal_api_shutdown(&kranal_api);
2099 kranal_module_fini (void)
2101 if (kranal_tunables.kra_sysctl != NULL)
2102 unregister_sysctl_table(kranal_tunables.kra_sysctl);
2104 PtlNIFini(kranal_ni);
2106 ptl_unregister_nal(RANAL);
2110 kranal_module_init (void)
2114 /* the following must be sizeof(int) for
2115 * proc_dointvec/kranal_listener_procint() */
2116 LASSERT (sizeof(kranal_tunables.kra_timeout) == sizeof(int));
2117 LASSERT (sizeof(kranal_tunables.kra_listener_timeout) == sizeof(int));
2118 LASSERT (sizeof(kranal_tunables.kra_backlog) == sizeof(int));
2119 LASSERT (sizeof(kranal_tunables.kra_port) == sizeof(int));
2120 LASSERT (sizeof(kranal_tunables.kra_max_immediate) == sizeof(int));
2122 kranal_api.nal_ni_init = kranal_api_startup;
2123 kranal_api.nal_ni_fini = kranal_api_shutdown;
2125 /* Initialise dynamic tunables to defaults once only */
2126 kranal_tunables.kra_timeout = RANAL_TIMEOUT;
2127 kranal_tunables.kra_listener_timeout = RANAL_LISTENER_TIMEOUT;
2128 kranal_tunables.kra_backlog = RANAL_BACKLOG;
2129 kranal_tunables.kra_port = RANAL_PORT;
2130 kranal_tunables.kra_max_immediate = RANAL_MAX_IMMEDIATE;
2132 rc = ptl_register_nal(RANAL, &kranal_api);
2134 CERROR("Can't register RANAL: %d\n", rc);
2135 return -ENOMEM; /* or something... */
2138 /* Pure gateways want the NAL started up at module load time... */
2139 rc = PtlNIInit(RANAL, LUSTRE_SRV_PTL_PID, NULL, NULL, &kranal_ni);
2140 if (rc != PTL_OK && rc != PTL_IFACE_DUP) {
2141 ptl_unregister_nal(RANAL);
2145 kranal_tunables.kra_sysctl =
2146 register_sysctl_table(kranal_top_ctl_table, 0);
2147 if (kranal_tunables.kra_sysctl == NULL) {
2148 CERROR("Can't register sysctl table\n");
2149 PtlNIFini(kranal_ni);
2150 ptl_unregister_nal(RANAL);
2157 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
2158 MODULE_DESCRIPTION("Kernel RapidArray NAL v0.01");
2159 MODULE_LICENSE("GPL");
2161 module_init(kranal_module_init);
2162 module_exit(kranal_module_fini);