1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2004 Cluster File Systems, Inc.
5 * Author: Eric Barton <eric@bartonsoftware.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 ptl_handle_ni_t kranal_ni;
28 kra_data_t kranal_data;
29 kra_tunables_t kranal_tunables;
32 #define RANAL_SYSCTL_TIMEOUT 1
33 #define RANAL_SYSCTL_LISTENER_TIMEOUT 2
34 #define RANAL_SYSCTL_BACKLOG 3
35 #define RANAL_SYSCTL_PORT 4
36 #define RANAL_SYSCTL_MAX_IMMEDIATE 5
38 #define RANAL_SYSCTL 202
40 static ctl_table kranal_ctl_table[] = {
41 {RANAL_SYSCTL_TIMEOUT, "timeout",
42 &kranal_tunables.kra_timeout, sizeof(int),
43 0644, NULL, &proc_dointvec},
44 {RANAL_SYSCTL_LISTENER_TIMEOUT, "listener_timeout",
45 &kranal_tunables.kra_listener_timeout, sizeof(int),
46 0644, NULL, &proc_dointvec},
47 {RANAL_SYSCTL_BACKLOG, "backlog",
48 &kranal_tunables.kra_backlog, sizeof(int),
49 0644, NULL, kranal_listener_procint},
50 {RANAL_SYSCTL_PORT, "port",
51 &kranal_tunables.kra_port, sizeof(int),
52 0644, NULL, kranal_listener_procint},
53 {RANAL_SYSCTL_MAX_IMMEDIATE, "max_immediate",
54 &kranal_tunables.kra_max_immediate, sizeof(int),
55 0644, NULL, &proc_dointvec},
59 static ctl_table kranal_top_ctl_table[] = {
60 {RANAL_SYSCTL, "ranal", NULL, 0, 0555, kranal_ctl_table},
66 kranal_sock_write (struct socket *sock, void *buffer, int nob)
69 mm_segment_t oldmm = get_fs();
81 .msg_flags = MSG_DONTWAIT
84 /* We've set up the socket's send buffer to be large enough for
85 * everything we send, so a single non-blocking send should
86 * complete without error. */
89 rc = sock_sendmsg(sock, &msg, iov.iov_len);
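/* Read exactly 'nob' bytes from 'sock', re-arming SO_RCVTIMEO with the
 * time remaining before each recvmsg so the whole read observes 'timeout'
 * seconds. */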
96 kranal_sock_read (struct socket *sock, void *buffer, int nob, int timeout)
99 mm_segment_t oldmm = get_fs();
100 long ticks = timeout * HZ;
112 struct msghdr msg = {
122 /* Set receive timeout to remaining time */
123 tv = (struct timeval) {
124 .tv_sec = ticks / HZ,
125 .tv_usec = ((ticks % HZ) * 1000000) / HZ
126 };
128 rc = sock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
129 (char *)&tv, sizeof(tv));
132 CERROR("Can't set socket recv timeout %d: %d\n",
139 rc = sock_recvmsg(sock, &msg, iov.iov_len, 0);
140 ticks -= jiffies - then;
147 return -ECONNABORTED;
149 buffer = ((char *)buffer) + rc;
161 kranal_create_sock(struct socket **sockp)
167 mm_segment_t oldmm = get_fs();
169 rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
171 CERROR("Can't create socket: %d\n", rc);
175 /* Ensure sending connection info doesn't block */
176 option = 2 * sizeof(kra_connreq_t);
178 rc = sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
179 (char *)&option, sizeof(option));
182 CERROR("Can't set send buffer %d: %d\n", option, rc);
188 rc = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
189 (char *)&option, sizeof(option));
192 CERROR("Can't set SO_REUSEADDR: %d\n", rc);
205 kranal_pause(int ticks)
207 set_current_state(TASK_UNINTERRUPTIBLE);
208 schedule_timeout(ticks);
212 kranal_pack_connreq(kra_connreq_t *connreq, kra_conn_t *conn)
214 memset(connreq, 0, sizeof(*connreq));
216 connreq->racr_magic = RANAL_MSG_MAGIC;
217 connreq->racr_version = RANAL_MSG_VERSION;
218 connreq->racr_devid = conn->rac_device->rad_id;
219 connreq->racr_nid = kranal_lib.libnal_ni.ni_pid.nid;
220 connreq->racr_timeout = conn->rac_timeout;
221 connreq->racr_incarnation = conn->rac_my_incarnation;
223 rrc = RapkGetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
224 LASSERT(rrc == RAP_SUCCESS);
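/* Receive a connection request and sanity-check it, byte-swapping the
 * fields if the sender has the opposite endianness. */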
228 kranal_recv_connreq (struct socket *sock, kra_connreq_t *connreq, int timeout)
233 rc = kranal_sock_read(sock, connreq, sizeof(*connreq), timeout);
235 CERROR("Read failed: %d\n", rc);
239 if (connreq->racr_magic != RANAL_MSG_MAGIC) {
240 if (__swab32(connreq->racr_magic) != RANAL_MSG_MAGIC) {
241 CERROR("Unexpected magic %08x\n", connreq->racr_magic);
245 __swab32s(&connreq->racr_magic);
246 __swab16s(&connreq->racr_version);
247 __swab16s(&connreq->racr_devid);
248 __swab64s(&connreq->racr_nid);
249 __swab64s(&connreq->racr_incarnation);
250 __swab32s(&connreq->racr_timeout);
252 __swab32s(&connreq->racr_riparams.FmaDomainHndl);
253 __swab32s(&connreq->racr_riparams.RcvCqHndl);
254 __swab32s(&connreq->racr_riparams.PTag);
255 __swab32s(&connreq->racr_riparams.CompletionCookie);
258 if (connreq->racr_version != RANAL_MSG_VERSION) {
259 CERROR("Unexpected version %d\n", connreq->racr_version);
263 if (connreq->racr_nid == PTL_NID_ANY) {
264 CERROR("Received PTL_NID_ANY\n");
268 if (connreq->racr_timeout < RANAL_MIN_TIMEOUT) {
269 CERROR("Received timeout %d < MIN %d\n",
270 connreq->racr_timeout, RANAL_MIN_TIMEOUT);
274 for (i = 0; i < kranal_data.kra_ndevs; i++)
275 if (connreq->racr_devid ==
276 kranal_data.kra_devices[i].rad_id)
279 if (i == kranal_data.kra_ndevs) {
280 CERROR("Can't match device %d\n", connreq->racr_devid);
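/* Decide whether a new connection duplicates one already in the peer's
 * conn list, comparing incarnations (called with the global lock held). */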
288 kranal_conn_isdup_locked(kra_peer_t *peer, __u64 incarnation)
291 struct list_head *tmp;
294 list_for_each(tmp, &peer->rap_conns) {
295 conn = list_entry(tmp, kra_conn_t, rac_list);
297 if (conn->rac_peer_incarnation < incarnation) {
298 /* Conns with an older incarnation get culled later */
303 conn->rac_peer_incarnation == incarnation &&
304 peer->rap_nid == kranal_lib.libnal_ni.ni_pid.nid) {
305 /* loopback creates 2 conns */
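/* Stamp the connection with my next incarnation and a cqid not already
 * present in the cqid hash, under the global lock. */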
317 kranal_set_conn_uniqueness (kra_conn_t *conn)
321 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
323 conn->rac_my_incarnation = kranal_data.kra_next_incarnation++;
325 do { /* allocate a unique cqid */
326 conn->rac_cqid = kranal_data.kra_next_cqid++;
327 } while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL);
330 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
334 kranal_alloc_conn(kra_conn_t **connp, kra_device_t *dev, __u32 cqid)
339 LASSERT (!in_interrupt());
340 PORTAL_ALLOC(conn, sizeof(*conn));
345 memset(conn, 0, sizeof(*conn));
346 conn->rac_cqid = cqid;
347 atomic_set(&conn->rac_refcount, 1);
348 INIT_LIST_HEAD(&conn->rac_list);
349 INIT_LIST_HEAD(&conn->rac_hashlist);
350 INIT_LIST_HEAD(&conn->rac_fmaq);
351 INIT_LIST_HEAD(&conn->rac_rdmaq);
352 INIT_LIST_HEAD(&conn->rac_replyq);
353 spin_lock_init(&conn->rac_lock);
355 conn->rac_timeout = MAX(kranal_tunables.kra_timeout, RANAL_MIN_TIMEOUT);
356 kranal_update_reaper_timeout(conn->rac_timeout);
358 rrc = RapkCreateRi(dev->rad_handle, cqid, dev->rad_ptag,
359 dev->rad_rdma_cq, dev->rad_fma_cq,
360 &conn->rac_rihandle);
361 if (rrc != RAP_SUCCESS) {
362 CERROR("RapkCreateRi failed: %d\n", rrc);
363 PORTAL_FREE(conn, sizeof(*conn));
367 atomic_inc(&kranal_data.kra_nconns);
373 __kranal_conn_decref(kra_conn_t *conn)
378 LASSERT (!in_interrupt());
379 LASSERT (!conn->rac_scheduled);
380 LASSERT (list_empty(&conn->rac_list));
381 LASSERT (list_empty(&conn->rac_hashlist));
382 LASSERT (atomic_read(&conn->rac_refcount) == 0);
384 while (!list_empty(&conn->rac_fmaq)) {
385 tx = list_entry(conn->rac_fmaq.next, kra_tx_t, tx_list);
387 list_del(&tx->tx_list);
388 kranal_tx_done(tx, -ECONNABORTED);
391 /* We may not destroy this connection while it has RDMAs outstanding */
392 LASSERT (list_empty(&conn->rac_rdmaq));
394 while (!list_empty(&conn->rac_replyq)) {
395 tx = list_entry(conn->rac_replyq.next, kra_tx_t, tx_list);
397 list_del(&tx->tx_list);
398 kranal_tx_done(tx, -ECONNABORTED);
401 rrc = RapkDestroyRi(conn->rac_device->rad_handle,
403 LASSERT (rrc == RAP_SUCCESS);
405 if (conn->rac_peer != NULL)
406 kranal_peer_decref(conn->rac_peer);
408 PORTAL_FREE(conn, sizeof(*conn));
409 atomic_dec(&kranal_data.kra_nconns);
413 kranal_terminate_conn_locked (kra_conn_t *conn)
415 kra_peer_t *peer = conn->rac_peer;
417 LASSERT (!in_interrupt());
418 LASSERT (conn->rac_closing);
419 LASSERT (!list_empty(&conn->rac_hashlist));
420 LASSERT (list_empty(&conn->rac_list));
422 /* Remove from conn hash table (no new callbacks) */
423 list_del_init(&conn->rac_hashlist);
424 kranal_conn_decref(conn);
426 /* Conn is now just waiting for remaining refs to go */
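/* Start closing a healthy connection: unlink it from its peer, mark it
 * closing and hand it to the scheduler (global lock held). */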
430 kranal_close_conn_locked (kra_conn_t *conn, int error)
432 kra_peer_t *peer = conn->rac_peer;
434 CDEBUG(error == 0 ? D_NET : D_ERROR,
435 "closing conn to "LPX64": error %d\n", peer->rap_nid, error);
437 LASSERT (!in_interrupt());
438 LASSERT (!conn->rac_closing);
439 LASSERT (!list_empty(&conn->rac_hashlist));
440 LASSERT (!list_empty(&conn->rac_list));
442 list_del_init(&conn->rac_list);
444 if (list_empty(&peer->rap_conns) &&
445 peer->rap_persistence == 0) {
446 /* Non-persistent peer with no more conns... */
447 kranal_unlink_peer_locked(peer);
450 conn->rac_closing = 1;
451 kranal_schedule_conn(conn);
453 kranal_conn_decref(conn); /* lose peer's ref */
457 kranal_close_conn (kra_conn_t *conn, int error)
462 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
464 if (!conn->rac_closing)
465 kranal_close_conn_locked(conn, error);
467 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
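/* Passive side of the handshake: check the caller's privileged port, read
 * its connection request and reply with ours. */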
471 kranal_passive_conn_handshake (struct socket *sock,
472 ptl_nid_t *peer_nidp, kra_conn_t **connp)
474 struct sockaddr_in addr;
476 unsigned int peer_port;
477 kra_connreq_t connreq;
485 rc = sock->ops->getname(sock, (struct sockaddr *)&addr, &len, 2);
487 CERROR("Can't get peer's IP: %d\n", rc);
491 peer_ip = ntohl(addr.sin_addr.s_addr);
492 peer_port = ntohs(addr.sin_port);
494 if (peer_port >= 1024) {
495 CERROR("Refusing unprivileged connection from %u.%u.%u.%u/%d\n",
496 HIPQUAD(peer_ip), peer_port);
497 return -ECONNREFUSED;
500 rc = kranal_recv_connreq(sock, &connreq,
501 kranal_data.kra_listener_timeout);
503 CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
504 HIPQUAD(peer_ip), peer_port, rc);
508 peer_nid = connreq.racr_nid;
509 LASSERT (peer_nid != PTL_NID_ANY);
512 LASSERT(i < kranal_data.kra_ndevs);
513 dev = &kranal_data.kra_devices[i];
514 if (dev->rad_id == connreq.racr_devid)
518 rc = kranal_alloc_conn(&conn, dev, (__u32)(peer_nid & 0xffffffff));
522 conn->rac_peer_incarnation = connreq.racr_incarnation;
523 conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq.racr_timeout);
524 kranal_update_reaper_timeout(conn->rac_keepalive);
526 rrc = RapkSetRiParams(conn->rac_rihandle, &connreq.racr_riparams);
527 if (rrc != RAP_SUCCESS) {
528 CERROR("Can't set riparams for "LPX64": %d\n", peer_nid, rrc);
529 kranal_conn_decref(conn);
533 kranal_pack_connreq(&connreq, conn);
535 rc = kranal_sock_write(sock, &connreq, sizeof(connreq));
537 CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
538 HIPQUAD(peer_ip), peer_port, rc);
539 kranal_conn_decref(conn);
544 *peer_nidp = peer_nid;
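/* Create a socket bound to a privileged port (1023 down to 512) and
 * connect it to the peer's listener. */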
549 kranal_connect_sock(kra_peer_t *peer, struct socket **sockp)
551 struct sockaddr_in locaddr;
552 struct sockaddr_in srvaddr;
557 mm_segment_t oldmm = get_fs();
560 for (port = 1023; port >= 512; port--) {
562 memset(&locaddr, 0, sizeof(locaddr));
563 locaddr.sin_family = AF_INET;
564 locaddr.sin_port = htons(port);
565 locaddr.sin_addr.s_addr = htonl(INADDR_ANY);
567 memset (&srvaddr, 0, sizeof (srvaddr));
568 srvaddr.sin_family = AF_INET;
569 srvaddr.sin_port = htons (peer->rap_port);
570 srvaddr.sin_addr.s_addr = htonl (peer->rap_ip);
572 rc = kranal_create_sock(&sock);
576 rc = sock->ops->bind(sock,
577 (struct sockaddr *)&locaddr, sizeof(locaddr));
581 if (rc == -EADDRINUSE) {
582 CDEBUG(D_NET, "Port %d already in use\n", port);
586 CERROR("Can't bind to reserved port %d: %d\n", port, rc);
590 rc = sock->ops->connect(sock,
591 (struct sockaddr *)&srvaddr, sizeof(srvaddr),
600 if (rc != -EADDRNOTAVAIL) {
601 CERROR("Can't connect port %d to %u.%u.%u.%u/%d: %d\n",
602 port, HIPQUAD(peer->rap_ip), peer->rap_port, rc);
606 CDEBUG(D_NET, "Port %d not available for %u.%u.%u.%u/%d\n",
607 port, HIPQUAD(peer->rap_ip), peer->rap_port);
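/* Active side of the handshake: connect to the peer, send our connection
 * request first and then read the reply. */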
613 kranal_active_conn_handshake(kra_peer_t *peer, kra_conn_t **connp)
615 kra_connreq_t connreq;
623 id32 = (peer_nid & 0xffffffff);
624 dev = &kranal_data.kra_devices[id32 % kranal_data.kra_ndevs];
626 rc = kranal_alloc_conn(&conn, dev, id32);
630 kranal_pack_connreq(&connreq, conn);
632 memset(&dstaddr, 0, sizeof(dstaddr));
633 dstaddr.sin_family = AF_INET;
634 dstaddr.sin_port = htons(peer->rap_port);
635 dstaddr.sin_addr.s_addr = htonl(peer->rap_ip);
637 memset(&srcaddr, 0, sizeof(srcaddr));
639 rc = kranal_connect_sock(peer, &sock);
643 /* CAVEAT EMPTOR: the passive side receives with a SHORT rx timeout
644 * immediately after accepting a connection, so we connect and then
645 * send immediately. */
647 rc = kranal_sock_write(sock, &connreq, sizeof(connreq));
649 CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
650 HIPQUAD(peer->rap_ip), peer->rap_port, rc);
654 rc = kranal_recv_connreq(sock, &connreq, kranal_data.kra_timeout);
656 CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
657 HIPQUAD(peer->rap_ip), peer->rap_port, rc);
664 if (connreq.racr_nid != peer->rap_nid) {
665 CERROR("Unexpected nid from %u.%u.%u.%u/%d: "
666 "received "LPX64" expected "LPX64"\n",
667 HIPQUAD(peer->rap_ip), peer->rap_port,
668 connreq.racr_nid, peer->rap_nid);
672 if (connreq.racr_devid != dev->rad_id) {
673 CERROR("Unexpected device id from %u.%u.%u.%u/%d: "
674 "received %d expected %d\n",
675 HIPQUAD(peer->rap_ip), peer->rap_port,
676 connreq.racr_devid, dev->rad_id);
680 conn->rac_peer_incarnation = connreq.racr_incarnation;
681 conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq.racr_timeout);
682 kranal_update_reaper_timeout(conn->rac_keepalive);
685 rrc = RapkSetRiParams(conn->rac_rihandle,
686 &connreq.racr_riparams);
687 if (rrc != RAP_SUCCESS) {
688 CERROR("Can't set riparams for "LPX64": %d\n",
699 kranal_conn_decref(conn);
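/* Complete a handshake, passive (listener accepted 'sock', peer == NULL)
 * or active ('peer' given, sock created here), then install the new conn
 * in the peer and cqid tables. */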
704 kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
706 kra_peer_t *peer2;
709 unsigned long timeout;
715 /* passive: listener accepted sock */
716 LASSERT (peer == NULL);
718 rc = kranal_passive_conn_handshake(sock, &peer_nid, &conn);
722 /* assume this is a new peer */
723 peer = kranal_create_peer(peer_nid);
725 CERROR("Can't allocate peer for "LPX64"\n", peer_nid);
726 kranal_conn_decref(conn);
730 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
732 peer2 = kranal_find_peer_locked(peer_nid);
734 /* peer table takes my initial ref on peer */
735 list_add_tail(&peer->rap_list,
736 kranal_nid2peerlist(peer_nid));
738 /* peer_nid already in the peer table */
739 kranal_peer_decref(peer);
742 /* NB I may now have a non-persistent peer in the peer
743 * table with no connections: I can't drop the global lock
744 * until I've given it a connection or removed it, and when
745 * I do 'peer' can disappear under me. */
747 /* active: connd wants to connect to peer */
748 LASSERT (peer != NULL);
749 LASSERT (peer->rap_connecting);
751 rc = kranal_active_conn_handshake(peer, &conn);
755 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
757 if (!kranal_peer_active(peer)) {
758 /* raced with peer getting unlinked */
759 write_unlock_irqrestore(&kranal_data.kra_global_lock,
761 kranal_conn_decref(conn);
766 LASSERT (kranal_peer_active(peer)); /* peer is in the peer table */
767 peer_nid = peer->rap_nid;
769 /* Refuse to duplicate an existing connection (both sides might try
770 * to connect at once). NB we return success! We _do_ have a
771 * connection (so we don't need to remove the peer from the peer
772 * table) and we _don't_ have any blocked txs to complete */
773 if (kranal_conn_isdup_locked(peer, conn->rac_peer_incarnation)) {
774 LASSERT (!list_empty(&peer->rap_conns));
775 LASSERT (list_empty(&peer->rap_tx_queue));
776 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
777 CWARN("Not creating duplicate connection to "LPX64"\n",
779 kranal_conn_decref(conn);
783 kranal_peer_addref(peer); /* +1 ref for conn */
784 conn->rac_peer = peer;
785 list_add_tail(&conn->rac_list, &peer->rap_conns);
787 kranal_conn_addref(conn); /* +1 ref for conn table */
788 list_add_tail(&conn->rac_hashlist,
789 kranal_cqid2connlist(conn->rac_cqid));
791 /* Schedule all packets blocking for a connection */
792 while (!list_empty(&peer->rap_tx_queue)) {
793 tx = list_entry(peer->rap_tx_queue.next,
796 list_del(&tx->tx_list);
797 kranal_queue_tx_locked(tx, conn);
800 nstale = kranal_close_stale_conns_locked(peer, conn->rac_peer_incarnation);
802 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
804 /* CAVEAT EMPTOR: passive peer can disappear NOW */
807 CWARN("Closed %d stale conns to "LPX64"\n", nstale, peer_nid);
809 /* Ensure conn gets checked. Transmits may have been queued and an
810 * FMA event may have happened before it got in the cq hash table */
811 kranal_schedule_conn(conn);
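/* connd: attempt an active connection to 'peer'; on failure back off the
 * reconnect interval and complete any queued transmits with an error. */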
816 kranal_connect (kra_peer_t *peer)
820 struct list_head zombies;
823 LASSERT (peer->rap_connecting);
825 rc = kranal_conn_handshake(NULL, peer);
827 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
829 LASSERT (peer->rap_connecting);
830 peer->rap_connecting = 0;
833 /* kranal_conn_handshake() queues blocked txs immediately on
834 * success to avoid messages jumping the queue */
835 LASSERT (list_empty(&peer->rap_tx_queue));
837 /* reset reconnection timeouts */
838 peer->rap_reconnect_interval = RANAL_MIN_RECONNECT_INTERVAL;
839 peer->rap_reconnect_time = CURRENT_TIME;
841 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
845 LASSERT (peer->rap_reconnect_interval != 0);
846 peer->rap_reconnect_time = CURRENT_TIME + peer->rap_reconnect_interval;
847 peer->rap_reconnect_interval = MIN(RANAL_MAX_RECONNECT_INTERVAL,
848 2 * peer->rap_reconnect_interval);
850 /* Grab all blocked packets while we have the global lock */
851 list_add(&zombies, &peer->rap_tx_queue);
852 list_del_init(&peer->rap_tx_queue);
854 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
856 if (list_empty(&zombies))
859 CWARN("Dropping packets for "LPX64": connection failed\n",
863 tx = list_entry(zombies.next, kra_tx_t, tx_list);
865 list_del(&tx->tx_list);
866 kranal_tx_done(tx, -EHOSTUNREACH);
868 } while (!list_empty(&zombies));
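/* Listener thread: bind and listen on the configured port, accept
 * incoming connections and run the passive handshake on each. */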
872 kranal_listener(void *arg)
874 struct sockaddr_in addr;
877 struct socket *newsock;
881 kra_connreq_t *connreqs;
884 /* Parent thread holds kra_nid_mutex, and is, or is about to
885 * block on kra_listener_signal */
887 port = kranal_tunables.kra_port;
888 snprintf(name, sizeof(name), "kranal_lstn%03d", port);
889 kportal_daemonize(name);
890 kportal_blockallsigs();
892 init_waitqueue_entry(&wait, current);
895 PORTAL_ALLOC(connreqs, 2 * sizeof(*connreqs));
896 if (connreqs == NULL)
899 rc = kranal_create_sock(&sock);
903 memset(&addr, 0, sizeof(addr));
904 addr.sin_family = AF_INET;
905 addr.sin_port = htons(port);
906 addr.sin_addr.s_addr = htonl(INADDR_ANY);
908 rc = sock->ops->bind(sock, (struct sockaddr *)&addr, sizeof(addr));
910 CERROR("Can't bind to port %d\n", port);
914 rc = sock->ops->listen(sock, kranal_tunables.kra_backlog);
916 CERROR("Can't set listen backlog %d: %d\n", kranal_tunables.kra_backlog, rc);
920 LASSERT (kranal_data.kra_listener_sock == NULL);
921 kranal_data.kra_listener_sock = sock;
923 /* unblock waiting parent */
924 LASSERT (kranal_data.kra_listener_shutdown == 0);
925 up(&kranal_data.kra_listener_signal);
927 /* Wake me any time something happens on my socket */
928 add_wait_queue(sock->sk->sk_sleep, &wait);
930 while (kranal_data.kra_listener_shutdown == 0) {
932 newsock = sock_alloc();
933 if (newsock == NULL) {
934 CERROR("Can't allocate new socket for accept\n");
939 set_current_state(TASK_INTERRUPTIBLE);
941 rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
944 kranal_data.kra_listener_shutdown == 0)
947 set_current_state(TASK_RUNNING);
950 sock_release(newsock);
952 CERROR("Accept failed: %d\n", rc);
958 kranal_conn_handshake(newsock, NULL);
959 sock_release(newsock);
963 remove_wait_queue(sock->sk->sk_sleep, &wait);
966 kranal_data.kra_listener_sock = NULL;
968 PORTAL_FREE(connreqs, 2 * sizeof(*connreqs));
970 /* set completion status and unblock thread waiting for me
971 * (parent on startup failure, executioner on normal shutdown) */
972 kranal_data.kra_listener_shutdown = rc;
973 up(&kranal_data.kra_listener_signal);
979 kranal_start_listener ()
984 CDEBUG(D_WARNING, "Starting listener\n");
986 /* Called holding kra_nid_mutex: listener stopped */
987 LASSERT (kranal_data.kra_listener_sock == NULL);
989 kranal_data.kra_listener_shutdown = 0;
990 pid = kernel_thread(kranal_listener, NULL, 0);
992 CERROR("Can't spawn listener: %ld\n", pid);
996 /* Block until listener has started up. */
997 down(&kranal_data.kra_listener_signal);
999 rc = kranal_data.kra_listener_shutdown;
1000 LASSERT ((rc != 0) == (kranal_data.kra_listener_sock == NULL));
1002 CDEBUG(D_WARNING, "Listener %ld started OK\n", pid);
1007 kranal_stop_listener()
1009 CDEBUG(D_WARNING, "Stopping listener\n");
1011 /* Called holding kra_nid_mutex: listener running */
1012 LASSERT (kranal_data.kra_listener_sock != NULL);
1014 kranal_data.kra_listener_shutdown = 1;
1015 wake_up_all(kranal_data.kra_listener_sock->sk->sk_sleep);
1017 /* Block until listener has torn down. */
1018 down(&kranal_data.kra_listener_signal);
1020 LASSERT (kranal_data.kra_listener_sock == NULL);
1021 CDEBUG(D_WARNING, "Listener stopped\n");
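/* sysctl handler for the port/backlog tunables: restart the listener when
 * a value changes while it is running. */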
1025 kranal_listener_procint(ctl_table *table, int write, struct file *filp,
1026 void *buffer, size_t *lenp)
1028 int *tunable = (int *)table->data;
1032 down(&kranal_data.kra_nid_mutex);
1034 LASSERT (tunable == &kranal_tunables.kra_port ||
1035 tunable == &kranal_tunables.kra_backlog);
1038 rc = proc_dointvec(table, write, filp, buffer, lenp);
1041 (*tunable != old_val ||
1042 kranal_data.kra_listener_sock == NULL)) {
1044 if (kranal_data.kra_listener_sock != NULL)
1045 kranal_stop_listener();
1047 rc = kranal_start_listener();
1051 kranal_start_listener();
1055 up(&kranal_data.kra_nid_mutex);
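/* Change my NID: stop the listener, delete all peers (and their conns)
 * created under the old NID, then restart the listener if the new NID is
 * valid. */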
1060 kranal_set_mynid(ptl_nid_t nid)
1062 lib_ni_t *ni = &kranal_lib.libnal_ni;
1065 CDEBUG(D_NET, "setting mynid to "LPX64" (old nid="LPX64")\n",
1066 nid, ni->ni_pid.nid);
1068 down(&kranal_data.kra_nid_mutex);
1070 if (nid == ni->ni_pid.nid) {
1071 /* no change of NID */
1072 up(&kranal_data.kra_nid_mutex);
1076 if (kranal_data.kra_listener_sock != NULL)
1077 kranal_stop_listener();
1079 ni->ni_pid.nid = nid;
1081 /* Delete all existing peers and their connections after new
1082 * NID/incarnation set to ensure no old connections in our brave
1084 kranal_del_peer(PTL_NID_ANY, 0);
1086 if (nid != PTL_NID_ANY)
1087 rc = kranal_start_listener();
1089 up(&kranal_data.kra_nid_mutex);
1094 kranal_create_peer (ptl_nid_t nid)
1098 LASSERT (nid != PTL_NID_ANY);
1100 PORTAL_ALLOC(peer, sizeof(*peer));
1104 memset(peer, 0, sizeof(*peer)); /* zero flags etc */
1106 peer->rap_nid = nid;
1107 atomic_set(&peer->rap_refcount, 1); /* 1 ref for caller */
1109 INIT_LIST_HEAD(&peer->rap_list); /* not in the peer table yet */
1110 INIT_LIST_HEAD(&peer->rap_conns);
1111 INIT_LIST_HEAD(&peer->rap_tx_queue);
1113 peer->rap_reconnect_time = CURRENT_TIME;
1114 peer->rap_reconnect_interval = RANAL_MIN_RECONNECT_INTERVAL;
1116 atomic_inc(&kranal_data.kra_npeers);
1121 __kranal_peer_decref (kra_peer_t *peer)
1123 CDEBUG(D_NET, "peer "LPX64" %p deleted\n", peer->rap_nid, peer);
1125 LASSERT (atomic_read(&peer->rap_refcount) == 0);
1126 LASSERT (peer->rap_persistence == 0);
1127 LASSERT (!kranal_peer_active(peer));
1128 LASSERT (peer->rap_connecting == 0);
1129 LASSERT (list_empty(&peer->rap_conns));
1130 LASSERT (list_empty(&peer->rap_tx_queue));
1132 PORTAL_FREE(peer, sizeof(*peer));
1134 /* NB a peer's connections keep a reference on their peer until
1135 * they are destroyed, so we can be assured that _all_ state to do
1136 * with this peer has been cleaned up when its refcount drops to
1138 atomic_dec(&kranal_data.kra_npeers);
1142 kranal_find_peer_locked (ptl_nid_t nid)
1144 struct list_head *peer_list = kranal_nid2peerlist(nid);
1145 struct list_head *tmp;
1148 list_for_each (tmp, peer_list) {
1150 peer = list_entry(tmp, kra_peer_t, rap_list);
1152 LASSERT (peer->rap_persistence > 0 || /* persistent peer */
1153 !list_empty(&peer->rap_conns)); /* active conn */
1155 if (peer->rap_nid != nid)
1158 CDEBUG(D_NET, "got peer [%p] -> "LPX64" (%d)\n",
1159 peer, nid, atomic_read(&peer->rap_refcount));
1166 kranal_find_peer (ptl_nid_t nid)
1170 read_lock(&kranal_data.kra_global_lock);
1171 peer = kranal_find_peer_locked(nid);
1172 if (peer != NULL) /* +1 ref for caller? */
1173 kranal_peer_addref(peer);
1174 read_unlock(&kranal_data.kra_global_lock);
1180 kranal_unlink_peer_locked (kra_peer_t *peer)
1182 LASSERT (peer->rap_persistence == 0);
1183 LASSERT (list_empty(&peer->rap_conns));
1185 LASSERT (kranal_peer_active(peer));
1186 list_del_init(&peer->rap_list);
1188 /* lose peerlist's ref */
1189 kranal_peer_decref(peer);
1193 kranal_get_peer_info (int index, ptl_nid_t *nidp, __u32 *ipp, int *portp, int *persistencep)
1196 struct list_head *ptmp;
1199 read_lock(&kranal_data.kra_global_lock);
1201 for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
1203 list_for_each(ptmp, &kranal_data.kra_peers[i]) {
1205 peer = list_entry(ptmp, kra_peer_t, rap_list);
1206 LASSERT (peer->rap_persistence > 0 ||
1207 !list_empty(&peer->rap_conns));
1212 *nidp = peer->rap_nid;
*ipp = peer->rap_ip;
1213 *portp = peer->rap_port;
1214 *persistencep = peer->rap_persistence;
1216 read_unlock(&kranal_data.kra_global_lock);
1221 read_unlock(&kranal_data.kra_global_lock);
1226 kranal_add_persistent_peer (ptl_nid_t nid, __u32 ip, int port)
1228 unsigned long flags;
1232 if (nid == PTL_NID_ANY)
1235 peer = kranal_create_peer(nid);
1239 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1241 peer2 = kranal_find_peer_locked(nid);
1242 if (peer2 != NULL) {
1243 kranal_peer_decref(peer);
1246 /* peer table takes existing ref on peer */
1247 list_add_tail(&peer->rap_list,
1248 kranal_nid2peerlist(nid));
1252 peer->rap_port = port;
1253 peer->rap_persistence++;
1255 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
1260 kranal_del_peer_locked (kra_peer_t *peer, int single_share)
1262 struct list_head *ctmp;
1263 struct list_head *cnxt;
1266 if (!single_share)
1267 peer->rap_persistence = 0;
1268 else if (peer->rap_persistence > 0)
1269 peer->rap_persistence--;
1271 if (peer->rap_persistence != 0)
1274 if (list_empty(&peer->rap_conns)) {
1275 kranal_unlink_peer_locked(peer);
1277 list_for_each_safe(ctmp, cnxt, &peer->rap_conns) {
1278 conn = list_entry(ctmp, kra_conn_t, rac_list);
1280 kranal_close_conn_locked(conn, 0);
1282 /* peer unlinks itself when last conn is closed */
1287 kranal_del_peer (ptl_nid_t nid, int single_share)
1289 unsigned long flags;
1290 struct list_head *ptmp;
1291 struct list_head *pnxt;
1298 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1300 if (nid != PTL_NID_ANY)
1301 lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
1304 hi = kranal_data.kra_peer_hash_size - 1;
1307 for (i = lo; i <= hi; i++) {
1308 list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
1309 peer = list_entry(ptmp, kra_peer_t, rap_list);
1310 LASSERT (peer->rap_persistence > 0 ||
1311 !list_empty(&peer->rap_conns));
1313 if (!(nid == PTL_NID_ANY || peer->rap_nid == nid))
1316 kranal_del_peer_locked(peer, single_share);
1317 rc = 0; /* matched something */
1324 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
1330 kranal_get_conn_by_idx (int index)
1333 struct list_head *ptmp;
1335 struct list_head *ctmp;
1338 read_lock (&kranal_data.kra_global_lock);
1340 for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
1341 list_for_each (ptmp, &kranal_data.kra_peers[i]) {
1343 peer = list_entry(ptmp, kra_peer_t, rap_list);
1344 LASSERT (peer->rap_persistence > 0 ||
1345 !list_empty(&peer->rap_conns));
1347 list_for_each (ctmp, &peer->rap_conns) {
1351 conn = list_entry(ctmp, kra_conn_t, rac_list);
1352 CDEBUG(D_NET, "++conn[%p] -> "LPX64" (%d)\n",
1353 conn, conn->rac_peer->rap_nid,
1354 atomic_read(&conn->rac_refcount));
1355 atomic_inc(&conn->rac_refcount);
1356 read_unlock(&kranal_data.kra_global_lock);
1362 read_unlock(&kranal_data.kra_global_lock);
1367 kranal_close_peer_conns_locked (kra_peer_t *peer, int why)
1370 struct list_head *ctmp;
1371 struct list_head *cnxt;
1374 list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
1375 conn = list_entry(ctmp, kra_conn_t, rac_list);
1378 kranal_close_conn_locked(conn, why);
1385 kranal_close_stale_conns_locked (kra_peer_t *peer, __u64 incarnation)
1388 struct list_head *ctmp;
1389 struct list_head *cnxt;
1392 list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
1393 conn = list_entry(ctmp, kra_conn_t, rac_list);
1395 if (conn->rac_peer_incarnation == incarnation)
1398 CDEBUG(D_NET, "Closing stale conn nid:"LPX64" incarnation:"LPX64"("LPX64")\n",
1399 peer->rap_nid, conn->rac_peer_incarnation, incarnation);
1400 LASSERT (conn->rac_peer_incarnation < incarnation);
1403 kranal_close_conn_locked(conn, -ESTALE);
1410 kranal_close_matching_conns (ptl_nid_t nid)
1412 unsigned long flags;
1414 struct list_head *ptmp;
1415 struct list_head *pnxt;
1421 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1423 if (nid != PTL_NID_ANY)
1424 lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
1427 hi = kranal_data.kra_peer_hash_size - 1;
1430 for (i = lo; i <= hi; i++) {
1431 list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
1433 peer = list_entry(ptmp, kra_peer_t, rap_list);
1434 LASSERT (peer->rap_persistence > 0 ||
1435 !list_empty(&peer->rap_conns));
1437 if (!(nid == PTL_NID_ANY || nid == peer->rap_nid))
1440 count += kranal_close_peer_conns_locked(peer, 0);
1444 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
1446 /* wildcards always succeed */
1447 if (nid == PTL_NID_ANY)
1450 return (count == 0) ? -ENOENT : 0;
1454 kranal_cmd(struct portals_cfg *pcfg, void * private)
1458 LASSERT (pcfg != NULL);
1460 switch(pcfg->pcfg_command) {
1461 case NAL_CMD_GET_PEER: {
1465 int share_count = 0;
1467 rc = kranal_get_peer_info(pcfg->pcfg_count,
1468 &nid, &ip, &port, &share_count);
1469 pcfg->pcfg_nid = nid;
1470 pcfg->pcfg_size = 0;
1472 pcfg->pcfg_misc = port;
1473 pcfg->pcfg_count = 0;
1474 pcfg->pcfg_wait = share_count;
1477 case NAL_CMD_ADD_PEER: {
1478 rc = kranal_add_persistent_peer(pcfg->pcfg_nid,
1479 pcfg->pcfg_id, /* IP */
1480 pcfg->pcfg_misc); /* port */
1483 case NAL_CMD_DEL_PEER: {
1484 rc = kranal_del_peer(pcfg->pcfg_nid,
1485 /* flags == single_share */
1486 pcfg->pcfg_flags != 0);
1489 case NAL_CMD_GET_CONN: {
1490 kra_conn_t *conn = kranal_get_conn_by_idx(pcfg->pcfg_count);
1496 pcfg->pcfg_nid = conn->rac_peer->rap_nid;
1498 pcfg->pcfg_misc = 0;
1499 pcfg->pcfg_flags = 0;
1500 kranal_conn_decref(conn);
1504 case NAL_CMD_CLOSE_CONNECTION: {
1505 rc = kranal_close_matching_conns(pcfg->pcfg_nid);
1508 case NAL_CMD_REGISTER_MYNID: {
1509 if (pcfg->pcfg_nid == PTL_NID_ANY)
1512 rc = kranal_set_mynid(pcfg->pcfg_nid);
1521 kranal_free_txdescs(struct list_head *freelist)
1525 while (!list_empty(freelist)) {
1526 tx = list_entry(freelist->next, kra_tx_t, tx_list);
1528 list_del(&tx->tx_list);
1529 PORTAL_FREE(tx->tx_phys, PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
1530 PORTAL_FREE(tx, sizeof(*tx));
1535 kranal_alloc_txdescs(struct list_head *freelist, int n)
1537 int isnblk = (freelist == &kranal_data.kra_idle_nblk_txs);
1541 LASSERT (freelist == &kranal_data.kra_idle_txs ||
1542 freelist == &kranal_data.kra_idle_nblk_txs);
1543 LASSERT (list_empty(freelist));
1545 for (i = 0; i < n; i++) {
1547 PORTAL_ALLOC(tx, sizeof(*tx));
1549 CERROR("Can't allocate %stx[%d]\n",
1550 isnblk ? "nblk " : "", i);
1551 kranal_free_txdescs(freelist);
1555 PORTAL_ALLOC(tx->tx_phys,
1556 PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
1557 if (tx->tx_phys == NULL) {
1558 CERROR("Can't allocate %stx[%d]->tx_phys\n",
1559 isnblk ? "nblk " : "", i);
1561 PORTAL_FREE(tx, sizeof(*tx));
1562 kranal_free_txdescs(freelist);
1566 tx->tx_isnblk = isnblk;
1567 tx->tx_buftype = RANAL_BUF_NONE;
1569 list_add(&tx->tx_list, freelist);
1576 kranal_device_init(int id, kra_device_t *dev)
1578 const int total_ntx = RANAL_NTX + RANAL_NTX_NBLK;
1582 rrc = RapkGetDeviceByIndex(id, NULL, kranal_device_callback,
1584 if (rrc != RAP_SUCCESS) {
1585 CERROR("Can't get RapidArray Device %d: %d\n", id, rrc);
1589 rrc = RapkReserveRdma(dev->rad_handle, total_ntx);
1590 if (rrc != RAP_SUCCESS) {
1591 CERROR("Can't reserve %d RDMA descriptors"
1592 " for device[%d]: %d\n", total_ntx, id, rrc);
1596 rrc = RapkCreatePtag(dev->rad_handle,
1598 if (rrc != RAP_SUCCESS) {
1599 CERROR("Can't create ptag"
1600 " for device[%d]: %d\n", id, rrc);
1604 rrc = RapkCreateCQ(dev->rad_handle, total_ntx, dev->rad_ptag,
1606 if (rrc != RAP_SUCCESS) {
1607 CERROR("Can't create rdma cq size %d"
1608 " for device[%d]: %d\n", total_ntx, id, rrc);
1612 rrc = RapkCreateCQ(dev->rad_handle, RANAL_FMA_CQ_SIZE,
1613 dev->rad_ptag, &dev->rad_fma_cq);
1614 if (rrc != RAP_SUCCESS) {
1615 CERROR("Can't create fma cq size %d"
1616 " for device[%d]: %d\n", RANAL_FMA_CQ_SIZE, id, rrc);
1623 RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cq, dev->rad_ptag);
1625 RapkDestroyPtag(dev->rad_handle, dev->rad_ptag);
1627 RapkReleaseDevice(dev->rad_handle);
1633 kranal_device_fini(kra_device_t *dev)
1635 RapkDestroyCQ(dev->rad_handle, dev->rad_fma_cq, dev->rad_ptag);
1636 RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cq, dev->rad_ptag);
1637 RapkDestroyPtag(dev->rad_handle, dev->rad_ptag);
1638 RapkReleaseDevice(dev->rad_handle);
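/* Tear down the NAL in the reverse order of startup, waiting for peers,
 * conns and threads to drain at each stage. */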
1642 kranal_api_shutdown (nal_t *nal)
1646 unsigned long flags;
1648 if (nal->nal_refct != 0) {
1649 /* This module got the first ref */
1650 PORTAL_MODULE_UNUSE;
1654 CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
1655 atomic_read(&portal_kmemory));
1657 LASSERT (nal == &kranal_api);
1659 switch (kranal_data.kra_init) {
1661 CERROR("Unexpected state %d\n", kranal_data.kra_init);
1664 case RANAL_INIT_ALL:
1665 /* stop calls to nal_cmd */
1666 libcfs_nal_cmd_unregister(OPENRANAL);
1667 /* No new persistent peers */
1669 /* resetting my NID to unadvertises me, removes my
1670 * listener and nukes all current peers */
1671 kranal_set_mynid(PTL_NID_ANY);
1672 /* no new peers or conns */
1674 /* Wait for all peer/conn state to clean up */
1676 while (atomic_read(&kranal_data.kra_nconns) != 0 ||
1677 atomic_read(&kranal_data.kra_npeers) != 0) {
1679 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
1680 "waiting for %d peers and %d conns to close down\n",
1681 atomic_read(&kranal_data.kra_npeers),
1682 atomic_read(&kranal_data.kra_nconns));
1687 case RANAL_INIT_LIB:
1688 lib_fini(&kranal_lib);
1691 case RANAL_INIT_DATA:
1695 /* flag threads to terminate; wake and wait for them to die */
1696 kranal_data.kra_shutdown = 1;
1698 for (i = 0; i < kranal_data.kra_ndevs; i++) {
1699 kra_device_t *dev = &kranal_data.kra_devices[i];
1701 LASSERT (list_empty(&dev->rad_connq));
1703 spin_lock_irqsave(&dev->rad_lock, flags);
1704 wake_up(&dev->rad_waitq);
1705 spin_unlock_irqrestore(&dev->rad_lock, flags);
1708 spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
1709 wake_up_all(&kranal_data.kra_reaper_waitq);
1710 spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
1712 LASSERT (list_empty(&kranal_data.kra_connd_peers));
1713 spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
1714 wake_up_all(&kranal_data.kra_connd_waitq);
1715 spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
1718 while (atomic_read(&kranal_data.kra_nthreads) != 0) {
1720 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
1721 "Waiting for %d threads to terminate\n",
1722 atomic_read(&kranal_data.kra_nthreads));
1726 LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
1727 if (kranal_data.kra_peers != NULL) {
1728 for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
1729 LASSERT (list_empty(&kranal_data.kra_peers[i]));
1731 PORTAL_FREE(kranal_data.kra_peers,
1732 sizeof (struct list_head) *
1733 kranal_data.kra_peer_hash_size);
1736 LASSERT (atomic_read(&kranal_data.kra_nconns) == 0);
1737 if (kranal_data.kra_conns != NULL) {
1738 for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
1739 LASSERT (list_empty(&kranal_data.kra_conns[i]));
1741 PORTAL_FREE(kranal_data.kra_conns,
1742 sizeof (struct list_head) *
1743 kranal_data.kra_conn_hash_size);
1746 for (i = 0; i < kranal_data.kra_ndevs; i++)
1747 kranal_device_fini(&kranal_data.kra_devices[i]);
1749 kranal_free_txdescs(&kranal_data.kra_idle_txs);
1750 kranal_free_txdescs(&kranal_data.kra_idle_nblk_txs);
1752 CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
1753 atomic_read(&portal_kmemory));
1754 printk(KERN_INFO "Lustre: RapidArray NAL unloaded (final mem %d)\n",
1755 atomic_read(&portal_kmemory));
1757 kranal_data.kra_init = RANAL_INIT_NOTHING;
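/* Bring up the NAL: initialise global state, allocate the peer/conn hash
 * tables and tx descriptors, start the reaper, connd and per-device
 * scheduler threads, then register the command interface. */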
1761 kranal_api_startup (nal_t *nal, ptl_pid_t requested_pid,
1762 ptl_ni_limits_t *requested_limits,
1763 ptl_ni_limits_t *actual_limits)
1765 static int device_ids[] = {RAPK_MAIN_DEVICE_ID,
1766 RAPK_EXPANSION_DEVICE_ID};
1768 ptl_process_id_t process_id;
1769 int pkmem = atomic_read(&portal_kmemory);
1774 LASSERT (nal == &kranal_api);
1776 if (nal->nal_refct != 0) {
1777 if (actual_limits != NULL)
1778 *actual_limits = kranal_lib.libnal_ni.ni_actual_limits;
1779 /* This module got the first ref */
1784 LASSERT (kranal_data.kra_init == RANAL_INIT_NOTHING);
1786 memset(&kranal_data, 0, sizeof(kranal_data)); /* zero pointers, flags etc */
1788 /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
1789 * a unique (for all time) incarnation so we can uniquely identify
1790 * the sender. The incarnation is an incrementing counter
1791 * initialised with seconds + microseconds at startup time. So we
1792 * rely on NOT creating connections more frequently on average than
1793 * 1MHz to ensure we don't use old incarnations when we reboot. */
1794 do_gettimeofday(&tv);
1795 kranal_data.kra_next_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
1797 init_MUTEX(&kranal_data.kra_nid_mutex);
1798 init_MUTEX_LOCKED(&kranal_data.kra_listener_signal);
1800 rwlock_init(&kranal_data.kra_global_lock);
1802 for (i = 0; i < RANAL_MAXDEVS; i++ ) {
1803 kra_device_t *dev = &kranal_data.kra_devices[i];
1806 INIT_LIST_HEAD(&dev->rad_connq);
1807 init_waitqueue_head(&dev->rad_waitq);
1808 spin_lock_init(&dev->rad_lock);
1811 init_waitqueue_head(&kranal_data.kra_reaper_waitq);
1812 spin_lock_init(&kranal_data.kra_reaper_lock);
1814 INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
1815 init_waitqueue_head(&kranal_data.kra_connd_waitq);
1816 spin_lock_init(&kranal_data.kra_connd_lock);
1818 INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
1819 INIT_LIST_HEAD(&kranal_data.kra_idle_nblk_txs);
1820 init_waitqueue_head(&kranal_data.kra_idle_tx_waitq);
1821 spin_lock_init(&kranal_data.kra_tx_lock);
1823 /* OK to call kranal_api_shutdown() to cleanup now */
1824 kranal_data.kra_init = RANAL_INIT_DATA;
1826 kranal_data.kra_peer_hash_size = RANAL_PEER_HASH_SIZE;
1827 PORTAL_ALLOC(kranal_data.kra_peers,
1828 sizeof(struct list_head) * kranal_data.kra_peer_hash_size);
1829 if (kranal_data.kra_peers == NULL)
1832 for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
1833 INIT_LIST_HEAD(&kranal_data.kra_peers[i]);
1835 kranal_data.kra_conn_hash_size = RANAL_PEER_HASH_SIZE;
1836 PORTAL_ALLOC(kranal_data.kra_conns,
1837 sizeof(struct list_head) * kranal_data.kra_conn_hash_size);
1838 if (kranal_data.kra_conns == NULL)
1841 for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
1842 INIT_LIST_HEAD(&kranal_data.kra_conns[i]);
1844 rc = kranal_alloc_txdescs(&kranal_data.kra_idle_txs, RANAL_NTX);
1848 rc = kranal_alloc_txdescs(&kranal_data.kra_idle_nblk_txs,RANAL_NTX_NBLK);
1852 process_id.pid = requested_pid;
1853 process_id.nid = PTL_NID_ANY; /* don't know my NID yet */
1855 rc = lib_init(&kranal_lib, nal, process_id,
1856 requested_limits, actual_limits);
1858 CERROR("lib_init failed: error %d\n", rc);
1862 /* lib interface initialised */
1863 kranal_data.kra_init = RANAL_INIT_LIB;
1864 /*****************************************************/
1866 rc = kranal_thread_start(kranal_reaper, NULL);
1868 CERROR("Can't spawn ranal reaper: %d\n", rc);
1872 for (i = 0; i < RANAL_N_CONND; i++) {
1873 rc = kranal_thread_start(kranal_connd, (void *)i);
1875 CERROR("Can't spawn ranal connd[%d]: %d\n",
1881 LASSERT(kranal_data.kra_ndevs == 0);
1882 for (i = 0; i < sizeof(device_ids)/sizeof(device_ids[0]); i++) {
1883 dev = &kranal_data.kra_devices[kranal_data.kra_ndevs];
1885 rc = kranal_device_init(device_ids[i], dev);
1887 kranal_data.kra_ndevs++;
1889 rc = kranal_thread_start(kranal_scheduler, dev);
1891 CERROR("Can't spawn ranal scheduler[%d]: %d\n",
1897 if (kranal_data.kra_ndevs == 0)
1900 rc = libcfs_nal_cmd_register(OPENRANAL, &kranal_cmd, NULL);
1902 CERROR("Can't initialise command interface (rc = %d)\n", rc);
1906 /* flag everything initialised */
1907 kranal_data.kra_init = RANAL_INIT_ALL;
1908 /*****************************************************/
1910 CDEBUG(D_MALLOC, "initial kmem %d\n", atomic_read(&portal_kmemory));
1911 printk(KERN_INFO "Lustre: RapidArray NAL loaded "
1912 "(initial mem %d)\n", pkmem);
1917 kranal_api_shutdown(&kranal_api);
1922 kranal_module_fini (void)
1924 #ifdef CONFIG_SYSCTL
1925 if (kranal_tunables.kra_sysctl != NULL)
1926 unregister_sysctl_table(kranal_tunables.kra_sysctl);
1928 PtlNIFini(kranal_ni);
1930 ptl_unregister_nal(OPENRANAL);
1934 kranal_module_init (void)
1938 /* the following must be sizeof(int) for
1939 * proc_dointvec/kranal_listener_procint() */
1940 LASSERT (sizeof(kranal_tunables.kra_timeout) == sizeof(int));
1941 LASSERT (sizeof(kranal_tunables.kra_listener_timeout) == sizeof(int));
1942 LASSERT (sizeof(kranal_tunables.kra_backlog) == sizeof(int));
1943 LASSERT (sizeof(kranal_tunables.kra_port) == sizeof(int));
1944 LASSERT (sizeof(kranal_tunables.kra_max_immediate) == sizeof(int));
1946 kranal_api.nal_ni_init = kranal_api_startup;
1947 kranal_api.nal_ni_fini = kranal_api_shutdown;
1949 /* Initialise dynamic tunables to defaults once only */
1950 kranal_tunables.kra_timeout = RANAL_TIMEOUT;
1952 rc = ptl_register_nal(OPENRANAL, &kranal_api);
1954 CERROR("Can't register RANAL: %d\n", rc);
1955 return -ENOMEM; /* or something... */
1958 /* Pure gateways want the NAL started up at module load time... */
1959 rc = PtlNIInit(OPENRANAL, LUSTRE_SRV_PTL_PID, NULL, NULL, &kranal_ni);
1960 if (rc != PTL_OK && rc != PTL_IFACE_DUP) {
1961 ptl_unregister_nal(OPENRANAL);
1965 #ifdef CONFIG_SYSCTL
1966 /* Press on regardless even if registering sysctl doesn't work */
1967 kranal_tunables.kra_sysctl =
1968 register_sysctl_table(kranal_top_ctl_table, 0);
1973 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
1974 MODULE_DESCRIPTION("Kernel RapidArray NAL v0.01");
1975 MODULE_LICENSE("GPL");
1977 module_init(kranal_module_init);
1978 module_exit(kranal_module_fini);