1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lnet/klnds/ralnd/ralnd.c
38 * Author: Eric Barton <eric@bartonsoftware.com>
/* RapidArray device ids this LND will try to open at startup: the main
 * device plus the expansion device (bounded by RANAL_MAXDEVS). */
42 static int kranal_devids[RANAL_MAXDEVS] = {RAPK_MAIN_DEVICE_ID,
43 RAPK_EXPANSION_DEVICE_ID};
/* LND method table entries registering this driver's callbacks with LNet
 * (the lnd_t declaration line is elided in this excerpt), followed by the
 * single global state blob for the module. */
47 .lnd_startup = kranal_startup,
48 .lnd_shutdown = kranal_shutdown,
49 .lnd_ctl = kranal_ctl,
50 .lnd_send = kranal_send,
51 .lnd_recv = kranal_recv,
52 .lnd_eager_recv = kranal_eager_recv,
53 .lnd_accept = kranal_accept,
/* Global module state (peers, conns, devices, locks). */
56 kra_data_t kranal_data;
/* Fill *connreq for transmission.  conn == NULL builds a "stub" reply
 * carrying only my magic/version (used to tell a newer peer I'm old);
 * otherwise the request describes 'conn' and is addressed to 'dstnid'.
 * NB: this excerpt is line-sampled; some statements are elided. */
59 kranal_pack_connreq(kra_connreq_t *connreq, kra_conn_t *conn, lnet_nid_t dstnid)
63 memset(connreq, 0, sizeof(*connreq));
65 connreq->racr_magic = RANAL_MSG_MAGIC;
66 connreq->racr_version = RANAL_MSG_VERSION;
68 if (conn == NULL) /* prepping a "stub" reply */
71 connreq->racr_devid = conn->rac_device->rad_id;
72 connreq->racr_srcnid = lnet_ptlcompat_srcnid(kranal_data.kra_ni->ni_nid,
74 connreq->racr_dstnid = dstnid;
75 connreq->racr_peerstamp = kranal_data.kra_peerstamp;
76 connreq->racr_connstamp = conn->rac_my_connstamp;
77 connreq->racr_timeout = conn->rac_timeout;
/* RiParams come straight from the RapidArray kernel API; failure is fatal. */
79 rrc = RapkGetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
80 LASSERT(rrc == RAP_SUCCESS);
/* Read and validate a connection request from 'sock'.
 * active != 0 means I initiated the connection (use my tunable timeout);
 * passive uses the acceptor timeout.  Returns 0 on success, -ve on error,
 * +ve to tell a future-protocol peer that I'm "old".
 * NB: this excerpt is line-sampled; error-return statements are elided. */
84 kranal_recv_connreq(struct socket *sock, kra_connreq_t *connreq, int active)
86 int timeout = active ? *kranal_tunables.kra_timeout :
87 lnet_acceptor_timeout();
91 /* return 0 on success, -ve on error, +ve to tell the peer I'm "old" */
93 rc = libcfs_sock_read(sock, &connreq->racr_magic,
94 sizeof(connreq->racr_magic), timeout);
96 CERROR("Read(magic) failed(1): %d\n", rc);
/* Accept either byte order of my own magic; anything else needs triage. */
100 if (connreq->racr_magic != RANAL_MSG_MAGIC &&
101 connreq->racr_magic != __swab32(RANAL_MSG_MAGIC)) {
102 /* Unexpected magic! */
104 the_lnet.ln_ptlcompat == 0 &&
105 (connreq->racr_magic == LNET_PROTO_MAGIC ||
106 connreq->racr_magic == __swab32(LNET_PROTO_MAGIC))) {
107 /* future protocol version compatibility!
108 * When LNET unifies protocols over all LNDs, the first
109 * thing sent will be a version query. +ve rc means I
110 * reply with my current magic/version */
115 the_lnet.ln_ptlcompat == 0) {
116 CERROR("Unexpected magic %08x (1)\n",
117 connreq->racr_magic);
121 /* When portals compatibility is set, I may be passed a new
122 * connection "blindly" by the acceptor, and I have to
123 * determine if my peer has sent an acceptor connection request
124 * or not. This isn't a connreq, so I'll get the acceptor to
126 rc = lnet_accept(kranal_data.kra_ni, sock, connreq->racr_magic);
130 /* ...and if it's OK I'm back to looking for a connreq... */
131 rc = libcfs_sock_read(sock, &connreq->racr_magic,
132 sizeof(connreq->racr_magic), timeout);
134 CERROR("Read(magic) failed(2): %d\n", rc);
138 if (connreq->racr_magic != RANAL_MSG_MAGIC &&
139 connreq->racr_magic != __swab32(RANAL_MSG_MAGIC)) {
140 CERROR("Unexpected magic %08x(2)\n",
141 connreq->racr_magic);
/* Peer sent byte-swapped magic => everything else needs swabbing too. */
146 swab = (connreq->racr_magic == __swab32(RANAL_MSG_MAGIC));
148 rc = libcfs_sock_read(sock, &connreq->racr_version,
149 sizeof(connreq->racr_version), timeout);
151 CERROR("Read(version) failed: %d\n", rc);
156 __swab16s(&connreq->racr_version);
158 if (connreq->racr_version != RANAL_MSG_VERSION) {
160 CERROR("Unexpected version %d\n", connreq->racr_version);
163 /* If this is a future version of the ralnd protocol, and I'm
164 * passive (accepted the connection), tell my peer I'm "old"
/* NOTE(review): sizeof(connreq->racr_version) - offsetof(..., racr_devid)
 * underflows (version is a __u16, racr_devid sits well past offset 2);
 * upstream reads the remainder of the struct with
 * sizeof(*connreq) - offsetof(kra_connreq_t, racr_devid).  Verify against
 * the full source -- this excerpt is line-sampled. */
169 rc = libcfs_sock_read(sock, &connreq->racr_devid,
170 sizeof(connreq->racr_version) -
171 offsetof(kra_connreq_t, racr_devid),
174 CERROR("Read(body) failed: %d\n", rc);
/* Byte-swap the whole body when the peer's endianness differs. */
179 __swab32s(&connreq->racr_magic);
180 __swab16s(&connreq->racr_version);
181 __swab16s(&connreq->racr_devid);
182 __swab64s(&connreq->racr_srcnid);
183 __swab64s(&connreq->racr_dstnid);
184 __swab64s(&connreq->racr_peerstamp);
185 __swab64s(&connreq->racr_connstamp);
186 __swab32s(&connreq->racr_timeout);
188 __swab32s(&connreq->racr_riparams.HostId);
189 __swab32s(&connreq->racr_riparams.FmaDomainHndl);
190 __swab32s(&connreq->racr_riparams.PTag);
191 __swab32s(&connreq->racr_riparams.CompletionCookie);
/* Sanity checks on the decoded request. */
194 if (connreq->racr_srcnid == LNET_NID_ANY ||
195 connreq->racr_dstnid == LNET_NID_ANY) {
196 CERROR("Received LNET_NID_ANY\n");
200 if (connreq->racr_timeout < RANAL_MIN_TIMEOUT) {
201 CERROR("Received timeout %d < MIN %d\n",
202 connreq->racr_timeout, RANAL_MIN_TIMEOUT);
/* Close every conn to 'peer' made obsolete by 'newconn' (older peerstamp
 * or older peer connstamp on the same device).  Caller holds the global
 * lock.  Loopback conns compare both stamp directions to avoid closing
 * the two halves of a self-connection. */
210 kranal_close_stale_conns_locked (kra_peer_t *peer, kra_conn_t *newconn)
213 struct list_head *ctmp;
214 struct list_head *cnxt;
218 loopback = peer->rap_nid == kranal_data.kra_ni->ni_nid;
220 list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
221 conn = list_entry(ctmp, kra_conn_t, rac_list);
226 if (conn->rac_peerstamp != newconn->rac_peerstamp) {
227 CDEBUG(D_NET, "Closing stale conn nid: %s "
228 " peerstamp:"LPX64"("LPX64")\n",
229 libcfs_nid2str(peer->rap_nid),
230 conn->rac_peerstamp, newconn->rac_peerstamp);
231 LASSERT (conn->rac_peerstamp < newconn->rac_peerstamp);
233 kranal_close_conn_locked(conn, -ESTALE);
237 if (conn->rac_device != newconn->rac_device)
/* Skip my own loopback "other half" (stamps are mirrored). */
241 newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
242 newconn->rac_peer_connstamp == conn->rac_my_connstamp)
245 LASSERT (conn->rac_peer_connstamp < newconn->rac_peer_connstamp);
247 CDEBUG(D_NET, "Closing stale conn nid: %s"
248 " connstamp:"LPX64"("LPX64")\n",
249 libcfs_nid2str(peer->rap_nid),
250 conn->rac_peer_connstamp, newconn->rac_peer_connstamp);
253 kranal_close_conn_locked(conn, -ESTALE);
/* Decide whether 'newconn' duplicates an existing conn to 'peer'.
 * Caller holds the global lock.  Stamps order connections in time:
 * older entries on either side will be culled later; an equal stamp
 * means the peer is misbehaving. */
260 kranal_conn_isdup_locked(kra_peer_t *peer, kra_conn_t *newconn)
263 struct list_head *tmp;
266 loopback = peer->rap_nid == kranal_data.kra_ni->ni_nid;
268 list_for_each(tmp, &peer->rap_conns) {
269 conn = list_entry(tmp, kra_conn_t, rac_list);
271 /* 'newconn' is from an earlier version of 'peer'!!! */
272 if (newconn->rac_peerstamp < conn->rac_peerstamp)
275 /* 'conn' is from an earlier version of 'peer': it will be
276 * removed when we cull stale conns later on... */
277 if (newconn->rac_peerstamp > conn->rac_peerstamp)
280 /* Different devices are OK */
281 if (conn->rac_device != newconn->rac_device)
284 /* It's me connecting to myself */
286 newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
287 newconn->rac_peer_connstamp == conn->rac_my_connstamp)
290 /* 'newconn' is an earlier connection from 'peer'!!! */
291 if (newconn->rac_peer_connstamp < conn->rac_peer_connstamp)
294 /* 'conn' is an earlier connection from 'peer': it will be
295 * removed when we cull stale conns later on... */
296 if (newconn->rac_peer_connstamp > conn->rac_peer_connstamp)
299 /* 'newconn' has the SAME connection stamp; 'peer' isn't
300 * playing the game... */
/* Assign 'conn' a fresh connstamp and a cqid unused by any live conn,
 * both allocated under the global write lock. */
308 kranal_set_conn_uniqueness (kra_conn_t *conn)
312 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
314 conn->rac_my_connstamp = kranal_data.kra_connstamp++;
316 do { /* allocate a unique cqid */
317 conn->rac_cqid = kranal_data.kra_next_cqid++;
318 } while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL);
320 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* Allocate and initialise a conn on device 'dev': zeroed, refcount 1,
 * empty queues, unique stamps/cqid, RapidArray RI handle created.
 * On success *connp is set (return statements elided in this excerpt). */
324 kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
329 LASSERT (!in_interrupt());
330 LIBCFS_ALLOC(conn, sizeof(*conn));
335 memset(conn, 0, sizeof(*conn));
336 atomic_set(&conn->rac_refcount, 1);
337 INIT_LIST_HEAD(&conn->rac_list);
338 INIT_LIST_HEAD(&conn->rac_hashlist);
339 INIT_LIST_HEAD(&conn->rac_schedlist);
340 INIT_LIST_HEAD(&conn->rac_fmaq);
341 INIT_LIST_HEAD(&conn->rac_rdmaq);
342 INIT_LIST_HEAD(&conn->rac_replyq);
343 spin_lock_init(&conn->rac_lock);
345 kranal_set_conn_uniqueness(conn);
347 conn->rac_device = dev;
/* Clamp the tunable timeout to the protocol minimum. */
348 conn->rac_timeout = MAX(*kranal_tunables.kra_timeout, RANAL_MIN_TIMEOUT);
349 kranal_update_reaper_timeout(conn->rac_timeout);
351 rrc = RapkCreateRi(dev->rad_handle, conn->rac_cqid,
352 &conn->rac_rihandle);
353 if (rrc != RAP_SUCCESS) {
354 CERROR("RapkCreateRi failed: %d\n", rrc);
355 LIBCFS_FREE(conn, sizeof(*conn));
359 atomic_inc(&kranal_data.kra_nconns);
/* Final teardown of a conn whose refcount has dropped to zero: it must
 * be off every list with all queues drained.  Releases the RI handle,
 * the peer ref (if set) and the memory. */
365 kranal_destroy_conn(kra_conn_t *conn)
369 LASSERT (!in_interrupt());
370 LASSERT (!conn->rac_scheduled);
371 LASSERT (list_empty(&conn->rac_list));
372 LASSERT (list_empty(&conn->rac_hashlist));
373 LASSERT (list_empty(&conn->rac_schedlist));
374 LASSERT (atomic_read(&conn->rac_refcount) == 0);
375 LASSERT (list_empty(&conn->rac_fmaq));
376 LASSERT (list_empty(&conn->rac_rdmaq));
377 LASSERT (list_empty(&conn->rac_replyq));
379 rrc = RapkDestroyRi(conn->rac_device->rad_handle,
381 LASSERT (rrc == RAP_SUCCESS);
383 if (conn->rac_peer != NULL)
384 kranal_peer_decref(conn->rac_peer);
386 LIBCFS_FREE(conn, sizeof(*conn));
387 atomic_dec(&kranal_data.kra_nconns);
/* Move a CLOSING conn to CLOSED: unhash it (stopping new callbacks),
 * drop the hash-table ref and schedule it so the device thread can
 * complete any outstanding comms.  Caller holds the global lock. */
391 kranal_terminate_conn_locked (kra_conn_t *conn)
393 LASSERT (!in_interrupt());
394 LASSERT (conn->rac_state == RANAL_CONN_CLOSING);
395 LASSERT (!list_empty(&conn->rac_hashlist));
396 LASSERT (list_empty(&conn->rac_list));
398 /* Remove from conn hash table: no new callbacks */
399 list_del_init(&conn->rac_hashlist);
400 kranal_conn_decref(conn);
402 conn->rac_state = RANAL_CONN_CLOSED;
404 /* schedule to clear out all uncompleted comms in context of dev's
406 kranal_schedule_conn(conn);
/* Start closing an ESTABLISHED conn: detach it from its peer (unlinking
 * a non-persistent peer with no remaining conns), reset the RX timer so
 * we wait the full timeout for the peer's CLOSE, then schedule our own
 * CLOSE.  Caller holds the global write lock. */
410 kranal_close_conn_locked (kra_conn_t *conn, int error)
412 kra_peer_t *peer = conn->rac_peer;
414 CDEBUG(error == 0 ? D_NET : D_NETERROR,
415 "closing conn to %s: error %d\n",
416 libcfs_nid2str(peer->rap_nid), error);
418 LASSERT (!in_interrupt());
419 LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED);
420 LASSERT (!list_empty(&conn->rac_hashlist));
421 LASSERT (!list_empty(&conn->rac_list));
423 list_del_init(&conn->rac_list);
425 if (list_empty(&peer->rap_conns) &&
426 peer->rap_persistence == 0) {
427 /* Non-persistent peer with no more conns... */
428 kranal_unlink_peer_locked(peer);
431 /* Reset RX timeout to ensure we wait for an incoming CLOSE for the
432 * full timeout. If we get a CLOSE we know the peer has stopped all
433 * RDMA. Otherwise if we wait for the full timeout we can also be sure
434 * all RDMA has stopped. */
435 conn->rac_last_rx = jiffies;
438 conn->rac_state = RANAL_CONN_CLOSING;
439 kranal_schedule_conn(conn); /* schedule sending CLOSE */
441 kranal_conn_decref(conn); /* lose peer's ref */
/* Lock-taking wrapper around kranal_close_conn_locked(); a no-op if the
 * conn is no longer ESTABLISHED (close already in progress). */
445 kranal_close_conn (kra_conn_t *conn, int error)
450 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
452 if (conn->rac_state == RANAL_CONN_ESTABLISHED)
453 kranal_close_conn_locked(conn, error);
455 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* Complete the RapidArray side of a handshake: install the peer's
 * riparams, hand the conn to the device scheduler (rad_new_conns) and
 * wait for the hardware connect, then record the peer's stamps and
 * keepalive.  peer_ip/peer_port are for error reporting only.
 * Returns 0 or -ECONNABORTED. */
459 kranal_set_conn_params(kra_conn_t *conn, kra_connreq_t *connreq,
460 __u32 peer_ip, int peer_port)
462 kra_device_t *dev = conn->rac_device;
466 /* CAVEAT EMPTOR: we're really overloading rac_last_tx + rac_keepalive
467 * to do RapkCompleteSync() timekeeping (see kibnal_scheduler). */
468 conn->rac_last_tx = jiffies;
469 conn->rac_keepalive = 0;
471 rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
472 if (rrc != RAP_SUCCESS) {
473 CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n",
474 HIPQUAD(peer_ip), peer_port, rrc);
475 return -ECONNABORTED;
478 /* Schedule conn on rad_new_conns */
479 kranal_conn_addref(conn);
480 spin_lock_irqsave(&dev->rad_lock, flags);
481 list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
482 wake_up(&dev->rad_waitq);
483 spin_unlock_irqrestore(&dev->rad_lock, flags);
485 rrc = RapkWaitToConnect(conn->rac_rihandle);
486 if (rrc != RAP_SUCCESS) {
487 CERROR("Error waiting to connect to %u.%u.%u.%u/%d: %d\n",
488 HIPQUAD(peer_ip), peer_port, rrc);
489 return -ECONNABORTED;
492 /* Scheduler doesn't touch conn apart from to deschedule and decref it
493 * after RapkCompleteSync() return success, so conn is all mine */
495 conn->rac_peerstamp = connreq->racr_peerstamp;
496 conn->rac_peer_connstamp = connreq->racr_connstamp;
497 conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout);
498 kranal_update_reaper_timeout(conn->rac_keepalive);
/* Passive (accepted-socket) handshake: read the peer's connreq, match
 * its device id against my devices, create a conn, reply with my own
 * connreq, and finish via kranal_set_conn_params().  On success fills
 * *src_nidp/*dst_nidp/*connp.  NB line-sampled: error returns and the
 * device-matching loop header are elided. */
503 kranal_passive_conn_handshake (struct socket *sock, lnet_nid_t *src_nidp,
504 lnet_nid_t *dst_nidp, kra_conn_t **connp)
507 unsigned int peer_port;
508 kra_connreq_t rx_connreq;
509 kra_connreq_t tx_connreq;
515 rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
517 CERROR("Can't get peer's IP: %d\n", rc);
521 rc = kranal_recv_connreq(sock, &rx_connreq, 0);
524 CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
525 HIPQUAD(peer_ip), peer_port, rc);
530 /* Request from "new" peer: send reply with my MAGIC/VERSION to
531 * tell her I'm old... */
532 kranal_pack_connreq(&tx_connreq, NULL, LNET_NID_ANY);
534 rc = libcfs_sock_write(sock, &tx_connreq, sizeof(tx_connreq),
535 lnet_acceptor_timeout());
537 CERROR("Can't tx stub connreq to %u.%u.%u.%u/%d: %d\n",
538 HIPQUAD(peer_ip), peer_port, rc);
/* Find the local device matching the peer's advertised device id. */
544 if (i == kranal_data.kra_ndevs) {
545 CERROR("Can't match dev %d from %u.%u.%u.%u/%d\n",
546 rx_connreq.racr_devid, HIPQUAD(peer_ip), peer_port);
549 dev = &kranal_data.kra_devices[i];
550 if (dev->rad_id == rx_connreq.racr_devid)
554 rc = kranal_create_conn(&conn, dev);
558 kranal_pack_connreq(&tx_connreq, conn, rx_connreq.racr_srcnid);
560 rc = libcfs_sock_write(sock, &tx_connreq, sizeof(tx_connreq),
561 lnet_acceptor_timeout());
563 CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
564 HIPQUAD(peer_ip), peer_port, rc);
565 kranal_conn_decref(conn);
569 rc = kranal_set_conn_params(conn, &rx_connreq, peer_ip, peer_port);
571 kranal_conn_decref(conn);
576 *src_nidp = rx_connreq.racr_srcnid;
577 *dst_nidp = rx_connreq.racr_dstnid;
/* Active handshake: pick a device (spread by NID sum), create a conn,
 * connect via the acceptor, exchange connreqs, validate the peer's
 * srcnid/devid and complete via kranal_set_conn_params().  On success
 * fills *dst_nidp/*connp.  NB line-sampled: returns/labels elided. */
582 kranal_active_conn_handshake(kra_peer_t *peer,
583 lnet_nid_t *dst_nidp, kra_conn_t **connp)
585 kra_connreq_t connreq;
592 /* spread connections over all devices using both peer NIDs to ensure
593 * all nids use all devices */
594 idx = peer->rap_nid + kranal_data.kra_ni->ni_nid;
595 dev = &kranal_data.kra_devices[idx % kranal_data.kra_ndevs];
597 rc = kranal_create_conn(&conn, dev);
601 kranal_pack_connreq(&connreq, conn, peer->rap_nid);
603 if (the_lnet.ln_testprotocompat != 0) {
604 /* single-shot proto test */
606 if ((the_lnet.ln_testprotocompat & 1) != 0) {
607 connreq.racr_version++;
608 the_lnet.ln_testprotocompat &= ~1;
610 if ((the_lnet.ln_testprotocompat & 2) != 0) {
611 connreq.racr_magic = LNET_PROTO_MAGIC;
612 the_lnet.ln_testprotocompat &= ~2;
617 rc = lnet_connect(&sock, peer->rap_nid,
618 0, peer->rap_ip, peer->rap_port);
622 /* CAVEAT EMPTOR: the passive side receives with a SHORT rx timeout
623 * immediately after accepting a connection, so we connect and then
624 * send immediately. */
626 rc = libcfs_sock_write(sock, &connreq, sizeof(connreq),
627 lnet_acceptor_timeout());
629 CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
630 HIPQUAD(peer->rap_ip), peer->rap_port, rc);
634 rc = kranal_recv_connreq(sock, &connreq, 1);
636 CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
637 HIPQUAD(peer->rap_ip), peer->rap_port, rc);
641 libcfs_sock_release(sock);
644 if (connreq.racr_srcnid != peer->rap_nid) {
645 CERROR("Unexpected srcnid from %u.%u.%u.%u/%d: "
646 "received %s expected %s\n",
647 HIPQUAD(peer->rap_ip), peer->rap_port,
648 libcfs_nid2str(connreq.racr_srcnid),
649 libcfs_nid2str(peer->rap_nid));
653 if (connreq.racr_devid != dev->rad_id) {
654 CERROR("Unexpected device id from %u.%u.%u.%u/%d: "
655 "received %d expected %d\n",
656 HIPQUAD(peer->rap_ip), peer->rap_port,
657 connreq.racr_devid, dev->rad_id);
661 rc = kranal_set_conn_params(conn, &connreq,
662 peer->rap_ip, peer->rap_port);
667 *dst_nidp = connreq.racr_dstnid;
/* Error path: release socket, log, drop the conn ref. */
671 libcfs_sock_release(sock);
673 lnet_connect_console_error(rc, peer->rap_nid,
674 peer->rap_ip, peer->rap_port);
676 kranal_conn_decref(conn);
/* Common handshake driver.  peer != NULL: active connect by connd to
 * 'peer'.  peer == NULL: passive, 'sock' was accepted; a new peer may be
 * created and installed (or merged with a racing entry).  Installs the
 * conn in the peer list and cqid hash, flushes queued txs, culls stale
 * conns.  NB line-sampled: branch/return statements are elided. */
681 kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
694 /* active: connd wants to connect to 'peer' */
695 LASSERT (peer != NULL);
696 LASSERT (peer->rap_connecting);
698 rc = kranal_active_conn_handshake(peer, &dst_nid, &conn);
702 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
704 if (!kranal_peer_active(peer)) {
705 /* raced with peer getting unlinked */
706 write_unlock_irqrestore(&kranal_data.kra_global_lock,
708 kranal_conn_decref(conn);
712 peer_nid = peer->rap_nid;
714 /* passive: listener accepted 'sock' */
715 LASSERT (peer == NULL);
717 rc = kranal_passive_conn_handshake(sock, &peer_nid,
722 /* assume this is a new peer */
723 rc = kranal_create_peer(&peer, peer_nid);
725 CERROR("Can't create conn for %s\n",
726 libcfs_nid2str(peer_nid));
727 kranal_conn_decref(conn);
731 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
733 peer2 = kranal_find_peer_locked(peer_nid);
737 /* peer_nid already in the peer table */
738 kranal_peer_decref(peer);
/* Exactly one of (new peer, already-active peer) must hold. */
743 LASSERT ((!new_peer) != (!kranal_peer_active(peer)));
745 /* Refuse connection if peer thinks we are a different NID. We check
746 * this while holding the global lock, to synch with connection
747 * destruction on NID change. */
748 if (!lnet_ptlcompat_matchnid(kranal_data.kra_ni->ni_nid, dst_nid)) {
749 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
751 CERROR("Stale/bad connection with %s: dst_nid %s, expected %s\n",
752 libcfs_nid2str(peer_nid), libcfs_nid2str(dst_nid),
753 libcfs_nid2str(kranal_data.kra_ni->ni_nid));
758 /* Refuse to duplicate an existing connection (both sides might try to
759 * connect at once). NB we return success! We _are_ connected so we
760 * _don't_ have any blocked txs to complete with failure. */
761 rc = kranal_conn_isdup_locked(peer, conn);
763 LASSERT (!list_empty(&peer->rap_conns));
764 LASSERT (list_empty(&peer->rap_tx_queue));
765 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
766 CWARN("Not creating duplicate connection to %s: %d\n",
767 libcfs_nid2str(peer_nid), rc);
773 /* peer table takes my ref on the new peer */
774 list_add_tail(&peer->rap_list,
775 kranal_nid2peerlist(peer_nid));
778 /* initialise timestamps before reaper looks at them */
779 conn->rac_last_tx = conn->rac_last_rx = jiffies;
781 kranal_peer_addref(peer); /* +1 ref for conn */
782 conn->rac_peer = peer;
783 list_add_tail(&conn->rac_list, &peer->rap_conns);
785 kranal_conn_addref(conn); /* +1 ref for conn table */
786 list_add_tail(&conn->rac_hashlist,
787 kranal_cqid2connlist(conn->rac_cqid));
789 /* Schedule all packets blocking for a connection */
790 while (!list_empty(&peer->rap_tx_queue)) {
791 tx = list_entry(peer->rap_tx_queue.next,
794 list_del(&tx->tx_list);
795 kranal_post_fma(conn, tx);
798 nstale = kranal_close_stale_conns_locked(peer, conn);
800 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
802 /* CAVEAT EMPTOR: passive peer can disappear NOW */
805 CWARN("Closed %d stale conns to %s\n", nstale,
806 libcfs_nid2str(peer_nid));
808 CWARN("New connection to %s on devid[%d] = %d\n",
809 libcfs_nid2str(peer_nid),
810 conn->rac_device->rad_idx, conn->rac_device->rad_id);
812 /* Ensure conn gets checked. Transmits may have been queued and an
813 * FMA event may have happened before it got in the cq hash table */
814 kranal_schedule_conn(conn);
/* Failure cleanup: drop the refs taken above. */
819 kranal_peer_decref(peer);
820 kranal_conn_decref(conn);
/* connd entry point: run the active handshake for 'peer'.  On success
 * blocked txs were already flushed by the handshake; on failure apply
 * exponential backoff (clamped to tunable min/max) and fail every
 * queued tx with -EHOSTUNREACH. */
825 kranal_connect (kra_peer_t *peer)
829 struct list_head zombies;
832 LASSERT (peer->rap_connecting);
834 CDEBUG(D_NET, "About to handshake %s\n",
835 libcfs_nid2str(peer->rap_nid));
837 rc = kranal_conn_handshake(NULL, peer);
839 CDEBUG(D_NET, "Done handshake %s:%d \n",
840 libcfs_nid2str(peer->rap_nid), rc);
842 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
844 LASSERT (peer->rap_connecting);
845 peer->rap_connecting = 0;
848 /* kranal_conn_handshake() queues blocked txs immediately on
849 * success to avoid messages jumping the queue */
850 LASSERT (list_empty(&peer->rap_tx_queue));
852 peer->rap_reconnect_interval = 0; /* OK to reconnect at any time */
854 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* Handshake failed: back off before the next attempt. */
858 peer->rap_reconnect_interval *= 2;
859 peer->rap_reconnect_interval =
860 MAX(peer->rap_reconnect_interval,
861 *kranal_tunables.kra_min_reconnect_interval);
862 peer->rap_reconnect_interval =
863 MIN(peer->rap_reconnect_interval,
864 *kranal_tunables.kra_max_reconnect_interval);
866 peer->rap_reconnect_time = jiffies + peer->rap_reconnect_interval * HZ;
868 /* Grab all blocked packets while we have the global lock */
869 list_add(&zombies, &peer->rap_tx_queue);
870 list_del_init(&peer->rap_tx_queue);
872 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
874 if (list_empty(&zombies))
877 CDEBUG(D_NETERROR, "Dropping packets for %s: connection failed\n",
878 libcfs_nid2str(peer->rap_nid));
881 tx = list_entry(zombies.next, kra_tx_t, tx_list);
883 list_del(&tx->tx_list);
884 kranal_tx_done(tx, -EHOSTUNREACH);
886 } while (!list_empty(&zombies));
/* Release an accepted socket wrapper: close the socket, free the memory. */
890 kranal_free_acceptsock (kra_acceptsock_t *ras)
892 libcfs_sock_release(ras->ras_sock);
893 LIBCFS_FREE(ras, sizeof(*ras));
/* lnd_accept callback: wrap the freshly accepted 'sock' in a
 * kra_acceptsock_t and queue it for the connd thread to handshake. */
897 kranal_accept (lnet_ni_t *ni, struct socket *sock)
899 kra_acceptsock_t *ras;
905 rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
906 LASSERT (rc == 0); /* we succeeded before */
908 LIBCFS_ALLOC(ras, sizeof(*ras));
910 CERROR("ENOMEM allocating connection request from "
911 "%u.%u.%u.%u\n", HIPQUAD(peer_ip));
915 ras->ras_sock = sock;
917 spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
919 list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
920 wake_up(&kranal_data.kra_connd_waitq);
922 spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
/* Allocate a peer for 'nid' with refcount 1 (caller's ref) and empty
 * lists.  Fails if the module has started shutting down
 * (kra_nonewpeers), in which case the allocation is released. */
927 kranal_create_peer (kra_peer_t **peerp, lnet_nid_t nid)
932 LASSERT (nid != LNET_NID_ANY);
934 LIBCFS_ALLOC(peer, sizeof(*peer));
938 memset(peer, 0, sizeof(*peer)); /* zero flags etc */
941 atomic_set(&peer->rap_refcount, 1); /* 1 ref for caller */
943 INIT_LIST_HEAD(&peer->rap_list);
944 INIT_LIST_HEAD(&peer->rap_connd_list);
945 INIT_LIST_HEAD(&peer->rap_conns);
946 INIT_LIST_HEAD(&peer->rap_tx_queue);
948 peer->rap_reconnect_interval = 0; /* OK to connect at any time */
950 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
952 if (kranal_data.kra_nonewpeers) {
953 /* shutdown has started already */
954 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
956 LIBCFS_FREE(peer, sizeof(*peer));
957 CERROR("Can't create peer: network shutdown\n");
961 atomic_inc(&kranal_data.kra_npeers);
963 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* Final teardown of a peer whose refcount hit zero: must be unlinked,
 * non-persistent, not connecting, with all lists empty. */
970 kranal_destroy_peer (kra_peer_t *peer)
972 CDEBUG(D_NET, "peer %s %p deleted\n",
973 libcfs_nid2str(peer->rap_nid), peer);
975 LASSERT (atomic_read(&peer->rap_refcount) == 0);
976 LASSERT (peer->rap_persistence == 0);
977 LASSERT (!kranal_peer_active(peer));
978 LASSERT (!peer->rap_connecting);
979 LASSERT (list_empty(&peer->rap_conns));
980 LASSERT (list_empty(&peer->rap_tx_queue));
981 LASSERT (list_empty(&peer->rap_connd_list));
983 LIBCFS_FREE(peer, sizeof(*peer));
985 /* NB a peer's connections keep a reference on their peer until
986 * they are destroyed, so we can be assured that _all_ state to do
987 * with this peer has been cleaned up when its refcount drops to
989 atomic_dec(&kranal_data.kra_npeers);
/* Look 'nid' up in its peer hash chain.  Caller holds the global lock;
 * no ref is taken for the caller. */
993 kranal_find_peer_locked (lnet_nid_t nid)
995 struct list_head *peer_list = kranal_nid2peerlist(nid);
996 struct list_head *tmp;
999 list_for_each (tmp, peer_list) {
1001 peer = list_entry(tmp, kra_peer_t, rap_list);
1003 LASSERT (peer->rap_persistence > 0 || /* persistent peer */
1004 !list_empty(&peer->rap_conns)); /* active conn */
1006 if (peer->rap_nid != nid)
1009 CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
1010 peer, libcfs_nid2str(nid),
1011 atomic_read(&peer->rap_refcount));
/* Locked lookup wrapper: takes a ref for the caller if the peer exists. */
1018 kranal_find_peer (lnet_nid_t nid)
1022 read_lock(&kranal_data.kra_global_lock);
1023 peer = kranal_find_peer_locked(nid);
1024 if (peer != NULL) /* +1 ref for caller? */
1025 kranal_peer_addref(peer);
1026 read_unlock(&kranal_data.kra_global_lock);
/* Remove a non-persistent, connectionless peer from the peer table and
 * drop the table's ref.  Caller holds the global write lock. */
1032 kranal_unlink_peer_locked (kra_peer_t *peer)
1034 LASSERT (peer->rap_persistence == 0);
1035 LASSERT (list_empty(&peer->rap_conns));
1037 LASSERT (kranal_peer_active(peer));
1038 list_del_init(&peer->rap_list);
1040 /* lose peerlist's ref */
1041 kranal_peer_decref(peer);
/* Walk the peer table and return info (nid/ip/port/persistence) for the
 * 'index'-th peer; used by the GET_PEER ioctl.  NB line-sampled: the
 * index-decrement test between the LASSERT and the copies is elided. */
1045 kranal_get_peer_info (int index, lnet_nid_t *nidp, __u32 *ipp, int *portp,
1049 struct list_head *ptmp;
1052 read_lock(&kranal_data.kra_global_lock);
1054 for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
1056 list_for_each(ptmp, &kranal_data.kra_peers[i]) {
1058 peer = list_entry(ptmp, kra_peer_t, rap_list);
1059 LASSERT (peer->rap_persistence > 0 ||
1060 !list_empty(&peer->rap_conns));
1065 *nidp = peer->rap_nid;
1066 *ipp = peer->rap_ip;
1067 *portp = peer->rap_port;
1068 *persistencep = peer->rap_persistence;
1070 read_unlock(&kranal_data.kra_global_lock);
1075 read_unlock(&kranal_data.kra_global_lock);
/* ADD_PEER ioctl: create (or find existing) peer for 'nid', record its
 * ip/port and bump persistence so it survives with no connections. */
1080 kranal_add_persistent_peer (lnet_nid_t nid, __u32 ip, int port)
1082 unsigned long flags;
1087 if (nid == LNET_NID_ANY)
1090 rc = kranal_create_peer(&peer, nid);
1094 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1096 peer2 = kranal_find_peer_locked(nid);
1097 if (peer2 != NULL) {
1098 kranal_peer_decref(peer);
1101 /* peer table takes existing ref on peer */
1102 list_add_tail(&peer->rap_list,
1103 kranal_nid2peerlist(nid));
1107 peer->rap_port = port;
1108 peer->rap_persistence++;
1110 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* Drop a peer's persistence and either unlink it (no conns) or close
 * all its conns; the peer unlinks itself when the last conn closes.
 * Caller holds the global write lock. */
1115 kranal_del_peer_locked (kra_peer_t *peer)
1117 struct list_head *ctmp;
1118 struct list_head *cnxt;
1121 peer->rap_persistence = 0;
1123 if (list_empty(&peer->rap_conns)) {
1124 kranal_unlink_peer_locked(peer);
1126 list_for_each_safe(ctmp, cnxt, &peer->rap_conns) {
1127 conn = list_entry(ctmp, kra_conn_t, rac_list);
1129 kranal_close_conn_locked(conn, 0);
1131 /* peer unlinks itself when last conn is closed */
/* DEL_PEER ioctl: delete 'nid' (or every peer when LNET_NID_ANY),
 * scanning the relevant hash chain(s).  rc records whether anything
 * matched. */
1136 kranal_del_peer (lnet_nid_t nid)
1138 unsigned long flags;
1139 struct list_head *ptmp;
1140 struct list_head *pnxt;
1147 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1149 if (nid != LNET_NID_ANY)
1150 lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
1153 hi = kranal_data.kra_peer_hash_size - 1;
1156 for (i = lo; i <= hi; i++) {
1157 list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
1158 peer = list_entry(ptmp, kra_peer_t, rap_list);
1159 LASSERT (peer->rap_persistence > 0 ||
1160 !list_empty(&peer->rap_conns));
1162 if (!(nid == LNET_NID_ANY || peer->rap_nid == nid))
1165 kranal_del_peer_locked(peer);
1166 rc = 0; /* matched something */
1170 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* GET_CONN ioctl helper: walk all peers' conns and return the
 * 'index'-th one with a ref taken for the caller (the index-decrement
 * test is elided in this excerpt). */
1176 kranal_get_conn_by_idx (int index)
1179 struct list_head *ptmp;
1181 struct list_head *ctmp;
1184 read_lock (&kranal_data.kra_global_lock);
1186 for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
1187 list_for_each (ptmp, &kranal_data.kra_peers[i]) {
1189 peer = list_entry(ptmp, kra_peer_t, rap_list);
1190 LASSERT (peer->rap_persistence > 0 ||
1191 !list_empty(&peer->rap_conns));
1193 list_for_each (ctmp, &peer->rap_conns) {
1197 conn = list_entry(ctmp, kra_conn_t, rac_list);
1198 CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
1199 libcfs_nid2str(conn->rac_peer->rap_nid),
1200 atomic_read(&conn->rac_refcount));
1201 atomic_inc(&conn->rac_refcount);
1202 read_unlock(&kranal_data.kra_global_lock);
1208 read_unlock(&kranal_data.kra_global_lock);
/* Close every conn of 'peer' with reason 'why'; returns the count
 * (counter increment elided in this excerpt).  Caller holds the global
 * write lock. */
1213 kranal_close_peer_conns_locked (kra_peer_t *peer, int why)
1216 struct list_head *ctmp;
1217 struct list_head *cnxt;
1220 list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
1221 conn = list_entry(ctmp, kra_conn_t, rac_list);
1224 kranal_close_conn_locked(conn, why);
/* CLOSE_CONNECTION ioctl: close all conns to 'nid' (or to everyone when
 * LNET_NID_ANY).  Wildcard always succeeds; a specific nid returns
 * -ENOENT if nothing was closed. */
1231 kranal_close_matching_conns (lnet_nid_t nid)
1233 unsigned long flags;
1235 struct list_head *ptmp;
1236 struct list_head *pnxt;
1242 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1244 if (nid != LNET_NID_ANY)
1245 lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
1248 hi = kranal_data.kra_peer_hash_size - 1;
1251 for (i = lo; i <= hi; i++) {
1252 list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
1254 peer = list_entry(ptmp, kra_peer_t, rap_list);
1255 LASSERT (peer->rap_persistence > 0 ||
1256 !list_empty(&peer->rap_conns));
1258 if (!(nid == LNET_NID_ANY || nid == peer->rap_nid))
1261 count += kranal_close_peer_conns_locked(peer, 0);
1265 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
1267 /* wildcards always succeed */
1268 if (nid == LNET_NID_ANY)
1271 return (count == 0) ? -ENOENT : 0;
/* lnd_ctl callback: dispatch libcfs ioctls (peer/conn inspection and
 * management).  REGISTER_MYNID is accepted as a no-op only when the nid
 * matches; otherwise it is reported as obsolete. */
1275 kranal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
1277 struct libcfs_ioctl_data *data = arg;
1280 LASSERT (ni == kranal_data.kra_ni);
1283 case IOC_LIBCFS_GET_PEER: {
1287 int share_count = 0;
1289 rc = kranal_get_peer_info(data->ioc_count,
1290 &nid, &ip, &port, &share_count);
1291 data->ioc_nid = nid;
1292 data->ioc_count = share_count;
1293 data->ioc_u32[0] = ip;
1294 data->ioc_u32[1] = port;
1297 case IOC_LIBCFS_ADD_PEER: {
1298 rc = kranal_add_persistent_peer(data->ioc_nid,
1299 data->ioc_u32[0], /* IP */
1300 data->ioc_u32[1]); /* port */
1303 case IOC_LIBCFS_DEL_PEER: {
1304 rc = kranal_del_peer(data->ioc_nid);
1307 case IOC_LIBCFS_GET_CONN: {
1308 kra_conn_t *conn = kranal_get_conn_by_idx(data->ioc_count);
1314 data->ioc_nid = conn->rac_peer->rap_nid;
1315 data->ioc_u32[0] = conn->rac_device->rad_id;
1316 kranal_conn_decref(conn);
1320 case IOC_LIBCFS_CLOSE_CONNECTION: {
1321 rc = kranal_close_matching_conns(data->ioc_nid);
1324 case IOC_LIBCFS_REGISTER_MYNID: {
1325 /* Ignore if this is a noop */
1326 if (data->ioc_nid == ni->ni_nid) {
1329 CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
1330 libcfs_nid2str(data->ioc_nid),
1331 libcfs_nid2str(ni->ni_nid));
/* Free every tx descriptor on 'freelist', including each tx's
 * LNET_MAX_IOV-sized physical-fragment array. */
1342 kranal_free_txdescs(struct list_head *freelist)
1346 while (!list_empty(freelist)) {
1347 tx = list_entry(freelist->next, kra_tx_t, tx_list);
1349 list_del(&tx->tx_list);
1350 LIBCFS_FREE(tx->tx_phys, LNET_MAX_IOV * sizeof(*tx->tx_phys));
1351 LIBCFS_FREE(tx, sizeof(*tx));
/* Pre-allocate 'n' tx descriptors (plus their phys-fragment arrays)
 * onto the idle-tx freelist; on any allocation failure the partial list
 * is freed via kranal_free_txdescs(). */
1356 kranal_alloc_txdescs(struct list_head *freelist, int n)
1361 LASSERT (freelist == &kranal_data.kra_idle_txs);
1362 LASSERT (list_empty(freelist));
1364 for (i = 0; i < n; i++) {
1366 LIBCFS_ALLOC(tx, sizeof(*tx));
1368 CERROR("Can't allocate tx[%d]\n", i);
1369 kranal_free_txdescs(freelist);
1373 LIBCFS_ALLOC(tx->tx_phys,
1374 LNET_MAX_IOV * sizeof(*tx->tx_phys));
1375 if (tx->tx_phys == NULL) {
1376 CERROR("Can't allocate tx[%d]->tx_phys\n", i);
1378 LIBCFS_FREE(tx, sizeof(*tx));
1379 kranal_free_txdescs(freelist);
1383 tx->tx_buftype = RANAL_BUF_NONE;
1384 tx->tx_msg.ram_type = RANAL_MSG_NONE;
1386 list_add(&tx->tx_list, freelist);
/* Bring up RapidArray device 'id': open it, reserve RDMA descriptors
 * for all txs, and create the send (RDMA) and receive (FMA) completion
 * queues.  Error path tears down in reverse order. */
1393 kranal_device_init(int id, kra_device_t *dev)
1395 int total_ntx = *kranal_tunables.kra_ntx;
1399 rrc = RapkGetDeviceByIndex(id, kranal_device_callback,
1401 if (rrc != RAP_SUCCESS) {
1402 CERROR("Can't get Rapidarray Device %d: %d\n", id, rrc);
1406 rrc = RapkReserveRdma(dev->rad_handle, total_ntx);
1407 if (rrc != RAP_SUCCESS) {
1408 CERROR("Can't reserve %d RDMA descriptors"
1409 " for device %d: %d\n", total_ntx, id, rrc);
1413 rrc = RapkCreateCQ(dev->rad_handle, total_ntx, RAP_CQTYPE_SEND,
1414 &dev->rad_rdma_cqh);
1415 if (rrc != RAP_SUCCESS) {
1416 CERROR("Can't create rdma cq size %d for device %d: %d\n",
1417 total_ntx, id, rrc);
1421 rrc = RapkCreateCQ(dev->rad_handle,
1422 *kranal_tunables.kra_fma_cq_size,
1423 RAP_CQTYPE_RECV, &dev->rad_fma_cqh);
1424 if (rrc != RAP_SUCCESS) {
1425 CERROR("Can't create fma cq size %d for device %d: %d\n",
1426 *kranal_tunables.kra_fma_cq_size, id, rrc);
/* Unwind: destroy the RDMA CQ then release the device. */
1433 RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
1435 RapkReleaseDevice(dev->rad_handle);
/* Tear down one RapidArray device.  Asserts that the device is fully
 * quiesced first: no conns queued for its scheduler, nothing still
 * memory-mapped, and its scheduler thread already exited.  Then destroys
 * both CQs (reverse of creation order) and releases the device handle. */
1441 kranal_device_fini(kra_device_t *dev)
1443 LASSERT (list_empty(&dev->rad_ready_conns));
1444 LASSERT (list_empty(&dev->rad_new_conns));
/* all four mapping counters must have returned to zero */
1445 LASSERT (dev->rad_nphysmap == 0);
1446 LASSERT (dev->rad_nppphysmap == 0);
1447 LASSERT (dev->rad_nvirtmap == 0);
1448 LASSERT (dev->rad_nobvirtmap == 0);
1450 LASSERT(dev->rad_scheduler == NULL);
1451 RapkDestroyCQ(dev->rad_handle, dev->rad_fma_cqh);
1452 RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
1453 RapkReleaseDevice(dev->rad_handle);
/* LND shutdown entry point (lnd_shutdown).  Tears the NAL down in phases
 * keyed off kranal_data.kra_init: stop new peers, delete and wait out
 * existing peers and pending conn requests, flag and wake all threads,
 * wait for them to exit, then free the hash tables, devices and tx
 * descriptors.  The switch falls through from RANAL_INIT_ALL to
 * RANAL_INIT_DATA so a partial startup is cleaned up by the same code. */
1457 kranal_shutdown (lnet_ni_t *ni)
1460 unsigned long flags;
1462 CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
1463 atomic_read(&libcfs_kmemory));
/* only ever called on the single registered NI */
1465 LASSERT (ni == kranal_data.kra_ni);
1466 LASSERT (ni->ni_data == &kranal_data);
1468 switch (kranal_data.kra_init) {
1470 CERROR("Unexpected state %d\n", kranal_data.kra_init);
1473 case RANAL_INIT_ALL:
1474 /* Prevent new peers from being created */
1475 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1476 kranal_data.kra_nonewpeers = 1;
1477 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
1479 /* Remove all existing peers from the peer table */
1480 kranal_del_peer(LNET_NID_ANY);
1482 /* Wait for pending conn reqs to be handled */
1484 spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
1485 while (!list_empty(&kranal_data.kra_connd_acceptq)) {
/* drop the lock while sleeping so connds can drain the queue */
1486 spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
1489 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
1490 "waiting for conn reqs to clean up\n");
1491 cfs_pause(cfs_time_seconds(1));
1493 spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
1495 spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
1497 /* Wait for all peers to be freed */
1499 while (atomic_read(&kranal_data.kra_npeers) != 0) {
/* log at D_WARNING only on power-of-2 iterations to limit noise */
1501 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
1502 "waiting for %d peers to close down\n",
1503 atomic_read(&kranal_data.kra_npeers));
1504 cfs_pause(cfs_time_seconds(1));
/* fall through: partial startups resume cleanup here */
1508 case RANAL_INIT_DATA:
1512 /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
1513 * have to worry about shutdown races. NB connections may be created
1514 * while there are still active connds, but these will be temporary
1515 * since peer creation always fails after the listener has started to
1517 LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
1519 /* Flag threads to terminate */
1520 kranal_data.kra_shutdown = 1;
/* wake every scheduler so it notices kra_shutdown */
1522 for (i = 0; i < kranal_data.kra_ndevs; i++) {
1523 kra_device_t *dev = &kranal_data.kra_devices[i];
1525 spin_lock_irqsave(&dev->rad_lock, flags);
1526 wake_up(&dev->rad_waitq);
1527 spin_unlock_irqrestore(&dev->rad_lock, flags);
/* wake the reaper and the connds likewise */
1530 spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
1531 wake_up_all(&kranal_data.kra_reaper_waitq);
1532 spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
1534 LASSERT (list_empty(&kranal_data.kra_connd_peers));
1535 spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
1536 wake_up_all(&kranal_data.kra_connd_waitq);
1537 spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
1539 /* Wait for threads to exit */
1541 while (atomic_read(&kranal_data.kra_nthreads) != 0) {
1543 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
1544 "Waiting for %d threads to terminate\n",
1545 atomic_read(&kranal_data.kra_nthreads));
1546 cfs_pause(cfs_time_seconds(1));
/* with all threads gone, both hash tables must be empty */
1549 LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
1550 if (kranal_data.kra_peers != NULL) {
1551 for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
1552 LASSERT (list_empty(&kranal_data.kra_peers[i]));
1554 LIBCFS_FREE(kranal_data.kra_peers,
1555 sizeof (struct list_head) *
1556 kranal_data.kra_peer_hash_size);
1559 LASSERT (atomic_read(&kranal_data.kra_nconns) == 0);
1560 if (kranal_data.kra_conns != NULL) {
1561 for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
1562 LASSERT (list_empty(&kranal_data.kra_conns[i]));
1564 LIBCFS_FREE(kranal_data.kra_conns,
1565 sizeof (struct list_head) *
1566 kranal_data.kra_conn_hash_size);
1569 for (i = 0; i < kranal_data.kra_ndevs; i++)
1570 kranal_device_fini(&kranal_data.kra_devices[i]);
1572 kranal_free_txdescs(&kranal_data.kra_idle_txs);
1574 CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
1575 atomic_read(&libcfs_kmemory));
/* back to pristine state; module refcount released */
1577 kranal_data.kra_init = RANAL_INIT_NOTHING;
1578 PORTAL_MODULE_UNUSE;
/* LND startup entry point (lnd_startup).  Validates tunables, initialises
 * kranal_data (locks, wait queues, lists, stamps), allocates the peer and
 * conn hash tables and the idle-tx pool, then spawns the reaper, connd and
 * per-device scheduler threads and initialises every available RapidArray
 * device.  Any failure after RANAL_INIT_DATA is set is cleaned up by
 * calling kranal_shutdown(ni) (visible at the end of the block). */
1582 kranal_startup (lnet_ni_t *ni)
/* remember current kmem level to report leaks at shutdown */
1585 int pkmem = atomic_read(&libcfs_kmemory);
1590 LASSERT (ni->ni_lnd == &the_kralnd);
1592 /* Only 1 instance supported */
1593 if (kranal_data.kra_init != RANAL_INIT_NOTHING) {
1594 CERROR ("Only 1 instance supported\n");
1598 if (lnet_set_ip_niaddr(ni) != 0) {
1599 CERROR ("Can't determine my NID\n");
/* credits may not exceed the number of tx descriptors backing them */
1603 if (*kranal_tunables.kra_credits > *kranal_tunables.kra_ntx) {
1604 CERROR ("Can't set credits(%d) > ntx(%d)\n",
1605 *kranal_tunables.kra_credits,
1606 *kranal_tunables.kra_ntx);
1610 memset(&kranal_data, 0, sizeof(kranal_data)); /* zero pointers, flags etc */
1612 ni->ni_maxtxcredits = *kranal_tunables.kra_credits;
1613 ni->ni_peertxcredits = *kranal_tunables.kra_peercredits;
1615 ni->ni_data = &kranal_data;
1616 kranal_data.kra_ni = ni;
1618 /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
1619 * a unique (for all time) connstamp so we can uniquely identify
1620 * the sender. The connstamp is an incrementing counter
1621 * initialised with seconds + microseconds at startup time. So we
1622 * rely on NOT creating connections more frequently on average than
1623 * 1MHz to ensure we don't use old connstamps when we reboot. */
1624 do_gettimeofday(&tv);
1625 kranal_data.kra_connstamp =
1626 kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
1628 rwlock_init(&kranal_data.kra_global_lock);
/* initialise per-device state for every possible device slot */
1630 for (i = 0; i < RANAL_MAXDEVS; i++ ) {
1631 kra_device_t *dev = &kranal_data.kra_devices[i];
1634 INIT_LIST_HEAD(&dev->rad_ready_conns);
1635 INIT_LIST_HEAD(&dev->rad_new_conns);
1636 init_waitqueue_head(&dev->rad_waitq);
1637 spin_lock_init(&dev->rad_lock);
1640 kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
1641 init_waitqueue_head(&kranal_data.kra_reaper_waitq);
1642 spin_lock_init(&kranal_data.kra_reaper_lock);
1644 INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
1645 INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
1646 init_waitqueue_head(&kranal_data.kra_connd_waitq);
1647 spin_lock_init(&kranal_data.kra_connd_lock);
1649 INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
1650 spin_lock_init(&kranal_data.kra_tx_lock);
1652 /* OK to call kranal_api_shutdown() to cleanup now */
1653 kranal_data.kra_init = RANAL_INIT_DATA;
1656 kranal_data.kra_peer_hash_size = RANAL_PEER_HASH_SIZE;
1657 LIBCFS_ALLOC(kranal_data.kra_peers,
1658 sizeof(struct list_head) * kranal_data.kra_peer_hash_size);
1659 if (kranal_data.kra_peers == NULL)
1662 for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
1663 INIT_LIST_HEAD(&kranal_data.kra_peers[i]);
/* NB conn table reuses the peer hash size constant */
1665 kranal_data.kra_conn_hash_size = RANAL_PEER_HASH_SIZE;
1666 LIBCFS_ALLOC(kranal_data.kra_conns,
1667 sizeof(struct list_head) * kranal_data.kra_conn_hash_size);
1668 if (kranal_data.kra_conns == NULL)
1671 for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
1672 INIT_LIST_HEAD(&kranal_data.kra_conns[i]);
1674 rc = kranal_alloc_txdescs(&kranal_data.kra_idle_txs,
1675 *kranal_tunables.kra_ntx);
1679 rc = kranal_thread_start(kranal_reaper, NULL);
1681 CERROR("Can't spawn ranal reaper: %d\n", rc);
/* one connd per tunable count; index passed as the thread arg */
1685 for (i = 0; i < *kranal_tunables.kra_n_connd; i++) {
1686 rc = kranal_thread_start(kranal_connd, (void *)(unsigned long)i);
1688 CERROR("Can't spawn ranal connd[%d]: %d\n",
1694 LASSERT (kranal_data.kra_ndevs == 0);
1696 /* Use all available RapidArray devices */
1697 for (i = 0; i < RANAL_MAXDEVS; i++) {
1698 dev = &kranal_data.kra_devices[kranal_data.kra_ndevs];
1700 rc = kranal_device_init(kranal_devids[i], dev);
/* count only devices that initialised successfully */
1702 kranal_data.kra_ndevs++;
1705 if (kranal_data.kra_ndevs == 0) {
1706 CERROR("Can't initialise any RapidArray devices\n");
/* one scheduler thread per live device */
1710 for (i = 0; i < kranal_data.kra_ndevs; i++) {
1711 dev = &kranal_data.kra_devices[i];
1712 rc = kranal_thread_start(kranal_scheduler, dev);
1714 CERROR("Can't spawn ranal scheduler[%d]: %d\n",
1720 /* flag everything initialised */
1721 kranal_data.kra_init = RANAL_INIT_ALL;
1722 /*****************************************************/
1724 CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
/* common failure path: full teardown of whatever got built */
1728 kranal_shutdown(ni);
/* Module unload: unregister the LND from LNet, then release tunables. */
1733 kranal_module_fini (void)
1735 lnet_unregister_lnd(&the_kralnd);
1736 kranal_tunables_fini();
/* Module load: initialise tunables (rc check is in the elided lines),
 * then register this LND with LNet so lnd_startup can be invoked. */
1740 kranal_module_init (void)
1744 rc = kranal_tunables_init();
1748 lnet_register_lnd(&the_kralnd);
/* Standard kernel module metadata and entry/exit hook registration. */
1753 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
1754 MODULE_DESCRIPTION("Kernel RapidArray LND v0.01");
1755 MODULE_LICENSE("GPL");
1757 module_init(kranal_module_init);
1758 module_exit(kranal_module_fini);