/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/ralnd/ralnd.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include "ralnd.h"
static int        kranal_devids[RANAL_MAXDEVS] = {RAPK_MAIN_DEVICE_ID,
                                                  RAPK_EXPANSION_DEVICE_ID};

lnd_t the_kralnd = {
        .lnd_type       = RALND,
        .lnd_startup    = kranal_startup,
        .lnd_shutdown   = kranal_shutdown,
        .lnd_ctl        = kranal_ctl,
        .lnd_send       = kranal_send,
        .lnd_recv       = kranal_recv,
        .lnd_eager_recv = kranal_eager_recv,
        .lnd_accept     = kranal_accept,
};

kra_data_t        kranal_data;

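/* Pack a connection request.  With conn == NULL this packs only the
 * magic/version "stub" used to tell an incompatible peer which protocol we
 * speak; otherwise it fills in the full handshake payload for 'conn'. */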
void
kranal_pack_connreq(kra_connreq_t *connreq, kra_conn_t *conn, lnet_nid_t dstnid)
{
        RAP_RETURN   rrc;

        memset(connreq, 0, sizeof(*connreq));

        connreq->racr_magic     = RANAL_MSG_MAGIC;
        connreq->racr_version   = RANAL_MSG_VERSION;

        if (conn == NULL)                       /* prepping a "stub" reply */
                return;

        connreq->racr_devid     = conn->rac_device->rad_id;
        connreq->racr_srcnid    = kranal_data.kra_ni->ni_nid;
        connreq->racr_dstnid    = dstnid;
        connreq->racr_peerstamp = kranal_data.kra_peerstamp;
        connreq->racr_connstamp = conn->rac_my_connstamp;
        connreq->racr_timeout   = conn->rac_timeout;

        rrc = RapkGetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
        LASSERT(rrc == RAP_SUCCESS);
}

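/* Read a connection request from 'sock'.  The magic is read first and
 * doubles as a byte-order probe: if it arrives byte-swapped, all later
 * fields are swabbed into host order before they are validated. */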
int
kranal_recv_connreq(struct socket *sock, kra_connreq_t *connreq, int active)
{
        int         timeout = active ? *kranal_tunables.kra_timeout :
                                       lnet_acceptor_timeout();
        int         swab;
        int         rc;

        /* return 0 on success, -ve on error, +ve to tell the peer I'm "old" */

        rc = libcfs_sock_read(sock, &connreq->racr_magic,
                              sizeof(connreq->racr_magic), timeout);
        if (rc != 0) {
                CERROR("Read(magic) failed(1): %d\n", rc);
                return -EIO;
        }

        if (connreq->racr_magic != RANAL_MSG_MAGIC &&
            connreq->racr_magic != __swab32(RANAL_MSG_MAGIC)) {
                /* Unexpected magic! */
                if (!active &&
                    (connreq->racr_magic == LNET_PROTO_MAGIC ||
                     connreq->racr_magic == __swab32(LNET_PROTO_MAGIC))) {
                        /* future protocol version compatibility!
                         * When LNET unifies protocols over all LNDs, the first
                         * thing sent will be a version query.  +ve rc means I
                         * reply with my current magic/version */
                        return EPROTO;
                }

                CERROR("Unexpected magic %08x (%s)\n",
                       connreq->racr_magic, active ? "active" : "passive");
                return -EPROTO;
        }

        swab = (connreq->racr_magic == __swab32(RANAL_MSG_MAGIC));

        rc = libcfs_sock_read(sock, &connreq->racr_version,
                              sizeof(connreq->racr_version), timeout);
        if (rc != 0) {
                CERROR("Read(version) failed: %d\n", rc);
                return -EIO;
        }

        if (swab)
                __swab16s(&connreq->racr_version);

        if (connreq->racr_version != RANAL_MSG_VERSION) {
                if (active) {
                        CERROR("Unexpected version %d\n", connreq->racr_version);
                        return -EPROTO;
                }
                /* If this is a future version of the ralnd protocol, and I'm
                 * passive (accepted the connection), tell my peer I'm "old"
                 * (+ve rc) */
                return EPROTO;
        }

        rc = libcfs_sock_read(sock, &connreq->racr_devid,
                              sizeof(*connreq) -
                              offsetof(kra_connreq_t, racr_devid),
                              timeout);
        if (rc != 0) {
                CERROR("Read(body) failed: %d\n", rc);
                return -EIO;
        }

        if (swab) {
                __swab32s(&connreq->racr_magic);
                __swab16s(&connreq->racr_version);
                __swab16s(&connreq->racr_devid);
                __swab64s(&connreq->racr_srcnid);
                __swab64s(&connreq->racr_dstnid);
                __swab64s(&connreq->racr_peerstamp);
                __swab64s(&connreq->racr_connstamp);
                __swab32s(&connreq->racr_timeout);

                __swab32s(&connreq->racr_riparams.HostId);
                __swab32s(&connreq->racr_riparams.FmaDomainHndl);
                __swab32s(&connreq->racr_riparams.PTag);
                __swab32s(&connreq->racr_riparams.CompletionCookie);
        }

        if (connreq->racr_srcnid == LNET_NID_ANY ||
            connreq->racr_dstnid == LNET_NID_ANY) {
                CERROR("Received LNET_NID_ANY\n");
                return -EPROTO;
        }

        if (connreq->racr_timeout < RANAL_MIN_TIMEOUT) {
                CERROR("Received timeout %d < MIN %d\n",
                       connreq->racr_timeout, RANAL_MIN_TIMEOUT);
                return -EPROTO;
        }

        return 0;
}

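/* Close any conns 'peer' has that 'newconn' obsoletes: conns from an older
 * incarnation of the peer (smaller peerstamp) or older connection attempts
 * on the same device (smaller peer connstamp).  Returns the number closed. */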
int
kranal_close_stale_conns_locked (kra_peer_t *peer, kra_conn_t *newconn)
{
        kra_conn_t         *conn;
        cfs_list_t         *ctmp;
        cfs_list_t         *cnxt;
        int                 loopback;
        int                 count = 0;

        loopback = peer->rap_nid == kranal_data.kra_ni->ni_nid;

        cfs_list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
                conn = cfs_list_entry(ctmp, kra_conn_t, rac_list);

                if (conn == newconn)
                        continue;

                if (conn->rac_peerstamp != newconn->rac_peerstamp) {
                        CDEBUG(D_NET, "Closing stale conn nid: %s "
                               " peerstamp:"LPX64"("LPX64")\n",
                               libcfs_nid2str(peer->rap_nid),
                               conn->rac_peerstamp, newconn->rac_peerstamp);
                        LASSERT (conn->rac_peerstamp < newconn->rac_peerstamp);
                        count++;
                        kranal_close_conn_locked(conn, -ESTALE);
                        continue;
                }

                if (conn->rac_device != newconn->rac_device)
                        continue;

                if (loopback &&
                    newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
                    newconn->rac_peer_connstamp == conn->rac_my_connstamp)
                        continue;

                LASSERT (conn->rac_peer_connstamp < newconn->rac_peer_connstamp);

                CDEBUG(D_NET, "Closing stale conn nid: %s"
                       " connstamp:"LPX64"("LPX64")\n",
                       libcfs_nid2str(peer->rap_nid),
                       conn->rac_peer_connstamp, newconn->rac_peer_connstamp);

                count++;
                kranal_close_conn_locked(conn, -ESTALE);
        }

        return count;
}

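/* Returns non-zero if an equivalent connection to 'peer' already exists:
 * 1 when 'newconn' carries a stale peerstamp, 2 when it is an earlier
 * connection attempt, 3 when the connstamps are identical. */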
int
kranal_conn_isdup_locked(kra_peer_t *peer, kra_conn_t *newconn)
{
        kra_conn_t       *conn;
        cfs_list_t       *tmp;
        int               loopback;

        loopback = peer->rap_nid == kranal_data.kra_ni->ni_nid;

        cfs_list_for_each(tmp, &peer->rap_conns) {
                conn = cfs_list_entry(tmp, kra_conn_t, rac_list);

                /* 'newconn' is from an earlier version of 'peer'!!! */
                if (newconn->rac_peerstamp < conn->rac_peerstamp)
                        return 1;

                /* 'conn' is from an earlier version of 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->rac_peerstamp > conn->rac_peerstamp)
                        continue;

                /* Different devices are OK */
                if (conn->rac_device != newconn->rac_device)
                        continue;

                /* It's me connecting to myself */
                if (loopback &&
                    newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
                    newconn->rac_peer_connstamp == conn->rac_my_connstamp)
                        continue;

                /* 'newconn' is an earlier connection from 'peer'!!! */
                if (newconn->rac_peer_connstamp < conn->rac_peer_connstamp)
                        return 2;

                /* 'conn' is an earlier connection from 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->rac_peer_connstamp > conn->rac_peer_connstamp)
                        continue;

                /* 'newconn' has the SAME connection stamp; 'peer' isn't
                 * playing the game... */
                return 3;
        }

        return 0;
}

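/* Allocate 'conn' a connstamp and a cqid not used by any current conn;
 * both are assigned under the global write lock. */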
void
kranal_set_conn_uniqueness (kra_conn_t *conn)
{
        unsigned long  flags;

        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        conn->rac_my_connstamp = kranal_data.kra_connstamp++;

        do {    /* allocate a unique cqid */
                conn->rac_cqid = kranal_data.kra_next_cqid++;
        } while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL);

        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
}

int
kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
{
        kra_conn_t    *conn;
        RAP_RETURN     rrc;

        LASSERT (!cfs_in_interrupt());
        LIBCFS_ALLOC(conn, sizeof(*conn));

        if (conn == NULL)
                return -ENOMEM;

        memset(conn, 0, sizeof(*conn));
        cfs_atomic_set(&conn->rac_refcount, 1);
        CFS_INIT_LIST_HEAD(&conn->rac_list);
        CFS_INIT_LIST_HEAD(&conn->rac_hashlist);
        CFS_INIT_LIST_HEAD(&conn->rac_schedlist);
        CFS_INIT_LIST_HEAD(&conn->rac_fmaq);
        CFS_INIT_LIST_HEAD(&conn->rac_rdmaq);
        CFS_INIT_LIST_HEAD(&conn->rac_replyq);
        cfs_spin_lock_init(&conn->rac_lock);

        kranal_set_conn_uniqueness(conn);

        conn->rac_device = dev;
        conn->rac_timeout = MAX(*kranal_tunables.kra_timeout, RANAL_MIN_TIMEOUT);
        kranal_update_reaper_timeout(conn->rac_timeout);

        rrc = RapkCreateRi(dev->rad_handle, conn->rac_cqid,
                           &conn->rac_rihandle);
        if (rrc != RAP_SUCCESS) {
                CERROR("RapkCreateRi failed: %d\n", rrc);
                LIBCFS_FREE(conn, sizeof(*conn));
                return -ENETDOWN;
        }

        cfs_atomic_inc(&kranal_data.kra_nconns);

        *connp = conn;
        return 0;
}

void
kranal_destroy_conn(kra_conn_t *conn)
{
        RAP_RETURN         rrc;

        LASSERT (!cfs_in_interrupt());
        LASSERT (!conn->rac_scheduled);
        LASSERT (cfs_list_empty(&conn->rac_list));
        LASSERT (cfs_list_empty(&conn->rac_hashlist));
        LASSERT (cfs_list_empty(&conn->rac_schedlist));
        LASSERT (cfs_atomic_read(&conn->rac_refcount) == 0);
        LASSERT (cfs_list_empty(&conn->rac_fmaq));
        LASSERT (cfs_list_empty(&conn->rac_rdmaq));
        LASSERT (cfs_list_empty(&conn->rac_replyq));

        rrc = RapkDestroyRi(conn->rac_device->rad_handle,
                            conn->rac_rihandle);
        LASSERT (rrc == RAP_SUCCESS);

        if (conn->rac_peer != NULL)
                kranal_peer_decref(conn->rac_peer);

        LIBCFS_FREE(conn, sizeof(*conn));
        cfs_atomic_dec(&kranal_data.kra_nconns);
}

void
kranal_terminate_conn_locked (kra_conn_t *conn)
{
        LASSERT (!cfs_in_interrupt());
        LASSERT (conn->rac_state == RANAL_CONN_CLOSING);
        LASSERT (!cfs_list_empty(&conn->rac_hashlist));
        LASSERT (cfs_list_empty(&conn->rac_list));

        /* Remove from conn hash table: no new callbacks */
        cfs_list_del_init(&conn->rac_hashlist);
        kranal_conn_decref(conn);

        conn->rac_state = RANAL_CONN_CLOSED;

        /* schedule to clear out all uncompleted comms in context of dev's
         * scheduler */
        kranal_schedule_conn(conn);
}

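/* Caller holds the global write lock.  Demote an ESTABLISHED conn to
 * CLOSING: unlink it from its peer and schedule it so the device scheduler
 * sends the CLOSE message. */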
void
kranal_close_conn_locked (kra_conn_t *conn, int error)
{
        kra_peer_t        *peer = conn->rac_peer;

        CDEBUG_LIMIT(error == 0 ? D_NET : D_NETERROR,
                     "closing conn to %s: error %d\n",
                     libcfs_nid2str(peer->rap_nid), error);

        LASSERT (!cfs_in_interrupt());
        LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED);
        LASSERT (!cfs_list_empty(&conn->rac_hashlist));
        LASSERT (!cfs_list_empty(&conn->rac_list));

        cfs_list_del_init(&conn->rac_list);

        if (cfs_list_empty(&peer->rap_conns) &&
            peer->rap_persistence == 0) {
                /* Non-persistent peer with no more conns... */
                kranal_unlink_peer_locked(peer);
        }

        /* Reset RX timeout to ensure we wait for an incoming CLOSE for the
         * full timeout.  If we get a CLOSE we know the peer has stopped all
         * RDMA.  Otherwise if we wait for the full timeout we can also be sure
         * all RDMA has stopped. */
        conn->rac_last_rx = jiffies;
        cfs_mb();

        conn->rac_state = RANAL_CONN_CLOSING;
        kranal_schedule_conn(conn);             /* schedule sending CLOSE */

        kranal_conn_decref(conn);               /* lose peer's ref */
}

void
kranal_close_conn (kra_conn_t *conn, int error)
{
        unsigned long    flags;

        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        if (conn->rac_state == RANAL_CONN_ESTABLISHED)
                kranal_close_conn_locked(conn, error);

        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
}

int
kranal_set_conn_params(kra_conn_t *conn, kra_connreq_t *connreq,
                       __u32 peer_ip, int peer_port)
{
        kra_device_t  *dev = conn->rac_device;
        unsigned long  flags;
        RAP_RETURN     rrc;

        /* CAVEAT EMPTOR: we're really overloading rac_last_tx + rac_keepalive
         * to do RapkCompleteSync() timekeeping (see kranal_scheduler). */
        conn->rac_last_tx = jiffies;
        conn->rac_keepalive = 0;

        rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
        if (rrc != RAP_SUCCESS) {
                CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer_ip), peer_port, rrc);
                return -ECONNABORTED;
        }

        /* Schedule conn on rad_new_conns */
        kranal_conn_addref(conn);
        cfs_spin_lock_irqsave(&dev->rad_lock, flags);
        cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
        cfs_waitq_signal(&dev->rad_waitq);
        cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);

        rrc = RapkWaitToConnect(conn->rac_rihandle);
        if (rrc != RAP_SUCCESS) {
                CERROR("Error waiting to connect to %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer_ip), peer_port, rrc);
                return -ECONNABORTED;
        }

        /* Scheduler doesn't touch conn apart from to deschedule and decref it
         * after RapkCompleteSync() returns success, so conn is all mine */

        conn->rac_peerstamp = connreq->racr_peerstamp;
        conn->rac_peer_connstamp = connreq->racr_connstamp;
        conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout);
        kranal_update_reaper_timeout(conn->rac_keepalive);
        return 0;
}

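/* Passive handshake: read the peer's connreq from the accepted socket,
 * match it to a local RapidArray device and reply with our own connreq
 * before completing the RI connection. */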
int
kranal_passive_conn_handshake (struct socket *sock, lnet_nid_t *src_nidp,
                               lnet_nid_t *dst_nidp, kra_conn_t **connp)
{
        __u32          peer_ip;
        unsigned int   peer_port;
        kra_connreq_t  rx_connreq;
        kra_connreq_t  tx_connreq;
        kra_conn_t    *conn;
        kra_device_t  *dev;
        int            rc;
        int            i;

        rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
        if (rc != 0) {
                CERROR("Can't get peer's IP: %d\n", rc);
                return rc;
        }

        rc = kranal_recv_connreq(sock, &rx_connreq, 0);
        if (rc < 0) {
                CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer_ip), peer_port, rc);
                return rc;
        }

        if (rc > 0) {
                /* Request from "new" peer: send reply with my MAGIC/VERSION to
                 * tell her I'm old... */
                kranal_pack_connreq(&tx_connreq, NULL, LNET_NID_ANY);

                rc = libcfs_sock_write(sock, &tx_connreq, sizeof(tx_connreq),
                                       lnet_acceptor_timeout());
                if (rc != 0)
                        CERROR("Can't tx stub connreq to %u.%u.%u.%u/%d: %d\n",
                               HIPQUAD(peer_ip), peer_port, rc);

                return -EPROTO;
        }

        for (i = 0;;i++) {
                if (i == kranal_data.kra_ndevs) {
                        CERROR("Can't match dev %d from %u.%u.%u.%u/%d\n",
                               rx_connreq.racr_devid, HIPQUAD(peer_ip),
                               peer_port);
                        return -ENODEV;
                }
                dev = &kranal_data.kra_devices[i];
                if (dev->rad_id == rx_connreq.racr_devid)
                        break;
        }

        rc = kranal_create_conn(&conn, dev);
        if (rc != 0)
                return rc;

        kranal_pack_connreq(&tx_connreq, conn, rx_connreq.racr_srcnid);

        rc = libcfs_sock_write(sock, &tx_connreq, sizeof(tx_connreq),
                               lnet_acceptor_timeout());
        if (rc != 0) {
                CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer_ip), peer_port, rc);
                kranal_conn_decref(conn);
                return rc;
        }

        rc = kranal_set_conn_params(conn, &rx_connreq, peer_ip, peer_port);
        if (rc != 0) {
                kranal_conn_decref(conn);
                return rc;
        }

        *connp = conn;
        *src_nidp = rx_connreq.racr_srcnid;
        *dst_nidp = rx_connreq.racr_dstnid;
        return 0;
}

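/* Active handshake: pick a device (spread by NID), connect to the peer's
 * acceptor, send our connreq first and then validate the reply. */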
int
kranal_active_conn_handshake(kra_peer_t *peer,
                             lnet_nid_t *dst_nidp, kra_conn_t **connp)
{
        kra_connreq_t    connreq;
        kra_conn_t      *conn;
        kra_device_t    *dev;
        struct socket   *sock;
        int              rc;
        unsigned int     idx;

        /* spread connections over all devices using both peer NIDs to ensure
         * all nids use all devices */
        idx = peer->rap_nid + kranal_data.kra_ni->ni_nid;
        dev = &kranal_data.kra_devices[idx % kranal_data.kra_ndevs];

        rc = kranal_create_conn(&conn, dev);
        if (rc != 0)
                return rc;

        kranal_pack_connreq(&connreq, conn, peer->rap_nid);

        if (the_lnet.ln_testprotocompat != 0) {
                /* single-shot proto test */
                LNET_LOCK();
                if ((the_lnet.ln_testprotocompat & 1) != 0) {
                        connreq.racr_version++;
                        the_lnet.ln_testprotocompat &= ~1;
                }
                if ((the_lnet.ln_testprotocompat & 2) != 0) {
                        connreq.racr_magic = LNET_PROTO_MAGIC;
                        the_lnet.ln_testprotocompat &= ~2;
                }
                LNET_UNLOCK();
        }

        rc = lnet_connect(&sock, peer->rap_nid,
                          0, peer->rap_ip, peer->rap_port);
        if (rc != 0)
                goto failed_0;

        /* CAVEAT EMPTOR: the passive side receives with a SHORT rx timeout
         * immediately after accepting a connection, so we connect and then
         * send immediately. */

        rc = libcfs_sock_write(sock, &connreq, sizeof(connreq),
                               lnet_acceptor_timeout());
        if (rc != 0) {
                CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer->rap_ip), peer->rap_port, rc);
                goto failed_2;
        }

        rc = kranal_recv_connreq(sock, &connreq, 1);
        if (rc != 0) {
                CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
                       HIPQUAD(peer->rap_ip), peer->rap_port, rc);
                goto failed_2;
        }

        libcfs_sock_release(sock);
        rc = -EPROTO;

        if (connreq.racr_srcnid != peer->rap_nid) {
                CERROR("Unexpected srcnid from %u.%u.%u.%u/%d: "
                       "received %s expected %s\n",
                       HIPQUAD(peer->rap_ip), peer->rap_port,
                       libcfs_nid2str(connreq.racr_srcnid),
                       libcfs_nid2str(peer->rap_nid));
                goto failed_1;
        }

        if (connreq.racr_devid != dev->rad_id) {
                CERROR("Unexpected device id from %u.%u.%u.%u/%d: "
                       "received %d expected %d\n",
                       HIPQUAD(peer->rap_ip), peer->rap_port,
                       connreq.racr_devid, dev->rad_id);
                goto failed_1;
        }

        rc = kranal_set_conn_params(conn, &connreq,
                                    peer->rap_ip, peer->rap_port);
        if (rc != 0)
                goto failed_1;

        *connp = conn;
        *dst_nidp = connreq.racr_dstnid;
        return 0;

 failed_2:
        libcfs_sock_release(sock);
 failed_1:
        lnet_connect_console_error(rc, peer->rap_nid,
                                   peer->rap_ip, peer->rap_port);
 failed_0:
        kranal_conn_decref(conn);
        return rc;
}

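/* Perform a handshake, either active (sock == NULL: connd connecting to
 * 'peer') or passive (listener accepted 'sock': peer == NULL), then install
 * the new conn in the peer table and cqid hash under the global lock. */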
int
kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
{
        kra_peer_t        *peer2;
        kra_tx_t          *tx;
        lnet_nid_t         peer_nid;
        lnet_nid_t         dst_nid;
        unsigned long      flags;
        kra_conn_t        *conn;
        int                rc;
        int                nstale;
        int                new_peer = 0;

        if (sock == NULL) {
                /* active: connd wants to connect to 'peer' */
                LASSERT (peer != NULL);
                LASSERT (peer->rap_connecting);

                rc = kranal_active_conn_handshake(peer, &dst_nid, &conn);
                if (rc != 0)
                        return rc;

                cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);

                if (!kranal_peer_active(peer)) {
                        /* raced with peer getting unlinked */
                        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
                                                    flags);
                        kranal_conn_decref(conn);
                        return -ESTALE;
                }

                peer_nid = peer->rap_nid;
        } else {
                /* passive: listener accepted 'sock' */
                LASSERT (peer == NULL);

                rc = kranal_passive_conn_handshake(sock, &peer_nid,
                                                   &dst_nid, &conn);
                if (rc != 0)
                        return rc;

                /* assume this is a new peer */
                rc = kranal_create_peer(&peer, peer_nid);
                if (rc != 0) {
                        CERROR("Can't create conn for %s\n",
                               libcfs_nid2str(peer_nid));
                        kranal_conn_decref(conn);
                        return rc;
                }

                cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);

                peer2 = kranal_find_peer_locked(peer_nid);
                if (peer2 == NULL) {
                        new_peer = 1;
                } else {
                        /* peer_nid already in the peer table */
                        kranal_peer_decref(peer);
                        peer = peer2;
                }
        }

        LASSERT ((!new_peer) != (!kranal_peer_active(peer)));

        /* Refuse connection if peer thinks we are a different NID.  We check
         * this while holding the global lock, to synch with connection
         * destruction on NID change. */
        if (kranal_data.kra_ni->ni_nid != dst_nid) {
                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
                                            flags);
                CERROR("Stale/bad connection with %s: dst_nid %s, expected %s\n",
                       libcfs_nid2str(peer_nid), libcfs_nid2str(dst_nid),
                       libcfs_nid2str(kranal_data.kra_ni->ni_nid));
                rc = -ESTALE;
                goto failed;
        }

        /* Refuse to duplicate an existing connection (both sides might try to
         * connect at once).  NB we return success!  We _are_ connected so we
         * _don't_ have any blocked txs to complete with failure. */
        rc = kranal_conn_isdup_locked(peer, conn);
        if (rc != 0) {
                LASSERT (!cfs_list_empty(&peer->rap_conns));
                LASSERT (cfs_list_empty(&peer->rap_tx_queue));
                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
                                            flags);
                CWARN("Not creating duplicate connection to %s: %d\n",
                      libcfs_nid2str(peer_nid), rc);
                rc = 0;
                goto failed;
        }

        if (new_peer) {
                /* peer table takes my ref on the new peer */
                cfs_list_add_tail(&peer->rap_list,
                                  kranal_nid2peerlist(peer_nid));
        }

        /* initialise timestamps before reaper looks at them */
        conn->rac_last_tx = conn->rac_last_rx = jiffies;

        kranal_peer_addref(peer);               /* +1 ref for conn */
        conn->rac_peer = peer;
        cfs_list_add_tail(&conn->rac_list, &peer->rap_conns);

        kranal_conn_addref(conn);               /* +1 ref for conn table */
        cfs_list_add_tail(&conn->rac_hashlist,
                          kranal_cqid2connlist(conn->rac_cqid));

        /* Schedule all packets blocking for a connection */
        while (!cfs_list_empty(&peer->rap_tx_queue)) {
                tx = cfs_list_entry(peer->rap_tx_queue.next,
                                    kra_tx_t, tx_list);
                cfs_list_del(&tx->tx_list);
                kranal_post_fma(conn, tx);
        }

        nstale = kranal_close_stale_conns_locked(peer, conn);

        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

        /* CAVEAT EMPTOR: passive peer can disappear NOW */

        if (nstale != 0)
                CWARN("Closed %d stale conns to %s\n", nstale,
                      libcfs_nid2str(peer_nid));

        CWARN("New connection to %s on devid[%d] = %d\n",
              libcfs_nid2str(peer_nid),
              conn->rac_device->rad_idx, conn->rac_device->rad_id);

        /* Ensure conn gets checked.  Transmits may have been queued and an
         * FMA event may have happened before it got in the cq hash table */
        kranal_schedule_conn(conn);
        return 0;

 failed:
        if (new_peer)
                kranal_peer_decref(peer);
        kranal_conn_decref(conn);
        return rc;
}

void
kranal_connect (kra_peer_t *peer)
{
        kra_tx_t          *tx;
        unsigned long      flags;
        cfs_list_t         zombies;
        int                rc;

        LASSERT (peer->rap_connecting);

        CDEBUG(D_NET, "About to handshake %s\n",
               libcfs_nid2str(peer->rap_nid));

        rc = kranal_conn_handshake(NULL, peer);

        CDEBUG(D_NET, "Done handshake %s:%d \n",
               libcfs_nid2str(peer->rap_nid), rc);

        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        LASSERT (peer->rap_connecting);
        peer->rap_connecting = 0;

        if (rc == 0) {
                /* kranal_conn_handshake() queues blocked txs immediately on
                 * success to avoid messages jumping the queue */
                LASSERT (cfs_list_empty(&peer->rap_tx_queue));

                peer->rap_reconnect_interval = 0; /* OK to reconnect at any time */

                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
                                            flags);
                return;
        }

        peer->rap_reconnect_interval *= 2;
        peer->rap_reconnect_interval =
                MAX(peer->rap_reconnect_interval,
                    *kranal_tunables.kra_min_reconnect_interval);
        peer->rap_reconnect_interval =
                MIN(peer->rap_reconnect_interval,
                    *kranal_tunables.kra_max_reconnect_interval);

        peer->rap_reconnect_time = jiffies + peer->rap_reconnect_interval *
                                   CFS_HZ;

        /* Grab all blocked packets while we have the global lock */
        cfs_list_add(&zombies, &peer->rap_tx_queue);
        cfs_list_del_init(&peer->rap_tx_queue);

        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

        if (cfs_list_empty(&zombies))
                return;

        CNETERR("Dropping packets for %s: connection failed\n",
                libcfs_nid2str(peer->rap_nid));

        do {
                tx = cfs_list_entry(zombies.next, kra_tx_t, tx_list);

                cfs_list_del(&tx->tx_list);
                kranal_tx_done(tx, -EHOSTUNREACH);

        } while (!cfs_list_empty(&zombies));
}

void
kranal_free_acceptsock (kra_acceptsock_t *ras)
{
        libcfs_sock_release(ras->ras_sock);
        LIBCFS_FREE(ras, sizeof(*ras));
}

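/* lnd_accept callback: don't handshake in the acceptor's context; queue the
 * accepted socket for a connd thread instead. */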
int
kranal_accept (lnet_ni_t *ni, struct socket *sock)
{
        kra_acceptsock_t  *ras;
        int                rc;
        __u32              peer_ip;
        unsigned int       peer_port;
        unsigned long      flags;

        rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
        LASSERT (rc == 0);                      /* we succeeded before */

        LIBCFS_ALLOC(ras, sizeof(*ras));
        if (ras == NULL) {
                CERROR("ENOMEM allocating connection request from "
                       "%u.%u.%u.%u\n", HIPQUAD(peer_ip));
                return -ENOMEM;
        }

        ras->ras_sock = sock;

        cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);

        cfs_list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
        cfs_waitq_signal(&kranal_data.kra_connd_waitq);

        cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
        return 0;
}

int
kranal_create_peer (kra_peer_t **peerp, lnet_nid_t nid)
{
        kra_peer_t    *peer;
        unsigned long  flags;

        LASSERT (nid != LNET_NID_ANY);

        LIBCFS_ALLOC(peer, sizeof(*peer));
        if (peer == NULL)
                return -ENOMEM;

        memset(peer, 0, sizeof(*peer));         /* zero flags etc */

        peer->rap_nid = nid;
        cfs_atomic_set(&peer->rap_refcount, 1); /* 1 ref for caller */

        CFS_INIT_LIST_HEAD(&peer->rap_list);
        CFS_INIT_LIST_HEAD(&peer->rap_connd_list);
        CFS_INIT_LIST_HEAD(&peer->rap_conns);
        CFS_INIT_LIST_HEAD(&peer->rap_tx_queue);

        peer->rap_reconnect_interval = 0;       /* OK to connect at any time */

        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        if (kranal_data.kra_nonewpeers) {
                /* shutdown has started already */
                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
                                            flags);

                LIBCFS_FREE(peer, sizeof(*peer));
                CERROR("Can't create peer: network shutdown\n");
                return -ESHUTDOWN;
        }

        cfs_atomic_inc(&kranal_data.kra_npeers);

        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

        *peerp = peer;
        return 0;
}

945 kranal_destroy_peer (kra_peer_t *peer)
947 CDEBUG(D_NET, "peer %s %p deleted\n",
948 libcfs_nid2str(peer->rap_nid), peer);
950 LASSERT (cfs_atomic_read(&peer->rap_refcount) == 0);
951 LASSERT (peer->rap_persistence == 0);
952 LASSERT (!kranal_peer_active(peer));
953 LASSERT (!peer->rap_connecting);
954 LASSERT (cfs_list_empty(&peer->rap_conns));
955 LASSERT (cfs_list_empty(&peer->rap_tx_queue));
956 LASSERT (cfs_list_empty(&peer->rap_connd_list));
958 LIBCFS_FREE(peer, sizeof(*peer));
960 /* NB a peer's connections keep a reference on their peer until
961 * they are destroyed, so we can be assured that _all_ state to do
962 * with this peer has been cleaned up when its refcount drops to
964 cfs_atomic_dec(&kranal_data.kra_npeers);
kra_peer_t *
kranal_find_peer_locked (lnet_nid_t nid)
{
        cfs_list_t       *peer_list = kranal_nid2peerlist(nid);
        cfs_list_t       *tmp;
        kra_peer_t       *peer;

        cfs_list_for_each (tmp, peer_list) {

                peer = cfs_list_entry(tmp, kra_peer_t, rap_list);

                LASSERT (peer->rap_persistence > 0 ||     /* persistent peer */
                         !cfs_list_empty(&peer->rap_conns)); /* active conn */

                if (peer->rap_nid != nid)
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
                       peer, libcfs_nid2str(nid),
                       cfs_atomic_read(&peer->rap_refcount));
                return peer;
        }
        return NULL;
}

kra_peer_t *
kranal_find_peer (lnet_nid_t nid)
{
        kra_peer_t     *peer;

        cfs_read_lock(&kranal_data.kra_global_lock);
        peer = kranal_find_peer_locked(nid);
        if (peer != NULL)                       /* +1 ref for caller? */
                kranal_peer_addref(peer);
        cfs_read_unlock(&kranal_data.kra_global_lock);

        return peer;
}

void
kranal_unlink_peer_locked (kra_peer_t *peer)
{
        LASSERT (peer->rap_persistence == 0);
        LASSERT (cfs_list_empty(&peer->rap_conns));

        LASSERT (kranal_peer_active(peer));
        cfs_list_del_init(&peer->rap_list);

        /* lose peerlist's ref */
        kranal_peer_decref(peer);
}

int
kranal_get_peer_info (int index, lnet_nid_t *nidp, __u32 *ipp, int *portp,
                      int *persistencep)
{
        kra_peer_t        *peer;
        cfs_list_t        *ptmp;
        int                i;

        cfs_read_lock(&kranal_data.kra_global_lock);

        for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {

                cfs_list_for_each(ptmp, &kranal_data.kra_peers[i]) {

                        peer = cfs_list_entry(ptmp, kra_peer_t, rap_list);
                        LASSERT (peer->rap_persistence > 0 ||
                                 !cfs_list_empty(&peer->rap_conns));

                        if (index-- > 0)
                                continue;

                        *nidp = peer->rap_nid;
                        *ipp = peer->rap_ip;
                        *portp = peer->rap_port;
                        *persistencep = peer->rap_persistence;

                        cfs_read_unlock(&kranal_data.kra_global_lock);
                        return 0;
                }
        }

        cfs_read_unlock(&kranal_data.kra_global_lock);
        return -ENOENT;
}

int
kranal_add_persistent_peer (lnet_nid_t nid, __u32 ip, int port)
{
        unsigned long      flags;
        kra_peer_t        *peer;
        kra_peer_t        *peer2;
        int                rc;

        if (nid == LNET_NID_ANY)
                return -EINVAL;

        rc = kranal_create_peer(&peer, nid);
        if (rc != 0)
                return rc;

        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        peer2 = kranal_find_peer_locked(nid);
        if (peer2 != NULL) {
                kranal_peer_decref(peer);
                peer = peer2;
        } else {
                /* peer table takes existing ref on peer */
                cfs_list_add_tail(&peer->rap_list,
                                  kranal_nid2peerlist(nid));
        }

        peer->rap_ip = ip;
        peer->rap_port = port;
        peer->rap_persistence++;

        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
        return 0;
}

void
kranal_del_peer_locked (kra_peer_t *peer)
{
        cfs_list_t       *ctmp;
        cfs_list_t       *cnxt;
        kra_conn_t       *conn;

        peer->rap_persistence = 0;

        if (cfs_list_empty(&peer->rap_conns)) {
                kranal_unlink_peer_locked(peer);
        } else {
                cfs_list_for_each_safe(ctmp, cnxt, &peer->rap_conns) {
                        conn = cfs_list_entry(ctmp, kra_conn_t, rac_list);

                        kranal_close_conn_locked(conn, 0);
                }
                /* peer unlinks itself when last conn is closed */
        }
}

int
kranal_del_peer (lnet_nid_t nid)
{
        unsigned long      flags;
        cfs_list_t        *ptmp;
        cfs_list_t        *pnxt;
        kra_peer_t        *peer;
        int                lo;
        int                hi;
        int                i;
        int                rc = -ENOENT;

        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        if (nid != LNET_NID_ANY)
                lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
        else {
                lo = 0;
                hi = kranal_data.kra_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                cfs_list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
                        peer = cfs_list_entry(ptmp, kra_peer_t, rap_list);
                        LASSERT (peer->rap_persistence > 0 ||
                                 !cfs_list_empty(&peer->rap_conns));

                        if (!(nid == LNET_NID_ANY || peer->rap_nid == nid))
                                continue;

                        kranal_del_peer_locked(peer);
                        rc = 0;         /* matched something */
                }
        }

        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

        return rc;
}

kra_conn_t *
kranal_get_conn_by_idx (int index)
{
        kra_peer_t        *peer;
        cfs_list_t        *ptmp;
        kra_conn_t        *conn;
        cfs_list_t        *ctmp;
        int                i;

        cfs_read_lock (&kranal_data.kra_global_lock);

        for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
                cfs_list_for_each (ptmp, &kranal_data.kra_peers[i]) {

                        peer = cfs_list_entry(ptmp, kra_peer_t, rap_list);
                        LASSERT (peer->rap_persistence > 0 ||
                                 !cfs_list_empty(&peer->rap_conns));

                        cfs_list_for_each (ctmp, &peer->rap_conns) {
                                if (index-- > 0)
                                        continue;

                                conn = cfs_list_entry(ctmp, kra_conn_t,
                                                      rac_list);
                                CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
                                       libcfs_nid2str(conn->rac_peer->rap_nid),
                                       cfs_atomic_read(&conn->rac_refcount));
                                cfs_atomic_inc(&conn->rac_refcount);
                                cfs_read_unlock(&kranal_data.kra_global_lock);
                                return conn;
                        }
                }
        }

        cfs_read_unlock(&kranal_data.kra_global_lock);
        return NULL;
}

int
kranal_close_peer_conns_locked (kra_peer_t *peer, int why)
{
        kra_conn_t         *conn;
        cfs_list_t         *ctmp;
        cfs_list_t         *cnxt;
        int                 count = 0;

        cfs_list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
                conn = cfs_list_entry(ctmp, kra_conn_t, rac_list);

                count++;
                kranal_close_conn_locked(conn, why);
        }

        return count;
}

int
kranal_close_matching_conns (lnet_nid_t nid)
{
        unsigned long       flags;
        cfs_list_t         *ptmp;
        cfs_list_t         *pnxt;
        kra_peer_t         *peer;
        int                 lo;
        int                 hi;
        int                 i;
        int                 count = 0;

        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);

        if (nid != LNET_NID_ANY)
                lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
        else {
                lo = 0;
                hi = kranal_data.kra_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                cfs_list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {

                        peer = cfs_list_entry(ptmp, kra_peer_t, rap_list);
                        LASSERT (peer->rap_persistence > 0 ||
                                 !cfs_list_empty(&peer->rap_conns));

                        if (!(nid == LNET_NID_ANY || nid == peer->rap_nid))
                                continue;

                        count += kranal_close_peer_conns_locked(peer, 0);
                }
        }

        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);

        /* wildcards always succeed */
        if (nid == LNET_NID_ANY)
                return 0;

        return (count == 0) ? -ENOENT : 0;
}

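/* lnd_ctl handler: peer and connection ioctls dispatched from libcfs. */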
int
kranal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
{
        struct libcfs_ioctl_data *data = arg;
        int                       rc = -EINVAL;

        LASSERT (ni == kranal_data.kra_ni);

        switch(cmd) {
        case IOC_LIBCFS_GET_PEER: {
                lnet_nid_t   nid = 0;
                __u32        ip = 0;
                int          port = 0;
                int          share_count = 0;

                rc = kranal_get_peer_info(data->ioc_count,
                                          &nid, &ip, &port, &share_count);
                data->ioc_nid    = nid;
                data->ioc_count  = share_count;
                data->ioc_u32[0] = ip;
                data->ioc_u32[1] = port;
                break;
        }
        case IOC_LIBCFS_ADD_PEER: {
                rc = kranal_add_persistent_peer(data->ioc_nid,
                                                data->ioc_u32[0], /* IP */
                                                data->ioc_u32[1]); /* port */
                break;
        }
        case IOC_LIBCFS_DEL_PEER: {
                rc = kranal_del_peer(data->ioc_nid);
                break;
        }
        case IOC_LIBCFS_GET_CONN: {
                kra_conn_t *conn = kranal_get_conn_by_idx(data->ioc_count);

                if (conn == NULL)
                        rc = -ENOENT;
                else {
                        rc = 0;
                        data->ioc_nid    = conn->rac_peer->rap_nid;
                        data->ioc_u32[0] = conn->rac_device->rad_id;
                        kranal_conn_decref(conn);
                }
                break;
        }
        case IOC_LIBCFS_CLOSE_CONNECTION: {
                rc = kranal_close_matching_conns(data->ioc_nid);
                break;
        }
        case IOC_LIBCFS_REGISTER_MYNID: {
                /* Ignore if this is a noop */
                if (data->ioc_nid == ni->ni_nid) {
                        rc = 0;
                } else {
                        CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
                               libcfs_nid2str(data->ioc_nid),
                               libcfs_nid2str(ni->ni_nid));
                        rc = -EINVAL;
                }
                break;
        }
        }

        return rc;
}

void
kranal_free_txdescs(cfs_list_t *freelist)
{
        kra_tx_t    *tx;

        while (!cfs_list_empty(freelist)) {
                tx = cfs_list_entry(freelist->next, kra_tx_t, tx_list);

                cfs_list_del(&tx->tx_list);
                LIBCFS_FREE(tx->tx_phys, LNET_MAX_IOV * sizeof(*tx->tx_phys));
                LIBCFS_FREE(tx, sizeof(*tx));
        }
}

int
kranal_alloc_txdescs(cfs_list_t *freelist, int n)
{
        kra_tx_t    *tx;
        int          i;

        LASSERT (freelist == &kranal_data.kra_idle_txs);
        LASSERT (cfs_list_empty(freelist));

        for (i = 0; i < n; i++) {

                LIBCFS_ALLOC(tx, sizeof(*tx));
                if (tx == NULL) {
                        CERROR("Can't allocate tx[%d]\n", i);
                        kranal_free_txdescs(freelist);
                        return -ENOMEM;
                }

                LIBCFS_ALLOC(tx->tx_phys,
                             LNET_MAX_IOV * sizeof(*tx->tx_phys));
                if (tx->tx_phys == NULL) {
                        CERROR("Can't allocate tx[%d]->tx_phys\n", i);

                        LIBCFS_FREE(tx, sizeof(*tx));
                        kranal_free_txdescs(freelist);
                        return -ENOMEM;
                }

                tx->tx_buftype = RANAL_BUF_NONE;
                tx->tx_msg.ram_type = RANAL_MSG_NONE;

                cfs_list_add(&tx->tx_list, freelist);
        }

        return 0;
}

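/* Bring up one RapidArray device: look it up by id, reserve RDMA
 * descriptors and create its send (RDMA) and receive (FMA) completion
 * queues.  Any failure unwinds whatever succeeded. */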
static int
kranal_device_init(int id, kra_device_t *dev)
{
        int               total_ntx = *kranal_tunables.kra_ntx;
        RAP_RETURN        rrc;

        dev->rad_id = id;
        rrc = RapkGetDeviceByIndex(id, kranal_device_callback,
                                   &dev->rad_handle);
        if (rrc != RAP_SUCCESS) {
                CERROR("Can't get Rapidarray Device %d: %d\n", id, rrc);
                goto failed_0;
        }

        rrc = RapkReserveRdma(dev->rad_handle, total_ntx);
        if (rrc != RAP_SUCCESS) {
                CERROR("Can't reserve %d RDMA descriptors"
                       " for device %d: %d\n", total_ntx, id, rrc);
                goto failed_1;
        }

        rrc = RapkCreateCQ(dev->rad_handle, total_ntx, RAP_CQTYPE_SEND,
                           &dev->rad_rdma_cqh);
        if (rrc != RAP_SUCCESS) {
                CERROR("Can't create rdma cq size %d for device %d: %d\n",
                       total_ntx, id, rrc);
                goto failed_1;
        }

        rrc = RapkCreateCQ(dev->rad_handle,
                           *kranal_tunables.kra_fma_cq_size,
                           RAP_CQTYPE_RECV, &dev->rad_fma_cqh);
        if (rrc != RAP_SUCCESS) {
                CERROR("Can't create fma cq size %d for device %d: %d\n",
                       *kranal_tunables.kra_fma_cq_size, id, rrc);
                goto failed_2;
        }

        return 0;

 failed_2:
        RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
 failed_1:
        RapkReleaseDevice(dev->rad_handle);
 failed_0:
        return -ENODEV;
}

static void
kranal_device_fini(kra_device_t *dev)
{
        LASSERT (cfs_list_empty(&dev->rad_ready_conns));
        LASSERT (cfs_list_empty(&dev->rad_new_conns));
        LASSERT (dev->rad_nphysmap == 0);
        LASSERT (dev->rad_nppphysmap == 0);
        LASSERT (dev->rad_nvirtmap == 0);
        LASSERT (dev->rad_nobvirtmap == 0);

        LASSERT(dev->rad_scheduler == NULL);
        RapkDestroyCQ(dev->rad_handle, dev->rad_fma_cqh);
        RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
        RapkReleaseDevice(dev->rad_handle);
}

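/* Tear down the NI: stop new peers, empty the peer table, wait for peers
 * and threads to drain, then free the hash tables, tx descriptors and
 * devices.  Safe to call at any point after RANAL_INIT_DATA. */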
void
kranal_shutdown (lnet_ni_t *ni)
{
        int           i;
        unsigned long flags;

        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
               cfs_atomic_read(&libcfs_kmemory));

        LASSERT (ni == kranal_data.kra_ni);
        LASSERT (ni->ni_data == &kranal_data);

        switch (kranal_data.kra_init) {
        default:
                CERROR("Unexpected state %d\n", kranal_data.kra_init);
                LBUG();

        case RANAL_INIT_ALL:
                /* Prevent new peers from being created */
                cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
                kranal_data.kra_nonewpeers = 1;
                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
                                            flags);

                /* Remove all existing peers from the peer table */
                kranal_del_peer(LNET_NID_ANY);

                /* Wait for pending conn reqs to be handled */
                i = 2;
                cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
                while (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
                        cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
                                                   flags);
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
                               "waiting for conn reqs to clean up\n");
                        cfs_pause(cfs_time_seconds(1));

                        cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock,
                                              flags);
                }
                cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);

                /* Wait for all peers to be freed */
                i = 2;
                while (cfs_atomic_read(&kranal_data.kra_npeers) != 0) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
                               "waiting for %d peers to close down\n",
                               cfs_atomic_read(&kranal_data.kra_npeers));
                        cfs_pause(cfs_time_seconds(1));
                }
                /* fall through */

        case RANAL_INIT_DATA:
                break;
        }

        /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
         * have to worry about shutdown races.  NB connections may be created
         * while there are still active connds, but these will be temporary
         * since peer creation always fails after the listener has started to
         * shut down. */
        LASSERT (cfs_atomic_read(&kranal_data.kra_npeers) == 0);

        /* Flag threads to terminate */
        kranal_data.kra_shutdown = 1;

        for (i = 0; i < kranal_data.kra_ndevs; i++) {
                kra_device_t *dev = &kranal_data.kra_devices[i];

                cfs_spin_lock_irqsave(&dev->rad_lock, flags);
                cfs_waitq_signal(&dev->rad_waitq);
                cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
        }

        cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
        cfs_waitq_broadcast(&kranal_data.kra_reaper_waitq);
        cfs_spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);

        LASSERT (cfs_list_empty(&kranal_data.kra_connd_peers));
        cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
        cfs_waitq_broadcast(&kranal_data.kra_connd_waitq);
        cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);

        /* Wait for threads to exit */
        i = 2;
        while (cfs_atomic_read(&kranal_data.kra_nthreads) != 0) {
                i++;
                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                       "Waiting for %d threads to terminate\n",
                       cfs_atomic_read(&kranal_data.kra_nthreads));
                cfs_pause(cfs_time_seconds(1));
        }

        LASSERT (cfs_atomic_read(&kranal_data.kra_npeers) == 0);
        if (kranal_data.kra_peers != NULL) {
                for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
                        LASSERT (cfs_list_empty(&kranal_data.kra_peers[i]));

                LIBCFS_FREE(kranal_data.kra_peers,
                            sizeof (cfs_list_t) *
                            kranal_data.kra_peer_hash_size);
        }

        LASSERT (cfs_atomic_read(&kranal_data.kra_nconns) == 0);
        if (kranal_data.kra_conns != NULL) {
                for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
                        LASSERT (cfs_list_empty(&kranal_data.kra_conns[i]));

                LIBCFS_FREE(kranal_data.kra_conns,
                            sizeof (cfs_list_t) *
                            kranal_data.kra_conn_hash_size);
        }

        for (i = 0; i < kranal_data.kra_ndevs; i++)
                kranal_device_fini(&kranal_data.kra_devices[i]);

        kranal_free_txdescs(&kranal_data.kra_idle_txs);

        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
               cfs_atomic_read(&libcfs_kmemory));

        kranal_data.kra_init = RANAL_INIT_NOTHING;
        PORTAL_MODULE_UNUSE;
}

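/* Bring up the NI: initialise global state, allocate peer/conn hash tables
 * and tx descriptors, start the reaper/connd/scheduler threads and probe
 * the RapidArray devices. */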
int
kranal_startup (lnet_ni_t *ni)
{
        struct timeval    tv;
        int               pkmem = cfs_atomic_read(&libcfs_kmemory);
        int               rc;
        int               i;
        kra_device_t     *dev;

        LASSERT (ni->ni_lnd == &the_kralnd);

        /* Only 1 instance supported */
        if (kranal_data.kra_init != RANAL_INIT_NOTHING) {
                CERROR ("Only 1 instance supported\n");
                return -EPERM;
        }

        if (lnet_set_ip_niaddr(ni) != 0) {
                CERROR ("Can't determine my NID\n");
                return -EPERM;
        }

        if (*kranal_tunables.kra_credits > *kranal_tunables.kra_ntx) {
                CERROR ("Can't set credits(%d) > ntx(%d)\n",
                        *kranal_tunables.kra_credits,
                        *kranal_tunables.kra_ntx);
                return -EINVAL;
        }

        memset(&kranal_data, 0, sizeof(kranal_data)); /* zero pointers, flags etc */

        ni->ni_maxtxcredits = *kranal_tunables.kra_credits;
        ni->ni_peertxcredits = *kranal_tunables.kra_peercredits;

        ni->ni_data = &kranal_data;
        kranal_data.kra_ni = ni;

        /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
         * a unique (for all time) connstamp so we can uniquely identify
         * the sender.  The connstamp is an incrementing counter
         * initialised with seconds + microseconds at startup time.  So we
         * rely on NOT creating connections more frequently on average than
         * 1MHz to ensure we don't use old connstamps when we reboot. */
        cfs_gettimeofday(&tv);
        kranal_data.kra_connstamp =
        kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;

        cfs_rwlock_init(&kranal_data.kra_global_lock);

        for (i = 0; i < RANAL_MAXDEVS; i++ ) {
                kra_device_t  *dev = &kranal_data.kra_devices[i];

                dev->rad_idx = i;
                CFS_INIT_LIST_HEAD(&dev->rad_ready_conns);
                CFS_INIT_LIST_HEAD(&dev->rad_new_conns);
                cfs_waitq_init(&dev->rad_waitq);
                cfs_spin_lock_init(&dev->rad_lock);
        }

        kranal_data.kra_new_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
        cfs_waitq_init(&kranal_data.kra_reaper_waitq);
        cfs_spin_lock_init(&kranal_data.kra_reaper_lock);

        CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
        CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
        cfs_waitq_init(&kranal_data.kra_connd_waitq);
        cfs_spin_lock_init(&kranal_data.kra_connd_lock);

        CFS_INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
        cfs_spin_lock_init(&kranal_data.kra_tx_lock);

        /* OK to call kranal_api_shutdown() to cleanup now */
        kranal_data.kra_init = RANAL_INIT_DATA;
        PORTAL_MODULE_USE;

        kranal_data.kra_peer_hash_size = RANAL_PEER_HASH_SIZE;
        LIBCFS_ALLOC(kranal_data.kra_peers,
                     sizeof(cfs_list_t) *
                     kranal_data.kra_peer_hash_size);
        if (kranal_data.kra_peers == NULL)
                goto failed;

        for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
                CFS_INIT_LIST_HEAD(&kranal_data.kra_peers[i]);

        kranal_data.kra_conn_hash_size = RANAL_PEER_HASH_SIZE;
        LIBCFS_ALLOC(kranal_data.kra_conns,
                     sizeof(cfs_list_t) *
                     kranal_data.kra_conn_hash_size);
        if (kranal_data.kra_conns == NULL)
                goto failed;

        for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
                CFS_INIT_LIST_HEAD(&kranal_data.kra_conns[i]);

        rc = kranal_alloc_txdescs(&kranal_data.kra_idle_txs,
                                  *kranal_tunables.kra_ntx);
        if (rc != 0)
                goto failed;

        rc = kranal_thread_start(kranal_reaper, NULL);
        if (rc != 0) {
                CERROR("Can't spawn ranal reaper: %d\n", rc);
                goto failed;
        }

        for (i = 0; i < *kranal_tunables.kra_n_connd; i++) {
                rc = kranal_thread_start(kranal_connd, (void *)(unsigned long)i);
                if (rc != 0) {
                        CERROR("Can't spawn ranal connd[%d]: %d\n",
                               i, rc);
                        goto failed;
                }
        }

        LASSERT (kranal_data.kra_ndevs == 0);

        /* Use all available RapidArray devices */
        for (i = 0; i < RANAL_MAXDEVS; i++) {
                dev = &kranal_data.kra_devices[kranal_data.kra_ndevs];

                rc = kranal_device_init(kranal_devids[i], dev);
                if (rc == 0)
                        kranal_data.kra_ndevs++;
        }

        if (kranal_data.kra_ndevs == 0) {
                CERROR("Can't initialise any RapidArray devices\n");
                goto failed;
        }

        for (i = 0; i < kranal_data.kra_ndevs; i++) {
                dev = &kranal_data.kra_devices[i];
                rc = kranal_thread_start(kranal_scheduler, dev);
                if (rc != 0) {
                        CERROR("Can't spawn ranal scheduler[%d]: %d\n",
                               i, rc);
                        goto failed;
                }
        }

        /* flag everything initialised */
        kranal_data.kra_init = RANAL_INIT_ALL;
        /*****************************************************/

        CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
        return 0;

 failed:
        kranal_shutdown(ni);
        return -ENETDOWN;
}

void __exit
kranal_module_fini (void)
{
        lnet_unregister_lnd(&the_kralnd);
        kranal_tunables_fini();
}

int __init
kranal_module_init (void)
{
        int    rc;

        rc = kranal_tunables_init();
        if (rc != 0)
                return rc;

        lnet_register_lnd(&the_kralnd);

        return 0;
}

MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Kernel RapidArray LND v0.01");
MODULE_LICENSE("GPL");

module_init(kranal_module_init);
module_exit(kranal_module_fini);