1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lnet/klnds/ralnd/ralnd.c
38 * Author: Eric Barton <eric@bartonsoftware.com>
/* RapidArray device ids probed at startup: the main NIC first, then the
 * expansion card (indexed up to RANAL_MAXDEVS). */
42 static int kranal_devids[RANAL_MAXDEVS] = {RAPK_MAIN_DEVICE_ID,
43 RAPK_EXPANSION_DEVICE_ID};
/* LND method table wiring this RapidArray LND into LNet.
 * NOTE(review): the enclosing "lnd_t the_kralnd = {" header and trailing
 * fields are not visible in this chunk of the file. */
47 .lnd_startup = kranal_startup,
48 .lnd_shutdown = kranal_shutdown,
49 .lnd_ctl = kranal_ctl,
50 .lnd_send = kranal_send,
51 .lnd_recv = kranal_recv,
52 .lnd_eager_recv = kranal_eager_recv,
53 .lnd_accept = kranal_accept,
/* Global state for this LND instance (peers, conns, devices, locks). */
56 kra_data_t kranal_data;
/* Fill in a connection request to send to 'dstnid'.  With conn == NULL a
 * zeroed "stub" carrying only my magic/version is prepared, used to tell a
 * newer peer that I speak an older protocol. */
59 kranal_pack_connreq(kra_connreq_t *connreq, kra_conn_t *conn, lnet_nid_t dstnid)
63 memset(connreq, 0, sizeof(*connreq));
65 connreq->racr_magic = RANAL_MSG_MAGIC;
66 connreq->racr_version = RANAL_MSG_VERSION;
68 if (conn == NULL) /* prepping a "stub" reply */
/* Full request: identify my device, NIDs and stamps so the peer can
 * detect stale/duplicate connections. */
71 connreq->racr_devid = conn->rac_device->rad_id;
72 connreq->racr_srcnid = kranal_data.kra_ni->ni_nid;
73 connreq->racr_dstnid = dstnid;
74 connreq->racr_peerstamp = kranal_data.kra_peerstamp;
75 connreq->racr_connstamp = conn->rac_my_connstamp;
76 connreq->racr_timeout = conn->rac_timeout;
/* RapidArray endpoint parameters the peer needs to complete the RI. */
78 rrc = RapkGetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
79 LASSERT(rrc == RAP_SUCCESS);
/* Read and validate a peer's connection request from 'sock'.
 * Returns 0 on success, -ve on error, +ve to tell the peer I'm "old"
 * (i.e. reply with my own magic/version).  The active side uses the
 * configured timeout; the passive side uses the acceptor timeout.
 * FIX(review): the body read below used sizeof(connreq->racr_version)
 * (2 bytes) minus offsetof(racr_devid), which underflows; it must read
 * the remainder of the struct, sizeof(*connreq) - offsetof(racr_devid). */
83 kranal_recv_connreq(struct socket *sock, kra_connreq_t *connreq, int active)
85 int timeout = active ? *kranal_tunables.kra_timeout :
86 lnet_acceptor_timeout();
90 /* return 0 on success, -ve on error, +ve to tell the peer I'm "old" */
/* First word: protocol magic (possibly byte-swapped by the peer). */
92 rc = libcfs_sock_read(sock, &connreq->racr_magic,
93 sizeof(connreq->racr_magic), timeout);
95 CERROR("Read(magic) failed(1): %d\n", rc);
99 if (connreq->racr_magic != RANAL_MSG_MAGIC &&
100 connreq->racr_magic != __swab32(RANAL_MSG_MAGIC)) {
101 /* Unexpected magic! */
103 (connreq->racr_magic == LNET_PROTO_MAGIC ||
104 connreq->racr_magic == __swab32(LNET_PROTO_MAGIC))) {
105 /* future protocol version compatibility!
106 * When LNET unifies protocols over all LNDs, the first
107 * thing sent will be a version query. +ve rc means I
108 * reply with my current magic/version */
112 CERROR("Unexpected magic %08x (%s)\n",
113 connreq->racr_magic, active ? "active" : "passive");
/* Peer has opposite endianness if it sent swapped magic. */
117 swab = (connreq->racr_magic == __swab32(RANAL_MSG_MAGIC));
119 rc = libcfs_sock_read(sock, &connreq->racr_version,
120 sizeof(connreq->racr_version), timeout);
122 CERROR("Read(version) failed: %d\n", rc);
127 __swab16s(&connreq->racr_version);
129 if (connreq->racr_version != RANAL_MSG_VERSION) {
131 CERROR("Unexpected version %d\n", connreq->racr_version);
134 /* If this is a future version of the ralnd protocol, and I'm
135 * passive (accepted the connection), tell my peer I'm "old"
/* Read the rest of the connreq body in one go, starting at racr_devid. */
140 rc = libcfs_sock_read(sock, &connreq->racr_devid,
141 sizeof(*connreq) -
142 offsetof(kra_connreq_t, racr_devid),
145 CERROR("Read(body) failed: %d\n", rc);
/* Byte-swap everything if the peer's endianness differs. */
150 __swab32s(&connreq->racr_magic);
151 __swab16s(&connreq->racr_version);
152 __swab16s(&connreq->racr_devid);
153 __swab64s(&connreq->racr_srcnid);
154 __swab64s(&connreq->racr_dstnid);
155 __swab64s(&connreq->racr_peerstamp);
156 __swab64s(&connreq->racr_connstamp);
157 __swab32s(&connreq->racr_timeout);
159 __swab32s(&connreq->racr_riparams.HostId);
160 __swab32s(&connreq->racr_riparams.FmaDomainHndl);
161 __swab32s(&connreq->racr_riparams.PTag);
162 __swab32s(&connreq->racr_riparams.CompletionCookie);
/* Sanity checks on the decoded request. */
165 if (connreq->racr_srcnid == LNET_NID_ANY ||
166 connreq->racr_dstnid == LNET_NID_ANY) {
167 CERROR("Received LNET_NID_ANY\n");
171 if (connreq->racr_timeout < RANAL_MIN_TIMEOUT) {
172 CERROR("Received timeout %d < MIN %d\n",
173 connreq->racr_timeout, RANAL_MIN_TIMEOUT);
/* Close any of 'peer's existing conns made obsolete by 'newconn': older
 * peerstamp (peer rebooted) or older connstamp on the same device.
 * Caller holds the global lock; returns the count of conns closed
 * (NOTE(review): return statements are not visible in this chunk). */
181 kranal_close_stale_conns_locked (kra_peer_t *peer, kra_conn_t *newconn)
184 struct list_head *ctmp;
185 struct list_head *cnxt;
/* Loopback conns swap the meaning of my/peer connstamps below. */
189 loopback = peer->rap_nid == kranal_data.kra_ni->ni_nid;
191 list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
192 conn = list_entry(ctmp, kra_conn_t, rac_list);
/* Different peerstamp => peer instance restarted: conn is stale. */
197 if (conn->rac_peerstamp != newconn->rac_peerstamp) {
198 CDEBUG(D_NET, "Closing stale conn nid: %s "
199 " peerstamp:"LPX64"("LPX64")\n",
200 libcfs_nid2str(peer->rap_nid),
201 conn->rac_peerstamp, newconn->rac_peerstamp);
202 LASSERT (conn->rac_peerstamp < newconn->rac_peerstamp);
204 kranal_close_conn_locked(conn, -ESTALE);
/* Only conns on the same device can be stale duplicates. */
208 if (conn->rac_device != newconn->rac_device)
/* Skip my own loopback conn (stamps mirrored on purpose). */
212 newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
213 newconn->rac_peer_connstamp == conn->rac_my_connstamp)
216 LASSERT (conn->rac_peer_connstamp < newconn->rac_peer_connstamp);
218 CDEBUG(D_NET, "Closing stale conn nid: %s"
219 " connstamp:"LPX64"("LPX64")\n",
220 libcfs_nid2str(peer->rap_nid),
221 conn->rac_peer_connstamp, newconn->rac_peer_connstamp);
224 kranal_close_conn_locked(conn, -ESTALE);
/* Decide whether 'newconn' duplicates one of 'peer's existing conns.
 * Caller holds the global lock.  Stale conns (older stamps) don't count
 * as duplicates; they are culled later by close_stale_conns. */
231 kranal_conn_isdup_locked(kra_peer_t *peer, kra_conn_t *newconn)
234 struct list_head *tmp;
/* Loopback: my/peer connstamps appear mirrored on the two conns. */
237 loopback = peer->rap_nid == kranal_data.kra_ni->ni_nid;
239 list_for_each(tmp, &peer->rap_conns) {
240 conn = list_entry(tmp, kra_conn_t, rac_list);
242 /* 'newconn' is from an earlier version of 'peer'!!! */
243 if (newconn->rac_peerstamp < conn->rac_peerstamp)
246 /* 'conn' is from an earlier version of 'peer': it will be
247 * removed when we cull stale conns later on... */
248 if (newconn->rac_peerstamp > conn->rac_peerstamp)
251 /* Different devices are OK */
252 if (conn->rac_device != newconn->rac_device)
255 /* It's me connecting to myself */
257 newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
258 newconn->rac_peer_connstamp == conn->rac_my_connstamp)
261 /* 'newconn' is an earlier connection from 'peer'!!! */
262 if (newconn->rac_peer_connstamp < conn->rac_peer_connstamp)
265 /* 'conn' is an earlier connection from 'peer': it will be
266 * removed when we cull stale conns later on... */
267 if (newconn->rac_peer_connstamp > conn->rac_peer_connstamp)
270 /* 'newconn' has the SAME connection stamp; 'peer' isn't
271 * playing the game... */
/* Assign 'conn' a unique connstamp and a cqid not currently in the conn
 * hash, under the global write lock. */
279 kranal_set_conn_uniqueness (kra_conn_t *conn)
283 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
285 conn->rac_my_connstamp = kranal_data.kra_connstamp++;
287 do { /* allocate a unique cqid */
288 conn->rac_cqid = kranal_data.kra_next_cqid++;
289 } while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL);
291 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* Allocate and initialise a new connection on device 'dev'; on success
 * *connp holds the conn with a single (caller's) reference.  Frees the
 * conn and fails if the RapidArray RI can't be created. */
295 kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
300 LASSERT (!in_interrupt());
301 LIBCFS_ALLOC(conn, sizeof(*conn));
306 memset(conn, 0, sizeof(*conn));
307 atomic_set(&conn->rac_refcount, 1);
308 INIT_LIST_HEAD(&conn->rac_list);
309 INIT_LIST_HEAD(&conn->rac_hashlist);
310 INIT_LIST_HEAD(&conn->rac_schedlist);
311 INIT_LIST_HEAD(&conn->rac_fmaq);
312 INIT_LIST_HEAD(&conn->rac_rdmaq);
313 INIT_LIST_HEAD(&conn->rac_replyq);
314 spin_lock_init(&conn->rac_lock);
/* Unique connstamp + cqid before the conn becomes visible anywhere. */
316 kranal_set_conn_uniqueness(conn);
318 conn->rac_device = dev;
/* Never use a timeout below the protocol minimum. */
319 conn->rac_timeout = MAX(*kranal_tunables.kra_timeout, RANAL_MIN_TIMEOUT);
320 kranal_update_reaper_timeout(conn->rac_timeout);
322 rrc = RapkCreateRi(dev->rad_handle, conn->rac_cqid,
323 &conn->rac_rihandle);
324 if (rrc != RAP_SUCCESS) {
325 CERROR("RapkCreateRi failed: %d\n", rrc);
326 LIBCFS_FREE(conn, sizeof(*conn));
330 atomic_inc(&kranal_data.kra_nconns);
/* Final teardown when the last conn reference is dropped: all queues must
 * be empty and the conn unlinked from every list.  Releases the RI handle,
 * the peer ref (if set) and the memory. */
336 kranal_destroy_conn(kra_conn_t *conn)
340 LASSERT (!in_interrupt());
341 LASSERT (!conn->rac_scheduled);
342 LASSERT (list_empty(&conn->rac_list));
343 LASSERT (list_empty(&conn->rac_hashlist));
344 LASSERT (list_empty(&conn->rac_schedlist));
345 LASSERT (atomic_read(&conn->rac_refcount) == 0);
346 LASSERT (list_empty(&conn->rac_fmaq));
347 LASSERT (list_empty(&conn->rac_rdmaq));
348 LASSERT (list_empty(&conn->rac_replyq));
350 rrc = RapkDestroyRi(conn->rac_device->rad_handle,
352 LASSERT (rrc == RAP_SUCCESS);
354 if (conn->rac_peer != NULL)
355 kranal_peer_decref(conn->rac_peer);
357 LIBCFS_FREE(conn, sizeof(*conn));
358 atomic_dec(&kranal_data.kra_nconns);
/* Move a CLOSING conn to CLOSED: unhash it (no new callbacks can find it),
 * drop the hash table's ref, and schedule it so the device thread can
 * complete any outstanding comms.  Caller holds the global write lock. */
362 kranal_terminate_conn_locked (kra_conn_t *conn)
364 LASSERT (!in_interrupt());
365 LASSERT (conn->rac_state == RANAL_CONN_CLOSING);
366 LASSERT (!list_empty(&conn->rac_hashlist));
367 LASSERT (list_empty(&conn->rac_list));
369 /* Remove from conn hash table: no new callbacks */
370 list_del_init(&conn->rac_hashlist);
371 kranal_conn_decref(conn);
373 conn->rac_state = RANAL_CONN_CLOSED;
375 /* schedule to clear out all uncompleted comms in context of dev's
377 kranal_schedule_conn(conn);
/* Begin closing an ESTABLISHED conn: unlink it from its peer (unlinking
 * the peer itself if it has no conns left and isn't persistent), move to
 * CLOSING and schedule a CLOSE message.  Caller holds the global lock. */
381 kranal_close_conn_locked (kra_conn_t *conn, int error)
383 kra_peer_t *peer = conn->rac_peer;
385 CDEBUG(error == 0 ? D_NET : D_NETERROR,
386 "closing conn to %s: error %d\n",
387 libcfs_nid2str(peer->rap_nid), error);
389 LASSERT (!in_interrupt());
390 LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED);
391 LASSERT (!list_empty(&conn->rac_hashlist));
392 LASSERT (!list_empty(&conn->rac_list));
394 list_del_init(&conn->rac_list);
396 if (list_empty(&peer->rap_conns) &&
397 peer->rap_persistence == 0) {
398 /* Non-persistent peer with no more conns... */
399 kranal_unlink_peer_locked(peer);
402 /* Reset RX timeout to ensure we wait for an incoming CLOSE for the
403 * full timeout. If we get a CLOSE we know the peer has stopped all
404 * RDMA. Otherwise if we wait for the full timeout we can also be sure
405 * all RDMA has stopped. */
406 conn->rac_last_rx = jiffies;
409 conn->rac_state = RANAL_CONN_CLOSING;
410 kranal_schedule_conn(conn); /* schedule sending CLOSE */
412 kranal_conn_decref(conn); /* lose peer's ref */
/* Lock-taking wrapper around kranal_close_conn_locked(); a no-op if the
 * conn already left the ESTABLISHED state (close race). */
416 kranal_close_conn (kra_conn_t *conn, int error)
421 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
423 if (conn->rac_state == RANAL_CONN_ESTABLISHED)
424 kranal_close_conn_locked(conn, error);
426 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* Apply the peer's RI parameters from 'connreq', hand the conn to the
 * device scheduler (rad_new_conns) and wait for the RapidArray connection
 * to complete.  peer_ip/peer_port are for error reporting only.
 * Returns 0 on success, -ECONNABORTED on RapidArray failure. */
430 kranal_set_conn_params(kra_conn_t *conn, kra_connreq_t *connreq,
431 __u32 peer_ip, int peer_port)
433 kra_device_t *dev = conn->rac_device;
437 /* CAVEAT EMPTOR: we're really overloading rac_last_tx + rac_keepalive
438 * to do RapkCompleteSync() timekeeping (see kibnal_scheduler). */
439 conn->rac_last_tx = jiffies;
440 conn->rac_keepalive = 0;
442 rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
443 if (rrc != RAP_SUCCESS) {
444 CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n",
445 HIPQUAD(peer_ip), peer_port, rrc);
446 return -ECONNABORTED;
449 /* Schedule conn on rad_new_conns */
450 kranal_conn_addref(conn);
451 spin_lock_irqsave(&dev->rad_lock, flags);
452 list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
453 wake_up(&dev->rad_waitq);
454 spin_unlock_irqrestore(&dev->rad_lock, flags);
/* Block until the scheduler's RapkCompleteSync() finishes the connect. */
456 rrc = RapkWaitToConnect(conn->rac_rihandle);
457 if (rrc != RAP_SUCCESS) {
458 CERROR("Error waiting to connect to %u.%u.%u.%u/%d: %d\n",
459 HIPQUAD(peer_ip), peer_port, rrc);
460 return -ECONNABORTED;
463 /* Scheduler doesn't touch conn apart from to deschedule and decref it
464 * after RapkCompleteSync() return success, so conn is all mine */
466 conn->rac_peerstamp = connreq->racr_peerstamp;
467 conn->rac_peer_connstamp = connreq->racr_connstamp;
468 conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout);
469 kranal_update_reaper_timeout(conn->rac_keepalive);
/* Passive side of the handshake: read the peer's connreq from an accepted
 * socket, match its device id against mine, create a conn, reply with my
 * connreq and complete the RapidArray connection.  On success returns the
 * peer's src/dst NIDs and the new conn. */
474 kranal_passive_conn_handshake (struct socket *sock, lnet_nid_t *src_nidp,
475 lnet_nid_t *dst_nidp, kra_conn_t **connp)
478 unsigned int peer_port;
479 kra_connreq_t rx_connreq;
480 kra_connreq_t tx_connreq;
486 rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
488 CERROR("Can't get peer's IP: %d\n", rc);
492 rc = kranal_recv_connreq(sock, &rx_connreq, 0);
495 CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
496 HIPQUAD(peer_ip), peer_port, rc);
501 /* Request from "new" peer: send reply with my MAGIC/VERSION to
502 * tell her I'm old... */
503 kranal_pack_connreq(&tx_connreq, NULL, LNET_NID_ANY);
505 rc = libcfs_sock_write(sock, &tx_connreq, sizeof(tx_connreq),
506 lnet_acceptor_timeout());
508 CERROR("Can't tx stub connreq to %u.%u.%u.%u/%d: %d\n",
509 HIPQUAD(peer_ip), peer_port, rc);
/* Find my device matching the peer's advertised device id.
 * NOTE(review): the loop header over kra_ndevs is not visible here. */
515 if (i == kranal_data.kra_ndevs) {
516 CERROR("Can't match dev %d from %u.%u.%u.%u/%d\n",
517 rx_connreq.racr_devid, HIPQUAD(peer_ip), peer_port);
520 dev = &kranal_data.kra_devices[i];
521 if (dev->rad_id == rx_connreq.racr_devid)
525 rc = kranal_create_conn(&conn, dev);
/* Reply with my full connreq addressed to the peer's source NID. */
529 kranal_pack_connreq(&tx_connreq, conn, rx_connreq.racr_srcnid);
531 rc = libcfs_sock_write(sock, &tx_connreq, sizeof(tx_connreq),
532 lnet_acceptor_timeout());
534 CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
535 HIPQUAD(peer_ip), peer_port, rc);
536 kranal_conn_decref(conn);
540 rc = kranal_set_conn_params(conn, &rx_connreq, peer_ip, peer_port);
542 kranal_conn_decref(conn);
547 *src_nidp = rx_connreq.racr_srcnid;
548 *dst_nidp = rx_connreq.racr_dstnid;
/* Active side of the handshake: pick a device (spread by NID sum), create
 * a conn, connect a socket to the peer, exchange connreqs, validate the
 * reply and complete the RapidArray connection.  On success returns the
 * peer's notion of my NID and the new conn. */
553 kranal_active_conn_handshake(kra_peer_t *peer,
554 lnet_nid_t *dst_nidp, kra_conn_t **connp)
556 kra_connreq_t connreq;
563 /* spread connections over all devices using both peer NIDs to ensure
564 * all nids use all devices */
565 idx = peer->rap_nid + kranal_data.kra_ni->ni_nid;
566 dev = &kranal_data.kra_devices[idx % kranal_data.kra_ndevs];
568 rc = kranal_create_conn(&conn, dev);
572 kranal_pack_connreq(&connreq, conn, peer->rap_nid);
574 if (the_lnet.ln_testprotocompat != 0) {
575 /* single-shot proto test */
577 if ((the_lnet.ln_testprotocompat & 1) != 0) {
578 connreq.racr_version++;
579 the_lnet.ln_testprotocompat &= ~1;
581 if ((the_lnet.ln_testprotocompat & 2) != 0) {
582 connreq.racr_magic = LNET_PROTO_MAGIC;
583 the_lnet.ln_testprotocompat &= ~2;
588 rc = lnet_connect(&sock, peer->rap_nid,
589 0, peer->rap_ip, peer->rap_port);
593 /* CAVEAT EMPTOR: the passive side receives with a SHORT rx timeout
594 * immediately after accepting a connection, so we connect and then
595 * send immediately. */
597 rc = libcfs_sock_write(sock, &connreq, sizeof(connreq),
598 lnet_acceptor_timeout());
600 CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
601 HIPQUAD(peer->rap_ip), peer->rap_port, rc);
605 rc = kranal_recv_connreq(sock, &connreq, 1);
607 CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
608 HIPQUAD(peer->rap_ip), peer->rap_port, rc);
612 libcfs_sock_release(sock);
/* Validate the reply came from whom I dialled, on the same device. */
615 if (connreq.racr_srcnid != peer->rap_nid) {
616 CERROR("Unexpected srcnid from %u.%u.%u.%u/%d: "
617 "received %s expected %s\n",
618 HIPQUAD(peer->rap_ip), peer->rap_port,
619 libcfs_nid2str(connreq.racr_srcnid),
620 libcfs_nid2str(peer->rap_nid));
624 if (connreq.racr_devid != dev->rad_id) {
625 CERROR("Unexpected device id from %u.%u.%u.%u/%d: "
626 "received %d expected %d\n",
627 HIPQUAD(peer->rap_ip), peer->rap_port,
628 connreq.racr_devid, dev->rad_id);
632 rc = kranal_set_conn_params(conn, &connreq,
633 peer->rap_ip, peer->rap_port);
638 *dst_nidp = connreq.racr_dstnid;
/* Failure path (labels not visible): release socket/conn and report. */
642 libcfs_sock_release(sock);
644 lnet_connect_console_error(rc, peer->rap_nid,
645 peer->rap_ip, peer->rap_port);
647 kranal_conn_decref(conn);
/* Complete a handshake in either direction: active (peer != NULL, connd
 * initiated) or passive (sock accepted by the listener).  Installs the new
 * conn in the peer's conn list and the cqid hash, flushes queued txs,
 * culls stale conns and schedules the conn for a first check. */
652 kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
665 /* active: connd wants to connect to 'peer' */
666 LASSERT (peer != NULL);
667 LASSERT (peer->rap_connecting);
669 rc = kranal_active_conn_handshake(peer, &dst_nid, &conn);
673 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
675 if (!kranal_peer_active(peer)) {
676 /* raced with peer getting unlinked */
677 write_unlock_irqrestore(&kranal_data.kra_global_lock,
679 kranal_conn_decref(conn);
683 peer_nid = peer->rap_nid;
685 /* passive: listener accepted 'sock' */
686 LASSERT (peer == NULL);
688 rc = kranal_passive_conn_handshake(sock, &peer_nid,
693 /* assume this is a new peer */
694 rc = kranal_create_peer(&peer, peer_nid);
696 CERROR("Can't create conn for %s\n",
697 libcfs_nid2str(peer_nid));
698 kranal_conn_decref(conn);
702 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
704 peer2 = kranal_find_peer_locked(peer_nid);
708 /* peer_nid already in the peer table */
709 kranal_peer_decref(peer);
/* Exactly one of: brand-new peer, or already-active peer. */
714 LASSERT ((!new_peer) != (!kranal_peer_active(peer)));
716 /* Refuse connection if peer thinks we are a different NID. We check
717 * this while holding the global lock, to synch with connection
718 * destruction on NID change. */
719 if (kranal_data.kra_ni->ni_nid != dst_nid) {
720 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
722 CERROR("Stale/bad connection with %s: dst_nid %s, expected %s\n",
723 libcfs_nid2str(peer_nid), libcfs_nid2str(dst_nid),
724 libcfs_nid2str(kranal_data.kra_ni->ni_nid));
729 /* Refuse to duplicate an existing connection (both sides might try to
730 * connect at once). NB we return success! We _are_ connected so we
731 * _don't_ have any blocked txs to complete with failure. */
732 rc = kranal_conn_isdup_locked(peer, conn);
734 LASSERT (!list_empty(&peer->rap_conns));
735 LASSERT (list_empty(&peer->rap_tx_queue));
736 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
737 CWARN("Not creating duplicate connection to %s: %d\n",
738 libcfs_nid2str(peer_nid), rc);
744 /* peer table takes my ref on the new peer */
745 list_add_tail(&peer->rap_list,
746 kranal_nid2peerlist(peer_nid));
749 /* initialise timestamps before reaper looks at them */
750 conn->rac_last_tx = conn->rac_last_rx = jiffies;
752 kranal_peer_addref(peer); /* +1 ref for conn */
753 conn->rac_peer = peer;
754 list_add_tail(&conn->rac_list, &peer->rap_conns);
756 kranal_conn_addref(conn); /* +1 ref for conn table */
757 list_add_tail(&conn->rac_hashlist,
758 kranal_cqid2connlist(conn->rac_cqid));
760 /* Schedule all packets blocking for a connection */
761 while (!list_empty(&peer->rap_tx_queue)) {
762 tx = list_entry(peer->rap_tx_queue.next,
765 list_del(&tx->tx_list);
766 kranal_post_fma(conn, tx);
769 nstale = kranal_close_stale_conns_locked(peer, conn);
771 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
773 /* CAVEAT EMPTOR: passive peer can disappear NOW */
776 CWARN("Closed %d stale conns to %s\n", nstale,
777 libcfs_nid2str(peer_nid));
779 CWARN("New connection to %s on devid[%d] = %d\n",
780 libcfs_nid2str(peer_nid),
781 conn->rac_device->rad_idx, conn->rac_device->rad_id);
783 /* Ensure conn gets checked. Transmits may have been queued and an
784 * FMA event may have happened before it got in the cq hash table */
785 kranal_schedule_conn(conn);
/* Failure path (labels not visible): drop peer and conn refs. */
790 kranal_peer_decref(peer);
791 kranal_conn_decref(conn);
/* connd entry point for an active connect to 'peer'.  On success queued
 * txs were already flushed by the handshake; on failure apply exponential
 * reconnect backoff (clamped to tunable min/max) and fail every queued tx
 * with -EHOSTUNREACH. */
796 kranal_connect (kra_peer_t *peer)
800 struct list_head zombies;
803 LASSERT (peer->rap_connecting);
805 CDEBUG(D_NET, "About to handshake %s\n",
806 libcfs_nid2str(peer->rap_nid));
808 rc = kranal_conn_handshake(NULL, peer);
810 CDEBUG(D_NET, "Done handshake %s:%d \n",
811 libcfs_nid2str(peer->rap_nid), rc);
813 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
815 LASSERT (peer->rap_connecting);
816 peer->rap_connecting = 0;
819 /* kranal_conn_handshake() queues blocked txs immediately on
820 * success to avoid messages jumping the queue */
821 LASSERT (list_empty(&peer->rap_tx_queue));
823 peer->rap_reconnect_interval = 0; /* OK to reconnect at any time */
825 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* Failure: back off before the next attempt. */
829 peer->rap_reconnect_interval *= 2;
830 peer->rap_reconnect_interval =
831 MAX(peer->rap_reconnect_interval,
832 *kranal_tunables.kra_min_reconnect_interval);
833 peer->rap_reconnect_interval =
834 MIN(peer->rap_reconnect_interval,
835 *kranal_tunables.kra_max_reconnect_interval);
837 peer->rap_reconnect_time = jiffies + peer->rap_reconnect_interval * HZ;
839 /* Grab all blocked packets while we have the global lock */
840 list_add(&zombies, &peer->rap_tx_queue);
841 list_del_init(&peer->rap_tx_queue);
843 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
845 if (list_empty(&zombies))
848 CDEBUG(D_NETERROR, "Dropping packets for %s: connection failed\n",
849 libcfs_nid2str(peer->rap_nid));
/* Complete every zombie tx with failure. */
852 tx = list_entry(zombies.next, kra_tx_t, tx_list);
854 list_del(&tx->tx_list);
855 kranal_tx_done(tx, -EHOSTUNREACH);
857 } while (!list_empty(&zombies));
/* Release an accepted-socket wrapper: close the socket and free it. */
861 kranal_free_acceptsock (kra_acceptsock_t *ras)
863 libcfs_sock_release(ras->ras_sock);
864 LIBCFS_FREE(ras, sizeof(*ras));
/* lnd_accept callback: wrap the accepted socket and queue it for the
 * connd threads to run the passive handshake. */
868 kranal_accept (lnet_ni_t *ni, struct socket *sock)
870 kra_acceptsock_t *ras;
876 rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
877 LASSERT (rc == 0); /* we succeeded before */
879 LIBCFS_ALLOC(ras, sizeof(*ras));
881 CERROR("ENOMEM allocating connection request from "
882 "%u.%u.%u.%u\n", HIPQUAD(peer_ip));
886 ras->ras_sock = sock;
888 spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
890 list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
891 wake_up(&kranal_data.kra_connd_waitq);
893 spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
/* Allocate a new peer for 'nid' with one reference for the caller.
 * Fails (freeing the peer) if shutdown has already begun, so no peers can
 * be created after kra_nonewpeers is set. */
898 kranal_create_peer (kra_peer_t **peerp, lnet_nid_t nid)
903 LASSERT (nid != LNET_NID_ANY);
905 LIBCFS_ALLOC(peer, sizeof(*peer));
909 memset(peer, 0, sizeof(*peer)); /* zero flags etc */
912 atomic_set(&peer->rap_refcount, 1); /* 1 ref for caller */
914 INIT_LIST_HEAD(&peer->rap_list);
915 INIT_LIST_HEAD(&peer->rap_connd_list);
916 INIT_LIST_HEAD(&peer->rap_conns);
917 INIT_LIST_HEAD(&peer->rap_tx_queue);
919 peer->rap_reconnect_interval = 0; /* OK to connect at any time */
921 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
923 if (kranal_data.kra_nonewpeers) {
924 /* shutdown has started already */
925 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
927 LIBCFS_FREE(peer, sizeof(*peer));
928 CERROR("Can't create peer: network shutdown\n");
932 atomic_inc(&kranal_data.kra_npeers);
934 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* Final teardown when the last peer reference drops: the peer must be
 * fully disconnected, unlinked and idle. */
941 kranal_destroy_peer (kra_peer_t *peer)
943 CDEBUG(D_NET, "peer %s %p deleted\n",
944 libcfs_nid2str(peer->rap_nid), peer);
946 LASSERT (atomic_read(&peer->rap_refcount) == 0);
947 LASSERT (peer->rap_persistence == 0);
948 LASSERT (!kranal_peer_active(peer));
949 LASSERT (!peer->rap_connecting);
950 LASSERT (list_empty(&peer->rap_conns));
951 LASSERT (list_empty(&peer->rap_tx_queue));
952 LASSERT (list_empty(&peer->rap_connd_list));
954 LIBCFS_FREE(peer, sizeof(*peer));
956 /* NB a peer's connections keep a reference on their peer until
957 * they are destroyed, so we can be assured that _all_ state to do
958 * with this peer has been cleaned up when its refcount drops to
960 atomic_dec(&kranal_data.kra_npeers);
/* Look 'nid' up in its peer hash chain; caller holds the global lock.
 * No reference is taken here (the caller must addref if it keeps the
 * pointer past the lock). */
964 kranal_find_peer_locked (lnet_nid_t nid)
966 struct list_head *peer_list = kranal_nid2peerlist(nid);
967 struct list_head *tmp;
970 list_for_each (tmp, peer_list) {
972 peer = list_entry(tmp, kra_peer_t, rap_list);
974 LASSERT (peer->rap_persistence > 0 || /* persistent peer */
975 !list_empty(&peer->rap_conns)); /* active conn */
977 if (peer->rap_nid != nid)
980 CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
981 peer, libcfs_nid2str(nid),
982 atomic_read(&peer->rap_refcount));
/* Lock-taking lookup wrapper; returns the peer with a reference for the
 * caller, or NULL if not found. */
989 kranal_find_peer (lnet_nid_t nid)
993 read_lock(&kranal_data.kra_global_lock);
994 peer = kranal_find_peer_locked(nid);
995 if (peer != NULL) /* +1 ref for caller? */
996 kranal_peer_addref(peer);
997 read_unlock(&kranal_data.kra_global_lock);
/* Remove an idle, non-persistent peer from the peer table and drop the
 * table's reference.  Caller holds the global write lock. */
1003 kranal_unlink_peer_locked (kra_peer_t *peer)
1005 LASSERT (peer->rap_persistence == 0);
1006 LASSERT (list_empty(&peer->rap_conns));
1008 LASSERT (kranal_peer_active(peer));
1009 list_del_init(&peer->rap_list);
1011 /* lose peerlist's ref */
1012 kranal_peer_decref(peer);
/* ioctl helper: report the 'index'th peer in hash order (nid, ip, port,
 * persistence/share count).  Returns under the read lock as soon as the
 * index matches; otherwise falls through to the not-found exit. */
1016 kranal_get_peer_info (int index, lnet_nid_t *nidp, __u32 *ipp, int *portp,
1020 struct list_head *ptmp;
1023 read_lock(&kranal_data.kra_global_lock);
1025 for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
1027 list_for_each(ptmp, &kranal_data.kra_peers[i]) {
1029 peer = list_entry(ptmp, kra_peer_t, rap_list);
1030 LASSERT (peer->rap_persistence > 0 ||
1031 !list_empty(&peer->rap_conns));
1036 *nidp = peer->rap_nid;
1037 *ipp = peer->rap_ip;
1038 *portp = peer->rap_port;
1039 *persistencep = peer->rap_persistence;
1041 read_unlock(&kranal_data.kra_global_lock);
1046 read_unlock(&kranal_data.kra_global_lock);
/* ioctl helper: add (or re-reference) a persistent peer at nid/ip/port.
 * If the nid already exists the freshly-created peer is dropped and the
 * existing entry's persistence is bumped instead. */
1051 kranal_add_persistent_peer (lnet_nid_t nid, __u32 ip, int port)
1053 unsigned long flags;
1058 if (nid == LNET_NID_ANY)
1061 rc = kranal_create_peer(&peer, nid);
1065 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1067 peer2 = kranal_find_peer_locked(nid);
1068 if (peer2 != NULL) {
1069 kranal_peer_decref(peer);
1072 /* peer table takes existing ref on peer */
1073 list_add_tail(&peer->rap_list,
1074 kranal_nid2peerlist(nid));
1078 peer->rap_port = port;
1079 peer->rap_persistence++;
1081 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* Delete a peer under the global write lock: clear persistence, then
 * either unlink immediately (no conns) or close every conn and let the
 * last close unlink the peer. */
1086 kranal_del_peer_locked (kra_peer_t *peer)
1088 struct list_head *ctmp;
1089 struct list_head *cnxt;
1092 peer->rap_persistence = 0;
1094 if (list_empty(&peer->rap_conns)) {
1095 kranal_unlink_peer_locked(peer);
1097 list_for_each_safe(ctmp, cnxt, &peer->rap_conns) {
1098 conn = list_entry(ctmp, kra_conn_t, rac_list);
1100 kranal_close_conn_locked(conn, 0);
1102 /* peer unlinks itself when last conn is closed */
/* Delete the peer matching 'nid', or every peer if nid == LNET_NID_ANY
 * (scans all hash chains in that case).  rc tracks whether anything
 * matched. */
1107 kranal_del_peer (lnet_nid_t nid)
1109 unsigned long flags;
1110 struct list_head *ptmp;
1111 struct list_head *pnxt;
1118 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1120 if (nid != LNET_NID_ANY)
1121 lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
1124 hi = kranal_data.kra_peer_hash_size - 1;
1127 for (i = lo; i <= hi; i++) {
1128 list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
1129 peer = list_entry(ptmp, kra_peer_t, rap_list);
1130 LASSERT (peer->rap_persistence > 0 ||
1131 !list_empty(&peer->rap_conns));
1133 if (!(nid == LNET_NID_ANY || peer->rap_nid == nid))
1136 kranal_del_peer_locked(peer);
1137 rc = 0; /* matched something */
1141 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* ioctl helper: return the 'index'th conn across all peers, with a
 * reference taken for the caller; NULL if the index is out of range. */
1147 kranal_get_conn_by_idx (int index)
1150 struct list_head *ptmp;
1152 struct list_head *ctmp;
1155 read_lock (&kranal_data.kra_global_lock);
1157 for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
1158 list_for_each (ptmp, &kranal_data.kra_peers[i]) {
1160 peer = list_entry(ptmp, kra_peer_t, rap_list);
1161 LASSERT (peer->rap_persistence > 0 ||
1162 !list_empty(&peer->rap_conns));
1164 list_for_each (ctmp, &peer->rap_conns) {
1168 conn = list_entry(ctmp, kra_conn_t, rac_list);
1169 CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
1170 libcfs_nid2str(conn->rac_peer->rap_nid),
1171 atomic_read(&conn->rac_refcount));
1172 atomic_inc(&conn->rac_refcount);
1173 read_unlock(&kranal_data.kra_global_lock);
1179 read_unlock(&kranal_data.kra_global_lock);
/* Close every conn of 'peer' with reason 'why'; caller holds the global
 * write lock.  Returns the number closed (counter not visible here). */
1184 kranal_close_peer_conns_locked (kra_peer_t *peer, int why)
1187 struct list_head *ctmp;
1188 struct list_head *cnxt;
1191 list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
1192 conn = list_entry(ctmp, kra_conn_t, rac_list);
1195 kranal_close_conn_locked(conn, why);
/* ioctl helper: close all conns to 'nid' (or to everyone for
 * LNET_NID_ANY).  Wildcard always succeeds; a specific nid returns
 * -ENOENT if nothing was closed. */
1202 kranal_close_matching_conns (lnet_nid_t nid)
1204 unsigned long flags;
1206 struct list_head *ptmp;
1207 struct list_head *pnxt;
1213 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1215 if (nid != LNET_NID_ANY)
1216 lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
1219 hi = kranal_data.kra_peer_hash_size - 1;
1222 for (i = lo; i <= hi; i++) {
1223 list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
1225 peer = list_entry(ptmp, kra_peer_t, rap_list);
1226 LASSERT (peer->rap_persistence > 0 ||
1227 !list_empty(&peer->rap_conns));
1229 if (!(nid == LNET_NID_ANY || nid == peer->rap_nid))
1232 count += kranal_close_peer_conns_locked(peer, 0);
1236 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
1238 /* wildcards always succeed */
1239 if (nid == LNET_NID_ANY)
1242 return (count == 0) ? -ENOENT : 0;
/* lnd_ctl callback: dispatch libcfs ioctls (get/add/del peer, get/close
 * conn, obsolete REGISTER_MYNID) against this NI. */
1246 kranal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
1248 struct libcfs_ioctl_data *data = arg;
1251 LASSERT (ni == kranal_data.kra_ni);
1254 case IOC_LIBCFS_GET_PEER: {
1258 int share_count = 0;
1260 rc = kranal_get_peer_info(data->ioc_count,
1261 &nid, &ip, &port, &share_count);
1262 data->ioc_nid = nid;
1263 data->ioc_count = share_count;
1264 data->ioc_u32[0] = ip;
1265 data->ioc_u32[1] = port;
1268 case IOC_LIBCFS_ADD_PEER: {
1269 rc = kranal_add_persistent_peer(data->ioc_nid,
1270 data->ioc_u32[0], /* IP */
1271 data->ioc_u32[1]); /* port */
1274 case IOC_LIBCFS_DEL_PEER: {
1275 rc = kranal_del_peer(data->ioc_nid);
1278 case IOC_LIBCFS_GET_CONN: {
1279 kra_conn_t *conn = kranal_get_conn_by_idx(data->ioc_count);
1285 data->ioc_nid = conn->rac_peer->rap_nid;
1286 data->ioc_u32[0] = conn->rac_device->rad_id;
/* get_conn_by_idx took a ref for us; drop it now we've copied out. */
1287 kranal_conn_decref(conn);
1291 case IOC_LIBCFS_CLOSE_CONNECTION: {
1292 rc = kranal_close_matching_conns(data->ioc_nid);
1295 case IOC_LIBCFS_REGISTER_MYNID: {
1296 /* Ignore if this is a noop */
1297 if (data->ioc_nid == ni->ni_nid) {
1300 CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
1301 libcfs_nid2str(data->ioc_nid),
1302 libcfs_nid2str(ni->ni_nid));
/* Free every tx descriptor on 'freelist', including each tx's physical
 * page array. */
1313 kranal_free_txdescs(struct list_head *freelist)
1317 while (!list_empty(freelist)) {
1318 tx = list_entry(freelist->next, kra_tx_t, tx_list);
1320 list_del(&tx->tx_list);
1321 LIBCFS_FREE(tx->tx_phys, LNET_MAX_IOV * sizeof(*tx->tx_phys));
1322 LIBCFS_FREE(tx, sizeof(*tx));
/* Populate the idle-tx freelist with 'n' descriptors, each with a
 * LNET_MAX_IOV-entry physical page array.  On any allocation failure the
 * partially built list is freed and an error returned. */
1327 kranal_alloc_txdescs(struct list_head *freelist, int n)
1332 LASSERT (freelist == &kranal_data.kra_idle_txs);
1333 LASSERT (list_empty(freelist));
1335 for (i = 0; i < n; i++) {
1337 LIBCFS_ALLOC(tx, sizeof(*tx));
1339 CERROR("Can't allocate tx[%d]\n", i);
1340 kranal_free_txdescs(freelist);
1344 LIBCFS_ALLOC(tx->tx_phys,
1345 LNET_MAX_IOV * sizeof(*tx->tx_phys));
1346 if (tx->tx_phys == NULL) {
1347 CERROR("Can't allocate tx[%d]->tx_phys\n", i);
1349 LIBCFS_FREE(tx, sizeof(*tx));
1350 kranal_free_txdescs(freelist);
1354 tx->tx_buftype = RANAL_BUF_NONE;
1355 tx->tx_msg.ram_type = RANAL_MSG_NONE;
1357 list_add(&tx->tx_list, freelist);
/* Bring up RapidArray device 'id': open the device, reserve RDMA
 * descriptors and create the send (rdma) and receive (fma) completion
 * queues.  On partial failure the teardown labels (not visible here)
 * release what was created. */
1364 kranal_device_init(int id, kra_device_t *dev)
1366 int total_ntx = *kranal_tunables.kra_ntx;
1370 rrc = RapkGetDeviceByIndex(id, kranal_device_callback,
1372 if (rrc != RAP_SUCCESS) {
1373 CERROR("Can't get Rapidarray Device %d: %d\n", id, rrc);
1377 rrc = RapkReserveRdma(dev->rad_handle, total_ntx);
1378 if (rrc != RAP_SUCCESS) {
1379 CERROR("Can't reserve %d RDMA descriptors"
1380 " for device %d: %d\n", total_ntx, id, rrc);
1384 rrc = RapkCreateCQ(dev->rad_handle, total_ntx, RAP_CQTYPE_SEND,
1385 &dev->rad_rdma_cqh);
1386 if (rrc != RAP_SUCCESS) {
1387 CERROR("Can't create rdma cq size %d for device %d: %d\n",
1388 total_ntx, id, rrc);
1392 rrc = RapkCreateCQ(dev->rad_handle,
1393 *kranal_tunables.kra_fma_cq_size,
1394 RAP_CQTYPE_RECV, &dev->rad_fma_cqh);
1395 if (rrc != RAP_SUCCESS) {
1396 CERROR("Can't create fma cq size %d for device %d: %d\n",
1397 *kranal_tunables.kra_fma_cq_size, id, rrc);
/* Failure path: undo in reverse order of creation. */
1404 RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
1406 RapkReleaseDevice(dev->rad_handle);
/* Tear down a device after its scheduler has exited and all mappings and
 * conn queues are drained: destroy both CQs and release the device. */
1412 kranal_device_fini(kra_device_t *dev)
1414 LASSERT (list_empty(&dev->rad_ready_conns));
1415 LASSERT (list_empty(&dev->rad_new_conns));
1416 LASSERT (dev->rad_nphysmap == 0);
1417 LASSERT (dev->rad_nppphysmap == 0);
1418 LASSERT (dev->rad_nvirtmap == 0);
1419 LASSERT (dev->rad_nobvirtmap == 0);
1421 LASSERT(dev->rad_scheduler == NULL);
1422 RapkDestroyCQ(dev->rad_handle, dev->rad_fma_cqh);
1423 RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
1424 RapkReleaseDevice(dev->rad_handle);
/*
 * LND shutdown entry point (the_kralnd.lnd_shutdown).  Tears down the
 * single NI in stages keyed off kranal_data.kra_init: from
 * RANAL_INIT_ALL it first quiesces peers/connections, then (falling
 * through to RANAL_INIT_DATA — fallthrough presumed from the elided
 * lines, TODO confirm) flags all threads to exit, waits for them, and
 * finally frees the hash tables, devices and tx pool.  The "waiting"
 * loops poll once a second and promote the CDEBUG to D_WARNING on
 * power-of-two iterations to throttle log noise.
 */
1428 kranal_shutdown (lnet_ni_t *ni)
1431 unsigned long flags;
1433 CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
1434 atomic_read(&libcfs_kmemory));
/* only one instance: 'ni' must be the one we started */
1436 LASSERT (ni == kranal_data.kra_ni);
1437 LASSERT (ni->ni_data == &kranal_data);
1439 switch (kranal_data.kra_init) {
1441 CERROR("Unexpected state %d\n", kranal_data.kra_init);
1444 case RANAL_INIT_ALL:
1445 /* Prevent new peers from being created */
1446 write_lock_irqsave(&kranal_data.kra_global_lock, flags);
1447 kranal_data.kra_nonewpeers = 1;
1448 write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
1450 /* Remove all existing peers from the peer table */
1451 kranal_del_peer(LNET_NID_ANY);
1453 /* Wait for pending conn reqs to be handled */
1455 spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
1456 while (!list_empty(&kranal_data.kra_connd_acceptq)) {
/* drop the lock while sleeping so connds can drain the queue */
1457 spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
1460 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
1461 "waiting for conn reqs to clean up\n");
1462 cfs_pause(cfs_time_seconds(1));
1464 spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
1466 spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
1468 /* Wait for all peers to be freed */
1470 while (atomic_read(&kranal_data.kra_npeers) != 0) {
1472 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
1473 "waiting for %d peers to close down\n",
1474 atomic_read(&kranal_data.kra_npeers));
1475 cfs_pause(cfs_time_seconds(1));
1479 case RANAL_INIT_DATA:
1483 /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
1484 * have to worry about shutdown races. NB connections may be created
1485 * while there are still active connds, but these will be temporary
1486 * since peer creation always fails after the listener has started to
1488 LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
1490 /* Flag threads to terminate */
1491 kranal_data.kra_shutdown = 1;
/* wake every device scheduler so it notices kra_shutdown */
1493 for (i = 0; i < kranal_data.kra_ndevs; i++) {
1494 kra_device_t *dev = &kranal_data.kra_devices[i];
1496 spin_lock_irqsave(&dev->rad_lock, flags);
1497 wake_up(&dev->rad_waitq);
1498 spin_unlock_irqrestore(&dev->rad_lock, flags);
/* wake the reaper */
1501 spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
1502 wake_up_all(&kranal_data.kra_reaper_waitq);
1503 spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
/* wake the connds (no peers may still be queued on them) */
1505 LASSERT (list_empty(&kranal_data.kra_connd_peers));
1506 spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
1507 wake_up_all(&kranal_data.kra_connd_waitq);
1508 spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
1510 /* Wait for threads to exit */
1512 while (atomic_read(&kranal_data.kra_nthreads) != 0) {
1514 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
1515 "Waiting for %d threads to terminate\n",
1516 atomic_read(&kranal_data.kra_nthreads));
1517 cfs_pause(cfs_time_seconds(1));
/* everything is quiescent now: free the tables allocated at startup */
1520 LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
1521 if (kranal_data.kra_peers != NULL) {
1522 for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
1523 LASSERT (list_empty(&kranal_data.kra_peers[i]));
1525 LIBCFS_FREE(kranal_data.kra_peers,
1526 sizeof (struct list_head) *
1527 kranal_data.kra_peer_hash_size);
1530 LASSERT (atomic_read(&kranal_data.kra_nconns) == 0);
1531 if (kranal_data.kra_conns != NULL) {
1532 for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
1533 LASSERT (list_empty(&kranal_data.kra_conns[i]));
1535 LIBCFS_FREE(kranal_data.kra_conns,
1536 sizeof (struct list_head) *
1537 kranal_data.kra_conn_hash_size);
1540 for (i = 0; i < kranal_data.kra_ndevs; i++)
1541 kranal_device_fini(&kranal_data.kra_devices[i]);
1543 kranal_free_txdescs(&kranal_data.kra_idle_txs);
1545 CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
1546 atomic_read(&libcfs_kmemory));
/* back to pristine: a subsequent startup is allowed again */
1548 kranal_data.kra_init = RANAL_INIT_NOTHING;
1549 PORTAL_MODULE_UNUSE;
/*
 * LND startup entry point (the_kralnd.lnd_startup).  Validates the
 * single-instance invariant and the tunables, zeroes and initialises
 * the global kranal_data (locks, lists, waitqueues, timestamps),
 * allocates the peer/conn hash tables and the tx descriptor pool,
 * spawns the reaper/connd/scheduler threads, and opens every available
 * RapidArray device.  On any later failure the elided error paths
 * appear to funnel into kranal_shutdown(ni) for cleanup (see the call
 * at the end — TODO confirm the elided gotos).
 */
1553 kranal_startup (lnet_ni_t *ni)
/* remember kmem level so shutdown's D_MALLOC accounting can be compared */
1556 int pkmem = atomic_read(&libcfs_kmemory);
1561 LASSERT (ni->ni_lnd == &the_kralnd);
1563 /* Only 1 instance supported */
1564 if (kranal_data.kra_init != RANAL_INIT_NOTHING) {
1565 CERROR ("Only 1 instance supported\n");
1569 if (lnet_set_ip_niaddr(ni) != 0) {
1570 CERROR ("Can't determine my NID\n");
/* credits draw from the tx pool, so they can't exceed it */
1574 if (*kranal_tunables.kra_credits > *kranal_tunables.kra_ntx) {
1575 CERROR ("Can't set credits(%d) > ntx(%d)\n",
1576 *kranal_tunables.kra_credits,
1577 *kranal_tunables.kra_ntx);
1581 memset(&kranal_data, 0, sizeof(kranal_data)); /* zero pointers, flags etc */
1583 ni->ni_maxtxcredits = *kranal_tunables.kra_credits;
1584 ni->ni_peertxcredits = *kranal_tunables.kra_peercredits;
1586 ni->ni_data = &kranal_data;
1587 kranal_data.kra_ni = ni;
1589 /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
1590 * a unique (for all time) connstamp so we can uniquely identify
1591 * the sender. The connstamp is an incrementing counter
1592 * initialised with seconds + microseconds at startup time. So we
1593 * rely on NOT creating connections more frequently on average than
1594 * 1MHz to ensure we don't use old connstamps when we reboot. */
1595 do_gettimeofday(&tv);
1596 kranal_data.kra_connstamp =
1597 kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
1599 rwlock_init(&kranal_data.kra_global_lock);
/* per-device state: conn queues, scheduler waitqueue, lock */
1601 for (i = 0; i < RANAL_MAXDEVS; i++ ) {
1602 kra_device_t *dev = &kranal_data.kra_devices[i];
1605 INIT_LIST_HEAD(&dev->rad_ready_conns);
1606 INIT_LIST_HEAD(&dev->rad_new_conns);
1607 init_waitqueue_head(&dev->rad_waitq);
1608 spin_lock_init(&dev->rad_lock);
1611 kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
1612 init_waitqueue_head(&kranal_data.kra_reaper_waitq);
1613 spin_lock_init(&kranal_data.kra_reaper_lock);
1615 INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
1616 INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
1617 init_waitqueue_head(&kranal_data.kra_connd_waitq);
1618 spin_lock_init(&kranal_data.kra_connd_lock);
1620 INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
1621 spin_lock_init(&kranal_data.kra_tx_lock);
1623 /* OK to call kranal_api_shutdown() to cleanup now */
1624 kranal_data.kra_init = RANAL_INIT_DATA;
1627 kranal_data.kra_peer_hash_size = RANAL_PEER_HASH_SIZE;
1628 LIBCFS_ALLOC(kranal_data.kra_peers,
1629 sizeof(struct list_head) * kranal_data.kra_peer_hash_size);
1630 if (kranal_data.kra_peers == NULL)
1633 for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
1634 INIT_LIST_HEAD(&kranal_data.kra_peers[i]);
/* NOTE(review): conn hash deliberately reuses the peer hash size
 * constant — verify there is no separate RANAL_CONN_HASH_SIZE */
1636 kranal_data.kra_conn_hash_size = RANAL_PEER_HASH_SIZE;
1637 LIBCFS_ALLOC(kranal_data.kra_conns,
1638 sizeof(struct list_head) * kranal_data.kra_conn_hash_size);
1639 if (kranal_data.kra_conns == NULL)
1642 for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
1643 INIT_LIST_HEAD(&kranal_data.kra_conns[i]);
1645 rc = kranal_alloc_txdescs(&kranal_data.kra_idle_txs,
1646 *kranal_tunables.kra_ntx);
1650 rc = kranal_thread_start(kranal_reaper, NULL);
1652 CERROR("Can't spawn ranal reaper: %d\n", rc);
/* one connd per tunable count; index passed as the thread arg */
1656 for (i = 0; i < *kranal_tunables.kra_n_connd; i++) {
1657 rc = kranal_thread_start(kranal_connd, (void *)(unsigned long)i);
1659 CERROR("Can't spawn ranal connd[%d]: %d\n",
1665 LASSERT (kranal_data.kra_ndevs == 0);
1667 /* Use all available RapidArray devices */
1668 for (i = 0; i < RANAL_MAXDEVS; i++) {
1669 dev = &kranal_data.kra_devices[kranal_data.kra_ndevs];
1671 rc = kranal_device_init(kranal_devids[i], dev);
/* count only devices that initialised; failures are skipped */
1673 kranal_data.kra_ndevs++;
1676 if (kranal_data.kra_ndevs == 0) {
1677 CERROR("Can't initialise any RapidArray devices\n");
/* one scheduler thread per live device */
1681 for (i = 0; i < kranal_data.kra_ndevs; i++) {
1682 dev = &kranal_data.kra_devices[i];
1683 rc = kranal_thread_start(kranal_scheduler, dev);
1685 CERROR("Can't spawn ranal scheduler[%d]: %d\n",
1691 /* flag everything initialised */
1692 kranal_data.kra_init = RANAL_INIT_ALL;
1693 /*****************************************************/
1695 CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
/* failure path: unwind whatever was set up above */
1699 kranal_shutdown(ni);
/*
 * Module unload hook: unregister the LND from LNet, then release the
 * tunables (reverse of kranal_module_init's order).
 */
1704 kranal_module_fini (void)
1706 lnet_unregister_lnd(&the_kralnd);
1707 kranal_tunables_fini();
/*
 * Module load hook: initialise tunables (rc presumably checked on an
 * elided line — TODO confirm), then register this LND with LNet.
 */
1711 kranal_module_init (void)
1715 rc = kranal_tunables_init();
1719 lnet_register_lnd(&the_kralnd);
/* Standard kernel module metadata and entry/exit point registration. */
1724 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
1725 MODULE_DESCRIPTION("Kernel RapidArray LND v0.01");
1726 MODULE_LICENSE("GPL");
1728 module_init(kranal_module_init);
1729 module_exit(kranal_module_fini);