X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lnet%2Fklnds%2Fralnd%2Fralnd.c;h=a6d61e2083e5e59b5bb67d82c2aaf02a07d5ce19;hb=f2a404d1fec2287ef9ffda105727e8cd3f8e0b7b;hp=a0a4d93f164862731f788505c3457625b3913538;hpb=ed88907a96ba81d3558e71ade9def98bdc785169;p=fs%2Flustre-release.git

diff --git a/lnet/klnds/ralnd/ralnd.c b/lnet/klnds/ralnd/ralnd.c
index a0a4d93..a6d61e2 100644
--- a/lnet/klnds/ralnd/ralnd.c
+++ b/lnet/klnds/ralnd/ralnd.c
@@ -1,24 +1,41 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
  *
- * Copyright (C) 2004 Cluster File Systems, Inc.
- * Author: Eric Barton
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- * This file is part of Lustre, http://www.lustre.org.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  *
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
  *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/klnds/ralnd/ralnd.c
+ *
+ * Author: Eric Barton
  */
 
 #include "ralnd.h"
@@ -52,8 +69,7 @@ kranal_pack_connreq(kra_connreq_t *connreq, kra_conn_t *conn, lnet_nid_t dstnid)
                 return;
 
         connreq->racr_devid = conn->rac_device->rad_id;
-        connreq->racr_srcnid = lnet_ptlcompat_srcnid(kranal_data.kra_ni->ni_nid,
-                                                     dstnid);
+        connreq->racr_srcnid = kranal_data.kra_ni->ni_nid;
         connreq->racr_dstnid = dstnid;
         connreq->racr_peerstamp = kranal_data.kra_peerstamp;
         connreq->racr_connstamp = conn->rac_my_connstamp;
@@ -84,7 +100,6 @@ kranal_recv_connreq(struct socket *sock, kra_connreq_t *connreq, int active)
             connreq->racr_magic != __swab32(RANAL_MSG_MAGIC)) {
                 /* Unexpected magic! */
                 if (!active &&
-                    the_lnet.ln_ptlcompat == 0 &&
                     (connreq->racr_magic == LNET_PROTO_MAGIC ||
                      connreq->racr_magic == __swab32(LNET_PROTO_MAGIC))) {
                         /* future protocol version compatibility!
@@ -94,36 +109,9 @@ kranal_recv_connreq(struct socket *sock, kra_connreq_t *connreq, int active)
                         return EPROTO;
                 }
 
-                if (active ||
-                    the_lnet.ln_ptlcompat == 0) {
-                        CERROR("Unexpected magic %08x (1)\n",
-                               connreq->racr_magic);
-                        return -EPROTO;
-                }
-
-                /* When portals compatibility is set, I may be passed a new
-                 * connection "blindly" by the acceptor, and I have to
-                 * determine if my peer has sent an acceptor connection request
-                 * or not. This isn't a connreq, so I'll get the acceptor to
-                 * look at it... */
-                rc = lnet_accept(kranal_data.kra_ni, sock, connreq->racr_magic);
-                if (rc != 0)
-                        return -EPROTO;
-
-                /* ...and if it's OK I'm back to looking for a connreq... */
-                rc = libcfs_sock_read(sock, &connreq->racr_magic,
-                                      sizeof(connreq->racr_magic), timeout);
-                if (rc != 0) {
-                        CERROR("Read(magic) failed(2): %d\n", rc);
-                        return -EIO;
-                }
-
-                if (connreq->racr_magic != RANAL_MSG_MAGIC &&
-                    connreq->racr_magic != __swab32(RANAL_MSG_MAGIC)) {
-                        CERROR("Unexpected magic %08x(2)\n",
-                               connreq->racr_magic);
-                        return -EPROTO;
-                }
+                CERROR("Unexpected magic %08x (%s)\n",
+                       connreq->racr_magic, active ? "active" : "passive");
+                return -EPROTO;
         }
 
         swab = (connreq->racr_magic == __swab32(RANAL_MSG_MAGIC));
@@ -193,15 +181,15 @@ int
 kranal_close_stale_conns_locked (kra_peer_t *peer, kra_conn_t *newconn)
 {
         kra_conn_t *conn;
-        struct list_head *ctmp;
-        struct list_head *cnxt;
+        cfs_list_t *ctmp;
+        cfs_list_t *cnxt;
         int loopback;
         int count = 0;
 
         loopback = peer->rap_nid == kranal_data.kra_ni->ni_nid;
 
-        list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
-                conn = list_entry(ctmp, kra_conn_t, rac_list);
+        cfs_list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
+                conn = cfs_list_entry(ctmp, kra_conn_t, rac_list);
 
                 if (conn == newconn)
                         continue;
@@ -243,13 +231,13 @@ int
 kranal_conn_isdup_locked(kra_peer_t *peer, kra_conn_t *newconn)
 {
         kra_conn_t *conn;
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         int loopback;
 
         loopback = peer->rap_nid == kranal_data.kra_ni->ni_nid;
 
-        list_for_each(tmp, &peer->rap_conns) {
-                conn = list_entry(tmp, kra_conn_t, rac_list);
+        cfs_list_for_each(tmp, &peer->rap_conns) {
+                conn = cfs_list_entry(tmp, kra_conn_t, rac_list);
 
                 /* 'newconn' is from an earlier version of 'peer'!!! */
                 if (newconn->rac_peerstamp < conn->rac_peerstamp)
@@ -292,7 +280,7 @@ kranal_set_conn_uniqueness (kra_conn_t *conn)
 {
         unsigned long flags;
 
-        write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+        write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
         conn->rac_my_connstamp = kranal_data.kra_connstamp++;
 
@@ -300,30 +288,30 @@ kranal_set_conn_uniqueness (kra_conn_t *conn)
                 conn->rac_cqid = kranal_data.kra_next_cqid++;
         } while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL);
 
-        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 }
 
 int
 kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
 {
-        kra_conn_t *conn;
-        RAP_RETURN rrc;
+        kra_conn_t *conn;
+        RAP_RETURN rrc;
 
-        LASSERT (!in_interrupt());
-        LIBCFS_ALLOC(conn, sizeof(*conn));
+        LASSERT (!in_interrupt());
+        LIBCFS_ALLOC(conn, sizeof(*conn));
 
-        if (conn == NULL)
-                return -ENOMEM;
+        if (conn == NULL)
+                return -ENOMEM;
 
         memset(conn, 0, sizeof(*conn));
 
-        atomic_set(&conn->rac_refcount, 1);
-        INIT_LIST_HEAD(&conn->rac_list);
-        INIT_LIST_HEAD(&conn->rac_hashlist);
-        INIT_LIST_HEAD(&conn->rac_schedlist);
-        INIT_LIST_HEAD(&conn->rac_fmaq);
-        INIT_LIST_HEAD(&conn->rac_rdmaq);
-        INIT_LIST_HEAD(&conn->rac_replyq);
-        spin_lock_init(&conn->rac_lock);
+        atomic_set(&conn->rac_refcount, 1);
+        CFS_INIT_LIST_HEAD(&conn->rac_list);
+        CFS_INIT_LIST_HEAD(&conn->rac_hashlist);
+        CFS_INIT_LIST_HEAD(&conn->rac_schedlist);
+        CFS_INIT_LIST_HEAD(&conn->rac_fmaq);
+        CFS_INIT_LIST_HEAD(&conn->rac_rdmaq);
+        CFS_INIT_LIST_HEAD(&conn->rac_replyq);
+        spin_lock_init(&conn->rac_lock);
 
         kranal_set_conn_uniqueness(conn);
 
@@ -339,7 +327,7 @@ kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
                 return -ENETDOWN;
         }
 
-        atomic_inc(&kranal_data.kra_nconns);
+        atomic_inc(&kranal_data.kra_nconns);
         *connp = conn;
         return 0;
 }
@@ -347,81 +335,81 @@ kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
 void
 kranal_destroy_conn(kra_conn_t *conn)
 {
-        RAP_RETURN rrc;
-
-        LASSERT (!in_interrupt());
-        LASSERT (!conn->rac_scheduled);
-        LASSERT (list_empty(&conn->rac_list));
-        LASSERT (list_empty(&conn->rac_hashlist));
-        LASSERT (list_empty(&conn->rac_schedlist));
-        LASSERT (atomic_read(&conn->rac_refcount) == 0);
-        LASSERT (list_empty(&conn->rac_fmaq));
-        LASSERT (list_empty(&conn->rac_rdmaq));
-        LASSERT (list_empty(&conn->rac_replyq));
-
-        rrc = RapkDestroyRi(conn->rac_device->rad_handle,
-                            conn->rac_rihandle);
-        LASSERT (rrc == RAP_SUCCESS);
-
-        if (conn->rac_peer != NULL)
-                kranal_peer_decref(conn->rac_peer);
-
-        LIBCFS_FREE(conn, sizeof(*conn));
-        atomic_dec(&kranal_data.kra_nconns);
+        RAP_RETURN rrc;
+
+        LASSERT (!in_interrupt());
+        LASSERT (!conn->rac_scheduled);
+        LASSERT (cfs_list_empty(&conn->rac_list));
+        LASSERT (cfs_list_empty(&conn->rac_hashlist));
+        LASSERT (cfs_list_empty(&conn->rac_schedlist));
+        LASSERT (atomic_read(&conn->rac_refcount) == 0);
+        LASSERT (cfs_list_empty(&conn->rac_fmaq));
+        LASSERT (cfs_list_empty(&conn->rac_rdmaq));
+        LASSERT (cfs_list_empty(&conn->rac_replyq));
+
+        rrc = RapkDestroyRi(conn->rac_device->rad_handle,
+                            conn->rac_rihandle);
+        LASSERT (rrc == RAP_SUCCESS);
+
+        if (conn->rac_peer != NULL)
+                kranal_peer_decref(conn->rac_peer);
+
+        LIBCFS_FREE(conn, sizeof(*conn));
+        atomic_dec(&kranal_data.kra_nconns);
 }
 
 void
 kranal_terminate_conn_locked (kra_conn_t *conn)
 {
-        LASSERT (!in_interrupt());
-        LASSERT (conn->rac_state == RANAL_CONN_CLOSING);
-        LASSERT (!list_empty(&conn->rac_hashlist));
-        LASSERT (list_empty(&conn->rac_list));
+        LASSERT (!in_interrupt());
+
LASSERT (conn->rac_state == RANAL_CONN_CLOSING); + LASSERT (!cfs_list_empty(&conn->rac_hashlist)); + LASSERT (cfs_list_empty(&conn->rac_list)); - /* Remove from conn hash table: no new callbacks */ - list_del_init(&conn->rac_hashlist); - kranal_conn_decref(conn); + /* Remove from conn hash table: no new callbacks */ + cfs_list_del_init(&conn->rac_hashlist); + kranal_conn_decref(conn); - conn->rac_state = RANAL_CONN_CLOSED; + conn->rac_state = RANAL_CONN_CLOSED; - /* schedule to clear out all uncompleted comms in context of dev's - * scheduler */ - kranal_schedule_conn(conn); + /* schedule to clear out all uncompleted comms in context of dev's + * scheduler */ + kranal_schedule_conn(conn); } void kranal_close_conn_locked (kra_conn_t *conn, int error) { - kra_peer_t *peer = conn->rac_peer; + kra_peer_t *peer = conn->rac_peer; - CDEBUG(error == 0 ? D_NET : D_NETERROR, - "closing conn to %s: error %d\n", - libcfs_nid2str(peer->rap_nid), error); + CDEBUG_LIMIT(error == 0 ? D_NET : D_NETERROR, + "closing conn to %s: error %d\n", + libcfs_nid2str(peer->rap_nid), error); - LASSERT (!in_interrupt()); - LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED); - LASSERT (!list_empty(&conn->rac_hashlist)); - LASSERT (!list_empty(&conn->rac_list)); + LASSERT (!in_interrupt()); + LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED); + LASSERT (!cfs_list_empty(&conn->rac_hashlist)); + LASSERT (!cfs_list_empty(&conn->rac_list)); - list_del_init(&conn->rac_list); + cfs_list_del_init(&conn->rac_list); - if (list_empty(&peer->rap_conns) && - peer->rap_persistence == 0) { - /* Non-persistent peer with no more conns... */ - kranal_unlink_peer_locked(peer); - } + if (cfs_list_empty(&peer->rap_conns) && + peer->rap_persistence == 0) { + /* Non-persistent peer with no more conns... */ + kranal_unlink_peer_locked(peer); + } - /* Reset RX timeout to ensure we wait for an incoming CLOSE for the - * full timeout. If we get a CLOSE we know the peer has stopped all - * RDMA. Otherwise if we wait for the full timeout we can also be sure - * all RDMA has stopped. */ - conn->rac_last_rx = jiffies; - mb(); + /* Reset RX timeout to ensure we wait for an incoming CLOSE for the + * full timeout. If we get a CLOSE we know the peer has stopped all + * RDMA. Otherwise if we wait for the full timeout we can also be sure + * all RDMA has stopped. */ + conn->rac_last_rx = jiffies; + smp_mb(); - conn->rac_state = RANAL_CONN_CLOSING; - kranal_schedule_conn(conn); /* schedule sending CLOSE */ + conn->rac_state = RANAL_CONN_CLOSING; + kranal_schedule_conn(conn); /* schedule sending CLOSE */ - kranal_conn_decref(conn); /* lose peer's ref */ + kranal_conn_decref(conn); /* lose peer's ref */ } void @@ -430,56 +418,56 @@ kranal_close_conn (kra_conn_t *conn, int error) unsigned long flags; - write_lock_irqsave(&kranal_data.kra_global_lock, flags); + write_lock_irqsave(&kranal_data.kra_global_lock, flags); if (conn->rac_state == RANAL_CONN_ESTABLISHED) kranal_close_conn_locked(conn, error); - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); + write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); } int kranal_set_conn_params(kra_conn_t *conn, kra_connreq_t *connreq, __u32 peer_ip, int peer_port) { - kra_device_t *dev = conn->rac_device; - unsigned long flags; - RAP_RETURN rrc; - - /* CAVEAT EMPTOR: we're really overloading rac_last_tx + rac_keepalive - * to do RapkCompleteSync() timekeeping (see kibnal_scheduler). 
*/ - conn->rac_last_tx = jiffies; - conn->rac_keepalive = 0; - - rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams); - if (rrc != RAP_SUCCESS) { - CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n", - HIPQUAD(peer_ip), peer_port, rrc); - return -ECONNABORTED; - } - - /* Schedule conn on rad_new_conns */ - kranal_conn_addref(conn); - spin_lock_irqsave(&dev->rad_lock, flags); - list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns); - wake_up(&dev->rad_waitq); - spin_unlock_irqrestore(&dev->rad_lock, flags); - - rrc = RapkWaitToConnect(conn->rac_rihandle); - if (rrc != RAP_SUCCESS) { - CERROR("Error waiting to connect to %u.%u.%u.%u/%d: %d\n", - HIPQUAD(peer_ip), peer_port, rrc); - return -ECONNABORTED; - } - - /* Scheduler doesn't touch conn apart from to deschedule and decref it - * after RapkCompleteSync() return success, so conn is all mine */ - - conn->rac_peerstamp = connreq->racr_peerstamp; - conn->rac_peer_connstamp = connreq->racr_connstamp; - conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout); - kranal_update_reaper_timeout(conn->rac_keepalive); - return 0; + kra_device_t *dev = conn->rac_device; + unsigned long flags; + RAP_RETURN rrc; + + /* CAVEAT EMPTOR: we're really overloading rac_last_tx + rac_keepalive + * to do RapkCompleteSync() timekeeping (see kibnal_scheduler). */ + conn->rac_last_tx = jiffies; + conn->rac_keepalive = 0; + + rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams); + if (rrc != RAP_SUCCESS) { + CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n", + HIPQUAD(peer_ip), peer_port, rrc); + return -ECONNABORTED; + } + + /* Schedule conn on rad_new_conns */ + kranal_conn_addref(conn); + spin_lock_irqsave(&dev->rad_lock, flags); + cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns); + wake_up(&dev->rad_waitq); + spin_unlock_irqrestore(&dev->rad_lock, flags); + + rrc = RapkWaitToConnect(conn->rac_rihandle); + if (rrc != RAP_SUCCESS) { + CERROR("Error waiting to connect to %u.%u.%u.%u/%d: %d\n", + HIPQUAD(peer_ip), peer_port, rrc); + return -ECONNABORTED; + } + + /* Scheduler doesn't touch conn apart from to deschedule and decref it + * after RapkCompleteSync() return success, so conn is all mine */ + + conn->rac_peerstamp = connreq->racr_peerstamp; + conn->rac_peer_connstamp = connreq->racr_connstamp; + conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout); + kranal_update_reaper_timeout(conn->rac_keepalive); + return 0; } int @@ -665,8 +653,8 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer) { kra_peer_t *peer2; kra_tx_t *tx; - lnet_nid_t peer_nid; - lnet_nid_t dst_nid; + lnet_nid_t peer_nid; + lnet_nid_t dst_nid; unsigned long flags; kra_conn_t *conn; int rc; @@ -682,12 +670,13 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer) if (rc != 0) return rc; - write_lock_irqsave(&kranal_data.kra_global_lock, flags); + write_lock_irqsave(&kranal_data.kra_global_lock, flags); if (!kranal_peer_active(peer)) { /* raced with peer getting unlinked */ - write_unlock_irqrestore(&kranal_data.kra_global_lock, - flags); + write_unlock_irqrestore(&kranal_data. 
\ + kra_global_lock, + flags); kranal_conn_decref(conn); return -ESTALE; } @@ -711,7 +700,7 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer) return -ENOMEM; } - write_lock_irqsave(&kranal_data.kra_global_lock, flags); + write_lock_irqsave(&kranal_data.kra_global_lock, flags); peer2 = kranal_find_peer_locked(peer_nid); if (peer2 == NULL) { @@ -728,8 +717,9 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer) /* Refuse connection if peer thinks we are a different NID. We check * this while holding the global lock, to synch with connection * destruction on NID change. */ - if (!lnet_ptlcompat_matchnid(kranal_data.kra_ni->ni_nid, dst_nid)) { - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); + if (kranal_data.kra_ni->ni_nid != dst_nid) { + write_unlock_irqrestore(&kranal_data.kra_global_lock, + flags); CERROR("Stale/bad connection with %s: dst_nid %s, expected %s\n", libcfs_nid2str(peer_nid), libcfs_nid2str(dst_nid), @@ -743,9 +733,10 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer) * _don't_ have any blocked txs to complete with failure. */ rc = kranal_conn_isdup_locked(peer, conn); if (rc != 0) { - LASSERT (!list_empty(&peer->rap_conns)); - LASSERT (list_empty(&peer->rap_tx_queue)); - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); + LASSERT (!cfs_list_empty(&peer->rap_conns)); + LASSERT (cfs_list_empty(&peer->rap_tx_queue)); + write_unlock_irqrestore(&kranal_data.kra_global_lock, + flags); CWARN("Not creating duplicate connection to %s: %d\n", libcfs_nid2str(peer_nid), rc); rc = 0; @@ -754,8 +745,8 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer) if (new_peer) { /* peer table takes my ref on the new peer */ - list_add_tail(&peer->rap_list, - kranal_nid2peerlist(peer_nid)); + cfs_list_add_tail(&peer->rap_list, + kranal_nid2peerlist(peer_nid)); } /* initialise timestamps before reaper looks at them */ @@ -763,24 +754,24 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer) kranal_peer_addref(peer); /* +1 ref for conn */ conn->rac_peer = peer; - list_add_tail(&conn->rac_list, &peer->rap_conns); + cfs_list_add_tail(&conn->rac_list, &peer->rap_conns); kranal_conn_addref(conn); /* +1 ref for conn table */ - list_add_tail(&conn->rac_hashlist, - kranal_cqid2connlist(conn->rac_cqid)); + cfs_list_add_tail(&conn->rac_hashlist, + kranal_cqid2connlist(conn->rac_cqid)); /* Schedule all packets blocking for a connection */ - while (!list_empty(&peer->rap_tx_queue)) { - tx = list_entry(peer->rap_tx_queue.next, - kra_tx_t, tx_list); + while (!cfs_list_empty(&peer->rap_tx_queue)) { + tx = cfs_list_entry(peer->rap_tx_queue.next, + kra_tx_t, tx_list); - list_del(&tx->tx_list); + cfs_list_del(&tx->tx_list); kranal_post_fma(conn, tx); } nstale = kranal_close_stale_conns_locked(peer, conn); - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); + write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); /* CAVEAT EMPTOR: passive peer can disappear NOW */ @@ -809,7 +800,7 @@ kranal_connect (kra_peer_t *peer) { kra_tx_t *tx; unsigned long flags; - struct list_head zombies; + cfs_list_t zombies; int rc; LASSERT (peer->rap_connecting); @@ -822,7 +813,7 @@ kranal_connect (kra_peer_t *peer) CDEBUG(D_NET, "Done handshake %s:%d \n", libcfs_nid2str(peer->rap_nid), rc); - write_lock_irqsave(&kranal_data.kra_global_lock, flags); + write_lock_irqsave(&kranal_data.kra_global_lock, flags); LASSERT (peer->rap_connecting); peer->rap_connecting = 0; @@ -830,11 +821,12 @@ kranal_connect (kra_peer_t *peer) if (rc == 0) { 
/* kranal_conn_handshake() queues blocked txs immediately on * success to avoid messages jumping the queue */ - LASSERT (list_empty(&peer->rap_tx_queue)); + LASSERT (cfs_list_empty(&peer->rap_tx_queue)); peer->rap_reconnect_interval = 0; /* OK to reconnect at any time */ - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); + write_unlock_irqrestore(&kranal_data.kra_global_lock, + flags); return; } @@ -846,27 +838,27 @@ kranal_connect (kra_peer_t *peer) MIN(peer->rap_reconnect_interval, *kranal_tunables.kra_max_reconnect_interval); - peer->rap_reconnect_time = jiffies + peer->rap_reconnect_interval * HZ; + peer->rap_reconnect_time = jiffies + peer->rap_reconnect_interval * HZ; /* Grab all blocked packets while we have the global lock */ - list_add(&zombies, &peer->rap_tx_queue); - list_del_init(&peer->rap_tx_queue); + cfs_list_add(&zombies, &peer->rap_tx_queue); + cfs_list_del_init(&peer->rap_tx_queue); - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); + write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); - if (list_empty(&zombies)) + if (cfs_list_empty(&zombies)) return; - CDEBUG(D_NETERROR, "Dropping packets for %s: connection failed\n", - libcfs_nid2str(peer->rap_nid)); + CNETERR("Dropping packets for %s: connection failed\n", + libcfs_nid2str(peer->rap_nid)); do { - tx = list_entry(zombies.next, kra_tx_t, tx_list); + tx = cfs_list_entry(zombies.next, kra_tx_t, tx_list); - list_del(&tx->tx_list); + cfs_list_del(&tx->tx_list); kranal_tx_done(tx, -EHOSTUNREACH); - } while (!list_empty(&zombies)); + } while (!cfs_list_empty(&zombies)); } void @@ -879,31 +871,31 @@ kranal_free_acceptsock (kra_acceptsock_t *ras) int kranal_accept (lnet_ni_t *ni, struct socket *sock) { - kra_acceptsock_t *ras; - int rc; - __u32 peer_ip; - int peer_port; - unsigned long flags; + kra_acceptsock_t *ras; + int rc; + __u32 peer_ip; + int peer_port; + unsigned long flags; - rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port); - LASSERT (rc == 0); /* we succeeded before */ + rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port); + LASSERT (rc == 0); /* we succeeded before */ - LIBCFS_ALLOC(ras, sizeof(*ras)); - if (ras == NULL) { - CERROR("ENOMEM allocating connection request from " - "%u.%u.%u.%u\n", HIPQUAD(peer_ip)); - return -ENOMEM; - } + LIBCFS_ALLOC(ras, sizeof(*ras)); + if (ras == NULL) { + CERROR("ENOMEM allocating connection request from " + "%u.%u.%u.%u\n", HIPQUAD(peer_ip)); + return -ENOMEM; + } - ras->ras_sock = sock; + ras->ras_sock = sock; - spin_lock_irqsave(&kranal_data.kra_connd_lock, flags); + spin_lock_irqsave(&kranal_data.kra_connd_lock, flags); - list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq); - wake_up(&kranal_data.kra_connd_waitq); + cfs_list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq); + wake_up(&kranal_data.kra_connd_waitq); - spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags); - return 0; + spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags); + return 0; } int @@ -921,29 +913,30 @@ kranal_create_peer (kra_peer_t **peerp, lnet_nid_t nid) memset(peer, 0, sizeof(*peer)); /* zero flags etc */ peer->rap_nid = nid; - atomic_set(&peer->rap_refcount, 1); /* 1 ref for caller */ + atomic_set(&peer->rap_refcount, 1); /* 1 ref for caller */ - INIT_LIST_HEAD(&peer->rap_list); - INIT_LIST_HEAD(&peer->rap_connd_list); - INIT_LIST_HEAD(&peer->rap_conns); - INIT_LIST_HEAD(&peer->rap_tx_queue); + CFS_INIT_LIST_HEAD(&peer->rap_list); + CFS_INIT_LIST_HEAD(&peer->rap_connd_list); + CFS_INIT_LIST_HEAD(&peer->rap_conns); + 
CFS_INIT_LIST_HEAD(&peer->rap_tx_queue); peer->rap_reconnect_interval = 0; /* OK to connect at any time */ - write_lock_irqsave(&kranal_data.kra_global_lock, flags); + write_lock_irqsave(&kranal_data.kra_global_lock, flags); if (kranal_data.kra_nonewpeers) { /* shutdown has started already */ - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); - + write_unlock_irqrestore(&kranal_data.kra_global_lock, + flags); + LIBCFS_FREE(peer, sizeof(*peer)); CERROR("Can't create peer: network shutdown\n"); return -ESHUTDOWN; } - atomic_inc(&kranal_data.kra_npeers); + atomic_inc(&kranal_data.kra_npeers); - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); + write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); *peerp = peer; return 0; @@ -955,13 +948,13 @@ kranal_destroy_peer (kra_peer_t *peer) CDEBUG(D_NET, "peer %s %p deleted\n", libcfs_nid2str(peer->rap_nid), peer); - LASSERT (atomic_read(&peer->rap_refcount) == 0); + LASSERT (atomic_read(&peer->rap_refcount) == 0); LASSERT (peer->rap_persistence == 0); LASSERT (!kranal_peer_active(peer)); LASSERT (!peer->rap_connecting); - LASSERT (list_empty(&peer->rap_conns)); - LASSERT (list_empty(&peer->rap_tx_queue)); - LASSERT (list_empty(&peer->rap_connd_list)); + LASSERT (cfs_list_empty(&peer->rap_conns)); + LASSERT (cfs_list_empty(&peer->rap_tx_queue)); + LASSERT (cfs_list_empty(&peer->rap_connd_list)); LIBCFS_FREE(peer, sizeof(*peer)); @@ -969,29 +962,29 @@ kranal_destroy_peer (kra_peer_t *peer) * they are destroyed, so we can be assured that _all_ state to do * with this peer has been cleaned up when its refcount drops to * zero. */ - atomic_dec(&kranal_data.kra_npeers); + atomic_dec(&kranal_data.kra_npeers); } kra_peer_t * kranal_find_peer_locked (lnet_nid_t nid) { - struct list_head *peer_list = kranal_nid2peerlist(nid); - struct list_head *tmp; + cfs_list_t *peer_list = kranal_nid2peerlist(nid); + cfs_list_t *tmp; kra_peer_t *peer; - list_for_each (tmp, peer_list) { + cfs_list_for_each (tmp, peer_list) { - peer = list_entry(tmp, kra_peer_t, rap_list); + peer = cfs_list_entry(tmp, kra_peer_t, rap_list); LASSERT (peer->rap_persistence > 0 || /* persistent peer */ - !list_empty(&peer->rap_conns)); /* active conn */ + !cfs_list_empty(&peer->rap_conns)); /* active conn */ if (peer->rap_nid != nid) continue; CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n", peer, libcfs_nid2str(nid), - atomic_read(&peer->rap_refcount)); + atomic_read(&peer->rap_refcount)); return peer; } return NULL; @@ -1002,11 +995,11 @@ kranal_find_peer (lnet_nid_t nid) { kra_peer_t *peer; - read_lock(&kranal_data.kra_global_lock); + read_lock(&kranal_data.kra_global_lock); peer = kranal_find_peer_locked(nid); if (peer != NULL) /* +1 ref for caller? 
*/ kranal_peer_addref(peer); - read_unlock(&kranal_data.kra_global_lock); + read_unlock(&kranal_data.kra_global_lock); return peer; } @@ -1015,10 +1008,10 @@ void kranal_unlink_peer_locked (kra_peer_t *peer) { LASSERT (peer->rap_persistence == 0); - LASSERT (list_empty(&peer->rap_conns)); + LASSERT (cfs_list_empty(&peer->rap_conns)); LASSERT (kranal_peer_active(peer)); - list_del_init(&peer->rap_list); + cfs_list_del_init(&peer->rap_list); /* lose peerlist's ref */ kranal_peer_decref(peer); @@ -1029,18 +1022,18 @@ kranal_get_peer_info (int index, lnet_nid_t *nidp, __u32 *ipp, int *portp, int *persistencep) { kra_peer_t *peer; - struct list_head *ptmp; + cfs_list_t *ptmp; int i; - read_lock(&kranal_data.kra_global_lock); + read_lock(&kranal_data.kra_global_lock); for (i = 0; i < kranal_data.kra_peer_hash_size; i++) { - list_for_each(ptmp, &kranal_data.kra_peers[i]) { + cfs_list_for_each(ptmp, &kranal_data.kra_peers[i]) { - peer = list_entry(ptmp, kra_peer_t, rap_list); + peer = cfs_list_entry(ptmp, kra_peer_t, rap_list); LASSERT (peer->rap_persistence > 0 || - !list_empty(&peer->rap_conns)); + !cfs_list_empty(&peer->rap_conns)); if (index-- > 0) continue; @@ -1050,12 +1043,12 @@ kranal_get_peer_info (int index, lnet_nid_t *nidp, __u32 *ipp, int *portp, *portp = peer->rap_port; *persistencep = peer->rap_persistence; - read_unlock(&kranal_data.kra_global_lock); + read_unlock(&kranal_data.kra_global_lock); return 0; } } - read_unlock(&kranal_data.kra_global_lock); + read_unlock(&kranal_data.kra_global_lock); return -ENOENT; } @@ -1074,7 +1067,7 @@ kranal_add_persistent_peer (lnet_nid_t nid, __u32 ip, int port) if (rc != 0) return rc; - write_lock_irqsave(&kranal_data.kra_global_lock, flags); + write_lock_irqsave(&kranal_data.kra_global_lock, flags); peer2 = kranal_find_peer_locked(nid); if (peer2 != NULL) { @@ -1082,7 +1075,7 @@ kranal_add_persistent_peer (lnet_nid_t nid, __u32 ip, int port) peer = peer2; } else { /* peer table takes existing ref on peer */ - list_add_tail(&peer->rap_list, + cfs_list_add_tail(&peer->rap_list, kranal_nid2peerlist(nid)); } @@ -1090,24 +1083,24 @@ kranal_add_persistent_peer (lnet_nid_t nid, __u32 ip, int port) peer->rap_port = port; peer->rap_persistence++; - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); + write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); return 0; } void kranal_del_peer_locked (kra_peer_t *peer) { - struct list_head *ctmp; - struct list_head *cnxt; + cfs_list_t *ctmp; + cfs_list_t *cnxt; kra_conn_t *conn; peer->rap_persistence = 0; - if (list_empty(&peer->rap_conns)) { + if (cfs_list_empty(&peer->rap_conns)) { kranal_unlink_peer_locked(peer); } else { - list_for_each_safe(ctmp, cnxt, &peer->rap_conns) { - conn = list_entry(ctmp, kra_conn_t, rac_list); + cfs_list_for_each_safe(ctmp, cnxt, &peer->rap_conns) { + conn = cfs_list_entry(ctmp, kra_conn_t, rac_list); kranal_close_conn_locked(conn, 0); } @@ -1119,15 +1112,15 @@ int kranal_del_peer (lnet_nid_t nid) { unsigned long flags; - struct list_head *ptmp; - struct list_head *pnxt; + cfs_list_t *ptmp; + cfs_list_t *pnxt; kra_peer_t *peer; int lo; int hi; int i; int rc = -ENOENT; - write_lock_irqsave(&kranal_data.kra_global_lock, flags); + write_lock_irqsave(&kranal_data.kra_global_lock, flags); if (nid != LNET_NID_ANY) lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers; @@ -1137,10 +1130,10 @@ kranal_del_peer (lnet_nid_t nid) } for (i = lo; i <= hi; i++) { - list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) { - peer = list_entry(ptmp, kra_peer_t, 
rap_list); + cfs_list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) { + peer = cfs_list_entry(ptmp, kra_peer_t, rap_list); LASSERT (peer->rap_persistence > 0 || - !list_empty(&peer->rap_conns)); + !cfs_list_empty(&peer->rap_conns)); if (!(nid == LNET_NID_ANY || peer->rap_nid == nid)) continue; @@ -1150,7 +1143,7 @@ kranal_del_peer (lnet_nid_t nid) } } - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); + write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); return rc; } @@ -1159,36 +1152,37 @@ kra_conn_t * kranal_get_conn_by_idx (int index) { kra_peer_t *peer; - struct list_head *ptmp; + cfs_list_t *ptmp; kra_conn_t *conn; - struct list_head *ctmp; + cfs_list_t *ctmp; int i; - read_lock (&kranal_data.kra_global_lock); + read_lock(&kranal_data.kra_global_lock); for (i = 0; i < kranal_data.kra_peer_hash_size; i++) { - list_for_each (ptmp, &kranal_data.kra_peers[i]) { + cfs_list_for_each (ptmp, &kranal_data.kra_peers[i]) { - peer = list_entry(ptmp, kra_peer_t, rap_list); + peer = cfs_list_entry(ptmp, kra_peer_t, rap_list); LASSERT (peer->rap_persistence > 0 || - !list_empty(&peer->rap_conns)); + !cfs_list_empty(&peer->rap_conns)); - list_for_each (ctmp, &peer->rap_conns) { + cfs_list_for_each (ctmp, &peer->rap_conns) { if (index-- > 0) continue; - conn = list_entry(ctmp, kra_conn_t, rac_list); - CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn, + conn = cfs_list_entry(ctmp, kra_conn_t, + rac_list); + CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn, libcfs_nid2str(conn->rac_peer->rap_nid), - atomic_read(&conn->rac_refcount)); - atomic_inc(&conn->rac_refcount); - read_unlock(&kranal_data.kra_global_lock); + atomic_read(&conn->rac_refcount)); + atomic_inc(&conn->rac_refcount); + read_unlock(&kranal_data.kra_global_lock); return conn; } } } - read_unlock(&kranal_data.kra_global_lock); + read_unlock(&kranal_data.kra_global_lock); return NULL; } @@ -1196,12 +1190,12 @@ int kranal_close_peer_conns_locked (kra_peer_t *peer, int why) { kra_conn_t *conn; - struct list_head *ctmp; - struct list_head *cnxt; + cfs_list_t *ctmp; + cfs_list_t *cnxt; int count = 0; - list_for_each_safe (ctmp, cnxt, &peer->rap_conns) { - conn = list_entry(ctmp, kra_conn_t, rac_list); + cfs_list_for_each_safe (ctmp, cnxt, &peer->rap_conns) { + conn = cfs_list_entry(ctmp, kra_conn_t, rac_list); count++; kranal_close_conn_locked(conn, why); @@ -1215,14 +1209,14 @@ kranal_close_matching_conns (lnet_nid_t nid) { unsigned long flags; kra_peer_t *peer; - struct list_head *ptmp; - struct list_head *pnxt; + cfs_list_t *ptmp; + cfs_list_t *pnxt; int lo; int hi; int i; int count = 0; - write_lock_irqsave(&kranal_data.kra_global_lock, flags); + write_lock_irqsave(&kranal_data.kra_global_lock, flags); if (nid != LNET_NID_ANY) lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers; @@ -1232,11 +1226,11 @@ kranal_close_matching_conns (lnet_nid_t nid) } for (i = lo; i <= hi; i++) { - list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) { + cfs_list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) { - peer = list_entry(ptmp, kra_peer_t, rap_list); + peer = cfs_list_entry(ptmp, kra_peer_t, rap_list); LASSERT (peer->rap_persistence > 0 || - !list_empty(&peer->rap_conns)); + !cfs_list_empty(&peer->rap_conns)); if (!(nid == LNET_NID_ANY || nid == peer->rap_nid)) continue; @@ -1245,7 +1239,7 @@ kranal_close_matching_conns (lnet_nid_t nid) } } - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); + write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); /* wildcards always succeed */ if (nid == 
LNET_NID_ANY) @@ -1322,27 +1316,27 @@ kranal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) } void -kranal_free_txdescs(struct list_head *freelist) +kranal_free_txdescs(cfs_list_t *freelist) { kra_tx_t *tx; - while (!list_empty(freelist)) { - tx = list_entry(freelist->next, kra_tx_t, tx_list); + while (!cfs_list_empty(freelist)) { + tx = cfs_list_entry(freelist->next, kra_tx_t, tx_list); - list_del(&tx->tx_list); + cfs_list_del(&tx->tx_list); LIBCFS_FREE(tx->tx_phys, LNET_MAX_IOV * sizeof(*tx->tx_phys)); LIBCFS_FREE(tx, sizeof(*tx)); } } int -kranal_alloc_txdescs(struct list_head *freelist, int n) +kranal_alloc_txdescs(cfs_list_t *freelist, int n) { int i; kra_tx_t *tx; LASSERT (freelist == &kranal_data.kra_idle_txs); - LASSERT (list_empty(freelist)); + LASSERT (cfs_list_empty(freelist)); for (i = 0; i < n; i++) { @@ -1366,7 +1360,7 @@ kranal_alloc_txdescs(struct list_head *freelist, int n) tx->tx_buftype = RANAL_BUF_NONE; tx->tx_msg.ram_type = RANAL_MSG_NONE; - list_add(&tx->tx_list, freelist); + cfs_list_add(&tx->tx_list, freelist); } return 0; @@ -1423,13 +1417,13 @@ kranal_device_init(int id, kra_device_t *dev) void kranal_device_fini(kra_device_t *dev) { - LASSERT (list_empty(&dev->rad_ready_conns)); - LASSERT (list_empty(&dev->rad_new_conns)); + LASSERT (cfs_list_empty(&dev->rad_ready_conns)); + LASSERT (cfs_list_empty(&dev->rad_new_conns)); LASSERT (dev->rad_nphysmap == 0); LASSERT (dev->rad_nppphysmap == 0); LASSERT (dev->rad_nvirtmap == 0); LASSERT (dev->rad_nobvirtmap == 0); - + LASSERT(dev->rad_scheduler == NULL); RapkDestroyCQ(dev->rad_handle, dev->rad_fma_cqh); RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh); @@ -1443,7 +1437,7 @@ kranal_shutdown (lnet_ni_t *ni) unsigned long flags; CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n", - atomic_read(&libcfs_kmemory)); + atomic_read(&libcfs_kmemory)); LASSERT (ni == kranal_data.kra_ni); LASSERT (ni->ni_data == &kranal_data); @@ -1455,35 +1449,37 @@ kranal_shutdown (lnet_ni_t *ni) case RANAL_INIT_ALL: /* Prevent new peers from being created */ - write_lock_irqsave(&kranal_data.kra_global_lock, flags); + write_lock_irqsave(&kranal_data.kra_global_lock, flags); kranal_data.kra_nonewpeers = 1; - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); - + write_unlock_irqrestore(&kranal_data.kra_global_lock, + flags); + /* Remove all existing peers from the peer table */ kranal_del_peer(LNET_NID_ANY); /* Wait for pending conn reqs to be handled */ i = 2; - spin_lock_irqsave(&kranal_data.kra_connd_lock, flags); - while (!list_empty(&kranal_data.kra_connd_acceptq)) { - spin_unlock_irqrestore(&kranal_data.kra_connd_lock, - flags); + spin_lock_irqsave(&kranal_data.kra_connd_lock, flags); + while (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) { + spin_unlock_irqrestore(&kranal_data.kra_connd_lock, + flags); i++; CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */ "waiting for conn reqs to clean up\n"); cfs_pause(cfs_time_seconds(1)); - spin_lock_irqsave(&kranal_data.kra_connd_lock, flags); + spin_lock_irqsave(&kranal_data.kra_connd_lock, + flags); } - spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags); + spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags); /* Wait for all peers to be freed */ i = 2; - while (atomic_read(&kranal_data.kra_npeers) != 0) { + while (atomic_read(&kranal_data.kra_npeers) != 0) { i++; CDEBUG(((i & (-i)) == i) ? 
D_WARNING : D_NET, /* 2**n */ "waiting for %d peers to close down\n", - atomic_read(&kranal_data.kra_npeers)); + atomic_read(&kranal_data.kra_npeers)); cfs_pause(cfs_time_seconds(1)); } /* fall through */ @@ -1497,55 +1493,55 @@ kranal_shutdown (lnet_ni_t *ni) * while there are still active connds, but these will be temporary * since peer creation always fails after the listener has started to * shut down. */ - LASSERT (atomic_read(&kranal_data.kra_npeers) == 0); + LASSERT (atomic_read(&kranal_data.kra_npeers) == 0); /* Flag threads to terminate */ kranal_data.kra_shutdown = 1; - for (i = 0; i < kranal_data.kra_ndevs; i++) { - kra_device_t *dev = &kranal_data.kra_devices[i]; + for (i = 0; i < kranal_data.kra_ndevs; i++) { + kra_device_t *dev = &kranal_data.kra_devices[i]; - spin_lock_irqsave(&dev->rad_lock, flags); - wake_up(&dev->rad_waitq); - spin_unlock_irqrestore(&dev->rad_lock, flags); - } + spin_lock_irqsave(&dev->rad_lock, flags); + wake_up(&dev->rad_waitq); + spin_unlock_irqrestore(&dev->rad_lock, flags); + } - spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags); - wake_up_all(&kranal_data.kra_reaper_waitq); - spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags); + spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags); + wake_up_all(&kranal_data.kra_reaper_waitq); + spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags); - LASSERT (list_empty(&kranal_data.kra_connd_peers)); - spin_lock_irqsave(&kranal_data.kra_connd_lock, flags); - wake_up_all(&kranal_data.kra_connd_waitq); - spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags); + LASSERT (cfs_list_empty(&kranal_data.kra_connd_peers)); + spin_lock_irqsave(&kranal_data.kra_connd_lock, flags); + wake_up_all(&kranal_data.kra_connd_waitq); + spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags); /* Wait for threads to exit */ i = 2; - while (atomic_read(&kranal_data.kra_nthreads) != 0) { + while (atomic_read(&kranal_data.kra_nthreads) != 0) { i++; CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? 
*/ "Waiting for %d threads to terminate\n", - atomic_read(&kranal_data.kra_nthreads)); + atomic_read(&kranal_data.kra_nthreads)); cfs_pause(cfs_time_seconds(1)); } - LASSERT (atomic_read(&kranal_data.kra_npeers) == 0); + LASSERT (atomic_read(&kranal_data.kra_npeers) == 0); if (kranal_data.kra_peers != NULL) { for (i = 0; i < kranal_data.kra_peer_hash_size; i++) - LASSERT (list_empty(&kranal_data.kra_peers[i])); + LASSERT (cfs_list_empty(&kranal_data.kra_peers[i])); LIBCFS_FREE(kranal_data.kra_peers, - sizeof (struct list_head) * + sizeof (cfs_list_t) * kranal_data.kra_peer_hash_size); } - LASSERT (atomic_read(&kranal_data.kra_nconns) == 0); + LASSERT (atomic_read(&kranal_data.kra_nconns) == 0); if (kranal_data.kra_conns != NULL) { for (i = 0; i < kranal_data.kra_conn_hash_size; i++) - LASSERT (list_empty(&kranal_data.kra_conns[i])); + LASSERT (cfs_list_empty(&kranal_data.kra_conns[i])); LIBCFS_FREE(kranal_data.kra_conns, - sizeof (struct list_head) * + sizeof (cfs_list_t) * kranal_data.kra_conn_hash_size); } @@ -1555,20 +1551,21 @@ kranal_shutdown (lnet_ni_t *ni) kranal_free_txdescs(&kranal_data.kra_idle_txs); CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n", - atomic_read(&libcfs_kmemory)); + atomic_read(&libcfs_kmemory)); - kranal_data.kra_init = RANAL_INIT_NOTHING; - PORTAL_MODULE_UNUSE; + kranal_data.kra_init = RANAL_INIT_NOTHING; + module_put(THIS_MODULE); } int kranal_startup (lnet_ni_t *ni) { struct timeval tv; - int pkmem = atomic_read(&libcfs_kmemory); + int pkmem = atomic_read(&libcfs_kmemory); int rc; int i; kra_device_t *dev; + char name[16]; LASSERT (ni->ni_lnd == &the_kralnd); @@ -1598,75 +1595,79 @@ kranal_startup (lnet_ni_t *ni) ni->ni_data = &kranal_data; kranal_data.kra_ni = ni; - /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and - * a unique (for all time) connstamp so we can uniquely identify - * the sender. The connstamp is an incrementing counter - * initialised with seconds + microseconds at startup time. So we - * rely on NOT creating connections more frequently on average than - * 1MHz to ensure we don't use old connstamps when we reboot. */ - do_gettimeofday(&tv); - kranal_data.kra_connstamp = - kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec; + /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and + * a unique (for all time) connstamp so we can uniquely identify + * the sender. The connstamp is an incrementing counter + * initialised with seconds + microseconds at startup time. So we + * rely on NOT creating connections more frequently on average than + * 1MHz to ensure we don't use old connstamps when we reboot. 
*/ + do_gettimeofday(&tv); + kranal_data.kra_connstamp = + kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec; - rwlock_init(&kranal_data.kra_global_lock); + rwlock_init(&kranal_data.kra_global_lock); - for (i = 0; i < RANAL_MAXDEVS; i++ ) { - kra_device_t *dev = &kranal_data.kra_devices[i]; + for (i = 0; i < RANAL_MAXDEVS; i++ ) { + kra_device_t *dev = &kranal_data.kra_devices[i]; - dev->rad_idx = i; - INIT_LIST_HEAD(&dev->rad_ready_conns); - INIT_LIST_HEAD(&dev->rad_new_conns); - init_waitqueue_head(&dev->rad_waitq); - spin_lock_init(&dev->rad_lock); - } + dev->rad_idx = i; + CFS_INIT_LIST_HEAD(&dev->rad_ready_conns); + CFS_INIT_LIST_HEAD(&dev->rad_new_conns); + init_waitqueue_head(&dev->rad_waitq); + spin_lock_init(&dev->rad_lock); + } - kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT; - init_waitqueue_head(&kranal_data.kra_reaper_waitq); - spin_lock_init(&kranal_data.kra_reaper_lock); + kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT; + init_waitqueue_head(&kranal_data.kra_reaper_waitq); + spin_lock_init(&kranal_data.kra_reaper_lock); - INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq); - INIT_LIST_HEAD(&kranal_data.kra_connd_peers); - init_waitqueue_head(&kranal_data.kra_connd_waitq); - spin_lock_init(&kranal_data.kra_connd_lock); + CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq); + CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_peers); + init_waitqueue_head(&kranal_data.kra_connd_waitq); + spin_lock_init(&kranal_data.kra_connd_lock); - INIT_LIST_HEAD(&kranal_data.kra_idle_txs); - spin_lock_init(&kranal_data.kra_tx_lock); + CFS_INIT_LIST_HEAD(&kranal_data.kra_idle_txs); + spin_lock_init(&kranal_data.kra_tx_lock); - /* OK to call kranal_api_shutdown() to cleanup now */ - kranal_data.kra_init = RANAL_INIT_DATA; - PORTAL_MODULE_USE; + /* OK to call kranal_api_shutdown() to cleanup now */ + kranal_data.kra_init = RANAL_INIT_DATA; + try_module_get(THIS_MODULE); kranal_data.kra_peer_hash_size = RANAL_PEER_HASH_SIZE; LIBCFS_ALLOC(kranal_data.kra_peers, - sizeof(struct list_head) * kranal_data.kra_peer_hash_size); + sizeof(cfs_list_t) * + kranal_data.kra_peer_hash_size); if (kranal_data.kra_peers == NULL) goto failed; for (i = 0; i < kranal_data.kra_peer_hash_size; i++) - INIT_LIST_HEAD(&kranal_data.kra_peers[i]); + CFS_INIT_LIST_HEAD(&kranal_data.kra_peers[i]); kranal_data.kra_conn_hash_size = RANAL_PEER_HASH_SIZE; LIBCFS_ALLOC(kranal_data.kra_conns, - sizeof(struct list_head) * kranal_data.kra_conn_hash_size); + sizeof(cfs_list_t) * + kranal_data.kra_conn_hash_size); if (kranal_data.kra_conns == NULL) goto failed; for (i = 0; i < kranal_data.kra_conn_hash_size; i++) - INIT_LIST_HEAD(&kranal_data.kra_conns[i]); + CFS_INIT_LIST_HEAD(&kranal_data.kra_conns[i]); rc = kranal_alloc_txdescs(&kranal_data.kra_idle_txs, *kranal_tunables.kra_ntx); if (rc != 0) goto failed; - rc = kranal_thread_start(kranal_reaper, NULL); + rc = kranal_thread_start(kranal_reaper, NULL, "kranal_reaper"); if (rc != 0) { CERROR("Can't spawn ranal reaper: %d\n", rc); goto failed; } for (i = 0; i < *kranal_tunables.kra_n_connd; i++) { - rc = kranal_thread_start(kranal_connd, (void *)(unsigned long)i); + snprintf(name, sizeof(name), "kranal_connd_%02ld", i); + rc = kranal_thread_start(kranal_connd, + (void *)(unsigned long)i, name); if (rc != 0) { CERROR("Can't spawn ranal connd[%d]: %d\n", i, rc); @@ -1692,7 +1693,8 @@ kranal_startup (lnet_ni_t *ni) for (i = 0; i < kranal_data.kra_ndevs; i++) { dev = &kranal_data.kra_devices[i]; - rc = kranal_thread_start(kranal_scheduler, dev); + 
snprintf(name, sizeof(name), "kranal_sd_%02d", dev->rad_idx);
+                rc = kranal_thread_start(kranal_scheduler, dev, name);
                 if (rc != 0) {
                         CERROR("Can't spawn ranal scheduler[%d]: %d\n",
                                i, rc);
@@ -1733,7 +1735,7 @@ kranal_module_init (void)
         return 0;
 }
 
-MODULE_AUTHOR("Cluster File Systems, Inc. ");
+MODULE_AUTHOR("Sun Microsystems, Inc. ");
 MODULE_DESCRIPTION("Kernel RapidArray LND v0.01");
 MODULE_LICENSE("GPL");