-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
{
unsigned long flags;
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
conn->rac_my_connstamp = kranal_data.kra_connstamp++;
conn->rac_cqid = kranal_data.kra_next_cqid++;
} while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL);
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
}
int
CFS_INIT_LIST_HEAD(&conn->rac_fmaq);
CFS_INIT_LIST_HEAD(&conn->rac_rdmaq);
CFS_INIT_LIST_HEAD(&conn->rac_replyq);
- cfs_spin_lock_init(&conn->rac_lock);
+ spin_lock_init(&conn->rac_lock);
kranal_set_conn_uniqueness(conn);
{
kra_peer_t *peer = conn->rac_peer;
- CDEBUG(error == 0 ? D_NET : D_NETERROR,
- "closing conn to %s: error %d\n",
- libcfs_nid2str(peer->rap_nid), error);
+ CDEBUG_LIMIT(error == 0 ? D_NET : D_NETERROR,
+ "closing conn to %s: error %d\n",
+ libcfs_nid2str(peer->rap_nid), error);
LASSERT (!cfs_in_interrupt());
LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED);
unsigned long flags;
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (conn->rac_state == RANAL_CONN_ESTABLISHED)
kranal_close_conn_locked(conn, error);
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
}
int
/* Schedule conn on rad_new_conns */
kranal_conn_addref(conn);
- cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+ spin_lock_irqsave(&dev->rad_lock, flags);
cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
cfs_waitq_signal(&dev->rad_waitq);
- cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+ spin_unlock_irqrestore(&dev->rad_lock, flags);
rrc = RapkWaitToConnect(conn->rac_rihandle);
if (rrc != RAP_SUCCESS) {
if (rc != 0)
return rc;
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (!kranal_peer_active(peer)) {
/* raced with peer getting unlinked */
- cfs_write_unlock_irqrestore(&kranal_data. \
+ write_unlock_irqrestore(&kranal_data. \
kra_global_lock,
flags);
kranal_conn_decref(conn);
return -ENOMEM;
}
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
peer2 = kranal_find_peer_locked(peer_nid);
if (peer2 == NULL) {
* this while holding the global lock, to synch with connection
* destruction on NID change. */
if (kranal_data.kra_ni->ni_nid != dst_nid) {
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ write_unlock_irqrestore(&kranal_data.kra_global_lock,
flags);
CERROR("Stale/bad connection with %s: dst_nid %s, expected %s\n",
if (rc != 0) {
LASSERT (!cfs_list_empty(&peer->rap_conns));
LASSERT (cfs_list_empty(&peer->rap_tx_queue));
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ write_unlock_irqrestore(&kranal_data.kra_global_lock,
flags);
CWARN("Not creating duplicate connection to %s: %d\n",
libcfs_nid2str(peer_nid), rc);
nstale = kranal_close_stale_conns_locked(peer, conn);
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* CAVEAT EMPTOR: passive peer can disappear NOW */
CDEBUG(D_NET, "Done handshake %s:%d \n",
libcfs_nid2str(peer->rap_nid), rc);
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
LASSERT (peer->rap_connecting);
peer->rap_connecting = 0;
peer->rap_reconnect_interval = 0; /* OK to reconnect at any time */
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ write_unlock_irqrestore(&kranal_data.kra_global_lock,
flags);
return;
}
cfs_list_add(&zombies, &peer->rap_tx_queue);
cfs_list_del_init(&peer->rap_tx_queue);
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
if (cfs_list_empty(&zombies))
return;
- CDEBUG(D_NETERROR, "Dropping packets for %s: connection failed\n",
- libcfs_nid2str(peer->rap_nid));
+ CNETERR("Dropping packets for %s: connection failed\n",
+ libcfs_nid2str(peer->rap_nid));
do {
tx = cfs_list_entry(zombies.next, kra_tx_t, tx_list);
ras->ras_sock = sock;
- cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+ spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
cfs_list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
cfs_waitq_signal(&kranal_data.kra_connd_waitq);
- cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+ spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
return 0;
}
peer->rap_reconnect_interval = 0; /* OK to connect at any time */
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (kranal_data.kra_nonewpeers) {
/* shutdown has started already */
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ write_unlock_irqrestore(&kranal_data.kra_global_lock,
flags);
LIBCFS_FREE(peer, sizeof(*peer));
cfs_atomic_inc(&kranal_data.kra_npeers);
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
*peerp = peer;
return 0;
{
kra_peer_t *peer;
- cfs_read_lock(&kranal_data.kra_global_lock);
+ read_lock(&kranal_data.kra_global_lock);
peer = kranal_find_peer_locked(nid);
if (peer != NULL) /* +1 ref for caller? */
kranal_peer_addref(peer);
- cfs_read_unlock(&kranal_data.kra_global_lock);
+ read_unlock(&kranal_data.kra_global_lock);
return peer;
}
cfs_list_t *ptmp;
int i;
- cfs_read_lock(&kranal_data.kra_global_lock);
+ read_lock(&kranal_data.kra_global_lock);
for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
*portp = peer->rap_port;
*persistencep = peer->rap_persistence;
- cfs_read_unlock(&kranal_data.kra_global_lock);
+ read_unlock(&kranal_data.kra_global_lock);
return 0;
}
}
- cfs_read_unlock(&kranal_data.kra_global_lock);
+ read_unlock(&kranal_data.kra_global_lock);
return -ENOENT;
}
if (rc != 0)
return rc;
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
peer2 = kranal_find_peer_locked(nid);
if (peer2 != NULL) {
peer->rap_port = port;
peer->rap_persistence++;
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
return 0;
}
int i;
int rc = -ENOENT;
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (nid != LNET_NID_ANY)
lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
}
}
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
return rc;
}
cfs_list_t *ctmp;
int i;
- cfs_read_lock (&kranal_data.kra_global_lock);
+ read_lock(&kranal_data.kra_global_lock);
for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
cfs_list_for_each (ptmp, &kranal_data.kra_peers[i]) {
libcfs_nid2str(conn->rac_peer->rap_nid),
cfs_atomic_read(&conn->rac_refcount));
cfs_atomic_inc(&conn->rac_refcount);
- cfs_read_unlock(&kranal_data.kra_global_lock);
+ read_unlock(&kranal_data.kra_global_lock);
return conn;
}
}
}
- cfs_read_unlock(&kranal_data.kra_global_lock);
+ read_unlock(&kranal_data.kra_global_lock);
return NULL;
}
int i;
int count = 0;
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (nid != LNET_NID_ANY)
lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
}
}
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* wildcards always succeed */
if (nid == LNET_NID_ANY)
case RANAL_INIT_ALL:
/* Prevent new peers from being created */
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
kranal_data.kra_nonewpeers = 1;
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ write_unlock_irqrestore(&kranal_data.kra_global_lock,
flags);
/* Remove all existing peers from the peer table */
/* Wait for pending conn reqs to be handled */
i = 2;
- cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+ spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
while (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
- cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
+ spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
flags);
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
"waiting for conn reqs to clean up\n");
cfs_pause(cfs_time_seconds(1));
- cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock,
+ spin_lock_irqsave(&kranal_data.kra_connd_lock,
flags);
}
- cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+ spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
/* Wait for all peers to be freed */
i = 2;
for (i = 0; i < kranal_data.kra_ndevs; i++) {
kra_device_t *dev = &kranal_data.kra_devices[i];
- cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+ spin_lock_irqsave(&dev->rad_lock, flags);
cfs_waitq_signal(&dev->rad_waitq);
- cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+ spin_unlock_irqrestore(&dev->rad_lock, flags);
}
- cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+ spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
cfs_waitq_broadcast(&kranal_data.kra_reaper_waitq);
- cfs_spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
+ spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
LASSERT (cfs_list_empty(&kranal_data.kra_connd_peers));
- cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+ spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
cfs_waitq_broadcast(&kranal_data.kra_connd_waitq);
- cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+ spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
/* Wait for threads to exit */
i = 2;
int rc;
int i;
kra_device_t *dev;
+ char name[16];
LASSERT (ni->ni_lnd == &the_kralnd);
kranal_data.kra_connstamp =
kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
- cfs_rwlock_init(&kranal_data.kra_global_lock);
+ rwlock_init(&kranal_data.kra_global_lock);
for (i = 0; i < RANAL_MAXDEVS; i++ ) {
kra_device_t *dev = &kranal_data.kra_devices[i];
CFS_INIT_LIST_HEAD(&dev->rad_ready_conns);
CFS_INIT_LIST_HEAD(&dev->rad_new_conns);
cfs_waitq_init(&dev->rad_waitq);
- cfs_spin_lock_init(&dev->rad_lock);
+ spin_lock_init(&dev->rad_lock);
}
kranal_data.kra_new_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
cfs_waitq_init(&kranal_data.kra_reaper_waitq);
- cfs_spin_lock_init(&kranal_data.kra_reaper_lock);
+ spin_lock_init(&kranal_data.kra_reaper_lock);
CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
cfs_waitq_init(&kranal_data.kra_connd_waitq);
- cfs_spin_lock_init(&kranal_data.kra_connd_lock);
+ spin_lock_init(&kranal_data.kra_connd_lock);
CFS_INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
- cfs_spin_lock_init(&kranal_data.kra_tx_lock);
+ spin_lock_init(&kranal_data.kra_tx_lock);
/* OK to call kranal_api_shutdown() to cleanup now */
kranal_data.kra_init = RANAL_INIT_DATA;
if (rc != 0)
goto failed;
- rc = kranal_thread_start(kranal_reaper, NULL);
+ rc = kranal_thread_start(kranal_reaper, NULL, "kranal_reaper");
if (rc != 0) {
CERROR("Can't spawn ranal reaper: %d\n", rc);
goto failed;
}
for (i = 0; i < *kranal_tunables.kra_n_connd; i++) {
- rc = kranal_thread_start(kranal_connd, (void *)(unsigned long)i);
+		snprintf(name, sizeof(name), "kranal_connd_%02d", i);
+ rc = kranal_thread_start(kranal_connd,
+ (void *)(unsigned long)i, name);
if (rc != 0) {
CERROR("Can't spawn ranal connd[%d]: %d\n",
i, rc);
for (i = 0; i < kranal_data.kra_ndevs; i++) {
dev = &kranal_data.kra_devices[i];
- rc = kranal_thread_start(kranal_scheduler, dev);
+ snprintf(name, sizeof(name), "kranal_sd_%02d", dev->rad_idx);
+ rc = kranal_thread_start(kranal_scheduler, dev, name);
if (rc != 0) {
CERROR("Can't spawn ranal scheduler[%d]: %d\n",
i, rc);