/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
unsigned long flags;
char *str;
-
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_ptlid2str_lock, flags);
+
+ spin_lock_irqsave(&kptllnd_data.kptl_ptlid2str_lock, flags);
str = strs[idx++];
if (idx >= sizeof(strs)/sizeof(strs[0]))
idx = 0;
- cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_ptlid2str_lock, flags);
+ spin_unlock_irqrestore(&kptllnd_data.kptl_ptlid2str_lock, flags);
snprintf(str, sizeof(strs[0]), FMT_PTLID, id.pid, id.nid);
return str;
if (kptllnd_find_target(net, id, &peer) != 0)
return;
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
if (peer->peer_last_alive != 0)
*when = peer->peer_last_alive;
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
kptllnd_peer_decref(peer);
return;
}
unsigned long flags;
lnet_process_id_t process_id;
- cfs_read_lock(&kptllnd_data.kptl_net_rw_lock);
+ read_lock(&kptllnd_data.kptl_net_rw_lock);
LASSERT (cfs_list_empty(&kptllnd_data.kptl_nets));
- cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
+ read_unlock(&kptllnd_data.kptl_net_rw_lock);
switch (kptllnd_data.kptl_init) {
default:
LASSERT (cfs_list_empty(&kptllnd_data.kptl_sched_rxbq));
/* lock to interleave cleanly with peer birth/death */
- cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
LASSERT (kptllnd_data.kptl_shutdown == 0);
kptllnd_data.kptl_shutdown = 1; /* phase 1 == destroy peers */
/* no new peers possible now */
- cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+ write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
flags);
/* nuke all existing peers */
process_id.pid = LNET_PID_ANY;
kptllnd_peer_del(process_id);
- cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
LASSERT (kptllnd_data.kptl_n_active_peers == 0);
"Waiting for %d peers to terminate\n",
kptllnd_data.kptl_npeers);
- cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+ read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
flags);
cfs_pause(cfs_time_seconds(1));
- cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock,
+ read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock,
flags);
}
for (i = 0; i < kptllnd_data.kptl_peer_hash_size; i++)
LASSERT (cfs_list_empty (&kptllnd_data.kptl_peers[i]));
- cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+ read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
flags);
CDEBUG(D_NET, "All peers deleted\n");
- /* Shutdown phase 2: kill the daemons... */
- kptllnd_data.kptl_shutdown = 2;
- cfs_mb();
+ /* Shutdown phase 2: kill the daemons... */
+ kptllnd_data.kptl_shutdown = 2;
+ smp_mb();
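+ /* full barrier: make the phase-2 shutdown flag visible before the daemons are woken below */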
- i = 2;
- while (cfs_atomic_read (&kptllnd_data.kptl_nthreads) != 0) {
- /* Wake up all threads*/
- cfs_waitq_broadcast(&kptllnd_data.kptl_sched_waitq);
- cfs_waitq_broadcast(&kptllnd_data.kptl_watchdog_waitq);
+ i = 2;
+ while (cfs_atomic_read (&kptllnd_data.kptl_nthreads) != 0) {
+ /* Wake up all threads */
+ wake_up_all(&kptllnd_data.kptl_sched_waitq);
+ wake_up_all(&kptllnd_data.kptl_watchdog_waitq);
- i++;
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
- "Waiting for %d threads to terminate\n",
- cfs_atomic_read(&kptllnd_data.kptl_nthreads));
- cfs_pause(cfs_time_seconds(1));
- }
+ i++;
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
+ "Waiting for %d threads to terminate\n",
+ cfs_atomic_read(&kptllnd_data.kptl_nthreads));
+ cfs_pause(cfs_time_seconds(1));
+ }
CDEBUG(D_NET, "All Threads stopped\n");
LASSERT(cfs_list_empty(&kptllnd_data.kptl_sched_txq));
LASSERT (cfs_list_empty(&kptllnd_data.kptl_idle_txs));
if (kptllnd_data.kptl_rx_cache != NULL)
- cfs_mem_cache_destroy(kptllnd_data.kptl_rx_cache);
+ kmem_cache_destroy(kptllnd_data.kptl_rx_cache);
if (kptllnd_data.kptl_peers != NULL)
LIBCFS_FREE(kptllnd_data.kptl_peers,
LIBCFS_FREE(kptllnd_data.kptl_nak_msg,
offsetof(kptl_msg_t, ptlm_u));
- memset(&kptllnd_data, 0, sizeof(kptllnd_data));
- PORTAL_MODULE_UNUSE;
- return;
+ memset(&kptllnd_data, 0, sizeof(kptllnd_data));
+ module_put(THIS_MODULE);
+ return;
}
int
struct timeval tv;
lnet_process_id_t target;
ptl_err_t ptl_rc;
+ char name[16]; /* kthread name buffer, e.g. "kptllnd_sd_00" */
if (*kptllnd_tunables.kptl_max_procs_per_node < 1) {
CERROR("max_procs_per_node must be >= 1\n");
kptllnd_data.kptl_eqh = PTL_INVALID_HANDLE;
kptllnd_data.kptl_nih = PTL_INVALID_HANDLE;
- cfs_rwlock_init(&kptllnd_data.kptl_net_rw_lock);
+ rwlock_init(&kptllnd_data.kptl_net_rw_lock);
CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_nets);
- /* Setup the sched locks/lists/waitq */
- cfs_spin_lock_init(&kptllnd_data.kptl_sched_lock);
- cfs_waitq_init(&kptllnd_data.kptl_sched_waitq);
- CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_txq);
- CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxq);
- CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxbq);
+ /* Setup the sched locks/lists/waitq */
+ spin_lock_init(&kptllnd_data.kptl_sched_lock);
+ init_waitqueue_head(&kptllnd_data.kptl_sched_waitq);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_txq);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxq);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxbq);
/* Init kptl_ptlid2str_lock before any call to kptllnd_ptlid2str */
- cfs_spin_lock_init(&kptllnd_data.kptl_ptlid2str_lock);
+ spin_lock_init(&kptllnd_data.kptl_ptlid2str_lock);
/* Setup the tx locks/lists */
- cfs_spin_lock_init(&kptllnd_data.kptl_tx_lock);
+ spin_lock_init(&kptllnd_data.kptl_tx_lock);
CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_idle_txs);
cfs_atomic_set(&kptllnd_data.kptl_ntx, 0);
- /* Uptick the module reference count */
- PORTAL_MODULE_USE;
+ /* Uptick the module reference count */
+ try_module_get(THIS_MODULE);
- kptllnd_data.kptl_expected_peers =
- *kptllnd_tunables.kptl_max_nodes *
- *kptllnd_tunables.kptl_max_procs_per_node;
+ kptllnd_data.kptl_expected_peers =
+ *kptllnd_tunables.kptl_max_nodes *
+ *kptllnd_tunables.kptl_max_procs_per_node;
/*
* Initialize the Network interface instance
goto failed;
}
- /* Initialized the incarnation - it must be for-all-time unique, even
- * accounting for the fact that we increment it when we disconnect a
- * peer that's using it */
- cfs_gettimeofday(&tv);
- kptllnd_data.kptl_incarnation = (((__u64)tv.tv_sec) * 1000000) +
- tv.tv_usec;
- CDEBUG(D_NET, "Incarnation="LPX64"\n", kptllnd_data.kptl_incarnation);
+ /* Initialize the incarnation - it must be for-all-time unique, even
+ * accounting for the fact that we increment it when we disconnect a
+ * peer that's using it */
+ do_gettimeofday(&tv);
+ kptllnd_data.kptl_incarnation = (((__u64)tv.tv_sec) * 1000000) +
+ tv.tv_usec;
+ CDEBUG(D_NET, "Incarnation="LPX64"\n", kptllnd_data.kptl_incarnation);
target.nid = LNET_NID_ANY;
target.pid = LNET_PID_ANY; /* NB target for NAK doesn't matter */
kptllnd_data.kptl_nak_msg->ptlm_srcpid = the_lnet.ln_pid;
kptllnd_data.kptl_nak_msg->ptlm_srcstamp = kptllnd_data.kptl_incarnation;
- cfs_rwlock_init(&kptllnd_data.kptl_peer_rw_lock);
- cfs_waitq_init(&kptllnd_data.kptl_watchdog_waitq);
- CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_closing_peers);
- CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_zombie_peers);
+ rwlock_init(&kptllnd_data.kptl_peer_rw_lock);
+ init_waitqueue_head(&kptllnd_data.kptl_watchdog_waitq);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_closing_peers);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_zombie_peers);
/* Allocate and setup the peer hash table */
kptllnd_data.kptl_peer_hash_size =
kptllnd_rx_buffer_pool_init(&kptllnd_data.kptl_rx_buffer_pool);
kptllnd_data.kptl_rx_cache =
- cfs_mem_cache_create("ptllnd_rx",
+ kmem_cache_create("ptllnd_rx",
sizeof(kptl_rx_t) +
*kptllnd_tunables.kptl_max_msg_size,
0, /* offset */
* now that PTLLND_INIT_DATA state has been entered */
CDEBUG(D_NET, "starting %d scheduler threads\n", PTLLND_N_SCHED);
for (i = 0; i < PTLLND_N_SCHED; i++) {
+ snprintf(name, sizeof(name), "kptllnd_sd_%02d", i);
- rc = kptllnd_thread_start(kptllnd_scheduler, (void *)((long)i));
+ rc = kptllnd_thread_start(kptllnd_scheduler,
+ (void *)((long)i), name);
if (rc != 0) {
CERROR("Can't spawn scheduler[%d]: %d\n", i, rc);
}
}
- rc = kptllnd_thread_start(kptllnd_watchdog, NULL);
+ snprintf(name, sizeof(name), "kptllnd_wd_%02d", i);
+ rc = kptllnd_thread_start(kptllnd_watchdog, NULL, name);
if (rc != 0) {
CERROR("Can't spawn watchdog: %d\n", rc);
goto failed;
kptllnd_data.kptl_nak_msg->ptlm_srcnid = ni->ni_nid;
cfs_atomic_set(&net->net_refcount, 1);
- cfs_write_lock(&kptllnd_data.kptl_net_rw_lock);
+ write_lock(&kptllnd_data.kptl_net_rw_lock);
cfs_list_add_tail(&net->net_list, &kptllnd_data.kptl_nets);
- cfs_write_unlock(&kptllnd_data.kptl_net_rw_lock);
+ write_unlock(&kptllnd_data.kptl_net_rw_lock);
return 0;
failed:
ni->ni_data = NULL;
net->net_ni = NULL;
- cfs_write_lock(&kptllnd_data.kptl_net_rw_lock);
+ write_lock(&kptllnd_data.kptl_net_rw_lock);
kptllnd_net_decref(net);
cfs_list_del_init(&net->net_list);
- cfs_write_unlock(&kptllnd_data.kptl_net_rw_lock);
+ write_unlock(&kptllnd_data.kptl_net_rw_lock);
/* Can't nuke peers here - they are shared among all NIs */
- cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
net->net_shutdown = 1; /* Order with peer creation */
- cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
i = 2;
while (cfs_atomic_read(&net->net_refcount) != 0) {