spin_lock_init(&the_lnet.ln_eq_wait_lock);
spin_lock_init(&the_lnet.ln_msg_resend_lock);
init_waitqueue_head(&the_lnet.ln_eq_waitq);
- init_waitqueue_head(&the_lnet.ln_mt_waitq);
+ init_completion(&the_lnet.ln_mt_wait_complete);
mutex_init(&the_lnet.ln_lnd_mutex);
}
LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
- lnd->lnd_refcount = 0;
CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
mutex_lock(&the_lnet.ln_lnd_mutex);
LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
- LASSERT(lnd->lnd_refcount == 0);
list_del(&lnd->lnd_list);
CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
}
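+/* Allocate a per-CPT array of list_heads and initialize each entry
+ * as an empty queue.  Returns NULL if the allocation fails. */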
+struct list_head **
+lnet_create_array_of_queues(void)
+{
+ struct list_head **qs;
+ struct list_head *q;
+ int i;
+
+ qs = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(struct list_head));
+ if (!qs) {
+ CERROR("Failed to allocate queues\n");
+ return NULL;
+ }
+
+ cfs_percpt_for_each(q, i, qs)
+ INIT_LIST_HEAD(q);
+
+ return qs;
+}
+
static int lnet_unprepare(void);
static int
INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
init_waitqueue_head(&the_lnet.ln_dc_waitq);
LNetInvalidateEQHandle(&the_lnet.ln_mt_eqh);
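+ /* completed once LNet startup has finished */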
+ init_completion(&the_lnet.ln_started);
rc = lnet_descriptor_setup();
if (rc != 0)
goto failed;
}
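+ /* per-CPT queues for zombie response trackers, drained later by
+ * lnet_clean_zombie_rstqs() */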
+ the_lnet.ln_mt_zombie_rstqs = lnet_create_array_of_queues();
+ if (!the_lnet.ln_mt_zombie_rstqs) {
+ rc = -ENOMEM;
+ goto failed;
+ }
+
return 0;
failed:
LASSERT(list_empty(&the_lnet.ln_test_peers));
LASSERT(list_empty(&the_lnet.ln_nets));
+ if (the_lnet.ln_mt_zombie_rstqs) {
+ lnet_clean_zombie_rstqs();
+ the_lnet.ln_mt_zombie_rstqs = NULL;
+ }
+
if (!LNetEQHandleIsInvalid(the_lnet.ln_mt_eqh)) {
rc = LNetEQFree(the_lnet.ln_mt_eqh);
LNetInvalidateEQHandle(&the_lnet.ln_mt_eqh);
__swab64s(&stat->ns_nid);
__swab32s(&stat->ns_status);
}
- return;
}
int
lnet_net_lock(LNET_LOCK_EX);
- net->net_state = LNET_NET_STATE_DELETING;
-
list_del_init(&net->net_list);
while (!list_empty(&net->net_ni_list)) {
/* Do peer table cleanup for this net */
lnet_peer_tables_cleanup(net);
- lnet_net_lock(LNET_LOCK_EX);
- /*
- * decrement ref count on lnd only when the entire network goes
- * away
- */
- net->net_lnd->lnd_refcount--;
-
- lnet_net_unlock(LNET_LOCK_EX);
-
lnet_net_free(net);
}
if (rc != 0) {
LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
rc, libcfs_lnd2str(net->net_lnd->lnd_type));
- lnet_net_lock(LNET_LOCK_EX);
- net->net_lnd->lnd_refcount--;
- lnet_net_unlock(LNET_LOCK_EX);
goto failed0;
}
}
}
- lnet_net_lock(LNET_LOCK_EX);
- lnd->lnd_refcount++;
- lnet_net_unlock(LNET_LOCK_EX);
-
net->net_lnd = lnd;
mutex_unlock(&the_lnet.ln_lnd_mutex);
* up is actually unique. if it's not, fail. */
if (!lnet_ni_unique_net(&net_l->net_ni_list,
ni->ni_interfaces[0])) {
- rc = -EINVAL;
+ rc = -EEXIST;
goto failed1;
}
*/
lnet_net_free(net);
} else {
- net->net_state = LNET_NET_STATE_ACTIVE;
/*
* restore tunables after they have been overwritten by the
* lnd
mutex_unlock(&the_lnet.ln_api_mutex);
+ complete_all(&the_lnet.ln_started);
+
/* wait for all routers to start */
lnet_wait_router_start();
* \return always 0 for current implementation.
*/
int
-LNetNIFini()
+LNetNIFini(void)
{
mutex_lock(&the_lnet.ln_api_mutex);