+ rc = lnet_descriptor_setup();
+ if (rc != 0)
+ goto failed0;
+
+ memset(&the_lnet.ln_counters, 0,
+ sizeof(the_lnet.ln_counters));
+
+ CFS_INIT_LIST_HEAD (&the_lnet.ln_active_msgs);
+ CFS_INIT_LIST_HEAD (&the_lnet.ln_active_mds);
+ CFS_INIT_LIST_HEAD (&the_lnet.ln_active_eqs);
+ CFS_INIT_LIST_HEAD (&the_lnet.ln_test_peers);
+ CFS_INIT_LIST_HEAD (&the_lnet.ln_nis);
+ CFS_INIT_LIST_HEAD (&the_lnet.ln_zombie_nis);
+ CFS_INIT_LIST_HEAD (&the_lnet.ln_remote_nets);
+ CFS_INIT_LIST_HEAD (&the_lnet.ln_routers);
+
+ the_lnet.ln_interface_cookie = lnet_create_interface_cookie();
+
+ lnet_init_rtrpools();
+
+ rc = lnet_setup_handle_hash ();
+ if (rc != 0)
+ goto failed0;
+
+ rc = lnet_create_peer_table();
+ if (rc != 0)
+ goto failed1;
+
+ rc = lnet_init_finalizers();
+ if (rc != 0)
+ goto failed2;
+
+ the_lnet.ln_nportals = MAX_PORTALS;
+ LIBCFS_ALLOC(the_lnet.ln_portals,
+ the_lnet.ln_nportals *
+ sizeof(*the_lnet.ln_portals));
+ if (the_lnet.ln_portals == NULL) {
+ rc = -ENOMEM;
+ goto failed3;
+ }
+
+ for (i = 0; i < the_lnet.ln_nportals; i++) {
+ CFS_INIT_LIST_HEAD(&(the_lnet.ln_portals[i].ptl_ml));
+ CFS_INIT_LIST_HEAD(&(the_lnet.ln_portals[i].ptl_msgq));
+ the_lnet.ln_portals[i].ptl_options = 0;
+ }
+
+ return 0;
+
+ failed3:
+ lnet_fini_finalizers();
+ failed2:
+ lnet_destroy_peer_table();
+ failed1:
+ lnet_cleanup_handle_hash();
+ failed0:
+ lnet_descriptor_cleanup();
+ return rc;
+}
+
+/* Tear down LNet state built by the setup path: drain any objects that
+ * are still active (logging each one as an error), free the portals
+ * table, and undo the remaining setup steps in reverse order.
+ * Always returns 0. */
+int
+lnet_unprepare (void)
+{
+ int idx;
+
+ /* NB no LNET_LOCK since this is the last reference. All LND instances
+ * have shut down already, so it is safe to unlink and free all
+ * descriptors, even those that appear committed to a network op (eg MD
+ * with non-zero pending count) */
+
+ /* NOTE(review): presumably clears any test-mode failure rules
+ * installed via lnet_fail_nid() — confirm against its definition. */
+ lnet_fail_nid(LNET_NID_ANY, 0);
+
+ LASSERT (list_empty(&the_lnet.ln_test_peers));
+ LASSERT (the_lnet.ln_refcount == 0);
+ LASSERT (list_empty(&the_lnet.ln_nis));
+ LASSERT (list_empty(&the_lnet.ln_zombie_nis));
+ LASSERT (the_lnet.ln_nzombie_nis == 0);
+
+ /* Drain every portal's match-entry list; the message queues must
+ * already be empty. The ME drain must happen before the portals
+ * array itself is freed below. */
+ for (idx = 0; idx < the_lnet.ln_nportals; idx++) {
+ LASSERT (list_empty(&the_lnet.ln_portals[idx].ptl_msgq));
+
+ while (!list_empty (&the_lnet.ln_portals[idx].ptl_ml)) {
+ lnet_me_t *me = list_entry (the_lnet.ln_portals[idx].ptl_ml.next,
+ lnet_me_t, me_list);
+
+ CERROR ("Active me %p on exit\n", me);
+ list_del (&me->me_list);
+ lnet_me_free (me);
+ }
+ }
+
+ /* Free any memory descriptors still on the active list. */
+ while (!list_empty (&the_lnet.ln_active_mds)) {
+ lnet_libmd_t *md = list_entry (the_lnet.ln_active_mds.next,
+ lnet_libmd_t, md_list);
+
+ CERROR ("Active md %p on exit\n", md);
+ list_del (&md->md_list);
+ lnet_md_free (md);
+ }
+
+ /* Free any event queues still on the active list. */
+ while (!list_empty (&the_lnet.ln_active_eqs)) {
+ lnet_eq_t *eq = list_entry (the_lnet.ln_active_eqs.next,
+ lnet_eq_t, eq_list);
+
+ CERROR ("Active eq %p on exit\n", eq);
+ list_del (&eq->eq_list);
+ lnet_eq_free (eq);
+ }
+
+ /* Free any in-flight messages; clear msg_onactivelist so the
+ * free path does not see them as still linked. */
+ while (!list_empty (&the_lnet.ln_active_msgs)) {
+ lnet_msg_t *msg = list_entry (the_lnet.ln_active_msgs.next,
+ lnet_msg_t, msg_activelist);
+
+ CERROR ("Active msg %p on exit\n", msg);
+ LASSERT (msg->msg_onactivelist);
+ msg->msg_onactivelist = 0;
+ list_del (&msg->msg_activelist);
+ lnet_msg_free (msg);
+ }
+
+ LIBCFS_FREE(the_lnet.ln_portals,
+ the_lnet.ln_nportals * sizeof(*the_lnet.ln_portals));
+
+ /* Undo the setup sequence in reverse order. */
+ lnet_free_rtrpools();
+ lnet_fini_finalizers();
+ lnet_destroy_peer_table();
+ lnet_cleanup_handle_hash();
+ lnet_descriptor_cleanup();
+
+ return (0);
+}
+
+/* Find the local network interface serving network 'net' and take a
+ * reference on it; return NULL if no local NI matches. The _locked
+ * suffix indicates the caller holds LNET_LOCK (see lnet_islocalnet). */
+lnet_ni_t *
+lnet_net2ni_locked (__u32 net)
+{
+ struct list_head *pos;
+
+ list_for_each (pos, &the_lnet.ln_nis) {
+ lnet_ni_t *ni = list_entry(pos, lnet_ni_t, ni_list);
+
+ if (!lnet_ptlcompat_matchnet(LNET_NIDNET(ni->ni_nid), net))
+ continue;
+
+ lnet_ni_addref_locked(ni);
+ return ni;
+ }
+
+ return NULL;
+}
+
+/* Return nonzero iff some local NI serves network 'net'. Only a
+ * yes/no answer is needed, so the reference taken by
+ * lnet_net2ni_locked() is dropped again before the lock is released. */
+int
+lnet_islocalnet (__u32 net)
+{
+ lnet_ni_t *ni;
+ int found;
+
+ LNET_LOCK();
+ ni = lnet_net2ni_locked(net);
+ found = (ni != NULL);
+ if (found)
+ lnet_ni_decref_locked(ni);
+ LNET_UNLOCK();
+
+ return found;
+}
+
+lnet_ni_t *
+lnet_nid2ni_locked (lnet_nid_t nid)