+/* Tear down all LND-wide base state.  Caller must already have unlinked
+ * every net (kptl_nets empty).  Shutdown is two-phase: phase 1 destroys
+ * all peers, phase 2 stops the scheduler/watchdog daemons; afterwards the
+ * Portals EQ/NI handles are released and the global tables are freed. */
+void
+kptllnd_base_shutdown (void)
+{
+ int i;
+ ptl_err_t prc;
+ unsigned long flags;
+ lnet_process_id_t process_id;
+
+ /* every net must be gone before base resources are destroyed */
+ read_lock(&kptllnd_data.kptl_net_rw_lock);
+ LASSERT (list_empty(&kptllnd_data.kptl_nets));
+ read_unlock(&kptllnd_data.kptl_net_rw_lock);
+
+ switch (kptllnd_data.kptl_init) {
+ default:
+ LBUG();
+
+ case PTLLND_INIT_ALL:
+ case PTLLND_INIT_DATA:
+ /* stop receiving */
+ kptllnd_rx_buffer_pool_fini(&kptllnd_data.kptl_rx_buffer_pool);
+ LASSERT (list_empty(&kptllnd_data.kptl_sched_rxq));
+ LASSERT (list_empty(&kptllnd_data.kptl_sched_rxbq));
+
+ /* lock to interleave cleanly with peer birth/death */
+ write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ LASSERT (kptllnd_data.kptl_shutdown == 0);
+ kptllnd_data.kptl_shutdown = 1; /* phase 1 == destroy peers */
+ /* no new peers possible now */
+ write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+
+ /* nuke all existing peers */
+ process_id.nid = LNET_NID_ANY;
+ process_id.pid = LNET_PID_ANY;
+ kptllnd_peer_del(process_id);
+
+ read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+
+ LASSERT (kptllnd_data.kptl_n_active_peers == 0);
+
+ /* poll (1s period) until the last peer reference is dropped */
+ i = 2;
+ while (kptllnd_data.kptl_npeers != 0) {
+ i++;
+ /* escalate to D_WARNING only when i is a power of 2,
+ * to rate-limit console noise */
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
+ "Waiting for %d peers to terminate\n",
+ kptllnd_data.kptl_npeers);
+
+ /* drop the lock while sleeping */
+ read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+ flags);
+
+ cfs_pause(cfs_time_seconds(1));
+
+ read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock,
+ flags);
+ }
+
+ /* all peer lists (closing, zombie, hash buckets) must be empty */
+ LASSERT (list_empty(&kptllnd_data.kptl_closing_peers));
+ LASSERT (list_empty(&kptllnd_data.kptl_zombie_peers));
+ LASSERT (kptllnd_data.kptl_peers != NULL);
+ for (i = 0; i < kptllnd_data.kptl_peer_hash_size; i++)
+ LASSERT (list_empty (&kptllnd_data.kptl_peers[i]));
+
+ read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ CDEBUG(D_NET, "All peers deleted\n");
+
+ /* Shutdown phase 2: kill the daemons... */
+ kptllnd_data.kptl_shutdown = 2;
+ /* make the phase-2 flag visible before waking the daemons */
+ mb();
+
+ /* poll (1s period) until every daemon thread has exited */
+ i = 2;
+ while (atomic_read (&kptllnd_data.kptl_nthreads) != 0) {
+ /* Wake up all threads*/
+ wake_up_all(&kptllnd_data.kptl_sched_waitq);
+ wake_up_all(&kptllnd_data.kptl_watchdog_waitq);
+
+ i++;
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
+ "Waiting for %d threads to terminate\n",
+ atomic_read(&kptllnd_data.kptl_nthreads));
+ cfs_pause(cfs_time_seconds(1));
+ }
+
+ CDEBUG(D_NET, "All Threads stopped\n");
+ LASSERT(list_empty(&kptllnd_data.kptl_sched_txq));
+
+ kptllnd_cleanup_tx_descs();
+
+ /* Nothing here now, but libcfs might soon require
+ * us to explicitly destroy wait queues and semaphores
+ * that would be done here */
+
+ /* fall through */
+
+ case PTLLND_INIT_NOTHING:
+ CDEBUG(D_NET, "PTLLND_INIT_NOTHING\n");
+ break;
+ }
+
+ /* release the Portals event queue, if it was ever allocated */
+ if (!PtlHandleIsEqual(kptllnd_data.kptl_eqh, PTL_INVALID_HANDLE)) {
+ prc = PtlEQFree(kptllnd_data.kptl_eqh);
+ if (prc != PTL_OK)
+ CERROR("Error %s(%d) freeing portals EQ\n",
+ kptllnd_errtype2str(prc), prc);
+ }
+
+ /* finalize the Portals network interface, if it was ever brought up */
+ if (!PtlHandleIsEqual(kptllnd_data.kptl_nih, PTL_INVALID_HANDLE)) {
+ prc = PtlNIFini(kptllnd_data.kptl_nih);
+ if (prc != PTL_OK)
+ CERROR("Error %s(%d) finalizing portals NI\n",
+ kptllnd_errtype2str(prc), prc);
+ }
+
+ /* every tx descriptor must have been returned by now */
+ LASSERT (atomic_read(&kptllnd_data.kptl_ntx) == 0);
+ LASSERT (list_empty(&kptllnd_data.kptl_idle_txs));
+
+ if (kptllnd_data.kptl_rx_cache != NULL)
+ cfs_mem_cache_destroy(kptllnd_data.kptl_rx_cache);
+
+ if (kptllnd_data.kptl_peers != NULL)
+ LIBCFS_FREE(kptllnd_data.kptl_peers,
+ sizeof (struct list_head) *
+ kptllnd_data.kptl_peer_hash_size);
+
+ /* NAK message was allocated without its payload union (header only) */
+ if (kptllnd_data.kptl_nak_msg != NULL)
+ LIBCFS_FREE(kptllnd_data.kptl_nak_msg,
+ offsetof(kptl_msg_t, ptlm_u));
+
+ /* zero the globals so a subsequent startup begins from a clean slate */
+ memset(&kptllnd_data, 0, sizeof(kptllnd_data));
+ PORTAL_MODULE_UNUSE;
+ return;
+}
+