/* NOTE(review): patch fragment — tail of the LNet global state struct.
 * Lines prefixed '+' are the additions introducing ln_started. */
/* recovery eq handler */
struct lnet_handle_eq ln_mt_eqh;
+ /*
+ * Completed when the discovery and monitor threads can enter their
+ * work loops
+ */
+ struct completion ln_started;
};
#endif
/* NOTE(review): patch fragment from the LNet startup path (function
 * signature not visible in this excerpt). */
INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
init_waitqueue_head(&the_lnet.ln_dc_waitq);
LNetInvalidateEQHandle(&the_lnet.ln_mt_eqh);
+ /* Initialize before any thread that waits on it can be spawned. */
+ init_completion(&the_lnet.ln_started);
rc = lnet_descriptor_setup();
if (rc != 0)
/* NOTE(review): lines are elided between the error check above and the
 * unlock below — presumably error handling / success path; confirm
 * against the full source before reasoning about control flow here. */
mutex_unlock(&the_lnet.ln_api_mutex);
+ /* Startup finished: release the discovery and monitor threads so
+ * they may enter their work loops (see wait_for_completion calls). */
+ complete_all(&the_lnet.ln_started);
+
/* wait for all routers to start */
lnet_wait_router_start();
/* NOTE(review): patch fragment from the monitor thread's entry
 * (enclosing function not visible in this excerpt). */
int interval;
time64_t now;
+ /* Block until LNet startup has completed (complete_all on ln_started)
+ * so the work loop never runs against half-initialized state. */
+ wait_for_completion(&the_lnet.ln_started);
/*
 * The monitor thread takes care of the following:
 * 1. Checks the aliveness of routers
/* NOTE(review): patch fragment from the discovery thread's entry
 * (enclosing function not visible in this excerpt). */
struct lnet_peer *lp;
int rc;
+ /* Block until LNet startup has completed (complete_all on ln_started)
+ * before entering the discovery work loop. */
+ wait_for_completion(&the_lnet.ln_started);
+
CDEBUG(D_NET, "started\n");
cfs_block_allsigs();
/* NOTE(review): patch fragment from the discovery-stop path
 * (enclosing function not visible in this excerpt). */
LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
- wake_up(&the_lnet.ln_dc_waitq);
+
+ /* In the LNetNIInit() path we may be stopping discovery before it
+ * entered its work loop
+ */
+ if (!completion_done(&the_lnet.ln_started))
+ /* NOTE(review): complete() wakes a single waiter, yet both the
+ * discovery AND monitor threads wait on ln_started; if the monitor
+ * thread can also be blocked here, complete_all() may be required —
+ * verify against the shutdown ordering in the full source.
+ * Also note completion_done()+complete() is check-then-act; confirm
+ * it is serialized against the complete_all() in the startup path. */
+ complete(&the_lnet.ln_started);
+ else
+ wake_up(&the_lnet.ln_dc_waitq);
/* Wait for the discovery thread to acknowledge the state change. */
wait_event(the_lnet.ln_dc_waitq,
the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);