/*
 * lh_entry - recover the address of the structure containing @ptr.
 * @ptr:    pointer to a member embedded in the enclosing structure
 * @type:   type of the enclosing structure
 * @member: name of the member within @type that @ptr points at
 *
 * container_of()-style accessor: subtract the member's byte offset
 * from @ptr.  Uses offsetof() (stddef.h / linux/stddef.h) rather than
 * the hand-rolled "&((type *)0)->member" null-pointer dereference,
 * which is formally undefined behavior in C.
 */
#define lh_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
-struct lnet_eq {
- lnet_eq_handler_t eq_callback;
-};
-
struct lnet_me {
struct list_head me_list;
int me_cpt;
unsigned int md_niov; /* # frags at end of struct */
void *md_user_ptr;
struct lnet_rsp_tracker *md_rspt_ptr;
- struct lnet_eq *md_eq;
+ lnet_eq_handler_t md_eq;
struct lnet_handle_md md_bulk_handle;
union {
struct kvec iov[LNET_MAX_IOV];
* ln_api_mutex.
*/
struct lnet_handle_md ln_ping_target_md;
- struct lnet_eq *ln_ping_target_eq;
+ lnet_eq_handler_t ln_ping_target_eq;
struct lnet_ping_buffer *ln_ping_target;
atomic_t ln_ping_target_seqno;
* buffer may linger a while after it has been unlinked, in
* which case the event handler cleans up.
*/
- struct lnet_eq *ln_push_target_eq;
+ lnet_eq_handler_t ln_push_target_eq;
struct lnet_handle_md ln_push_target_md;
struct lnet_ping_buffer *ln_push_target;
int ln_push_target_nnis;
/* discovery event queue handle */
- struct lnet_eq *ln_dc_eq;
+ lnet_eq_handler_t ln_dc_eq;
/* discovery requests */
struct list_head ln_dc_request;
/* discovery working list */
*/
struct list_head **ln_mt_zombie_rstqs;
/* recovery eq handler */
- struct lnet_eq *ln_mt_eq;
+ lnet_eq_handler_t ln_mt_eq;
/*
* Completed when the discovery and monitor threads can enter their