unsigned int len);
int lnet_send(lnet_nid_t nid, struct lnet_msg *msg, lnet_nid_t rtr_nid);
int lnet_send_ping(lnet_nid_t dest_nid, struct lnet_handle_md *mdh, int nnis,
- void *user_ptr, lnet_eq_handler_t eq, bool recovery);
+ void *user_ptr, lnet_handler_t handler, bool recovery);
void lnet_return_tx_credits_locked(struct lnet_msg *msg);
void lnet_return_rx_credits_locked(struct lnet_msg *msg);
void lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp);
unsigned int md_niov; /* # frags at end of struct */
void *md_user_ptr;
struct lnet_rsp_tracker *md_rspt_ptr;
- lnet_eq_handler_t md_eq;
+ lnet_handler_t md_handler;
struct lnet_handle_md md_bulk_handle;
union {
struct kvec iov[LNET_MAX_IOV];
* ln_api_mutex.
*/
struct lnet_handle_md ln_ping_target_md;
- lnet_eq_handler_t ln_ping_target_eq;
+ lnet_handler_t ln_ping_target_handler;
struct lnet_ping_buffer *ln_ping_target;
atomic_t ln_ping_target_seqno;
* buffer may linger a while after it has been unlinked, in
* which case the event handler cleans up.
*/
- lnet_eq_handler_t ln_push_target_eq;
+ lnet_handler_t ln_push_target_handler;
struct lnet_handle_md ln_push_target_md;
struct lnet_ping_buffer *ln_push_target;
int ln_push_target_nnis;
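The lingering-buffer cleanup described in the comment above reduces to a handler that keys off the final event for the unlinked MD. A minimal sketch, not part of this patch: push_target_handler_sketch() is hypothetical, and lnet_ping_buffer_decref() is assumed from the surrounding LNet code rather than shown in these hunks.

/* Hypothetical sketch: events can still arrive after LNetMDUnlink(),
 * so the buffer is only released on the final (unlinked) event. */
static void push_target_handler_sketch(struct lnet_event *ev)
{
	if (!ev->unlinked)
		return;
	/* Last event for this MD: the lingering buffer can go now. */
	lnet_ping_buffer_decref(the_lnet.ln_push_target); /* assumed helper */
}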
/* discovery event queue handle */
- lnet_eq_handler_t ln_dc_eq;
+ lnet_handler_t ln_dc_handler;
/* discovery requests */
struct list_head ln_dc_request;
/* discovery working list */
* operations on the MD complete or when LNet has shut down.
*/
struct list_head **ln_mt_zombie_rstqs;
- /* recovery eq handler */
- lnet_eq_handler_t ln_mt_eq;
+ /* recovery handler */
+ lnet_handler_t ln_mt_handler;
/*
* Completed when the discovery and monitor threads can enter their
/**
* Objects maintained by the LNet are accessed through handles. Handle types
* have names of the form lnet_handle_xx, where xx is one of the two letter
- * object type codes ('eq' for event queue, 'md' for memory descriptor, and
+ * object type codes ('md' for memory descriptor, and
* 'me' for match entry). Each type of object is given a unique handle type
* to enhance type checking.
*/
}
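With event queues gone, the MD handle is the main handle callers still juggle. A short sketch of the usual hygiene, assuming the LNetInvalidateMDHandle()/LNetMDHandleIsInvalid() helpers (only the former appears in this patch):

struct lnet_handle_md mdh;

LNetInvalidateMDHandle(&mdh);		/* mark as "no MD bound yet" */
if (LNetMDHandleIsInvalid(mdh)) {
	/* safe: nothing to unlink or wait on */
}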
/**
- * Check whether eq handle \a h is invalid.
+ * Check whether handler \a h is invalid.
*
* \return 1 if handle is invalid, 0 if valid.
*/
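After the rename the check is trivial: a handler is a bare function pointer, and the patch drops LNET_EQ_HANDLER_NONE in favour of plain NULL. A hypothetical helper, using the lnet_handler_t typedef introduced below, shows the whole test:

static inline int lnet_handler_is_invalid(lnet_handler_t h)
{
	return h == NULL;	/* NULL now plays the role of LNET_EQ_HANDLER_NONE */
}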
* API functions. It should return as quickly as possible.
*/
struct lnet_event;
-typedef void (*lnet_eq_handler_t)(struct lnet_event *event);
-#define LNET_EQ_HANDLER_NONE NULL
+typedef void (*lnet_handler_t)(struct lnet_event *event);
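Because the handler now runs directly in LNet's event-delivery context, an implementation should only record the outcome and wake a waiter, as the ping path later in this patch does. A minimal sketch under the prototypes shown in this patch; my_handler, my_bind, and ping_done are hypothetical:

#include <linux/completion.h>

static DECLARE_COMPLETION(ping_done);		/* hypothetical waiter */

/* Runs in LNet's callback context: no blocking work, return quickly. */
static void my_handler(struct lnet_event *ev)
{
	if (ev->unlinked)			/* final event for this MD */
		complete(&ping_done);
}

/* The handler is set directly in the MD, where an EQ handle went before. */
static int my_bind(void *buf, unsigned int len, struct lnet_handle_md *mdh)
{
	struct lnet_md md = {
		.start     = buf,
		.length    = len,
		.threshold = 1,
		.options   = 0,
		.handler   = my_handler,
	};

	return LNetMDBind(md, LNET_UNLINK, mdh);
}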
/**
* Defines the visible parts of a memory descriptor. Values of this type
 * the memory region. If this argument is NULL, operations
* performed on this memory descriptor are not logged.
*/
- lnet_eq_handler_t eq_handle;
+ lnet_handler_t handler;
/**
* The bulk MD handle which was registered to describe the buffers
* either to be used to transfer data to the peer or receive data
INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
init_waitqueue_head(&the_lnet.ln_dc_waitq);
- the_lnet.ln_mt_eq = NULL;
+ the_lnet.ln_mt_handler = NULL;
init_completion(&the_lnet.ln_started);
rc = lnet_slab_setup();
the_lnet.ln_mt_zombie_rstqs = NULL;
}
- the_lnet.ln_mt_eq = NULL;
+ the_lnet.ln_mt_handler = NULL;
lnet_portals_destroy();
int rc;
if (set_eq)
- the_lnet.ln_ping_target_eq =
+ the_lnet.ln_ping_target_handler =
lnet_ping_target_event_handler;
*ppbuf = lnet_ping_target_create(ni_count);
md.max_size = 0;
md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
LNET_MD_MANAGE_REMOTE;
- md.eq_handle = the_lnet.ln_ping_target_eq;
+ md.handler = the_lnet.ln_ping_target_handler;
md.user_ptr = *ppbuf;
rc = LNetMDAttach(me, md, LNET_RETAIN, ping_mdh);
md.max_size = 0;
md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE;
md.user_ptr = pbuf;
- md.eq_handle = the_lnet.ln_push_target_eq;
+ md.handler = the_lnet.ln_push_target_handler;
rc = LNetMDAttach(me, md, LNET_UNLINK, mdhp);
if (rc) {
if (the_lnet.ln_push_target)
return -EALREADY;
- the_lnet.ln_push_target_eq =
+ the_lnet.ln_push_target_handler =
lnet_push_target_event_handler;
rc = LNetSetLazyPortal(LNET_RESERVED_PORTAL);
if (rc) {
LNetClearLazyPortal(LNET_RESERVED_PORTAL);
- the_lnet.ln_push_target_eq = NULL;
+ the_lnet.ln_push_target_handler = NULL;
}
return rc;
the_lnet.ln_push_target_nnis = 0;
LNetClearLazyPortal(LNET_RESERVED_PORTAL);
- the_lnet.ln_push_target_eq = NULL;
+ the_lnet.ln_push_target_handler = NULL;
}
static int
lnet_ping_target_update(pbuf, ping_mdh);
- the_lnet.ln_mt_eq = lnet_mt_event_handler;
+ the_lnet.ln_mt_handler = lnet_mt_event_handler;
rc = lnet_push_target_init();
if (rc != 0)
md.max_size = 0;
md.options = LNET_MD_TRUNCATE;
md.user_ptr = &pd;
- md.eq_handle = lnet_ping_event_handler;
+ md.handler = lnet_ping_event_handler;
init_completion(&pd.completion);
lmd->md_max_size = umd->max_size;
lmd->md_options = umd->options;
lmd->md_user_ptr = umd->user_ptr;
- lmd->md_eq = NULL;
+ lmd->md_handler = NULL;
lmd->md_threshold = umd->threshold;
lmd->md_refcount = 0;
lmd->md_flags = (unlink == LNET_UNLINK) ? LNET_MD_FLAG_AUTO_UNLINK : 0;
/* must be called with resource lock held */
static int
-lnet_md_link(struct lnet_libmd *md, lnet_eq_handler_t eq, int cpt)
+lnet_md_link(struct lnet_libmd *md, lnet_handler_t handler, int cpt)
{
struct lnet_res_container *container = the_lnet.ln_md_containers[cpt];
/* TODO - reevaluate what should be here in light of
* the removal of the start and end events
 * maybe we shouldn't even allow a NULL handler!
- * LASSERT (eq == NULL);
+ * LASSERT (handler != NULL);
*/
- md->md_eq = eq;
+ md->md_handler = handler;
lnet_res_lh_initialize(container, &md->md_lh);
if (me->me_md)
rc = -EBUSY;
else
- rc = lnet_md_link(md, umd.eq_handle, cpt);
+ rc = lnet_md_link(md, umd.handler, cpt);
if (rc != 0)
goto out_unlock;
cpt = lnet_res_lock_current();
- rc = lnet_md_link(md, umd.eq_handle, cpt);
+ rc = lnet_md_link(md, umd.handler, cpt);
if (rc != 0)
goto out_unlock;
/* If the MD is busy, lnet_md_unlink just marks it for deletion, and
* when the LND is done, the completion event flags that the MD was
* unlinked. Otherwise, we enqueue an event now... */
- if (md->md_eq != NULL && md->md_refcount == 0) {
+ if (md->md_handler && md->md_refcount == 0) {
lnet_build_unlink_event(md, &ev);
- md->md_eq(&ev);
+ md->md_handler(&ev);
}
if (md->md_rspt_ptr != NULL)
md = me->me_md;
if (md != NULL) {
md->md_flags |= LNET_MD_FLAG_ABORTED;
- if (md->md_eq != NULL && md->md_refcount == 0) {
+ if (md->md_handler && md->md_refcount == 0) {
lnet_build_unlink_event(md, &ev);
- md->md_eq(&ev);
+ md->md_handler(&ev);
}
}
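In both unlink paths above the handler receives a synthesized unlink event, and an aborted MD is reported to the final callback as -ETIMEDOUT (see the lnet_msg_detach_md hunk below). So ev->unlinked is the one teardown signal a handler needs; sample_handler is hypothetical:

static void sample_handler(struct lnet_event *ev)
{
	if (ev->status != 0)		/* e.g. -ETIMEDOUT if aborted */
		CERROR("MD operation failed: %d\n", ev->status);
	if (ev->unlinked) {
		/* No further events will reference this MD; free any
		 * per-MD state here. */
	}
}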
ev_info->mt_type = MT_TYPE_LOCAL_NI;
ev_info->mt_nid = nid;
rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
- ev_info, the_lnet.ln_mt_eq, true);
+ ev_info, the_lnet.ln_mt_handler,
+ true);
/* lookup the nid again */
lnet_net_lock(0);
ni = lnet_nid2ni_locked(nid, 0);
ev_info->mt_type = MT_TYPE_PEER_NI;
ev_info->mt_nid = nid;
rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
- ev_info, the_lnet.ln_mt_eq, true);
+ ev_info, the_lnet.ln_mt_handler,
+ true);
lnet_net_lock(0);
/*
* lnet_find_peer_ni_locked() grabs a refcount for
int
lnet_send_ping(lnet_nid_t dest_nid,
struct lnet_handle_md *mdh, int nnis,
- void *user_data, lnet_eq_handler_t eq, bool recovery)
+ void *user_data, lnet_handler_t handler, bool recovery)
{
struct lnet_md md = { NULL };
struct lnet_process_id id;
md.max_size = 0;
md.options = LNET_MD_TRUNCATE;
md.user_ptr = user_data;
- md.eq_handle = eq;
+ md.handler = handler;
rc = LNetMDBind(md, LNET_UNLINK, mdh);
if (rc) {
lnet_clean_local_ni_recoveryq();
lnet_clean_peer_ni_recoveryq();
lnet_clean_resendqs();
- the_lnet.ln_mt_eq = NULL;
+ the_lnet.ln_mt_handler = NULL;
return rc;
clean_queues:
lnet_rsp_tracker_clean();
LASSERT(md->md_refcount >= 0);
unlink = lnet_md_unlinkable(md);
- if (md->md_eq != NULL) {
+ if (md->md_handler) {
if ((md->md_flags & LNET_MD_FLAG_ABORTED) && !status) {
msg->msg_ev.status = -ETIMEDOUT;
CDEBUG(D_NET, "md 0x%p already unlinked\n", md);
msg->msg_ev.status = status;
}
msg->msg_ev.unlinked = unlink;
- md->md_eq(&msg->msg_ev);
+ md->md_handler(&msg->msg_ev);
}
if (unlink || (md->md_refcount == 0 &&
nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
rc = lnet_send_ping(pnid, &lp->lp_ping_mdh, nnis, lp,
- the_lnet.ln_dc_eq, false);
+ the_lnet.ln_dc_handler, false);
/*
* if LNetMDBind in lnet_send_ping fails we need to decrement the
md.threshold = 2; /* Put/Ack */
md.max_size = 0;
md.options = 0;
- md.eq_handle = the_lnet.ln_dc_eq;
+ md.handler = the_lnet.ln_dc_handler;
md.user_ptr = lp;
rc = LNetMDBind(md, LNET_UNLINK, &lp->lp_push_mdh);
}
lnet_net_unlock(LNET_LOCK_EX);
- the_lnet.ln_dc_eq = NULL;
+ the_lnet.ln_dc_handler = NULL;
the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
wake_up(&the_lnet.ln_dc_waitq);
if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
return -EALREADY;
- the_lnet.ln_dc_eq = lnet_discovery_event_handler;
+ the_lnet.ln_dc_handler = lnet_discovery_event_handler;
the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
if (IS_ERR(task)) {
rc = PTR_ERR(task);
CERROR("Can't start peer discovery thread: %d\n", rc);
- the_lnet.ln_dc_eq = NULL;
+ the_lnet.ln_dc_handler = NULL;
the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
}
static struct smoketest_rpc {
spinlock_t rpc_glock; /* global lock */
struct srpc_service *rpc_services[SRPC_SERVICE_MAX_ID + 1];
- lnet_eq_handler_t rpc_lnet_eq; /* _the_ LNet event handler */
+ lnet_handler_t rpc_lnet_handler; /* _the_ LNet event handler */
enum srpc_state rpc_state;
struct srpc_counters rpc_counters;
__u64 rpc_matchbits; /* matchbits counter */
md.start = buf;
md.length = len;
md.options = options;
- md.eq_handle = srpc_data.rpc_lnet_eq;
+ md.handler = srpc_data.rpc_lnet_handler;
rc = LNetMDAttach(me, md, LNET_UNLINK, mdh);
if (rc != 0) {
md.user_ptr = ev;
md.start = buf;
md.length = len;
- md.eq_handle = srpc_data.rpc_lnet_eq;
+ md.handler = srpc_data.rpc_lnet_handler;
md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1;
md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);
srpc_data.rpc_state = SRPC_STATE_NI_INIT;
- srpc_data.rpc_lnet_eq = srpc_lnet_ev_handler;
+ srpc_data.rpc_lnet_handler = srpc_lnet_ev_handler;
rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
LASSERT(rc == 0);
#include <lustre_sec.h>
#include "ptlrpc_internal.h"
-lnet_eq_handler_t ptlrpc_eq;
+lnet_handler_t ptlrpc_handler;
struct percpu_ref ptlrpc_pending;
/*
* because we are guaranteed to get every event via callback,
* so we just set EQ size to 0 to avoid the overhead of serializing
* enqueue/dequeue operations in LNet. */
- ptlrpc_eq = ptlrpc_master_callback;
+ ptlrpc_handler = ptlrpc_master_callback;
return 0;
}
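A sketch of the dispatch this relies on: every MD ptlrpc posts stores a struct ptlrpc_cb_id in user_ptr (see the hunks below), so the single registered handler can fan events out to per-MD callbacks. The ev->md.user_ptr access assumes this patch's struct lnet_event layout; master_callback_sketch is a hypothetical stand-in for ptlrpc_master_callback:

static void master_callback_sketch(struct lnet_event *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;	/* set at MD setup */

	cbid->cbid_fn(ev);	/* e.g. request_out_callback, reply_in_callback */
}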
md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
md.options = PTLRPC_MD_OPTIONS;
md.user_ptr = cbid;
- md.eq_handle = ptlrpc_eq;
+ md.handler = ptlrpc_handler;
LNetInvalidateMDHandle(&md.bulk_handle);
if (bulk_cookie) {
desc->bd_failure = 0;
md.user_ptr = &desc->bd_cbid;
- md.eq_handle = ptlrpc_eq;
+ md.handler = ptlrpc_handler;
md.threshold = 2; /* SENT and ACK/REPLY */
for (posted_md = 0; posted_md < total_md; mbits++) {
desc->bd_last_mbits = mbits;
desc->bd_md_count = total_md;
md.user_ptr = &desc->bd_cbid;
- md.eq_handle = ptlrpc_eq;
+ md.handler = ptlrpc_handler;
md.threshold = 1; /* PUT or GET */
for (posted_md = 0; posted_md < total_md; posted_md++, mbits++) {
LNET_MD_MANAGE_REMOTE |
LNET_MD_TRUNCATE; /* allow EOVERFLOW errors */
reply_md.user_ptr = &request->rq_reply_cbid;
- reply_md.eq_handle = ptlrpc_eq;
+ reply_md.handler = ptlrpc_handler;
/* We must see the unlink callback to set rq_reply_unlinked,
* so we can't auto-unlink */
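That constraint is why the reply MD is attached with LNET_RETAIN and unlinked by hand: auto-unlink would let LNet reap the MD before the unlink callback could set rq_reply_unlinked. A sketch under the prototypes used in this patch; post_reply_buffer, reply_buffer_done, and their parameters are placeholders:

static int post_reply_buffer(struct lnet_me *reply_me, struct lnet_md reply_md,
			     struct lnet_handle_md *mdh)
{
	/* LNET_RETAIN: no auto-unlink, we need the explicit unlink event. */
	return LNetMDAttach(reply_me, reply_md, LNET_RETAIN, mdh);
}

static void reply_buffer_done(struct lnet_handle_md mdh)
{
	LNetMDUnlink(mdh);	/* handler will now see ev->unlinked set */
}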
md.threshold = LNET_MD_THRESH_INF;
md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
md.user_ptr = &rqbd->rqbd_cbid;
- md.eq_handle = ptlrpc_eq;
+ md.handler = ptlrpc_handler;
rc = LNetMDAttach(me, md, LNET_UNLINK, &rqbd->rqbd_md_h);
if (rc == 0) {
extern struct mutex ptlrpcd_mutex;
extern struct mutex pinger_mutex;
-extern lnet_eq_handler_t ptlrpc_eq;
+extern lnet_handler_t ptlrpc_handler;
extern struct percpu_ref ptlrpc_pending;
int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait);