From ff8a305376bf8833011ec81d4c3fe750d15597a8 Mon Sep 17 00:00:00 2001
From: Mr NeilBrown
Date: Fri, 31 Jan 2020 11:16:20 +1100
Subject: [PATCH] LU-13005 lnet: remove 'eq' from names, use 'handler'

Now that we don't have an event-queue subsystem, using 'eq' in various
names is confusing.  We have events and event handlers, so change
lnet_eq_handler_t to lnet_handler_t and change variable names that
mention 'eq' to just mention 'handler'.

Signed-off-by: Mr NeilBrown
Change-Id: If6f34ed41c565cb37ac798b5ba0411b6a471d6d0
Reviewed-on: https://review.whamcloud.com/37378
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: Chris Horn
Reviewed-by: James Simmons
Reviewed-by: Oleg Drokin
---
 lnet/include/lnet/lib-lnet.h              |  2 +-
 lnet/include/lnet/lib-types.h             | 12 ++++++------
 lnet/include/uapi/linux/lnet/lnet-types.h |  9 ++++-----
 lnet/lnet/api-ni.c                        | 20 ++++++++++----------
 lnet/lnet/lib-md.c                        | 16 ++++++++--------
 lnet/lnet/lib-me.c                        |  4 ++--
 lnet/lnet/lib-move.c                      | 12 +++++++-----
 lnet/lnet/lib-msg.c                       |  4 ++--
 lnet/lnet/peer.c                          | 10 +++++-----
 lnet/selftest/rpc.c                       |  8 ++++----
 lustre/ptlrpc/events.c                    |  4 ++--
 lustre/ptlrpc/niobuf.c                    | 10 +++++-----
 lustre/ptlrpc/ptlrpc_internal.h           |  2 +-
 13 files changed, 57 insertions(+), 56 deletions(-)

diff --git a/lnet/include/lnet/lib-lnet.h b/lnet/include/lnet/lib-lnet.h
index 34c1411..1179129 100644
--- a/lnet/include/lnet/lib-lnet.h
+++ b/lnet/include/lnet/lib-lnet.h
@@ -572,7 +572,7 @@ void lnet_prep_send(struct lnet_msg *msg, int type,
 		    unsigned int len);
 int lnet_send(lnet_nid_t nid, struct lnet_msg *msg, lnet_nid_t rtr_nid);
 int lnet_send_ping(lnet_nid_t dest_nid, struct lnet_handle_md *mdh, int nnis,
-		   void *user_ptr, lnet_eq_handler_t eq, bool recovery);
+		   void *user_ptr, lnet_handler_t handler, bool recovery);
 void lnet_return_tx_credits_locked(struct lnet_msg *msg);
 void lnet_return_rx_credits_locked(struct lnet_msg *msg);
 void lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp);
diff --git a/lnet/include/lnet/lib-types.h b/lnet/include/lnet/lib-types.h
index 41d7ee7..8cc0609 100644
--- a/lnet/include/lnet/lib-types.h
+++ b/lnet/include/lnet/lib-types.h
@@ -209,7 +209,7 @@ struct lnet_libmd {
 	unsigned int		 md_niov;	/* # frags at end of struct */
 	void			*md_user_ptr;
 	struct lnet_rsp_tracker	*md_rspt_ptr;
-	lnet_eq_handler_t	 md_eq;
+	lnet_handler_t		 md_handler;
 	struct lnet_handle_md	 md_bulk_handle;
 	union {
 		struct kvec	iov[LNET_MAX_IOV];
@@ -1052,7 +1052,7 @@ struct lnet {
 	 * ln_api_mutex.
 	 */
 	struct lnet_handle_md	ln_ping_target_md;
-	lnet_eq_handler_t	ln_ping_target_eq;
+	lnet_handler_t		ln_ping_target_handler;
 	struct lnet_ping_buffer	*ln_ping_target;
 	atomic_t		ln_ping_target_seqno;

@@ -1064,13 +1064,13 @@ struct lnet {
 	 * buffer may linger a while after it has been unlinked, in
 	 * which case the event handler cleans up.
 	 */
-	lnet_eq_handler_t	ln_push_target_eq;
+	lnet_handler_t		ln_push_target_handler;
 	struct lnet_handle_md	ln_push_target_md;
 	struct lnet_ping_buffer	*ln_push_target;
 	int			ln_push_target_nnis;

 	/* discovery event queue handle */
-	lnet_eq_handler_t	ln_dc_eq;
+	lnet_handler_t		ln_dc_handler;
 	/* discovery requests */
 	struct list_head	ln_dc_request;
 	/* discovery working list */
@@ -1140,8 +1140,8 @@ struct lnet {
 	 * operations on the MD complete or when LNet has shut down.
 	 */
 	struct list_head **ln_mt_zombie_rstqs;
-	/* recovery eq handler */
-	lnet_eq_handler_t	ln_mt_eq;
+	/* recovery handler */
+	lnet_handler_t		ln_mt_handler;

 	/*
 	 * Completed when the discovery and monitor threads can enter their
diff --git a/lnet/include/uapi/linux/lnet/lnet-types.h b/lnet/include/uapi/linux/lnet/lnet-types.h
index be0c510..ba41d6e 100644
--- a/lnet/include/uapi/linux/lnet/lnet-types.h
+++ b/lnet/include/uapi/linux/lnet/lnet-types.h
@@ -322,7 +322,7 @@ struct lnet_ping_info {
 /**
  * Objects maintained by the LNet are accessed through handles. Handle types
  * have names of the form lnet_handle_xx, where xx is one of the two letter
- * object type codes ('eq' for event queue, 'md' for memory descriptor, and
+ * object type codes ('md' for memory descriptor, and
  * 'me' for match entry). Each type of object is given a unique handle type
  * to enhance type checking.
  */
@@ -341,7 +341,7 @@ static inline void LNetInvalidateMDHandle(struct lnet_handle_md *h)
 }

 /**
- * Check whether eq handle \a h is invalid.
+ * Check whether handler \a h is invalid.
  *
  * \return 1 if handle is invalid, 0 if valid.
  */
@@ -405,8 +405,7 @@ enum lnet_ins_pos {
  * API functions. It should return as quickly as possible.
  */
 struct lnet_event;
-typedef void (*lnet_eq_handler_t)(struct lnet_event *event);
-#define LNET_EQ_HANDLER_NONE NULL
+typedef void (*lnet_handler_t)(struct lnet_event *event);

 /**
  * Defines the visible parts of a memory descriptor. Values of this type
@@ -498,7 +497,7 @@ struct lnet_md {
 	 * the memory region. If this argument is NULL operations
 	 * performed on this memory descriptor are not logged.
 	 */
-	lnet_eq_handler_t eq_handle;
+	lnet_handler_t handler;
 	/**
 	 * The bulk MD handle which was registered to describe the buffers
 	 * either to be used to transfer data to the peer or receive data
diff --git a/lnet/lnet/api-ni.c b/lnet/lnet/api-ni.c
index 2b49505..f59d581 100644
--- a/lnet/lnet/api-ni.c
+++ b/lnet/lnet/api-ni.c
@@ -1186,7 +1186,7 @@ lnet_prepare(lnet_pid_t requested_pid)
 	INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
 	INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
 	init_waitqueue_head(&the_lnet.ln_dc_waitq);
-	the_lnet.ln_mt_eq = NULL;
+	the_lnet.ln_mt_handler = NULL;
 	init_completion(&the_lnet.ln_started);

 	rc = lnet_slab_setup();
@@ -1270,7 +1270,7 @@ lnet_unprepare (void)
 		the_lnet.ln_mt_zombie_rstqs = NULL;
 	}

-	the_lnet.ln_mt_eq = NULL;
+	the_lnet.ln_mt_handler = NULL;

 	lnet_portals_destroy();

@@ -1694,7 +1694,7 @@ lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
 	int rc;

 	if (set_eq)
-		the_lnet.ln_ping_target_eq =
+		the_lnet.ln_ping_target_handler =
 			lnet_ping_target_event_handler;

 	*ppbuf = lnet_ping_target_create(ni_count);
@@ -1720,7 +1720,7 @@ lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
 	md.max_size = 0;
 	md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
 		     LNET_MD_MANAGE_REMOTE;
-	md.eq_handle = the_lnet.ln_ping_target_eq;
+	md.handler = the_lnet.ln_ping_target_handler;
 	md.user_ptr = *ppbuf;

 	rc = LNetMDAttach(me, md, LNET_RETAIN, ping_mdh);
@@ -1929,7 +1929,7 @@ int lnet_push_target_post(struct lnet_ping_buffer *pbuf,
 	md.max_size = 0;
 	md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE;
 	md.user_ptr = pbuf;
-	md.eq_handle = the_lnet.ln_push_target_eq;
+	md.handler = the_lnet.ln_push_target_handler;

 	rc = LNetMDAttach(me, md, LNET_UNLINK, mdhp);
 	if (rc) {
@@ -1975,7 +1975,7 @@ static int lnet_push_target_init(void)
 	if (the_lnet.ln_push_target)
 		return -EALREADY;

-	the_lnet.ln_push_target_eq =
+	the_lnet.ln_push_target_handler =
 		lnet_push_target_event_handler;

 	rc = LNetSetLazyPortal(LNET_RESERVED_PORTAL);
@@ -1988,7 +1988,7 @@ static int lnet_push_target_init(void)

 	if (rc) {
 		LNetClearLazyPortal(LNET_RESERVED_PORTAL);
-		the_lnet.ln_push_target_eq = NULL;
+		the_lnet.ln_push_target_handler = NULL;
 	}

 	return rc;
@@ -2015,7 +2015,7 @@ static void lnet_push_target_fini(void)
 	the_lnet.ln_push_target_nnis = 0;

 	LNetClearLazyPortal(LNET_RESERVED_PORTAL);
-	the_lnet.ln_push_target_eq = NULL;
+	the_lnet.ln_push_target_handler = NULL;
 }

 static int
@@ -2675,7 +2675,7 @@ LNetNIInit(lnet_pid_t requested_pid)

 	lnet_ping_target_update(pbuf, ping_mdh);

-	the_lnet.ln_mt_eq = lnet_mt_event_handler;
+	the_lnet.ln_mt_handler = lnet_mt_event_handler;

 	rc = lnet_push_target_init();
 	if (rc != 0)
@@ -4153,7 +4153,7 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout,
 	md.max_size = 0;
 	md.options = LNET_MD_TRUNCATE;
 	md.user_ptr = &pd;
-	md.eq_handle = lnet_ping_event_handler;
+	md.handler = lnet_ping_event_handler;

 	init_completion(&pd.completion);

diff --git a/lnet/lnet/lib-md.c b/lnet/lnet/lib-md.c
index c3b9daa..5927de6 100644
--- a/lnet/lnet/lib-md.c
+++ b/lnet/lnet/lib-md.c
@@ -195,7 +195,7 @@ lnet_md_build(struct lnet_libmd *lmd, struct lnet_md *umd, int unlink)
 	lmd->md_max_size = umd->max_size;
 	lmd->md_options = umd->options;
 	lmd->md_user_ptr = umd->user_ptr;
-	lmd->md_eq = NULL;
+	lmd->md_handler = NULL;
 	lmd->md_threshold = umd->threshold;
 	lmd->md_refcount = 0;
 	lmd->md_flags = (unlink == LNET_UNLINK) ? LNET_MD_FLAG_AUTO_UNLINK : 0;
@@ -238,7 +238,7 @@ lnet_md_build(struct lnet_libmd *lmd, struct lnet_md *umd, int unlink)

 /* must be called with resource lock held */
 static int
-lnet_md_link(struct lnet_libmd *md, lnet_eq_handler_t eq, int cpt)
+lnet_md_link(struct lnet_libmd *md, lnet_handler_t handler, int cpt)
 {
 	struct lnet_res_container *container = the_lnet.ln_md_containers[cpt];

@@ -252,9 +252,9 @@ lnet_md_link(struct lnet_libmd *md, lnet_eq_handler_t eq, int cpt)
 	/* TODO - reevaluate what should be here in light of
 	 * the removal of the start and end events
 	 * maybe there we shouldn't even allow LNET_EQ_NONE!)
-	 * LASSERT (eq == NULL);
+	 * LASSERT (handler != NULL);
 	 */
-	md->md_eq = eq;
+	md->md_handler = handler;

 	lnet_res_lh_initialize(container, &md->md_lh);

@@ -361,7 +361,7 @@ LNetMDAttach(struct lnet_me *me, struct lnet_md umd,
 	if (me->me_md)
 		rc = -EBUSY;
 	else
-		rc = lnet_md_link(md, umd.eq_handle, cpt);
+		rc = lnet_md_link(md, umd.handler, cpt);
 	if (rc != 0)
 		goto out_unlock;

@@ -438,7 +438,7 @@ LNetMDBind(struct lnet_md umd, enum lnet_unlink unlink,

 	cpt = lnet_res_lock_current();

-	rc = lnet_md_link(md, umd.eq_handle, cpt);
+	rc = lnet_md_link(md, umd.handler, cpt);
 	if (rc != 0)
 		goto out_unlock;

@@ -508,9 +508,9 @@ LNetMDUnlink(struct lnet_handle_md mdh)
 	/* If the MD is busy, lnet_md_unlink just marks it for deletion, and
 	 * when the LND is done, the completion event flags that the MD was
 	 * unlinked. Otherwise, we enqueue an event now... */
-	if (md->md_eq != NULL && md->md_refcount == 0) {
+	if (md->md_handler && md->md_refcount == 0) {
 		lnet_build_unlink_event(md, &ev);
-		md->md_eq(&ev);
+		md->md_handler(&ev);
 	}

 	if (md->md_rspt_ptr != NULL)
diff --git a/lnet/lnet/lib-me.c b/lnet/lnet/lib-me.c
index 3e0c940..a87cf74 100644
--- a/lnet/lnet/lib-me.c
+++ b/lnet/lnet/lib-me.c
@@ -148,9 +148,9 @@ LNetMEUnlink(struct lnet_me *me)
 	md = me->me_md;
 	if (md != NULL) {
 		md->md_flags |= LNET_MD_FLAG_ABORTED;
-		if (md->md_eq != NULL && md->md_refcount == 0) {
+		if (md->md_handler && md->md_refcount == 0) {
 			lnet_build_unlink_event(md, &ev);
-			md->md_eq(&ev);
+			md->md_handler(&ev);
 		}
 	}

diff --git a/lnet/lnet/lib-move.c b/lnet/lnet/lib-move.c
index 0c9c4f3..b18e6dd 100644
--- a/lnet/lnet/lib-move.c
+++ b/lnet/lnet/lib-move.c
@@ -3202,7 +3202,8 @@ lnet_recover_local_nis(void)
 			ev_info->mt_type = MT_TYPE_LOCAL_NI;
 			ev_info->mt_nid = nid;
 			rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
-					    ev_info, the_lnet.ln_mt_eq, true);
+					    ev_info, the_lnet.ln_mt_handler,
+					    true);
 			/* lookup the nid again */
 			lnet_net_lock(0);
 			ni = lnet_nid2ni_locked(nid, 0);
@@ -3435,7 +3436,8 @@ lnet_recover_peer_nis(void)
 			ev_info->mt_type = MT_TYPE_PEER_NI;
 			ev_info->mt_nid = nid;
 			rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
-					    ev_info, the_lnet.ln_mt_eq, true);
+					    ev_info, the_lnet.ln_mt_handler,
+					    true);
 			lnet_net_lock(0);
 			/*
 			 * lnet_find_peer_ni_locked() grabs a refcount for
@@ -3565,7 +3567,7 @@ lnet_monitor_thread(void *arg)
 int
 lnet_send_ping(lnet_nid_t dest_nid,
 	       struct lnet_handle_md *mdh, int nnis,
-	       void *user_data, lnet_eq_handler_t eq, bool recovery)
+	       void *user_data, lnet_handler_t handler, bool recovery)
 {
 	struct lnet_md md = { NULL };
 	struct lnet_process_id id;
@@ -3590,7 +3592,7 @@ lnet_send_ping(lnet_nid_t dest_nid,
 	md.max_size = 0;
 	md.options = LNET_MD_TRUNCATE;
 	md.user_ptr = user_data;
-	md.eq_handle = eq;
+	md.handler = handler;

 	rc = LNetMDBind(md, LNET_UNLINK, mdh);
 	if (rc) {
@@ -3789,7 +3791,7 @@ clean_thread:
 	lnet_clean_local_ni_recoveryq();
 	lnet_clean_peer_ni_recoveryq();
 	lnet_clean_resendqs();
-	the_lnet.ln_mt_eq = NULL;
+	the_lnet.ln_mt_handler = NULL;
 	return rc;
 clean_queues:
 	lnet_rsp_tracker_clean();
diff --git a/lnet/lnet/lib-msg.c b/lnet/lnet/lib-msg.c
index 69f0700..0c469d4 100644
--- a/lnet/lnet/lib-msg.c
+++ b/lnet/lnet/lib-msg.c
@@ -939,7 +939,7 @@ lnet_msg_detach_md(struct lnet_msg *msg, int cpt, int status)
 	LASSERT(md->md_refcount >= 0);

 	unlink = lnet_md_unlinkable(md);
-	if (md->md_eq != NULL) {
+	if (md->md_handler) {
 		if ((md->md_flags & LNET_MD_FLAG_ABORTED) && !status) {
 			msg->msg_ev.status = -ETIMEDOUT;
 			CDEBUG(D_NET, "md 0x%p already unlinked\n", md);
@@ -947,7 +947,7 @@ lnet_msg_detach_md(struct lnet_msg *msg, int cpt, int status)
 			msg->msg_ev.status = status;
 		}
 		msg->msg_ev.unlinked = unlink;
-		md->md_eq(&msg->msg_ev);
+		md->md_handler(&msg->msg_ev);
 	}

 	if (unlink || (md->md_refcount == 0 &&
diff --git a/lnet/lnet/peer.c b/lnet/lnet/peer.c
index b2750ac..283839c 100644
--- a/lnet/lnet/peer.c
+++ b/lnet/lnet/peer.c
@@ -2999,7 +2999,7 @@ __must_hold(&lp->lp_lock)

 	nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
 	rc = lnet_send_ping(pnid, &lp->lp_ping_mdh, nnis, lp,
-			    the_lnet.ln_dc_eq, false);
+			    the_lnet.ln_dc_handler, false);

 	/*
 	 * if LNetMDBind in lnet_send_ping fails we need to decrement the
@@ -3091,7 +3091,7 @@ __must_hold(&lp->lp_lock)
 	md.threshold = 2; /* Put/Ack */
 	md.max_size = 0;
 	md.options = 0;
-	md.eq_handle = the_lnet.ln_dc_eq;
+	md.handler = the_lnet.ln_dc_handler;
 	md.user_ptr = lp;

 	rc = LNetMDBind(md, LNET_UNLINK, &lp->lp_push_mdh);
@@ -3475,7 +3475,7 @@ static int lnet_peer_discovery(void *arg)
 	}
 	lnet_net_unlock(LNET_LOCK_EX);

-	the_lnet.ln_dc_eq = NULL;
+	the_lnet.ln_dc_handler = NULL;
 	the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
 	wake_up(&the_lnet.ln_dc_waitq);

@@ -3494,14 +3494,14 @@ int lnet_peer_discovery_start(void)
 	if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
 		return -EALREADY;

-	the_lnet.ln_dc_eq = lnet_discovery_event_handler;
+	the_lnet.ln_dc_handler = lnet_discovery_event_handler;
 	the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
 	task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
 	if (IS_ERR(task)) {
 		rc = PTR_ERR(task);
 		CERROR("Can't start peer discovery thread: %d\n", rc);
-		the_lnet.ln_dc_eq = NULL;
+		the_lnet.ln_dc_handler = NULL;
 		the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
 	}

diff --git a/lnet/selftest/rpc.c b/lnet/selftest/rpc.c
index 71fa157..6d71a02 100644
--- a/lnet/selftest/rpc.c
+++ b/lnet/selftest/rpc.c
@@ -53,7 +53,7 @@ enum srpc_state {
 static struct smoketest_rpc {
 	spinlock_t		 rpc_glock;	/* global lock */
 	struct srpc_service	*rpc_services[SRPC_SERVICE_MAX_ID + 1];
-	lnet_eq_handler_t	 rpc_lnet_eq;	/* _the_ LNet event handler */
+	lnet_handler_t		 rpc_lnet_handler;/* _the_ LNet event handler */
 	enum srpc_state		 rpc_state;
 	struct srpc_counters	 rpc_counters;
 	__u64			 rpc_matchbits;	/* matchbits counter */
@@ -373,7 +373,7 @@ srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
 	md.start = buf;
 	md.length = len;
 	md.options = options;
-	md.eq_handle = srpc_data.rpc_lnet_eq;
+	md.handler = srpc_data.rpc_lnet_handler;

 	rc = LNetMDAttach(me, md, LNET_UNLINK, mdh);
 	if (rc != 0) {
@@ -402,7 +402,7 @@ srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
 	md.user_ptr = ev;
 	md.start = buf;
 	md.length = len;
-	md.eq_handle = srpc_data.rpc_lnet_eq;
+	md.handler = srpc_data.rpc_lnet_handler;
 	md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1;
 	md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);

@@ -1621,7 +1621,7 @@ srpc_startup (void)

 	srpc_data.rpc_state = SRPC_STATE_NI_INIT;

-	srpc_data.rpc_lnet_eq = srpc_lnet_ev_handler;
+	srpc_data.rpc_lnet_handler = srpc_lnet_ev_handler;

 	rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
 	LASSERT(rc == 0);
diff --git a/lustre/ptlrpc/events.c b/lustre/ptlrpc/events.c
index 0a76d9a..b6d4cdd 100644
--- a/lustre/ptlrpc/events.c
+++ b/lustre/ptlrpc/events.c
@@ -40,7 +40,7 @@
 #include
 #include "ptlrpc_internal.h"

-lnet_eq_handler_t ptlrpc_eq;
+lnet_handler_t ptlrpc_handler;
 struct percpu_ref ptlrpc_pending;

 /*
@@ -602,7 +602,7 @@ int ptlrpc_ni_init(void)
 	 * because we are guaranteed to get every event via callback,
 	 * so we just set EQ size to 0 to avoid overhread of serializing
 	 * enqueue/dequeue operations in LNet. */
-	ptlrpc_eq = ptlrpc_master_callback;
+	ptlrpc_handler = ptlrpc_master_callback;
 	return 0;
 }

diff --git a/lustre/ptlrpc/niobuf.c b/lustre/ptlrpc/niobuf.c
index 2f696a1..aed657d 100644
--- a/lustre/ptlrpc/niobuf.c
+++ b/lustre/ptlrpc/niobuf.c
@@ -61,7 +61,7 @@ static int ptl_send_buf(struct lnet_handle_md *mdh, void *base, int len,
 	md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
 	md.options = PTLRPC_MD_OPTIONS;
 	md.user_ptr = cbid;
-	md.eq_handle = ptlrpc_eq;
+	md.handler = ptlrpc_handler;
 	LNetInvalidateMDHandle(&md.bulk_handle);

 	if (bulk_cookie) {
@@ -197,7 +197,7 @@ int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc)
 	desc->bd_failure = 0;

 	md.user_ptr = &desc->bd_cbid;
-	md.eq_handle = ptlrpc_eq;
+	md.handler = ptlrpc_handler;
 	md.threshold = 2; /* SENT and ACK/REPLY */

 	for (posted_md = 0; posted_md < total_md; mbits++) {
@@ -366,7 +366,7 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
 	desc->bd_last_mbits = mbits;
 	desc->bd_md_count = total_md;
 	md.user_ptr = &desc->bd_cbid;
-	md.eq_handle = ptlrpc_eq;
+	md.handler = ptlrpc_handler;
 	md.threshold = 1; /* PUT or GET */

 	for (posted_md = 0; posted_md < total_md; posted_md++, mbits++) {
@@ -860,7 +860,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
 				  LNET_MD_MANAGE_REMOTE |
 				  LNET_MD_TRUNCATE; /* allow to make EOVERFLOW error */;
 	reply_md.user_ptr = &request->rq_reply_cbid;
-	reply_md.eq_handle = ptlrpc_eq;
+	reply_md.handler = ptlrpc_handler;

 	/* We must see the unlink callback to set rq_reply_unlinked,
 	 * so we can't auto-unlink */
@@ -986,7 +986,7 @@ int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
 	md.threshold = LNET_MD_THRESH_INF;
 	md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
 	md.user_ptr = &rqbd->rqbd_cbid;
-	md.eq_handle = ptlrpc_eq;
+	md.handler = ptlrpc_handler;

 	rc = LNetMDAttach(me, md, LNET_UNLINK, &rqbd->rqbd_md_h);
 	if (rc == 0) {
diff --git a/lustre/ptlrpc/ptlrpc_internal.h b/lustre/ptlrpc/ptlrpc_internal.h
index 5624e50..c06885e 100644
--- a/lustre/ptlrpc/ptlrpc_internal.h
+++ b/lustre/ptlrpc/ptlrpc_internal.h
@@ -63,7 +63,7 @@ extern struct nrs_core nrs_core;

 extern struct mutex ptlrpcd_mutex;
 extern struct mutex pinger_mutex;
-extern lnet_eq_handler_t ptlrpc_eq;
+extern lnet_handler_t ptlrpc_handler;
 extern struct percpu_ref ptlrpc_pending;

 int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait);
--
1.8.3.1
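
For reference, a minimal sketch (not part of the patch) of how a caller wires up an
event handler after this renaming.  md.handler, lnet_handler_t, struct lnet_md,
LNetMDBind() and the event fields used below are taken from the hunks above;
my_handler(), post_md_with_handler() and the threshold/options values are
hypothetical illustrations, and error handling is elided.

/* Hypothetical handler: invoked once per event on the attached MD. */
static void my_handler(struct lnet_event *ev)
{
	CDEBUG(D_NET, "event status %d, unlinked %d\n",
	       ev->status, ev->unlinked);
}

/* Bind a buffer to an MD and register the handler (formerly md.eq_handle). */
static int post_md_with_handler(void *buf, unsigned int len,
				struct lnet_handle_md *mdh)
{
	struct lnet_md md = { NULL };

	md.start = buf;
	md.length = len;
	md.threshold = 2;		/* e.g. SENT and ACK/REPLY */
	md.options = LNET_MD_TRUNCATE;
	md.user_ptr = NULL;
	md.handler = my_handler;	/* lnet_handler_t, was md.eq_handle */

	return LNetMDBind(md, LNET_UNLINK, mdh);
}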