int LNetMDUnlink(struct lnet_handle_md md_in);
/** @} lnet_md */
-/** \defgroup lnet_eq Events and event queues
- *
- * Event queues (abbreviated as EQ) are used to log operations performed on
- * local MDs. In particular, they signal the completion of a data transmission
- * into or out of a MD. They can also be used to hold acknowledgments for
- * completed PUT operations and indicate when a MD has been unlinked. Multiple
- * MDs can share a single EQ. An EQ must have an event handler
- * associated with it. It will be run for each event that is deposited into
- * the EQ.
- *
- * In addition to the struct lnet_eq, the LNet API defines two types
- * associated with events: The ::lnet_event_kind defines the kinds of events
- * that can be stored in an EQ. The struct lnet_event defines a structure that
- * holds the information about with an event.
- *
- * There are two functions for dealing with EQs: LNetEQAlloc() is used
- * to create an EQ and allocate the resources needed, while LNetEQFree()
- * releases these resources and frees the EQ.
- * @{ */
-struct lnet_eq *
-LNetEQAlloc(lnet_eq_handler_t handler);
-
-void LNetEQFree(struct lnet_eq *eventq_in);
-
-/** @} lnet_eq */
-
/** \defgroup lnet_data Data movement operations
*
* The LNet API provides two data movement operations: LNetPut()
#define lnet_ptl_lock(ptl) spin_lock(&(ptl)->ptl_lock)
#define lnet_ptl_unlock(ptl) spin_unlock(&(ptl)->ptl_lock)
-#define lnet_eq_wait_lock() spin_lock(&the_lnet.ln_eq_wait_lock)
-#define lnet_eq_wait_unlock() spin_unlock(&the_lnet.ln_eq_wait_lock)
#define lnet_ni_lock(ni) spin_lock(&(ni)->ni_lock)
#define lnet_ni_unlock(ni) spin_unlock(&(ni)->ni_lock)
extern struct kmem_cache *lnet_rspt_cachep;
extern struct kmem_cache *lnet_msg_cachep;
-static inline struct lnet_eq *
-lnet_eq_alloc (void)
-{
- struct lnet_eq *eq;
-
- LIBCFS_ALLOC(eq, sizeof(*eq));
- return (eq);
-}
-
-static inline void
-lnet_eq_free(struct lnet_eq *eq)
-{
- LIBCFS_FREE(eq, sizeof(*eq));
-}
-
static inline struct lnet_libmd *
lnet_md_alloc(struct lnet_md *umd)
{
void lnet_msg_commit(struct lnet_msg *msg, int cpt);
void lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status);
-void lnet_eq_enqueue_event(struct lnet_eq *eq, struct lnet_event *ev);
void lnet_prep_send(struct lnet_msg *msg, int type,
struct lnet_process_id target, unsigned int offset,
unsigned int len);
int lnet_send(lnet_nid_t nid, struct lnet_msg *msg, lnet_nid_t rtr_nid);
int lnet_send_ping(lnet_nid_t dest_nid, struct lnet_handle_md *mdh, int nnis,
- void *user_ptr, struct lnet_eq *eq, bool recovery);
+ void *user_ptr, lnet_eq_handler_t eq, bool recovery);
void lnet_return_tx_credits_locked(struct lnet_msg *msg);
void lnet_return_rx_credits_locked(struct lnet_msg *msg);
void lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp);
#define lh_entry(ptr, type, member) \
((type *)((char *)(ptr)-(char *)(&((type *)0)->member)))
-struct lnet_eq {
- lnet_eq_handler_t eq_callback;
-};
-
struct lnet_me {
struct list_head me_list;
int me_cpt;
unsigned int md_niov; /* # frags at end of struct */
void *md_user_ptr;
struct lnet_rsp_tracker *md_rspt_ptr;
- struct lnet_eq *md_eq;
+ lnet_eq_handler_t md_eq;
struct lnet_handle_md md_bulk_handle;
union {
struct kvec iov[LNET_MAX_IOV];
* ln_api_mutex.
*/
struct lnet_handle_md ln_ping_target_md;
- struct lnet_eq *ln_ping_target_eq;
+ lnet_eq_handler_t ln_ping_target_eq;
struct lnet_ping_buffer *ln_ping_target;
atomic_t ln_ping_target_seqno;
* buffer may linger a while after it has been unlinked, in
* which case the event handler cleans up.
*/
- struct lnet_eq *ln_push_target_eq;
+ lnet_eq_handler_t ln_push_target_eq;
struct lnet_handle_md ln_push_target_md;
struct lnet_ping_buffer *ln_push_target;
int ln_push_target_nnis;
/* discovery event queue handle */
- struct lnet_eq *ln_dc_eq;
+ lnet_eq_handler_t ln_dc_eq;
/* discovery requests */
struct list_head ln_dc_request;
/* discovery working list */
*/
struct list_head **ln_mt_zombie_rstqs;
/* recovery eq handler */
- struct lnet_eq *ln_mt_eq;
+ lnet_eq_handler_t ln_mt_eq;
/*
* Completed when the discovery and monitor threads can enter their
* @{ */
/**
+ * Event queue handler function type.
+ *
+ * The handler runs for each event generated on a memory descriptor that
+ * references it. There is no intervening queue: the handler is invoked
+ * directly, in the context that generates the event, with a pointer to
+ * that event.
+ *
+ * The handler must not block, must be reentrant, and must not call any LNet
+ * API functions. It should return as quickly as possible.
+ */
+struct lnet_event;
+typedef void (*lnet_eq_handler_t)(struct lnet_event *event);
+#define LNET_EQ_HANDLER_NONE NULL
+
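The typedef above pins down the handler contract. As a rough sketch only — the ping_data fields (rc, replied, completion) are taken from the lnet_ping() hunk later in this patch, and it assumes the event still carries the MD's user pointer in event->md.user_ptr — a conforming handler records state and signals a waiter, without blocking or re-entering LNet:

static void example_eq_handler(struct lnet_event *event)
{
	struct ping_data *pd = event->md.user_ptr;

	/* Record the outcome; no locks held, no LNet calls made. */
	if (event->type == LNET_EVENT_REPLY && event->status == 0) {
		pd->rc = event->mlength;
		pd->replied = 1;
	}

	/* The final event on an MD is flagged unlinked; wake the waiter. */
	if (event->unlinked)
		complete(&pd->completion);
}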
+/**
* Defines the visible parts of a memory descriptor. Values of this type
* are used to initialize memory descriptors.
*/
*/
void *user_ptr;
/**
- * A handle for the event queue used to log the operations performed on
- * the memory region. If this argument is a NULL handle operations
+ * The event handler used to log the operations performed on
+ * the memory region. If this argument is NULL, operations
* performed on this memory descriptor are not logged.
*/
- struct lnet_eq *eq_handle;
+ lnet_eq_handler_t eq_handle;
/**
* The bulk MD handle which was registered to describe the buffers
* either to be used to transfer data to the peer or receive data
volatile unsigned long sequence;
};
-/**
- * Event queue handler function type.
- *
- * The EQ handler runs for each event that is deposited into the EQ. The
- * handler is supplied with a pointer to the event that triggered the
- * handler invocation.
- *
- * The handler must not block, must be reentrant, and must not call any LNet
- * API functions. It should return as quickly as possible.
- */
-typedef void (*lnet_eq_handler_t)(struct lnet_event *event);
-#define LNET_EQ_HANDLER_NONE NULL
-/** @} lnet_eq */
-
/** \addtogroup lnet_data
* @{ */
MODULES := lnet
lnet-objs := api-ni.o config.o nidstrings.o
-lnet-objs += lib-me.o lib-msg.o lib-eq.o lib-md.o lib-ptl.o
+lnet-objs += lib-me.o lib-msg.o lib-md.o lib-ptl.o
lnet-objs += lib-socket.o lib-move.o module.o lo.o
lnet-objs += router.o router_proc.o acceptor.o peer.o net_fault.o
the_lnet.ln_mt_zombie_rstqs = NULL;
}
- if (the_lnet.ln_mt_eq) {
- LNetEQFree(the_lnet.ln_mt_eq);
- the_lnet.ln_mt_eq = NULL;
- }
+ the_lnet.ln_mt_eq = NULL;
lnet_portals_destroy();
struct lnet_md md = { NULL };
int rc;
- if (set_eq) {
+ if (set_eq)
the_lnet.ln_ping_target_eq =
- LNetEQAlloc(lnet_ping_target_event_handler);
- if (IS_ERR(the_lnet.ln_ping_target_eq)) {
- rc = PTR_ERR(the_lnet.ln_ping_target_eq);
- CERROR("Can't allocate ping buffer EQ: %d\n", rc);
- return rc;
- }
- }
+ lnet_ping_target_event_handler;
*ppbuf = lnet_ping_target_create(ni_count);
if (*ppbuf == NULL) {
lnet_ping_buffer_decref(*ppbuf);
*ppbuf = NULL;
fail_free_eq:
- if (set_eq)
- LNetEQFree(the_lnet.ln_ping_target_eq);
-
return rc;
}
lnet_ping_md_unlink(the_lnet.ln_ping_target,
&the_lnet.ln_ping_target_md);
- LNetEQFree(the_lnet.ln_ping_target_eq);
-
lnet_ping_target_destroy();
}
return -EALREADY;
the_lnet.ln_push_target_eq =
- LNetEQAlloc(lnet_push_target_event_handler);
- if (IS_ERR(the_lnet.ln_push_target_eq)) {
- rc = PTR_ERR(the_lnet.ln_push_target_eq);
- CERROR("Can't allocated push target EQ: %d\n", rc);
- return rc;
- }
+ lnet_push_target_event_handler;
rc = LNetSetLazyPortal(LNET_RESERVED_PORTAL);
LASSERT(rc == 0);
if (rc) {
LNetClearLazyPortal(LNET_RESERVED_PORTAL);
- LNetEQFree(the_lnet.ln_push_target_eq);
the_lnet.ln_push_target_eq = NULL;
}
the_lnet.ln_push_target_nnis = 0;
LNetClearLazyPortal(LNET_RESERVED_PORTAL);
- LNetEQFree(the_lnet.ln_push_target_eq);
the_lnet.ln_push_target_eq = NULL;
}
lnet_ping_target_update(pbuf, ping_mdh);
- the_lnet.ln_mt_eq = LNetEQAlloc(lnet_mt_event_handler);
- if (IS_ERR(the_lnet.ln_mt_eq)) {
- rc = PTR_ERR(the_lnet.ln_mt_eq);
- CERROR("Can't allocate monitor thread EQ: %d\n", rc);
- goto err_stop_ping;
- }
+ the_lnet.ln_mt_eq = lnet_mt_event_handler;
rc = lnet_push_target_init();
if (rc != 0)
static int lnet_ping(struct lnet_process_id id, signed long timeout,
struct lnet_process_id __user *ids, int n_ids)
{
- struct lnet_eq *eq;
struct lnet_md md = { NULL };
struct ping_data pd = { 0 };
struct lnet_ping_buffer *pbuf;
if (!pbuf)
return -ENOMEM;
- eq = LNetEQAlloc(lnet_ping_event_handler);
- if (IS_ERR(eq)) {
- rc = PTR_ERR(eq);
- CERROR("Can't allocate EQ: %d\n", rc);
- goto fail_ping_buffer_decref;
- }
-
/* initialize md content */
md.start = &pbuf->pb_info;
md.length = LNET_PING_INFO_SIZE(n_ids);
md.max_size = 0;
md.options = LNET_MD_TRUNCATE;
md.user_ptr = &pd;
- md.eq_handle = eq;
+ md.eq_handle = lnet_ping_event_handler;
init_completion(&pd.completion);
rc = LNetMDBind(md, LNET_UNLINK, &pd.mdh);
if (rc != 0) {
CERROR("Can't bind MD: %d\n", rc);
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
rc = LNetGet(LNET_NID_ANY, pd.mdh, id,
}
if (!pd.replied) {
rc = -EIO;
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
nob = pd.rc;
if (nob < 8) {
CERROR("%s: ping info too short %d\n",
libcfs_id2str(id), nob);
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
} else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
CERROR("%s: Unexpected magic %08x\n",
libcfs_id2str(id), pbuf->pb_info.pi_magic);
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
CERROR("%s: ping w/o NI status: 0x%x\n",
libcfs_id2str(id), pbuf->pb_info.pi_features);
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
if (nob < LNET_PING_INFO_SIZE(0)) {
CERROR("%s: Short reply %d(%d min)\n",
libcfs_id2str(id),
nob, (int)LNET_PING_INFO_SIZE(0));
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
if (pbuf->pb_info.pi_nnis < n_ids)
CERROR("%s: Short reply %d(%d expected)\n",
libcfs_id2str(id),
nob, (int)LNET_PING_INFO_SIZE(n_ids));
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
rc = -EFAULT; /* if I segv in copy_to_user()... */
tmpid.pid = pbuf->pb_info.pi_pid;
tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
rc = pbuf->pb_info.pi_nnis;
- fail_free_eq:
- LNetEQFree(eq);
-
fail_ping_buffer_decref:
lnet_ping_buffer_decref(pbuf);
return rc;
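Taken together, the hunks above reduce event-handler registration to plain assignment. A condensed sketch of the resulting caller-side pattern (identifiers from this diff; other md fields and error handling elided), which can no longer fail and so needs no unwind path of its own:

	struct lnet_md md = { NULL };

	md.start     = &pbuf->pb_info;
	md.length    = LNET_PING_INFO_SIZE(n_ids);
	md.options   = LNET_MD_TRUNCATE;
	md.user_ptr  = &pd;
	md.eq_handle = lnet_ping_event_handler;	/* was LNetEQAlloc(...) */

	rc = LNetMDBind(md, LNET_UNLINK, &pd.mdh);

This is why every "goto fail_free_eq" above collapses into "goto fail_ping_buffer_decref": the only resource left to release on failure is the ping buffer itself.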
--- a/lnet/lnet/lib-eq.c
+++ /dev/null
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2016, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/lnet/lib-eq.c
- *
- * Library level Event queue management routines
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-#include <lnet/lib-lnet.h>
-
-/**
- * Create an event queue that calls a @callback on each event.
- *
- * \param callback A handler function that runs when an event is deposited
- * into the EQ.
- *
- * \retval eq On successful return, the newly created EQ is returned.
- * On failure, an error code encoded with ERR_PTR() is returned.
- * \retval -EINVAL If an parameter is not valid.
- * \retval -ENOMEM If memory for the EQ can't be allocated.
- *
- * \see lnet_eq_handler_t for the discussion on EQ handler semantics.
- */
-struct lnet_eq *
-LNetEQAlloc(lnet_eq_handler_t callback)
-{
- struct lnet_eq *eq;
-
- LASSERT(the_lnet.ln_refcount > 0);
-
- if (callback == LNET_EQ_HANDLER_NONE)
- return ERR_PTR(-EINVAL);
-
- eq = lnet_eq_alloc();
- if (eq == NULL)
- return ERR_PTR(-ENOMEM);
-
- eq->eq_callback = callback;
-
- return eq;
-}
-EXPORT_SYMBOL(LNetEQAlloc);
-
-/**
- * Release the resources associated with an event queue if it's idle;
- * otherwise do nothing and it's up to the user to try again.
- *
- * \param eq The event queue to be released.
- *
- */
-void
-LNetEQFree(struct lnet_eq *eq)
-{
- lnet_eq_free(eq);
-}
-EXPORT_SYMBOL(LNetEQFree);
-
-void
-lnet_eq_enqueue_event(struct lnet_eq *eq, struct lnet_event *ev)
-{
- LASSERT(eq->eq_callback != LNET_EQ_HANDLER_NONE);
- eq->eq_callback(ev);
-}
/* must be called with resource lock held */
static int
-lnet_md_link(struct lnet_libmd *md, struct lnet_eq *eq, int cpt)
+lnet_md_link(struct lnet_libmd *md, lnet_eq_handler_t eq, int cpt)
{
struct lnet_res_container *container = the_lnet.ln_md_containers[cpt];
* maybe there we shouldn't even allow LNET_EQ_NONE!)
* LASSERT (eq == NULL);
*/
- if (eq)
- md->md_eq = eq;
+ md->md_eq = eq;
lnet_res_lh_initialize(container, &md->md_lh);
* unlinked. Otherwise, we enqueue an event now... */
if (md->md_eq != NULL && md->md_refcount == 0) {
lnet_build_unlink_event(md, &ev);
- lnet_eq_enqueue_event(md->md_eq, &ev);
+ md->md_eq(&ev);
}
if (md->md_rspt_ptr != NULL)
md->md_flags |= LNET_MD_FLAG_ABORTED;
if (md->md_eq != NULL && md->md_refcount == 0) {
lnet_build_unlink_event(md, &ev);
- lnet_eq_enqueue_event(md->md_eq, &ev);
+ md->md_eq(&ev);
}
}
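One caveat for handler writers that the direct call makes explicit: when an MD with no active operations is unlinked, the unlink event is delivered synchronously in the unlinking thread, so a handler such as this hypothetical one may run before LNetMDUnlink() returns:

static void my_handler(struct lnet_event *ev)
{
	/* May run in the caller of LNetMDUnlink(), not only from network
	 * completions; the ev->md.user_ptr wiring here is hypothetical. */
	if (ev->unlinked)
		complete(ev->md.user_ptr);
}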
int
lnet_send_ping(lnet_nid_t dest_nid,
struct lnet_handle_md *mdh, int nnis,
- void *user_data, struct lnet_eq *eq, bool recovery)
+ void *user_data, lnet_eq_handler_t eq, bool recovery)
{
struct lnet_md md = { NULL };
struct lnet_process_id id;
msg->msg_ev.status = status;
}
msg->msg_ev.unlinked = unlink;
- lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev);
+ md->md_eq(&msg->msg_ev);
}
if (unlink || (md->md_refcount == 0 &&
}
lnet_net_unlock(LNET_LOCK_EX);
- LNetEQFree(the_lnet.ln_dc_eq);
the_lnet.ln_dc_eq = NULL;
the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
return -EALREADY;
- the_lnet.ln_dc_eq = LNetEQAlloc(lnet_discovery_event_handler);
- if (IS_ERR(the_lnet.ln_dc_eq)) {
- rc = PTR_ERR(the_lnet.ln_dc_eq);
- CERROR("Can't allocate discovery EQ: %d\n", rc);
- return rc;
- }
-
+ the_lnet.ln_dc_eq = lnet_discovery_event_handler;
the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
if (IS_ERR(task)) {
rc = PTR_ERR(task);
CERROR("Can't start peer discovery thread: %d\n", rc);
- LNetEQFree(the_lnet.ln_dc_eq);
the_lnet.ln_dc_eq = NULL;
the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
static struct smoketest_rpc {
spinlock_t rpc_glock; /* global lock */
struct srpc_service *rpc_services[SRPC_SERVICE_MAX_ID + 1];
- struct lnet_eq *rpc_lnet_eq; /* _the_ LNet event queue */
+ lnet_eq_handler_t rpc_lnet_eq; /* _the_ LNet event handler */
enum srpc_state rpc_state;
struct srpc_counters rpc_counters;
__u64 rpc_matchbits; /* matchbits counter */
srpc_data.rpc_state = SRPC_STATE_NI_INIT;
- srpc_data.rpc_lnet_eq = LNetEQAlloc(srpc_lnet_ev_handler);
- if (IS_ERR(srpc_data.rpc_lnet_eq)) {
- rc = PTR_ERR(srpc_data.rpc_lnet_eq);
- CERROR("LNetEQAlloc() has failed: %d\n", rc);
- goto bail;
- }
+ srpc_data.rpc_lnet_eq = srpc_lnet_ev_handler;
rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
LASSERT(rc == 0);
rc = stt_startup();
-bail:
if (rc != 0)
srpc_shutdown();
else
rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
LASSERT(rc == 0);
- LNetEQFree(srpc_data.rpc_lnet_eq);
/* fallthrough */
case SRPC_STATE_NI_INIT:
#include <lustre_sec.h>
#include "ptlrpc_internal.h"
-struct lnet_eq *ptlrpc_eq;
+lnet_eq_handler_t ptlrpc_eq;
struct percpu_ref ptlrpc_pending;
/*
percpu_ref_kill(&ptlrpc_pending);
wait_for_completion(&ptlrpc_done);
- LNetEQFree(ptlrpc_eq);
LNetNIFini();
}
* because we are guaranteed to get every event via callback,
* so we just set EQ size to 0 to avoid overhead of serializing
* enqueue/dequeue operations in LNet. */
- ptlrpc_eq = LNetEQAlloc(ptlrpc_master_callback);
- if (!IS_ERR(ptlrpc_eq))
- return 0;
-
- rc = PTR_ERR(ptlrpc_eq);
- CERROR("Failed to allocate event queue: %d\n", rc);
- LNetNIFini();
-
- return rc;
+ ptlrpc_eq = ptlrpc_master_callback;
+ return 0;
}
int ptlrpc_init_portals(void)
extern struct mutex ptlrpcd_mutex;
extern struct mutex pinger_mutex;
-extern struct lnet_eq *ptlrpc_eq;
+extern lnet_eq_handler_t ptlrpc_eq;
extern struct percpu_ref ptlrpc_pending;
int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait);