void lnet_me_unlink(struct lnet_me *me);
void lnet_md_unlink(struct lnet_libmd *md);
-void lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_md *umd);
+void lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_event *ev);
struct page *lnet_kvaddr_to_page(unsigned long vaddr);
int lnet_cpt_of_md(struct lnet_libmd *md, unsigned int offset);
*/
struct lnet_handle_md md_handle;
/**
- * A snapshot of the state of the MD immediately after the event has
- * been processed. In particular, the threshold field in md will
- * reflect the value of the threshold after the operation occurred.
+ * A snapshot of the relevant state of the MD immediately after the
+ * event has been processed: its start address, options and user
+ * pointer (see lnet_md_deconstruct()).
*/
- struct lnet_md md;
+ void *md_start;
+ void *md_user_ptr;
+ unsigned int md_options;
/**
* 64 bits of out-of-band user data. Only valid for LNET_EVENT_PUT.
* \see LNetPut
static void
lnet_ping_target_event_handler(struct lnet_event *event)
{
- struct lnet_ping_buffer *pbuf = event->md.user_ptr;
+ struct lnet_ping_buffer *pbuf = event->md_user_ptr;
if (event->unlinked)
lnet_ping_buffer_decref(pbuf);
static void lnet_push_target_event_handler(struct lnet_event *ev)
{
- struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
+ struct lnet_ping_buffer *pbuf = ev->md_user_ptr;
CDEBUG(D_NET, "type %d status %d unlinked %d\n", ev->type, ev->status,
ev->unlinked);
static void
lnet_ping_event_handler(struct lnet_event *event)
{
- struct ping_data *pd = event->md.user_ptr;
+ struct ping_data *pd = event->md_user_ptr;
CDEBUG(D_NET, "ping event (%d %d)%s\n",
event->type, event->status,
/* must be called with lnet_res_lock held */
void
-lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_md *umd)
+lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_event *ev)
{
- /* NB this doesn't copy out all the iov entries so when a
- * discontiguous MD is copied out, the target gets to know the
- * original iov pointer (in start) and the number of entries it had
- * and that's all.
- */
- umd->start = lmd->md_start;
- umd->length = ((lmd->md_options & LNET_MD_KIOV) == 0) ?
- lmd->md_length : lmd->md_niov;
- umd->threshold = lmd->md_threshold;
- umd->max_size = lmd->md_max_size;
- umd->options = lmd->md_options;
- umd->user_ptr = lmd->md_user_ptr;
+ /* Snapshot into the event only the MD state that event handlers
+ * consume: the start address, the options mask and the caller's
+ * user pointer. The old full struct lnet_md copy (length,
+ * threshold, max_size) is intentionally no longer recorded.
+ */
+ ev->md_start = lmd->md_start;
+ ev->md_options = lmd->md_options;
+ ev->md_user_ptr = lmd->md_user_ptr;
}
static int
void
lnet_mt_event_handler(struct lnet_event *event)
{
- struct lnet_mt_event_info *ev_info = event->md.user_ptr;
+ struct lnet_mt_event_info *ev_info = event->md_user_ptr;
struct lnet_ping_buffer *pbuf;
/* TODO: remove assert */
}
if (event->unlinked) {
LIBCFS_FREE(ev_info, sizeof(*ev_info));
- pbuf = LNET_PING_INFO_TO_BUFFER(event->md.start);
+ pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
lnet_ping_buffer_decref(pbuf);
}
}
ev->status = 0;
ev->unlinked = 1;
ev->type = LNET_EVENT_UNLINK;
- lnet_md_deconstruct(md, &ev->md);
+ lnet_md_deconstruct(md, ev);
lnet_md2handle(&ev->md_handle, md);
EXIT;
}
/* build umd in event */
lnet_md2handle(&msg->msg_ev.md_handle, md);
- lnet_md_deconstruct(md, &msg->msg_ev.md);
+ lnet_md_deconstruct(md, &msg->msg_ev);
}
static int
struct lnet_ping_buffer *pbuf;
struct lnet_peer *lp;
- pbuf = LNET_PING_INFO_TO_BUFFER(ev->md.start + ev->offset);
+ pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
/* lnet_find_peer() adds a refcount */
lp = lnet_find_peer(ev->source.nid);
{
struct lnet_ping_buffer *pbuf;
- pbuf = LNET_PING_INFO_TO_BUFFER(ev->md.start);
+ pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
spin_lock(&lp->lp_lock);
lp->lp_state &= ~LNET_PEER_PUSH_SENT;
lp->lp_push_error = ev->status;
goto out;
}
- pbuf = LNET_PING_INFO_TO_BUFFER(ev->md.start);
+ pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
lnet_swap_pinginfo(pbuf);
*/
static void lnet_discovery_event_handler(struct lnet_event *event)
{
- struct lnet_peer *lp = event->md.user_ptr;
+ struct lnet_peer *lp = event->md_user_ptr;
struct lnet_ping_buffer *pbuf;
int rc;
}
lnet_net_lock(LNET_LOCK_EX);
if (event->unlinked) {
- pbuf = LNET_PING_INFO_TO_BUFFER(event->md.start);
+ pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
lnet_ping_buffer_decref(pbuf);
lnet_peer_decref_locked(lp);
}
srpc_lnet_ev_handler(struct lnet_event *ev)
{
struct srpc_service_cd *scd;
- struct srpc_event *rpcev = ev->md.user_ptr;
+ struct srpc_event *rpcev = ev->md_user_ptr;
struct srpc_client_rpc *crpc;
struct srpc_server_rpc *srpc;
struct srpc_buffer *buffer;
LASSERT(ev->type != LNET_EVENT_UNLINK ||
sv->sv_shuttingdown);
- buffer = container_of(ev->md.start, struct srpc_buffer,
+ buffer = container_of(ev->md_start, struct srpc_buffer,
buf_msg);
buffer->buf_peer = ev->source;
buffer->buf_self = ev->target.nid;
*/
void request_out_callback(struct lnet_event *ev)
{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+ struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
struct ptlrpc_request *req = cbid->cbid_arg;
bool wakeup = false;
ENTRY;
*/
void reply_in_callback(struct lnet_event *ev)
{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
- struct ptlrpc_request *req = cbid->cbid_arg;
- ENTRY;
+ struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
+ struct ptlrpc_request *req = cbid->cbid_arg;
+ ENTRY;
- DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
+ DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
- LASSERT (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
- LASSERT (ev->md.start == req->rq_repbuf);
- LASSERT (ev->offset + ev->mlength <= req->rq_repbuf_len);
- /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
- for adaptive timeouts' early reply. */
- LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);
+ LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
+ LASSERT(ev->md_start == req->rq_repbuf);
+ LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
+ /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
+ * for adaptive timeouts' early reply.
+ */
+ LASSERT((ev->md_options & LNET_MD_MANAGE_REMOTE) != 0);
spin_lock(&req->rq_lock);
*/
void client_bulk_callback(struct lnet_event *ev)
{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
- struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
- struct ptlrpc_request *req;
- ENTRY;
+ struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
+ struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
+ struct ptlrpc_request *req;
+ ENTRY;
LASSERT((ptlrpc_is_bulk_put_sink(desc->bd_type) &&
ev->type == LNET_EVENT_PUT) ||
*/
void request_in_callback(struct lnet_event *ev)
{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+ struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
- struct ptlrpc_service *service = svcpt->scp_service;
- struct ptlrpc_request *req;
- ENTRY;
+ struct ptlrpc_service *service = svcpt->scp_service;
+ struct ptlrpc_request *req;
+ ENTRY;
- LASSERT (ev->type == LNET_EVENT_PUT ||
- ev->type == LNET_EVENT_UNLINK);
- LASSERT ((char *)ev->md.start >= rqbd->rqbd_buffer);
- LASSERT ((char *)ev->md.start + ev->offset + ev->mlength <=
- rqbd->rqbd_buffer + service->srv_buf_size);
+ LASSERT(ev->type == LNET_EVENT_PUT ||
+ ev->type == LNET_EVENT_UNLINK);
+ LASSERT((char *)ev->md_start >= rqbd->rqbd_buffer);
+ LASSERT((char *)ev->md_start + ev->offset + ev->mlength <=
+ rqbd->rqbd_buffer + service->srv_buf_size);
- CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
- "event type %d, status %d, service %s\n",
- ev->type, ev->status, service->srv_name);
+ CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
+ "event type %d, status %d, service %s\n",
+ ev->type, ev->status, service->srv_name);
if (ev->unlinked) {
/* If this is the last request message to fit in the
* flags are reset and scalars are zero. We only set the message
* size to non-zero if this was a successful receive. */
req->rq_xid = ev->match_bits;
- req->rq_reqbuf = ev->md.start + ev->offset;
+ req->rq_reqbuf = ev->md_start + ev->offset;
if (ev->type == LNET_EVENT_PUT && ev->status == 0)
req->rq_reqdata_len = ev->mlength;
ktime_get_real_ts64(&req->rq_arrival_time);
*/
void reply_out_callback(struct lnet_event *ev)
{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+ struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
struct ptlrpc_reply_state *rs = cbid->cbid_arg;
struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
ENTRY;
*/
void server_bulk_callback(struct lnet_event *ev)
{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+ struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
ENTRY;
static void ptlrpc_master_callback(struct lnet_event *ev)
{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+ struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
void (*callback)(struct lnet_event *ev) = cbid->cbid_fn;
/* Honestly, it's best to find out early. */