X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Fevents.c;h=fe07a4919fe0bcb300b5e8f054ee8e05e4da05d4;hb=f7f31f8f969f410cca0b4b8b02f81391148e01f2;hp=0ce8ca88737211c6e4ec47aa33b62272fdb316f1;hpb=368689640e6a42a152eee73d354dd6f68836065b;p=fs%2Flustre-release.git diff --git a/lustre/ptlrpc/events.c b/lustre/ptlrpc/events.c index 0ce8ca8..fe07a49 100644 --- a/lustre/ptlrpc/events.c +++ b/lustre/ptlrpc/events.c @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -17,134 +15,143 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ /* * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2012, 2017, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. */ #define DEBUG_SUBSYSTEM S_RPC -#ifndef __KERNEL__ -# include -#else -# include -# ifdef __mips64__ -# include -# endif -#endif - +#include +#include +#include #include #include #include #include "ptlrpc_internal.h" -lnet_handle_eq_t ptlrpc_eq_h; +lnet_handler_t ptlrpc_handler; +struct percpu_ref ptlrpc_pending; /* * Client's outgoing request callback */ -void request_out_callback(lnet_event_t *ev) +void request_out_callback(struct lnet_event *ev) { - struct ptlrpc_cb_id *cbid = ev->md.user_ptr; - struct ptlrpc_request *req = cbid->cbid_arg; - ENTRY; - - LASSERT (ev->type == LNET_EVENT_SEND || - ev->type == LNET_EVENT_UNLINK); - LASSERT (ev->unlinked); - - DEBUG_REQ((ev->status == 0) ? D_NET : D_ERROR, req, - "type %d, status %d", ev->type, ev->status); - - sptlrpc_request_out_callback(req); - req->rq_real_sent = cfs_time_current_sec(); - - if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) { - - /* Failed send: make it seem like the reply timed out, just - * like failing sends in client.c does currently... */ - - cfs_spin_lock(&req->rq_lock); - req->rq_net_err = 1; - cfs_spin_unlock(&req->rq_lock); - - ptlrpc_client_wake_req(req); - } - - ptlrpc_req_finished(req); - - EXIT; + struct ptlrpc_cb_id *cbid = ev->md_user_ptr; + struct ptlrpc_request *req = cbid->cbid_arg; + bool wakeup = false; + ENTRY; + + LASSERT(ev->type == LNET_EVENT_SEND || ev->type == LNET_EVENT_UNLINK); + LASSERT(ev->unlinked); + + if (unlikely(lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val && + CFS_FAIL_CHECK_RESET(OBD_FAIL_NET_ERROR_RPC, + OBD_FAIL_OSP_PRECREATE_PAUSE | + CFS_FAIL_ONCE))) + ev->status = -ECONNABORTED; + + DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status); + + /* Do not update imp_next_ping for connection request */ + if (lustre_msg_get_opc(req->rq_reqmsg) != + req->rq_import->imp_connect_op) + ptlrpc_pinger_sending_on_import(req->rq_import); + + sptlrpc_request_out_callback(req); + + spin_lock(&req->rq_lock); + req->rq_real_sent = ktime_get_real_seconds(); + req->rq_req_unlinked = 1; + /* reply_in_callback happened before request_out_callback? 
*/ + if (req->rq_reply_unlinked) + wakeup = true; + + if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) { + /* Failed send: make it seem like the reply timed out, just + * like failing sends in client.c does currently... */ + req->rq_net_err = 1; + wakeup = true; + } + + if (wakeup) + ptlrpc_client_wake_req(req); + + spin_unlock(&req->rq_lock); + + ptlrpc_req_finished(req); + EXIT; } /* * Client's incoming reply callback */ -void reply_in_callback(lnet_event_t *ev) +void reply_in_callback(struct lnet_event *ev) { - struct ptlrpc_cb_id *cbid = ev->md.user_ptr; - struct ptlrpc_request *req = cbid->cbid_arg; - ENTRY; + struct ptlrpc_cb_id *cbid = ev->md_user_ptr; + struct ptlrpc_request *req = cbid->cbid_arg; + ENTRY; - DEBUG_REQ((ev->status == 0) ? D_NET : D_ERROR, req, - "type %d, status %d", ev->type, ev->status); + DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status); - LASSERT (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK); - LASSERT (ev->md.start == req->rq_repbuf); - LASSERT (ev->offset + ev->mlength <= req->rq_repbuf_len); - /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests - for adaptive timeouts' early reply. */ - LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0); + LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK); + LASSERT(ev->md_start == req->rq_repbuf); + LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len); + /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests + * for adaptive timeouts' early reply. + */ + LASSERT((ev->md_options & LNET_MD_MANAGE_REMOTE) != 0); - cfs_spin_lock(&req->rq_lock); + spin_lock(&req->rq_lock); - req->rq_receiving_reply = 0; - req->rq_early = 0; - if (ev->unlinked) - req->rq_must_unlink = 0; + req->rq_receiving_reply = 0; + req->rq_early = 0; + if (ev->unlinked) + req->rq_reply_unlinked = 1; if (ev->status) goto out_wake; if (ev->type == LNET_EVENT_UNLINK) { LASSERT(ev->unlinked); - DEBUG_REQ(D_RPCTRACE, req, "unlink"); + DEBUG_REQ(D_NET, req, "unlink"); goto out_wake; } if (ev->mlength < ev->rlength ) { CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req, req->rq_replen, ev->rlength, ev->offset); - req->rq_reply_truncate = 1; + req->rq_reply_truncated = 1; req->rq_replied = 1; req->rq_status = -EOVERFLOW; req->rq_nob_received = ev->rlength + ev->offset; goto out_wake; } - if ((ev->offset == 0) && - ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) { - /* Early reply */ - DEBUG_REQ(D_ADAPTTO, req, - "Early reply received: mlen=%u offset=%d replen=%d " - "replied=%d unlinked=%d", ev->mlength, ev->offset, - req->rq_replen, req->rq_replied, ev->unlinked); + if ((ev->offset == 0) && + ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) { + /* Early reply */ + DEBUG_REQ(D_ADAPTTO, req, + "Early reply received, mlen=%u offset=%d replen=%d replied=%d unlinked=%d", + ev->mlength, ev->offset, + req->rq_replen, req->rq_replied, ev->unlinked); - req->rq_early_count++; /* number received, client side */ + req->rq_early_count++; /* number received, client side */ - if (req->rq_replied) /* already got the real reply */ - goto out_wake; + /* already got the real reply or buffers are already unlinked */ + if (req->rq_replied || + req->rq_reply_unlinked == 1) + goto out_wake; req->rq_early = 1; req->rq_reply_off = ev->offset; @@ -155,6 +162,8 @@ void reply_in_callback(lnet_event_t *ev) /* Real reply */ req->rq_rep_swab_mask = 0; req->rq_replied = 1; + /* Got reply, no resend required */ + req->rq_resend = 0; req->rq_reply_off = ev->offset; 
req->rq_nob_received = ev->mlength; /* LNetMDUnlink can't be called under the LNET_LOCK, @@ -165,95 +174,162 @@ void reply_in_callback(lnet_event_t *ev) ev->mlength, ev->offset, req->rq_replen); } - req->rq_import->imp_last_reply_time = cfs_time_current_sec(); + if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING) + req->rq_import->imp_last_reply_time = ktime_get_real_seconds(); out_wake: - /* NB don't unlock till after wakeup; req can disappear under us - * since we don't have our own ref */ - ptlrpc_client_wake_req(req); - cfs_spin_unlock(&req->rq_lock); - EXIT; + /* NB don't unlock till after wakeup; req can disappear under us + * since we don't have our own ref */ + ptlrpc_client_wake_req(req); + spin_unlock(&req->rq_lock); + EXIT; } /* * Client's bulk has been written/read */ -void client_bulk_callback (lnet_event_t *ev) +void client_bulk_callback(struct lnet_event *ev) { - struct ptlrpc_cb_id *cbid = ev->md.user_ptr; - struct ptlrpc_bulk_desc *desc = cbid->cbid_arg; - ENTRY; - - LASSERT ((desc->bd_type == BULK_PUT_SINK && - ev->type == LNET_EVENT_PUT) || - (desc->bd_type == BULK_GET_SOURCE && - ev->type == LNET_EVENT_GET) || - ev->type == LNET_EVENT_UNLINK); - LASSERT (ev->unlinked); - - CDEBUG((ev->status == 0) ? D_NET : D_ERROR, - "event type %d, status %d, desc %p\n", - ev->type, ev->status, desc); - - cfs_spin_lock(&desc->bd_lock); - - LASSERT(desc->bd_network_rw); - desc->bd_network_rw = 0; + struct ptlrpc_cb_id *cbid = ev->md_user_ptr; + struct ptlrpc_bulk_desc *desc = cbid->cbid_arg; + struct ptlrpc_request *req; + ENTRY; + + LASSERT((ptlrpc_is_bulk_put_sink(desc->bd_type) && + ev->type == LNET_EVENT_PUT) || + (ptlrpc_is_bulk_get_source(desc->bd_type) && + ev->type == LNET_EVENT_GET) || + ev->type == LNET_EVENT_UNLINK); + LASSERT(ev->unlinked); + + if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE)) + ev->status = -EIO; + + if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2,CFS_FAIL_ONCE)) + ev->status = -EIO; + + CDEBUG_LIMIT((ev->status == 0) ? D_NET : D_ERROR, + "event type %d, status %d, desc %p\n", + ev->type, ev->status, desc); + + spin_lock(&desc->bd_lock); + req = desc->bd_req; + LASSERT(desc->bd_refs > 0); + desc->bd_refs--; + + if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) { + desc->bd_nob_transferred += ev->mlength; + desc->bd_sender = ev->sender; + } else { + /* start reconnect and resend if network error hit */ + spin_lock(&req->rq_lock); + req->rq_net_err = 1; + spin_unlock(&req->rq_lock); + desc->bd_failure = 1; + } + + + /* NB don't unlock till after wakeup; desc can disappear under us + * otherwise */ + if (desc->bd_refs == 0) + ptlrpc_client_wake_req(desc->bd_req); + + spin_unlock(&desc->bd_lock); + EXIT; +} - if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) { - desc->bd_success = 1; - desc->bd_nob_transferred = ev->mlength; - desc->bd_sender = ev->sender; - } +/* + * We will have percpt request history list for ptlrpc service in upcoming + * patches because we don't want to be serialized by current per-service + * history operations. So we require history ID can (somehow) show arriving + * order w/o grabbing global lock, and user can sort them in userspace. 
+ * + * This is how we generate history ID for ptlrpc_request: + * ---------------------------------------------------- + * | 32 bits | 16 bits | (16 - X)bits | X bits | + * ---------------------------------------------------- + * | seconds | usec / 16 | sequence | CPT id | + * ---------------------------------------------------- + * + * it might not be precise but should be good enough. + */ - /* release the encrypted pages for write */ - if (desc->bd_req->rq_bulk_write) - sptlrpc_enc_pool_put_pages(desc); +#define REQS_CPT_BITS(svcpt) ((svcpt)->scp_service->srv_cpt_bits) - /* NB don't unlock till after wakeup; desc can disappear under us - * otherwise */ - ptlrpc_client_wake_req(desc->bd_req); +#define REQS_SEC_SHIFT 32 +#define REQS_USEC_SHIFT 16 +#define REQS_SEQ_SHIFT(svcpt) REQS_CPT_BITS(svcpt) - cfs_spin_unlock(&desc->bd_lock); - EXIT; +static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt, + struct ptlrpc_request *req) +{ + u64 sec = req->rq_arrival_time.tv_sec; + u32 usec = req->rq_arrival_time.tv_nsec / NSEC_PER_USEC / 16; /* usec / 16 */ + u64 new_seq; + + /* set sequence ID for request and add it to history list, + * it must be called with hold svcpt::scp_lock */ + + new_seq = (sec << REQS_SEC_SHIFT) | + (usec << REQS_USEC_SHIFT) | + (svcpt->scp_cpt < 0 ? 0 : svcpt->scp_cpt); + + if (new_seq > svcpt->scp_hist_seq) { + /* This handles the initial case of scp_hist_seq == 0 or + * we just jumped into a new time window */ + svcpt->scp_hist_seq = new_seq; + } else { + LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT); + /* NB: increase sequence number in current usec bucket, + * however, it's possible that we used up all bits for + * sequence and jumped into the next usec bucket (future time), + * then we hope there will be less RPCs per bucket at some + * point, and sequence will catch up again */ + svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt)); + new_seq = svcpt->scp_hist_seq; + } + + req->rq_history_seq = new_seq; + + list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs); } /* * Server's incoming request callback */ -void request_in_callback(lnet_event_t *ev) +void request_in_callback(struct lnet_event *ev) { - struct ptlrpc_cb_id *cbid = ev->md.user_ptr; - struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg; - struct ptlrpc_service *service = rqbd->rqbd_service; - struct ptlrpc_request *req; - ENTRY; - - LASSERT (ev->type == LNET_EVENT_PUT || - ev->type == LNET_EVENT_UNLINK); - LASSERT ((char *)ev->md.start >= rqbd->rqbd_buffer); - LASSERT ((char *)ev->md.start + ev->offset + ev->mlength <= - rqbd->rqbd_buffer + service->srv_buf_size); - - CDEBUG((ev->status == 0) ? D_NET : D_ERROR, - "event type %d, status %d, service %s\n", - ev->type, ev->status, service->srv_name); - - if (ev->unlinked) { - /* If this is the last request message to fit in the - * request buffer we can use the request object embedded in - * rqbd. Note that if we failed to allocate a request, - * we'd have to re-post the rqbd, which we can't do in this - * context. */ - req = &rqbd->rqbd_req; - memset(req, 0, sizeof (*req)); - } else { - LASSERT (ev->type == LNET_EVENT_PUT); - if (ev->status != 0) { - /* We moaned above already... 
*/ - return; - } - OBD_ALLOC_GFP(req, sizeof(*req), CFS_ALLOC_ATOMIC_TRY); + struct ptlrpc_cb_id *cbid = ev->md_user_ptr; + struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg; + struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt; + struct ptlrpc_service *service = svcpt->scp_service; + struct ptlrpc_request *req; + ENTRY; + + LASSERT(ev->type == LNET_EVENT_PUT || + ev->type == LNET_EVENT_UNLINK); + LASSERT((char *)ev->md_start >= rqbd->rqbd_buffer); + LASSERT((char *)ev->md_start + ev->offset + ev->mlength <= + rqbd->rqbd_buffer + service->srv_buf_size); + + CDEBUG_LIMIT((ev->status == 0) ? D_NET : D_ERROR, + "event type %d, status %d, service %s\n", + ev->type, ev->status, service->srv_name); + + if (ev->unlinked) { + /* If this is the last request message to fit in the + * request buffer we can use the request object embedded in + * rqbd. Note that if we failed to allocate a request, + * we'd have to re-post the rqbd, which we can't do in this + * context. + */ + req = &rqbd->rqbd_req; + memset(req, 0, sizeof(*req)); + } else { + LASSERT(ev->type == LNET_EVENT_PUT); + if (ev->status != 0) /* We moaned above already... */ + return; + req = ptlrpc_request_cache_alloc(GFP_ATOMIC); if (req == NULL) { CERROR("Can't allocate incoming request descriptor: " "Dropping %s RPC from %s\n", @@ -263,45 +339,42 @@ void request_in_callback(lnet_event_t *ev) } } - /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL, - * flags are reset and scalars are zero. We only set the message - * size to non-zero if this was a successful receive. */ - req->rq_xid = ev->match_bits; - req->rq_reqbuf = ev->md.start + ev->offset; - if (ev->type == LNET_EVENT_PUT && ev->status == 0) - req->rq_reqdata_len = ev->mlength; - cfs_gettimeofday(&req->rq_arrival_time); - req->rq_peer = ev->initiator; - req->rq_self = ev->target.nid; - req->rq_rqbd = rqbd; - req->rq_phase = RQ_PHASE_NEW; -#ifdef CRAY_XT3 - req->rq_uid = ev->uid; -#endif - cfs_spin_lock_init(&req->rq_lock); - CFS_INIT_LIST_HEAD(&req->rq_timed_list); - cfs_atomic_set(&req->rq_refcount, 1); - if (ev->type == LNET_EVENT_PUT) - CDEBUG(D_RPCTRACE, "incoming req@%p x"LPU64" msgsize %u\n", - req, req->rq_xid, ev->mlength); - - CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer)); - - cfs_spin_lock(&service->srv_lock); - - req->rq_history_seq = service->srv_request_seq++; - cfs_list_add_tail(&req->rq_history_list, &service->srv_request_history); - - if (ev->unlinked) { - service->srv_nrqbd_receiving--; - CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n", - service->srv_nrqbd_receiving); - - /* Normally, don't complain about 0 buffers posted; LNET won't - * drop incoming reqs since we set the portal lazy */ - if (test_req_buffer_pressure && - ev->type != LNET_EVENT_UNLINK && - service->srv_nrqbd_receiving == 0) + ptlrpc_srv_req_init(req); + /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL, + * flags are reset and scalars are zero. We only set the message + * size to non-zero if this was a successful receive. */ + req->rq_xid = ev->match_bits; + req->rq_reqbuf = ev->md_start + ev->offset; + if (ev->type == LNET_EVENT_PUT && ev->status == 0) + req->rq_reqdata_len = ev->mlength; + ktime_get_real_ts64(&req->rq_arrival_time); + /* Multi-Rail: keep track of both initiator and source NID. 
*/ + req->rq_peer = ev->initiator; + req->rq_source = ev->source; + req->rq_self = ev->target.nid; + req->rq_rqbd = rqbd; + req->rq_phase = RQ_PHASE_NEW; + if (ev->type == LNET_EVENT_PUT) + CDEBUG(D_INFO, "incoming req@%p x%llu msgsize %u\n", + req, req->rq_xid, ev->mlength); + + CDEBUG(D_RPCTRACE, "peer: %s (source: %s)\n", + libcfs_id2str(req->rq_peer), libcfs_id2str(req->rq_source)); + + spin_lock(&svcpt->scp_lock); + + ptlrpc_req_add_history(svcpt, req); + + if (ev->unlinked) { + svcpt->scp_nrqbds_posted--; + CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n", + svcpt->scp_nrqbds_posted); + + /* Normally, don't complain about 0 buffers posted; LNET won't + * drop incoming reqs since we set the portal lazy */ + if (test_req_buffer_pressure && + ev->type != LNET_EVENT_UNLINK && + svcpt->scp_nrqbds_posted == 0) CWARN("All %s request buffers busy\n", service->srv_name); @@ -311,25 +384,25 @@ void request_in_callback(lnet_event_t *ev) rqbd->rqbd_refcount++; } - cfs_list_add_tail(&req->rq_list, &service->srv_req_in_queue); - service->srv_n_queued_reqs++; + list_add_tail(&req->rq_list, &svcpt->scp_req_incoming); + svcpt->scp_nreqs_incoming++; - /* NB everything can disappear under us once the request - * has been queued and we unlock, so do the wake now... */ - cfs_waitq_signal(&service->srv_waitq); + /* NB everything can disappear under us once the request + * has been queued and we unlock, so do the wake now... */ + wake_up(&svcpt->scp_waitq); - cfs_spin_unlock(&service->srv_lock); - EXIT; + spin_unlock(&svcpt->scp_lock); + EXIT; } /* * Server's outgoing reply callback */ -void reply_out_callback(lnet_event_t *ev) +void reply_out_callback(struct lnet_event *ev) { - struct ptlrpc_cb_id *cbid = ev->md.user_ptr; - struct ptlrpc_reply_state *rs = cbid->cbid_arg; - struct ptlrpc_service *svc = rs->rs_service; + struct ptlrpc_cb_id *cbid = ev->md_user_ptr; + struct ptlrpc_reply_state *rs = cbid->cbid_arg; + struct ptlrpc_service_part *svcpt = rs->rs_svcpt; ENTRY; LASSERT (ev->type == LNET_EVENT_SEND || @@ -341,7 +414,6 @@ void reply_out_callback(lnet_event_t *ev) * net's ref on 'rs' */ LASSERT (ev->unlinked); ptlrpc_rs_decref(rs); - cfs_atomic_dec (&svc->srv_outstanding_replies); EXIT; return; } @@ -351,410 +423,218 @@ void reply_out_callback(lnet_event_t *ev) if (ev->unlinked) { /* Last network callback. The net's ref on 'rs' stays put * until ptlrpc_handle_rs() is done with it */ - cfs_spin_lock(&svc->srv_lock); - cfs_spin_lock(&rs->rs_lock); - rs->rs_on_net = 0; - if (!rs->rs_no_ack || - rs->rs_transno <= rs->rs_export->exp_obd->obd_last_committed) - ptlrpc_schedule_difficult_reply (rs); - cfs_spin_unlock(&rs->rs_lock); - cfs_spin_unlock(&svc->srv_lock); - } - - EXIT; + spin_lock(&svcpt->scp_rep_lock); + spin_lock(&rs->rs_lock); + + rs->rs_on_net = 0; + if (!rs->rs_no_ack || + rs->rs_transno <= + rs->rs_export->exp_obd->obd_last_committed || + list_empty(&rs->rs_obd_list)) + ptlrpc_schedule_difficult_reply(rs); + + spin_unlock(&rs->rs_lock); + spin_unlock(&svcpt->scp_rep_lock); + } + EXIT; } +#ifdef HAVE_SERVER_SUPPORT /* * Server's bulk completion callback */ -void server_bulk_callback (lnet_event_t *ev) -{ - struct ptlrpc_cb_id *cbid = ev->md.user_ptr; - struct ptlrpc_bulk_desc *desc = cbid->cbid_arg; - ENTRY; - - LASSERT (ev->type == LNET_EVENT_SEND || - ev->type == LNET_EVENT_UNLINK || - (desc->bd_type == BULK_PUT_SOURCE && - ev->type == LNET_EVENT_ACK) || - (desc->bd_type == BULK_GET_SINK && - ev->type == LNET_EVENT_REPLY)); - - CDEBUG((ev->status == 0) ? 
D_NET : D_ERROR, - "event type %d, status %d, desc %p\n", - ev->type, ev->status, desc); - - cfs_spin_lock(&desc->bd_lock); - - if ((ev->type == LNET_EVENT_ACK || - ev->type == LNET_EVENT_REPLY) && - ev->status == 0) { - /* We heard back from the peer, so even if we get this - * before the SENT event (oh yes we can), we know we - * read/wrote the peer buffer and how much... */ - desc->bd_success = 1; - desc->bd_nob_transferred = ev->mlength; - desc->bd_sender = ev->sender; - } - - if (ev->unlinked) { - /* This is the last callback no matter what... */ - desc->bd_network_rw = 0; - cfs_waitq_signal(&desc->bd_waitq); - } - - cfs_spin_unlock(&desc->bd_lock); - EXIT; -} - -static void ptlrpc_master_callback(lnet_event_t *ev) -{ - struct ptlrpc_cb_id *cbid = ev->md.user_ptr; - void (*callback)(lnet_event_t *ev) = cbid->cbid_fn; - - /* Honestly, it's best to find out early. */ - LASSERT (cbid->cbid_arg != LP_POISON); - LASSERT (callback == request_out_callback || - callback == reply_in_callback || - callback == client_bulk_callback || - callback == request_in_callback || - callback == reply_out_callback || - callback == server_bulk_callback); - - callback (ev); -} - -int ptlrpc_uuid_to_peer (struct obd_uuid *uuid, - lnet_process_id_t *peer, lnet_nid_t *self) -{ - int best_dist = 0; - __u32 best_order = 0; - int count = 0; - int rc = -ENOENT; - int portals_compatibility; - int dist; - __u32 order; - lnet_nid_t dst_nid; - lnet_nid_t src_nid; - - portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL); - - peer->pid = LUSTRE_SRV_LNET_PID; - - /* Choose the matching UUID that's closest */ - while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) { - dist = LNetDist(dst_nid, &src_nid, &order); - if (dist < 0) - continue; - - if (dist == 0) { /* local! use loopback LND */ - peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0); - rc = 0; - break; - } - - if (rc < 0 || - dist < best_dist || - (dist == best_dist && order < best_order)) { - best_dist = dist; - best_order = order; - - if (portals_compatibility > 1) { - /* Strong portals compatibility: Zero the nid's - * NET, so if I'm reading new config logs, or - * getting configured by (new) lconf I can - * still talk to old servers. */ - dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid)); - src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid)); - } - peer->nid = dst_nid; - *self = src_nid; - rc = 0; - } - } - - CDEBUG(D_NET,"%s->%s\n", uuid->uuid, libcfs_id2str(*peer)); - return rc; -} - -void ptlrpc_ni_fini(void) +void server_bulk_callback(struct lnet_event *ev) { - cfs_waitq_t waitq; - struct l_wait_info lwi; - int rc; - int retries; - - /* Wait for the event queue to become idle since there may still be - * messages in flight with pending events (i.e. 
the fire-and-forget - * messages == client requests and "non-difficult" server - * replies */ - - for (retries = 0;; retries++) { - rc = LNetEQFree(ptlrpc_eq_h); - switch (rc) { - default: - LBUG(); - - case 0: - LNetNIFini(); - return; - - case -EBUSY: - if (retries != 0) - CWARN("Event queue still busy\n"); - - /* Wait for a bit */ - cfs_waitq_init(&waitq); - lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL); - l_wait_event(waitq, 0, &lwi); - break; - } - } - /* notreached */ + struct ptlrpc_cb_id *cbid = ev->md_user_ptr; + struct ptlrpc_bulk_desc *desc = cbid->cbid_arg; + ENTRY; + + LASSERT(ev->type == LNET_EVENT_SEND || + ev->type == LNET_EVENT_UNLINK || + (ptlrpc_is_bulk_put_source(desc->bd_type) && + ev->type == LNET_EVENT_ACK) || + (ptlrpc_is_bulk_get_sink(desc->bd_type) && + ev->type == LNET_EVENT_REPLY)); + + CDEBUG_LIMIT((ev->status == 0) ? D_NET : D_ERROR, + "event type %d, status %d, desc %p\n", + ev->type, ev->status, desc); + + spin_lock(&desc->bd_lock); + + LASSERT(desc->bd_refs > 0); + + if ((ev->type == LNET_EVENT_ACK || + ev->type == LNET_EVENT_REPLY) && + ev->status == 0) { + /* We heard back from the peer, so even if we get this + * before the SENT event (oh yes we can), we know we + * read/wrote the peer buffer and how much... */ + desc->bd_nob_transferred += ev->mlength; + desc->bd_sender = ev->sender; + } + + if (ev->status != 0) + desc->bd_failure = 1; + + if (ev->unlinked) { + desc->bd_refs--; + /* This is the last callback no matter what... */ + if (desc->bd_refs == 0) + wake_up(&desc->bd_waitq); + } + + spin_unlock(&desc->bd_lock); + EXIT; } - -lnet_pid_t ptl_get_pid(void) -{ - lnet_pid_t pid; - -#ifndef __KERNEL__ - pid = getpid(); -#else - pid = LUSTRE_SRV_LNET_PID; #endif - return pid; -} -int ptlrpc_ni_init(void) +static void ptlrpc_master_callback(struct lnet_event *ev) { - int rc; - lnet_pid_t pid; - - pid = ptl_get_pid(); - CDEBUG(D_NET, "My pid is: %x\n", pid); - - /* We're not passing any limits yet... */ - rc = LNetNIInit(pid); - if (rc < 0) { - CDEBUG (D_NET, "Can't init network interface: %d\n", rc); - return (-ENOENT); - } - - /* CAVEAT EMPTOR: how we process portals events is _radically_ - * different depending on... */ -#ifdef __KERNEL__ - /* kernel portals calls our master callback when events are added to - * the event queue. In fact lustre never pulls events off this queue, - * so it's only sized for some debug history. */ - rc = LNetEQAlloc(1024, ptlrpc_master_callback, &ptlrpc_eq_h); -#else - /* liblustre calls the master callback when it removes events from the - * event queue. The event queue has to be big enough not to drop - * anything */ - rc = LNetEQAlloc(10240, LNET_EQ_HANDLER_NONE, &ptlrpc_eq_h); + struct ptlrpc_cb_id *cbid = ev->md_user_ptr; + void (*callback)(struct lnet_event *ev) = cbid->cbid_fn; + + /* Honestly, it's best to find out early. 
*/ + LASSERT(cbid->cbid_arg != LP_POISON); + LASSERT(callback == request_out_callback || + callback == reply_in_callback || + callback == client_bulk_callback || + callback == request_in_callback || + callback == reply_out_callback +#ifdef HAVE_SERVER_SUPPORT + || callback == server_bulk_callback #endif - if (rc == 0) - return 0; - - CERROR ("Failed to allocate event queue: %d\n", rc); - LNetNIFini(); + ); - return (-ENOMEM); + callback(ev); + if (ev->unlinked) + percpu_ref_put(&ptlrpc_pending); } -#ifndef __KERNEL__ -CFS_LIST_HEAD(liblustre_wait_callbacks); -CFS_LIST_HEAD(liblustre_idle_callbacks); -void *liblustre_services_callback; - -void * -liblustre_register_waitidle_callback (cfs_list_t *callback_list, - const char *name, - int (*fn)(void *arg), void *arg) +int ptlrpc_uuid_to_peer(struct obd_uuid *uuid, + struct lnet_process_id *peer, lnet_nid_t *self) { - struct liblustre_wait_callback *llwc; - - OBD_ALLOC(llwc, sizeof(*llwc)); - LASSERT (llwc != NULL); - - llwc->llwc_name = name; - llwc->llwc_fn = fn; - llwc->llwc_arg = arg; - cfs_list_add_tail(&llwc->llwc_list, callback_list); - - return (llwc); + int best_dist = 0; + __u32 best_order = 0; + int count = 0; + int rc = -ENOENT; + int dist; + __u32 order; + lnet_nid_t dst_nid; + lnet_nid_t src_nid; + + peer->pid = LNET_PID_LUSTRE; + + /* Choose the matching UUID that's closest */ + while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) { + if (peer->nid != LNET_NID_ANY && LNET_NIDADDR(peer->nid) == 0 && + LNET_NIDNET(dst_nid) != LNET_NIDNET(peer->nid)) + continue; + + dist = LNetDist(dst_nid, &src_nid, &order); + if (dist < 0) + continue; + + if (dist == 0) { /* local! use loopback LND */ + peer->nid = *self = LNET_NID_LO_0; + rc = 0; + break; + } + + if (rc < 0 || + dist < best_dist || + (dist == best_dist && order < best_order)) { + best_dist = dist; + best_order = order; + + peer->nid = dst_nid; + *self = src_nid; + rc = 0; + } + } + + CDEBUG(D_NET, "%s->%s\n", uuid->uuid, libcfs_id2str(*peer)); + return rc; } -void -liblustre_deregister_waitidle_callback (void *opaque) -{ - struct liblustre_wait_callback *llwc = opaque; - - cfs_list_del(&llwc->llwc_list); - OBD_FREE(llwc, sizeof(*llwc)); -} +static struct completion ptlrpc_done; -void * -liblustre_register_wait_callback (const char *name, - int (*fn)(void *arg), void *arg) +static void ptlrpc_release(struct percpu_ref *ref) { - return liblustre_register_waitidle_callback(&liblustre_wait_callbacks, - name, fn, arg); + complete(&ptlrpc_done); } -void -liblustre_deregister_wait_callback (void *opaque) +static void ptlrpc_ni_fini(void) { - liblustre_deregister_waitidle_callback(opaque); -} + /* Wait for the event queue to become idle since there may still be + * messages in flight with pending events (i.e. 
the fire-and-forget + * messages == client requests and "non-difficult" server + * replies */ -void * -liblustre_register_idle_callback (const char *name, - int (*fn)(void *arg), void *arg) -{ - return liblustre_register_waitidle_callback(&liblustre_idle_callbacks, - name, fn, arg); -} + init_completion(&ptlrpc_done); + percpu_ref_kill(&ptlrpc_pending); + wait_for_completion(&ptlrpc_done); -void -liblustre_deregister_idle_callback (void *opaque) -{ - liblustre_deregister_waitidle_callback(opaque); + lnet_assert_handler_unused(ptlrpc_handler); + LNetNIFini(); } -int -liblustre_check_events (int timeout) +lnet_pid_t ptl_get_pid(void) { - lnet_event_t ev; - int rc; - int i; - ENTRY; - - rc = LNetEQPoll(&ptlrpc_eq_h, 1, timeout * 1000, &ev, &i); - if (rc == 0) - RETURN(0); - - LASSERT (rc == -EOVERFLOW || rc == 1); - - /* liblustre: no asynch callback so we can't afford to miss any - * events... */ - if (rc == -EOVERFLOW) { - CERROR ("Dropped an event!!!\n"); - abort(); - } - - ptlrpc_master_callback (&ev); - RETURN(1); + return LNET_PID_LUSTRE; } -int liblustre_waiting = 0; - -int -liblustre_wait_event (int timeout) -{ - cfs_list_t *tmp; - struct liblustre_wait_callback *llwc; - int found_something = 0; - - /* single threaded recursion check... */ - liblustre_waiting = 1; - - for (;;) { - /* Deal with all pending events */ - while (liblustre_check_events(0)) - found_something = 1; - - /* Give all registered callbacks a bite at the cherry */ - cfs_list_for_each(tmp, &liblustre_wait_callbacks) { - llwc = cfs_list_entry(tmp, - struct liblustre_wait_callback, - llwc_list); - - if (llwc->llwc_fn(llwc->llwc_arg)) - found_something = 1; - } - - if (found_something || timeout == 0) - break; - - /* Nothing so far, but I'm allowed to block... */ - found_something = liblustre_check_events(timeout); - if (!found_something) /* still nothing */ - break; /* I timed out */ - } - - liblustre_waiting = 0; - - return found_something; -} - -void -liblustre_wait_idle(void) +int ptlrpc_ni_init(void) { - static int recursed = 0; - - cfs_list_t *tmp; - struct liblustre_wait_callback *llwc; - int idle = 0; - - LASSERT(!recursed); - recursed = 1; - - do { - liblustre_wait_event(0); - - idle = 1; - - cfs_list_for_each(tmp, &liblustre_idle_callbacks) { - llwc = cfs_list_entry(tmp, - struct liblustre_wait_callback, - llwc_list); - - if (!llwc->llwc_fn(llwc->llwc_arg)) { - idle = 0; - break; - } - } - - } while (!idle); - - recursed = 0; + int rc; + lnet_pid_t pid; + + pid = ptl_get_pid(); + CDEBUG(D_NET, "My pid is: %x\n", pid); + + /* We're not passing any limits yet... */ + rc = LNetNIInit(pid); + if (rc < 0) { + CDEBUG(D_NET, "ptlrpc: Can't init network interface: rc = %d\n", + rc); + return rc; + } + + rc = percpu_ref_init(&ptlrpc_pending, ptlrpc_release, 0, GFP_KERNEL); + if (rc) { + CERROR("ptlrpc: Can't init percpu refcount: rc = %d\n", rc); + return rc; + } + /* CAVEAT EMPTOR: how we process portals events is _radically_ + * different depending on... + */ + /* kernel LNet calls our master callback when there are new event, + * because we are guaranteed to get every event via callback, + * so we just set EQ size to 0 to avoid overhread of serializing + * enqueue/dequeue operations in LNet. 
*/ + ptlrpc_handler = ptlrpc_master_callback; + return 0; } -#endif /* __KERNEL__ */ - int ptlrpc_init_portals(void) { int rc = ptlrpc_ni_init(); if (rc != 0) { CERROR("network initialisation failed\n"); - return -EIO; + return rc; } -#ifndef __KERNEL__ - liblustre_services_callback = - liblustre_register_wait_callback("liblustre_check_services", - &liblustre_check_services, - NULL); - cfs_init_completion_module(liblustre_wait_event); -#endif rc = ptlrpcd_addref(); if (rc == 0) return 0; CERROR("rpcd initialisation failed\n"); -#ifndef __KERNEL__ - liblustre_deregister_wait_callback(liblustre_services_callback); -#endif ptlrpc_ni_fini(); return rc; } void ptlrpc_exit_portals(void) { -#ifndef __KERNEL__ - liblustre_deregister_wait_callback(liblustre_services_callback); -#endif ptlrpcd_decref(); ptlrpc_ni_fini(); }
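The notes below are illustrative sketches prompted by this patch; they are not part of the diff.

The history-ID comment around ptlrpc_req_add_history() packs the arrival time, a per-bucket sequence number and the CPT id into a single 64-bit value: seconds << 32 | (usec / 16) << 16 | sequence << X | CPT, where X is the service's CPT bit count. A minimal userspace sketch of that layout, assuming 2 CPT bits; pack_history_id() and the DEMO_* names are hypothetical, only the shift values mirror REQS_SEC_SHIFT/REQS_USEC_SHIFT from the patch:

/* demo of the 64-bit history-ID layout: | sec | usec/16 | seq | cpt | */
#include <stdint.h>
#include <stdio.h>

#define DEMO_SEC_SHIFT	32	/* mirrors REQS_SEC_SHIFT */
#define DEMO_USEC_SHIFT	16	/* mirrors REQS_USEC_SHIFT */

static uint64_t pack_history_id(uint64_t sec, uint32_t usec, uint32_t seq,
				uint32_t cpt, uint32_t cpt_bits)
{
	return (sec << DEMO_SEC_SHIFT) |
	       ((uint64_t)(usec / 16) << DEMO_USEC_SHIFT) |
	       ((uint64_t)seq << cpt_bits) |
	       (uint64_t)cpt;
}

int main(void)
{
	uint32_t cpt_bits = 2;	/* 4 CPTs, so the 2 low bits hold the CPT id */
	uint64_t id, next;

	/* first request arriving in this usec bucket, handled on CPT 1 */
	id = pack_history_id(1700000000ULL, 123456, 0, 1, cpt_bits);

	/* a second request in the same bucket only bumps the sequence field,
	 * i.e. scp_hist_seq += 1 << REQS_SEQ_SHIFT(svcpt) in the patch */
	next = id + (1ULL << cpt_bits);

	printf("id   = %#llx\n", (unsigned long long)id);
	printf("next = %#llx\n", (unsigned long long)next);
	return 0;
}

If a bucket runs out of sequence bits, the patch keeps adding 1 << REQS_SEQ_SHIFT(svcpt) and the value spills into a future usec bucket; arrival order is still preserved and the counter catches up once the RPC rate drops.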
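Every callback in this file is reached through ptlrpc_master_callback(): each request, bulk descriptor and request-buffer descriptor embeds a struct ptlrpc_cb_id whose address is stored as the MD user pointer, so the single handler registered with LNet can route any event to the right function and object. A self-contained sketch of that dispatch pattern, assuming simplified fake_* types (the real cbid_fn takes a struct lnet_event *):

#include <stdio.h>

struct fake_event {
	void	*md_user_ptr;		/* stands in for ev->md_user_ptr */
	int	 status;
};

struct cb_id {
	void   (*cbid_fn)(struct fake_event *ev);
	void	*cbid_arg;		/* back pointer to the owning object */
};

struct fake_request {
	int		rq_xid;
	struct cb_id	rq_reply_cbid;	/* handed to LNet as the MD user pointer */
};

static void fake_reply_in_callback(struct fake_event *ev)
{
	struct cb_id *cbid = ev->md_user_ptr;
	struct fake_request *req = cbid->cbid_arg;

	printf("reply for xid %d, status %d\n", req->rq_xid, ev->status);
}

static void fake_master_callback(struct fake_event *ev)
{
	struct cb_id *cbid = ev->md_user_ptr;

	cbid->cbid_fn(ev);		/* route to the per-object handler */
}

int main(void)
{
	struct fake_request req = { .rq_xid = 7 };
	struct fake_event ev = { .md_user_ptr = &req.rq_reply_cbid, .status = 0 };

	req.rq_reply_cbid.cbid_fn = fake_reply_in_callback;
	req.rq_reply_cbid.cbid_arg = &req;

	fake_master_callback(&ev);
	return 0;
}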
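ptlrpc_uuid_to_peer() filters candidate NIDs with LNET_NIDNET()/LNET_NIDADDR() and short-circuits to the loopback NID when LNetDist() reports a distance of 0. Those macros split a 64-bit lnet_nid_t into a 32-bit network part (LND type << 16 | network number) and a 32-bit address part. A small sketch that reimplements the packing with hypothetical DEMO_* macros; the LND type values are assumptions based on LNet headers:

#include <stdint.h>
#include <stdio.h>

#define DEMO_MKNET(type, num)	((uint32_t)(((type) << 16) | (num)))
#define DEMO_MKNID(net, addr)	((((uint64_t)(net)) << 32) | (addr))
#define DEMO_NIDNET(nid)	((uint32_t)((nid) >> 32))
#define DEMO_NIDADDR(nid)	((uint32_t)((nid) & 0xffffffffU))

enum { DEMO_LOLND = 0, DEMO_SOCKLND = 2 };	/* assumed LND type numbers */

int main(void)
{
	/* 192.168.1.5@tcp and the loopback NID 0@lo */
	uint64_t tcp_nid = DEMO_MKNID(DEMO_MKNET(DEMO_SOCKLND, 0), 0xc0a80105);
	uint64_t lo_nid  = DEMO_MKNID(DEMO_MKNET(DEMO_LOLND, 0), 0);

	/* the "skip other networks" test compares only the net halves */
	printf("same net: %d\n", DEMO_NIDNET(tcp_nid) == DEMO_NIDNET(lo_nid));
	printf("tcp addr: %#x\n", DEMO_NIDADDR(tcp_nid));
	return 0;
}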
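Shutdown ordering also changes: instead of retrying LNetEQFree() until it stops returning -EBUSY, the patch counts outstanding network events with a percpu_ref (ptlrpc_pending); ptlrpc_master_callback() drops a reference once an event reports its MD unlinked, and ptlrpc_ni_fini() kills the ref and sleeps until the release callback fires. A minimal kernel-style sketch of that drain idiom, with hypothetical demo_* names; the get/put points are simplified (in the patch the reference is taken where the MD is posted, outside this file):

#include <linux/percpu-refcount.h>
#include <linux/completion.h>
#include <linux/gfp.h>

static struct percpu_ref pending;
static DECLARE_COMPLETION(drained);

static void pending_release(struct percpu_ref *ref)
{
	complete(&drained);		/* last in-flight event is gone */
}

static int demo_init(void)
{
	return percpu_ref_init(&pending, pending_release, 0, GFP_KERNEL);
}

static void demo_md_posted(void)
{
	percpu_ref_get(&pending);	/* one reference per MD that can still fire */
}

static void demo_event_unlinked(void)
{
	percpu_ref_put(&pending);	/* dropped from the event callback */
}

static void demo_fini(void)
{
	percpu_ref_kill(&pending);	/* no new references from here on */
	wait_for_completion(&drained);	/* wait for every pending event */
	percpu_ref_exit(&pending);
}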