X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fptlrpc%2Fevents.c;h=55cc32e2413a7140b7ffdc332a56b03d02313c78;hp=fbfee4125d39b63917ab3326629e4a225350bc52;hb=185473cd6458f144ab512176c71beb85609b5ed9;hpb=95edc0dd8eab493f35b18aa1d0b66f32e1bcbf2d

diff --git a/lustre/ptlrpc/events.c b/lustre/ptlrpc/events.c
index fbfee412..55cc32e 100644
--- a/lustre/ptlrpc/events.c
+++ b/lustre/ptlrpc/events.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -26,7 +24,7 @@
 * GPL HEADER END
 */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
 /*
@@ -65,10 +63,10 @@ void request_out_callback(lnet_event_t *ev)
                  ev->type == LNET_EVENT_UNLINK);
         LASSERT (ev->unlinked);

-        DEBUG_REQ((ev->status == 0) ? D_NET : D_ERROR, req,
-                  "type %d, status %d", ev->type, ev->status);
+        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

         sptlrpc_request_out_callback(req);
+        req->rq_real_sent = cfs_time_current_sec();

         if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
@@ -96,8 +94,7 @@ void reply_in_callback(lnet_event_t *ev)
         struct ptlrpc_request *req = cbid->cbid_arg;
         ENTRY;

-        DEBUG_REQ((ev->status == 0) ? D_NET : D_ERROR, req,
-                  "type %d, status %d", ev->type, ev->status);
+        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

         LASSERT (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
         LASSERT (ev->md.start == req->rq_repbuf);
@@ -118,7 +115,7 @@ void reply_in_callback(lnet_event_t *ev)

         if (ev->type == LNET_EVENT_UNLINK) {
                 LASSERT(ev->unlinked);
-                DEBUG_REQ(D_RPCTRACE, req, "unlink");
+                DEBUG_REQ(D_NET, req, "unlink");
                 goto out_wake;
         }

@@ -181,6 +178,7 @@ void client_bulk_callback (lnet_event_t *ev)
 {
         struct ptlrpc_cb_id     *cbid = ev->md.user_ptr;
         struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
+        struct ptlrpc_request   *req;
         ENTRY;

         LASSERT ((desc->bd_type == BULK_PUT_SINK &&
@@ -190,12 +188,18 @@ void client_bulk_callback (lnet_event_t *ev)
                  ev->type == LNET_EVENT_UNLINK);
         LASSERT (ev->unlinked);

+        if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
+                ev->status = -EIO;
+
+        if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2, CFS_FAIL_ONCE))
+                ev->status = -EIO;
+
         CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
                "event type %d, status %d, desc %p\n",
                ev->type, ev->status, desc);

         cfs_spin_lock(&desc->bd_lock);
-
+        req = desc->bd_req;
         LASSERT(desc->bd_network_rw);
         desc->bd_network_rw = 0;

@@ -203,6 +207,11 @@ void client_bulk_callback (lnet_event_t *ev)
                 desc->bd_success = 1;
                 desc->bd_nob_transferred = ev->mlength;
                 desc->bd_sender = ev->sender;
+        } else {
+                /* start reconnect and resend if a network error is hit */
+                cfs_spin_lock(&req->rq_lock);
+                req->rq_net_err = 1;
+                cfs_spin_unlock(&req->rq_lock);
         }

         /* release the encrypted pages for write */
@@ -211,20 +220,75 @@ void client_bulk_callback (lnet_event_t *ev)

         /* NB don't unlock till after wakeup; desc can disappear under us
          * otherwise */
-        ptlrpc_client_wake_req(desc->bd_req);
+        ptlrpc_client_wake_req(req);

         cfs_spin_unlock(&desc->bd_lock);
         EXIT;
 }

 /*
+ * We will have a percpt request history list for each ptlrpc service in
+ * upcoming patches, because we don't want to be serialized by the current
+ * per-service history operations. 
So we require that the history ID can (somehow) reflect arrival
+ * order without grabbing a global lock, so users can sort them in userspace.
+ *
+ * This is how we generate the history ID for a ptlrpc_request:
+ * ----------------------------------------------------
+ * |  32 bits  |  16 bits  | (16 - X)bits  |  X bits  |
+ * ----------------------------------------------------
+ * |  seconds  | usec / 16 |   sequence    | CPT id   |
+ * ----------------------------------------------------
+ *
+ * It might not be precise but should be good enough.
+ */
+
+#define REQS_CPT_BITS(svcpt)    ((svcpt)->scp_service->srv_cpt_bits)
+
+#define REQS_SEC_SHIFT          32
+#define REQS_USEC_SHIFT         16
+#define REQS_SEQ_SHIFT(svcpt)   REQS_CPT_BITS(svcpt)
+
+static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
+                                   struct ptlrpc_request *req)
+{
+        __u64 sec = req->rq_arrival_time.tv_sec;
+        __u32 usec = req->rq_arrival_time.tv_usec >> 4; /* usec / 16 */
+        __u64 new_seq;
+
+        /* set a sequence ID for the request and add it to the history list;
+         * this must be called while holding svcpt::scp_lock */
+
+        new_seq = (sec << REQS_SEC_SHIFT) |
+                  (usec << REQS_USEC_SHIFT) | svcpt->scp_cpt;
+        if (new_seq > svcpt->scp_hist_seq) {
+                /* This handles the initial case of scp_hist_seq == 0 or
+                 * we just jumped into a new time window */
+                svcpt->scp_hist_seq = new_seq;
+        } else {
+                LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
+                /* NB: increase sequence number in current usec bucket,
+                 * however, it's possible that we used up all bits for
+                 * sequence and jumped into the next usec bucket (future time),
+                 * then we hope there will be fewer RPCs per bucket at some
+                 * point, and the sequence will catch up again */
+                svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt));
+                new_seq = svcpt->scp_hist_seq;
+        }
+
+        req->rq_history_seq = new_seq;
+
+        cfs_list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
+}
+
+/*
 * Server's incoming request callback
 */
 void request_in_callback(lnet_event_t *ev)
 {
-        struct ptlrpc_cb_id               *cbid = ev->md.user_ptr;
-        struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
-        struct ptlrpc_service             *service = rqbd->rqbd_service;
+        struct ptlrpc_cb_id               *cbid = ev->md.user_ptr;
+        struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
+        struct ptlrpc_service_part        *svcpt = rqbd->rqbd_svcpt;
+        struct ptlrpc_service             *service = svcpt->scp_service;
         struct ptlrpc_request             *req;
         ENTRY;
@@ -274,33 +338,30 @@ void request_in_callback(lnet_event_t *ev)
         req->rq_self = ev->target.nid;
         req->rq_rqbd = rqbd;
         req->rq_phase = RQ_PHASE_NEW;
-#ifdef CRAY_XT3
-        req->rq_uid = ev->uid;
-#endif
         cfs_spin_lock_init(&req->rq_lock);
         CFS_INIT_LIST_HEAD(&req->rq_timed_list);
+        CFS_INIT_LIST_HEAD(&req->rq_exp_list);
         cfs_atomic_set(&req->rq_refcount, 1);
         if (ev->type == LNET_EVENT_PUT)
-                CDEBUG(D_RPCTRACE, "incoming req@%p x"LPU64" msgsize %u\n",
+                CDEBUG(D_INFO, "incoming req@%p x"LPU64" msgsize %u\n",
                        req, req->rq_xid, ev->mlength);

         CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));

-        cfs_spin_lock(&service->srv_lock);
+        cfs_spin_lock(&svcpt->scp_lock);

-        req->rq_history_seq = service->srv_request_seq++;
-        cfs_list_add_tail(&req->rq_history_list, &service->srv_request_history);
+        ptlrpc_req_add_history(svcpt, req);

-        if (ev->unlinked) {
-                service->srv_nrqbd_receiving--;
-                CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
-                       service->srv_nrqbd_receiving);
-
-                /* Normally, don't complain about 0 buffers posted; LNET won't
-                 * drop incoming reqs since we set the portal lazy */
-                if (test_req_buffer_pressure &&
-                    ev->type != LNET_EVENT_UNLINK &&
-                    
service->srv_nrqbd_receiving == 0) + if (ev->unlinked) { + svcpt->scp_nrqbds_posted--; + CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n", + svcpt->scp_nrqbds_posted); + + /* Normally, don't complain about 0 buffers posted; LNET won't + * drop incoming reqs since we set the portal lazy */ + if (test_req_buffer_pressure && + ev->type != LNET_EVENT_UNLINK && + svcpt->scp_nrqbds_posted == 0) CWARN("All %s request buffers busy\n", service->srv_name); @@ -310,15 +371,15 @@ void request_in_callback(lnet_event_t *ev) rqbd->rqbd_refcount++; } - cfs_list_add_tail(&req->rq_list, &service->srv_req_in_queue); - service->srv_n_queued_reqs++; + cfs_list_add_tail(&req->rq_list, &svcpt->scp_req_incoming); + svcpt->scp_nreqs_incoming++; - /* NB everything can disappear under us once the request - * has been queued and we unlock, so do the wake now... */ - cfs_waitq_signal(&service->srv_waitq); + /* NB everything can disappear under us once the request + * has been queued and we unlock, so do the wake now... */ + cfs_waitq_signal(&svcpt->scp_waitq); - cfs_spin_unlock(&service->srv_lock); - EXIT; + cfs_spin_unlock(&svcpt->scp_lock); + EXIT; } /* @@ -326,9 +387,9 @@ void request_in_callback(lnet_event_t *ev) */ void reply_out_callback(lnet_event_t *ev) { - struct ptlrpc_cb_id *cbid = ev->md.user_ptr; - struct ptlrpc_reply_state *rs = cbid->cbid_arg; - struct ptlrpc_service *svc = rs->rs_service; + struct ptlrpc_cb_id *cbid = ev->md.user_ptr; + struct ptlrpc_reply_state *rs = cbid->cbid_arg; + struct ptlrpc_service_part *svcpt = rs->rs_svcpt; ENTRY; LASSERT (ev->type == LNET_EVENT_SEND || @@ -340,7 +401,6 @@ void reply_out_callback(lnet_event_t *ev) * net's ref on 'rs' */ LASSERT (ev->unlinked); ptlrpc_rs_decref(rs); - cfs_atomic_dec (&svc->srv_outstanding_replies); EXIT; return; } @@ -350,19 +410,22 @@ void reply_out_callback(lnet_event_t *ev) if (ev->unlinked) { /* Last network callback. The net's ref on 'rs' stays put * until ptlrpc_handle_rs() is done with it */ - cfs_spin_lock(&svc->srv_lock); - cfs_spin_lock(&rs->rs_lock); - rs->rs_on_net = 0; - if (!rs->rs_no_ack || - rs->rs_transno <= rs->rs_export->exp_obd->obd_last_committed) - ptlrpc_schedule_difficult_reply (rs); - cfs_spin_unlock(&rs->rs_lock); - cfs_spin_unlock(&svc->srv_lock); - } - - EXIT; + cfs_spin_lock(&svcpt->scp_rep_lock); + cfs_spin_lock(&rs->rs_lock); + + rs->rs_on_net = 0; + if (!rs->rs_no_ack || + rs->rs_transno <= + rs->rs_export->exp_obd->obd_last_committed) + ptlrpc_schedule_difficult_reply(rs); + + cfs_spin_unlock(&rs->rs_lock); + cfs_spin_unlock(&svcpt->scp_rep_lock); + } + EXIT; } +#ifdef HAVE_SERVER_SUPPORT /* * Server's bulk completion callback */ @@ -405,6 +468,7 @@ void server_bulk_callback (lnet_event_t *ev) cfs_spin_unlock(&desc->bd_lock); EXIT; } +#endif static void ptlrpc_master_callback(lnet_event_t *ev) { @@ -417,8 +481,11 @@ static void ptlrpc_master_callback(lnet_event_t *ev) callback == reply_in_callback || callback == client_bulk_callback || callback == request_in_callback || - callback == reply_out_callback || - callback == server_bulk_callback); + callback == reply_out_callback +#ifdef HAVE_SERVER_SUPPORT + || callback == server_bulk_callback +#endif + ); callback (ev); } @@ -542,10 +609,11 @@ int ptlrpc_ni_init(void) /* CAVEAT EMPTOR: how we process portals events is _radically_ * different depending on... */ #ifdef __KERNEL__ - /* kernel portals calls our master callback when events are added to - * the event queue. 
In fact lustre never pulls events off this queue,
-         * so it's only sized for some debug history. */
-        rc = LNetEQAlloc(1024, ptlrpc_master_callback, &ptlrpc_eq_h);
+        /* kernel LNet calls our master callback when there are new events;
+         * because we are guaranteed to get every event via the callback,
+         * we just set the EQ size to 0 to avoid the overhead of
+         * serializing enqueue/dequeue operations in LNet. */
+        rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
 #else
         /* liblustre calls the master callback when it removes events from the
          * event queue. The event queue has to be big enough not to drop
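
The history-ID packing introduced by this patch is the conceptual core of the
ptlrpc_req_add_history() hunk above. Below is a minimal standalone sketch of
the packing and the bump-on-collision behaviour; the names pack_history_seq()
and next_history_seq() and the bare uint64_t state are hypothetical stand-ins
for ptlrpc_req_add_history() and svcpt->scp_hist_seq, which the real code
keeps per service partition under svcpt::scp_lock:

#include <stdint.h>
#include <stdio.h>

#define SEC_SHIFT       32      /* arrival seconds in the top 32 bits */
#define USEC_SHIFT      16      /* usec / 16 in the next 16 bits */

/* Pack arrival time and CPT id into the time-based part of the ID:
 * | seconds | usec / 16 | 0...0 (sequence) | CPT id | */
static uint64_t pack_history_seq(uint64_t sec, uint32_t usec, uint32_t cpt)
{
        return (sec << SEC_SHIFT) |
               ((uint64_t)(usec >> 4) << USEC_SHIFT) | cpt;
}

/* Advance the partition-wide sequence: take the new time-based ID if it
 * moves the sequence forward, otherwise bump the sequence field that sits
 * just above the cpt_bits-wide CPT id, possibly spilling into a future
 * usec bucket exactly as the comment in the patch describes. */
static uint64_t next_history_seq(uint64_t *hist_seq, uint64_t new_seq,
                                 unsigned int cpt_bits)
{
        if (new_seq > *hist_seq)
                *hist_seq = new_seq;            /* new usec bucket */
        else
                *hist_seq += 1ULL << cpt_bits;  /* same bucket: bump seq */
        return *hist_seq;
}

int main(void)
{
        uint64_t hist_seq = 0;
        int i;

        /* three requests landing in the same usec bucket on CPT 1 of a
         * service with 2 CPT bits still get strictly increasing IDs */
        for (i = 0; i < 3; i++)
                printf("%#llx\n", (unsigned long long)
                       next_history_seq(&hist_seq,
                                        pack_history_seq(1000, 500, 1), 2));
        return 0;
}

Sorting records by this ID in userspace then recovers arrival order across
partitions without any cross-CPT locking, which is the point of reserving
the (16 - X)-bit sequence field between the usec bucket and the CPT id.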