X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fptlrpc%2Fclient.c;h=a791abf62ff635246b549537a60bf70929ccf94f;hp=7cfb6aaa5718ef81c7a0091297186c2243f0d8f6;hb=01ca899324738343279c1d63823b7fab937197dc;hpb=2b294992edce5af7b79d4300ed3aa1ea6a8db850 diff --git a/lustre/ptlrpc/client.c b/lustre/ptlrpc/client.c index 7cfb6aa..a791abf 100644 --- a/lustre/ptlrpc/client.c +++ b/lustre/ptlrpc/client.c @@ -47,8 +47,26 @@ #include "ptlrpc_internal.h" +const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = { + .add_kiov_frag = ptlrpc_prep_bulk_page_pin, + .release_frags = ptlrpc_release_bulk_page_pin, +}; +EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops); + +const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = { + .add_kiov_frag = ptlrpc_prep_bulk_page_nopin, + .release_frags = ptlrpc_release_bulk_noop, +}; +EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops); + +const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kvec_ops = { + .add_iov_frag = ptlrpc_prep_bulk_frag, +}; +EXPORT_SYMBOL(ptlrpc_bulk_kvec_ops); + static int ptlrpc_send_new_req(struct ptlrpc_request *req); static int ptlrpcd_check_work(struct ptlrpc_request *req); +static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async); /** * Initialize passed in client structure \a cl. @@ -91,29 +109,46 @@ struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid) return c; } -EXPORT_SYMBOL(ptlrpc_uuid_to_connection); /** * Allocate and initialize new bulk descriptor on the sender. * Returns pointer to the descriptor or NULL on error. */ -struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw, - unsigned type, unsigned portal) +struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned nfrags, unsigned max_brw, + enum ptlrpc_bulk_op_type type, + unsigned portal, + const struct ptlrpc_bulk_frag_ops *ops) { struct ptlrpc_bulk_desc *desc; int i; - OBD_ALLOC(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[npages])); + /* ensure that only one of KIOV or IOVEC is set but not both */ + LASSERT((ptlrpc_is_bulk_desc_kiov(type) && + ops->add_kiov_frag != NULL) || + (ptlrpc_is_bulk_desc_kvec(type) && + ops->add_iov_frag != NULL)); + + if (type & PTLRPC_BULK_BUF_KIOV) { + OBD_ALLOC(desc, + offsetof(struct ptlrpc_bulk_desc, + bd_u.bd_kiov.bd_vec[nfrags])); + } else { + OBD_ALLOC(desc, + offsetof(struct ptlrpc_bulk_desc, + bd_u.bd_kvec.bd_kvec[nfrags])); + } + if (!desc) return NULL; spin_lock_init(&desc->bd_lock); init_waitqueue_head(&desc->bd_waitq); - desc->bd_max_iov = npages; + desc->bd_max_iov = nfrags; desc->bd_iov_count = 0; desc->bd_portal = portal; desc->bd_type = type; desc->bd_md_count = 0; + desc->bd_frag_ops = (struct ptlrpc_bulk_frag_ops *) ops; LASSERT(max_brw > 0); desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT); /* PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this @@ -126,21 +161,25 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw, /** * Prepare bulk descriptor for specified outgoing request \a req that - * can fit \a npages * pages. \a type is bulk type. \a portal is where + * can fit \a nfrags * pages. \a type is bulk type. \a portal is where * the bulk to be sent. Used on client-side. * Returns pointer to newly allocatrd initialized bulk descriptor or NULL on * error. 
*/ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, - unsigned npages, unsigned max_brw, - unsigned type, unsigned portal) + unsigned nfrags, unsigned max_brw, + unsigned int type, + unsigned portal, + const struct ptlrpc_bulk_frag_ops + *ops) { struct obd_import *imp = req->rq_import; struct ptlrpc_bulk_desc *desc; ENTRY; - LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE); - desc = ptlrpc_new_bulk(npages, max_brw, type, portal); + LASSERT(ptlrpc_is_bulk_op_passive(type)); + + desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops); if (desc == NULL) RETURN(NULL); @@ -158,60 +197,90 @@ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, } EXPORT_SYMBOL(ptlrpc_prep_bulk_imp); -/* - * Add a page \a page to the bulk descriptor \a desc. - * Data to transfer in the page starts at offset \a pageoffset and - * amount of data to transfer from the page is \a len - */ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, - struct page *page, int pageoffset, int len, int pin) + struct page *page, int pageoffset, int len, + int pin) { + lnet_kiov_t *kiov; + LASSERT(desc->bd_iov_count < desc->bd_max_iov); LASSERT(page != NULL); LASSERT(pageoffset >= 0); LASSERT(len > 0); LASSERT(pageoffset + len <= PAGE_CACHE_SIZE); + LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); + + kiov = &BD_GET_KIOV(desc, desc->bd_iov_count); desc->bd_nob += len; if (pin) page_cache_get(page); - ptlrpc_add_bulk_page(desc, page, pageoffset, len); + kiov->kiov_page = page; + kiov->kiov_offset = pageoffset; + kiov->kiov_len = len; + + desc->bd_iov_count++; } EXPORT_SYMBOL(__ptlrpc_prep_bulk_page); -/** - * Uninitialize and free bulk descriptor \a desc. - * Works on bulk descriptors both from server and client side. - */ -void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin) +int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc, + void *frag, int len) +{ + struct kvec *iovec; + ENTRY; + + LASSERT(desc->bd_iov_count < desc->bd_max_iov); + LASSERT(frag != NULL); + LASSERT(len > 0); + LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type)); + + iovec = &BD_GET_KVEC(desc, desc->bd_iov_count); + + desc->bd_nob += len; + + iovec->iov_base = frag; + iovec->iov_len = len; + + desc->bd_iov_count++; + + RETURN(desc->bd_nob); +} +EXPORT_SYMBOL(ptlrpc_prep_bulk_frag); + +void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc) { - int i; ENTRY; LASSERT(desc != NULL); LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */ LASSERT(desc->bd_md_count == 0); /* network hands off */ LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL)); + LASSERT(desc->bd_frag_ops != NULL); - sptlrpc_enc_pool_put_pages(desc); + if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) + sptlrpc_enc_pool_put_pages(desc); if (desc->bd_export) class_export_put(desc->bd_export); else class_import_put(desc->bd_import); - if (unpin) { - for (i = 0; i < desc->bd_iov_count ; i++) - page_cache_release(desc->bd_iov[i].kiov_page); - } + if (desc->bd_frag_ops->release_frags != NULL) + desc->bd_frag_ops->release_frags(desc); + + if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) + OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc, + bd_u.bd_kiov.bd_vec[desc->bd_max_iov])); + else + OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc, + bd_u.bd_kvec.bd_kvec[desc-> + bd_max_iov])); - OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc, - bd_iov[desc->bd_max_iov])); EXIT; } -EXPORT_SYMBOL(__ptlrpc_free_bulk); +EXPORT_SYMBOL(ptlrpc_free_bulk); /** * Set server timelimit for this req, i.e. 
how long are we willing to wait @@ -359,27 +428,29 @@ __must_hold(&req->rq_lock) rc = sptlrpc_cli_unwrap_early_reply(req, &early_req); if (rc) { spin_lock(&req->rq_lock); - RETURN(rc); - } - - rc = unpack_reply(early_req); - if (rc == 0) { - /* Expecting to increase the service time estimate here */ - ptlrpc_at_adj_service(req, - lustre_msg_get_timeout(early_req->rq_repmsg)); - ptlrpc_at_adj_net_latency(req, - lustre_msg_get_service_time(early_req->rq_repmsg)); - } - - sptlrpc_cli_finish_early_reply(early_req); + RETURN(rc); + } + rc = unpack_reply(early_req); if (rc != 0) { + sptlrpc_cli_finish_early_reply(early_req); spin_lock(&req->rq_lock); RETURN(rc); } - /* Adjust the local timeout for this req */ - ptlrpc_at_set_req_timeout(req); + /* Use new timeout value just to adjust the local value for this + * request, don't include it into at_history. It is unclear yet why + * service time increased and should it be counted or skipped, e.g. + * that can be recovery case or some error or server, the real reply + * will add all new data if it is worth to add. */ + req->rq_timeout = lustre_msg_get_timeout(early_req->rq_repmsg); + lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout); + + /* Network latency can be adjusted, it is pure network delays */ + ptlrpc_at_adj_net_latency(req, + lustre_msg_get_service_time(early_req->rq_repmsg)); + + sptlrpc_cli_finish_early_reply(early_req); spin_lock(&req->rq_lock); olddl = req->rq_deadline; @@ -455,7 +526,7 @@ EXPORT_SYMBOL(ptlrpc_free_rq_pool); /** * Allocates, initializes and adds \a num_rq requests to the pool \a pool */ -void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) +int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) { int i; int size = 1; @@ -477,11 +548,11 @@ void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) spin_unlock(&pool->prp_lock); req = ptlrpc_request_cache_alloc(GFP_NOFS); if (!req) - return; + return i; OBD_ALLOC_LARGE(msg, size); if (!msg) { ptlrpc_request_cache_free(req); - return; + return i; } req->rq_reqbuf = msg; req->rq_reqbuf_len = size; @@ -490,7 +561,7 @@ void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) list_add_tail(&req->rq_list, &pool->prp_req_list); } spin_unlock(&pool->prp_lock); - return; + return num_rq; } EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool); @@ -504,7 +575,7 @@ EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool); */ struct ptlrpc_request_pool * ptlrpc_init_rq_pool(int num_rq, int msgsize, - void (*populate_pool)(struct ptlrpc_request_pool *, int)) + int (*populate_pool)(struct ptlrpc_request_pool *, int)) { struct ptlrpc_request_pool *pool; @@ -522,11 +593,6 @@ ptlrpc_init_rq_pool(int num_rq, int msgsize, populate_pool(pool, num_rq); - if (list_empty(&pool->prp_req_list)) { - /* have not allocated a single request for the pool */ - OBD_FREE(pool, sizeof(struct ptlrpc_request_pool)); - pool = NULL; - } return pool; } EXPORT_SYMBOL(ptlrpc_init_rq_pool); @@ -630,7 +696,6 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request, ptlrpc_at_set_req_timeout(request); - request->rq_xid = ptlrpc_next_xid(); lustre_msg_set_opc(request->rq_reqmsg, opcode); RETURN(0); @@ -700,11 +765,10 @@ struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp, { struct ptlrpc_request *request = NULL; - if (pool) - request = ptlrpc_prep_req_from_pool(pool); + request = ptlrpc_request_cache_alloc(GFP_NOFS); - if (!request) - request = ptlrpc_request_cache_alloc(GFP_NOFS); + if (!request && pool) + request = 
ptlrpc_prep_req_from_pool(pool); if (request) { ptlrpc_cli_req_init(request); @@ -834,7 +898,6 @@ ptlrpc_prep_req_pool(struct obd_import *imp, } return request; } -EXPORT_SYMBOL(ptlrpc_prep_req_pool); /** * Same as ptlrpc_prep_req_pool, but without pool @@ -846,18 +909,19 @@ ptlrpc_prep_req(struct obd_import *imp, __u32 version, int opcode, int count, return ptlrpc_prep_req_pool(imp, version, opcode, count, lengths, bufs, NULL); } -EXPORT_SYMBOL(ptlrpc_prep_req); /** - * Allocate and initialize new request set structure. + * Allocate and initialize new request set structure on the current CPT. * Returns a pointer to the newly allocated set structure or NULL on error. */ struct ptlrpc_request_set *ptlrpc_prep_set(void) { - struct ptlrpc_request_set *set; + struct ptlrpc_request_set *set; + int cpt; ENTRY; - OBD_ALLOC(set, sizeof *set); + cpt = cfs_cpt_current(cfs_cpt_table, 0); + OBD_CPT_ALLOC(set, cfs_cpt_table, cpt, sizeof *set); if (!set) RETURN(NULL); atomic_set(&set->set_refcount, 1); @@ -901,7 +965,6 @@ struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func, RETURN(set); } -EXPORT_SYMBOL(ptlrpc_prep_fcset); /** * Wind down and free request set structure previously allocated with @@ -983,7 +1046,6 @@ int ptlrpc_set_add_cb(struct ptlrpc_request_set *set, RETURN(0); } -EXPORT_SYMBOL(ptlrpc_set_add_cb); /** * Add a new request to the general purpose request set. @@ -1045,7 +1107,6 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, wake_up(&pc->pc_partners[i]->pc_set->set_waitq); } } -EXPORT_SYMBOL(ptlrpc_set_add_new_req); /** * Based on the current state of the import, determine if the request @@ -1222,6 +1283,7 @@ static int after_reply(struct ptlrpc_request *req) struct obd_device *obd = req->rq_import->imp_obd; int rc; struct timeval work_start; + __u64 committed; long timediff; ENTRY; @@ -1250,6 +1312,9 @@ static int after_reply(struct ptlrpc_request *req) RETURN(0); } + do_gettimeofday(&work_start); + timediff = cfs_timeval_sub(&work_start, &req->rq_sent_tv, NULL); + /* * NB Until this point, the whole of the incoming message, * including buflens, status etc is in the sender's byte order. @@ -1281,15 +1346,6 @@ static int after_reply(struct ptlrpc_request *req) spin_unlock(&req->rq_lock); req->rq_nr_resend++; - /* allocate new xid to avoid reply reconstruction */ - if (!req->rq_bulk) { - /* new xid is already allocated for bulk in - * ptlrpc_check_set() */ - req->rq_xid = ptlrpc_next_xid(); - DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for " - "resend on EINPROGRESS"); - } - /* Readjust the timeout for current conditions */ ptlrpc_at_set_req_timeout(req); /* delay resend to give a chance to the server to get ready. @@ -1304,8 +1360,6 @@ static int after_reply(struct ptlrpc_request *req) RETURN(0); } - do_gettimeofday(&work_start); - timediff = cfs_timeval_sub(&work_start, &req->rq_sent_tv, NULL); if (obd->obd_svc_stats != NULL) { lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, timediff); @@ -1384,10 +1438,9 @@ static int after_reply(struct ptlrpc_request *req) /* * Replay-enabled imports return commit-status information. 
*/ - if (lustre_msg_get_last_committed(req->rq_repmsg)) { - imp->imp_peer_committed_transno = - lustre_msg_get_last_committed(req->rq_repmsg); - } + committed = lustre_msg_get_last_committed(req->rq_repmsg); + if (likely(committed > imp->imp_peer_committed_transno)) + imp->imp_peer_committed_transno = committed; ptlrpc_free_committed(imp); @@ -1419,10 +1472,19 @@ static int after_reply(struct ptlrpc_request *req) static int ptlrpc_send_new_req(struct ptlrpc_request *req) { struct obd_import *imp = req->rq_import; + struct list_head *tmp; + __u64 min_xid = ~0ULL; int rc; ENTRY; LASSERT(req->rq_phase == RQ_PHASE_NEW); + + /* do not try to go further if there is not enough memory in enc_pool */ + if (req->rq_sent && req->rq_bulk != NULL) + if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() && + pool_is_at_full_capacity()) + RETURN(-ENOMEM); + if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) && (!req->rq_generation_set || req->rq_import_generation == imp->imp_generation)) @@ -1432,6 +1494,16 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req) spin_lock(&imp->imp_lock); + /* the very first time we assign XID. it's important to assign XID + * and put it on the list atomically, so that the lowest assigned + * XID is always known. this is vital for multislot last_rcvd */ + if (req->rq_send_state == LUSTRE_IMP_REPLAY) { + LASSERT(req->rq_xid != 0); + } else { + LASSERT(req->rq_xid == 0); + req->rq_xid = ptlrpc_next_xid(); + } + if (!req->rq_generation_set) req->rq_import_generation = imp->imp_generation; @@ -1461,8 +1533,25 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req) LASSERT(list_empty(&req->rq_list)); list_add_tail(&req->rq_list, &imp->imp_sending_list); atomic_inc(&req->rq_import->imp_inflight); + + /* find the lowest unreplied XID */ + list_for_each(tmp, &imp->imp_delayed_list) { + struct ptlrpc_request *r; + r = list_entry(tmp, struct ptlrpc_request, rq_list); + if (r->rq_xid < min_xid) + min_xid = r->rq_xid; + } + list_for_each(tmp, &imp->imp_sending_list) { + struct ptlrpc_request *r; + r = list_entry(tmp, struct ptlrpc_request, rq_list); + if (r->rq_xid < min_xid) + min_xid = r->rq_xid; + } spin_unlock(&imp->imp_lock); + if (likely(min_xid != ~0ULL)) + lustre_msg_set_last_xid(req->rq_reqmsg, min_xid - 1); + lustre_msg_set_status(req->rq_reqmsg, current_pid()); rc = sptlrpc_req_refresh_ctx(req, -1); @@ -1486,6 +1575,16 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req) lustre_msg_get_opc(req->rq_reqmsg)); rc = ptl_send_rpc(req, 0); + if (rc == -ENOMEM) { + spin_lock(&imp->imp_lock); + if (!list_empty(&req->rq_list)) { + list_del_init(&req->rq_list); + atomic_dec(&req->rq_import->imp_inflight); + } + spin_unlock(&imp->imp_lock); + ptlrpc_rqphase_move(req, RQ_PHASE_NEW); + RETURN(rc); + } if (rc) { DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc); spin_lock(&req->rq_lock); @@ -1724,20 +1823,10 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) spin_lock(&req->rq_lock); req->rq_resend = 1; spin_unlock(&req->rq_lock); - if (req->rq_bulk) { - __u64 old_xid; - - if (!ptlrpc_unregister_bulk(req, 1)) - continue; - - /* ensure previous bulk fails */ - old_xid = req->rq_xid; - req->rq_xid = ptlrpc_next_xid(); - CDEBUG(D_HA, "resend bulk " - "old x"LPU64 - " new x"LPU64"\n", - old_xid, req->rq_xid); - } + + if (req->rq_bulk != NULL && + !ptlrpc_unregister_bulk(req, 1)) + continue; } /* * rq_wait_ctx is only touched by ptlrpcd, @@ -1765,6 +1854,14 @@ int ptlrpc_check_set(const struct lu_env *env, struct 
ptlrpc_request_set *set) } rc = ptl_send_rpc(req, 0); + if (rc == -ENOMEM) { + spin_lock(&imp->imp_lock); + if (!list_empty(&req->rq_list)) + list_del_init(&req->rq_list); + spin_unlock(&imp->imp_lock); + ptlrpc_rqphase_move(req, RQ_PHASE_NEW); + continue; + } if (rc) { DEBUG_REQ(D_HA, req, "send failed: rc = %d", rc); @@ -2036,7 +2133,6 @@ int ptlrpc_expired_set(void *data) */ RETURN(1); } -EXPORT_SYMBOL(ptlrpc_expired_set); /** * Sets rq_intr flag in \a req under spinlock. @@ -2053,7 +2149,7 @@ EXPORT_SYMBOL(ptlrpc_mark_interrupted); * Interrupts (sets interrupted flag) all uncompleted requests in * a set \a data. Callback for l_wait_event for interruptible waits. */ -void ptlrpc_interrupted_set(void *data) +static void ptlrpc_interrupted_set(void *data) { struct ptlrpc_request_set *set = data; struct list_head *tmp; @@ -2072,7 +2168,6 @@ void ptlrpc_interrupted_set(void *data) ptlrpc_mark_interrupted(req); } } -EXPORT_SYMBOL(ptlrpc_interrupted_set); /** * Get the smallest timeout in the set; this does NOT set a timeout. @@ -2123,7 +2218,6 @@ int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set) } RETURN(timeout); } -EXPORT_SYMBOL(ptlrpc_set_next_timeout); /** * Send all unset request from the set and then wait untill all @@ -2160,7 +2254,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n", set, timeout); - if (timeout == 0 && !cfs_signal_pending()) + if (timeout == 0 && !signal_pending(current)) /* * No requests are in-flight (ether timed out * or delayed), so we can allow interrupts. @@ -2185,7 +2279,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) * pending when we started, we need to handle it now or we risk * it being ignored forever */ if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr && - cfs_signal_pending()) { + signal_pending(current)) { sigset_t blocked_sigs = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); @@ -2193,7 +2287,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) * like SIGINT or SIGKILL. We still ignore less * important signals since ptlrpc set is not easily * reentrant from userspace again */ - if (cfs_signal_pending()) + if (signal_pending(current)) ptlrpc_interrupted_set(set); cfs_restore_sigs(blocked_sigs); } @@ -2300,7 +2394,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) request->rq_import = NULL; } if (request->rq_bulk != NULL) - ptlrpc_free_bulk_pin(request->rq_bulk); + ptlrpc_free_bulk(request->rq_bulk); if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL) sptlrpc_cli_free_reqbuf(request); @@ -2318,14 +2412,13 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked); /** * Drop one request reference. Must be called with import imp_lock held. - * When reference count drops to zero, reuqest is freed. + * When reference count drops to zero, request is freed. */ void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request) { assert_spin_locked(&request->rq_import->imp_lock); (void)__ptlrpc_req_finished(request, 1); } -EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock); /** * Helper function @@ -2382,7 +2475,7 @@ EXPORT_SYMBOL(ptlrpc_req_xid); * The request owner (i.e. the thread doing the I/O) must call... * Returns 0 on success or 1 if unregistering cannot be made. 
*/ -int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) +static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) { int rc; struct l_wait_info lwi; @@ -2454,7 +2547,6 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) } RETURN(0); } -EXPORT_SYMBOL(ptlrpc_unregister_reply); static void ptlrpc_free_request(struct ptlrpc_request *req) { @@ -2581,7 +2673,6 @@ void ptlrpc_cleanup_client(struct obd_import *imp) ENTRY; EXIT; } -EXPORT_SYMBOL(ptlrpc_cleanup_client); /** * Schedule previously sent request for resend. @@ -2608,18 +2699,10 @@ void ptlrpc_resend_req(struct ptlrpc_request *req) req->rq_resend = 1; req->rq_net_err = 0; req->rq_timedout = 0; - if (req->rq_bulk) { - __u64 old_xid = req->rq_xid; - /* ensure previous bulk fails */ - req->rq_xid = ptlrpc_next_xid(); - CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n", - old_xid, req->rq_xid); - } ptlrpc_client_wake_req(req); spin_unlock(&req->rq_lock); } -EXPORT_SYMBOL(ptlrpc_resend_req); /* XXX: this function and rq_status are currently unused */ void ptlrpc_restart_req(struct ptlrpc_request *req) @@ -2633,7 +2716,6 @@ void ptlrpc_restart_req(struct ptlrpc_request *req) ptlrpc_client_wake_req(req); spin_unlock(&req->rq_lock); } -EXPORT_SYMBOL(ptlrpc_restart_req); /** * Grab additional reference on a request \a req @@ -2672,6 +2754,10 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY); + spin_lock(&req->rq_lock); + req->rq_resend = 0; + spin_unlock(&req->rq_lock); + LASSERT(imp->imp_replayable); /* Balanced in ptlrpc_free_committed, usually. */ ptlrpc_request_addref(req); @@ -2701,7 +2787,6 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, list_add(&req->rq_replay_list, &imp->imp_replay_list); } -EXPORT_SYMBOL(ptlrpc_retain_replayable_request); /** * Send request and wait until it completes. @@ -2737,7 +2822,7 @@ EXPORT_SYMBOL(ptlrpc_queue_wait); /** * Callback used for replayed requests reply processing. - * In case of succesful reply calls registeresd request replay callback. + * In case of successful reply calls registered request replay callback. * In case of error restart replay process. 
*/ static int ptlrpc_replay_interpret(const struct lu_env *env, @@ -2750,10 +2835,15 @@ static int ptlrpc_replay_interpret(const struct lu_env *env, ENTRY; atomic_dec(&imp->imp_replay_inflight); - if (!ptlrpc_client_replied(req)) { - CERROR("request replay timed out, restarting recovery\n"); - GOTO(out, rc = -ETIMEDOUT); - } + /* Note: if it is bulk replay (MDS-MDS replay), then even if + * server got the request, but bulk transfer timeout, let's + * replay the bulk req again */ + if (!ptlrpc_client_replied(req) || + (req->rq_bulk != NULL && + lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) { + DEBUG_REQ(D_ERROR, req, "request replay timed out.\n"); + GOTO(out, rc = -ETIMEDOUT); + } if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR && (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN || @@ -2868,10 +2958,9 @@ int ptlrpc_replay_req(struct ptlrpc_request *req) atomic_inc(&req->rq_import->imp_replay_inflight); ptlrpc_request_addref(req); /* ptlrpcd needs a ref */ - ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1); + ptlrpcd_add_req(req); RETURN(0); } -EXPORT_SYMBOL(ptlrpc_replay_req); /** * Aborts all in-flight request on import \a imp sending and delayed lists @@ -2931,7 +3020,6 @@ void ptlrpc_abort_inflight(struct obd_import *imp) EXIT; } -EXPORT_SYMBOL(ptlrpc_abort_inflight); /** * Abort all uncompleted requests in request set \a set @@ -3020,7 +3108,43 @@ __u64 ptlrpc_next_xid(void) return next; } -EXPORT_SYMBOL(ptlrpc_next_xid); + +/** + * If request has a new allocated XID (new request or EINPROGRESS resend), + * use this XID as matchbits of bulk, otherwise allocate a new matchbits for + * request to ensure previous bulk fails and avoid problems with lost replies + * and therefore several transfers landing into the same buffer from different + * sending attempts. + */ +void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req) +{ + struct ptlrpc_bulk_desc *bd = req->rq_bulk; + + LASSERT(bd != NULL); + + if (!req->rq_resend || req->rq_nr_resend != 0) { + /* this request has a new xid, just use it as bulk matchbits */ + req->rq_mbits = req->rq_xid; + + } else { /* needs to generate a new matchbits for resend */ + __u64 old_mbits = req->rq_mbits; + + if ((bd->bd_import->imp_connect_data.ocd_connect_flags & + OBD_CONNECT_BULK_MBITS) != 0) + req->rq_mbits = ptlrpc_next_xid(); + else /* old version transfers rq_xid to peer as matchbits */ + req->rq_mbits = req->rq_xid = ptlrpc_next_xid(); + + CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n", + old_mbits, req->rq_mbits); + } + + /* For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so + * that server can infer the number of bulks that were prepared, + * see LU-1431 */ + req->rq_mbits += ((bd->bd_iov_count + LNET_MAX_IOV - 1) / + LNET_MAX_IOV) - 1; +} /** * Get a glimpse at what next xid value might have been. @@ -3078,7 +3202,7 @@ static void ptlrpcd_add_work_req(struct ptlrpc_request *req) req->rq_xid = ptlrpc_next_xid(); req->rq_import_generation = req->rq_import->imp_generation; - ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1); + ptlrpcd_add_req(req); } static int work_interpreter(const struct lu_env *env,
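
The new const ptlrpc_bulk_frag_ops tables (kiov pin, kiov nopin, kvec) at the top of the patch let ptlrpc_new_bulk() and ptlrpc_free_bulk() stay agnostic about whether a descriptor carries pages or kvecs. Below is a small user-space sketch of that ops-table pattern; it is not the Lustre API itself, and all names in it are hypothetical stand-ins.

/*
 * User-space sketch (not the Lustre API) of the const-ops-table pattern
 * introduced by ptlrpc_bulk_frag_ops: a bulk descriptor carries a pointer
 * to an ops table chosen at allocation time, so page-based (KIOV) and
 * kvec-based descriptors share one add/free code path.
 */
#include <stdio.h>

struct bulk_desc;

struct bulk_frag_ops {
	void (*add_frag)(struct bulk_desc *d, void *data, int len);
	void (*release_frags)(struct bulk_desc *d);	/* may be NULL */
};

struct bulk_desc {
	const struct bulk_frag_ops *ops;
	int nfrags;
};

static void add_pinned(struct bulk_desc *d, void *data, int len)
{
	/* real code would take a page reference here */
	printf("pin   frag %d, len %d\n", d->nfrags++, len);
}

static void release_pinned(struct bulk_desc *d)
{
	printf("unpin %d frags\n", d->nfrags);
}

static void add_nopin(struct bulk_desc *d, void *data, int len)
{
	printf("nopin frag %d, len %d\n", d->nfrags++, len);
}

static const struct bulk_frag_ops pin_ops   = { add_pinned, release_pinned };
static const struct bulk_frag_ops nopin_ops = { add_nopin,  NULL };

int main(void)
{
	char buf[64];
	struct bulk_desc d1 = { &pin_ops, 0 };
	struct bulk_desc d2 = { &nopin_ops, 0 };

	d1.ops->add_frag(&d1, buf, sizeof(buf));
	d2.ops->add_frag(&d2, buf, sizeof(buf));

	/* mirrors the "release_frags != NULL" check added in ptlrpc_free_bulk() */
	if (d1.ops->release_frags)
		d1.ops->release_frags(&d1);
	if (d2.ops->release_frags)
		d2.ops->release_frags(&d2);
	return 0;
}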
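
The hunk adding ptlrpc_set_bulk_mbits() derives the bulk matchbits from the request XID and then advances rq_mbits so it names the last matchbits of a multi-bulk transfer, letting the server infer how many bulk MDs were prepared (LU-1431). A standalone sketch of that arithmetic follows; LNET_MAX_IOV is assumed to be 256, its usual value, and the XIDs and fragment counts are invented for illustration.

/*
 * Standalone sketch (not Lustre code) of the matchbits arithmetic used by
 * the new ptlrpc_set_bulk_mbits().
 */
#include <stdio.h>
#include <stdint.h>

#define LNET_MAX_IOV 256	/* assumed max fragments per LNet MD */

static uint64_t last_bulk_mbits(uint64_t xid, unsigned int nfrags)
{
	/* ceil(nfrags / LNET_MAX_IOV) MDs, numbered xid .. xid + mds - 1 */
	unsigned int mds = (nfrags + LNET_MAX_IOV - 1) / LNET_MAX_IOV;

	return xid + mds - 1;
}

int main(void)
{
	/* 100 fragments fit in one MD: last matchbits == xid (prints 1000) */
	printf("%llu\n", (unsigned long long)last_bulk_mbits(1000, 100));
	/* 600 fragments need 3 MDs: last matchbits == xid + 2 (prints 1002) */
	printf("%llu\n", (unsigned long long)last_bulk_mbits(1000, 600));
	return 0;
}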