diff --git a/lustre/ptlrpc/client.c b/lustre/ptlrpc/client.c
index 6416e5f..59f4cc5 100644
--- a/lustre/ptlrpc/client.c
+++ b/lustre/ptlrpc/client.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2015, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -37,11 +37,6 @@ /** Implementation of client-side PortalRPC interfaces */
 
 #define DEBUG_SUBSYSTEM S_RPC
 
-#ifndef __KERNEL__
-#include <errno.h>
-#include <signal.h>
-#include <liblustre.h>
-#endif
 
 #include <obd_support.h>
 #include <obd_class.h>
@@ -52,8 +47,26 @@
 #include "ptlrpc_internal.h"
 
+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = {
+	.add_kiov_frag	= ptlrpc_prep_bulk_page_pin,
+	.release_frags	= ptlrpc_release_bulk_page_pin,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops);
+
+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
+	.add_kiov_frag	= ptlrpc_prep_bulk_page_nopin,
+	.release_frags	= ptlrpc_release_bulk_noop,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
+
+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kvec_ops = {
+	.add_iov_frag	= ptlrpc_prep_bulk_frag,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kvec_ops);
+
 static int ptlrpc_send_new_req(struct ptlrpc_request *req);
 static int ptlrpcd_check_work(struct ptlrpc_request *req);
+static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
 
 /**
  * Initialize passed in client structure \a cl.
@@ -96,29 +109,48 @@ struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
 	return c;
 }
-EXPORT_SYMBOL(ptlrpc_uuid_to_connection);
 
 /**
  * Allocate and initialize new bulk descriptor on the sender.
  * Returns pointer to the descriptor or NULL on error.
 */
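
The three ops tables added above give each bulk descriptor a small vtable: KIOV users choose between the page-pinning and non-pinning helpers, kvec users get ptlrpc_prep_bulk_frag(), and ptlrpc_new_bulk() asserts that exactly one fragment flavour is wired up. A minimal standalone sketch of that dispatch pattern (invented names, not the Lustre API):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for ptlrpc_bulk_frag_ops: one callback per fragment flavour. */
struct frag_ops {
	int (*add_page_frag)(int page_idx, int off, int len);	/* KIOV-style */
	int (*add_iov_frag)(void *base, int len);		/* KVEC-style */
};

static int add_pinned_page(int page_idx, int off, int len)
{
	printf("page %d: offset %d, %d bytes\n", page_idx, off, len);
	return len;
}

static const struct frag_ops kiov_pin_ops = {
	.add_page_frag = add_pinned_page,
};

int main(void)
{
	const struct frag_ops *ops = &kiov_pin_ops;

	/* mirrors the LASSERT in ptlrpc_new_bulk(): exactly one flavour set */
	assert((ops->add_page_frag != NULL) ^ (ops->add_iov_frag != NULL));

	ops->add_page_frag(0, 0, 4096);
	return 0;
}

Keeping release_frags in the same table is what lets ptlrpc_free_bulk() further below stay ignorant of whether the pages were pinned.
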
-struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
-					 unsigned type, unsigned portal)
+struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned nfrags, unsigned max_brw,
+					 enum ptlrpc_bulk_op_type type,
+					 unsigned portal,
+					 const struct ptlrpc_bulk_frag_ops *ops)
 {
 	struct ptlrpc_bulk_desc *desc;
 	int i;
 
-	OBD_ALLOC(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]));
-	if (!desc)
+	/* ensure that only one of KIOV or IOVEC is set but not both */
+	LASSERT((ptlrpc_is_bulk_desc_kiov(type) &&
+		 ops->add_kiov_frag != NULL) ||
+		(ptlrpc_is_bulk_desc_kvec(type) &&
+		 ops->add_iov_frag != NULL));
+
+	OBD_ALLOC_PTR(desc);
+	if (desc == NULL)
 		return NULL;
+	if (type & PTLRPC_BULK_BUF_KIOV) {
+		OBD_ALLOC_LARGE(GET_KIOV(desc),
+				nfrags * sizeof(*GET_KIOV(desc)));
+		if (GET_KIOV(desc) == NULL)
+			goto out;
+	} else {
+		OBD_ALLOC_LARGE(GET_KVEC(desc),
+				nfrags * sizeof(*GET_KVEC(desc)));
+		if (GET_KVEC(desc) == NULL)
+			goto out;
+	}
 
 	spin_lock_init(&desc->bd_lock);
 	init_waitqueue_head(&desc->bd_waitq);
-	desc->bd_max_iov = npages;
+	desc->bd_max_iov = nfrags;
 	desc->bd_iov_count = 0;
 	desc->bd_portal = portal;
 	desc->bd_type = type;
 	desc->bd_md_count = 0;
+	desc->bd_frag_ops = (struct ptlrpc_bulk_frag_ops *) ops;
 	LASSERT(max_brw > 0);
 	desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
 	/* PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
@@ -127,25 +159,32 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
 		LNetInvalidateHandle(&desc->bd_mds[i]);
 
 	return desc;
+out:
+	OBD_FREE_PTR(desc);
+	return NULL;
 }
 
 /**
  * Prepare bulk descriptor for specified outgoing request \a req that
- * can fit \a npages * pages. \a type is bulk type. \a portal is where
+ * can fit \a nfrags * pages. \a type is bulk type. \a portal is where
  * the bulk is to be sent. Used on client-side.
 * Returns pointer to newly allocated and initialized bulk descriptor or NULL
 * on error.
 */
 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
-					      unsigned npages, unsigned max_brw,
-					      unsigned type, unsigned portal)
+					      unsigned nfrags, unsigned max_brw,
+					      unsigned int type,
+					      unsigned portal,
+					      const struct ptlrpc_bulk_frag_ops
+						*ops)
 {
 	struct obd_import *imp = req->rq_import;
 	struct ptlrpc_bulk_desc *desc;
 
 	ENTRY;
-	LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
-	desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
+	LASSERT(ptlrpc_is_bulk_op_passive(type));
+
+	desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
 	if (desc == NULL)
 		RETURN(NULL);
 
@@ -163,60 +202,89 @@ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
 }
 EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
 
-/*
- * Add a page \a page to the bulk descriptor \a desc.
- * Data to transfer in the page starts at offset \a pageoffset and - * amount of data to transfer from the page is \a len - */ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, - struct page *page, int pageoffset, int len, int pin) + struct page *page, int pageoffset, int len, + int pin) { + lnet_kiov_t *kiov; + LASSERT(desc->bd_iov_count < desc->bd_max_iov); LASSERT(page != NULL); LASSERT(pageoffset >= 0); LASSERT(len > 0); - LASSERT(pageoffset + len <= PAGE_CACHE_SIZE); + LASSERT(pageoffset + len <= PAGE_SIZE); + LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); + + kiov = &BD_GET_KIOV(desc, desc->bd_iov_count); desc->bd_nob += len; if (pin) page_cache_get(page); - ptlrpc_add_bulk_page(desc, page, pageoffset, len); + kiov->kiov_page = page; + kiov->kiov_offset = pageoffset; + kiov->kiov_len = len; + + desc->bd_iov_count++; } EXPORT_SYMBOL(__ptlrpc_prep_bulk_page); -/** - * Uninitialize and free bulk descriptor \a desc. - * Works on bulk descriptors both from server and client side. - */ -void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin) +int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc, + void *frag, int len) +{ + struct kvec *iovec; + ENTRY; + + LASSERT(desc->bd_iov_count < desc->bd_max_iov); + LASSERT(frag != NULL); + LASSERT(len > 0); + LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type)); + + iovec = &BD_GET_KVEC(desc, desc->bd_iov_count); + + desc->bd_nob += len; + + iovec->iov_base = frag; + iovec->iov_len = len; + + desc->bd_iov_count++; + + RETURN(desc->bd_nob); +} +EXPORT_SYMBOL(ptlrpc_prep_bulk_frag); + +void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc) { - int i; ENTRY; LASSERT(desc != NULL); LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */ LASSERT(desc->bd_md_count == 0); /* network hands off */ LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL)); + LASSERT(desc->bd_frag_ops != NULL); - sptlrpc_enc_pool_put_pages(desc); + if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) + sptlrpc_enc_pool_put_pages(desc); if (desc->bd_export) class_export_put(desc->bd_export); else class_import_put(desc->bd_import); - if (unpin) { - for (i = 0; i < desc->bd_iov_count ; i++) - page_cache_release(desc->bd_iov[i].kiov_page); - } + if (desc->bd_frag_ops->release_frags != NULL) + desc->bd_frag_ops->release_frags(desc); - OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc, - bd_iov[desc->bd_max_iov])); + if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) + OBD_FREE_LARGE(GET_KIOV(desc), + desc->bd_max_iov * sizeof(*GET_KIOV(desc))); + else + OBD_FREE_LARGE(GET_KVEC(desc), + desc->bd_max_iov * sizeof(*GET_KVEC(desc))); + OBD_FREE_PTR(desc); EXIT; } -EXPORT_SYMBOL(__ptlrpc_free_bulk); +EXPORT_SYMBOL(ptlrpc_free_bulk); /** * Set server timelimit for this req, i.e. 
how long are we willing to wait
@@ -286,22 +354,35 @@ int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
 }
 
 /* Adjust expected network latency */
-static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
-				      unsigned int service_time)
+void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
+			       unsigned int service_time)
 {
 	unsigned int nl, oldnl;
 	struct imp_at *at;
 	time_t now = cfs_time_current_sec();
 
 	LASSERT(req->rq_import);
-	at = &req->rq_import->imp_at;
+
+	if (service_time > now - req->rq_sent + 3) {
+		/* bz16408: this can also happen if an early reply is lost
+		 * and the client RPC expires and is resent; the early reply
+		 * or the reply to the original RPC can still fit in the
+		 * resent RPC's reply buffer. The client is then measuring
+		 * time from the resend, while the server reported the
+		 * service time of the original RPC.
+		 */
+		CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ?
+		       D_ADAPTTO : D_WARNING,
+		       "Reported service time %u > total measured time "
+		       CFS_DURATION_T"\n", service_time,
+		       cfs_time_sub(now, req->rq_sent));
+		return;
+	}
 
 	/* Network latency is total time less server processing time */
-	nl = max_t(int, now - req->rq_sent - service_time, 0) +1/*st rounding*/;
-	if (service_time > now - req->rq_sent + 3 /* bz16408 */)
-		CWARN("Reported service time %u > total measured time "
-		      CFS_DURATION_T"\n", service_time,
-		      cfs_time_sub(now, req->rq_sent));
+	nl = max_t(int, now - req->rq_sent -
+			service_time, 0) + 1; /* st rounding */
+	at = &req->rq_import->imp_at;
 
 	oldnl = at_measured(&at->iat_net_latency, nl);
 	if (oldnl != 0)
@@ -351,27 +432,29 @@ __must_hold(&req->rq_lock)
 	rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
 	if (rc) {
 		spin_lock(&req->rq_lock);
-		RETURN(rc);
-	}
-
-	rc = unpack_reply(early_req);
-	if (rc == 0) {
-		/* Expecting to increase the service time estimate here */
-		ptlrpc_at_adj_service(req,
-			lustre_msg_get_timeout(early_req->rq_repmsg));
-		ptlrpc_at_adj_net_latency(req,
-			lustre_msg_get_service_time(early_req->rq_repmsg));
-	}
-
-	sptlrpc_cli_finish_early_reply(early_req);
+		RETURN(rc);
+	}
 
+	rc = unpack_reply(early_req);
 	if (rc != 0) {
+		sptlrpc_cli_finish_early_reply(early_req);
 		spin_lock(&req->rq_lock);
 		RETURN(rc);
 	}
 
-	/* Adjust the local timeout for this req */
-	ptlrpc_at_set_req_timeout(req);
+	/* Use the new timeout value only to adjust the local value for this
+	 * request; don't include it in at_history. It is not yet clear why
+	 * the service time increased and whether it should be counted or
+	 * skipped (e.g. this could be a recovery case or a server error);
+	 * the real reply will supply all new data if it is worth adding.
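+	 * As a concrete illustration (numbers invented): if the request went
+	 * out with rq_timeout = 30s and the early reply reports a 45s server
+	 * timeout, rq_timeout becomes 45s and the deadline below moves out by
+	 * the 15s difference, while at_history keeps only measurements taken
+	 * from real replies.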
*/ + req->rq_timeout = lustre_msg_get_timeout(early_req->rq_repmsg); + lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout); + + /* Network latency can be adjusted, it is pure network delays */ + ptlrpc_at_adj_net_latency(req, + lustre_msg_get_service_time(early_req->rq_repmsg)); + + sptlrpc_cli_finish_early_reply(early_req); spin_lock(&req->rq_lock); olddl = req->rq_deadline; @@ -391,7 +474,7 @@ __must_hold(&req->rq_lock) RETURN(rc); } -struct kmem_cache *request_cache; +static struct kmem_cache *request_cache; int ptlrpc_request_cache_init(void) { @@ -447,7 +530,7 @@ EXPORT_SYMBOL(ptlrpc_free_rq_pool); /** * Allocates, initializes and adds \a num_rq requests to the pool \a pool */ -void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) +int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) { int i; int size = 1; @@ -469,20 +552,20 @@ void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) spin_unlock(&pool->prp_lock); req = ptlrpc_request_cache_alloc(GFP_NOFS); if (!req) - return; + return i; OBD_ALLOC_LARGE(msg, size); if (!msg) { ptlrpc_request_cache_free(req); - return; - } - req->rq_reqbuf = msg; - req->rq_reqbuf_len = size; - req->rq_pool = pool; + return i; + } + req->rq_reqbuf = msg; + req->rq_reqbuf_len = size; + req->rq_pool = pool; spin_lock(&pool->prp_lock); list_add_tail(&req->rq_list, &pool->prp_req_list); } spin_unlock(&pool->prp_lock); - return; + return num_rq; } EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool); @@ -496,7 +579,7 @@ EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool); */ struct ptlrpc_request_pool * ptlrpc_init_rq_pool(int num_rq, int msgsize, - void (*populate_pool)(struct ptlrpc_request_pool *, int)) + int (*populate_pool)(struct ptlrpc_request_pool *, int)) { struct ptlrpc_request_pool *pool; @@ -514,11 +597,6 @@ ptlrpc_init_rq_pool(int num_rq, int msgsize, populate_pool(pool, num_rq); - if (list_empty(&pool->prp_req_list)) { - /* have not allocated a single request for the pool */ - OBD_FREE(pool, sizeof(struct ptlrpc_request_pool)); - pool = NULL; - } return pool; } EXPORT_SYMBOL(ptlrpc_init_rq_pool); @@ -577,85 +655,134 @@ static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request) spin_unlock(&pool->prp_lock); } -static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request, - __u32 version, int opcode, - int count, __u32 *lengths, char **bufs, - struct ptlrpc_cli_ctx *ctx) +void ptlrpc_add_unreplied(struct ptlrpc_request *req) { - struct obd_import *imp = request->rq_import; - int rc; - ENTRY; + struct obd_import *imp = req->rq_import; + struct list_head *tmp; + struct ptlrpc_request *iter; - if (unlikely(ctx)) - request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx); - else { - rc = sptlrpc_req_get_ctx(request); - if (rc) - GOTO(out_free, rc); - } + assert_spin_locked(&imp->imp_lock); + LASSERT(list_empty(&req->rq_unreplied_list)); - sptlrpc_req_set_flavor(request, opcode); + /* unreplied list is sorted by xid in ascending order */ + list_for_each_prev(tmp, &imp->imp_unreplied_list) { + iter = list_entry(tmp, struct ptlrpc_request, + rq_unreplied_list); - rc = lustre_pack_request(request, imp->imp_msg_magic, count, - lengths, bufs); - if (rc) { - LASSERT(!request->rq_pool); - GOTO(out_ctx, rc); - } + LASSERT(req->rq_xid != iter->rq_xid); + if (req->rq_xid < iter->rq_xid) + continue; + list_add(&req->rq_unreplied_list, &iter->rq_unreplied_list); + return; + } + list_add(&req->rq_unreplied_list, &imp->imp_unreplied_list); +} - lustre_msg_add_version(request->rq_reqmsg, version); - request->rq_send_state = 
LUSTRE_IMP_FULL; - request->rq_type = PTL_RPC_MSG_REQUEST; - request->rq_export = NULL; +void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req) +{ + req->rq_xid = ptlrpc_next_xid(); + ptlrpc_add_unreplied(req); +} - request->rq_req_cbid.cbid_fn = request_out_callback; - request->rq_req_cbid.cbid_arg = request; +static inline void ptlrpc_assign_next_xid(struct ptlrpc_request *req) +{ + spin_lock(&req->rq_import->imp_lock); + ptlrpc_assign_next_xid_nolock(req); + spin_unlock(&req->rq_import->imp_lock); +} - request->rq_reply_cbid.cbid_fn = reply_in_callback; - request->rq_reply_cbid.cbid_arg = request; +int ptlrpc_request_bufs_pack(struct ptlrpc_request *request, + __u32 version, int opcode, char **bufs, + struct ptlrpc_cli_ctx *ctx) +{ + int count; + struct obd_import *imp; + __u32 *lengths; + int rc; - request->rq_reply_deadline = 0; - request->rq_phase = RQ_PHASE_NEW; - request->rq_next_phase = RQ_PHASE_UNDEFINED; + ENTRY; - request->rq_request_portal = imp->imp_client->cli_request_portal; - request->rq_reply_portal = imp->imp_client->cli_reply_portal; + count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT); + imp = request->rq_import; + lengths = request->rq_pill.rc_area[RCL_CLIENT]; - ptlrpc_at_set_req_timeout(request); + if (ctx != NULL) { + request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx); + } else { + rc = sptlrpc_req_get_ctx(request); + if (rc) + GOTO(out_free, rc); + } + sptlrpc_req_set_flavor(request, opcode); + + rc = lustre_pack_request(request, imp->imp_msg_magic, count, + lengths, bufs); + if (rc) + GOTO(out_ctx, rc); - spin_lock_init(&request->rq_lock); - INIT_LIST_HEAD(&request->rq_list); - INIT_LIST_HEAD(&request->rq_timed_list); - INIT_LIST_HEAD(&request->rq_replay_list); - INIT_LIST_HEAD(&request->rq_ctx_chain); - INIT_LIST_HEAD(&request->rq_set_chain); - INIT_LIST_HEAD(&request->rq_history_list); - INIT_LIST_HEAD(&request->rq_exp_list); - init_waitqueue_head(&request->rq_reply_waitq); - init_waitqueue_head(&request->rq_set_waitq); - request->rq_xid = ptlrpc_next_xid(); - atomic_set(&request->rq_refcount, 1); + lustre_msg_add_version(request->rq_reqmsg, version); + request->rq_send_state = LUSTRE_IMP_FULL; + request->rq_type = PTL_RPC_MSG_REQUEST; + + request->rq_req_cbid.cbid_fn = request_out_callback; + request->rq_req_cbid.cbid_arg = request; + + request->rq_reply_cbid.cbid_fn = reply_in_callback; + request->rq_reply_cbid.cbid_arg = request; + + request->rq_reply_deadline = 0; + request->rq_bulk_deadline = 0; + request->rq_req_deadline = 0; + request->rq_phase = RQ_PHASE_NEW; + request->rq_next_phase = RQ_PHASE_UNDEFINED; + + request->rq_request_portal = imp->imp_client->cli_request_portal; + request->rq_reply_portal = imp->imp_client->cli_reply_portal; + + ptlrpc_at_set_req_timeout(request); lustre_msg_set_opc(request->rq_reqmsg, opcode); + ptlrpc_assign_next_xid(request); + + /* Let's setup deadline for req/reply/bulk unlink for opcode. 
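+	 * For example, a test can set cfs_fail_val to an RPC opcode and arm
+	 * OBD_FAIL_PTLRPC_LONG_BULK_UNLINK: only requests with that opcode
+	 * then get rq_bulk_deadline pushed LONG_UNLINK seconds out, which
+	 * simulates a stuck bulk unlink for a single RPC type at a time.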
*/ + if (cfs_fail_val == opcode) { + time_t *fail_t = NULL, *fail2_t = NULL; + + if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) + fail_t = &request->rq_bulk_deadline; + else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) + fail_t = &request->rq_reply_deadline; + else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK)) + fail_t = &request->rq_req_deadline; + else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK)) { + fail_t = &request->rq_reply_deadline; + fail2_t = &request->rq_bulk_deadline; + } + + if (fail_t) { + *fail_t = cfs_time_current_sec() + LONG_UNLINK; + + if (fail2_t) + *fail2_t = cfs_time_current_sec() + LONG_UNLINK; + + /* The RPC is infected, let the test to change the + * fail_loc */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(cfs_time_seconds(2)); + set_current_state(TASK_RUNNING); + } + } RETURN(0); + out_ctx: + LASSERT(!request->rq_pool); sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1); out_free: class_import_put(imp); - return rc; -} -int ptlrpc_request_bufs_pack(struct ptlrpc_request *request, - __u32 version, int opcode, char **bufs, - struct ptlrpc_cli_ctx *ctx) -{ - int count; + return rc; - count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT); - return __ptlrpc_request_bufs_pack(request, version, opcode, count, - request->rq_pill.rc_area[RCL_CLIENT], - bufs, ctx); } EXPORT_SYMBOL(ptlrpc_request_bufs_pack); @@ -705,14 +832,15 @@ struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp, { struct ptlrpc_request *request = NULL; - if (pool) - request = ptlrpc_prep_req_from_pool(pool); + request = ptlrpc_request_cache_alloc(GFP_NOFS); - if (!request) - request = ptlrpc_request_cache_alloc(GFP_NOFS); + if (!request && pool) + request = ptlrpc_prep_req_from_pool(pool); if (request) { - LASSERTF((unsigned long)imp > 0x1000, "%p\n", imp); + ptlrpc_cli_req_init(request); + + LASSERTF((unsigned long)imp > 0x1000, "%p", imp); LASSERT(imp != LP_POISON); LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n", imp->imp_client); @@ -810,57 +938,17 @@ struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp, EXPORT_SYMBOL(ptlrpc_request_alloc_pack); /** - * Prepare request (fetched from pool \a poolif not NULL) on import \a imp - * for operation \a opcode. Request would contain \a count buffers. - * Sizes of buffers are described in array \a lengths and buffers themselves - * are provided by a pointer \a bufs. - * Returns prepared request structure pointer or NULL on error. - */ -struct ptlrpc_request * -ptlrpc_prep_req_pool(struct obd_import *imp, - __u32 version, int opcode, - int count, __u32 *lengths, char **bufs, - struct ptlrpc_request_pool *pool) -{ - struct ptlrpc_request *request; - int rc; - - request = __ptlrpc_request_alloc(imp, pool); - if (!request) - return NULL; - - rc = __ptlrpc_request_bufs_pack(request, version, opcode, count, - lengths, bufs, NULL); - if (rc) { - ptlrpc_request_free(request); - request = NULL; - } - return request; -} -EXPORT_SYMBOL(ptlrpc_prep_req_pool); - -/** - * Same as ptlrpc_prep_req_pool, but without pool - */ -struct ptlrpc_request * -ptlrpc_prep_req(struct obd_import *imp, __u32 version, int opcode, int count, - __u32 *lengths, char **bufs) -{ - return ptlrpc_prep_req_pool(imp, version, opcode, count, lengths, bufs, - NULL); -} -EXPORT_SYMBOL(ptlrpc_prep_req); - -/** - * Allocate and initialize new request set structure. + * Allocate and initialize new request set structure on the current CPT. 
* Returns a pointer to the newly allocated set structure or NULL on error. */ struct ptlrpc_request_set *ptlrpc_prep_set(void) { - struct ptlrpc_request_set *set; + struct ptlrpc_request_set *set; + int cpt; ENTRY; - OBD_ALLOC(set, sizeof *set); + cpt = cfs_cpt_current(cfs_cpt_table, 0); + OBD_CPT_ALLOC(set, cfs_cpt_table, cpt, sizeof *set); if (!set) RETURN(NULL); atomic_set(&set->set_refcount, 1); @@ -904,7 +992,6 @@ struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func, RETURN(set); } -EXPORT_SYMBOL(ptlrpc_prep_fcset); /** * Wind down and free request set structure previously allocated with @@ -986,7 +1073,6 @@ int ptlrpc_set_add_cb(struct ptlrpc_request_set *set, RETURN(0); } -EXPORT_SYMBOL(ptlrpc_set_add_cb); /** * Add a new request to the general purpose request set. @@ -997,6 +1083,9 @@ void ptlrpc_set_add_req(struct ptlrpc_request_set *set, { LASSERT(list_empty(&req->rq_set_chain)); + if (req->rq_allow_intr) + set->set_allow_intr = 1; + /* The set takes over the caller's request reference */ list_add_tail(&req->rq_set_chain, &set->set_requests); req->rq_set = set; @@ -1048,7 +1137,6 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, wake_up(&pc->pc_partners[i]->pc_set->set_waitq); } } -EXPORT_SYMBOL(ptlrpc_set_add_new_req); /** * Based on the current state of the import, determine if the request @@ -1080,9 +1168,9 @@ static int ptlrpc_import_delay_req(struct obd_import *imp, D_HA : D_ERROR, req, "IMP_CLOSED "); *status = -EIO; } else if (ptlrpc_send_limit_expired(req)) { - /* probably doesn't need to be a D_ERROR after initial testing */ - DEBUG_REQ(D_ERROR, req, "send limit expired "); - *status = -EIO; + /* probably doesn't need to be a D_ERROR after initial testing*/ + DEBUG_REQ(D_HA, req, "send limit expired "); + *status = -ETIMEDOUT; } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING && imp->imp_state == LUSTRE_IMP_CONNECTING) { /* allow CONNECT even if import is invalid */ ; @@ -1119,35 +1207,40 @@ static int ptlrpc_import_delay_req(struct obd_import *imp, } /** - * Decide if the eror message regarding provided request \a req - * should be printed to the console or not. - * Makes it's decision on request status and other properties. - * Returns 1 to print error on the system console or 0 if not. + * Decide if the error message should be printed to the console or not. + * Makes its decision based on request type, status, and failure frequency. + * + * \param[in] req request that failed and may need a console message + * + * \retval false if no message should be printed + * \retval true if console message should be printed */ -static int ptlrpc_console_allow(struct ptlrpc_request *req) +static bool ptlrpc_console_allow(struct ptlrpc_request *req) { - __u32 opc; - int err; + __u32 opc; - LASSERT(req->rq_reqmsg != NULL); - opc = lustre_msg_get_opc(req->rq_reqmsg); + LASSERT(req->rq_reqmsg != NULL); + opc = lustre_msg_get_opc(req->rq_reqmsg); - /* Suppress particular reconnect errors which are to be expected. No - * errors are suppressed for the initial connection on an import */ - if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) && - (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) { + /* Suppress particular reconnect errors which are to be expected. 
*/ + if (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT) { + int err; - /* Suppress timed out reconnect requests */ - if (req->rq_timedout) - return 0; + /* Suppress timed out reconnect requests */ + if (lustre_handle_is_used(&req->rq_import->imp_remote_handle) || + req->rq_timedout) + return false; - /* Suppress unavailable/again reconnect requests */ - err = lustre_msg_get_status(req->rq_repmsg); - if (err == -ENODEV || err == -EAGAIN) - return 0; - } + /* Suppress most unavailable/again reconnect requests, but + * print occasionally so it is clear client is trying to + * connect to a server where no target is running. */ + err = lustre_msg_get_status(req->rq_repmsg); + if ((err == -ENODEV || err == -EAGAIN) && + req->rq_import->imp_conn_cnt % 30 != 20) + return false; + } - return 1; + return true; } /** @@ -1162,14 +1255,15 @@ static int ptlrpc_check_status(struct ptlrpc_request *req) err = lustre_msg_get_status(req->rq_repmsg); if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) { struct obd_import *imp = req->rq_import; + lnet_nid_t nid = imp->imp_connection->c_peer.nid; __u32 opc = lustre_msg_get_opc(req->rq_reqmsg); + if (ptlrpc_console_allow(req)) - LCONSOLE_ERROR_MSG(0x011, "%s: Communicating with %s," - " operation %s failed with %d.\n", - imp->imp_obd->obd_name, - libcfs_nid2str( - imp->imp_connection->c_peer.nid), - ll_opcode2str(opc), err); + LCONSOLE_ERROR_MSG(0x11, "%s: operation %s to node %s " + "failed: rc = %d\n", + imp->imp_obd->obd_name, + ll_opcode2str(opc), + libcfs_nid2str(nid), err); RETURN(err < 0 ? err : -EINVAL); } @@ -1200,12 +1294,30 @@ static void ptlrpc_save_versions(struct ptlrpc_request *req) LASSERT(versions); lustre_msg_set_versions(reqmsg, versions); - CDEBUG(D_INFO, "Client save versions ["LPX64"/"LPX64"]\n", + CDEBUG(D_INFO, "Client save versions [%#llx/%#llx]\n", versions[0], versions[1]); EXIT; } +__u64 ptlrpc_known_replied_xid(struct obd_import *imp) +{ + struct ptlrpc_request *req; + + assert_spin_locked(&imp->imp_lock); + if (list_empty(&imp->imp_unreplied_list)) + return 0; + + req = list_entry(imp->imp_unreplied_list.next, struct ptlrpc_request, + rq_unreplied_list); + LASSERTF(req->rq_xid >= 1, "XID:%llu\n", req->rq_xid); + + if (imp->imp_known_replied_xid < req->rq_xid - 1) + imp->imp_known_replied_xid = req->rq_xid - 1; + + return req->rq_xid - 1; +} + /** * Callback function called when client receives RPC reply for \a req. * Returns 0 on success or error code. @@ -1219,14 +1331,15 @@ static int after_reply(struct ptlrpc_request *req) struct obd_device *obd = req->rq_import->imp_obd; int rc; struct timeval work_start; + __u64 committed; long timediff; ENTRY; LASSERT(obd != NULL); /* repbuf must be unlinked */ - LASSERT(!req->rq_receiving_reply && !req->rq_reply_unlink); + LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked); - if (req->rq_reply_truncate) { + if (req->rq_reply_truncated) { if (ptlrpc_no_resend(req)) { DEBUG_REQ(D_ERROR, req, "reply buffer overflow," " expected: %d, actual size: %d", @@ -1247,6 +1360,9 @@ static int after_reply(struct ptlrpc_request *req) RETURN(0); } + do_gettimeofday(&work_start); + timediff = cfs_timeval_sub(&work_start, &req->rq_sent_tv, NULL); + /* * NB Until this point, the whole of the incoming message, * including buflens, status etc is in the sender's byte order. 
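
ptlrpc_console_allow() above now suppresses the expected -ENODEV/-EAGAIN reconnect noise but still lets roughly one attempt in thirty through (imp_conn_cnt % 30 == 20), so it remains visible that a client keeps probing a server where no target is running. A standalone sketch of the same throttle arithmetic:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Let one reconnect error in every 30 attempts reach the console. */
static bool console_allow(int err, unsigned int conn_cnt)
{
	if ((err == -ENODEV || err == -EAGAIN) && conn_cnt % 30 != 20)
		return false;
	return true;
}

int main(void)
{
	unsigned int cnt;

	for (cnt = 1; cnt <= 90; cnt++)
		if (console_allow(-ENODEV, cnt))
			printf("attempt %u: target not available\n", cnt);
	return 0;	/* prints for attempts 20, 50 and 80 only */
}
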
@@ -1273,18 +1389,11 @@ static int after_reply(struct ptlrpc_request *req) time_t now = cfs_time_current_sec(); DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS"); + spin_lock(&req->rq_lock); req->rq_resend = 1; + spin_unlock(&req->rq_lock); req->rq_nr_resend++; - /* allocate new xid to avoid reply reconstruction */ - if (!req->rq_bulk) { - /* new xid is already allocated for bulk in - * ptlrpc_check_set() */ - req->rq_xid = ptlrpc_next_xid(); - DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for " - "resend on EINPROGRESS"); - } - /* Readjust the timeout for current conditions */ ptlrpc_at_set_req_timeout(req); /* delay resend to give a chance to the server to get ready. @@ -1296,11 +1405,14 @@ static int after_reply(struct ptlrpc_request *req) else req->rq_sent = now + req->rq_nr_resend; + /* Resend for EINPROGRESS will use a new XID */ + spin_lock(&imp->imp_lock); + list_del_init(&req->rq_unreplied_list); + spin_unlock(&imp->imp_lock); + RETURN(0); } - do_gettimeofday(&work_start); - timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL); if (obd->obd_svc_stats != NULL) { lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, timediff); @@ -1323,20 +1435,20 @@ static int after_reply(struct ptlrpc_request *req) rc = ptlrpc_check_status(req); imp->imp_connect_error = rc; - if (rc) { - /* - * Either we've been evicted, or the server has failed for - * some reason. Try to reconnect, and if that fails, punt to - * the upcall. - */ - if (ll_rpc_recoverable_error(rc)) { - if (req->rq_send_state != LUSTRE_IMP_FULL || - imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) { - RETURN(rc); - } - ptlrpc_request_handle_notconn(req); - RETURN(rc); - } + if (rc) { + /* + * Either we've been evicted, or the server has failed for + * some reason. Try to reconnect, and if that fails, punt to + * the upcall. + */ + if (ptlrpc_recoverable_error(rc)) { + if (req->rq_send_state != LUSTRE_IMP_FULL || + imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) { + RETURN(rc); + } + ptlrpc_request_handle_notconn(req); + RETURN(rc); + } } else { /* * Let's look if server sent slv. Do it only for RPC with @@ -1379,10 +1491,9 @@ static int after_reply(struct ptlrpc_request *req) /* * Replay-enabled imports return commit-status information. 
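 	 * The last_committed transno carried by the reply feeds
 	 * imp_peer_committed_transno below, and ptlrpc_free_committed() then
 	 * drops every replay request whose transno is at or below that value.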
*/ - if (lustre_msg_get_last_committed(req->rq_repmsg)) { - imp->imp_peer_committed_transno = - lustre_msg_get_last_committed(req->rq_repmsg); - } + committed = lustre_msg_get_last_committed(req->rq_repmsg); + if (likely(committed > imp->imp_peer_committed_transno)) + imp->imp_peer_committed_transno = committed; ptlrpc_free_committed(imp); @@ -1414,10 +1525,18 @@ static int after_reply(struct ptlrpc_request *req) static int ptlrpc_send_new_req(struct ptlrpc_request *req) { struct obd_import *imp = req->rq_import; + __u64 min_xid = 0; int rc; ENTRY; LASSERT(req->rq_phase == RQ_PHASE_NEW); + + /* do not try to go further if there is not enough memory in enc_pool */ + if (req->rq_sent && req->rq_bulk != NULL) + if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() && + pool_is_at_full_capacity()) + RETURN(-ENOMEM); + if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) && (!req->rq_generation_set || req->rq_import_generation == imp->imp_generation)) @@ -1427,6 +1546,9 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req) spin_lock(&imp->imp_lock); + LASSERT(req->rq_xid != 0); + LASSERT(!list_empty(&req->rq_unreplied_list)); + if (!req->rq_generation_set) req->rq_import_generation = imp->imp_generation; @@ -1456,8 +1578,25 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req) LASSERT(list_empty(&req->rq_list)); list_add_tail(&req->rq_list, &imp->imp_sending_list); atomic_inc(&req->rq_import->imp_inflight); + + /* find the known replied XID from the unreplied list, CONNECT + * and DISCONNECT requests are skipped to make the sanity check + * on server side happy. see process_req_last_xid(). + * + * For CONNECT: Because replay requests have lower XID, it'll + * break the sanity check if CONNECT bump the exp_last_xid on + * server. + * + * For DISCONNECT: Since client will abort inflight RPC before + * sending DISCONNECT, DISCONNECT may carry an XID which higher + * than the inflight RPC. 
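+	 *
+	 * Example: with unreplied XIDs {7, 9, 10} queued, the known-replied
+	 * XID reported to the server is 6: every XID up to 6 has seen its
+	 * reply, while 7 is still the oldest request in flight.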
+	 */
+	if (!ptlrpc_req_is_connect(req) && !ptlrpc_req_is_disconnect(req))
+		min_xid = ptlrpc_known_replied_xid(imp);
 	spin_unlock(&imp->imp_lock);
 
+	lustre_msg_set_last_xid(req->rq_reqmsg, min_xid);
+
 	lustre_msg_set_status(req->rq_reqmsg, current_pid());
 
 	rc = sptlrpc_req_refresh_ctx(req, -1);
@@ -1474,13 +1613,23 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
 	}
 
 	CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
-	       " %s:%s:%d:"LPU64":%s:%d\n", current_comm(),
+	       " %s:%s:%d:%llu:%s:%d\n", current_comm(),
 	       imp->imp_obd->obd_uuid.uuid,
 	       lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
 	       libcfs_nid2str(imp->imp_connection->c_peer.nid),
 	       lustre_msg_get_opc(req->rq_reqmsg));
 
 	rc = ptl_send_rpc(req, 0);
+	if (rc == -ENOMEM) {
+		spin_lock(&imp->imp_lock);
+		if (!list_empty(&req->rq_list)) {
+			list_del_init(&req->rq_list);
+			atomic_dec(&req->rq_import->imp_inflight);
+		}
+		spin_unlock(&imp->imp_lock);
+		ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
+		RETURN(rc);
+	}
 	if (rc) {
 		DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
 		spin_lock(&req->rq_lock);
@@ -1526,20 +1675,28 @@ static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
 {
 	struct list_head *tmp, *next;
-	int force_timer_recalc = 0;
-	ENTRY;
+	struct list_head comp_reqs;
+	int force_timer_recalc = 0;
+	ENTRY;
 
 	if (atomic_read(&set->set_remaining) == 0)
-		RETURN(1);
+		RETURN(1);
 
+	INIT_LIST_HEAD(&comp_reqs);
 	list_for_each_safe(tmp, next, &set->set_requests) {
 		struct ptlrpc_request *req =
 			list_entry(tmp, struct ptlrpc_request,
 				   rq_set_chain);
 		struct obd_import *imp = req->rq_import;
 		int unregistered = 0;
+		int async = 1;
 		int rc = 0;
 
+		if (req->rq_phase == RQ_PHASE_COMPLETE) {
+			list_move_tail(&req->rq_set_chain, &comp_reqs);
+			continue;
+		}
+
 		/* This schedule point is mainly for the ptlrpcd caller of this
 		 * function. Most ptlrpc sets are not long-lived and unbounded
 		 * in length, but at the least the set used by the ptlrpcd is.
@@ -1548,13 +1705,26 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
 		 */
 		cond_resched();
 
-		if (req->rq_phase == RQ_PHASE_NEW &&
-		    ptlrpc_send_new_req(req)) {
-			force_timer_recalc = 1;
-		}
+		/* If the caller allowed this request to be interrupted
+		 * (rq_allow_intr) and it has actually been interrupted,
+		 * then move the request to the RQ_PHASE_INTERPRET phase
+		 * regardless of its current phase. */
+		if (unlikely(req->rq_allow_intr && req->rq_intr)) {
+			req->rq_status = -EINTR;
+			ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+
+			/* Since the request is being interpreted now, we have
+			 * to wait for the reply to be unlinked, so use sync
+			 * mode.
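+			 * ("Sync" here means ptlrpc_unregister_reply() below
+			 * is called with async = 0, so it blocks until LNet
+			 * has really unlinked the reply buffer.)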
*/ + async = 0; + + GOTO(interpret, req->rq_status); + } + + if (req->rq_phase == RQ_PHASE_NEW && ptlrpc_send_new_req(req)) + force_timer_recalc = 1; - /* delayed send - skip */ - if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent) + /* delayed send - skip */ + if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent) continue; /* delayed resend - skip */ @@ -1562,29 +1732,43 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) req->rq_sent > cfs_time_current_sec()) continue; - if (!(req->rq_phase == RQ_PHASE_RPC || - req->rq_phase == RQ_PHASE_BULK || - req->rq_phase == RQ_PHASE_INTERPRET || - req->rq_phase == RQ_PHASE_UNREGISTERING || - req->rq_phase == RQ_PHASE_COMPLETE)) { - DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase); - LBUG(); - } + if (!(req->rq_phase == RQ_PHASE_RPC || + req->rq_phase == RQ_PHASE_BULK || + req->rq_phase == RQ_PHASE_INTERPRET || + req->rq_phase == RQ_PHASE_UNREG_RPC || + req->rq_phase == RQ_PHASE_UNREG_BULK)) { + DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase); + LBUG(); + } - if (req->rq_phase == RQ_PHASE_UNREGISTERING) { - LASSERT(req->rq_next_phase != req->rq_phase); - LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED); + if (req->rq_phase == RQ_PHASE_UNREG_RPC || + req->rq_phase == RQ_PHASE_UNREG_BULK) { + LASSERT(req->rq_next_phase != req->rq_phase); + LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED); + + if (req->rq_req_deadline && + !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK)) + req->rq_req_deadline = 0; + if (req->rq_reply_deadline && + !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) + req->rq_reply_deadline = 0; + if (req->rq_bulk_deadline && + !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) + req->rq_bulk_deadline = 0; - /* - * Skip processing until reply is unlinked. We - * can't return to pool before that and we can't - * call interpret before that. We need to make - * sure that all rdma transfers finished and will - * not corrupt any data. - */ - if (ptlrpc_client_recv_or_unlink(req) || - ptlrpc_client_bulk_active(req)) - continue; + /* + * Skip processing until reply is unlinked. We + * can't return to pool before that and we can't + * call interpret before that. We need to make + * sure that all rdma transfers finished and will + * not corrupt any data. + */ + if (req->rq_phase == RQ_PHASE_UNREG_RPC && + ptlrpc_client_recv_or_unlink(req)) + continue; + if (req->rq_phase == RQ_PHASE_UNREG_BULK && + ptlrpc_client_bulk_active(req)) + continue; /* * Turn fail_loc off to prevent it from looping @@ -1606,9 +1790,6 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) ptlrpc_rqphase_move(req, req->rq_next_phase); } - if (req->rq_phase == RQ_PHASE_COMPLETE) - continue; - if (req->rq_phase == RQ_PHASE_INTERPRET) GOTO(interpret, req->rq_status); @@ -1647,7 +1828,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) /* ptlrpc_set_wait->l_wait_event sets lwi_allow_intr * so it sets rq_intr regardless of individual rpc - * timeouts. The synchronous IO waiting path sets + * timeouts. The synchronous IO waiting path sets * rq_intr irrespective of whether ptlrpcd * has seen a timeout. 
Our policy is to only interpret * interrupted rpcs after they have timed out, so we @@ -1715,20 +1896,10 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) spin_lock(&req->rq_lock); req->rq_resend = 1; spin_unlock(&req->rq_lock); - if (req->rq_bulk) { - __u64 old_xid; - - if (!ptlrpc_unregister_bulk(req, 1)) - continue; - - /* ensure previous bulk fails */ - old_xid = req->rq_xid; - req->rq_xid = ptlrpc_next_xid(); - CDEBUG(D_HA, "resend bulk " - "old x"LPU64 - " new x"LPU64"\n", - old_xid, req->rq_xid); - } + + if (req->rq_bulk != NULL && + !ptlrpc_unregister_bulk(req, 1)) + continue; } /* * rq_wait_ctx is only touched by ptlrpcd, @@ -1756,6 +1927,14 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) } rc = ptl_send_rpc(req, 0); + if (rc == -ENOMEM) { + spin_lock(&imp->imp_lock); + if (!list_empty(&req->rq_list)) + list_del_init(&req->rq_list); + spin_unlock(&imp->imp_lock); + ptlrpc_rqphase_move(req, RQ_PHASE_NEW); + continue; + } if (rc) { DEBUG_REQ(D_HA, req, "send failed: rc = %d", rc); @@ -1827,27 +2006,27 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) req->rq_status = -EIO; } - ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); + ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); - interpret: - LASSERT(req->rq_phase == RQ_PHASE_INTERPRET); + interpret: + LASSERT(req->rq_phase == RQ_PHASE_INTERPRET); - /* This moves to "unregistering" phase we need to wait for - * reply unlink. */ - if (!unregistered && !ptlrpc_unregister_reply(req, 1)) { - /* start async bulk unlink too */ - ptlrpc_unregister_bulk(req, 1); - continue; - } + /* This moves to "unregistering" phase we need to wait for + * reply unlink. */ + if (!unregistered && !ptlrpc_unregister_reply(req, async)) { + /* start async bulk unlink too */ + ptlrpc_unregister_bulk(req, 1); + continue; + } - if (!ptlrpc_unregister_bulk(req, 1)) - continue; + if (!ptlrpc_unregister_bulk(req, async)) + continue; - /* When calling interpret receiving already should be - * finished. */ - LASSERT(!req->rq_receiving_reply); + /* When calling interpret receiving already should be + * finished. */ + LASSERT(!req->rq_receiving_reply); - ptlrpc_req_interpret(env, req, req->rq_status); + ptlrpc_req_interpret(env, req, req->rq_status); if (ptlrpcd_check_work(req)) { atomic_dec(&set->set_remaining); @@ -1857,7 +2036,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0, "Completed RPC pname:cluuid:pid:xid:nid:" - "opc %s:%s:%d:"LPU64":%s:%d\n", + "opc %s:%s:%d:%llu:%s:%d\n", current_comm(), imp->imp_obd->obd_uuid.uuid, lustre_msg_get_status(req->rq_reqmsg), req->rq_xid, libcfs_nid2str(imp->imp_connection->c_peer.nid), @@ -1872,6 +2051,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) list_del_init(&req->rq_list); atomic_dec(&imp->imp_inflight); } + list_del_init(&req->rq_unreplied_list); spin_unlock(&imp->imp_lock); atomic_dec(&set->set_remaining); @@ -1894,9 +2074,15 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) if (req->rq_status != 0) set->set_rc = req->rq_status; ptlrpc_req_finished(req); + } else { + list_move_tail(&req->rq_set_chain, &comp_reqs); } } + /* move completed request at the head of list so it's easier for + * caller to find them */ + list_splice(&comp_reqs, &set->set_requests); + /* If we hit an error, we want to recover promptly. 
 	 */
 	RETURN(atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
 }
 
@@ -1926,8 +2112,8 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
 		  "timed out for sent delay" : "timed out for slow reply"),
 		  req->rq_sent, req->rq_real_sent);
 
-	if (imp != NULL && obd_debug_peer_on_timeout)
-		LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);
+	if (imp != NULL && obd_debug_peer_on_timeout)
+		LNetDebugPeer(imp->imp_connection->c_peer);
 
 	ptlrpc_unregister_reply(req, async_unlink);
 	ptlrpc_unregister_bulk(req, async_unlink);
@@ -2021,7 +2207,6 @@ int ptlrpc_expired_set(void *data)
 	 */
 	RETURN(1);
 }
-EXPORT_SYMBOL(ptlrpc_expired_set);
 
 /**
  * Sets rq_intr flag in \a req under spinlock.
@@ -2038,7 +2223,7 @@ EXPORT_SYMBOL(ptlrpc_mark_interrupted);
  * Interrupts (sets interrupted flag) all uncompleted requests in
  * a set \a data. Callback for l_wait_event for interruptible waits.
  */
-void ptlrpc_interrupted_set(void *data)
+static void ptlrpc_interrupted_set(void *data)
 {
 	struct ptlrpc_request_set *set = data;
 	struct list_head *tmp;
@@ -2050,14 +2235,17 @@ void ptlrpc_interrupted_set(void *data)
 		struct ptlrpc_request *req =
 			list_entry(tmp, struct ptlrpc_request, rq_set_chain);
 
+		if (req->rq_intr)
+			continue;
+
 		if (req->rq_phase != RQ_PHASE_RPC &&
-		    req->rq_phase != RQ_PHASE_UNREGISTERING)
+		    req->rq_phase != RQ_PHASE_UNREG_RPC &&
+		    !req->rq_allow_intr)
 			continue;
 
 		ptlrpc_mark_interrupted(req);
 	}
}
-EXPORT_SYMBOL(ptlrpc_interrupted_set);
 
 /**
  * Get the smallest timeout in the set; this does NOT set a timeout.
@@ -2108,7 +2296,6 @@ int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
 	}
 	RETURN(timeout);
 }
-EXPORT_SYMBOL(ptlrpc_set_next_timeout);
 
 /**
 * Send all unsent requests from the set and then wait until all
@@ -2145,21 +2332,21 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
 		CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
 		       set, timeout);
 
-		if (timeout == 0 && !cfs_signal_pending())
-			/*
-			 * No requests are in-flight (either timed out
-			 * or delayed), so we can allow interrupts.
-			 * We still want to block for a limited time,
-			 * so we allow interrupts during the timeout.
-			 */
-			lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1),
-						   ptlrpc_expired_set,
-						   ptlrpc_interrupted_set, set);
+		if ((timeout == 0 && !signal_pending(current)) ||
+		    set->set_allow_intr)
+			/* No requests are in-flight (either timed out
+			 * or delayed), so we can allow interrupts.
+			 * We still want to block for a limited time,
+			 * so we allow interrupts during the timeout. */
+			lwi = LWI_TIMEOUT_INTR_ALL(
+					cfs_time_seconds(timeout ? timeout : 1),
+					ptlrpc_expired_set,
+					ptlrpc_interrupted_set, set);
 		else
 			/*
 			 * At least one request is in flight, so no
 			 * interrupts are allowed. Wait until all
-			 * complete, or an in-flight req times out.
 			 */
 			lwi = LWI_TIMEOUT(cfs_time_seconds(timeout? timeout : 1),
					  ptlrpc_expired_set, set);
@@ -2169,8 +2356,9 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
 		/* LU-769 - if we ignored the signal because it was already
 		 * pending when we started, we need to handle it now or we risk
 		 * it being ignored forever */
-		if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
-		    cfs_signal_pending()) {
+		if (rc == -ETIMEDOUT &&
+		    (!lwi.lwi_allow_intr || set->set_allow_intr) &&
+		    signal_pending(current)) {
 			sigset_t blocked_sigs =
 				cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
 
@@ -2178,7 +2366,7 @@
 			 * like SIGINT or SIGKILL.
We still ignore less * important signals since ptlrpc set is not easily * reentrant from userspace again */ - if (cfs_signal_pending()) + if (signal_pending(current)) ptlrpc_interrupted_set(set); cfs_restore_sigs(blocked_sigs); } @@ -2246,27 +2434,27 @@ EXPORT_SYMBOL(ptlrpc_set_wait); */ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) { - ENTRY; - if (request == NULL) { - EXIT; - return; - } + ENTRY; + + if (request == NULL) + RETURN_EXIT; - LASSERTF(!request->rq_receiving_reply, "req %p\n", request); - LASSERTF(request->rq_rqbd == NULL, "req %p\n",request);/* client-side */ + LASSERT(!request->rq_srv_req); + LASSERT(request->rq_export == NULL); + LASSERTF(!request->rq_receiving_reply, "req %p\n", request); LASSERTF(list_empty(&request->rq_list), "req %p\n", request); LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request); - LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request); - LASSERTF(!request->rq_replay, "req %p\n", request); + LASSERTF(!request->rq_replay, "req %p\n", request); - req_capsule_fini(&request->rq_pill); + req_capsule_fini(&request->rq_pill); - /* We must take it off the imp_replay_list first. Otherwise, we'll set - * request->rq_reqmsg to NULL while osc_close is dereferencing it. */ - if (request->rq_import != NULL) { + /* We must take it off the imp_replay_list first. Otherwise, we'll set + * request->rq_reqmsg to NULL while osc_close is dereferencing it. */ + if (request->rq_import != NULL) { if (!locked) spin_lock(&request->rq_import->imp_lock); list_del_init(&request->rq_replay_list); + list_del_init(&request->rq_unreplied_list); if (!locked) spin_unlock(&request->rq_import->imp_lock); } @@ -2280,16 +2468,13 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) if (request->rq_repbuf != NULL) sptlrpc_cli_free_repbuf(request); - if (request->rq_export != NULL) { - class_export_put(request->rq_export); - request->rq_export = NULL; - } + if (request->rq_import != NULL) { class_import_put(request->rq_import); request->rq_import = NULL; } if (request->rq_bulk != NULL) - ptlrpc_free_bulk_pin(request->rq_bulk); + ptlrpc_free_bulk(request->rq_bulk); if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL) sptlrpc_cli_free_reqbuf(request); @@ -2307,14 +2492,13 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked); /** * Drop one request reference. Must be called with import imp_lock held. - * When reference count drops to zero, reuqest is freed. + * When reference count drops to zero, request is freed. */ void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request) { assert_spin_locked(&request->rq_import->imp_lock); (void)__ptlrpc_req_finished(request, 1); } -EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock); /** * Helper function @@ -2371,7 +2555,7 @@ EXPORT_SYMBOL(ptlrpc_req_xid); * The request owner (i.e. the thread doing the I/O) must call... * Returns 0 on success or 1 if unregistering cannot be made. */ -int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) +static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) { int rc; struct l_wait_info lwi; @@ -2381,12 +2565,11 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) */ LASSERT(!in_interrupt()); - /* - * Let's setup deadline for reply unlink. 
- */ - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) && - async && request->rq_reply_deadline == 0) - request->rq_reply_deadline = cfs_time_current_sec()+LONG_UNLINK; + /* Let's setup deadline for reply unlink. */ + if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) && + async && request->rq_reply_deadline == 0 && cfs_fail_val == 0) + request->rq_reply_deadline = + cfs_time_current_sec() + LONG_UNLINK; /* * Nothing left to do. @@ -2402,10 +2585,8 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) if (!ptlrpc_client_recv_or_unlink(request)) RETURN(1); - /* - * Move to "Unregistering" phase as reply was not unlinked yet. - */ - ptlrpc_rqphase_move(request, RQ_PHASE_UNREGISTERING); + /* Move to "Unregistering" phase as reply was not unlinked yet. */ + ptlrpc_rqphase_move(request, RQ_PHASE_UNREG_RPC); /* * Do not wait for unlink to finish. @@ -2419,12 +2600,10 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) * unlinked before returning a req to the pool. */ for (;;) { -#ifdef __KERNEL__ /* The wq argument is ignored by user-space wait_event macros */ wait_queue_head_t *wq = (request->rq_set != NULL) ? &request->rq_set->set_waitq : &request->rq_reply_waitq; -#endif /* Network access will complete in finite time but the HUGE * timeout lets us CWARN for visibility of sluggish NALs */ lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK), @@ -2437,13 +2616,14 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) } LASSERT(rc == -ETIMEDOUT); - DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout " - "rvcng=%d unlnk=%d/%d", request->rq_receiving_reply, - request->rq_req_unlink, request->rq_reply_unlink); + DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout " + "receiving_reply=%d req_ulinked=%d reply_unlinked=%d", + request->rq_receiving_reply, + request->rq_req_unlinked, + request->rq_reply_unlinked); } RETURN(0); } -EXPORT_SYMBOL(ptlrpc_unregister_reply); static void ptlrpc_free_request(struct ptlrpc_request *req) { @@ -2498,15 +2678,16 @@ void ptlrpc_free_committed(struct obd_import *imp) if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked && imp->imp_generation == imp->imp_last_generation_checked) { - CDEBUG(D_INFO, "%s: skip recheck: last_committed "LPU64"\n", + CDEBUG(D_INFO, "%s: skip recheck: last_committed %llu\n", imp->imp_obd->obd_name, imp->imp_peer_committed_transno); RETURN_EXIT; } - CDEBUG(D_RPCTRACE, "%s: committing for last_committed "LPU64" gen %d\n", + CDEBUG(D_RPCTRACE, "%s: committing for last_committed %llu gen %d\n", imp->imp_obd->obd_name, imp->imp_peer_committed_transno, imp->imp_generation); - if (imp->imp_generation != imp->imp_last_generation_checked) + if (imp->imp_generation != imp->imp_last_generation_checked || + imp->imp_last_transno_checked == 0) skip_committed_list = false; imp->imp_last_transno_checked = imp->imp_peer_committed_transno; @@ -2540,7 +2721,7 @@ void ptlrpc_free_committed(struct obd_import *imp) continue; } - DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")", + DEBUG_REQ(D_INFO, req, "commit (last_committed %llu)", imp->imp_peer_committed_transno); free_req: ptlrpc_free_request(req); @@ -2555,6 +2736,9 @@ free_req: if (req->rq_import_generation < imp->imp_generation) { DEBUG_REQ(D_RPCTRACE, req, "free stale open request"); ptlrpc_free_request(req); + } else if (!req->rq_replay) { + DEBUG_REQ(D_RPCTRACE, req, "free closed open request"); + ptlrpc_free_request(req); } } out: @@ -2566,7 +2750,6 @@ void ptlrpc_cleanup_client(struct obd_import 
*imp) ENTRY; EXIT; } -EXPORT_SYMBOL(ptlrpc_cleanup_client); /** * Schedule previously sent request for resend. @@ -2593,18 +2776,10 @@ void ptlrpc_resend_req(struct ptlrpc_request *req) req->rq_resend = 1; req->rq_net_err = 0; req->rq_timedout = 0; - if (req->rq_bulk) { - __u64 old_xid = req->rq_xid; - /* ensure previous bulk fails */ - req->rq_xid = ptlrpc_next_xid(); - CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n", - old_xid, req->rq_xid); - } ptlrpc_client_wake_req(req); spin_unlock(&req->rq_lock); } -EXPORT_SYMBOL(ptlrpc_resend_req); /* XXX: this function and rq_status are currently unused */ void ptlrpc_restart_req(struct ptlrpc_request *req) @@ -2618,7 +2793,6 @@ void ptlrpc_restart_req(struct ptlrpc_request *req) ptlrpc_client_wake_req(req); spin_unlock(&req->rq_lock); } -EXPORT_SYMBOL(ptlrpc_restart_req); /** * Grab additional reference on a request \a req @@ -2657,6 +2831,10 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY); + spin_lock(&req->rq_lock); + req->rq_resend = 0; + spin_unlock(&req->rq_lock); + LASSERT(imp->imp_replayable); /* Balanced in ptlrpc_free_committed, usually. */ ptlrpc_request_addref(req); @@ -2686,7 +2864,6 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, list_add(&req->rq_replay_list, &imp->imp_replay_list); } -EXPORT_SYMBOL(ptlrpc_retain_replayable_request); /** * Send request and wait until it completes. @@ -2720,14 +2897,9 @@ int ptlrpc_queue_wait(struct ptlrpc_request *req) } EXPORT_SYMBOL(ptlrpc_queue_wait); -struct ptlrpc_replay_async_args { - int praa_old_state; - int praa_old_status; -}; - /** * Callback used for replayed requests reply processing. - * In case of succesful reply calls registeresd request replay callback. + * In case of successful reply calls registered request replay callback. * In case of error restart replay process. 
*/ static int ptlrpc_replay_interpret(const struct lu_env *env, @@ -2740,10 +2912,15 @@ static int ptlrpc_replay_interpret(const struct lu_env *env, ENTRY; atomic_dec(&imp->imp_replay_inflight); - if (!ptlrpc_client_replied(req)) { - CERROR("request replay timed out, restarting recovery\n"); - GOTO(out, rc = -ETIMEDOUT); - } + /* Note: if it is bulk replay (MDS-MDS replay), then even if + * server got the request, but bulk transfer timeout, let's + * replay the bulk req again */ + if (!ptlrpc_client_replied(req) || + (req->rq_bulk != NULL && + lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) { + DEBUG_REQ(D_ERROR, req, "request replay timed out.\n"); + GOTO(out, rc = -ETIMEDOUT); + } if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR && (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN || @@ -2764,7 +2941,7 @@ static int ptlrpc_replay_interpret(const struct lu_env *env, LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) == lustre_msg_get_transno(req->rq_repmsg) || lustre_msg_get_transno(req->rq_repmsg) == 0, - LPX64"/"LPX64"\n", + "%#llx/%#llx\n", lustre_msg_get_transno(req->rq_reqmsg), lustre_msg_get_transno(req->rq_repmsg)); } @@ -2780,8 +2957,8 @@ static int ptlrpc_replay_interpret(const struct lu_env *env, /* transaction number shouldn't be bigger than the latest replayed */ if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) { DEBUG_REQ(D_ERROR, req, - "Reported transno "LPU64" is bigger than the " - "replayed one: "LPU64, req->rq_transno, + "Reported transno %llu is bigger than the " + "replayed one: %llu", req->rq_transno, lustre_msg_get_transno(req->rq_reqmsg)); GOTO(out, rc = -EINVAL); } @@ -2797,6 +2974,49 @@ static int ptlrpc_replay_interpret(const struct lu_env *env, DEBUG_REQ(D_ERROR, req, "status %d, old was %d", lustre_msg_get_status(req->rq_repmsg), aa->praa_old_status); + + /* Note: If the replay fails for MDT-MDT recovery, let's + * abort all of the following requests in the replay + * and sending list, because MDT-MDT update requests + * are dependent on each other, see LU-7039 */ + if (imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS) { + struct ptlrpc_request *free_req; + struct ptlrpc_request *tmp; + + spin_lock(&imp->imp_lock); + list_for_each_entry_safe(free_req, tmp, + &imp->imp_replay_list, + rq_replay_list) { + ptlrpc_free_request(free_req); + } + + list_for_each_entry_safe(free_req, tmp, + &imp->imp_committed_list, + rq_replay_list) { + ptlrpc_free_request(free_req); + } + + list_for_each_entry_safe(free_req, tmp, + &imp->imp_delayed_list, + rq_list) { + spin_lock(&free_req->rq_lock); + free_req->rq_err = 1; + free_req->rq_status = -EIO; + ptlrpc_client_wake_req(free_req); + spin_unlock(&free_req->rq_lock); + } + + list_for_each_entry_safe(free_req, tmp, + &imp->imp_sending_list, + rq_list) { + spin_lock(&free_req->rq_lock); + free_req->rq_err = 1; + free_req->rq_status = -EIO; + ptlrpc_client_wake_req(free_req); + spin_unlock(&free_req->rq_lock); + } + spin_unlock(&imp->imp_lock); + } } else { /* Put it back for re-replay. 
*/ lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status); @@ -2858,10 +3078,9 @@ int ptlrpc_replay_req(struct ptlrpc_request *req) atomic_inc(&req->rq_import->imp_replay_inflight); ptlrpc_request_addref(req); /* ptlrpcd needs a ref */ - ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1); + ptlrpcd_add_req(req); RETURN(0); } -EXPORT_SYMBOL(ptlrpc_replay_req); /** * Aborts all in-flight request on import \a imp sending and delayed lists @@ -2921,7 +3140,6 @@ void ptlrpc_abort_inflight(struct obd_import *imp) EXIT; } -EXPORT_SYMBOL(ptlrpc_abort_inflight); /** * Abort all uncompleted requests in request set \a set @@ -3010,7 +3228,51 @@ __u64 ptlrpc_next_xid(void) return next; } -EXPORT_SYMBOL(ptlrpc_next_xid); + +/** + * If request has a new allocated XID (new request or EINPROGRESS resend), + * use this XID as matchbits of bulk, otherwise allocate a new matchbits for + * request to ensure previous bulk fails and avoid problems with lost replies + * and therefore several transfers landing into the same buffer from different + * sending attempts. + */ +void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req) +{ + struct ptlrpc_bulk_desc *bd = req->rq_bulk; + + LASSERT(bd != NULL); + + if (!req->rq_resend) { + /* this request has a new xid, just use it as bulk matchbits */ + req->rq_mbits = req->rq_xid; + + } else { /* needs to generate a new matchbits for resend */ + __u64 old_mbits = req->rq_mbits; + + if (OCD_HAS_FLAG(&bd->bd_import->imp_connect_data, BULK_MBITS)){ + req->rq_mbits = ptlrpc_next_xid(); + } else {/* old version transfers rq_xid to peer as matchbits */ + spin_lock(&req->rq_import->imp_lock); + list_del_init(&req->rq_unreplied_list); + ptlrpc_assign_next_xid_nolock(req); + req->rq_mbits = req->rq_xid; + spin_unlock(&req->rq_import->imp_lock); + } + CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n", + old_mbits, req->rq_mbits); + } + + /* For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so + * that server can infer the number of bulks that were prepared, + * see LU-1431 */ + req->rq_mbits += ((bd->bd_iov_count + LNET_MAX_IOV - 1) / + LNET_MAX_IOV) - 1; + + /* Set rq_xid as rq_mbits to indicate the final bulk for the old + * server which does not support OBD_CONNECT_BULK_MBITS. LU-6808 */ + if (!OCD_HAS_FLAG(&bd->bd_import->imp_connect_data, BULK_MBITS)) + req->rq_xid = req->rq_mbits; +} /** * Get a glimpse at what next xid value might have been. 
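
ptlrpc_set_bulk_mbits() above encodes the bulk transfer count in the matchbits themselves: for multi-bulk RPCs, rq_mbits is advanced by ceil(bd_iov_count / LNET_MAX_IOV) - 1 past the first value, and the server infers from the final matchbits how many MDs were prepared (LU-1431). A standalone model of that arithmetic (the LNET_MAX_IOV value here is assumed for illustration):

#include <stdio.h>

#define LNET_MAX_IOV 256	/* fragments one LNet MD can carry */

/* Final matchbits for a bulk of nfrags fragments starting at 'first':
 * the server derives the MD count from how far this lies past 'first'. */
static unsigned long long final_mbits(unsigned long long first,
				      unsigned int nfrags)
{
	unsigned int mds = (nfrags + LNET_MAX_IOV - 1) / LNET_MAX_IOV;

	return first + mds - 1;
}

int main(void)
{
	/* 300 pages need two MDs, so the last matchbits is first + 1 */
	printf("%llu\n", final_mbits(100ULL, 300));
	return 0;
}

For peers without OBD_CONNECT_BULK_MBITS, rq_xid is set to this final rq_mbits so that old servers, which match bulk on the XID, still see the expected value (LU-6808).
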
@@ -3062,13 +3324,12 @@ static void ptlrpcd_add_work_req(struct ptlrpc_request *req) req->rq_timeout = obd_timeout; req->rq_sent = cfs_time_current_sec(); req->rq_deadline = req->rq_sent + req->rq_timeout; - req->rq_reply_deadline = req->rq_deadline; req->rq_phase = RQ_PHASE_INTERPRET; req->rq_next_phase = RQ_PHASE_COMPLETE; req->rq_xid = ptlrpc_next_xid(); req->rq_import_generation = req->rq_import->imp_generation; - ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1); + ptlrpcd_add_req(req); } static int work_interpreter(const struct lu_env *env, @@ -3104,7 +3365,7 @@ static int ptlrpcd_check_work(struct ptlrpc_request *req) void *ptlrpcd_alloc_work(struct obd_import *imp, int (*cb)(const struct lu_env *, void *), void *cbdata) { - struct ptlrpc_request *req = NULL; + struct ptlrpc_request *req = NULL; struct ptlrpc_work_async_args *args; ENTRY; @@ -3120,27 +3381,16 @@ void *ptlrpcd_alloc_work(struct obd_import *imp, RETURN(ERR_PTR(-ENOMEM)); } + ptlrpc_cli_req_init(req); + req->rq_send_state = LUSTRE_IMP_FULL; req->rq_type = PTL_RPC_MSG_REQUEST; req->rq_import = class_import_get(imp); - req->rq_export = NULL; req->rq_interpret_reply = work_interpreter; /* don't want reply */ - req->rq_receiving_reply = 0; - req->rq_req_unlink = req->rq_reply_unlink = 0; req->rq_no_delay = req->rq_no_resend = 1; req->rq_pill.rc_fmt = (void *)&worker_format; - spin_lock_init(&req->rq_lock); - INIT_LIST_HEAD(&req->rq_list); - INIT_LIST_HEAD(&req->rq_replay_list); - INIT_LIST_HEAD(&req->rq_set_chain); - INIT_LIST_HEAD(&req->rq_history_list); - INIT_LIST_HEAD(&req->rq_exp_list); - init_waitqueue_head(&req->rq_reply_waitq); - init_waitqueue_head(&req->rq_set_waitq); - atomic_set(&req->rq_refcount, 1); - CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args)); args = ptlrpc_req_async_args(req); args->cb = cb;