X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Fclient.c;h=a791abf62ff635246b549537a60bf70929ccf94f;hb=01ca899324738343279c1d63823b7fab937197dc;hp=7936a0da80185fad8f5c32d6015eced5ea63f62e;hpb=cc2ff1bfd66a5c004eb6ed61fc2dac3f1ab49d3a;p=fs%2Flustre-release.git diff --git a/lustre/ptlrpc/client.c b/lustre/ptlrpc/client.c index 7936a0d..a791abf 100644 --- a/lustre/ptlrpc/client.c +++ b/lustre/ptlrpc/client.c @@ -27,7 +27,7 @@ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2013, Intel Corporation. + * Copyright (c) 2011, 2014, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -37,11 +37,6 @@ /** Implementation of client-side PortalRPC interfaces */ #define DEBUG_SUBSYSTEM S_RPC -#ifndef __KERNEL__ -#include -#include -#include -#endif #include #include @@ -52,8 +47,26 @@ #include "ptlrpc_internal.h" +const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = { + .add_kiov_frag = ptlrpc_prep_bulk_page_pin, + .release_frags = ptlrpc_release_bulk_page_pin, +}; +EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops); + +const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = { + .add_kiov_frag = ptlrpc_prep_bulk_page_nopin, + .release_frags = ptlrpc_release_bulk_noop, +}; +EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops); + +const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kvec_ops = { + .add_iov_frag = ptlrpc_prep_bulk_frag, +}; +EXPORT_SYMBOL(ptlrpc_bulk_kvec_ops); + static int ptlrpc_send_new_req(struct ptlrpc_request *req); static int ptlrpcd_check_work(struct ptlrpc_request *req); +static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async); /** * Initialize passed in client structure \a cl. @@ -96,29 +109,46 @@ struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid) return c; } -EXPORT_SYMBOL(ptlrpc_uuid_to_connection); /** * Allocate and initialize new bulk descriptor on the sender. * Returns pointer to the descriptor or NULL on error. */ -struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw, - unsigned type, unsigned portal) +struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned nfrags, unsigned max_brw, + enum ptlrpc_bulk_op_type type, + unsigned portal, + const struct ptlrpc_bulk_frag_ops *ops) { struct ptlrpc_bulk_desc *desc; int i; - OBD_ALLOC(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[npages])); + /* ensure that only one of KIOV or IOVEC is set but not both */ + LASSERT((ptlrpc_is_bulk_desc_kiov(type) && + ops->add_kiov_frag != NULL) || + (ptlrpc_is_bulk_desc_kvec(type) && + ops->add_iov_frag != NULL)); + + if (type & PTLRPC_BULK_BUF_KIOV) { + OBD_ALLOC(desc, + offsetof(struct ptlrpc_bulk_desc, + bd_u.bd_kiov.bd_vec[nfrags])); + } else { + OBD_ALLOC(desc, + offsetof(struct ptlrpc_bulk_desc, + bd_u.bd_kvec.bd_kvec[nfrags])); + } + if (!desc) return NULL; spin_lock_init(&desc->bd_lock); init_waitqueue_head(&desc->bd_waitq); - desc->bd_max_iov = npages; + desc->bd_max_iov = nfrags; desc->bd_iov_count = 0; desc->bd_portal = portal; desc->bd_type = type; desc->bd_md_count = 0; + desc->bd_frag_ops = (struct ptlrpc_bulk_frag_ops *) ops; LASSERT(max_brw > 0); desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT); /* PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this @@ -131,21 +161,25 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw, /** * Prepare bulk descriptor for specified outgoing request \a req that - * can fit \a npages * pages. 
\a type is bulk type. \a portal is where + * can fit \a nfrags * pages. \a type is bulk type. \a portal is where * the bulk to be sent. Used on client-side. * Returns pointer to newly allocatrd initialized bulk descriptor or NULL on * error. */ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, - unsigned npages, unsigned max_brw, - unsigned type, unsigned portal) + unsigned nfrags, unsigned max_brw, + unsigned int type, + unsigned portal, + const struct ptlrpc_bulk_frag_ops + *ops) { struct obd_import *imp = req->rq_import; struct ptlrpc_bulk_desc *desc; ENTRY; - LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE); - desc = ptlrpc_new_bulk(npages, max_brw, type, portal); + LASSERT(ptlrpc_is_bulk_op_passive(type)); + + desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops); if (desc == NULL) RETURN(NULL); @@ -163,60 +197,90 @@ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, } EXPORT_SYMBOL(ptlrpc_prep_bulk_imp); -/* - * Add a page \a page to the bulk descriptor \a desc. - * Data to transfer in the page starts at offset \a pageoffset and - * amount of data to transfer from the page is \a len - */ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, - struct page *page, int pageoffset, int len, int pin) + struct page *page, int pageoffset, int len, + int pin) { + lnet_kiov_t *kiov; + LASSERT(desc->bd_iov_count < desc->bd_max_iov); LASSERT(page != NULL); LASSERT(pageoffset >= 0); LASSERT(len > 0); LASSERT(pageoffset + len <= PAGE_CACHE_SIZE); + LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); + + kiov = &BD_GET_KIOV(desc, desc->bd_iov_count); desc->bd_nob += len; if (pin) page_cache_get(page); - ptlrpc_add_bulk_page(desc, page, pageoffset, len); + kiov->kiov_page = page; + kiov->kiov_offset = pageoffset; + kiov->kiov_len = len; + + desc->bd_iov_count++; } EXPORT_SYMBOL(__ptlrpc_prep_bulk_page); -/** - * Uninitialize and free bulk descriptor \a desc. - * Works on bulk descriptors both from server and client side. 
- */ -void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin) +int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc, + void *frag, int len) +{ + struct kvec *iovec; + ENTRY; + + LASSERT(desc->bd_iov_count < desc->bd_max_iov); + LASSERT(frag != NULL); + LASSERT(len > 0); + LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type)); + + iovec = &BD_GET_KVEC(desc, desc->bd_iov_count); + + desc->bd_nob += len; + + iovec->iov_base = frag; + iovec->iov_len = len; + + desc->bd_iov_count++; + + RETURN(desc->bd_nob); +} +EXPORT_SYMBOL(ptlrpc_prep_bulk_frag); + +void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc) { - int i; ENTRY; LASSERT(desc != NULL); LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */ LASSERT(desc->bd_md_count == 0); /* network hands off */ LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL)); + LASSERT(desc->bd_frag_ops != NULL); - sptlrpc_enc_pool_put_pages(desc); + if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) + sptlrpc_enc_pool_put_pages(desc); if (desc->bd_export) class_export_put(desc->bd_export); else class_import_put(desc->bd_import); - if (unpin) { - for (i = 0; i < desc->bd_iov_count ; i++) - page_cache_release(desc->bd_iov[i].kiov_page); - } + if (desc->bd_frag_ops->release_frags != NULL) + desc->bd_frag_ops->release_frags(desc); + + if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) + OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc, + bd_u.bd_kiov.bd_vec[desc->bd_max_iov])); + else + OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc, + bd_u.bd_kvec.bd_kvec[desc-> + bd_max_iov])); - OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc, - bd_iov[desc->bd_max_iov])); EXIT; } -EXPORT_SYMBOL(__ptlrpc_free_bulk); +EXPORT_SYMBOL(ptlrpc_free_bulk); /** * Set server timelimit for this req, i.e. how long are we willing to wait @@ -286,22 +350,35 @@ int ptlrpc_at_get_net_latency(struct ptlrpc_request *req) } /* Adjust expected network latency */ -static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req, - unsigned int service_time) +void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req, + unsigned int service_time) { unsigned int nl, oldnl; struct imp_at *at; time_t now = cfs_time_current_sec(); LASSERT(req->rq_import); - at = &req->rq_import->imp_at; + + if (service_time > now - req->rq_sent + 3) { + /* bz16408, however, this can also happen if early reply + * is lost and client RPC is expired and resent, early reply + * or reply of original RPC can still be fit in reply buffer + * of resent RPC, now client is measuring time from the + * resent time, but server sent back service time of original + * RPC. + */ + CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ? 
+ D_ADAPTTO : D_WARNING, + "Reported service time %u > total measured time " + CFS_DURATION_T"\n", service_time, + cfs_time_sub(now, req->rq_sent)); + return; + } /* Network latency is total time less server processing time */ - nl = max_t(int, now - req->rq_sent - service_time, 0) +1/*st rounding*/; - if (service_time > now - req->rq_sent + 3 /* bz16408 */) - CWARN("Reported service time %u > total measured time " - CFS_DURATION_T"\n", service_time, - cfs_time_sub(now, req->rq_sent)); + nl = max_t(int, now - req->rq_sent - + service_time, 0) + 1; /* st rounding */ + at = &req->rq_import->imp_at; oldnl = at_measured(&at->iat_net_latency, nl); if (oldnl != 0) @@ -338,6 +415,7 @@ static int unpack_reply(struct ptlrpc_request *req) * If anything goes wrong just ignore it - same as if it never happened */ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req) +__must_hold(&req->rq_lock) { struct ptlrpc_request *early_req; time_t olddl; @@ -350,33 +428,37 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req) rc = sptlrpc_cli_unwrap_early_reply(req, &early_req); if (rc) { spin_lock(&req->rq_lock); - RETURN(rc); - } - - rc = unpack_reply(early_req); - if (rc == 0) { - /* Expecting to increase the service time estimate here */ - ptlrpc_at_adj_service(req, - lustre_msg_get_timeout(early_req->rq_repmsg)); - ptlrpc_at_adj_net_latency(req, - lustre_msg_get_service_time(early_req->rq_repmsg)); - } - - sptlrpc_cli_finish_early_reply(early_req); + RETURN(rc); + } + rc = unpack_reply(early_req); if (rc != 0) { + sptlrpc_cli_finish_early_reply(early_req); spin_lock(&req->rq_lock); RETURN(rc); } - /* Adjust the local timeout for this req */ - ptlrpc_at_set_req_timeout(req); + /* Use new timeout value just to adjust the local value for this + * request, don't include it into at_history. It is unclear yet why + * service time increased and should it be counted or skipped, e.g. + * that can be recovery case or some error or server, the real reply + * will add all new data if it is worth to add. */ + req->rq_timeout = lustre_msg_get_timeout(early_req->rq_repmsg); + lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout); + + /* Network latency can be adjusted, it is pure network delays */ + ptlrpc_at_adj_net_latency(req, + lustre_msg_get_service_time(early_req->rq_repmsg)); + + sptlrpc_cli_finish_early_reply(early_req); spin_lock(&req->rq_lock); olddl = req->rq_deadline; - /* server assumes it now has rq_timeout from when it sent the - * early reply, so client should give it at least that long. */ - req->rq_deadline = cfs_time_current_sec() + req->rq_timeout + + /* server assumes it now has rq_timeout from when the request + * arrived, so the client should give it at least that long. + * since we don't know the arrival time we'll use the original + * sent time */ + req->rq_deadline = req->rq_sent + req->rq_timeout + ptlrpc_at_get_net_latency(req); DEBUG_REQ(D_ADAPTTO, req, @@ -388,9 +470,9 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req) RETURN(rc); } -struct kmem_cache *request_cache; +static struct kmem_cache *request_cache; -int ptlrpc_request_cache_init() +int ptlrpc_request_cache_init(void) { request_cache = kmem_cache_create("ptlrpc_cache", sizeof(struct ptlrpc_request), @@ -398,12 +480,12 @@ int ptlrpc_request_cache_init() return request_cache == NULL ? 
-ENOMEM : 0; } -void ptlrpc_request_cache_fini() +void ptlrpc_request_cache_fini(void) { kmem_cache_destroy(request_cache); } -struct ptlrpc_request *ptlrpc_request_cache_alloc(int flags) +struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags) { struct ptlrpc_request *req; @@ -422,15 +504,15 @@ void ptlrpc_request_cache_free(struct ptlrpc_request *req) */ void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool) { - cfs_list_t *l, *tmp; + struct list_head *l, *tmp; struct ptlrpc_request *req; LASSERT(pool != NULL); spin_lock(&pool->prp_lock); - cfs_list_for_each_safe(l, tmp, &pool->prp_req_list) { - req = cfs_list_entry(l, struct ptlrpc_request, rq_list); - cfs_list_del(&req->rq_list); + list_for_each_safe(l, tmp, &pool->prp_req_list) { + req = list_entry(l, struct ptlrpc_request, rq_list); + list_del(&req->rq_list); LASSERT(req->rq_reqbuf); LASSERT(req->rq_reqbuf_len == pool->prp_rq_size); OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size); @@ -444,7 +526,7 @@ EXPORT_SYMBOL(ptlrpc_free_rq_pool); /** * Allocates, initializes and adds \a num_rq requests to the pool \a pool */ -void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) +int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) { int i; int size = 1; @@ -452,7 +534,7 @@ void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) while (size < pool->prp_rq_size) size <<= 1; - LASSERTF(cfs_list_empty(&pool->prp_req_list) || + LASSERTF(list_empty(&pool->prp_req_list) || size == pool->prp_rq_size, "Trying to change pool size with nonempty pool " "from %d to %d bytes\n", pool->prp_rq_size, size); @@ -466,20 +548,20 @@ void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) spin_unlock(&pool->prp_lock); req = ptlrpc_request_cache_alloc(GFP_NOFS); if (!req) - return; + return i; OBD_ALLOC_LARGE(msg, size); if (!msg) { ptlrpc_request_cache_free(req); - return; + return i; } req->rq_reqbuf = msg; req->rq_reqbuf_len = size; req->rq_pool = pool; spin_lock(&pool->prp_lock); - cfs_list_add_tail(&req->rq_list, &pool->prp_req_list); + list_add_tail(&req->rq_list, &pool->prp_req_list); } spin_unlock(&pool->prp_lock); - return; + return num_rq; } EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool); @@ -493,30 +575,25 @@ EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool); */ struct ptlrpc_request_pool * ptlrpc_init_rq_pool(int num_rq, int msgsize, - void (*populate_pool)(struct ptlrpc_request_pool *, int)) + int (*populate_pool)(struct ptlrpc_request_pool *, int)) { - struct ptlrpc_request_pool *pool; + struct ptlrpc_request_pool *pool; - OBD_ALLOC(pool, sizeof (struct ptlrpc_request_pool)); - if (!pool) - return NULL; + OBD_ALLOC(pool, sizeof(struct ptlrpc_request_pool)); + if (!pool) + return NULL; - /* Request next power of two for the allocation, because internally - kernel would do exactly this */ + /* Request next power of two for the allocation, because internally + kernel would do exactly this */ spin_lock_init(&pool->prp_lock); - CFS_INIT_LIST_HEAD(&pool->prp_req_list); - pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD; - pool->prp_populate = populate_pool; + INIT_LIST_HEAD(&pool->prp_req_list); + pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD; + pool->prp_populate = populate_pool; - populate_pool(pool, num_rq); + populate_pool(pool, num_rq); - if (cfs_list_empty(&pool->prp_req_list)) { - /* have not allocated a single request for the pool */ - OBD_FREE(pool, sizeof (struct ptlrpc_request_pool)); - pool = NULL; - } - return pool; + return pool; } EXPORT_SYMBOL(ptlrpc_init_rq_pool); @@ 
-538,14 +615,14 @@ ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool) * in writeout path, where this matters, this is safe to do, because * nothing is lost in this case, and when some in-flight requests * complete, this code will be called again. */ - if (unlikely(cfs_list_empty(&pool->prp_req_list))) { + if (unlikely(list_empty(&pool->prp_req_list))) { spin_unlock(&pool->prp_lock); return NULL; } - request = cfs_list_entry(pool->prp_req_list.next, struct ptlrpc_request, - rq_list); - cfs_list_del_init(&request->rq_list); + request = list_entry(pool->prp_req_list.next, struct ptlrpc_request, + rq_list); + list_del_init(&request->rq_list); spin_unlock(&pool->prp_lock); LASSERT(request->rq_reqbuf); @@ -568,9 +645,9 @@ static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request) struct ptlrpc_request_pool *pool = request->rq_pool; spin_lock(&pool->prp_lock); - LASSERT(cfs_list_empty(&request->rq_list)); + LASSERT(list_empty(&request->rq_list)); LASSERT(!request->rq_receiving_reply); - cfs_list_add_tail(&request->rq_list, &pool->prp_req_list); + list_add_tail(&request->rq_list, &pool->prp_req_list); spin_unlock(&pool->prp_lock); } @@ -603,7 +680,6 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request, lustre_msg_add_version(request->rq_reqmsg, version); request->rq_send_state = LUSTRE_IMP_FULL; request->rq_type = PTL_RPC_MSG_REQUEST; - request->rq_export = NULL; request->rq_req_cbid.cbid_fn = request_out_callback; request->rq_req_cbid.cbid_arg = request; @@ -620,19 +696,6 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request, ptlrpc_at_set_req_timeout(request); - spin_lock_init(&request->rq_lock); - CFS_INIT_LIST_HEAD(&request->rq_list); - CFS_INIT_LIST_HEAD(&request->rq_timed_list); - CFS_INIT_LIST_HEAD(&request->rq_replay_list); - CFS_INIT_LIST_HEAD(&request->rq_ctx_chain); - CFS_INIT_LIST_HEAD(&request->rq_set_chain); - CFS_INIT_LIST_HEAD(&request->rq_history_list); - CFS_INIT_LIST_HEAD(&request->rq_exp_list); - init_waitqueue_head(&request->rq_reply_waitq); - init_waitqueue_head(&request->rq_set_waitq); - request->rq_xid = ptlrpc_next_xid(); - atomic_set(&request->rq_refcount, 1); - lustre_msg_set_opc(request->rq_reqmsg, opcode); RETURN(0); @@ -702,16 +765,17 @@ struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp, { struct ptlrpc_request *request = NULL; - if (pool) - request = ptlrpc_prep_req_from_pool(pool); + request = ptlrpc_request_cache_alloc(GFP_NOFS); - if (!request) - request = ptlrpc_request_cache_alloc(GFP_NOFS); + if (!request && pool) + request = ptlrpc_prep_req_from_pool(pool); if (request) { + ptlrpc_cli_req_init(request); + LASSERTF((unsigned long)imp > 0x1000, "%p", imp); LASSERT(imp != LP_POISON); - LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p", + LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n", imp->imp_client); LASSERT(imp->imp_client != LP_POISON); @@ -834,7 +898,6 @@ ptlrpc_prep_req_pool(struct obd_import *imp, } return request; } -EXPORT_SYMBOL(ptlrpc_prep_req_pool); /** * Same as ptlrpc_prep_req_pool, but without pool @@ -846,28 +909,29 @@ ptlrpc_prep_req(struct obd_import *imp, __u32 version, int opcode, int count, return ptlrpc_prep_req_pool(imp, version, opcode, count, lengths, bufs, NULL); } -EXPORT_SYMBOL(ptlrpc_prep_req); /** - * Allocate and initialize new request set structure. + * Allocate and initialize new request set structure on the current CPT. * Returns a pointer to the newly allocated set structure or NULL on error. 
*/ struct ptlrpc_request_set *ptlrpc_prep_set(void) { - struct ptlrpc_request_set *set; + struct ptlrpc_request_set *set; + int cpt; ENTRY; - OBD_ALLOC(set, sizeof *set); + cpt = cfs_cpt_current(cfs_cpt_table, 0); + OBD_CPT_ALLOC(set, cfs_cpt_table, cpt, sizeof *set); if (!set) RETURN(NULL); atomic_set(&set->set_refcount, 1); - CFS_INIT_LIST_HEAD(&set->set_requests); + INIT_LIST_HEAD(&set->set_requests); init_waitqueue_head(&set->set_waitq); atomic_set(&set->set_new_count, 0); atomic_set(&set->set_remaining, 0); spin_lock_init(&set->set_new_req_lock); - CFS_INIT_LIST_HEAD(&set->set_new_requests); - CFS_INIT_LIST_HEAD(&set->set_cblist); + INIT_LIST_HEAD(&set->set_new_requests); + INIT_LIST_HEAD(&set->set_cblist); set->set_max_inflight = UINT_MAX; set->set_producer = NULL; set->set_producer_arg = NULL; @@ -901,7 +965,6 @@ struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func, RETURN(set); } -EXPORT_SYMBOL(ptlrpc_prep_fcset); /** * Wind down and free request set structure previously allocated with @@ -913,33 +976,33 @@ EXPORT_SYMBOL(ptlrpc_prep_fcset); */ void ptlrpc_set_destroy(struct ptlrpc_request_set *set) { - cfs_list_t *tmp; - cfs_list_t *next; - int expected_phase; - int n = 0; - ENTRY; + struct list_head *tmp; + struct list_head *next; + int expected_phase; + int n = 0; + ENTRY; - /* Requests on the set should either all be completed, or all be new */ + /* Requests on the set should either all be completed, or all be new */ expected_phase = (atomic_read(&set->set_remaining) == 0) ? - RQ_PHASE_COMPLETE : RQ_PHASE_NEW; - cfs_list_for_each (tmp, &set->set_requests) { - struct ptlrpc_request *req = - cfs_list_entry(tmp, struct ptlrpc_request, - rq_set_chain); - - LASSERT(req->rq_phase == expected_phase); - n++; - } + RQ_PHASE_COMPLETE : RQ_PHASE_NEW; + list_for_each(tmp, &set->set_requests) { + struct ptlrpc_request *req = + list_entry(tmp, struct ptlrpc_request, + rq_set_chain); + + LASSERT(req->rq_phase == expected_phase); + n++; + } LASSERTF(atomic_read(&set->set_remaining) == 0 || atomic_read(&set->set_remaining) == n, "%d / %d\n", atomic_read(&set->set_remaining), n); - cfs_list_for_each_safe(tmp, next, &set->set_requests) { - struct ptlrpc_request *req = - cfs_list_entry(tmp, struct ptlrpc_request, - rq_set_chain); - cfs_list_del_init(&req->rq_set_chain); + list_for_each_safe(tmp, next, &set->set_requests) { + struct ptlrpc_request *req = + list_entry(tmp, struct ptlrpc_request, + rq_set_chain); + list_del_init(&req->rq_set_chain); LASSERT(req->rq_phase == expected_phase); @@ -971,19 +1034,18 @@ EXPORT_SYMBOL(ptlrpc_set_destroy); int ptlrpc_set_add_cb(struct ptlrpc_request_set *set, set_interpreter_func fn, void *data) { - struct ptlrpc_set_cbdata *cbdata; + struct ptlrpc_set_cbdata *cbdata; - OBD_ALLOC_PTR(cbdata); - if (cbdata == NULL) - RETURN(-ENOMEM); + OBD_ALLOC_PTR(cbdata); + if (cbdata == NULL) + RETURN(-ENOMEM); - cbdata->psc_interpret = fn; - cbdata->psc_data = data; - cfs_list_add_tail(&cbdata->psc_item, &set->set_cblist); + cbdata->psc_interpret = fn; + cbdata->psc_data = data; + list_add_tail(&cbdata->psc_item, &set->set_cblist); - RETURN(0); + RETURN(0); } -EXPORT_SYMBOL(ptlrpc_set_add_cb); /** * Add a new request to the general purpose request set. 
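As a usage sketch for the request-set API touched here (illustrative only, outside the patch; example_send_pair is a hypothetical caller and error handling is abbreviated):

static int example_send_pair(struct ptlrpc_request *req1,
			     struct ptlrpc_request *req2)
{
	struct ptlrpc_request_set *set;
	int rc;

	set = ptlrpc_prep_set();	/* now allocated on the current CPT */
	if (set == NULL)
		return -ENOMEM;

	/* the set takes over the caller's reference on each request */
	ptlrpc_set_add_req(set, req1);
	ptlrpc_set_add_req(set, req2);

	rc = ptlrpc_set_wait(set);	/* sends everything, waits for replies */
	ptlrpc_set_destroy(set);	/* drops set refs, frees completed reqs */
	return rc;
}
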
@@ -992,10 +1054,10 @@ EXPORT_SYMBOL(ptlrpc_set_add_cb); void ptlrpc_set_add_req(struct ptlrpc_request_set *set, struct ptlrpc_request *req) { - LASSERT(cfs_list_empty(&req->rq_set_chain)); + LASSERT(list_empty(&req->rq_set_chain)); /* The set takes over the caller's request reference */ - cfs_list_add_tail(&req->rq_set_chain, &set->set_requests); + list_add_tail(&req->rq_set_chain, &set->set_requests); req->rq_set = set; atomic_inc(&set->set_remaining); req->rq_queued_time = cfs_time_current(); @@ -1030,7 +1092,7 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, */ req->rq_set = set; req->rq_queued_time = cfs_time_current(); - cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests); + list_add_tail(&req->rq_set_chain, &set->set_new_requests); count = atomic_inc_return(&set->set_new_count); spin_unlock(&set->set_new_req_lock); @@ -1045,7 +1107,6 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, wake_up(&pc->pc_partners[i]->pc_set->set_waitq); } } -EXPORT_SYMBOL(ptlrpc_set_add_new_req); /** * Based on the current state of the import, determine if the request @@ -1077,9 +1138,9 @@ static int ptlrpc_import_delay_req(struct obd_import *imp, D_HA : D_ERROR, req, "IMP_CLOSED "); *status = -EIO; } else if (ptlrpc_send_limit_expired(req)) { - /* probably doesn't need to be a D_ERROR after initial testing */ - DEBUG_REQ(D_ERROR, req, "send limit expired "); - *status = -EIO; + /* probably doesn't need to be a D_ERROR after initial testing*/ + DEBUG_REQ(D_HA, req, "send limit expired "); + *status = -ETIMEDOUT; } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING && imp->imp_state == LUSTRE_IMP_CONNECTING) { /* allow CONNECT even if import is invalid */ ; @@ -1116,35 +1177,40 @@ static int ptlrpc_import_delay_req(struct obd_import *imp, } /** - * Decide if the eror message regarding provided request \a req - * should be printed to the console or not. - * Makes it's decision on request status and other properties. - * Returns 1 to print error on the system console or 0 if not. + * Decide if the error message should be printed to the console or not. + * Makes its decision based on request type, status, and failure frequency. + * + * \param[in] req request that failed and may need a console message + * + * \retval false if no message should be printed + * \retval true if console message should be printed */ -static int ptlrpc_console_allow(struct ptlrpc_request *req) +static bool ptlrpc_console_allow(struct ptlrpc_request *req) { - __u32 opc; - int err; + __u32 opc; - LASSERT(req->rq_reqmsg != NULL); - opc = lustre_msg_get_opc(req->rq_reqmsg); + LASSERT(req->rq_reqmsg != NULL); + opc = lustre_msg_get_opc(req->rq_reqmsg); - /* Suppress particular reconnect errors which are to be expected. No - * errors are suppressed for the initial connection on an import */ - if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) && - (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) { + /* Suppress particular reconnect errors which are to be expected. 
*/ + if (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT) { + int err; - /* Suppress timed out reconnect requests */ - if (req->rq_timedout) - return 0; + /* Suppress timed out reconnect requests */ + if (lustre_handle_is_used(&req->rq_import->imp_remote_handle) || + req->rq_timedout) + return false; - /* Suppress unavailable/again reconnect requests */ - err = lustre_msg_get_status(req->rq_repmsg); - if (err == -ENODEV || err == -EAGAIN) - return 0; - } + /* Suppress most unavailable/again reconnect requests, but + * print occasionally so it is clear client is trying to + * connect to a server where no target is running. */ + err = lustre_msg_get_status(req->rq_repmsg); + if ((err == -ENODEV || err == -EAGAIN) && + req->rq_import->imp_conn_cnt % 30 != 20) + return false; + } - return 1; + return true; } /** @@ -1159,14 +1225,15 @@ static int ptlrpc_check_status(struct ptlrpc_request *req) err = lustre_msg_get_status(req->rq_repmsg); if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) { struct obd_import *imp = req->rq_import; + lnet_nid_t nid = imp->imp_connection->c_peer.nid; __u32 opc = lustre_msg_get_opc(req->rq_reqmsg); + if (ptlrpc_console_allow(req)) - LCONSOLE_ERROR_MSG(0x011, "%s: Communicating with %s," - " operation %s failed with %d.\n", - imp->imp_obd->obd_name, - libcfs_nid2str( - imp->imp_connection->c_peer.nid), - ll_opcode2str(opc), err); + LCONSOLE_ERROR_MSG(0x11, "%s: operation %s to node %s " + "failed: rc = %d\n", + imp->imp_obd->obd_name, + ll_opcode2str(opc), + libcfs_nid2str(nid), err); RETURN(err < 0 ? err : -EINVAL); } @@ -1216,14 +1283,15 @@ static int after_reply(struct ptlrpc_request *req) struct obd_device *obd = req->rq_import->imp_obd; int rc; struct timeval work_start; + __u64 committed; long timediff; ENTRY; LASSERT(obd != NULL); /* repbuf must be unlinked */ - LASSERT(!req->rq_receiving_reply && !req->rq_must_unlink); + LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked); - if (req->rq_reply_truncate) { + if (req->rq_reply_truncated) { if (ptlrpc_no_resend(req)) { DEBUG_REQ(D_ERROR, req, "reply buffer overflow," " expected: %d, actual size: %d", @@ -1244,6 +1312,9 @@ static int after_reply(struct ptlrpc_request *req) RETURN(0); } + do_gettimeofday(&work_start); + timediff = cfs_timeval_sub(&work_start, &req->rq_sent_tv, NULL); + /* * NB Until this point, the whole of the incoming message, * including buflens, status etc is in the sender's byte order. @@ -1270,18 +1341,11 @@ static int after_reply(struct ptlrpc_request *req) time_t now = cfs_time_current_sec(); DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS"); + spin_lock(&req->rq_lock); req->rq_resend = 1; + spin_unlock(&req->rq_lock); req->rq_nr_resend++; - /* allocate new xid to avoid reply reconstruction */ - if (!req->rq_bulk) { - /* new xid is already allocated for bulk in - * ptlrpc_check_set() */ - req->rq_xid = ptlrpc_next_xid(); - DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for " - "resend on EINPROGRESS"); - } - /* Readjust the timeout for current conditions */ ptlrpc_at_set_req_timeout(req); /* delay resend to give a chance to the server to get ready. 
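The resend delay referred to in the comment above can be pictured with a small sketch (illustrative only, outside the patch; example_resend_time is hypothetical and assumes a one-second step per resend, capped at the current rq_timeout):

static time_t example_resend_time(struct ptlrpc_request *req)
{
	time_t delay = req->rq_nr_resend;	/* one extra second per resend */

	if (delay > req->rq_timeout)
		delay = req->rq_timeout;	/* cap at the request timeout */

	return cfs_time_current_sec() + delay;	/* earliest time to resend */
}
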
@@ -1296,8 +1360,6 @@ static int after_reply(struct ptlrpc_request *req) RETURN(0); } - do_gettimeofday(&work_start); - timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL); if (obd->obd_svc_stats != NULL) { lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, timediff); @@ -1320,20 +1382,20 @@ static int after_reply(struct ptlrpc_request *req) rc = ptlrpc_check_status(req); imp->imp_connect_error = rc; - if (rc) { - /* - * Either we've been evicted, or the server has failed for - * some reason. Try to reconnect, and if that fails, punt to - * the upcall. - */ - if (ll_rpc_recoverable_error(rc)) { - if (req->rq_send_state != LUSTRE_IMP_FULL || - imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) { - RETURN(rc); - } - ptlrpc_request_handle_notconn(req); - RETURN(rc); - } + if (rc) { + /* + * Either we've been evicted, or the server has failed for + * some reason. Try to reconnect, and if that fails, punt to + * the upcall. + */ + if (ptlrpc_recoverable_error(rc)) { + if (req->rq_send_state != LUSTRE_IMP_FULL || + imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) { + RETURN(rc); + } + ptlrpc_request_handle_notconn(req); + RETURN(rc); + } } else { /* * Let's look if server sent slv. Do it only for RPC with @@ -1376,19 +1438,18 @@ static int after_reply(struct ptlrpc_request *req) /* * Replay-enabled imports return commit-status information. */ - if (lustre_msg_get_last_committed(req->rq_repmsg)) { - imp->imp_peer_committed_transno = - lustre_msg_get_last_committed(req->rq_repmsg); - } + committed = lustre_msg_get_last_committed(req->rq_repmsg); + if (likely(committed > imp->imp_peer_committed_transno)) + imp->imp_peer_committed_transno = committed; ptlrpc_free_committed(imp); - if (!cfs_list_empty(&imp->imp_replay_list)) { + if (!list_empty(&imp->imp_replay_list)) { struct ptlrpc_request *last; - last = cfs_list_entry(imp->imp_replay_list.prev, - struct ptlrpc_request, - rq_replay_list); + last = list_entry(imp->imp_replay_list.prev, + struct ptlrpc_request, + rq_replay_list); /* * Requests with rq_replay stay on the list even if no * commit is expected. @@ -1411,10 +1472,19 @@ static int after_reply(struct ptlrpc_request *req) static int ptlrpc_send_new_req(struct ptlrpc_request *req) { struct obd_import *imp = req->rq_import; + struct list_head *tmp; + __u64 min_xid = ~0ULL; int rc; ENTRY; LASSERT(req->rq_phase == RQ_PHASE_NEW); + + /* do not try to go further if there is not enough memory in enc_pool */ + if (req->rq_sent && req->rq_bulk != NULL) + if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() && + pool_is_at_full_capacity()) + RETURN(-ENOMEM); + if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) && (!req->rq_generation_set || req->rq_import_generation == imp->imp_generation)) @@ -1424,6 +1494,16 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req) spin_lock(&imp->imp_lock); + /* the very first time we assign XID. it's important to assign XID + * and put it on the list atomically, so that the lowest assigned + * XID is always known. 
this is vital for multislot last_rcvd */ + if (req->rq_send_state == LUSTRE_IMP_REPLAY) { + LASSERT(req->rq_xid != 0); + } else { + LASSERT(req->rq_xid == 0); + req->rq_xid = ptlrpc_next_xid(); + } + if (!req->rq_generation_set) req->rq_import_generation = imp->imp_generation; @@ -1436,8 +1516,8 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req) "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg), ptlrpc_import_state_name(req->rq_send_state), ptlrpc_import_state_name(imp->imp_state)); - LASSERT(cfs_list_empty(&req->rq_list)); - cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list); + LASSERT(list_empty(&req->rq_list)); + list_add_tail(&req->rq_list, &imp->imp_delayed_list); atomic_inc(&req->rq_import->imp_inflight); spin_unlock(&imp->imp_lock); RETURN(0); @@ -1450,11 +1530,28 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req) RETURN(rc); } - LASSERT(cfs_list_empty(&req->rq_list)); - cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list); + LASSERT(list_empty(&req->rq_list)); + list_add_tail(&req->rq_list, &imp->imp_sending_list); atomic_inc(&req->rq_import->imp_inflight); + + /* find the lowest unreplied XID */ + list_for_each(tmp, &imp->imp_delayed_list) { + struct ptlrpc_request *r; + r = list_entry(tmp, struct ptlrpc_request, rq_list); + if (r->rq_xid < min_xid) + min_xid = r->rq_xid; + } + list_for_each(tmp, &imp->imp_sending_list) { + struct ptlrpc_request *r; + r = list_entry(tmp, struct ptlrpc_request, rq_list); + if (r->rq_xid < min_xid) + min_xid = r->rq_xid; + } spin_unlock(&imp->imp_lock); + if (likely(min_xid != ~0ULL)) + lustre_msg_set_last_xid(req->rq_reqmsg, min_xid - 1); + lustre_msg_set_status(req->rq_reqmsg, current_pid()); rc = sptlrpc_req_refresh_ctx(req, -1); @@ -1478,6 +1575,16 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req) lustre_msg_get_opc(req->rq_reqmsg)); rc = ptl_send_rpc(req, 0); + if (rc == -ENOMEM) { + spin_lock(&imp->imp_lock); + if (!list_empty(&req->rq_list)) { + list_del_init(&req->rq_list); + atomic_dec(&req->rq_import->imp_inflight); + } + spin_unlock(&imp->imp_lock); + ptlrpc_rqphase_move(req, RQ_PHASE_NEW); + RETURN(rc); + } if (rc) { DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc); spin_lock(&req->rq_lock); @@ -1517,23 +1624,35 @@ static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set) * and no more replies are expected. * (it is possible to get less replies than requests sent e.g. due to timed out * requests or requests that we had trouble to send out) + * + * NOTE: This function contains a potential schedule point (cond_resched()). */ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) { - cfs_list_t *tmp, *next; - int force_timer_recalc = 0; - ENTRY; + struct list_head *tmp, *next; + struct list_head comp_reqs; + int force_timer_recalc = 0; + ENTRY; if (atomic_read(&set->set_remaining) == 0) - RETURN(1); + RETURN(1); - cfs_list_for_each_safe(tmp, next, &set->set_requests) { - struct ptlrpc_request *req = - cfs_list_entry(tmp, struct ptlrpc_request, - rq_set_chain); - struct obd_import *imp = req->rq_import; - int unregistered = 0; - int rc = 0; + INIT_LIST_HEAD(&comp_reqs); + list_for_each_safe(tmp, next, &set->set_requests) { + struct ptlrpc_request *req = + list_entry(tmp, struct ptlrpc_request, + rq_set_chain); + struct obd_import *imp = req->rq_import; + int unregistered = 0; + int rc = 0; + + /* This schedule point is mainly for the ptlrpcd caller of this + * function. 
Most ptlrpc sets are not long-lived and unbounded + * in length, but at the least the set used by the ptlrpcd is. + * Since the processing time is unbounded, we need to insert an + * explicit schedule point to make the thread well-behaved. + */ + cond_resched(); if (req->rq_phase == RQ_PHASE_NEW && ptlrpc_send_new_req(req)) { @@ -1593,8 +1712,10 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) ptlrpc_rqphase_move(req, req->rq_next_phase); } - if (req->rq_phase == RQ_PHASE_COMPLETE) + if (req->rq_phase == RQ_PHASE_COMPLETE) { + list_move_tail(&req->rq_set_chain, &comp_reqs); continue; + } if (req->rq_phase == RQ_PHASE_INTERPRET) GOTO(interpret, req->rq_status); @@ -1634,7 +1755,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) /* ptlrpc_set_wait->l_wait_event sets lwi_allow_intr * so it sets rq_intr regardless of individual rpc - * timeouts. The synchronous IO waiting path sets + * timeouts. The synchronous IO waiting path sets * rq_intr irrespective of whether ptlrpcd * has seen a timeout. Our policy is to only interpret * interrupted rpcs after they have timed out, so we @@ -1653,15 +1774,17 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) req->rq_waiting || req->rq_wait_ctx) { int status; - if (!ptlrpc_unregister_reply(req, 1)) - continue; + if (!ptlrpc_unregister_reply(req, 1)) { + ptlrpc_unregister_bulk(req, 1); + continue; + } spin_lock(&imp->imp_lock); if (ptlrpc_import_delay_req(imp, req, &status)){ /* put on delay list - only if we wait * recovery finished - before send */ - cfs_list_del_init(&req->rq_list); - cfs_list_add_tail(&req->rq_list, + list_del_init(&req->rq_list); + list_add_tail(&req->rq_list, &imp-> imp_delayed_list); spin_unlock(&imp->imp_lock); @@ -1684,8 +1807,8 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) GOTO(interpret, req->rq_status); } - cfs_list_del_init(&req->rq_list); - cfs_list_add_tail(&req->rq_list, + list_del_init(&req->rq_list); + list_add_tail(&req->rq_list, &imp->imp_sending_list); spin_unlock(&imp->imp_lock); @@ -1700,20 +1823,10 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) spin_lock(&req->rq_lock); req->rq_resend = 1; spin_unlock(&req->rq_lock); - if (req->rq_bulk) { - __u64 old_xid; - - if (!ptlrpc_unregister_bulk(req, 1)) - continue; - - /* ensure previous bulk fails */ - old_xid = req->rq_xid; - req->rq_xid = ptlrpc_next_xid(); - CDEBUG(D_HA, "resend bulk " - "old x"LPU64 - " new x"LPU64"\n", - old_xid, req->rq_xid); - } + + if (req->rq_bulk != NULL && + !ptlrpc_unregister_bulk(req, 1)) + continue; } /* * rq_wait_ctx is only touched by ptlrpcd, @@ -1741,6 +1854,14 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) } rc = ptl_send_rpc(req, 0); + if (rc == -ENOMEM) { + spin_lock(&imp->imp_lock); + if (!list_empty(&req->rq_list)) + list_del_init(&req->rq_list); + spin_unlock(&imp->imp_lock); + ptlrpc_rqphase_move(req, RQ_PHASE_NEW); + continue; + } if (rc) { DEBUG_REQ(D_HA, req, "send failed: rc = %d", rc); @@ -1853,8 +1974,8 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) * may happen in the case of marking it erroneous for the case * ptlrpc_import_delay_req(req, status) find it impossible to * allow sending this rpc and returns *status != 0. 
*/ - if (!cfs_list_empty(&req->rq_list)) { - cfs_list_del_init(&req->rq_list); + if (!list_empty(&req->rq_list)) { + list_del_init(&req->rq_list); atomic_dec(&imp->imp_inflight); } spin_unlock(&imp->imp_lock); @@ -1869,7 +1990,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) /* free the request that has just been completed * in order not to pollute set->set_requests */ - cfs_list_del_init(&req->rq_set_chain); + list_del_init(&req->rq_set_chain); spin_lock(&req->rq_lock); req->rq_set = NULL; req->rq_invalid_rqset = 0; @@ -1879,9 +2000,15 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) if (req->rq_status != 0) set->set_rc = req->rq_status; ptlrpc_req_finished(req); + } else { + list_move_tail(&req->rq_set_chain, &comp_reqs); } } + /* move completed request at the head of list so it's easier for + * caller to find them */ + list_splice(&comp_reqs, &set->set_requests); + /* If we hit an error, we want to recover promptly. */ RETURN(atomic_read(&set->set_remaining) == 0 || force_timer_recalc); } @@ -1965,20 +2092,20 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink) */ int ptlrpc_expired_set(void *data) { - struct ptlrpc_request_set *set = data; - cfs_list_t *tmp; - time_t now = cfs_time_current_sec(); - ENTRY; + struct ptlrpc_request_set *set = data; + struct list_head *tmp; + time_t now = cfs_time_current_sec(); + ENTRY; - LASSERT(set != NULL); + LASSERT(set != NULL); - /* - * A timeout expired. See which reqs it applies to... - */ - cfs_list_for_each (tmp, &set->set_requests) { - struct ptlrpc_request *req = - cfs_list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + /* + * A timeout expired. See which reqs it applies to... + */ + list_for_each(tmp, &set->set_requests) { + struct ptlrpc_request *req = + list_entry(tmp, struct ptlrpc_request, + rq_set_chain); /* don't expire request waiting for context */ if (req->rq_wait_ctx) @@ -2006,7 +2133,6 @@ int ptlrpc_expired_set(void *data) */ RETURN(1); } -EXPORT_SYMBOL(ptlrpc_expired_set); /** * Sets rq_intr flag in \a req under spinlock. @@ -2023,42 +2149,40 @@ EXPORT_SYMBOL(ptlrpc_mark_interrupted); * Interrupts (sets interrupted flag) all uncompleted requests in * a set \a data. Callback for l_wait_event for interruptible waits. */ -void ptlrpc_interrupted_set(void *data) +static void ptlrpc_interrupted_set(void *data) { - struct ptlrpc_request_set *set = data; - cfs_list_t *tmp; + struct ptlrpc_request_set *set = data; + struct list_head *tmp; - LASSERT(set != NULL); - CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set); + LASSERT(set != NULL); + CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set); - cfs_list_for_each(tmp, &set->set_requests) { - struct ptlrpc_request *req = - cfs_list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_for_each(tmp, &set->set_requests) { + struct ptlrpc_request *req = + list_entry(tmp, struct ptlrpc_request, rq_set_chain); - if (req->rq_phase != RQ_PHASE_RPC && - req->rq_phase != RQ_PHASE_UNREGISTERING) - continue; + if (req->rq_phase != RQ_PHASE_RPC && + req->rq_phase != RQ_PHASE_UNREGISTERING) + continue; - ptlrpc_mark_interrupted(req); - } + ptlrpc_mark_interrupted(req); + } } -EXPORT_SYMBOL(ptlrpc_interrupted_set); /** * Get the smallest timeout in the set; this does NOT set a timeout. 
*/ int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set) { - cfs_list_t *tmp; - time_t now = cfs_time_current_sec(); - int timeout = 0; - struct ptlrpc_request *req; - int deadline; - ENTRY; + struct list_head *tmp; + time_t now = cfs_time_current_sec(); + int timeout = 0; + struct ptlrpc_request *req; + int deadline; + ENTRY; - cfs_list_for_each(tmp, &set->set_requests) { - req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain); + list_for_each(tmp, &set->set_requests) { + req = list_entry(tmp, struct ptlrpc_request, rq_set_chain); /* * Request in-flight? @@ -2094,7 +2218,6 @@ int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set) } RETURN(timeout); } -EXPORT_SYMBOL(ptlrpc_set_next_timeout); /** * Send all unset request from the set and then wait untill all @@ -2104,7 +2227,7 @@ EXPORT_SYMBOL(ptlrpc_set_next_timeout); */ int ptlrpc_set_wait(struct ptlrpc_request_set *set) { - cfs_list_t *tmp; + struct list_head *tmp; struct ptlrpc_request *req; struct l_wait_info lwi; int rc, timeout; @@ -2113,14 +2236,14 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) if (set->set_producer) (void)ptlrpc_set_producer(set); else - cfs_list_for_each(tmp, &set->set_requests) { - req = cfs_list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_for_each(tmp, &set->set_requests) { + req = list_entry(tmp, struct ptlrpc_request, + rq_set_chain); if (req->rq_phase == RQ_PHASE_NEW) (void)ptlrpc_send_new_req(req); } - if (cfs_list_empty(&set->set_requests)) + if (list_empty(&set->set_requests)) RETURN(0); do { @@ -2131,21 +2254,21 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n", set, timeout); - if (timeout == 0 && !cfs_signal_pending()) + if (timeout == 0 && !signal_pending(current)) /* * No requests are in-flight (ether timed out * or delayed), so we can allow interrupts. * We still want to block for a limited time, * so we allow interrupts during the timeout. */ - lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1), + lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1), ptlrpc_expired_set, ptlrpc_interrupted_set, set); else /* * At least one request is in flight, so no * interrupts are allowed. Wait until all - * complete, or an in-flight req times out. + * complete, or an in-flight req times out. */ lwi = LWI_TIMEOUT(cfs_time_seconds(timeout? timeout : 1), ptlrpc_expired_set, set); @@ -2156,7 +2279,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) * pending when we started, we need to handle it now or we risk * it being ignored forever */ if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr && - cfs_signal_pending()) { + signal_pending(current)) { sigset_t blocked_sigs = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); @@ -2164,7 +2287,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) * like SIGINT or SIGKILL. We still ignore less * important signals since ptlrpc set is not easily * reentrant from userspace again */ - if (cfs_signal_pending()) + if (signal_pending(current)) ptlrpc_interrupted_set(set); cfs_restore_sigs(blocked_sigs); } @@ -2179,9 +2302,9 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) * I don't really care if we go once more round the loop in * the error cases -eeb. 
*/ if (rc == 0 && atomic_read(&set->set_remaining) == 0) { - cfs_list_for_each(tmp, &set->set_requests) { - req = cfs_list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_for_each(tmp, &set->set_requests) { + req = list_entry(tmp, struct ptlrpc_request, + rq_set_chain); spin_lock(&req->rq_lock); req->rq_invalid_rqset = 1; spin_unlock(&req->rq_lock); @@ -2192,8 +2315,8 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) LASSERT(atomic_read(&set->set_remaining) == 0); rc = set->set_rc; /* rq_status of already freed requests if any */ - cfs_list_for_each(tmp, &set->set_requests) { - req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain); + list_for_each(tmp, &set->set_requests) { + req = list_entry(tmp, struct ptlrpc_request, rq_set_chain); LASSERT(req->rq_phase == RQ_PHASE_COMPLETE); if (req->rq_status != 0) @@ -2208,9 +2331,9 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) struct ptlrpc_set_cbdata *cbdata, *n; int err; - cfs_list_for_each_entry_safe(cbdata, n, + list_for_each_entry_safe(cbdata, n, &set->set_cblist, psc_item) { - cfs_list_del_init(&cbdata->psc_item); + list_del_init(&cbdata->psc_item); err = cbdata->psc_interpret(set, cbdata->psc_data, rc); if (err && !rc) rc = err; @@ -2232,31 +2355,30 @@ EXPORT_SYMBOL(ptlrpc_set_wait); */ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) { - ENTRY; - if (request == NULL) { - EXIT; - return; - } + ENTRY; + + if (request == NULL) + RETURN_EXIT; - LASSERTF(!request->rq_receiving_reply, "req %p\n", request); - LASSERTF(request->rq_rqbd == NULL, "req %p\n",request);/* client-side */ - LASSERTF(cfs_list_empty(&request->rq_list), "req %p\n", request); - LASSERTF(cfs_list_empty(&request->rq_set_chain), "req %p\n", request); - LASSERTF(cfs_list_empty(&request->rq_exp_list), "req %p\n", request); - LASSERTF(!request->rq_replay, "req %p\n", request); + LASSERT(!request->rq_srv_req); + LASSERT(request->rq_export == NULL); + LASSERTF(!request->rq_receiving_reply, "req %p\n", request); + LASSERTF(list_empty(&request->rq_list), "req %p\n", request); + LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request); + LASSERTF(!request->rq_replay, "req %p\n", request); - req_capsule_fini(&request->rq_pill); + req_capsule_fini(&request->rq_pill); - /* We must take it off the imp_replay_list first. Otherwise, we'll set - * request->rq_reqmsg to NULL while osc_close is dereferencing it. */ - if (request->rq_import != NULL) { + /* We must take it off the imp_replay_list first. Otherwise, we'll set + * request->rq_reqmsg to NULL while osc_close is dereferencing it. 
*/ + if (request->rq_import != NULL) { if (!locked) spin_lock(&request->rq_import->imp_lock); - cfs_list_del_init(&request->rq_replay_list); + list_del_init(&request->rq_replay_list); if (!locked) spin_unlock(&request->rq_import->imp_lock); } - LASSERTF(cfs_list_empty(&request->rq_replay_list), "req %p\n", request); + LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request); if (atomic_read(&request->rq_refcount) != 0) { DEBUG_REQ(D_ERROR, request, @@ -2266,16 +2388,13 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) if (request->rq_repbuf != NULL) sptlrpc_cli_free_repbuf(request); - if (request->rq_export != NULL) { - class_export_put(request->rq_export); - request->rq_export = NULL; - } + if (request->rq_import != NULL) { class_import_put(request->rq_import); request->rq_import = NULL; } if (request->rq_bulk != NULL) - ptlrpc_free_bulk_pin(request->rq_bulk); + ptlrpc_free_bulk(request->rq_bulk); if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL) sptlrpc_cli_free_reqbuf(request); @@ -2293,14 +2412,13 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked); /** * Drop one request reference. Must be called with import imp_lock held. - * When reference count drops to zero, reuqest is freed. + * When reference count drops to zero, request is freed. */ void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request) { assert_spin_locked(&request->rq_import->imp_lock); (void)__ptlrpc_req_finished(request, 1); } -EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock); /** * Helper function @@ -2357,7 +2475,7 @@ EXPORT_SYMBOL(ptlrpc_req_xid); * The request owner (i.e. the thread doing the I/O) must call... * Returns 0 on success or 1 if unregistering cannot be made. */ -int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) +static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) { int rc; struct l_wait_info lwi; @@ -2405,12 +2523,10 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) * unlinked before returning a req to the pool. */ for (;;) { -#ifdef __KERNEL__ /* The wq argument is ignored by user-space wait_event macros */ wait_queue_head_t *wq = (request->rq_set != NULL) ? 
&request->rq_set->set_waitq : &request->rq_reply_waitq; -#endif /* Network access will complete in finite time but the HUGE * timeout lets us CWARN for visibility of sluggish NALs */ lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK), @@ -2423,13 +2539,14 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) } LASSERT(rc == -ETIMEDOUT); - DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout " - "rvcng=%d unlnk=%d", request->rq_receiving_reply, - request->rq_must_unlink); + DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout " + "receiving_reply=%d req_ulinked=%d reply_unlinked=%d", + request->rq_receiving_reply, + request->rq_req_unlinked, + request->rq_reply_unlinked); } RETURN(0); } -EXPORT_SYMBOL(ptlrpc_unregister_reply); static void ptlrpc_free_request(struct ptlrpc_request *req) { @@ -2439,7 +2556,7 @@ static void ptlrpc_free_request(struct ptlrpc_request *req) if (req->rq_commit_cb != NULL) req->rq_commit_cb(req); - cfs_list_del_init(&req->rq_replay_list); + list_del_init(&req->rq_replay_list); __ptlrpc_req_finished(req, 1); } @@ -2452,7 +2569,7 @@ void ptlrpc_request_committed(struct ptlrpc_request *req, int force) struct obd_import *imp = req->rq_import; spin_lock(&imp->imp_lock); - if (cfs_list_empty(&req->rq_replay_list)) { + if (list_empty(&req->rq_replay_list)) { spin_unlock(&imp->imp_lock); return; } @@ -2492,13 +2609,14 @@ void ptlrpc_free_committed(struct obd_import *imp) imp->imp_obd->obd_name, imp->imp_peer_committed_transno, imp->imp_generation); - if (imp->imp_generation != imp->imp_last_generation_checked) + if (imp->imp_generation != imp->imp_last_generation_checked || + imp->imp_last_transno_checked == 0) skip_committed_list = false; imp->imp_last_transno_checked = imp->imp_peer_committed_transno; imp->imp_last_generation_checked = imp->imp_generation; - cfs_list_for_each_entry_safe(req, saved, &imp->imp_replay_list, + list_for_each_entry_safe(req, saved, &imp->imp_replay_list, rq_replay_list) { /* XXX ok to remove when 1357 resolved - rread 05/29/03 */ LASSERT(req != last_req); @@ -2521,7 +2639,7 @@ void ptlrpc_free_committed(struct obd_import *imp) if (req->rq_replay) { DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)"); - cfs_list_move_tail(&req->rq_replay_list, + list_move_tail(&req->rq_replay_list, &imp->imp_committed_list); continue; } @@ -2535,12 +2653,15 @@ free_req: if (skip_committed_list) GOTO(out, 0); - cfs_list_for_each_entry_safe(req, saved, &imp->imp_committed_list, + list_for_each_entry_safe(req, saved, &imp->imp_committed_list, rq_replay_list) { LASSERT(req->rq_transno != 0); if (req->rq_import_generation < imp->imp_generation) { DEBUG_REQ(D_RPCTRACE, req, "free stale open request"); ptlrpc_free_request(req); + } else if (!req->rq_replay) { + DEBUG_REQ(D_RPCTRACE, req, "free closed open request"); + ptlrpc_free_request(req); } } out: @@ -2552,7 +2673,6 @@ void ptlrpc_cleanup_client(struct obd_import *imp) ENTRY; EXIT; } -EXPORT_SYMBOL(ptlrpc_cleanup_client); /** * Schedule previously sent request for resend. @@ -2563,25 +2683,26 @@ EXPORT_SYMBOL(ptlrpc_cleanup_client); void ptlrpc_resend_req(struct ptlrpc_request *req) { DEBUG_REQ(D_HA, req, "going to resend"); + spin_lock(&req->rq_lock); + + /* Request got reply but linked to the import list still. + Let ptlrpc_check_set() to process it. 
*/ + if (ptlrpc_client_replied(req)) { + spin_unlock(&req->rq_lock); + DEBUG_REQ(D_HA, req, "it has reply, so skip it"); + return; + } + lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 }); req->rq_status = -EAGAIN; - spin_lock(&req->rq_lock); req->rq_resend = 1; req->rq_net_err = 0; req->rq_timedout = 0; - if (req->rq_bulk) { - __u64 old_xid = req->rq_xid; - /* ensure previous bulk fails */ - req->rq_xid = ptlrpc_next_xid(); - CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n", - old_xid, req->rq_xid); - } ptlrpc_client_wake_req(req); spin_unlock(&req->rq_lock); } -EXPORT_SYMBOL(ptlrpc_resend_req); /* XXX: this function and rq_status are currently unused */ void ptlrpc_restart_req(struct ptlrpc_request *req) @@ -2595,7 +2716,6 @@ void ptlrpc_restart_req(struct ptlrpc_request *req) ptlrpc_client_wake_req(req); spin_unlock(&req->rq_lock); } -EXPORT_SYMBOL(ptlrpc_restart_req); /** * Grab additional reference on a request \a req @@ -2615,7 +2735,7 @@ EXPORT_SYMBOL(ptlrpc_request_addref); void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, struct obd_import *imp) { - cfs_list_t *tmp; + struct list_head *tmp; assert_spin_locked(&imp->imp_lock); @@ -2628,19 +2748,23 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, as resent replayed requests. */ lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT); - /* don't re-add requests that have been replayed */ - if (!cfs_list_empty(&req->rq_replay_list)) - return; + /* don't re-add requests that have been replayed */ + if (!list_empty(&req->rq_replay_list)) + return; - lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY); + lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY); - LASSERT(imp->imp_replayable); - /* Balanced in ptlrpc_free_committed, usually. */ - ptlrpc_request_addref(req); - cfs_list_for_each_prev(tmp, &imp->imp_replay_list) { - struct ptlrpc_request *iter = - cfs_list_entry(tmp, struct ptlrpc_request, - rq_replay_list); + spin_lock(&req->rq_lock); + req->rq_resend = 0; + spin_unlock(&req->rq_lock); + + LASSERT(imp->imp_replayable); + /* Balanced in ptlrpc_free_committed, usually. */ + ptlrpc_request_addref(req); + list_for_each_prev(tmp, &imp->imp_replay_list) { + struct ptlrpc_request *iter = list_entry(tmp, + struct ptlrpc_request, + rq_replay_list); /* We may have duplicate transnos if we create and then * open a file, or for closes retained if to match creating @@ -2657,13 +2781,12 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, continue; } - cfs_list_add(&req->rq_replay_list, &iter->rq_replay_list); - return; - } + list_add(&req->rq_replay_list, &iter->rq_replay_list); + return; + } - cfs_list_add(&req->rq_replay_list, &imp->imp_replay_list); + list_add(&req->rq_replay_list, &imp->imp_replay_list); } -EXPORT_SYMBOL(ptlrpc_retain_replayable_request); /** * Send request and wait until it completes. 
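The synchronous path in the next hunk is typically driven like this (sketch, outside the patch; example_sync_rpc is a hypothetical wrapper):

static int example_sync_rpc(struct ptlrpc_request *req)
{
	int rc;

	/* ptlrpc_queue_wait() builds a one-off set, sends the request
	 * and blocks until the reply has been processed */
	rc = ptlrpc_queue_wait(req);

	ptlrpc_req_finished(req);	/* drop the caller's reference */
	return rc;
}
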
@@ -2678,11 +2801,11 @@ int ptlrpc_queue_wait(struct ptlrpc_request *req) LASSERT(req->rq_set == NULL); LASSERT(!req->rq_receiving_reply); - set = ptlrpc_prep_set(); - if (set == NULL) { - CERROR("Unable to allocate ptlrpc set."); - RETURN(-ENOMEM); - } + set = ptlrpc_prep_set(); + if (set == NULL) { + CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM); + RETURN(-ENOMEM); + } /* for distributed debugging */ lustre_msg_set_status(req->rq_reqmsg, current_pid()); @@ -2697,14 +2820,9 @@ int ptlrpc_queue_wait(struct ptlrpc_request *req) } EXPORT_SYMBOL(ptlrpc_queue_wait); -struct ptlrpc_replay_async_args { - int praa_old_state; - int praa_old_status; -}; - /** * Callback used for replayed requests reply processing. - * In case of succesful reply calls registeresd request replay callback. + * In case of successful reply calls registered request replay callback. * In case of error restart replay process. */ static int ptlrpc_replay_interpret(const struct lu_env *env, @@ -2717,10 +2835,15 @@ static int ptlrpc_replay_interpret(const struct lu_env *env, ENTRY; atomic_dec(&imp->imp_replay_inflight); - if (!ptlrpc_client_replied(req)) { - CERROR("request replay timed out, restarting recovery\n"); - GOTO(out, rc = -ETIMEDOUT); - } + /* Note: if it is bulk replay (MDS-MDS replay), then even if + * server got the request, but bulk transfer timeout, let's + * replay the bulk req again */ + if (!ptlrpc_client_replied(req) || + (req->rq_bulk != NULL && + lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) { + DEBUG_REQ(D_ERROR, req, "request replay timed out.\n"); + GOTO(out, rc = -ETIMEDOUT); + } if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR && (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN || @@ -2835,32 +2958,32 @@ int ptlrpc_replay_req(struct ptlrpc_request *req) atomic_inc(&req->rq_import->imp_replay_inflight); ptlrpc_request_addref(req); /* ptlrpcd needs a ref */ - ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1); + ptlrpcd_add_req(req); RETURN(0); } -EXPORT_SYMBOL(ptlrpc_replay_req); /** * Aborts all in-flight request on import \a imp sending and delayed lists */ void ptlrpc_abort_inflight(struct obd_import *imp) { - cfs_list_t *tmp, *n; - ENTRY; + struct list_head *tmp, *n; + ENTRY; - /* Make sure that no new requests get processed for this import. - * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing - * this flag and then putting requests on sending_list or delayed_list. - */ + /* Make sure that no new requests get processed for this import. + * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing + * this flag and then putting requests on sending_list or delayed_list. + */ spin_lock(&imp->imp_lock); - /* XXX locking? Maybe we should remove each request with the list - * locked? Also, how do we know if the requests on the list are - * being freed at this time? - */ - cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) { - struct ptlrpc_request *req = - cfs_list_entry(tmp, struct ptlrpc_request, rq_list); + /* XXX locking? Maybe we should remove each request with the list + * locked? Also, how do we know if the requests on the list are + * being freed at this time? 
+ */ + list_for_each_safe(tmp, n, &imp->imp_sending_list) { + struct ptlrpc_request *req = list_entry(tmp, + struct ptlrpc_request, + rq_list); DEBUG_REQ(D_RPCTRACE, req, "inflight"); @@ -2873,9 +2996,9 @@ void ptlrpc_abort_inflight(struct obd_import *imp) spin_unlock(&req->rq_lock); } - cfs_list_for_each_safe(tmp, n, &imp->imp_delayed_list) { + list_for_each_safe(tmp, n, &imp->imp_delayed_list) { struct ptlrpc_request *req = - cfs_list_entry(tmp, struct ptlrpc_request, rq_list); + list_entry(tmp, struct ptlrpc_request, rq_list); DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req"); @@ -2897,21 +3020,20 @@ void ptlrpc_abort_inflight(struct obd_import *imp) EXIT; } -EXPORT_SYMBOL(ptlrpc_abort_inflight); /** * Abort all uncompleted requests in request set \a set */ void ptlrpc_abort_set(struct ptlrpc_request_set *set) { - cfs_list_t *tmp, *pos; + struct list_head *tmp, *pos; - LASSERT(set != NULL); + LASSERT(set != NULL); - cfs_list_for_each_safe(pos, tmp, &set->set_requests) { - struct ptlrpc_request *req = - cfs_list_entry(pos, struct ptlrpc_request, - rq_set_chain); + list_for_each_safe(pos, tmp, &set->set_requests) { + struct ptlrpc_request *req = + list_entry(pos, struct ptlrpc_request, + rq_set_chain); spin_lock(&req->rq_lock); if (req->rq_phase != RQ_PHASE_RPC) { @@ -2986,7 +3108,43 @@ __u64 ptlrpc_next_xid(void) return next; } -EXPORT_SYMBOL(ptlrpc_next_xid); + +/** + * If request has a new allocated XID (new request or EINPROGRESS resend), + * use this XID as matchbits of bulk, otherwise allocate a new matchbits for + * request to ensure previous bulk fails and avoid problems with lost replies + * and therefore several transfers landing into the same buffer from different + * sending attempts. + */ +void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req) +{ + struct ptlrpc_bulk_desc *bd = req->rq_bulk; + + LASSERT(bd != NULL); + + if (!req->rq_resend || req->rq_nr_resend != 0) { + /* this request has a new xid, just use it as bulk matchbits */ + req->rq_mbits = req->rq_xid; + + } else { /* needs to generate a new matchbits for resend */ + __u64 old_mbits = req->rq_mbits; + + if ((bd->bd_import->imp_connect_data.ocd_connect_flags & + OBD_CONNECT_BULK_MBITS) != 0) + req->rq_mbits = ptlrpc_next_xid(); + else /* old version transfers rq_xid to peer as matchbits */ + req->rq_mbits = req->rq_xid = ptlrpc_next_xid(); + + CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n", + old_mbits, req->rq_mbits); + } + + /* For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so + * that server can infer the number of bulks that were prepared, + * see LU-1431 */ + req->rq_mbits += ((bd->bd_iov_count + LNET_MAX_IOV - 1) / + LNET_MAX_IOV) - 1; +} /** * Get a glimpse at what next xid value might have been. 
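To make the multi-bulk matchbits arithmetic in ptlrpc_set_bulk_mbits() above concrete, a worked sketch (outside the patch; assumes LNET_MAX_IOV is 256, its customary value, and example_last_mbits is hypothetical):

static __u64 example_last_mbits(__u64 first_mbits, unsigned int frags)
{
	/* e.g. 600 fragments -> ceil(600 / 256) == 3 bulk MDs, so the
	 * last matchbits is first_mbits + 2; the server infers the
	 * number of prepared bulks from this value (see LU-1431) */
	unsigned int bulks = (frags + LNET_MAX_IOV - 1) / LNET_MAX_IOV;

	return first_mbits + bulks - 1;
}
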
@@ -3044,7 +3202,7 @@ static void ptlrpcd_add_work_req(struct ptlrpc_request *req) req->rq_xid = ptlrpc_next_xid(); req->rq_import_generation = req->rq_import->imp_generation; - ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1); + ptlrpcd_add_req(req); } static int work_interpreter(const struct lu_env *env, @@ -3080,7 +3238,7 @@ static int ptlrpcd_check_work(struct ptlrpc_request *req) void *ptlrpcd_alloc_work(struct obd_import *imp, int (*cb)(const struct lu_env *, void *), void *cbdata) { - struct ptlrpc_request *req = NULL; + struct ptlrpc_request *req = NULL; struct ptlrpc_work_async_args *args; ENTRY; @@ -3096,27 +3254,16 @@ void *ptlrpcd_alloc_work(struct obd_import *imp, RETURN(ERR_PTR(-ENOMEM)); } + ptlrpc_cli_req_init(req); + req->rq_send_state = LUSTRE_IMP_FULL; req->rq_type = PTL_RPC_MSG_REQUEST; req->rq_import = class_import_get(imp); - req->rq_export = NULL; req->rq_interpret_reply = work_interpreter; /* don't want reply */ - req->rq_receiving_reply = 0; - req->rq_must_unlink = 0; req->rq_no_delay = req->rq_no_resend = 1; req->rq_pill.rc_fmt = (void *)&worker_format; - spin_lock_init(&req->rq_lock); - CFS_INIT_LIST_HEAD(&req->rq_list); - CFS_INIT_LIST_HEAD(&req->rq_replay_list); - CFS_INIT_LIST_HEAD(&req->rq_set_chain); - CFS_INIT_LIST_HEAD(&req->rq_history_list); - CFS_INIT_LIST_HEAD(&req->rq_exp_list); - init_waitqueue_head(&req->rq_reply_waitq); - init_waitqueue_head(&req->rq_set_waitq); - atomic_set(&req->rq_refcount, 1); - CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args)); args = ptlrpc_req_async_args(req); args->cb = cb;
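

For completeness, the ptlrpcd work-item API initialized in the last hunk is normally used as below (sketch, outside the patch; example_cb and example_start are hypothetical, and the handler is eventually released with ptlrpcd_destroy_work()):

static int example_cb(const struct lu_env *env, void *data)
{
	/* executed in ptlrpcd context each time the work fires */
	return 0;
}

static void *example_start(struct obd_import *imp, void *data)
{
	void *handler = ptlrpcd_alloc_work(imp, example_cb, data);

	if (IS_ERR(handler))
		return handler;		/* ERR_PTR(-ENOMEM) on failure */

	ptlrpcd_queue_work(handler);	/* schedule example_cb via ptlrpcd */
	return handler;
}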