X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fptlrpc%2Fclient.c;h=a791abf62ff635246b549537a60bf70929ccf94f;hp=de8af5b4196ce9cca1343c55a7d43a27dada2bcd;hb=01ca899324738343279c1d63823b7fab937197dc;hpb=2064c2a7e616b172f72462884b23d899bfc040ff
diff --git a/lustre/ptlrpc/client.c b/lustre/ptlrpc/client.c
index de8af5b..a791abf 100644
--- a/lustre/ptlrpc/client.c
+++ b/lustre/ptlrpc/client.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -29,8 +27,7 @@
  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011 Whamcloud, Inc.
- *
+ * Copyright (c) 2011, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -40,11 +37,6 @@
 /** Implementation of client-side PortalRPC interfaces */

 #define DEBUG_SUBSYSTEM S_RPC
-#ifndef __KERNEL__
-#include 
-#include 
-#include 
-#endif

 #include 
 #include 
@@ -55,6 +47,27 @@

 #include "ptlrpc_internal.h"

+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = {
+	.add_kiov_frag = ptlrpc_prep_bulk_page_pin,
+	.release_frags = ptlrpc_release_bulk_page_pin,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops);
+
+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
+	.add_kiov_frag = ptlrpc_prep_bulk_page_nopin,
+	.release_frags = ptlrpc_release_bulk_noop,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
+
+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kvec_ops = {
+	.add_iov_frag = ptlrpc_prep_bulk_frag,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kvec_ops);
+
+static int ptlrpc_send_new_req(struct ptlrpc_request *req);
+static int ptlrpcd_check_work(struct ptlrpc_request *req);
+static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
+
 /**
  * Initialize passed in client structure \a cl.
  */
@@ -65,6 +78,7 @@ void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
 	cl->cli_reply_portal = rep_portal;
 	cl->cli_name = name;
 }
+EXPORT_SYMBOL(ptlrpc_init_client);

 /**
  * Return PortalRPC connection for remote uuid \a uuid
@@ -76,6 +90,9 @@ struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
 	lnet_process_id_t peer;
 	int err;

+	/* ptlrpc_uuid_to_peer() initializes its 2nd parameter
+	 * before accessing its values. */
+	/* coverity[uninit_use_in_call] */
 	err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
 	if (err != 0) {
 		CNETERR("cannot find peer %s!\n", uuid->uuid);
@@ -94,46 +111,77 @@ struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
 }

 /**
- * Allocate and initialize new bulk descriptor
+ * Allocate and initialize new bulk descriptor on the sender.
  * Returns pointer to the descriptor or NULL on error.
*/ -static inline struct ptlrpc_bulk_desc *new_bulk(int npages, int type, int portal) -{ - struct ptlrpc_bulk_desc *desc; - - OBD_ALLOC(desc, offsetof (struct ptlrpc_bulk_desc, bd_iov[npages])); - if (!desc) - return NULL; - - cfs_spin_lock_init(&desc->bd_lock); - cfs_waitq_init(&desc->bd_waitq); - desc->bd_max_iov = npages; - desc->bd_iov_count = 0; - LNetInvalidateHandle(&desc->bd_md_h); - desc->bd_portal = portal; - desc->bd_type = type; - - return desc; +struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned nfrags, unsigned max_brw, + enum ptlrpc_bulk_op_type type, + unsigned portal, + const struct ptlrpc_bulk_frag_ops *ops) +{ + struct ptlrpc_bulk_desc *desc; + int i; + + /* ensure that only one of KIOV or IOVEC is set but not both */ + LASSERT((ptlrpc_is_bulk_desc_kiov(type) && + ops->add_kiov_frag != NULL) || + (ptlrpc_is_bulk_desc_kvec(type) && + ops->add_iov_frag != NULL)); + + if (type & PTLRPC_BULK_BUF_KIOV) { + OBD_ALLOC(desc, + offsetof(struct ptlrpc_bulk_desc, + bd_u.bd_kiov.bd_vec[nfrags])); + } else { + OBD_ALLOC(desc, + offsetof(struct ptlrpc_bulk_desc, + bd_u.bd_kvec.bd_kvec[nfrags])); + } + + if (!desc) + return NULL; + + spin_lock_init(&desc->bd_lock); + init_waitqueue_head(&desc->bd_waitq); + desc->bd_max_iov = nfrags; + desc->bd_iov_count = 0; + desc->bd_portal = portal; + desc->bd_type = type; + desc->bd_md_count = 0; + desc->bd_frag_ops = (struct ptlrpc_bulk_frag_ops *) ops; + LASSERT(max_brw > 0); + desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT); + /* PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this + * node. Negotiated ocd_brw_size will always be <= this number. */ + for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++) + LNetInvalidateHandle(&desc->bd_mds[i]); + + return desc; } /** * Prepare bulk descriptor for specified outgoing request \a req that - * can fit \a npages * pages. \a type is bulk type. \a portal is where + * can fit \a nfrags * pages. \a type is bulk type. \a portal is where * the bulk to be sent. Used on client-side. * Returns pointer to newly allocatrd initialized bulk descriptor or NULL on * error. */ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, - int npages, int type, int portal) + unsigned nfrags, unsigned max_brw, + unsigned int type, + unsigned portal, + const struct ptlrpc_bulk_frag_ops + *ops) { - struct obd_import *imp = req->rq_import; - struct ptlrpc_bulk_desc *desc; + struct obd_import *imp = req->rq_import; + struct ptlrpc_bulk_desc *desc; - ENTRY; - LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE); - desc = new_bulk(npages, type, portal); - if (desc == NULL) - RETURN(NULL); + ENTRY; + LASSERT(ptlrpc_is_bulk_op_passive(type)); + + desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops); + if (desc == NULL) + RETURN(NULL); desc->bd_import_generation = req->rq_import_generation; desc->bd_import = class_import_get(imp); @@ -147,88 +195,92 @@ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, return desc; } +EXPORT_SYMBOL(ptlrpc_prep_bulk_imp); -/** - * Prepare bulk descriptor for specified incoming request \a req that - * can fit \a npages * pages. \a type is bulk type. \a portal is where - * the bulk to be sent. Used on server-side after request was already - * received. - * Returns pointer to newly allocatrd initialized bulk descriptor or NULL on - * error. 
- */ -struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req, - int npages, int type, int portal) +void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, + struct page *page, int pageoffset, int len, + int pin) { - struct obd_export *exp = req->rq_export; - struct ptlrpc_bulk_desc *desc; + lnet_kiov_t *kiov; - ENTRY; - LASSERT(type == BULK_PUT_SOURCE || type == BULK_GET_SINK); + LASSERT(desc->bd_iov_count < desc->bd_max_iov); + LASSERT(page != NULL); + LASSERT(pageoffset >= 0); + LASSERT(len > 0); + LASSERT(pageoffset + len <= PAGE_CACHE_SIZE); + LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); - desc = new_bulk(npages, type, portal); - if (desc == NULL) - RETURN(NULL); + kiov = &BD_GET_KIOV(desc, desc->bd_iov_count); - desc->bd_export = class_export_get(exp); - desc->bd_req = req; + desc->bd_nob += len; - desc->bd_cbid.cbid_fn = server_bulk_callback; - desc->bd_cbid.cbid_arg = desc; + if (pin) + page_cache_get(page); - /* NB we don't assign rq_bulk here; server-side requests are - * re-used, and the handler frees the bulk desc explicitly. */ + kiov->kiov_page = page; + kiov->kiov_offset = pageoffset; + kiov->kiov_len = len; - return desc; + desc->bd_iov_count++; } +EXPORT_SYMBOL(__ptlrpc_prep_bulk_page); -/** - * Add a page \a page to the bulk descriptor \a desc. - * Data to transfer in the page starts at offset \a pageoffset and - * amount of data to transfer from the page is \a len - */ -void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, - cfs_page_t *page, int pageoffset, int len) +int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc, + void *frag, int len) { - LASSERT(desc->bd_iov_count < desc->bd_max_iov); - LASSERT(page != NULL); - LASSERT(pageoffset >= 0); - LASSERT(len > 0); - LASSERT(pageoffset + len <= CFS_PAGE_SIZE); + struct kvec *iovec; + ENTRY; + + LASSERT(desc->bd_iov_count < desc->bd_max_iov); + LASSERT(frag != NULL); + LASSERT(len > 0); + LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type)); + + iovec = &BD_GET_KVEC(desc, desc->bd_iov_count); + + desc->bd_nob += len; + + iovec->iov_base = frag; + iovec->iov_len = len; - desc->bd_nob += len; + desc->bd_iov_count++; - cfs_page_pin(page); - ptlrpc_add_bulk_page(desc, page, pageoffset, len); + RETURN(desc->bd_nob); } +EXPORT_SYMBOL(ptlrpc_prep_bulk_frag); -/** - * Uninitialize and free bulk descriptor \a desc. - * Works on bulk descriptors both from server and client side. 
- */ void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc) { - int i; - ENTRY; + ENTRY; - LASSERT(desc != NULL); - LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */ - LASSERT(!desc->bd_network_rw); /* network hands off or */ - LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL)); + LASSERT(desc != NULL); + LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */ + LASSERT(desc->bd_md_count == 0); /* network hands off */ + LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL)); + LASSERT(desc->bd_frag_ops != NULL); - sptlrpc_enc_pool_put_pages(desc); + if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) + sptlrpc_enc_pool_put_pages(desc); - if (desc->bd_export) - class_export_put(desc->bd_export); - else - class_import_put(desc->bd_import); + if (desc->bd_export) + class_export_put(desc->bd_export); + else + class_import_put(desc->bd_import); - for (i = 0; i < desc->bd_iov_count ; i++) - cfs_page_unpin(desc->bd_iov[i].kiov_page); + if (desc->bd_frag_ops->release_frags != NULL) + desc->bd_frag_ops->release_frags(desc); - OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc, - bd_iov[desc->bd_max_iov])); - EXIT; + if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) + OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc, + bd_u.bd_kiov.bd_vec[desc->bd_max_iov])); + else + OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc, + bd_u.bd_kvec.bd_kvec[desc-> + bd_max_iov])); + + EXIT; } +EXPORT_SYMBOL(ptlrpc_free_bulk); /** * Set server timelimit for this req, i.e. how long are we willing to wait @@ -267,6 +319,7 @@ void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req) reqmsg*/ lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout); } +EXPORT_SYMBOL(ptlrpc_at_set_req_timeout); /* Adjust max service estimate based on server value */ static void ptlrpc_at_adj_service(struct ptlrpc_request *req, @@ -297,22 +350,35 @@ int ptlrpc_at_get_net_latency(struct ptlrpc_request *req) } /* Adjust expected network latency */ -static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req, - unsigned int service_time) +void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req, + unsigned int service_time) { unsigned int nl, oldnl; struct imp_at *at; time_t now = cfs_time_current_sec(); LASSERT(req->rq_import); - at = &req->rq_import->imp_at; + + if (service_time > now - req->rq_sent + 3) { + /* bz16408, however, this can also happen if early reply + * is lost and client RPC is expired and resent, early reply + * or reply of original RPC can still be fit in reply buffer + * of resent RPC, now client is measuring time from the + * resent time, but server sent back service time of original + * RPC. + */ + CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ? 
+ D_ADAPTTO : D_WARNING, + "Reported service time %u > total measured time " + CFS_DURATION_T"\n", service_time, + cfs_time_sub(now, req->rq_sent)); + return; + } /* Network latency is total time less server processing time */ - nl = max_t(int, now - req->rq_sent - service_time, 0) +1/*st rounding*/; - if (service_time > now - req->rq_sent + 3 /* bz16408 */) - CWARN("Reported service time %u > total measured time " - CFS_DURATION_T"\n", service_time, - cfs_time_sub(now, req->rq_sent)); + nl = max_t(int, now - req->rq_sent - + service_time, 0) + 1; /* st rounding */ + at = &req->rq_import->imp_at; oldnl = at_measured(&at->iat_net_latency, nl); if (oldnl != 0) @@ -349,6 +415,7 @@ static int unpack_reply(struct ptlrpc_request *req) * If anything goes wrong just ignore it - same as if it never happened */ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req) +__must_hold(&req->rq_lock) { struct ptlrpc_request *early_req; time_t olddl; @@ -356,46 +423,79 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req) ENTRY; req->rq_early = 0; - cfs_spin_unlock(&req->rq_lock); + spin_unlock(&req->rq_lock); - rc = sptlrpc_cli_unwrap_early_reply(req, &early_req); - if (rc) { - cfs_spin_lock(&req->rq_lock); - RETURN(rc); - } + rc = sptlrpc_cli_unwrap_early_reply(req, &early_req); + if (rc) { + spin_lock(&req->rq_lock); + RETURN(rc); + } - rc = unpack_reply(early_req); - if (rc == 0) { - /* Expecting to increase the service time estimate here */ - ptlrpc_at_adj_service(req, - lustre_msg_get_timeout(early_req->rq_repmsg)); - ptlrpc_at_adj_net_latency(req, - lustre_msg_get_service_time(early_req->rq_repmsg)); - } + rc = unpack_reply(early_req); + if (rc != 0) { + sptlrpc_cli_finish_early_reply(early_req); + spin_lock(&req->rq_lock); + RETURN(rc); + } - sptlrpc_cli_finish_early_reply(early_req); + /* Use new timeout value just to adjust the local value for this + * request, don't include it into at_history. It is unclear yet why + * service time increased and should it be counted or skipped, e.g. + * that can be recovery case or some error or server, the real reply + * will add all new data if it is worth to add. */ + req->rq_timeout = lustre_msg_get_timeout(early_req->rq_repmsg); + lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout); - cfs_spin_lock(&req->rq_lock); + /* Network latency can be adjusted, it is pure network delays */ + ptlrpc_at_adj_net_latency(req, + lustre_msg_get_service_time(early_req->rq_repmsg)); - if (rc == 0) { - /* Adjust the local timeout for this req */ - ptlrpc_at_set_req_timeout(req); + sptlrpc_cli_finish_early_reply(early_req); - olddl = req->rq_deadline; - /* server assumes it now has rq_timeout from when it sent the - early reply, so client should give it at least that long. */ - req->rq_deadline = cfs_time_current_sec() + req->rq_timeout + - ptlrpc_at_get_net_latency(req); + spin_lock(&req->rq_lock); + olddl = req->rq_deadline; + /* server assumes it now has rq_timeout from when the request + * arrived, so the client should give it at least that long. 
+ * since we don't know the arrival time we'll use the original + * sent time */ + req->rq_deadline = req->rq_sent + req->rq_timeout + + ptlrpc_at_get_net_latency(req); - DEBUG_REQ(D_ADAPTTO, req, - "Early reply #%d, new deadline in "CFS_DURATION_T"s " - "("CFS_DURATION_T"s)", req->rq_early_count, - cfs_time_sub(req->rq_deadline, - cfs_time_current_sec()), - cfs_time_sub(req->rq_deadline, olddl)); - } + DEBUG_REQ(D_ADAPTTO, req, + "Early reply #%d, new deadline in "CFS_DURATION_T"s " + "("CFS_DURATION_T"s)", req->rq_early_count, + cfs_time_sub(req->rq_deadline, cfs_time_current_sec()), + cfs_time_sub(req->rq_deadline, olddl)); - RETURN(rc); + RETURN(rc); +} + +static struct kmem_cache *request_cache; + +int ptlrpc_request_cache_init(void) +{ + request_cache = kmem_cache_create("ptlrpc_cache", + sizeof(struct ptlrpc_request), + 0, SLAB_HWCACHE_ALIGN, NULL); + return request_cache == NULL ? -ENOMEM : 0; +} + +void ptlrpc_request_cache_fini(void) +{ + kmem_cache_destroy(request_cache); +} + +struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags) +{ + struct ptlrpc_request *req; + + OBD_SLAB_ALLOC_PTR_GFP(req, request_cache, flags); + return req; +} + +void ptlrpc_request_cache_free(struct ptlrpc_request *req) +{ + OBD_SLAB_FREE_PTR(req, request_cache); } /** @@ -404,28 +504,29 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req) */ void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool) { - cfs_list_t *l, *tmp; - struct ptlrpc_request *req; + struct list_head *l, *tmp; + struct ptlrpc_request *req; - LASSERT(pool != NULL); + LASSERT(pool != NULL); - cfs_spin_lock(&pool->prp_lock); - cfs_list_for_each_safe(l, tmp, &pool->prp_req_list) { - req = cfs_list_entry(l, struct ptlrpc_request, rq_list); - cfs_list_del(&req->rq_list); - LASSERT(req->rq_reqbuf); - LASSERT(req->rq_reqbuf_len == pool->prp_rq_size); - OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size); - OBD_FREE(req, sizeof(*req)); - } - cfs_spin_unlock(&pool->prp_lock); - OBD_FREE(pool, sizeof(*pool)); + spin_lock(&pool->prp_lock); + list_for_each_safe(l, tmp, &pool->prp_req_list) { + req = list_entry(l, struct ptlrpc_request, rq_list); + list_del(&req->rq_list); + LASSERT(req->rq_reqbuf); + LASSERT(req->rq_reqbuf_len == pool->prp_rq_size); + OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size); + ptlrpc_request_cache_free(req); + } + spin_unlock(&pool->prp_lock); + OBD_FREE(pool, sizeof(*pool)); } +EXPORT_SYMBOL(ptlrpc_free_rq_pool); /** * Allocates, initializes and adds \a num_rq requests to the pool \a pool */ -void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) +int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) { int i; int size = 1; @@ -433,35 +534,36 @@ void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) while (size < pool->prp_rq_size) size <<= 1; - LASSERTF(cfs_list_empty(&pool->prp_req_list) || + LASSERTF(list_empty(&pool->prp_req_list) || size == pool->prp_rq_size, "Trying to change pool size with nonempty pool " "from %d to %d bytes\n", pool->prp_rq_size, size); - cfs_spin_lock(&pool->prp_lock); - pool->prp_rq_size = size; - for (i = 0; i < num_rq; i++) { - struct ptlrpc_request *req; - struct lustre_msg *msg; - - cfs_spin_unlock(&pool->prp_lock); - OBD_ALLOC(req, sizeof(struct ptlrpc_request)); - if (!req) - return; - OBD_ALLOC_LARGE(msg, size); - if (!msg) { - OBD_FREE(req, sizeof(struct ptlrpc_request)); - return; + spin_lock(&pool->prp_lock); + pool->prp_rq_size = size; + for (i = 0; i < num_rq; i++) { + struct ptlrpc_request *req; + 
struct lustre_msg *msg; + + spin_unlock(&pool->prp_lock); + req = ptlrpc_request_cache_alloc(GFP_NOFS); + if (!req) + return i; + OBD_ALLOC_LARGE(msg, size); + if (!msg) { + ptlrpc_request_cache_free(req); + return i; } req->rq_reqbuf = msg; req->rq_reqbuf_len = size; req->rq_pool = pool; - cfs_spin_lock(&pool->prp_lock); - cfs_list_add_tail(&req->rq_list, &pool->prp_req_list); - } - cfs_spin_unlock(&pool->prp_lock); - return; + spin_lock(&pool->prp_lock); + list_add_tail(&req->rq_list, &pool->prp_req_list); + } + spin_unlock(&pool->prp_lock); + return num_rq; } +EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool); /** * Create and initialize new request pool with given attributes: @@ -473,31 +575,27 @@ void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) */ struct ptlrpc_request_pool * ptlrpc_init_rq_pool(int num_rq, int msgsize, - void (*populate_pool)(struct ptlrpc_request_pool *, int)) + int (*populate_pool)(struct ptlrpc_request_pool *, int)) { - struct ptlrpc_request_pool *pool; + struct ptlrpc_request_pool *pool; - OBD_ALLOC(pool, sizeof (struct ptlrpc_request_pool)); - if (!pool) - return NULL; + OBD_ALLOC(pool, sizeof(struct ptlrpc_request_pool)); + if (!pool) + return NULL; - /* Request next power of two for the allocation, because internally - kernel would do exactly this */ + /* Request next power of two for the allocation, because internally + kernel would do exactly this */ - cfs_spin_lock_init(&pool->prp_lock); - CFS_INIT_LIST_HEAD(&pool->prp_req_list); - pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD; - pool->prp_populate = populate_pool; + spin_lock_init(&pool->prp_lock); + INIT_LIST_HEAD(&pool->prp_req_list); + pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD; + pool->prp_populate = populate_pool; - populate_pool(pool, num_rq); + populate_pool(pool, num_rq); - if (cfs_list_empty(&pool->prp_req_list)) { - /* have not allocated a single request for the pool */ - OBD_FREE(pool, sizeof (struct ptlrpc_request_pool)); - pool = NULL; - } - return pool; + return pool; } +EXPORT_SYMBOL(ptlrpc_init_rq_pool); /** * Fetches one request from pool \a pool @@ -511,21 +609,21 @@ ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool) if (!pool) return NULL; - cfs_spin_lock(&pool->prp_lock); + spin_lock(&pool->prp_lock); - /* See if we have anything in a pool, and bail out if nothing, - * in writeout path, where this matters, this is safe to do, because - * nothing is lost in this case, and when some in-flight requests - * complete, this code will be called again. */ - if (unlikely(cfs_list_empty(&pool->prp_req_list))) { - cfs_spin_unlock(&pool->prp_lock); - return NULL; - } + /* See if we have anything in a pool, and bail out if nothing, + * in writeout path, where this matters, this is safe to do, because + * nothing is lost in this case, and when some in-flight requests + * complete, this code will be called again. 
 */
+	if (unlikely(list_empty(&pool->prp_req_list))) {
+		spin_unlock(&pool->prp_lock);
+		return NULL;
+	}

-	request = cfs_list_entry(pool->prp_req_list.next, struct ptlrpc_request,
-				 rq_list);
-	cfs_list_del_init(&request->rq_list);
-	cfs_spin_unlock(&pool->prp_lock);
+	request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
+			     rq_list);
+	list_del_init(&request->rq_list);
+	spin_unlock(&pool->prp_lock);

 	LASSERT(request->rq_reqbuf);
 	LASSERT(request->rq_pool);
@@ -544,13 +642,13 @@ ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
  */
 static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
 {
-	struct ptlrpc_request_pool *pool = request->rq_pool;
+	struct ptlrpc_request_pool *pool = request->rq_pool;

-	cfs_spin_lock(&pool->prp_lock);
-	LASSERT(cfs_list_empty(&request->rq_list));
-	LASSERT(!request->rq_receiving_reply);
-	cfs_list_add_tail(&request->rq_list, &pool->prp_req_list);
-	cfs_spin_unlock(&pool->prp_lock);
+	spin_lock(&pool->prp_lock);
+	LASSERT(list_empty(&request->rq_list));
+	LASSERT(!request->rq_receiving_reply);
+	list_add_tail(&request->rq_list, &pool->prp_req_list);
+	spin_unlock(&pool->prp_lock);
 }

 static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
@@ -582,7 +680,6 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
 	lustre_msg_add_version(request->rq_reqmsg, version);
 	request->rq_send_state = LUSTRE_IMP_FULL;
 	request->rq_type = PTL_RPC_MSG_REQUEST;
-	request->rq_export = NULL;

 	request->rq_req_cbid.cbid_fn = request_out_callback;
 	request->rq_req_cbid.cbid_arg = request;
@@ -599,27 +696,14 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,

 	ptlrpc_at_set_req_timeout(request);

-	cfs_spin_lock_init(&request->rq_lock);
-	CFS_INIT_LIST_HEAD(&request->rq_list);
-	CFS_INIT_LIST_HEAD(&request->rq_timed_list);
-	CFS_INIT_LIST_HEAD(&request->rq_replay_list);
-	CFS_INIT_LIST_HEAD(&request->rq_ctx_chain);
-	CFS_INIT_LIST_HEAD(&request->rq_set_chain);
-	CFS_INIT_LIST_HEAD(&request->rq_history_list);
-	CFS_INIT_LIST_HEAD(&request->rq_exp_list);
-	cfs_waitq_init(&request->rq_reply_waitq);
-	cfs_waitq_init(&request->rq_set_waitq);
-	request->rq_xid = ptlrpc_next_xid();
-	cfs_atomic_set(&request->rq_refcount, 1);
-
-	lustre_msg_set_opc(request->rq_reqmsg, opcode);
+	lustre_msg_set_opc(request->rq_reqmsg, opcode);

-	RETURN(0);
+	RETURN(0);

 out_ctx:
-	sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
+	sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
 out_free:
-	class_import_put(imp);
-	return rc;
+	class_import_put(imp);
+	return rc;
 }

 int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
@@ -642,8 +726,32 @@ EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
 int ptlrpc_request_pack(struct ptlrpc_request *request,
			 __u32 version, int opcode)
 {
-	return ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
-}
+	int rc;
+	rc = ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
+	if (rc)
+		return rc;
+
+	/* Some old 1.8 clients (< 1.8.7) LASSERT that the size of the
+	 * ptlrpc_body sent from the server equals the local ptlrpc_body
+	 * size, so we have to send the old ptlrpc_body to keep
+	 * interoperability with these clients.
+	 *
+	 * Only three kinds of server->client RPCs so far:
+	 *  - LDLM_BL_CALLBACK
+	 *  - LDLM_CP_CALLBACK
+	 *  - LDLM_GL_CALLBACK
+	 *
+	 * XXX This should be removed whenever we drop interoperability with
+	 *     these old clients.
+ */ + if (opcode == LDLM_BL_CALLBACK || opcode == LDLM_CP_CALLBACK || + opcode == LDLM_GL_CALLBACK) + req_capsule_shrink(&request->rq_pill, &RMF_PTLRPC_BODY, + sizeof(struct ptlrpc_body_v2), RCL_CLIENT); + + return rc; +} +EXPORT_SYMBOL(ptlrpc_request_pack); /** * Helper function to allocate new request on import \a imp @@ -653,29 +761,30 @@ int ptlrpc_request_pack(struct ptlrpc_request *request, */ static inline struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp, - struct ptlrpc_request_pool *pool) + struct ptlrpc_request_pool *pool) { - struct ptlrpc_request *request = NULL; + struct ptlrpc_request *request = NULL; - if (pool) - request = ptlrpc_prep_req_from_pool(pool); + request = ptlrpc_request_cache_alloc(GFP_NOFS); - if (!request) - OBD_ALLOC_PTR(request); + if (!request && pool) + request = ptlrpc_prep_req_from_pool(pool); - if (request) { - LASSERTF((unsigned long)imp > 0x1000, "%p", imp); - LASSERT(imp != LP_POISON); - LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p", - imp->imp_client); - LASSERT(imp->imp_client != LP_POISON); + if (request) { + ptlrpc_cli_req_init(request); - request->rq_import = class_import_get(imp); - } else { - CERROR("request allocation out of memory\n"); - } + LASSERTF((unsigned long)imp > 0x1000, "%p", imp); + LASSERT(imp != LP_POISON); + LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n", + imp->imp_client); + LASSERT(imp->imp_client != LP_POISON); - return request; + request->rq_import = class_import_get(imp); + } else { + CERROR("request allocation out of memory\n"); + } + + return request; } /** @@ -709,6 +818,7 @@ struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp, { return ptlrpc_request_alloc_internal(imp, NULL, format); } +EXPORT_SYMBOL(ptlrpc_request_alloc); /** * Allocate new request structure for import \a imp from pool \a pool and @@ -720,6 +830,7 @@ struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp, { return ptlrpc_request_alloc_internal(imp, pool, format); } +EXPORT_SYMBOL(ptlrpc_request_alloc_pool); /** * For requests not from pool, free memory of the request structure. @@ -727,11 +838,12 @@ struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp, */ void ptlrpc_request_free(struct ptlrpc_request *request) { - if (request->rq_pool) - __ptlrpc_free_req_to_pool(request); - else - OBD_FREE_PTR(request); + if (request->rq_pool) + __ptlrpc_free_req_to_pool(request); + else + ptlrpc_request_cache_free(request); } +EXPORT_SYMBOL(ptlrpc_request_free); /** * Allocate new request for operatione \a opcode and immediatelly pack it for @@ -756,6 +868,7 @@ struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp, } return req; } +EXPORT_SYMBOL(ptlrpc_request_alloc_pack); /** * Prepare request (fetched from pool \a poolif not NULL) on import \a imp @@ -798,106 +911,59 @@ ptlrpc_prep_req(struct obd_import *imp, __u32 version, int opcode, int count, } /** - * Allocate "fake" request that would not be sent anywhere in the end. - * Only used as a hack because we have no other way of performing - * async actions in lustre between layers. - * Used on MDS to request object preallocations from more than one OST at a - * time. 
- */ -struct ptlrpc_request *ptlrpc_prep_fakereq(struct obd_import *imp, - unsigned int timeout, - ptlrpc_interpterer_t interpreter) -{ - struct ptlrpc_request *request = NULL; - ENTRY; - - OBD_ALLOC(request, sizeof(*request)); - if (!request) { - CERROR("request allocation out of memory\n"); - RETURN(NULL); - } - - request->rq_send_state = LUSTRE_IMP_FULL; - request->rq_type = PTL_RPC_MSG_REQUEST; - request->rq_import = class_import_get(imp); - request->rq_export = NULL; - request->rq_import_generation = imp->imp_generation; - - request->rq_timeout = timeout; - request->rq_sent = cfs_time_current_sec(); - request->rq_deadline = request->rq_sent + timeout; - request->rq_reply_deadline = request->rq_deadline; - request->rq_interpret_reply = interpreter; - request->rq_phase = RQ_PHASE_RPC; - request->rq_next_phase = RQ_PHASE_INTERPRET; - /* don't want reply */ - request->rq_receiving_reply = 0; - request->rq_must_unlink = 0; - request->rq_no_delay = request->rq_no_resend = 1; - request->rq_fake = 1; - - cfs_spin_lock_init(&request->rq_lock); - CFS_INIT_LIST_HEAD(&request->rq_list); - CFS_INIT_LIST_HEAD(&request->rq_replay_list); - CFS_INIT_LIST_HEAD(&request->rq_set_chain); - CFS_INIT_LIST_HEAD(&request->rq_history_list); - CFS_INIT_LIST_HEAD(&request->rq_exp_list); - cfs_waitq_init(&request->rq_reply_waitq); - cfs_waitq_init(&request->rq_set_waitq); - - request->rq_xid = ptlrpc_next_xid(); - cfs_atomic_set(&request->rq_refcount, 1); - - RETURN(request); -} - -/** - * Indicate that processing of "fake" request is finished. + * Allocate and initialize new request set structure on the current CPT. + * Returns a pointer to the newly allocated set structure or NULL on error. */ -void ptlrpc_fakereq_finished(struct ptlrpc_request *req) +struct ptlrpc_request_set *ptlrpc_prep_set(void) { - struct ptlrpc_request_set *set = req->rq_set; - int wakeup = 0; - - /* hold ref on the request to prevent others (ptlrpcd) to free it */ - ptlrpc_request_addref(req); - cfs_list_del_init(&req->rq_list); - - /* if we kill request before timeout - need adjust counter */ - if (req->rq_phase == RQ_PHASE_RPC && set != NULL && - cfs_atomic_dec_and_test(&set->set_remaining)) - wakeup = 1; - - ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE); - - /* Only need to call wakeup once when to be empty. */ - if (wakeup) - cfs_waitq_signal(&set->set_waitq); - ptlrpc_req_finished(req); -} + struct ptlrpc_request_set *set; + int cpt; + + ENTRY; + cpt = cfs_cpt_current(cfs_cpt_table, 0); + OBD_CPT_ALLOC(set, cfs_cpt_table, cpt, sizeof *set); + if (!set) + RETURN(NULL); + atomic_set(&set->set_refcount, 1); + INIT_LIST_HEAD(&set->set_requests); + init_waitqueue_head(&set->set_waitq); + atomic_set(&set->set_new_count, 0); + atomic_set(&set->set_remaining, 0); + spin_lock_init(&set->set_new_req_lock); + INIT_LIST_HEAD(&set->set_new_requests); + INIT_LIST_HEAD(&set->set_cblist); + set->set_max_inflight = UINT_MAX; + set->set_producer = NULL; + set->set_producer_arg = NULL; + set->set_rc = 0; + + RETURN(set); +} +EXPORT_SYMBOL(ptlrpc_prep_set); /** - * Allocate and initialize new request set structure. + * Allocate and initialize new request set structure with flow control + * extension. This extension allows to control the number of requests in-flight + * for the whole set. A callback function to generate requests must be provided + * and the request set will keep the number of requests sent over the wire to + * @max_inflight. * Returns a pointer to the newly allocated set structure or NULL on error. 
*/ -struct ptlrpc_request_set *ptlrpc_prep_set(void) +struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func, + void *arg) + { - struct ptlrpc_request_set *set; + struct ptlrpc_request_set *set; - ENTRY; - OBD_ALLOC(set, sizeof *set); - if (!set) - RETURN(NULL); - cfs_atomic_set(&set->set_refcount, 1); - CFS_INIT_LIST_HEAD(&set->set_requests); - cfs_waitq_init(&set->set_waitq); - cfs_atomic_set(&set->set_new_count, 0); - cfs_atomic_set(&set->set_remaining, 0); - cfs_spin_lock_init(&set->set_new_req_lock); - CFS_INIT_LIST_HEAD(&set->set_new_requests); - CFS_INIT_LIST_HEAD(&set->set_cblist); + set = ptlrpc_prep_set(); + if (!set) + RETURN(NULL); + + set->set_max_inflight = max; + set->set_producer = func; + set->set_producer_arg = arg; - RETURN(set); + RETURN(set); } /** @@ -910,54 +976,55 @@ struct ptlrpc_request_set *ptlrpc_prep_set(void) */ void ptlrpc_set_destroy(struct ptlrpc_request_set *set) { - cfs_list_t *tmp; - cfs_list_t *next; - int expected_phase; - int n = 0; - ENTRY; - - /* Requests on the set should either all be completed, or all be new */ - expected_phase = (cfs_atomic_read(&set->set_remaining) == 0) ? - RQ_PHASE_COMPLETE : RQ_PHASE_NEW; - cfs_list_for_each (tmp, &set->set_requests) { - struct ptlrpc_request *req = - cfs_list_entry(tmp, struct ptlrpc_request, - rq_set_chain); - - LASSERT(req->rq_phase == expected_phase); - n++; - } - - LASSERTF(cfs_atomic_read(&set->set_remaining) == 0 || - cfs_atomic_read(&set->set_remaining) == n, "%d / %d\n", - cfs_atomic_read(&set->set_remaining), n); - - cfs_list_for_each_safe(tmp, next, &set->set_requests) { - struct ptlrpc_request *req = - cfs_list_entry(tmp, struct ptlrpc_request, - rq_set_chain); - cfs_list_del_init(&req->rq_set_chain); - - LASSERT(req->rq_phase == expected_phase); - - if (req->rq_phase == RQ_PHASE_NEW) { - ptlrpc_req_interpret(NULL, req, -EBADR); - cfs_atomic_dec(&set->set_remaining); - } - - cfs_spin_lock(&req->rq_lock); - req->rq_set = NULL; - req->rq_invalid_rqset = 0; - cfs_spin_unlock(&req->rq_lock); + struct list_head *tmp; + struct list_head *next; + int expected_phase; + int n = 0; + ENTRY; + + /* Requests on the set should either all be completed, or all be new */ + expected_phase = (atomic_read(&set->set_remaining) == 0) ? + RQ_PHASE_COMPLETE : RQ_PHASE_NEW; + list_for_each(tmp, &set->set_requests) { + struct ptlrpc_request *req = + list_entry(tmp, struct ptlrpc_request, + rq_set_chain); + + LASSERT(req->rq_phase == expected_phase); + n++; + } + + LASSERTF(atomic_read(&set->set_remaining) == 0 || + atomic_read(&set->set_remaining) == n, "%d / %d\n", + atomic_read(&set->set_remaining), n); + + list_for_each_safe(tmp, next, &set->set_requests) { + struct ptlrpc_request *req = + list_entry(tmp, struct ptlrpc_request, + rq_set_chain); + list_del_init(&req->rq_set_chain); + + LASSERT(req->rq_phase == expected_phase); + + if (req->rq_phase == RQ_PHASE_NEW) { + ptlrpc_req_interpret(NULL, req, -EBADR); + atomic_dec(&set->set_remaining); + } + + spin_lock(&req->rq_lock); + req->rq_set = NULL; + req->rq_invalid_rqset = 0; + spin_unlock(&req->rq_lock); ptlrpc_req_finished (req); } - LASSERT(cfs_atomic_read(&set->set_remaining) == 0); + LASSERT(atomic_read(&set->set_remaining) == 0); - ptlrpc_reqset_put(set); - EXIT; + ptlrpc_reqset_put(set); + EXIT; } +EXPORT_SYMBOL(ptlrpc_set_destroy); /** * Add a callback function \a fn to the set. 
@@ -967,17 +1034,17 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set) int ptlrpc_set_add_cb(struct ptlrpc_request_set *set, set_interpreter_func fn, void *data) { - struct ptlrpc_set_cbdata *cbdata; + struct ptlrpc_set_cbdata *cbdata; - OBD_ALLOC_PTR(cbdata); - if (cbdata == NULL) - RETURN(-ENOMEM); + OBD_ALLOC_PTR(cbdata); + if (cbdata == NULL) + RETURN(-ENOMEM); - cbdata->psc_interpret = fn; - cbdata->psc_data = data; - cfs_list_add_tail(&cbdata->psc_item, &set->set_cblist); + cbdata->psc_interpret = fn; + cbdata->psc_data = data; + list_add_tail(&cbdata->psc_item, &set->set_cblist); - RETURN(0); + RETURN(0); } /** @@ -987,14 +1054,23 @@ int ptlrpc_set_add_cb(struct ptlrpc_request_set *set, void ptlrpc_set_add_req(struct ptlrpc_request_set *set, struct ptlrpc_request *req) { - LASSERT(cfs_list_empty(&req->rq_set_chain)); + LASSERT(list_empty(&req->rq_set_chain)); + + /* The set takes over the caller's request reference */ + list_add_tail(&req->rq_set_chain, &set->set_requests); + req->rq_set = set; + atomic_inc(&set->set_remaining); + req->rq_queued_time = cfs_time_current(); + + if (req->rq_reqmsg != NULL) + lustre_msg_set_jobid(req->rq_reqmsg, NULL); - /* The set takes over the caller's request reference */ - cfs_list_add_tail(&req->rq_set_chain, &set->set_requests); - req->rq_set = set; - cfs_atomic_inc(&set->set_remaining); - req->rq_queued_time = cfs_time_current(); + if (set->set_producer != NULL) + /* If the request set has a producer callback, the RPC must be + * sent straight away */ + ptlrpc_send_new_req(req); } +EXPORT_SYMBOL(ptlrpc_set_add_req); /** * Add a request to a request with dedicated server thread @@ -1008,28 +1084,28 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, int count, i; LASSERT(req->rq_set == NULL); - LASSERT(cfs_test_bit(LIOD_STOP, &pc->pc_flags) == 0); - - cfs_spin_lock(&set->set_new_req_lock); - /* - * The set takes over the caller's request reference. - */ - req->rq_set = set; - req->rq_queued_time = cfs_time_current(); - cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests); - count = cfs_atomic_inc_return(&set->set_new_count); - cfs_spin_unlock(&set->set_new_req_lock); - - /* Only need to call wakeup once for the first entry. */ - if (count == 1) { - cfs_waitq_signal(&set->set_waitq); - - /* XXX: It maybe unnecessary to wakeup all the partners. But to - * guarantee the async RPC can be processed ASAP, we have - * no other better choice. It maybe fixed in future. */ - for (i = 0; i < pc->pc_npartners; i++) - cfs_waitq_signal(&pc->pc_partners[i]->pc_set->set_waitq); - } + LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0); + + spin_lock(&set->set_new_req_lock); + /* + * The set takes over the caller's request reference. + */ + req->rq_set = set; + req->rq_queued_time = cfs_time_current(); + list_add_tail(&req->rq_set_chain, &set->set_new_requests); + count = atomic_inc_return(&set->set_new_count); + spin_unlock(&set->set_new_req_lock); + + /* Only need to call wakeup once for the first entry. */ + if (count == 1) { + wake_up(&set->set_waitq); + + /* XXX: It maybe unnecessary to wakeup all the partners. But to + * guarantee the async RPC can be processed ASAP, we have + * no other better choice. It maybe fixed in future. 
*/ + for (i = 0; i < pc->pc_npartners; i++) + wake_up(&pc->pc_partners[i]->pc_set->set_waitq); + } } /** @@ -1056,70 +1132,85 @@ static int ptlrpc_import_delay_req(struct obd_import *imp, } else if (imp->imp_state == LUSTRE_IMP_NEW) { DEBUG_REQ(D_ERROR, req, "Uninitialized import."); *status = -EIO; - LBUG(); - } else if (imp->imp_state == LUSTRE_IMP_CLOSED) { - DEBUG_REQ(D_ERROR, req, "IMP_CLOSED "); - *status = -EIO; + } else if (imp->imp_state == LUSTRE_IMP_CLOSED) { + /* pings may safely race with umount */ + DEBUG_REQ(lustre_msg_get_opc(req->rq_reqmsg) == OBD_PING ? + D_HA : D_ERROR, req, "IMP_CLOSED "); + *status = -EIO; } else if (ptlrpc_send_limit_expired(req)) { - /* probably doesn't need to be a D_ERROR after initial testing */ - DEBUG_REQ(D_ERROR, req, "send limit expired "); - *status = -EIO; - } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING && - imp->imp_state == LUSTRE_IMP_CONNECTING) { - /* allow CONNECT even if import is invalid */ ; - if (cfs_atomic_read(&imp->imp_inval_count) != 0) { - DEBUG_REQ(D_ERROR, req, "invalidate in flight"); - *status = -EIO; - } - } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) { - if (!imp->imp_deactive) - DEBUG_REQ(D_ERROR, req, "IMP_INVALID"); - *status = -ESHUTDOWN; /* bz 12940 */ - } else if (req->rq_import_generation != imp->imp_generation) { + /* probably doesn't need to be a D_ERROR after initial testing*/ + DEBUG_REQ(D_HA, req, "send limit expired "); + *status = -ETIMEDOUT; + } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING && + imp->imp_state == LUSTRE_IMP_CONNECTING) { + /* allow CONNECT even if import is invalid */ ; + if (atomic_read(&imp->imp_inval_count) != 0) { + DEBUG_REQ(D_ERROR, req, "invalidate in flight"); + *status = -EIO; + } + } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) { + if (!imp->imp_deactive) + DEBUG_REQ(D_NET, req, "IMP_INVALID"); + *status = -ESHUTDOWN; /* bz 12940 */ + } else if (req->rq_import_generation != imp->imp_generation) { DEBUG_REQ(D_ERROR, req, "req wrong generation:"); *status = -EIO; } else if (req->rq_send_state != imp->imp_state) { /* invalidate in progress - any requests should be drop */ - if (cfs_atomic_read(&imp->imp_inval_count) != 0) { + if (atomic_read(&imp->imp_inval_count) != 0) { DEBUG_REQ(D_ERROR, req, "invalidate in flight"); *status = -EIO; } else if (imp->imp_dlm_fake || req->rq_no_delay) { *status = -EWOULDBLOCK; - } else { - delay = 1; - } - } + } else if (req->rq_allow_replay && + (imp->imp_state == LUSTRE_IMP_REPLAY || + imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS || + imp->imp_state == LUSTRE_IMP_REPLAY_WAIT || + imp->imp_state == LUSTRE_IMP_RECOVER)) { + DEBUG_REQ(D_HA, req, "allow during recovery.\n"); + } else { + delay = 1; + } + } - RETURN(delay); + RETURN(delay); } /** - * Decide if the eror message regarding provided request \a req - * should be printed to the console or not. - * Makes it's decision on request status and other properties. - * Returns 1 to print error on the system console or 0 if not. + * Decide if the error message should be printed to the console or not. + * Makes its decision based on request type, status, and failure frequency. 
+ * + * \param[in] req request that failed and may need a console message + * + * \retval false if no message should be printed + * \retval true if console message should be printed */ -static int ptlrpc_console_allow(struct ptlrpc_request *req) +static bool ptlrpc_console_allow(struct ptlrpc_request *req) { - __u32 opc = lustre_msg_get_opc(req->rq_reqmsg); - int err; + __u32 opc; - /* Suppress particular reconnect errors which are to be expected. No - * errors are suppressed for the initial connection on an import */ - if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) && - (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) { + LASSERT(req->rq_reqmsg != NULL); + opc = lustre_msg_get_opc(req->rq_reqmsg); - /* Suppress timed out reconnect requests */ - if (req->rq_timedout) - return 0; + /* Suppress particular reconnect errors which are to be expected. */ + if (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT) { + int err; - /* Suppress unavailable/again reconnect requests */ - err = lustre_msg_get_status(req->rq_repmsg); - if (err == -ENODEV || err == -EAGAIN) - return 0; - } + /* Suppress timed out reconnect requests */ + if (lustre_handle_is_used(&req->rq_import->imp_remote_handle) || + req->rq_timedout) + return false; + + /* Suppress most unavailable/again reconnect requests, but + * print occasionally so it is clear client is trying to + * connect to a server where no target is running. */ + err = lustre_msg_get_status(req->rq_repmsg); + if ((err == -ENODEV || err == -EAGAIN) && + req->rq_import->imp_conn_cnt % 30 != 20) + return false; + } - return 1; + return true; } /** @@ -1132,15 +1223,19 @@ static int ptlrpc_check_status(struct ptlrpc_request *req) ENTRY; err = lustre_msg_get_status(req->rq_repmsg); - if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) { - struct obd_import *imp = req->rq_import; - __u32 opc = lustre_msg_get_opc(req->rq_reqmsg); - LCONSOLE_ERROR_MSG(0x011,"an error occurred while communicating" - " with %s. The %s operation failed with %d\n", - libcfs_nid2str(imp->imp_connection->c_peer.nid), - ll_opcode2str(opc), err); - RETURN(err < 0 ? err : -EINVAL); - } + if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) { + struct obd_import *imp = req->rq_import; + lnet_nid_t nid = imp->imp_connection->c_peer.nid; + __u32 opc = lustre_msg_get_opc(req->rq_reqmsg); + + if (ptlrpc_console_allow(req)) + LCONSOLE_ERROR_MSG(0x11, "%s: operation %s to node %s " + "failed: rc = %d\n", + imp->imp_obd->obd_name, + ll_opcode2str(opc), + libcfs_nid2str(nid), err); + RETURN(err < 0 ? err : -EINVAL); + } if (err < 0) { DEBUG_REQ(D_INFO, req, "status is %d", err); @@ -1149,21 +1244,6 @@ static int ptlrpc_check_status(struct ptlrpc_request *req) DEBUG_REQ(D_INFO, req, "status is %d", err); } - if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) { - struct obd_import *imp = req->rq_import; - __u32 opc = lustre_msg_get_opc(req->rq_reqmsg); - - if (ptlrpc_console_allow(req)) - LCONSOLE_ERROR_MSG(0x011,"an error occurred while " - "communicating with %s. The %s " - "operation failed with %d\n", - libcfs_nid2str( - imp->imp_connection->c_peer.nid), - ll_opcode2str(opc), err); - - RETURN(err < 0 ? 
err : -EINVAL); - } - RETURN(err); } @@ -1203,14 +1283,15 @@ static int after_reply(struct ptlrpc_request *req) struct obd_device *obd = req->rq_import->imp_obd; int rc; struct timeval work_start; + __u64 committed; long timediff; ENTRY; LASSERT(obd != NULL); /* repbuf must be unlinked */ - LASSERT(!req->rq_receiving_reply && !req->rq_must_unlink); + LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked); - if (req->rq_reply_truncate) { + if (req->rq_reply_truncated) { if (ptlrpc_no_resend(req)) { DEBUG_REQ(D_ERROR, req, "reply buffer overflow," " expected: %d, actual size: %d", @@ -1225,10 +1306,15 @@ static int after_reply(struct ptlrpc_request *req) * will roundup it */ req->rq_replen = req->rq_nob_received; req->rq_nob_received = 0; - req->rq_resend = 1; + spin_lock(&req->rq_lock); + req->rq_resend = 1; + spin_unlock(&req->rq_lock); RETURN(0); } + do_gettimeofday(&work_start); + timediff = cfs_timeval_sub(&work_start, &req->rq_sent_tv, NULL); + /* * NB Until this point, the whole of the incoming message, * including buflens, status etc is in the sender's byte order. @@ -1239,23 +1325,46 @@ static int after_reply(struct ptlrpc_request *req) RETURN(rc); } - /* - * Security layer unwrap might ask resend this request. - */ - if (req->rq_resend) - RETURN(0); - - rc = unpack_reply(req); - if (rc) - RETURN(rc); - - cfs_gettimeofday(&work_start); - timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL); - if (obd->obd_svc_stats != NULL) { - lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, - timediff); - ptlrpc_lprocfs_rpc_sent(req, timediff); - } + /* + * Security layer unwrap might ask resend this request. + */ + if (req->rq_resend) + RETURN(0); + + rc = unpack_reply(req); + if (rc) + RETURN(rc); + + /* retry indefinitely on EINPROGRESS */ + if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS && + ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) { + time_t now = cfs_time_current_sec(); + + DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS"); + spin_lock(&req->rq_lock); + req->rq_resend = 1; + spin_unlock(&req->rq_lock); + req->rq_nr_resend++; + + /* Readjust the timeout for current conditions */ + ptlrpc_at_set_req_timeout(req); + /* delay resend to give a chance to the server to get ready. + * The delay is increased by 1s on every resend and is capped to + * the current request timeout (i.e. obd_timeout if AT is off, + * or AT service time x 125% + 5s, see at_est2timeout) */ + if (req->rq_nr_resend > req->rq_timeout) + req->rq_sent = now + req->rq_timeout; + else + req->rq_sent = now + req->rq_nr_resend; + + RETURN(0); + } + + if (obd->obd_svc_stats != NULL) { + lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, + timediff); + ptlrpc_lprocfs_rpc_sent(req, timediff); + } if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY && lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) { @@ -1273,20 +1382,20 @@ static int after_reply(struct ptlrpc_request *req) rc = ptlrpc_check_status(req); imp->imp_connect_error = rc; - if (rc) { - /* - * Either we've been evicted, or the server has failed for - * some reason. Try to reconnect, and if that fails, punt to - * the upcall. - */ - if (ll_rpc_recoverable_error(rc)) { - if (req->rq_send_state != LUSTRE_IMP_FULL || - imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) { - RETURN(rc); - } - ptlrpc_request_handle_notconn(req); - RETURN(rc); - } + if (rc) { + /* + * Either we've been evicted, or the server has failed for + * some reason. 
Try to reconnect, and if that fails, punt to + * the upcall. + */ + if (ptlrpc_recoverable_error(rc)) { + if (req->rq_send_state != LUSTRE_IMP_FULL || + imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) { + RETURN(rc); + } + ptlrpc_request_handle_notconn(req); + RETURN(rc); + } } else { /* * Let's look if server sent slv. Do it only for RPC with @@ -1304,7 +1413,7 @@ static int after_reply(struct ptlrpc_request *req) } if (imp->imp_replayable) { - cfs_spin_lock(&imp->imp_lock); + spin_lock(&imp->imp_lock); /* * No point in adding already-committed requests to the replay * list, we will just remove them immediately. b=9829 @@ -1316,81 +1425,134 @@ static int after_reply(struct ptlrpc_request *req) /** version recovery */ ptlrpc_save_versions(req); ptlrpc_retain_replayable_request(req, imp); - } else if (req->rq_commit_cb != NULL) { - cfs_spin_unlock(&imp->imp_lock); - req->rq_commit_cb(req); - cfs_spin_lock(&imp->imp_lock); + } else if (req->rq_commit_cb != NULL && + list_empty(&req->rq_replay_list)) { + /* NB: don't call rq_commit_cb if it's already on + * rq_replay_list, ptlrpc_free_committed() will call + * it later, see LU-3618 for details */ + spin_unlock(&imp->imp_lock); + req->rq_commit_cb(req); + spin_lock(&imp->imp_lock); } /* * Replay-enabled imports return commit-status information. */ - if (lustre_msg_get_last_committed(req->rq_repmsg)) { - imp->imp_peer_committed_transno = - lustre_msg_get_last_committed(req->rq_repmsg); - } - ptlrpc_free_committed(imp); + committed = lustre_msg_get_last_committed(req->rq_repmsg); + if (likely(committed > imp->imp_peer_committed_transno)) + imp->imp_peer_committed_transno = committed; - if (req->rq_transno > imp->imp_peer_committed_transno) - ptlrpc_pinger_commit_expected(imp); + ptlrpc_free_committed(imp); - cfs_spin_unlock(&imp->imp_lock); - } + if (!list_empty(&imp->imp_replay_list)) { + struct ptlrpc_request *last; - RETURN(rc); + last = list_entry(imp->imp_replay_list.prev, + struct ptlrpc_request, + rq_replay_list); + /* + * Requests with rq_replay stay on the list even if no + * commit is expected. + */ + if (last->rq_transno > imp->imp_peer_committed_transno) + ptlrpc_pinger_commit_expected(imp); + } + + spin_unlock(&imp->imp_lock); + } + + RETURN(rc); } /** * Helper function to send request \a req over the network for the first time * Also adjusts request phase. * Returns 0 on success or error code. 
- */ + */ static int ptlrpc_send_new_req(struct ptlrpc_request *req) { - struct obd_import *imp; + struct obd_import *imp = req->rq_import; + struct list_head *tmp; + __u64 min_xid = ~0ULL; int rc; ENTRY; LASSERT(req->rq_phase == RQ_PHASE_NEW); - if (req->rq_sent && (req->rq_sent > cfs_time_current_sec())) - RETURN (0); - ptlrpc_rqphase_move(req, RQ_PHASE_RPC); - - imp = req->rq_import; - cfs_spin_lock(&imp->imp_lock); - - req->rq_import_generation = imp->imp_generation; - - if (ptlrpc_import_delay_req(imp, req, &rc)) { - cfs_spin_lock(&req->rq_lock); - req->rq_waiting = 1; - cfs_spin_unlock(&req->rq_lock); - - DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: " - "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg), - ptlrpc_import_state_name(req->rq_send_state), - ptlrpc_import_state_name(imp->imp_state)); - LASSERT(cfs_list_empty(&req->rq_list)); - cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list); - cfs_atomic_inc(&req->rq_import->imp_inflight); - cfs_spin_unlock(&imp->imp_lock); - RETURN(0); - } + /* do not try to go further if there is not enough memory in enc_pool */ + if (req->rq_sent && req->rq_bulk != NULL) + if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() && + pool_is_at_full_capacity()) + RETURN(-ENOMEM); - if (rc != 0) { - cfs_spin_unlock(&imp->imp_lock); - req->rq_status = rc; - ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); - RETURN(rc); - } + if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) && + (!req->rq_generation_set || + req->rq_import_generation == imp->imp_generation)) + RETURN (0); - LASSERT(cfs_list_empty(&req->rq_list)); - cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list); - cfs_atomic_inc(&req->rq_import->imp_inflight); - cfs_spin_unlock(&imp->imp_lock); + ptlrpc_rqphase_move(req, RQ_PHASE_RPC); - lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid()); + spin_lock(&imp->imp_lock); + + /* the very first time we assign XID. it's important to assign XID + * and put it on the list atomically, so that the lowest assigned + * XID is always known. 
this is vital for multislot last_rcvd */ + if (req->rq_send_state == LUSTRE_IMP_REPLAY) { + LASSERT(req->rq_xid != 0); + } else { + LASSERT(req->rq_xid == 0); + req->rq_xid = ptlrpc_next_xid(); + } + + if (!req->rq_generation_set) + req->rq_import_generation = imp->imp_generation; + + if (ptlrpc_import_delay_req(imp, req, &rc)) { + spin_lock(&req->rq_lock); + req->rq_waiting = 1; + spin_unlock(&req->rq_lock); + + DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: " + "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg), + ptlrpc_import_state_name(req->rq_send_state), + ptlrpc_import_state_name(imp->imp_state)); + LASSERT(list_empty(&req->rq_list)); + list_add_tail(&req->rq_list, &imp->imp_delayed_list); + atomic_inc(&req->rq_import->imp_inflight); + spin_unlock(&imp->imp_lock); + RETURN(0); + } + + if (rc != 0) { + spin_unlock(&imp->imp_lock); + req->rq_status = rc; + ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); + RETURN(rc); + } + + LASSERT(list_empty(&req->rq_list)); + list_add_tail(&req->rq_list, &imp->imp_sending_list); + atomic_inc(&req->rq_import->imp_inflight); + + /* find the lowest unreplied XID */ + list_for_each(tmp, &imp->imp_delayed_list) { + struct ptlrpc_request *r; + r = list_entry(tmp, struct ptlrpc_request, rq_list); + if (r->rq_xid < min_xid) + min_xid = r->rq_xid; + } + list_for_each(tmp, &imp->imp_sending_list) { + struct ptlrpc_request *r; + r = list_entry(tmp, struct ptlrpc_request, rq_list); + if (r->rq_xid < min_xid) + min_xid = r->rq_xid; + } + spin_unlock(&imp->imp_lock); + + if (likely(min_xid != ~0ULL)) + lustre_msg_set_last_xid(req->rq_reqmsg, min_xid - 1); + + lustre_msg_set_status(req->rq_reqmsg, current_pid()); rc = sptlrpc_req_refresh_ctx(req, -1); if (rc) { @@ -1398,49 +1560,99 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req) req->rq_status = rc; RETURN(1); } else { - req->rq_wait_ctx = 1; + spin_lock(&req->rq_lock); + req->rq_wait_ctx = 1; + spin_unlock(&req->rq_lock); RETURN(0); } } - CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc" - " %s:%s:%d:"LPU64":%s:%d\n", cfs_curproc_comm(), - imp->imp_obd->obd_uuid.uuid, - lustre_msg_get_status(req->rq_reqmsg), req->rq_xid, - libcfs_nid2str(imp->imp_connection->c_peer.nid), - lustre_msg_get_opc(req->rq_reqmsg)); + CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc" + " %s:%s:%d:"LPU64":%s:%d\n", current_comm(), + imp->imp_obd->obd_uuid.uuid, + lustre_msg_get_status(req->rq_reqmsg), req->rq_xid, + libcfs_nid2str(imp->imp_connection->c_peer.nid), + lustre_msg_get_opc(req->rq_reqmsg)); rc = ptl_send_rpc(req, 0); + if (rc == -ENOMEM) { + spin_lock(&imp->imp_lock); + if (!list_empty(&req->rq_list)) { + list_del_init(&req->rq_list); + atomic_dec(&req->rq_import->imp_inflight); + } + spin_unlock(&imp->imp_lock); + ptlrpc_rqphase_move(req, RQ_PHASE_NEW); + RETURN(rc); + } if (rc) { DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc); - req->rq_net_err = 1; + spin_lock(&req->rq_lock); + req->rq_net_err = 1; + spin_unlock(&req->rq_lock); RETURN(rc); } RETURN(0); } +static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set) +{ + int remaining, rc; + ENTRY; + + LASSERT(set->set_producer != NULL); + + remaining = atomic_read(&set->set_remaining); + + /* populate the ->set_requests list with requests until we + * reach the maximum number of RPCs in flight for this set */ + while (atomic_read(&set->set_remaining) < set->set_max_inflight) { + rc = set->set_producer(set, set->set_producer_arg); + if (rc == -ENOENT) { + /* no more RPC to produce */ + 
set->set_producer = NULL; + set->set_producer_arg = NULL; + RETURN(0); + } + } + + RETURN((atomic_read(&set->set_remaining) - remaining)); +} + /** * this sends any unsent RPCs in \a set and returns 1 if all are sent * and no more replies are expected. * (it is possible to get less replies than requests sent e.g. due to timed out * requests or requests that we had trouble to send out) + * + * NOTE: This function contains a potential schedule point (cond_resched()). */ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) { - cfs_list_t *tmp; - int force_timer_recalc = 0; - ENTRY; - - if (cfs_atomic_read(&set->set_remaining) == 0) - RETURN(1); - - cfs_list_for_each(tmp, &set->set_requests) { - struct ptlrpc_request *req = - cfs_list_entry(tmp, struct ptlrpc_request, - rq_set_chain); - struct obd_import *imp = req->rq_import; - int unregistered = 0; - int rc = 0; + struct list_head *tmp, *next; + struct list_head comp_reqs; + int force_timer_recalc = 0; + ENTRY; + + if (atomic_read(&set->set_remaining) == 0) + RETURN(1); + + INIT_LIST_HEAD(&comp_reqs); + list_for_each_safe(tmp, next, &set->set_requests) { + struct ptlrpc_request *req = + list_entry(tmp, struct ptlrpc_request, + rq_set_chain); + struct obd_import *imp = req->rq_import; + int unregistered = 0; + int rc = 0; + + /* This schedule point is mainly for the ptlrpcd caller of this + * function. Most ptlrpc sets are not long-lived and unbounded + * in length, but at the least the set used by the ptlrpcd is. + * Since the processing time is unbounded, we need to insert an + * explicit schedule point to make the thread well-behaved. + */ + cond_resched(); if (req->rq_phase == RQ_PHASE_NEW && ptlrpc_send_new_req(req)) { @@ -1449,7 +1661,12 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) /* delayed send - skip */ if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent) - continue; + continue; + + /* delayed resend - skip */ + if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend && + req->rq_sent > cfs_time_current_sec()) + continue; if (!(req->rq_phase == RQ_PHASE_RPC || req->rq_phase == RQ_PHASE_BULK || @@ -1495,8 +1712,10 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) ptlrpc_rqphase_move(req, req->rq_next_phase); } - if (req->rq_phase == RQ_PHASE_COMPLETE) + if (req->rq_phase == RQ_PHASE_COMPLETE) { + list_move_tail(&req->rq_set_chain, &comp_reqs); continue; + } if (req->rq_phase == RQ_PHASE_INTERPRET) GOTO(interpret, req->rq_status); @@ -1525,9 +1744,9 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) } if (req->rq_err) { - cfs_spin_lock(&req->rq_lock); - req->rq_replied = 0; - cfs_spin_unlock(&req->rq_lock); + spin_lock(&req->rq_lock); + req->rq_replied = 0; + spin_unlock(&req->rq_lock); if (req->rq_status == 0) req->rq_status = -EIO; ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); @@ -1536,7 +1755,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) /* ptlrpc_set_wait->l_wait_event sets lwi_allow_intr * so it sets rq_intr regardless of individual rpc - * timeouts. The synchronous IO waiting path sets + * timeouts. The synchronous IO waiting path sets * rq_intr irrespective of whether ptlrpcd * has seen a timeout. 
Our policy is to only interpret * interrupted rpcs after they have timed out, so we @@ -1555,18 +1774,20 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) req->rq_waiting || req->rq_wait_ctx) { int status; - if (!ptlrpc_unregister_reply(req, 1)) - continue; - - cfs_spin_lock(&imp->imp_lock); - if (ptlrpc_import_delay_req(imp, req, &status)){ - /* put on delay list - only if we wait - * recovery finished - before send */ - cfs_list_del_init(&req->rq_list); - cfs_list_add_tail(&req->rq_list, - &imp-> \ - imp_delayed_list); - cfs_spin_unlock(&imp->imp_lock); + if (!ptlrpc_unregister_reply(req, 1)) { + ptlrpc_unregister_bulk(req, 1); + continue; + } + + spin_lock(&imp->imp_lock); + if (ptlrpc_import_delay_req(imp, req, &status)){ + /* put on delay list - only if we wait + * recovery finished - before send */ + list_del_init(&req->rq_list); + list_add_tail(&req->rq_list, + &imp-> + imp_delayed_list); + spin_unlock(&imp->imp_lock); continue; } @@ -1574,47 +1795,38 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) req->rq_status = status; ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); - cfs_spin_unlock(&imp->imp_lock); - GOTO(interpret, req->rq_status); - } - if (ptlrpc_no_resend(req) && !req->rq_wait_ctx) { - req->rq_status = -ENOTCONN; - ptlrpc_rqphase_move(req, - RQ_PHASE_INTERPRET); - cfs_spin_unlock(&imp->imp_lock); - GOTO(interpret, req->rq_status); - } - - cfs_list_del_init(&req->rq_list); - cfs_list_add_tail(&req->rq_list, - &imp->imp_sending_list); - - cfs_spin_unlock(&imp->imp_lock); - - cfs_spin_lock(&req->rq_lock); - req->rq_waiting = 0; - cfs_spin_unlock(&req->rq_lock); - - if (req->rq_timedout || req->rq_resend) { - /* This is re-sending anyways, - * let's mark req as resend. */ - cfs_spin_lock(&req->rq_lock); - req->rq_resend = 1; - cfs_spin_unlock(&req->rq_lock); - if (req->rq_bulk) { - __u64 old_xid; - - if (!ptlrpc_unregister_bulk(req, 1)) - continue; - - /* ensure previous bulk fails */ - old_xid = req->rq_xid; - req->rq_xid = ptlrpc_next_xid(); - CDEBUG(D_HA, "resend bulk " - "old x"LPU64 - " new x"LPU64"\n", - old_xid, req->rq_xid); - } + spin_unlock(&imp->imp_lock); + GOTO(interpret, req->rq_status); + } + if (ptlrpc_no_resend(req) && + !req->rq_wait_ctx) { + req->rq_status = -ENOTCONN; + ptlrpc_rqphase_move(req, + RQ_PHASE_INTERPRET); + spin_unlock(&imp->imp_lock); + GOTO(interpret, req->rq_status); + } + + list_del_init(&req->rq_list); + list_add_tail(&req->rq_list, + &imp->imp_sending_list); + + spin_unlock(&imp->imp_lock); + + spin_lock(&req->rq_lock); + req->rq_waiting = 0; + spin_unlock(&req->rq_lock); + + if (req->rq_timedout || req->rq_resend) { + /* This is re-sending anyways, + * let's mark req as resend. 
*/ + spin_lock(&req->rq_lock); + req->rq_resend = 1; + spin_unlock(&req->rq_lock); + + if (req->rq_bulk != NULL && + !ptlrpc_unregister_bulk(req, 1)) + continue; } /* * rq_wait_ctx is only touched by ptlrpcd, @@ -1624,57 +1836,66 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) if (status) { if (req->rq_err) { req->rq_status = status; - cfs_spin_lock(&req->rq_lock); - req->rq_wait_ctx = 0; - cfs_spin_unlock(&req->rq_lock); - force_timer_recalc = 1; - } else { - cfs_spin_lock(&req->rq_lock); - req->rq_wait_ctx = 1; - cfs_spin_unlock(&req->rq_lock); - } - - continue; - } else { - cfs_spin_lock(&req->rq_lock); - req->rq_wait_ctx = 0; - cfs_spin_unlock(&req->rq_lock); - } - - rc = ptl_send_rpc(req, 0); - if (rc) { - DEBUG_REQ(D_HA, req, "send failed (%d)", - rc); - force_timer_recalc = 1; - cfs_spin_lock(&req->rq_lock); - req->rq_net_err = 1; - cfs_spin_unlock(&req->rq_lock); - } - /* need to reset the timeout */ - force_timer_recalc = 1; - } - - cfs_spin_lock(&req->rq_lock); - - if (ptlrpc_client_early(req)) { - ptlrpc_at_recv_early_reply(req); - cfs_spin_unlock(&req->rq_lock); - continue; - } - - /* Still waiting for a reply? */ - if (ptlrpc_client_recv(req)) { - cfs_spin_unlock(&req->rq_lock); - continue; - } - - /* Did we actually receive a reply? */ - if (!ptlrpc_client_replied(req)) { - cfs_spin_unlock(&req->rq_lock); - continue; - } - - cfs_spin_unlock(&req->rq_lock); + spin_lock(&req->rq_lock); + req->rq_wait_ctx = 0; + spin_unlock(&req->rq_lock); + force_timer_recalc = 1; + } else { + spin_lock(&req->rq_lock); + req->rq_wait_ctx = 1; + spin_unlock(&req->rq_lock); + } + + continue; + } else { + spin_lock(&req->rq_lock); + req->rq_wait_ctx = 0; + spin_unlock(&req->rq_lock); + } + + rc = ptl_send_rpc(req, 0); + if (rc == -ENOMEM) { + spin_lock(&imp->imp_lock); + if (!list_empty(&req->rq_list)) + list_del_init(&req->rq_list); + spin_unlock(&imp->imp_lock); + ptlrpc_rqphase_move(req, RQ_PHASE_NEW); + continue; + } + if (rc) { + DEBUG_REQ(D_HA, req, + "send failed: rc = %d", rc); + force_timer_recalc = 1; + spin_lock(&req->rq_lock); + req->rq_net_err = 1; + spin_unlock(&req->rq_lock); + continue; + } + /* need to reset the timeout */ + force_timer_recalc = 1; + } + + spin_lock(&req->rq_lock); + + if (ptlrpc_client_early(req)) { + ptlrpc_at_recv_early_reply(req); + spin_unlock(&req->rq_lock); + continue; + } + + /* Still waiting for a reply? */ + if (ptlrpc_client_recv(req)) { + spin_unlock(&req->rq_lock); + continue; + } + + /* Did we actually receive a reply? */ + if (!ptlrpc_client_replied(req)) { + spin_unlock(&req->rq_lock); + continue; + } + + spin_unlock(&req->rq_lock); /* unlink from net because we are going to * swab in-place of reply buffer */ @@ -1691,7 +1912,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) * process the reply. Similarly if the RPC returned * an error, and therefore the bulk will never arrive. */ - if (req->rq_bulk == NULL || req->rq_status != 0) { + if (req->rq_bulk == NULL || req->rq_status < 0) { ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); GOTO(interpret, req->rq_status); } @@ -1703,14 +1924,14 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) if (ptlrpc_client_bulk_active(req)) continue; - if (!req->rq_bulk->bd_success) { - /* The RPC reply arrived OK, but the bulk screwed - * up! Dead weird since the server told us the RPC - * was good after getting the REPLY for her GET or - * the ACK for her PUT. 
*/ - DEBUG_REQ(D_ERROR, req, "bulk transfer failed"); - LBUG(); - } + if (req->rq_bulk->bd_failure) { + /* The RPC reply arrived OK, but the bulk screwed + * up! Dead weird since the server told us the RPC + * was good after getting the REPLY for her GET or + * the ACK for her PUT. */ + DEBUG_REQ(D_ERROR, req, "bulk transfer failed"); + req->rq_status = -EIO; + } ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); @@ -1734,34 +1955,64 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) ptlrpc_req_interpret(env, req, req->rq_status); - ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE); - - CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:nid:" - "opc %s:%s:%d:"LPU64":%s:%d\n", cfs_curproc_comm(), - imp->imp_obd->obd_uuid.uuid, - req->rq_reqmsg ? lustre_msg_get_status(req->rq_reqmsg):-1, - req->rq_xid, - libcfs_nid2str(imp->imp_connection->c_peer.nid), - req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : -1); - - cfs_spin_lock(&imp->imp_lock); - /* Request already may be not on sending or delaying list. This - * may happen in the case of marking it erroneous for the case - * ptlrpc_import_delay_req(req, status) find it impossible to - * allow sending this rpc and returns *status != 0. */ - if (!cfs_list_empty(&req->rq_list)) { - cfs_list_del_init(&req->rq_list); - cfs_atomic_dec(&imp->imp_inflight); - } - cfs_spin_unlock(&imp->imp_lock); - - cfs_atomic_dec(&set->set_remaining); - cfs_waitq_broadcast(&imp->imp_recovery_waitq); - } - - /* If we hit an error, we want to recover promptly. */ - RETURN(cfs_atomic_read(&set->set_remaining) == 0 || force_timer_recalc); -} + if (ptlrpcd_check_work(req)) { + atomic_dec(&set->set_remaining); + continue; + } + ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE); + + CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0, + "Completed RPC pname:cluuid:pid:xid:nid:" + "opc %s:%s:%d:"LPU64":%s:%d\n", + current_comm(), imp->imp_obd->obd_uuid.uuid, + lustre_msg_get_status(req->rq_reqmsg), req->rq_xid, + libcfs_nid2str(imp->imp_connection->c_peer.nid), + lustre_msg_get_opc(req->rq_reqmsg)); + + spin_lock(&imp->imp_lock); + /* Request already may be not on sending or delaying list. This + * may happen in the case of marking it erroneous for the case + * ptlrpc_import_delay_req(req, status) find it impossible to + * allow sending this rpc and returns *status != 0. */ + if (!list_empty(&req->rq_list)) { + list_del_init(&req->rq_list); + atomic_dec(&imp->imp_inflight); + } + spin_unlock(&imp->imp_lock); + + atomic_dec(&set->set_remaining); + wake_up_all(&imp->imp_recovery_waitq); + + if (set->set_producer) { + /* produce a new request if possible */ + if (ptlrpc_set_producer(set) > 0) + force_timer_recalc = 1; + + /* free the request that has just been completed + * in order not to pollute set->set_requests */ + list_del_init(&req->rq_set_chain); + spin_lock(&req->rq_lock); + req->rq_set = NULL; + req->rq_invalid_rqset = 0; + spin_unlock(&req->rq_lock); + + /* record rq_status to compute the final status later */ + if (req->rq_status != 0) + set->set_rc = req->rq_status; + ptlrpc_req_finished(req); + } else { + list_move_tail(&req->rq_set_chain, &comp_reqs); + } + } + + /* move completed request at the head of list so it's easier for + * caller to find them */ + list_splice(&comp_reqs, &set->set_requests); + + /* If we hit an error, we want to recover promptly. */ + RETURN(atomic_read(&set->set_remaining) == 0 || force_timer_recalc); +} +EXPORT_SYMBOL(ptlrpc_check_set); /** * Time out request \a req. 
is \a async_unlink is set, that means do not wait @@ -1770,17 +2021,16 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) */ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink) { - struct obd_import *imp = req->rq_import; - int rc = 0; - ENTRY; + struct obd_import *imp = req->rq_import; + int rc = 0; + ENTRY; - cfs_spin_lock(&req->rq_lock); - req->rq_timedout = 1; - cfs_spin_unlock(&req->rq_lock); + spin_lock(&req->rq_lock); + req->rq_timedout = 1; + spin_unlock(&req->rq_lock); - DEBUG_REQ(req->rq_fake ? D_INFO : D_WARNING, req, "Request " - " sent has %s: [sent "CFS_DURATION_T"/" - "real "CFS_DURATION_T"]", + DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent "CFS_DURATION_T + "/real "CFS_DURATION_T"]", req->rq_net_err ? "failed due to network error" : ((req->rq_real_sent == 0 || cfs_time_before(req->rq_real_sent, req->rq_sent) || @@ -1802,10 +2052,7 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink) RETURN(1); } - if (req->rq_fake) - RETURN(1); - - cfs_atomic_inc(&imp->imp_timeouts); + atomic_inc(&imp->imp_timeouts); /* The DLM server doesn't want recovery run on its imports. */ if (imp->imp_dlm_fake) @@ -1819,11 +2066,11 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink) DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)", ptlrpc_import_state_name(req->rq_send_state), ptlrpc_import_state_name(imp->imp_state)); - cfs_spin_lock(&req->rq_lock); - req->rq_status = -ETIMEDOUT; - req->rq_err = 1; - cfs_spin_unlock(&req->rq_lock); - RETURN(1); + spin_lock(&req->rq_lock); + req->rq_status = -ETIMEDOUT; + req->rq_err = 1; + spin_unlock(&req->rq_lock); + RETURN(1); } /* if a request can't be resent we can't wait for an answer after @@ -1845,20 +2092,20 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink) */ int ptlrpc_expired_set(void *data) { - struct ptlrpc_request_set *set = data; - cfs_list_t *tmp; - time_t now = cfs_time_current_sec(); - ENTRY; + struct ptlrpc_request_set *set = data; + struct list_head *tmp; + time_t now = cfs_time_current_sec(); + ENTRY; - LASSERT(set != NULL); + LASSERT(set != NULL); - /* - * A timeout expired. See which reqs it applies to... - */ - cfs_list_for_each (tmp, &set->set_requests) { - struct ptlrpc_request *req = - cfs_list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + /* + * A timeout expired. See which reqs it applies to... + */ + list_for_each(tmp, &set->set_requests) { + struct ptlrpc_request *req = + list_entry(tmp, struct ptlrpc_request, + rq_set_chain); /* don't expire request waiting for context */ if (req->rq_wait_ctx) @@ -1892,34 +2139,34 @@ int ptlrpc_expired_set(void *data) */ void ptlrpc_mark_interrupted(struct ptlrpc_request *req) { - cfs_spin_lock(&req->rq_lock); - req->rq_intr = 1; - cfs_spin_unlock(&req->rq_lock); + spin_lock(&req->rq_lock); + req->rq_intr = 1; + spin_unlock(&req->rq_lock); } +EXPORT_SYMBOL(ptlrpc_mark_interrupted); /** * Interrupts (sets interrupted flag) all uncompleted requests in * a set \a data. Callback for l_wait_event for interruptible waits. 
*/ -void ptlrpc_interrupted_set(void *data) +static void ptlrpc_interrupted_set(void *data) { - struct ptlrpc_request_set *set = data; - cfs_list_t *tmp; + struct ptlrpc_request_set *set = data; + struct list_head *tmp; - LASSERT(set != NULL); - CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set); + LASSERT(set != NULL); + CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set); - cfs_list_for_each(tmp, &set->set_requests) { - struct ptlrpc_request *req = - cfs_list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_for_each(tmp, &set->set_requests) { + struct ptlrpc_request *req = + list_entry(tmp, struct ptlrpc_request, rq_set_chain); - if (req->rq_phase != RQ_PHASE_RPC && - req->rq_phase != RQ_PHASE_UNREGISTERING) - continue; + if (req->rq_phase != RQ_PHASE_RPC && + req->rq_phase != RQ_PHASE_UNREGISTERING) + continue; - ptlrpc_mark_interrupted(req); - } + ptlrpc_mark_interrupted(req); + } } /** @@ -1927,17 +2174,15 @@ void ptlrpc_interrupted_set(void *data) */ int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set) { - cfs_list_t *tmp; - time_t now = cfs_time_current_sec(); - int timeout = 0; - struct ptlrpc_request *req; - int deadline; - ENTRY; - - SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */ + struct list_head *tmp; + time_t now = cfs_time_current_sec(); + int timeout = 0; + struct ptlrpc_request *req; + int deadline; + ENTRY; - cfs_list_for_each(tmp, &set->set_requests) { - req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain); + list_for_each(tmp, &set->set_requests) { + req = list_entry(tmp, struct ptlrpc_request, rq_set_chain); /* * Request in-flight? @@ -1961,6 +2206,8 @@ int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set) if (req->rq_phase == RQ_PHASE_NEW) deadline = req->rq_sent; + else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend) + deadline = req->rq_sent; else deadline = req->rq_sent + req->rq_timeout; @@ -1980,21 +2227,25 @@ int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set) */ int ptlrpc_set_wait(struct ptlrpc_request_set *set) { - cfs_list_t *tmp; + struct list_head *tmp; struct ptlrpc_request *req; struct l_wait_info lwi; int rc, timeout; ENTRY; - if (cfs_list_empty(&set->set_requests)) + if (set->set_producer) + (void)ptlrpc_set_producer(set); + else + list_for_each(tmp, &set->set_requests) { + req = list_entry(tmp, struct ptlrpc_request, + rq_set_chain); + if (req->rq_phase == RQ_PHASE_NEW) + (void)ptlrpc_send_new_req(req); + } + + if (list_empty(&set->set_requests)) RETURN(0); - cfs_list_for_each(tmp, &set->set_requests) { - req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain); - if (req->rq_phase == RQ_PHASE_NEW) - (void)ptlrpc_send_new_req(req); - } - do { timeout = ptlrpc_set_next_timeout(set); @@ -2003,21 +2254,21 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n", set, timeout); - if (timeout == 0 && !cfs_signal_pending()) + if (timeout == 0 && !signal_pending(current)) /* * No requests are in-flight (ether timed out * or delayed), so we can allow interrupts. * We still want to block for a limited time, * so we allow interrupts during the timeout. */ - lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1), + lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1), ptlrpc_expired_set, ptlrpc_interrupted_set, set); else /* * At least one request is in flight, so no * interrupts are allowed. Wait until all - * complete, or an in-flight req times out. + * complete, or an in-flight req times out. */ lwi = LWI_TIMEOUT(cfs_time_seconds(timeout? 
timeout : 1), ptlrpc_expired_set, set); @@ -2027,19 +2278,19 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) /* LU-769 - if we ignored the signal because it was already * pending when we started, we need to handle it now or we risk * it being ignored forever */ - if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr && - cfs_signal_pending()) { - cfs_sigset_t blocked_sigs = - cfs_block_sigsinv(LUSTRE_FATAL_SIGS); - - /* In fact we only interrupt for the "fatal" signals - * like SIGINT or SIGKILL. We still ignore less - * important signals since ptlrpc set is not easily - * reentrant from userspace again */ - if (cfs_signal_pending()) - ptlrpc_interrupted_set(set); - cfs_block_sigs(blocked_sigs); - } + if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr && + signal_pending(current)) { + sigset_t blocked_sigs = + cfs_block_sigsinv(LUSTRE_FATAL_SIGS); + + /* In fact we only interrupt for the "fatal" signals + * like SIGINT or SIGKILL. We still ignore less + * important signals since ptlrpc set is not easily + * reentrant from userspace again */ + if (signal_pending(current)) + ptlrpc_interrupted_set(set); + cfs_restore_sigs(blocked_sigs); + } LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT); @@ -2050,22 +2301,22 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) * EINTR. * I don't really care if we go once more round the loop in * the error cases -eeb. */ - if (rc == 0 && cfs_atomic_read(&set->set_remaining) == 0) { - cfs_list_for_each(tmp, &set->set_requests) { - req = cfs_list_entry(tmp, struct ptlrpc_request, - rq_set_chain); - cfs_spin_lock(&req->rq_lock); - req->rq_invalid_rqset = 1; - cfs_spin_unlock(&req->rq_lock); - } - } - } while (rc != 0 || cfs_atomic_read(&set->set_remaining) != 0); - - LASSERT(cfs_atomic_read(&set->set_remaining) == 0); - - rc = 0; - cfs_list_for_each(tmp, &set->set_requests) { - req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain); + if (rc == 0 && atomic_read(&set->set_remaining) == 0) { + list_for_each(tmp, &set->set_requests) { + req = list_entry(tmp, struct ptlrpc_request, + rq_set_chain); + spin_lock(&req->rq_lock); + req->rq_invalid_rqset = 1; + spin_unlock(&req->rq_lock); + } + } + } while (rc != 0 || atomic_read(&set->set_remaining) != 0); + + LASSERT(atomic_read(&set->set_remaining) == 0); + + rc = set->set_rc; /* rq_status of already freed requests if any */ + list_for_each(tmp, &set->set_requests) { + req = list_entry(tmp, struct ptlrpc_request, rq_set_chain); LASSERT(req->rq_phase == RQ_PHASE_COMPLETE); if (req->rq_status != 0) @@ -2080,9 +2331,9 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) struct ptlrpc_set_cbdata *cbdata, *n; int err; - cfs_list_for_each_entry_safe(cbdata, n, + list_for_each_entry_safe(cbdata, n, &set->set_cblist, psc_item) { - cfs_list_del_init(&cbdata->psc_item); + list_del_init(&cbdata->psc_item); err = cbdata->psc_interpret(set, cbdata->psc_data, rc); if (err && !rc) rc = err; @@ -2092,6 +2343,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) RETURN(rc); } +EXPORT_SYMBOL(ptlrpc_set_wait); /** * Helper fuction for request freeing. 
@@ -2103,50 +2355,46 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) */ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) { - ENTRY; - if (request == NULL) { - EXIT; - return; - } + ENTRY; - LASSERTF(!request->rq_receiving_reply, "req %p\n", request); - LASSERTF(request->rq_rqbd == NULL, "req %p\n",request);/* client-side */ - LASSERTF(cfs_list_empty(&request->rq_list), "req %p\n", request); - LASSERTF(cfs_list_empty(&request->rq_set_chain), "req %p\n", request); - LASSERTF(cfs_list_empty(&request->rq_exp_list), "req %p\n", request); - LASSERTF(!request->rq_replay, "req %p\n", request); + if (request == NULL) + RETURN_EXIT; - req_capsule_fini(&request->rq_pill); + LASSERT(!request->rq_srv_req); + LASSERT(request->rq_export == NULL); + LASSERTF(!request->rq_receiving_reply, "req %p\n", request); + LASSERTF(list_empty(&request->rq_list), "req %p\n", request); + LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request); + LASSERTF(!request->rq_replay, "req %p\n", request); - /* We must take it off the imp_replay_list first. Otherwise, we'll set - * request->rq_reqmsg to NULL while osc_close is dereferencing it. */ - if (request->rq_import != NULL) { - if (!locked) - cfs_spin_lock(&request->rq_import->imp_lock); - cfs_list_del_init(&request->rq_replay_list); - if (!locked) - cfs_spin_unlock(&request->rq_import->imp_lock); - } - LASSERTF(cfs_list_empty(&request->rq_replay_list), "req %p\n", request); + req_capsule_fini(&request->rq_pill); - if (cfs_atomic_read(&request->rq_refcount) != 0) { - DEBUG_REQ(D_ERROR, request, - "freeing request with nonzero refcount"); - LBUG(); + /* We must take it off the imp_replay_list first. Otherwise, we'll set + * request->rq_reqmsg to NULL while osc_close is dereferencing it. */ + if (request->rq_import != NULL) { + if (!locked) + spin_lock(&request->rq_import->imp_lock); + list_del_init(&request->rq_replay_list); + if (!locked) + spin_unlock(&request->rq_import->imp_lock); } + LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request); + + if (atomic_read(&request->rq_refcount) != 0) { + DEBUG_REQ(D_ERROR, request, + "freeing request with nonzero refcount"); + LBUG(); + } if (request->rq_repbuf != NULL) sptlrpc_cli_free_repbuf(request); - if (request->rq_export != NULL) { - class_export_put(request->rq_export); - request->rq_export = NULL; - } + if (request->rq_import != NULL) { class_import_put(request->rq_import); request->rq_import = NULL; } - if (request->rq_bulk != NULL) - ptlrpc_free_bulk(request->rq_bulk); + if (request->rq_bulk != NULL) + ptlrpc_free_bulk(request->rq_bulk); if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL) sptlrpc_cli_free_reqbuf(request); @@ -2157,19 +2405,19 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) if (request->rq_pool) __ptlrpc_free_req_to_pool(request); else - OBD_FREE(request, sizeof(*request)); - EXIT; + ptlrpc_request_cache_free(request); + EXIT; } static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked); /** * Drop one request reference. Must be called with import imp_lock held. - * When reference count drops to zero, reuqest is freed. + * When reference count drops to zero, request is freed. 
*/ void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request) { - LASSERT_SPIN_LOCKED(&request->rq_import->imp_lock); - (void)__ptlrpc_req_finished(request, 1); + assert_spin_locked(&request->rq_import->imp_lock); + (void)__ptlrpc_req_finished(request, 1); } /** @@ -2192,9 +2440,9 @@ static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked) } DEBUG_REQ(D_INFO, request, "refcount now %u", - cfs_atomic_read(&request->rq_refcount) - 1); + atomic_read(&request->rq_refcount) - 1); - if (cfs_atomic_dec_and_test(&request->rq_refcount)) { + if (atomic_dec_and_test(&request->rq_refcount)) { __ptlrpc_free_req(request, locked); RETURN(1); } @@ -2209,6 +2457,7 @@ void ptlrpc_req_finished(struct ptlrpc_request *request) { __ptlrpc_req_finished(request, 0); } +EXPORT_SYMBOL(ptlrpc_req_finished); /** * Returns xid of a \a request @@ -2226,20 +2475,19 @@ EXPORT_SYMBOL(ptlrpc_req_xid); * The request owner (i.e. the thread doing the I/O) must call... * Returns 0 on success or 1 if unregistering cannot be made. */ -int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) +static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) { - int rc; - cfs_waitq_t *wq; - struct l_wait_info lwi; + int rc; + struct l_wait_info lwi; - /* - * Might sleep. - */ - LASSERT(!cfs_in_interrupt()); + /* + * Might sleep. + */ + LASSERT(!in_interrupt()); - /* - * Let's setup deadline for reply unlink. - */ + /* + * Let's setup deadline for reply unlink. + */ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) && async && request->rq_reply_deadline == 0) request->rq_reply_deadline = cfs_time_current_sec()+LONG_UNLINK; @@ -2274,12 +2522,11 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) * a chance to run reply_in_callback(), and to make sure we've * unlinked before returning a req to the pool. */ - if (request->rq_set != NULL) - wq = &request->rq_set->set_waitq; - else - wq = &request->rq_reply_waitq; - for (;;) { + /* The wq argument is ignored by user-space wait_event macros */ + wait_queue_head_t *wq = (request->rq_set != NULL) ? 
+ &request->rq_set->set_waitq : + &request->rq_reply_waitq; /* Network access will complete in finite time but the HUGE * timeout lets us CWARN for visibility of sluggish NALs */ lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK), @@ -2292,13 +2539,48 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) } LASSERT(rc == -ETIMEDOUT); - DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout " - "rvcng=%d unlnk=%d", request->rq_receiving_reply, - request->rq_must_unlink); + DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout " + "receiving_reply=%d req_ulinked=%d reply_unlinked=%d", + request->rq_receiving_reply, + request->rq_req_unlinked, + request->rq_reply_unlinked); } RETURN(0); } +static void ptlrpc_free_request(struct ptlrpc_request *req) +{ + spin_lock(&req->rq_lock); + req->rq_replay = 0; + spin_unlock(&req->rq_lock); + + if (req->rq_commit_cb != NULL) + req->rq_commit_cb(req); + list_del_init(&req->rq_replay_list); + + __ptlrpc_req_finished(req, 1); +} + +/** + * the request is committed and dropped from the replay list of its import + */ +void ptlrpc_request_committed(struct ptlrpc_request *req, int force) +{ + struct obd_import *imp = req->rq_import; + + spin_lock(&imp->imp_lock); + if (list_empty(&req->rq_replay_list)) { + spin_unlock(&imp->imp_lock); + return; + } + + if (force || req->rq_transno <= imp->imp_peer_committed_transno) + ptlrpc_free_request(req); + + spin_unlock(&imp->imp_lock); +} +EXPORT_SYMBOL(ptlrpc_request_committed); + /** * Iterates through replay_list on import and prunes * all requests have transno smaller than last_committed for the @@ -2309,33 +2591,33 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) */ void ptlrpc_free_committed(struct obd_import *imp) { - cfs_list_t *tmp, *saved; - struct ptlrpc_request *req; - struct ptlrpc_request *last_req = NULL; /* temporary fire escape */ - ENTRY; - - LASSERT(imp != NULL); - - LASSERT_SPIN_LOCKED(&imp->imp_lock); + struct ptlrpc_request *req, *saved; + struct ptlrpc_request *last_req = NULL; /* temporary fire escape */ + bool skip_committed_list = true; + ENTRY; + LASSERT(imp != NULL); + assert_spin_locked(&imp->imp_lock); if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked && imp->imp_generation == imp->imp_last_generation_checked) { CDEBUG(D_INFO, "%s: skip recheck: last_committed "LPU64"\n", imp->imp_obd->obd_name, imp->imp_peer_committed_transno); - EXIT; - return; + RETURN_EXIT; } CDEBUG(D_RPCTRACE, "%s: committing for last_committed "LPU64" gen %d\n", imp->imp_obd->obd_name, imp->imp_peer_committed_transno, imp->imp_generation); + + if (imp->imp_generation != imp->imp_last_generation_checked || + imp->imp_last_transno_checked == 0) + skip_committed_list = false; + imp->imp_last_transno_checked = imp->imp_peer_committed_transno; imp->imp_last_generation_checked = imp->imp_generation; - cfs_list_for_each_safe(tmp, saved, &imp->imp_replay_list) { - req = cfs_list_entry(tmp, struct ptlrpc_request, - rq_replay_list); - + list_for_each_entry_safe(req, saved, &imp->imp_replay_list, + rq_replay_list) { /* XXX ok to remove when 1357 resolved - rread 05/29/03 */ LASSERT(req != last_req); last_req = req; @@ -2349,38 +2631,47 @@ void ptlrpc_free_committed(struct obd_import *imp) GOTO(free_req, 0); } - if (req->rq_replay) { - DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)"); - continue; - } - /* not yet committed */ if (req->rq_transno > imp->imp_peer_committed_transno) { DEBUG_REQ(D_RPCTRACE, req, "stopping search"); break; } + if 
(req->rq_replay) { + DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)"); + list_move_tail(&req->rq_replay_list, + &imp->imp_committed_list); + continue; + } + DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")", imp->imp_peer_committed_transno); free_req: - cfs_spin_lock(&req->rq_lock); - req->rq_replay = 0; - cfs_spin_unlock(&req->rq_lock); - if (req->rq_commit_cb != NULL) - req->rq_commit_cb(req); - cfs_list_del_init(&req->rq_replay_list); - __ptlrpc_req_finished(req, 1); - } - + ptlrpc_free_request(req); + } + + if (skip_committed_list) + GOTO(out, 0); + + list_for_each_entry_safe(req, saved, &imp->imp_committed_list, + rq_replay_list) { + LASSERT(req->rq_transno != 0); + if (req->rq_import_generation < imp->imp_generation) { + DEBUG_REQ(D_RPCTRACE, req, "free stale open request"); + ptlrpc_free_request(req); + } else if (!req->rq_replay) { + DEBUG_REQ(D_RPCTRACE, req, "free closed open request"); + ptlrpc_free_request(req); + } + } +out: EXIT; - return; } void ptlrpc_cleanup_client(struct obd_import *imp) { ENTRY; EXIT; - return; } /** @@ -2392,36 +2683,38 @@ void ptlrpc_cleanup_client(struct obd_import *imp) void ptlrpc_resend_req(struct ptlrpc_request *req) { DEBUG_REQ(D_HA, req, "going to resend"); + spin_lock(&req->rq_lock); + + /* Request got reply but linked to the import list still. + Let ptlrpc_check_set() to process it. */ + if (ptlrpc_client_replied(req)) { + spin_unlock(&req->rq_lock); + DEBUG_REQ(D_HA, req, "it has reply, so skip it"); + return; + } + lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 }); req->rq_status = -EAGAIN; - cfs_spin_lock(&req->rq_lock); req->rq_resend = 1; req->rq_net_err = 0; req->rq_timedout = 0; - if (req->rq_bulk) { - __u64 old_xid = req->rq_xid; - /* ensure previous bulk fails */ - req->rq_xid = ptlrpc_next_xid(); - CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n", - old_xid, req->rq_xid); - } ptlrpc_client_wake_req(req); - cfs_spin_unlock(&req->rq_lock); + spin_unlock(&req->rq_lock); } /* XXX: this function and rq_status are currently unused */ void ptlrpc_restart_req(struct ptlrpc_request *req) { - DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request"); - req->rq_status = -ERESTARTSYS; + DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request"); + req->rq_status = -ERESTARTSYS; - cfs_spin_lock(&req->rq_lock); - req->rq_restart = 1; - req->rq_timedout = 0; - ptlrpc_client_wake_req(req); - cfs_spin_unlock(&req->rq_lock); + spin_lock(&req->rq_lock); + req->rq_restart = 1; + req->rq_timedout = 0; + ptlrpc_client_wake_req(req); + spin_unlock(&req->rq_lock); } /** @@ -2429,10 +2722,11 @@ void ptlrpc_restart_req(struct ptlrpc_request *req) */ struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req) { - ENTRY; - cfs_atomic_inc(&req->rq_refcount); - RETURN(req); + ENTRY; + atomic_inc(&req->rq_refcount); + RETURN(req); } +EXPORT_SYMBOL(ptlrpc_request_addref); /** * Add a request to import replay_list. @@ -2441,9 +2735,9 @@ struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req) void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, struct obd_import *imp) { - cfs_list_t *tmp; + struct list_head *tmp; - LASSERT_SPIN_LOCKED(&imp->imp_lock); + assert_spin_locked(&imp->imp_lock); if (req->rq_transno == 0) { DEBUG_REQ(D_EMERG, req, "saving request with zero transno"); @@ -2454,19 +2748,23 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, as resent replayed requests. 
*/ lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT); - /* don't re-add requests that have been replayed */ - if (!cfs_list_empty(&req->rq_replay_list)) - return; + /* don't re-add requests that have been replayed */ + if (!list_empty(&req->rq_replay_list)) + return; - lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY); + lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY); - LASSERT(imp->imp_replayable); - /* Balanced in ptlrpc_free_committed, usually. */ - ptlrpc_request_addref(req); - cfs_list_for_each_prev(tmp, &imp->imp_replay_list) { - struct ptlrpc_request *iter = - cfs_list_entry(tmp, struct ptlrpc_request, - rq_replay_list); + spin_lock(&req->rq_lock); + req->rq_resend = 0; + spin_unlock(&req->rq_lock); + + LASSERT(imp->imp_replayable); + /* Balanced in ptlrpc_free_committed, usually. */ + ptlrpc_request_addref(req); + list_for_each_prev(tmp, &imp->imp_replay_list) { + struct ptlrpc_request *iter = list_entry(tmp, + struct ptlrpc_request, + rq_replay_list); /* We may have duplicate transnos if we create and then * open a file, or for closes retained if to match creating @@ -2483,11 +2781,11 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, continue; } - cfs_list_add(&req->rq_replay_list, &iter->rq_replay_list); - return; - } + list_add(&req->rq_replay_list, &iter->rq_replay_list); + return; + } - cfs_list_add(&req->rq_replay_list, &imp->imp_replay_list); + list_add(&req->rq_replay_list, &imp->imp_replay_list); } /** @@ -2503,14 +2801,14 @@ int ptlrpc_queue_wait(struct ptlrpc_request *req) LASSERT(req->rq_set == NULL); LASSERT(!req->rq_receiving_reply); - set = ptlrpc_prep_set(); - if (set == NULL) { - CERROR("Unable to allocate ptlrpc set."); - RETURN(-ENOMEM); - } + set = ptlrpc_prep_set(); + if (set == NULL) { + CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM); + RETURN(-ENOMEM); + } - /* for distributed debugging */ - lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid()); + /* for distributed debugging */ + lustre_msg_set_status(req->rq_reqmsg, current_pid()); /* add a ref for the set (see comment in ptlrpc_set_add_req) */ ptlrpc_request_addref(req); @@ -2520,31 +2818,32 @@ int ptlrpc_queue_wait(struct ptlrpc_request *req) RETURN(rc); } - -struct ptlrpc_replay_async_args { - int praa_old_state; - int praa_old_status; -}; +EXPORT_SYMBOL(ptlrpc_queue_wait); /** * Callback used for replayed requests reply processing. - * In case of succesful reply calls registeresd request replay callback. + * In case of successful reply calls registered request replay callback. * In case of error restart replay process. 
*/ static int ptlrpc_replay_interpret(const struct lu_env *env, - struct ptlrpc_request *req, - void * data, int rc) + struct ptlrpc_request *req, + void * data, int rc) { - struct ptlrpc_replay_async_args *aa = data; - struct obd_import *imp = req->rq_import; + struct ptlrpc_replay_async_args *aa = data; + struct obd_import *imp = req->rq_import; - ENTRY; - cfs_atomic_dec(&imp->imp_replay_inflight); + ENTRY; + atomic_dec(&imp->imp_replay_inflight); - if (!ptlrpc_client_replied(req)) { - CERROR("request replay timed out, restarting recovery\n"); - GOTO(out, rc = -ETIMEDOUT); - } + /* Note: if it is bulk replay (MDS-MDS replay), then even if + * server got the request, but bulk transfer timeout, let's + * replay the bulk req again */ + if (!ptlrpc_client_replied(req) || + (req->rq_bulk != NULL && + lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) { + DEBUG_REQ(D_ERROR, req, "request replay timed out.\n"); + GOTO(out, rc = -ETIMEDOUT); + } if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR && (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN || @@ -2553,13 +2852,13 @@ static int ptlrpc_replay_interpret(const struct lu_env *env, /** VBR: check version failure */ if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) { - /** replay was failed due to version mismatch */ - DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n"); - cfs_spin_lock(&imp->imp_lock); - imp->imp_vbr_failed = 1; - imp->imp_no_lock_replay = 1; - cfs_spin_unlock(&imp->imp_lock); - lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status); + /** replay was failed due to version mismatch */ + DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n"); + spin_lock(&imp->imp_lock); + imp->imp_vbr_failed = 1; + imp->imp_no_lock_replay = 1; + spin_unlock(&imp->imp_lock); + lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status); } else { /** The transno had better not change over replay. 
*/ LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) == @@ -2570,12 +2869,12 @@ static int ptlrpc_replay_interpret(const struct lu_env *env, lustre_msg_get_transno(req->rq_repmsg)); } - cfs_spin_lock(&imp->imp_lock); - /** if replays by version then gap was occur on server, no trust to locks */ - if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY) - imp->imp_no_lock_replay = 1; - imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg); - cfs_spin_unlock(&imp->imp_lock); + spin_lock(&imp->imp_lock); + /** if replays by version then gap occur on server, no trust to locks */ + if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY) + imp->imp_no_lock_replay = 1; + imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg); + spin_unlock(&imp->imp_lock); LASSERT(imp->imp_last_replay_transno); /* transaction number shouldn't be bigger than the latest replayed */ @@ -2650,13 +2949,17 @@ int ptlrpc_replay_req(struct ptlrpc_request *req) /* Readjust the timeout for current conditions */ ptlrpc_at_set_req_timeout(req); + /* Tell server the net_latency, so the server can calculate how long + * it should wait for next replay */ + lustre_msg_set_service_time(req->rq_reqmsg, + ptlrpc_at_get_net_latency(req)); DEBUG_REQ(D_HA, req, "REPLAY"); - cfs_atomic_inc(&req->rq_import->imp_replay_inflight); - ptlrpc_request_addref(req); /* ptlrpcd needs a ref */ + atomic_inc(&req->rq_import->imp_replay_inflight); + ptlrpc_request_addref(req); /* ptlrpcd needs a ref */ - ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1); - RETURN(0); + ptlrpcd_add_req(req); + RETURN(0); } /** @@ -2664,57 +2967,58 @@ int ptlrpc_replay_req(struct ptlrpc_request *req) */ void ptlrpc_abort_inflight(struct obd_import *imp) { - cfs_list_t *tmp, *n; - ENTRY; + struct list_head *tmp, *n; + ENTRY; - /* Make sure that no new requests get processed for this import. - * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing - * this flag and then putting requests on sending_list or delayed_list. - */ - cfs_spin_lock(&imp->imp_lock); + /* Make sure that no new requests get processed for this import. + * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing + * this flag and then putting requests on sending_list or delayed_list. + */ + spin_lock(&imp->imp_lock); - /* XXX locking? Maybe we should remove each request with the list - * locked? Also, how do we know if the requests on the list are - * being freed at this time? - */ - cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) { - struct ptlrpc_request *req = - cfs_list_entry(tmp, struct ptlrpc_request, rq_list); + /* XXX locking? Maybe we should remove each request with the list + * locked? Also, how do we know if the requests on the list are + * being freed at this time? 
+ */ + list_for_each_safe(tmp, n, &imp->imp_sending_list) { + struct ptlrpc_request *req = list_entry(tmp, + struct ptlrpc_request, + rq_list); DEBUG_REQ(D_RPCTRACE, req, "inflight"); - cfs_spin_lock (&req->rq_lock); - if (req->rq_import_generation < imp->imp_generation) { - req->rq_err = 1; - req->rq_status = -EINTR; - ptlrpc_client_wake_req(req); - } - cfs_spin_unlock (&req->rq_lock); - } + spin_lock(&req->rq_lock); + if (req->rq_import_generation < imp->imp_generation) { + req->rq_err = 1; + req->rq_status = -EIO; + ptlrpc_client_wake_req(req); + } + spin_unlock(&req->rq_lock); + } - cfs_list_for_each_safe(tmp, n, &imp->imp_delayed_list) { - struct ptlrpc_request *req = - cfs_list_entry(tmp, struct ptlrpc_request, rq_list); + list_for_each_safe(tmp, n, &imp->imp_delayed_list) { + struct ptlrpc_request *req = + list_entry(tmp, struct ptlrpc_request, rq_list); - DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req"); + DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req"); - cfs_spin_lock (&req->rq_lock); - if (req->rq_import_generation < imp->imp_generation) { - req->rq_err = 1; - req->rq_status = -EINTR; - ptlrpc_client_wake_req(req); - } - cfs_spin_unlock (&req->rq_lock); - } + spin_lock(&req->rq_lock); + if (req->rq_import_generation < imp->imp_generation) { + req->rq_err = 1; + req->rq_status = -EIO; + ptlrpc_client_wake_req(req); + } + spin_unlock(&req->rq_lock); + } - /* Last chance to free reqs left on the replay list, but we - * will still leak reqs that haven't committed. */ - if (imp->imp_replayable) - ptlrpc_free_committed(imp); + /* Last chance to free reqs left on the replay list, but we + * will still leak reqs that haven't committed. */ + if (imp->imp_replayable) + ptlrpc_free_committed(imp); - cfs_spin_unlock(&imp->imp_lock); + spin_unlock(&imp->imp_lock); - EXIT; + EXIT; } /** @@ -2722,30 +3026,30 @@ void ptlrpc_abort_inflight(struct obd_import *imp) */ void ptlrpc_abort_set(struct ptlrpc_request_set *set) { - cfs_list_t *tmp, *pos; + struct list_head *tmp, *pos; - LASSERT(set != NULL); + LASSERT(set != NULL); - cfs_list_for_each_safe(pos, tmp, &set->set_requests) { - struct ptlrpc_request *req = - cfs_list_entry(pos, struct ptlrpc_request, - rq_set_chain); + list_for_each_safe(pos, tmp, &set->set_requests) { + struct ptlrpc_request *req = + list_entry(pos, struct ptlrpc_request, + rq_set_chain); - cfs_spin_lock(&req->rq_lock); - if (req->rq_phase != RQ_PHASE_RPC) { - cfs_spin_unlock(&req->rq_lock); - continue; - } + spin_lock(&req->rq_lock); + if (req->rq_phase != RQ_PHASE_RPC) { + spin_unlock(&req->rq_lock); + continue; + } - req->rq_err = 1; - req->rq_status = -EINTR; - ptlrpc_client_wake_req(req); - cfs_spin_unlock(&req->rq_lock); - } + req->rq_err = 1; + req->rq_status = -EINTR; + ptlrpc_client_wake_req(req); + spin_unlock(&req->rq_lock); + } } static __u64 ptlrpc_last_xid; -static cfs_spinlock_t ptlrpc_last_xid_lock; +static spinlock_t ptlrpc_last_xid_lock; /** * Initialize the XID for the node. 
This is common among all requests on @@ -2765,28 +3069,81 @@ static cfs_spinlock_t ptlrpc_last_xid_lock; #define YEAR_2004 (1ULL << 30) void ptlrpc_init_xid(void) { - time_t now = cfs_time_current_sec(); + time_t now = cfs_time_current_sec(); - cfs_spin_lock_init(&ptlrpc_last_xid_lock); - if (now < YEAR_2004) { - cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid)); - ptlrpc_last_xid >>= 2; - ptlrpc_last_xid |= (1ULL << 61); - } else { - ptlrpc_last_xid = (__u64)now << 20; - } + spin_lock_init(&ptlrpc_last_xid_lock); + if (now < YEAR_2004) { + cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid)); + ptlrpc_last_xid >>= 2; + ptlrpc_last_xid |= (1ULL << 61); + } else { + ptlrpc_last_xid = (__u64)now << 20; + } + + /* Need to always be aligned to a power-of-two for mutli-bulk BRW */ + CLASSERT((PTLRPC_BULK_OPS_COUNT & (PTLRPC_BULK_OPS_COUNT - 1)) == 0); + ptlrpc_last_xid &= PTLRPC_BULK_OPS_MASK; } /** - * Increase xid and returns resultng new value to the caller. + * Increase xid and returns resulting new value to the caller. + * + * Multi-bulk BRW RPCs consume multiple XIDs for each bulk transfer, starting + * at the returned xid, up to xid + PTLRPC_BULK_OPS_COUNT - 1. The BRW RPC + * itself uses the last bulk xid needed, so the server can determine the + * the number of bulk transfers from the RPC XID and a bitmask. The starting + * xid must align to a power-of-two value. + * + * This is assumed to be true due to the initial ptlrpc_last_xid + * value also being initialized to a power-of-two value. LU-1431 */ __u64 ptlrpc_next_xid(void) { - __u64 tmp; - cfs_spin_lock(&ptlrpc_last_xid_lock); - tmp = ++ptlrpc_last_xid; - cfs_spin_unlock(&ptlrpc_last_xid_lock); - return tmp; + __u64 next; + + spin_lock(&ptlrpc_last_xid_lock); + next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT; + ptlrpc_last_xid = next; + spin_unlock(&ptlrpc_last_xid_lock); + + return next; +} + +/** + * If request has a new allocated XID (new request or EINPROGRESS resend), + * use this XID as matchbits of bulk, otherwise allocate a new matchbits for + * request to ensure previous bulk fails and avoid problems with lost replies + * and therefore several transfers landing into the same buffer from different + * sending attempts. 
+ */ +void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req) +{ + struct ptlrpc_bulk_desc *bd = req->rq_bulk; + + LASSERT(bd != NULL); + + if (!req->rq_resend || req->rq_nr_resend != 0) { + /* this request has a new xid, just use it as bulk matchbits */ + req->rq_mbits = req->rq_xid; + + } else { /* needs to generate a new matchbits for resend */ + __u64 old_mbits = req->rq_mbits; + + if ((bd->bd_import->imp_connect_data.ocd_connect_flags & + OBD_CONNECT_BULK_MBITS) != 0) + req->rq_mbits = ptlrpc_next_xid(); + else /* old version transfers rq_xid to peer as matchbits */ + req->rq_mbits = req->rq_xid = ptlrpc_next_xid(); + + CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n", + old_mbits, req->rq_mbits); + } + + /* For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so + * that server can infer the number of bulks that were prepared, + * see LU-1431 */ + req->rq_mbits += ((bd->bd_iov_count + LNET_MAX_IOV - 1) / + LNET_MAX_IOV) - 1; } /** @@ -2796,15 +3153,17 @@ __u64 ptlrpc_next_xid(void) __u64 ptlrpc_sample_next_xid(void) { #if BITS_PER_LONG == 32 - /* need to avoid possible word tearing on 32-bit systems */ - __u64 tmp; - cfs_spin_lock(&ptlrpc_last_xid_lock); - tmp = ptlrpc_last_xid + 1; - cfs_spin_unlock(&ptlrpc_last_xid_lock); - return tmp; + /* need to avoid possible word tearing on 32-bit systems */ + __u64 next; + + spin_lock(&ptlrpc_last_xid_lock); + next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT; + spin_unlock(&ptlrpc_last_xid_lock); + + return next; #else - /* No need to lock, since returned value is racy anyways */ - return ptlrpc_last_xid + 1; + /* No need to lock, since returned value is racy anyways */ + return ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT; #endif } EXPORT_SYMBOL(ptlrpc_sample_next_xid); @@ -2827,73 +3186,90 @@ EXPORT_SYMBOL(ptlrpc_sample_next_xid); * have delay before it really runs by ptlrpcd thread. */ struct ptlrpc_work_async_args { - __u64 magic; - int (*cb)(const struct lu_env *, void *); - void *cbdata; + int (*cb)(const struct lu_env *, void *); + void *cbdata; }; -#define PTLRPC_WORK_MAGIC 0x6655436b676f4f44ULL /* magic code */ +static void ptlrpcd_add_work_req(struct ptlrpc_request *req) +{ + /* re-initialize the req */ + req->rq_timeout = obd_timeout; + req->rq_sent = cfs_time_current_sec(); + req->rq_deadline = req->rq_sent + req->rq_timeout; + req->rq_reply_deadline = req->rq_deadline; + req->rq_phase = RQ_PHASE_INTERPRET; + req->rq_next_phase = RQ_PHASE_COMPLETE; + req->rq_xid = ptlrpc_next_xid(); + req->rq_import_generation = req->rq_import->imp_generation; + + ptlrpcd_add_req(req); +} static int work_interpreter(const struct lu_env *env, - struct ptlrpc_request *req, void *data, int rc) + struct ptlrpc_request *req, void *data, int rc) { - struct ptlrpc_work_async_args *arg = data; + struct ptlrpc_work_async_args *arg = data; + + LASSERT(ptlrpcd_check_work(req)); + LASSERT(arg->cb != NULL); + + rc = arg->cb(env, arg->cbdata); + + list_del_init(&req->rq_set_chain); + req->rq_set = NULL; + + if (atomic_dec_return(&req->rq_refcount) > 1) { + atomic_set(&req->rq_refcount, 2); + ptlrpcd_add_work_req(req); + } + return rc; +} - LASSERT(arg->magic == PTLRPC_WORK_MAGIC); - LASSERT(arg->cb != NULL); +static int worker_format; - return arg->cb(env, arg->cbdata); +static int ptlrpcd_check_work(struct ptlrpc_request *req) +{ + return req->rq_pill.rc_fmt == (void *)&worker_format; } /** * Create a work for ptlrpc. 
*/ void *ptlrpcd_alloc_work(struct obd_import *imp, - int (*cb)(const struct lu_env *, void *), void *cbdata) + int (*cb)(const struct lu_env *, void *), void *cbdata) { - struct ptlrpc_request *req = NULL; - struct ptlrpc_work_async_args *args; - ENTRY; + struct ptlrpc_request *req = NULL; + struct ptlrpc_work_async_args *args; + ENTRY; - cfs_might_sleep(); + might_sleep(); - if (cb == NULL) - RETURN(ERR_PTR(-EINVAL)); + if (cb == NULL) + RETURN(ERR_PTR(-EINVAL)); - /* copy some code from deprecated fakereq. */ - OBD_ALLOC_PTR(req); - if (req == NULL) { - CERROR("ptlrpc: run out of memory!\n"); - RETURN(ERR_PTR(-ENOMEM)); - } + /* copy some code from deprecated fakereq. */ + req = ptlrpc_request_cache_alloc(GFP_NOFS); + if (req == NULL) { + CERROR("ptlrpc: run out of memory!\n"); + RETURN(ERR_PTR(-ENOMEM)); + } + + ptlrpc_cli_req_init(req); + + req->rq_send_state = LUSTRE_IMP_FULL; + req->rq_type = PTL_RPC_MSG_REQUEST; + req->rq_import = class_import_get(imp); + req->rq_interpret_reply = work_interpreter; + /* don't want reply */ + req->rq_no_delay = req->rq_no_resend = 1; + req->rq_pill.rc_fmt = (void *)&worker_format; + + CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args)); + args = ptlrpc_req_async_args(req); + args->cb = cb; + args->cbdata = cbdata; - req->rq_send_state = LUSTRE_IMP_FULL; - req->rq_type = PTL_RPC_MSG_REQUEST; - req->rq_import = class_import_get(imp); - req->rq_export = NULL; - req->rq_interpret_reply = work_interpreter; - /* don't want reply */ - req->rq_receiving_reply = 0; - req->rq_must_unlink = 0; - req->rq_no_delay = req->rq_no_resend = 1; - - cfs_spin_lock_init(&req->rq_lock); - CFS_INIT_LIST_HEAD(&req->rq_list); - CFS_INIT_LIST_HEAD(&req->rq_replay_list); - CFS_INIT_LIST_HEAD(&req->rq_set_chain); - CFS_INIT_LIST_HEAD(&req->rq_history_list); - CFS_INIT_LIST_HEAD(&req->rq_exp_list); - cfs_waitq_init(&req->rq_reply_waitq); - cfs_waitq_init(&req->rq_set_waitq); - cfs_atomic_set(&req->rq_refcount, 1); - - CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args)); - args = ptlrpc_req_async_args(req); - args->magic = PTLRPC_WORK_MAGIC; - args->cb = cb; - args->cbdata = cbdata; - - RETURN(req); + RETURN(req); } EXPORT_SYMBOL(ptlrpcd_alloc_work); @@ -2908,7 +3284,7 @@ EXPORT_SYMBOL(ptlrpcd_destroy_work); int ptlrpcd_queue_work(void *handler) { - struct ptlrpc_request *req = handler; + struct ptlrpc_request *req = handler; /* * Check if the req is already being queued. @@ -2918,26 +3294,9 @@ int ptlrpcd_queue_work(void *handler) * for this purpose. This is okay because the caller should use this * req as opaque data. - Jinshan */ - LASSERT(cfs_atomic_read(&req->rq_refcount) > 0); - if (cfs_atomic_read(&req->rq_refcount) > 1) - return -EBUSY; - - if (cfs_atomic_inc_return(&req->rq_refcount) > 2) { /* race */ - cfs_atomic_dec(&req->rq_refcount); - return -EBUSY; - } - - /* re-initialize the req */ - req->rq_timeout = obd_timeout; - req->rq_sent = cfs_time_current_sec(); - req->rq_deadline = req->rq_sent + req->rq_timeout; - req->rq_reply_deadline = req->rq_deadline; - req->rq_phase = RQ_PHASE_INTERPRET; - req->rq_next_phase = RQ_PHASE_COMPLETE; - req->rq_xid = ptlrpc_next_xid(); - req->rq_import_generation = req->rq_import->imp_generation; - - ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1); - return 0; + LASSERT(atomic_read(&req->rq_refcount) > 0); + if (atomic_inc_return(&req->rq_refcount) == 2) + ptlrpcd_add_work_req(req); + return 0; } EXPORT_SYMBOL(ptlrpcd_queue_work);
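
The set-producer hunks earlier in this patch (ptlrpc_set_producer() and its callers in ptlrpc_check_set() and ptlrpc_set_wait()) define a small contract: the callback queues one request per call, returns -ENOENT once it has nothing left, and the set is topped up so that at most set_max_inflight RPCs are in flight at a time. The following is a minimal user-space model of that contract, not Lustre code; all demo_-prefixed names are illustrative.

#include <errno.h>
#include <stdio.h>

struct demo_set {
	int	remaining;	/* models atomic set_remaining */
	int	max_inflight;	/* models set_max_inflight */
	int	(*producer)(struct demo_set *set, void *arg);
	void	*producer_arg;
};

/* queues one request per call; -ENOENT once there is nothing left */
static int demo_producer(struct demo_set *set, void *arg)
{
	int *left = arg;

	if (*left == 0)
		return -ENOENT;
	(*left)--;
	set->remaining++;	/* "one more request on the set" */
	return 0;
}

/* mirrors the loop in ptlrpc_set_producer(): top the set up */
static int demo_set_producer(struct demo_set *set)
{
	int before = set->remaining;

	while (set->remaining < set->max_inflight) {
		int rc = set->producer(set, set->producer_arg);

		if (rc == -ENOENT) {
			/* no more RPCs to produce */
			set->producer = NULL;
			set->producer_arg = NULL;
			return 0;
		}
	}
	return set->remaining - before;	/* how many were produced */
}

int main(void)
{
	int todo = 10;
	struct demo_set set = {
		.remaining = 0, .max_inflight = 4,
		.producer = demo_producer, .producer_arg = &todo,
	};
	int produced = demo_set_producer(&set);

	printf("produced %d, in flight %d, left to produce %d\n",
	       produced, set.remaining, todo);
	return 0;
}

In the real code ptlrpc_check_set() calls the producer again each time a produced request completes (and frees the completed request so it does not pollute set_requests), which keeps the pipeline full without ever materializing the whole queue.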
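
The ptlrpc_init_xid()/ptlrpc_next_xid() hunks reserve a block of PTLRPC_BULK_OPS_COUNT match bits per RPC: the initial value is rounded down with PTLRPC_BULK_OPS_MASK and the counter then advances in whole blocks, so every XID handed out starts on a PTLRPC_BULK_OPS_COUNT boundary and a multi-bulk BRW can use xid .. xid + PTLRPC_BULK_OPS_COUNT - 1 without colliding with the next request (LU-1431). A stand-alone sketch of that arithmetic follows; the constant value is an assumption of the demo, any power of two behaves the same.

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>
#include <time.h>

#define DEMO_BULK_OPS_COUNT	16ULL		/* assumed, must be 2^n */
#define DEMO_BULK_OPS_MASK	(~(DEMO_BULK_OPS_COUNT - 1))

static uint64_t demo_last_xid;

static void demo_init_xid(void)
{
	/* the real code also falls back to random bytes for bogus clocks */
	demo_last_xid = (uint64_t)time(NULL) << 20;
	/* round down so the first demo_next_xid() result is aligned */
	demo_last_xid &= DEMO_BULK_OPS_MASK;
}

static uint64_t demo_next_xid(void)
{
	/* the real code holds ptlrpc_last_xid_lock around this step */
	demo_last_xid += DEMO_BULK_OPS_COUNT;
	return demo_last_xid;
}

int main(void)
{
	int i;

	demo_init_xid();
	for (i = 0; i < 4; i++) {
		uint64_t xid = demo_next_xid();
		uint64_t last = xid + DEMO_BULK_OPS_COUNT - 1;

		/* every XID owns a private block of bulk match bits */
		assert((xid & (DEMO_BULK_OPS_COUNT - 1)) == 0);
		printf("xid %#" PRIx64 " covers bulk match bits %#" PRIx64
		       "..%#" PRIx64 "\n", xid, xid, last);
	}
	return 0;
}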
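
ptlrpc_set_bulk_mbits() ends by advancing rq_mbits so that the value carried in the request is the last match bits consumed by the bulk: with one LNet MD per LNET_MAX_IOV pages, the server can recover the number of bulk transfers from the low bits of rq_mbits. A worked example of that ceiling division, assuming a conventional LNET_MAX_IOV of 256 pages (the constant is an assumption of this sketch, not taken from the patch):

#include <stdio.h>

#define DEMO_LNET_MAX_IOV	256u	/* assumed pages per bulk MD */

/* ceiling division used at the end of ptlrpc_set_bulk_mbits() */
static unsigned int demo_bulk_count(unsigned int bd_iov_count)
{
	return (bd_iov_count + DEMO_LNET_MAX_IOV - 1) / DEMO_LNET_MAX_IOV;
}

int main(void)
{
	unsigned long long mbits = 0x1a2b30ULL; /* aligned starting XID/mbits */
	unsigned int bd_iov_count = 600;	/* e.g. a 600-page BRW */
	unsigned int nbulks = demo_bulk_count(bd_iov_count);

	/* the request carries the *last* match bits, mbits + nbulks - 1 */
	printf("%u pages -> %u bulks, match bits %#llx..%#llx\n",
	       bd_iov_count, nbulks, mbits, mbits + nbulks - 1);
	return 0;
}

Here 600 pages need three bulk MDs, so the request carries 0x1a2b32; since each XID reserves a power-of-two block of match bits, the low bits of that value (0x2, plus one) give the server the bulk count.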
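
The rewritten ptlrpcd_queue_work() above reduces the old compare-and-retry logic to one rule: the work request idles at refcount 1, the caller whose increment takes it from 1 to 2 is the one that queues it, later callers only bump the count, and work_interpreter() re-queues the request once if anyone asked while the handler was running. Below is a single-threaded user-space model of that pattern using C11 atomics; the demo_ names are illustrative and not Lustre APIs.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int demo_refcount = 1;	/* models req->rq_refcount */

static void demo_run_handler(void);

static void demo_queue_work(void)
{
	/* only the 1 -> 2 transition actually queues the request */
	if (atomic_fetch_add(&demo_refcount, 1) + 1 == 2)
		demo_run_handler();	/* stands in for ptlrpcd_add_work_req() */
}

static void demo_run_handler(void)
{
	printf("handler runs\n");
	/* models work_interpreter(): drop the queueing reference ... */
	if (atomic_fetch_sub(&demo_refcount, 1) - 1 > 1) {
		/* ... someone queued again meanwhile: collapse the extra
		 * references to "queued once" and run one more time */
		atomic_store(&demo_refcount, 2);
		demo_run_handler();
	}
}

int main(void)
{
	demo_queue_work();	/* 1 -> 2: queues and runs the handler */
	demo_queue_work();	/* back at 1, so this queues and runs again */
	return 0;
}

The concurrent path (a second caller bumping the count while the handler runs) cannot be exercised in this single-threaded demo, but the re-queue branch in demo_run_handler() models how work_interpreter() handles it.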