* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
/** Implementation of client-side PortalRPC interfaces */
#define DEBUG_SUBSYSTEM S_RPC
-#ifndef __KERNEL__
-#include <errno.h>
-#include <signal.h>
-#include <liblustre.h>
-#endif
#include <obd_support.h>
#include <obd_class.h>
#include "ptlrpc_internal.h"
+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = {
+ .add_kiov_frag = ptlrpc_prep_bulk_page_pin,
+ .release_frags = ptlrpc_release_bulk_page_pin,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops);
+
+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
+ .add_kiov_frag = ptlrpc_prep_bulk_page_nopin,
+ .release_frags = ptlrpc_release_bulk_noop,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
+
+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kvec_ops = {
+ .add_iov_frag = ptlrpc_prep_bulk_frag,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kvec_ops);
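+
+/*
+ * Illustrative sketch, not part of this change: a client building a
+ * page-based bulk picks one of the ops tables above and adds fragments
+ * through bd_frag_ops, roughly:
+ *
+ *	desc = ptlrpc_prep_bulk_imp(req, npages, 1,
+ *				    PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+ *				    OST_BULK_PORTAL, &ptlrpc_bulk_kiov_pin_ops);
+ *	if (desc == NULL)
+ *		return -ENOMEM;
+ *	desc->bd_frag_ops->add_kiov_frag(desc, page, pageoffset, len);
+ *
+ * npages, page, pageoffset and len stand in for the caller's own values;
+ * the PTLRPC_BULK_* flags and OST_BULK_PORTAL are assumed to be defined
+ * elsewhere in the tree (lustre_net.h).
+ */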
+
static int ptlrpc_send_new_req(struct ptlrpc_request *req);
+static int ptlrpcd_check_work(struct ptlrpc_request *req);
+static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
/**
* Initialize passed in client structure \a cl.
return c;
}
-EXPORT_SYMBOL(ptlrpc_uuid_to_connection);
/**
* Allocate and initialize new bulk descriptor on the sender.
* Returns pointer to the descriptor or NULL on error.
*/
-struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
- unsigned type, unsigned portal)
+struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned nfrags, unsigned max_brw,
+ enum ptlrpc_bulk_op_type type,
+ unsigned portal,
+ const struct ptlrpc_bulk_frag_ops *ops)
{
struct ptlrpc_bulk_desc *desc;
int i;
- OBD_ALLOC(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]));
+ /* ensure that only one of KIOV or IOVEC is set but not both */
+ LASSERT((ptlrpc_is_bulk_desc_kiov(type) &&
+ ops->add_kiov_frag != NULL) ||
+ (ptlrpc_is_bulk_desc_kvec(type) &&
+ ops->add_iov_frag != NULL));
+
+ if (type & PTLRPC_BULK_BUF_KIOV) {
+ OBD_ALLOC(desc,
+ offsetof(struct ptlrpc_bulk_desc,
+ bd_u.bd_kiov.bd_vec[nfrags]));
+ } else {
+ OBD_ALLOC(desc,
+ offsetof(struct ptlrpc_bulk_desc,
+ bd_u.bd_kvec.bd_kvec[nfrags]));
+ }
+
if (!desc)
return NULL;
spin_lock_init(&desc->bd_lock);
- cfs_waitq_init(&desc->bd_waitq);
- desc->bd_max_iov = npages;
+ init_waitqueue_head(&desc->bd_waitq);
+ desc->bd_max_iov = nfrags;
desc->bd_iov_count = 0;
desc->bd_portal = portal;
desc->bd_type = type;
desc->bd_md_count = 0;
+ desc->bd_frag_ops = (struct ptlrpc_bulk_frag_ops *) ops;
LASSERT(max_brw > 0);
desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
/* PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
/**
* Prepare bulk descriptor for specified outgoing request \a req that
- * can fit \a npages * pages. \a type is bulk type. \a portal is where
+ * can fit \a nfrags fragments. \a type is bulk type. \a portal is where
* the bulk is to be sent. Used on client-side.
* Returns pointer to newly allocated initialized bulk descriptor or NULL on
* error.
*/
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
- unsigned npages, unsigned max_brw,
- unsigned type, unsigned portal)
+ unsigned nfrags, unsigned max_brw,
+ unsigned int type,
+ unsigned portal,
+ const struct ptlrpc_bulk_frag_ops
+ *ops)
{
struct obd_import *imp = req->rq_import;
struct ptlrpc_bulk_desc *desc;
ENTRY;
- LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
- desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
+ LASSERT(ptlrpc_is_bulk_op_passive(type));
+
+ desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
if (desc == NULL)
RETURN(NULL);
}
EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
-/**
- * Add a page \a page to the bulk descriptor \a desc.
- * Data to transfer in the page starts at offset \a pageoffset and
- * amount of data to transfer from the page is \a len
- */
void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
- cfs_page_t *page, int pageoffset, int len, int pin)
+ struct page *page, int pageoffset, int len,
+ int pin)
{
- LASSERT(desc->bd_iov_count < desc->bd_max_iov);
- LASSERT(page != NULL);
- LASSERT(pageoffset >= 0);
- LASSERT(len > 0);
- LASSERT(pageoffset + len <= CFS_PAGE_SIZE);
+ lnet_kiov_t *kiov;
+
+ LASSERT(desc->bd_iov_count < desc->bd_max_iov);
+ LASSERT(page != NULL);
+ LASSERT(pageoffset >= 0);
+ LASSERT(len > 0);
+ LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
+ kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
- desc->bd_nob += len;
+ desc->bd_nob += len;
if (pin)
- cfs_page_pin(page);
+ page_cache_get(page);
- ptlrpc_add_bulk_page(desc, page, pageoffset, len);
+ kiov->kiov_page = page;
+ kiov->kiov_offset = pageoffset;
+ kiov->kiov_len = len;
+
+ desc->bd_iov_count++;
}
EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
-/**
- * Uninitialize and free bulk descriptor \a desc.
- * Works on bulk descriptors both from server and client side.
- */
-void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
+int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
+ void *frag, int len)
+{
+ struct kvec *iovec;
+ ENTRY;
+
+ LASSERT(desc->bd_iov_count < desc->bd_max_iov);
+ LASSERT(frag != NULL);
+ LASSERT(len > 0);
+ LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type));
+
+ iovec = &BD_GET_KVEC(desc, desc->bd_iov_count);
+
+ desc->bd_nob += len;
+
+ iovec->iov_base = frag;
+ iovec->iov_len = len;
+
+ desc->bd_iov_count++;
+
+ RETURN(desc->bd_nob);
+}
+EXPORT_SYMBOL(ptlrpc_prep_bulk_frag);
+
+void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
{
- int i;
ENTRY;
LASSERT(desc != NULL);
LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
LASSERT(desc->bd_md_count == 0); /* network hands off */
LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
+ LASSERT(desc->bd_frag_ops != NULL);
- sptlrpc_enc_pool_put_pages(desc);
+ if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
+ sptlrpc_enc_pool_put_pages(desc);
if (desc->bd_export)
class_export_put(desc->bd_export);
else
class_import_put(desc->bd_import);
- if (unpin) {
- for (i = 0; i < desc->bd_iov_count ; i++)
- cfs_page_unpin(desc->bd_iov[i].kiov_page);
- }
+ if (desc->bd_frag_ops->release_frags != NULL)
+ desc->bd_frag_ops->release_frags(desc);
+
+ if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
+ OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
+ bd_u.bd_kiov.bd_vec[desc->bd_max_iov]));
+ else
+ OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
+ bd_u.bd_kvec.bd_kvec[desc->bd_max_iov]));
- OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
- bd_iov[desc->bd_max_iov]));
EXIT;
}
-EXPORT_SYMBOL(__ptlrpc_free_bulk);
+EXPORT_SYMBOL(ptlrpc_free_bulk);
/**
* Set server timelimit for this req, i.e. how long are we willing to wait
}
/* Adjust expected network latency */
-static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
- unsigned int service_time)
+void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
+ unsigned int service_time)
{
unsigned int nl, oldnl;
struct imp_at *at;
time_t now = cfs_time_current_sec();
LASSERT(req->rq_import);
- at = &req->rq_import->imp_at;
+
+ if (service_time > now - req->rq_sent + 3) {
+ /* bz16408, however, this can also happen if the early reply
+ * is lost and the client RPC has expired and been resent;
+ * the early reply or the reply to the original RPC can still
+ * fit in the reply buffer of the resent RPC. The client is
+ * now measuring time from the resend, while the server sent
+ * back the service time of the original RPC.
+ */
+ CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ?
+ D_ADAPTTO : D_WARNING,
+ "Reported service time %u > total measured time "
+ CFS_DURATION_T"\n", service_time,
+ cfs_time_sub(now, req->rq_sent));
+ return;
+ }
/* Network latency is total time less server processing time */
- nl = max_t(int, now - req->rq_sent - service_time, 0) +1/*st rounding*/;
- if (service_time > now - req->rq_sent + 3 /* bz16408 */)
- CWARN("Reported service time %u > total measured time "
- CFS_DURATION_T"\n", service_time,
- cfs_time_sub(now, req->rq_sent));
+ nl = max_t(int, now - req->rq_sent -
+ service_time, 0) + 1; /* st rounding */
+ at = &req->rq_import->imp_at;
oldnl = at_measured(&at->iat_net_latency, nl);
if (oldnl != 0)
* If anything goes wrong just ignore it - same as if it never happened
*/
static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
+__must_hold(&req->rq_lock)
{
struct ptlrpc_request *early_req;
time_t olddl;
rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
if (rc) {
spin_lock(&req->rq_lock);
- RETURN(rc);
- }
-
- rc = unpack_reply(early_req);
- if (rc == 0) {
- /* Expecting to increase the service time estimate here */
- ptlrpc_at_adj_service(req,
- lustre_msg_get_timeout(early_req->rq_repmsg));
- ptlrpc_at_adj_net_latency(req,
- lustre_msg_get_service_time(early_req->rq_repmsg));
- }
-
- sptlrpc_cli_finish_early_reply(early_req);
+ RETURN(rc);
+ }
+ rc = unpack_reply(early_req);
if (rc != 0) {
+ sptlrpc_cli_finish_early_reply(early_req);
spin_lock(&req->rq_lock);
RETURN(rc);
}
- /* Adjust the local timeout for this req */
- ptlrpc_at_set_req_timeout(req);
+ /* Use the new timeout value only to adjust the local value for this
+ * request, don't include it in at_history. It is not yet clear why
+ * the service time increased and whether it should be counted or
+ * skipped (e.g. this can be a recovery case or some server error);
+ * the real reply will add the new data if it is worth adding. */
+ req->rq_timeout = lustre_msg_get_timeout(early_req->rq_repmsg);
+ lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
+
+ /* Network latency can be adjusted, it is pure network delays */
+ ptlrpc_at_adj_net_latency(req,
+ lustre_msg_get_service_time(early_req->rq_repmsg));
+
+ sptlrpc_cli_finish_early_reply(early_req);
spin_lock(&req->rq_lock);
olddl = req->rq_deadline;
- /* server assumes it now has rq_timeout from when it sent the
- * early reply, so client should give it at least that long. */
- req->rq_deadline = cfs_time_current_sec() + req->rq_timeout +
+ /* The server assumes it now has rq_timeout from when the request
+ * arrived, so the client should give it at least that long.
+ * Since we don't know the arrival time, we'll use the original
+ * sent time. */
+ req->rq_deadline = req->rq_sent + req->rq_timeout +
ptlrpc_at_get_net_latency(req);
DEBUG_REQ(D_ADAPTTO, req,
RETURN(rc);
}
+static struct kmem_cache *request_cache;
+
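+/**
+ * Create the slab cache used to allocate struct ptlrpc_request objects,
+ * replacing the plain per-request OBD_ALLOC. Returns 0 on success or
+ * -ENOMEM if the cache could not be created.
+ */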
+int ptlrpc_request_cache_init(void)
+{
+ request_cache = kmem_cache_create("ptlrpc_cache",
+ sizeof(struct ptlrpc_request),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ return request_cache == NULL ? -ENOMEM : 0;
+}
+
+void ptlrpc_request_cache_fini(void)
+{
+ kmem_cache_destroy(request_cache);
+}
+
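+/**
+ * Allocate a request structure from the slab cache using allocation flags
+ * \a flags. Returns the new request or NULL on allocation failure.
+ */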
+struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags)
+{
+ struct ptlrpc_request *req;
+
+ OBD_SLAB_ALLOC_PTR_GFP(req, request_cache, flags);
+ return req;
+}
+
+void ptlrpc_request_cache_free(struct ptlrpc_request *req)
+{
+ OBD_SLAB_FREE_PTR(req, request_cache);
+}
+
/**
* Wind down request pool \a pool.
* Frees all requests from the pool too
*/
void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
{
- cfs_list_t *l, *tmp;
+ struct list_head *l, *tmp;
struct ptlrpc_request *req;
LASSERT(pool != NULL);
spin_lock(&pool->prp_lock);
- cfs_list_for_each_safe(l, tmp, &pool->prp_req_list) {
- req = cfs_list_entry(l, struct ptlrpc_request, rq_list);
- cfs_list_del(&req->rq_list);
+ list_for_each_safe(l, tmp, &pool->prp_req_list) {
+ req = list_entry(l, struct ptlrpc_request, rq_list);
+ list_del(&req->rq_list);
LASSERT(req->rq_reqbuf);
LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
- OBD_FREE(req, sizeof(*req));
+ ptlrpc_request_cache_free(req);
}
spin_unlock(&pool->prp_lock);
OBD_FREE(pool, sizeof(*pool));
/**
* Allocates, initializes and adds \a num_rq requests to the pool \a pool
*/
-void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
+int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
{
int i;
int size = 1;
while (size < pool->prp_rq_size)
size <<= 1;
- LASSERTF(cfs_list_empty(&pool->prp_req_list) ||
+ LASSERTF(list_empty(&pool->prp_req_list) ||
size == pool->prp_rq_size,
"Trying to change pool size with nonempty pool "
"from %d to %d bytes\n", pool->prp_rq_size, size);
struct lustre_msg *msg;
spin_unlock(&pool->prp_lock);
- OBD_ALLOC(req, sizeof(struct ptlrpc_request));
- if (!req)
- return;
- OBD_ALLOC_LARGE(msg, size);
- if (!msg) {
- OBD_FREE(req, sizeof(struct ptlrpc_request));
- return;
+ req = ptlrpc_request_cache_alloc(GFP_NOFS);
+ if (!req)
+ return i;
+ OBD_ALLOC_LARGE(msg, size);
+ if (!msg) {
+ ptlrpc_request_cache_free(req);
+ return i;
}
req->rq_reqbuf = msg;
req->rq_reqbuf_len = size;
req->rq_pool = pool;
spin_lock(&pool->prp_lock);
- cfs_list_add_tail(&req->rq_list, &pool->prp_req_list);
+ list_add_tail(&req->rq_list, &pool->prp_req_list);
}
spin_unlock(&pool->prp_lock);
- return;
+ return num_rq;
}
EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
*/
struct ptlrpc_request_pool *
ptlrpc_init_rq_pool(int num_rq, int msgsize,
- void (*populate_pool)(struct ptlrpc_request_pool *, int))
+ int (*populate_pool)(struct ptlrpc_request_pool *, int))
{
- struct ptlrpc_request_pool *pool;
+ struct ptlrpc_request_pool *pool;
- OBD_ALLOC(pool, sizeof (struct ptlrpc_request_pool));
- if (!pool)
- return NULL;
+ OBD_ALLOC(pool, sizeof(struct ptlrpc_request_pool));
+ if (!pool)
+ return NULL;
- /* Request next power of two for the allocation, because internally
- kernel would do exactly this */
+ /* Request next power of two for the allocation, because internally
+ the kernel would do exactly this */
spin_lock_init(&pool->prp_lock);
- CFS_INIT_LIST_HEAD(&pool->prp_req_list);
- pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
- pool->prp_populate = populate_pool;
+ INIT_LIST_HEAD(&pool->prp_req_list);
+ pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
+ pool->prp_populate = populate_pool;
- populate_pool(pool, num_rq);
+ populate_pool(pool, num_rq);
- if (cfs_list_empty(&pool->prp_req_list)) {
- /* have not allocated a single request for the pool */
- OBD_FREE(pool, sizeof (struct ptlrpc_request_pool));
- pool = NULL;
- }
- return pool;
+ return pool;
}
EXPORT_SYMBOL(ptlrpc_init_rq_pool);
* in writeout path, where this matters, this is safe to do, because
* nothing is lost in this case, and when some in-flight requests
* complete, this code will be called again. */
- if (unlikely(cfs_list_empty(&pool->prp_req_list))) {
+ if (unlikely(list_empty(&pool->prp_req_list))) {
spin_unlock(&pool->prp_lock);
return NULL;
}
- request = cfs_list_entry(pool->prp_req_list.next, struct ptlrpc_request,
- rq_list);
- cfs_list_del_init(&request->rq_list);
+ request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
+ rq_list);
+ list_del_init(&request->rq_list);
spin_unlock(&pool->prp_lock);
LASSERT(request->rq_reqbuf);
struct ptlrpc_request_pool *pool = request->rq_pool;
spin_lock(&pool->prp_lock);
- LASSERT(cfs_list_empty(&request->rq_list));
+ LASSERT(list_empty(&request->rq_list));
LASSERT(!request->rq_receiving_reply);
- cfs_list_add_tail(&request->rq_list, &pool->prp_req_list);
+ list_add_tail(&request->rq_list, &pool->prp_req_list);
spin_unlock(&pool->prp_lock);
}
lustre_msg_add_version(request->rq_reqmsg, version);
request->rq_send_state = LUSTRE_IMP_FULL;
request->rq_type = PTL_RPC_MSG_REQUEST;
- request->rq_export = NULL;
request->rq_req_cbid.cbid_fn = request_out_callback;
request->rq_req_cbid.cbid_arg = request;
ptlrpc_at_set_req_timeout(request);
- spin_lock_init(&request->rq_lock);
- CFS_INIT_LIST_HEAD(&request->rq_list);
- CFS_INIT_LIST_HEAD(&request->rq_timed_list);
- CFS_INIT_LIST_HEAD(&request->rq_replay_list);
- CFS_INIT_LIST_HEAD(&request->rq_ctx_chain);
- CFS_INIT_LIST_HEAD(&request->rq_set_chain);
- CFS_INIT_LIST_HEAD(&request->rq_history_list);
- CFS_INIT_LIST_HEAD(&request->rq_exp_list);
- cfs_waitq_init(&request->rq_reply_waitq);
- cfs_waitq_init(&request->rq_set_waitq);
- request->rq_xid = ptlrpc_next_xid();
- cfs_atomic_set(&request->rq_refcount, 1);
-
- lustre_msg_set_opc(request->rq_reqmsg, opcode);
+ lustre_msg_set_opc(request->rq_reqmsg, opcode);
- RETURN(0);
+ RETURN(0);
out_ctx:
- sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
+ sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
out_free:
- class_import_put(imp);
- return rc;
+ class_import_put(imp);
+ return rc;
}
int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
*/
static inline
struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
- struct ptlrpc_request_pool *pool)
+ struct ptlrpc_request_pool *pool)
{
- struct ptlrpc_request *request = NULL;
+ struct ptlrpc_request *request = NULL;
- if (pool)
- request = ptlrpc_prep_req_from_pool(pool);
+ request = ptlrpc_request_cache_alloc(GFP_NOFS);
- if (!request)
- OBD_ALLOC_PTR(request);
+ if (!request && pool)
+ request = ptlrpc_prep_req_from_pool(pool);
- if (request) {
- LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
- LASSERT(imp != LP_POISON);
- LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p",
- imp->imp_client);
- LASSERT(imp->imp_client != LP_POISON);
+ if (request) {
+ ptlrpc_cli_req_init(request);
- request->rq_import = class_import_get(imp);
- } else {
- CERROR("request allocation out of memory\n");
- }
+ LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
+ LASSERT(imp != LP_POISON);
+ LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n",
+ imp->imp_client);
+ LASSERT(imp->imp_client != LP_POISON);
- return request;
+ request->rq_import = class_import_get(imp);
+ } else {
+ CERROR("request allocation out of memory\n");
+ }
+
+ return request;
}
/**
*/
void ptlrpc_request_free(struct ptlrpc_request *request)
{
- if (request->rq_pool)
- __ptlrpc_free_req_to_pool(request);
- else
- OBD_FREE_PTR(request);
+ if (request->rq_pool)
+ __ptlrpc_free_req_to_pool(request);
+ else
+ ptlrpc_request_cache_free(request);
}
EXPORT_SYMBOL(ptlrpc_request_free);
}
return request;
}
-EXPORT_SYMBOL(ptlrpc_prep_req_pool);
/**
* Same as ptlrpc_prep_req_pool, but without pool
return ptlrpc_prep_req_pool(imp, version, opcode, count, lengths, bufs,
NULL);
}
-EXPORT_SYMBOL(ptlrpc_prep_req);
/**
- * Allocate and initialize new request set structure.
+ * Allocate and initialize new request set structure on the current CPT.
* Returns a pointer to the newly allocated set structure or NULL on error.
*/
struct ptlrpc_request_set *ptlrpc_prep_set(void)
{
- struct ptlrpc_request_set *set;
+ struct ptlrpc_request_set *set;
+ int cpt;
ENTRY;
- OBD_ALLOC(set, sizeof *set);
+ cpt = cfs_cpt_current(cfs_cpt_table, 0);
+ OBD_CPT_ALLOC(set, cfs_cpt_table, cpt, sizeof *set);
if (!set)
RETURN(NULL);
- cfs_atomic_set(&set->set_refcount, 1);
- CFS_INIT_LIST_HEAD(&set->set_requests);
- cfs_waitq_init(&set->set_waitq);
- cfs_atomic_set(&set->set_new_count, 0);
- cfs_atomic_set(&set->set_remaining, 0);
+ atomic_set(&set->set_refcount, 1);
+ INIT_LIST_HEAD(&set->set_requests);
+ init_waitqueue_head(&set->set_waitq);
+ atomic_set(&set->set_new_count, 0);
+ atomic_set(&set->set_remaining, 0);
spin_lock_init(&set->set_new_req_lock);
- CFS_INIT_LIST_HEAD(&set->set_new_requests);
- CFS_INIT_LIST_HEAD(&set->set_cblist);
+ INIT_LIST_HEAD(&set->set_new_requests);
+ INIT_LIST_HEAD(&set->set_cblist);
set->set_max_inflight = UINT_MAX;
set->set_producer = NULL;
set->set_producer_arg = NULL;
RETURN(set);
}
-EXPORT_SYMBOL(ptlrpc_prep_fcset);
/**
* Wind down and free request set structure previously allocated with
*/
void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
{
- cfs_list_t *tmp;
- cfs_list_t *next;
- int expected_phase;
- int n = 0;
- ENTRY;
+ struct list_head *tmp;
+ struct list_head *next;
+ int expected_phase;
+ int n = 0;
+ ENTRY;
- /* Requests on the set should either all be completed, or all be new */
- expected_phase = (cfs_atomic_read(&set->set_remaining) == 0) ?
- RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
- cfs_list_for_each (tmp, &set->set_requests) {
- struct ptlrpc_request *req =
- cfs_list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ /* Requests on the set should either all be completed, or all be new */
+ expected_phase = (atomic_read(&set->set_remaining) == 0) ?
+ RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
+ list_for_each(tmp, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
- LASSERT(req->rq_phase == expected_phase);
- n++;
- }
+ LASSERT(req->rq_phase == expected_phase);
+ n++;
+ }
- LASSERTF(cfs_atomic_read(&set->set_remaining) == 0 ||
- cfs_atomic_read(&set->set_remaining) == n, "%d / %d\n",
- cfs_atomic_read(&set->set_remaining), n);
+ LASSERTF(atomic_read(&set->set_remaining) == 0 ||
+ atomic_read(&set->set_remaining) == n, "%d / %d\n",
+ atomic_read(&set->set_remaining), n);
- cfs_list_for_each_safe(tmp, next, &set->set_requests) {
- struct ptlrpc_request *req =
- cfs_list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
- cfs_list_del_init(&req->rq_set_chain);
+ list_for_each_safe(tmp, next, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
+ list_del_init(&req->rq_set_chain);
- LASSERT(req->rq_phase == expected_phase);
+ LASSERT(req->rq_phase == expected_phase);
- if (req->rq_phase == RQ_PHASE_NEW) {
- ptlrpc_req_interpret(NULL, req, -EBADR);
- cfs_atomic_dec(&set->set_remaining);
- }
+ if (req->rq_phase == RQ_PHASE_NEW) {
+ ptlrpc_req_interpret(NULL, req, -EBADR);
+ atomic_dec(&set->set_remaining);
+ }
spin_lock(&req->rq_lock);
req->rq_set = NULL;
ptlrpc_req_finished (req);
}
- LASSERT(cfs_atomic_read(&set->set_remaining) == 0);
+ LASSERT(atomic_read(&set->set_remaining) == 0);
- ptlrpc_reqset_put(set);
- EXIT;
+ ptlrpc_reqset_put(set);
+ EXIT;
}
EXPORT_SYMBOL(ptlrpc_set_destroy);
int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
set_interpreter_func fn, void *data)
{
- struct ptlrpc_set_cbdata *cbdata;
+ struct ptlrpc_set_cbdata *cbdata;
- OBD_ALLOC_PTR(cbdata);
- if (cbdata == NULL)
- RETURN(-ENOMEM);
+ OBD_ALLOC_PTR(cbdata);
+ if (cbdata == NULL)
+ RETURN(-ENOMEM);
- cbdata->psc_interpret = fn;
- cbdata->psc_data = data;
- cfs_list_add_tail(&cbdata->psc_item, &set->set_cblist);
+ cbdata->psc_interpret = fn;
+ cbdata->psc_data = data;
+ list_add_tail(&cbdata->psc_item, &set->set_cblist);
- RETURN(0);
+ RETURN(0);
}
-EXPORT_SYMBOL(ptlrpc_set_add_cb);
/**
* Add a new request to the general purpose request set.
void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
struct ptlrpc_request *req)
{
- LASSERT(cfs_list_empty(&req->rq_set_chain));
+ LASSERT(list_empty(&req->rq_set_chain));
/* The set takes over the caller's request reference */
- cfs_list_add_tail(&req->rq_set_chain, &set->set_requests);
+ list_add_tail(&req->rq_set_chain, &set->set_requests);
req->rq_set = set;
- cfs_atomic_inc(&set->set_remaining);
+ atomic_inc(&set->set_remaining);
req->rq_queued_time = cfs_time_current();
if (req->rq_reqmsg != NULL)
*/
req->rq_set = set;
req->rq_queued_time = cfs_time_current();
- cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
- count = cfs_atomic_inc_return(&set->set_new_count);
+ list_add_tail(&req->rq_set_chain, &set->set_new_requests);
+ count = atomic_inc_return(&set->set_new_count);
spin_unlock(&set->set_new_req_lock);
- /* Only need to call wakeup once for the first entry. */
- if (count == 1) {
- cfs_waitq_signal(&set->set_waitq);
+ /* Only need to call wakeup once for the first entry. */
+ if (count == 1) {
+ wake_up(&set->set_waitq);
- /* XXX: It maybe unnecessary to wakeup all the partners. But to
- * guarantee the async RPC can be processed ASAP, we have
- * no other better choice. It maybe fixed in future. */
- for (i = 0; i < pc->pc_npartners; i++)
- cfs_waitq_signal(&pc->pc_partners[i]->pc_set->set_waitq);
- }
+ /* XXX: it may be unnecessary to wake up all the partners, but to
+ * guarantee the async RPC can be processed ASAP, we have
+ * no better choice. This may be fixed in the future. */
+ for (i = 0; i < pc->pc_npartners; i++)
+ wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
+ }
}
-EXPORT_SYMBOL(ptlrpc_set_add_new_req);
/**
* Based on the current state of the import, determine if the request
D_HA : D_ERROR, req, "IMP_CLOSED ");
*status = -EIO;
} else if (ptlrpc_send_limit_expired(req)) {
- /* probably doesn't need to be a D_ERROR after initial testing */
- DEBUG_REQ(D_ERROR, req, "send limit expired ");
- *status = -EIO;
- } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
- imp->imp_state == LUSTRE_IMP_CONNECTING) {
- /* allow CONNECT even if import is invalid */ ;
- if (cfs_atomic_read(&imp->imp_inval_count) != 0) {
- DEBUG_REQ(D_ERROR, req, "invalidate in flight");
- *status = -EIO;
- }
+ /* probably doesn't need to be a D_ERROR after initial testing*/
+ DEBUG_REQ(D_HA, req, "send limit expired ");
+ *status = -ETIMEDOUT;
+ } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
+ imp->imp_state == LUSTRE_IMP_CONNECTING) {
+ /* allow CONNECT even if import is invalid */ ;
+ if (atomic_read(&imp->imp_inval_count) != 0) {
+ DEBUG_REQ(D_ERROR, req, "invalidate in flight");
+ *status = -EIO;
+ }
} else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
if (!imp->imp_deactive)
DEBUG_REQ(D_NET, req, "IMP_INVALID");
*status = -EIO;
} else if (req->rq_send_state != imp->imp_state) {
/* invalidate in progress - any requests should be drop */
- if (cfs_atomic_read(&imp->imp_inval_count) != 0) {
+ if (atomic_read(&imp->imp_inval_count) != 0) {
DEBUG_REQ(D_ERROR, req, "invalidate in flight");
*status = -EIO;
} else if (imp->imp_dlm_fake || req->rq_no_delay) {
}
/**
- * Decide if the eror message regarding provided request \a req
- * should be printed to the console or not.
- * Makes it's decision on request status and other properties.
- * Returns 1 to print error on the system console or 0 if not.
+ * Decide if the error message should be printed to the console or not.
+ * Makes its decision based on request type, status, and failure frequency.
+ *
+ * \param[in] req request that failed and may need a console message
+ *
+ * \retval false if no message should be printed
+ * \retval true if console message should be printed
*/
-static int ptlrpc_console_allow(struct ptlrpc_request *req)
+static bool ptlrpc_console_allow(struct ptlrpc_request *req)
{
- __u32 opc;
- int err;
+ __u32 opc;
- LASSERT(req->rq_reqmsg != NULL);
- opc = lustre_msg_get_opc(req->rq_reqmsg);
+ LASSERT(req->rq_reqmsg != NULL);
+ opc = lustre_msg_get_opc(req->rq_reqmsg);
- /* Suppress particular reconnect errors which are to be expected. No
- * errors are suppressed for the initial connection on an import */
- if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) &&
- (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) {
+ /* Suppress particular reconnect errors which are to be expected. */
+ if (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT) {
+ int err;
- /* Suppress timed out reconnect requests */
- if (req->rq_timedout)
- return 0;
+ /* Suppress timed out reconnect requests */
+ if (lustre_handle_is_used(&req->rq_import->imp_remote_handle) ||
+ req->rq_timedout)
+ return false;
- /* Suppress unavailable/again reconnect requests */
- err = lustre_msg_get_status(req->rq_repmsg);
- if (err == -ENODEV || err == -EAGAIN)
- return 0;
- }
+ /* Suppress most unavailable/again reconnect requests, but
+ * print occasionally so it is clear the client is trying to
+ * connect to a server where no target is running. */
+ err = lustre_msg_get_status(req->rq_repmsg);
+ if ((err == -ENODEV || err == -EAGAIN) &&
+ req->rq_import->imp_conn_cnt % 30 != 20)
+ return false;
+ }
- return 1;
+ return true;
}
/**
err = lustre_msg_get_status(req->rq_repmsg);
if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
struct obd_import *imp = req->rq_import;
+ lnet_nid_t nid = imp->imp_connection->c_peer.nid;
__u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
+
if (ptlrpc_console_allow(req))
- LCONSOLE_ERROR_MSG(0x011, "%s: Communicating with %s,"
- " operation %s failed with %d.\n",
- imp->imp_obd->obd_name,
- libcfs_nid2str(
- imp->imp_connection->c_peer.nid),
- ll_opcode2str(opc), err);
+ LCONSOLE_ERROR_MSG(0x11, "%s: operation %s to node %s "
+ "failed: rc = %d\n",
+ imp->imp_obd->obd_name,
+ ll_opcode2str(opc),
+ libcfs_nid2str(nid), err);
RETURN(err < 0 ? err : -EINVAL);
}
struct obd_device *obd = req->rq_import->imp_obd;
int rc;
struct timeval work_start;
+ __u64 committed;
long timediff;
ENTRY;
LASSERT(obd != NULL);
/* repbuf must be unlinked */
- LASSERT(!req->rq_receiving_reply && !req->rq_must_unlink);
+ LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);
- if (req->rq_reply_truncate) {
+ if (req->rq_reply_truncated) {
if (ptlrpc_no_resend(req)) {
DEBUG_REQ(D_ERROR, req, "reply buffer overflow,"
" expected: %d, actual size: %d",
* will roundup it */
req->rq_replen = req->rq_nob_received;
req->rq_nob_received = 0;
- req->rq_resend = 1;
+ spin_lock(&req->rq_lock);
+ req->rq_resend = 1;
+ spin_unlock(&req->rq_lock);
RETURN(0);
}
+ do_gettimeofday(&work_start);
+ timediff = cfs_timeval_sub(&work_start, &req->rq_sent_tv, NULL);
+
/*
* NB Until this point, the whole of the incoming message,
* including buflens, status etc is in the sender's byte order.
RETURN(rc);
}
+ /*
+ * Security layer unwrap might ask resend this request.
+ */
+ if (req->rq_resend)
+ RETURN(0);
+
+ rc = unpack_reply(req);
+ if (rc)
+ RETURN(rc);
+
/* retry indefinitely on EINPROGRESS */
if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
time_t now = cfs_time_current_sec();
DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
+ spin_lock(&req->rq_lock);
req->rq_resend = 1;
+ spin_unlock(&req->rq_lock);
req->rq_nr_resend++;
- /* allocate new xid to avoid reply reconstruction */
- if (!req->rq_bulk) {
- /* new xid is already allocated for bulk in
- * ptlrpc_check_set() */
- req->rq_xid = ptlrpc_next_xid();
- DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for "
- "resend on EINPROGRESS");
- }
-
/* Readjust the timeout for current conditions */
ptlrpc_at_set_req_timeout(req);
/* delay resend to give a chance to the server to get ready.
req->rq_sent = now + req->rq_timeout;
else
req->rq_sent = now + req->rq_nr_resend;
- }
-
- /*
- * Security layer unwrap might ask resend this request.
- */
- if (req->rq_resend)
- RETURN(0);
- rc = unpack_reply(req);
- if (rc)
- RETURN(rc);
+ RETURN(0);
+ }
- cfs_gettimeofday(&work_start);
- timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL);
- if (obd->obd_svc_stats != NULL) {
- lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
- timediff);
- ptlrpc_lprocfs_rpc_sent(req, timediff);
- }
+ if (obd->obd_svc_stats != NULL) {
+ lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
+ timediff);
+ ptlrpc_lprocfs_rpc_sent(req, timediff);
+ }
if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
rc = ptlrpc_check_status(req);
imp->imp_connect_error = rc;
- if (rc) {
- /*
- * Either we've been evicted, or the server has failed for
- * some reason. Try to reconnect, and if that fails, punt to
- * the upcall.
- */
- if (ll_rpc_recoverable_error(rc)) {
- if (req->rq_send_state != LUSTRE_IMP_FULL ||
- imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
- RETURN(rc);
- }
- ptlrpc_request_handle_notconn(req);
- RETURN(rc);
- }
+ if (rc) {
+ /*
+ * Either we've been evicted, or the server has failed for
+ * some reason. Try to reconnect, and if that fails, punt to
+ * the upcall.
+ */
+ if (ptlrpc_recoverable_error(rc)) {
+ if (req->rq_send_state != LUSTRE_IMP_FULL ||
+ imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
+ RETURN(rc);
+ }
+ ptlrpc_request_handle_notconn(req);
+ RETURN(rc);
+ }
} else {
/*
* Let's look if server sent slv. Do it only for RPC with
/** version recovery */
ptlrpc_save_versions(req);
ptlrpc_retain_replayable_request(req, imp);
- } else if (req->rq_commit_cb != NULL) {
+ } else if (req->rq_commit_cb != NULL &&
+ list_empty(&req->rq_replay_list)) {
+ /* NB: don't call rq_commit_cb if it's already on
+ * rq_replay_list, ptlrpc_free_committed() will call
+ * it later, see LU-3618 for details */
spin_unlock(&imp->imp_lock);
req->rq_commit_cb(req);
spin_lock(&imp->imp_lock);
/*
* Replay-enabled imports return commit-status information.
*/
- if (lustre_msg_get_last_committed(req->rq_repmsg)) {
- imp->imp_peer_committed_transno =
- lustre_msg_get_last_committed(req->rq_repmsg);
- }
+ committed = lustre_msg_get_last_committed(req->rq_repmsg);
+ if (likely(committed > imp->imp_peer_committed_transno))
+ imp->imp_peer_committed_transno = committed;
ptlrpc_free_committed(imp);
- if (!cfs_list_empty(&imp->imp_replay_list)) {
+ if (!list_empty(&imp->imp_replay_list)) {
struct ptlrpc_request *last;
- last = cfs_list_entry(imp->imp_replay_list.prev,
- struct ptlrpc_request,
- rq_replay_list);
+ last = list_entry(imp->imp_replay_list.prev,
+ struct ptlrpc_request,
+ rq_replay_list);
/*
* Requests with rq_replay stay on the list even if no
* commit is expected.
static int ptlrpc_send_new_req(struct ptlrpc_request *req)
{
struct obd_import *imp = req->rq_import;
+ struct list_head *tmp;
+ __u64 min_xid = ~0ULL;
int rc;
ENTRY;
LASSERT(req->rq_phase == RQ_PHASE_NEW);
+
+ /* do not try to go further if there is not enough memory in enc_pool */
+ if (req->rq_sent && req->rq_bulk != NULL)
+ if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() &&
+ pool_is_at_full_capacity())
+ RETURN(-ENOMEM);
+
if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
(!req->rq_generation_set ||
req->rq_import_generation == imp->imp_generation))
spin_lock(&imp->imp_lock);
+ /* The very first time we assign an XID. It's important to assign the
+ * XID and put the request on the list atomically, so that the lowest
+ * assigned XID is always known. This is vital for multislot last_rcvd */
+ if (req->rq_send_state == LUSTRE_IMP_REPLAY) {
+ LASSERT(req->rq_xid != 0);
+ } else {
+ LASSERT(req->rq_xid == 0);
+ req->rq_xid = ptlrpc_next_xid();
+ }
+
if (!req->rq_generation_set)
req->rq_import_generation = imp->imp_generation;
"(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
ptlrpc_import_state_name(req->rq_send_state),
ptlrpc_import_state_name(imp->imp_state));
- LASSERT(cfs_list_empty(&req->rq_list));
- cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list);
- cfs_atomic_inc(&req->rq_import->imp_inflight);
+ LASSERT(list_empty(&req->rq_list));
+ list_add_tail(&req->rq_list, &imp->imp_delayed_list);
+ atomic_inc(&req->rq_import->imp_inflight);
spin_unlock(&imp->imp_lock);
RETURN(0);
}
RETURN(rc);
}
- LASSERT(cfs_list_empty(&req->rq_list));
- cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list);
- cfs_atomic_inc(&req->rq_import->imp_inflight);
+ LASSERT(list_empty(&req->rq_list));
+ list_add_tail(&req->rq_list, &imp->imp_sending_list);
+ atomic_inc(&req->rq_import->imp_inflight);
+
+ /* find the lowest unreplied XID */
+ list_for_each(tmp, &imp->imp_delayed_list) {
+ struct ptlrpc_request *r;
+ r = list_entry(tmp, struct ptlrpc_request, rq_list);
+ if (r->rq_xid < min_xid)
+ min_xid = r->rq_xid;
+ }
+ list_for_each(tmp, &imp->imp_sending_list) {
+ struct ptlrpc_request *r;
+ r = list_entry(tmp, struct ptlrpc_request, rq_list);
+ if (r->rq_xid < min_xid)
+ min_xid = r->rq_xid;
+ }
spin_unlock(&imp->imp_lock);
- lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());
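+ /* Advertise the lowest in-flight XID minus one as last_xid, so the
+ * server knows no replies are still awaited for any smaller XID
+ * (see the multislot last_rcvd comment above). */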
+ if (likely(min_xid != ~0ULL))
+ lustre_msg_set_last_xid(req->rq_reqmsg, min_xid - 1);
+
+ lustre_msg_set_status(req->rq_reqmsg, current_pid());
rc = sptlrpc_req_refresh_ctx(req, -1);
if (rc) {
req->rq_status = rc;
RETURN(1);
} else {
- req->rq_wait_ctx = 1;
+ spin_lock(&req->rq_lock);
+ req->rq_wait_ctx = 1;
+ spin_unlock(&req->rq_lock);
RETURN(0);
}
}
- CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
- " %s:%s:%d:"LPU64":%s:%d\n", cfs_curproc_comm(),
- imp->imp_obd->obd_uuid.uuid,
- lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
- libcfs_nid2str(imp->imp_connection->c_peer.nid),
- lustre_msg_get_opc(req->rq_reqmsg));
+ CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
+ " %s:%s:%d:"LPU64":%s:%d\n", current_comm(),
+ imp->imp_obd->obd_uuid.uuid,
+ lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid),
+ lustre_msg_get_opc(req->rq_reqmsg));
rc = ptl_send_rpc(req, 0);
+ if (rc == -ENOMEM) {
+ spin_lock(&imp->imp_lock);
+ if (!list_empty(&req->rq_list)) {
+ list_del_init(&req->rq_list);
+ atomic_dec(&req->rq_import->imp_inflight);
+ }
+ spin_unlock(&imp->imp_lock);
+ ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
+ RETURN(rc);
+ }
if (rc) {
DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
- req->rq_net_err = 1;
+ spin_lock(&req->rq_lock);
+ req->rq_net_err = 1;
+ spin_unlock(&req->rq_lock);
RETURN(rc);
}
RETURN(0);
LASSERT(set->set_producer != NULL);
- remaining = cfs_atomic_read(&set->set_remaining);
+ remaining = atomic_read(&set->set_remaining);
/* populate the ->set_requests list with requests until we
* reach the maximum number of RPCs in flight for this set */
- while (cfs_atomic_read(&set->set_remaining) < set->set_max_inflight) {
+ while (atomic_read(&set->set_remaining) < set->set_max_inflight) {
rc = set->set_producer(set, set->set_producer_arg);
if (rc == -ENOENT) {
/* no more RPC to produce */
}
}
- RETURN((cfs_atomic_read(&set->set_remaining) - remaining));
+ RETURN((atomic_read(&set->set_remaining) - remaining));
}
/**
* and no more replies are expected.
* (it is possible to get fewer replies than requests sent, e.g. due to timed
* out requests or requests that we had trouble sending out)
+ *
+ * NOTE: This function contains a potential schedule point (cond_resched()).
*/
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
{
- cfs_list_t *tmp, *next;
- int force_timer_recalc = 0;
- ENTRY;
+ struct list_head *tmp, *next;
+ struct list_head comp_reqs;
+ int force_timer_recalc = 0;
+ ENTRY;
- if (cfs_atomic_read(&set->set_remaining) == 0)
- RETURN(1);
+ if (atomic_read(&set->set_remaining) == 0)
+ RETURN(1);
+
+ INIT_LIST_HEAD(&comp_reqs);
+ list_for_each_safe(tmp, next, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
+ struct obd_import *imp = req->rq_import;
+ int unregistered = 0;
+ int rc = 0;
- cfs_list_for_each_safe(tmp, next, &set->set_requests) {
- struct ptlrpc_request *req =
- cfs_list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
- struct obd_import *imp = req->rq_import;
- int unregistered = 0;
- int rc = 0;
+ /* This schedule point is mainly for the ptlrpcd caller of this
+ * function. Most ptlrpc sets are neither long-lived nor unbounded
+ * in length, but at the very least the set used by the ptlrpcd is.
+ * Since the processing time is unbounded, we need to insert an
+ * explicit schedule point to make the thread well-behaved.
+ */
+ cond_resched();
if (req->rq_phase == RQ_PHASE_NEW &&
ptlrpc_send_new_req(req)) {
ptlrpc_rqphase_move(req, req->rq_next_phase);
}
- if (req->rq_phase == RQ_PHASE_COMPLETE)
+ if (req->rq_phase == RQ_PHASE_COMPLETE) {
+ list_move_tail(&req->rq_set_chain, &comp_reqs);
continue;
+ }
if (req->rq_phase == RQ_PHASE_INTERPRET)
GOTO(interpret, req->rq_status);
/* ptlrpc_set_wait->l_wait_event sets lwi_allow_intr
* so it sets rq_intr regardless of individual rpc
- * timeouts. The synchronous IO waiting path sets
+ * timeouts. The synchronous IO waiting path sets
* rq_intr irrespective of whether ptlrpcd
* has seen a timeout. Our policy is to only interpret
* interrupted rpcs after they have timed out, so we
req->rq_waiting || req->rq_wait_ctx) {
int status;
- if (!ptlrpc_unregister_reply(req, 1))
- continue;
+ if (!ptlrpc_unregister_reply(req, 1)) {
+ ptlrpc_unregister_bulk(req, 1);
+ continue;
+ }
spin_lock(&imp->imp_lock);
if (ptlrpc_import_delay_req(imp, req, &status)){
/* put on delay list - only if we wait
* recovery finished - before send */
- cfs_list_del_init(&req->rq_list);
- cfs_list_add_tail(&req->rq_list,
+ list_del_init(&req->rq_list);
+ list_add_tail(&req->rq_list,
&imp->
imp_delayed_list);
spin_unlock(&imp->imp_lock);
GOTO(interpret, req->rq_status);
}
- cfs_list_del_init(&req->rq_list);
- cfs_list_add_tail(&req->rq_list,
+ list_del_init(&req->rq_list);
+ list_add_tail(&req->rq_list,
&imp->imp_sending_list);
spin_unlock(&imp->imp_lock);
spin_lock(&req->rq_lock);
req->rq_resend = 1;
spin_unlock(&req->rq_lock);
- if (req->rq_bulk) {
- __u64 old_xid;
-
- if (!ptlrpc_unregister_bulk(req, 1))
- continue;
-
- /* ensure previous bulk fails */
- old_xid = req->rq_xid;
- req->rq_xid = ptlrpc_next_xid();
- CDEBUG(D_HA, "resend bulk "
- "old x"LPU64
- " new x"LPU64"\n",
- old_xid, req->rq_xid);
- }
+
+ if (req->rq_bulk != NULL &&
+ !ptlrpc_unregister_bulk(req, 1))
+ continue;
}
/*
* rq_wait_ctx is only touched by ptlrpcd,
}
rc = ptl_send_rpc(req, 0);
+ if (rc == -ENOMEM) {
+ spin_lock(&imp->imp_lock);
+ if (!list_empty(&req->rq_list))
+ list_del_init(&req->rq_list);
+ spin_unlock(&imp->imp_lock);
+ ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
+ continue;
+ }
if (rc) {
DEBUG_REQ(D_HA, req,
"send failed: rc = %d", rc);
spin_lock(&req->rq_lock);
req->rq_net_err = 1;
spin_unlock(&req->rq_lock);
+ continue;
}
/* need to reset the timeout */
force_timer_recalc = 1;
ptlrpc_req_interpret(env, req, req->rq_status);
- ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
+ if (ptlrpcd_check_work(req)) {
+ atomic_dec(&set->set_remaining);
+ continue;
+ }
+ ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
"Completed RPC pname:cluuid:pid:xid:nid:"
"opc %s:%s:%d:"LPU64":%s:%d\n",
- cfs_curproc_comm(), imp->imp_obd->obd_uuid.uuid,
+ current_comm(), imp->imp_obd->obd_uuid.uuid,
lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
libcfs_nid2str(imp->imp_connection->c_peer.nid),
lustre_msg_get_opc(req->rq_reqmsg));
* may happen in the case of marking it erroneous for the case
* ptlrpc_import_delay_req(req, status) finds it impossible to
* allow sending this rpc and returns *status != 0. */
- if (!cfs_list_empty(&req->rq_list)) {
- cfs_list_del_init(&req->rq_list);
- cfs_atomic_dec(&imp->imp_inflight);
+ if (!list_empty(&req->rq_list)) {
+ list_del_init(&req->rq_list);
+ atomic_dec(&imp->imp_inflight);
}
spin_unlock(&imp->imp_lock);
- cfs_atomic_dec(&set->set_remaining);
- cfs_waitq_broadcast(&imp->imp_recovery_waitq);
+ atomic_dec(&set->set_remaining);
+ wake_up_all(&imp->imp_recovery_waitq);
if (set->set_producer) {
/* produce a new request if possible */
/* free the request that has just been completed
* in order not to pollute set->set_requests */
- cfs_list_del_init(&req->rq_set_chain);
+ list_del_init(&req->rq_set_chain);
spin_lock(&req->rq_lock);
req->rq_set = NULL;
req->rq_invalid_rqset = 0;
if (req->rq_status != 0)
set->set_rc = req->rq_status;
ptlrpc_req_finished(req);
+ } else {
+ list_move_tail(&req->rq_set_chain, &comp_reqs);
}
- }
+ }
- /* If we hit an error, we want to recover promptly. */
- RETURN(cfs_atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
+ /* move completed requests to the head of the list so it's easier
+ * for the caller to find them */
+ list_splice(&comp_reqs, &set->set_requests);
+
+ /* If we hit an error, we want to recover promptly. */
+ RETURN(atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
}
EXPORT_SYMBOL(ptlrpc_check_set);
RETURN(1);
}
- cfs_atomic_inc(&imp->imp_timeouts);
+ atomic_inc(&imp->imp_timeouts);
/* The DLM server doesn't want recovery run on its imports. */
if (imp->imp_dlm_fake)
*/
int ptlrpc_expired_set(void *data)
{
- struct ptlrpc_request_set *set = data;
- cfs_list_t *tmp;
- time_t now = cfs_time_current_sec();
- ENTRY;
+ struct ptlrpc_request_set *set = data;
+ struct list_head *tmp;
+ time_t now = cfs_time_current_sec();
+ ENTRY;
- LASSERT(set != NULL);
+ LASSERT(set != NULL);
- /*
- * A timeout expired. See which reqs it applies to...
- */
- cfs_list_for_each (tmp, &set->set_requests) {
- struct ptlrpc_request *req =
- cfs_list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ /*
+ * A timeout expired. See which reqs it applies to...
+ */
+ list_for_each(tmp, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
/* don't expire request waiting for context */
if (req->rq_wait_ctx)
*/
RETURN(1);
}
-EXPORT_SYMBOL(ptlrpc_expired_set);
/**
* Sets rq_intr flag in \a req under spinlock.
* Interrupts (sets interrupted flag) all uncompleted requests in
* a set \a data. Callback for l_wait_event for interruptible waits.
*/
-void ptlrpc_interrupted_set(void *data)
+static void ptlrpc_interrupted_set(void *data)
{
- struct ptlrpc_request_set *set = data;
- cfs_list_t *tmp;
+ struct ptlrpc_request_set *set = data;
+ struct list_head *tmp;
- LASSERT(set != NULL);
- CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
+ LASSERT(set != NULL);
+ CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
- cfs_list_for_each(tmp, &set->set_requests) {
- struct ptlrpc_request *req =
- cfs_list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ list_for_each(tmp, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(tmp, struct ptlrpc_request, rq_set_chain);
- if (req->rq_phase != RQ_PHASE_RPC &&
- req->rq_phase != RQ_PHASE_UNREGISTERING)
- continue;
+ if (req->rq_phase != RQ_PHASE_RPC &&
+ req->rq_phase != RQ_PHASE_UNREGISTERING)
+ continue;
- ptlrpc_mark_interrupted(req);
- }
+ ptlrpc_mark_interrupted(req);
+ }
}
-EXPORT_SYMBOL(ptlrpc_interrupted_set);
/**
* Get the smallest timeout in the set; this does NOT set a timeout.
*/
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
{
- cfs_list_t *tmp;
- time_t now = cfs_time_current_sec();
- int timeout = 0;
- struct ptlrpc_request *req;
- int deadline;
- ENTRY;
-
- SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
+ struct list_head *tmp;
+ time_t now = cfs_time_current_sec();
+ int timeout = 0;
+ struct ptlrpc_request *req;
+ int deadline;
+ ENTRY;
- cfs_list_for_each(tmp, &set->set_requests) {
- req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ list_for_each(tmp, &set->set_requests) {
+ req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
/*
* Request in-flight?
}
RETURN(timeout);
}
-EXPORT_SYMBOL(ptlrpc_set_next_timeout);
/**
* Send all unsent requests from the set and then wait until all
*/
int ptlrpc_set_wait(struct ptlrpc_request_set *set)
{
- cfs_list_t *tmp;
+ struct list_head *tmp;
struct ptlrpc_request *req;
struct l_wait_info lwi;
int rc, timeout;
if (set->set_producer)
(void)ptlrpc_set_producer(set);
else
- cfs_list_for_each(tmp, &set->set_requests) {
- req = cfs_list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ list_for_each(tmp, &set->set_requests) {
+ req = list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
if (req->rq_phase == RQ_PHASE_NEW)
(void)ptlrpc_send_new_req(req);
}
- if (cfs_list_empty(&set->set_requests))
+ if (list_empty(&set->set_requests))
RETURN(0);
do {
CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
set, timeout);
- if (timeout == 0 && !cfs_signal_pending())
+ if (timeout == 0 && !signal_pending(current))
/*
* No requests are in-flight (ether timed out
* or delayed), so we can allow interrupts.
* We still want to block for a limited time,
* so we allow interrupts during the timeout.
*/
- lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1),
+ lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1),
ptlrpc_expired_set,
ptlrpc_interrupted_set, set);
else
/*
* At least one request is in flight, so no
* interrupts are allowed. Wait until all
- * complete, or an in-flight req times out.
+ * complete, or an in-flight req times out.
*/
lwi = LWI_TIMEOUT(cfs_time_seconds(timeout? timeout : 1),
ptlrpc_expired_set, set);
/* LU-769 - if we ignored the signal because it was already
* pending when we started, we need to handle it now or we risk
* it being ignored forever */
- if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
- cfs_signal_pending()) {
- cfs_sigset_t blocked_sigs =
- cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
-
- /* In fact we only interrupt for the "fatal" signals
- * like SIGINT or SIGKILL. We still ignore less
- * important signals since ptlrpc set is not easily
- * reentrant from userspace again */
- if (cfs_signal_pending())
- ptlrpc_interrupted_set(set);
+ if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
+ signal_pending(current)) {
+ sigset_t blocked_sigs =
+ cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
+
+ /* In fact we only interrupt for the "fatal" signals
+ * like SIGINT or SIGKILL. We still ignore less
+ * important signals since ptlrpc set is not easily
+ * reentrant from userspace again */
+ if (signal_pending(current))
+ ptlrpc_interrupted_set(set);
cfs_restore_sigs(blocked_sigs);
- }
+ }
LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
* EINTR.
* I don't really care if we go once more round the loop in
* the error cases -eeb. */
- if (rc == 0 && cfs_atomic_read(&set->set_remaining) == 0) {
- cfs_list_for_each(tmp, &set->set_requests) {
- req = cfs_list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
+ list_for_each(tmp, &set->set_requests) {
+ req = list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
spin_lock(&req->rq_lock);
req->rq_invalid_rqset = 1;
spin_unlock(&req->rq_lock);
- }
- }
- } while (rc != 0 || cfs_atomic_read(&set->set_remaining) != 0);
+ }
+ }
+ } while (rc != 0 || atomic_read(&set->set_remaining) != 0);
- LASSERT(cfs_atomic_read(&set->set_remaining) == 0);
+ LASSERT(atomic_read(&set->set_remaining) == 0);
rc = set->set_rc; /* rq_status of already freed requests if any */
- cfs_list_for_each(tmp, &set->set_requests) {
- req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ list_for_each(tmp, &set->set_requests) {
+ req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
if (req->rq_status != 0)
struct ptlrpc_set_cbdata *cbdata, *n;
int err;
- cfs_list_for_each_entry_safe(cbdata, n,
+ list_for_each_entry_safe(cbdata, n,
&set->set_cblist, psc_item) {
- cfs_list_del_init(&cbdata->psc_item);
+ list_del_init(&cbdata->psc_item);
err = cbdata->psc_interpret(set, cbdata->psc_data, rc);
if (err && !rc)
rc = err;
*/
static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
{
- ENTRY;
- if (request == NULL) {
- EXIT;
- return;
- }
+ ENTRY;
- LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
- LASSERTF(request->rq_rqbd == NULL, "req %p\n",request);/* client-side */
- LASSERTF(cfs_list_empty(&request->rq_list), "req %p\n", request);
- LASSERTF(cfs_list_empty(&request->rq_set_chain), "req %p\n", request);
- LASSERTF(cfs_list_empty(&request->rq_exp_list), "req %p\n", request);
- LASSERTF(!request->rq_replay, "req %p\n", request);
+ if (request == NULL)
+ RETURN_EXIT;
- req_capsule_fini(&request->rq_pill);
+ LASSERT(!request->rq_srv_req);
+ LASSERT(request->rq_export == NULL);
+ LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
+ LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
+ LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
+ LASSERTF(!request->rq_replay, "req %p\n", request);
- /* We must take it off the imp_replay_list first. Otherwise, we'll set
- * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
- if (request->rq_import != NULL) {
+ req_capsule_fini(&request->rq_pill);
+
+ /* We must take it off the imp_replay_list first. Otherwise, we'll set
+ * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
+ if (request->rq_import != NULL) {
if (!locked)
spin_lock(&request->rq_import->imp_lock);
- cfs_list_del_init(&request->rq_replay_list);
+ list_del_init(&request->rq_replay_list);
if (!locked)
spin_unlock(&request->rq_import->imp_lock);
}
- LASSERTF(cfs_list_empty(&request->rq_replay_list), "req %p\n", request);
+ LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);
- if (cfs_atomic_read(&request->rq_refcount) != 0) {
- DEBUG_REQ(D_ERROR, request,
- "freeing request with nonzero refcount");
- LBUG();
- }
+ if (atomic_read(&request->rq_refcount) != 0) {
+ DEBUG_REQ(D_ERROR, request,
+ "freeing request with nonzero refcount");
+ LBUG();
+ }
if (request->rq_repbuf != NULL)
sptlrpc_cli_free_repbuf(request);
- if (request->rq_export != NULL) {
- class_export_put(request->rq_export);
- request->rq_export = NULL;
- }
+
if (request->rq_import != NULL) {
class_import_put(request->rq_import);
request->rq_import = NULL;
}
if (request->rq_bulk != NULL)
- ptlrpc_free_bulk_pin(request->rq_bulk);
+ ptlrpc_free_bulk(request->rq_bulk);
if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL)
sptlrpc_cli_free_reqbuf(request);
if (request->rq_pool)
__ptlrpc_free_req_to_pool(request);
else
- OBD_FREE(request, sizeof(*request));
- EXIT;
+ ptlrpc_request_cache_free(request);
+ EXIT;
}
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
/**
* Drop one request reference. Must be called with import imp_lock held.
- * When reference count drops to zero, reuqest is freed.
+ * When reference count drops to zero, request is freed.
*/
void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
{
- LASSERT_SPIN_LOCKED(&request->rq_import->imp_lock);
- (void)__ptlrpc_req_finished(request, 1);
+ assert_spin_locked(&request->rq_import->imp_lock);
+ (void)__ptlrpc_req_finished(request, 1);
}
-EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock);
/**
* Helper function
}
DEBUG_REQ(D_INFO, request, "refcount now %u",
- cfs_atomic_read(&request->rq_refcount) - 1);
+ atomic_read(&request->rq_refcount) - 1);
- if (cfs_atomic_dec_and_test(&request->rq_refcount)) {
+ if (atomic_dec_and_test(&request->rq_refcount)) {
__ptlrpc_free_req(request, locked);
RETURN(1);
}
* The request owner (i.e. the thread doing the I/O) must call...
* Returns 0 on success or 1 if unregistering cannot be made.
*/
-int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
+static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
{
- int rc;
- cfs_waitq_t *wq;
- struct l_wait_info lwi;
+ int rc;
+ struct l_wait_info lwi;
- /*
- * Might sleep.
- */
- LASSERT(!cfs_in_interrupt());
+ /*
+ * Might sleep.
+ */
+ LASSERT(!in_interrupt());
- /*
- * Let's setup deadline for reply unlink.
- */
+ /*
+ * Let's setup deadline for reply unlink.
+ */
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
async && request->rq_reply_deadline == 0)
request->rq_reply_deadline = cfs_time_current_sec()+LONG_UNLINK;
* a chance to run reply_in_callback(), and to make sure we've
* unlinked before returning a req to the pool.
*/
- if (request->rq_set != NULL)
- wq = &request->rq_set->set_waitq;
- else
- wq = &request->rq_reply_waitq;
-
for (;;) {
+ /* The wq argument is ignored by user-space wait_event macros */
+ wait_queue_head_t *wq = (request->rq_set != NULL) ?
+ &request->rq_set->set_waitq :
+ &request->rq_reply_waitq;
/* Network access will complete in finite time but the HUGE
* timeout lets us CWARN for visibility of sluggish NALs */
lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
}
LASSERT(rc == -ETIMEDOUT);
- DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout "
- "rvcng=%d unlnk=%d", request->rq_receiving_reply,
- request->rq_must_unlink);
+ DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout "
+ "receiving_reply=%d req_ulinked=%d reply_unlinked=%d",
+ request->rq_receiving_reply,
+ request->rq_req_unlinked,
+ request->rq_reply_unlinked);
}
RETURN(0);
}
-EXPORT_SYMBOL(ptlrpc_unregister_reply);
+
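+/**
+ * Drop the replay reference on \a req: clear rq_replay, run the commit
+ * callback if one is set, unlink the request from the import replay list
+ * and release the reference held for replay. Caller must hold imp_lock.
+ */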
+static void ptlrpc_free_request(struct ptlrpc_request *req)
+{
+ spin_lock(&req->rq_lock);
+ req->rq_replay = 0;
+ spin_unlock(&req->rq_lock);
+
+ if (req->rq_commit_cb != NULL)
+ req->rq_commit_cb(req);
+ list_del_init(&req->rq_replay_list);
+
+ __ptlrpc_req_finished(req, 1);
+}
+
+/**
+ * Drop \a req from the replay list of its import once it is committed,
+ * or unconditionally when \a force is set.
+ */
+void ptlrpc_request_committed(struct ptlrpc_request *req, int force)
+{
+ struct obd_import *imp = req->rq_import;
+
+ spin_lock(&imp->imp_lock);
+ if (list_empty(&req->rq_replay_list)) {
+ spin_unlock(&imp->imp_lock);
+ return;
+ }
+
+ if (force || req->rq_transno <= imp->imp_peer_committed_transno)
+ ptlrpc_free_request(req);
+
+ spin_unlock(&imp->imp_lock);
+}
+EXPORT_SYMBOL(ptlrpc_request_committed);
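+
+/*
+ * Illustrative usage note, not part of this change: a caller that keeps a
+ * request on the replay list (the MDC open/close path, for instance) can
+ * invoke
+ *
+ *	ptlrpc_request_committed(req, 0);
+ *
+ * after the matching reply has been processed; the request is then freed
+ * once its transno is committed on the server, or immediately when the
+ * force argument is set.
+ */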
/**
* Iterates through replay_list on import and prunes
*/
void ptlrpc_free_committed(struct obd_import *imp)
{
- cfs_list_t *tmp, *saved;
- struct ptlrpc_request *req;
- struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
- ENTRY;
-
- LASSERT(imp != NULL);
-
- LASSERT_SPIN_LOCKED(&imp->imp_lock);
+ struct ptlrpc_request *req, *saved;
+ struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
+ bool skip_committed_list = true;
+ ENTRY;
+ LASSERT(imp != NULL);
+ assert_spin_locked(&imp->imp_lock);
if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
imp->imp_generation == imp->imp_last_generation_checked) {
CDEBUG(D_INFO, "%s: skip recheck: last_committed "LPU64"\n",
imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
- EXIT;
- return;
+ RETURN_EXIT;
}
CDEBUG(D_RPCTRACE, "%s: committing for last_committed "LPU64" gen %d\n",
imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
imp->imp_generation);
+
+ if (imp->imp_generation != imp->imp_last_generation_checked ||
+ imp->imp_last_transno_checked == 0)
+ skip_committed_list = false;
+
imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
imp->imp_last_generation_checked = imp->imp_generation;
- cfs_list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
- req = cfs_list_entry(tmp, struct ptlrpc_request,
- rq_replay_list);
-
+ list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
+ rq_replay_list) {
/* XXX ok to remove when 1357 resolved - rread 05/29/03 */
LASSERT(req != last_req);
last_req = req;
GOTO(free_req, 0);
}
- if (req->rq_replay) {
- DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
- continue;
- }
-
/* not yet committed */
if (req->rq_transno > imp->imp_peer_committed_transno) {
DEBUG_REQ(D_RPCTRACE, req, "stopping search");
break;
}
+ if (req->rq_replay) {
+ DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
+ list_move_tail(&req->rq_replay_list,
+ &imp->imp_committed_list);
+ continue;
+ }
+
DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")",
imp->imp_peer_committed_transno);
free_req:
- spin_lock(&req->rq_lock);
- req->rq_replay = 0;
- spin_unlock(&req->rq_lock);
- if (req->rq_commit_cb != NULL)
- req->rq_commit_cb(req);
- cfs_list_del_init(&req->rq_replay_list);
- __ptlrpc_req_finished(req, 1);
+ ptlrpc_free_request(req);
}
+ if (skip_committed_list)
+ GOTO(out, 0);
+
+ list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
+ rq_replay_list) {
+ LASSERT(req->rq_transno != 0);
+ if (req->rq_import_generation < imp->imp_generation) {
+ DEBUG_REQ(D_RPCTRACE, req, "free stale open request");
+ ptlrpc_free_request(req);
+ } else if (!req->rq_replay) {
+ DEBUG_REQ(D_RPCTRACE, req, "free closed open request");
+ ptlrpc_free_request(req);
+ }
+ }
+out:
EXIT;
- return;
}
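/*
 * A minimal calling sketch (hypothetical caller): ptlrpc_free_committed()
 * must run under imp_lock, as asserted at the top of the function.
 */
static inline void example_prune_committed(struct obd_import *imp)
{
	spin_lock(&imp->imp_lock);
	ptlrpc_free_committed(imp);
	spin_unlock(&imp->imp_lock);
}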
void ptlrpc_cleanup_client(struct obd_import *imp)
{
ENTRY;
EXIT;
- return;
}
-EXPORT_SYMBOL(ptlrpc_cleanup_client);
/**
* Schedule previously sent request for resend.
void ptlrpc_resend_req(struct ptlrpc_request *req)
{
DEBUG_REQ(D_HA, req, "going to resend");
+ spin_lock(&req->rq_lock);
+
+	/* Request already got a reply but is still linked to the import list.
+	 * Let ptlrpc_check_set() process it. */
+ if (ptlrpc_client_replied(req)) {
+ spin_unlock(&req->rq_lock);
+ DEBUG_REQ(D_HA, req, "it has reply, so skip it");
+ return;
+ }
+
lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
req->rq_status = -EAGAIN;
- spin_lock(&req->rq_lock);
req->rq_resend = 1;
req->rq_net_err = 0;
req->rq_timedout = 0;
- if (req->rq_bulk) {
- __u64 old_xid = req->rq_xid;
- /* ensure previous bulk fails */
- req->rq_xid = ptlrpc_next_xid();
- CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n",
- old_xid, req->rq_xid);
- }
ptlrpc_client_wake_req(req);
spin_unlock(&req->rq_lock);
}
-EXPORT_SYMBOL(ptlrpc_resend_req);
/* XXX: this function and rq_status are currently unused */
void ptlrpc_restart_req(struct ptlrpc_request *req)
ptlrpc_client_wake_req(req);
spin_unlock(&req->rq_lock);
}
-EXPORT_SYMBOL(ptlrpc_restart_req);
/**
* Grab additional reference on a request \a req
*/
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
{
- ENTRY;
- cfs_atomic_inc(&req->rq_refcount);
- RETURN(req);
+ ENTRY;
+ atomic_inc(&req->rq_refcount);
+ RETURN(req);
}
EXPORT_SYMBOL(ptlrpc_request_addref);
void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
struct obd_import *imp)
{
- cfs_list_t *tmp;
+ struct list_head *tmp;
- LASSERT_SPIN_LOCKED(&imp->imp_lock);
+ assert_spin_locked(&imp->imp_lock);
if (req->rq_transno == 0) {
DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
as resent replayed requests. */
lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
- /* don't re-add requests that have been replayed */
- if (!cfs_list_empty(&req->rq_replay_list))
- return;
+ /* don't re-add requests that have been replayed */
+ if (!list_empty(&req->rq_replay_list))
+ return;
- lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
+ lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
- LASSERT(imp->imp_replayable);
- /* Balanced in ptlrpc_free_committed, usually. */
- ptlrpc_request_addref(req);
- cfs_list_for_each_prev(tmp, &imp->imp_replay_list) {
- struct ptlrpc_request *iter =
- cfs_list_entry(tmp, struct ptlrpc_request,
- rq_replay_list);
+ spin_lock(&req->rq_lock);
+ req->rq_resend = 0;
+ spin_unlock(&req->rq_lock);
+
+ LASSERT(imp->imp_replayable);
+ /* Balanced in ptlrpc_free_committed, usually. */
+ ptlrpc_request_addref(req);
+ list_for_each_prev(tmp, &imp->imp_replay_list) {
+ struct ptlrpc_request *iter = list_entry(tmp,
+ struct ptlrpc_request,
+ rq_replay_list);
/* We may have duplicate transnos if we create and then
* open a file, or for closes retained if to match creating
continue;
}
- cfs_list_add(&req->rq_replay_list, &iter->rq_replay_list);
- return;
- }
+ list_add(&req->rq_replay_list, &iter->rq_replay_list);
+ return;
+ }
- cfs_list_add(&req->rq_replay_list, &imp->imp_replay_list);
+ list_add(&req->rq_replay_list, &imp->imp_replay_list);
}
-EXPORT_SYMBOL(ptlrpc_retain_replayable_request);
/**
* Send request and wait until it completes.
LASSERT(req->rq_set == NULL);
LASSERT(!req->rq_receiving_reply);
- set = ptlrpc_prep_set();
- if (set == NULL) {
- CERROR("Unable to allocate ptlrpc set.");
- RETURN(-ENOMEM);
- }
+ set = ptlrpc_prep_set();
+ if (set == NULL) {
+ CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM);
+ RETURN(-ENOMEM);
+ }
- /* for distributed debugging */
- lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());
+ /* for distributed debugging */
+ lustre_msg_set_status(req->rq_reqmsg, current_pid());
/* add a ref for the set (see comment in ptlrpc_set_add_req) */
ptlrpc_request_addref(req);
}
EXPORT_SYMBOL(ptlrpc_queue_wait);
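/*
 * A minimal synchronous-RPC sketch built on ptlrpc_queue_wait(); request
 * allocation and packing are elided and the wrapper below is hypothetical.
 */
static inline int example_sync_rpc(struct ptlrpc_request *req)
{
	int rc;

	/* send the request and block until the reply has been processed */
	rc = ptlrpc_queue_wait(req);

	/* drop the caller's reference whether or not the RPC succeeded */
	ptlrpc_req_finished(req);

	return rc;
}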
-struct ptlrpc_replay_async_args {
- int praa_old_state;
- int praa_old_status;
-};
-
/**
* Callback used for replayed requests reply processing.
- * In case of succesful reply calls registeresd request replay callback.
+ * In case of a successful reply, calls the registered request replay callback.
* In case of error restart replay process.
*/
static int ptlrpc_replay_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- void * data, int rc)
+ struct ptlrpc_request *req,
+				   void *data, int rc)
{
- struct ptlrpc_replay_async_args *aa = data;
- struct obd_import *imp = req->rq_import;
-
- ENTRY;
- cfs_atomic_dec(&imp->imp_replay_inflight);
+ struct ptlrpc_replay_async_args *aa = data;
+ struct obd_import *imp = req->rq_import;
- if (!ptlrpc_client_replied(req)) {
- CERROR("request replay timed out, restarting recovery\n");
- GOTO(out, rc = -ETIMEDOUT);
- }
+ ENTRY;
+ atomic_dec(&imp->imp_replay_inflight);
+
+	/* Note: if this is a bulk replay (MDS-MDS replay), then even if the
+	 * server got the request, the bulk transfer may have timed out, so
+	 * replay the bulk request again */
+ if (!ptlrpc_client_replied(req) ||
+ (req->rq_bulk != NULL &&
+ lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) {
+ DEBUG_REQ(D_ERROR, req, "request replay timed out.\n");
+ GOTO(out, rc = -ETIMEDOUT);
+ }
if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
(lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
ptlrpc_at_get_net_latency(req));
DEBUG_REQ(D_HA, req, "REPLAY");
- cfs_atomic_inc(&req->rq_import->imp_replay_inflight);
- ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
+ atomic_inc(&req->rq_import->imp_replay_inflight);
+ ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
- ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
- RETURN(0);
+ ptlrpcd_add_req(req);
+ RETURN(0);
}
-EXPORT_SYMBOL(ptlrpc_replay_req);
/**
* Aborts all in-flight request on import \a imp sending and delayed lists
*/
void ptlrpc_abort_inflight(struct obd_import *imp)
{
- cfs_list_t *tmp, *n;
- ENTRY;
+ struct list_head *tmp, *n;
+ ENTRY;
- /* Make sure that no new requests get processed for this import.
- * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
- * this flag and then putting requests on sending_list or delayed_list.
- */
+ /* Make sure that no new requests get processed for this import.
+ * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
+ * this flag and then putting requests on sending_list or delayed_list.
+ */
spin_lock(&imp->imp_lock);
- /* XXX locking? Maybe we should remove each request with the list
- * locked? Also, how do we know if the requests on the list are
- * being freed at this time?
- */
- cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) {
- struct ptlrpc_request *req =
- cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
+ /* XXX locking? Maybe we should remove each request with the list
+ * locked? Also, how do we know if the requests on the list are
+ * being freed at this time?
+ */
+ list_for_each_safe(tmp, n, &imp->imp_sending_list) {
+ struct ptlrpc_request *req = list_entry(tmp,
+ struct ptlrpc_request,
+ rq_list);
DEBUG_REQ(D_RPCTRACE, req, "inflight");
spin_unlock(&req->rq_lock);
}
- cfs_list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
+ list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
struct ptlrpc_request *req =
- cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
+ list_entry(tmp, struct ptlrpc_request, rq_list);
DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
EXIT;
}
-EXPORT_SYMBOL(ptlrpc_abort_inflight);
/**
* Abort all uncompleted requests in request set \a set
*/
void ptlrpc_abort_set(struct ptlrpc_request_set *set)
{
- cfs_list_t *tmp, *pos;
+ struct list_head *tmp, *pos;
- LASSERT(set != NULL);
+ LASSERT(set != NULL);
- cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
- struct ptlrpc_request *req =
- cfs_list_entry(pos, struct ptlrpc_request,
- rq_set_chain);
+ list_for_each_safe(pos, tmp, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(pos, struct ptlrpc_request,
+ rq_set_chain);
spin_lock(&req->rq_lock);
if (req->rq_phase != RQ_PHASE_RPC) {
return next;
}
-EXPORT_SYMBOL(ptlrpc_next_xid);
+
+/**
+ * If the request has a newly allocated XID (new request or EINPROGRESS
+ * resend), use that XID as the bulk matchbits; otherwise allocate new
+ * matchbits for the request so that the previous bulk fails, avoiding
+ * problems with lost replies and several transfers landing in the same
+ * buffer from different sending attempts.
+ */
+void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req)
+{
+ struct ptlrpc_bulk_desc *bd = req->rq_bulk;
+
+ LASSERT(bd != NULL);
+
+ if (!req->rq_resend || req->rq_nr_resend != 0) {
+ /* this request has a new xid, just use it as bulk matchbits */
+ req->rq_mbits = req->rq_xid;
+
+ } else { /* needs to generate a new matchbits for resend */
+ __u64 old_mbits = req->rq_mbits;
+
+ if ((bd->bd_import->imp_connect_data.ocd_connect_flags &
+ OBD_CONNECT_BULK_MBITS) != 0)
+ req->rq_mbits = ptlrpc_next_xid();
+ else /* old version transfers rq_xid to peer as matchbits */
+ req->rq_mbits = req->rq_xid = ptlrpc_next_xid();
+
+ CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n",
+ old_mbits, req->rq_mbits);
+ }
+
+	/* For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so
+	 * that the server can infer the number of bulks that were prepared,
+	 * see LU-1431 */
+ req->rq_mbits += ((bd->bd_iov_count + LNET_MAX_IOV - 1) /
+ LNET_MAX_IOV) - 1;
+}
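/*
 * Worked example for the multi-bulk adjustment above (illustrative values,
 * assuming LNET_MAX_IOV == 256): a descriptor with bd_iov_count == 600
 * fragments needs (600 + 256 - 1) / 256 == 3 bulk MDs, so rq_mbits is
 * advanced by 2 and the MDs use matchbits {mbits - 2, mbits - 1, mbits}.
 * Seeing only the final value, the server can recompute the MD count the
 * same way (see LU-1431).
 */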
/**
* Get a glimpse at what next xid value might have been.
* have delay before it really runs by ptlrpcd thread.
*/
struct ptlrpc_work_async_args {
- __u64 magic;
- int (*cb)(const struct lu_env *, void *);
- void *cbdata;
+ int (*cb)(const struct lu_env *, void *);
+ void *cbdata;
};
-#define PTLRPC_WORK_MAGIC 0x6655436b676f4f44ULL /* magic code */
+static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
+{
+ /* re-initialize the req */
+ req->rq_timeout = obd_timeout;
+ req->rq_sent = cfs_time_current_sec();
+ req->rq_deadline = req->rq_sent + req->rq_timeout;
+ req->rq_reply_deadline = req->rq_deadline;
+ req->rq_phase = RQ_PHASE_INTERPRET;
+ req->rq_next_phase = RQ_PHASE_COMPLETE;
+ req->rq_xid = ptlrpc_next_xid();
+ req->rq_import_generation = req->rq_import->imp_generation;
+
+ ptlrpcd_add_req(req);
+}
static int work_interpreter(const struct lu_env *env,
- struct ptlrpc_request *req, void *data, int rc)
+ struct ptlrpc_request *req, void *data, int rc)
{
- struct ptlrpc_work_async_args *arg = data;
+ struct ptlrpc_work_async_args *arg = data;
- LASSERT(arg->magic == PTLRPC_WORK_MAGIC);
- LASSERT(arg->cb != NULL);
+ LASSERT(ptlrpcd_check_work(req));
+ LASSERT(arg->cb != NULL);
- return arg->cb(env, arg->cbdata);
+ rc = arg->cb(env, arg->cbdata);
+
+ list_del_init(&req->rq_set_chain);
+ req->rq_set = NULL;
+
+ if (atomic_dec_return(&req->rq_refcount) > 1) {
+ atomic_set(&req->rq_refcount, 2);
+ ptlrpcd_add_work_req(req);
+ }
+ return rc;
+}
+
+static int worker_format;
+
+static int ptlrpcd_check_work(struct ptlrpc_request *req)
+{
+ return req->rq_pill.rc_fmt == (void *)&worker_format;
}
/**
* Create a work for ptlrpc.
*/
void *ptlrpcd_alloc_work(struct obd_import *imp,
- int (*cb)(const struct lu_env *, void *), void *cbdata)
+ int (*cb)(const struct lu_env *, void *), void *cbdata)
{
- struct ptlrpc_request *req = NULL;
- struct ptlrpc_work_async_args *args;
- ENTRY;
+ struct ptlrpc_request *req = NULL;
+ struct ptlrpc_work_async_args *args;
+ ENTRY;
- cfs_might_sleep();
+ might_sleep();
- if (cb == NULL)
- RETURN(ERR_PTR(-EINVAL));
+ if (cb == NULL)
+ RETURN(ERR_PTR(-EINVAL));
- /* copy some code from deprecated fakereq. */
- OBD_ALLOC_PTR(req);
- if (req == NULL) {
- CERROR("ptlrpc: run out of memory!\n");
- RETURN(ERR_PTR(-ENOMEM));
- }
+ /* copy some code from deprecated fakereq. */
+ req = ptlrpc_request_cache_alloc(GFP_NOFS);
+ if (req == NULL) {
+ CERROR("ptlrpc: run out of memory!\n");
+ RETURN(ERR_PTR(-ENOMEM));
+ }
+
+ ptlrpc_cli_req_init(req);
+
+ req->rq_send_state = LUSTRE_IMP_FULL;
+ req->rq_type = PTL_RPC_MSG_REQUEST;
+ req->rq_import = class_import_get(imp);
+ req->rq_interpret_reply = work_interpreter;
+ /* don't want reply */
+ req->rq_no_delay = req->rq_no_resend = 1;
+ req->rq_pill.rc_fmt = (void *)&worker_format;
+
+	CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
+ args = ptlrpc_req_async_args(req);
+ args->cb = cb;
+ args->cbdata = cbdata;
- req->rq_send_state = LUSTRE_IMP_FULL;
- req->rq_type = PTL_RPC_MSG_REQUEST;
- req->rq_import = class_import_get(imp);
- req->rq_export = NULL;
- req->rq_interpret_reply = work_interpreter;
- /* don't want reply */
- req->rq_receiving_reply = 0;
- req->rq_must_unlink = 0;
- req->rq_no_delay = req->rq_no_resend = 1;
-
- spin_lock_init(&req->rq_lock);
- CFS_INIT_LIST_HEAD(&req->rq_list);
- CFS_INIT_LIST_HEAD(&req->rq_replay_list);
- CFS_INIT_LIST_HEAD(&req->rq_set_chain);
- CFS_INIT_LIST_HEAD(&req->rq_history_list);
- CFS_INIT_LIST_HEAD(&req->rq_exp_list);
- cfs_waitq_init(&req->rq_reply_waitq);
- cfs_waitq_init(&req->rq_set_waitq);
- cfs_atomic_set(&req->rq_refcount, 1);
-
- CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args));
- args = ptlrpc_req_async_args(req);
- args->magic = PTLRPC_WORK_MAGIC;
- args->cb = cb;
- args->cbdata = cbdata;
-
- RETURN(req);
+ RETURN(req);
}
EXPORT_SYMBOL(ptlrpcd_alloc_work);
int ptlrpcd_queue_work(void *handler)
{
- struct ptlrpc_request *req = handler;
+ struct ptlrpc_request *req = handler;
/*
* Check if the req is already being queued.
* for this purpose. This is okay because the caller should use this
* req as opaque data. - Jinshan
*/
- LASSERT(cfs_atomic_read(&req->rq_refcount) > 0);
- if (cfs_atomic_read(&req->rq_refcount) > 1)
- return -EBUSY;
-
- if (cfs_atomic_inc_return(&req->rq_refcount) > 2) { /* race */
- cfs_atomic_dec(&req->rq_refcount);
- return -EBUSY;
- }
-
- /* re-initialize the req */
- req->rq_timeout = obd_timeout;
- req->rq_sent = cfs_time_current_sec();
- req->rq_deadline = req->rq_sent + req->rq_timeout;
- req->rq_reply_deadline = req->rq_deadline;
- req->rq_phase = RQ_PHASE_INTERPRET;
- req->rq_next_phase = RQ_PHASE_COMPLETE;
- req->rq_xid = ptlrpc_next_xid();
- req->rq_import_generation = req->rq_import->imp_generation;
-
- ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
- return 0;
+ LASSERT(atomic_read(&req->rq_refcount) > 0);
+ if (atomic_inc_return(&req->rq_refcount) == 2)
+ ptlrpcd_add_work_req(req);
+ return 0;
}
EXPORT_SYMBOL(ptlrpcd_queue_work);
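/*
 * A minimal usage sketch of the work interface (the callback and wrapper
 * below are hypothetical; only ptlrpcd_alloc_work() and ptlrpcd_queue_work()
 * come from this file).
 */
static int example_work_cb(const struct lu_env *env, void *data)
{
	/* runs later in ptlrpcd context; return 0 on success */
	return 0;
}

static int example_start_work(struct obd_import *imp, void **handle)
{
	/* allocate the work item once, e.g. at setup time */
	*handle = ptlrpcd_alloc_work(imp, example_work_cb, NULL);
	if (IS_ERR(*handle))
		return PTR_ERR(*handle);

	/* queue it whenever the callback should run; queueing while it is
	 * already pending is coalesced yet still guarantees another run */
	return ptlrpcd_queue_work(*handle);
}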