-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include "ptlrpc_internal.h"
+static int ptlrpc_send_new_req(struct ptlrpc_request *req);
+static int ptlrpcd_check_work(struct ptlrpc_request *req);
+
/**
* Initialize passed in client structure \a cl.
*/
cl->cli_reply_portal = rep_portal;
cl->cli_name = name;
}
+EXPORT_SYMBOL(ptlrpc_init_client);
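For reference, a minimal caller sketch (hedged: it assumes the usual ptlrpc_init_client() argument order of request portal, reply portal, name, client struct; the OSC portal constants are only illustrative):

	/* Illustrative usage sketch, not part of this patch. */
	static struct ptlrpc_client osc_rpc_client;

	ptlrpc_init_client(OST_REQUEST_PORTAL, OSC_REPLY_PORTAL,
			   "osc", &osc_rpc_client);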
/**
 * Return PortalRPC connection for remote uuid \a uuid
lnet_process_id_t peer;
int err;
+ /* ptlrpc_uuid_to_peer() initializes its 2nd parameter
+ * before accessing its values. */
+ /* coverity[uninit_use_in_call] */
err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
if (err != 0) {
CNETERR("cannot find peer %s!\n", uuid->uuid);
return c;
}
+EXPORT_SYMBOL(ptlrpc_uuid_to_connection);
/**
- * Allocate and initialize new bulk descriptor
+ * Allocate and initialize new bulk descriptor on the sender.
* Returns pointer to the descriptor or NULL on error.
*/
-static inline struct ptlrpc_bulk_desc *new_bulk(int npages, int type, int portal)
+struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
+ unsigned type, unsigned portal)
{
- struct ptlrpc_bulk_desc *desc;
+ struct ptlrpc_bulk_desc *desc;
+ int i;
- OBD_ALLOC(desc, offsetof (struct ptlrpc_bulk_desc, bd_iov[npages]));
- if (!desc)
- return NULL;
+ OBD_ALLOC(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]));
+ if (!desc)
+ return NULL;
- cfs_spin_lock_init(&desc->bd_lock);
- cfs_waitq_init(&desc->bd_waitq);
- desc->bd_max_iov = npages;
- desc->bd_iov_count = 0;
- LNetInvalidateHandle(&desc->bd_md_h);
- desc->bd_portal = portal;
- desc->bd_type = type;
+ spin_lock_init(&desc->bd_lock);
+ init_waitqueue_head(&desc->bd_waitq);
+ desc->bd_max_iov = npages;
+ desc->bd_iov_count = 0;
+ desc->bd_portal = portal;
+ desc->bd_type = type;
+ desc->bd_md_count = 0;
+ LASSERT(max_brw > 0);
+ desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
+ /* PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
+ * node. Negotiated ocd_brw_size will always be <= this number. */
+ for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
+ LNetInvalidateHandle(&desc->bd_mds[i]);
- return desc;
+ return desc;
}
/**
* error.
*/
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
- int npages, int type, int portal)
+ unsigned npages, unsigned max_brw,
+ unsigned type, unsigned portal)
{
- struct obd_import *imp = req->rq_import;
- struct ptlrpc_bulk_desc *desc;
+ struct obd_import *imp = req->rq_import;
+ struct ptlrpc_bulk_desc *desc;
- ENTRY;
- LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
- desc = new_bulk(npages, type, portal);
- if (desc == NULL)
- RETURN(NULL);
+ ENTRY;
+ LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
+ desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
+ if (desc == NULL)
+ RETURN(NULL);
desc->bd_import_generation = req->rq_import_generation;
desc->bd_import = class_import_get(imp);
return desc;
}
+EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
-/**
- * Prepare bulk descriptor for specified incoming request \a req that
- * can fit \a npages * pages. \a type is bulk type. \a portal is where
- * the bulk to be sent. Used on server-side after request was already
- * received.
- * Returns pointer to newly allocatrd initialized bulk descriptor or NULL on
- * error.
- */
-struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
- int npages, int type, int portal)
-{
- struct obd_export *exp = req->rq_export;
- struct ptlrpc_bulk_desc *desc;
-
- ENTRY;
- LASSERT(type == BULK_PUT_SOURCE || type == BULK_GET_SINK);
-
- desc = new_bulk(npages, type, portal);
- if (desc == NULL)
- RETURN(NULL);
-
- desc->bd_export = class_export_get(exp);
- desc->bd_req = req;
-
- desc->bd_cbid.cbid_fn = server_bulk_callback;
- desc->bd_cbid.cbid_arg = desc;
-
- /* NB we don't assign rq_bulk here; server-side requests are
- * re-used, and the handler frees the bulk desc explicitly. */
-
- return desc;
-}
-
-/**
+/*
* Add a page \a page to the bulk descriptor \a desc.
* Data to transfer in the page starts at offset \a pageoffset and
* amount of data to transfer from the page is \a len
*/
-void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
- cfs_page_t *page, int pageoffset, int len)
+void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
+ struct page *page, int pageoffset, int len, int pin)
{
- LASSERT(desc->bd_iov_count < desc->bd_max_iov);
- LASSERT(page != NULL);
- LASSERT(pageoffset >= 0);
- LASSERT(len > 0);
- LASSERT(pageoffset + len <= CFS_PAGE_SIZE);
+ LASSERT(desc->bd_iov_count < desc->bd_max_iov);
+ LASSERT(page != NULL);
+ LASSERT(pageoffset >= 0);
+ LASSERT(len > 0);
+ LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);
- desc->bd_nob += len;
+ desc->bd_nob += len;
- cfs_page_pin(page);
- ptlrpc_add_bulk_page(desc, page, pageoffset, len);
+ if (pin)
+ page_cache_get(page);
+
+ ptlrpc_add_bulk_page(desc, page, pageoffset, len);
}
+EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
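A hedged sketch of how a client-side caller strings the two helpers above together; the req, page, and portal values are placeholders rather than code from this patch:

	/* Illustrative only: attach a one-page bulk to a prepared request. */
	struct ptlrpc_bulk_desc *desc;

	desc = ptlrpc_prep_bulk_imp(req, 1 /* npages */, 1 /* max_brw */,
				    BULK_PUT_SINK, OST_BULK_PORTAL);
	if (desc == NULL)
		return -ENOMEM;

	__ptlrpc_prep_bulk_page(desc, page, 0, PAGE_CACHE_SIZE, 1 /* pin */);
	/* The descriptor is released later via __ptlrpc_free_bulk(desc, 1). */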
/**
* Uninitialize and free bulk descriptor \a desc.
* Works on bulk descriptors both from server and client side.
*/
-void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
+void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
{
- int i;
- ENTRY;
+ int i;
+ ENTRY;
- LASSERT(desc != NULL);
- LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
- LASSERT(!desc->bd_network_rw); /* network hands off or */
- LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
+ LASSERT(desc != NULL);
+ LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
+ LASSERT(desc->bd_md_count == 0); /* network hands off */
+ LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
- sptlrpc_enc_pool_put_pages(desc);
+ sptlrpc_enc_pool_put_pages(desc);
- if (desc->bd_export)
- class_export_put(desc->bd_export);
- else
- class_import_put(desc->bd_import);
+ if (desc->bd_export)
+ class_export_put(desc->bd_export);
+ else
+ class_import_put(desc->bd_import);
- for (i = 0; i < desc->bd_iov_count ; i++)
- cfs_page_unpin(desc->bd_iov[i].kiov_page);
+ if (unpin) {
+ for (i = 0; i < desc->bd_iov_count ; i++)
+ page_cache_release(desc->bd_iov[i].kiov_page);
+ }
- OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
- bd_iov[desc->bd_max_iov]));
- EXIT;
+ OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
+ bd_iov[desc->bd_max_iov]));
+ EXIT;
}
+EXPORT_SYMBOL(__ptlrpc_free_bulk);
/**
* Set server timelimit for this req, i.e. how long are we willing to wait
reqmsg*/
lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
}
+EXPORT_SYMBOL(ptlrpc_at_set_req_timeout);
/* Adjust max service estimate based on server value */
static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
ENTRY;
req->rq_early = 0;
- cfs_spin_unlock(&req->rq_lock);
+ spin_unlock(&req->rq_lock);
- rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
- if (rc) {
- cfs_spin_lock(&req->rq_lock);
+ rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
+ if (rc) {
+ spin_lock(&req->rq_lock);
RETURN(rc);
}
sptlrpc_cli_finish_early_reply(early_req);
- cfs_spin_lock(&req->rq_lock);
+ if (rc != 0) {
+ spin_lock(&req->rq_lock);
+ RETURN(rc);
+ }
- if (rc == 0) {
- /* Adjust the local timeout for this req */
- ptlrpc_at_set_req_timeout(req);
-
- olddl = req->rq_deadline;
- /* server assumes it now has rq_timeout from when it sent the
- early reply, so client should give it at least that long. */
- req->rq_deadline = cfs_time_current_sec() + req->rq_timeout +
- ptlrpc_at_get_net_latency(req);
-
- DEBUG_REQ(D_ADAPTTO, req,
- "Early reply #%d, new deadline in "CFS_DURATION_T"s "
- "("CFS_DURATION_T"s)", req->rq_early_count,
- cfs_time_sub(req->rq_deadline,
- cfs_time_current_sec()),
- cfs_time_sub(req->rq_deadline, olddl));
- }
+ /* Adjust the local timeout for this req */
+ ptlrpc_at_set_req_timeout(req);
- RETURN(rc);
+ spin_lock(&req->rq_lock);
+ olddl = req->rq_deadline;
+ /* server assumes it now has rq_timeout from when it sent the
+ * early reply, so client should give it at least that long. */
+ req->rq_deadline = cfs_time_current_sec() + req->rq_timeout +
+ ptlrpc_at_get_net_latency(req);
+
+ DEBUG_REQ(D_ADAPTTO, req,
+ "Early reply #%d, new deadline in "CFS_DURATION_T"s "
+ "("CFS_DURATION_T"s)", req->rq_early_count,
+ cfs_time_sub(req->rq_deadline, cfs_time_current_sec()),
+ cfs_time_sub(req->rq_deadline, olddl));
+
+ RETURN(rc);
+}
+
+struct kmem_cache *request_cache;
+
+int ptlrpc_request_cache_init(void)
+{
+ request_cache = kmem_cache_create("ptlrpc_cache",
+ sizeof(struct ptlrpc_request),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ return request_cache == NULL ? -ENOMEM : 0;
+}
+
+void ptlrpc_request_cache_fini(void)
+{
+ kmem_cache_destroy(request_cache);
+}
+
+struct ptlrpc_request *ptlrpc_request_cache_alloc(int flags)
+{
+ struct ptlrpc_request *req;
+
+ OBD_SLAB_ALLOC_PTR_GFP(req, request_cache, flags);
+ return req;
+}
+
+void ptlrpc_request_cache_free(struct ptlrpc_request *req)
+{
+ OBD_SLAB_FREE_PTR(req, request_cache);
}
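The slab-backed cache above replaces the old per-request OBD_ALLOC/OBD_FREE calls; a hedged sketch of the intended lifecycle (the module init/exit context is assumed, not shown in this patch):

	/* Illustrative only: create the cache once, use it, tear it down. */
	struct ptlrpc_request *req;

	if (ptlrpc_request_cache_init() != 0)
		return -ENOMEM;

	req = ptlrpc_request_cache_alloc(__GFP_IO);
	if (req != NULL)
		ptlrpc_request_cache_free(req);

	ptlrpc_request_cache_fini();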
/**
*/
void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
{
- cfs_list_t *l, *tmp;
- struct ptlrpc_request *req;
+ cfs_list_t *l, *tmp;
+ struct ptlrpc_request *req;
- LASSERT(pool != NULL);
+ LASSERT(pool != NULL);
- cfs_spin_lock(&pool->prp_lock);
- cfs_list_for_each_safe(l, tmp, &pool->prp_req_list) {
- req = cfs_list_entry(l, struct ptlrpc_request, rq_list);
- cfs_list_del(&req->rq_list);
- LASSERT(req->rq_reqbuf);
- LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
- OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
- OBD_FREE(req, sizeof(*req));
- }
- cfs_spin_unlock(&pool->prp_lock);
- OBD_FREE(pool, sizeof(*pool));
+ spin_lock(&pool->prp_lock);
+ cfs_list_for_each_safe(l, tmp, &pool->prp_req_list) {
+ req = cfs_list_entry(l, struct ptlrpc_request, rq_list);
+ cfs_list_del(&req->rq_list);
+ LASSERT(req->rq_reqbuf);
+ LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
+ OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
+ ptlrpc_request_cache_free(req);
+ }
+ spin_unlock(&pool->prp_lock);
+ OBD_FREE(pool, sizeof(*pool));
}
+EXPORT_SYMBOL(ptlrpc_free_rq_pool);
/**
* Allocates, initializes and adds \a num_rq requests to the pool \a pool
"Trying to change pool size with nonempty pool "
"from %d to %d bytes\n", pool->prp_rq_size, size);
- cfs_spin_lock(&pool->prp_lock);
- pool->prp_rq_size = size;
- for (i = 0; i < num_rq; i++) {
- struct ptlrpc_request *req;
- struct lustre_msg *msg;
-
- cfs_spin_unlock(&pool->prp_lock);
- OBD_ALLOC(req, sizeof(struct ptlrpc_request));
- if (!req)
- return;
- OBD_ALLOC_LARGE(msg, size);
- if (!msg) {
- OBD_FREE(req, sizeof(struct ptlrpc_request));
- return;
+ spin_lock(&pool->prp_lock);
+ pool->prp_rq_size = size;
+ for (i = 0; i < num_rq; i++) {
+ struct ptlrpc_request *req;
+ struct lustre_msg *msg;
+
+ spin_unlock(&pool->prp_lock);
+ req = ptlrpc_request_cache_alloc(__GFP_IO);
+ if (!req)
+ return;
+ OBD_ALLOC_LARGE(msg, size);
+ if (!msg) {
+ ptlrpc_request_cache_free(req);
+ return;
}
req->rq_reqbuf = msg;
req->rq_reqbuf_len = size;
req->rq_pool = pool;
- cfs_spin_lock(&pool->prp_lock);
- cfs_list_add_tail(&req->rq_list, &pool->prp_req_list);
- }
- cfs_spin_unlock(&pool->prp_lock);
- return;
+ spin_lock(&pool->prp_lock);
+ cfs_list_add_tail(&req->rq_list, &pool->prp_req_list);
+ }
+ spin_unlock(&pool->prp_lock);
+ return;
}
+EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
/**
* Create and initialize new request pool with given attributes:
/* Request next power of two for the allocation, because internally
kernel would do exactly this */
- cfs_spin_lock_init(&pool->prp_lock);
+ spin_lock_init(&pool->prp_lock);
CFS_INIT_LIST_HEAD(&pool->prp_req_list);
pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
pool->prp_populate = populate_pool;
}
return pool;
}
+EXPORT_SYMBOL(ptlrpc_init_rq_pool);
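A hedged example of pool creation for a writeout-style emergency pool (it assumes the usual num_rq/msgsize/populate-callback argument order; the counts are illustrative):

	/* Illustrative only: small reserve pool for memory-pressure paths. */
	struct ptlrpc_request_pool *pool;

	pool = ptlrpc_init_rq_pool(4 /* num_rq */, OST_MAXREQSIZE,
				   ptlrpc_add_rqs_to_pool);
	if (pool == NULL)
		return -ENOMEM;
	/* ... hand the pool to ptlrpc_request_alloc_pool() callers ... */
	ptlrpc_free_rq_pool(pool);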
/**
* Fetches one request from pool \a pool
if (!pool)
return NULL;
- cfs_spin_lock(&pool->prp_lock);
+ spin_lock(&pool->prp_lock);
- /* See if we have anything in a pool, and bail out if nothing,
- * in writeout path, where this matters, this is safe to do, because
- * nothing is lost in this case, and when some in-flight requests
- * complete, this code will be called again. */
- if (unlikely(cfs_list_empty(&pool->prp_req_list))) {
- cfs_spin_unlock(&pool->prp_lock);
- return NULL;
- }
+	/* See if we have anything in the pool and bail out if not. In the
+	 * writeout path, where this matters, this is safe to do because
+	 * nothing is lost in this case, and when some in-flight requests
+	 * complete, this code will be called again. */
+ if (unlikely(cfs_list_empty(&pool->prp_req_list))) {
+ spin_unlock(&pool->prp_lock);
+ return NULL;
+ }
- request = cfs_list_entry(pool->prp_req_list.next, struct ptlrpc_request,
- rq_list);
- cfs_list_del_init(&request->rq_list);
- cfs_spin_unlock(&pool->prp_lock);
+ request = cfs_list_entry(pool->prp_req_list.next, struct ptlrpc_request,
+ rq_list);
+ cfs_list_del_init(&request->rq_list);
+ spin_unlock(&pool->prp_lock);
LASSERT(request->rq_reqbuf);
LASSERT(request->rq_pool);
*/
static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
{
- struct ptlrpc_request_pool *pool = request->rq_pool;
+ struct ptlrpc_request_pool *pool = request->rq_pool;
- cfs_spin_lock(&pool->prp_lock);
- LASSERT(cfs_list_empty(&request->rq_list));
- LASSERT(!request->rq_receiving_reply);
- cfs_list_add_tail(&request->rq_list, &pool->prp_req_list);
- cfs_spin_unlock(&pool->prp_lock);
+ spin_lock(&pool->prp_lock);
+ LASSERT(cfs_list_empty(&request->rq_list));
+ LASSERT(!request->rq_receiving_reply);
+ cfs_list_add_tail(&request->rq_list, &pool->prp_req_list);
+ spin_unlock(&pool->prp_lock);
}
static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
ptlrpc_at_set_req_timeout(request);
- cfs_spin_lock_init(&request->rq_lock);
- CFS_INIT_LIST_HEAD(&request->rq_list);
- CFS_INIT_LIST_HEAD(&request->rq_timed_list);
- CFS_INIT_LIST_HEAD(&request->rq_replay_list);
- CFS_INIT_LIST_HEAD(&request->rq_ctx_chain);
- CFS_INIT_LIST_HEAD(&request->rq_set_chain);
- CFS_INIT_LIST_HEAD(&request->rq_history_list);
- CFS_INIT_LIST_HEAD(&request->rq_exp_list);
- cfs_waitq_init(&request->rq_reply_waitq);
- cfs_waitq_init(&request->rq_set_waitq);
- request->rq_xid = ptlrpc_next_xid();
- cfs_atomic_set(&request->rq_refcount, 1);
-
- lustre_msg_set_opc(request->rq_reqmsg, opcode);
-
- RETURN(0);
+ spin_lock_init(&request->rq_lock);
+ CFS_INIT_LIST_HEAD(&request->rq_list);
+ CFS_INIT_LIST_HEAD(&request->rq_timed_list);
+ CFS_INIT_LIST_HEAD(&request->rq_replay_list);
+ CFS_INIT_LIST_HEAD(&request->rq_ctx_chain);
+ CFS_INIT_LIST_HEAD(&request->rq_set_chain);
+ CFS_INIT_LIST_HEAD(&request->rq_history_list);
+ CFS_INIT_LIST_HEAD(&request->rq_exp_list);
+ init_waitqueue_head(&request->rq_reply_waitq);
+ init_waitqueue_head(&request->rq_set_waitq);
+ request->rq_xid = ptlrpc_next_xid();
+ cfs_atomic_set(&request->rq_refcount, 1);
+
+ lustre_msg_set_opc(request->rq_reqmsg, opcode);
+
+ RETURN(0);
out_ctx:
- sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
+ sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
out_free:
- class_import_put(imp);
- return rc;
+ class_import_put(imp);
+ return rc;
}
int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
int ptlrpc_request_pack(struct ptlrpc_request *request,
__u32 version, int opcode)
{
- return ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
-}
+ int rc;
+ rc = ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
+ if (rc)
+ return rc;
+
+ /* For some old 1.8 clients (< 1.8.7), they will LASSERT the size of
+ * ptlrpc_body sent from server equal to local ptlrpc_body size, so we
+ * have to send old ptlrpc_body to keep interoperability with these
+ * clients.
+ *
+ * Only three kinds of server->client RPCs so far:
+ * - LDLM_BL_CALLBACK
+ * - LDLM_CP_CALLBACK
+ * - LDLM_GL_CALLBACK
+ *
+ * XXX This should be removed whenever we drop interoperability with
+ * these old clients.
+ */
+ if (opcode == LDLM_BL_CALLBACK || opcode == LDLM_CP_CALLBACK ||
+ opcode == LDLM_GL_CALLBACK)
+ req_capsule_shrink(&request->rq_pill, &RMF_PTLRPC_BODY,
+ sizeof(struct ptlrpc_body_v2), RCL_CLIENT);
+
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpc_request_pack);
/**
* Helper function to allocate new request on import \a imp
struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
struct ptlrpc_request_pool *pool)
{
- struct ptlrpc_request *request = NULL;
+ struct ptlrpc_request *request = NULL;
- if (pool)
- request = ptlrpc_prep_req_from_pool(pool);
+ if (pool)
+ request = ptlrpc_prep_req_from_pool(pool);
- if (!request)
- OBD_ALLOC_PTR(request);
+ if (!request)
+ request = ptlrpc_request_cache_alloc(__GFP_IO);
if (request) {
LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
{
return ptlrpc_request_alloc_internal(imp, NULL, format);
}
+EXPORT_SYMBOL(ptlrpc_request_alloc);
/**
* Allocate new request structure for import \a imp from pool \a pool and
{
return ptlrpc_request_alloc_internal(imp, pool, format);
}
+EXPORT_SYMBOL(ptlrpc_request_alloc_pool);
/**
* For requests not from pool, free memory of the request structure.
*/
void ptlrpc_request_free(struct ptlrpc_request *request)
{
- if (request->rq_pool)
- __ptlrpc_free_req_to_pool(request);
- else
- OBD_FREE_PTR(request);
+ if (request->rq_pool)
+ __ptlrpc_free_req_to_pool(request);
+ else
+ ptlrpc_request_cache_free(request);
}
+EXPORT_SYMBOL(ptlrpc_request_free);
/**
 * Allocate new request for operation \a opcode and immediately pack it for
}
return req;
}
+EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
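A hedged sketch of the common synchronous pattern built on these helpers (RQF_OBD_PING/OBD_PING are used only as a convenient, well-known request format; error handling is trimmed):

	/* Illustrative only: allocate+pack, send synchronously, drop the ref. */
	struct ptlrpc_request *req;
	int rc;

	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
					LUSTRE_OBD_VERSION, OBD_PING);
	if (req == NULL)
		return -ENOMEM;

	ptlrpc_request_set_replen(req);
	rc = ptlrpc_queue_wait(req);
	ptlrpc_req_finished(req);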
/**
 * Prepare request (fetched from pool \a pool if not NULL) on import \a imp
}
return request;
}
+EXPORT_SYMBOL(ptlrpc_prep_req_pool);
/**
* Same as ptlrpc_prep_req_pool, but without pool
return ptlrpc_prep_req_pool(imp, version, opcode, count, lengths, bufs,
NULL);
}
+EXPORT_SYMBOL(ptlrpc_prep_req);
/**
- * Allocate "fake" request that would not be sent anywhere in the end.
- * Only used as a hack because we have no other way of performing
- * async actions in lustre between layers.
- * Used on MDS to request object preallocations from more than one OST at a
- * time.
- */
-struct ptlrpc_request *ptlrpc_prep_fakereq(struct obd_import *imp,
- unsigned int timeout,
- ptlrpc_interpterer_t interpreter)
-{
- struct ptlrpc_request *request = NULL;
- ENTRY;
-
- OBD_ALLOC(request, sizeof(*request));
- if (!request) {
- CERROR("request allocation out of memory\n");
- RETURN(NULL);
- }
-
- request->rq_send_state = LUSTRE_IMP_FULL;
- request->rq_type = PTL_RPC_MSG_REQUEST;
- request->rq_import = class_import_get(imp);
- request->rq_export = NULL;
- request->rq_import_generation = imp->imp_generation;
-
- request->rq_timeout = timeout;
- request->rq_sent = cfs_time_current_sec();
- request->rq_deadline = request->rq_sent + timeout;
- request->rq_reply_deadline = request->rq_deadline;
- request->rq_interpret_reply = interpreter;
- request->rq_phase = RQ_PHASE_RPC;
- request->rq_next_phase = RQ_PHASE_INTERPRET;
- /* don't want reply */
- request->rq_receiving_reply = 0;
- request->rq_must_unlink = 0;
- request->rq_no_delay = request->rq_no_resend = 1;
- request->rq_fake = 1;
-
- cfs_spin_lock_init(&request->rq_lock);
- CFS_INIT_LIST_HEAD(&request->rq_list);
- CFS_INIT_LIST_HEAD(&request->rq_replay_list);
- CFS_INIT_LIST_HEAD(&request->rq_set_chain);
- CFS_INIT_LIST_HEAD(&request->rq_history_list);
- CFS_INIT_LIST_HEAD(&request->rq_exp_list);
- cfs_waitq_init(&request->rq_reply_waitq);
- cfs_waitq_init(&request->rq_set_waitq);
-
- request->rq_xid = ptlrpc_next_xid();
- cfs_atomic_set(&request->rq_refcount, 1);
-
- RETURN(request);
-}
-
-/**
- * Indicate that processing of "fake" request is finished.
+ * Allocate and initialize new request set structure.
+ * Returns a pointer to the newly allocated set structure or NULL on error.
*/
-void ptlrpc_fakereq_finished(struct ptlrpc_request *req)
+struct ptlrpc_request_set *ptlrpc_prep_set(void)
{
- struct ptlrpc_request_set *set = req->rq_set;
- int wakeup = 0;
-
- /* hold ref on the request to prevent others (ptlrpcd) to free it */
- ptlrpc_request_addref(req);
- cfs_list_del_init(&req->rq_list);
-
- /* if we kill request before timeout - need adjust counter */
- if (req->rq_phase == RQ_PHASE_RPC && set != NULL &&
- cfs_atomic_dec_and_test(&set->set_remaining))
- wakeup = 1;
-
- ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
-
- /* Only need to call wakeup once when to be empty. */
- if (wakeup)
- cfs_waitq_signal(&set->set_waitq);
- ptlrpc_req_finished(req);
-}
+ struct ptlrpc_request_set *set;
+
+ ENTRY;
+ OBD_ALLOC(set, sizeof *set);
+ if (!set)
+ RETURN(NULL);
+ cfs_atomic_set(&set->set_refcount, 1);
+ CFS_INIT_LIST_HEAD(&set->set_requests);
+ init_waitqueue_head(&set->set_waitq);
+ cfs_atomic_set(&set->set_new_count, 0);
+ cfs_atomic_set(&set->set_remaining, 0);
+ spin_lock_init(&set->set_new_req_lock);
+ CFS_INIT_LIST_HEAD(&set->set_new_requests);
+ CFS_INIT_LIST_HEAD(&set->set_cblist);
+ set->set_max_inflight = UINT_MAX;
+ set->set_producer = NULL;
+ set->set_producer_arg = NULL;
+ set->set_rc = 0;
+
+ RETURN(set);
+}
+EXPORT_SYMBOL(ptlrpc_prep_set);
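For context, a hedged sketch of the plain request-set pattern that the flow-controlled variant below extends (req1/req2 stand for requests already allocated and packed elsewhere):

	/* Illustrative only: batch two async RPCs and wait for both. */
	struct ptlrpc_request_set *set;
	int rc;

	set = ptlrpc_prep_set();
	if (set == NULL)
		return -ENOMEM;

	ptlrpc_set_add_req(set, req1);
	ptlrpc_set_add_req(set, req2);
	rc = ptlrpc_set_wait(set);
	ptlrpc_set_destroy(set);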
/**
- * Allocate and initialize new request set structure.
+ * Allocate and initialize new request set structure with flow control
+ * extension. This extension allows control of the number of requests in
+ * flight for the whole set. A callback function to generate requests must
+ * be provided, and the request set will keep the number of requests sent
+ * over the wire at or below @max_inflight.
* Returns a pointer to the newly allocated set structure or NULL on error.
*/
-struct ptlrpc_request_set *ptlrpc_prep_set(void)
+struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
+ void *arg)
{
- struct ptlrpc_request_set *set;
+ struct ptlrpc_request_set *set;
- ENTRY;
- OBD_ALLOC(set, sizeof *set);
- if (!set)
- RETURN(NULL);
- cfs_atomic_set(&set->set_refcount, 1);
- CFS_INIT_LIST_HEAD(&set->set_requests);
- cfs_waitq_init(&set->set_waitq);
- cfs_atomic_set(&set->set_new_count, 0);
- cfs_atomic_set(&set->set_remaining, 0);
- cfs_spin_lock_init(&set->set_new_req_lock);
- CFS_INIT_LIST_HEAD(&set->set_new_requests);
- CFS_INIT_LIST_HEAD(&set->set_cblist);
+ set = ptlrpc_prep_set();
+ if (!set)
+ RETURN(NULL);
- RETURN(set);
+ set->set_max_inflight = max;
+ set->set_producer = func;
+ set->set_producer_arg = arg;
+
+ RETURN(set);
}
+EXPORT_SYMBOL(ptlrpc_prep_fcset);
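A hedged sketch of a producer callback for the flow-controlled set: ptlrpc_set_producer() keeps calling it until it returns -ENOENT, and each successful call is expected to add one new request to the set (the work-queue helpers here are hypothetical, not taken from this patch):

	/* Illustrative only: feed queued requests into the set on demand. */
	static int my_req_producer(struct ptlrpc_request_set *set, void *arg)
	{
		struct my_work_queue *q = arg;		/* hypothetical state */
		struct ptlrpc_request *req;

		req = my_work_queue_next(q);		/* hypothetical helper */
		if (req == NULL)
			return -ENOENT;			/* nothing left */

		ptlrpc_set_add_req(set, req);		/* sent right away */
		return 0;
	}

	set = ptlrpc_prep_fcset(8 /* max in flight */, my_req_producer, q);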
/**
* Wind down and free request set structure previously allocated with
cfs_atomic_dec(&set->set_remaining);
}
- cfs_spin_lock(&req->rq_lock);
- req->rq_set = NULL;
- req->rq_invalid_rqset = 0;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_set = NULL;
+ req->rq_invalid_rqset = 0;
+ spin_unlock(&req->rq_lock);
ptlrpc_req_finished (req);
}
ptlrpc_reqset_put(set);
EXIT;
}
+EXPORT_SYMBOL(ptlrpc_set_destroy);
/**
* Add a callback function \a fn to the set.
RETURN(0);
}
+EXPORT_SYMBOL(ptlrpc_set_add_cb);
/**
* Add a new request to the general purpose request set.
void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
struct ptlrpc_request *req)
{
- LASSERT(cfs_list_empty(&req->rq_set_chain));
+ LASSERT(cfs_list_empty(&req->rq_set_chain));
+
+ /* The set takes over the caller's request reference */
+ cfs_list_add_tail(&req->rq_set_chain, &set->set_requests);
+ req->rq_set = set;
+ cfs_atomic_inc(&set->set_remaining);
+ req->rq_queued_time = cfs_time_current();
- /* The set takes over the caller's request reference */
- cfs_list_add_tail(&req->rq_set_chain, &set->set_requests);
- req->rq_set = set;
- cfs_atomic_inc(&set->set_remaining);
- req->rq_queued_time = cfs_time_current();
+ if (req->rq_reqmsg != NULL)
+ lustre_msg_set_jobid(req->rq_reqmsg, NULL);
+
+ if (set->set_producer != NULL)
+ /* If the request set has a producer callback, the RPC must be
+ * sent straight away */
+ ptlrpc_send_new_req(req);
}
+EXPORT_SYMBOL(ptlrpc_set_add_req);
/**
 * Add a request to a request set with a dedicated server thread
int count, i;
LASSERT(req->rq_set == NULL);
- LASSERT(cfs_test_bit(LIOD_STOP, &pc->pc_flags) == 0);
-
- cfs_spin_lock(&set->set_new_req_lock);
- /*
- * The set takes over the caller's request reference.
- */
- req->rq_set = set;
- req->rq_queued_time = cfs_time_current();
- cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
- count = cfs_atomic_inc_return(&set->set_new_count);
- cfs_spin_unlock(&set->set_new_req_lock);
-
- /* Only need to call wakeup once for the first entry. */
- if (count == 1) {
- cfs_waitq_signal(&set->set_waitq);
-
- /* XXX: It maybe unnecessary to wakeup all the partners. But to
- * guarantee the async RPC can be processed ASAP, we have
- * no other better choice. It maybe fixed in future. */
- for (i = 0; i < pc->pc_npartners; i++)
- cfs_waitq_signal(&pc->pc_partners[i]->pc_set->set_waitq);
- }
-}
+ LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);
+
+ spin_lock(&set->set_new_req_lock);
+ /*
+ * The set takes over the caller's request reference.
+ */
+ req->rq_set = set;
+ req->rq_queued_time = cfs_time_current();
+ cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
+ count = cfs_atomic_inc_return(&set->set_new_count);
+ spin_unlock(&set->set_new_req_lock);
+
+ /* Only need to call wakeup once for the first entry. */
+ if (count == 1) {
+ wake_up(&set->set_waitq);
+
+ /* XXX: It may be unnecessary to wake up all the partners, but to
+ * guarantee the async RPC can be processed ASAP, we have no
+ * better choice. It may be fixed in the future. */
+ for (i = 0; i < pc->pc_npartners; i++)
+ wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
+ }
+}
+EXPORT_SYMBOL(ptlrpc_set_add_new_req);
/**
* Based on the current state of the import, determine if the request
} else if (imp->imp_state == LUSTRE_IMP_NEW) {
DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
*status = -EIO;
- LBUG();
- } else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
- DEBUG_REQ(D_ERROR, req, "IMP_CLOSED ");
- *status = -EIO;
+ } else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
+ /* pings may safely race with umount */
+ DEBUG_REQ(lustre_msg_get_opc(req->rq_reqmsg) == OBD_PING ?
+ D_HA : D_ERROR, req, "IMP_CLOSED ");
+ *status = -EIO;
} else if (ptlrpc_send_limit_expired(req)) {
/* probably doesn't need to be a D_ERROR after initial testing */
DEBUG_REQ(D_ERROR, req, "send limit expired ");
DEBUG_REQ(D_ERROR, req, "invalidate in flight");
*status = -EIO;
}
- } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
- if (!imp->imp_deactive)
- DEBUG_REQ(D_ERROR, req, "IMP_INVALID");
- *status = -ESHUTDOWN; /* bz 12940 */
- } else if (req->rq_import_generation != imp->imp_generation) {
+ } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
+ if (!imp->imp_deactive)
+ DEBUG_REQ(D_NET, req, "IMP_INVALID");
+ *status = -ESHUTDOWN; /* bz 12940 */
+ } else if (req->rq_import_generation != imp->imp_generation) {
DEBUG_REQ(D_ERROR, req, "req wrong generation:");
*status = -EIO;
} else if (req->rq_send_state != imp->imp_state) {
*status = -EIO;
} else if (imp->imp_dlm_fake || req->rq_no_delay) {
*status = -EWOULDBLOCK;
- } else {
- delay = 1;
- }
- }
+ } else if (req->rq_allow_replay &&
+ (imp->imp_state == LUSTRE_IMP_REPLAY ||
+ imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS ||
+ imp->imp_state == LUSTRE_IMP_REPLAY_WAIT ||
+ imp->imp_state == LUSTRE_IMP_RECOVER)) {
+ DEBUG_REQ(D_HA, req, "allow during recovery.\n");
+ } else {
+ delay = 1;
+ }
+ }
- RETURN(delay);
+ RETURN(delay);
}
/**
*/
static int ptlrpc_console_allow(struct ptlrpc_request *req)
{
- __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
+ __u32 opc;
int err;
+ LASSERT(req->rq_reqmsg != NULL);
+ opc = lustre_msg_get_opc(req->rq_reqmsg);
+
/* Suppress particular reconnect errors which are to be expected. No
* errors are suppressed for the initial connection on an import */
if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) &&
ENTRY;
err = lustre_msg_get_status(req->rq_repmsg);
- if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
- struct obd_import *imp = req->rq_import;
- __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
- LCONSOLE_ERROR_MSG(0x011,"an error occurred while communicating"
- " with %s. The %s operation failed with %d\n",
- libcfs_nid2str(imp->imp_connection->c_peer.nid),
- ll_opcode2str(opc), err);
- RETURN(err < 0 ? err : -EINVAL);
- }
+ if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
+ struct obd_import *imp = req->rq_import;
+ __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
+ if (ptlrpc_console_allow(req))
+ LCONSOLE_ERROR_MSG(0x011, "%s: Communicating with %s,"
+ " operation %s failed with %d.\n",
+ imp->imp_obd->obd_name,
+ libcfs_nid2str(
+ imp->imp_connection->c_peer.nid),
+ ll_opcode2str(opc), err);
+ RETURN(err < 0 ? err : -EINVAL);
+ }
if (err < 0) {
DEBUG_REQ(D_INFO, req, "status is %d", err);
DEBUG_REQ(D_INFO, req, "status is %d", err);
}
- if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
- struct obd_import *imp = req->rq_import;
- __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
-
- if (ptlrpc_console_allow(req))
- LCONSOLE_ERROR_MSG(0x011,"an error occurred while "
- "communicating with %s. The %s "
- "operation failed with %d\n",
- libcfs_nid2str(
- imp->imp_connection->c_peer.nid),
- ll_opcode2str(opc), err);
-
- RETURN(err < 0 ? err : -EINVAL);
- }
-
RETURN(err);
}
 * will round it up */
req->rq_replen = req->rq_nob_received;
req->rq_nob_received = 0;
- req->rq_resend = 1;
+ spin_lock(&req->rq_lock);
+ req->rq_resend = 1;
+ spin_unlock(&req->rq_lock);
RETURN(0);
}
RETURN(rc);
}
- /*
- * Security layer unwrap might ask resend this request.
- */
- if (req->rq_resend)
- RETURN(0);
-
- rc = unpack_reply(req);
- if (rc)
- RETURN(rc);
-
- cfs_gettimeofday(&work_start);
- timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL);
- if (obd->obd_svc_stats != NULL) {
- lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
- timediff);
- ptlrpc_lprocfs_rpc_sent(req, timediff);
- }
+ /*
+ * Security layer unwrap might ask to resend this request.
+ */
+ if (req->rq_resend)
+ RETURN(0);
+
+ rc = unpack_reply(req);
+ if (rc)
+ RETURN(rc);
+
+ /* retry indefinitely on EINPROGRESS */
+ if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
+ ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
+ time_t now = cfs_time_current_sec();
+
+ DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
+ req->rq_resend = 1;
+ req->rq_nr_resend++;
+
+ /* allocate new xid to avoid reply reconstruction */
+ if (!req->rq_bulk) {
+ /* new xid is already allocated for bulk in
+ * ptlrpc_check_set() */
+ req->rq_xid = ptlrpc_next_xid();
+ DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for "
+ "resend on EINPROGRESS");
+ }
+
+ /* Readjust the timeout for current conditions */
+ ptlrpc_at_set_req_timeout(req);
+ /* Delay the resend to give the server a chance to get ready.
+ * The delay is increased by 1s on every resend and is capped at
+ * the current request timeout (i.e. obd_timeout if AT is off,
+ * or AT service time x 125% + 5s, see at_est2timeout). */
+ if (req->rq_nr_resend > req->rq_timeout)
+ req->rq_sent = now + req->rq_timeout;
+ else
+ req->rq_sent = now + req->rq_nr_resend;
+
+ RETURN(0);
+ }
+
+ do_gettimeofday(&work_start);
+ timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL);
+ if (obd->obd_svc_stats != NULL) {
+ lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
+ timediff);
+ ptlrpc_lprocfs_rpc_sent(req, timediff);
+ }
if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
}
if (imp->imp_replayable) {
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
/*
* No point in adding already-committed requests to the replay
* list, we will just remove them immediately. b=9829
/** version recovery */
ptlrpc_save_versions(req);
ptlrpc_retain_replayable_request(req, imp);
- } else if (req->rq_commit_cb != NULL) {
- cfs_spin_unlock(&imp->imp_lock);
- req->rq_commit_cb(req);
- cfs_spin_lock(&imp->imp_lock);
+ } else if (req->rq_commit_cb != NULL &&
+ list_empty(&req->rq_replay_list)) {
+ /* NB: don't call rq_commit_cb if it's already on
+ * rq_replay_list, ptlrpc_free_committed() will call
+ * it later, see LU-3618 for details */
+ spin_unlock(&imp->imp_lock);
+ req->rq_commit_cb(req);
+ spin_lock(&imp->imp_lock);
}
/*
imp->imp_peer_committed_transno =
lustre_msg_get_last_committed(req->rq_repmsg);
}
- ptlrpc_free_committed(imp);
- if (req->rq_transno > imp->imp_peer_committed_transno)
- ptlrpc_pinger_commit_expected(imp);
+ ptlrpc_free_committed(imp);
- cfs_spin_unlock(&imp->imp_lock);
- }
+ if (!cfs_list_empty(&imp->imp_replay_list)) {
+ struct ptlrpc_request *last;
- RETURN(rc);
+ last = cfs_list_entry(imp->imp_replay_list.prev,
+ struct ptlrpc_request,
+ rq_replay_list);
+ /*
+ * Requests with rq_replay stay on the list even if no
+ * commit is expected.
+ */
+ if (last->rq_transno > imp->imp_peer_committed_transno)
+ ptlrpc_pinger_commit_expected(imp);
+ }
+
+ spin_unlock(&imp->imp_lock);
+ }
+
+ RETURN(rc);
}
/**
* Helper function to send request \a req over the network for the first time
* Also adjusts request phase.
* Returns 0 on success or error code.
- */
+ */
static int ptlrpc_send_new_req(struct ptlrpc_request *req)
{
- struct obd_import *imp;
+ struct obd_import *imp = req->rq_import;
int rc;
ENTRY;
LASSERT(req->rq_phase == RQ_PHASE_NEW);
- if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()))
+ if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
+ (!req->rq_generation_set ||
+ req->rq_import_generation == imp->imp_generation))
RETURN (0);
ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
- imp = req->rq_import;
- cfs_spin_lock(&imp->imp_lock);
-
- req->rq_import_generation = imp->imp_generation;
-
- if (ptlrpc_import_delay_req(imp, req, &rc)) {
- cfs_spin_lock(&req->rq_lock);
- req->rq_waiting = 1;
- cfs_spin_unlock(&req->rq_lock);
-
- DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
- "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
- ptlrpc_import_state_name(req->rq_send_state),
- ptlrpc_import_state_name(imp->imp_state));
- LASSERT(cfs_list_empty(&req->rq_list));
- cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list);
- cfs_atomic_inc(&req->rq_import->imp_inflight);
- cfs_spin_unlock(&imp->imp_lock);
- RETURN(0);
- }
-
- if (rc != 0) {
- cfs_spin_unlock(&imp->imp_lock);
- req->rq_status = rc;
- ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
- RETURN(rc);
- }
-
- LASSERT(cfs_list_empty(&req->rq_list));
- cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list);
- cfs_atomic_inc(&req->rq_import->imp_inflight);
- cfs_spin_unlock(&imp->imp_lock);
-
- lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());
+ spin_lock(&imp->imp_lock);
+
+ if (!req->rq_generation_set)
+ req->rq_import_generation = imp->imp_generation;
+
+ if (ptlrpc_import_delay_req(imp, req, &rc)) {
+ spin_lock(&req->rq_lock);
+ req->rq_waiting = 1;
+ spin_unlock(&req->rq_lock);
+
+ DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
+ "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
+ ptlrpc_import_state_name(req->rq_send_state),
+ ptlrpc_import_state_name(imp->imp_state));
+ LASSERT(cfs_list_empty(&req->rq_list));
+ cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list);
+ cfs_atomic_inc(&req->rq_import->imp_inflight);
+ spin_unlock(&imp->imp_lock);
+ RETURN(0);
+ }
+
+ if (rc != 0) {
+ spin_unlock(&imp->imp_lock);
+ req->rq_status = rc;
+ ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+ RETURN(rc);
+ }
+
+ LASSERT(cfs_list_empty(&req->rq_list));
+ cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list);
+ cfs_atomic_inc(&req->rq_import->imp_inflight);
+ spin_unlock(&imp->imp_lock);
+
+ lustre_msg_set_status(req->rq_reqmsg, current_pid());
rc = sptlrpc_req_refresh_ctx(req, -1);
if (rc) {
req->rq_status = rc;
RETURN(1);
} else {
- req->rq_wait_ctx = 1;
+ spin_lock(&req->rq_lock);
+ req->rq_wait_ctx = 1;
+ spin_unlock(&req->rq_lock);
RETURN(0);
}
}
- CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
- " %s:%s:%d:"LPU64":%s:%d\n", cfs_curproc_comm(),
- imp->imp_obd->obd_uuid.uuid,
- lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
- libcfs_nid2str(imp->imp_connection->c_peer.nid),
- lustre_msg_get_opc(req->rq_reqmsg));
+ CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
+ " %s:%s:%d:"LPU64":%s:%d\n", current_comm(),
+ imp->imp_obd->obd_uuid.uuid,
+ lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid),
+ lustre_msg_get_opc(req->rq_reqmsg));
rc = ptl_send_rpc(req, 0);
if (rc) {
DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
- req->rq_net_err = 1;
+ spin_lock(&req->rq_lock);
+ req->rq_net_err = 1;
+ spin_unlock(&req->rq_lock);
RETURN(rc);
}
RETURN(0);
}
+static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
+{
+ int remaining, rc;
+ ENTRY;
+
+ LASSERT(set->set_producer != NULL);
+
+ remaining = cfs_atomic_read(&set->set_remaining);
+
+ /* populate the ->set_requests list with requests until we
+ * reach the maximum number of RPCs in flight for this set */
+ while (cfs_atomic_read(&set->set_remaining) < set->set_max_inflight) {
+ rc = set->set_producer(set, set->set_producer_arg);
+ if (rc == -ENOENT) {
+ /* no more RPC to produce */
+ set->set_producer = NULL;
+ set->set_producer_arg = NULL;
+ RETURN(0);
+ }
+ }
+
+ RETURN((cfs_atomic_read(&set->set_remaining) - remaining));
+}
+
/**
* this sends any unsent RPCs in \a set and returns 1 if all are sent
* and no more replies are expected.
*/
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
{
- cfs_list_t *tmp;
+ cfs_list_t *tmp, *next;
int force_timer_recalc = 0;
ENTRY;
if (cfs_atomic_read(&set->set_remaining) == 0)
RETURN(1);
- cfs_list_for_each(tmp, &set->set_requests) {
+ cfs_list_for_each_safe(tmp, next, &set->set_requests) {
struct ptlrpc_request *req =
cfs_list_entry(tmp, struct ptlrpc_request,
rq_set_chain);
/* delayed send - skip */
if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
- continue;
+ continue;
+
+ /* delayed resend - skip */
+ if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
+ req->rq_sent > cfs_time_current_sec())
+ continue;
if (!(req->rq_phase == RQ_PHASE_RPC ||
req->rq_phase == RQ_PHASE_BULK ||
}
if (req->rq_err) {
- cfs_spin_lock(&req->rq_lock);
- req->rq_replied = 0;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_replied = 0;
+ spin_unlock(&req->rq_lock);
if (req->rq_status == 0)
req->rq_status = -EIO;
ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
if (!ptlrpc_unregister_reply(req, 1))
continue;
- cfs_spin_lock(&imp->imp_lock);
- if (ptlrpc_import_delay_req(imp, req, &status)){
- /* put on delay list - only if we wait
- * recovery finished - before send */
- cfs_list_del_init(&req->rq_list);
- cfs_list_add_tail(&req->rq_list,
- &imp-> \
- imp_delayed_list);
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ if (ptlrpc_import_delay_req(imp, req, &status)){
+ /* put on delay list - only if we wait
+ * recovery finished - before send */
+ cfs_list_del_init(&req->rq_list);
+ cfs_list_add_tail(&req->rq_list,
+ &imp->imp_delayed_list);
+ spin_unlock(&imp->imp_lock);
continue;
}
req->rq_status = status;
ptlrpc_rqphase_move(req,
RQ_PHASE_INTERPRET);
- cfs_spin_unlock(&imp->imp_lock);
- GOTO(interpret, req->rq_status);
- }
- if (ptlrpc_no_resend(req) && !req->rq_wait_ctx) {
- req->rq_status = -ENOTCONN;
- ptlrpc_rqphase_move(req,
- RQ_PHASE_INTERPRET);
- cfs_spin_unlock(&imp->imp_lock);
- GOTO(interpret, req->rq_status);
- }
-
- cfs_list_del_init(&req->rq_list);
- cfs_list_add_tail(&req->rq_list,
- &imp->imp_sending_list);
-
- cfs_spin_unlock(&imp->imp_lock);
-
- cfs_spin_lock(&req->rq_lock);
- req->rq_waiting = 0;
- cfs_spin_unlock(&req->rq_lock);
-
- if (req->rq_timedout || req->rq_resend) {
- /* This is re-sending anyways,
- * let's mark req as resend. */
- cfs_spin_lock(&req->rq_lock);
- req->rq_resend = 1;
- cfs_spin_unlock(&req->rq_lock);
+ spin_unlock(&imp->imp_lock);
+ GOTO(interpret, req->rq_status);
+ }
+ if (ptlrpc_no_resend(req) &&
+ !req->rq_wait_ctx) {
+ req->rq_status = -ENOTCONN;
+ ptlrpc_rqphase_move(req,
+ RQ_PHASE_INTERPRET);
+ spin_unlock(&imp->imp_lock);
+ GOTO(interpret, req->rq_status);
+ }
+
+ cfs_list_del_init(&req->rq_list);
+ cfs_list_add_tail(&req->rq_list,
+ &imp->imp_sending_list);
+
+ spin_unlock(&imp->imp_lock);
+
+ spin_lock(&req->rq_lock);
+ req->rq_waiting = 0;
+ spin_unlock(&req->rq_lock);
+
+ if (req->rq_timedout || req->rq_resend) {
+ /* This is re-sending anyways,
+ * let's mark req as resend. */
+ spin_lock(&req->rq_lock);
+ req->rq_resend = 1;
+ spin_unlock(&req->rq_lock);
if (req->rq_bulk) {
__u64 old_xid;
if (status) {
if (req->rq_err) {
req->rq_status = status;
- cfs_spin_lock(&req->rq_lock);
- req->rq_wait_ctx = 0;
- cfs_spin_unlock(&req->rq_lock);
- force_timer_recalc = 1;
- } else {
- cfs_spin_lock(&req->rq_lock);
- req->rq_wait_ctx = 1;
- cfs_spin_unlock(&req->rq_lock);
- }
-
- continue;
- } else {
- cfs_spin_lock(&req->rq_lock);
- req->rq_wait_ctx = 0;
- cfs_spin_unlock(&req->rq_lock);
- }
-
- rc = ptl_send_rpc(req, 0);
- if (rc) {
- DEBUG_REQ(D_HA, req, "send failed (%d)",
- rc);
- force_timer_recalc = 1;
- cfs_spin_lock(&req->rq_lock);
- req->rq_net_err = 1;
- cfs_spin_unlock(&req->rq_lock);
- }
- /* need to reset the timeout */
- force_timer_recalc = 1;
- }
-
- cfs_spin_lock(&req->rq_lock);
-
- if (ptlrpc_client_early(req)) {
- ptlrpc_at_recv_early_reply(req);
- cfs_spin_unlock(&req->rq_lock);
- continue;
- }
-
- /* Still waiting for a reply? */
- if (ptlrpc_client_recv(req)) {
- cfs_spin_unlock(&req->rq_lock);
- continue;
- }
-
- /* Did we actually receive a reply? */
- if (!ptlrpc_client_replied(req)) {
- cfs_spin_unlock(&req->rq_lock);
- continue;
- }
-
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_wait_ctx = 0;
+ spin_unlock(&req->rq_lock);
+ force_timer_recalc = 1;
+ } else {
+ spin_lock(&req->rq_lock);
+ req->rq_wait_ctx = 1;
+ spin_unlock(&req->rq_lock);
+ }
+
+ continue;
+ } else {
+ spin_lock(&req->rq_lock);
+ req->rq_wait_ctx = 0;
+ spin_unlock(&req->rq_lock);
+ }
+
+ rc = ptl_send_rpc(req, 0);
+ if (rc) {
+ DEBUG_REQ(D_HA, req,
+ "send failed: rc = %d", rc);
+ force_timer_recalc = 1;
+ spin_lock(&req->rq_lock);
+ req->rq_net_err = 1;
+ spin_unlock(&req->rq_lock);
+ continue;
+ }
+ /* need to reset the timeout */
+ force_timer_recalc = 1;
+ }
+
+ spin_lock(&req->rq_lock);
+
+ if (ptlrpc_client_early(req)) {
+ ptlrpc_at_recv_early_reply(req);
+ spin_unlock(&req->rq_lock);
+ continue;
+ }
+
+ /* Still waiting for a reply? */
+ if (ptlrpc_client_recv(req)) {
+ spin_unlock(&req->rq_lock);
+ continue;
+ }
+
+ /* Did we actually receive a reply? */
+ if (!ptlrpc_client_replied(req)) {
+ spin_unlock(&req->rq_lock);
+ continue;
+ }
+
+ spin_unlock(&req->rq_lock);
/* unlink from net because we are going to
* swab in-place of reply buffer */
if (ptlrpc_client_bulk_active(req))
continue;
- if (!req->rq_bulk->bd_success) {
- /* The RPC reply arrived OK, but the bulk screwed
- * up! Dead weird since the server told us the RPC
- * was good after getting the REPLY for her GET or
- * the ACK for her PUT. */
- DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
- req->rq_status = -EIO;
- }
+ if (req->rq_bulk->bd_failure) {
+ /* The RPC reply arrived OK, but the bulk screwed
+ * up! Dead weird since the server told us the RPC
+ * was good after getting the REPLY for her GET or
+ * the ACK for her PUT. */
+ DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
+ req->rq_status = -EIO;
+ }
ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
ptlrpc_req_interpret(env, req, req->rq_status);
- ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
-
- CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:nid:"
- "opc %s:%s:%d:"LPU64":%s:%d\n", cfs_curproc_comm(),
- imp->imp_obd->obd_uuid.uuid,
- req->rq_reqmsg ? lustre_msg_get_status(req->rq_reqmsg):-1,
- req->rq_xid,
- libcfs_nid2str(imp->imp_connection->c_peer.nid),
- req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : -1);
-
- cfs_spin_lock(&imp->imp_lock);
- /* Request already may be not on sending or delaying list. This
- * may happen in the case of marking it erroneous for the case
- * ptlrpc_import_delay_req(req, status) find it impossible to
- * allow sending this rpc and returns *status != 0. */
- if (!cfs_list_empty(&req->rq_list)) {
- cfs_list_del_init(&req->rq_list);
- cfs_atomic_dec(&imp->imp_inflight);
- }
- cfs_spin_unlock(&imp->imp_lock);
-
- cfs_atomic_dec(&set->set_remaining);
- cfs_waitq_broadcast(&imp->imp_recovery_waitq);
+ if (ptlrpcd_check_work(req)) {
+ atomic_dec(&set->set_remaining);
+ continue;
+ }
+ ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
+
+ CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
+ "Completed RPC pname:cluuid:pid:xid:nid:"
+ "opc %s:%s:%d:"LPU64":%s:%d\n",
+ current_comm(), imp->imp_obd->obd_uuid.uuid,
+ lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid),
+ lustre_msg_get_opc(req->rq_reqmsg));
+
+ spin_lock(&imp->imp_lock);
+ /* The request may no longer be on the sending or delayed list. This
+ * may happen when it was marked erroneous because
+ * ptlrpc_import_delay_req(req, status) found it impossible to
+ * allow sending this RPC and returned *status != 0. */
+ if (!cfs_list_empty(&req->rq_list)) {
+ cfs_list_del_init(&req->rq_list);
+ cfs_atomic_dec(&imp->imp_inflight);
+ }
+ spin_unlock(&imp->imp_lock);
+
+ cfs_atomic_dec(&set->set_remaining);
+ wake_up_all(&imp->imp_recovery_waitq);
+
+ if (set->set_producer) {
+ /* produce a new request if possible */
+ if (ptlrpc_set_producer(set) > 0)
+ force_timer_recalc = 1;
+
+ /* free the request that has just been completed
+ * in order not to pollute set->set_requests */
+ cfs_list_del_init(&req->rq_set_chain);
+ spin_lock(&req->rq_lock);
+ req->rq_set = NULL;
+ req->rq_invalid_rqset = 0;
+ spin_unlock(&req->rq_lock);
+
+ /* record rq_status to compute the final status later */
+ if (req->rq_status != 0)
+ set->set_rc = req->rq_status;
+ ptlrpc_req_finished(req);
+ }
}
/* If we hit an error, we want to recover promptly. */
RETURN(cfs_atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
}
+EXPORT_SYMBOL(ptlrpc_check_set);
/**
 * Time out request \a req. If \a async_unlink is set, that means do not wait
*/
int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
{
- struct obd_import *imp = req->rq_import;
- int rc = 0;
- ENTRY;
+ struct obd_import *imp = req->rq_import;
+ int rc = 0;
+ ENTRY;
- cfs_spin_lock(&req->rq_lock);
- req->rq_timedout = 1;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_timedout = 1;
+ spin_unlock(&req->rq_lock);
- DEBUG_REQ(req->rq_fake ? D_INFO : D_WARNING, req, "Request "
- " sent has %s: [sent "CFS_DURATION_T"/"
- "real "CFS_DURATION_T"]",
+ DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent "CFS_DURATION_T
+ "/real "CFS_DURATION_T"]",
req->rq_net_err ? "failed due to network error" :
((req->rq_real_sent == 0 ||
cfs_time_before(req->rq_real_sent, req->rq_sent) ||
RETURN(1);
}
- if (req->rq_fake)
- RETURN(1);
-
cfs_atomic_inc(&imp->imp_timeouts);
/* The DLM server doesn't want recovery run on its imports. */
DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
ptlrpc_import_state_name(req->rq_send_state),
ptlrpc_import_state_name(imp->imp_state));
- cfs_spin_lock(&req->rq_lock);
- req->rq_status = -ETIMEDOUT;
- req->rq_err = 1;
- cfs_spin_unlock(&req->rq_lock);
- RETURN(1);
+ spin_lock(&req->rq_lock);
+ req->rq_status = -ETIMEDOUT;
+ req->rq_err = 1;
+ spin_unlock(&req->rq_lock);
+ RETURN(1);
}
/* if a request can't be resent we can't wait for an answer after
*/
RETURN(1);
}
+EXPORT_SYMBOL(ptlrpc_expired_set);
/**
* Sets rq_intr flag in \a req under spinlock.
*/
void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
{
- cfs_spin_lock(&req->rq_lock);
- req->rq_intr = 1;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_intr = 1;
+ spin_unlock(&req->rq_lock);
}
+EXPORT_SYMBOL(ptlrpc_mark_interrupted);
/**
* Interrupts (sets interrupted flag) all uncompleted requests in
ptlrpc_mark_interrupted(req);
}
}
+EXPORT_SYMBOL(ptlrpc_interrupted_set);
/**
* Get the smallest timeout in the set; this does NOT set a timeout.
int deadline;
ENTRY;
- SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
-
cfs_list_for_each(tmp, &set->set_requests) {
req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
if (req->rq_phase == RQ_PHASE_NEW)
deadline = req->rq_sent;
+ else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend)
+ deadline = req->rq_sent;
else
deadline = req->rq_sent + req->rq_timeout;
}
RETURN(timeout);
}
+EXPORT_SYMBOL(ptlrpc_set_next_timeout);
/**
 * Send all unsent requests from the set and then wait until all
int rc, timeout;
ENTRY;
+ if (set->set_producer)
+ (void)ptlrpc_set_producer(set);
+ else
+ cfs_list_for_each(tmp, &set->set_requests) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
+ if (req->rq_phase == RQ_PHASE_NEW)
+ (void)ptlrpc_send_new_req(req);
+ }
+
if (cfs_list_empty(&set->set_requests))
RETURN(0);
- cfs_list_for_each(tmp, &set->set_requests) {
- req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
- if (req->rq_phase == RQ_PHASE_NEW)
- (void)ptlrpc_send_new_req(req);
- }
-
do {
timeout = ptlrpc_set_next_timeout(set);
/* LU-769 - if we ignored the signal because it was already
* pending when we started, we need to handle it now or we risk
* it being ignored forever */
- if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
- cfs_signal_pending()) {
- cfs_sigset_t blocked_sigs =
- cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
-
- /* In fact we only interrupt for the "fatal" signals
- * like SIGINT or SIGKILL. We still ignore less
- * important signals since ptlrpc set is not easily
- * reentrant from userspace again */
- if (cfs_signal_pending())
- ptlrpc_interrupted_set(set);
- cfs_block_sigs(blocked_sigs);
- }
+ if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
+ cfs_signal_pending()) {
+ sigset_t blocked_sigs =
+ cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
+
+ /* In fact we only interrupt for the "fatal" signals
+ * like SIGINT or SIGKILL. We still ignore less
+ * important signals since ptlrpc set is not easily
+ * reentrant from userspace again */
+ if (cfs_signal_pending())
+ ptlrpc_interrupted_set(set);
+ cfs_restore_sigs(blocked_sigs);
+ }
LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
cfs_list_for_each(tmp, &set->set_requests) {
req = cfs_list_entry(tmp, struct ptlrpc_request,
rq_set_chain);
- cfs_spin_lock(&req->rq_lock);
- req->rq_invalid_rqset = 1;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_invalid_rqset = 1;
+ spin_unlock(&req->rq_lock);
}
}
} while (rc != 0 || cfs_atomic_read(&set->set_remaining) != 0);
LASSERT(cfs_atomic_read(&set->set_remaining) == 0);
- rc = 0;
+ rc = set->set_rc; /* rq_status of already freed requests if any */
cfs_list_for_each(tmp, &set->set_requests) {
req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
RETURN(rc);
}
+EXPORT_SYMBOL(ptlrpc_set_wait);
/**
 * Helper function for request freeing.
/* We must take it off the imp_replay_list first. Otherwise, we'll set
* request->rq_reqmsg to NULL while osc_close is dereferencing it. */
if (request->rq_import != NULL) {
- if (!locked)
- cfs_spin_lock(&request->rq_import->imp_lock);
- cfs_list_del_init(&request->rq_replay_list);
- if (!locked)
- cfs_spin_unlock(&request->rq_import->imp_lock);
+ if (!locked)
+ spin_lock(&request->rq_import->imp_lock);
+ cfs_list_del_init(&request->rq_replay_list);
+ if (!locked)
+ spin_unlock(&request->rq_import->imp_lock);
}
LASSERTF(cfs_list_empty(&request->rq_replay_list), "req %p\n", request);
class_import_put(request->rq_import);
request->rq_import = NULL;
}
- if (request->rq_bulk != NULL)
- ptlrpc_free_bulk(request->rq_bulk);
+ if (request->rq_bulk != NULL)
+ ptlrpc_free_bulk_pin(request->rq_bulk);
if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL)
sptlrpc_cli_free_reqbuf(request);
if (request->rq_pool)
__ptlrpc_free_req_to_pool(request);
else
- OBD_FREE(request, sizeof(*request));
- EXIT;
+ ptlrpc_request_cache_free(request);
+ EXIT;
}
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
*/
void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
{
- LASSERT_SPIN_LOCKED(&request->rq_import->imp_lock);
- (void)__ptlrpc_req_finished(request, 1);
+ LASSERT(spin_is_locked(&request->rq_import->imp_lock));
+ (void)__ptlrpc_req_finished(request, 1);
}
+EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock);
/**
* Helper function
{
__ptlrpc_req_finished(request, 0);
}
+EXPORT_SYMBOL(ptlrpc_req_finished);
/**
* Returns xid of a \a request
*/
int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
{
- int rc;
- cfs_waitq_t *wq;
- struct l_wait_info lwi;
+ int rc;
+ struct l_wait_info lwi;
- /*
- * Might sleep.
- */
- LASSERT(!cfs_in_interrupt());
+ /*
+ * Might sleep.
+ */
+ LASSERT(!in_interrupt());
- /*
- * Let's setup deadline for reply unlink.
- */
+ /*
+ * Let's setup deadline for reply unlink.
+ */
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
async && request->rq_reply_deadline == 0)
request->rq_reply_deadline = cfs_time_current_sec()+LONG_UNLINK;
* a chance to run reply_in_callback(), and to make sure we've
* unlinked before returning a req to the pool.
*/
- if (request->rq_set != NULL)
- wq = &request->rq_set->set_waitq;
- else
- wq = &request->rq_reply_waitq;
-
for (;;) {
+#ifdef __KERNEL__
+ /* The wq argument is ignored by user-space wait_event macros */
+ wait_queue_head_t *wq = (request->rq_set != NULL) ?
+ &request->rq_set->set_waitq :
+ &request->rq_reply_waitq;
+#endif
/* Network access will complete in finite time but the HUGE
* timeout lets us CWARN for visibility of sluggish NALs */
lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
}
RETURN(0);
}
+EXPORT_SYMBOL(ptlrpc_unregister_reply);
+
+static void ptlrpc_free_request(struct ptlrpc_request *req)
+{
+ spin_lock(&req->rq_lock);
+ req->rq_replay = 0;
+ spin_unlock(&req->rq_lock);
+
+ if (req->rq_commit_cb != NULL)
+ req->rq_commit_cb(req);
+ cfs_list_del_init(&req->rq_replay_list);
+
+ __ptlrpc_req_finished(req, 1);
+}
+
+/**
+ * the request is committed and dropped from the replay list of its import
+ */
+void ptlrpc_request_committed(struct ptlrpc_request *req, int force)
+{
+ struct obd_import *imp = req->rq_import;
+
+ spin_lock(&imp->imp_lock);
+ if (cfs_list_empty(&req->rq_replay_list)) {
+ spin_unlock(&imp->imp_lock);
+ return;
+ }
+
+ if (force || req->rq_transno <= imp->imp_peer_committed_transno)
+ ptlrpc_free_request(req);
+
+ spin_unlock(&imp->imp_lock);
+}
+EXPORT_SYMBOL(ptlrpc_request_committed);
/**
* Iterates through replay_list on import and prunes
*/
void ptlrpc_free_committed(struct obd_import *imp)
{
- cfs_list_t *tmp, *saved;
- struct ptlrpc_request *req;
- struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
- ENTRY;
-
- LASSERT(imp != NULL);
+ struct ptlrpc_request *req, *saved;
+ struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
+ bool skip_committed_list = true;
+ ENTRY;
- LASSERT_SPIN_LOCKED(&imp->imp_lock);
+ LASSERT(imp != NULL);
+ LASSERT(spin_is_locked(&imp->imp_lock));
if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
imp->imp_generation == imp->imp_last_generation_checked) {
CDEBUG(D_INFO, "%s: skip recheck: last_committed "LPU64"\n",
imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
- EXIT;
- return;
+ RETURN_EXIT;
}
CDEBUG(D_RPCTRACE, "%s: committing for last_committed "LPU64" gen %d\n",
imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
imp->imp_generation);
+
+ if (imp->imp_generation != imp->imp_last_generation_checked)
+ skip_committed_list = false;
+
imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
imp->imp_last_generation_checked = imp->imp_generation;
- cfs_list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
- req = cfs_list_entry(tmp, struct ptlrpc_request,
- rq_replay_list);
-
+ cfs_list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
+ rq_replay_list) {
/* XXX ok to remove when 1357 resolved - rread 05/29/03 */
LASSERT(req != last_req);
last_req = req;
GOTO(free_req, 0);
}
- if (req->rq_replay) {
- DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
- continue;
- }
-
/* not yet committed */
if (req->rq_transno > imp->imp_peer_committed_transno) {
DEBUG_REQ(D_RPCTRACE, req, "stopping search");
break;
}
+ if (req->rq_replay) {
+ DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
+ cfs_list_move_tail(&req->rq_replay_list,
+ &imp->imp_committed_list);
+ continue;
+ }
+
DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")",
imp->imp_peer_committed_transno);
free_req:
- cfs_spin_lock(&req->rq_lock);
- req->rq_replay = 0;
- cfs_spin_unlock(&req->rq_lock);
- if (req->rq_commit_cb != NULL)
- req->rq_commit_cb(req);
- cfs_list_del_init(&req->rq_replay_list);
- __ptlrpc_req_finished(req, 1);
+ ptlrpc_free_request(req);
}
+ if (skip_committed_list)
+ GOTO(out, 0);
+
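+	/* Requests parked on the committed list (kept for replay via
+	 * rq_replay above) are freed only once their import generation
+	 * has gone stale. */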
+ cfs_list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
+ rq_replay_list) {
+ LASSERT(req->rq_transno != 0);
+ if (req->rq_import_generation < imp->imp_generation) {
+ DEBUG_REQ(D_RPCTRACE, req, "free stale open request");
+ ptlrpc_free_request(req);
+ }
+ }
+out:
EXIT;
- return;
}
void ptlrpc_cleanup_client(struct obd_import *imp)
{
ENTRY;
EXIT;
- return;
}
+EXPORT_SYMBOL(ptlrpc_cleanup_client);
/**
* Schedule previously sent request for resend.
lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
req->rq_status = -EAGAIN;
- cfs_spin_lock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
req->rq_resend = 1;
req->rq_net_err = 0;
req->rq_timedout = 0;
old_xid, req->rq_xid);
}
ptlrpc_client_wake_req(req);
- cfs_spin_unlock(&req->rq_lock);
+ spin_unlock(&req->rq_lock);
}
+EXPORT_SYMBOL(ptlrpc_resend_req);
/* XXX: this function and rq_status are currently unused */
void ptlrpc_restart_req(struct ptlrpc_request *req)
{
- DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
- req->rq_status = -ERESTARTSYS;
+ DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
+ req->rq_status = -ERESTARTSYS;
- cfs_spin_lock(&req->rq_lock);
- req->rq_restart = 1;
- req->rq_timedout = 0;
- ptlrpc_client_wake_req(req);
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_restart = 1;
+ req->rq_timedout = 0;
+ ptlrpc_client_wake_req(req);
+ spin_unlock(&req->rq_lock);
}
+EXPORT_SYMBOL(ptlrpc_restart_req);
/**
* Grab additional reference on a request \a req
cfs_atomic_inc(&req->rq_refcount);
RETURN(req);
}
+EXPORT_SYMBOL(ptlrpc_request_addref);
/**
* Add a request to import replay_list.
void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
struct obd_import *imp)
{
- cfs_list_t *tmp;
+ cfs_list_t *tmp;
- LASSERT_SPIN_LOCKED(&imp->imp_lock);
+ LASSERT(spin_is_locked(&imp->imp_lock));
if (req->rq_transno == 0) {
DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
cfs_list_add(&req->rq_replay_list, &imp->imp_replay_list);
}
+EXPORT_SYMBOL(ptlrpc_retain_replayable_request);
/**
* Send request and wait until it completes.
RETURN(-ENOMEM);
}
- /* for distributed debugging */
- lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());
+ /* for distributed debugging */
+ lustre_msg_set_status(req->rq_reqmsg, current_pid());
/* add a ref for the set (see comment in ptlrpc_set_add_req) */
ptlrpc_request_addref(req);
RETURN(rc);
}
+EXPORT_SYMBOL(ptlrpc_queue_wait);
struct ptlrpc_replay_async_args {
int praa_old_state;
/** VBR: check version failure */
if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
- /** replay was failed due to version mismatch */
- DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_vbr_failed = 1;
- imp->imp_no_lock_replay = 1;
- cfs_spin_unlock(&imp->imp_lock);
- lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
+ /** replay was failed due to version mismatch */
+                /** replay failed due to version mismatch */
+ spin_lock(&imp->imp_lock);
+ imp->imp_vbr_failed = 1;
+ imp->imp_no_lock_replay = 1;
+ spin_unlock(&imp->imp_lock);
+ lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
} else {
/** The transno had better not change over replay. */
LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
lustre_msg_get_transno(req->rq_repmsg));
}
- cfs_spin_lock(&imp->imp_lock);
- /** if replays by version then gap was occur on server, no trust to locks */
- if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
- imp->imp_no_lock_replay = 1;
- imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+        /** if replay is done by version then a gap occurred on the
+         * server, so the locks cannot be trusted */
+ if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
+ imp->imp_no_lock_replay = 1;
+ imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
+ spin_unlock(&imp->imp_lock);
LASSERT(imp->imp_last_replay_transno);
/* transaction number shouldn't be bigger than the latest replayed */
ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
RETURN(0);
}
+EXPORT_SYMBOL(ptlrpc_replay_req);
/**
 * Aborts all in-flight requests on the sending and delayed lists of import \a imp.
* ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
* this flag and then putting requests on sending_list or delayed_list.
*/
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
/* XXX locking? Maybe we should remove each request with the list
* locked? Also, how do we know if the requests on the list are
DEBUG_REQ(D_RPCTRACE, req, "inflight");
- cfs_spin_lock (&req->rq_lock);
- if (req->rq_import_generation < imp->imp_generation) {
- req->rq_err = 1;
- req->rq_status = -EINTR;
- ptlrpc_client_wake_req(req);
- }
- cfs_spin_unlock (&req->rq_lock);
- }
+ spin_lock(&req->rq_lock);
+ if (req->rq_import_generation < imp->imp_generation) {
+ req->rq_err = 1;
+ req->rq_status = -EIO;
+ ptlrpc_client_wake_req(req);
+ }
+ spin_unlock(&req->rq_lock);
+ }
- cfs_list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
- struct ptlrpc_request *req =
- cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
+ cfs_list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
+ struct ptlrpc_request *req =
+ cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
- DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
+ DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
- cfs_spin_lock (&req->rq_lock);
- if (req->rq_import_generation < imp->imp_generation) {
- req->rq_err = 1;
- req->rq_status = -EINTR;
- ptlrpc_client_wake_req(req);
- }
- cfs_spin_unlock (&req->rq_lock);
- }
+ spin_lock(&req->rq_lock);
+ if (req->rq_import_generation < imp->imp_generation) {
+ req->rq_err = 1;
+ req->rq_status = -EIO;
+ ptlrpc_client_wake_req(req);
+ }
+ spin_unlock(&req->rq_lock);
+ }
- /* Last chance to free reqs left on the replay list, but we
- * will still leak reqs that haven't committed. */
- if (imp->imp_replayable)
- ptlrpc_free_committed(imp);
+ /* Last chance to free reqs left on the replay list, but we
+ * will still leak reqs that haven't committed. */
+ if (imp->imp_replayable)
+ ptlrpc_free_committed(imp);
- cfs_spin_unlock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
- EXIT;
+ EXIT;
}
+EXPORT_SYMBOL(ptlrpc_abort_inflight);
/**
* Abort all uncompleted requests in request set \a set
cfs_list_entry(pos, struct ptlrpc_request,
rq_set_chain);
- cfs_spin_lock(&req->rq_lock);
- if (req->rq_phase != RQ_PHASE_RPC) {
- cfs_spin_unlock(&req->rq_lock);
- continue;
- }
+ spin_lock(&req->rq_lock);
+ if (req->rq_phase != RQ_PHASE_RPC) {
+ spin_unlock(&req->rq_lock);
+ continue;
+ }
- req->rq_err = 1;
- req->rq_status = -EINTR;
- ptlrpc_client_wake_req(req);
- cfs_spin_unlock(&req->rq_lock);
- }
+ req->rq_err = 1;
+ req->rq_status = -EINTR;
+ ptlrpc_client_wake_req(req);
+ spin_unlock(&req->rq_lock);
+ }
}
static __u64 ptlrpc_last_xid;
-static cfs_spinlock_t ptlrpc_last_xid_lock;
+static spinlock_t ptlrpc_last_xid_lock;
/**
* Initialize the XID for the node. This is common among all requests on
#define YEAR_2004 (1ULL << 30)
void ptlrpc_init_xid(void)
{
- time_t now = cfs_time_current_sec();
+ time_t now = cfs_time_current_sec();
- cfs_spin_lock_init(&ptlrpc_last_xid_lock);
- if (now < YEAR_2004) {
- cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
- ptlrpc_last_xid >>= 2;
- ptlrpc_last_xid |= (1ULL << 61);
- } else {
- ptlrpc_last_xid = (__u64)now << 20;
- }
+ spin_lock_init(&ptlrpc_last_xid_lock);
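+        /* If the clock looks bogus (before 2004), seed the XID from random
+         * bytes with a high bit forced on; otherwise derive it from the
+         * current time so that XIDs generally keep increasing across reboots. */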
+ if (now < YEAR_2004) {
+ cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
+ ptlrpc_last_xid >>= 2;
+ ptlrpc_last_xid |= (1ULL << 61);
+ } else {
+ ptlrpc_last_xid = (__u64)now << 20;
+ }
+
+        /* Need to always be aligned to a power-of-two for multi-bulk BRW */
+ CLASSERT((PTLRPC_BULK_OPS_COUNT & (PTLRPC_BULK_OPS_COUNT - 1)) == 0);
+ ptlrpc_last_xid &= PTLRPC_BULK_OPS_MASK;
}
/**
- * Increase xid and returns resultng new value to the caller.
+ * Increase xid and return the resulting new value to the caller.
+ *
+ * Multi-bulk BRW RPCs consume one XID per bulk transfer, starting at the
+ * returned xid and going up to xid + PTLRPC_BULK_OPS_COUNT - 1.  The BRW RPC
+ * itself uses the last bulk xid needed, so the server can determine the
+ * number of bulk transfers from the RPC XID and a bitmask.  The starting
+ * xid must align to a power-of-two value.
+ *
+ * This holds because the initial ptlrpc_last_xid is aligned to a
+ * PTLRPC_BULK_OPS_COUNT boundary and each call advances it by exactly
+ * PTLRPC_BULK_OPS_COUNT.  See LU-1431.
*/
__u64 ptlrpc_next_xid(void)
{
- __u64 tmp;
- cfs_spin_lock(&ptlrpc_last_xid_lock);
- tmp = ++ptlrpc_last_xid;
- cfs_spin_unlock(&ptlrpc_last_xid_lock);
- return tmp;
+ __u64 next;
+
+ spin_lock(&ptlrpc_last_xid_lock);
+ next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
+ ptlrpc_last_xid = next;
+ spin_unlock(&ptlrpc_last_xid_lock);
+
+ return next;
}
+EXPORT_SYMBOL(ptlrpc_next_xid);
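/*
 * Illustrative sketch, not part of the change above: because ptlrpc_last_xid
 * is kept aligned to PTLRPC_BULK_OPS_COUNT (a power of two) and advanced by
 * exactly that amount, each value returned by ptlrpc_next_xid() starts an
 * aligned block of XIDs reserved for a single RPC.  Assuming the bulk
 * transfers use xid, xid + 1, ... and the BRW RPC carries the last XID used
 * (as described above), a server could recover the transfer count with a
 * simple mask; "rpc_xid" and "nbulk" are hypothetical names for this
 * example only.
 */
#if 0	/* example only */
	__u64	rpc_xid;	/* XID carried by the incoming BRW RPC */
	int	nbulk;

	/* offset within the aligned block, plus one, gives the count */
	nbulk = (int)(rpc_xid & (PTLRPC_BULK_OPS_COUNT - 1)) + 1;
#endif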
/**
* Get a glimpse at what next xid value might have been.
__u64 ptlrpc_sample_next_xid(void)
{
#if BITS_PER_LONG == 32
- /* need to avoid possible word tearing on 32-bit systems */
- __u64 tmp;
- cfs_spin_lock(&ptlrpc_last_xid_lock);
- tmp = ptlrpc_last_xid + 1;
- cfs_spin_unlock(&ptlrpc_last_xid_lock);
- return tmp;
+ /* need to avoid possible word tearing on 32-bit systems */
+ __u64 next;
+
+ spin_lock(&ptlrpc_last_xid_lock);
+ next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
+ spin_unlock(&ptlrpc_last_xid_lock);
+
+ return next;
#else
- /* No need to lock, since returned value is racy anyways */
- return ptlrpc_last_xid + 1;
+        /* No need to lock, since returned value is racy anyway */
+ return ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
#endif
}
EXPORT_SYMBOL(ptlrpc_sample_next_xid);
* have delay before it really runs by ptlrpcd thread.
*/
struct ptlrpc_work_async_args {
- __u64 magic;
- int (*cb)(const struct lu_env *, void *);
- void *cbdata;
+ int (*cb)(const struct lu_env *, void *);
+ void *cbdata;
};
-#define PTLRPC_WORK_MAGIC 0x6655436b676f4f44ULL /* magic code */
+static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
+{
+ /* re-initialize the req */
+ req->rq_timeout = obd_timeout;
+ req->rq_sent = cfs_time_current_sec();
+ req->rq_deadline = req->rq_sent + req->rq_timeout;
+ req->rq_reply_deadline = req->rq_deadline;
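+        /* A work request never goes over the wire; start it directly in
+         * the interpret phase so ptlrpcd just runs work_interpreter(). */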
+ req->rq_phase = RQ_PHASE_INTERPRET;
+ req->rq_next_phase = RQ_PHASE_COMPLETE;
+ req->rq_xid = ptlrpc_next_xid();
+ req->rq_import_generation = req->rq_import->imp_generation;
+
+ ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+}
static int work_interpreter(const struct lu_env *env,
- struct ptlrpc_request *req, void *data, int rc)
+ struct ptlrpc_request *req, void *data, int rc)
{
- struct ptlrpc_work_async_args *arg = data;
+ struct ptlrpc_work_async_args *arg = data;
+
+ LASSERT(ptlrpcd_check_work(req));
+ LASSERT(arg->cb != NULL);
+
+ rc = arg->cb(env, arg->cbdata);
- LASSERT(arg->magic == PTLRPC_WORK_MAGIC);
- LASSERT(arg->cb != NULL);
+ list_del_init(&req->rq_set_chain);
+ req->rq_set = NULL;
- return arg->cb(env, arg->cbdata);
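+        /* If ptlrpcd_queue_work() raced in while the callback ran (the
+         * refcount was bumped above 2), requeue the work once more. */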
+ if (atomic_dec_return(&req->rq_refcount) > 1) {
+ atomic_set(&req->rq_refcount, 2);
+ ptlrpcd_add_work_req(req);
+ }
+ return rc;
+}
+
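+/* The address of worker_format is stored in rq_pill.rc_fmt as a sentinel
+ * so that work requests can be told apart from regular RPC requests. */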
+static int worker_format;
+
+static int ptlrpcd_check_work(struct ptlrpc_request *req)
+{
+ return req->rq_pill.rc_fmt == (void *)&worker_format;
}
/**
 * Create a work item for ptlrpc.
*/
void *ptlrpcd_alloc_work(struct obd_import *imp,
- int (*cb)(const struct lu_env *, void *), void *cbdata)
+ int (*cb)(const struct lu_env *, void *), void *cbdata)
{
- struct ptlrpc_request *req = NULL;
- struct ptlrpc_work_async_args *args;
- ENTRY;
+ struct ptlrpc_request *req = NULL;
+ struct ptlrpc_work_async_args *args;
+ ENTRY;
- cfs_might_sleep();
+ might_sleep();
- if (cb == NULL)
- RETURN(ERR_PTR(-EINVAL));
+ if (cb == NULL)
+ RETURN(ERR_PTR(-EINVAL));
/* copy some code from deprecated fakereq. */
- OBD_ALLOC_PTR(req);
+ req = ptlrpc_request_cache_alloc(__GFP_IO);
if (req == NULL) {
CERROR("ptlrpc: run out of memory!\n");
RETURN(ERR_PTR(-ENOMEM));
req->rq_receiving_reply = 0;
req->rq_must_unlink = 0;
req->rq_no_delay = req->rq_no_resend = 1;
+ req->rq_pill.rc_fmt = (void *)&worker_format;
- cfs_spin_lock_init(&req->rq_lock);
- CFS_INIT_LIST_HEAD(&req->rq_list);
- CFS_INIT_LIST_HEAD(&req->rq_replay_list);
- CFS_INIT_LIST_HEAD(&req->rq_set_chain);
- CFS_INIT_LIST_HEAD(&req->rq_history_list);
- CFS_INIT_LIST_HEAD(&req->rq_exp_list);
- cfs_waitq_init(&req->rq_reply_waitq);
- cfs_waitq_init(&req->rq_set_waitq);
- cfs_atomic_set(&req->rq_refcount, 1);
-
- CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args));
- args = ptlrpc_req_async_args(req);
- args->magic = PTLRPC_WORK_MAGIC;
- args->cb = cb;
- args->cbdata = cbdata;
+ spin_lock_init(&req->rq_lock);
+ CFS_INIT_LIST_HEAD(&req->rq_list);
+ CFS_INIT_LIST_HEAD(&req->rq_replay_list);
+ CFS_INIT_LIST_HEAD(&req->rq_set_chain);
+ CFS_INIT_LIST_HEAD(&req->rq_history_list);
+ CFS_INIT_LIST_HEAD(&req->rq_exp_list);
+ init_waitqueue_head(&req->rq_reply_waitq);
+ init_waitqueue_head(&req->rq_set_waitq);
+ atomic_set(&req->rq_refcount, 1);
- RETURN(req);
+        CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
+ args = ptlrpc_req_async_args(req);
+ args->cb = cb;
+ args->cbdata = cbdata;
+
+ RETURN(req);
}
EXPORT_SYMBOL(ptlrpcd_alloc_work);
int ptlrpcd_queue_work(void *handler)
{
- struct ptlrpc_request *req = handler;
+ struct ptlrpc_request *req = handler;
/*
* Check if the req is already being queued.
* for this purpose. This is okay because the caller should use this
* req as opaque data. - Jinshan
*/
- LASSERT(cfs_atomic_read(&req->rq_refcount) > 0);
- if (cfs_atomic_read(&req->rq_refcount) > 1)
- return -EBUSY;
-
- if (cfs_atomic_inc_return(&req->rq_refcount) > 2) { /* race */
- cfs_atomic_dec(&req->rq_refcount);
- return -EBUSY;
- }
-
- /* re-initialize the req */
- req->rq_timeout = obd_timeout;
- req->rq_sent = cfs_time_current_sec();
- req->rq_deadline = req->rq_sent + req->rq_timeout;
- req->rq_reply_deadline = req->rq_deadline;
- req->rq_phase = RQ_PHASE_INTERPRET;
- req->rq_next_phase = RQ_PHASE_COMPLETE;
- req->rq_xid = ptlrpc_next_xid();
- req->rq_import_generation = req->rq_import->imp_generation;
-
- ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
- return 0;
+ LASSERT(atomic_read(&req->rq_refcount) > 0);
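+        /* Only the 1 -> 2 refcount transition queues the work; a larger
+         * value means it is already queued or running, and work_interpreter()
+         * will requeue it when the current run completes. */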
+ if (atomic_inc_return(&req->rq_refcount) == 2)
+ ptlrpcd_add_work_req(req);
+ return 0;
}
EXPORT_SYMBOL(ptlrpcd_queue_work);
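/*
 * Illustrative sketch, not part of the change above: one possible way a
 * caller could use the ptlrpcd work interface.  "my_flush_cb", "my_setup",
 * "imp" and "my_data" are hypothetical names for this example only.
 */
#if 0	/* example only */
static int my_flush_cb(const struct lu_env *env, void *data)
{
	/* runs asynchronously in a ptlrpcd thread */
	return 0;
}

static int my_setup(struct obd_import *imp, void *my_data)
{
	void *work;

	/* allocate once; the same handle may be queued many times */
	work = ptlrpcd_alloc_work(imp, my_flush_cb, my_data);
	if (IS_ERR(work))
		return PTR_ERR(work);

	/* schedule my_flush_cb(); returns 0 even if already queued */
	return ptlrpcd_queue_work(work);
}
#endif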