* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
* Lustre is a trademark of Sun Microsystems, Inc.
*/
+/** Implementation of client-side PortalRPC interfaces */
+
#define DEBUG_SUBSYSTEM S_RPC
#ifndef __KERNEL__
#include <errno.h>
#include "ptlrpc_internal.h"
+/**
+ * Initialize passed in client structure \a cl.
+ */
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
struct ptlrpc_client *cl)
{
cl->cli_name = name;
}
+/**
+ * Return PortalRPC connection for remote uuid \a uuid
+ */
struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
{
struct ptlrpc_connection *c;
err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
if (err != 0) {
- CERROR("cannot find peer %s!\n", uuid->uuid);
+ CNETERR("cannot find peer %s!\n", uuid->uuid);
return NULL;
}
return c;
}
+/**
+ * Allocate and initialize new bulk descriptor
+ * Returns pointer to the descriptor or NULL on error.
+ */
static inline struct ptlrpc_bulk_desc *new_bulk(int npages, int type, int portal)
{
struct ptlrpc_bulk_desc *desc;
if (!desc)
return NULL;
- spin_lock_init(&desc->bd_lock);
+ cfs_spin_lock_init(&desc->bd_lock);
cfs_waitq_init(&desc->bd_waitq);
desc->bd_max_iov = npages;
desc->bd_iov_count = 0;
return desc;
}
+/**
+ * Prepare bulk descriptor for specified outgoing request \a req that
+ * can fit \a npages pages. \a type is bulk type. \a portal is where
+ * the bulk is to be sent. Used on the client side.
+ * Returns pointer to newly allocated initialized bulk descriptor or NULL on
+ * error.
+ */
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
int npages, int type, int portal)
{
return desc;
}
+/**
+ * Prepare bulk descriptor for specified incoming request \a req that
+ * can fit \a npages pages. \a type is bulk type. \a portal is where
+ * the bulk is to be sent. Used on the server side after the request
+ * was already received.
+ * Returns pointer to newly allocated initialized bulk descriptor or NULL on
+ * error.
+ */
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
int npages, int type, int portal)
{
return desc;
}
+/**
+ * Add a page \a page to the bulk descriptor \a desc.
+ * Data to transfer in the page starts at offset \a pageoffset and
+ * the amount of data to transfer from the page is \a len.
+ */
void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
cfs_page_t *page, int pageoffset, int len)
{
desc->bd_nob += len;
+ cfs_page_pin(page);
ptlrpc_add_bulk_page(desc, page, pageoffset, len);
}
+/**
+ * Uninitialize and free bulk descriptor \a desc.
+ * Works on bulk descriptors both from server and client side.
+ */
void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
{
+ int i;
ENTRY;
LASSERT(desc != NULL);
else
class_import_put(desc->bd_import);
+ for (i = 0; i < desc->bd_iov_count; i++)
+ cfs_page_unpin(desc->bd_iov[i].kiov_page);
+
OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
bd_iov[desc->bd_max_iov]));
EXIT;
}
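/*
 * Illustrative sketch (not part of this patch): how a client-side caller
 * might use the bulk helpers above. The page array, BULK_GET_SOURCE,
 * OST_BULK_PORTAL and full-page transfers are assumptions for illustration
 * only; real callers (e.g. the OSC layer) take these from their own context.
 */
static struct ptlrpc_bulk_desc *
example_setup_bulk(struct ptlrpc_request *req, cfs_page_t **pages, int npages)
{
        struct ptlrpc_bulk_desc *desc;
        int i;

        /* one descriptor sized for npages, aimed at the (assumed) bulk portal */
        desc = ptlrpc_prep_bulk_imp(req, npages, BULK_GET_SOURCE,
                                    OST_BULK_PORTAL);
        if (desc == NULL)
                return NULL;

        /* each page is pinned by ptlrpc_prep_bulk_page() and unpinned again
         * by ptlrpc_free_bulk() */
        for (i = 0; i < npages; i++)
                ptlrpc_prep_bulk_page(desc, pages[i], 0, CFS_PAGE_SIZE);

        return desc;
}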
-/* Set server timelimit for this req */
+/**
+ * Set server timelimit for this req, i.e. how long we are willing to wait
+ * for a reply before timing out this request.
+ */
void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
{
__u32 serv_est;
if (AT_OFF) {
/* non-AT settings */
+ /**
+ * \a imp_server_timeout means this is a reverse import and
+ * we send (currently only) ASTs to the client and cannot afford
+ * to wait too long for the reply, otherwise the other client
+ * (because of which we are sending this request) would
+ * time out waiting for us
+ */
req->rq_timeout = req->rq_import->imp_server_timeout ?
obd_timeout / 2 : obd_timeout;
} else {
idx = import_at_get_index(req->rq_import, req->rq_request_portal);
/* max service estimates are tracked on the server side,
so just keep minimal history here */
- oldse = at_add(&at->iat_service_estimate[idx], serv_est);
+ oldse = at_measured(&at->iat_service_estimate[idx], serv_est);
if (oldse != 0)
CDEBUG(D_ADAPTTO, "The RPC service estimate for %s ptl %d "
"has changed from %d to %d\n",
CFS_DURATION_T"\n", service_time,
cfs_time_sub(now, req->rq_sent));
- oldnl = at_add(&at->iat_net_latency, nl);
+ oldnl = at_measured(&at->iat_net_latency, nl);
if (oldnl != 0)
CDEBUG(D_ADAPTTO, "The network latency for %s (nid %s) "
"has changed from %d to %d\n",
return 0;
}
-/*
+/**
* Handle an early reply message, called with the rq_lock held.
* If anything goes wrong just ignore it - same as if it never happened
*/
ENTRY;
req->rq_early = 0;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
if (rc) {
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
RETURN(rc);
}
sptlrpc_cli_finish_early_reply(early_req);
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
if (rc == 0) {
/* Adjust the local timeout for this req */
RETURN(rc);
}
+/**
+ * Wind down request pool \a pool.
+ * Frees all requests from the pool too
+ */
void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
{
- struct list_head *l, *tmp;
+ cfs_list_t *l, *tmp;
struct ptlrpc_request *req;
LASSERT(pool != NULL);
- spin_lock(&pool->prp_lock);
- list_for_each_safe(l, tmp, &pool->prp_req_list) {
- req = list_entry(l, struct ptlrpc_request, rq_list);
- list_del(&req->rq_list);
+ cfs_spin_lock(&pool->prp_lock);
+ cfs_list_for_each_safe(l, tmp, &pool->prp_req_list) {
+ req = cfs_list_entry(l, struct ptlrpc_request, rq_list);
+ cfs_list_del(&req->rq_list);
LASSERT(req->rq_reqbuf);
LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
OBD_FREE(req->rq_reqbuf, pool->prp_rq_size);
OBD_FREE(req, sizeof(*req));
}
- spin_unlock(&pool->prp_lock);
+ cfs_spin_unlock(&pool->prp_lock);
OBD_FREE(pool, sizeof(*pool));
}
+/**
+ * Allocates, initializes and adds \a num_rq requests to the pool \a pool
+ */
void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
{
int i;
int size = 1;
- while (size < pool->prp_rq_size + SPTLRPC_MAX_PAYLOAD)
+ while (size < pool->prp_rq_size)
size <<= 1;
- LASSERTF(list_empty(&pool->prp_req_list) || size == pool->prp_rq_size,
+ LASSERTF(cfs_list_empty(&pool->prp_req_list) ||
+ size == pool->prp_rq_size,
"Trying to change pool size with nonempty pool "
"from %d to %d bytes\n", pool->prp_rq_size, size);
- spin_lock(&pool->prp_lock);
+ cfs_spin_lock(&pool->prp_lock);
pool->prp_rq_size = size;
for (i = 0; i < num_rq; i++) {
struct ptlrpc_request *req;
struct lustre_msg *msg;
- spin_unlock(&pool->prp_lock);
+ cfs_spin_unlock(&pool->prp_lock);
OBD_ALLOC(req, sizeof(struct ptlrpc_request));
if (!req)
return;
req->rq_reqbuf = msg;
req->rq_reqbuf_len = size;
req->rq_pool = pool;
- spin_lock(&pool->prp_lock);
- list_add_tail(&req->rq_list, &pool->prp_req_list);
+ cfs_spin_lock(&pool->prp_lock);
+ cfs_list_add_tail(&req->rq_list, &pool->prp_req_list);
}
- spin_unlock(&pool->prp_lock);
+ cfs_spin_unlock(&pool->prp_lock);
return;
}
+/**
+ * Create and initialize new request pool with given attributes:
+ * \a num_rq - initial number of requests to create for the pool
+ * \a msgsize - maximum message size possible for requests in this pool
+ * \a populate_pool - function to be called when more requests need to be added
+ * to the pool
+ * Returns pointer to newly created pool or NULL on error.
+ */
struct ptlrpc_request_pool *
ptlrpc_init_rq_pool(int num_rq, int msgsize,
void (*populate_pool)(struct ptlrpc_request_pool *, int))
/* Request next power of two for the allocation, because internally
kernel would do exactly this */
- spin_lock_init(&pool->prp_lock);
+ cfs_spin_lock_init(&pool->prp_lock);
CFS_INIT_LIST_HEAD(&pool->prp_req_list);
- pool->prp_rq_size = msgsize;
+ pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
pool->prp_populate = populate_pool;
populate_pool(pool, num_rq);
- if (list_empty(&pool->prp_req_list)) {
+ if (cfs_list_empty(&pool->prp_req_list)) {
/* have not allocated a single request for the pool */
OBD_FREE(pool, sizeof (struct ptlrpc_request_pool));
pool = NULL;
return pool;
}
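/*
 * Illustrative sketch (not part of this patch): creating and tearing down
 * a request pool with the helpers above. The pool size (4 requests) and
 * message size (1024 bytes) are arbitrary example values.
 */
static struct ptlrpc_request_pool *example_pool;

static int example_pool_setup(void)
{
        /* each reqbuf is sized for a 1024-byte message; SPTLRPC_MAX_PAYLOAD
         * is added inside ptlrpc_init_rq_pool() itself */
        example_pool = ptlrpc_init_rq_pool(4, 1024, ptlrpc_add_rqs_to_pool);
        return example_pool == NULL ? -ENOMEM : 0;
}

static void example_pool_cleanup(void)
{
        if (example_pool != NULL)
                ptlrpc_free_rq_pool(example_pool);
}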
+/**
+ * Fetches one request from pool \a pool
+ */
static struct ptlrpc_request *
ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
{
if (!pool)
return NULL;
- spin_lock(&pool->prp_lock);
+ cfs_spin_lock(&pool->prp_lock);
/* See if we have anything in a pool, and bail out if nothing,
* in writeout path, where this matters, this is safe to do, because
* nothing is lost in this case, and when some in-flight requests
* complete, this code will be called again. */
- if (unlikely(list_empty(&pool->prp_req_list))) {
- spin_unlock(&pool->prp_lock);
+ if (unlikely(cfs_list_empty(&pool->prp_req_list))) {
+ cfs_spin_unlock(&pool->prp_lock);
return NULL;
}
- request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
- rq_list);
- list_del_init(&request->rq_list);
- spin_unlock(&pool->prp_lock);
+ request = cfs_list_entry(pool->prp_req_list.next, struct ptlrpc_request,
+ rq_list);
+ cfs_list_del_init(&request->rq_list);
+ cfs_spin_unlock(&pool->prp_lock);
LASSERT(request->rq_reqbuf);
LASSERT(request->rq_pool);
return request;
}
+/**
+ * Returns freed \a request to pool.
+ */
static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
{
struct ptlrpc_request_pool *pool = request->rq_pool;
- spin_lock(&pool->prp_lock);
- LASSERT(list_empty(&request->rq_list));
+ cfs_spin_lock(&pool->prp_lock);
+ LASSERT(cfs_list_empty(&request->rq_list));
LASSERT(!request->rq_receiving_reply);
- list_add_tail(&request->rq_list, &pool->prp_req_list);
- spin_unlock(&pool->prp_lock);
+ cfs_list_add_tail(&request->rq_list, &pool->prp_req_list);
+ cfs_spin_unlock(&pool->prp_lock);
}
static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
ptlrpc_at_set_req_timeout(request);
- spin_lock_init(&request->rq_lock);
+ cfs_spin_lock_init(&request->rq_lock);
CFS_INIT_LIST_HEAD(&request->rq_list);
CFS_INIT_LIST_HEAD(&request->rq_timed_list);
CFS_INIT_LIST_HEAD(&request->rq_replay_list);
CFS_INIT_LIST_HEAD(&request->rq_history_list);
CFS_INIT_LIST_HEAD(&request->rq_exp_list);
cfs_waitq_init(&request->rq_reply_waitq);
+ cfs_waitq_init(&request->rq_set_waitq);
request->rq_xid = ptlrpc_next_xid();
- atomic_set(&request->rq_refcount, 1);
+ cfs_atomic_set(&request->rq_refcount, 1);
lustre_msg_set_opc(request->rq_reqmsg, opcode);
}
EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
+/**
+ * Pack request buffers for network transfer, performing encryption
+ * steps if necessary.
+ */
int ptlrpc_request_pack(struct ptlrpc_request *request,
__u32 version, int opcode)
{
return ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
}
+/**
+ * Helper function to allocate new request on import \a imp
+ * and possibly using existing request from pool \a pool if provided.
+ * Returns allocated request structure with import field filled or
+ * NULL on error.
+ */
static inline
struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
struct ptlrpc_request_pool *pool)
return request;
}
+/**
+ * Helper function for creating a request.
+ * Calls __ptlrpc_request_alloc to allocate new request structure and inits
+ * buffer structures according to capsule template \a format.
+ * Returns allocated request structure pointer or NULL on error.
+ */
static struct ptlrpc_request *
ptlrpc_request_alloc_internal(struct obd_import *imp,
struct ptlrpc_request_pool * pool,
return request;
}
+/**
+ * Allocate new request structure for import \a imp and initialize its
+ * buffer structure according to capsule template \a format.
+ */
struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
const struct req_format *format)
{
return ptlrpc_request_alloc_internal(imp, NULL, format);
}
+/**
+ * Allocate new request structure for import \a imp from pool \a pool and
+ * initialize its buffer structure according to capsule template \a format.
+ */
struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
struct ptlrpc_request_pool * pool,
const struct req_format *format)
return ptlrpc_request_alloc_internal(imp, pool, format);
}
+/**
+ * For requests not from pool, free memory of the request structure.
+ * For requests obtained from a pool earlier, return request back to pool.
+ */
void ptlrpc_request_free(struct ptlrpc_request *request)
{
if (request->rq_pool)
OBD_FREE_PTR(request);
}
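/*
 * Illustrative sketch (not part of this patch): the usual allocate/pack/free
 * sequence built from the helpers above; this is essentially what
 * ptlrpc_request_alloc_pack() below wraps up. RQF_OBD_PING,
 * LUSTRE_OBD_VERSION and OBD_PING are assumed here for illustration; real
 * callers use the format/version/opcode of their own service.
 */
static struct ptlrpc_request *example_build_ping(struct obd_import *imp)
{
        struct ptlrpc_request *req;
        int rc;

        req = ptlrpc_request_alloc(imp, &RQF_OBD_PING);
        if (req == NULL)
                return NULL;

        rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, OBD_PING);
        if (rc) {
                /* not packed, so free (or return to pool) instead of going
                 * through ptlrpc_req_finished() */
                ptlrpc_request_free(req);
                return NULL;
        }
        return req;
}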
+/**
+ * Allocate new request for operation \a opcode and immediately pack it for
+ * network transfer.
+ * Only used for simple requests like OBD_PING where the only important
+ * part of the request is the operation itself.
+ * Returns allocated request or NULL on error.
+ */
struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
const struct req_format *format,
__u32 version, int opcode)
return req;
}
+/**
+ * Prepare request (fetched from pool \a pool if not NULL) on import \a imp
+ * for operation \a opcode. The request will contain \a count buffers.
+ * Sizes of buffers are described in array \a lengths and buffers themselves
+ * are provided by a pointer \a bufs.
+ * Returns prepared request structure pointer or NULL on error.
+ */
struct ptlrpc_request *
ptlrpc_prep_req_pool(struct obd_import *imp,
__u32 version, int opcode,
return request;
}
+/**
+ * Same as ptlrpc_prep_req_pool, but without pool
+ */
struct ptlrpc_request *
ptlrpc_prep_req(struct obd_import *imp, __u32 version, int opcode, int count,
__u32 *lengths, char **bufs)
NULL);
}
+/**
+ * Allocate "fake" request that would not be sent anywhere in the end.
+ * Only used as a hack because we have no other way of performing
+ * async actions in lustre between layers.
+ * Used on MDS to request object preallocations from more than one OST at a
+ * time.
+ */
struct ptlrpc_request *ptlrpc_prep_fakereq(struct obd_import *imp,
unsigned int timeout,
ptlrpc_interpterer_t interpreter)
request->rq_no_delay = request->rq_no_resend = 1;
request->rq_fake = 1;
- spin_lock_init(&request->rq_lock);
+ cfs_spin_lock_init(&request->rq_lock);
CFS_INIT_LIST_HEAD(&request->rq_list);
CFS_INIT_LIST_HEAD(&request->rq_replay_list);
CFS_INIT_LIST_HEAD(&request->rq_set_chain);
CFS_INIT_LIST_HEAD(&request->rq_history_list);
CFS_INIT_LIST_HEAD(&request->rq_exp_list);
cfs_waitq_init(&request->rq_reply_waitq);
+ cfs_waitq_init(&request->rq_set_waitq);
request->rq_xid = ptlrpc_next_xid();
- atomic_set(&request->rq_refcount, 1);
+ cfs_atomic_set(&request->rq_refcount, 1);
RETURN(request);
}
+/**
+ * Indicate that processing of "fake" request is finished.
+ */
void ptlrpc_fakereq_finished(struct ptlrpc_request *req)
{
/* if we kill request before timeout - need adjust counter */
struct ptlrpc_request_set *set = req->rq_set;
if (set)
- set->set_remaining --;
+ cfs_atomic_dec(&set->set_remaining);
}
ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
- list_del_init(&req->rq_list);
+ cfs_list_del_init(&req->rq_list);
}
-
+/**
+ * Allocate and initialize new request set structure.
+ * Returns a pointer to the newly allocated set structure or NULL on error.
+ */
struct ptlrpc_request_set *ptlrpc_prep_set(void)
{
struct ptlrpc_request_set *set;
RETURN(NULL);
CFS_INIT_LIST_HEAD(&set->set_requests);
cfs_waitq_init(&set->set_waitq);
- set->set_remaining = 0;
- spin_lock_init(&set->set_new_req_lock);
+ cfs_atomic_set(&set->set_remaining, 0);
+ cfs_spin_lock_init(&set->set_new_req_lock);
CFS_INIT_LIST_HEAD(&set->set_new_requests);
CFS_INIT_LIST_HEAD(&set->set_cblist);
RETURN(set);
}
-/* Finish with this set; opposite of prep_set. */
+/**
+ * Wind down and free request set structure previously allocated with
+ * ptlrpc_prep_set.
+ * Ensures that all requests on the set have completed and removes
+ * all requests from the request list in the set.
+ * If any unsent requests happen to be on the list, pretends that they got
+ * an error in flight and calls their completion handler.
+ */
void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
{
- struct list_head *tmp;
- struct list_head *next;
+ cfs_list_t *tmp;
+ cfs_list_t *next;
int expected_phase;
int n = 0;
ENTRY;
/* Requests on the set should either all be completed, or all be new */
- expected_phase = (set->set_remaining == 0) ?
+ expected_phase = (cfs_atomic_read(&set->set_remaining) == 0) ?
RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
- list_for_each (tmp, &set->set_requests) {
+ cfs_list_for_each (tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
LASSERT(req->rq_phase == expected_phase);
n++;
}
- LASSERTF(set->set_remaining == 0 || set->set_remaining == n, "%d / %d\n",
- set->set_remaining, n);
+ LASSERTF(cfs_atomic_read(&set->set_remaining) == 0 ||
+ cfs_atomic_read(&set->set_remaining) == n, "%d / %d\n",
+ cfs_atomic_read(&set->set_remaining), n);
- list_for_each_safe(tmp, next, &set->set_requests) {
+ cfs_list_for_each_safe(tmp, next, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request, rq_set_chain);
- list_del_init(&req->rq_set_chain);
+ cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
+ cfs_list_del_init(&req->rq_set_chain);
LASSERT(req->rq_phase == expected_phase);
if (req->rq_phase == RQ_PHASE_NEW) {
ptlrpc_req_interpret(NULL, req, -EBADR);
- set->set_remaining--;
+ cfs_atomic_dec(&set->set_remaining);
}
+ cfs_spin_lock(&req->rq_lock);
req->rq_set = NULL;
+ req->rq_invalid_rqset = 0;
+ cfs_spin_unlock(&req->rq_lock);
+
ptlrpc_req_finished (req);
}
- LASSERT(set->set_remaining == 0);
+ LASSERT(cfs_atomic_read(&set->set_remaining) == 0);
OBD_FREE(set, sizeof(*set));
EXIT;
}
+/**
+ * Add a callback function \a fn to the set.
+ * This function will be called when all requests on this set are completed.
+ * The function will be passed the \a data argument.
+ */
int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
set_interpreter_func fn, void *data)
{
cbdata->psc_interpret = fn;
cbdata->psc_data = data;
- list_add_tail(&cbdata->psc_item, &set->set_cblist);
+ cfs_list_add_tail(&cbdata->psc_item, &set->set_cblist);
RETURN(0);
}
+/**
+ * Add a new request to the general purpose request set.
+ * Assumes request reference from the caller.
+ */
void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
struct ptlrpc_request *req)
{
/* The set takes over the caller's request reference */
- list_add_tail(&req->rq_set_chain, &set->set_requests);
+ cfs_list_add_tail(&req->rq_set_chain, &set->set_requests);
req->rq_set = set;
- set->set_remaining++;
+ cfs_atomic_inc(&set->set_remaining);
+ req->rq_queued_time = cfs_time_current(); /* Where is the best place to set this? */
}
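/*
 * Illustrative sketch (not part of this patch): typical life cycle of a
 * request set on the client. example_build_ping() refers to the sketch
 * earlier in this file; any prepared request could be added the same way.
 */
static int example_ping_in_set(struct obd_import *imp)
{
        struct ptlrpc_request_set *set;
        struct ptlrpc_request *req;
        int rc;

        set = ptlrpc_prep_set();
        if (set == NULL)
                return -ENOMEM;

        req = example_build_ping(imp);
        if (req != NULL)
                /* the set takes over our reference on req */
                ptlrpc_set_add_req(set, req);

        /* sends everything queued on the set and waits for completion */
        rc = ptlrpc_set_wait(set);

        /* drops the set's references on its requests */
        ptlrpc_set_destroy(set);
        return rc;
}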
/**
- * Lock so many callers can add things, the context that owns the set
- * is supposed to notice these and move them into the set proper.
+ * Add a request to a request set with a dedicated server thread
+ * and wake the thread to do any necessary processing.
+ * Currently only used for ptlrpcd.
+ * Returns 0 if successful or a non-zero error code on error.
+ * (the only possible error for now is if the dedicated server thread
+ * is shutting down)
*/
int ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
struct ptlrpc_request *req)
* Let caller know that we stopped and will not handle this request.
* It needs to take care itself of request.
*/
- if (test_bit(LIOD_STOP, &pc->pc_flags))
+ if (cfs_test_bit(LIOD_STOP, &pc->pc_flags))
return -EALREADY;
- spin_lock(&set->set_new_req_lock);
+ cfs_spin_lock(&set->set_new_req_lock);
/*
* The set takes over the caller's request reference.
*/
- list_add_tail(&req->rq_set_chain, &set->set_new_requests);
+ cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
req->rq_set = set;
- spin_unlock(&set->set_new_req_lock);
+ cfs_spin_unlock(&set->set_new_req_lock);
cfs_waitq_signal(&set->set_waitq);
return 0;
}
-/*
+/**
* Based on the current state of the import, determine if the request
* can be sent, is an error, or should be delayed.
*
} else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
DEBUG_REQ(D_ERROR, req, "IMP_CLOSED ");
*status = -EIO;
+ } else if (ptlrpc_send_limit_expired(req)) {
+ /* probably doesn't need to be a D_ERROR after initial testing */
+ DEBUG_REQ(D_ERROR, req, "send limit expired ");
+ *status = -EIO;
} else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
imp->imp_state == LUSTRE_IMP_CONNECTING) {
/* allow CONNECT even if import is invalid */ ;
- if (atomic_read(&imp->imp_inval_count) != 0) {
+ if (cfs_atomic_read(&imp->imp_inval_count) != 0) {
DEBUG_REQ(D_ERROR, req, "invalidate in flight");
*status = -EIO;
}
- } else if ((imp->imp_invalid && (!imp->imp_recon_bk)) ||
- imp->imp_obd->obd_no_recov) {
- /* If the import has been invalidated (such as by an OST
- * failure), and if the import(MGC) tried all of its connection
- * list (Bug 13464), the request must fail with -ESHUTDOWN.
- * This indicates the requests should be discarded; an -EIO
- * may result in a resend of the request. */
+ } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
if (!imp->imp_deactive)
DEBUG_REQ(D_ERROR, req, "IMP_INVALID");
*status = -ESHUTDOWN; /* bz 12940 */
*status = -EIO;
} else if (req->rq_send_state != imp->imp_state) {
/* invalidate in progress - any requests should be drop */
- if (atomic_read(&imp->imp_inval_count) != 0) {
+ if (cfs_atomic_read(&imp->imp_inval_count) != 0) {
DEBUG_REQ(D_ERROR, req, "invalidate in flight");
*status = -EIO;
} else if (imp->imp_dlm_fake || req->rq_no_delay) {
RETURN(delay);
}
-
-/* Conditionally suppress specific console messages */
+/**
+ * Decide if the error message regarding provided request \a req
+ * should be printed to the console or not.
+ * Makes its decision based on request status and other properties.
+ * Returns 1 to print error on the system console or 0 if not.
+ */
static int ptlrpc_console_allow(struct ptlrpc_request *req)
{
__u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
return 1;
}
+/**
+ * Check request processing status.
+ * Returns the status.
+ */
static int ptlrpc_check_status(struct ptlrpc_request *req)
{
int err;
}
/**
- * save pre-versions for replay
+ * Save pre-versions of objects into request for replay.
+ * Versions are obtained from server reply.
+ * Used for VBR.
*/
static void ptlrpc_save_versions(struct ptlrpc_request *req)
{
/**
* Callback function called when client receives RPC reply for \a req.
+ * Returns 0 on success or error code.
+ * The return value is assigned to req->rq_status by the caller
+ * as request processing status.
+ * This function also decides if the request needs to be saved for later replay.
*/
static int after_reply(struct ptlrpc_request *req)
{
long timediff;
ENTRY;
- LASSERT(!req->rq_receiving_reply);
- LASSERT(obd);
- LASSERT(req->rq_nob_received <= req->rq_repbuf_len);
+ LASSERT(obd != NULL);
+ /* repbuf must be unlinked */
+ LASSERT(!req->rq_receiving_reply && !req->rq_must_unlink);
+
+ if (req->rq_reply_truncate) {
+ if (ptlrpc_no_resend(req)) {
+ DEBUG_REQ(D_ERROR, req, "reply buffer overflow,"
+ " expected: %d, actual size: %d",
+ req->rq_nob_received, req->rq_repbuf_len);
+ RETURN(-EOVERFLOW);
+ }
- if (req->rq_reply_truncate && !req->rq_no_resend) {
- req->rq_resend = 1;
sptlrpc_cli_free_repbuf(req);
- req->rq_replen = req->rq_nob_received;
+ /* Pass the required reply buffer size (include
+ * space for early reply).
+ * NB: no need to round up because alloc_repbuf
+ * will round it up */
+ req->rq_replen = req->rq_nob_received;
+ req->rq_nob_received = 0;
+ req->rq_resend = 1;
RETURN(0);
}
* NB Until this point, the whole of the incoming message,
* including buflens, status etc is in the sender's byte order.
*/
-
rc = sptlrpc_cli_unwrap_reply(req);
if (rc) {
DEBUG_REQ(D_ERROR, req, "unwrap reply failed (%d):", rc);
if (rc)
RETURN(rc);
- do_gettimeofday(&work_start);
+ cfs_gettimeofday(&work_start);
timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL);
if (obd->obd_svc_stats != NULL) {
lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
RETURN(-EPROTO);
}
- OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, obd_fail_val);
+ if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
+ OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, obd_fail_val);
ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
ptlrpc_at_adj_net_latency(req,
lustre_msg_get_service_time(req->rq_repmsg));
}
if (imp->imp_replayable) {
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
/*
* No point in adding already-committed requests to the replay
* list, we will just remove them immediately. b=9829
ptlrpc_save_versions(req);
ptlrpc_retain_replayable_request(req, imp);
} else if (req->rq_commit_cb != NULL) {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
req->rq_commit_cb(req);
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
}
/*
lustre_msg_get_last_committed(req->rq_repmsg);
}
ptlrpc_free_committed(imp);
- spin_unlock(&imp->imp_lock);
+
+ if (req->rq_transno > imp->imp_peer_committed_transno)
+ ptlrpc_pinger_commit_expected(imp);
+
+ cfs_spin_unlock(&imp->imp_lock);
}
RETURN(rc);
}
+/**
+ * Helper function to send request \a req over the network for the first time.
+ * Also adjusts request phase.
+ * Returns 0 on success or error code.
+ */
static int ptlrpc_send_new_req(struct ptlrpc_request *req)
{
struct obd_import *imp;
ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
imp = req->rq_import;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
req->rq_import_generation = imp->imp_generation;
if (ptlrpc_import_delay_req(imp, req, &rc)) {
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_waiting = 1;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
"(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
ptlrpc_import_state_name(req->rq_send_state),
ptlrpc_import_state_name(imp->imp_state));
- LASSERT(list_empty(&req->rq_list));
- list_add_tail(&req->rq_list, &imp->imp_delayed_list);
- atomic_inc(&req->rq_import->imp_inflight);
- spin_unlock(&imp->imp_lock);
+ LASSERT(cfs_list_empty(&req->rq_list));
+ cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list);
+ cfs_atomic_inc(&req->rq_import->imp_inflight);
+ cfs_spin_unlock(&imp->imp_lock);
RETURN(0);
}
if (rc != 0) {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
req->rq_status = rc;
ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
RETURN(rc);
}
- LASSERT(list_empty(&req->rq_list));
- list_add_tail(&req->rq_list, &imp->imp_sending_list);
- atomic_inc(&req->rq_import->imp_inflight);
- spin_unlock(&imp->imp_lock);
+ LASSERT(cfs_list_empty(&req->rq_list));
+ cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list);
+ cfs_atomic_inc(&req->rq_import->imp_inflight);
+ cfs_spin_unlock(&imp->imp_lock);
lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());
RETURN(0);
}
-/* this sends any unsent RPCs in @set and returns TRUE if all are sent */
+/**
+ * This sends any unsent RPCs in \a set and returns 1 if all are sent
+ * and no more replies are expected.
+ * (it is possible to get fewer replies than requests sent e.g. due to timed out
+ * requests or requests that we had trouble sending out)
+ */
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
int force_timer_recalc = 0;
ENTRY;
- if (set->set_remaining == 0)
+ if (cfs_atomic_read(&set->set_remaining) == 0)
RETURN(1);
- list_for_each(tmp, &set->set_requests) {
+ cfs_list_for_each(tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
struct obd_import *imp = req->rq_import;
+ int unregistered = 0;
int rc = 0;
if (req->rq_phase == RQ_PHASE_NEW &&
if (ptlrpc_client_recv_or_unlink(req) ||
ptlrpc_client_bulk_active(req))
continue;
+ /* If there is no need to resend, fail it now. */
+ if (req->rq_no_resend) {
+ if (req->rq_status == 0)
+ req->rq_status = -EIO;
+ ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+ GOTO(interpret, req->rq_status);
+ } else {
+ continue;
+ }
}
if (req->rq_err) {
+ cfs_spin_lock(&req->rq_lock);
req->rq_replied = 0;
+ cfs_spin_unlock(&req->rq_lock);
if (req->rq_status == 0)
req->rq_status = -EIO;
ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
if (!ptlrpc_unregister_reply(req, 1))
continue;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (ptlrpc_import_delay_req(imp, req, &status)){
/* put on delay list - only if we wait
* recovery finished - before send */
- list_del_init(&req->rq_list);
- list_add_tail(&req->rq_list, &imp->imp_delayed_list);
- spin_unlock(&imp->imp_lock);
+ cfs_list_del_init(&req->rq_list);
+ cfs_list_add_tail(&req->rq_list,
+ &imp->imp_delayed_list);
+ cfs_spin_unlock(&imp->imp_lock);
continue;
}
req->rq_status = status;
ptlrpc_rqphase_move(req,
RQ_PHASE_INTERPRET);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
GOTO(interpret, req->rq_status);
}
- if (req->rq_no_resend && !req->rq_wait_ctx) {
+ if (ptlrpc_no_resend(req) && !req->rq_wait_ctx) {
req->rq_status = -ENOTCONN;
ptlrpc_rqphase_move(req,
RQ_PHASE_INTERPRET);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
GOTO(interpret, req->rq_status);
}
- list_del_init(&req->rq_list);
- list_add_tail(&req->rq_list,
+ cfs_list_del_init(&req->rq_list);
+ cfs_list_add_tail(&req->rq_list,
&imp->imp_sending_list);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_waiting = 0;
+ cfs_spin_unlock(&req->rq_lock);
- if (req->rq_timedout||req->rq_resend) {
+ if (req->rq_timedout || req->rq_resend) {
/* This is re-sending anyways,
* let's mark req as resend. */
+ cfs_spin_lock(&req->rq_lock);
req->rq_resend = 1;
+ cfs_spin_unlock(&req->rq_lock);
if (req->rq_bulk) {
__u64 old_xid;
if (status) {
if (req->rq_err) {
req->rq_status = status;
+ cfs_spin_lock(&req->rq_lock);
req->rq_wait_ctx = 0;
+ cfs_spin_unlock(&req->rq_lock);
force_timer_recalc = 1;
} else {
+ cfs_spin_lock(&req->rq_lock);
req->rq_wait_ctx = 1;
+ cfs_spin_unlock(&req->rq_lock);
}
continue;
} else {
+ cfs_spin_lock(&req->rq_lock);
req->rq_wait_ctx = 0;
+ cfs_spin_unlock(&req->rq_lock);
}
rc = ptl_send_rpc(req, 0);
DEBUG_REQ(D_HA, req, "send failed (%d)",
rc);
force_timer_recalc = 1;
+ cfs_spin_lock(&req->rq_lock);
req->rq_net_err = 1;
+ cfs_spin_unlock(&req->rq_lock);
}
/* need to reset the timeout */
force_timer_recalc = 1;
}
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
if (ptlrpc_client_early(req)) {
ptlrpc_at_recv_early_reply(req);
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
continue;
}
/* Still waiting for a reply? */
if (ptlrpc_client_recv(req)) {
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
continue;
}
/* Did we actually receive a reply? */
if (!ptlrpc_client_replied(req)) {
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
continue;
}
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
+
+ /* unlink from net because we are going to
+ * swab in-place of reply buffer */
+ unregistered = ptlrpc_unregister_reply(req, 1);
+ if (!unregistered)
+ continue;
req->rq_status = after_reply(req);
if (req->rq_resend)
/* This moves to "unregistering" phase we need to wait for
* reply unlink. */
- if (!ptlrpc_unregister_reply(req, 1))
+ if (!unregistered && !ptlrpc_unregister_reply(req, 1))
continue;
if (!ptlrpc_unregister_bulk(req, 1))
libcfs_nid2str(imp->imp_connection->c_peer.nid),
req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : -1);
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
/* Request already may be not on sending or delaying list. This
* may happen in the case of marking it erroneous for the case
* ptlrpc_import_delay_req(req, status) find it impossible to
* allow sending this rpc and returns *status != 0. */
- if (!list_empty(&req->rq_list)) {
- list_del_init(&req->rq_list);
- atomic_dec(&imp->imp_inflight);
+ if (!cfs_list_empty(&req->rq_list)) {
+ cfs_list_del_init(&req->rq_list);
+ cfs_atomic_dec(&imp->imp_inflight);
}
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
- set->set_remaining--;
+ cfs_atomic_dec(&set->set_remaining);
cfs_waitq_broadcast(&imp->imp_recovery_waitq);
}
/* If we hit an error, we want to recover promptly. */
- RETURN(set->set_remaining == 0 || force_timer_recalc);
+ RETURN(cfs_atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
}
-/* Return 1 if we should give up, else 0 */
+/**
+ * Time out request \a req. If \a async_unlink is set, do not wait
+ * until LNet actually confirms network buffer unlinking.
+ * Return 1 if we should give up further retrying attempts or 0 otherwise.
+ */
int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
{
struct obd_import *imp = req->rq_import;
int rc = 0;
ENTRY;
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_timedout = 1;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
- DEBUG_REQ(req->rq_fake ? D_INFO : D_WARNING, req,
- "Request x"LPU64" sent from %s to NID %s "CFS_DURATION_T"s "
- "ago has %s ("CFS_DURATION_T"s prior to deadline).\n",
+ DEBUG_REQ(req->rq_fake ? D_INFO : D_WARNING, req, "Request x"LPU64
+ " sent from %s to NID %s has %s: [sent "CFS_DURATION_T"] "
+ "[real_sent "CFS_DURATION_T"] [current "CFS_DURATION_T"] "
+ "[deadline "CFS_DURATION_T"s] [delay "CFS_DURATION_T"s]",
req->rq_xid, imp ? imp->imp_obd->obd_name : "<?>",
- imp ? libcfs_nid2str(imp->imp_connection->c_peer.nid) : "<?>",
- cfs_time_sub(cfs_time_current_sec(), req->rq_sent),
- req->rq_net_err ? "failed due to network error" : "timed out",
- cfs_time_sub(req->rq_deadline, req->rq_sent));
+ imp ? libcfs_nid2str(imp->imp_connection->c_peer.nid) : "<?>",
+ req->rq_net_err ? "failed due to network error" :
+ ((req->rq_real_sent == 0 ||
+ cfs_time_before(req->rq_real_sent, req->rq_sent) ||
+ cfs_time_aftereq(req->rq_real_sent, req->rq_deadline)) ?
+ "timed out for sent delay" : "timed out for slow reply"),
+ req->rq_sent, req->rq_real_sent, cfs_time_current_sec(),
+ cfs_time_sub(req->rq_deadline, req->rq_sent),
+ cfs_time_sub(cfs_time_current_sec(), req->rq_deadline));
if (imp != NULL && obd_debug_peer_on_timeout)
LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);
if (req->rq_fake)
RETURN(1);
- atomic_inc(&imp->imp_timeouts);
+ cfs_atomic_inc(&imp->imp_timeouts);
/* The DLM server doesn't want recovery run on its imports. */
if (imp->imp_dlm_fake)
DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
ptlrpc_import_state_name(req->rq_send_state),
ptlrpc_import_state_name(imp->imp_state));
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_status = -ETIMEDOUT;
req->rq_err = 1;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
RETURN(1);
}
/* if a request can't be resent we can't wait for an answer after
the timeout */
- if (req->rq_no_resend) {
+ if (ptlrpc_no_resend(req)) {
DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
rc = 1;
}
RETURN(rc);
}
+/**
+ * Time out all uncompleted requests in request set pointed to by \a data.
+ * Callback used when waiting on sets with l_wait_event.
+ * Always returns 1.
+ */
int ptlrpc_expired_set(void *data)
{
struct ptlrpc_request_set *set = data;
- struct list_head *tmp;
+ cfs_list_t *tmp;
time_t now = cfs_time_current_sec();
ENTRY;
/*
* A timeout expired. See which reqs it applies to...
*/
- list_for_each (tmp, &set->set_requests) {
+ cfs_list_for_each (tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
/* don't expire request waiting for context */
if (req->rq_wait_ctx)
RETURN(1);
}
+/**
+ * Sets rq_intr flag in \a req under spinlock.
+ */
void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
{
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_intr = 1;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
}
+/**
+ * Interrupts (sets interrupted flag) all uncompleted requests in
+ * a set \a data. Callback for l_wait_event for interruptible waits.
+ */
void ptlrpc_interrupted_set(void *data)
{
struct ptlrpc_request_set *set = data;
- struct list_head *tmp;
+ cfs_list_t *tmp;
LASSERT(set != NULL);
CERROR("INTERRUPTED SET %p\n", set);
- list_for_each(tmp, &set->set_requests) {
+ cfs_list_for_each(tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
if (req->rq_phase != RQ_PHASE_RPC &&
req->rq_phase != RQ_PHASE_UNREGISTERING)
*/
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
time_t now = cfs_time_current_sec();
int timeout = 0;
struct ptlrpc_request *req;
SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
- list_for_each(tmp, &set->set_requests) {
- req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ cfs_list_for_each(tmp, &set->set_requests) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
/*
* Request in-flight?
RETURN(timeout);
}
+/**
+ * Send all unsent requests from the set and then wait until all
+ * requests in the set complete (either get a reply, time out, get an
+ * error or otherwise be interrupted).
+ * Returns 0 on success or error code otherwise.
+ */
int ptlrpc_set_wait(struct ptlrpc_request_set *set)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct ptlrpc_request *req;
struct l_wait_info lwi;
int rc, timeout;
ENTRY;
- if (list_empty(&set->set_requests))
+ if (cfs_list_empty(&set->set_requests))
RETURN(0);
- list_for_each(tmp, &set->set_requests) {
- req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ cfs_list_for_each(tmp, &set->set_requests) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
if (req->rq_phase == RQ_PHASE_NEW)
(void)ptlrpc_send_new_req(req);
}
* EINTR.
* I don't really care if we go once more round the loop in
* the error cases -eeb. */
- } while (rc != 0 || set->set_remaining != 0);
+ if (rc == 0 && cfs_atomic_read(&set->set_remaining) == 0) {
+ cfs_list_for_each(tmp, &set->set_requests) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
+ cfs_spin_lock(&req->rq_lock);
+ req->rq_invalid_rqset = 1;
+ cfs_spin_unlock(&req->rq_lock);
+ }
+ }
+ } while (rc != 0 || cfs_atomic_read(&set->set_remaining) != 0);
- LASSERT(set->set_remaining == 0);
+ LASSERT(cfs_atomic_read(&set->set_remaining) == 0);
rc = 0;
- list_for_each(tmp, &set->set_requests) {
- req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ cfs_list_for_each(tmp, &set->set_requests) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
if (req->rq_status != 0)
struct ptlrpc_set_cbdata *cbdata, *n;
int err;
- list_for_each_entry_safe(cbdata, n,
+ cfs_list_for_each_entry_safe(cbdata, n,
&set->set_cblist, psc_item) {
- list_del_init(&cbdata->psc_item);
+ cfs_list_del_init(&cbdata->psc_item);
err = cbdata->psc_interpret(set, cbdata->psc_data, rc);
if (err && !rc)
rc = err;
RETURN(rc);
}
+/**
+ * Helper function for request freeing.
+ * Called when request reference count reaches zero and request needs to be freed.
+ * Removes request from all sorts of sending/replay lists it might be on,
+ * frees network buffers if any are present.
+ * If \a locked is set, that means caller is already holding import imp_lock
+ * and so we no longer need to reobtain it (for certain list manipulations)
+ */
static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
{
ENTRY;
LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
LASSERTF(request->rq_rqbd == NULL, "req %p\n",request);/* client-side */
- LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
- LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
- LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request);
+ LASSERTF(cfs_list_empty(&request->rq_list), "req %p\n", request);
+ LASSERTF(cfs_list_empty(&request->rq_set_chain), "req %p\n", request);
+ LASSERTF(cfs_list_empty(&request->rq_exp_list), "req %p\n", request);
LASSERTF(!request->rq_replay, "req %p\n", request);
LASSERT(request->rq_cli_ctx || request->rq_fake);
* request->rq_reqmsg to NULL while osc_close is dereferencing it. */
if (request->rq_import != NULL) {
if (!locked)
- spin_lock(&request->rq_import->imp_lock);
- list_del_init(&request->rq_replay_list);
+ cfs_spin_lock(&request->rq_import->imp_lock);
+ cfs_list_del_init(&request->rq_replay_list);
if (!locked)
- spin_unlock(&request->rq_import->imp_lock);
+ cfs_spin_unlock(&request->rq_import->imp_lock);
}
- LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);
+ LASSERTF(cfs_list_empty(&request->rq_replay_list), "req %p\n", request);
- if (atomic_read(&request->rq_refcount) != 0) {
+ if (cfs_atomic_read(&request->rq_refcount) != 0) {
DEBUG_REQ(D_ERROR, request,
"freeing request with nonzero refcount");
LBUG();
}
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
+/**
+ * Drop one request reference. Must be called with import imp_lock held.
+ * When reference count drops to zero, request is freed.
+ */
void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
{
LASSERT_SPIN_LOCKED(&request->rq_import->imp_lock);
(void)__ptlrpc_req_finished(request, 1);
}
+/**
+ * Helper function.
+ * Drops one reference count for request \a request.
+ * \a locked set indicates that caller holds import imp_lock.
+ * Frees the request when reference count reaches zero.
+ */
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
{
ENTRY;
}
DEBUG_REQ(D_INFO, request, "refcount now %u",
- atomic_read(&request->rq_refcount) - 1);
+ cfs_atomic_read(&request->rq_refcount) - 1);
- if (atomic_dec_and_test(&request->rq_refcount)) {
+ if (cfs_atomic_dec_and_test(&request->rq_refcount)) {
__ptlrpc_free_req(request, locked);
RETURN(1);
}
RETURN(0);
}
+/**
+ * Drops one reference count for a request.
+ */
void ptlrpc_req_finished(struct ptlrpc_request *request)
{
__ptlrpc_req_finished(request, 0);
}
+/**
+ * Returns xid of a \a request
+ */
__u64 ptlrpc_req_xid(struct ptlrpc_request *request)
{
return request->rq_xid;
}
EXPORT_SYMBOL(ptlrpc_req_xid);
-/* Disengage the client's reply buffer from the network
+/**
+ * Disengage the client's reply buffer from the network
* NB does _NOT_ unregister any client-side bulk.
* IDEMPOTENT, but _not_ safe against concurrent callers.
* The request owner (i.e. the thread doing the I/O) must call...
+ * Returns 0 on success or 1 if unregistering cannot be made.
*/
int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
{
/*
* Might sleep.
*/
- LASSERT(!in_interrupt());
+ LASSERT(!cfs_in_interrupt());
/*
* Let's setup deadline for reply unlink.
RETURN(0);
}
-/* caller must hold imp->imp_lock */
+/**
+ * Iterates through replay_list on import and prunes
+ * all requests that have transno smaller than last_committed for the
+ * import and don't have rq_replay set.
+ * Since requests are sorted in transno order, stops when meeting the first
+ * transno bigger than last_committed.
+ * Caller must hold imp->imp_lock.
+ */
void ptlrpc_free_committed(struct obd_import *imp)
{
- struct list_head *tmp, *saved;
+ cfs_list_t *tmp, *saved;
struct ptlrpc_request *req;
struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
ENTRY;
if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
imp->imp_generation == imp->imp_last_generation_checked) {
- CDEBUG(D_RPCTRACE, "%s: skip recheck: last_committed "LPU64"\n",
+ CDEBUG(D_INFO, "%s: skip recheck: last_committed "LPU64"\n",
imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
EXIT;
return;
imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
imp->imp_last_generation_checked = imp->imp_generation;
- list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
- req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
+ cfs_list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_replay_list);
/* XXX ok to remove when 1357 resolved - rread 05/29/03 */
LASSERT(req != last_req);
break;
}
- DEBUG_REQ(D_RPCTRACE, req, "commit (last_committed "LPU64")",
+ DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")",
imp->imp_peer_committed_transno);
free_req:
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_replay = 0;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
if (req->rq_commit_cb != NULL)
req->rq_commit_cb(req);
- list_del_init(&req->rq_replay_list);
+ cfs_list_del_init(&req->rq_replay_list);
__ptlrpc_req_finished(req, 1);
}
return;
}
+/**
+ * Schedule previously sent request for resend.
+ * For bulk requests we assign new xid (to avoid problems with
+ * lost replies and therefore several transfers landing into same buffer
+ * from different sending attempts).
+ */
void ptlrpc_resend_req(struct ptlrpc_request *req)
{
DEBUG_REQ(D_HA, req, "going to resend");
lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
req->rq_status = -EAGAIN;
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_resend = 1;
req->rq_net_err = 0;
req->rq_timedout = 0;
old_xid, req->rq_xid);
}
ptlrpc_client_wake_req(req);
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
}
/* XXX: this function and rq_status are currently unused */
DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
req->rq_status = -ERESTARTSYS;
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_restart = 1;
req->rq_timedout = 0;
ptlrpc_client_wake_req(req);
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
}
+/**
+ * Grab additional reference on a request \a req
+ */
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
{
ENTRY;
- atomic_inc(&req->rq_refcount);
+ cfs_atomic_inc(&req->rq_refcount);
RETURN(req);
}
+/**
+ * Add a request to import replay_list.
+ * Must be called under imp_lock
+ */
void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
struct obd_import *imp)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
LASSERT_SPIN_LOCKED(&imp->imp_lock);
lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
/* don't re-add requests that have been replayed */
- if (!list_empty(&req->rq_replay_list))
+ if (!cfs_list_empty(&req->rq_replay_list))
return;
lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
LASSERT(imp->imp_replayable);
/* Balanced in ptlrpc_free_committed, usually. */
ptlrpc_request_addref(req);
- list_for_each_prev(tmp, &imp->imp_replay_list) {
+ cfs_list_for_each_prev(tmp, &imp->imp_replay_list) {
struct ptlrpc_request *iter =
- list_entry(tmp, struct ptlrpc_request, rq_replay_list);
+ cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_replay_list);
/* We may have duplicate transnos if we create and then
* open a file, or for closes retained if to match creating
continue;
}
- list_add(&req->rq_replay_list, &iter->rq_replay_list);
+ cfs_list_add(&req->rq_replay_list, &iter->rq_replay_list);
return;
}
- list_add(&req->rq_replay_list, &imp->imp_replay_list);
+ cfs_list_add(&req->rq_replay_list, &imp->imp_replay_list);
}
+/**
+ * Send request and wait until it completes.
+ * Returns request processing status.
+ */
int ptlrpc_queue_wait(struct ptlrpc_request *req)
{
struct ptlrpc_request_set *set;
int praa_old_status;
};
+/**
+ * Callback used for replayed request reply processing.
+ * In case of a successful reply, calls the registered request replay callback.
+ * In case of error, restarts the replay process.
+ */
static int ptlrpc_replay_interpret(const struct lu_env *env,
struct ptlrpc_request *req,
void * data, int rc)
struct obd_import *imp = req->rq_import;
ENTRY;
- atomic_dec(&imp->imp_replay_inflight);
+ cfs_atomic_dec(&imp->imp_replay_inflight);
if (!ptlrpc_client_replied(req)) {
CERROR("request replay timed out, restarting recovery\n");
if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
/** replay was failed due to version mismatch */
DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_vbr_failed = 1;
imp->imp_no_lock_replay = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
} else {
/** The transno had better not change over replay. */
LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
lustre_msg_get_transno(req->rq_repmsg));
}
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
/** if replay is done by version then a gap occurred on the server, don't trust locks */
if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
imp->imp_no_lock_replay = 1;
imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
LASSERT(imp->imp_last_replay_transno);
DEBUG_REQ(D_HA, req, "got rep");
* imp_last_replay_transno shouldn't be set to 0 anyway
*/
if (req->rq_transno > 0) {
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
LASSERT(req->rq_transno <= imp->imp_last_replay_transno);
imp->imp_last_replay_transno = req->rq_transno;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
} else
CERROR("Transno is 0 during replay!\n");
/* continue with recovery */
RETURN(rc);
}
+/**
+ * Prepares and queues request for replay.
+ * Adds it to ptlrpcd queue for actual sending.
+ * Returns 0 on success.
+ */
int ptlrpc_replay_req(struct ptlrpc_request *req)
{
struct ptlrpc_replay_async_args *aa;
ENTRY;
LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
- /* Not handling automatic bulk replay yet (or ever?) */
- LASSERT(req->rq_bulk == NULL);
LASSERT (sizeof (*aa) <= sizeof (req->rq_async_args));
aa = ptlrpc_req_async_args(req);
DEBUG_REQ(D_HA, req, "REPLAY");
- atomic_inc(&req->rq_import->imp_replay_inflight);
+ cfs_atomic_inc(&req->rq_import->imp_replay_inflight);
ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
ptlrpcd_add_req(req, PSCOPE_OTHER);
RETURN(0);
}
+/**
+ * Aborts all in-flight requests on import \a imp sending and delayed lists
+ */
void ptlrpc_abort_inflight(struct obd_import *imp)
{
- struct list_head *tmp, *n;
+ cfs_list_t *tmp, *n;
ENTRY;
/* Make sure that no new requests get processed for this import.
* ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
* this flag and then putting requests on sending_list or delayed_list.
*/
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
/* XXX locking? Maybe we should remove each request with the list
* locked? Also, how do we know if the requests on the list are
* being freed at this time?
*/
- list_for_each_safe(tmp, n, &imp->imp_sending_list) {
+ cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request, rq_list);
+ cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
DEBUG_REQ(D_RPCTRACE, req, "inflight");
- spin_lock (&req->rq_lock);
+ cfs_spin_lock (&req->rq_lock);
if (req->rq_import_generation < imp->imp_generation) {
req->rq_err = 1;
req->rq_status = -EINTR;
ptlrpc_client_wake_req(req);
}
- spin_unlock (&req->rq_lock);
+ cfs_spin_unlock (&req->rq_lock);
}
- list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
+ cfs_list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request, rq_list);
+ cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
- spin_lock (&req->rq_lock);
+ cfs_spin_lock (&req->rq_lock);
if (req->rq_import_generation < imp->imp_generation) {
req->rq_err = 1;
req->rq_status = -EINTR;
ptlrpc_client_wake_req(req);
}
- spin_unlock (&req->rq_lock);
+ cfs_spin_unlock (&req->rq_lock);
}
/* Last chance to free reqs left on the replay list, but we
if (imp->imp_replayable)
ptlrpc_free_committed(imp);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
EXIT;
}
+/**
+ * Abort all uncompleted requests in request set \a set
+ */
void ptlrpc_abort_set(struct ptlrpc_request_set *set)
{
- struct list_head *tmp, *pos;
+ cfs_list_t *tmp, *pos;
LASSERT(set != NULL);
- list_for_each_safe(pos, tmp, &set->set_requests) {
+ cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(pos, struct ptlrpc_request, rq_set_chain);
+ cfs_list_entry(pos, struct ptlrpc_request,
+ rq_set_chain);
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
if (req->rq_phase != RQ_PHASE_RPC) {
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
continue;
}
req->rq_err = 1;
req->rq_status = -EINTR;
ptlrpc_client_wake_req(req);
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
}
}
static __u64 ptlrpc_last_xid;
-static spinlock_t ptlrpc_last_xid_lock;
+static cfs_spinlock_t ptlrpc_last_xid_lock;
-/* Initialize the XID for the node. This is common among all requests on
+/**
+ * Initialize the XID for the node. This is common among all requests on
* this node, and only requires the property that it is monotonically
* increasing. It does not need to be sequential. Since this is also used
* as the RDMA match bits, it is important that a single client NOT have
{
time_t now = cfs_time_current_sec();
- spin_lock_init(&ptlrpc_last_xid_lock);
+ cfs_spin_lock_init(&ptlrpc_last_xid_lock);
if (now < YEAR_2004) {
- ll_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
+ cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
ptlrpc_last_xid >>= 2;
ptlrpc_last_xid |= (1ULL << 61);
} else {
}
}
+/**
+ * Increases xid and returns the resulting new value to the caller.
+ */
__u64 ptlrpc_next_xid(void)
{
__u64 tmp;
- spin_lock(&ptlrpc_last_xid_lock);
+ cfs_spin_lock(&ptlrpc_last_xid_lock);
tmp = ++ptlrpc_last_xid;
- spin_unlock(&ptlrpc_last_xid_lock);
+ cfs_spin_unlock(&ptlrpc_last_xid_lock);
return tmp;
}
+/**
+ * Get a glimpse at what the next xid value might have been.
+ * Returns possible next xid.
+ */
__u64 ptlrpc_sample_next_xid(void)
{
#if BITS_PER_LONG == 32
/* need to avoid possible word tearing on 32-bit systems */
__u64 tmp;
- spin_lock(&ptlrpc_last_xid_lock);
+ cfs_spin_lock(&ptlrpc_last_xid_lock);
tmp = ptlrpc_last_xid + 1;
- spin_unlock(&ptlrpc_last_xid_lock);
+ cfs_spin_unlock(&ptlrpc_last_xid_lock);
return tmp;
#else
/* No need to lock, since returned value is racy anyways */