* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011 Whamcloud, Inc.
+ *
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
+/** Implementation of client-side PortalRPC interfaces */
+
#define DEBUG_SUBSYSTEM S_RPC
#ifndef __KERNEL__
#include <errno.h>
#include "ptlrpc_internal.h"
+/**
+ * Initialize the passed-in client structure \a cl.
+ */
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
struct ptlrpc_client *cl)
{
cl->cli_name = name;
}
+/**
+ * Return PortalRPC connection for remote uuid \a uuid.
+ */
struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
{
struct ptlrpc_connection *c;
err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
if (err != 0) {
- CERROR("cannot find peer %s!\n", uuid->uuid);
+ CNETERR("cannot find peer %s!\n", uuid->uuid);
return NULL;
}
return c;
}
+/**
+ * Allocate and initialize new bulk descriptor
+ * Returns pointer to the descriptor or NULL on error.
+ */
static inline struct ptlrpc_bulk_desc *new_bulk(int npages, int type, int portal)
{
struct ptlrpc_bulk_desc *desc;
return desc;
}
+/**
+ * Prepare bulk descriptor for specified outgoing request \a req that
+ * can fit \a npages pages. \a type is bulk type. \a portal is where
+ * the bulk is to be sent. Used on client-side.
+ * Returns pointer to newly allocated initialized bulk descriptor or NULL on
+ * error.
+ */
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
int npages, int type, int portal)
{
return desc;
}
+/**
+ * Prepare bulk descriptor for specified incoming request \a req that
+ * can fit \a npages pages. \a type is bulk type. \a portal is where
+ * the bulk is to be sent. Used on server-side after request was already
+ * received.
+ * Returns pointer to newly allocated initialized bulk descriptor or NULL on
+ * error.
+ */
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
int npages, int type, int portal)
{
return desc;
}
+/**
+ * Add a page \a page to the bulk descriptor \a desc.
+ * Data to transfer in the page starts at offset \a pageoffset and
+ * the amount of data to transfer from the page is \a len.
+ */
void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
cfs_page_t *page, int pageoffset, int len)
{
desc->bd_nob += len;
+ cfs_page_pin(page);
ptlrpc_add_bulk_page(desc, page, pageoffset, len);
}
+/**
+ * Uninitialize and free bulk descriptor \a desc.
+ * Works on bulk descriptors from both the server and client side.
+ */
void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
{
+ int i;
ENTRY;
LASSERT(desc != NULL);
else
class_import_put(desc->bd_import);
+ for (i = 0; i < desc->bd_iov_count; i++)
+ cfs_page_unpin(desc->bd_iov[i].kiov_page);
+
OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
bd_iov[desc->bd_max_iov]));
EXIT;
}
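+/**
+ * A minimal usage sketch of the client-side bulk path above, assuming a
+ * request \a req has already been allocated and "pages"/"npages" describe a
+ * hypothetical page array; the bulk type and portal values are illustrative:
+ * \code
+ *	struct ptlrpc_bulk_desc *desc;
+ *	int i;
+ *
+ *	desc = ptlrpc_prep_bulk_imp(req, npages, BULK_GET_SOURCE,
+ *				    OST_BULK_PORTAL);
+ *	if (desc == NULL)
+ *		return -ENOMEM;
+ *	for (i = 0; i < npages; i++)
+ *		ptlrpc_prep_bulk_page(desc, pages[i], 0, CFS_PAGE_SIZE);
+ * \endcode
+ * When the request is eventually released, the descriptor is torn down with
+ * ptlrpc_free_bulk(), which also unpins every page attached above.
+ */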
-/* Set server timelimit for this req */
+/**
+ * Set server timelimit for this req, i.e. how long we are willing to wait
+ * for a reply before timing out this request.
+ */
void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
{
__u32 serv_est;
if (AT_OFF) {
/* non-AT settings */
+ /**
+ * \a imp_server_timeout means this is a reverse import and
+ * we send (currently only) ASTs to the client and cannot afford
+ * to wait too long for the reply, otherwise the other client
+ * (because of which we are sending this request) would
+ * time out waiting for us.
+ */
req->rq_timeout = req->rq_import->imp_server_timeout ?
obd_timeout / 2 : obd_timeout;
} else {
return 0;
}
-/*
+/**
* Handle an early reply message, called with the rq_lock held.
* If anything goes wrong just ignore it - same as if it never happened
*/
RETURN(rc);
}
+/**
+ * Wind down request pool \a pool.
+ * Frees all requests from the pool too
+ */
void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
{
cfs_list_t *l, *tmp;
cfs_list_del(&req->rq_list);
LASSERT(req->rq_reqbuf);
LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
- OBD_FREE(req->rq_reqbuf, pool->prp_rq_size);
+ OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
OBD_FREE(req, sizeof(*req));
}
cfs_spin_unlock(&pool->prp_lock);
OBD_FREE(pool, sizeof(*pool));
}
+/**
+ * Allocates, initializes and adds \a num_rq requests to the pool \a pool
+ */
void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
{
int i;
OBD_ALLOC(req, sizeof(struct ptlrpc_request));
if (!req)
return;
- OBD_ALLOC_GFP(msg, size, CFS_ALLOC_STD);
+ OBD_ALLOC_LARGE(msg, size);
if (!msg) {
OBD_FREE(req, sizeof(struct ptlrpc_request));
return;
return;
}
+/**
+ * Create and initialize new request pool with given attributes:
+ * \a num_rq - initial number of requests to create for the pool
+ * \a msgsize - maximum message size possible for requests in this pool
+ * \a populate_pool - function to be called when more requests need to be added
+ * to the pool
+ * Returns pointer to newly created pool or NULL on error.
+ */
struct ptlrpc_request_pool *
ptlrpc_init_rq_pool(int num_rq, int msgsize,
void (*populate_pool)(struct ptlrpc_request_pool *, int))
return pool;
}
+/**
+ * Fetches one request from pool \a pool
+ */
static struct ptlrpc_request *
ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
{
return request;
}
+/**
+ * Returns the freed \a request to the pool.
+ */
static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
{
struct ptlrpc_request_pool *pool = request->rq_pool;
CFS_INIT_LIST_HEAD(&request->rq_history_list);
CFS_INIT_LIST_HEAD(&request->rq_exp_list);
cfs_waitq_init(&request->rq_reply_waitq);
+ cfs_waitq_init(&request->rq_set_waitq);
request->rq_xid = ptlrpc_next_xid();
cfs_atomic_set(&request->rq_refcount, 1);
}
EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
+/**
+ * Pack request buffers for network transfer, performing encryption
+ * steps if necessary.
+ */
int ptlrpc_request_pack(struct ptlrpc_request *request,
__u32 version, int opcode)
{
return ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
}
+/**
+ * Helper function to allocate a new request on import \a imp,
+ * possibly using an existing request from pool \a pool if provided.
+ * Returns allocated request structure with import field filled or
+ * NULL on error.
+ */
static inline
struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
struct ptlrpc_request_pool *pool)
return request;
}
+/**
+ * Helper function for creating a request.
+ * Calls __ptlrpc_request_alloc to allocate a new request structure and initialize
+ * buffer structures according to capsule template \a format.
+ * Returns allocated request structure pointer or NULL on error.
+ */
static struct ptlrpc_request *
ptlrpc_request_alloc_internal(struct obd_import *imp,
struct ptlrpc_request_pool * pool,
return request;
}
+/**
+ * Allocate new request structure for import \a imp and initialize its
+ * buffer structure according to capsule template \a format.
+ */
struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
const struct req_format *format)
{
return ptlrpc_request_alloc_internal(imp, NULL, format);
}
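+/**
+ * A minimal sketch of the usual two-step path built on the helpers above,
+ * for callers that must size a client buffer before packing; the RQF/RMF
+ * names, opcode and size below are placeholders for whatever format the
+ * caller actually uses:
+ * \code
+ *	struct ptlrpc_request *req;
+ *	int rc;
+ *
+ *	req = ptlrpc_request_alloc(imp, &RQF_SOME_FORMAT);
+ *	if (req == NULL)
+ *		return -ENOMEM;
+ *	req_capsule_set_size(&req->rq_pill, &RMF_SOME_FIELD, RCL_CLIENT,
+ *			     buf_size);
+ *	rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, SOME_OPCODE);
+ *	if (rc) {
+ *		ptlrpc_request_free(req);
+ *		return rc;
+ *	}
+ * \endcode
+ */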
+/**
+ * Allocate new request structure for import \a imp from pool \a pool and
+ * initialize its buffer structure according to capsule template \a format.
+ */
struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
struct ptlrpc_request_pool * pool,
const struct req_format *format)
return ptlrpc_request_alloc_internal(imp, pool, format);
}
+/**
+ * For requests not from pool, free memory of the request structure.
+ * For requests obtained from a pool earlier, return request back to pool.
+ */
void ptlrpc_request_free(struct ptlrpc_request *request)
{
if (request->rq_pool)
OBD_FREE_PTR(request);
}
+/**
+ * Allocate new request for operation \a opcode and immediately pack it for
+ * network transfer.
+ * Only used for simple requests like OBD_PING where the only important
+ * part of the request is the operation itself.
+ * Returns allocated request or NULL on error.
+ */
struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
const struct req_format *format,
__u32 version, int opcode)
return req;
}
+/**
+ * Prepare request (fetched from pool \a pool if not NULL) on import \a imp
+ * for operation \a opcode. The request will contain \a count buffers.
+ * Sizes of buffers are described in array \a lengths and buffers themselves
+ * are provided by a pointer \a bufs.
+ * Returns prepared request structure pointer or NULL on error.
+ */
struct ptlrpc_request *
ptlrpc_prep_req_pool(struct obd_import *imp,
__u32 version, int opcode,
return request;
}
+/**
+ * Same as ptlrpc_prep_req_pool, but without a pool.
+ */
struct ptlrpc_request *
ptlrpc_prep_req(struct obd_import *imp, __u32 version, int opcode, int count,
__u32 *lengths, char **bufs)
NULL);
}
+/**
+ * Allocate "fake" request that would not be sent anywhere in the end.
+ * Only used as a hack because we have no other way of performing
+ * async actions in lustre between layers.
+ * Used on MDS to request object preallocations from more than one OST at a
+ * time.
+ */
struct ptlrpc_request *ptlrpc_prep_fakereq(struct obd_import *imp,
unsigned int timeout,
ptlrpc_interpterer_t interpreter)
CFS_INIT_LIST_HEAD(&request->rq_history_list);
CFS_INIT_LIST_HEAD(&request->rq_exp_list);
cfs_waitq_init(&request->rq_reply_waitq);
+ cfs_waitq_init(&request->rq_set_waitq);
request->rq_xid = ptlrpc_next_xid();
cfs_atomic_set(&request->rq_refcount, 1);
RETURN(request);
}
+/**
+ * Indicate that processing of "fake" request is finished.
+ */
void ptlrpc_fakereq_finished(struct ptlrpc_request *req)
{
- /* if we kill request before timeout - need adjust counter */
- if (req->rq_phase == RQ_PHASE_RPC) {
- struct ptlrpc_request_set *set = req->rq_set;
+ struct ptlrpc_request_set *set = req->rq_set;
+ int wakeup = 0;
- if (set)
- set->set_remaining --;
- }
+ /* hold a ref on the request to prevent others (ptlrpcd) from freeing it */
+ ptlrpc_request_addref(req);
+ cfs_list_del_init(&req->rq_list);
+
+ /* if we kill request before timeout - need adjust counter */
+ if (req->rq_phase == RQ_PHASE_RPC && set != NULL &&
+ cfs_atomic_dec_and_test(&set->set_remaining))
+ wakeup = 1;
ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
- cfs_list_del_init(&req->rq_list);
-}
+ /* Only need to call wakeup once when the set becomes empty. */
+ if (wakeup)
+ cfs_waitq_signal(&set->set_waitq);
+ ptlrpc_req_finished(req);
+}
+/**
+ * Allocate and initialize new request set structure.
+ * Returns a pointer to the newly allocated set structure or NULL on error.
+ */
struct ptlrpc_request_set *ptlrpc_prep_set(void)
{
struct ptlrpc_request_set *set;
OBD_ALLOC(set, sizeof *set);
if (!set)
RETURN(NULL);
+ cfs_atomic_set(&set->set_refcount, 1);
CFS_INIT_LIST_HEAD(&set->set_requests);
cfs_waitq_init(&set->set_waitq);
- set->set_remaining = 0;
+ cfs_atomic_set(&set->set_new_count, 0);
+ cfs_atomic_set(&set->set_remaining, 0);
cfs_spin_lock_init(&set->set_new_req_lock);
CFS_INIT_LIST_HEAD(&set->set_new_requests);
CFS_INIT_LIST_HEAD(&set->set_cblist);
RETURN(set);
}
-/* Finish with this set; opposite of prep_set. */
+/**
+ * Wind down and free request set structure previously allocated with
+ * ptlrpc_prep_set.
+ * Ensures that all requests on the set have completed and removes
+ * all requests from the request list in the set.
+ * If any unsent requests happen to be on the list, pretends that they got
+ * an error in flight and calls their completion handlers.
+ */
void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
{
cfs_list_t *tmp;
ENTRY;
/* Requests on the set should either all be completed, or all be new */
- expected_phase = (set->set_remaining == 0) ?
+ expected_phase = (cfs_atomic_read(&set->set_remaining) == 0) ?
RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
cfs_list_for_each (tmp, &set->set_requests) {
struct ptlrpc_request *req =
n++;
}
- LASSERTF(set->set_remaining == 0 || set->set_remaining == n, "%d / %d\n",
- set->set_remaining, n);
+ LASSERTF(cfs_atomic_read(&set->set_remaining) == 0 ||
+ cfs_atomic_read(&set->set_remaining) == n, "%d / %d\n",
+ cfs_atomic_read(&set->set_remaining), n);
cfs_list_for_each_safe(tmp, next, &set->set_requests) {
struct ptlrpc_request *req =
if (req->rq_phase == RQ_PHASE_NEW) {
ptlrpc_req_interpret(NULL, req, -EBADR);
- set->set_remaining--;
+ cfs_atomic_dec(&set->set_remaining);
}
+ cfs_spin_lock(&req->rq_lock);
req->rq_set = NULL;
+ req->rq_invalid_rqset = 0;
+ cfs_spin_unlock(&req->rq_lock);
+
ptlrpc_req_finished (req);
}
- LASSERT(set->set_remaining == 0);
+ LASSERT(cfs_atomic_read(&set->set_remaining) == 0);
- OBD_FREE(set, sizeof(*set));
+ ptlrpc_reqset_put(set);
EXIT;
}
+/**
+ * Add a callback function \a fn to the set.
+ * This function will be called when all requests on this set are completed.
+ * The function will be passed the \a data argument.
+ */
int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
set_interpreter_func fn, void *data)
{
RETURN(0);
}
+/**
+ * Add a new request to the general purpose request set.
+ * Assumes request reference from the caller.
+ */
void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
struct ptlrpc_request *req)
{
+ LASSERT(cfs_list_empty(&req->rq_set_chain));
+
/* The set takes over the caller's request reference */
cfs_list_add_tail(&req->rq_set_chain, &set->set_requests);
req->rq_set = set;
- set->set_remaining++;
+ cfs_atomic_inc(&set->set_remaining);
+ req->rq_queued_time = cfs_time_current();
}
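+/**
+ * A minimal sketch of using a general purpose set, assuming req1 and req2 are
+ * requests the caller has prepared and holds references on (the set takes
+ * those references over):
+ * \code
+ *	struct ptlrpc_request_set *set;
+ *	int rc;
+ *
+ *	set = ptlrpc_prep_set();
+ *	if (set == NULL)
+ *		return -ENOMEM;
+ *	ptlrpc_set_add_req(set, req1);
+ *	ptlrpc_set_add_req(set, req2);
+ *	rc = ptlrpc_set_wait(set);
+ *	ptlrpc_set_destroy(set);
+ * \endcode
+ * ptlrpc_set_wait() sends anything still unsent and blocks until every
+ * request completes; ptlrpc_set_destroy() then drops the per-request
+ * references the set took over.
+ */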
/**
- * Lock so many callers can add things, the context that owns the set
- * is supposed to notice these and move them into the set proper.
+ * Add a request to a request set with a dedicated server thread
+ * and wake the thread for any necessary processing.
+ * Currently only used for ptlrpcd.
*/
-int ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
+void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
struct ptlrpc_request *req)
{
struct ptlrpc_request_set *set = pc->pc_set;
+ int count, i;
- /*
- * Let caller know that we stopped and will not handle this request.
- * It needs to take care itself of request.
- */
- if (cfs_test_bit(LIOD_STOP, &pc->pc_flags))
- return -EALREADY;
+ LASSERT(req->rq_set == NULL);
+ LASSERT(cfs_test_bit(LIOD_STOP, &pc->pc_flags) == 0);
cfs_spin_lock(&set->set_new_req_lock);
/*
* The set takes over the caller's request reference.
*/
- cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
req->rq_set = set;
+ req->rq_queued_time = cfs_time_current();
+ cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
+ count = cfs_atomic_inc_return(&set->set_new_count);
cfs_spin_unlock(&set->set_new_req_lock);
- cfs_waitq_signal(&set->set_waitq);
- return 0;
+ /* Only need to call wakeup once for the first entry. */
+ if (count == 1) {
+ cfs_waitq_signal(&set->set_waitq);
+
+ /* XXX: It may be unnecessary to wake up all the partners. But to
+ * guarantee the async RPC can be processed ASAP, we have
+ * no better choice. It may be fixed in the future. */
+ for (i = 0; i < pc->pc_npartners; i++)
+ cfs_waitq_signal(&pc->pc_partners[i]->pc_set->set_waitq);
+ }
}
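+/**
+ * A minimal sketch of how requests normally reach this path: a caller sets an
+ * interpret callback and hands its request reference to ptlrpcd, which queues
+ * the request through ptlrpc_set_add_new_req(); the callback name and policy
+ * value below are illustrative:
+ * \code
+ *	req->rq_interpret_reply = my_async_interpret;
+ *	ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
+ * \endcode
+ */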
-/*
+/**
* Based on the current state of the import, determine if the request
* can be sent, is an error, or should be delayed.
*
} else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
DEBUG_REQ(D_ERROR, req, "IMP_CLOSED ");
*status = -EIO;
+ } else if (ptlrpc_send_limit_expired(req)) {
+ /* probably doesn't need to be a D_ERROR after initial testing */
+ DEBUG_REQ(D_ERROR, req, "send limit expired ");
+ *status = -EIO;
} else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
imp->imp_state == LUSTRE_IMP_CONNECTING) {
/* allow CONNECT even if import is invalid */ ;
DEBUG_REQ(D_ERROR, req, "invalidate in flight");
*status = -EIO;
}
- } else if ((imp->imp_invalid && (!imp->imp_recon_bk)) ||
- imp->imp_obd->obd_no_recov) {
- /* If the import has been invalidated (such as by an OST
- * failure), and if the import(MGC) tried all of its connection
- * list (Bug 13464), the request must fail with -ESHUTDOWN.
- * This indicates the requests should be discarded; an -EIO
- * may result in a resend of the request. */
+ } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
if (!imp->imp_deactive)
DEBUG_REQ(D_ERROR, req, "IMP_INVALID");
*status = -ESHUTDOWN; /* bz 12940 */
RETURN(delay);
}
-/* Conditionally suppress specific console messages */
+/**
+ * Decide if the error message regarding provided request \a req
+ * should be printed to the console or not.
+ * Makes its decision based on request status and other properties.
+ * Returns 1 to print the error on the system console or 0 if not.
+ */
static int ptlrpc_console_allow(struct ptlrpc_request *req)
{
__u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
return 1;
}
+/**
+ * Check request processing status.
+ * Returns the status.
+ */
static int ptlrpc_check_status(struct ptlrpc_request *req)
{
int err;
}
/**
- * save pre-versions for replay
+ * Save pre-versions of objects into request for replay.
+ * Versions are obtained from the server reply.
+ * Used for VBR.
*/
static void ptlrpc_save_versions(struct ptlrpc_request *req)
{
/**
* Callback function called when client receives RPC reply for \a req.
+ * Returns 0 on success or error code.
+ * The return value will be assigned to req->rq_status by the caller
+ * as request processing status.
+ * This function also decides if the request needs to be saved for later replay.
*/
static int after_reply(struct ptlrpc_request *req)
{
LASSERT(!req->rq_receiving_reply && !req->rq_must_unlink);
if (req->rq_reply_truncate) {
- if (req->rq_no_resend) {
+ if (ptlrpc_no_resend(req)) {
DEBUG_REQ(D_ERROR, req, "reply buffer overflow,"
" expected: %d, actual size: %d",
req->rq_nob_received, req->rq_repbuf_len);
}
if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
- OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, obd_fail_val);
+ CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, cfs_fail_val);
ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
ptlrpc_at_adj_net_latency(req,
lustre_msg_get_service_time(req->rq_repmsg));
lustre_msg_get_last_committed(req->rq_repmsg);
}
ptlrpc_free_committed(imp);
+
+ if (req->rq_transno > imp->imp_peer_committed_transno)
+ ptlrpc_pinger_commit_expected(imp);
+
cfs_spin_unlock(&imp->imp_lock);
}
RETURN(rc);
}
+/**
+ * Helper function to send request \a req over the network for the first time.
+ * Also adjusts request phase.
+ * Returns 0 on success or error code.
+ */
static int ptlrpc_send_new_req(struct ptlrpc_request *req)
{
struct obd_import *imp;
RETURN(0);
}
-/* this sends any unsent RPCs in @set and returns TRUE if all are sent */
+/**
+ * This sends any unsent RPCs in \a set and returns 1 if all are sent
+ * and no more replies are expected.
+ * (it is possible to get fewer replies than requests sent, e.g. due to
+ * timed out requests or requests that we had trouble sending out)
+ */
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
{
cfs_list_t *tmp;
int force_timer_recalc = 0;
ENTRY;
- if (set->set_remaining == 0)
+ if (cfs_atomic_read(&set->set_remaining) == 0)
RETURN(1);
cfs_list_for_each(tmp, &set->set_requests) {
if (ptlrpc_client_recv_or_unlink(req) ||
ptlrpc_client_bulk_active(req))
continue;
+ /* If the request is not allowed to be resent, fail it now. */
+ if (req->rq_no_resend) {
+ if (req->rq_status == 0)
+ req->rq_status = -EIO;
+ ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+ GOTO(interpret, req->rq_status);
+ } else {
+ continue;
+ }
}
if (req->rq_err) {
cfs_spin_unlock(&imp->imp_lock);
GOTO(interpret, req->rq_status);
}
- if (req->rq_no_resend && !req->rq_wait_ctx) {
+ if (ptlrpc_no_resend(req) && !req->rq_wait_ctx) {
req->rq_status = -ENOTCONN;
ptlrpc_rqphase_move(req,
RQ_PHASE_INTERPRET);
req->rq_waiting = 0;
cfs_spin_unlock(&req->rq_lock);
- if (req->rq_timedout||req->rq_resend) {
+ if (req->rq_timedout || req->rq_resend) {
/* This is re-sending anyway,
* let's mark req as resend. */
cfs_spin_lock(&req->rq_lock);
/* This moves to "unregistering" phase, and we need to wait for
* reply unlink. */
- if (!unregistered && !ptlrpc_unregister_reply(req, 1))
+ if (!unregistered && !ptlrpc_unregister_reply(req, 1)) {
+ /* start async bulk unlink too */
+ ptlrpc_unregister_bulk(req, 1);
continue;
+ }
if (!ptlrpc_unregister_bulk(req, 1))
continue;
}
cfs_spin_unlock(&imp->imp_lock);
- set->set_remaining--;
+ cfs_atomic_dec(&set->set_remaining);
cfs_waitq_broadcast(&imp->imp_recovery_waitq);
}
/* If we hit an error, we want to recover promptly. */
- RETURN(set->set_remaining == 0 || force_timer_recalc);
+ RETURN(cfs_atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
}
-/* Return 1 if we should give up, else 0 */
+/**
+ * Time out request \a req. If \a async_unlink is set, do not wait
+ * until LNet actually confirms network buffer unlinking.
+ * Return 1 if we should give up further retrying attempts or 0 otherwise.
+ */
int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
{
struct obd_import *imp = req->rq_import;
req->rq_timedout = 1;
cfs_spin_unlock(&req->rq_lock);
- DEBUG_REQ(req->rq_fake ? D_INFO : D_WARNING, req,
- "Request x"LPU64" sent from %s to NID %s "CFS_DURATION_T"s "
- "ago has %s ("CFS_DURATION_T"s prior to deadline).\n",
- req->rq_xid, imp ? imp->imp_obd->obd_name : "<?>",
- imp ? libcfs_nid2str(imp->imp_connection->c_peer.nid) : "<?>",
- cfs_time_sub(cfs_time_current_sec(), req->rq_sent),
- req->rq_net_err ? "failed due to network error" : "timed out",
- cfs_time_sub(req->rq_deadline, req->rq_sent));
+ DEBUG_REQ(req->rq_fake ? D_INFO : D_WARNING, req, "Request "
+ " sent has %s: [sent "CFS_DURATION_T"/"
+ "real "CFS_DURATION_T"]",
+ req->rq_net_err ? "failed due to network error" :
+ ((req->rq_real_sent == 0 ||
+ cfs_time_before(req->rq_real_sent, req->rq_sent) ||
+ cfs_time_aftereq(req->rq_real_sent, req->rq_deadline)) ?
+ "timed out for sent delay" : "timed out for slow reply"),
+ req->rq_sent, req->rq_real_sent);
if (imp != NULL && obd_debug_peer_on_timeout)
LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);
/* if a request can't be resent we can't wait for an answer after
the timeout */
- if (req->rq_no_resend) {
+ if (ptlrpc_no_resend(req)) {
DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
rc = 1;
}
RETURN(rc);
}
+/**
+ * Time out all uncompleted requests in the request set pointed to by \a data.
+ * Callback used when waiting on sets with l_wait_event.
+ * Always returns 1.
+ */
int ptlrpc_expired_set(void *data)
{
struct ptlrpc_request_set *set = data;
RETURN(1);
}
+/**
+ * Sets rq_intr flag in \a req under spinlock.
+ */
void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
{
cfs_spin_lock(&req->rq_lock);
cfs_spin_unlock(&req->rq_lock);
}
+/**
+ * Interrupts (sets the interrupted flag on) all uncompleted requests in
+ * the set \a data. Callback for l_wait_event for interruptible waits.
+ */
void ptlrpc_interrupted_set(void *data)
{
struct ptlrpc_request_set *set = data;
cfs_list_t *tmp;
LASSERT(set != NULL);
- CERROR("INTERRUPTED SET %p\n", set);
+ CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
cfs_list_for_each(tmp, &set->set_requests) {
struct ptlrpc_request *req =
RETURN(timeout);
}
+/**
+ * Send all unsent requests from the set and then wait until all
+ * requests in the set complete (either get a reply, time out, get an
+ * error or otherwise be interrupted).
+ * Returns 0 on success or error code otherwise.
+ */
int ptlrpc_set_wait(struct ptlrpc_request_set *set)
{
cfs_list_t *tmp;
rc = l_wait_event(set->set_waitq, ptlrpc_check_set(NULL, set), &lwi);
+ /* LU-769 - if we ignored the signal because it was already
+ * pending when we started, we need to handle it now or we risk
+ * it being ignored forever */
+ if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
+ cfs_signal_pending()) {
+ cfs_sigset_t blocked_sigs =
+ cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
+
+ /* In fact we only interrupt for the "fatal" signals
+ * like SIGINT or SIGKILL. We still ignore less
+ * important signals since ptlrpc set is not easily
+ * reentrant from userspace again */
+ if (cfs_signal_pending())
+ ptlrpc_interrupted_set(set);
+ cfs_block_sigs(blocked_sigs);
+ }
+
LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
/* -EINTR => all requests have been flagged rq_intr so next
* EINTR.
* I don't really care if we go once more round the loop in
* the error cases -eeb. */
- } while (rc != 0 || set->set_remaining != 0);
+ if (rc == 0 && cfs_atomic_read(&set->set_remaining) == 0) {
+ cfs_list_for_each(tmp, &set->set_requests) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
+ cfs_spin_lock(&req->rq_lock);
+ req->rq_invalid_rqset = 1;
+ cfs_spin_unlock(&req->rq_lock);
+ }
+ }
+ } while (rc != 0 || cfs_atomic_read(&set->set_remaining) != 0);
- LASSERT(set->set_remaining == 0);
+ LASSERT(cfs_atomic_read(&set->set_remaining) == 0);
rc = 0;
cfs_list_for_each(tmp, &set->set_requests) {
RETURN(rc);
}
+/**
+ * Helper function for request freeing.
+ * Called when the request reference count reaches zero and the request
+ * needs to be freed.
+ * Removes the request from all sorts of sending/replay lists it might be on,
+ * frees network buffers if any are present.
+ * If \a locked is set, that means the caller is already holding import
+ * imp_lock and so we no longer need to reobtain it (for certain list
+ * manipulations).
+ */
static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
{
ENTRY;
}
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
+/**
+ * Drop one request reference. Must be called with import imp_lock held.
+ * When the reference count drops to zero, the request is freed.
+ */
void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
{
LASSERT_SPIN_LOCKED(&request->rq_import->imp_lock);
(void)__ptlrpc_req_finished(request, 1);
}
+/**
+ * Helper function.
+ * Drops one reference count for request \a request.
+ * \a locked set indicates that the caller holds import imp_lock.
+ * Frees the request when the reference count reaches zero.
+ */
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
{
ENTRY;
RETURN(0);
}
+/**
+ * Drops one reference count for a request.
+ */
void ptlrpc_req_finished(struct ptlrpc_request *request)
{
__ptlrpc_req_finished(request, 0);
}
+/**
+ * Returns the xid of \a request.
+ */
__u64 ptlrpc_req_xid(struct ptlrpc_request *request)
{
return request->rq_xid;
}
EXPORT_SYMBOL(ptlrpc_req_xid);
-/* Disengage the client's reply buffer from the network
+/**
+ * Disengage the client's reply buffer from the network
* NB does _NOT_ unregister any client-side bulk.
* IDEMPOTENT, but _not_ safe against concurrent callers.
* The request owner (i.e. the thread doing the I/O) must call...
+ * Returns 0 on success or 1 if unregistering cannot be done.
*/
int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
{
RETURN(0);
}
-/* caller must hold imp->imp_lock */
+/**
+ * Iterates through the replay_list on import and prunes
+ * all requests that have a transno smaller than last_committed for the
+ * import and don't have rq_replay set.
+ * Since requests are sorted in transno order, stops when meeting the first
+ * transno bigger than last_committed.
+ * Caller must hold imp->imp_lock.
+ */
void ptlrpc_free_committed(struct obd_import *imp)
{
cfs_list_t *tmp, *saved;
if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
imp->imp_generation == imp->imp_last_generation_checked) {
- CDEBUG(D_RPCTRACE, "%s: skip recheck: last_committed "LPU64"\n",
+ CDEBUG(D_INFO, "%s: skip recheck: last_committed "LPU64"\n",
imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
EXIT;
return;
break;
}
- DEBUG_REQ(D_RPCTRACE, req, "commit (last_committed "LPU64")",
+ DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")",
imp->imp_peer_committed_transno);
free_req:
cfs_spin_lock(&req->rq_lock);
return;
}
+/**
+ * Schedule previously sent request for resend.
+ * For bulk requests we assign a new xid (to avoid problems with
+ * lost replies and therefore several transfers landing into the same buffer
+ * from different sending attempts).
+ */
void ptlrpc_resend_req(struct ptlrpc_request *req)
{
DEBUG_REQ(D_HA, req, "going to resend");
cfs_spin_unlock(&req->rq_lock);
}
+/**
+ * Grab additional reference on a request \a req
+ */
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
{
ENTRY;
RETURN(req);
}
+/**
+ * Add a request to the import's replay_list.
+ * Must be called under imp_lock.
+ */
void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
struct obd_import *imp)
{
cfs_list_add(&req->rq_replay_list, &imp->imp_replay_list);
}
+/**
+ * Send request and wait until it completes.
+ * Returns request processing status.
+ */
int ptlrpc_queue_wait(struct ptlrpc_request *req)
{
struct ptlrpc_request_set *set;
int praa_old_status;
};
+/**
+ * Callback used for replayed requests reply processing.
+ * In case of a successful reply, calls the registered request replay callback.
+ * In case of error, restarts the replay process.
+ */
static int ptlrpc_replay_interpret(const struct lu_env *env,
struct ptlrpc_request *req,
void * data, int rc)
imp->imp_vbr_failed = 1;
imp->imp_no_lock_replay = 1;
cfs_spin_unlock(&imp->imp_lock);
+ lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
} else {
/** The transno had better not change over replay. */
LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
cfs_spin_unlock(&imp->imp_lock);
LASSERT(imp->imp_last_replay_transno);
+ /* transaction number shouldn't be bigger than the latest replayed */
+ if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
+ DEBUG_REQ(D_ERROR, req,
+ "Reported transno "LPU64" is bigger than the "
+ "replayed one: "LPU64, req->rq_transno,
+ lustre_msg_get_transno(req->rq_reqmsg));
+ GOTO(out, rc = -EINVAL);
+ }
+
DEBUG_REQ(D_HA, req, "got rep");
/* let the callback do fixups, possibly including in the request */
* Errors while replay can set transno to 0, but
* imp_last_replay_transno shouldn't be set to 0 anyway
*/
- if (req->rq_transno > 0) {
- cfs_spin_lock(&imp->imp_lock);
- LASSERT(req->rq_transno <= imp->imp_last_replay_transno);
- imp->imp_last_replay_transno = req->rq_transno;
- cfs_spin_unlock(&imp->imp_lock);
- } else
+ if (req->rq_transno == 0)
CERROR("Transno is 0 during replay!\n");
+
/* continue with recovery */
rc = ptlrpc_import_recovery_state_machine(imp);
out:
if (rc != 0)
/* this replay failed, so restart recovery */
- ptlrpc_connect_import(imp, NULL);
+ ptlrpc_connect_import(imp);
RETURN(rc);
}
+/**
+ * Prepares and queues request for replay.
+ * Adds it to ptlrpcd queue for actual sending.
+ * Returns 0 on success.
+ */
int ptlrpc_replay_req(struct ptlrpc_request *req)
{
struct ptlrpc_replay_async_args *aa;
ENTRY;
LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
- /* Not handling automatic bulk replay yet (or ever?) */
- LASSERT(req->rq_bulk == NULL);
LASSERT (sizeof (*aa) <= sizeof (req->rq_async_args));
aa = ptlrpc_req_async_args(req);
cfs_atomic_inc(&req->rq_import->imp_replay_inflight);
ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
- ptlrpcd_add_req(req, PSCOPE_OTHER);
+ ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
RETURN(0);
}
+/**
+ * Aborts all in-flight requests on the import \a imp sending and delayed lists.
+ */
void ptlrpc_abort_inflight(struct obd_import *imp)
{
cfs_list_t *tmp, *n;
EXIT;
}
+/**
+ * Abort all uncompleted requests in request set \a set
+ */
void ptlrpc_abort_set(struct ptlrpc_request_set *set)
{
cfs_list_t *tmp, *pos;
static __u64 ptlrpc_last_xid;
static cfs_spinlock_t ptlrpc_last_xid_lock;
-/* Initialize the XID for the node. This is common among all requests on
+/**
+ * Initialize the XID for the node. This is common among all requests on
* this node, and only requires the property that it is monotonically
* increasing. It does not need to be sequential. Since this is also used
* as the RDMA match bits, it is important that a single client NOT have
cfs_spin_lock_init(&ptlrpc_last_xid_lock);
if (now < YEAR_2004) {
- ll_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
+ cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
ptlrpc_last_xid >>= 2;
ptlrpc_last_xid |= (1ULL << 61);
} else {
}
}
+/**
+ * Increase xid and return the resulting new value to the caller.
+ */
__u64 ptlrpc_next_xid(void)
{
__u64 tmp;
return tmp;
}
+/**
+ * Get a glimpse at what the next xid value might be.
+ * Returns the possible next xid.
+ */
__u64 ptlrpc_sample_next_xid(void)
{
#if BITS_PER_LONG == 32