-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include "ptlrpc_internal.h"
+static int ptlrpc_send_new_req(struct ptlrpc_request *req);
+
/**
* Initialize passed in client structure \a cl.
*/
cl->cli_reply_portal = rep_portal;
cl->cli_name = name;
}
+EXPORT_SYMBOL(ptlrpc_init_client);
/**
 * Return PortalRPC connection for remote uuid \a uuid
err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
if (err != 0) {
- CDEBUG(D_NETERROR, "cannot find peer %s!\n", uuid->uuid);
+ CNETERR("cannot find peer %s!\n", uuid->uuid);
return NULL;
}
return c;
}
+EXPORT_SYMBOL(ptlrpc_uuid_to_connection);
/**
* Allocate and initialize new bulk descriptor
* Returns pointer to the descriptor or NULL on error.
*/
-static inline struct ptlrpc_bulk_desc *new_bulk(int npages, int type, int portal)
+struct ptlrpc_bulk_desc *new_bulk(int npages, int type, int portal)
{
struct ptlrpc_bulk_desc *desc;
return desc;
}
-
-/**
- * Prepare bulk descriptor for specified incoming request \a req that
- * can fit \a npages * pages. \a type is bulk type. \a portal is where
- * the bulk to be sent. Used on server-side after request was already
- * received.
- * Returns pointer to newly allocatrd initialized bulk descriptor or NULL on
- * error.
- */
-struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
- int npages, int type, int portal)
-{
- struct obd_export *exp = req->rq_export;
- struct ptlrpc_bulk_desc *desc;
-
- ENTRY;
- LASSERT(type == BULK_PUT_SOURCE || type == BULK_GET_SINK);
-
- desc = new_bulk(npages, type, portal);
- if (desc == NULL)
- RETURN(NULL);
-
- desc->bd_export = class_export_get(exp);
- desc->bd_req = req;
-
- desc->bd_cbid.cbid_fn = server_bulk_callback;
- desc->bd_cbid.cbid_arg = desc;
-
- /* NB we don't assign rq_bulk here; server-side requests are
- * re-used, and the handler frees the bulk desc explicitly. */
-
- return desc;
-}
+EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
/**
* Add a page \a page to the bulk descriptor \a desc.
desc->bd_nob += len;
+ cfs_page_pin(page);
ptlrpc_add_bulk_page(desc, page, pageoffset, len);
}
+EXPORT_SYMBOL(ptlrpc_prep_bulk_page);
/**
* Uninitialize and free bulk descriptor \a desc.
*/
void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
{
+ int i;
ENTRY;
LASSERT(desc != NULL);
else
class_import_put(desc->bd_import);
+ for (i = 0; i < desc->bd_iov_count; i++)
+ cfs_page_unpin(desc->bd_iov[i].kiov_page);
+
OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
bd_iov[desc->bd_max_iov]));
EXIT;
}
+EXPORT_SYMBOL(ptlrpc_free_bulk);
/**
* Set server timelimit for this req, i.e. how long are we willing to wait
reqmsg*/
lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
}
+EXPORT_SYMBOL(ptlrpc_at_set_req_timeout);
/* Adjust max service estimate based on server value */
static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
cfs_list_del(&req->rq_list);
LASSERT(req->rq_reqbuf);
LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
- OBD_FREE(req->rq_reqbuf, pool->prp_rq_size);
+ OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
OBD_FREE(req, sizeof(*req));
}
cfs_spin_unlock(&pool->prp_lock);
OBD_FREE(pool, sizeof(*pool));
}
+EXPORT_SYMBOL(ptlrpc_free_rq_pool);
/**
* Allocates, initializes and adds \a num_rq requests to the pool \a pool
OBD_ALLOC(req, sizeof(struct ptlrpc_request));
if (!req)
return;
- OBD_ALLOC_GFP(msg, size, CFS_ALLOC_STD);
+ OBD_ALLOC_LARGE(msg, size);
if (!msg) {
OBD_FREE(req, sizeof(struct ptlrpc_request));
return;
cfs_spin_unlock(&pool->prp_lock);
return;
}
+EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
/**
* Create and initialize new request pool with given attributes:
}
return pool;
}
+EXPORT_SYMBOL(ptlrpc_init_rq_pool);
/**
* Fetches one request from pool \a pool
int ptlrpc_request_pack(struct ptlrpc_request *request,
__u32 version, int opcode)
{
- return ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
-}
+ int rc;
+ rc = ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
+ if (rc)
+ return rc;
+
+ /* Some old 1.8 clients (< 1.8.7) will LASSERT that the size of the
+ * ptlrpc_body sent from the server equals the local ptlrpc_body size,
+ * so we have to send the old ptlrpc_body to keep interoperability
+ * with these clients.
+ *
+ * Only three kinds of server->client RPCs so far:
+ * - LDLM_BL_CALLBACK
+ * - LDLM_CP_CALLBACK
+ * - LDLM_GL_CALLBACK
+ *
+ * XXX This should be removed whenever we drop interoperability with
+ * these old clients.
+ */
+ if (opcode == LDLM_BL_CALLBACK || opcode == LDLM_CP_CALLBACK ||
+ opcode == LDLM_GL_CALLBACK)
+ req_capsule_shrink(&request->rq_pill, &RMF_PTLRPC_BODY,
+ sizeof(struct ptlrpc_body_v2), RCL_CLIENT);
+
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpc_request_pack);
/**
* Helper function to allocate new request on import \a imp
{
return ptlrpc_request_alloc_internal(imp, NULL, format);
}
+EXPORT_SYMBOL(ptlrpc_request_alloc);
/**
* Allocate new request structure for import \a imp from pool \a pool and
{
return ptlrpc_request_alloc_internal(imp, pool, format);
}
+EXPORT_SYMBOL(ptlrpc_request_alloc_pool);
/**
* For requests not from pool, free memory of the request structure.
else
OBD_FREE_PTR(request);
}
+EXPORT_SYMBOL(ptlrpc_request_free);
/**
 * Allocate new request for operation \a opcode and immediately pack it for
}
return req;
}
+EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
/**
 * Prepare request (fetched from pool \a pool if not NULL) on import \a imp
}
return request;
}
+EXPORT_SYMBOL(ptlrpc_prep_req_pool);
/**
* Same as ptlrpc_prep_req_pool, but without pool
return ptlrpc_prep_req_pool(imp, version, opcode, count, lengths, bufs,
NULL);
}
+EXPORT_SYMBOL(ptlrpc_prep_req);
/**
* Allocate "fake" request that would not be sent anywhere in the end.
RETURN(request);
}
+EXPORT_SYMBOL(ptlrpc_prep_fakereq);
/**
* Indicate that processing of "fake" request is finished.
*/
void ptlrpc_fakereq_finished(struct ptlrpc_request *req)
{
- /* if we kill request before timeout - need adjust counter */
- if (req->rq_phase == RQ_PHASE_RPC) {
- struct ptlrpc_request_set *set = req->rq_set;
+ struct ptlrpc_request_set *set = req->rq_set;
+ int wakeup = 0;
- if (set)
- cfs_atomic_dec(&set->set_remaining);
- }
+ /* hold a ref on the request to prevent others (ptlrpcd) from freeing it */
+ ptlrpc_request_addref(req);
+ cfs_list_del_init(&req->rq_list);
+
+ /* if we kill the request before timeout, we need to adjust the counter */
+ if (req->rq_phase == RQ_PHASE_RPC && set != NULL &&
+ cfs_atomic_dec_and_test(&set->set_remaining))
+ wakeup = 1;
ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
- cfs_list_del_init(&req->rq_list);
+
+ /* Only need to call wakeup once when the set becomes empty. */
+ if (wakeup)
+ cfs_waitq_signal(&set->set_waitq);
+ ptlrpc_req_finished(req);
}
+EXPORT_SYMBOL(ptlrpc_fakereq_finished);
/**
* Allocate and initialize new request set structure.
*/
struct ptlrpc_request_set *ptlrpc_prep_set(void)
{
- struct ptlrpc_request_set *set;
+ struct ptlrpc_request_set *set;
+
+ ENTRY;
+ OBD_ALLOC(set, sizeof *set);
+ if (!set)
+ RETURN(NULL);
+ cfs_atomic_set(&set->set_refcount, 1);
+ CFS_INIT_LIST_HEAD(&set->set_requests);
+ cfs_waitq_init(&set->set_waitq);
+ cfs_atomic_set(&set->set_new_count, 0);
+ cfs_atomic_set(&set->set_remaining, 0);
+ cfs_spin_lock_init(&set->set_new_req_lock);
+ CFS_INIT_LIST_HEAD(&set->set_new_requests);
+ CFS_INIT_LIST_HEAD(&set->set_cblist);
+ set->set_max_inflight = UINT_MAX;
+ set->set_producer = NULL;
+ set->set_producer_arg = NULL;
+ set->set_rc = 0;
+
+ RETURN(set);
+}
+EXPORT_SYMBOL(ptlrpc_prep_set);
- ENTRY;
- OBD_ALLOC(set, sizeof *set);
- if (!set)
- RETURN(NULL);
- CFS_INIT_LIST_HEAD(&set->set_requests);
- cfs_waitq_init(&set->set_waitq);
- cfs_atomic_set(&set->set_remaining, 0);
- cfs_spin_lock_init(&set->set_new_req_lock);
- CFS_INIT_LIST_HEAD(&set->set_new_requests);
- CFS_INIT_LIST_HEAD(&set->set_cblist);
+/**
+ * Allocate and initialize new request set structure with flow control
+ * extension. This extension allows controlling the number of requests
+ * in flight for the whole set. A callback function to generate requests
+ * must be provided, and the request set will keep the number of requests
+ * sent over the wire at or below \a max_inflight.
+ * Returns a pointer to the newly allocated set structure or NULL on error.
+ */
+struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
+ void *arg)
+{
+ struct ptlrpc_request_set *set;
+
+ set = ptlrpc_prep_set();
+ if (!set)
+ RETURN(NULL);
+
+ set->set_max_inflight = max;
+ set->set_producer = func;
+ set->set_producer_arg = arg;
- RETURN(set);
+ RETURN(set);
}
+EXPORT_SYMBOL(ptlrpc_prep_fcset);
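+
+/*
+ * A minimal usage sketch of the flow-control set (illustration only, not
+ * part of this change): example_producer, example_state and
+ * example_build_next_req are hypothetical names; only ptlrpc_prep_fcset(),
+ * ptlrpc_set_add_req(), ptlrpc_set_wait() and ptlrpc_set_destroy() are the
+ * real interface. The producer is invoked from ptlrpc_set_producer() until
+ * it returns -ENOENT, which keeps at most "max" RPCs in flight:
+ *
+ *	static int example_producer(struct ptlrpc_request_set *set, void *arg)
+ *	{
+ *		struct example_state *state = arg;
+ *		struct ptlrpc_request *req;
+ *
+ *		if (state->nr_left == 0)
+ *			return -ENOENT;		(no more RPCs to produce)
+ *
+ *		req = example_build_next_req(state);
+ *		if (req == NULL)
+ *			return -ENOENT;
+ *
+ *		state->nr_left--;
+ *		ptlrpc_set_add_req(set, req);	(sent straight away)
+ *		return 0;
+ *	}
+ *
+ *	set = ptlrpc_prep_fcset(8, example_producer, state);
+ *	if (set == NULL)
+ *		return -ENOMEM;
+ *	rc = ptlrpc_set_wait(set);
+ *	ptlrpc_set_destroy(set);
+ */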
/**
* Wind down and free request set structure previously allocated with
LASSERT(cfs_atomic_read(&set->set_remaining) == 0);
- OBD_FREE(set, sizeof(*set));
+ ptlrpc_reqset_put(set);
EXIT;
}
+EXPORT_SYMBOL(ptlrpc_set_destroy);
/**
* Add a callback function \a fn to the set.
RETURN(0);
}
+EXPORT_SYMBOL(ptlrpc_set_add_cb);
/**
* Add a new request to the general purpose request set.
void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
struct ptlrpc_request *req)
{
- /* The set takes over the caller's request reference */
- cfs_list_add_tail(&req->rq_set_chain, &set->set_requests);
- req->rq_set = set;
- cfs_atomic_inc(&set->set_remaining);
- req->rq_queued_time = cfs_time_current(); /* Where is the best place to set this? */
+ LASSERT(cfs_list_empty(&req->rq_set_chain));
+
+ /* The set takes over the caller's request reference */
+ cfs_list_add_tail(&req->rq_set_chain, &set->set_requests);
+ req->rq_set = set;
+ cfs_atomic_inc(&set->set_remaining);
+ req->rq_queued_time = cfs_time_current();
+
+ if (req->rq_reqmsg != NULL)
+ lustre_msg_set_jobid(req->rq_reqmsg, NULL);
+
+ if (set->set_producer != NULL)
+ /* If the request set has a producer callback, the RPC must be
+ * sent straight away */
+ ptlrpc_send_new_req(req);
}
+EXPORT_SYMBOL(ptlrpc_set_add_req);
/**
 * Add a request to a request set with a dedicated server thread
 * and wake the thread to do any necessary processing.
* Currently only used for ptlrpcd.
- * Returns 0 if succesful or non zero error code on error.
- * (the only possible error for now is if the dedicated server thread
- * is shutting down)
*/
-int ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
+void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
struct ptlrpc_request *req)
{
struct ptlrpc_request_set *set = pc->pc_set;
+ int count, i;
- /*
- * Let caller know that we stopped and will not handle this request.
- * It needs to take care itself of request.
- */
- if (cfs_test_bit(LIOD_STOP, &pc->pc_flags))
- return -EALREADY;
+ LASSERT(req->rq_set == NULL);
+ LASSERT(cfs_test_bit(LIOD_STOP, &pc->pc_flags) == 0);
cfs_spin_lock(&set->set_new_req_lock);
/*
* The set takes over the caller's request reference.
*/
- cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
req->rq_set = set;
+ req->rq_queued_time = cfs_time_current();
+ cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
+ count = cfs_atomic_inc_return(&set->set_new_count);
cfs_spin_unlock(&set->set_new_req_lock);
- cfs_waitq_signal(&set->set_waitq);
- return 0;
+ /* Only need to call wakeup once for the first entry. */
+ if (count == 1) {
+ cfs_waitq_signal(&set->set_waitq);
+
+ /* XXX: It may be unnecessary to wake up all the partners. But to
+ * guarantee the async RPC can be processed ASAP, we have
+ * no better choice. This may be fixed in the future. */
+ for (i = 0; i < pc->pc_npartners; i++)
+ cfs_waitq_signal(&pc->pc_partners[i]->pc_set->set_waitq);
+ }
}
+EXPORT_SYMBOL(ptlrpc_set_add_new_req);
/**
* Based on the current state of the import, determine if the request
} else if (imp->imp_state == LUSTRE_IMP_NEW) {
DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
*status = -EIO;
- LBUG();
} else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
DEBUG_REQ(D_ERROR, req, "IMP_CLOSED ");
*status = -EIO;
*/
static int ptlrpc_console_allow(struct ptlrpc_request *req)
{
- __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
+ __u32 opc;
int err;
+ /* Fake requests include no rq_reqmsg */
+ if (req->rq_fake)
+ return 0;
+
+ LASSERT(req->rq_reqmsg != NULL);
+ opc = lustre_msg_get_opc(req->rq_reqmsg);
+
/* Suppress particular reconnect errors which are to be expected. No
* errors are suppressed for the initial connection on an import */
if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) &&
RETURN(rc);
}
+ /* retry indefinitely on EINPROGRESS */
+ if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
+ ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
+ time_t now = cfs_time_current_sec();
+
+ DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
+ req->rq_resend = 1;
+ req->rq_nr_resend++;
+
+ /* Readjust the timeout for current conditions */
+ ptlrpc_at_set_req_timeout(req);
+ /* delay resend to give the server a chance to get ready.
+ * The delay is increased by 1s on every resend and is capped at
+ * the current request timeout (i.e. obd_timeout if AT is off,
+ * or AT service time x 125% + 5s, see at_est2timeout) */
+ if (req->rq_nr_resend > req->rq_timeout)
+ req->rq_sent = now + req->rq_timeout;
+ else
+ req->rq_sent = now + req->rq_nr_resend;
+ }
+
/*
* Security layer unwrap might ask resend this request.
*/
}
if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
- OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, obd_fail_val);
+ CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, cfs_fail_val);
ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
ptlrpc_at_adj_net_latency(req,
lustre_msg_get_service_time(req->rq_repmsg));
lustre_msg_get_last_committed(req->rq_repmsg);
}
ptlrpc_free_committed(imp);
+
+ if (req->rq_transno > imp->imp_peer_committed_transno)
+ ptlrpc_pinger_commit_expected(imp);
+
cfs_spin_unlock(&imp->imp_lock);
}
* Helper function to send request \a req over the network for the first time
* Also adjusts request phase.
* Returns 0 on success or error code.
- */
+ */
static int ptlrpc_send_new_req(struct ptlrpc_request *req)
{
- struct obd_import *imp;
+ struct obd_import *imp = req->rq_import;
int rc;
ENTRY;
LASSERT(req->rq_phase == RQ_PHASE_NEW);
- if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()))
+ if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
+ (!req->rq_generation_set ||
+ req->rq_import_generation == imp->imp_generation))
RETURN (0);
ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
- imp = req->rq_import;
cfs_spin_lock(&imp->imp_lock);
- req->rq_import_generation = imp->imp_generation;
+ if (!req->rq_generation_set)
+ req->rq_import_generation = imp->imp_generation;
if (ptlrpc_import_delay_req(imp, req, &rc)) {
cfs_spin_lock(&req->rq_lock);
RETURN(0);
}
+static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
+{
+ int remaining, rc;
+ ENTRY;
+
+ LASSERT(set->set_producer != NULL);
+
+ remaining = cfs_atomic_read(&set->set_remaining);
+
+ /* populate the ->set_requests list with requests until we
+ * reach the maximum number of RPCs in flight for this set */
+ while (cfs_atomic_read(&set->set_remaining) < set->set_max_inflight) {
+ rc = set->set_producer(set, set->set_producer_arg);
+ if (rc == -ENOENT) {
+ /* no more RPC to produce */
+ set->set_producer = NULL;
+ set->set_producer_arg = NULL;
+ RETURN(0);
+ }
+ }
+
+ RETURN((cfs_atomic_read(&set->set_remaining) - remaining));
+}
+
/**
* this sends any unsent RPCs in \a set and returns 1 if all are sent
* and no more replies are expected.
*/
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
{
- cfs_list_t *tmp;
+ cfs_list_t *tmp, *next;
int force_timer_recalc = 0;
ENTRY;
if (cfs_atomic_read(&set->set_remaining) == 0)
RETURN(1);
- cfs_list_for_each(tmp, &set->set_requests) {
+ cfs_list_for_each_safe(tmp, next, &set->set_requests) {
struct ptlrpc_request *req =
cfs_list_entry(tmp, struct ptlrpc_request,
rq_set_chain);
/* delayed send - skip */
if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
- continue;
+ continue;
+
+ /* delayed resend - skip */
+ if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
+ req->rq_sent > cfs_time_current_sec())
+ continue;
if (!(req->rq_phase == RQ_PHASE_RPC ||
req->rq_phase == RQ_PHASE_BULK ||
if (ptlrpc_client_recv_or_unlink(req) ||
ptlrpc_client_bulk_active(req))
continue;
+ /* If there is no need to resend, fail it now. */
+ if (req->rq_no_resend) {
+ if (req->rq_status == 0)
+ req->rq_status = -EIO;
+ ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+ GOTO(interpret, req->rq_status);
+ } else {
+ continue;
+ }
}
if (req->rq_err) {
* process the reply. Similarly if the RPC returned
* an error, and therefore the bulk will never arrive.
*/
- if (req->rq_bulk == NULL || req->rq_status != 0) {
+ if (req->rq_bulk == NULL || req->rq_status < 0) {
ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
GOTO(interpret, req->rq_status);
}
* was good after getting the REPLY for her GET or
* the ACK for her PUT. */
DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
- LBUG();
+ req->rq_status = -EIO;
}
ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
/* This moves to "unregistering" phase we need to wait for
* reply unlink. */
- if (!unregistered && !ptlrpc_unregister_reply(req, 1))
+ if (!unregistered && !ptlrpc_unregister_reply(req, 1)) {
+ /* start async bulk unlink too */
+ ptlrpc_unregister_bulk(req, 1);
continue;
+ }
if (!ptlrpc_unregister_bulk(req, 1))
continue;
ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
- CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:nid:"
- "opc %s:%s:%d:"LPU64":%s:%d\n", cfs_curproc_comm(),
- imp->imp_obd->obd_uuid.uuid,
- req->rq_reqmsg ? lustre_msg_get_status(req->rq_reqmsg):-1,
- req->rq_xid,
- libcfs_nid2str(imp->imp_connection->c_peer.nid),
- req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : -1);
+ CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
+ "Completed RPC pname:cluuid:pid:xid:nid:"
+ "opc %s:%s:%d:"LPU64":%s:%d\n",
+ cfs_curproc_comm(), imp->imp_obd->obd_uuid.uuid,
+ lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid),
+ lustre_msg_get_opc(req->rq_reqmsg));
cfs_spin_lock(&imp->imp_lock);
/* Request already may be not on sending or delaying list. This
cfs_atomic_dec(&set->set_remaining);
cfs_waitq_broadcast(&imp->imp_recovery_waitq);
+
+ if (set->set_producer) {
+ /* produce a new request if possible */
+ if (ptlrpc_set_producer(set) > 0)
+ force_timer_recalc = 1;
+
+ /* free the request that has just been completed
+ * in order not to pollute set->set_requests */
+ cfs_list_del_init(&req->rq_set_chain);
+ cfs_spin_lock(&req->rq_lock);
+ req->rq_set = NULL;
+ req->rq_invalid_rqset = 0;
+ cfs_spin_unlock(&req->rq_lock);
+
+ /* record rq_status to compute the final status later */
+ if (req->rq_status != 0)
+ set->set_rc = req->rq_status;
+ ptlrpc_req_finished(req);
+ }
}
/* If we hit an error, we want to recover promptly. */
RETURN(cfs_atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
}
+EXPORT_SYMBOL(ptlrpc_check_set);
/**
 * Time out request \a req. If \a async_unlink is set, that means do not wait
req->rq_timedout = 1;
cfs_spin_unlock(&req->rq_lock);
- DEBUG_REQ(req->rq_fake ? D_INFO : D_WARNING, req, "Request x"LPU64
- " sent from %s to NID %s has %s: [sent "CFS_DURATION_T"] "
- "[real_sent "CFS_DURATION_T"] [current "CFS_DURATION_T"] "
- "[deadline "CFS_DURATION_T"s] [delay "CFS_DURATION_T"s]",
- req->rq_xid, imp ? imp->imp_obd->obd_name : "<?>",
- imp ? libcfs_nid2str(imp->imp_connection->c_peer.nid) : "<?>",
+ DEBUG_REQ(req->rq_fake ? D_INFO : D_WARNING, req,
+ "Request sent has %s: [sent "CFS_DURATION_T"/"
+ "real "CFS_DURATION_T"]",
req->rq_net_err ? "failed due to network error" :
((req->rq_real_sent == 0 ||
cfs_time_before(req->rq_real_sent, req->rq_sent) ||
cfs_time_aftereq(req->rq_real_sent, req->rq_deadline)) ?
"timed out for sent delay" : "timed out for slow reply"),
- req->rq_sent, req->rq_real_sent, cfs_time_current_sec(),
- cfs_time_sub(req->rq_deadline, req->rq_sent),
- cfs_time_sub(cfs_time_current_sec(), req->rq_deadline));
+ req->rq_sent, req->rq_real_sent);
if (imp != NULL && obd_debug_peer_on_timeout)
LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);
*/
RETURN(1);
}
+EXPORT_SYMBOL(ptlrpc_expired_set);
/**
* Sets rq_intr flag in \a req under spinlock.
req->rq_intr = 1;
cfs_spin_unlock(&req->rq_lock);
}
+EXPORT_SYMBOL(ptlrpc_mark_interrupted);
/**
* Interrupts (sets interrupted flag) all uncompleted requests in
cfs_list_t *tmp;
LASSERT(set != NULL);
- CERROR("INTERRUPTED SET %p\n", set);
+ CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
cfs_list_for_each(tmp, &set->set_requests) {
struct ptlrpc_request *req =
ptlrpc_mark_interrupted(req);
}
}
+EXPORT_SYMBOL(ptlrpc_interrupted_set);
/**
* Get the smallest timeout in the set; this does NOT set a timeout.
if (req->rq_phase == RQ_PHASE_NEW)
deadline = req->rq_sent;
+ else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend)
+ deadline = req->rq_sent;
else
deadline = req->rq_sent + req->rq_timeout;
}
RETURN(timeout);
}
+EXPORT_SYMBOL(ptlrpc_set_next_timeout);
/**
 * Send all unsent requests from the set and then wait until all
int rc, timeout;
ENTRY;
+ if (set->set_producer)
+ (void)ptlrpc_set_producer(set);
+ else
+ cfs_list_for_each(tmp, &set->set_requests) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
+ if (req->rq_phase == RQ_PHASE_NEW)
+ (void)ptlrpc_send_new_req(req);
+ }
+
if (cfs_list_empty(&set->set_requests))
RETURN(0);
- cfs_list_for_each(tmp, &set->set_requests) {
- req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
- if (req->rq_phase == RQ_PHASE_NEW)
- (void)ptlrpc_send_new_req(req);
- }
-
do {
timeout = ptlrpc_set_next_timeout(set);
rc = l_wait_event(set->set_waitq, ptlrpc_check_set(NULL, set), &lwi);
+ /* LU-769 - if we ignored the signal because it was already
+ * pending when we started, we need to handle it now or we risk
+ * it being ignored forever */
+ if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
+ cfs_signal_pending()) {
+ cfs_sigset_t blocked_sigs =
+ cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
+
+ /* In fact we only interrupt for the "fatal" signals
+ * like SIGINT or SIGKILL. We still ignore less
+ * important signals since ptlrpc set is not easily
+ * reentrant from userspace again */
+ if (cfs_signal_pending())
+ ptlrpc_interrupted_set(set);
+ cfs_restore_sigs(blocked_sigs);
+ }
+
LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
/* -EINTR => all requests have been flagged rq_intr so next
LASSERT(cfs_atomic_read(&set->set_remaining) == 0);
- rc = 0;
+ rc = set->set_rc; /* rq_status of already freed requests if any */
cfs_list_for_each(tmp, &set->set_requests) {
req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
RETURN(rc);
}
+EXPORT_SYMBOL(ptlrpc_set_wait);
/**
 * Helper function for request freeing.
LASSERTF(cfs_list_empty(&request->rq_set_chain), "req %p\n", request);
LASSERTF(cfs_list_empty(&request->rq_exp_list), "req %p\n", request);
LASSERTF(!request->rq_replay, "req %p\n", request);
- LASSERT(request->rq_cli_ctx || request->rq_fake);
req_capsule_fini(&request->rq_pill);
LASSERT_SPIN_LOCKED(&request->rq_import->imp_lock);
(void)__ptlrpc_req_finished(request, 1);
}
+EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock);
/**
* Helper function
{
__ptlrpc_req_finished(request, 0);
}
+EXPORT_SYMBOL(ptlrpc_req_finished);
/**
* Returns xid of a \a request
}
RETURN(0);
}
+EXPORT_SYMBOL(ptlrpc_unregister_reply);
/**
* Iterates through replay_list on import and prunes
if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
imp->imp_generation == imp->imp_last_generation_checked) {
- CDEBUG(D_RPCTRACE, "%s: skip recheck: last_committed "LPU64"\n",
+ CDEBUG(D_INFO, "%s: skip recheck: last_committed "LPU64"\n",
imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
EXIT;
return;
break;
}
- DEBUG_REQ(D_RPCTRACE, req, "commit (last_committed "LPU64")",
+ DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")",
imp->imp_peer_committed_transno);
free_req:
cfs_spin_lock(&req->rq_lock);
EXIT;
return;
}
+EXPORT_SYMBOL(ptlrpc_cleanup_client);
/**
* Schedule previously sent request for resend.
ptlrpc_client_wake_req(req);
cfs_spin_unlock(&req->rq_lock);
}
+EXPORT_SYMBOL(ptlrpc_resend_req);
/* XXX: this function and rq_status are currently unused */
void ptlrpc_restart_req(struct ptlrpc_request *req)
ptlrpc_client_wake_req(req);
cfs_spin_unlock(&req->rq_lock);
}
+EXPORT_SYMBOL(ptlrpc_restart_req);
/**
* Grab additional reference on a request \a req
cfs_atomic_inc(&req->rq_refcount);
RETURN(req);
}
+EXPORT_SYMBOL(ptlrpc_request_addref);
/**
* Add a request to import replay_list.
cfs_list_add(&req->rq_replay_list, &imp->imp_replay_list);
}
+EXPORT_SYMBOL(ptlrpc_retain_replayable_request);
/**
* Send request and wait until it completes.
RETURN(rc);
}
+EXPORT_SYMBOL(ptlrpc_queue_wait);
struct ptlrpc_replay_async_args {
int praa_old_state;
imp->imp_vbr_failed = 1;
imp->imp_no_lock_replay = 1;
cfs_spin_unlock(&imp->imp_lock);
+ lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
} else {
/** The transno had better not change over replay. */
LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
cfs_spin_unlock(&imp->imp_lock);
LASSERT(imp->imp_last_replay_transno);
+ /* transaction number shouldn't be bigger than the latest replayed */
+ if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
+ DEBUG_REQ(D_ERROR, req,
+ "Reported transno "LPU64" is bigger than the "
+ "replayed one: "LPU64, req->rq_transno,
+ lustre_msg_get_transno(req->rq_reqmsg));
+ GOTO(out, rc = -EINVAL);
+ }
+
DEBUG_REQ(D_HA, req, "got rep");
/* let the callback do fixups, possibly including in the request */
* Errors while replay can set transno to 0, but
* imp_last_replay_transno shouldn't be set to 0 anyway
*/
- if (req->rq_transno > 0) {
- cfs_spin_lock(&imp->imp_lock);
- LASSERT(req->rq_transno <= imp->imp_last_replay_transno);
- imp->imp_last_replay_transno = req->rq_transno;
- cfs_spin_unlock(&imp->imp_lock);
- } else
+ if (req->rq_transno == 0)
CERROR("Transno is 0 during replay!\n");
+
/* continue with recovery */
rc = ptlrpc_import_recovery_state_machine(imp);
out:
if (rc != 0)
/* this replay failed, so restart recovery */
- ptlrpc_connect_import(imp, NULL);
+ ptlrpc_connect_import(imp);
RETURN(rc);
}
ENTRY;
LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
- /* Not handling automatic bulk replay yet (or ever?) */
- LASSERT(req->rq_bulk == NULL);
LASSERT (sizeof (*aa) <= sizeof (req->rq_async_args));
aa = ptlrpc_req_async_args(req);
/* Readjust the timeout for current conditions */
ptlrpc_at_set_req_timeout(req);
+ /* Tell the server the net_latency, so the server can calculate how
+ * long it should wait for the next replay */
+ lustre_msg_set_service_time(req->rq_reqmsg,
+ ptlrpc_at_get_net_latency(req));
DEBUG_REQ(D_HA, req, "REPLAY");
cfs_atomic_inc(&req->rq_import->imp_replay_inflight);
ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
- ptlrpcd_add_req(req, PSCOPE_OTHER);
+ ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
RETURN(0);
}
+EXPORT_SYMBOL(ptlrpc_replay_req);
/**
 * Aborts all in-flight requests on the sending and delayed lists of import \a imp
cfs_spin_lock (&req->rq_lock);
if (req->rq_import_generation < imp->imp_generation) {
req->rq_err = 1;
- req->rq_status = -EINTR;
+ req->rq_status = -EIO;
ptlrpc_client_wake_req(req);
}
cfs_spin_unlock (&req->rq_lock);
cfs_spin_lock (&req->rq_lock);
if (req->rq_import_generation < imp->imp_generation) {
req->rq_err = 1;
- req->rq_status = -EINTR;
+ req->rq_status = -EIO;
ptlrpc_client_wake_req(req);
}
cfs_spin_unlock (&req->rq_lock);
EXIT;
}
+EXPORT_SYMBOL(ptlrpc_abort_inflight);
/**
* Abort all uncompleted requests in request set \a set
cfs_spin_lock_init(&ptlrpc_last_xid_lock);
if (now < YEAR_2004) {
- ll_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
+ cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
ptlrpc_last_xid >>= 2;
ptlrpc_last_xid |= (1ULL << 61);
} else {
cfs_spin_unlock(&ptlrpc_last_xid_lock);
return tmp;
}
+EXPORT_SYMBOL(ptlrpc_next_xid);
/**
* Get a glimpse at what next xid value might have been.
#endif
}
EXPORT_SYMBOL(ptlrpc_sample_next_xid);
+
+/**
+ * Functions for operating ptlrpc workers.
+ *
+ * A ptlrpc work is a function which runs inside the ptlrpc context.
+ * The callback shouldn't sleep, otherwise it will block that ptlrpcd thread.
+ *
+ * 1. After a work is created, it can be reused many times, that is:
+ * handler = ptlrpcd_alloc_work();
+ * ptlrpcd_queue_work();
+ *
+ * queue it again when necessary:
+ * ptlrpcd_queue_work();
+ * ptlrpcd_destroy_work();
+ * 2. ptlrpcd_queue_work() can be called by multiple processes concurrently,
+ * but the work will only be queued once at any time. Also, as its name
+ * implies, there may be a delay before it is actually run by a ptlrpcd
+ * thread.
+ */
+struct ptlrpc_work_async_args {
+ __u64 magic;
+ int (*cb)(const struct lu_env *, void *);
+ void *cbdata;
+};
+
+#define PTLRPC_WORK_MAGIC 0x6655436b676f4f44ULL /* magic code */
+
+static int work_interpreter(const struct lu_env *env,
+ struct ptlrpc_request *req, void *data, int rc)
+{
+ struct ptlrpc_work_async_args *arg = data;
+
+ LASSERT(arg->magic == PTLRPC_WORK_MAGIC);
+ LASSERT(arg->cb != NULL);
+
+ return arg->cb(env, arg->cbdata);
+}
+
+/**
+ * Create a work for ptlrpc.
+ */
+void *ptlrpcd_alloc_work(struct obd_import *imp,
+ int (*cb)(const struct lu_env *, void *), void *cbdata)
+{
+ struct ptlrpc_request *req = NULL;
+ struct ptlrpc_work_async_args *args;
+ ENTRY;
+
+ cfs_might_sleep();
+
+ if (cb == NULL)
+ RETURN(ERR_PTR(-EINVAL));
+
+ /* copy some code from deprecated fakereq. */
+ OBD_ALLOC_PTR(req);
+ if (req == NULL) {
+ CERROR("ptlrpc: run out of memory!\n");
+ RETURN(ERR_PTR(-ENOMEM));
+ }
+
+ req->rq_send_state = LUSTRE_IMP_FULL;
+ req->rq_type = PTL_RPC_MSG_REQUEST;
+ req->rq_import = class_import_get(imp);
+ req->rq_export = NULL;
+ req->rq_interpret_reply = work_interpreter;
+ /* don't want reply */
+ req->rq_receiving_reply = 0;
+ req->rq_must_unlink = 0;
+ req->rq_no_delay = req->rq_no_resend = 1;
+
+ cfs_spin_lock_init(&req->rq_lock);
+ CFS_INIT_LIST_HEAD(&req->rq_list);
+ CFS_INIT_LIST_HEAD(&req->rq_replay_list);
+ CFS_INIT_LIST_HEAD(&req->rq_set_chain);
+ CFS_INIT_LIST_HEAD(&req->rq_history_list);
+ CFS_INIT_LIST_HEAD(&req->rq_exp_list);
+ cfs_waitq_init(&req->rq_reply_waitq);
+ cfs_waitq_init(&req->rq_set_waitq);
+ cfs_atomic_set(&req->rq_refcount, 1);
+
+ CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args));
+ args = ptlrpc_req_async_args(req);
+ args->magic = PTLRPC_WORK_MAGIC;
+ args->cb = cb;
+ args->cbdata = cbdata;
+
+ RETURN(req);
+}
+EXPORT_SYMBOL(ptlrpcd_alloc_work);
+
+void ptlrpcd_destroy_work(void *handler)
+{
+ struct ptlrpc_request *req = handler;
+
+ if (req)
+ ptlrpc_req_finished(req);
+}
+EXPORT_SYMBOL(ptlrpcd_destroy_work);
+
+int ptlrpcd_queue_work(void *handler)
+{
+ struct ptlrpc_request *req = handler;
+
+ /*
+ * Check if the req is already being queued.
+ *
+ * Here comes a trick: ptlrpc lacks a reliable way of checking if a
+ * req is being processed, so I have to use the refcount of the req
+ * for this purpose. This is okay because the caller should use this
+ * req as opaque data. - Jinshan
+ */
+ LASSERT(cfs_atomic_read(&req->rq_refcount) > 0);
+ if (cfs_atomic_read(&req->rq_refcount) > 1)
+ return -EBUSY;
+
+ if (cfs_atomic_inc_return(&req->rq_refcount) > 2) { /* race */
+ cfs_atomic_dec(&req->rq_refcount);
+ return -EBUSY;
+ }
+
+ /* re-initialize the req */
+ req->rq_timeout = obd_timeout;
+ req->rq_sent = cfs_time_current_sec();
+ req->rq_deadline = req->rq_sent + req->rq_timeout;
+ req->rq_reply_deadline = req->rq_deadline;
+ req->rq_phase = RQ_PHASE_INTERPRET;
+ req->rq_next_phase = RQ_PHASE_COMPLETE;
+ req->rq_xid = ptlrpc_next_xid();
+ req->rq_import_generation = req->rq_import->imp_generation;
+
+ ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+ return 0;
+}
+EXPORT_SYMBOL(ptlrpcd_queue_work);
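+
+/*
+ * A minimal usage sketch of the work interface above (illustration only,
+ * not part of this change): example_work_cb and its cbdata are hypothetical.
+ * The callback is invoked in ptlrpcd context through work_interpreter() and
+ * must not sleep:
+ *
+ *	static int example_work_cb(const struct lu_env *env, void *data)
+ *	{
+ *		(short, non-blocking work goes here)
+ *		return 0;
+ *	}
+ *
+ *	handler = ptlrpcd_alloc_work(imp, example_work_cb, cbdata);
+ *	if (IS_ERR(handler))
+ *		return PTR_ERR(handler);
+ *
+ *	rc = ptlrpcd_queue_work(handler);	(-EBUSY if already queued)
+ *	...
+ *	ptlrpcd_destroy_work(handler);
+ */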