};
EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
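+/* Frag ops for kvec-backed bulk descriptors: only iov fragments are added,
+ * no page handling is involved. */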
+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kvec_ops = {
+ .add_iov_frag = ptlrpc_prep_bulk_frag,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kvec_ops);
+
static int ptlrpc_send_new_req(struct ptlrpc_request *req);
static int ptlrpcd_check_work(struct ptlrpc_request *req);
static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
void *frag, int len)
{
struct kvec *iovec;
+ ENTRY;
LASSERT(desc->bd_iov_count < desc->bd_max_iov);
LASSERT(frag != NULL);
desc->bd_iov_count++;
- return desc->bd_nob;
+ RETURN(desc->bd_nob);
}
EXPORT_SYMBOL(ptlrpc_prep_bulk_frag);
/**
* Allocates, initializes and adds \a num_rq requests to the pool \a pool
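+ *
+ * Returns the number of requests actually added to the pool.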
*/
-void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
+int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
{
int i;
int size = 1;
spin_unlock(&pool->prp_lock);
req = ptlrpc_request_cache_alloc(GFP_NOFS);
if (!req)
- return;
+ return i;
OBD_ALLOC_LARGE(msg, size);
if (!msg) {
ptlrpc_request_cache_free(req);
- return;
+ return i;
}
req->rq_reqbuf = msg;
req->rq_reqbuf_len = size;
list_add_tail(&req->rq_list, &pool->prp_req_list);
}
spin_unlock(&pool->prp_lock);
- return;
+ return num_rq;
}
EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
*/
struct ptlrpc_request_pool *
ptlrpc_init_rq_pool(int num_rq, int msgsize,
- void (*populate_pool)(struct ptlrpc_request_pool *, int))
+ int (*populate_pool)(struct ptlrpc_request_pool *, int))
{
struct ptlrpc_request_pool *pool;
populate_pool(pool, num_rq);
- if (list_empty(&pool->prp_req_list)) {
- /* have not allocated a single request for the pool */
- OBD_FREE(pool, sizeof(struct ptlrpc_request_pool));
- pool = NULL;
- }
return pool;
}
EXPORT_SYMBOL(ptlrpc_init_rq_pool);
ptlrpc_at_set_req_timeout(request);
- request->rq_xid = ptlrpc_next_xid();
lustre_msg_set_opc(request->rq_reqmsg, opcode);
RETURN(0);
{
struct ptlrpc_request *request = NULL;
- if (pool)
- request = ptlrpc_prep_req_from_pool(pool);
- if (!request)
- request = ptlrpc_request_cache_alloc(GFP_NOFS);
+ request = ptlrpc_request_cache_alloc(GFP_NOFS);
+ if (!request && pool)
+ request = ptlrpc_prep_req_from_pool(pool);
if (request) {
ptlrpc_cli_req_init(request);
}
/**
- * Allocate and initialize new request set structure.
+ * Allocate and initialize a new request set structure on the current CPT.
* Returns a pointer to the newly allocated set structure or NULL on error.
*/
struct ptlrpc_request_set *ptlrpc_prep_set(void)
{
- struct ptlrpc_request_set *set;
+ struct ptlrpc_request_set *set;
+ int cpt;
ENTRY;
- OBD_ALLOC(set, sizeof *set);
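+ /* allocate the set on the current CPT so that handling of the set
+ * stays local to the caller's CPU partition */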
+ cpt = cfs_cpt_current(cfs_cpt_table, 0);
+ OBD_CPT_ALLOC(set, cfs_cpt_table, cpt, sizeof *set);
if (!set)
RETURN(NULL);
atomic_set(&set->set_refcount, 1);
struct obd_device *obd = req->rq_import->imp_obd;
int rc;
struct timeval work_start;
+ __u64 committed;
long timediff;
ENTRY;
spin_unlock(&req->rq_lock);
req->rq_nr_resend++;
- /* allocate new xid to avoid reply reconstruction */
- if (!req->rq_bulk) {
- /* new xid is already allocated for bulk in
- * ptlrpc_check_set() */
- req->rq_xid = ptlrpc_next_xid();
- DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for "
- "resend on EINPROGRESS");
- }
-
/* Readjust the timeout for current conditions */
ptlrpc_at_set_req_timeout(req);
/* delay resend to give a chance to the server to get ready.
/*
* Replay-enabled imports return commit-status information.
*/
- if (lustre_msg_get_last_committed(req->rq_repmsg)) {
- imp->imp_peer_committed_transno =
- lustre_msg_get_last_committed(req->rq_repmsg);
- }
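+ /* only ever move the peer's committed transno forward */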
+ committed = lustre_msg_get_last_committed(req->rq_repmsg);
+ if (likely(committed > imp->imp_peer_committed_transno))
+ imp->imp_peer_committed_transno = committed;
ptlrpc_free_committed(imp);
static int ptlrpc_send_new_req(struct ptlrpc_request *req)
{
struct obd_import *imp = req->rq_import;
+ struct list_head *tmp;
+ __u64 min_xid = ~0ULL;
int rc;
ENTRY;
LASSERT(req->rq_phase == RQ_PHASE_NEW);
+
+ /* do not try to go further if there is not enough memory in enc_pool */
+ if (req->rq_sent && req->rq_bulk != NULL)
+ if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() &&
+ pool_is_at_full_capacity())
+ RETURN(-ENOMEM);
+
if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
(!req->rq_generation_set ||
req->rq_import_generation == imp->imp_generation))
spin_lock(&imp->imp_lock);
+ /* The very first time we assign the XID. It is important to assign
+ * the XID and put the request on the list atomically, so that the
+ * lowest assigned XID is always known. This is vital for multislot
+ * last_rcvd */
+ if (req->rq_send_state == LUSTRE_IMP_REPLAY) {
+ LASSERT(req->rq_xid != 0);
+ } else {
+ LASSERT(req->rq_xid == 0);
+ req->rq_xid = ptlrpc_next_xid();
+ }
+
if (!req->rq_generation_set)
req->rq_import_generation = imp->imp_generation;
LASSERT(list_empty(&req->rq_list));
list_add_tail(&req->rq_list, &imp->imp_sending_list);
atomic_inc(&req->rq_import->imp_inflight);
+
+ /* find the lowest unreplied XID */
+ list_for_each(tmp, &imp->imp_delayed_list) {
+ struct ptlrpc_request *r;
+ r = list_entry(tmp, struct ptlrpc_request, rq_list);
+ if (r->rq_xid < min_xid)
+ min_xid = r->rq_xid;
+ }
+ list_for_each(tmp, &imp->imp_sending_list) {
+ struct ptlrpc_request *r;
+ r = list_entry(tmp, struct ptlrpc_request, rq_list);
+ if (r->rq_xid < min_xid)
+ min_xid = r->rq_xid;
+ }
spin_unlock(&imp->imp_lock);
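+ /* min_xid is the lowest XID still waiting for a reply; passing
+ * min_xid - 1 as last_xid tells the server that all earlier requests
+ * have been replied to, which the multislot last_rcvd code relies on */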
+ if (likely(min_xid != ~0ULL))
+ lustre_msg_set_last_xid(req->rq_reqmsg, min_xid - 1);
+
lustre_msg_set_status(req->rq_reqmsg, current_pid());
rc = sptlrpc_req_refresh_ctx(req, -1);
lustre_msg_get_opc(req->rq_reqmsg));
rc = ptl_send_rpc(req, 0);
+ if (rc == -ENOMEM) {
+ spin_lock(&imp->imp_lock);
+ if (!list_empty(&req->rq_list)) {
+ list_del_init(&req->rq_list);
+ atomic_dec(&req->rq_import->imp_inflight);
+ }
+ spin_unlock(&imp->imp_lock);
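+ /* drop back to the NEW phase so the request is sent again later,
+ * once the memory shortage has been resolved */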
+ ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
+ RETURN(rc);
+ }
if (rc) {
DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
spin_lock(&req->rq_lock);
spin_lock(&req->rq_lock);
req->rq_resend = 1;
spin_unlock(&req->rq_lock);
- if (req->rq_bulk) {
- __u64 old_xid;
-
- if (!ptlrpc_unregister_bulk(req, 1))
- continue;
-
- /* ensure previous bulk fails */
- old_xid = req->rq_xid;
- req->rq_xid = ptlrpc_next_xid();
- CDEBUG(D_HA, "resend bulk "
- "old x"LPU64
- " new x"LPU64"\n",
- old_xid, req->rq_xid);
- }
+
+ if (req->rq_bulk != NULL &&
+ !ptlrpc_unregister_bulk(req, 1))
+ continue;
}
/*
* rq_wait_ctx is only touched by ptlrpcd,
}
rc = ptl_send_rpc(req, 0);
+ if (rc == -ENOMEM) {
+ spin_lock(&imp->imp_lock);
+ if (!list_empty(&req->rq_list))
+ list_del_init(&req->rq_list);
+ spin_unlock(&imp->imp_lock);
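+ /* as in ptlrpc_send_new_req(), drop back to the NEW phase and
+ * retry the send once memory becomes available */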
+ ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
+ continue;
+ }
if (rc) {
DEBUG_REQ(D_HA, req,
"send failed: rc = %d", rc);
req->rq_resend = 1;
req->rq_net_err = 0;
req->rq_timedout = 0;
- if (req->rq_bulk) {
- __u64 old_xid = req->rq_xid;
- /* ensure previous bulk fails */
- req->rq_xid = ptlrpc_next_xid();
- CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n",
- old_xid, req->rq_xid);
- }
ptlrpc_client_wake_req(req);
spin_unlock(&req->rq_lock);
}
lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
+ spin_lock(&req->rq_lock);
+ req->rq_resend = 0;
+ spin_unlock(&req->rq_lock);
+
LASSERT(imp->imp_replayable);
/* Balanced in ptlrpc_free_committed, usually. */
ptlrpc_request_addref(req);
ENTRY;
atomic_dec(&imp->imp_replay_inflight);
- if (!ptlrpc_client_replied(req)) {
- CERROR("request replay timed out, restarting recovery\n");
- GOTO(out, rc = -ETIMEDOUT);
- }
+ /* Note: if this is a bulk replay (MDS-MDS replay), then even if the
+ * server got the request but the bulk transfer timed out, the bulk
+ * request must be replayed again */
+ if (!ptlrpc_client_replied(req) ||
+ (req->rq_bulk != NULL &&
+ lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) {
+ DEBUG_REQ(D_ERROR, req, "request replay timed out.\n");
+ GOTO(out, rc = -ETIMEDOUT);
+ }
if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
(lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
atomic_inc(&req->rq_import->imp_replay_inflight);
ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
- ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
+ ptlrpcd_add_req(req);
RETURN(0);
}
}
/**
+ * If the request has a newly allocated XID (new request or EINPROGRESS
+ * resend), use that XID as the bulk matchbits; otherwise allocate new
+ * matchbits for the request so that the previous bulk fails, avoiding
+ * problems with lost replies where several transfers from different
+ * sending attempts land in the same buffer.
+ */
+void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req)
+{
+ struct ptlrpc_bulk_desc *bd = req->rq_bulk;
+
+ LASSERT(bd != NULL);
+
+ if (!req->rq_resend || req->rq_nr_resend != 0) {
+ /* this request has a new xid, just use it as bulk matchbits */
+ req->rq_mbits = req->rq_xid;
+
+ } else { /* needs to generate a new matchbits for resend */
+ __u64 old_mbits = req->rq_mbits;
+
+ if ((bd->bd_import->imp_connect_data.ocd_connect_flags &
+ OBD_CONNECT_BULK_MBITS) != 0)
+ req->rq_mbits = ptlrpc_next_xid();
+ else /* old version transfers rq_xid to peer as matchbits */
+ req->rq_mbits = req->rq_xid = ptlrpc_next_xid();
+
+ CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n",
+ old_mbits, req->rq_mbits);
+ }
+
+ /* For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so
+ * that server can infer the number of bulks that were prepared,
+ * see LU-1431 */
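+ /* e.g. a bulk of 3 * LNET_MAX_IOV fragments is sent as three LNet
+ * transfers, so the adjustment below advances rq_mbits by two */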
+ req->rq_mbits += ((bd->bd_iov_count + LNET_MAX_IOV - 1) /
+ LNET_MAX_IOV) - 1;
+}
+
+/**
* Get a glimpse at what next xid value might have been.
* Returns possible next xid.
*/
req->rq_xid = ptlrpc_next_xid();
req->rq_import_generation = req->rq_import->imp_generation;
- ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+ ptlrpcd_add_req(req);
}
static int work_interpreter(const struct lu_env *env,