#include "ptlrpc_internal.h"
static int ptlrpc_send_new_req(struct ptlrpc_request *req);
+static int ptlrpcd_check_work(struct ptlrpc_request *req);
/**
* Initialize the passed-in client structure \a cl.
return NULL;
spin_lock_init(&desc->bd_lock);
- cfs_waitq_init(&desc->bd_waitq);
+ init_waitqueue_head(&desc->bd_waitq);
desc->bd_max_iov = npages;
desc->bd_iov_count = 0;
desc->bd_portal = portal;
ptlrpc_at_set_req_timeout(request);
spin_lock_init(&request->rq_lock);
- CFS_INIT_LIST_HEAD(&request->rq_list);
- CFS_INIT_LIST_HEAD(&request->rq_timed_list);
- CFS_INIT_LIST_HEAD(&request->rq_replay_list);
- CFS_INIT_LIST_HEAD(&request->rq_ctx_chain);
- CFS_INIT_LIST_HEAD(&request->rq_set_chain);
- CFS_INIT_LIST_HEAD(&request->rq_history_list);
- CFS_INIT_LIST_HEAD(&request->rq_exp_list);
- cfs_waitq_init(&request->rq_reply_waitq);
- cfs_waitq_init(&request->rq_set_waitq);
- request->rq_xid = ptlrpc_next_xid();
- cfs_atomic_set(&request->rq_refcount, 1);
-
- lustre_msg_set_opc(request->rq_reqmsg, opcode);
-
- RETURN(0);
+ CFS_INIT_LIST_HEAD(&request->rq_list);
+ CFS_INIT_LIST_HEAD(&request->rq_timed_list);
+ CFS_INIT_LIST_HEAD(&request->rq_replay_list);
+ CFS_INIT_LIST_HEAD(&request->rq_ctx_chain);
+ CFS_INIT_LIST_HEAD(&request->rq_set_chain);
+ CFS_INIT_LIST_HEAD(&request->rq_history_list);
+ CFS_INIT_LIST_HEAD(&request->rq_exp_list);
+ init_waitqueue_head(&request->rq_reply_waitq);
+ init_waitqueue_head(&request->rq_set_waitq);
+ request->rq_xid = ptlrpc_next_xid();
+ cfs_atomic_set(&request->rq_refcount, 1);
+
+ lustre_msg_set_opc(request->rq_reqmsg, opcode);
+
+ RETURN(0);
out_ctx:
- sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
+ sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
out_free:
- class_import_put(imp);
- return rc;
+ class_import_put(imp);
+ return rc;
}
int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
RETURN(NULL);
cfs_atomic_set(&set->set_refcount, 1);
CFS_INIT_LIST_HEAD(&set->set_requests);
- cfs_waitq_init(&set->set_waitq);
+ init_waitqueue_head(&set->set_waitq);
cfs_atomic_set(&set->set_new_count, 0);
cfs_atomic_set(&set->set_remaining, 0);
spin_lock_init(&set->set_new_req_lock);
count = cfs_atomic_inc_return(&set->set_new_count);
spin_unlock(&set->set_new_req_lock);
- /* Only need to call wakeup once for the first entry. */
- if (count == 1) {
- cfs_waitq_signal(&set->set_waitq);
+ /* Only need to call wakeup once for the first entry. */
+ if (count == 1) {
+ wake_up(&set->set_waitq);
- /* XXX: It maybe unnecessary to wakeup all the partners. But to
- * guarantee the async RPC can be processed ASAP, we have
- * no other better choice. It maybe fixed in future. */
- for (i = 0; i < pc->pc_npartners; i++)
- cfs_waitq_signal(&pc->pc_partners[i]->pc_set->set_waitq);
- }
+ /* XXX: It may be unnecessary to wake up all the partners, but to
+ * guarantee that async RPCs are processed ASAP we have no
+ * better choice for now.  This may be fixed in the future. */
+ for (i = 0; i < pc->pc_npartners; i++)
+ wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
+ }
}
EXPORT_SYMBOL(ptlrpc_set_add_new_req);
* will round it up */
req->rq_replen = req->rq_nob_received;
req->rq_nob_received = 0;
- req->rq_resend = 1;
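+ /* rq_resend is one of the rq_* bit-fields sharing a single word,
+ * so flag updates must be serialized under rq_lock. */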
+ spin_lock(&req->rq_lock);
+ req->rq_resend = 1;
+ spin_unlock(&req->rq_lock);
RETURN(0);
}
/** version recovery */
ptlrpc_save_versions(req);
ptlrpc_retain_replayable_request(req, imp);
- } else if (req->rq_commit_cb != NULL) {
+ } else if (req->rq_commit_cb != NULL &&
+ list_empty(&req->rq_replay_list)) {
+ /* NB: don't call rq_commit_cb if it's already on
+ * rq_replay_list; ptlrpc_free_committed() will call
+ * it later.  See LU-3618 for details. */
spin_unlock(&imp->imp_lock);
req->rq_commit_cb(req);
spin_lock(&imp->imp_lock);
cfs_atomic_inc(&req->rq_import->imp_inflight);
spin_unlock(&imp->imp_lock);
- lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());
+ lustre_msg_set_status(req->rq_reqmsg, current_pid());
rc = sptlrpc_req_refresh_ctx(req, -1);
if (rc) {
req->rq_status = rc;
RETURN(1);
} else {
- req->rq_wait_ctx = 1;
+ spin_lock(&req->rq_lock);
+ req->rq_wait_ctx = 1;
+ spin_unlock(&req->rq_lock);
RETURN(0);
}
}
- CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
- " %s:%s:%d:"LPU64":%s:%d\n", cfs_curproc_comm(),
- imp->imp_obd->obd_uuid.uuid,
- lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
- libcfs_nid2str(imp->imp_connection->c_peer.nid),
- lustre_msg_get_opc(req->rq_reqmsg));
+ CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
+ " %s:%s:%d:"LPU64":%s:%d\n", current_comm(),
+ imp->imp_obd->obd_uuid.uuid,
+ lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid),
+ lustre_msg_get_opc(req->rq_reqmsg));
rc = ptl_send_rpc(req, 0);
if (rc) {
DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
- req->rq_net_err = 1;
+ spin_lock(&req->rq_lock);
+ req->rq_net_err = 1;
+ spin_unlock(&req->rq_lock);
RETURN(rc);
}
RETURN(0);
spin_lock(&req->rq_lock);
req->rq_net_err = 1;
spin_unlock(&req->rq_lock);
+ continue;
}
/* need to reset the timeout */
force_timer_recalc = 1;
ptlrpc_req_interpret(env, req, req->rq_status);
- ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
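+ /* Work requests are handled entirely inside their interpret
+ * callback (see work_interpreter() below); skip the normal
+ * completion path for them. */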
+ if (ptlrpcd_check_work(req)) {
+ atomic_dec(&set->set_remaining);
+ continue;
+ }
+ ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
"Completed RPC pname:cluuid:pid:xid:nid:"
"opc %s:%s:%d:"LPU64":%s:%d\n",
- cfs_curproc_comm(), imp->imp_obd->obd_uuid.uuid,
+ current_comm(), imp->imp_obd->obd_uuid.uuid,
lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
libcfs_nid2str(imp->imp_connection->c_peer.nid),
lustre_msg_get_opc(req->rq_reqmsg));
}
spin_unlock(&imp->imp_lock);
- cfs_atomic_dec(&set->set_remaining);
- cfs_waitq_broadcast(&imp->imp_recovery_waitq);
+ cfs_atomic_dec(&set->set_remaining);
+ wake_up_all(&imp->imp_recovery_waitq);
if (set->set_producer) {
/* produce a new request if possible */
/* LU-769 - if we ignored the signal because it was already
* pending when we started, we need to handle it now or we risk
* it being ignored forever */
- if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
- cfs_signal_pending()) {
- cfs_sigset_t blocked_sigs =
- cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
-
- /* In fact we only interrupt for the "fatal" signals
- * like SIGINT or SIGKILL. We still ignore less
- * important signals since ptlrpc set is not easily
- * reentrant from userspace again */
- if (cfs_signal_pending())
- ptlrpc_interrupted_set(set);
+ if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
+ cfs_signal_pending()) {
+ sigset_t blocked_sigs =
+ cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
+
+ /* In fact we only interrupt for "fatal" signals like SIGINT or
+ * SIGKILL.  Less important signals are still ignored, since a
+ * ptlrpc set is not easily re-entered from userspace. */
+ if (cfs_signal_pending())
+ ptlrpc_interrupted_set(set);
cfs_restore_sigs(blocked_sigs);
- }
+ }
LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
*/
int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
{
- int rc;
- struct l_wait_info lwi;
+ int rc;
+ struct l_wait_info lwi;
- /*
- * Might sleep.
- */
- LASSERT(!cfs_in_interrupt());
+ /*
+ * Might sleep.
+ */
+ LASSERT(!in_interrupt());
- /*
- * Let's setup deadline for reply unlink.
- */
+ /*
+ * Let's setup deadline for reply unlink.
+ */
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
async && request->rq_reply_deadline == 0)
request->rq_reply_deadline = cfs_time_current_sec()+LONG_UNLINK;
for (;;) {
#ifdef __KERNEL__
/* The wq argument is ignored by user-space wait_event macros */
- cfs_waitq_t *wq = (request->rq_set != NULL) ?
- &request->rq_set->set_waitq :
- &request->rq_reply_waitq;
+ wait_queue_head_t *wq = (request->rq_set != NULL) ?
+ &request->rq_set->set_waitq :
+ &request->rq_reply_waitq;
#endif
/* Network access will complete in finite time but the HUGE
* timeout lets us CWARN for visibility of sluggish NALs */
}
EXPORT_SYMBOL(ptlrpc_unregister_reply);
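+/**
+ * Drop a request from the replay list: clear rq_replay, run the
+ * commit callback if one is set, unlink the request and release the
+ * replay reference.  The caller must hold imp_lock.
+ */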
+static void ptlrpc_free_request(struct ptlrpc_request *req)
+{
+ spin_lock(&req->rq_lock);
+ req->rq_replay = 0;
+ spin_unlock(&req->rq_lock);
+
+ if (req->rq_commit_cb != NULL)
+ req->rq_commit_cb(req);
+ cfs_list_del_init(&req->rq_replay_list);
+
+ __ptlrpc_req_finished(req, 1);
+}
+
+/**
+ * Drop the request from the replay list of its import once it is
+ * committed; with \a force set, drop it regardless of the peer's
+ * committed transno.
+ */
+void ptlrpc_request_committed(struct ptlrpc_request *req, int force)
+{
+ struct obd_import *imp = req->rq_import;
+
+ spin_lock(&imp->imp_lock);
+ if (cfs_list_empty(&req->rq_replay_list)) {
+ spin_unlock(&imp->imp_lock);
+ return;
+ }
+
+ if (force || req->rq_transno <= imp->imp_peer_committed_transno)
+ ptlrpc_free_request(req);
+
+ spin_unlock(&imp->imp_lock);
+}
+EXPORT_SYMBOL(ptlrpc_request_committed);
+
/**
* Iterates through replay_list on import and prunes
* all requests that have a transno smaller than last_committed for the
*/
void ptlrpc_free_committed(struct obd_import *imp)
{
- cfs_list_t *tmp, *saved;
- struct ptlrpc_request *req;
- struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
- ENTRY;
+ struct ptlrpc_request *req, *saved;
+ struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
+ bool skip_committed_list = true;
+ ENTRY;
LASSERT(imp != NULL);
LASSERT(spin_is_locked(&imp->imp_lock));
imp->imp_generation == imp->imp_last_generation_checked) {
CDEBUG(D_INFO, "%s: skip recheck: last_committed "LPU64"\n",
imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
- EXIT;
- return;
+ RETURN_EXIT;
}
CDEBUG(D_RPCTRACE, "%s: committing for last_committed "LPU64" gen %d\n",
imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
imp->imp_generation);
+
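+ /* Requests kept on imp_committed_list normally need no rescan,
+ * but after an import generation change stale entries may remain
+ * there and must be walked below. */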
+ if (imp->imp_generation != imp->imp_last_generation_checked)
+ skip_committed_list = false;
+
imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
imp->imp_last_generation_checked = imp->imp_generation;
- cfs_list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
- req = cfs_list_entry(tmp, struct ptlrpc_request,
- rq_replay_list);
-
+ cfs_list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
+ rq_replay_list) {
/* XXX ok to remove when 1357 resolved - rread 05/29/03 */
LASSERT(req != last_req);
last_req = req;
GOTO(free_req, 0);
}
- if (req->rq_replay) {
- DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
- continue;
- }
-
/* not yet committed */
if (req->rq_transno > imp->imp_peer_committed_transno) {
DEBUG_REQ(D_RPCTRACE, req, "stopping search");
break;
}
+ if (req->rq_replay) {
+ DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
+ cfs_list_move_tail(&req->rq_replay_list,
+ &imp->imp_committed_list);
+ continue;
+ }
+
DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")",
imp->imp_peer_committed_transno);
free_req:
- spin_lock(&req->rq_lock);
- req->rq_replay = 0;
- spin_unlock(&req->rq_lock);
- if (req->rq_commit_cb != NULL)
- req->rq_commit_cb(req);
- cfs_list_del_init(&req->rq_replay_list);
- __ptlrpc_req_finished(req, 1);
+ ptlrpc_free_request(req);
}
+ if (skip_committed_list)
+ GOTO(out, 0);
+
+ cfs_list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
+ rq_replay_list) {
+ LASSERT(req->rq_transno != 0);
+ if (req->rq_import_generation < imp->imp_generation) {
+ DEBUG_REQ(D_RPCTRACE, req, "free stale open request");
+ ptlrpc_free_request(req);
+ }
+ }
+out:
EXIT;
- return;
}
void ptlrpc_cleanup_client(struct obd_import *imp)
{
ENTRY;
EXIT;
- return;
}
EXPORT_SYMBOL(ptlrpc_cleanup_client);
RETURN(-ENOMEM);
}
- /* for distributed debugging */
- lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());
+ /* for distributed debugging */
+ lustre_msg_set_status(req->rq_reqmsg, current_pid());
/* add a ref for the set (see comment in ptlrpc_set_add_req) */
ptlrpc_request_addref(req);
* have a delay before it really runs in a ptlrpcd thread.
*/
struct ptlrpc_work_async_args {
- __u64 magic;
- int (*cb)(const struct lu_env *, void *);
- void *cbdata;
+ int (*cb)(const struct lu_env *, void *);
+ void *cbdata;
};
-#define PTLRPC_WORK_MAGIC 0x6655436b676f4f44ULL /* magic code */
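+/* Hand a work request (back) to ptlrpcd.  The phase is preset to
+ * RQ_PHASE_INTERPRET so ptlrpcd invokes work_interpreter() directly,
+ * without any network send. */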
+static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
+{
+ /* re-initialize the req */
+ req->rq_timeout = obd_timeout;
+ req->rq_sent = cfs_time_current_sec();
+ req->rq_deadline = req->rq_sent + req->rq_timeout;
+ req->rq_reply_deadline = req->rq_deadline;
+ req->rq_phase = RQ_PHASE_INTERPRET;
+ req->rq_next_phase = RQ_PHASE_COMPLETE;
+ req->rq_xid = ptlrpc_next_xid();
+ req->rq_import_generation = req->rq_import->imp_generation;
+
+ ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+}
static int work_interpreter(const struct lu_env *env,
- struct ptlrpc_request *req, void *data, int rc)
+ struct ptlrpc_request *req, void *data, int rc)
{
- struct ptlrpc_work_async_args *arg = data;
+ struct ptlrpc_work_async_args *arg = data;
+
+ LASSERT(ptlrpcd_check_work(req));
+ LASSERT(arg->cb != NULL);
+
+ rc = arg->cb(env, arg->cbdata);
+
+ list_del_init(&req->rq_set_chain);
+ req->rq_set = NULL;
+
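+ /* If ptlrpcd_queue_work() raced in while the callback ran, extra
+ * references were taken; restore the "queued" refcount and re-arm
+ * the request once more. */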
+ if (atomic_dec_return(&req->rq_refcount) > 1) {
+ atomic_set(&req->rq_refcount, 2);
+ ptlrpcd_add_work_req(req);
+ }
+ return rc;
+}
- LASSERT(arg->magic == PTLRPC_WORK_MAGIC);
- LASSERT(arg->cb != NULL);
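+/* worker_format is never used as a real request format; only its
+ * unique address matters.  It is stored in rq_pill.rc_fmt to tag work
+ * requests so ptlrpcd_check_work() can recognize them. */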
+static int worker_format;
- return arg->cb(env, arg->cbdata);
+static int ptlrpcd_check_work(struct ptlrpc_request *req)
+{
+ return req->rq_pill.rc_fmt == (void *)&worker_format;
}
/**
* Create a work for ptlrpc.
*/
void *ptlrpcd_alloc_work(struct obd_import *imp,
- int (*cb)(const struct lu_env *, void *), void *cbdata)
+ int (*cb)(const struct lu_env *, void *), void *cbdata)
{
- struct ptlrpc_request *req = NULL;
- struct ptlrpc_work_async_args *args;
- ENTRY;
+ struct ptlrpc_request *req = NULL;
+ struct ptlrpc_work_async_args *args;
+ ENTRY;
- cfs_might_sleep();
+ might_sleep();
- if (cb == NULL)
- RETURN(ERR_PTR(-EINVAL));
+ if (cb == NULL)
+ RETURN(ERR_PTR(-EINVAL));
/* copy some code from the deprecated fakereq. */
req = ptlrpc_request_cache_alloc(__GFP_IO);
req->rq_receiving_reply = 0;
req->rq_must_unlink = 0;
req->rq_no_delay = req->rq_no_resend = 1;
+ req->rq_pill.rc_fmt = (void *)&worker_format;
spin_lock_init(&req->rq_lock);
- CFS_INIT_LIST_HEAD(&req->rq_list);
- CFS_INIT_LIST_HEAD(&req->rq_replay_list);
- CFS_INIT_LIST_HEAD(&req->rq_set_chain);
- CFS_INIT_LIST_HEAD(&req->rq_history_list);
- CFS_INIT_LIST_HEAD(&req->rq_exp_list);
- cfs_waitq_init(&req->rq_reply_waitq);
- cfs_waitq_init(&req->rq_set_waitq);
- cfs_atomic_set(&req->rq_refcount, 1);
-
- CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args));
- args = ptlrpc_req_async_args(req);
- args->magic = PTLRPC_WORK_MAGIC;
- args->cb = cb;
- args->cbdata = cbdata;
-
- RETURN(req);
+ CFS_INIT_LIST_HEAD(&req->rq_list);
+ CFS_INIT_LIST_HEAD(&req->rq_replay_list);
+ CFS_INIT_LIST_HEAD(&req->rq_set_chain);
+ CFS_INIT_LIST_HEAD(&req->rq_history_list);
+ CFS_INIT_LIST_HEAD(&req->rq_exp_list);
+ init_waitqueue_head(&req->rq_reply_waitq);
+ init_waitqueue_head(&req->rq_set_waitq);
+ atomic_set(&req->rq_refcount, 1);
+
+ CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
+ args = ptlrpc_req_async_args(req);
+ args->cb = cb;
+ args->cbdata = cbdata;
+
+ RETURN(req);
}
EXPORT_SYMBOL(ptlrpcd_alloc_work);
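+/*
+ * Queue the work created by ptlrpcd_alloc_work().  A sketch of the
+ * expected usage, assuming the caller eventually releases the initial
+ * reference with the matching ptlrpcd_destroy_work() helper:
+ *
+ *	handler = ptlrpcd_alloc_work(imp, cb, cbdata);
+ *	if (!IS_ERR(handler))
+ *		rc = ptlrpcd_queue_work(handler);
+ *
+ * Calling ptlrpcd_queue_work() again while a previous run is still in
+ * flight is safe; work_interpreter() will re-arm the request.
+ */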
int ptlrpcd_queue_work(void *handler)
{
- struct ptlrpc_request *req = handler;
+ struct ptlrpc_request *req = handler;
/*
* Check if the req is already being queued.
* for this purpose. This is okay because the caller should use this
* req as opaque data. - Jinshan
*/
- LASSERT(cfs_atomic_read(&req->rq_refcount) > 0);
- if (cfs_atomic_read(&req->rq_refcount) > 1)
- return -EBUSY;
-
- if (cfs_atomic_inc_return(&req->rq_refcount) > 2) { /* race */
- cfs_atomic_dec(&req->rq_refcount);
- return -EBUSY;
- }
-
- /* re-initialize the req */
- req->rq_timeout = obd_timeout;
- req->rq_sent = cfs_time_current_sec();
- req->rq_deadline = req->rq_sent + req->rq_timeout;
- req->rq_reply_deadline = req->rq_deadline;
- req->rq_phase = RQ_PHASE_INTERPRET;
- req->rq_next_phase = RQ_PHASE_COMPLETE;
- req->rq_xid = ptlrpc_next_xid();
- req->rq_import_generation = req->rq_import->imp_generation;
-
- ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
- return 0;
+ LASSERT(atomic_read(&req->rq_refcount) > 0);
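+ /* Only the caller that raises the refcount from 1 to 2 enqueues
+ * the work; a concurrent caller merely leaves an extra reference
+ * for work_interpreter() to notice and re-arm. */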
+ if (atomic_inc_return(&req->rq_refcount) == 2)
+ ptlrpcd_add_work_req(req);
+ return 0;
}
EXPORT_SYMBOL(ptlrpcd_queue_work);