return;
spin_lock(&svc->srv_at_lock);
- list_del_init(&req->rq_timed_list);
if (req->rq_at_linked) {
struct ptlrpc_at_array *array = &svc->srv_at_array;
__u32 index = req->rq_at_index;
+ LASSERT(!list_empty(&req->rq_timed_list));
+ list_del_init(&req->rq_timed_list);
req->rq_at_linked = 0;
array->paa_reqs_count[index]--;
array->paa_count--;
- }
+ } else
+ LASSERT(list_empty(&req->rq_timed_list));
spin_unlock(&svc->srv_at_lock);
/* finalize request */
exp->exp_obd->obd_eviction_timer =
cfs_time_current_sec() + 3 * PING_INTERVAL;
CDEBUG(D_HA, "%s: Think about evicting %s from "CFS_TIME_T"\n",
- exp->exp_obd->obd_name, obd_export_nid2str(exp),
- oldest_time);
+ exp->exp_obd->obd_name,
+ obd_export_nid2str(oldest_exp), oldest_time);
}
} else {
if (cfs_time_current_sec() >
list_for_each_entry_safe(rq, n, &array->paa_reqs_array[index],
rq_timed_list) {
if (rq->rq_deadline <= now + at_early_margin) {
- list_del(&rq->rq_timed_list);
+ list_del_init(&rq->rq_timed_list);
/**
* ptlrpc_server_drop_request() may drop
* refcount to 0 already. Let's check this and
ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
- CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
- "%s:%s+%d:%d:x"LPU64":%s:%d\n", cfs_curproc_comm(),
- (request->rq_export ?
- (char *)request->rq_export->exp_client_uuid.uuid : "0"),
- (request->rq_export ?
- atomic_read(&request->rq_export->exp_refcount) : -99),
- lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
- libcfs_id2str(request->rq_peer),
- lustre_msg_get_opc(request->rq_reqmsg));
-
put_rpc_export:
if (export != NULL)
class_export_rpc_put(export);
do_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
- CDEBUG(D_RPCTRACE, "request x"LPU64" opc %u from %s processed in "
- "%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
- request->rq_xid, lustre_msg_get_opc(request->rq_reqmsg),
- libcfs_id2str(request->rq_peer), timediff,
- cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
- request->rq_repmsg ? lustre_msg_get_transno(request->rq_repmsg) :
- request->rq_transno, request->rq_status,
- request->rq_repmsg ? lustre_msg_get_status(request->rq_repmsg):
- -999);
+ CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
+ "%s:%s+%d:%d:x"LPU64":%s:%d Request processed in "
+ "%lds (%lds total) trans "LPU64" rc %d/%d\n",
+ cfs_curproc_comm(),
+ (request->rq_export ?
+ (char *)request->rq_export->exp_client_uuid.uuid : "0"),
+ (request->rq_export ?
+ atomic_read(&request->rq_export->exp_refcount) : -99),
+ lustre_msg_get_status(request->rq_reqmsg),
+ request->rq_xid,
+ libcfs_id2str(request->rq_peer),
+ lustre_msg_get_opc(request->rq_reqmsg),
+ timediff,
+ cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
+ (request->rq_repmsg ?
+ lustre_msg_get_transno(request->rq_repmsg) :
+ request->rq_transno),
+ request->rq_status,
+ (request->rq_repmsg ?
+ lustre_msg_get_status(request->rq_repmsg) : -999));
if (likely(svc->srv_stats != NULL && request->rq_reqmsg != NULL)) {
__u32 op = lustre_msg_get_opc(request->rq_reqmsg);
int opc = opcode_offset(op);
int counter = 0, rc = 0;
ENTRY;
+ thread->t_pid = cfs_curproc_pid();
cfs_daemonize_ctxt(data->name);
#if defined(HAVE_NODE_TO_CPUMASK) && defined(CONFIG_NUMA)
/* XXX maintain a list of all managed devices: insert here */
- while ((thread->t_flags & SVC_STOPPING) == 0) {
+ while (!(thread->t_flags & SVC_STOPPING) && !svc->srv_is_stopping) {
/* Don't exit while there are replies to be handled */
struct l_wait_info lwi = LWI_TIMEOUT(svc->srv_rqbd_timeout,
ptlrpc_retry_rqbds, svc);
cond_resched();
l_wait_event_exclusive (svc->srv_waitq,
- ((thread->t_flags & SVC_STOPPING) != 0) ||
+ thread->t_flags & SVC_STOPPING ||
+ svc->srv_is_stopping ||
(!list_empty(&svc->srv_idle_rqbds) &&
svc->srv_rqbd_timeout == 0) ||
!list_empty(&svc->srv_req_in_queue) ||
svc->srv_at_check,
&lwi);
+ if (thread->t_flags & SVC_STOPPING || svc->srv_is_stopping)
+ break;
+
lc_watchdog_touch(thread->t_watchdog, GET_TIMEOUT(svc));
ptlrpc_check_rqbd_pool(svc);
- if ((svc->srv_threads_started < svc->srv_threads_max) &&
- (svc->srv_n_active_reqs >= (svc->srv_threads_started - 1))){
+ if (svc->srv_threads_started < svc->srv_threads_max &&
+ svc->srv_n_active_reqs >= (svc->srv_threads_started - 1))
/* Ignore return code - we tried... */
ptlrpc_start_thread(dev, svc);
- }
if (!list_empty(&svc->srv_req_in_queue)) {
/* Process all incoming reqs before handling any */
lu_context_fini(&env.le_ctx);
out:
- CDEBUG(D_NET, "service thread %d exiting: rc %d\n", thread->t_id, rc);
+ CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n",
+ thread, thread->t_pid, thread->t_id, rc);
spin_lock(&svc->srv_lock);
svc->srv_threads_running--; /* must know immediately */
struct l_wait_info lwi = { 0 };
ENTRY;
- CDEBUG(D_RPCTRACE, "Stopping thread %p\n", thread);
+ CDEBUG(D_RPCTRACE, "Stopping thread [ %p : %u ]\n",
+ thread, thread->t_pid);
+
spin_lock(&svc->srv_lock);
/* let the thread know that we would like it to stop asap */
thread->t_flags |= SVC_STOPPING;
spin_unlock(&svc->srv_lock);
cfs_waitq_broadcast(&svc->srv_waitq);
- l_wait_event(thread->t_ctl_waitq, (thread->t_flags & SVC_STOPPED),
- &lwi);
+ l_wait_event(thread->t_ctl_waitq,
+ (thread->t_flags & SVC_STOPPED), &lwi);
spin_lock(&svc->srv_lock);
list_del(&thread->t_link);
CERROR("cannot start %s thread #%d: rc %d\n",
svc->srv_thread_name, i, rc);
ptlrpc_stop_all_threads(svc);
+ break;
}
}
RETURN(rc);
CDEBUG(D_RPCTRACE, "%s started %d min %d max %d running %d\n",
svc->srv_name, svc->srv_threads_started, svc->srv_threads_min,
svc->srv_threads_max, svc->srv_threads_running);
+
+ if (unlikely(svc->srv_is_stopping))
+ RETURN(-ESRCH);
+
if (unlikely(svc->srv_threads_started >= svc->srv_threads_max) ||
(OBD_FAIL_CHECK(OBD_FAIL_TGT_TOOMANY_THREADS) &&
svc->srv_threads_started == svc->srv_threads_min - 1))