struct ptlrpc_hr_thread {
int hrt_id; /* thread ID */
spinlock_t hrt_lock;
- cfs_waitq_t hrt_waitq;
+ wait_queue_head_t hrt_waitq; /* handler sleeps here until hrt_queue non-empty */
cfs_list_t hrt_queue; /* RS queue */
struct ptlrpc_hr_partition *hrt_partition;
};
/* CPU partition table, it's just cfs_cpt_table for now */
struct cfs_cpt_table *hr_cpt_table;
/** controller sleep waitq */
- cfs_waitq_t hr_waitq;
+ wait_queue_head_t hr_waitq;
unsigned int hr_stopping;
/** roundrobin rotor for non-affinity service */
unsigned int hr_rotor;
cfs_list_splice_init(&b->rsb_replies, &hrt->hrt_queue);
spin_unlock(&hrt->hrt_lock);
- cfs_waitq_signal(&hrt->hrt_waitq);
+ wake_up(&hrt->hrt_waitq);
b->rsb_n_replies = 0;
}
}
cfs_list_add_tail(&rs->rs_list, &hrt->hrt_queue);
spin_unlock(&hrt->hrt_lock);
- cfs_waitq_signal(&hrt->hrt_waitq);
+ wake_up(&hrt->hrt_waitq);
EXIT;
#else
cfs_list_add_tail(&rs->rs_list, &rs->rs_svcpt->scp_rep_queue);
svcpt->scp_at_check = 1;
svcpt->scp_at_checktime = cfs_time_current();
- cfs_waitq_signal(&svcpt->scp_waitq);
+ wake_up(&svcpt->scp_waitq);
}
static void
CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
CFS_INIT_LIST_HEAD(&svcpt->scp_req_incoming);
- cfs_waitq_init(&svcpt->scp_waitq);
+ init_waitqueue_head(&svcpt->scp_waitq);
/* history request & rqbd list */
CFS_INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
CFS_INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
CFS_INIT_LIST_HEAD(&svcpt->scp_rep_queue);
#endif
CFS_INIT_LIST_HEAD(&svcpt->scp_rep_idle);
- cfs_waitq_init(&svcpt->scp_rep_waitq);
+ init_waitqueue_head(&svcpt->scp_rep_waitq);
cfs_atomic_set(&svcpt->scp_nreps_difficult, 0);
/* adaptive timeout */
sptlrpc_svc_ctx_decref(req);
if (req != &req->rq_rqbd->rqbd_req) {
- /* NB request buffers use an embedded
- * req if the incoming req unlinked the
- * MD; this isn't one of them! */
- OBD_FREE(req, sizeof(*req));
- }
+ /* NB request buffers use an embedded
+ * req if the incoming req unlinked the
+ * MD; this isn't one of them! */
+ ptlrpc_request_cache_free(req);
+ }
}
/**
{
ptlrpc_server_hpreq_fini(req);
+ if (req->rq_session.lc_thread != NULL) {
+ lu_context_exit(&req->rq_session);
+ lu_context_fini(&req->rq_session);
+ }
+
ptlrpc_server_drop_request(req);
}
}
newdl = cfs_time_current_sec() + at_get(&svcpt->scp_at_estimate);
- OBD_ALLOC(reqcopy, sizeof *reqcopy);
- if (reqcopy == NULL)
- RETURN(-ENOMEM);
- OBD_ALLOC_LARGE(reqmsg, req->rq_reqlen);
- if (!reqmsg) {
- OBD_FREE(reqcopy, sizeof *reqcopy);
- RETURN(-ENOMEM);
- }
+ reqcopy = ptlrpc_request_cache_alloc(__GFP_IO);
+ if (reqcopy == NULL)
+ RETURN(-ENOMEM);
+ OBD_ALLOC_LARGE(reqmsg, req->rq_reqlen);
+ if (!reqmsg)
+ GOTO(out_free, rc = -ENOMEM);
*reqcopy = *req;
reqcopy->rq_reply_state = NULL;
out_put:
class_export_rpc_dec(reqcopy->rq_export);
- class_export_put(reqcopy->rq_export);
+ class_export_put(reqcopy->rq_export);
out:
- sptlrpc_svc_ctx_decref(reqcopy);
- OBD_FREE_LARGE(reqmsg, req->rq_reqlen);
- OBD_FREE(reqcopy, sizeof *reqcopy);
- RETURN(rc);
+ sptlrpc_svc_ctx_decref(reqcopy);
+ OBD_FREE_LARGE(reqmsg, req->rq_reqlen);
+out_free:
+ ptlrpc_request_cache_free(reqcopy);
+ RETURN(rc);
}
/* Send early replies to everybody expiring within at_early_margin
goto err_req;
}
- req->rq_svc_thread = thread;
+ req->rq_svc_thread = thread;
+ if (thread != NULL) {
+ /* initialize request session, it is needed for request
+ * processing by target */
+ rc = lu_context_init(&req->rq_session, LCT_SERVER_SESSION |
+ LCT_NOREF);
+ if (rc) {
+ CERROR("%s: failure to initialize session: rc = %d\n",
+ thread->t_name, rc);
+ goto err_req;
+ }
+ req->rq_session.lc_thread = thread;
+ lu_context_enter(&req->rq_session);
+ req->rq_svc_thread->t_env->le_ses = &req->rq_session;
+ }
- ptlrpc_at_add_timed(req);
+ ptlrpc_at_add_timed(req);
- /* Move it over to the request processing queue */
+ /* Move it over to the request processing queue */
rc = ptlrpc_server_request_add(svcpt, req);
if (rc)
GOTO(err_req, rc);
- cfs_waitq_signal(&svcpt->scp_waitq);
+ wake_up(&svcpt->scp_waitq);
RETURN(1);
err_req:
ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
struct ptlrpc_thread *thread)
{
- struct ptlrpc_service *svc = svcpt->scp_service;
- struct ptlrpc_request *request;
- struct timeval work_start;
- struct timeval work_end;
- long timediff;
- int rc;
- int fail_opc = 0;
- ENTRY;
+ struct ptlrpc_service *svc = svcpt->scp_service;
+ struct ptlrpc_request *request;
+ struct timeval work_start;
+ struct timeval work_end;
+ long timediff;
+ int fail_opc = 0;
+
+ ENTRY;
request = ptlrpc_server_request_get(svcpt, false);
if (request == NULL)
ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
- if(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
- libcfs_debug_dumplog();
+ if(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
+ libcfs_debug_dumplog();
- cfs_gettimeofday(&work_start);
- timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time,NULL);
- if (likely(svc->srv_stats != NULL)) {
+ do_gettimeofday(&work_start);
+ timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time,NULL);
+ if (likely(svc->srv_stats != NULL)) {
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
timediff);
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
at_get(&svcpt->scp_at_estimate));
}
- rc = lu_context_init(&request->rq_session, LCT_SESSION | LCT_NOREF);
- if (rc) {
- CERROR("Failure to initialize session: %d\n", rc);
- goto out_req;
- }
- request->rq_session.lc_thread = thread;
- request->rq_session.lc_cookie = 0x5;
- lu_context_enter(&request->rq_session);
-
- CDEBUG(D_NET, "got req "LPU64"\n", request->rq_xid);
-
- request->rq_svc_thread = thread;
- if (thread)
- request->rq_svc_thread->t_env->le_ses = &request->rq_session;
-
- if (likely(request->rq_export)) {
+ if (likely(request->rq_export)) {
if (unlikely(ptlrpc_check_req(request)))
goto put_conn;
ptlrpc_update_export_timer(request->rq_export, timediff >> 19);
goto put_conn;
}
- CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc "
- "%s:%s+%d:%d:x"LPU64":%s:%d\n", cfs_curproc_comm(),
- (request->rq_export ?
- (char *)request->rq_export->exp_client_uuid.uuid : "0"),
- (request->rq_export ?
- cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
- lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
- libcfs_id2str(request->rq_peer),
- lustre_msg_get_opc(request->rq_reqmsg));
+ CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc "
+ "%s:%s+%d:%d:x"LPU64":%s:%d\n", current_comm(),
+ (request->rq_export ?
+ (char *)request->rq_export->exp_client_uuid.uuid : "0"),
+ (request->rq_export ?
+ cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
+ lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
+ libcfs_id2str(request->rq_peer),
+ lustre_msg_get_opc(request->rq_reqmsg));
if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);
- rc = svc->srv_ops.so_req_handler(request);
+ CDEBUG(D_NET, "got req "LPU64"\n", request->rq_xid);
- ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
+ /* re-assign request and session thread to the current one */
+ request->rq_svc_thread = thread;
+ if (thread != NULL) {
+ LASSERT(request->rq_session.lc_thread != NULL);
+ request->rq_session.lc_thread = thread;
+ request->rq_session.lc_cookie = 0x55;
+ thread->t_env->le_ses = &request->rq_session;
+ }
+ svc->srv_ops.so_req_handler(request);
-put_conn:
- lu_context_exit(&request->rq_session);
- lu_context_fini(&request->rq_session);
+ ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
+put_conn:
if (unlikely(cfs_time_current_sec() > request->rq_deadline)) {
DEBUG_REQ(D_WARNING, request, "Request took longer "
"than estimated ("CFS_DURATION_T":"CFS_DURATION_T"s);"
request->rq_deadline));
}
- cfs_gettimeofday(&work_end);
- timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
+ do_gettimeofday(&work_end);
+ timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
- "%s:%s+%d:%d:x"LPU64":%s:%d Request procesed in "
- "%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
- cfs_curproc_comm(),
- (request->rq_export ?
- (char *)request->rq_export->exp_client_uuid.uuid : "0"),
+ "%s:%s+%d:%d:x"LPU64":%s:%d Request procesed in "
+ "%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
+ current_comm(),
+ (request->rq_export ?
+ (char *)request->rq_export->exp_client_uuid.uuid : "0"),
(request->rq_export ?
cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
lustre_msg_get_status(request->rq_reqmsg),
request->rq_arrival_time.tv_sec));
}
-out_req:
ptlrpc_server_finish_active_request(svcpt, request);
RETURN(1);
ptlrpc_rs_decref (rs);
if (cfs_atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
svc->srv_is_stopping)
- cfs_waitq_broadcast(&svcpt->scp_waitq);
+ wake_up_all(&svcpt->scp_waitq);
RETURN(1);
}
lc_watchdog_disable(thread->t_watchdog);
- cfs_cond_resched();
+ cond_resched();
l_wait_event_exclusive_head(svcpt->scp_waitq,
ptlrpc_thread_stopping(thread) ||
struct ptlrpc_service *svc = svcpt->scp_service;
struct ptlrpc_reply_state *rs;
#ifdef WITH_GROUP_INFO
- cfs_group_info_t *ginfo = NULL;
+ struct group_info *ginfo = NULL;
#endif
- struct lu_env *env;
- int counter = 0, rc = 0;
- ENTRY;
+ struct lu_env *env;
+ int counter = 0, rc = 0;
+ ENTRY;
- thread->t_pid = cfs_curproc_pid();
+ thread->t_pid = current_pid();
unshare_fs_struct();
/* NB: we will call cfs_cpt_bind() for all threads, because we
}
#ifdef WITH_GROUP_INFO
- ginfo = cfs_groups_alloc(0);
- if (!ginfo) {
- rc = -ENOMEM;
- goto out;
- }
+ ginfo = groups_alloc(0);
+ if (!ginfo) {
+ rc = -ENOMEM;
+ goto out;
+ }
- cfs_set_current_groups(ginfo);
- cfs_put_group_info(ginfo);
+ set_current_groups(ginfo);
+ put_group_info(ginfo);
#endif
if (svc->srv_ops.so_thr_init != NULL) {
spin_unlock(&svcpt->scp_lock);
/* wake up our creator in case he's still waiting. */
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt),
NULL, NULL);
spin_lock(&svcpt->scp_rep_lock);
cfs_list_add(&rs->rs_list, &svcpt->scp_rep_idle);
- cfs_waitq_signal(&svcpt->scp_rep_waitq);
+ wake_up(&svcpt->scp_rep_waitq);
spin_unlock(&svcpt->scp_rep_lock);
CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
thread->t_id = rc;
thread_add_flags(thread, SVC_STOPPED);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
spin_unlock(&svcpt->scp_lock);
return rc;
}
cfs_atomic_inc(&hrp->hrp_nstarted);
- cfs_waitq_signal(&ptlrpc_hr.hr_waitq);
+ wake_up(&ptlrpc_hr.hr_waitq);
while (!ptlrpc_hr.hr_stopping) {
l_wait_condition(hrt->hrt_waitq, hrt_dont_sleep(hrt, &replies));
}
cfs_atomic_inc(&hrp->hrp_nstopped);
- cfs_waitq_signal(&ptlrpc_hr.hr_waitq);
+ wake_up(&ptlrpc_hr.hr_waitq);
return 0;
}
if (hrp->hrp_thrs == NULL)
continue; /* uninitialized */
for (j = 0; j < hrp->hrp_nthrs; j++)
- cfs_waitq_broadcast(&hrp->hrp_thrs[j].hrt_waitq);
+ wake_up_all(&hrp->hrp_thrs[j].hrt_waitq);
}
cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
if (hrp->hrp_thrs == NULL)
continue; /* uninitialized */
- cfs_wait_event(ptlrpc_hr.hr_waitq,
+ wait_event(ptlrpc_hr.hr_waitq,
cfs_atomic_read(&hrp->hrp_nstopped) ==
cfs_atomic_read(&hrp->hrp_nstarted));
}
if (IS_ERR_VALUE(rc))
break;
}
- cfs_wait_event(ptlrpc_hr.hr_waitq,
+ wait_event(ptlrpc_hr.hr_waitq,
cfs_atomic_read(&hrp->hrp_nstarted) == j);
if (!IS_ERR_VALUE(rc))
continue;
thread_add_flags(thread, SVC_STOPPING);
}
- cfs_waitq_broadcast(&svcpt->scp_waitq);
+ wake_up_all(&svcpt->scp_waitq);
while (!cfs_list_empty(&svcpt->scp_threads)) {
thread = cfs_list_entry(svcpt->scp_threads.next,
OBD_CPT_ALLOC_PTR(thread, svc->srv_cptable, svcpt->scp_cpt);
if (thread == NULL)
RETURN(-ENOMEM);
- cfs_waitq_init(&thread->t_ctl_waitq);
+ init_waitqueue_head(&thread->t_ctl_waitq);
spin_lock(&svcpt->scp_lock);
if (!ptlrpc_threads_increasable(svcpt)) {
if (wait) {
CDEBUG(D_INFO, "Waiting for creating thread %s #%d\n",
svc->srv_thread_name, svcpt->scp_thr_nextid);
- cfs_schedule();
+ schedule();
goto again;
}
* by ptlrpc_svcpt_stop_threads now
*/
thread_add_flags(thread, SVC_STOPPED);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
spin_unlock(&svcpt->scp_lock);
} else {
cfs_list_del(&thread->t_link);
if (ptlrpc_hr.hr_partitions == NULL)
RETURN(-ENOMEM);
- cfs_waitq_init(&ptlrpc_hr.hr_waitq);
+ init_waitqueue_head(&ptlrpc_hr.hr_waitq);
cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
hrp->hrp_cpt = i;
hrt->hrt_id = j;
hrt->hrt_partition = hrp;
- cfs_waitq_init(&hrt->hrt_waitq);
+ init_waitqueue_head(&hrt->hrt_waitq);
spin_lock_init(&hrt->hrt_lock);
CFS_INIT_LIST_HEAD(&hrt->hrt_queue);
}
struct timeval right_now;
long timediff;
- cfs_gettimeofday(&right_now);
+ do_gettimeofday(&right_now);
spin_lock(&svcpt->scp_req_lock);
- /* How long has the next entry been waiting? */
+ /* How long has the next entry been waiting? */
if (ptlrpc_server_high_pending(svcpt, true))
request = ptlrpc_nrs_req_peek_nolock(svcpt, true);
else if (ptlrpc_server_normal_pending(svcpt, true))