X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Fservice.c;h=a1d1060034fd0455708e40f1755a675c3f218817;hb=96b814bda9b2d923885291849ae0f14f660c90e1;hp=aaf8091272de18ffb64f41c4cb4c0ff9700da7a4;hpb=1a77288332ff8bb9f658c574f226c85f71e1e580;p=fs%2Flustre-release.git diff --git a/lustre/ptlrpc/service.c b/lustre/ptlrpc/service.c index aaf8091..a1d1060 100644 --- a/lustre/ptlrpc/service.c +++ b/lustre/ptlrpc/service.c @@ -49,21 +49,15 @@ int test_req_buffer_pressure = 0; CFS_MODULE_PARM(test_req_buffer_pressure, "i", int, 0444, "set non-zero to put pressure on request buffer pools"); -unsigned int at_min = 0; CFS_MODULE_PARM(at_min, "i", int, 0644, "Adaptive timeout minimum (sec)"); -unsigned int at_max = 600; -EXPORT_SYMBOL(at_max); CFS_MODULE_PARM(at_max, "i", int, 0644, "Adaptive timeout maximum (sec)"); -unsigned int at_history = 600; CFS_MODULE_PARM(at_history, "i", int, 0644, "Adaptive timeouts remember the slowest event that took place " "within this period (sec)"); -static int at_early_margin = 5; CFS_MODULE_PARM(at_early_margin, "i", int, 0644, "How soon before an RPC deadline to send an early reply"); -static int at_extra = 30; CFS_MODULE_PARM(at_extra, "i", int, 0644, "How much extra time to give with each early reply"); @@ -72,7 +66,7 @@ CFS_MODULE_PARM(at_extra, "i", int, 0644, static int ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc); static CFS_LIST_HEAD(ptlrpc_all_services); -spinlock_t ptlrpc_all_services_lock; +cfs_spinlock_t ptlrpc_all_services_lock; static char * ptlrpc_alloc_request_buffer (int size) @@ -117,10 +111,10 @@ ptlrpc_alloc_rqbd (struct ptlrpc_service *svc) return (NULL); } - spin_lock(&svc->srv_lock); - list_add(&rqbd->rqbd_list, &svc->srv_idle_rqbds); + cfs_spin_lock(&svc->srv_lock); + cfs_list_add(&rqbd->rqbd_list, &svc->srv_idle_rqbds); svc->srv_nbufs++; - spin_unlock(&svc->srv_lock); + cfs_spin_unlock(&svc->srv_lock); return (rqbd); } @@ -131,12 +125,12 @@ ptlrpc_free_rqbd (struct ptlrpc_request_buffer_desc *rqbd) struct ptlrpc_service *svc = rqbd->rqbd_service; LASSERT (rqbd->rqbd_refcount == 0); - LASSERT (list_empty(&rqbd->rqbd_reqs)); + LASSERT (cfs_list_empty(&rqbd->rqbd_reqs)); - spin_lock(&svc->srv_lock); - list_del(&rqbd->rqbd_list); + cfs_spin_lock(&svc->srv_lock); + cfs_list_del(&rqbd->rqbd_list); svc->srv_nbufs--; - spin_unlock(&svc->srv_lock); + cfs_spin_unlock(&svc->srv_lock); ptlrpc_free_request_buffer (rqbd->rqbd_buffer, svc->srv_buf_size); OBD_FREE_PTR(rqbd); @@ -168,8 +162,8 @@ ptlrpc_grow_req_bufs(struct ptlrpc_service *svc) } void -ptlrpc_save_lock (struct ptlrpc_request *req, - struct lustre_handle *lock, int mode) +ptlrpc_save_lock(struct ptlrpc_request *req, + struct lustre_handle *lock, int mode, int no_ack) { struct ptlrpc_reply_state *rs = req->rq_reply_state; int idx; @@ -177,61 +171,225 @@ ptlrpc_save_lock (struct ptlrpc_request *req, LASSERT(rs != NULL); LASSERT(rs->rs_nlocks < RS_MAX_LOCKS); - idx = rs->rs_nlocks++; - rs->rs_locks[idx] = *lock; - rs->rs_modes[idx] = mode; - rs->rs_difficult = 1; + if (req->rq_export->exp_disconnected) { + ldlm_lock_decref(lock, mode); + } else { + idx = rs->rs_nlocks++; + rs->rs_locks[idx] = *lock; + rs->rs_modes[idx] = mode; + rs->rs_difficult = 1; + rs->rs_no_ack = !!no_ack; + } } -void -ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs) +#ifdef __KERNEL__ + +#define HRT_RUNNING 0 +#define HRT_STOPPING 1 + +struct ptlrpc_hr_thread { + cfs_spinlock_t hrt_lock; + unsigned long hrt_flags; + cfs_waitq_t hrt_wait; + cfs_list_t hrt_queue; + 
cfs_completion_t hrt_completion;
+};
+
+struct ptlrpc_hr_service {
+        int hr_index;
+        int hr_n_threads;
+        int hr_size;
+        struct ptlrpc_hr_thread hr_threads[0];
+};
+
+struct rs_batch {
+        cfs_list_t rsb_replies;
+        struct ptlrpc_service *rsb_svc;
+        unsigned int rsb_n_replies;
+};
+
+/**
+ * A pointer to per-node reply handling service.
+ */
+static struct ptlrpc_hr_service *ptlrpc_hr = NULL;
+
+/**
+ * maximum number of replies scheduled in one batch
+ */
+#define MAX_SCHEDULED 256
+
+/**
+ * Initialize a reply batch.
+ *
+ * \param b batch
+ */
+static void rs_batch_init(struct rs_batch *b)
+{
+        memset(b, 0, sizeof *b);
+        CFS_INIT_LIST_HEAD(&b->rsb_replies);
+}
+
+/**
+ * Choose an hr thread to dispatch requests to.
+ */
+static unsigned int get_hr_thread_index(struct ptlrpc_hr_service *hr)
+{
+        unsigned int idx;
+
+        /* Concurrent modification of hr_index w/o any spinlock
+           protection is harmless as long as the result fits
+           [0..(hr_n_threads-1)] range and each thread gets near equal
+           load. */
+        idx = hr->hr_index;
+        hr->hr_index = (idx >= hr->hr_n_threads - 1) ? 0 : idx + 1;
+        return idx;
+}
+
+/**
+ * Dispatch all replies accumulated in the batch to one of the
+ * dedicated reply handling threads.
+ *
+ * \param b batch
+ */
+static void rs_batch_dispatch(struct rs_batch *b)
+{
+        if (b->rsb_n_replies != 0) {
+                struct ptlrpc_hr_service *hr = ptlrpc_hr;
+                int idx;
+
+                idx = get_hr_thread_index(hr);
+
+                cfs_spin_lock(&hr->hr_threads[idx].hrt_lock);
+                cfs_list_splice_init(&b->rsb_replies,
+                                     &hr->hr_threads[idx].hrt_queue);
+                cfs_spin_unlock(&hr->hr_threads[idx].hrt_lock);
+                cfs_waitq_signal(&hr->hr_threads[idx].hrt_wait);
+                b->rsb_n_replies = 0;
+        }
+}
+
+/**
+ * Add a reply to a batch.
+ * Add one reply object to a batch, schedule batched replies on overflow.
+ *
+ * \param b batch
+ * \param rs reply
+ */
+static void rs_batch_add(struct rs_batch *b, struct ptlrpc_reply_state *rs)
 {
         struct ptlrpc_service *svc = rs->rs_service;
 
-#ifdef CONFIG_SMP
-        LASSERT (spin_is_locked (&svc->srv_lock));
+        if (svc != b->rsb_svc || b->rsb_n_replies >= MAX_SCHEDULED) {
+                if (b->rsb_svc != NULL) {
+                        rs_batch_dispatch(b);
+                        cfs_spin_unlock(&b->rsb_svc->srv_lock);
+                }
+                cfs_spin_lock(&svc->srv_lock);
+                b->rsb_svc = svc;
+        }
+        cfs_spin_lock(&rs->rs_lock);
+        rs->rs_scheduled_ever = 1;
+        if (rs->rs_scheduled == 0) {
+                cfs_list_move(&rs->rs_list, &b->rsb_replies);
+                rs->rs_scheduled = 1;
+                b->rsb_n_replies++;
+        }
+        rs->rs_committed = 1;
+        cfs_spin_unlock(&rs->rs_lock);
+}
+
+/**
+ * Reply batch finalization.
+ * Dispatch remaining replies from the batch
+ * and release the remaining spinlock.
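+ *
+ * The intended calling pattern (a sketch; ptlrpc_commit_replies()
+ * below is the real caller) is:
+ *
+ *      DECLARE_RS_BATCH(b);
+ *
+ *      rs_batch_init(&b);
+ *      cfs_list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
+ *                                   rs_obd_list)
+ *              rs_batch_add(&b, rs);
+ *      rs_batch_fini(&b);
+ *
+ * rs_batch_add() takes and keeps srv_lock of the current service, so a
+ * matching rs_batch_fini() is mandatory to flush and unlock.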
+ * + * \param b batch + */ +static void rs_batch_fini(struct rs_batch *b) +{ + if (b->rsb_svc != 0) { + rs_batch_dispatch(b); + cfs_spin_unlock(&b->rsb_svc->srv_lock); + } +} + +#define DECLARE_RS_BATCH(b) struct rs_batch b + +#else /* __KERNEL__ */ + +#define rs_batch_init(b) do{}while(0) +#define rs_batch_fini(b) do{}while(0) +#define rs_batch_add(b, r) ptlrpc_schedule_difficult_reply(r) +#define DECLARE_RS_BATCH(b) + +#endif /* __KERNEL__ */ + +void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs) +{ +#ifdef __KERNEL__ + struct ptlrpc_hr_service *hr = ptlrpc_hr; + int idx; + ENTRY; + + LASSERT(cfs_list_empty(&rs->rs_list)); + + idx = get_hr_thread_index(hr); + cfs_spin_lock(&hr->hr_threads[idx].hrt_lock); + cfs_list_add_tail(&rs->rs_list, &hr->hr_threads[idx].hrt_queue); + cfs_spin_unlock(&hr->hr_threads[idx].hrt_lock); + cfs_waitq_signal(&hr->hr_threads[idx].hrt_wait); + EXIT; +#else + cfs_list_add_tail(&rs->rs_list, &rs->rs_service->srv_reply_queue); #endif +} + +void +ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs) +{ + ENTRY; + + LASSERT_SPIN_LOCKED(&rs->rs_service->srv_lock); + LASSERT_SPIN_LOCKED(&rs->rs_lock); LASSERT (rs->rs_difficult); rs->rs_scheduled_ever = 1; /* flag any notification attempt */ - if (rs->rs_scheduled) /* being set up or already notified */ + if (rs->rs_scheduled) { /* being set up or already notified */ + EXIT; return; + } rs->rs_scheduled = 1; - list_del (&rs->rs_list); - list_add (&rs->rs_list, &svc->srv_reply_queue); - cfs_waitq_signal (&svc->srv_waitq); + cfs_list_del_init(&rs->rs_list); + ptlrpc_dispatch_difficult_reply(rs); + EXIT; } -void -ptlrpc_commit_replies (struct obd_device *obd) +void ptlrpc_commit_replies(struct obd_export *exp) { - struct list_head *tmp; - struct list_head *nxt; + struct ptlrpc_reply_state *rs, *nxt; + DECLARE_RS_BATCH(batch); + ENTRY; + rs_batch_init(&batch); /* Find any replies that have been committed and get their service * to attend to complete them. */ /* CAVEAT EMPTOR: spinlock ordering!!! 
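 * The order here is exp_uncommitted_replies_lock first;
 * rs_batch_add() then nests svc->srv_lock and rs->rs_lock inside
 * it, so these locks must never be taken the other way around.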
*/ - spin_lock(&obd->obd_uncommitted_replies_lock); - - list_for_each_safe (tmp, nxt, &obd->obd_uncommitted_replies) { - struct ptlrpc_reply_state *rs = - list_entry(tmp, struct ptlrpc_reply_state, rs_obd_list); - + cfs_spin_lock(&exp->exp_uncommitted_replies_lock); + cfs_list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies, + rs_obd_list) { LASSERT (rs->rs_difficult); - - if (rs->rs_transno <= obd->obd_last_committed) { - struct ptlrpc_service *svc = rs->rs_service; - - spin_lock (&svc->srv_lock); - list_del_init (&rs->rs_obd_list); - ptlrpc_schedule_difficult_reply (rs); - spin_unlock (&svc->srv_lock); + /* VBR: per-export last_committed */ + LASSERT(rs->rs_export); + if (rs->rs_transno <= exp->exp_last_committed) { + cfs_list_del_init(&rs->rs_obd_list); + rs_batch_add(&batch, rs); } } - - spin_unlock(&obd->obd_uncommitted_replies_lock); + cfs_spin_unlock(&exp->exp_uncommitted_replies_lock); + rs_batch_fini(&batch); + EXIT; } static int @@ -242,23 +400,23 @@ ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc) int posted = 0; for (;;) { - spin_lock(&svc->srv_lock); + cfs_spin_lock(&svc->srv_lock); - if (list_empty (&svc->srv_idle_rqbds)) { - spin_unlock(&svc->srv_lock); + if (cfs_list_empty (&svc->srv_idle_rqbds)) { + cfs_spin_unlock(&svc->srv_lock); return (posted); } - rqbd = list_entry(svc->srv_idle_rqbds.next, - struct ptlrpc_request_buffer_desc, - rqbd_list); - list_del (&rqbd->rqbd_list); + rqbd = cfs_list_entry(svc->srv_idle_rqbds.next, + struct ptlrpc_request_buffer_desc, + rqbd_list); + cfs_list_del (&rqbd->rqbd_list); /* assume we will post successfully */ svc->srv_nrqbd_receiving++; - list_add (&rqbd->rqbd_list, &svc->srv_active_rqbds); + cfs_list_add (&rqbd->rqbd_list, &svc->srv_active_rqbds); - spin_unlock(&svc->srv_lock); + cfs_spin_unlock(&svc->srv_lock); rc = ptlrpc_register_rqbd(rqbd); if (rc != 0) @@ -267,16 +425,16 @@ ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc) posted = 1; } - spin_lock(&svc->srv_lock); + cfs_spin_lock(&svc->srv_lock); svc->srv_nrqbd_receiving--; - list_del(&rqbd->rqbd_list); - list_add_tail(&rqbd->rqbd_list, &svc->srv_idle_rqbds); + cfs_list_del(&rqbd->rqbd_list); + cfs_list_add_tail(&rqbd->rqbd_list, &svc->srv_idle_rqbds); /* Don't complain if no request buffers are posted right now; LNET * won't drop requests because we set the portal lazy! 
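 * (A lazy portal queues incoming messages that match no posted
 * buffer instead of discarding them; see the LNetSetLazyPortal()
 * call in ptlrpc_init_svc() below.)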
*/ - spin_unlock(&svc->srv_lock); + cfs_spin_unlock(&svc->srv_lock); return (-1); } @@ -293,7 +451,7 @@ struct ptlrpc_service *ptlrpc_init_svc_conf(struct ptlrpc_service_conf *c, c->psc_watchdog_factor, h, name, proc_entry, prntfn, c->psc_min_threads, c->psc_max_threads, - threadname, c->psc_ctx_tags); + threadname, c->psc_ctx_tags, NULL); } EXPORT_SYMBOL(ptlrpc_init_svc_conf); @@ -313,10 +471,13 @@ ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size, int max_reply_size, cfs_proc_dir_entry_t *proc_entry, svcreq_printfn_t svcreq_printfn, int min_threads, int max_threads, - char *threadname, __u32 ctx_tags) + char *threadname, __u32 ctx_tags, + svc_hpreq_handler_t hp_handler) { - int rc; - struct ptlrpc_service *service; + int rc; + struct ptlrpc_at_array *array; + struct ptlrpc_service *service; + unsigned int size, index; ENTRY; LASSERT (nbufs > 0); @@ -330,7 +491,7 @@ ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size, int max_reply_size, /* First initialise enough for early teardown */ service->srv_name = name; - spin_lock_init(&service->srv_lock); + cfs_spin_lock_init(&service->srv_lock); CFS_INIT_LIST_HEAD(&service->srv_threads); cfs_waitq_init(&service->srv_waitq); @@ -348,31 +509,57 @@ ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size, int max_reply_size, service->srv_threads_max = max_threads; service->srv_thread_name = threadname; service->srv_ctx_tags = ctx_tags; + service->srv_hpreq_handler = hp_handler; + service->srv_hpreq_ratio = PTLRPC_SVC_HP_RATIO; + service->srv_hpreq_count = 0; + service->srv_n_hpreq = 0; rc = LNetSetLazyPortal(service->srv_req_portal); LASSERT (rc == 0); CFS_INIT_LIST_HEAD(&service->srv_request_queue); + CFS_INIT_LIST_HEAD(&service->srv_request_hpq); CFS_INIT_LIST_HEAD(&service->srv_idle_rqbds); CFS_INIT_LIST_HEAD(&service->srv_active_rqbds); CFS_INIT_LIST_HEAD(&service->srv_history_rqbds); CFS_INIT_LIST_HEAD(&service->srv_request_history); CFS_INIT_LIST_HEAD(&service->srv_active_replies); +#ifndef __KERNEL__ CFS_INIT_LIST_HEAD(&service->srv_reply_queue); +#endif CFS_INIT_LIST_HEAD(&service->srv_free_rs_list); cfs_waitq_init(&service->srv_free_rs_waitq); + cfs_atomic_set(&service->srv_n_difficult_replies, 0); - spin_lock_init(&service->srv_at_lock); + cfs_spin_lock_init(&service->srv_at_lock); CFS_INIT_LIST_HEAD(&service->srv_req_in_queue); - CFS_INIT_LIST_HEAD(&service->srv_at_list); + + array = &service->srv_at_array; + size = at_est2timeout(at_max); + array->paa_size = size; + array->paa_count = 0; + array->paa_deadline = -1; + + /* allocate memory for srv_at_array (ptlrpc_at_array) */ + OBD_ALLOC(array->paa_reqs_array, sizeof(cfs_list_t) * size); + if (array->paa_reqs_array == NULL) + GOTO(failed, NULL); + + for (index = 0; index < size; index++) + CFS_INIT_LIST_HEAD(&array->paa_reqs_array[index]); + + OBD_ALLOC(array->paa_reqs_count, sizeof(__u32) * size); + if (array->paa_reqs_count == NULL) + GOTO(failed, NULL); + cfs_timer_init(&service->srv_at_timer, ptlrpc_at_timer, service); /* At SOW, service time should be quick; 10s seems generous. If client timeout is less than this, we'll be sending an early reply. 
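 * (Example with the values below: srv_at_estimate starts at 10s, so
 * a request arriving with a 10s client timeout has its early-reply
 * timer fire at_early_margin (5s by default) before rq_deadline, and
 * ptlrpc_at_send_early_reply() then tries to push the deadline out
 * to now + at_get(&svc->srv_at_estimate).)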
*/ at_init(&service->srv_at_estimate, 10, 0); - spin_lock (&ptlrpc_all_services_lock); - list_add (&service->srv_list, &ptlrpc_all_services); - spin_unlock (&ptlrpc_all_services_lock); + cfs_spin_lock (&ptlrpc_all_services_lock); + cfs_list_add (&service->srv_list, &ptlrpc_all_services); + cfs_spin_unlock (&ptlrpc_all_services_lock); /* Now allocate the request buffers */ rc = ptlrpc_grow_req_bufs(service); @@ -406,8 +593,8 @@ failed: */ static void ptlrpc_server_free_request(struct ptlrpc_request *req) { - LASSERT(atomic_read(&req->rq_refcount) == 0); - LASSERT(list_empty(&req->rq_timed_list)); + LASSERT(cfs_atomic_read(&req->rq_refcount) == 0); + LASSERT(cfs_list_empty(&req->rq_timed_list)); /* DEBUG_REQ() assumes the reply state of a request with a valid * ref will not be destroyed until that reference is dropped. */ @@ -424,84 +611,134 @@ static void ptlrpc_server_free_request(struct ptlrpc_request *req) } /** + * increment the number of active requests consuming service threads. + */ +void ptlrpc_server_active_request_inc(struct ptlrpc_request *req) +{ + struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd; + struct ptlrpc_service *svc = rqbd->rqbd_service; + + cfs_spin_lock(&svc->srv_lock); + svc->srv_n_active_reqs++; + cfs_spin_unlock(&svc->srv_lock); +} + +/** + * decrement the number of active requests consuming service threads. + */ +void ptlrpc_server_active_request_dec(struct ptlrpc_request *req) +{ + struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd; + struct ptlrpc_service *svc = rqbd->rqbd_service; + + cfs_spin_lock(&svc->srv_lock); + svc->srv_n_active_reqs--; + cfs_spin_unlock(&svc->srv_lock); +} + +/** * drop a reference count of the request. if it reaches 0, we either * put it into history list, or free it immediately. */ -static void ptlrpc_server_drop_request(struct ptlrpc_request *req) +void ptlrpc_server_drop_request(struct ptlrpc_request *req) { struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd; struct ptlrpc_service *svc = rqbd->rqbd_service; int refcount; - struct list_head *tmp; - struct list_head *nxt; + cfs_list_t *tmp; + cfs_list_t *nxt; - if (!atomic_dec_and_test(&req->rq_refcount)) + if (!cfs_atomic_dec_and_test(&req->rq_refcount)) return; - spin_lock(&svc->srv_lock); + cfs_spin_lock(&svc->srv_at_lock); + if (req->rq_at_linked) { + struct ptlrpc_at_array *array = &svc->srv_at_array; + __u32 index = req->rq_at_index; + + LASSERT(!cfs_list_empty(&req->rq_timed_list)); + cfs_list_del_init(&req->rq_timed_list); + cfs_spin_lock(&req->rq_lock); + req->rq_at_linked = 0; + cfs_spin_unlock(&req->rq_lock); + array->paa_reqs_count[index]--; + array->paa_count--; + } else + LASSERT(cfs_list_empty(&req->rq_timed_list)); + cfs_spin_unlock(&svc->srv_at_lock); + + /* finalize request */ + if (req->rq_export) { + class_export_put(req->rq_export); + req->rq_export = NULL; + } + + cfs_spin_lock(&svc->srv_lock); svc->srv_n_active_reqs--; - list_add(&req->rq_list, &rqbd->rqbd_reqs); + cfs_list_add(&req->rq_list, &rqbd->rqbd_reqs); refcount = --(rqbd->rqbd_refcount); if (refcount == 0) { /* request buffer is now idle: add to history */ - list_del(&rqbd->rqbd_list); - list_add_tail(&rqbd->rqbd_list, &svc->srv_history_rqbds); + cfs_list_del(&rqbd->rqbd_list); + cfs_list_add_tail(&rqbd->rqbd_list, &svc->srv_history_rqbds); svc->srv_n_history_rqbds++; /* cull some history? 
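 * (The history is capped at srv_max_history_rqbds buffers; retiring
 * a buffer also retires every request still linked into its
 * rqbd_reqs list, with srv_request_max_cull_seq recording the
 * highest sequence culled so far.)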
* I expect only about 1 or 2 rqbds need to be recycled here */ while (svc->srv_n_history_rqbds > svc->srv_max_history_rqbds) { - rqbd = list_entry(svc->srv_history_rqbds.next, - struct ptlrpc_request_buffer_desc, - rqbd_list); + rqbd = cfs_list_entry(svc->srv_history_rqbds.next, + struct ptlrpc_request_buffer_desc, + rqbd_list); - list_del(&rqbd->rqbd_list); + cfs_list_del(&rqbd->rqbd_list); svc->srv_n_history_rqbds--; /* remove rqbd's reqs from svc's req history while * I've got the service lock */ - list_for_each(tmp, &rqbd->rqbd_reqs) { - req = list_entry(tmp, struct ptlrpc_request, - rq_list); + cfs_list_for_each(tmp, &rqbd->rqbd_reqs) { + req = cfs_list_entry(tmp, struct ptlrpc_request, + rq_list); /* Track the highest culled req seq */ if (req->rq_history_seq > svc->srv_request_max_cull_seq) svc->srv_request_max_cull_seq = req->rq_history_seq; - list_del(&req->rq_history_list); + cfs_list_del(&req->rq_history_list); } - spin_unlock(&svc->srv_lock); + cfs_spin_unlock(&svc->srv_lock); - list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) { - req = list_entry(rqbd->rqbd_reqs.next, - struct ptlrpc_request, - rq_list); - list_del(&req->rq_list); + cfs_list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) { + req = cfs_list_entry(rqbd->rqbd_reqs.next, + struct ptlrpc_request, + rq_list); + cfs_list_del(&req->rq_list); ptlrpc_server_free_request(req); } - spin_lock(&svc->srv_lock); + cfs_spin_lock(&svc->srv_lock); /* * now all reqs including the embedded req has been * disposed, schedule request buffer for re-use. */ - LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) == 0); - list_add_tail(&rqbd->rqbd_list, &svc->srv_idle_rqbds); + LASSERT(cfs_atomic_read(&rqbd->rqbd_req.rq_refcount) == + 0); + cfs_list_add_tail(&rqbd->rqbd_list, + &svc->srv_idle_rqbds); } - spin_unlock(&svc->srv_lock); + cfs_spin_unlock(&svc->srv_lock); } else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) { /* If we are low on memory, we are not interested in history */ - list_del(&req->rq_list); - list_del_init(&req->rq_history_list); - spin_unlock(&svc->srv_lock); + cfs_list_del(&req->rq_list); + cfs_list_del_init(&req->rq_history_list); + cfs_spin_unlock(&svc->srv_lock); ptlrpc_server_free_request(req); } else { - spin_unlock(&svc->srv_lock); + cfs_spin_unlock(&svc->srv_lock); } } @@ -511,16 +748,6 @@ static void ptlrpc_server_drop_request(struct ptlrpc_request *req) */ static void ptlrpc_server_finish_request(struct ptlrpc_request *req) { - struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service; - - if (req->rq_phase != RQ_PHASE_NEW) /* incorrect message magic */ - DEBUG_REQ(D_INFO, req, "free req"); - - spin_lock(&svc->srv_at_lock); - req->rq_sent_final = 1; - list_del_init(&req->rq_timed_list); - spin_unlock(&svc->srv_at_lock); - ptlrpc_server_drop_request(req); } @@ -530,7 +757,7 @@ static void ptlrpc_server_finish_request(struct ptlrpc_request *req) static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay) { struct obd_export *oldest_exp; - time_t oldest_time; + time_t oldest_time, new_time; ENTRY; @@ -541,9 +768,13 @@ static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay) of the list, we can be really lazy here - we don't have to evict at the exact right moment. Eventually, all silent exports will make it to the top of the list. */ - exp->exp_last_request_time = max(exp->exp_last_request_time, - cfs_time_current_sec() + extra_delay); + /* Do not pay attention on 1sec or smaller renewals. 
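+ * (e.g. with exp_last_request_time == 1000, renewals arriving at
+ * 1000 or 1001 are skipped; one arriving at 1002 falls through and
+ * re-sorts the export in the timed list below.)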
*/ + new_time = cfs_time_current_sec() + extra_delay; + if (exp->exp_last_request_time + 1 /*second */ >= new_time) + RETURN_EXIT; + + exp->exp_last_request_time = new_time; CDEBUG(D_HA, "updating export %s at "CFS_TIME_T" exp %p\n", exp->exp_client_uuid.uuid, exp->exp_last_request_time, exp); @@ -551,22 +782,21 @@ static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay) /* exports may get disconnected from the chain even though the export has references, so we must keep the spin lock while manipulating the lists */ - spin_lock(&exp->exp_obd->obd_dev_lock); + cfs_spin_lock(&exp->exp_obd->obd_dev_lock); - if (list_empty(&exp->exp_obd_chain_timed)) { + if (cfs_list_empty(&exp->exp_obd_chain_timed)) { /* this one is not timed */ - spin_unlock(&exp->exp_obd->obd_dev_lock); - EXIT; - return; + cfs_spin_unlock(&exp->exp_obd->obd_dev_lock); + RETURN_EXIT; } - list_move_tail(&exp->exp_obd_chain_timed, - &exp->exp_obd->obd_exports_timed); + cfs_list_move_tail(&exp->exp_obd_chain_timed, + &exp->exp_obd->obd_exports_timed); - oldest_exp = list_entry(exp->exp_obd->obd_exports_timed.next, - struct obd_export, exp_obd_chain_timed); + oldest_exp = cfs_list_entry(exp->exp_obd->obd_exports_timed.next, + struct obd_export, exp_obd_chain_timed); oldest_time = oldest_exp->exp_last_request_time; - spin_unlock(&exp->exp_obd->obd_dev_lock); + cfs_spin_unlock(&exp->exp_obd->obd_dev_lock); if (exp->exp_obd->obd_recovering) { /* be nice to everyone during recovery */ @@ -586,8 +816,8 @@ static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay) exp->exp_obd->obd_eviction_timer = cfs_time_current_sec() + 3 * PING_INTERVAL; CDEBUG(D_HA, "%s: Think about evicting %s from "CFS_TIME_T"\n", - exp->exp_obd->obd_name, obd_export_nid2str(exp), - oldest_time); + exp->exp_obd->obd_name, + obd_export_nid2str(oldest_exp), oldest_time); } } else { if (cfs_time_current_sec() > @@ -629,26 +859,24 @@ static int ptlrpc_check_req(struct ptlrpc_request *req) static void ptlrpc_at_set_timer(struct ptlrpc_service *svc) { - struct ptlrpc_request *rq; + struct ptlrpc_at_array *array = &svc->srv_at_array; __s32 next; - spin_lock(&svc->srv_at_lock); - if (list_empty(&svc->srv_at_list)) { + cfs_spin_lock(&svc->srv_at_lock); + if (array->paa_count == 0) { cfs_timer_disarm(&svc->srv_at_timer); - spin_unlock(&svc->srv_at_lock); + cfs_spin_unlock(&svc->srv_at_lock); return; } /* Set timer for closest deadline */ - rq = list_entry(svc->srv_at_list.next, struct ptlrpc_request, - rq_timed_list); - next = (__s32)(rq->rq_deadline - cfs_time_current_sec() - + next = (__s32)(array->paa_deadline - cfs_time_current_sec() - at_early_margin); if (next <= 0) ptlrpc_at_timer((unsigned long)svc); else cfs_timer_arm(&svc->srv_at_timer, cfs_time_shift(next)); - spin_unlock(&svc->srv_at_lock); + cfs_spin_unlock(&svc->srv_at_lock); CDEBUG(D_INFO, "armed %s at %+ds\n", svc->srv_name, next); } @@ -656,7 +884,9 @@ static void ptlrpc_at_set_timer(struct ptlrpc_service *svc) static int ptlrpc_at_add_timed(struct ptlrpc_request *req) { struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service; - struct ptlrpc_request *rq; + struct ptlrpc_request *rq = NULL; + struct ptlrpc_at_array *array = &svc->srv_at_array; + __u32 index; int found = 0; if (AT_OFF) @@ -668,31 +898,40 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req) if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0) return(-ENOSYS); - spin_lock(&svc->srv_at_lock); - - if (unlikely(req->rq_sent_final)) { - spin_unlock(&svc->srv_at_lock); - 
return 0; - } - - LASSERT(list_empty(&req->rq_timed_list)); - /* Add to sorted list. Presumably latest rpcs will have the latest - deadlines, so search backward. */ - list_for_each_entry_reverse(rq, &svc->srv_at_list, rq_timed_list) { - if (req->rq_deadline > rq->rq_deadline) { - list_add(&req->rq_timed_list, &rq->rq_timed_list); - found++; - break; + cfs_spin_lock(&svc->srv_at_lock); + LASSERT(cfs_list_empty(&req->rq_timed_list)); + + index = (unsigned long)req->rq_deadline % array->paa_size; + if (array->paa_reqs_count[index] > 0) { + /* latest rpcs will have the latest deadlines in the list, + * so search backward. */ + cfs_list_for_each_entry_reverse(rq, + &array->paa_reqs_array[index], + rq_timed_list) { + if (req->rq_deadline >= rq->rq_deadline) { + cfs_list_add(&req->rq_timed_list, + &rq->rq_timed_list); + break; + } } } - if (!found) - /* Add to front if shortest deadline or list empty */ - list_add(&req->rq_timed_list, &svc->srv_at_list); - /* Check if we're the head of the list */ - found = (svc->srv_at_list.next == &req->rq_timed_list); + /* Add the request at the head of the list */ + if (cfs_list_empty(&req->rq_timed_list)) + cfs_list_add(&req->rq_timed_list, + &array->paa_reqs_array[index]); - spin_unlock(&svc->srv_at_lock); + cfs_spin_lock(&req->rq_lock); + req->rq_at_linked = 1; + cfs_spin_unlock(&req->rq_lock); + req->rq_at_index = index; + array->paa_reqs_count[index]++; + array->paa_count++; + if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) { + array->paa_deadline = req->rq_deadline; + found = 1; + } + cfs_spin_unlock(&svc->srv_at_lock); if (found) ptlrpc_at_set_timer(svc); @@ -700,8 +939,7 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req) return 0; } -static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req, - int extra_time) +static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) { struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service; struct ptlrpc_request *reqcopy; @@ -717,7 +955,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req, "%ssending early reply (deadline %+lds, margin %+lds) for " "%d+%d", AT_OFF ? "AT off - not " : "", olddl, olddl - at_get(&svc->srv_at_estimate), - at_get(&svc->srv_at_estimate), extra_time); + at_get(&svc->srv_at_estimate), at_extra); if (AT_OFF) RETURN(0); @@ -737,24 +975,37 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req, RETURN(-ENOSYS); } - if (extra_time) { - /* Fake our processing time into the future to ask the - clients for some extra amount of time */ - extra_time += cfs_time_current_sec() - - req->rq_arrival_time.tv_sec; - at_add(&svc->srv_at_estimate, extra_time); - } - - newdl = req->rq_arrival_time.tv_sec + at_get(&svc->srv_at_estimate); - if (req->rq_deadline >= newdl) { - /* We're not adding any time, no need to send an early reply - (e.g. maybe at adaptive_max) */ - DEBUG_REQ(D_WARNING, req, "Couldn't add any time (" - CFS_DURATION_T"/"CFS_DURATION_T"), " - "not sending early reply\n", olddl, - cfs_time_sub(newdl, cfs_time_current_sec())); - RETURN(-ETIMEDOUT); + if (req->rq_export && + lustre_msg_get_flags(req->rq_reqmsg) & + (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) { + /* During recovery, we don't want to send too many early + * replies, but on the other hand we want to make sure the + * client has enough time to resend if the rpc is lost. So + * during the recovery period send at least 4 early replies, + * spacing them every at_extra if we can. 
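+ * (Worked example with the usual defaults: at_extra == 30, so with
+ * obd_recovery_timeout == 60 the estimate is pinned at
+ * min(30, 60 / 4) == 15s, and at least four early replies fit into
+ * the recovery window.)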
at_estimate should + * always equal this fixed value during recovery. */ + at_measured(&svc->srv_at_estimate, min(at_extra, + req->rq_export->exp_obd->obd_recovery_timeout / 4)); + } else { + /* Fake our processing time into the future to ask the clients + * for some extra amount of time */ + at_measured(&svc->srv_at_estimate, at_extra + + cfs_time_current_sec() - + req->rq_arrival_time.tv_sec); + + /* Check to see if we've actually increased the deadline - + * we may be past adaptive_max */ + if (req->rq_deadline >= req->rq_arrival_time.tv_sec + + at_get(&svc->srv_at_estimate)) { + DEBUG_REQ(D_WARNING, req, "Couldn't add any time " + "(%ld/%ld), not sending early reply\n", + olddl, req->rq_arrival_time.tv_sec + + at_get(&svc->srv_at_estimate) - + cfs_time_current_sec()); + RETURN(-ETIMEDOUT); + } } + newdl = cfs_time_current_sec() + at_get(&svc->srv_at_estimate); OBD_ALLOC(reqcopy, sizeof *reqcopy); if (reqcopy == NULL) @@ -776,10 +1027,12 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req, reqcopy->rq_reqmsg = reqmsg; memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen); - if (req->rq_sent_final) { + LASSERT(cfs_atomic_read(&req->rq_refcount)); + /** if it is last refcount then early reply isn't needed */ + if (cfs_atomic_read(&req->rq_refcount) == 1) { DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, " "abort sending early reply\n"); - GOTO(out, rc = 0); + GOTO(out, rc = -EINVAL); } /* Connection ref */ @@ -827,32 +1080,33 @@ out: static int ptlrpc_at_check_timed(struct ptlrpc_service *svc) { struct ptlrpc_request *rq, *n; - struct list_head work_list; + cfs_list_t work_list; + struct ptlrpc_at_array *array = &svc->srv_at_array; + __u32 index, count; + time_t deadline; time_t now = cfs_time_current_sec(); cfs_duration_t delay; int first, counter = 0; ENTRY; - spin_lock(&svc->srv_at_lock); + cfs_spin_lock(&svc->srv_at_lock); if (svc->srv_at_check == 0) { - spin_unlock(&svc->srv_at_lock); + cfs_spin_unlock(&svc->srv_at_lock); RETURN(0); } delay = cfs_time_sub(cfs_time_current(), svc->srv_at_checktime); svc->srv_at_check = 0; - if (list_empty(&svc->srv_at_list)) { - spin_unlock(&svc->srv_at_lock); + if (array->paa_count == 0) { + cfs_spin_unlock(&svc->srv_at_lock); RETURN(0); } /* The timer went off, but maybe the nearest rpc already completed. */ - rq = list_entry(svc->srv_at_list.next, struct ptlrpc_request, - rq_timed_list); - first = (int)(rq->rq_deadline - now); + first = array->paa_deadline - now; if (first > at_early_margin) { /* We've still got plenty of time. Reset the timer. */ - spin_unlock(&svc->srv_at_lock); + cfs_spin_unlock(&svc->srv_at_lock); ptlrpc_at_set_timer(svc); RETURN(0); } @@ -860,16 +1114,44 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service *svc) /* We're close to a timeout, and we don't know how much longer the server will take. Send early replies to everyone expiring soon. */ CFS_INIT_LIST_HEAD(&work_list); - list_for_each_entry_safe(rq, n, &svc->srv_at_list, rq_timed_list) { - if (rq->rq_deadline <= now + at_early_margin) { - list_move_tail(&rq->rq_timed_list, &work_list); - counter++; - } else { + deadline = -1; + index = (unsigned long)array->paa_deadline % array->paa_size; + count = array->paa_count; + while (count > 0) { + count -= array->paa_reqs_count[index]; + cfs_list_for_each_entry_safe(rq, n, + &array->paa_reqs_array[index], + rq_timed_list) { + if (rq->rq_deadline <= now + at_early_margin) { + cfs_list_del_init(&rq->rq_timed_list); + /** + * ptlrpc_server_drop_request() may drop + * refcount to 0 already. 
Let's check this and + * don't add entry to work_list + */ + if (likely(cfs_atomic_inc_not_zero(&rq->rq_refcount))) + cfs_list_add(&rq->rq_timed_list, &work_list); + counter++; + array->paa_reqs_count[index]--; + array->paa_count--; + cfs_spin_lock(&rq->rq_lock); + rq->rq_at_linked = 0; + cfs_spin_unlock(&rq->rq_lock); + continue; + } + + /* update the earliest deadline */ + if (deadline == -1 || rq->rq_deadline < deadline) + deadline = rq->rq_deadline; + break; } - } - spin_unlock(&svc->srv_at_lock); + if (++index >= array->paa_size) + index = 0; + } + array->paa_deadline = deadline; + cfs_spin_unlock(&svc->srv_at_lock); /* we have a new earliest deadline, restart the timer */ ptlrpc_at_set_timer(svc); @@ -887,29 +1169,184 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service *svc) at_get(&svc->srv_at_estimate), delay); } - /* ptlrpc_server_finish_request may delete an entry out of - * the work list */ - spin_lock(&svc->srv_at_lock); - while (!list_empty(&work_list)) { - rq = list_entry(work_list.next, struct ptlrpc_request, - rq_timed_list); - list_del_init(&rq->rq_timed_list); - /* if the entry is still in the worklist, it hasn't been - deleted, and is safe to take a ref to keep the req around */ - atomic_inc(&rq->rq_refcount); - spin_unlock(&svc->srv_at_lock); + /* we took additional refcount so entries can't be deleted from list, no + * locking is needed */ + while (!cfs_list_empty(&work_list)) { + rq = cfs_list_entry(work_list.next, struct ptlrpc_request, + rq_timed_list); + cfs_list_del_init(&rq->rq_timed_list); - if (ptlrpc_at_send_early_reply(rq, at_extra) == 0) + if (ptlrpc_at_send_early_reply(rq) == 0) ptlrpc_at_add_timed(rq); ptlrpc_server_drop_request(rq); - spin_lock(&svc->srv_at_lock); } - spin_unlock(&svc->srv_at_lock); RETURN(0); } +/** + * Put the request to the export list if the request may become + * a high priority one. + */ +static int ptlrpc_hpreq_init(struct ptlrpc_service *svc, + struct ptlrpc_request *req) +{ + int rc; + ENTRY; + + if (svc->srv_hpreq_handler) { + rc = svc->srv_hpreq_handler(req); + if (rc) + RETURN(rc); + } + if (req->rq_export && req->rq_ops) { + cfs_spin_lock(&req->rq_export->exp_lock); + cfs_list_add(&req->rq_exp_list, + &req->rq_export->exp_queued_rpc); + cfs_spin_unlock(&req->rq_export->exp_lock); + } + + RETURN(0); +} + +/** Remove the request from the export list. */ +static void ptlrpc_hpreq_fini(struct ptlrpc_request *req) +{ + ENTRY; + if (req->rq_export && req->rq_ops) { + cfs_spin_lock(&req->rq_export->exp_lock); + cfs_list_del_init(&req->rq_exp_list); + cfs_spin_unlock(&req->rq_export->exp_lock); + } + EXIT; +} + +/** + * Make the request a high priority one. + * + * All the high priority requests are queued in a separate FIFO + * ptlrpc_service::srv_request_hpq list which is parallel to + * ptlrpc_service::srv_request_queue list but has a higher priority + * for handling. + * + * \see ptlrpc_server_handle_request(). + */ +static void ptlrpc_hpreq_reorder_nolock(struct ptlrpc_service *svc, + struct ptlrpc_request *req) +{ + ENTRY; + LASSERT(svc != NULL); + cfs_spin_lock(&req->rq_lock); + if (req->rq_hp == 0) { + int opc = lustre_msg_get_opc(req->rq_reqmsg); + + /* Add to the high priority queue. 
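+ * rq_hp makes the move idempotent on repeated calls;
+ * ptlrpc_hpreq_reorder() below is the entry point for promoting an
+ * already-queued request, e.g. from the ldlm blocking-AST path.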
*/ + cfs_list_move_tail(&req->rq_list, &svc->srv_request_hpq); + req->rq_hp = 1; + if (opc != OBD_PING) + DEBUG_REQ(D_NET, req, "high priority req"); + } + cfs_spin_unlock(&req->rq_lock); + EXIT; +} + +void ptlrpc_hpreq_reorder(struct ptlrpc_request *req) +{ + struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service; + ENTRY; + + cfs_spin_lock(&svc->srv_lock); + /* It may happen that the request is already taken for the processing + * but still in the export list, do not re-add it into the HP list. */ + if (req->rq_phase == RQ_PHASE_NEW) + ptlrpc_hpreq_reorder_nolock(svc, req); + cfs_spin_unlock(&svc->srv_lock); + EXIT; +} + +/** Check if the request is a high priority one. */ +static int ptlrpc_server_hpreq_check(struct ptlrpc_request *req) +{ + int opc, rc = 0; + ENTRY; + + /* Check by request opc. */ + opc = lustre_msg_get_opc(req->rq_reqmsg); + if (opc == OBD_PING) + RETURN(1); + + /* Perform request specific check. */ + if (req->rq_ops && req->rq_ops->hpreq_check) + rc = req->rq_ops->hpreq_check(req); + RETURN(rc); +} + +/** Check if a request is a high priority one. */ +static int ptlrpc_server_request_add(struct ptlrpc_service *svc, + struct ptlrpc_request *req) +{ + int rc; + ENTRY; + + rc = ptlrpc_server_hpreq_check(req); + if (rc < 0) + RETURN(rc); + + cfs_spin_lock(&svc->srv_lock); + /* Before inserting the request into the queue, check if it is not + * inserted yet, or even already handled -- it may happen due to + * a racing ldlm_server_blocking_ast(). */ + if (req->rq_phase == RQ_PHASE_NEW && cfs_list_empty(&req->rq_list)) { + if (rc) + ptlrpc_hpreq_reorder_nolock(svc, req); + else + cfs_list_add_tail(&req->rq_list, + &svc->srv_request_queue); + } + cfs_spin_unlock(&svc->srv_lock); + + RETURN(0); +} + +/* Only allow normal priority requests on a service that has a high-priority + * queue if forced (i.e. cleanup), if there are other high priority requests + * already being processed (i.e. those threads can service more high-priority + * requests), or if there are enough idle threads that a later thread can do + * a high priority request. 
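+ *
+ * With both queues non-empty, ptlrpc_server_request_get() below
+ * picks srv_hpreq_ratio high priority requests for every normal
+ * one (srv_hpreq_count resets whenever a normal request is chosen),
+ * so neither class can starve the other:
+ *
+ *      HP, ..., HP (ratio times), normal, HP, ..., HP, normal, ...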
*/ +static int ptlrpc_server_allow_normal(struct ptlrpc_service *svc, int force) +{ + return force || !svc->srv_hpreq_handler || svc->srv_n_hpreq > 0 || + svc->srv_threads_running < svc->srv_threads_started - 2; +} + +static struct ptlrpc_request * +ptlrpc_server_request_get(struct ptlrpc_service *svc) +{ + struct ptlrpc_request *req = NULL; + ENTRY; + + if (!cfs_list_empty(&svc->srv_request_queue) && + (cfs_list_empty(&svc->srv_request_hpq) || + svc->srv_hpreq_count >= svc->srv_hpreq_ratio)) { + req = cfs_list_entry(svc->srv_request_queue.next, + struct ptlrpc_request, rq_list); + svc->srv_hpreq_count = 0; + } else if (!cfs_list_empty(&svc->srv_request_hpq)) { + req = cfs_list_entry(svc->srv_request_hpq.next, + struct ptlrpc_request, rq_list); + svc->srv_hpreq_count++; + } + RETURN(req); +} + +static int ptlrpc_server_request_pending(struct ptlrpc_service *svc, int force) +{ + return ((ptlrpc_server_allow_normal(svc, force) && + !cfs_list_empty(&svc->srv_request_queue)) || + !cfs_list_empty(&svc->srv_request_hpq)); +} + /* Handle freshly incoming reqs, add to timed early reply list, pass on to regular request queue */ static int @@ -922,18 +1359,18 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service *svc) LASSERT(svc); - spin_lock(&svc->srv_lock); - if (list_empty(&svc->srv_req_in_queue)) { - spin_unlock(&svc->srv_lock); + cfs_spin_lock(&svc->srv_lock); + if (cfs_list_empty(&svc->srv_req_in_queue)) { + cfs_spin_unlock(&svc->srv_lock); RETURN(0); } - req = list_entry(svc->srv_req_in_queue.next, - struct ptlrpc_request, rq_list); - list_del_init (&req->rq_list); + req = cfs_list_entry(svc->srv_req_in_queue.next, + struct ptlrpc_request, rq_list); + cfs_list_del_init (&req->rq_list); /* Consider this still a "queued" request as far as stats are concerned */ - spin_unlock(&svc->srv_lock); + cfs_spin_unlock(&svc->srv_lock); /* go through security check/transform */ rc = sptlrpc_svc_unwrap_request(req); @@ -949,15 +1386,18 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service *svc) LBUG(); } - /* Clear request swab mask; this is a new request */ - req->rq_req_swab_mask = 0; - - rc = lustre_unpack_msg(req->rq_reqmsg, req->rq_reqlen); - if (rc != 0) { - CERROR("error unpacking request: ptl %d from %s x"LPU64"\n", - svc->srv_req_portal, libcfs_id2str(req->rq_peer), - req->rq_xid); - goto err_req; + /* + * for null-flavored rpc, msg has been unpacked by sptlrpc, although + * redo it wouldn't be harmful. 
+ */ + if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) { + rc = ptlrpc_unpack_req_msg(req, req->rq_reqlen); + if (rc != 0) { + CERROR("error unpacking request: ptl %d from %s " + "x"LPU64"\n", svc->srv_req_portal, + libcfs_id2str(req->rq_peer), req->rq_xid); + goto err_req; + } } rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF); @@ -968,6 +1408,13 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service *svc) goto err_req; } + if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_REQ_OPC) && + lustre_msg_get_opc(req->rq_reqmsg) == obd_fail_val) { + CERROR("drop incoming rpc opc %u, x"LPU64"\n", + obd_fail_val, req->rq_xid); + goto err_req; + } + rc = -EINVAL; if (lustre_msg_get_type(req->rq_reqmsg) != PTL_RPC_MSG_REQUEST) { CERROR("wrong packet type received (type=%u) from %s\n", @@ -976,7 +1423,18 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service *svc) goto err_req; } - CDEBUG(D_NET, "got req "LPD64"\n", req->rq_xid); + switch(lustre_msg_get_opc(req->rq_reqmsg)) { + case MDS_WRITEPAGE: + case OST_WRITE: + req->rq_bulk_write = 1; + break; + case MDS_READPAGE: + case OST_READ: + req->rq_bulk_read = 1; + break; + } + + CDEBUG(D_NET, "got req "LPU64"\n", req->rq_xid); req->rq_export = class_conn2export( lustre_msg_get_handle(req->rq_reqmsg)); @@ -989,10 +1447,9 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service *svc) "illegal security flavor,"); } - class_export_put(req->rq_export); - req->rq_export = NULL; if (rc) goto err_req; + ptlrpc_update_export_timer(req->rq_export, 0); } /* req_in handling should/must be fast */ @@ -1013,19 +1470,22 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service *svc) } ptlrpc_at_add_timed(req); + rc = ptlrpc_hpreq_init(svc, req); + if (rc) + GOTO(err_req, rc); /* Move it over to the request processing queue */ - spin_lock(&svc->srv_lock); - list_add_tail(&req->rq_list, &svc->srv_request_queue); + rc = ptlrpc_server_request_add(svc, req); + if (rc) + GOTO(err_req, rc); cfs_waitq_signal(&svc->srv_waitq); - spin_unlock(&svc->srv_lock); RETURN(1); err_req: - spin_lock(&svc->srv_lock); + cfs_spin_lock(&svc->srv_lock); svc->srv_n_queued_reqs--; svc->srv_n_active_reqs++; - spin_unlock(&svc->srv_lock); + cfs_spin_unlock(&svc->srv_lock); ptlrpc_server_finish_request(req); RETURN(1); @@ -1040,39 +1500,62 @@ ptlrpc_server_handle_request(struct ptlrpc_service *svc, struct timeval work_start; struct timeval work_end; long timediff; - int rc; + int opc, rc; + int fail_opc = 0; ENTRY; LASSERT(svc); - spin_lock(&svc->srv_lock); - if (unlikely(list_empty (&svc->srv_request_queue) || - ( + cfs_spin_lock(&svc->srv_lock); #ifndef __KERNEL__ - /* !@%$# liblustre only has 1 thread */ - svc->srv_n_difficult_replies != 0 && + /* !@%$# liblustre only has 1 thread */ + if (cfs_atomic_read(&svc->srv_n_difficult_replies) != 0) { + cfs_spin_unlock(&svc->srv_lock); + RETURN(0); + } #endif - svc->srv_n_active_reqs >= (svc->srv_threads_running - 1)))) { - /* Don't handle regular requests in the last thread, in order * re - * to handle difficult replies (which might block other threads) - * as well as handle any incoming reqs, early replies, etc. - * That means we always need at least 2 service threads. 
*/ - spin_unlock(&svc->srv_lock); + request = ptlrpc_server_request_get(svc); + if (request == NULL) { + cfs_spin_unlock(&svc->srv_lock); RETURN(0); } - request = list_entry (svc->srv_request_queue.next, - struct ptlrpc_request, rq_list); - list_del_init (&request->rq_list); + opc = lustre_msg_get_opc(request->rq_reqmsg); + if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT)) + fail_opc = OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT; + else if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT)) + fail_opc = OBD_FAIL_PTLRPC_HPREQ_TIMEOUT; + + if (unlikely(fail_opc)) { + if (request->rq_export && request->rq_ops) { + cfs_spin_unlock(&svc->srv_lock); + OBD_FAIL_TIMEOUT(fail_opc, 4); + cfs_spin_lock(&svc->srv_lock); + request = ptlrpc_server_request_get(svc); + if (request == NULL) { + cfs_spin_unlock(&svc->srv_lock); + RETURN(0); + } + } + } + + cfs_list_del_init(&request->rq_list); svc->srv_n_queued_reqs--; svc->srv_n_active_reqs++; + if (request->rq_hp) + svc->srv_n_hpreq++; - spin_unlock(&svc->srv_lock); + /* The phase is changed under the lock here because we need to know + * the request is under processing (see ptlrpc_hpreq_reorder()). */ + ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET); + cfs_spin_unlock(&svc->srv_lock); + + ptlrpc_hpreq_fini(request); if(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG)) libcfs_debug_dumplog(); - do_gettimeofday(&work_start); + cfs_gettimeofday(&work_start); timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time,NULL); if (likely(svc->srv_stats != NULL)) { lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR, @@ -1085,12 +1568,14 @@ ptlrpc_server_handle_request(struct ptlrpc_service *svc, at_get(&svc->srv_at_estimate)); } - rc = lu_context_init(&request->rq_session, LCT_SESSION); + rc = lu_context_init(&request->rq_session, + LCT_SESSION|LCT_REMEMBER|LCT_NOREF); if (rc) { CERROR("Failure to initialize session: %d\n", rc); goto out_req; } request->rq_session.lc_thread = thread; + request->rq_session.lc_cookie = 0x5; lu_context_enter(&request->rq_session); CDEBUG(D_NET, "got req "LPU64"\n", request->rq_xid); @@ -1099,9 +1584,6 @@ ptlrpc_server_handle_request(struct ptlrpc_service *svc, if (thread) request->rq_svc_thread->t_env->le_ses = &request->rq_session; - request->rq_export = class_conn2export( - lustre_msg_get_handle(request->rq_reqmsg)); - if (likely(request->rq_export)) { if (unlikely(ptlrpc_check_req(request))) goto put_conn; @@ -1122,41 +1604,27 @@ ptlrpc_server_handle_request(struct ptlrpc_service *svc, goto put_rpc_export; } - request->rq_phase = RQ_PHASE_INTERPRET; - CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc " "%s:%s+%d:%d:x"LPU64":%s:%d\n", cfs_curproc_comm(), (request->rq_export ? (char *)request->rq_export->exp_client_uuid.uuid : "0"), (request->rq_export ? - atomic_read(&request->rq_export->exp_refcount) : -99), + cfs_atomic_read(&request->rq_export->exp_refcount) : -99), lustre_msg_get_status(request->rq_reqmsg), request->rq_xid, libcfs_id2str(request->rq_peer), lustre_msg_get_opc(request->rq_reqmsg)); - OBD_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, obd_fail_val); + if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING) + OBD_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, obd_fail_val); rc = svc->srv_handler(request); - request->rq_phase = RQ_PHASE_COMPLETE; - - CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc " - "%s:%s+%d:%d:x"LPU64":%s:%d\n", cfs_curproc_comm(), - (request->rq_export ? - (char *)request->rq_export->exp_client_uuid.uuid : "0"), - (request->rq_export ? 
-                atomic_read(&request->rq_export->exp_refcount) : -99),
-               lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
-               libcfs_id2str(request->rq_peer),
-               lustre_msg_get_opc(request->rq_reqmsg));
+        ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
 
 put_rpc_export:
         if (export != NULL)
                 class_export_rpc_put(export);
 put_conn:
-        if (likely(request->rq_export != NULL))
-                class_export_put(request->rq_export);
-
         lu_context_exit(&request->rq_session);
         lu_context_fini(&request->rq_session);
@@ -1170,17 +1638,28 @@ put_conn:
                        request->rq_deadline));
         }
 
-        do_gettimeofday(&work_end);
+        cfs_gettimeofday(&work_end);
         timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
-        CDEBUG(D_RPCTRACE, "request x"LPU64" opc %u from %s processed in "
+        CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
+               "%s:%s+%d:%d:x"LPU64":%s:%d Request processed in "
                "%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
-               request->rq_xid, lustre_msg_get_opc(request->rq_reqmsg),
-               libcfs_id2str(request->rq_peer), timediff,
-               cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
-               request->rq_repmsg ? lustre_msg_get_transno(request->rq_repmsg) :
-               request->rq_transno, request->rq_status,
-               request->rq_repmsg ? lustre_msg_get_status(request->rq_repmsg):
-               -999);
+               cfs_curproc_comm(),
+               (request->rq_export ?
+                (char *)request->rq_export->exp_client_uuid.uuid : "0"),
+               (request->rq_export ?
+                cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
+               lustre_msg_get_status(request->rq_reqmsg),
+               request->rq_xid,
+               libcfs_id2str(request->rq_peer),
+               lustre_msg_get_opc(request->rq_reqmsg),
+               timediff,
+               cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
+               (request->rq_repmsg ?
+                lustre_msg_get_transno(request->rq_repmsg) :
+                request->rq_transno),
+               request->rq_status,
+               (request->rq_repmsg ?
+                lustre_msg_get_status(request->rq_repmsg) : -999));
 
         if (likely(svc->srv_stats != NULL && request->rq_reqmsg != NULL)) {
                 __u32 op = lustre_msg_get_opc(request->rq_reqmsg);
                 int opc = opcode_offset(op);
@@ -1201,52 +1680,69 @@ put_conn:
         }
 
 out_req:
+        cfs_spin_lock(&svc->srv_lock);
+        if (request->rq_hp)
+                svc->srv_n_hpreq--;
+        cfs_spin_unlock(&svc->srv_lock);
         ptlrpc_server_finish_request(request);
 
         RETURN(1);
 }
 
+/**
+ * An internal function to process a single reply state object.
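+ *
+ * In the kernel a reply state reaches this function through one of
+ * the dedicated reply handling threads:
+ *
+ *      rs_batch_dispatch() / ptlrpc_dispatch_difficult_reply()
+ *              -> hrt_queue -> ptlrpc_hr_main() -> ptlrpc_handle_rs()
+ *
+ * while liblustre calls it directly from
+ * ptlrpc_server_handle_reply().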
+ */ static int -ptlrpc_server_handle_reply (struct ptlrpc_service *svc) +ptlrpc_handle_rs (struct ptlrpc_reply_state *rs) { - struct ptlrpc_reply_state *rs; + struct ptlrpc_service *svc = rs->rs_service; struct obd_export *exp; struct obd_device *obd; int nlocks; int been_handled; ENTRY; - spin_lock(&svc->srv_lock); - if (list_empty (&svc->srv_reply_queue)) { - spin_unlock(&svc->srv_lock); - RETURN(0); - } - - rs = list_entry (svc->srv_reply_queue.next, - struct ptlrpc_reply_state, rs_list); - exp = rs->rs_export; obd = exp->exp_obd; LASSERT (rs->rs_difficult); LASSERT (rs->rs_scheduled); + LASSERT (cfs_list_empty(&rs->rs_list)); - list_del_init (&rs->rs_list); - - /* Disengage from notifiers carefully (lock order - irqrestore below!)*/ - spin_unlock(&svc->srv_lock); - - spin_lock (&obd->obd_uncommitted_replies_lock); - /* Noop if removed already */ - list_del_init (&rs->rs_obd_list); - spin_unlock (&obd->obd_uncommitted_replies_lock); - - spin_lock (&exp->exp_lock); + cfs_spin_lock (&exp->exp_lock); /* Noop if removed already */ - list_del_init (&rs->rs_exp_list); - spin_unlock (&exp->exp_lock); + cfs_list_del_init (&rs->rs_exp_list); + cfs_spin_unlock (&exp->exp_lock); + + /* The disk commit callback holds exp_uncommitted_replies_lock while it + * iterates over newly committed replies, removing them from + * exp_uncommitted_replies. It then drops this lock and schedules the + * replies it found for handling here. + * + * We can avoid contention for exp_uncommitted_replies_lock between the + * HRT threads and further commit callbacks by checking rs_committed + * which is set in the commit callback while it holds both + * rs_lock and exp_uncommitted_reples. + * + * If we see rs_committed clear, the commit callback _may_ not have + * handled this reply yet and we race with it to grab + * exp_uncommitted_replies_lock before removing the reply from + * exp_uncommitted_replies. Note that if we lose the race and the + * reply has already been removed, list_del_init() is a noop. + * + * If we see rs_committed set, we know the commit callback is handling, + * or has handled this reply since store reordering might allow us to + * see rs_committed set out of sequence. But since this is done + * holding rs_lock, we can be sure it has all completed once we hold + * rs_lock, which we do right next. 
+ */ + if (!rs->rs_committed) { + cfs_spin_lock(&exp->exp_uncommitted_replies_lock); + cfs_list_del_init(&rs->rs_obd_list); + cfs_spin_unlock(&exp->exp_uncommitted_replies_lock); + } - spin_lock(&svc->srv_lock); + cfs_spin_lock(&rs->rs_lock); been_handled = rs->rs_handled; rs->rs_handled = 1; @@ -1260,13 +1756,12 @@ ptlrpc_server_handle_reply (struct ptlrpc_service *svc) CWARN("All locks stolen from rs %p x"LPD64".t"LPD64 " o%d NID %s\n", rs, - rs->rs_xid, rs->rs_transno, - lustre_msg_get_opc(rs->rs_msg), + rs->rs_xid, rs->rs_transno, rs->rs_opc, libcfs_nid2str(exp->exp_connection->c_peer.nid)); } if ((!been_handled && rs->rs_on_net) || nlocks > 0) { - spin_unlock(&svc->srv_lock); + cfs_spin_unlock(&rs->rs_lock); if (!been_handled && rs->rs_on_net) { LNetMDUnlink(rs->rs_md_h); @@ -1278,43 +1773,73 @@ ptlrpc_server_handle_reply (struct ptlrpc_service *svc) ldlm_lock_decref(&rs->rs_locks[nlocks], rs->rs_modes[nlocks]); - spin_lock(&svc->srv_lock); + cfs_spin_lock(&rs->rs_lock); } rs->rs_scheduled = 0; if (!rs->rs_on_net) { /* Off the net */ - svc->srv_n_difficult_replies--; - spin_unlock(&svc->srv_lock); + cfs_spin_unlock(&rs->rs_lock); class_export_put (exp); rs->rs_export = NULL; ptlrpc_rs_decref (rs); - atomic_dec (&svc->srv_outstanding_replies); + cfs_atomic_dec (&svc->srv_outstanding_replies); + if (cfs_atomic_dec_and_test(&svc->srv_n_difficult_replies) && + svc->srv_is_stopping) + cfs_waitq_broadcast(&svc->srv_waitq); RETURN(1); } /* still on the net; callback will schedule */ - spin_unlock(&svc->srv_lock); + cfs_spin_unlock(&rs->rs_lock); RETURN(1); } #ifndef __KERNEL__ + +/** + * Check whether given service has a reply available for processing + * and process it. + * + * \param svc a ptlrpc service + * \retval 0 no replies processed + * \retval 1 one reply processed + */ +static int +ptlrpc_server_handle_reply(struct ptlrpc_service *svc) +{ + struct ptlrpc_reply_state *rs = NULL; + ENTRY; + + cfs_spin_lock(&svc->srv_lock); + if (!cfs_list_empty(&svc->srv_reply_queue)) { + rs = cfs_list_entry(svc->srv_reply_queue.prev, + struct ptlrpc_reply_state, + rs_list); + cfs_list_del_init(&rs->rs_list); + } + cfs_spin_unlock(&svc->srv_lock); + if (rs != NULL) + ptlrpc_handle_rs(rs); + RETURN(rs != NULL); +} + /* FIXME make use of timeout later */ int liblustre_check_services (void *arg) { int did_something = 0; int rc; - struct list_head *tmp, *nxt; + cfs_list_t *tmp, *nxt; ENTRY; /* I'm relying on being single threaded, not to have to lock * ptlrpc_all_services etc */ - list_for_each_safe (tmp, nxt, &ptlrpc_all_services) { + cfs_list_for_each_safe (tmp, nxt, &ptlrpc_all_services) { struct ptlrpc_service *svc = - list_entry (tmp, struct ptlrpc_service, srv_list); + cfs_list_entry (tmp, struct ptlrpc_service, srv_list); if (svc->srv_threads_running != 0) /* I've recursed */ continue; @@ -1344,18 +1869,6 @@ liblustre_check_services (void *arg) #else /* __KERNEL__ */ -/* Don't use daemonize, it removes fs struct from new thread (bug 418) */ -void ptlrpc_daemonize(char *name) -{ - struct fs_struct *fs = current->fs; - - atomic_inc(&fs->count); - cfs_daemonize(name); - exit_fs(cfs_current()); - current->fs = fs; - ll_set_fs_pwd(current->fs, init_task.fs->pwdmnt, init_task.fs->pwd); -} - static void ptlrpc_check_rqbd_pool(struct ptlrpc_service *svc) { @@ -1387,6 +1900,85 @@ ptlrpc_retry_rqbds(void *arg) return (-ETIMEDOUT); } +/** + * Status bits to pass todo info from + * ptlrpc_main_check_event to ptlrpc_main. 
+ */ +#define PTLRPC_MAIN_STOPPING 0x01 +#define PTLRPC_MAIN_IN_REQ 0x02 +#define PTLRPC_MAIN_ACTIVE_REQ 0x04 +#define PTLRPC_MAIN_CHECK_TIMED 0x08 +#define PTLRPC_MAIN_REPOST 0x10 + +/** + * A container to share per-thread status variables between + * ptlrpc_main_check_event and ptlrpc_main functions. + */ +struct ptlrpc_main_check_s { + /** todo info for the ptrlrpc_main */ + int todo; + /** is this thread counted as running or not? */ + int running; +}; + +/** + * Check whether current service thread has work to do. + */ +static int ptlrpc_main_check_event(struct ptlrpc_thread *t, + struct ptlrpc_main_check_s *status) +{ + struct ptlrpc_service *svc = t->t_svc; + ENTRY; + + status->todo = 0; + + /* check the stop flags w/o any locking to make all + * concurrently running threads stop faster. */ + if (unlikely((t->t_flags & SVC_STOPPING) || + svc->srv_is_stopping)) { + status->todo |= PTLRPC_MAIN_STOPPING; + goto out; + } + + cfs_spin_lock(&svc->srv_lock); + /* count this thread as not running before possible sleep in + * the outer wait event if it is not done yet. */ + if (status->running) { + LASSERT(svc->srv_threads_running > 0); + svc->srv_threads_running--; + status->running = 0; + } + /* Process all incoming reqs before handling any */ + if (!cfs_list_empty(&svc->srv_req_in_queue)) { + status->todo |= PTLRPC_MAIN_IN_REQ; + } + /* Don't handle regular requests in the last thread, in order + * to handle any incoming reqs, early replies, etc. */ + if (ptlrpc_server_request_pending(svc, 0) && + (svc->srv_threads_running < (svc->srv_threads_started - 1))) { + status->todo |= PTLRPC_MAIN_ACTIVE_REQ; + } + if (svc->srv_at_check) { + status->todo |= PTLRPC_MAIN_CHECK_TIMED; + } + if ((!cfs_list_empty(&svc->srv_idle_rqbds) && + svc->srv_rqbd_timeout == 0)) { + status->todo |= PTLRPC_MAIN_REPOST; + } + /* count this thread as active if it goes out the outer + * wait event */ + if (status->todo) { + svc->srv_threads_running++; + status->running = 1; + } + cfs_spin_unlock(&svc->srv_lock); + out: + RETURN(status->todo); +} + +/** + * Main prlrpc service thread routine. 
+ */ static int ptlrpc_main(void *arg) { struct ptlrpc_svc_data *data = (struct ptlrpc_svc_data *)arg; @@ -1394,15 +1986,16 @@ static int ptlrpc_main(void *arg) struct ptlrpc_thread *thread = data->thread; struct obd_device *dev = data->dev; struct ptlrpc_reply_state *rs; - struct lc_watchdog *watchdog; + struct ptlrpc_main_check_s st; #ifdef WITH_GROUP_INFO - struct group_info *ginfo = NULL; + cfs_group_info_t *ginfo = NULL; #endif struct lu_env env; int counter = 0, rc = 0; ENTRY; - ptlrpc_daemonize(data->name); + thread->t_pid = cfs_curproc_pid(); + cfs_daemonize_ctxt(data->name); #if defined(HAVE_NODE_TO_CPUMASK) && defined(CONFIG_NUMA) /* we need to do this before any per-thread allocation is done so that @@ -1410,26 +2003,28 @@ static int ptlrpc_main(void *arg) if (svc->srv_cpu_affinity) { int cpu, num_cpu; - for (cpu = 0, num_cpu = 0; cpu < num_possible_cpus(); cpu++) { - if (!cpu_online(cpu)) + for (cpu = 0, num_cpu = 0; cpu < cfs_num_possible_cpus(); + cpu++) { + if (!cfs_cpu_online(cpu)) continue; - if (num_cpu == thread->t_id % num_online_cpus()) + if (num_cpu == thread->t_id % cfs_num_online_cpus()) break; num_cpu++; } - set_cpus_allowed(cfs_current(), node_to_cpumask(cpu_to_node(cpu))); + cfs_set_cpus_allowed(cfs_current(), + node_to_cpumask(cpu_to_node(cpu))); } #endif #ifdef WITH_GROUP_INFO - ginfo = groups_alloc(0); + ginfo = cfs_groups_alloc(0); if (!ginfo) { rc = -ENOMEM; goto out; } - set_current_groups(ginfo); - put_group_info(ginfo); + cfs_set_current_groups(ginfo); + cfs_put_group_info(ginfo); #endif if (svc->srv_init != NULL) { @@ -1438,12 +2033,14 @@ static int ptlrpc_main(void *arg) goto out; } - rc = lu_context_init(&env.le_ctx, svc->srv_ctx_tags); + rc = lu_context_init(&env.le_ctx, + svc->srv_ctx_tags|LCT_REMEMBER|LCT_NOREF); if (rc) goto out_srv_fini; thread->t_env = &env; env.le_ctx.lc_thread = thread; + env.le_ctx.lc_cookie = 0x6; /* Alloc reply state structure for this one */ OBD_ALLOC_GFP(rs, svc->srv_max_reply_size, CFS_ALLOC_STD); @@ -1452,100 +2049,85 @@ static int ptlrpc_main(void *arg) goto out_srv_fini; } - /* Record that the thread is running */ - thread->t_flags = SVC_RUNNING; + cfs_spin_lock(&svc->srv_lock); + /* SVC_STOPPING may already be set here if someone else is trying + * to stop the service while this new thread has been dynamically + * forked. We still set SVC_RUNNING to let our creator know that + * we are now running, however we will exit as soon as possible */ + thread->t_flags |= SVC_RUNNING; + cfs_spin_unlock(&svc->srv_lock); + /* * wake up our creator. Note: @data is invalid after this point, * because it's allocated on ptlrpc_start_thread() stack. */ cfs_waitq_signal(&thread->t_ctl_waitq); - watchdog = lc_watchdog_add(max_t(int, obd_timeout, AT_OFF ? 
+        thread->t_watchdog = lc_watchdog_add(CFS_GET_TIMEOUT(svc), NULL, NULL);
 
-        spin_lock(&svc->srv_lock);
-        svc->srv_threads_running++;
-        list_add(&rs->rs_list, &svc->srv_free_rs_list);
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
+        cfs_list_add(&rs->rs_list, &svc->srv_free_rs_list);
+        cfs_spin_unlock(&svc->srv_lock);
         cfs_waitq_signal(&svc->srv_free_rs_waitq);
 
-        CDEBUG(D_NET, "service thread %d (#%d)started\n", thread->t_id,
-               svc->srv_threads_running);
+        CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
+               svc->srv_threads_running);
 
         /* XXX maintain a list of all managed devices: insert here */
 
-        while ((thread->t_flags & SVC_STOPPING) == 0 ||
-               svc->srv_n_difficult_replies != 0) {
+        st.running = 0;
+        st.todo = 0;
+
+        while (!(st.todo & PTLRPC_MAIN_STOPPING)) {
                 /* Don't exit while there are replies to be handled */
                 struct l_wait_info lwi = LWI_TIMEOUT(svc->srv_rqbd_timeout,
                                                      ptlrpc_retry_rqbds, svc);
 
-                lc_watchdog_disable(watchdog);
+                lc_watchdog_disable(thread->t_watchdog);
 
-                cond_resched();
+                cfs_cond_resched();
 
                 l_wait_event_exclusive (svc->srv_waitq,
-                              ((thread->t_flags & SVC_STOPPING) != 0 &&
-                               svc->srv_n_difficult_replies == 0) ||
-                              (!list_empty(&svc->srv_idle_rqbds) &&
-                               svc->srv_rqbd_timeout == 0) ||
-                              !list_empty(&svc->srv_req_in_queue) ||
-                              !list_empty(&svc->srv_reply_queue) ||
-                              (!list_empty(&svc->srv_request_queue) &&
-                               (svc->srv_n_active_reqs <
-                                (svc->srv_threads_running - 1))) ||
-                              svc->srv_at_check,
-                              &lwi);
-
-                lc_watchdog_touch_ms(watchdog, max_t(int, obd_timeout,
-                                     AT_OFF ? 0 :
-                                     at_get(&svc->srv_at_estimate)) *
-                                     svc->srv_watchdog_factor);
+                                        ptlrpc_main_check_event(thread, &st),
+                                        &lwi);
+
+                lc_watchdog_touch(thread->t_watchdog, CFS_GET_TIMEOUT(svc));
 
                 ptlrpc_check_rqbd_pool(svc);
 
-                if ((svc->srv_threads_started < svc->srv_threads_max) &&
-                    (svc->srv_n_active_reqs >= (svc->srv_threads_started - 1))){
+                if (svc->srv_threads_started < svc->srv_threads_max &&
+                    svc->srv_n_active_reqs >= (svc->srv_threads_started - 1))
                         /* Ignore return code - we tried... */
                         ptlrpc_start_thread(dev, svc);
-                }
-
-                if (!list_empty(&svc->srv_reply_queue))
-                        ptlrpc_server_handle_reply(svc);
 
-                if (!list_empty(&svc->srv_req_in_queue)) {
-                        /* Process all incoming reqs before handling any */
+                if (st.todo & PTLRPC_MAIN_IN_REQ) {
                        ptlrpc_server_handle_req_in(svc);
                         /* but limit ourselves in case of flood */
                         if (counter++ < 1000)
                                 continue;
                         counter = 0;
                 }
-
-                if (svc->srv_at_check)
+                if (st.todo & PTLRPC_MAIN_CHECK_TIMED) {
                         ptlrpc_at_check_timed(svc);
-
-                /* don't handle requests in the last thread */
-                if (!list_empty (&svc->srv_request_queue) &&
-                    (svc->srv_n_active_reqs < (svc->srv_threads_running - 1))) {
+                }
+                if (st.todo & PTLRPC_MAIN_ACTIVE_REQ) {
                         lu_context_enter(&env.le_ctx);
                         ptlrpc_server_handle_request(svc, thread);
                         lu_context_exit(&env.le_ctx);
                 }
-
-                if (!list_empty(&svc->srv_idle_rqbds) &&
+                if ((st.todo & PTLRPC_MAIN_REPOST) &&
                     ptlrpc_server_post_idle_rqbds(svc) < 0) {
-                        /* I just failed to repost request buffers. Wait
-                         * for a timeout (unless something else happens)
-                         * before I try again */
+                        /* I just failed to repost request buffers.
+                         * Wait for a timeout (unless something else
+                         * happens) before I try again */
                         svc->srv_rqbd_timeout = cfs_time_seconds(1)/10;
                         CDEBUG(D_RPCTRACE,"Posted buffers: %d\n",
                                svc->srv_nrqbd_receiving);
                 }
         }
 
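The loop above replaces the open-coded watchdog interval (the deleted max_t(...) * srv_watchdog_factor expressions) with CFS_GET_TIMEOUT(svc) in both lc_watchdog_add() and lc_watchdog_touch(). The macro itself is defined in a header outside this diff; judging only from the deleted lines, a plausible reconstruction would be:

    /* Hypothetical reconstruction of CFS_GET_TIMEOUT(); the real definition
     * lives in a header not shown here and may differ (for instance in
     * units, since the deleted call was lc_watchdog_touch_ms()).  It keeps
     * the interval at least obd_timeout, folds in the adaptive-timeout
     * estimate when AT is enabled, and scales by the per-service factor. */
    #define CFS_GET_TIMEOUT(svc)                                          \
            (max_t(int, obd_timeout,                                      \
                   AT_OFF ? 0 : at_get(&(svc)->srv_at_estimate)) *        \
             (svc)->srv_watchdog_factor)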
-        lc_watchdog_delete(watchdog);
+        lc_watchdog_delete(thread->t_watchdog);
+        thread->t_watchdog = NULL;
 
 out_srv_fini:
         /*
@@ -1556,54 +2138,203 @@ out_srv_fini:
         lu_context_fini(&env.le_ctx);
 out:
-        CDEBUG(D_NET, "service thread %d exiting: rc %d\n", thread->t_id, rc);
+        CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n",
+               thread, thread->t_pid, thread->t_id, rc);
+
+        if (st.running) {
+                cfs_spin_lock(&svc->srv_lock);
+                svc->srv_threads_running--;
+                cfs_spin_unlock(&svc->srv_lock);
+        }
 
-        spin_lock(&svc->srv_lock);
-        svc->srv_threads_running--; /* must know immediately */
         thread->t_id = rc;
         thread->t_flags = SVC_STOPPED;
         cfs_waitq_signal(&thread->t_ctl_waitq);
-        spin_unlock(&svc->srv_lock);
 
+        return rc;
+}
+
+struct ptlrpc_hr_args {
+        int                       thread_index;
+        int                       cpu_index;
+        struct ptlrpc_hr_service *hrs;
+};
+
+static int hrt_dont_sleep(struct ptlrpc_hr_thread *t,
+                          cfs_list_t *replies)
+{
+        int result;
+
+        cfs_spin_lock(&t->hrt_lock);
+        cfs_list_splice_init(&t->hrt_queue, replies);
+        result = cfs_test_bit(HRT_STOPPING, &t->hrt_flags) ||
+                 !cfs_list_empty(replies);
+        cfs_spin_unlock(&t->hrt_lock);
+        return result;
+}
+
+static int ptlrpc_hr_main(void *arg)
+{
+        struct ptlrpc_hr_args *hr_args = arg;
+        struct ptlrpc_hr_service *hr = hr_args->hrs;
+        struct ptlrpc_hr_thread *t = &hr->hr_threads[hr_args->thread_index];
+        char threadname[20];
+        CFS_LIST_HEAD(replies);
+
+        snprintf(threadname, sizeof(threadname),
+                 "ptlrpc_hr_%d", hr_args->thread_index);
+
+        cfs_daemonize_ctxt(threadname);
+#if defined(CONFIG_SMP) && defined(HAVE_NODE_TO_CPUMASK)
+        cfs_set_cpus_allowed(cfs_current(),
+                             node_to_cpumask(cpu_to_node(hr_args->cpu_index)));
+#endif
+        cfs_set_bit(HRT_RUNNING, &t->hrt_flags);
+        cfs_waitq_signal(&t->hrt_wait);
+
+        while (!cfs_test_bit(HRT_STOPPING, &t->hrt_flags)) {
+
+                l_cfs_wait_event(t->hrt_wait, hrt_dont_sleep(t, &replies));
+                while (!cfs_list_empty(&replies)) {
+                        struct ptlrpc_reply_state *rs;
+
+                        rs = cfs_list_entry(replies.prev,
+                                            struct ptlrpc_reply_state,
+                                            rs_list);
+                        cfs_list_del_init(&rs->rs_list);
+                        ptlrpc_handle_rs(rs);
+                }
+        }
+
+        cfs_clear_bit(HRT_RUNNING, &t->hrt_flags);
+        cfs_complete(&t->hrt_completion);
+
+        return 0;
+}
+
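Note that ptlrpc_hr_main() never calls ptlrpc_handle_rs() with hrt_lock held: hrt_dont_sleep() splices the whole shared hrt_queue onto a thread-local list under the lock, and the replies are then drained lock-free. The same splice-then-drain idiom in a self-contained user-space form (mutex and names are illustrative only, not the Lustre list primitives):

    #include <pthread.h>
    #include <stdio.h>

    struct node {
            struct node *next;
            int          value;
    };

    struct queue {
            pthread_mutex_t lock;
            struct node    *head;   /* singly-linked list for brevity */
    };

    /* Move the whole shared list to a private one while holding the lock,
     * the counterpart of cfs_list_splice_init() in hrt_dont_sleep(). */
    static struct node *splice_all(struct queue *q)
    {
            struct node *private_list;

            pthread_mutex_lock(&q->lock);
            private_list = q->head;
            q->head = NULL;
            pthread_mutex_unlock(&q->lock);
            return private_list;
    }

    static void drain(struct queue *q)
    {
            struct node *n = splice_all(q);

            /* Handle each entry outside the lock, as ptlrpc_hr_main does,
             * so producers are blocked only for the pointer swap above. */
            while (n != NULL) {
                    struct node *next = n->next;

                    printf("handling %d\n", n->value);
                    n = next;
            }
    }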
+static int ptlrpc_start_hr_thread(struct ptlrpc_hr_service *hr, int n, int cpu)
+{
+        struct ptlrpc_hr_thread *t = &hr->hr_threads[n];
+        struct ptlrpc_hr_args args;
+        int rc;
+        ENTRY;
+
+        args.thread_index = n;
+        args.cpu_index = cpu;
+        args.hrs = hr;
+
+        rc = cfs_kernel_thread(ptlrpc_hr_main, (void*)&args,
+                               CLONE_VM|CLONE_FILES);
+        if (rc < 0) {
+                cfs_complete(&t->hrt_completion);
+                GOTO(out, rc);
+        }
+        l_cfs_wait_event(t->hrt_wait, cfs_test_bit(HRT_RUNNING, &t->hrt_flags));
+        RETURN(0);
+ out:
         return rc;
 }
 
+static void ptlrpc_stop_hr_thread(struct ptlrpc_hr_thread *t)
+{
+        ENTRY;
+
+        cfs_set_bit(HRT_STOPPING, &t->hrt_flags);
+        cfs_waitq_signal(&t->hrt_wait);
+        cfs_wait_for_completion(&t->hrt_completion);
+
+        EXIT;
+}
+
+static void ptlrpc_stop_hr_threads(struct ptlrpc_hr_service *hrs)
+{
+        int n;
+        ENTRY;
+
+        for (n = 0; n < hrs->hr_n_threads; n++)
+                ptlrpc_stop_hr_thread(&hrs->hr_threads[n]);
+
+        EXIT;
+}
+
+static int ptlrpc_start_hr_threads(struct ptlrpc_hr_service *hr)
+{
+        int rc = -ENOMEM;
+        int n, cpu, threads_started = 0;
+        ENTRY;
+
+        LASSERT(hr != NULL);
+        LASSERT(hr->hr_n_threads > 0);
+
+        for (n = 0, cpu = 0; n < hr->hr_n_threads; n++) {
+#if defined(CONFIG_SMP) && defined(HAVE_NODE_TO_CPUMASK)
+                while (!cfs_cpu_online(cpu)) {
+                        cpu++;
+                        if (cpu >= cfs_num_possible_cpus())
+                                cpu = 0;
+                }
+#endif
+                rc = ptlrpc_start_hr_thread(hr, n, cpu);
+                if (rc != 0)
+                        break;
+                threads_started++;
+                cpu++;
+        }
+        if (threads_started == 0) {
+                CERROR("No reply handling threads started\n");
+                RETURN(-ESRCH);
+        }
+        if (threads_started < hr->hr_n_threads) {
+                CWARN("Started only %d reply handling threads from %d\n",
+                      threads_started, hr->hr_n_threads);
+                hr->hr_n_threads = threads_started;
+        }
+        RETURN(0);
+}
+
 static void ptlrpc_stop_thread(struct ptlrpc_service *svc,
                                struct ptlrpc_thread *thread)
 {
         struct l_wait_info lwi = { 0 };
+        ENTRY;
+
+        CDEBUG(D_RPCTRACE, "Stopping thread [ %p : %u ]\n",
+               thread, thread->t_pid);
 
-        spin_lock(&svc->srv_lock);
-        thread->t_flags = SVC_STOPPING;
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
+        /* let the thread know that we would like it to stop asap */
+        thread->t_flags |= SVC_STOPPING;
+        cfs_spin_unlock(&svc->srv_lock);
 
         cfs_waitq_broadcast(&svc->srv_waitq);
-        l_wait_event(thread->t_ctl_waitq, (thread->t_flags & SVC_STOPPED),
-                     &lwi);
+        l_wait_event(thread->t_ctl_waitq,
+                     (thread->t_flags & SVC_STOPPED), &lwi);
 
-        spin_lock(&svc->srv_lock);
-        list_del(&thread->t_link);
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
+        cfs_list_del(&thread->t_link);
+        cfs_spin_unlock(&svc->srv_lock);
 
         OBD_FREE_PTR(thread);
+        EXIT;
 }
 
 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
 {
         struct ptlrpc_thread *thread;
+        ENTRY;
 
-        spin_lock(&svc->srv_lock);
-        while (!list_empty(&svc->srv_threads)) {
-                thread = list_entry(svc->srv_threads.next,
-                                    struct ptlrpc_thread, t_link);
+        cfs_spin_lock(&svc->srv_lock);
+        while (!cfs_list_empty(&svc->srv_threads)) {
+                thread = cfs_list_entry(svc->srv_threads.next,
+                                        struct ptlrpc_thread, t_link);
 
-                spin_unlock(&svc->srv_lock);
+                cfs_spin_unlock(&svc->srv_lock);
                 ptlrpc_stop_thread(svc, thread);
-                spin_lock(&svc->srv_lock);
+                cfs_spin_lock(&svc->srv_lock);
         }
 
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
+        EXIT;
 }
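ptlrpc_stop_thread() above is the shutdown handshake used throughout this file: raise SVC_STOPPING under srv_lock, broadcast the service waitqueue so every sleeper re-evaluates its predicate, then block until the target thread posts SVC_STOPPED from its exit path. A condensed pthread rendering of that handshake (hypothetical names; Lustre's l_wait_event/cfs_waitq calls are replaced by condition variables):

    #include <pthread.h>

    #define SVC_STOPPING 0x01
    #define SVC_STOPPED  0x02

    struct thread_ctl {
            pthread_mutex_t lock;
            pthread_cond_t  wake;   /* wakes the service thread */
            pthread_cond_t  ctl;    /* wakes the stopper */
            unsigned        flags;
    };

    static void stop_thread(struct thread_ctl *t)
    {
            pthread_mutex_lock(&t->lock);
            t->flags |= SVC_STOPPING;        /* ask the thread to stop asap */
            pthread_cond_broadcast(&t->wake);
            while (!(t->flags & SVC_STOPPED))
                    pthread_cond_wait(&t->ctl, &t->lock);
            pthread_mutex_unlock(&t->lock);
    }

    /* The service thread's exit path mirrors ptlrpc_main(): mark the flag,
     * then signal whoever is blocked in stop_thread(). */
    static void thread_exit(struct thread_ctl *t)
    {
            pthread_mutex_lock(&t->lock);
            t->flags |= SVC_STOPPED;
            pthread_cond_signal(&t->ctl);
            pthread_mutex_unlock(&t->lock);
    }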
 
 int ptlrpc_start_threads(struct obd_device *dev, struct ptlrpc_service *svc)
@@ -1623,6 +2354,7 @@ int ptlrpc_start_threads(struct obd_device *dev, struct ptlrpc_service *svc)
                         CERROR("cannot start %s thread #%d: rc %d\n",
                                svc->srv_thread_name, i, rc);
                         ptlrpc_stop_all_threads(svc);
+                        break;
                 }
         }
         RETURN(rc);
@@ -1640,6 +2372,10 @@ int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc)
         CDEBUG(D_RPCTRACE, "%s started %d min %d max %d running %d\n",
                svc->srv_name, svc->srv_threads_started, svc->srv_threads_min,
                svc->srv_threads_max, svc->srv_threads_running);
+
+        if (unlikely(svc->srv_is_stopping))
+                RETURN(-ESRCH);
+
         if (unlikely(svc->srv_threads_started >= svc->srv_threads_max) ||
             (OBD_FAIL_CHECK(OBD_FAIL_TGT_TOOMANY_THREADS) &&
              svc->srv_threads_started == svc->srv_threads_min - 1))
@@ -1650,16 +2386,17 @@ int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc)
                 RETURN(-ENOMEM);
         cfs_waitq_init(&thread->t_ctl_waitq);
 
-        spin_lock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
         if (svc->srv_threads_started >= svc->srv_threads_max) {
-                spin_unlock(&svc->srv_lock);
+                cfs_spin_unlock(&svc->srv_lock);
                 OBD_FREE_PTR(thread);
                 RETURN(-EMFILE);
         }
-        list_add(&thread->t_link, &svc->srv_threads);
+        cfs_list_add(&thread->t_link, &svc->srv_threads);
         id = svc->srv_threads_started++;
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
+        thread->t_svc = svc;
         thread->t_id = id;
         sprintf(name, "%s_%02d", svc->srv_thread_name, id);
         d.dev = dev;
@@ -1669,17 +2406,17 @@ int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc)
 
         CDEBUG(D_RPCTRACE, "starting thread '%s'\n", name);
 
-        /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
-         * just drop the VM and FILES in ptlrpc_daemonize() right away.
+        /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
+         * just drop the VM and FILES in cfs_daemonize_ctxt() right away.
          */
         rc = cfs_kernel_thread(ptlrpc_main, &d, CLONE_VM | CLONE_FILES);
         if (rc < 0) {
                 CERROR("cannot start thread '%s': rc %d\n", name, rc);
 
-                spin_lock(&svc->srv_lock);
-                list_del(&thread->t_link);
+                cfs_spin_lock(&svc->srv_lock);
+                cfs_list_del(&thread->t_link);
                 --svc->srv_threads_started;
-                spin_unlock(&svc->srv_lock);
+                cfs_spin_unlock(&svc->srv_lock);
 
                 OBD_FREE(thread, sizeof(*thread));
                 RETURN(rc);
@@ -1690,23 +2427,90 @@ int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc)
         rc = (thread->t_flags & SVC_STOPPED) ? thread->t_id : 0;
         RETURN(rc);
 }
-#endif
+
+
+int ptlrpc_hr_init(void)
+{
+        int i;
+        int n_cpus = cfs_num_online_cpus();
+        struct ptlrpc_hr_service *hr;
+        int size;
+        int rc;
+        ENTRY;
+
+        LASSERT(ptlrpc_hr == NULL);
+
+        size = offsetof(struct ptlrpc_hr_service, hr_threads[n_cpus]);
+        OBD_ALLOC(hr, size);
+        if (hr == NULL)
+                RETURN(-ENOMEM);
+        for (i = 0; i < n_cpus; i++) {
+                struct ptlrpc_hr_thread *t = &hr->hr_threads[i];
+
+                cfs_spin_lock_init(&t->hrt_lock);
+                cfs_waitq_init(&t->hrt_wait);
+                CFS_INIT_LIST_HEAD(&t->hrt_queue);
+                cfs_init_completion(&t->hrt_completion);
+        }
+        hr->hr_n_threads = n_cpus;
+        hr->hr_size = size;
+        ptlrpc_hr = hr;
+
+        rc = ptlrpc_start_hr_threads(hr);
+        if (rc) {
+                OBD_FREE(hr, hr->hr_size);
+                ptlrpc_hr = NULL;
+        }
+        RETURN(rc);
+}
+
+void ptlrpc_hr_fini(void)
+{
+        if (ptlrpc_hr != NULL) {
+                ptlrpc_stop_hr_threads(ptlrpc_hr);
+                OBD_FREE(ptlrpc_hr, ptlrpc_hr->hr_size);
+                ptlrpc_hr = NULL;
+        }
+}
+
+#endif /* __KERNEL__ */
+
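ptlrpc_hr_init() sizes one allocation for the header plus its trailing hr_threads[] array with offsetof(struct ptlrpc_hr_service, hr_threads[n_cpus]), which is also why hr_size is remembered for the matching OBD_FREE(). The same sizing idiom in standalone form (plain malloc/free instead of OBD_ALLOC/OBD_FREE; struct names invented for illustration):

    #include <stddef.h>
    #include <stdlib.h>

    struct item {
            int id;
    };

    struct table {
            int         n_items;
            struct item items[0];   /* GNU zero-length array, as hr_threads[0] */
    };

    static struct table *table_alloc(int n)
    {
            /* offsetof() yields the header size plus n trailing elements
             * in a single expression, with no separate multiply-and-add. */
            size_t size = offsetof(struct table, items[n]);
            struct table *t = malloc(size);

            if (t != NULL)
                    t->n_items = n;
            return t;       /* caller frees with free(t) */
    }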
+/**
+ * Wait until all already scheduled replies are processed.
+ */
+static void ptlrpc_wait_replies(struct ptlrpc_service *svc)
+{
+        while (1) {
+                int rc;
+                struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10),
+                                                     NULL, NULL);
+
+                rc = l_wait_event(svc->srv_waitq,
+                                  cfs_atomic_read(&svc->srv_n_difficult_replies) == 0,
+                                  &lwi);
+                if (rc == 0)
+                        break;
+                CWARN("Unexpectedly long timeout %p\n", svc);
+        }
+}
 
 int ptlrpc_unregister_service(struct ptlrpc_service *service)
 {
         int rc;
         struct l_wait_info lwi;
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         struct ptlrpc_reply_state *rs, *t;
+        struct ptlrpc_at_array *array = &service->srv_at_array;
+        ENTRY;
 
+        service->srv_is_stopping = 1;
         cfs_timer_disarm(&service->srv_at_timer);
 
         ptlrpc_stop_all_threads(service);
-        LASSERT(list_empty(&service->srv_threads));
+        LASSERT(cfs_list_empty(&service->srv_threads));
 
-        spin_lock (&ptlrpc_all_services_lock);
-        list_del_init (&service->srv_list);
-        spin_unlock (&ptlrpc_all_services_lock);
+        cfs_spin_lock (&ptlrpc_all_services_lock);
+        cfs_list_del_init (&service->srv_list);
+        cfs_spin_unlock (&ptlrpc_all_services_lock);
 
         ptlrpc_lprocfs_unregister_service(service);
 
@@ -1721,10 +2525,10 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service)
 
         /* Unlink all the request buffers.  This forces a 'final' event with
          * its 'unlink' flag set for each posted rqbd */
-        list_for_each(tmp, &service->srv_active_rqbds) {
+        cfs_list_for_each(tmp, &service->srv_active_rqbds) {
                 struct ptlrpc_request_buffer_desc *rqbd =
-                        list_entry(tmp, struct ptlrpc_request_buffer_desc,
-                                   rqbd_list);
+                        cfs_list_entry(tmp, struct ptlrpc_request_buffer_desc,
+                                       rqbd_list);
 
                 rc = LNetMDUnlink(rqbd->rqbd_md_h);
                 LASSERT (rc == 0 || rc == -ENOENT);
         }
 
@@ -1733,16 +2537,17 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service)
         /* Wait for the network to release any buffers it's currently
          * filling */
         for (;;) {
-                spin_lock(&service->srv_lock);
+                cfs_spin_lock(&service->srv_lock);
                 rc = service->srv_nrqbd_receiving;
-                spin_unlock(&service->srv_lock);
+                cfs_spin_unlock(&service->srv_lock);
 
                 if (rc == 0)
                         break;
 
                 /* Network access will complete in finite time but the HUGE
                  * timeout lets us CWARN for visibility of sluggish NALs */
-                lwi = LWI_TIMEOUT(cfs_time_seconds(LONG_UNLINK), NULL, NULL);
+                lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
+                                           cfs_time_seconds(1), NULL, NULL);
                 rc = l_wait_event(service->srv_waitq,
                                   service->srv_nrqbd_receiving == 0,
                                   &lwi);
@@ -1752,83 +2557,82 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service)
         }
 
         /* schedule all outstanding replies to terminate them */
-        spin_lock(&service->srv_lock);
-        while (!list_empty(&service->srv_active_replies)) {
+        cfs_spin_lock(&service->srv_lock);
+        while (!cfs_list_empty(&service->srv_active_replies)) {
                 struct ptlrpc_reply_state *rs =
-                        list_entry(service->srv_active_replies.next,
-                                   struct ptlrpc_reply_state, rs_list);
+                        cfs_list_entry(service->srv_active_replies.next,
+                                       struct ptlrpc_reply_state, rs_list);
+                cfs_spin_lock(&rs->rs_lock);
                 ptlrpc_schedule_difficult_reply(rs);
+                cfs_spin_unlock(&rs->rs_lock);
         }
-        spin_unlock(&service->srv_lock);
+        cfs_spin_unlock(&service->srv_lock);
 
         /* purge the request queue.  NB No new replies (rqbds all unlinked)
          * and no service threads, so I'm the only thread noodling the
         * request queue now */
-        while (!list_empty(&service->srv_req_in_queue)) {
+        while (!cfs_list_empty(&service->srv_req_in_queue)) {
                 struct ptlrpc_request *req =
-                        list_entry(service->srv_req_in_queue.next,
-                                   struct ptlrpc_request,
-                                   rq_list);
+                        cfs_list_entry(service->srv_req_in_queue.next,
+                                       struct ptlrpc_request,
+                                       rq_list);
 
-                list_del(&req->rq_list);
+                cfs_list_del(&req->rq_list);
                 service->srv_n_queued_reqs--;
                 service->srv_n_active_reqs++;
                 ptlrpc_server_finish_request(req);
         }
-        while (!list_empty(&service->srv_request_queue)) {
-                struct ptlrpc_request *req =
-                        list_entry(service->srv_request_queue.next,
-                                   struct ptlrpc_request,
-                                   rq_list);
+        while (ptlrpc_server_request_pending(service, 1)) {
+                struct ptlrpc_request *req;
 
-                list_del(&req->rq_list);
+                req = ptlrpc_server_request_get(service);
+                cfs_list_del(&req->rq_list);
                 service->srv_n_queued_reqs--;
                 service->srv_n_active_reqs++;
-
+                ptlrpc_hpreq_fini(req);
                 ptlrpc_server_finish_request(req);
         }
         LASSERT(service->srv_n_queued_reqs == 0);
         LASSERT(service->srv_n_active_reqs == 0);
         LASSERT(service->srv_n_history_rqbds == 0);
-        LASSERT(list_empty(&service->srv_active_rqbds));
+        LASSERT(cfs_list_empty(&service->srv_active_rqbds));
 
         /* Now free all the request buffers since nothing references them
          * any more... */
-        while (!list_empty(&service->srv_idle_rqbds)) {
+        while (!cfs_list_empty(&service->srv_idle_rqbds)) {
                 struct ptlrpc_request_buffer_desc *rqbd =
-                        list_entry(service->srv_idle_rqbds.next,
-                                   struct ptlrpc_request_buffer_desc,
-                                   rqbd_list);
+                        cfs_list_entry(service->srv_idle_rqbds.next,
+                                       struct ptlrpc_request_buffer_desc,
+                                       rqbd_list);
 
                 ptlrpc_free_rqbd(rqbd);
         }
 
-        /* wait for all outstanding replies to complete (they were
-         * scheduled having been flagged to abort above) */
-        while (atomic_read(&service->srv_outstanding_replies) != 0) {
-                struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
-
-                rc = l_wait_event(service->srv_waitq,
-                                  !list_empty(&service->srv_reply_queue), &lwi);
-                LASSERT(rc == 0 || rc == -ETIMEDOUT);
-
-                if (rc == 0) {
-                        ptlrpc_server_handle_reply(service);
-                        continue;
-                }
-                CWARN("Unexpectedly long timeout %p\n", service);
-        }
+        ptlrpc_wait_replies(service);
 
-        list_for_each_entry_safe(rs, t, &service->srv_free_rs_list, rs_list) {
-                list_del(&rs->rs_list);
+        cfs_list_for_each_entry_safe(rs, t, &service->srv_free_rs_list,
+                                     rs_list) {
+                cfs_list_del(&rs->rs_list);
                 OBD_FREE(rs, service->srv_max_reply_size);
         }
 
         /* In case somebody rearmed this in the meantime */
         cfs_timer_disarm(&service->srv_at_timer);
 
+        if (array->paa_reqs_array != NULL) {
+                OBD_FREE(array->paa_reqs_array,
+                         sizeof(cfs_list_t) * array->paa_size);
+                array->paa_reqs_array = NULL;
+        }
+
+        if (array->paa_reqs_count != NULL) {
+                OBD_FREE(array->paa_reqs_count,
+                         sizeof(__u32) * array->paa_size);
+                array->paa_reqs_count = NULL;
+        }
+
         OBD_FREE_PTR(service);
-        return 0;
+        RETURN(0);
 }
 
 /* Returns 0 if the service is healthy.
@@ -1845,19 +2649,23 @@ int ptlrpc_service_health_check(struct ptlrpc_service *svc)
         if (svc == NULL)
                 return 0;
 
-        do_gettimeofday(&right_now);
+        cfs_gettimeofday(&right_now);
 
-        spin_lock(&svc->srv_lock);
-        if (list_empty(&svc->srv_request_queue)) {
-                spin_unlock(&svc->srv_lock);
+        cfs_spin_lock(&svc->srv_lock);
+        if (!ptlrpc_server_request_pending(svc, 1)) {
+                cfs_spin_unlock(&svc->srv_lock);
                 return 0;
         }
 
         /* How long has the next entry been waiting? */
-        request = list_entry(svc->srv_request_queue.next,
-                             struct ptlrpc_request, rq_list);
+        if (cfs_list_empty(&svc->srv_request_queue))
+                request = cfs_list_entry(svc->srv_request_hpq.next,
+                                         struct ptlrpc_request, rq_list);
+        else
+                request = cfs_list_entry(svc->srv_request_queue.next,
+                                         struct ptlrpc_request, rq_list);
         timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL);
-        spin_unlock(&svc->srv_lock);
+        cfs_spin_unlock(&svc->srv_lock);
 
         if ((timediff / ONE_MILLION) > (AT_OFF ? obd_timeout * 3/2 : at_max)) {