X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fptlrpc%2Fservice.c;h=672704de69d878955c72b1196c7a02d9b9de1bda;hp=13f699e2b183c6694c871e462aa227bc278b59e8;hb=0c3cb273e1ce629072da4d790cba5e13163c43c6;hpb=8a5b8dbda960b155f669c13602504f1233a84c7e;ds=sidebyside diff --git a/lustre/ptlrpc/service.c b/lustre/ptlrpc/service.c index 13f699e..672704d 100644 --- a/lustre/ptlrpc/service.c +++ b/lustre/ptlrpc/service.c @@ -65,6 +65,7 @@ CFS_MODULE_PARM(at_extra, "i", int, 0644, /* forward ref */ static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt); static void ptlrpc_hpreq_fini(struct ptlrpc_request *req); +static void ptlrpc_at_remove_timed(struct ptlrpc_request *req); static CFS_LIST_HEAD(ptlrpc_all_services); cfs_spinlock_t ptlrpc_all_services_lock; @@ -75,21 +76,21 @@ ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt) struct ptlrpc_service *svc = svcpt->scp_service; struct ptlrpc_request_buffer_desc *rqbd; - OBD_ALLOC_PTR(rqbd); + OBD_CPT_ALLOC_PTR(rqbd, svc->srv_cptable, svcpt->scp_cpt); if (rqbd == NULL) return NULL; rqbd->rqbd_svcpt = svcpt; - rqbd->rqbd_refcount = 0; - rqbd->rqbd_cbid.cbid_fn = request_in_callback; - rqbd->rqbd_cbid.cbid_arg = rqbd; - CFS_INIT_LIST_HEAD(&rqbd->rqbd_reqs); - OBD_ALLOC_LARGE(rqbd->rqbd_buffer, svc->srv_buf_size); - - if (rqbd->rqbd_buffer == NULL) { - OBD_FREE_PTR(rqbd); - return (NULL); - } + rqbd->rqbd_refcount = 0; + rqbd->rqbd_cbid.cbid_fn = request_in_callback; + rqbd->rqbd_cbid.cbid_arg = rqbd; + CFS_INIT_LIST_HEAD(&rqbd->rqbd_reqs); + OBD_CPT_ALLOC_LARGE(rqbd->rqbd_buffer, svc->srv_cptable, + svcpt->scp_cpt, svc->srv_buf_size); + if (rqbd->rqbd_buffer == NULL) { + OBD_FREE_PTR(rqbd); + return NULL; + } cfs_spin_lock(&svcpt->scp_lock); cfs_list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle); @@ -117,7 +118,7 @@ ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd) } int -ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt) +ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post) { struct ptlrpc_service *svc = svcpt->scp_service; struct ptlrpc_request_buffer_desc *rqbd; @@ -138,17 +139,15 @@ ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt) rc = -ENOMEM; break; } - - if (ptlrpc_server_post_idle_rqbds(svcpt) < 0) { - rc = -EAGAIN; - break; - } } CDEBUG(D_RPCTRACE, "%s: allocate %d new %d-byte reqbufs (%d/%d left), rc = %d\n", - svc->srv_name, i, svc->srv_buf_size, - svcpt->scp_nrqbds_posted, svcpt->scp_nrqbds_total, rc); + svc->srv_name, i, svc->srv_buf_size, svcpt->scp_nrqbds_posted, + svcpt->scp_nrqbds_total, rc); + + if (post && rc == 0) + rc = ptlrpc_server_post_idle_rqbds(svcpt); return rc; } @@ -177,25 +176,48 @@ ptlrpc_save_lock(struct ptlrpc_request *req, rs->rs_no_ack = !!no_ack; } } +EXPORT_SYMBOL(ptlrpc_save_lock); #ifdef __KERNEL__ -#define HRT_RUNNING 0 -#define HRT_STOPPING 1 +struct ptlrpc_hr_partition; struct ptlrpc_hr_thread { - cfs_spinlock_t hrt_lock; - unsigned long hrt_flags; - cfs_waitq_t hrt_wait; - cfs_list_t hrt_queue; - cfs_completion_t hrt_completion; + int hrt_id; /* thread ID */ + cfs_spinlock_t hrt_lock; + cfs_waitq_t hrt_waitq; + cfs_list_t hrt_queue; /* RS queue */ + struct ptlrpc_hr_partition *hrt_partition; +}; + +struct ptlrpc_hr_partition { + /* # of started threads */ + cfs_atomic_t hrp_nstarted; + /* # of stopped threads */ + cfs_atomic_t hrp_nstopped; + /* cpu partition id */ + int hrp_cpt; + /* round-robin rotor for choosing thread */ + int hrp_rotor; + /* total number of threads on this partition */ + int hrp_nthrs; + /* threads 
table */ + struct ptlrpc_hr_thread *hrp_thrs; }; +#define HRT_RUNNING 0 +#define HRT_STOPPING 1 + struct ptlrpc_hr_service { - int hr_index; - int hr_n_threads; - int hr_size; - struct ptlrpc_hr_thread hr_threads[0]; + /* CPU partition table, it's just cfs_cpt_table for now */ + struct cfs_cpt_table *hr_cpt_table; + /** controller sleep waitq */ + cfs_waitq_t hr_waitq; + unsigned int hr_stopping; + /** roundrobin rotor for non-affinity service */ + unsigned int hr_rotor; + /* partition data */ + struct ptlrpc_hr_partition **hr_partitions; }; struct rs_batch { @@ -204,10 +226,8 @@ struct rs_batch { struct ptlrpc_service_part *rsb_svcpt; }; -/** - * A pointer to per-node reply handling service. - */ -static struct ptlrpc_hr_service *ptlrpc_hr = NULL; +/** reply handling service. */ +static struct ptlrpc_hr_service ptlrpc_hr; /** * maximum mumber of replies scheduled in one batch @@ -228,17 +248,26 @@ static void rs_batch_init(struct rs_batch *b) /** * Choose an hr thread to dispatch requests to. */ -static unsigned int get_hr_thread_index(struct ptlrpc_hr_service *hr) +static struct ptlrpc_hr_thread * +ptlrpc_hr_select(struct ptlrpc_service_part *svcpt) { - unsigned int idx; + struct ptlrpc_hr_partition *hrp; + unsigned int rotor; - /* Concurrent modification of hr_index w/o any spinlock - protection is harmless as long as the result fits - [0..(hr_n_threads-1)] range and each thread gets near equal - load. */ - idx = hr->hr_index; - hr->hr_index = (idx >= hr->hr_n_threads - 1) ? 0 : idx + 1; - return idx; + if (svcpt->scp_cpt >= 0 && + svcpt->scp_service->srv_cptable == ptlrpc_hr.hr_cpt_table) { + /* directly match partition */ + hrp = ptlrpc_hr.hr_partitions[svcpt->scp_cpt]; + + } else { + rotor = ptlrpc_hr.hr_rotor++; + rotor %= cfs_cpt_number(ptlrpc_hr.hr_cpt_table); + + hrp = ptlrpc_hr.hr_partitions[rotor]; + } + + rotor = hrp->hrp_rotor++; + return &hrp->hrp_thrs[rotor % hrp->hrp_nthrs]; } /** @@ -249,19 +278,18 @@ static unsigned int get_hr_thread_index(struct ptlrpc_hr_service *hr) */ static void rs_batch_dispatch(struct rs_batch *b) { - if (b->rsb_n_replies != 0) { - struct ptlrpc_hr_service *hr = ptlrpc_hr; - int idx; + if (b->rsb_n_replies != 0) { + struct ptlrpc_hr_thread *hrt; - idx = get_hr_thread_index(hr); + hrt = ptlrpc_hr_select(b->rsb_svcpt); - cfs_spin_lock(&hr->hr_threads[idx].hrt_lock); - cfs_list_splice_init(&b->rsb_replies, - &hr->hr_threads[idx].hrt_queue); - cfs_spin_unlock(&hr->hr_threads[idx].hrt_lock); - cfs_waitq_signal(&hr->hr_threads[idx].hrt_wait); - b->rsb_n_replies = 0; - } + cfs_spin_lock(&hrt->hrt_lock); + cfs_list_splice_init(&b->rsb_replies, &hrt->hrt_queue); + cfs_spin_unlock(&hrt->hrt_lock); + + cfs_waitq_signal(&hrt->hrt_waitq); + b->rsb_n_replies = 0; + } } /** @@ -327,18 +355,19 @@ static void rs_batch_fini(struct rs_batch *b) void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs) { #ifdef __KERNEL__ - struct ptlrpc_hr_service *hr = ptlrpc_hr; - int idx; - ENTRY; + struct ptlrpc_hr_thread *hrt; + ENTRY; - LASSERT(cfs_list_empty(&rs->rs_list)); + LASSERT(cfs_list_empty(&rs->rs_list)); - idx = get_hr_thread_index(hr); - cfs_spin_lock(&hr->hr_threads[idx].hrt_lock); - cfs_list_add_tail(&rs->rs_list, &hr->hr_threads[idx].hrt_queue); - cfs_spin_unlock(&hr->hr_threads[idx].hrt_lock); - cfs_waitq_signal(&hr->hr_threads[idx].hrt_wait); - EXIT; + hrt = ptlrpc_hr_select(rs->rs_svcpt); + + cfs_spin_lock(&hrt->hrt_lock); + cfs_list_add_tail(&rs->rs_list, &hrt->hrt_queue); + cfs_spin_unlock(&hrt->hrt_lock); + + cfs_waitq_signal(&hrt->hrt_waitq); + 
EXIT; #else cfs_list_add_tail(&rs->rs_list, &rs->rs_svcpt->scp_rep_queue); #endif @@ -364,6 +393,7 @@ ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs) ptlrpc_dispatch_difficult_reply(rs); EXIT; } +EXPORT_SYMBOL(ptlrpc_schedule_difficult_reply); void ptlrpc_commit_replies(struct obd_export *exp) { @@ -391,6 +421,7 @@ void ptlrpc_commit_replies(struct obd_export *exp) rs_batch_fini(&batch); EXIT; } +EXPORT_SYMBOL(ptlrpc_commit_replies); static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt) @@ -451,49 +482,106 @@ static void ptlrpc_at_timer(unsigned long castmeharder) } static void -ptlrpc_server_nthreads_check(struct ptlrpc_service_conf *conf, - int *min_p, int *max_p) +ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, + struct ptlrpc_service_conf *conf) { #ifdef __KERNEL__ struct ptlrpc_service_thr_conf *tc = &conf->psc_thr; - int nthrs_min; - int nthrs; + unsigned init; + unsigned total; + unsigned nthrs; + int weight; - nthrs_min = PTLRPC_NTHRS_MIN + (conf->psc_ops.so_hpreq_handler != NULL); - nthrs_min = max_t(int, nthrs_min, tc->tc_nthrs_min); + /* + * Common code for estimating & validating threads number. + * CPT affinity service could have percpt thread-pool instead + * of a global thread-pool, which means user might not always + * get the threads number they give it in conf::tc_nthrs_user + * even they did set. It's because we need to validate threads + * number for each CPT to guarantee each pool will have enough + * threads to keep the service healthy. + */ + init = PTLRPC_NTHRS_INIT + (svc->srv_ops.so_hpreq_handler != NULL); + init = max_t(int, init, tc->tc_nthrs_init); + + /* NB: please see comments in lustre_lnet.h for definition + * details of these members */ + LASSERT(tc->tc_nthrs_max != 0); + + if (tc->tc_nthrs_user != 0) { + /* In case there is a reason to test a service with many + * threads, we give a less strict check here, it can + * be up to 8 * nthrs_max */ + total = min(tc->tc_nthrs_max * 8, tc->tc_nthrs_user); + nthrs = total / svc->srv_ncpts; + init = max(init, nthrs); + goto out; + } - nthrs = tc->tc_nthrs_user; - if (nthrs != 0) { /* validate it */ - nthrs = min_t(int, nthrs, tc->tc_nthrs_max); - nthrs = max_t(int, nthrs, nthrs_min); - *min_p = *max_p = nthrs; - return; + total = tc->tc_nthrs_max; + if (tc->tc_nthrs_base == 0) { + /* don't care about base threads number per partition, + * this is most for non-affinity service */ + nthrs = total / svc->srv_ncpts; + goto out; } - /* - * NB: we will add some common code here for estimating, for example: - * add a new member ptlrpc_service_thr_conf::tc_factor, and estimate - * threads number based on: - * (online_cpus * conf::tc_factor) + conf::tc_nthrs_base. - * - * So we can remove code block like estimation in ost_setup, also, - * we might estimate MDS threads number as well instead of using - * absolute number, and have more threads on fat servers to improve - * availability of service. - * - * Also, we will need to validate threads number at here for - * CPT affinity service (CPU ParTion) in the future. - * A service can have percpt thread-pool instead of a global thread - * pool for each service, which means user might not always get the - * threads number they want even they set it in conf::tc_nthrs_user, - * because we need to adjust threads number for each CPT, instead of - * just use (conf::tc_nthrs_user / NCPTS), to make sure each pool - * will be healthy. 
- */ - *max_p = tc->tc_nthrs_max; - *min_p = nthrs_min; -#else /* __KERNEL__ */ - *max_p = *min_p = 1; /* whatever */ + nthrs = tc->tc_nthrs_base; + if (svc->srv_ncpts == 1) { + int i; + + /* NB: Increase the base number if it's single partition + * and total number of cores/HTs is larger or equal to 4. + * result will always < 2 * nthrs_base */ + weight = cfs_cpt_weight(svc->srv_cptable, CFS_CPT_ANY); + for (i = 1; (weight >> (i + 1)) != 0 && /* >= 4 cores/HTs */ + (tc->tc_nthrs_base >> i) != 0; i++) + nthrs += tc->tc_nthrs_base >> i; + } + + if (tc->tc_thr_factor != 0) { + int factor = tc->tc_thr_factor; + const int fade = 4; + + /* + * User wants to increase number of threads with for + * each CPU core/HT, most likely the factor is larger then + * one thread/core because service threads are supposed to + * be blocked by lock or wait for IO. + */ + /* + * Amdahl's law says that adding processors wouldn't give + * a linear increasing of parallelism, so it's nonsense to + * have too many threads no matter how many cores/HTs + * there are. + */ + if (cfs_cpu_ht_nsiblings(0) > 1) { /* weight is # of HTs */ + /* depress thread factor for hyper-thread */ + factor = factor - (factor >> 1) + (factor >> 3); + } + + weight = cfs_cpt_weight(svc->srv_cptable, 0); + LASSERT(weight > 0); + + for (; factor > 0 && weight > 0; factor--, weight -= fade) + nthrs += min(weight, fade) * factor; + } + + if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) { + nthrs = max(tc->tc_nthrs_base, + tc->tc_nthrs_max / svc->srv_ncpts); + } + out: + nthrs = max(nthrs, tc->tc_nthrs_init); + svc->srv_nthrs_cpt_limit = nthrs; + svc->srv_nthrs_cpt_init = init; + + if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) { + LCONSOLE_WARN("%s: This service may have more threads (%d) " + "than the given soft limit (%d)\n", + svc->srv_name, nthrs * svc->srv_ncpts, + tc->tc_nthrs_max); + } #endif } @@ -502,13 +590,14 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service_conf *conf, */ static int ptlrpc_service_part_init(struct ptlrpc_service *svc, - struct ptlrpc_service_part *svcpt) + struct ptlrpc_service_part *svcpt, int cpt) { struct ptlrpc_at_array *array; int size; int index; int rc; + svcpt->scp_cpt = cpt; CFS_INIT_LIST_HEAD(&svcpt->scp_threads); /* rqbd and incoming request queue */ @@ -546,14 +635,16 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc, array->paa_deadline = -1; /* allocate memory for scp_at_array (ptlrpc_at_array) */ - OBD_ALLOC(array->paa_reqs_array, sizeof(cfs_list_t) * size); + OBD_CPT_ALLOC(array->paa_reqs_array, + svc->srv_cptable, cpt, sizeof(cfs_list_t) * size); if (array->paa_reqs_array == NULL) return -ENOMEM; for (index = 0; index < size; index++) CFS_INIT_LIST_HEAD(&array->paa_reqs_array[index]); - OBD_ALLOC(array->paa_reqs_count, sizeof(__u32) * size); + OBD_CPT_ALLOC(array->paa_reqs_count, + svc->srv_cptable, cpt, sizeof(__u32) * size); if (array->paa_reqs_count == NULL) goto failed; @@ -565,7 +656,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc, /* assign this before call ptlrpc_grow_req_bufs */ svcpt->scp_service = svc; /* Now allocate the request buffers, but don't post them now */ - rc = ptlrpc_grow_req_bufs(svcpt); + rc = ptlrpc_grow_req_bufs(svcpt, 0); /* We shouldn't be under memory pressure at startup, so * fail if we can't allocate all our buffers at this time. 
*/ if (rc != 0) @@ -597,8 +688,15 @@ struct ptlrpc_service * ptlrpc_register_service(struct ptlrpc_service_conf *conf, cfs_proc_dir_entry_t *proc_entry) { + struct ptlrpc_service_cpt_conf *cconf = &conf->psc_cpt; struct ptlrpc_service *service; + struct ptlrpc_service_part *svcpt; + struct cfs_cpt_table *cptable; + __u32 *cpts = NULL; + int ncpts; + int cpt; int rc; + int i; ENTRY; LASSERT(conf->psc_buf.bc_nbufs > 0); @@ -606,9 +704,51 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf, conf->psc_buf.bc_req_max_size + SPTLRPC_MAX_PAYLOAD); LASSERT(conf->psc_thr.tc_ctx_tags != 0); - OBD_ALLOC_PTR(service); - if (service == NULL) + cptable = cconf->cc_cptable; + if (cptable == NULL) + cptable = cfs_cpt_table; + + if (!conf->psc_thr.tc_cpu_affinity) { + ncpts = 1; + } else { + ncpts = cfs_cpt_number(cptable); + if (cconf->cc_pattern != NULL) { + struct cfs_expr_list *el; + + rc = cfs_expr_list_parse(cconf->cc_pattern, + strlen(cconf->cc_pattern), + 0, ncpts - 1, &el); + if (rc != 0) { + CERROR("%s: invalid CPT pattern string: %s", + conf->psc_name, cconf->cc_pattern); + RETURN(ERR_PTR(-EINVAL)); + } + + rc = cfs_expr_list_values(el, ncpts, &cpts); + cfs_expr_list_free(el); + if (rc <= 0) { + CERROR("%s: failed to parse CPT array %s: %d\n", + conf->psc_name, cconf->cc_pattern, rc); + RETURN(ERR_PTR(rc < 0 ? rc : -EINVAL)); + } + ncpts = rc; + } + } + + OBD_ALLOC(service, offsetof(struct ptlrpc_service, srv_parts[ncpts])); + if (service == NULL) { + if (cpts != NULL) + OBD_FREE(cpts, sizeof(*cpts) * ncpts); RETURN(ERR_PTR(-ENOMEM)); + } + + service->srv_cptable = cptable; + service->srv_cpts = cpts; + service->srv_ncpts = ncpts; + + service->srv_cpt_bits = 0; /* it's zero already, easy to read... */ + while ((1 << service->srv_cpt_bits) < cfs_cpt_number(cptable)) + service->srv_cpt_bits++; /* public members */ cfs_spin_lock_init(&service->srv_lock); @@ -617,8 +757,9 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf, CFS_INIT_LIST_HEAD(&service->srv_list); /* for safty of cleanup */ /* buffer configuration */ - service->srv_nbuf_per_group = test_req_buffer_pressure ? - 1 : conf->psc_buf.bc_nbufs; + service->srv_nbuf_per_group = test_req_buffer_pressure ? 1 : + max(conf->psc_buf.bc_nbufs / + service->srv_ncpts, 1U); service->srv_max_req_size = conf->psc_buf.bc_req_max_size + SPTLRPC_MAX_PAYLOAD; service->srv_buf_size = conf->psc_buf.bc_buf_size; @@ -631,22 +772,28 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf, conf->psc_buf.bc_rep_max_size + SPTLRPC_MAX_PAYLOAD) service->srv_max_reply_size <<= 1; - ptlrpc_server_nthreads_check(conf, &service->srv_threads_min, - &service->srv_threads_max); - service->srv_thread_name = conf->psc_thr.tc_thr_name; service->srv_ctx_tags = conf->psc_thr.tc_ctx_tags; - service->srv_cpu_affinity = !!conf->psc_thr.tc_cpu_affinity; service->srv_hpreq_ratio = PTLRPC_SVC_HP_RATIO; service->srv_ops = conf->psc_ops; - OBD_ALLOC_PTR(service->srv_part); - if (service->srv_part == NULL) - GOTO(failed, rc = -ENOMEM); + for (i = 0; i < ncpts; i++) { + if (!conf->psc_thr.tc_cpu_affinity) + cpt = CFS_CPT_ANY; + else + cpt = cpts != NULL ? 
cpts[i] : i; - rc = ptlrpc_service_part_init(service, service->srv_part); - if (rc != 0) - GOTO(failed, rc); + OBD_CPT_ALLOC(svcpt, cptable, cpt, sizeof(*svcpt)); + if (svcpt == NULL) + GOTO(failed, rc = -ENOMEM); + + service->srv_parts[i] = svcpt; + rc = ptlrpc_service_part_init(service, svcpt, cpt); + if (rc != 0) + GOTO(failed, rc); + } + + ptlrpc_server_nthreads_check(service, conf); rc = LNetSetLazyPortal(service->srv_req_portal); LASSERT(rc == 0); @@ -675,6 +822,7 @@ failed: ptlrpc_unregister_service(service); RETURN(ERR_PTR(rc)); } +EXPORT_SYMBOL(ptlrpc_register_service); /** * to actually free the request, must be called without holding svc_lock. @@ -715,22 +863,16 @@ void ptlrpc_server_drop_request(struct ptlrpc_request *req) if (!cfs_atomic_dec_and_test(&req->rq_refcount)) return; - cfs_spin_lock(&svcpt->scp_at_lock); if (req->rq_at_linked) { - struct ptlrpc_at_array *array = &svcpt->scp_at_array; - __u32 index = req->rq_at_index; - - LASSERT(!cfs_list_empty(&req->rq_timed_list)); - cfs_list_del_init(&req->rq_timed_list); - cfs_spin_lock(&req->rq_lock); - req->rq_at_linked = 0; - cfs_spin_unlock(&req->rq_lock); - array->paa_reqs_count[index]--; - array->paa_count--; - } else - LASSERT(cfs_list_empty(&req->rq_timed_list)); + cfs_spin_lock(&svcpt->scp_at_lock); + /* recheck with lock, in case it's unlinked by + * ptlrpc_at_check_timed() */ + if (likely(req->rq_at_linked)) + ptlrpc_at_remove_timed(req); + cfs_spin_unlock(&svcpt->scp_at_lock); + } - cfs_spin_unlock(&svcpt->scp_at_lock); + LASSERT(cfs_list_empty(&req->rq_timed_list)); /* finalize request */ if (req->rq_export) { @@ -752,7 +894,7 @@ void ptlrpc_server_drop_request(struct ptlrpc_request *req) /* cull some history? * I expect only about 1 or 2 rqbds need to be recycled here */ - while (svcpt->scp_hist_nrqbds > svc->srv_max_history_rqbds) { + while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) { rqbd = cfs_list_entry(svcpt->scp_hist_rqbds.next, struct ptlrpc_request_buffer_desc, rqbd_list); @@ -801,6 +943,10 @@ void ptlrpc_server_drop_request(struct ptlrpc_request *req) cfs_list_del(&req->rq_list); cfs_list_del_init(&req->rq_history_list); + /* Track the highest culled req seq */ + if (req->rq_history_seq > svcpt->scp_hist_seq_culled) + svcpt->scp_hist_seq_culled = req->rq_history_seq; + cfs_spin_unlock(&svcpt->scp_lock); ptlrpc_server_free_request(req); @@ -962,10 +1108,8 @@ static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt) struct ptlrpc_at_array *array = &svcpt->scp_at_array; __s32 next; - cfs_spin_lock(&svcpt->scp_at_lock); if (array->paa_count == 0) { cfs_timer_disarm(&svcpt->scp_at_timer); - cfs_spin_unlock(&svcpt->scp_at_lock); return; } @@ -979,7 +1123,6 @@ static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt) CDEBUG(D_INFO, "armed %s at %+ds\n", svcpt->scp_service->srv_name, next); } - cfs_spin_unlock(&svcpt->scp_at_lock); } /* Add rpc to early reply check list */ @@ -989,7 +1132,6 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req) struct ptlrpc_at_array *array = &svcpt->scp_at_array; struct ptlrpc_request *rq = NULL; __u32 index; - int found = 0; if (AT_OFF) return(0); @@ -1031,16 +1173,32 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req) array->paa_count++; if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) { array->paa_deadline = req->rq_deadline; - found = 1; - } - cfs_spin_unlock(&svcpt->scp_at_lock); - - if (found) ptlrpc_at_set_timer(svcpt); + } + cfs_spin_unlock(&svcpt->scp_at_lock); return 0; } +static void 
+ptlrpc_at_remove_timed(struct ptlrpc_request *req) +{ + struct ptlrpc_at_array *array; + + array = &req->rq_rqbd->rqbd_svcpt->scp_at_array; + + /* NB: must call with hold svcpt::scp_at_lock */ + LASSERT(!cfs_list_empty(&req->rq_timed_list)); + cfs_list_del_init(&req->rq_timed_list); + + cfs_spin_lock(&req->rq_lock); + req->rq_at_linked = 0; + cfs_spin_unlock(&req->rq_lock); + + array->paa_reqs_count[req->rq_at_index]--; + array->paa_count--; +} + static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) { struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt; @@ -1208,10 +1366,10 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) first = array->paa_deadline - now; if (first > at_early_margin) { /* We've still got plenty of time. Reset the timer. */ - cfs_spin_unlock(&svcpt->scp_at_lock); ptlrpc_at_set_timer(svcpt); - RETURN(0); - } + cfs_spin_unlock(&svcpt->scp_at_lock); + RETURN(0); + } /* We're close to a timeout, and we don't know how much longer the server will take. Send early replies to everyone expiring soon. */ @@ -1224,40 +1382,34 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) cfs_list_for_each_entry_safe(rq, n, &array->paa_reqs_array[index], rq_timed_list) { - if (rq->rq_deadline <= now + at_early_margin) { - cfs_list_del_init(&rq->rq_timed_list); - /** - * ptlrpc_server_drop_request() may drop - * refcount to 0 already. Let's check this and - * don't add entry to work_list - */ - if (likely(cfs_atomic_inc_not_zero(&rq->rq_refcount))) - cfs_list_add(&rq->rq_timed_list, &work_list); - counter++; - array->paa_reqs_count[index]--; - array->paa_count--; - cfs_spin_lock(&rq->rq_lock); - rq->rq_at_linked = 0; - cfs_spin_unlock(&rq->rq_lock); - continue; - } - - /* update the earliest deadline */ - if (deadline == -1 || rq->rq_deadline < deadline) - deadline = rq->rq_deadline; + if (rq->rq_deadline > now + at_early_margin) { + /* update the earliest deadline */ + if (deadline == -1 || + rq->rq_deadline < deadline) + deadline = rq->rq_deadline; + break; + } - break; + ptlrpc_at_remove_timed(rq); + /** + * ptlrpc_server_drop_request() may drop + * refcount to 0 already. Let's check this and + * don't add entry to work_list + */ + if (likely(cfs_atomic_inc_not_zero(&rq->rq_refcount))) + cfs_list_add(&rq->rq_timed_list, &work_list); + counter++; } if (++index >= array->paa_size) index = 0; } array->paa_deadline = deadline; - cfs_spin_unlock(&svcpt->scp_at_lock); - /* we have a new earliest deadline, restart the timer */ ptlrpc_at_set_timer(svcpt); + cfs_spin_unlock(&svcpt->scp_at_lock); + CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early " "replies\n", first, at_extra, counter); if (first < 0) { @@ -1337,6 +1489,32 @@ static void ptlrpc_hpreq_fini(struct ptlrpc_request *req) EXIT; } +static int ptlrpc_hpreq_check(struct ptlrpc_request *req) +{ + return 1; +} + +static struct ptlrpc_hpreq_ops ptlrpc_hpreq_common = { + .hpreq_lock_match = NULL, + .hpreq_check = ptlrpc_hpreq_check, + .hpreq_fini = NULL, +}; + +/* Hi-Priority RPC check by RPC operation code. */ +int ptlrpc_hpreq_handler(struct ptlrpc_request *req) +{ + int opc = lustre_msg_get_opc(req->rq_reqmsg); + + /* Check for export to let only reconnects for not yet evicted + * export to become a HP rpc. */ + if ((req->rq_export != NULL) && + (opc == OBD_PING || opc == MDS_CONNECT || opc == OST_CONNECT)) + req->rq_ops = &ptlrpc_hpreq_common; + + return 0; +} +EXPORT_SYMBOL(ptlrpc_hpreq_handler); + /** * Make the request a high priority one. 
* @@ -1384,18 +1562,13 @@ void ptlrpc_hpreq_reorder(struct ptlrpc_request *req) cfs_spin_unlock(&svcpt->scp_req_lock); EXIT; } +EXPORT_SYMBOL(ptlrpc_hpreq_reorder); /** Check if the request is a high priority one. */ static int ptlrpc_server_hpreq_check(struct ptlrpc_service *svc, struct ptlrpc_request *req) { - ENTRY; - - /* Check by request opc. */ - if (OBD_PING == lustre_msg_get_opc(req->rq_reqmsg)) - RETURN(1); - - RETURN(ptlrpc_hpreq_init(svc, req)); + return ptlrpc_hpreq_init(svc, req); } /** Check if a request is a high priority one. */ @@ -2013,7 +2186,8 @@ liblustre_check_services (void *arg) cfs_list_entry (tmp, struct ptlrpc_service, srv_list); struct ptlrpc_service_part *svcpt; - svcpt = svc->srv_part; + LASSERT(svc->srv_ncpts == 1); + svcpt = svc->srv_parts[0]; if (svcpt->scp_nthrs_running != 0) /* I've recursed */ continue; @@ -2058,7 +2232,7 @@ ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt) * space. */ if (avail <= low_water) - ptlrpc_grow_req_bufs(svcpt); + ptlrpc_grow_req_bufs(svcpt, 1); if (svcpt->scp_service->srv_stats) { lprocfs_counter_add(svcpt->scp_service->srv_stats, @@ -2092,7 +2266,8 @@ static inline int ptlrpc_threads_increasable(struct ptlrpc_service_part *svcpt) { return svcpt->scp_nthrs_running + - svcpt->scp_nthrs_starting < svcpt->scp_service->srv_threads_max; + svcpt->scp_nthrs_starting < + svcpt->scp_service->srv_nthrs_cpt_limit; } /** @@ -2185,24 +2360,14 @@ static int ptlrpc_main(void *arg) thread->t_pid = cfs_curproc_pid(); cfs_daemonize_ctxt(thread->t_name); -#if defined(HAVE_NODE_TO_CPUMASK) && defined(CONFIG_NUMA) - /* we need to do this before any per-thread allocation is done so that - * we get the per-thread allocations on local node. bug 7342 */ - if (svc->srv_cpu_affinity) { - int cpu, num_cpu; - - for (cpu = 0, num_cpu = 0; cpu < cfs_num_possible_cpus(); - cpu++) { - if (!cpu_online(cpu)) - continue; - if (num_cpu == thread->t_id % cfs_num_online_cpus()) - break; - num_cpu++; - } - cfs_set_cpus_allowed(cfs_current(), - node_to_cpumask(cpu_to_node(cpu))); - } -#endif + /* NB: we will call cfs_cpt_bind() for all threads, because we + * might want to run lustre server only on a subset of system CPUs, + * in that case ->scp_cpt is CFS_CPT_ANY */ + rc = cfs_cpt_bind(svc->srv_cptable, svcpt->scp_cpt); + if (rc != 0) { + CWARN("%s: failed to bind %s on CPT %d\n", + svc->srv_name, thread->t_name, svcpt->scp_cpt); + } #ifdef WITH_GROUP_INFO ginfo = cfs_groups_alloc(0); @@ -2236,6 +2401,16 @@ static int ptlrpc_main(void *arg) env->le_ctx.lc_thread = thread; env->le_ctx.lc_cookie = 0x6; + while (!cfs_list_empty(&svcpt->scp_rqbd_idle)) { + rc = ptlrpc_server_post_idle_rqbds(svcpt); + if (rc >= 0) + continue; + + CERROR("Failed to post rqbd for %s on CPT %d: %d\n", + svc->srv_name, svcpt->scp_cpt, rc); + goto out_srv_fini; + } + /* Alloc reply state structure for this one */ OBD_ALLOC_LARGE(rs, svc->srv_max_reply_size); if (!rs) { @@ -2350,23 +2525,18 @@ out: return rc; } -struct ptlrpc_hr_args { - int thread_index; - int cpu_index; - struct ptlrpc_hr_service *hrs; -}; - -static int hrt_dont_sleep(struct ptlrpc_hr_thread *t, - cfs_list_t *replies) +static int hrt_dont_sleep(struct ptlrpc_hr_thread *hrt, + cfs_list_t *replies) { - int result; + int result; + + cfs_spin_lock(&hrt->hrt_lock); + + cfs_list_splice_init(&hrt->hrt_queue, replies); + result = ptlrpc_hr.hr_stopping || !cfs_list_empty(replies); - cfs_spin_lock(&t->hrt_lock); - cfs_list_splice_init(&t->hrt_queue, replies); - result = cfs_test_bit(HRT_STOPPING, &t->hrt_flags) || - 
!cfs_list_empty(replies); - cfs_spin_unlock(&t->hrt_lock); - return result; + cfs_spin_unlock(&hrt->hrt_lock); + return result; } /** @@ -2375,26 +2545,28 @@ static int hrt_dont_sleep(struct ptlrpc_hr_thread *t, */ static int ptlrpc_hr_main(void *arg) { - struct ptlrpc_hr_args * hr_args = arg; - struct ptlrpc_hr_service *hr = hr_args->hrs; - struct ptlrpc_hr_thread *t = &hr->hr_threads[hr_args->thread_index]; - char threadname[20]; - CFS_LIST_HEAD(replies); + struct ptlrpc_hr_thread *hrt = (struct ptlrpc_hr_thread *)arg; + struct ptlrpc_hr_partition *hrp = hrt->hrt_partition; + CFS_LIST_HEAD (replies); + char threadname[20]; + int rc; - snprintf(threadname, sizeof(threadname), - "ptlrpc_hr_%d", hr_args->thread_index); + snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d", + hrp->hrp_cpt, hrt->hrt_id); + cfs_daemonize_ctxt(threadname); - cfs_daemonize_ctxt(threadname); -#if defined(CONFIG_NUMA) && defined(HAVE_NODE_TO_CPUMASK) - cfs_set_cpus_allowed(cfs_current(), - node_to_cpumask(cpu_to_node(hr_args->cpu_index))); -#endif - cfs_set_bit(HRT_RUNNING, &t->hrt_flags); - cfs_waitq_signal(&t->hrt_wait); + rc = cfs_cpt_bind(ptlrpc_hr.hr_cpt_table, hrp->hrp_cpt); + if (rc != 0) { + CWARN("Failed to bind %s on CPT %d of CPT table %p: rc = %d\n", + threadname, hrp->hrp_cpt, ptlrpc_hr.hr_cpt_table, rc); + } - while (!cfs_test_bit(HRT_STOPPING, &t->hrt_flags)) { + cfs_atomic_inc(&hrp->hrp_nstarted); + cfs_waitq_signal(&ptlrpc_hr.hr_waitq); + + while (!ptlrpc_hr.hr_stopping) { + l_wait_condition(hrt->hrt_waitq, hrt_dont_sleep(hrt, &replies)); - l_wait_condition(t->hrt_wait, hrt_dont_sleep(t, &replies)); while (!cfs_list_empty(&replies)) { struct ptlrpc_reply_state *rs; @@ -2406,89 +2578,64 @@ static int ptlrpc_hr_main(void *arg) } } - cfs_clear_bit(HRT_RUNNING, &t->hrt_flags); - cfs_complete(&t->hrt_completion); + cfs_atomic_inc(&hrp->hrp_nstopped); + cfs_waitq_signal(&ptlrpc_hr.hr_waitq); - return 0; + return 0; } -static int ptlrpc_start_hr_thread(struct ptlrpc_hr_service *hr, int n, int cpu) +static void ptlrpc_stop_hr_threads(void) { - struct ptlrpc_hr_thread *t = &hr->hr_threads[n]; - struct ptlrpc_hr_args args; - int rc; - ENTRY; + struct ptlrpc_hr_partition *hrp; + int i; + int j; - args.thread_index = n; - args.cpu_index = cpu; - args.hrs = hr; + ptlrpc_hr.hr_stopping = 1; - rc = cfs_create_thread(ptlrpc_hr_main, (void*)&args, CFS_DAEMON_FLAGS); - if (rc < 0) { - cfs_complete(&t->hrt_completion); - GOTO(out, rc); - } - l_wait_condition(t->hrt_wait, cfs_test_bit(HRT_RUNNING, &t->hrt_flags)); - RETURN(0); - out: - return rc; -} - -static void ptlrpc_stop_hr_thread(struct ptlrpc_hr_thread *t) -{ - ENTRY; - - cfs_set_bit(HRT_STOPPING, &t->hrt_flags); - cfs_waitq_signal(&t->hrt_wait); - cfs_wait_for_completion(&t->hrt_completion); + cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { + if (hrp->hrp_thrs == NULL) + continue; /* uninitialized */ + for (j = 0; j < hrp->hrp_nthrs; j++) + cfs_waitq_broadcast(&hrp->hrp_thrs[j].hrt_waitq); + } - EXIT; + cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { + if (hrp->hrp_thrs == NULL) + continue; /* uninitialized */ + cfs_wait_event(ptlrpc_hr.hr_waitq, + cfs_atomic_read(&hrp->hrp_nstopped) == + cfs_atomic_read(&hrp->hrp_nstarted)); + } } -static void ptlrpc_stop_hr_threads(struct ptlrpc_hr_service *hrs) +static int ptlrpc_start_hr_threads(void) { - int n; - ENTRY; - - for (n = 0; n < hrs->hr_n_threads; n++) - ptlrpc_stop_hr_thread(&hrs->hr_threads[n]); - - EXIT; -} + struct ptlrpc_hr_partition *hrp; + int i; + int j; + ENTRY; -static int 
ptlrpc_start_hr_threads(struct ptlrpc_hr_service *hr) -{ - int rc = -ENOMEM; - int n, cpu, threads_started = 0; - ENTRY; + cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { + int rc = 0; - LASSERT(hr != NULL); - LASSERT(hr->hr_n_threads > 0); + for (j = 0; j < hrp->hrp_nthrs; j++) { + rc = cfs_create_thread(ptlrpc_hr_main, + &hrp->hrp_thrs[j], + CLONE_VM | CLONE_FILES); + if (rc < 0) + break; + } + cfs_wait_event(ptlrpc_hr.hr_waitq, + cfs_atomic_read(&hrp->hrp_nstarted) == j); + if (rc >= 0) + continue; - for (n = 0, cpu = 0; n < hr->hr_n_threads; n++) { -#if defined(CONFIG_SMP) && defined(HAVE_NODE_TO_CPUMASK) - while (!cpu_online(cpu)) { - cpu++; - if (cpu >= cfs_num_possible_cpus()) - cpu = 0; - } -#endif - rc = ptlrpc_start_hr_thread(hr, n, cpu); - if (rc != 0) - break; - threads_started++; - cpu++; - } - if (threads_started == 0) { - CERROR("No reply handling threads started\n"); - RETURN(-ESRCH); - } - if (threads_started < hr->hr_n_threads) { - CWARN("Started only %d reply handling threads from %d\n", - threads_started, hr->hr_n_threads); - hr->hr_n_threads = threads_started; - } - RETURN(0); + CERROR("Reply handling thread %d:%d Failed on starting: " + "rc = %d\n", i, j, rc); + ptlrpc_stop_hr_threads(); + RETURN(rc); + } + RETURN(0); } static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt) @@ -2546,37 +2693,50 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt) */ void ptlrpc_stop_all_threads(struct ptlrpc_service *svc) { + struct ptlrpc_service_part *svcpt; + int i; ENTRY; - if (svc != NULL && svc->srv_part != NULL) - ptlrpc_svcpt_stop_threads(svc->srv_part); + ptlrpc_service_for_each_part(svcpt, i, svc) { + if (svcpt->scp_service != NULL) + ptlrpc_svcpt_stop_threads(svcpt); + } + EXIT; } +EXPORT_SYMBOL(ptlrpc_stop_all_threads); int ptlrpc_start_threads(struct ptlrpc_service *svc) { - int i, rc = 0; - ENTRY; + int rc = 0; + int i; + int j; + ENTRY; - /* We require 2 threads min - see note in - ptlrpc_server_handle_request */ - LASSERT(svc->srv_threads_min >= 2); - for (i = 0; i < svc->srv_threads_min; i++) { - rc = ptlrpc_start_thread(svc->srv_part, 1); - /* We have enough threads, don't start more. b=15759 */ - if (rc == -EMFILE) { - rc = 0; - break; - } - if (rc) { - CERROR("cannot start %s thread #%d: rc %d\n", - svc->srv_thread_name, i, rc); - ptlrpc_stop_all_threads(svc); - break; - } - } - RETURN(rc); + /* We require 2 threads min, see note in ptlrpc_server_handle_request */ + LASSERT(svc->srv_nthrs_cpt_init >= PTLRPC_NTHRS_INIT); + + for (i = 0; i < svc->srv_ncpts; i++) { + for (j = 0; j < svc->srv_nthrs_cpt_init; j++) { + rc = ptlrpc_start_thread(svc->srv_parts[i], 1); + if (rc == 0) + continue; + + if (rc != -EMFILE) + goto failed; + /* We have enough threads, don't start more. 
b=15759 */ + break; + } + } + + RETURN(0); + failed: + CERROR("cannot start %s thread #%d_%d: rc %d\n", + svc->srv_thread_name, i, j, rc); + ptlrpc_stop_all_threads(svc); + RETURN(rc); } +EXPORT_SYMBOL(ptlrpc_start_threads); int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) { @@ -2588,9 +2748,9 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) LASSERT(svcpt != NULL); - CDEBUG(D_RPCTRACE, "%s started %d min %d max %d\n", - svc->srv_name, svcpt->scp_nthrs_running, - svc->srv_threads_min, svc->srv_threads_max); + CDEBUG(D_RPCTRACE, "%s[%d] started %d min %d max %d\n", + svc->srv_name, svcpt->scp_cpt, svcpt->scp_nthrs_running, + svc->srv_nthrs_cpt_init, svc->srv_nthrs_cpt_limit); again: if (unlikely(svc->srv_is_stopping)) @@ -2598,10 +2758,10 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) if (!ptlrpc_threads_increasable(svcpt) || (OBD_FAIL_CHECK(OBD_FAIL_TGT_TOOMANY_THREADS) && - svcpt->scp_nthrs_running == svc->srv_threads_min - 1)) + svcpt->scp_nthrs_running == svc->srv_nthrs_cpt_init - 1)) RETURN(-EMFILE); - OBD_ALLOC_PTR(thread); + OBD_CPT_ALLOC_PTR(thread, svc->srv_cptable, svcpt->scp_cpt); if (thread == NULL) RETURN(-ENOMEM); cfs_waitq_init(&thread->t_ctl_waitq); @@ -2639,8 +2799,13 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) cfs_list_add(&thread->t_link, &svcpt->scp_threads); cfs_spin_unlock(&svcpt->scp_lock); - snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, - "%s_%02d", svc->srv_thread_name, thread->t_id); + if (svcpt->scp_cpt >= 0) { + snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s%02d_%03d", + svc->srv_thread_name, svcpt->scp_cpt, thread->t_id); + } else { + snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s_%04d", + svc->srv_thread_name, thread->t_id); + } CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name); /* @@ -2673,46 +2838,75 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) int ptlrpc_hr_init(void) { - int i; - int n_cpus = cfs_num_online_cpus(); - struct ptlrpc_hr_service *hr; - int size; - int rc; - ENTRY; + struct ptlrpc_hr_partition *hrp; + struct ptlrpc_hr_thread *hrt; + int rc; + int i; + int j; + ENTRY; - LASSERT(ptlrpc_hr == NULL); + memset(&ptlrpc_hr, 0, sizeof(ptlrpc_hr)); + ptlrpc_hr.hr_cpt_table = cfs_cpt_table; - size = offsetof(struct ptlrpc_hr_service, hr_threads[n_cpus]); - OBD_ALLOC(hr, size); - if (hr == NULL) - RETURN(-ENOMEM); - for (i = 0; i < n_cpus; i++) { - struct ptlrpc_hr_thread *t = &hr->hr_threads[i]; + ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table, + sizeof(*hrp)); + if (ptlrpc_hr.hr_partitions == NULL) + RETURN(-ENOMEM); - cfs_spin_lock_init(&t->hrt_lock); - cfs_waitq_init(&t->hrt_wait); - CFS_INIT_LIST_HEAD(&t->hrt_queue); - cfs_init_completion(&t->hrt_completion); - } - hr->hr_n_threads = n_cpus; - hr->hr_size = size; - ptlrpc_hr = hr; + cfs_waitq_init(&ptlrpc_hr.hr_waitq); - rc = ptlrpc_start_hr_threads(hr); - if (rc) { - OBD_FREE(hr, hr->hr_size); - ptlrpc_hr = NULL; - } - RETURN(rc); + cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { + hrp->hrp_cpt = i; + + cfs_atomic_set(&hrp->hrp_nstarted, 0); + cfs_atomic_set(&hrp->hrp_nstopped, 0); + + hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i); + hrp->hrp_nthrs /= cfs_cpu_ht_nsiblings(0); + + LASSERT(hrp->hrp_nthrs > 0); + OBD_CPT_ALLOC(hrp->hrp_thrs, ptlrpc_hr.hr_cpt_table, i, + hrp->hrp_nthrs * sizeof(*hrt)); + if (hrp->hrp_thrs == NULL) + GOTO(out, rc = -ENOMEM); + + for (j = 0; j < hrp->hrp_nthrs; j++) { + hrt = &hrp->hrp_thrs[j]; + + 
hrt->hrt_id = j; + hrt->hrt_partition = hrp; + cfs_waitq_init(&hrt->hrt_waitq); + cfs_spin_lock_init(&hrt->hrt_lock); + CFS_INIT_LIST_HEAD(&hrt->hrt_queue); + } + } + + rc = ptlrpc_start_hr_threads(); +out: + if (rc != 0) + ptlrpc_hr_fini(); + RETURN(rc); } void ptlrpc_hr_fini(void) { - if (ptlrpc_hr != NULL) { - ptlrpc_stop_hr_threads(ptlrpc_hr); - OBD_FREE(ptlrpc_hr, ptlrpc_hr->hr_size); - ptlrpc_hr = NULL; - } + struct ptlrpc_hr_partition *hrp; + int i; + + if (ptlrpc_hr.hr_partitions == NULL) + return; + + ptlrpc_stop_hr_threads(); + + cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { + if (hrp->hrp_thrs != NULL) { + OBD_FREE(hrp->hrp_thrs, + hrp->hrp_nthrs * sizeof(hrp->hrp_thrs[0])); + } + } + + cfs_percpt_free(ptlrpc_hr.hr_partitions); + ptlrpc_hr.hr_partitions = NULL; } #endif /* __KERNEL__ */ @@ -2739,16 +2933,14 @@ static void ptlrpc_wait_replies(struct ptlrpc_service_part *svcpt) static void ptlrpc_service_del_atimer(struct ptlrpc_service *svc) { - struct ptlrpc_service_part *svcpt; + struct ptlrpc_service_part *svcpt; + int i; /* early disarm AT timer... */ - do { /* iterrate over multiple partitions in the future */ - svcpt = svc->srv_part; - if (svcpt == NULL || svcpt->scp_service == NULL) - break; - - cfs_timer_disarm(&svcpt->scp_at_timer); - } while (0); + ptlrpc_service_for_each_part(svcpt, i, svc) { + if (svcpt->scp_service != NULL) + cfs_timer_disarm(&svcpt->scp_at_timer); + } } static void @@ -2758,17 +2950,17 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc) struct ptlrpc_request_buffer_desc *rqbd; struct l_wait_info lwi; int rc; + int i; - /* All history will be culled when the next request buffer is + /* All history will be culled when the next request buffer is * freed in ptlrpc_service_purge_all() */ - svc->srv_max_history_rqbds = 0; + svc->srv_hist_nrqbds_cpt_max = 0; rc = LNetClearLazyPortal(svc->srv_req_portal); LASSERT(rc == 0); - do { /* iterrate over multiple partitions in the future */ - svcpt = svc->srv_part; - if (svcpt == NULL || svcpt->scp_service == NULL) + ptlrpc_service_for_each_part(svcpt, i, svc) { + if (svcpt->scp_service == NULL) break; /* Unlink all the request buffers. 
This forces a 'final' @@ -2778,11 +2970,10 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc) rc = LNetMDUnlink(rqbd->rqbd_md_h); LASSERT(rc == 0 || rc == -ENOENT); } - } while (0); + } - do { /* iterrate over multiple partitions in the future */ - svcpt = svc->srv_part; - if (svcpt == NULL || svcpt->scp_service == NULL) + ptlrpc_service_for_each_part(svcpt, i, svc) { + if (svcpt->scp_service == NULL) break; /* Wait for the network to release any buffers @@ -2806,7 +2997,7 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc) cfs_spin_lock(&svcpt->scp_lock); } cfs_spin_unlock(&svcpt->scp_lock); - } while (0); + } } static void @@ -2816,11 +3007,10 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc) struct ptlrpc_request_buffer_desc *rqbd; struct ptlrpc_request *req; struct ptlrpc_reply_state *rs; + int i; - do { /* iterrate over multiple partitions in the future */ - /* schedule all outstanding replies to terminate them */ - svcpt = svc->srv_part; - if (svcpt == NULL || svcpt->scp_service == NULL) + ptlrpc_service_for_each_part(svcpt, i, svc) { + if (svcpt->scp_service == NULL) break; cfs_spin_lock(&svcpt->scp_rep_lock); @@ -2879,7 +3069,7 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc) cfs_list_del(&rs->rs_list); OBD_FREE_LARGE(rs, svc->srv_max_reply_size); } - } while (0); + } } static void @@ -2887,10 +3077,10 @@ ptlrpc_service_free(struct ptlrpc_service *svc) { struct ptlrpc_service_part *svcpt; struct ptlrpc_at_array *array; + int i; - do { /* iterrate over multiple partitions in the future */ - svcpt = svc->srv_part; - if (svcpt == NULL || svcpt->scp_service == NULL) + ptlrpc_service_for_each_part(svcpt, i, svc) { + if (svcpt->scp_service == NULL) break; /* In case somebody rearmed this in the meantime */ @@ -2908,16 +3098,16 @@ ptlrpc_service_free(struct ptlrpc_service *svc) sizeof(__u32) * array->paa_size); array->paa_reqs_count = NULL; } - svcpt->scp_service = NULL; - } while (0); + } + + ptlrpc_service_for_each_part(svcpt, i, svc) + OBD_FREE_PTR(svcpt); - do { /* iterrate over multiple partitions in the future */ - svcpt = svc->srv_part; - if (svcpt != NULL) - OBD_FREE_PTR(svcpt); - } while (0); + if (svc->srv_cpts != NULL) + cfs_expr_list_values_free(svc->srv_cpts, svc->srv_ncpts); - OBD_FREE_PTR(svc); + OBD_FREE(svc, offsetof(struct ptlrpc_service, + srv_parts[svc->srv_ncpts])); } int ptlrpc_unregister_service(struct ptlrpc_service *service) @@ -2943,6 +3133,7 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service) RETURN(0); } +EXPORT_SYMBOL(ptlrpc_unregister_service); /** * Returns 0 if the service is healthy. @@ -2950,19 +3141,14 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service) * Right now, it just checks to make sure that requests aren't languishing * in the queue. We'll use this health check to govern whether a node needs * to be shot, so it's intentionally non-aggressive. 
 */
-int ptlrpc_service_health_check(struct ptlrpc_service *svc)
+int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
 {
-	struct ptlrpc_service_part *svcpt;
 	struct ptlrpc_request *request;
 	struct timeval right_now;
 	long timediff;
 
-	if (svc == NULL || svc->srv_part == NULL)
-		return 0;
-
 	cfs_gettimeofday(&right_now);
 
-	svcpt = svc->srv_part;
 	cfs_spin_lock(&svcpt->scp_req_lock);
 	if (!ptlrpc_server_request_pending(svcpt, 1)) {
 		cfs_spin_unlock(&svcpt->scp_req_lock);
 		return 0;
 	}
@@ -2990,3 +3176,22 @@ int ptlrpc_service_health_check(struct ptlrpc_service *svc)
 
 	return 0;
 }
+
+int
+ptlrpc_service_health_check(struct ptlrpc_service *svc)
+{
+	struct ptlrpc_service_part *svcpt;
+	int i;
+
+	if (svc == NULL || svc->srv_parts == NULL)
+		return 0;
+
+	ptlrpc_service_for_each_part(svcpt, i, svc) {
+		int rc = ptlrpc_svcpt_health_check(svcpt);
+
+		if (rc != 0)
+			return rc;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ptlrpc_service_health_check);
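
The reply-dispatch change in this patch replaces the single global hr_index rotor with per-CPT reply-handler partitions: ptlrpc_hr_select() matches the service's partition directly when the service shares ptlrpc_hr's CPT table, otherwise round-robins across partitions, and then round-robins across that partition's threads. Below is a minimal standalone sketch of that two-level selection; the types, names and sizes (hr_partition, hr_select, NPARTS, NTHRS) are illustrative placeholders, not the kernel structures or cfs_* helpers used in the patch.

/* Standalone sketch of the two-level rotor used by ptlrpc_hr_select():
 * pick a partition (directly by CPT id when the service and the reply
 * handler share a CPT table, otherwise round-robin over partitions),
 * then round-robin among that partition's threads.  As in the patch,
 * the rotor increments are left unlocked: a lost update only skews the
 * distribution slightly and the modulo keeps the index in range. */
#include <stdio.h>

#define NPARTS	2		/* stand-in for cfs_cpt_number()	*/
#define NTHRS	4		/* threads per partition (simplified)	*/

struct hr_partition {
	unsigned int	rotor;			/* per-partition rotor	*/
	int		thread_ids[NTHRS];
};

static struct hr_partition	parts[NPARTS];
static unsigned int		global_rotor;	/* non-affinity services */

static int hr_select(int svc_cpt)
{
	struct hr_partition *p;

	if (svc_cpt >= 0)			/* CPT-affinity: direct match */
		p = &parts[svc_cpt];
	else					/* otherwise spread evenly */
		p = &parts[global_rotor++ % NPARTS];

	return p->thread_ids[p->rotor++ % NTHRS];
}

int main(void)
{
	int i, j;

	for (i = 0; i < NPARTS; i++)
		for (j = 0; j < NTHRS; j++)
			parts[i].thread_ids[j] = i * 100 + j;

	for (i = 0; i < 8; i++)			/* non-affinity requests */
		printf("reply -> thread %d\n", hr_select(-1));
	return 0;
}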
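
A hand-worked pass through the estimate in ptlrpc_server_nthreads_check() above, using assumed, illustrative configuration values rather than any real service's defaults: suppose srv_ncpts = 2, tc_nthrs_user = 0, tc_nthrs_base = 64, tc_thr_factor = 4, tc_nthrs_max = 256, and each CPT covers 8 cores with 2 hyper-threads (cfs_cpt_weight() = 16). Because srv_ncpts != 1 the single-partition boost is skipped, so nthrs starts at the base of 64. Hyper-threading depresses the factor to 4 - (4 >> 1) + (4 >> 3) = 2. With fade = 4 the loop then adds min(16, 4) * 2 = 8 and min(12, 4) * 1 = 4, giving nthrs = 76. Since 76 * 2 = 152 does not exceed tc_nthrs_max = 256, no clamping occurs, and (assuming tc_nthrs_init is smaller) srv_nthrs_cpt_limit ends up as 76 per partition, i.e. up to 152 service threads in total.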