+/*
+ * Compute the minimum and maximum number of service threads to run for
+ * the service described by \a conf, returned through \a min_p / \a max_p.
+ *
+ * If the administrator requested an explicit thread count
+ * (tc_nthrs_user != 0) it is clamped into [nthrs_min, tc_nthrs_max] and
+ * used for both bounds; otherwise the configured floor/ceiling are
+ * returned unchanged.  Userspace (liblustre) builds always use a single
+ * thread.
+ */
+static void
+ptlrpc_server_nthreads_check(struct ptlrpc_service_conf *conf,
+ int *min_p, int *max_p)
+{
+#ifdef __KERNEL__
+ struct ptlrpc_service_thr_conf *tc = &conf->psc_thr;
+ int nthrs_min;
+ int nthrs;
+
+ /* A service with a high-priority request handler needs one extra
+  * thread above the global floor; the per-service floor may raise
+  * this further. */
+ nthrs_min = PTLRPC_NTHRS_MIN + (conf->psc_ops.so_hpreq_handler != NULL);
+ nthrs_min = max_t(int, nthrs_min, tc->tc_nthrs_min);
+
+ nthrs = tc->tc_nthrs_user;
+ if (nthrs != 0) { /* validate it */
+ /* Clamp the user-requested count.  max_t() is applied last, so
+  * nthrs_min deliberately wins if it exceeds tc_nthrs_max. */
+ nthrs = min_t(int, nthrs, tc->tc_nthrs_max);
+ nthrs = max_t(int, nthrs, nthrs_min);
+ *min_p = *max_p = nthrs;
+ return;
+ }
+
+ /*
+ * NB: we will add some common code here for estimating, for example:
+ * add a new member ptlrpc_service_thr_conf::tc_factor, and estimate
+ * threads number based on:
+ * (online_cpus * conf::tc_factor) + conf::tc_nthrs_base.
+ *
+ * So we can remove code block like estimation in ost_setup, also,
+ * we might estimate MDS threads number as well instead of using
+ * absolute number, and have more threads on fat servers to improve
+ * availability of service.
+ *
+ * Also, we will need to validate threads number at here for
+ * CPT affinity service (CPU ParTition) in the future.
+ * A service can have percpt thread-pool instead of a global thread
+ * pool for each service, which means user might not always get the
+ * threads number they want even if they set it in conf::tc_nthrs_user,
+ * because we need to adjust threads number for each CPT, instead of
+ * just use (conf::tc_nthrs_user / NCPTS), to make sure each pool
+ * will be healthy.
+ */
+ *max_p = tc->tc_nthrs_max;
+ *min_p = nthrs_min;
+#else /* __KERNEL__ */
+ *max_p = *min_p = 1; /* whatever */
+#endif
+}
+