+ struct ptlrpc_service_part *svcpt;
+
+ svcpt = (struct ptlrpc_service_part *)castmeharder;
+
+ svcpt->scp_at_check = 1;
+ svcpt->scp_at_checktime = cfs_time_current();
+ cfs_waitq_signal(&svcpt->scp_waitq);
+}
+
+/**
+ * Work out how many service threads to start for \a conf.
+ *
+ * On return, [*min_p, *max_p] is the allowed thread-count range:
+ * - if the user requested an explicit count (tc_nthrs_user != 0), it is
+ *   clamped into [nthrs_min, tc_nthrs_max] and used for BOTH bounds;
+ * - otherwise the configured maximum and the computed minimum are used.
+ *
+ * The minimum is at least PTLRPC_NTHRS_MIN, raised by one when the
+ * service has a high-priority request handler, and never below the
+ * configured tc_nthrs_min.
+ */
+static void
+ptlrpc_server_nthreads_check(struct ptlrpc_service_conf *conf,
+ int *min_p, int *max_p)
+{
+#ifdef __KERNEL__
+ struct ptlrpc_service_thr_conf *tc = &conf->psc_thr;
+ int nthrs_min;
+ int nthrs;
+
+ /* reserve one extra thread when this service handles high-priority
+ * requests, so normal request processing can't starve HP handling */
+ nthrs_min = PTLRPC_NTHRS_MIN + (conf->psc_ops.so_hpreq_handler != NULL);
+ nthrs_min = max_t(int, nthrs_min, tc->tc_nthrs_min);
+
+ nthrs = tc->tc_nthrs_user;
+ if (nthrs != 0) { /* validate it */
+ /* clamp into [nthrs_min, tc_nthrs_max]; NB: max_t is applied
+ * last, so nthrs_min wins if it exceeds tc_nthrs_max */
+ nthrs = min_t(int, nthrs, tc->tc_nthrs_max);
+ nthrs = max_t(int, nthrs, nthrs_min);
+ *min_p = *max_p = nthrs;
+ return;
+ }
+
+ /*
+ * NB: we will add some common code here for estimating, for example:
+ * add a new member ptlrpc_service_thr_conf::tc_factor, and estimate
+ * threads number based on:
+ * (online_cpus * conf::tc_factor) + conf::tc_nthrs_base.
+ *
+ * So we can remove code block like estimation in ost_setup, also,
+ * we might estimate MDS threads number as well instead of using
+ * absolute number, and have more threads on fat servers to improve
+ * availability of service.
+ *
+ * Also, we will need to validate threads number at here for
+ * CPT affinity service (CPU ParTion) in the future.
+ * A service can have percpt thread-pool instead of a global thread
+ * pool for each service, which means user might not always get the
+ * threads number they want even they set it in conf::tc_nthrs_user,
+ * because we need to adjust threads number for each CPT, instead of
+ * just use (conf::tc_nthrs_user / NCPTS), to make sure each pool
+ * will be healthy.
+ */
+ *max_p = tc->tc_nthrs_max;
+ *min_p = nthrs_min;
+#else /* __KERNEL__ */
+ *max_p = *min_p = 1; /* whatever */
+#endif
+}
+
+/**
+ * Initialize percpt data for a service
+ *
+ * Sets up one service partition \a svcpt of \a svc: thread list, the
+ * locks/list-heads/waitqueues for request buffers, incoming requests,
+ * history, and reply states, the adaptive-timeout (AT) bucket arrays
+ * and timer, and the initial set of request buffers.
+ *
+ * \param svc	owning service; stored in svcpt->scp_service before the
+ *		request buffers are grown
+ * \param svcpt	the service partition to initialize
+ *
+ * \retval 0		on success
+ * \retval -ENOMEM	on allocation failure; allocations made here are
+ *			freed before returning
+ */
+static int
+ptlrpc_service_part_init(struct ptlrpc_service *svc,
+ struct ptlrpc_service_part *svcpt)
+{
+ struct ptlrpc_at_array *array;
+ int size;
+ int index;
+ int rc;
+
+ CFS_INIT_LIST_HEAD(&svcpt->scp_threads);
+
+ /* rqbd and incoming request queue */
+ cfs_spin_lock_init(&svcpt->scp_lock);
+ CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
+ CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
+ CFS_INIT_LIST_HEAD(&svcpt->scp_req_incoming);
+ cfs_waitq_init(&svcpt->scp_waitq);
+ /* history request & rqbd list */
+ CFS_INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
+ CFS_INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
+
+ /* active requests and hp (high-priority) requests */
+ cfs_spin_lock_init(&svcpt->scp_req_lock);
+ CFS_INIT_LIST_HEAD(&svcpt->scp_req_pending);
+ CFS_INIT_LIST_HEAD(&svcpt->scp_hreq_pending);
+
+ /* reply states */
+ cfs_spin_lock_init(&svcpt->scp_rep_lock);
+ CFS_INIT_LIST_HEAD(&svcpt->scp_rep_active);
+#ifndef __KERNEL__
+ CFS_INIT_LIST_HEAD(&svcpt->scp_rep_queue);
+#endif
+ CFS_INIT_LIST_HEAD(&svcpt->scp_rep_idle);
+ cfs_waitq_init(&svcpt->scp_rep_waitq);
+ cfs_atomic_set(&svcpt->scp_nreps_difficult, 0);
+
+ /* adaptive timeout */
+ cfs_spin_lock_init(&svcpt->scp_at_lock);
+ array = &svcpt->scp_at_array;
+
+ /* number of AT buckets is derived from the configured maximum
+ * adaptive timeout; presumably one bucket per second of deadline
+ * range — confirm against the at_est2timeout definition */
+ size = at_est2timeout(at_max);
+ array->paa_size = size;
+ array->paa_count = 0;
+ array->paa_deadline = -1; /* no request queued for early reply yet */
+
+ /* allocate memory for scp_at_array (ptlrpc_at_array) */
+ OBD_ALLOC(array->paa_reqs_array, sizeof(cfs_list_t) * size);
+ if (array->paa_reqs_array == NULL)
+ return -ENOMEM;
+
+ for (index = 0; index < size; index++)
+ CFS_INIT_LIST_HEAD(&array->paa_reqs_array[index]);
+
+ /* per-bucket request counters, parallel to paa_reqs_array */
+ OBD_ALLOC(array->paa_reqs_count, sizeof(__u32) * size);
+ if (array->paa_reqs_count == NULL)
+ goto failed;
+
+ /* ptlrpc_at_timer just flags scp_at_check and wakes scp_waitq;
+ * the timer is only initialized here, not armed */
+ cfs_timer_init(&svcpt->scp_at_timer, ptlrpc_at_timer, svcpt);
+ /* At SOW, service time should be quick; 10s seems generous. If client
+ * timeout is less than this, we'll be sending an early reply. */
+ at_init(&svcpt->scp_at_estimate, 10, 0);
+
+ /* assign this before call ptlrpc_grow_req_bufs */
+ svcpt->scp_service = svc;
+ /* Now allocate the request buffers, but don't post them now */
+ rc = ptlrpc_grow_req_bufs(svcpt);
+ /* We shouldn't be under memory pressure at startup, so
+ * fail if we can't allocate all our buffers at this time. */
+ if (rc != 0)
+ goto failed;
+
+ return 0;
+
+ failed:
+ /* NOTE(review): rqbds from a partially-successful
+ * ptlrpc_grow_req_bufs are not freed here — verify the caller's
+ * error path releases them */
+ if (array->paa_reqs_count != NULL) {
+ OBD_FREE(array->paa_reqs_count, sizeof(__u32) * size);
+ array->paa_reqs_count = NULL;
+ }
+
+ if (array->paa_reqs_array != NULL) {
+ OBD_FREE(array->paa_reqs_array,
+ sizeof(cfs_list_t) * array->paa_size);
+ array->paa_reqs_array = NULL;
+ }
+
+ return -ENOMEM;