+ rc = ptlrpc_server_hpreq_check(svcpt->scp_service, req);
+ if (rc < 0)
+ RETURN(rc);
+
+ cfs_spin_lock(&svcpt->scp_req_lock);
+
+ if (rc)
+ ptlrpc_hpreq_reorder_nolock(svcpt, req);
+ else
+ cfs_list_add_tail(&req->rq_list, &svcpt->scp_req_pending);
+
+ cfs_spin_unlock(&svcpt->scp_req_lock);
+
+ RETURN(0);
+}
+
+/**
+ * Decide whether this service partition may handle a high-priority
+ * request right now.
+ * Callers may invoke this without any lock, but must hold
+ * ptlrpc_service_part::scp_req_lock to get a reliable result.
+ */
+static int ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt,
+				    int force)
+{
+	int can_serve;
+
+	if (force)
+		return 1;
+
+	/* keep at least one running thread free for normal requests */
+	if (svcpt->scp_nreqs_active >= svcpt->scp_nthrs_running - 1)
+		can_serve = 0;
+	else
+		/* serve HP work if nothing normal is queued, or the run of
+		 * consecutive HP requests is still under the configured
+		 * ratio */
+		can_serve = cfs_list_empty(&svcpt->scp_req_pending) ||
+			    svcpt->scp_hreq_count <
+			    svcpt->scp_service->srv_hpreq_ratio;
+
+	return can_serve;
+}
+
+static int ptlrpc_server_high_pending(struct ptlrpc_service_part *svcpt,
+				      int force)
+{
+	/* a HP request is only "pending" when the HP queue is non-empty
+	 * and handling high-priority work is currently permitted */
+	if (cfs_list_empty(&svcpt->scp_hreq_pending))
+		return 0;
+
+	return ptlrpc_server_allow_high(svcpt, force);
+}
+
+/**
+ * Decide whether a normal-priority request may be handled now.
+ * On a service with a high-priority queue, normal requests are allowed
+ * when forced (i.e. cleanup), when other high priority requests are
+ * already being processed (those threads can take further HP work), or
+ * when enough threads remain idle that a later thread can still pick up
+ * a high priority request.
+ * Callers may invoke this without any lock, but must hold
+ * ptlrpc_service_part::scp_req_lock to get a reliable result.
+ */
+static int ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt,
+				      int force)
+{
+#ifndef __KERNEL__
+	if (1) /* always allow to handle normal request for liblustre */
+		return 1;
+#endif
+	if (force)
+		return 1;
+
+	/* at least two spare threads: always OK to take normal work */
+	if (svcpt->scp_nreqs_active < svcpt->scp_nthrs_running - 2)
+		return 1;
+
+	/* one or zero spare threads: reserve them for HP work */
+	if (svcpt->scp_nreqs_active >= svcpt->scp_nthrs_running - 1)
+		return 0;
+
+	/* exactly in between: allow if HP work is already in flight or
+	 * this service has no HP handler at all */
+	return svcpt->scp_nhreqs_active > 0 ||
+	       svcpt->scp_service->srv_ops.so_hpreq_handler == NULL;
+}
+
+static int ptlrpc_server_normal_pending(struct ptlrpc_service_part *svcpt,
+					int force)
+{
+	/* nothing queued means nothing pending, regardless of policy */
+	if (cfs_list_empty(&svcpt->scp_req_pending))
+		return 0;
+
+	return ptlrpc_server_allow_normal(svcpt, force);
+}
+
+/**
+ * Report whether the incoming request queues hold any request that this
+ * thread is currently allowed to fetch for processing.
+ * Callers may invoke this without any lock, but must hold
+ * ptlrpc_service_part::scp_req_lock to get a reliable result.
+ * \see ptlrpc_server_allow_normal
+ * \see ptlrpc_server_allow_high
+ */
+static inline int
+ptlrpc_server_request_pending(struct ptlrpc_service_part *svcpt, int force)
+{
+	if (ptlrpc_server_high_pending(svcpt, force))
+		return 1;
+
+	return ptlrpc_server_normal_pending(svcpt, force);
+}
+
+/**
+ * Fetch a request for processing from the queues of unprocessed
+ * requests, favoring high-priority requests.
+ * Returns a pointer to the fetched request, or NULL when no request may
+ * be served right now.
+ */
+static struct ptlrpc_request *
+ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, int force)
+{
+	struct ptlrpc_request *req = NULL;
+	ENTRY;
+
+	if (ptlrpc_server_high_pending(svcpt, force)) {
+		req = cfs_list_entry(svcpt->scp_hreq_pending.next,
+				     struct ptlrpc_request, rq_list);
+		/* track the run of consecutive HP requests; compared
+		 * against srv_hpreq_ratio in ptlrpc_server_allow_high() */
+		svcpt->scp_hreq_count++;
+	} else if (ptlrpc_server_normal_pending(svcpt, force)) {
+		req = cfs_list_entry(svcpt->scp_req_pending.next,
+				     struct ptlrpc_request, rq_list);
+		/* serving a normal request resets the consecutive-HP run */
+		svcpt->scp_hreq_count = 0;
+	}
+
+	RETURN(req);
+}
+
+/**
+ * Handle freshly incoming reqs, add to timed early reply list,
+ * pass on to regular request queue.
+ * All incoming requests pass through here before getting into
+ * ptlrpc_server_handle_req later on.
+ */
+static int
+ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt)
+{
+ struct ptlrpc_service *svc = svcpt->scp_service;
+ struct ptlrpc_request *req;
+ __u32 deadline;
+ int rc;
+ ENTRY;
+
+ cfs_spin_lock(&svcpt->scp_lock);
+ if (cfs_list_empty(&svcpt->scp_req_incoming)) {
+ cfs_spin_unlock(&svcpt->scp_lock);
+ RETURN(0);
+ }
+
+ req = cfs_list_entry(svcpt->scp_req_incoming.next,
+ struct ptlrpc_request, rq_list);
+ cfs_list_del_init(&req->rq_list);
+ svcpt->scp_nreqs_incoming--;
+ /* Consider this still a "queued" request as far as stats are
+ * concerned */
+ cfs_spin_unlock(&svcpt->scp_lock);