void ptlrpc_daemonize(char *name);
int ptlrpc_service_health_check(struct ptlrpc_service *);
void ptlrpc_hpreq_reorder(struct ptlrpc_request *req);
+void ptlrpc_server_active_request_inc(struct ptlrpc_request *req);
+void ptlrpc_server_active_request_dec(struct ptlrpc_request *req);
void ptlrpc_server_drop_request(struct ptlrpc_request *req);
#ifdef __KERNEL__
cfs_atomic_inc(&req->rq_refcount);
/** let export know it has replays to be handled */
cfs_atomic_inc(&req->rq_export->exp_replay_count);
+ /* release service thread while request is queued
+ * we are moving the request from active processing
+ * to waiting on the replay queue */
+ ptlrpc_server_active_request_dec(req);
}
static void target_request_copy_put(struct ptlrpc_request *req)
LASSERT(cfs_atomic_read(&req->rq_export->exp_replay_count) > 0);
cfs_atomic_dec(&req->rq_export->exp_replay_count);
class_export_rpc_put(req->rq_export);
+ /* ptlrpc_server_drop_request() assumes the request is active */
+ ptlrpc_server_active_request_inc(req);
ptlrpc_server_drop_request(req);
}
}
/**
+ * increment the number of active requests consuming service threads.
+ *
+ * Counterpart of ptlrpc_server_active_request_dec().  Callers use this
+ * to re-account a request as "active" before handing it to code that
+ * assumes it holds a service-thread slot (e.g. ptlrpc_server_drop_request()).
+ *
+ * NOTE(review): assumes req->rq_rqbd is still valid here — confirm no
+ * caller invokes this after the request buffer descriptor is released.
+ */
+void ptlrpc_server_active_request_inc(struct ptlrpc_request *req)
+{
+ struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd;
+ struct ptlrpc_service *svc = rqbd->rqbd_service;
+
+ /* srv_n_active_reqs is updated under svc->srv_lock */
+ cfs_spin_lock(&svc->srv_lock);
+ svc->srv_n_active_reqs++;
+ cfs_spin_unlock(&svc->srv_lock);
+}
+
+/**
+ * decrement the number of active requests consuming service threads.
+ *
+ * Counterpart of ptlrpc_server_active_request_inc().  Callers use this
+ * to release the service-thread accounting for a request that is being
+ * parked (e.g. moved to a replay queue) rather than actively processed.
+ *
+ * NOTE(review): assumes req->rq_rqbd is still valid here — confirm no
+ * caller invokes this after the request buffer descriptor is released.
+ */
+void ptlrpc_server_active_request_dec(struct ptlrpc_request *req)
+{
+ struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd;
+ struct ptlrpc_service *svc = rqbd->rqbd_service;
+
+ /* srv_n_active_reqs is updated under svc->srv_lock */
+ cfs_spin_lock(&svc->srv_lock);
+ svc->srv_n_active_reqs--;
+ cfs_spin_unlock(&svc->srv_lock);
+}
+
+/**
* drop a reference count of the request. if it reaches 0, we either
* put it into history list, or free it immediately.
*/