+static int
+ptlrpc_lprocfs_read_req_history_len(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct ptlrpc_service *service = data;
+ int nhist = service->srv_n_history_rqbds;
+
+ /* One-shot read: current number of request buffers in the history. */
+ *eof = 1;
+ return snprintf(page, count, "%d\n", nhist);
+}
+
+static int
+ptlrpc_lprocfs_read_req_history_max(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct ptlrpc_service *service = data;
+ int max = service->srv_max_history_rqbds;
+
+ /* One-shot read: configured upper bound on history request buffers. */
+ *eof = 1;
+ return snprintf(page, count, "%d\n", max);
+}
+
+/* Procfs write handler: set the maximum number of request buffers kept
+ * in the service's request history.  Returns count on success or a
+ * negative errno (-ERANGE for out-of-range values). */
+static int
+ptlrpc_lprocfs_write_req_history_max(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ struct ptlrpc_service *svc = data;
+ int bufpages;
+ int val;
+ int rc = lprocfs_write_helper(buffer, count, &val);
+
+ if (rc < 0)
+ return rc;
+
+ if (val < 0)
+ return -ERANGE;
+
+ /* This sanity check is more of an insanity check; we can still
+ * hose a kernel by allowing the request history to grow too
+ * far. */
+ /* bufpages = pages per request buffer (round up); cap the history
+ * so its buffers can consume at most half of physical memory. */
+ bufpages = (svc->srv_buf_size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+ if (val > cfs_num_physpages/(2 * bufpages))
+ return -ERANGE;
+
+ /* Publish the new limit under srv_lock. */
+ cfs_spin_lock(&svc->srv_lock);
+ svc->srv_max_history_rqbds = val;
+ cfs_spin_unlock(&svc->srv_lock);
+
+ return count;
+}
+
+/* Procfs read handler: report the minimum service thread count. */
+static int
+ptlrpc_lprocfs_rd_threads_min(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct ptlrpc_service *svc = data;
+
+ /* Single-shot read; set *eof like the other read handlers in this
+ * file (e.g. req_history_len) so procfs doesn't call us again. */
+ *eof = 1;
+ return snprintf(page, count, "%d\n", svc->srv_threads_min);
+}
+
+/* Procfs write handler: set the minimum service thread count. */
+static int
+ptlrpc_lprocfs_wr_threads_min(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ struct ptlrpc_service *svc = data;
+ int newmin;
+ int rc;
+
+ rc = lprocfs_write_helper(buffer, count, &newmin);
+ if (rc < 0)
+ return rc;
+
+ /* At least two threads, and never above the configured maximum. */
+ if (newmin < 2 || newmin > svc->srv_threads_max)
+ return -ERANGE;
+
+ cfs_spin_lock(&svc->srv_lock);
+ svc->srv_threads_min = newmin;
+ cfs_spin_unlock(&svc->srv_lock);
+
+ return count;
+}
+
+/* Procfs read handler: report how many service threads are running. */
+static int
+ptlrpc_lprocfs_rd_threads_started(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct ptlrpc_service *svc = data;
+
+ /* Single-shot read; set *eof for consistency with the other read
+ * handlers in this file. */
+ *eof = 1;
+ return snprintf(page, count, "%d\n", svc->srv_threads_started);
+}
+
+/* Procfs read handler: report the maximum service thread count. */
+static int
+ptlrpc_lprocfs_rd_threads_max(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct ptlrpc_service *svc = data;
+
+ /* Single-shot read; set *eof for consistency with the other read
+ * handlers in this file. */
+ *eof = 1;
+ return snprintf(page, count, "%d\n", svc->srv_threads_max);
+}
+
+/* Procfs write handler: set the maximum service thread count. */
+static int
+ptlrpc_lprocfs_wr_threads_max(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ struct ptlrpc_service *svc = data;
+ int newmax;
+ int rc;
+
+ rc = lprocfs_write_helper(buffer, count, &newmax);
+ if (rc < 0)
+ return rc;
+
+ /* At least two threads, and never below the configured minimum. */
+ if (newmax < 2 || newmax < svc->srv_threads_min)
+ return -ERANGE;
+
+ cfs_spin_lock(&svc->srv_lock);
+ svc->srv_threads_max = newmax;
+ cfs_spin_unlock(&svc->srv_lock);
+
+ return count;
+}
+
+/* Cursor for walking a service's request history via seq_file. */
+struct ptlrpc_srh_iterator {
+ __u64 srhi_seq;                  /* history sequence of srhi_req */
+ struct ptlrpc_request *srhi_req; /* last request visited, or NULL */
+};
+
+/* Position @srhi on the first history entry with sequence >= @seq.
+ * Returns 0 with srhi->srhi_req/srhi_seq updated, or -ENOENT if no
+ * such entry exists.
+ * NOTE(review): all call sites in this file hold svc->srv_lock across
+ * this walk -- confirm that requirement before reusing elsewhere. */
+int
+ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service *svc,
+ struct ptlrpc_srh_iterator *srhi,
+ __u64 seq)
+{
+ cfs_list_t *e;
+ struct ptlrpc_request *req;
+
+ if (srhi->srhi_req != NULL &&
+ srhi->srhi_seq > svc->srv_request_max_cull_seq &&
+ srhi->srhi_seq <= seq) {
+ /* If srhi_req was set previously, hasn't been culled and
+ * we're searching for a seq on or after it (i.e. more
+ * recent), search from it onwards.
+ * Since the service history is LRU (i.e. culled reqs will
+ * be near the head), we shouldn't have to do long
+ * re-scans */
+ LASSERT (srhi->srhi_seq == srhi->srhi_req->rq_history_seq);
+ LASSERT (!cfs_list_empty(&svc->srv_request_history));
+ e = &srhi->srhi_req->rq_history_list;
+ } else {
+ /* search from start */
+ e = svc->srv_request_history.next;
+ }
+
+ /* Walk forward until we hit an entry at or past @seq. */
+ while (e != &svc->srv_request_history) {
+ req = cfs_list_entry(e, struct ptlrpc_request, rq_history_list);
+
+ if (req->rq_history_seq >= seq) {
+ srhi->srhi_seq = req->rq_history_seq;
+ srhi->srhi_req = req;
+ return 0;
+ }
+ e = e->next;
+ }
+
+ return -ENOENT;
+}
+
+/* seq_file .start: allocate a fresh iterator and seek it to *pos.
+ * Returns the iterator (later freed by the .stop callback), or NULL
+ * on allocation failure or when *pos is past the end of the history. */
+static void *
+ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos)
+{
+ struct ptlrpc_service *svc = s->private;
+ struct ptlrpc_srh_iterator *srhi;
+ int rc;
+
+ OBD_ALLOC(srhi, sizeof(*srhi));
+ if (srhi == NULL)
+ return NULL;
+
+ srhi->srhi_seq = 0;
+ srhi->srhi_req = NULL;
+
+ cfs_spin_lock(&svc->srv_lock);
+ rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, *pos);
+ cfs_spin_unlock(&svc->srv_lock);
+
+ if (rc == 0) {
+ /* Report the sequence we actually landed on back to seq_file. */
+ *pos = srhi->srhi_seq;
+ return srhi;
+ }
+
+ /* Nothing at or after *pos: free the iterator and signal end. */
+ OBD_FREE(srhi, sizeof(*srhi));
+ return NULL;
+}
+
+/* seq_file .stop: release the iterator handed out by .start/.next. */
+static void
+ptlrpc_lprocfs_svc_req_history_stop(struct seq_file *s, void *iter)
+{
+ if (iter != NULL)
+ OBD_FREE(iter, sizeof(struct ptlrpc_srh_iterator));
+}
+
+/* seq_file .next: advance the iterator to the first history entry
+ * with sequence > *pos.  Frees the iterator and returns NULL at end
+ * of history; otherwise updates *pos and returns the iterator. */
+static void *
+ptlrpc_lprocfs_svc_req_history_next(struct seq_file *s,
+ void *iter, loff_t *pos)
+{
+ struct ptlrpc_service *svc = s->private;
+ struct ptlrpc_srh_iterator *srhi = iter;
+ int rc;
+
+ cfs_spin_lock(&svc->srv_lock);
+ rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, *pos + 1);
+ cfs_spin_unlock(&svc->srv_lock);
+
+ if (rc != 0) {
+ OBD_FREE(srhi, sizeof(*srhi));
+ return NULL;
+ }
+
+ *pos = srhi->srhi_seq;
+ return srhi;
+}
+
+/* common ost/mdt srv_request_history_print_fn */
+/* Print per-request detail (and a trailing newline) into the seq_file,
+ * keyed on how far the request got through processing. */
+void target_print_req(void *seq_file, struct ptlrpc_request *req)
+{
+ /* Called holding srv_lock with irqs disabled.
+ * Print specific req contents and a newline.
+ * CAVEAT EMPTOR: check request message length before printing!!!
+ * You might have received any old crap so you must be just as
+ * careful here as the service's request parser!!! */
+ struct seq_file *sf = seq_file;
+
+ switch (req->rq_phase) {
+ case RQ_PHASE_NEW:
+ /* still awaiting a service thread's attention, or rejected
+ * because the generic request message didn't unpack */
+ seq_printf(sf, "<not swabbed>\n");
+ break;
+ case RQ_PHASE_INTERPRET:
+ /* being handled, so basic msg swabbed, and opc is valid
+ * but racing with mds_handle() */
+ /* fallthrough */
+ case RQ_PHASE_COMPLETE:
+ /* been handled by mds_handle() reply state possibly still
+ * volatile */
+ seq_printf(sf, "opc %d\n", lustre_msg_get_opc(req->rq_reqmsg));
+ break;
+ default:
+ DEBUG_REQ(D_ERROR, req, "bad phase %d", req->rq_phase);
+ }
+}
+EXPORT_SYMBOL(target_print_req);
+
+/* seq_file .show: print one summary line for the request at the
+ * iterator's position.  Re-seeks under srv_lock first because the
+ * request may have been culled since .start/.next last positioned us. */
+static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
+{
+ struct ptlrpc_service *svc = s->private;
+ struct ptlrpc_srh_iterator *srhi = iter;
+ struct ptlrpc_request *req;
+ int rc;
+
+ cfs_spin_lock(&svc->srv_lock);
+
+ rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, srhi->srhi_seq);
+
+ if (rc == 0) {
+ req = srhi->srhi_req;
+
+ /* Print common req fields.
+ * CAVEAT EMPTOR: we're racing with the service handler
+ * here. The request could contain any old crap, so you
+ * must be just as careful as the service's request
+ * parser. Currently I only print stuff here I know is OK
+ * to look at coz it was set up in request_in_callback()!!! */
+ seq_printf(s, LPD64":%s:%s:x"LPU64":%d:%s:%ld:%lds(%+lds) ",
+ req->rq_history_seq, libcfs_nid2str(req->rq_self),
+ libcfs_id2str(req->rq_peer), req->rq_xid,
+ req->rq_reqlen, ptlrpc_rqphase2str(req),
+ req->rq_arrival_time.tv_sec,
+ req->rq_sent - req->rq_arrival_time.tv_sec,
+ req->rq_sent - req->rq_deadline);
+ /* Let the service append its own detail, if it registered a
+ * print callback; otherwise just terminate the line. */
+ if (svc->srv_request_history_print_fn == NULL)
+ seq_printf(s, "\n");
+ else
+ svc->srv_request_history_print_fn(s, srhi->srhi_req);
+ }
+
+ cfs_spin_unlock(&svc->srv_lock);
+
+ return rc;
+}
+
+/* Open handler for the request-history proc file: wire up the seq_file
+ * iterator and stash the service pointer in seq_file->private. */
+static int
+ptlrpc_lprocfs_svc_req_history_open(struct inode *inode, struct file *file)
+{
+ static struct seq_operations sops = {
+ .start = ptlrpc_lprocfs_svc_req_history_start,
+ .stop = ptlrpc_lprocfs_svc_req_history_stop,
+ .next = ptlrpc_lprocfs_svc_req_history_next,
+ .show = ptlrpc_lprocfs_svc_req_history_show,
+ };
+ struct proc_dir_entry *dp = PDE(inode);
+ struct seq_file *seqf;
+ int rc;
+
+ LPROCFS_ENTRY_AND_CHECK(dp);
+ rc = seq_open(file, &sops);
+ if (rc) {
+ /* seq_open failed: drop the entry refcount taken above. */
+ LPROCFS_EXIT();
+ return rc;
+ }
+
+ /* NOTE(review): on success the LPROCFS_ENTRY taken above is
+ * presumably released by the matching release handler -- confirm. */
+ seqf = file->private_data;
+ seqf->private = dp->data;
+ return 0;
+}
+
+/* See also lprocfs_rd_timeouts */
+/* Procfs read handler: report the service's adaptive-timeout estimate,
+ * its worst-ever value (and how long ago that was), plus the AT
+ * history via lprocfs_at_hist_helper.  Returns bytes written. */
+static int ptlrpc_lprocfs_rd_timeouts(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct ptlrpc_service *svc = data;
+ unsigned int cur, worst;
+ time_t worstt;
+ struct dhms ts;
+ int rc = 0;
+
+ *eof = 1;
+ cur = at_get(&svc->srv_at_estimate);
+ worst = svc->srv_at_estimate.at_worst_ever;
+ worstt = svc->srv_at_estimate.at_worst_time;
+ /* Convert "seconds since worst" into days/hours/mins/secs. */
+ s2dhms(&ts, cfs_time_current_sec() - worstt);
+ if (AT_OFF)
+ rc += snprintf(page + rc, count - rc,
+ "adaptive timeouts off, using obd_timeout %u\n",
+ obd_timeout);
+ rc += snprintf(page + rc, count - rc,
+ "%10s : cur %3u worst %3u (at %ld, "DHMS_FMT" ago) ",
+ "service", cur, worst, worstt,
+ DHMS_VARS(&ts));
+ rc = lprocfs_at_hist_helper(page, count, rc,
+ &svc->srv_at_estimate);
+ return rc;
+}
+
+/* Procfs read handler: report the high-priority request ratio. */
+static int ptlrpc_lprocfs_rd_hp_ratio(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct ptlrpc_service *svc = data;
+
+ /* Single-shot read; set *eof and newline-terminate the value for
+ * consistency with every other read handler in this file. */
+ *eof = 1;
+ return snprintf(page, count, "%d\n", svc->srv_hpreq_ratio);
+}
+
+/* Procfs write handler: set the high-priority request ratio. */
+static int ptlrpc_lprocfs_wr_hp_ratio(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ struct ptlrpc_service *svc = data;
+ int ratio;
+ int rc = lprocfs_write_helper(buffer, count, &ratio);
+
+ if (rc < 0)
+ return rc;
+
+ /* Negative ratios are meaningless. */
+ if (ratio < 0)
+ return -ERANGE;
+
+ /* Publish under srv_lock, matching the other write handlers. */
+ cfs_spin_lock(&svc->srv_lock);
+ svc->srv_hpreq_ratio = ratio;
+ cfs_spin_unlock(&svc->srv_lock);
+ return count;
+}
+