/*
 * Position @srhi on the first request in @svc's history whose
 * rq_history_seq is >= @seq.
 *
 * Returns 0 with srhi->srhi_req / srhi->srhi_seq set on success, or
 * -ENOENT when no history entry has a sequence number >= @seq.
 *
 * NOTE(review): every caller in this file holds svc->srv_lock around
 * this call; the function itself does not assert that — confirm no
 * other caller exists before relying on it.
 */
int
ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service *svc,
                                    struct ptlrpc_srh_iterator *srhi,
                                    __u64 seq)
{
	struct list_head *e;
	struct ptlrpc_request *req;

	if (srhi->srhi_req != NULL &&
	    srhi->srhi_seq > svc->srv_request_max_cull_seq &&
	    srhi->srhi_seq <= seq) {
		/* If srhi_req was set previously, hasn't been culled and
		 * we're searching for a seq on or after it (i.e. more
		 * recent), search from it onwards.
		 * Since the service history is LRU (i.e. culled reqs will
		 * be near the head), we shouldn't have to do long
		 * re-scans */
		LASSERT (srhi->srhi_seq == srhi->srhi_req->rq_history_seq);
		LASSERT (!list_empty(&svc->srv_request_history));
		/* Resume from the cached request's own node (inclusive):
		 * the >= test below re-matches it when seq is unchanged. */
		e = &srhi->srhi_req->rq_history_list;
	} else {
		/* search from start */
		e = svc->srv_request_history.next;
	}

	while (e != &svc->srv_request_history) {
		req = list_entry(e, struct ptlrpc_request, rq_history_list);

		/* History appears to be kept in ascending-seq order (the
		 * LRU comment above relies on it), so the first entry at
		 * or beyond @seq is the answer. */
		if (req->rq_history_seq >= seq) {
			srhi->srhi_seq = req->rq_history_seq;
			srhi->srhi_req = req;
			return 0;
		}
		e = e->next;
	}

	return -ENOENT;
}
+
+static void *
+ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos)
+{
+ struct ptlrpc_service *svc = s->private;
+ struct ptlrpc_srh_iterator *srhi;
+ int rc;
+
+ OBD_ALLOC(srhi, sizeof(*srhi));
+ if (srhi == NULL)
+ return NULL;
+
+ srhi->srhi_seq = 0;
+ srhi->srhi_req = NULL;
+
+ spin_lock(&svc->srv_lock);
+ rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, *pos);
+ spin_unlock(&svc->srv_lock);
+
+ if (rc == 0) {
+ *pos = srhi->srhi_seq;
+ return srhi;
+ }
+
+ OBD_FREE(srhi, sizeof(*srhi));
+ return NULL;
+}
+
+static void
+ptlrpc_lprocfs_svc_req_history_stop(struct seq_file *s, void *iter)
+{
+ struct ptlrpc_srh_iterator *srhi = iter;
+
+ if (srhi != NULL)
+ OBD_FREE(srhi, sizeof(*srhi));
+}
+
+static void *
+ptlrpc_lprocfs_svc_req_history_next(struct seq_file *s,
+ void *iter, loff_t *pos)
+{
+ struct ptlrpc_service *svc = s->private;
+ struct ptlrpc_srh_iterator *srhi = iter;
+ int rc;
+
+ spin_lock(&svc->srv_lock);
+ rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, *pos + 1);
+ spin_unlock(&svc->srv_lock);
+
+ if (rc != 0) {
+ OBD_FREE(srhi, sizeof(*srhi));
+ return NULL;
+ }
+
+ *pos = srhi->srhi_seq;
+ return srhi;
+}
+
/*
 * seq_file ->show(): re-locate the iterator's request under srv_lock
 * and print one history line for it.
 *
 * The re-seek is required because srv_lock was dropped between seq_file
 * callbacks: the cached request may have been culled meanwhile, in
 * which case the seek lands on the next surviving entry (or fails).
 */
static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
{
	struct ptlrpc_service *svc = s->private;
	struct ptlrpc_srh_iterator *srhi = iter;
	struct ptlrpc_request *req;
	int rc;

	spin_lock(&svc->srv_lock);

	rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, srhi->srhi_seq);

	if (rc == 0) {
		req = srhi->srhi_req;

		/* Print common req fields.
		 * CAVEAT EMPTOR: we're racing with the service handler
		 * here. The request could contain any old crap, so you
		 * must be just as careful as the service's request
		 * parser. Currently I only print stuff here I know is OK
		 * to look at coz it was set up in request_in_callback()!!! */
		seq_printf(s, LPD64":%s:%s:"LPD64":%d:%s ",
		           req->rq_history_seq, libcfs_nid2str(req->rq_self),
		           libcfs_id2str(req->rq_peer), req->rq_xid,
		           req->rq_reqlen,ptlrpc_rqphase2str(req));

		/* Services may install a formatter for their own request
		 * body; without one, just terminate the common line. */
		if (svc->srv_request_history_print_fn == NULL)
			seq_printf(s, "\n");
		else
			svc->srv_request_history_print_fn(s, srhi->srhi_req);
	}

	spin_unlock(&svc->srv_lock);

	return rc;
}
+
+static int
+ptlrpc_lprocfs_svc_req_history_open(struct inode *inode, struct file *file)
+{
+ static struct seq_operations sops = {
+ .start = ptlrpc_lprocfs_svc_req_history_start,
+ .stop = ptlrpc_lprocfs_svc_req_history_stop,
+ .next = ptlrpc_lprocfs_svc_req_history_next,
+ .show = ptlrpc_lprocfs_svc_req_history_show,
+ };
+ struct proc_dir_entry *dp = PDE(inode);
+ struct seq_file *seqf;
+ int rc;
+
+ LPROCFS_ENTRY_AND_CHECK(dp);
+ rc = seq_open(file, &sops);
+ if (rc) {
+ LPROCFS_EXIT();
+ return rc;
+ }
+
+ seqf = file->private_data;
+ seqf->private = dp->data;
+ return 0;
+}
+
/*
 * Create the procfs tree for a ptlrpc service under @entry:
 *  - a "stats" counter file (via ptlrpc_lprocfs_register, which also
 *    fills svc->srv_procroot / svc->srv_stats),
 *  - req_buffer_history_{len,max} variables,
 *  - a read-only seq_file "req_history" dump.
 * If the service proc root could not be created, the extra entries are
 * silently skipped.
 */
void ptlrpc_lprocfs_register_service(struct proc_dir_entry *entry,
                                     struct ptlrpc_service *svc)
{
	struct lprocfs_vars lproc_vars[] = {
		{.name       = "req_buffer_history_len",
		 .write_fptr = NULL,
		 .read_fptr  = ptlrpc_lprocfs_read_req_history_len,
		 .data       = svc},
		{.name       = "req_buffer_history_max",
		 .write_fptr = ptlrpc_lprocfs_write_req_history_max,
		 .read_fptr  = ptlrpc_lprocfs_read_req_history_max,
		 .data       = svc},
		{NULL}
	};
	static struct file_operations req_history_fops = {
		.owner   = THIS_MODULE,
		.open    = ptlrpc_lprocfs_svc_req_history_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = lprocfs_seq_release,
	};
	struct proc_dir_entry *req_history;

	ptlrpc_lprocfs_register(entry, svc->srv_name,
	                        "stats", &svc->srv_procroot,
	                        &svc->srv_stats);

	if (svc->srv_procroot == NULL)
		return;

	lprocfs_add_vars(svc->srv_procroot, lproc_vars, NULL);

	/* 0400: history can expose peer NIDs/xids, so root-read only. */
	req_history = create_proc_entry("req_history", 0400,
	                                svc->srv_procroot);
	if (req_history != NULL) {
		req_history->data = svc;
		req_history->proc_fops = &req_history_fops;
	}
}
+
/*
 * Register the per-OBD RPC "stats" file directly under the device's
 * proc entry (NULL service name), filling obddev->obd_svc_procroot and
 * obddev->obd_svc_stats.
 */
void ptlrpc_lprocfs_register_obd(struct obd_device *obddev)
{
	ptlrpc_lprocfs_register(obddev->obd_proc_entry, NULL, "stats",
	                        &obddev->obd_svc_procroot,
	                        &obddev->obd_svc_stats);
}
EXPORT_SYMBOL(ptlrpc_lprocfs_register_obd);
+
/*
 * Bump the per-import counter for an RPC being sent.  Values of
 * opcode_offset() <= 0 are treated as unmappable opcodes and ignored,
 * as is a device with no stats allocated.
 */
void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req)
{
	struct lprocfs_stats *svc_stats;
	int opc = opcode_offset(lustre_msg_get_opc(req->rq_reqmsg));

	svc_stats = req->rq_import->imp_obd->obd_svc_stats;
	if (svc_stats == NULL || opc <= 0)
		return;
	LASSERT(opc < LUSTRE_MAX_OPCODES);
	/* These two use the ptlrpc_lprocfs_brw below */
	/* NOTE(review): opc is an opcode_offset() index, while OST_WRITE
	 * and OST_READ look like raw wire opcodes — confirm these live in
	 * the same namespace; otherwise this read/write exclusion matches
	 * the wrong offsets. */
	if (!(opc == OST_WRITE || opc == OST_READ))
		lprocfs_counter_add(svc_stats, opc + PTLRPC_LAST_CNTR, 0);
}
+
+void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int opc, int bytes)
+{
+ struct lprocfs_stats *svc_stats;
+ svc_stats = req->rq_import->imp_obd->obd_svc_stats;
+ if (!svc_stats)
+ return;
+ lprocfs_counter_add(svc_stats, opc + PTLRPC_LAST_CNTR, bytes);
+}
+EXPORT_SYMBOL(ptlrpc_lprocfs_brw);
+
+void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc)
+{
+ if (svc->srv_procroot != NULL)
+ lprocfs_remove(&svc->srv_procroot);
+
+ if (svc->srv_stats)
+ lprocfs_free_stats(&svc->srv_stats);
+}
+
+void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd)
+{
+ if (obd->obd_svc_procroot)
+ lprocfs_remove(&obd->obd_svc_procroot);
+
+ if (obd->obd_svc_stats)
+ lprocfs_free_stats(&obd->obd_svc_stats);
+}
+EXPORT_SYMBOL(ptlrpc_lprocfs_unregister_obd);
+
+
+int lprocfs_wr_evict_client(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ struct obd_device *obd = data;
+ char tmpbuf[sizeof(struct obd_uuid)];
+
+ /* Kludge code(deadlock situation): the lprocfs lock has been held
+ * since the client is evicted by writting client's
+ * uuid/nid to procfs "evict_client" entry. However,
+ * obd_export_evict_by_uuid() will call lprocfs_remove() to destroy
+ * the proc entries under the being destroyed export{}, so I have
+ * to drop the lock at first here.
+ * - jay, jxiong@clusterfs.com */
+ class_incref(obd);
+ LPROCFS_EXIT();
+
+ sscanf(buffer, "%40s", tmpbuf);
+ obd_export_evict_by_uuid(obd, tmpbuf);
+
+ LPROCFS_ENTRY();
+ class_decref(obd);
+
+ return count;
+}
+EXPORT_SYMBOL(lprocfs_wr_evict_client);
+
/*
 * procfs write handler: send one synchronous OBD_PING on the device's
 * client import.  Returns @count on success, -ENOMEM if the request
 * could not be built, or the ptlrpc_queue_wait() error code.
 */
int lprocfs_wr_ping(struct file *file, const char *buffer,
                    unsigned long count, void *data)
{
	struct obd_device *obd = data;
	struct ptlrpc_request *req;
	int rc;
	ENTRY;

	/* Build the request under CLIMP_CHECK — presumably this pins the
	 * import against teardown while it is dereferenced; confirm the
	 * macro's contract before changing this ordering. */
	LPROCFS_CLIMP_CHECK(obd);
	req = ptlrpc_prep_req(obd->u.cli.cl_import, LUSTRE_OBD_VERSION,
	                      OBD_PING, 1, NULL, NULL);
	LPROCFS_CLIMP_EXIT(obd);
	if (req == NULL)
		RETURN(-ENOMEM);

	ptlrpc_req_set_repsize(req, 1, NULL);
	/* Only a fully-connected import is worth pinging, and a manual
	 * ping is one-shot: never resend it. */
	req->rq_send_state = LUSTRE_IMP_FULL;
	req->rq_no_resend = 1;

	rc = ptlrpc_queue_wait(req);

	ptlrpc_req_finished(req);
	if (rc >= 0)
		RETURN(count);
	RETURN(rc);
}
EXPORT_SYMBOL(lprocfs_wr_ping);
+