/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002 Cluster File Systems, Inc.
 *
 * This file is part of the Lustre file system, http://www.lustre.org
 * Lustre is a trademark of Cluster File Systems, Inc.
 *
 * You may have signed or agreed to another license before downloading
 * this software.  If so, you are bound by the terms and conditions
 * of that agreement, and the following does not apply to you.  See the
 * LICENSE file included with this distribution for more information.
 *
 * If you did not agree to a different license, then this copy of Lustre
 * is open source software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * In either case, Lustre is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * license text for more details.
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <obd_support.h>
#include <obd.h>
#include <lprocfs_status.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <obd_class.h>
#include "ptlrpc_internal.h"
struct ll_rpc_opcode {
        __u32       opcode;
        const char *opname;
} ll_rpc_opcode_table[LUSTRE_MAX_OPCODES] = {
        { OST_REPLY,        "ost_reply" },
        { OST_GETATTR,      "ost_getattr" },
        { OST_SETATTR,      "ost_setattr" },
        { OST_READ,         "ost_read" },
        { OST_WRITE,        "ost_write" },
        { OST_CREATE,       "ost_create" },
        { OST_DESTROY,      "ost_destroy" },
        { OST_GET_INFO,     "ost_get_info" },
        { OST_CONNECT,      "ost_connect" },
        { OST_DISCONNECT,   "ost_disconnect" },
        { OST_PUNCH,        "ost_punch" },
        { OST_OPEN,         "ost_open" },
        { OST_CLOSE,        "ost_close" },
        { OST_STATFS,       "ost_statfs" },
        { 14,                NULL },    /* formerly OST_SAN_READ */
        { 15,                NULL },    /* formerly OST_SAN_WRITE */
        { OST_SYNC,         "ost_sync" },
        { OST_SET_INFO,     "ost_set_info" },
        { OST_QUOTACHECK,   "ost_quotacheck" },
        { OST_QUOTACTL,     "ost_quotactl" },
        { MDS_GETATTR,      "mds_getattr" },
        { MDS_GETATTR_NAME, "mds_getattr_lock" },
        { MDS_CLOSE,        "mds_close" },
        { MDS_REINT,        "mds_reint" },
        { MDS_READPAGE,     "mds_readpage" },
        { MDS_CONNECT,      "mds_connect" },
        { MDS_DISCONNECT,   "mds_disconnect" },
        { MDS_GETSTATUS,    "mds_getstatus" },
        { MDS_STATFS,       "mds_statfs" },
        { MDS_PIN,          "mds_pin" },
        { MDS_UNPIN,        "mds_unpin" },
        { MDS_SYNC,         "mds_sync" },
        { MDS_DONE_WRITING, "mds_done_writing" },
        { MDS_SET_INFO,     "mds_set_info" },
        { MDS_QUOTACHECK,   "mds_quotacheck" },
        { MDS_QUOTACTL,     "mds_quotactl" },
        { MDS_GETXATTR,     "mds_getxattr" },
        { MDS_SETXATTR,     "mds_setxattr" },
        { LDLM_ENQUEUE,     "ldlm_enqueue" },
        { LDLM_CONVERT,     "ldlm_convert" },
        { LDLM_CANCEL,      "ldlm_cancel" },
        { LDLM_BL_CALLBACK, "ldlm_bl_callback" },
        { LDLM_CP_CALLBACK, "ldlm_cp_callback" },
        { LDLM_GL_CALLBACK, "ldlm_gl_callback" },
        { OBD_PING,         "obd_ping" },
        { OBD_LOG_CANCEL,   "llog_origin_handle_cancel" },
};
const char* ll_opcode2str(__u32 opcode)
{
        /* When one of the assertions below fails, chances are that:
         *     1) A new opcode was added in lustre_idl.h, but is
         *        missing from the table above.
         * or  2) The opcode space was renumbered or rearranged,
         *        and the opcode_offset() function in
         *        ptlrpc_internal.h needs to be modified.
         */
        __u32 offset = opcode_offset(opcode);
        LASSERT(offset < LUSTRE_MAX_OPCODES);
        LASSERT(ll_rpc_opcode_table[offset].opcode == opcode);
        return ll_rpc_opcode_table[offset].opname;
}
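
/* Note on the mapping above: opcode_offset() (in ptlrpc_internal.h) folds
 * the disjoint OST, MDS, LDLM and OBD opcode ranges into one dense
 * 0..LUSTRE_MAX_OPCODES-1 index, so the table must list opcodes in exactly
 * the order that function assumes.  For example, assuming the usual
 * numbering where OST opcodes start at 0, opcode_offset(OST_READ) is just
 * OST_READ and ll_opcode2str(OST_READ) returns "ost_read". */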
void ptlrpc_lprocfs_register(struct proc_dir_entry *root, char *dir,
                             char *name, struct proc_dir_entry **procroot_ret,
                             struct lprocfs_stats **stats_ret)
{
        struct proc_dir_entry *svc_procroot;
        struct lprocfs_stats *svc_stats;
        int i, rc;
        unsigned int svc_counter_config = LPROCFS_CNTR_AVGMINMAX |
                                          LPROCFS_CNTR_STDDEV;

        LASSERT(*procroot_ret == NULL);
        LASSERT(*stats_ret == NULL);

        svc_stats = lprocfs_alloc_stats(PTLRPC_LAST_CNTR + LUSTRE_MAX_OPCODES);
        if (svc_stats == NULL)
                return;

        if (dir) {
                svc_procroot = lprocfs_register(dir, root, NULL, NULL);
                if (IS_ERR(svc_procroot)) {
                        lprocfs_free_stats(&svc_stats);
                        return;
                }
        } else {
                svc_procroot = root;
        }

        lprocfs_counter_init(svc_stats, PTLRPC_REQWAIT_CNTR,
                             svc_counter_config, "req_waittime", "usec");
        lprocfs_counter_init(svc_stats, PTLRPC_REQQDEPTH_CNTR,
                             svc_counter_config, "req_qdepth", "reqs");
        lprocfs_counter_init(svc_stats, PTLRPC_REQACTIVE_CNTR,
                             svc_counter_config, "req_active", "reqs");
        lprocfs_counter_init(svc_stats, PTLRPC_REQBUF_AVAIL_CNTR,
                             svc_counter_config, "reqbuf_avail", "bufs");
        for (i = 0; i < LUSTRE_MAX_OPCODES; i++) {
                __u32 opcode = ll_rpc_opcode_table[i].opcode;
                lprocfs_counter_init(svc_stats, PTLRPC_LAST_CNTR + i,
                                     svc_counter_config, ll_opcode2str(opcode),
                                     (i == OST_WRITE || i == OST_READ) ?
                                     "bytes" : "reqs");
        }

        rc = lprocfs_register_stats(svc_procroot, name, svc_stats);
        if (rc < 0) {
                if (dir)
                        lprocfs_remove(&svc_procroot);
                lprocfs_free_stats(&svc_stats);
        } else {
                if (dir)
                        *procroot_ret = svc_procroot;
                *stats_ret = svc_stats;
        }
}
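
/* Sketch of the stats array laid out above: slots 0..PTLRPC_LAST_CNTR-1
 * hold the fixed service-wide counters (req_waittime, req_qdepth,
 * req_active, reqbuf_avail) and slot PTLRPC_LAST_CNTR + i holds the
 * counter for the RPC at table index i.  Note that the loop compares the
 * table index i (not the opcode) against OST_READ/OST_WRITE; this works
 * because the OST opcodes come first in the table, where index and opcode
 * coincide.  Those two counters get "bytes" units since
 * ptlrpc_lprocfs_brw() below accumulates transfer sizes into them. */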
static int
ptlrpc_lprocfs_read_req_history_len(char *page, char **start, off_t off,
                                    int count, int *eof, void *data)
{
        struct ptlrpc_service *svc = data;

        *eof = 1;
        return snprintf(page, count, "%d\n", svc->srv_n_history_rqbds);
}
static int
ptlrpc_lprocfs_read_req_history_max(char *page, char **start, off_t off,
                                    int count, int *eof, void *data)
{
        struct ptlrpc_service *svc = data;

        *eof = 1;
        return snprintf(page, count, "%d\n", svc->srv_max_history_rqbds);
}
static int
ptlrpc_lprocfs_write_req_history_max(struct file *file, const char *buffer,
                                     unsigned long count, void *data)
{
        struct ptlrpc_service *svc = data;
        int bufpages;
        int val;
        int rc = lprocfs_write_helper(buffer, count, &val);

        if (rc < 0)
                return rc;

        if (val < 0)
                return -ERANGE;

        /* This sanity check is more of an insanity check; we can still
         * hose a kernel by allowing the request history to grow too
         * big. */
        bufpages = (svc->srv_buf_size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
        if (val > num_physpages/(2*bufpages))
                return -ERANGE;

        spin_lock(&svc->srv_lock);
        svc->srv_max_history_rqbds = val;
        spin_unlock(&svc->srv_lock);

        return count;
}
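
/* Worked example of the bound above, assuming 4096-byte pages and an
 * 8192-byte srv_buf_size: bufpages = 2, so the largest accepted val is
 * num_physpages/4 buffers, i.e. at most half of physical memory
 * (val * bufpages pages) can ever be pinned by the request history. */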
struct ptlrpc_srh_iterator {
        __u64                  srhi_seq;
        struct ptlrpc_request *srhi_req;
};
static int
ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service *svc,
                                    struct ptlrpc_srh_iterator *srhi,
                                    __u64 seq)
{
        struct list_head      *e;
        struct ptlrpc_request *req;

        if (srhi->srhi_req != NULL &&
            srhi->srhi_seq > svc->srv_request_max_cull_seq &&
            srhi->srhi_seq <= seq) {
                /* If srhi_req was set previously, hasn't been culled and
                 * we're searching for a seq on or after it (i.e. more
                 * recent), search from it onwards.
                 * Since the service history is LRU (i.e. culled reqs will
                 * be near the head), we shouldn't have to do long
                 * re-scans. */
                LASSERT(srhi->srhi_seq == srhi->srhi_req->rq_history_seq);
                LASSERT(!list_empty(&svc->srv_request_history));
                e = &srhi->srhi_req->rq_history_list;
        } else {
                /* search from start */
                e = svc->srv_request_history.next;
        }

        while (e != &svc->srv_request_history) {
                req = list_entry(e, struct ptlrpc_request, rq_history_list);

                if (req->rq_history_seq >= seq) {
                        srhi->srhi_seq = req->rq_history_seq;
                        srhi->srhi_req = req;
                        return 0;
                }
                e = e->next;
        }

        return -ENOENT;
}
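
/* Why seek by sequence number instead of keeping a bare list pointer: the
 * history is culled concurrently, so an element remembered across seq_file
 * callbacks could be freed in the meantime.  The srhi_seq cursor is only
 * trusted (and srhi_req only dereferenced) while it is newer than
 * srv_request_max_cull_seq; otherwise the scan restarts from the list
 * head and re-finds the first request at or after the wanted seq. */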
static void *
ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos)
{
        struct ptlrpc_service      *svc = s->private;
        struct ptlrpc_srh_iterator *srhi;
        int                         rc;

        OBD_ALLOC(srhi, sizeof(*srhi));
        if (srhi == NULL)
                return NULL;

        srhi->srhi_seq = 0;
        srhi->srhi_req = NULL;

        spin_lock(&svc->srv_lock);
        rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, *pos);
        spin_unlock(&svc->srv_lock);

        if (rc == 0) {
                *pos = srhi->srhi_seq;
                return srhi;
        }

        OBD_FREE(srhi, sizeof(*srhi));
        return NULL;
}
static void
ptlrpc_lprocfs_svc_req_history_stop(struct seq_file *s, void *iter)
{
        struct ptlrpc_srh_iterator *srhi = iter;

        if (srhi != NULL)
                OBD_FREE(srhi, sizeof(*srhi));
}
static void *
ptlrpc_lprocfs_svc_req_history_next(struct seq_file *s,
                                    void *iter, loff_t *pos)
{
        struct ptlrpc_service      *svc = s->private;
        struct ptlrpc_srh_iterator *srhi = iter;
        int                         rc;

        spin_lock(&svc->srv_lock);
        rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, *pos + 1);
        spin_unlock(&svc->srv_lock);

        if (rc != 0) {
                OBD_FREE(srhi, sizeof(*srhi));
                return NULL;
        }

        *pos = srhi->srhi_seq;
        return srhi;
}
static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
{
        struct ptlrpc_service      *svc = s->private;
        struct ptlrpc_srh_iterator *srhi = iter;
        struct ptlrpc_request      *req;
        int                         rc;

        spin_lock(&svc->srv_lock);

        rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, srhi->srhi_seq);

        if (rc == 0) {
                req = srhi->srhi_req;

                /* Print common req fields.
                 * CAVEAT EMPTOR: we're racing with the service handler
                 * here.  The request could contain any old crap, so you
                 * must be just as careful as the service's request
                 * parser.  Currently I only print stuff here I know is OK
                 * to look at coz it was set up in request_in_callback()!!! */
                seq_printf(s, LPD64":%s:%s:"LPD64":%d:%s ",
                           req->rq_history_seq, libcfs_nid2str(req->rq_self),
                           libcfs_id2str(req->rq_peer), req->rq_xid,
                           req->rq_reqlen, ptlrpc_rqphase2str(req));

                if (svc->srv_request_history_print_fn == NULL)
                        seq_printf(s, "\n");
                else
                        svc->srv_request_history_print_fn(s, srhi->srhi_req);
        }

        spin_unlock(&svc->srv_lock);

        return 0;
}
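
/* For reference, each line emitted above has the form
 *   <history_seq>:<self_nid>:<peer_id>:<xid>:<reqlen>:<phase>
 * followed by the service handler's own detail, if a print function is
 * registered.  A hypothetical example:
 *   42:192.168.0.1@tcp:12345-192.168.0.2@tcp:11:192:Complete */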
static int
ptlrpc_lprocfs_svc_req_history_open(struct inode *inode, struct file *file)
{
        static struct seq_operations sops = {
                .start = ptlrpc_lprocfs_svc_req_history_start,
                .stop  = ptlrpc_lprocfs_svc_req_history_stop,
                .next  = ptlrpc_lprocfs_svc_req_history_next,
                .show  = ptlrpc_lprocfs_svc_req_history_show,
        };
        struct proc_dir_entry *dp = PDE(inode);
        struct seq_file       *seqf;
        int                    rc;

        LPROCFS_ENTRY_AND_CHECK(dp);
        rc = seq_open(file, &sops);
        if (rc) {
                LPROCFS_EXIT();
                return rc;
        }

        seqf = file->private_data;
        seqf->private = dp->data;
        return 0;
}
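
/* How the seq_file plumbing connects: seq_open() allocates the struct
 * seq_file and stores it in file->private_data; copying dp->data (set to
 * the struct ptlrpc_service * at registration time) into seqf->private is
 * what lets the start/next/show/stop callbacks above recover the service
 * from the seq_file they are handed. */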
void ptlrpc_lprocfs_register_service(struct proc_dir_entry *entry,
                                     struct ptlrpc_service *svc)
{
        struct lprocfs_vars lproc_vars[] = {
                {.name       = "req_buffer_history_len",
                 .write_fptr = NULL,
                 .read_fptr  = ptlrpc_lprocfs_read_req_history_len,
                 .data       = svc},
                {.name       = "req_buffer_history_max",
                 .write_fptr = ptlrpc_lprocfs_write_req_history_max,
                 .read_fptr  = ptlrpc_lprocfs_read_req_history_max,
                 .data       = svc},
                {NULL}
        };
        static struct file_operations req_history_fops = {
                .owner   = THIS_MODULE,
                .open    = ptlrpc_lprocfs_svc_req_history_open,
                .read    = seq_read,
                .llseek  = seq_lseek,
                .release = lprocfs_seq_release,
        };
        struct proc_dir_entry *req_history;

        ptlrpc_lprocfs_register(entry, svc->srv_name,
                                "stats", &svc->srv_procroot,
                                &svc->srv_stats);

        if (svc->srv_procroot == NULL)
                return;

        lprocfs_add_vars(svc->srv_procroot, lproc_vars, NULL);

        req_history = create_proc_entry("req_history", 0400,
                                        svc->srv_procroot);
        if (req_history != NULL) {
                req_history->data = svc;
                req_history->proc_fops = &req_history_fops;
        }
}
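
/* Resulting layout under the service's proc directory (typical paths,
 * assuming the usual /proc/fs/lustre root):
 *   <service>/stats                  - per-opcode RPC counters
 *   <service>/req_buffer_history_len - read-only count of history rqbds
 *   <service>/req_buffer_history_max - writable history size limit
 *   <service>/req_history            - seq_file dump of the live history */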
void ptlrpc_lprocfs_register_obd(struct obd_device *obddev)
{
        ptlrpc_lprocfs_register(obddev->obd_proc_entry, NULL, "stats",
                                &obddev->obd_svc_procroot,
                                &obddev->obd_svc_stats);
}
EXPORT_SYMBOL(ptlrpc_lprocfs_register_obd);
void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req)
{
        struct lprocfs_stats *svc_stats;
        int opc = opcode_offset(lustre_msg_get_opc(req->rq_reqmsg));

        svc_stats = req->rq_import->imp_obd->obd_svc_stats;
        if (svc_stats == NULL || opc <= 0)
                return;
        LASSERT(opc < LUSTRE_MAX_OPCODES);
        /* These two use the ptlrpc_lprocfs_brw below */
        if (!(opc == OST_WRITE || opc == OST_READ))
                lprocfs_counter_add(svc_stats, opc + PTLRPC_LAST_CNTR, 0);
}
void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int opc, int bytes)
{
        struct lprocfs_stats *svc_stats;

        svc_stats = req->rq_import->imp_obd->obd_svc_stats;
        if (svc_stats == NULL)
                return;
        lprocfs_counter_add(svc_stats, opc + PTLRPC_LAST_CNTR, bytes);
}
EXPORT_SYMBOL(ptlrpc_lprocfs_brw);
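
/* Division of labour between the two hooks above: ptlrpc_lprocfs_rpc_sent()
 * ticks every RPC except OST_READ/OST_WRITE with an amount of 0 (those
 * counters count "reqs"), while the bulk paths call ptlrpc_lprocfs_brw()
 * with the transfer size, matching the "bytes" unit chosen when the
 * counters were initialized. */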
void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc)
{
        if (svc->srv_procroot != NULL)
                lprocfs_remove(&svc->srv_procroot);
        if (svc->srv_stats)
                lprocfs_free_stats(&svc->srv_stats);
}
void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd)
{
        if (obd->obd_svc_procroot)
                lprocfs_remove(&obd->obd_svc_procroot);
        if (obd->obd_svc_stats)
                lprocfs_free_stats(&obd->obd_svc_stats);
}
EXPORT_SYMBOL(ptlrpc_lprocfs_unregister_obd);
int lprocfs_wr_evict_client(struct file *file, const char *buffer,
                            unsigned long count, void *data)
{
        struct obd_device *obd = data;
        char tmpbuf[sizeof(struct obd_uuid)];

        /* Kludge (deadlock situation): the lprocfs lock is already held,
         * since the client is evicted by writing its uuid/nid to the
         * procfs "evict_client" entry.  However, obd_export_evict_by_uuid()
         * will call lprocfs_remove() to destroy the proc entries under the
         * export{} being destroyed, so the lock has to be dropped first
         * here.
         * - jay, jxiong@clusterfs.com */
        class_incref(obd);
        LPROCFS_EXIT();

        /* "%39s" leaves room for the terminating NUL in tmpbuf */
        sscanf(buffer, "%39s", tmpbuf);
        obd_export_evict_by_uuid(obd, tmpbuf);

        LPROCFS_ENTRY();
        class_decref(obd);

        return count;
}
EXPORT_SYMBOL(lprocfs_wr_evict_client);
int lprocfs_wr_ping(struct file *file, const char *buffer,
                    unsigned long count, void *data)
{
        struct obd_device *obd = data;
        struct ptlrpc_request *req;
        int rc;
        ENTRY;

        LPROCFS_CLIMP_CHECK(obd);
        req = ptlrpc_prep_req(obd->u.cli.cl_import, LUSTRE_OBD_VERSION,
                              OBD_PING, 1, NULL, NULL);
        LPROCFS_CLIMP_EXIT(obd);
        if (req == NULL)
                RETURN(-ENOMEM);

        ptlrpc_req_set_repsize(req, 1, NULL);
        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_no_resend = 1;

        rc = ptlrpc_queue_wait(req);

        ptlrpc_req_finished(req);
        if (rc >= 0)
                RETURN(count);
        RETURN(rc);
}
EXPORT_SYMBOL(lprocfs_wr_ping);
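
/* Usage sketch (hypothetical path; the client OBDs that expose a "ping"
 * entry wire it to this handler):
 *   echo 1 > /proc/fs/lustre/osc/<target>/ping
 * sends a single synchronous OBD_PING on the import and reports the RPC
 * status back to the writer. */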