/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_CLASS

#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <obd_support.h>
#include <obd.h>
#include <lprocfs_status.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <obd_class.h>
#include "ptlrpc_internal.h"
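
/*
 * Human-readable names for the wire RPC opcodes, indexed by
 * opcode_offset().  ll_opcode2str() asserts that each slot's opcode
 * matches, so this table must be kept in sync with lustre_idl.h.
 */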
struct ll_rpc_opcode {
        __u32       opcode;
        const char *opname;
} ll_rpc_opcode_table[LUSTRE_MAX_OPCODES] = {
        { OST_REPLY, "ost_reply" },
        { OST_GETATTR, "ost_getattr" },
        { OST_SETATTR, "ost_setattr" },
        { OST_READ, "ost_read" },
        { OST_WRITE, "ost_write" },
        { OST_CREATE, "ost_create" },
        { OST_DESTROY, "ost_destroy" },
        { OST_GET_INFO, "ost_get_info" },
        { OST_CONNECT, "ost_connect" },
        { OST_DISCONNECT, "ost_disconnect" },
        { OST_PUNCH, "ost_punch" },
        { OST_OPEN, "ost_open" },
        { OST_CLOSE, "ost_close" },
        { OST_STATFS, "ost_statfs" },
        { 14, NULL }, /* formerly OST_SAN_READ */
        { 15, NULL }, /* formerly OST_SAN_WRITE */
        { OST_SYNC, "ost_sync" },
        { OST_SET_INFO, "ost_set_info" },
        { OST_QUOTACHECK, "ost_quotacheck" },
        { OST_QUOTACTL, "ost_quotactl" },
        { OST_QUOTA_ADJUST_QUNIT, "ost_quota_adjust_qunit" },
        { MDS_GETATTR, "mds_getattr" },
        { MDS_GETATTR_NAME, "mds_getattr_lock" },
        { MDS_CLOSE, "mds_close" },
        { MDS_REINT, "mds_reint" },
        { MDS_READPAGE, "mds_readpage" },
        { MDS_CONNECT, "mds_connect" },
        { MDS_DISCONNECT, "mds_disconnect" },
        { MDS_GETSTATUS, "mds_getstatus" },
        { MDS_STATFS, "mds_statfs" },
        { MDS_PIN, "mds_pin" },
        { MDS_UNPIN, "mds_unpin" },
        { MDS_SYNC, "mds_sync" },
        { MDS_DONE_WRITING, "mds_done_writing" },
        { MDS_SET_INFO, "mds_set_info" },
        { MDS_QUOTACHECK, "mds_quotacheck" },
        { MDS_QUOTACTL, "mds_quotactl" },
        { MDS_GETXATTR, "mds_getxattr" },
        { MDS_SETXATTR, "mds_setxattr" },
        { MDS_WRITEPAGE, "mds_writepage" },
        { MDS_IS_SUBDIR, "mds_is_subdir" },
        { MDS_GET_INFO, "mds_get_info" },
        { LDLM_ENQUEUE, "ldlm_enqueue" },
        { LDLM_CONVERT, "ldlm_convert" },
        { LDLM_CANCEL, "ldlm_cancel" },
        { LDLM_BL_CALLBACK, "ldlm_bl_callback" },
        { LDLM_CP_CALLBACK, "ldlm_cp_callback" },
        { LDLM_GL_CALLBACK, "ldlm_gl_callback" },
        { MGS_CONNECT, "mgs_connect" },
        { MGS_DISCONNECT, "mgs_disconnect" },
        { MGS_EXCEPTION, "mgs_exception" },
        { MGS_TARGET_REG, "mgs_target_reg" },
        { MGS_TARGET_DEL, "mgs_target_del" },
        { MGS_SET_INFO, "mgs_set_info" },
        { OBD_PING, "obd_ping" },
        { OBD_LOG_CANCEL, "llog_origin_handle_cancel" },
        { OBD_QC_CALLBACK, "obd_quota_callback" },
        { LLOG_ORIGIN_HANDLE_CREATE, "llog_origin_handle_create" },
        { LLOG_ORIGIN_HANDLE_NEXT_BLOCK, "llog_origin_handle_next_block" },
        { LLOG_ORIGIN_HANDLE_READ_HEADER, "llog_origin_handle_read_header" },
        { LLOG_ORIGIN_HANDLE_WRITE_REC, "llog_origin_handle_write_rec" },
        { LLOG_ORIGIN_HANDLE_CLOSE, "llog_origin_handle_close" },
        { LLOG_ORIGIN_CONNECT, "llog_origin_connect" },
        { LLOG_CATINFO, "llog_catinfo" },
        { LLOG_ORIGIN_HANDLE_PREV_BLOCK, "llog_origin_handle_prev_block" },
        { LLOG_ORIGIN_HANDLE_DESTROY, "llog_origin_handle_destroy" },
        { QUOTA_DQACQ, "quota_acquire" },
        { QUOTA_DQREL, "quota_release" },
        { SEQ_QUERY, "seq_query" },
        { SEC_CTX_INIT, "sec_ctx_init" },
        { SEC_CTX_INIT_CONT, "sec_ctx_init_cont" },
        { SEC_CTX_FINI, "sec_ctx_fini" },
        { FLD_QUERY, "fld_query" }
};
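
/*
 * "Extra" pseudo-opcodes (EXTRA_LAST_OPC entries): client-side statistics
 * that have no wire opcode of their own, such as per-lock-type LDLM
 * enqueues, per-operation MDS reints and bulk read/write byte counts.
 * This table is indexed directly by the pseudo-opcode value.
 */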
struct ll_eopcode {
        __u32       opcode;
        const char *opname;
} ll_eopcode_table[EXTRA_LAST_OPC] = {
        { LDLM_GLIMPSE_ENQUEUE, "ldlm_glimpse_enqueue" },
        { LDLM_PLAIN_ENQUEUE, "ldlm_plain_enqueue" },
        { LDLM_EXTENT_ENQUEUE, "ldlm_extent_enqueue" },
        { LDLM_FLOCK_ENQUEUE, "ldlm_flock_enqueue" },
        { LDLM_IBITS_ENQUEUE, "ldlm_ibits_enqueue" },
        { MDS_REINT_SETATTR, "mds_reint_setattr" },
        { MDS_REINT_CREATE, "mds_reint_create" },
        { MDS_REINT_LINK, "mds_reint_link" },
        { MDS_REINT_UNLINK, "mds_reint_unlink" },
        { MDS_REINT_RENAME, "mds_reint_rename" },
        { MDS_REINT_OPEN, "mds_reint_open" },
        { MDS_REINT_SETXATTR, "mds_reint_setxattr" },
        { BRW_READ_BYTES, "read_bytes" },
        { BRW_WRITE_BYTES, "write_bytes" },
};

const char *ll_opcode2str(__u32 opcode)
{
        /* When one of the assertions below fails, chances are that:
         *     1) A new opcode was added in include/lustre/lustre_idl.h,
         *        but is missing from the table above.
         * or  2) The opcode space was renumbered or rearranged,
         *        and the opcode_offset() function in
         *        ptlrpc_internal.h needs to be modified.
         */
        __u32 offset = opcode_offset(opcode);
        LASSERTF(offset < LUSTRE_MAX_OPCODES,
                 "offset %u >= LUSTRE_MAX_OPCODES %u\n",
                 offset, LUSTRE_MAX_OPCODES);
        LASSERTF(ll_rpc_opcode_table[offset].opcode == opcode,
                 "ll_rpc_opcode_table[%u].opcode %u != opcode %u\n",
                 offset, ll_rpc_opcode_table[offset].opcode, opcode);
        return ll_rpc_opcode_table[offset].opname;
}

const char *ll_eopcode2str(__u32 opcode)
{
        LASSERT(ll_eopcode_table[opcode].opcode == opcode);
        return ll_eopcode_table[opcode].opname;
}
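
/*
 * Lay out a stats block for a service or import: the fixed ptlrpc
 * counters (req_waittime, req_qdepth, ...) come first, followed by the
 * EXTRA_LAST_OPC pseudo-opcodes at PTLRPC_LAST_CNTR + i, and finally one
 * counter per wire opcode at EXTRA_MAX_OPCODES + i.
 */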
void ptlrpc_lprocfs_register(struct proc_dir_entry *root, char *dir,
                             char *name, struct proc_dir_entry **procroot_ret,
                             struct lprocfs_stats **stats_ret)
{
        struct proc_dir_entry *svc_procroot;
        struct lprocfs_stats *svc_stats;
        int i, rc;
        unsigned int svc_counter_config = LPROCFS_CNTR_AVGMINMAX |
                                          LPROCFS_CNTR_STDDEV;

        LASSERT(*procroot_ret == NULL);
        LASSERT(*stats_ret == NULL);

        svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES+LUSTRE_MAX_OPCODES,0);
        if (svc_stats == NULL)
                return;

        if (dir) {
                svc_procroot = lprocfs_register(dir, root, NULL, NULL);
                if (IS_ERR(svc_procroot)) {
                        lprocfs_free_stats(&svc_stats);
                        return;
                }
        } else {
                svc_procroot = root;
        }

        lprocfs_counter_init(svc_stats, PTLRPC_REQWAIT_CNTR,
                             svc_counter_config, "req_waittime", "usec");
        lprocfs_counter_init(svc_stats, PTLRPC_REQQDEPTH_CNTR,
                             svc_counter_config, "req_qdepth", "reqs");
        lprocfs_counter_init(svc_stats, PTLRPC_REQACTIVE_CNTR,
                             svc_counter_config, "req_active", "reqs");
        lprocfs_counter_init(svc_stats, PTLRPC_TIMEOUT,
                             svc_counter_config, "req_timeout", "sec");
        lprocfs_counter_init(svc_stats, PTLRPC_REQBUF_AVAIL_CNTR,
                             svc_counter_config, "reqbuf_avail", "bufs");
        for (i = 0; i < EXTRA_LAST_OPC; i++) {
                char *units;

                switch (i) {
                case BRW_WRITE_BYTES:
                case BRW_READ_BYTES:
                        units = "bytes";
                        break;
                default:
                        units = "reqs";
                        break;
                }
                lprocfs_counter_init(svc_stats, PTLRPC_LAST_CNTR + i,
                                     svc_counter_config,
                                     ll_eopcode2str(i), units);
        }
        for (i = 0; i < LUSTRE_MAX_OPCODES; i++) {
                __u32 opcode = ll_rpc_opcode_table[i].opcode;
                lprocfs_counter_init(svc_stats,
                                     EXTRA_MAX_OPCODES + i, svc_counter_config,
                                     ll_opcode2str(opcode), "usec");
        }

        rc = lprocfs_register_stats(svc_procroot, name, svc_stats);
        if (rc < 0) {
                if (dir)
                        lprocfs_remove(&svc_procroot);
                lprocfs_free_stats(&svc_stats);
        } else {
                if (dir)
                        *procroot_ret = svc_procroot;
                *stats_ret = svc_stats;
        }
}
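
/*
 * The read/write handlers below back the per-service tunables that
 * ptlrpc_lprocfs_register_service() exports: request buffer history
 * length and limit, thread counts, adaptive timeout state and the
 * high-priority request ratio.
 */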

static int
ptlrpc_lprocfs_read_req_history_len(char *page, char **start, off_t off,
                                    int count, int *eof, void *data)
{
        struct ptlrpc_service *svc = data;

        *eof = 1;
        return snprintf(page, count, "%d\n", svc->srv_n_history_rqbds);
}

static int
ptlrpc_lprocfs_read_req_history_max(char *page, char **start, off_t off,
                                    int count, int *eof, void *data)
{
        struct ptlrpc_service *svc = data;

        *eof = 1;
        return snprintf(page, count, "%d\n", svc->srv_max_history_rqbds);
}

static int
ptlrpc_lprocfs_write_req_history_max(struct file *file, const char *buffer,
                                     unsigned long count, void *data)
{
        struct ptlrpc_service *svc = data;
        int bufpages;
        int val;
        int rc = lprocfs_write_helper(buffer, count, &val);

        if (rc < 0)
                return rc;

        if (val < 0)
                return -ERANGE;

        /* This sanity check is more of an insanity check; we can still
         * hose a kernel by allowing the request history to grow too
         * far. */
        bufpages = (svc->srv_buf_size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
        if (val > num_physpages/(2 * bufpages))
                return -ERANGE;

        spin_lock(&svc->srv_lock);
        svc->srv_max_history_rqbds = val;
        spin_unlock(&svc->srv_lock);

        return count;
}

static int
ptlrpc_lprocfs_rd_threads_min(char *page, char **start, off_t off,
                              int count, int *eof, void *data)
{
        struct ptlrpc_service *svc = data;

        return snprintf(page, count, "%d\n", svc->srv_threads_min);
}

static int
ptlrpc_lprocfs_wr_threads_min(struct file *file, const char *buffer,
                              unsigned long count, void *data)
{
        struct ptlrpc_service *svc = data;
        int val;
        int rc = lprocfs_write_helper(buffer, count, &val);

        if (rc < 0)
                return rc;

        if (val < 2)
                return -ERANGE;

        if (val > svc->srv_threads_max)
                return -ERANGE;

        spin_lock(&svc->srv_lock);
        svc->srv_threads_min = val;
        spin_unlock(&svc->srv_lock);

        return count;
}

static int
ptlrpc_lprocfs_rd_threads_started(char *page, char **start, off_t off,
                                  int count, int *eof, void *data)
{
        struct ptlrpc_service *svc = data;

        return snprintf(page, count, "%d\n", svc->srv_threads_started);
}

static int
ptlrpc_lprocfs_rd_threads_max(char *page, char **start, off_t off,
                              int count, int *eof, void *data)
{
        struct ptlrpc_service *svc = data;

        return snprintf(page, count, "%d\n", svc->srv_threads_max);
}

static int
ptlrpc_lprocfs_wr_threads_max(struct file *file, const char *buffer,
                              unsigned long count, void *data)
{
        struct ptlrpc_service *svc = data;
        int val;
        int rc = lprocfs_write_helper(buffer, count, &val);

        if (rc < 0)
                return rc;

        if (val < 2)
                return -ERANGE;

        if (val < svc->srv_threads_min)
                return -ERANGE;

        spin_lock(&svc->srv_lock);
        svc->srv_threads_max = val;
        spin_unlock(&svc->srv_lock);

        return count;
}
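
/*
 * Iterator state for the "req_history" seq_file.  Requests in
 * svc->srv_request_history are identified by rq_history_seq; the last
 * request found is cached in srhi_req so that successive reads can
 * usually resume the scan instead of restarting from the list head.
 */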
struct ptlrpc_srh_iterator {
        __u64                  srhi_seq;
        struct ptlrpc_request *srhi_req;
};

static int
ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service *svc,
                                    struct ptlrpc_srh_iterator *srhi,
                                    __u64 seq)
{
        struct list_head      *e;
        struct ptlrpc_request *req;

        if (srhi->srhi_req != NULL &&
            srhi->srhi_seq > svc->srv_request_max_cull_seq &&
            srhi->srhi_seq <= seq) {
                /* If srhi_req was set previously, hasn't been culled and
                 * we're searching for a seq on or after it (i.e. more
                 * recent), search from it onwards.
                 * Since the service history is LRU (i.e. culled reqs will
                 * be near the head), we shouldn't have to do long
                 * searches */
                LASSERT(srhi->srhi_seq == srhi->srhi_req->rq_history_seq);
                LASSERT(!list_empty(&svc->srv_request_history));
                e = &srhi->srhi_req->rq_history_list;
        } else {
                /* search from start */
                e = svc->srv_request_history.next;
        }

        while (e != &svc->srv_request_history) {
                req = list_entry(e, struct ptlrpc_request, rq_history_list);

                if (req->rq_history_seq >= seq) {
                        srhi->srhi_seq = req->rq_history_seq;
                        srhi->srhi_req = req;
                        return 0;
                }
                e = e->next;
        }

        return -ENOENT;
}

static void *
ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos)
{
        struct ptlrpc_service      *svc = s->private;
        struct ptlrpc_srh_iterator *srhi;
        int                         rc;

        OBD_ALLOC(srhi, sizeof(*srhi));
        if (srhi == NULL)
                return NULL;

        srhi->srhi_seq = 0;
        srhi->srhi_req = NULL;

        spin_lock(&svc->srv_lock);
        rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, *pos);
        spin_unlock(&svc->srv_lock);

        if (rc == 0) {
                *pos = srhi->srhi_seq;
                return srhi;
        }

        OBD_FREE(srhi, sizeof(*srhi));
        return NULL;
}

static void
ptlrpc_lprocfs_svc_req_history_stop(struct seq_file *s, void *iter)
{
        struct ptlrpc_srh_iterator *srhi = iter;

        if (srhi != NULL)
                OBD_FREE(srhi, sizeof(*srhi));
}

static void *
ptlrpc_lprocfs_svc_req_history_next(struct seq_file *s,
                                    void *iter, loff_t *pos)
{
        struct ptlrpc_service      *svc = s->private;
        struct ptlrpc_srh_iterator *srhi = iter;
        int                         rc;

        spin_lock(&svc->srv_lock);
        rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, *pos + 1);
        spin_unlock(&svc->srv_lock);

        if (rc != 0) {
                OBD_FREE(srhi, sizeof(*srhi));
                return NULL;
        }

        *pos = srhi->srhi_seq;
        return srhi;
}

/* common ost/mdt srv_request_history_print_fn */
void target_print_req(void *seq_file, struct ptlrpc_request *req)
{
        /* Called holding srv_lock with irqs disabled.
         * Print specific req contents and a newline.
         * CAVEAT EMPTOR: check request message length before printing!!!
         * You might have received any old crap so you must be just as
         * careful here as the service's request parser!!! */
        struct seq_file *sf = seq_file;

        switch (req->rq_phase) {
        case RQ_PHASE_NEW:
                /* still awaiting a service thread's attention, or rejected
                 * because the generic request message didn't unpack */
                seq_printf(sf, "<not swabbed>\n");
                break;
        case RQ_PHASE_INTERPRET:
                /* being handled, so basic msg swabbed, and opc is valid
                 * but racing with mds_handle() */
        case RQ_PHASE_COMPLETE:
                /* been handled by mds_handle(); reply state possibly still
                 * volatile */
                seq_printf(sf, "opc %d\n", lustre_msg_get_opc(req->rq_reqmsg));
                break;
        default:
                DEBUG_REQ(D_ERROR, req, "bad phase %d", req->rq_phase);
        }
}
EXPORT_SYMBOL(target_print_req);

static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
{
        struct ptlrpc_service      *svc = s->private;
        struct ptlrpc_srh_iterator *srhi = iter;
        struct ptlrpc_request      *req;
        int                         rc;

        spin_lock(&svc->srv_lock);

        rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, srhi->srhi_seq);

        if (rc == 0) {
                req = srhi->srhi_req;

                /* Print common req fields.
                 * CAVEAT EMPTOR: we're racing with the service handler
                 * here.  The request could contain any old crap, so you
                 * must be just as careful as the service's request
                 * parser.  Currently I only print stuff here I know is OK
                 * to look at coz it was set up in request_in_callback()!!! */
                seq_printf(s, LPD64":%s:%s:x"LPU64":%d:%s:%ld:%lds(%+lds) ",
                           req->rq_history_seq, libcfs_nid2str(req->rq_self),
                           libcfs_id2str(req->rq_peer), req->rq_xid,
                           req->rq_reqlen, ptlrpc_rqphase2str(req),
                           req->rq_arrival_time.tv_sec,
                           req->rq_sent - req->rq_arrival_time.tv_sec,
                           req->rq_sent - req->rq_deadline);
                if (svc->srv_request_history_print_fn == NULL)
                        seq_printf(s, "\n");
                else
                        svc->srv_request_history_print_fn(s, srhi->srhi_req);
        }

        spin_unlock(&svc->srv_lock);

        return rc;
}
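
/*
 * seq_file plumbing for the "req_history" proc entry: open() wires up the
 * start/next/stop/show callbacks above and stashes the service pointer in
 * seq_file->private.
 */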
static int
ptlrpc_lprocfs_svc_req_history_open(struct inode *inode, struct file *file)
{
        static struct seq_operations sops = {
                .start = ptlrpc_lprocfs_svc_req_history_start,
                .stop  = ptlrpc_lprocfs_svc_req_history_stop,
                .next  = ptlrpc_lprocfs_svc_req_history_next,
                .show  = ptlrpc_lprocfs_svc_req_history_show,
        };
        struct proc_dir_entry *dp = PDE(inode);
        struct seq_file       *seqf;
        int                    rc;

        LPROCFS_ENTRY_AND_CHECK(dp);
        rc = seq_open(file, &sops);
        if (rc) {
                LPROCFS_EXIT();
                return rc;
        }

        seqf = file->private_data;
        seqf->private = dp->data;
        return 0;
}

/* See also lprocfs_rd_timeouts */
static int ptlrpc_lprocfs_rd_timeouts(char *page, char **start, off_t off,
                                      int count, int *eof, void *data)
{
        struct ptlrpc_service *svc = data;
        unsigned int cur, worst;
        time_t worstt;
        struct dhms ts;
        int rc = 0;

        *eof = 1;
        cur = at_get(&svc->srv_at_estimate);
        worst = svc->srv_at_estimate.at_worst_ever;
        worstt = svc->srv_at_estimate.at_worst_time;
        s2dhms(&ts, cfs_time_current_sec() - worstt);
        if (AT_OFF)
                rc += snprintf(page + rc, count - rc,
                               "adaptive timeouts off, using obd_timeout %u\n",
                               obd_timeout);
        rc += snprintf(page + rc, count - rc,
                       "%10s : cur %3u worst %3u (at %ld, "DHMS_FMT" ago) ",
                       "service", cur, worst, worstt,
                       DHMS_VARS(&ts));
        rc = lprocfs_at_hist_helper(page, count, rc,
                                    &svc->srv_at_estimate);
        return rc;
}

static int ptlrpc_lprocfs_rd_hp_ratio(char *page, char **start, off_t off,
                                      int count, int *eof, void *data)
{
        struct ptlrpc_service *svc = data;
        int rc = snprintf(page, count, "%d", svc->srv_hpreq_ratio);
        return rc;
}

static int ptlrpc_lprocfs_wr_hp_ratio(struct file *file, const char *buffer,
                                      unsigned long count, void *data)
{
        struct ptlrpc_service *svc = data;
        int rc, val;

        rc = lprocfs_write_helper(buffer, count, &val);
        if (rc < 0)
                return rc;
        if (val < 0)
                return -ERANGE;

        spin_lock(&svc->srv_lock);
        svc->srv_hpreq_ratio = val;
        spin_unlock(&svc->srv_lock);

        return count;
}
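
/*
 * Create the per-service proc directory: a "stats" counter file plus the
 * tunables defined above and the "req_history" seq_file.
 */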
void ptlrpc_lprocfs_register_service(struct proc_dir_entry *entry,
                                     struct ptlrpc_service *svc)
{
        struct lprocfs_vars lproc_vars[] = {
                {.name       = "high_priority_ratio",
                 .read_fptr  = ptlrpc_lprocfs_rd_hp_ratio,
                 .write_fptr = ptlrpc_lprocfs_wr_hp_ratio,
                 .data       = svc},
                {.name       = "req_buffer_history_len",
                 .read_fptr  = ptlrpc_lprocfs_read_req_history_len,
                 .data       = svc},
                {.name       = "req_buffer_history_max",
                 .write_fptr = ptlrpc_lprocfs_write_req_history_max,
                 .read_fptr  = ptlrpc_lprocfs_read_req_history_max,
                 .data       = svc},
                {.name       = "threads_min",
                 .read_fptr  = ptlrpc_lprocfs_rd_threads_min,
                 .write_fptr = ptlrpc_lprocfs_wr_threads_min,
                 .data       = svc},
                {.name       = "threads_max",
                 .read_fptr  = ptlrpc_lprocfs_rd_threads_max,
                 .write_fptr = ptlrpc_lprocfs_wr_threads_max,
                 .data       = svc},
                {.name       = "threads_started",
                 .read_fptr  = ptlrpc_lprocfs_rd_threads_started,
                 .data       = svc},
                {.name       = "timeouts",
                 .read_fptr  = ptlrpc_lprocfs_rd_timeouts,
                 .data       = svc},
                {NULL}
        };
        static struct file_operations req_history_fops = {
                .owner   = THIS_MODULE,
                .open    = ptlrpc_lprocfs_svc_req_history_open,
                .read    = seq_read,
                .llseek  = seq_lseek,
                .release = lprocfs_seq_release,
        };
        int rc;

        ptlrpc_lprocfs_register(entry, svc->srv_name,
                                "stats", &svc->srv_procroot,
                                &svc->srv_stats);

        if (svc->srv_procroot == NULL)
                return;

        lprocfs_add_vars(svc->srv_procroot, lproc_vars, NULL);

        rc = lprocfs_seq_create(svc->srv_procroot, "req_history",
                                0400, &req_history_fops, svc);
        if (rc)
                CWARN("Error adding the req_history file\n");
}

void ptlrpc_lprocfs_register_obd(struct obd_device *obddev)
{
        ptlrpc_lprocfs_register(obddev->obd_proc_entry, NULL, "stats",
                                &obddev->obd_svc_procroot,
                                &obddev->obd_svc_stats);
}
EXPORT_SYMBOL(ptlrpc_lprocfs_register_obd);
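
/*
 * Record a client-side RPC in the sending import's obd_svc_stats.
 * LDLM_ENQUEUE and MDS_REINT are skipped here because they are accounted
 * against the more specific "extra" pseudo-opcodes instead.
 */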
void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount)
{
        struct lprocfs_stats *svc_stats;
        __u32 op = lustre_msg_get_opc(req->rq_reqmsg);
        int opc = opcode_offset(op);

        svc_stats = req->rq_import->imp_obd->obd_svc_stats;
        if (svc_stats == NULL || opc <= 0)
                return;
        LASSERT(opc < LUSTRE_MAX_OPCODES);
        if (!(op == LDLM_ENQUEUE || op == MDS_REINT))
                lprocfs_counter_add(svc_stats, opc + EXTRA_MAX_OPCODES, amount);
}
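
/*
 * Account bulk I/O payload bytes: OST_READ and OST_WRITE requests feed
 * the read_bytes/write_bytes extra counters.
 */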
void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes)
{
        struct lprocfs_stats *svc_stats;
        int idx;

        if (!req->rq_import)
                return;
        svc_stats = req->rq_import->imp_obd->obd_svc_stats;
        if (!svc_stats)
                return;
        idx = lustre_msg_get_opc(req->rq_reqmsg);
        switch (idx) {
        case OST_READ:
                idx = BRW_READ_BYTES + PTLRPC_LAST_CNTR;
                break;
        case OST_WRITE:
                idx = BRW_WRITE_BYTES + PTLRPC_LAST_CNTR;
                break;
        default:
                LASSERTF(0, "unsupported opcode %u\n", idx);
                return;
        }

        lprocfs_counter_add(svc_stats, idx, bytes);
}
EXPORT_SYMBOL(ptlrpc_lprocfs_brw);
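
/*
 * Tear down what ptlrpc_lprocfs_register_service() and
 * ptlrpc_lprocfs_register_obd() set up.
 */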
void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc)
{
        if (svc->srv_procroot != NULL)
                lprocfs_remove(&svc->srv_procroot);

        if (svc->srv_stats)
                lprocfs_free_stats(&svc->srv_stats);
}

void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd)
{
        if (obd->obd_svc_procroot)
                lprocfs_remove(&obd->obd_svc_procroot);

        if (obd->obd_svc_stats)
                lprocfs_free_stats(&obd->obd_svc_stats);
}
EXPORT_SYMBOL(ptlrpc_lprocfs_unregister_obd);
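
/*
 * Write handler for the "evict_client" proc entry: accepts "nid:<nid>",
 * "uuid:<uuid>" or a bare UUID and evicts the matching export(s).
 */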
int lprocfs_wr_evict_client(struct file *file, const char *buffer,
                            unsigned long count, void *data)
{
        struct obd_device *obd = data;
        char tmpbuf[sizeof(struct obd_uuid)];

        /* Kludge code (deadlock situation): the lprocfs lock has been held
         * since the client is evicted by writing the client's
         * uuid/nid to the procfs "evict_client" entry. However,
         * obd_export_evict_by_uuid() will call lprocfs_remove() to destroy
         * the proc entries under the export{} being destroyed, so the lock
         * must be dropped here first.
         * - jay, jxiong@clusterfs.com */
        class_incref(obd, __FUNCTION__, cfs_current());
        LPROCFS_EXIT();

        sscanf(buffer, "%40s", tmpbuf);
        if (strncmp(tmpbuf, "nid:", 4) == 0)
                obd_export_evict_by_nid(obd, tmpbuf + 4);
        else if (strncmp(tmpbuf, "uuid:", 5) == 0)
                obd_export_evict_by_uuid(obd, tmpbuf + 5);
        else
                obd_export_evict_by_uuid(obd, tmpbuf);

        LPROCFS_ENTRY();
        class_decref(obd, __FUNCTION__, cfs_current());

        return count;
}
EXPORT_SYMBOL(lprocfs_wr_evict_client);
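
/*
 * Write handler for the "ping" proc entry: send a single OBD_PING on the
 * client import and wait for the reply.
 */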
int lprocfs_wr_ping(struct file *file, const char *buffer,
                    unsigned long count, void *data)
{
        struct obd_device     *obd = data;
        struct ptlrpc_request *req;
        int                    rc;
        ENTRY;

        LPROCFS_CLIMP_CHECK(obd);
        req = ptlrpc_prep_ping(obd->u.cli.cl_import);
        LPROCFS_CLIMP_EXIT(obd);
        if (req == NULL)
                RETURN(-ENOMEM);

        req->rq_send_state = LUSTRE_IMP_FULL;

        rc = ptlrpc_queue_wait(req);

        ptlrpc_req_finished(req);
        if (rc >= 0)
                RETURN(count);
        RETURN(rc);
}
EXPORT_SYMBOL(lprocfs_wr_ping);