lustre/ptlrpc/lproc_ptlrpc.c (fs/lustre-release.git)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  */
36 #define DEBUG_SUBSYSTEM S_CLASS
37
38 #ifndef __KERNEL__
39 # include <liblustre.h>
40 #endif
41
42 #include <obd_support.h>
43 #include <obd.h>
44 #include <lprocfs_status.h>
45 #include <lustre/lustre_idl.h>
46 #include <lustre_net.h>
47 #include <obd_class.h>
48 #include "ptlrpc_internal.h"
49
50
51 struct ll_rpc_opcode {
52      __u32       opcode;
53      const char *opname;
54 } ll_rpc_opcode_table[LUSTRE_MAX_OPCODES] = {
55         { OST_REPLY,        "ost_reply" },
56         { OST_GETATTR,      "ost_getattr" },
57         { OST_SETATTR,      "ost_setattr" },
58         { OST_READ,         "ost_read" },
59         { OST_WRITE,        "ost_write" },
60         { OST_CREATE,       "ost_create" },
61         { OST_DESTROY,      "ost_destroy" },
62         { OST_GET_INFO,     "ost_get_info" },
63         { OST_CONNECT,      "ost_connect" },
64         { OST_DISCONNECT,   "ost_disconnect" },
65         { OST_PUNCH,        "ost_punch" },
66         { OST_OPEN,         "ost_open" },
67         { OST_CLOSE,        "ost_close" },
68         { OST_STATFS,       "ost_statfs" },
69         { 14,                NULL },    /* formerly OST_SAN_READ */
70         { 15,                NULL },    /* formerly OST_SAN_WRITE */
71         { OST_SYNC,         "ost_sync" },
72         { OST_SET_INFO,     "ost_set_info" },
73         { OST_QUOTACHECK,   "ost_quotacheck" },
74         { OST_QUOTACTL,     "ost_quotactl" },
75         { OST_QUOTA_ADJUST_QUNIT, "ost_quota_adjust_qunit" },
76         { MDS_GETATTR,      "mds_getattr" },
77         { MDS_GETATTR_NAME, "mds_getattr_lock" },
78         { MDS_CLOSE,        "mds_close" },
79         { MDS_REINT,        "mds_reint" },
80         { MDS_READPAGE,     "mds_readpage" },
81         { MDS_CONNECT,      "mds_connect" },
82         { MDS_DISCONNECT,   "mds_disconnect" },
83         { MDS_GETSTATUS,    "mds_getstatus" },
84         { MDS_STATFS,       "mds_statfs" },
85         { MDS_PIN,          "mds_pin" },
86         { MDS_UNPIN,        "mds_unpin" },
87         { MDS_SYNC,         "mds_sync" },
88         { MDS_DONE_WRITING, "mds_done_writing" },
89         { MDS_SET_INFO,     "mds_set_info" },
90         { MDS_QUOTACHECK,   "mds_quotacheck" },
91         { MDS_QUOTACTL,     "mds_quotactl" },
92         { MDS_GETXATTR,     "mds_getxattr" },
93         { MDS_SETXATTR,     "mds_setxattr" },
94         { MDS_WRITEPAGE,    "mds_writepage" },
95         { MDS_IS_SUBDIR,    "mds_is_subdir" },
96         { MDS_GET_INFO,     "mds_get_info" },
97         { MDS_HSM_STATE_GET, "mds_hsm_state_get" },
98         { MDS_HSM_STATE_SET, "mds_hsm_state_set" },
99         { MDS_HSM_ACTION,   "mds_hsm_action" },
100         { MDS_HSM_PROGRESS, "mds_hsm_progress" },
101         { MDS_HSM_REQUEST,  "mds_hsm_request" },
102         { MDS_HSM_CT_REGISTER, "mds_hsm_ct_register" },
103         { MDS_HSM_CT_UNREGISTER, "mds_hsm_ct_unregister" },
104         { MDS_SWAP_LAYOUTS,     "mds_swap_layouts" },
105         { LDLM_ENQUEUE,     "ldlm_enqueue" },
106         { LDLM_CONVERT,     "ldlm_convert" },
107         { LDLM_CANCEL,      "ldlm_cancel" },
108         { LDLM_BL_CALLBACK, "ldlm_bl_callback" },
109         { LDLM_CP_CALLBACK, "ldlm_cp_callback" },
110         { LDLM_GL_CALLBACK, "ldlm_gl_callback" },
111         { LDLM_SET_INFO,    "ldlm_set_info" },
112         { MGS_CONNECT,      "mgs_connect" },
113         { MGS_DISCONNECT,   "mgs_disconnect" },
114         { MGS_EXCEPTION,    "mgs_exception" },
115         { MGS_TARGET_REG,   "mgs_target_reg" },
116         { MGS_TARGET_DEL,   "mgs_target_del" },
117         { MGS_SET_INFO,     "mgs_set_info" },
118         { MGS_CONFIG_READ,  "mgs_config_read" },
119         { OBD_PING,         "obd_ping" },
120         { OBD_LOG_CANCEL,   "llog_origin_handle_cancel" },
121         { OBD_QC_CALLBACK,  "obd_quota_callback" },
122         { OBD_IDX_READ,     "dt_index_read" },
123         { LLOG_ORIGIN_HANDLE_CREATE,     "llog_origin_handle_create" },
124         { LLOG_ORIGIN_HANDLE_NEXT_BLOCK, "llog_origin_handle_next_block" },
125         { LLOG_ORIGIN_HANDLE_READ_HEADER, "llog_origin_handle_read_header" },
126         { LLOG_ORIGIN_HANDLE_WRITE_REC,  "llog_origin_handle_write_rec" },
127         { LLOG_ORIGIN_HANDLE_CLOSE,      "llog_origin_handle_close" },
128         { LLOG_ORIGIN_CONNECT,           "llog_origin_connect" },
129         { LLOG_CATINFO,                  "llog_catinfo" },
130         { LLOG_ORIGIN_HANDLE_PREV_BLOCK, "llog_origin_handle_prev_block" },
131         { LLOG_ORIGIN_HANDLE_DESTROY,    "llog_origin_handle_destroy" },
132         { QUOTA_DQACQ,      "quota_acquire" },
133         { QUOTA_DQREL,      "quota_release" },
134         { SEQ_QUERY,        "seq_query" },
135         { SEC_CTX_INIT,     "sec_ctx_init" },
136         { SEC_CTX_INIT_CONT, "sec_ctx_init_cont" },
137         { SEC_CTX_FINI,     "sec_ctx_fini" },
138         { FLD_QUERY,        "fld_query" },
139         { UPDATE_OBJ,       "update_obj" },
140 };
141
142 struct ll_eopcode {
143      __u32       opcode;
144      const char *opname;
145 } ll_eopcode_table[EXTRA_LAST_OPC] = {
146         { LDLM_GLIMPSE_ENQUEUE, "ldlm_glimpse_enqueue" },
147         { LDLM_PLAIN_ENQUEUE,   "ldlm_plain_enqueue" },
148         { LDLM_EXTENT_ENQUEUE,  "ldlm_extent_enqueue" },
149         { LDLM_FLOCK_ENQUEUE,   "ldlm_flock_enqueue" },
150         { LDLM_IBITS_ENQUEUE,   "ldlm_ibits_enqueue" },
151         { MDS_REINT_SETATTR,    "mds_reint_setattr" },
152         { MDS_REINT_CREATE,     "mds_reint_create" },
153         { MDS_REINT_LINK,       "mds_reint_link" },
154         { MDS_REINT_UNLINK,     "mds_reint_unlink" },
155         { MDS_REINT_RENAME,     "mds_reint_rename" },
156         { MDS_REINT_OPEN,       "mds_reint_open" },
157         { MDS_REINT_SETXATTR,   "mds_reint_setxattr" },
158         { BRW_READ_BYTES,       "read_bytes" },
159         { BRW_WRITE_BYTES,      "write_bytes" },
160 };
161
162 const char *ll_opcode2str(__u32 opcode)
163 {
164         /* When one of the assertions below fails, chances are that:
165          *     1) A new opcode was added in include/lustre/lustre_idl.h,
166          *        but is missing from the table above.
167          * or  2) The opcode space was renumbered or rearranged,
168          *        and the opcode_offset() function in
169          *        ptlrpc_internal.h needs to be modified.
170          */
171         __u32 offset = opcode_offset(opcode);
172         LASSERTF(offset < LUSTRE_MAX_OPCODES,
173                  "offset %u >= LUSTRE_MAX_OPCODES %u\n",
174                  offset, LUSTRE_MAX_OPCODES);
175         LASSERTF(ll_rpc_opcode_table[offset].opcode == opcode,
176                  "ll_rpc_opcode_table[%u].opcode %u != opcode %u\n",
177                  offset, ll_rpc_opcode_table[offset].opcode, opcode);
178         return ll_rpc_opcode_table[offset].opname;
179 }
180
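/* Extra opcodes are numbered densely from 0 up to EXTRA_LAST_OPC, so the
 * opcode itself doubles as the index into ll_eopcode_table[]; the LASSERT
 * below sanity-checks that the table order matches. */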
181 const char *ll_eopcode2str(__u32 opcode)
182 {
183         LASSERT(ll_eopcode_table[opcode].opcode == opcode);
184         return ll_eopcode_table[opcode].opname;
185 }
186 #ifdef LPROCFS
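/* Layout of a service stats block as initialized by ptlrpc_lprocfs_register()
 * below (assuming EXTRA_MAX_OPCODES == PTLRPC_LAST_CNTR + EXTRA_LAST_OPC):
 *
 *     [0, PTLRPC_LAST_CNTR)                 generic counters (req_waittime, ...)
 *     [PTLRPC_LAST_CNTR, EXTRA_MAX_OPCODES) ll_eopcode_table entries
 *     [EXTRA_MAX_OPCODES, EXTRA_MAX_OPCODES + LUSTRE_MAX_OPCODES)
 *                                           ll_rpc_opcode_table entries
 */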
187 void ptlrpc_lprocfs_register(struct proc_dir_entry *root, char *dir,
188                              char *name, struct proc_dir_entry **procroot_ret,
189                              struct lprocfs_stats **stats_ret)
190 {
191         struct proc_dir_entry *svc_procroot;
192         struct lprocfs_stats *svc_stats;
193         int i, rc;
194         unsigned int svc_counter_config = LPROCFS_CNTR_AVGMINMAX |
195                                           LPROCFS_CNTR_STDDEV;
196
197         LASSERT(*procroot_ret == NULL);
198         LASSERT(*stats_ret == NULL);
199
200         svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES+LUSTRE_MAX_OPCODES,0);
201         if (svc_stats == NULL)
202                 return;
203
204         if (dir) {
205                 svc_procroot = lprocfs_register(dir, root, NULL, NULL);
206                 if (IS_ERR(svc_procroot)) {
207                         lprocfs_free_stats(&svc_stats);
208                         return;
209                 }
210         } else {
211                 svc_procroot = root;
212         }
213
214         lprocfs_counter_init(svc_stats, PTLRPC_REQWAIT_CNTR,
215                              svc_counter_config, "req_waittime", "usec");
216         lprocfs_counter_init(svc_stats, PTLRPC_REQQDEPTH_CNTR,
217                              svc_counter_config, "req_qdepth", "reqs");
218         lprocfs_counter_init(svc_stats, PTLRPC_REQACTIVE_CNTR,
219                              svc_counter_config, "req_active", "reqs");
220         lprocfs_counter_init(svc_stats, PTLRPC_TIMEOUT,
221                              svc_counter_config, "req_timeout", "sec");
222         lprocfs_counter_init(svc_stats, PTLRPC_REQBUF_AVAIL_CNTR,
223                              svc_counter_config, "reqbuf_avail", "bufs");
224         for (i = 0; i < EXTRA_LAST_OPC; i++) {
225                 char *units;
226
227                 switch (i) {
228                 case BRW_WRITE_BYTES:
229                 case BRW_READ_BYTES:
230                         units = "bytes";
231                         break;
232                 default:
233                         units = "reqs";
234                         break;
235                 }
236                 lprocfs_counter_init(svc_stats, PTLRPC_LAST_CNTR + i,
237                                      svc_counter_config,
238                                      ll_eopcode2str(i), units);
239         }
240         for (i = 0; i < LUSTRE_MAX_OPCODES; i++) {
241                 __u32 opcode = ll_rpc_opcode_table[i].opcode;
242                 lprocfs_counter_init(svc_stats,
243                                      EXTRA_MAX_OPCODES + i, svc_counter_config,
244                                      ll_opcode2str(opcode), "usec");
245         }
246
247         rc = lprocfs_register_stats(svc_procroot, name, svc_stats);
248         if (rc < 0) {
249                 if (dir)
250                         lprocfs_remove(&svc_procroot);
251                 lprocfs_free_stats(&svc_stats);
252         } else {
253                 if (dir)
254                         *procroot_ret = svc_procroot;
255                 *stats_ret = svc_stats;
256         }
257 }
258
259 static int
260 ptlrpc_lprocfs_read_req_history_len(char *page, char **start, off_t off,
261                                     int count, int *eof, void *data)
262 {
263         struct ptlrpc_service *svc = data;
264         struct ptlrpc_service_part *svcpt;
265         int     total = 0;
266         int     i;
267
268         *eof = 1;
269
270         ptlrpc_service_for_each_part(svcpt, i, svc)
271                 total += svcpt->scp_hist_nrqbds;
272
273         return snprintf(page, count, "%d\n", total);
274 }
275
276 static int
277 ptlrpc_lprocfs_read_req_history_max(char *page, char **start, off_t off,
278                                     int count, int *eof, void *data)
279 {
280         struct ptlrpc_service *svc = data;
281         struct ptlrpc_service_part *svcpt;
282         int     total = 0;
283         int     i;
284
285         *eof = 1;
286         ptlrpc_service_for_each_part(svcpt, i, svc)
287                 total += svc->srv_hist_nrqbds_cpt_max;
288
289         return snprintf(page, count, "%d\n", total);
290 }
291
292 static int
293 ptlrpc_lprocfs_write_req_history_max(struct file *file, const char *buffer,
294                                      unsigned long count, void *data)
295 {
296         struct ptlrpc_service      *svc = data;
297         int                         bufpages;
298         int                         val;
299         int                         rc;
300
301         rc = lprocfs_write_helper(buffer, count, &val);
302         if (rc < 0)
303                 return rc;
304
305         if (val < 0)
306                 return -ERANGE;
307
308         /* This sanity check is more of an insanity check; we can still
309          * hose a kernel by allowing the request history to grow too
310          * far. */
311         bufpages = (svc->srv_buf_size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
312         if (val > cfs_num_physpages/(2 * bufpages))
313                 return -ERANGE;
314
315         spin_lock(&svc->srv_lock);
316
317         if (val == 0)
318                 svc->srv_hist_nrqbds_cpt_max = 0;
319         else
320                 svc->srv_hist_nrqbds_cpt_max = max(1, (val / svc->srv_ncpts));
321
322         spin_unlock(&svc->srv_lock);
323
324         return count;
325 }
326
327 static int
328 ptlrpc_lprocfs_rd_threads_min(char *page, char **start, off_t off,
329                               int count, int *eof, void *data)
330 {
331         struct ptlrpc_service *svc = data;
332
333         return snprintf(page, count, "%d\n",
334                         svc->srv_nthrs_cpt_init * svc->srv_ncpts);
335 }
336
337 static int
338 ptlrpc_lprocfs_wr_threads_min(struct file *file, const char *buffer,
339                               unsigned long count, void *data)
340 {
341         struct ptlrpc_service      *svc = data;
342         int     val;
343         int     rc = lprocfs_write_helper(buffer, count, &val);
344
345         if (rc < 0)
346                 return rc;
347
348         if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
349                 return -ERANGE;
350
351         spin_lock(&svc->srv_lock);
352         if (val > svc->srv_nthrs_cpt_limit * svc->srv_ncpts) {
353                 spin_unlock(&svc->srv_lock);
354                 return -ERANGE;
355         }
356
357         svc->srv_nthrs_cpt_init = val / svc->srv_ncpts;
358
359         spin_unlock(&svc->srv_lock);
360
361         return count;
362 }
363
364 static int
365 ptlrpc_lprocfs_rd_threads_started(char *page, char **start, off_t off,
366                                   int count, int *eof, void *data)
367 {
368         struct ptlrpc_service *svc = data;
369         struct ptlrpc_service_part *svcpt;
370         int     total = 0;
371         int     i;
372
373         LASSERT(svc->srv_parts != NULL);
374         ptlrpc_service_for_each_part(svcpt, i, svc)
375                 total += svcpt->scp_nthrs_running;
376
377         return snprintf(page, count, "%d\n", total);
378 }
379
380 static int
381 ptlrpc_lprocfs_rd_threads_max(char *page, char **start, off_t off,
382                               int count, int *eof, void *data)
383 {
384         struct ptlrpc_service *svc = data;
385
386         return snprintf(page, count, "%d\n",
387                         svc->srv_nthrs_cpt_limit * svc->srv_ncpts);
388 }
389
390 static int
391 ptlrpc_lprocfs_wr_threads_max(struct file *file, const char *buffer,
392                               unsigned long count, void *data)
393 {
394         struct ptlrpc_service *svc = data;
395         int     val;
396         int     rc = lprocfs_write_helper(buffer, count, &val);
397
398         if (rc < 0)
399                 return rc;
400
401         if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
402                 return -ERANGE;
403
404         spin_lock(&svc->srv_lock);
405         if (val < svc->srv_nthrs_cpt_init * svc->srv_ncpts) {
406                 spin_unlock(&svc->srv_lock);
407                 return -ERANGE;
408         }
409
410         svc->srv_nthrs_cpt_limit = val / svc->srv_ncpts;
411
412         spin_unlock(&svc->srv_lock);
413
414         return count;
415 }
416
417 /**
418  * \addtogroup nrs
419  * @{
420  */
421 extern struct nrs_core nrs_core;
422
423 /**
424  * Translates \e ptlrpc_nrs_pol_state values to human-readable strings.
425  *
426  * \param[in] state The policy state
427  */
428 static const char *
429 nrs_state2str(enum ptlrpc_nrs_pol_state state)
430 {
431         switch (state) {
432         default:
433                 LBUG();
434         case NRS_POL_STATE_INVALID:
435                 return "invalid";
436         case NRS_POL_STATE_UNAVAIL:
437                 return "unavail";
438         case NRS_POL_STATE_STOPPED:
439                 return "stopped";
440         case NRS_POL_STATE_STOPPING:
441                 return "stopping";
442         case NRS_POL_STATE_STARTING:
443                 return "starting";
444         case NRS_POL_STATE_STARTED:
445                 return "started";
446         }
447 }
448
449 /**
450  * Obtains status information for \a policy.
451  *
452  * Information is copied in \a info.
453  *
454  * \param[in] policy The policy
455  * \param[out] info  Holds returned status information
456  */
457 void
458 nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
459                            struct ptlrpc_nrs_pol_info *info)
460 {
461         LASSERT(policy != NULL);
462         LASSERT(info != NULL);
463         LASSERT(spin_is_locked(&policy->pol_nrs->nrs_lock));
464
465         memcpy(info->pi_name, policy->pol_name, NRS_POL_NAME_MAX);
466
467         info->pi_fallback    = !!(policy->pol_flags & PTLRPC_NRS_FL_FALLBACK);
468         info->pi_state       = policy->pol_state;
469         /**
470          * XXX: These are accessed without holding
471          * ptlrpc_service_part::scp_req_lock.
472          */
473         info->pi_req_queued  = policy->pol_req_queued;
474         info->pi_req_started = policy->pol_req_started;
475 }
476
477 /**
478  * Reads and prints policy status information for all policies of a PTLRPC
479  * service.
480  */
481 static int
482 ptlrpc_lprocfs_rd_nrs(char *page, char **start, off_t off,
483                       int count, int *eof, void *data)
484 {
485         struct ptlrpc_service          *svc = data;
486         struct ptlrpc_service_part     *svcpt;
487         struct ptlrpc_nrs              *nrs;
488         struct ptlrpc_nrs_policy       *policy;
489         struct ptlrpc_nrs_pol_info     *infos;
490         struct ptlrpc_nrs_pol_info      tmp;
491         unsigned                        num_pols;
492         unsigned                        pol_idx = 0;
493         bool                            hp = false;
494         int                             i;
495         int                             rc = 0;
496         int                             rc2 = 0;
497         ENTRY;
498
499         /**
500          * Serialize NRS core lprocfs operations with policy registration/
501          * unregistration.
502          */
503         mutex_lock(&nrs_core.nrs_mutex);
504
505         /**
506          * Use the first service partition's regular NRS head in order to obtain
507          * the number of policies registered with NRS heads of this service. All
508          * service partitions will have the same number of policies.
509          */
510         nrs = nrs_svcpt2nrs(svc->srv_parts[0], false);
511
512         spin_lock(&nrs->nrs_lock);
513         num_pols = svc->srv_parts[0]->scp_nrs_reg.nrs_num_pols;
514         spin_unlock(&nrs->nrs_lock);
515
516         OBD_ALLOC(infos, num_pols * sizeof(*infos));
517         if (infos == NULL)
518                 GOTO(out, rc = -ENOMEM);
519 again:
520
521         ptlrpc_service_for_each_part(svcpt, i, svc) {
522                 nrs = nrs_svcpt2nrs(svcpt, hp);
523                 spin_lock(&nrs->nrs_lock);
524
525                 pol_idx = 0;
526
527                 cfs_list_for_each_entry(policy, &nrs->nrs_policy_list,
528                                         pol_list) {
529                         LASSERT(pol_idx < num_pols);
530
531                         nrs_policy_get_info_locked(policy, &tmp);
532                         /**
533                          * Copy values when handling the first service
534                          * partition.
535                          */
536                         if (i == 0) {
537                                 memcpy(infos[pol_idx].pi_name, tmp.pi_name,
538                                        NRS_POL_NAME_MAX);
539                                 memcpy(&infos[pol_idx].pi_state, &tmp.pi_state,
540                                        sizeof(tmp.pi_state));
541                                 infos[pol_idx].pi_fallback = tmp.pi_fallback;
542                                 /**
543                                  * For the rest of the service partitions
544                                  * sanity-check the values we get.
545                                  */
546                         } else {
547                                 LASSERT(strncmp(infos[pol_idx].pi_name,
548                                                 tmp.pi_name,
549                                                 NRS_POL_NAME_MAX) == 0);
550                                 /**
551                                  * Not asserting ptlrpc_nrs_pol_info::pi_state,
552                                  * because it may be different between
553                                  * instances of the same policy in different
554                                  * service partitions.
555                                  */
556                                 LASSERT(infos[pol_idx].pi_fallback ==
557                                         tmp.pi_fallback);
558                         }
559
560                         infos[pol_idx].pi_req_queued += tmp.pi_req_queued;
561                         infos[pol_idx].pi_req_started += tmp.pi_req_started;
562
563                         pol_idx++;
564                 }
565                 spin_unlock(&nrs->nrs_lock);
566         }
567
568         /**
569          * Policy status information output is in YAML format.
570          * For example:
571          *
572          *      regular_requests:
573          *        - name: fifo
574          *          state: started
575          *          fallback: yes
576          *          queued: 0
577          *          active: 0
578          *
579          *        - name: crrn
580          *          state: started
581          *          fallback: no
582          *          queued: 2015
583          *          active: 384
584          *
585          *      high_priority_requests:
586          *        - name: fifo
587          *          state: started
588          *          fallback: yes
589          *          queued: 0
590          *          active: 2
591          *
592          *        - name: crrn
593          *          state: stopped
594          *          fallback: no
595          *          queued: 0
596          *          active: 0
597          */
598         rc2 = snprintf(page + rc, count - rc,
599                        "%s\n", !hp ?
600                        "\nregular_requests:" :
601                        "high_priority_requests:");
602
603         if (rc2 >= count - rc) {
604                 /** Output was truncated */
605                 GOTO(out, rc = -EFBIG);
606         }
607
608         rc += rc2;
609
610         for (pol_idx = 0; pol_idx < num_pols; pol_idx++) {
611                 rc2 = snprintf(page + rc, count - rc,
612                                "  - name: %s\n"
613                                "    state: %s\n"
614                                "    fallback: %s\n"
615                                "    queued: %-20d\n"
616                                "    active: %-20d\n\n",
617                                infos[pol_idx].pi_name,
618                                nrs_state2str(infos[pol_idx].pi_state),
619                                infos[pol_idx].pi_fallback ? "yes" : "no",
620                                (int)infos[pol_idx].pi_req_queued,
621                                (int)infos[pol_idx].pi_req_started);
622
623
624                 if (rc2 >= count - rc) {
625                         /** Output was truncated */
626                         GOTO(out, rc = -EFBIG);
627                 }
628
629                 rc += rc2;
630         }
631
632         if (!hp && nrs_svc_has_hp(svc)) {
633                 memset(infos, 0, num_pols * sizeof(*infos));
634
635                 /**
636                  * Redo the processing for the service's HP NRS heads' policies.
637                  */
638                 hp = true;
639                 goto again;
640         }
641
642         *eof = 1;
643
644 out:
645         if (infos)
646                 OBD_FREE(infos, num_pols * sizeof(*infos));
647
648         mutex_unlock(&nrs_core.nrs_mutex);
649
650         RETURN(rc);
651 }
652
653 /**
654  * The longest valid command string is the maximum policy name size, plus the
655  * length of the " reg" substring
656  */
657 #define LPROCFS_NRS_WR_MAX_CMD  (NRS_POL_NAME_MAX + sizeof(" reg") - 1)
658
659 /**
660  * Starts and stops a given policy on a PTLRPC service.
661  *
662  * Commands consist of the policy name, followed by an optional [reg|hp] token;
663  * if the optional token is omitted, the operation is performed on both the
664  * regular and high-priority (if the service has one) NRS head.
665  */
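/* Illustrative usage, assuming the write is done from the service's proc
 * directory and using policy names as reported when reading this file:
 *
 *     echo "crrn"    > nrs_policies      # start crrn on both NRS heads
 *     echo "fifo hp" > nrs_policies      # start fifo on the HP head only
 */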
666 static int
667 ptlrpc_lprocfs_wr_nrs(struct file *file, const char *buffer,
668                       unsigned long count, void *data)
669 {
670         struct ptlrpc_service          *svc = data;
671         enum ptlrpc_nrs_queue_type      queue = PTLRPC_NRS_QUEUE_BOTH;
672         char                           *cmd;
673         char                           *cmd_copy = NULL;
674         char                           *token;
675         int                             rc = 0;
676         ENTRY;
677
678         if (count >= LPROCFS_NRS_WR_MAX_CMD)
679                 GOTO(out, rc = -EINVAL);
680
681         OBD_ALLOC(cmd, LPROCFS_NRS_WR_MAX_CMD);
682         if (cmd == NULL)
683                 GOTO(out, rc = -ENOMEM);
684         /**
685          * strsep() modifies its argument, so keep a copy
686          */
687         cmd_copy = cmd;
688
689         if (cfs_copy_from_user(cmd, buffer, count))
690                 GOTO(out, rc = -EFAULT);
691
692         cmd[count] = '\0';
693
694         token = strsep(&cmd, " ");
695
696         if (strlen(token) > NRS_POL_NAME_MAX - 1)
697                 GOTO(out, rc = -EINVAL);
698
699         /**
700          * No [reg|hp] token has been specified
701          */
702         if (cmd == NULL)
703                 goto default_queue;
704
705         /**
706          * The second token is either NULL, or an optional [reg|hp] string
707          */
708         if (strcmp(cmd, "reg") == 0)
709                 queue = PTLRPC_NRS_QUEUE_REG;
710         else if (strcmp(cmd, "hp") == 0)
711                 queue = PTLRPC_NRS_QUEUE_HP;
712         else
713                 GOTO(out, rc = -EINVAL);
714
715 default_queue:
716
717         if (queue == PTLRPC_NRS_QUEUE_HP && !nrs_svc_has_hp(svc))
718                 GOTO(out, rc = -ENODEV);
719         else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc))
720                 queue = PTLRPC_NRS_QUEUE_REG;
721
722         /**
723          * Serialize NRS core lprocfs operations with policy registration/
724          * unregistration.
725          */
726         mutex_lock(&nrs_core.nrs_mutex);
727
728         rc = ptlrpc_nrs_policy_control(svc, queue, token, PTLRPC_NRS_CTL_START,
729                                        false, NULL);
730
731         mutex_unlock(&nrs_core.nrs_mutex);
732 out:
733         if (cmd_copy)
734                 OBD_FREE(cmd_copy, LPROCFS_NRS_WR_MAX_CMD);
735
736         RETURN(rc < 0 ? rc : count);
737 }
738
739 /** @} nrs */
740
741 struct ptlrpc_srh_iterator {
742         int                     srhi_idx;
743         __u64                   srhi_seq;
744         struct ptlrpc_request   *srhi_req;
745 };
746
747 int
748 ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service_part *svcpt,
749                                     struct ptlrpc_srh_iterator *srhi,
750                                     __u64 seq)
751 {
752         cfs_list_t              *e;
753         struct ptlrpc_request   *req;
754
755         if (srhi->srhi_req != NULL &&
756             srhi->srhi_seq > svcpt->scp_hist_seq_culled &&
757             srhi->srhi_seq <= seq) {
758                 /* If srhi_req was set previously, hasn't been culled and
759                  * we're searching for a seq on or after it (i.e. more
760                  * recent), search from it onwards.
761                  * Since the service history is LRU (i.e. culled reqs will
762                  * be near the head), we shouldn't have to do long
763                  * re-scans */
764                 LASSERTF(srhi->srhi_seq == srhi->srhi_req->rq_history_seq,
765                          "%s:%d: seek seq "LPU64", request seq "LPU64"\n",
766                          svcpt->scp_service->srv_name, svcpt->scp_cpt,
767                          srhi->srhi_seq, srhi->srhi_req->rq_history_seq);
768                 LASSERTF(!cfs_list_empty(&svcpt->scp_hist_reqs),
769                          "%s:%d: seek offset "LPU64", request seq "LPU64", "
770                          "last culled "LPU64"\n",
771                          svcpt->scp_service->srv_name, svcpt->scp_cpt,
772                          seq, srhi->srhi_seq, svcpt->scp_hist_seq_culled);
773                 e = &srhi->srhi_req->rq_history_list;
774         } else {
775                 /* search from start */
776                 e = svcpt->scp_hist_reqs.next;
777         }
778
779         while (e != &svcpt->scp_hist_reqs) {
780                 req = cfs_list_entry(e, struct ptlrpc_request, rq_history_list);
781
782                 if (req->rq_history_seq >= seq) {
783                         srhi->srhi_seq = req->rq_history_seq;
784                         srhi->srhi_req = req;
785                         return 0;
786                 }
787                 e = e->next;
788         }
789
790         return -ENOENT;
791 }
792
793 /*
794  * The ptlrpc history sequence is used as the "position" of the seq_file.
795  * In some cases seq_read() will increase the "position" to indicate reading
796  * the next element; however, the low bits of the history sequence are
797  * reserved for the CPT id (see the comments before ptlrpc_req_add_history),
798  * which means seq_read() might change the CPT id of the history sequence and
799  * never finish reading the requests on a CPT. To make this work, we shift the
800  * CPT id to the high bits and the timestamp to the low bits, so seq_read()
801  * only increases the timestamp, which correctly indicates the next position.
802  */
803
804 /* convert seq_file pos to cpt */
805 #define PTLRPC_REQ_POS2CPT(svc, pos)                    \
806         ((svc)->srv_cpt_bits == 0 ? 0 :                 \
807          (__u64)(pos) >> (64 - (svc)->srv_cpt_bits))
808
809 /* make up seq_file pos from cpt */
810 #define PTLRPC_REQ_CPT2POS(svc, cpt)                    \
811         ((svc)->srv_cpt_bits == 0 ? 0 :                 \
812          (cpt) << (64 - (svc)->srv_cpt_bits))
813
814 /* convert sequence to position */
815 #define PTLRPC_REQ_SEQ2POS(svc, seq)                    \
816         ((svc)->srv_cpt_bits == 0 ? (seq) :             \
817          ((seq) >> (svc)->srv_cpt_bits) |               \
818          ((seq) << (64 - (svc)->srv_cpt_bits)))
819
820 /* convert position to sequence */
821 #define PTLRPC_REQ_POS2SEQ(svc, pos)                    \
822         ((svc)->srv_cpt_bits == 0 ? (pos) :             \
823          ((__u64)(pos) << (svc)->srv_cpt_bits) |        \
824          ((__u64)(pos) >> (64 - (svc)->srv_cpt_bits)))
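/* Worked example of the mapping above (illustrative, assuming
 * srv_cpt_bits == 2, i.e. at most four CPTs):
 *
 *     seq = (t << 2) | cpt                 CPT id in the low bits, counter t above
 *     pos = PTLRPC_REQ_SEQ2POS(svc, seq)
 *         = (seq >> 2) | (seq << 62)
 *         = (cpt << 62) | t                CPT id rotated into the high bits
 *
 * PTLRPC_REQ_POS2CPT() then recovers cpt from the top two bits and
 * PTLRPC_REQ_POS2SEQ() undoes the rotation, so when seq_read() advances the
 * position by one it only increments t and iteration stays within one CPT.
 */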
825
826 static void *
827 ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos)
828 {
829         struct ptlrpc_service           *svc = s->private;
830         struct ptlrpc_service_part      *svcpt;
831         struct ptlrpc_srh_iterator      *srhi;
832         unsigned int                    cpt;
833         int                             rc;
834         int                             i;
835
836         if (sizeof(loff_t) != sizeof(__u64)) { /* can't support */
837                 CWARN("Failed to read request history because size of loff_t "
838                       "%d can't match size of u64\n", (int)sizeof(loff_t));
839                 return NULL;
840         }
841
842         OBD_ALLOC(srhi, sizeof(*srhi));
843         if (srhi == NULL)
844                 return NULL;
845
846         srhi->srhi_seq = 0;
847         srhi->srhi_req = NULL;
848
849         cpt = PTLRPC_REQ_POS2CPT(svc, *pos);
850
851         ptlrpc_service_for_each_part(svcpt, i, svc) {
852                 if (i < cpt) /* skip */
853                         continue;
854                 if (i > cpt) /* make up the lowest position for this CPT */
855                         *pos = PTLRPC_REQ_CPT2POS(svc, i);
856
857                 spin_lock(&svcpt->scp_lock);
858                 rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi,
859                                 PTLRPC_REQ_POS2SEQ(svc, *pos));
860                 spin_unlock(&svcpt->scp_lock);
861                 if (rc == 0) {
862                         *pos = PTLRPC_REQ_SEQ2POS(svc, srhi->srhi_seq);
863                         srhi->srhi_idx = i;
864                         return srhi;
865                 }
866         }
867
868         OBD_FREE(srhi, sizeof(*srhi));
869         return NULL;
870 }
871
872 static void
873 ptlrpc_lprocfs_svc_req_history_stop(struct seq_file *s, void *iter)
874 {
875         struct ptlrpc_srh_iterator *srhi = iter;
876
877         if (srhi != NULL)
878                 OBD_FREE(srhi, sizeof(*srhi));
879 }
880
881 static void *
882 ptlrpc_lprocfs_svc_req_history_next(struct seq_file *s,
883                                     void *iter, loff_t *pos)
884 {
885         struct ptlrpc_service           *svc = s->private;
886         struct ptlrpc_srh_iterator      *srhi = iter;
887         struct ptlrpc_service_part      *svcpt;
888         __u64                           seq;
889         int                             rc;
890         int                             i;
891
892         for (i = srhi->srhi_idx; i < svc->srv_ncpts; i++) {
893                 svcpt = svc->srv_parts[i];
894
895                 if (i > srhi->srhi_idx) { /* reset iterator for a new CPT */
896                         srhi->srhi_req = NULL;
897                         seq = srhi->srhi_seq = 0;
898                 } else { /* the next sequence */
899                         seq = srhi->srhi_seq + (1 << svc->srv_cpt_bits);
900                 }
901
902                 spin_lock(&svcpt->scp_lock);
903                 rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, seq);
904                 spin_unlock(&svcpt->scp_lock);
905                 if (rc == 0) {
906                         *pos = PTLRPC_REQ_SEQ2POS(svc, srhi->srhi_seq);
907                         srhi->srhi_idx = i;
908                         return srhi;
909                 }
910         }
911
912         OBD_FREE(srhi, sizeof(*srhi));
913         return NULL;
914 }
915
916 /* common ost/mdt so_req_printer */
917 void target_print_req(void *seq_file, struct ptlrpc_request *req)
918 {
919         /* Called holding srv_lock with irqs disabled.
920          * Print specific req contents and a newline.
921          * CAVEAT EMPTOR: check request message length before printing!!!
922          * You might have received any old crap so you must be just as
923          * careful here as the service's request parser!!! */
924         struct seq_file *sf = seq_file;
925
926         switch (req->rq_phase) {
927         case RQ_PHASE_NEW:
928                 /* still awaiting a service thread's attention, or rejected
929                  * because the generic request message didn't unpack */
930                 seq_printf(sf, "<not swabbed>\n");
931                 break;
932         case RQ_PHASE_INTERPRET:
933                 /* being handled, so basic msg swabbed, and opc is valid
934                  * but racing with mds_handle() */
935         case RQ_PHASE_COMPLETE:
936                 /* been handled by mds_handle() reply state possibly still
937                  * volatile */
938                 seq_printf(sf, "opc %d\n", lustre_msg_get_opc(req->rq_reqmsg));
939                 break;
940         default:
941                 DEBUG_REQ(D_ERROR, req, "bad phase %d", req->rq_phase);
942         }
943 }
944 EXPORT_SYMBOL(target_print_req);
945
946 static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
947 {
948         struct ptlrpc_service           *svc = s->private;
949         struct ptlrpc_srh_iterator      *srhi = iter;
950         struct ptlrpc_service_part      *svcpt;
951         struct ptlrpc_request           *req;
952         int                             rc;
953
954         LASSERT(srhi->srhi_idx < svc->srv_ncpts);
955
956         svcpt = svc->srv_parts[srhi->srhi_idx];
957
958         spin_lock(&svcpt->scp_lock);
959
960         rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, srhi->srhi_seq);
961
962         if (rc == 0) {
963                 req = srhi->srhi_req;
964
965                 /* Print common req fields.
966                  * CAVEAT EMPTOR: we're racing with the service handler
967                  * here.  The request could contain any old crap, so you
968                  * must be just as careful as the service's request
969                  * parser. Currently I only print stuff here I know is OK
970                  * to look at because it was set up in request_in_callback()!!! */
971                 seq_printf(s, LPD64":%s:%s:x"LPU64":%d:%s:%ld:%lds(%+lds) ",
972                            req->rq_history_seq, libcfs_nid2str(req->rq_self),
973                            libcfs_id2str(req->rq_peer), req->rq_xid,
974                            req->rq_reqlen, ptlrpc_rqphase2str(req),
975                            req->rq_arrival_time.tv_sec,
976                            req->rq_sent - req->rq_arrival_time.tv_sec,
977                            req->rq_sent - req->rq_deadline);
978                 if (svc->srv_ops.so_req_printer == NULL)
979                         seq_printf(s, "\n");
980                 else
981                         svc->srv_ops.so_req_printer(s, srhi->srhi_req);
982         }
983
984         spin_unlock(&svcpt->scp_lock);
985         return rc;
986 }
987
988 static int
989 ptlrpc_lprocfs_svc_req_history_open(struct inode *inode, struct file *file)
990 {
991         static struct seq_operations sops = {
992                 .start = ptlrpc_lprocfs_svc_req_history_start,
993                 .stop  = ptlrpc_lprocfs_svc_req_history_stop,
994                 .next  = ptlrpc_lprocfs_svc_req_history_next,
995                 .show  = ptlrpc_lprocfs_svc_req_history_show,
996         };
997         struct proc_dir_entry *dp = PDE(inode);
998         struct seq_file       *seqf;
999         int                    rc;
1000
1001         LPROCFS_ENTRY_AND_CHECK(dp);
1002         rc = seq_open(file, &sops);
1003         if (rc) {
1004                 LPROCFS_EXIT();
1005                 return rc;
1006         }
1007
1008         seqf = file->private_data;
1009         seqf->private = dp->data;
1010         return 0;
1011 }
1012
1013 /* See also lprocfs_rd_timeouts */
1014 static int ptlrpc_lprocfs_rd_timeouts(char *page, char **start, off_t off,
1015                                       int count, int *eof, void *data)
1016 {
1017         struct ptlrpc_service           *svc = data;
1018         struct ptlrpc_service_part      *svcpt;
1019         struct dhms                     ts;
1020         time_t                          worstt;
1021         unsigned int                    cur;
1022         unsigned int                    worst;
1023         int                             nob = 0;
1024         int                             rc = 0;
1025         int                             i;
1026
1027         LASSERT(svc->srv_parts != NULL);
1028
1029         if (AT_OFF) {
1030                 rc += snprintf(page + rc, count - rc,
1031                                "adaptive timeouts off, using obd_timeout %u\n",
1032                                obd_timeout);
1033                 return rc;
1034         }
1035
1036         ptlrpc_service_for_each_part(svcpt, i, svc) {
1037                 cur     = at_get(&svcpt->scp_at_estimate);
1038                 worst   = svcpt->scp_at_estimate.at_worst_ever;
1039                 worstt  = svcpt->scp_at_estimate.at_worst_time;
1040                 s2dhms(&ts, cfs_time_current_sec() - worstt);
1041
1042                 nob = snprintf(page, count,
1043                                "%10s : cur %3u  worst %3u (at %ld, "
1044                                DHMS_FMT" ago) ", "service",
1045                                cur, worst, worstt, DHMS_VARS(&ts));
1046
1047                 nob = lprocfs_at_hist_helper(page, count, nob,
1048                                              &svcpt->scp_at_estimate);
1049                 rc += nob;
1050                 page += nob;
1051                 count -= nob;
1052
1053                 /*
1054                  * NB: for a Lustre proc read, the read count must be less
1055                  * than PAGE_SIZE; see the details in lprocfs_fops_read.
1056                  * It's unlikely that we exceed PAGE_SIZE here, because that
1057                  * would mean the service has more than 50 partitions.
1058                  */
1059                 if (count <= 0) {
1060                         CWARN("Can't fit AT information of %s in one page, "
1061                               "please contact with developer to fix this.\n",
1062                               svc->srv_name);
1063                         break;
1064                 }
1065         }
1066
1067         return rc;
1068 }
1069
1070 static int ptlrpc_lprocfs_rd_hp_ratio(char *page, char **start, off_t off,
1071                                       int count, int *eof, void *data)
1072 {
1073         struct ptlrpc_service *svc = data;
1074         int rc = snprintf(page, count, "%d", svc->srv_hpreq_ratio);
1075         return rc;
1076 }
1077
1078 static int ptlrpc_lprocfs_wr_hp_ratio(struct file *file, const char *buffer,
1079                                       unsigned long count, void *data)
1080 {
1081         struct ptlrpc_service           *svc = data;
1082         int     rc;
1083         int     val;
1084
1085         rc = lprocfs_write_helper(buffer, count, &val);
1086         if (rc < 0)
1087                 return rc;
1088
1089         if (val < 0)
1090                 return -ERANGE;
1091
1092         spin_lock(&svc->srv_lock);
1093         svc->srv_hpreq_ratio = val;
1094         spin_unlock(&svc->srv_lock);
1095
1096         return count;
1097 }
1098
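/* Register the per-service proc entries defined below (high_priority_ratio,
 * req_buffer_history_*, threads_*, timeouts, nrs_policies, req_history and
 * the stats file). They appear under the service's proc directory, for
 * example /proc/fs/lustre/ost/OSS/ost_io/ (path illustrative; the actual
 * root is the "entry" passed in by the caller). */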
1099 void ptlrpc_lprocfs_register_service(struct proc_dir_entry *entry,
1100                                      struct ptlrpc_service *svc)
1101 {
1102         struct lprocfs_vars lproc_vars[] = {
1103                 {.name       = "high_priority_ratio",
1104                  .read_fptr  = ptlrpc_lprocfs_rd_hp_ratio,
1105                  .write_fptr = ptlrpc_lprocfs_wr_hp_ratio,
1106                  .data       = svc},
1107                 {.name       = "req_buffer_history_len",
1108                  .read_fptr  = ptlrpc_lprocfs_read_req_history_len,
1109                  .data       = svc},
1110                 {.name       = "req_buffer_history_max",
1111                  .write_fptr = ptlrpc_lprocfs_write_req_history_max,
1112                  .read_fptr  = ptlrpc_lprocfs_read_req_history_max,
1113                  .data       = svc},
1114                 {.name       = "threads_min",
1115                  .read_fptr  = ptlrpc_lprocfs_rd_threads_min,
1116                  .write_fptr = ptlrpc_lprocfs_wr_threads_min,
1117                  .data       = svc},
1118                 {.name       = "threads_max",
1119                  .read_fptr  = ptlrpc_lprocfs_rd_threads_max,
1120                  .write_fptr = ptlrpc_lprocfs_wr_threads_max,
1121                  .data       = svc},
1122                 {.name       = "threads_started",
1123                  .read_fptr  = ptlrpc_lprocfs_rd_threads_started,
1124                  .data       = svc},
1125                 {.name       = "timeouts",
1126                  .read_fptr  = ptlrpc_lprocfs_rd_timeouts,
1127                  .data       = svc},
1128                 {.name       = "nrs_policies",
1129                  .read_fptr  = ptlrpc_lprocfs_rd_nrs,
1130                  .write_fptr = ptlrpc_lprocfs_wr_nrs,
1131                  .data       = svc},
1132                 {NULL}
1133         };
1134         static struct file_operations req_history_fops = {
1135                 .owner       = THIS_MODULE,
1136                 .open        = ptlrpc_lprocfs_svc_req_history_open,
1137                 .read        = seq_read,
1138                 .llseek      = seq_lseek,
1139                 .release     = lprocfs_seq_release,
1140         };
1141
1142         int rc;
1143
1144         ptlrpc_lprocfs_register(entry, svc->srv_name,
1145                                 "stats", &svc->srv_procroot,
1146                                 &svc->srv_stats);
1147
1148         if (svc->srv_procroot == NULL)
1149                 return;
1150
1151         lprocfs_add_vars(svc->srv_procroot, lproc_vars, NULL);
1152
1153         rc = lprocfs_seq_create(svc->srv_procroot, "req_history",
1154                                 0400, &req_history_fops, svc);
1155         if (rc)
1156                 CWARN("Error adding the req_history file\n");
1157 }
1158
1159 void ptlrpc_lprocfs_register_obd(struct obd_device *obddev)
1160 {
1161         ptlrpc_lprocfs_register(obddev->obd_proc_entry, NULL, "stats",
1162                                 &obddev->obd_svc_procroot,
1163                                 &obddev->obd_svc_stats);
1164 }
1165 EXPORT_SYMBOL(ptlrpc_lprocfs_register_obd);
1166
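/* Client-side accounting for an RPC that has just been sent: bump the
 * per-import counter for its opcode. LDLM_ENQUEUE and MDS_REINT are skipped
 * here; those RPCs are broken down by type in the extra-opcode counters
 * above (ldlm_*_enqueue, mds_reint_*). */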
1167 void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount)
1168 {
1169         struct lprocfs_stats *svc_stats;
1170         __u32 op = lustre_msg_get_opc(req->rq_reqmsg);
1171         int opc = opcode_offset(op);
1172
1173         svc_stats = req->rq_import->imp_obd->obd_svc_stats;
1174         if (svc_stats == NULL || opc <= 0)
1175                 return;
1176         LASSERT(opc < LUSTRE_MAX_OPCODES);
1177         if (!(op == LDLM_ENQUEUE || op == MDS_REINT))
1178                 lprocfs_counter_add(svc_stats, opc + EXTRA_MAX_OPCODES, amount);
1179 }
1180
1181 void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes)
1182 {
1183         struct lprocfs_stats *svc_stats;
1184         int idx;
1185
1186         if (!req->rq_import)
1187                 return;
1188         svc_stats = req->rq_import->imp_obd->obd_svc_stats;
1189         if (!svc_stats)
1190                 return;
1191         idx = lustre_msg_get_opc(req->rq_reqmsg);
1192         switch (idx) {
1193         case OST_READ:
1194                 idx = BRW_READ_BYTES + PTLRPC_LAST_CNTR;
1195                 break;
1196         case OST_WRITE:
1197                 idx = BRW_WRITE_BYTES + PTLRPC_LAST_CNTR;
1198                 break;
1199         default:
1200                 LASSERTF(0, "unsupported opcode %u\n", idx);
1201                 break;
1202         }
1203
1204         lprocfs_counter_add(svc_stats, idx, bytes);
1205 }
1206
1207 EXPORT_SYMBOL(ptlrpc_lprocfs_brw);
1208
1209 void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc)
1210 {
1211         if (svc->srv_procroot != NULL)
1212                 lprocfs_remove(&svc->srv_procroot);
1213
1214         if (svc->srv_stats)
1215                 lprocfs_free_stats(&svc->srv_stats);
1216 }
1217
1218 void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd)
1219 {
1220         if (obd->obd_svc_procroot)
1221                 lprocfs_remove(&obd->obd_svc_procroot);
1222
1223         if (obd->obd_svc_stats)
1224                 lprocfs_free_stats(&obd->obd_svc_stats);
1225 }
1226 EXPORT_SYMBOL(ptlrpc_lprocfs_unregister_obd);
1227
1228
1229 #define BUFLEN (UUID_MAX + 5)
1230
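/* Illustrative usage (the exact proc path depends on the obd device):
 *
 *     echo "nid:192.168.0.1@tcp" > /proc/fs/lustre/.../evict_client
 *     echo "uuid:<client_uuid>"  > /proc/fs/lustre/.../evict_client
 *
 * A bare string without a "nid:" or "uuid:" prefix is treated as a UUID. */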
1231 int lprocfs_wr_evict_client(struct file *file, const char *buffer,
1232                             unsigned long count, void *data)
1233 {
1234         struct obd_device *obd = data;
1235         char              *kbuf;
1236         char              *tmpbuf;
1237
1238         OBD_ALLOC(kbuf, BUFLEN);
1239         if (kbuf == NULL)
1240                 return -ENOMEM;
1241
1242         /*
1243          * OBD_ALLOC() will zero kbuf, but we only copy BUFLEN - 1
1244          * bytes into kbuf, to ensure that the string is NUL-terminated.
1245          * UUID_MAX should include a trailing NUL already.
1246          */
1247         if (cfs_copy_from_user(kbuf, buffer,
1248                                min_t(unsigned long, BUFLEN - 1, count))) {
1249                 count = -EFAULT;
1250                 goto out;
1251         }
1252         tmpbuf = cfs_firststr(kbuf, min_t(unsigned long, BUFLEN - 1, count));
1253         /* Kludge (deadlock situation): the lprocfs lock is already held,
1254          * since the client is evicted by writing the client's
1255          * uuid/nid to the procfs "evict_client" entry. However,
1256          * obd_export_evict_by_uuid() will call lprocfs_remove() to destroy
1257          * the proc entries under the export being destroyed, so the lock
1258          * has to be dropped here first.
1259          * - jay, jxiong@clusterfs.com */
1260         LPROCFS_EXIT();
1261         class_incref(obd, __FUNCTION__, cfs_current());
1262
1263         if (strncmp(tmpbuf, "nid:", 4) == 0)
1264                 obd_export_evict_by_nid(obd, tmpbuf + 4);
1265         else if (strncmp(tmpbuf, "uuid:", 5) == 0)
1266                 obd_export_evict_by_uuid(obd, tmpbuf + 5);
1267         else
1268                 obd_export_evict_by_uuid(obd, tmpbuf);
1269
1270         class_decref(obd, __FUNCTION__, cfs_current());
1271         LPROCFS_ENTRY();
1272
1273 out:
1274         OBD_FREE(kbuf, BUFLEN);
1275         return count;
1276 }
1277 EXPORT_SYMBOL(lprocfs_wr_evict_client);
1278
1279 #undef BUFLEN
1280
1281 int lprocfs_wr_ping(struct file *file, const char *buffer,
1282                     unsigned long count, void *data)
1283 {
1284         struct obd_device     *obd = data;
1285         struct ptlrpc_request *req;
1286         int                    rc;
1287         ENTRY;
1288
1289         LPROCFS_CLIMP_CHECK(obd);
1290         req = ptlrpc_prep_ping(obd->u.cli.cl_import);
1291         LPROCFS_CLIMP_EXIT(obd);
1292         if (req == NULL)
1293                 RETURN(-ENOMEM);
1294
1295         req->rq_send_state = LUSTRE_IMP_FULL;
1296
1297         rc = ptlrpc_queue_wait(req);
1298
1299         ptlrpc_req_finished(req);
1300         if (rc >= 0)
1301                 RETURN(count);
1302         RETURN(rc);
1303 }
1304 EXPORT_SYMBOL(lprocfs_wr_ping);
1305
1306 /* Write the connection UUID to this file to attempt to connect to that node.
1307  * The connection UUID is a node's primary NID. For example,
1308  * "echo connection=192.168.0.1@tcp0::instance > .../import".
1309  */
1310 int lprocfs_wr_import(struct file *file, const char *buffer,
1311                       unsigned long count, void *data)
1312 {
1313         struct obd_device *obd = data;
1314         struct obd_import *imp = obd->u.cli.cl_import;
1315         char *kbuf = NULL;
1316         char *uuid;
1317         char *ptr;
1318         int do_reconn = 1;
1319         const char prefix[] = "connection=";
1320         const int prefix_len = sizeof(prefix) - 1;
1321
1322         if (count > CFS_PAGE_SIZE - 1 || count <= prefix_len)
1323                 return -EINVAL;
1324
1325         OBD_ALLOC(kbuf, count + 1);
1326         if (kbuf == NULL)
1327                 return -ENOMEM;
1328
1329         if (cfs_copy_from_user(kbuf, buffer, count))
1330                 GOTO(out, count = -EFAULT);
1331
1332         kbuf[count] = 0;
1333
1334         /* only support connection=uuid::instance now */
1335         if (strncmp(prefix, kbuf, prefix_len) != 0)
1336                 GOTO(out, count = -EINVAL);
1337
1338         uuid = kbuf + prefix_len;
1339         ptr = strstr(uuid, "::");
1340         if (ptr) {
1341                 __u32 inst;
1342                 char *endptr;
1343
1344                 *ptr = 0;
1345                 do_reconn = 0;
1346                 ptr += strlen("::");
1347                 inst = simple_strtol(ptr, &endptr, 10);
1348                 if (*endptr) {
1349                         CERROR("config: wrong instance # %s\n", ptr);
1350                 } else if (inst != imp->imp_connect_data.ocd_instance) {
1351                         CDEBUG(D_INFO, "IR: %s is connecting to an obsoleted "
1352                                "target(%u/%u), reconnecting...\n",
1353                                imp->imp_obd->obd_name,
1354                                imp->imp_connect_data.ocd_instance, inst);
1355                         do_reconn = 1;
1356                 } else {
1357                         CDEBUG(D_INFO, "IR: %s has already been connecting to "
1358                                "new target(%u)\n",
1359                                imp->imp_obd->obd_name, inst);
1360                 }
1361         }
1362
1363         if (do_reconn)
1364                 ptlrpc_recover_import(imp, uuid, 1);
1365
1366 out:
1367         OBD_FREE(kbuf, count + 1);
1368         return count;
1369 }
1370 EXPORT_SYMBOL(lprocfs_wr_import);
1371
1372 int lprocfs_rd_pinger_recov(char *page, char **start, off_t off,
1373                             int count, int *eof, void *data)
1374 {
1375         struct obd_device *obd = data;
1376         struct obd_import *imp = obd->u.cli.cl_import;
1377         int rc;
1378
1379         LPROCFS_CLIMP_CHECK(obd);
1380         rc = snprintf(page, count, "%d\n", !imp->imp_no_pinger_recover);
1381         LPROCFS_CLIMP_EXIT(obd);
1382
1383         return rc;
1384 }
1385 EXPORT_SYMBOL(lprocfs_rd_pinger_recov);
1386
1387 int lprocfs_wr_pinger_recov(struct file *file, const char *buffer,
1388                       unsigned long count, void *data)
1389 {
1390         struct obd_device *obd = data;
1391         struct client_obd *cli = &obd->u.cli;
1392         struct obd_import *imp = cli->cl_import;
1393         int rc, val;
1394
1395         rc = lprocfs_write_helper(buffer, count, &val);
1396         if (rc < 0)
1397                 return rc;
1398
1399         if (val != 0 && val != 1)
1400                 return -ERANGE;
1401
1402         LPROCFS_CLIMP_CHECK(obd);
1403         spin_lock(&imp->imp_lock);
1404         imp->imp_no_pinger_recover = !val;
1405         spin_unlock(&imp->imp_lock);
1406         LPROCFS_CLIMP_EXIT(obd);
1407
1408         return count;
1409
1410 }
1411 EXPORT_SYMBOL(lprocfs_wr_pinger_recov);
1412
1413 #endif /* LPROCFS */