LU-3030 build: Update Master Copyrights pre 2.4 split
lustre/ptlrpc/lproc_ptlrpc.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_CLASS

#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <obd_support.h>
#include <obd.h>
#include <lprocfs_status.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <obd_class.h>
#include "ptlrpc_internal.h"

struct ll_rpc_opcode {
     __u32       opcode;
     const char *opname;
} ll_rpc_opcode_table[LUSTRE_MAX_OPCODES] = {
        { OST_REPLY,        "ost_reply" },
        { OST_GETATTR,      "ost_getattr" },
        { OST_SETATTR,      "ost_setattr" },
        { OST_READ,         "ost_read" },
        { OST_WRITE,        "ost_write" },
        { OST_CREATE,       "ost_create" },
        { OST_DESTROY,      "ost_destroy" },
        { OST_GET_INFO,     "ost_get_info" },
        { OST_CONNECT,      "ost_connect" },
        { OST_DISCONNECT,   "ost_disconnect" },
        { OST_PUNCH,        "ost_punch" },
        { OST_OPEN,         "ost_open" },
        { OST_CLOSE,        "ost_close" },
        { OST_STATFS,       "ost_statfs" },
        { 14,                NULL },    /* formerly OST_SAN_READ */
        { 15,                NULL },    /* formerly OST_SAN_WRITE */
        { OST_SYNC,         "ost_sync" },
        { OST_SET_INFO,     "ost_set_info" },
        { OST_QUOTACHECK,   "ost_quotacheck" },
        { OST_QUOTACTL,     "ost_quotactl" },
        { OST_QUOTA_ADJUST_QUNIT, "ost_quota_adjust_qunit" },
        { MDS_GETATTR,      "mds_getattr" },
        { MDS_GETATTR_NAME, "mds_getattr_lock" },
        { MDS_CLOSE,        "mds_close" },
        { MDS_REINT,        "mds_reint" },
        { MDS_READPAGE,     "mds_readpage" },
        { MDS_CONNECT,      "mds_connect" },
        { MDS_DISCONNECT,   "mds_disconnect" },
        { MDS_GETSTATUS,    "mds_getstatus" },
        { MDS_STATFS,       "mds_statfs" },
        { MDS_PIN,          "mds_pin" },
        { MDS_UNPIN,        "mds_unpin" },
        { MDS_SYNC,         "mds_sync" },
        { MDS_DONE_WRITING, "mds_done_writing" },
        { MDS_SET_INFO,     "mds_set_info" },
        { MDS_QUOTACHECK,   "mds_quotacheck" },
        { MDS_QUOTACTL,     "mds_quotactl" },
        { MDS_GETXATTR,     "mds_getxattr" },
        { MDS_SETXATTR,     "mds_setxattr" },
        { MDS_WRITEPAGE,    "mds_writepage" },
        { MDS_IS_SUBDIR,    "mds_is_subdir" },
        { MDS_GET_INFO,     "mds_get_info" },
        { MDS_HSM_STATE_GET, "mds_hsm_state_get" },
        { MDS_HSM_STATE_SET, "mds_hsm_state_set" },
        { MDS_HSM_ACTION,   "mds_hsm_action" },
        { MDS_HSM_PROGRESS, "mds_hsm_progress" },
        { MDS_HSM_REQUEST,  "mds_hsm_request" },
        { MDS_HSM_CT_REGISTER, "mds_hsm_ct_register" },
        { MDS_HSM_CT_UNREGISTER, "mds_hsm_ct_unregister" },
        { MDS_SWAP_LAYOUTS,     "mds_swap_layouts" },
        { LDLM_ENQUEUE,     "ldlm_enqueue" },
        { LDLM_CONVERT,     "ldlm_convert" },
        { LDLM_CANCEL,      "ldlm_cancel" },
        { LDLM_BL_CALLBACK, "ldlm_bl_callback" },
        { LDLM_CP_CALLBACK, "ldlm_cp_callback" },
        { LDLM_GL_CALLBACK, "ldlm_gl_callback" },
        { LDLM_SET_INFO,    "ldlm_set_info" },
        { MGS_CONNECT,      "mgs_connect" },
        { MGS_DISCONNECT,   "mgs_disconnect" },
        { MGS_EXCEPTION,    "mgs_exception" },
        { MGS_TARGET_REG,   "mgs_target_reg" },
        { MGS_TARGET_DEL,   "mgs_target_del" },
        { MGS_SET_INFO,     "mgs_set_info" },
        { MGS_CONFIG_READ,  "mgs_config_read" },
        { OBD_PING,         "obd_ping" },
        { OBD_LOG_CANCEL,   "llog_origin_handle_cancel" },
        { OBD_QC_CALLBACK,  "obd_quota_callback" },
        { OBD_IDX_READ,     "dt_index_read" },
        { LLOG_ORIGIN_HANDLE_CREATE,     "llog_origin_handle_create" },
        { LLOG_ORIGIN_HANDLE_NEXT_BLOCK, "llog_origin_handle_next_block" },
        { LLOG_ORIGIN_HANDLE_READ_HEADER, "llog_origin_handle_read_header" },
        { LLOG_ORIGIN_HANDLE_WRITE_REC,  "llog_origin_handle_write_rec" },
        { LLOG_ORIGIN_HANDLE_CLOSE,      "llog_origin_handle_close" },
        { LLOG_ORIGIN_CONNECT,           "llog_origin_connect" },
        { LLOG_CATINFO,                  "llog_catinfo" },
        { LLOG_ORIGIN_HANDLE_PREV_BLOCK, "llog_origin_handle_prev_block" },
        { LLOG_ORIGIN_HANDLE_DESTROY,    "llog_origin_handle_destroy" },
        { QUOTA_DQACQ,      "quota_acquire" },
        { QUOTA_DQREL,      "quota_release" },
        { SEQ_QUERY,        "seq_query" },
        { SEC_CTX_INIT,     "sec_ctx_init" },
        { SEC_CTX_INIT_CONT, "sec_ctx_init_cont" },
        { SEC_CTX_FINI,     "sec_ctx_fini" },
        { FLD_QUERY,        "fld_query" },
        { UPDATE_OBJ,       "update_obj" },
};

struct ll_eopcode {
     __u32       opcode;
     const char *opname;
} ll_eopcode_table[EXTRA_LAST_OPC] = {
        { LDLM_GLIMPSE_ENQUEUE, "ldlm_glimpse_enqueue" },
        { LDLM_PLAIN_ENQUEUE,   "ldlm_plain_enqueue" },
        { LDLM_EXTENT_ENQUEUE,  "ldlm_extent_enqueue" },
        { LDLM_FLOCK_ENQUEUE,   "ldlm_flock_enqueue" },
        { LDLM_IBITS_ENQUEUE,   "ldlm_ibits_enqueue" },
        { MDS_REINT_SETATTR,    "mds_reint_setattr" },
        { MDS_REINT_CREATE,     "mds_reint_create" },
        { MDS_REINT_LINK,       "mds_reint_link" },
        { MDS_REINT_UNLINK,     "mds_reint_unlink" },
        { MDS_REINT_RENAME,     "mds_reint_rename" },
        { MDS_REINT_OPEN,       "mds_reint_open" },
        { MDS_REINT_SETXATTR,   "mds_reint_setxattr" },
        { BRW_READ_BYTES,       "read_bytes" },
        { BRW_WRITE_BYTES,      "write_bytes" },
};

const char *ll_opcode2str(__u32 opcode)
{
        /* When one of the assertions below fails, chances are that:
         *     1) A new opcode was added in include/lustre/lustre_idl.h,
         *        but is missing from the table above.
         * or  2) The opcode space was renumbered or rearranged,
         *        and the opcode_offset() function in
         *        ptlrpc_internal.h needs to be modified.
         */
        __u32 offset = opcode_offset(opcode);
        LASSERTF(offset < LUSTRE_MAX_OPCODES,
                 "offset %u >= LUSTRE_MAX_OPCODES %u\n",
                 offset, LUSTRE_MAX_OPCODES);
        LASSERTF(ll_rpc_opcode_table[offset].opcode == opcode,
                 "ll_rpc_opcode_table[%u].opcode %u != opcode %u\n",
                 offset, ll_rpc_opcode_table[offset].opcode, opcode);
        return ll_rpc_opcode_table[offset].opname;
}

const char *ll_eopcode2str(__u32 opcode)
{
        LASSERT(ll_eopcode_table[opcode].opcode == opcode);
        return ll_eopcode_table[opcode].opname;
}
#ifdef LPROCFS
void ptlrpc_lprocfs_register(struct proc_dir_entry *root, char *dir,
                             char *name, struct proc_dir_entry **procroot_ret,
                             struct lprocfs_stats **stats_ret)
{
        struct proc_dir_entry *svc_procroot;
        struct lprocfs_stats *svc_stats;
        int i, rc;
        unsigned int svc_counter_config = LPROCFS_CNTR_AVGMINMAX |
                                          LPROCFS_CNTR_STDDEV;

        LASSERT(*procroot_ret == NULL);
        LASSERT(*stats_ret == NULL);

        svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES + LUSTRE_MAX_OPCODES,
                                        0);
        if (svc_stats == NULL)
                return;

        if (dir) {
                svc_procroot = lprocfs_register(dir, root, NULL, NULL);
                if (IS_ERR(svc_procroot)) {
                        lprocfs_free_stats(&svc_stats);
                        return;
                }
        } else {
                svc_procroot = root;
        }

        lprocfs_counter_init(svc_stats, PTLRPC_REQWAIT_CNTR,
                             svc_counter_config, "req_waittime", "usec");
        lprocfs_counter_init(svc_stats, PTLRPC_REQQDEPTH_CNTR,
                             svc_counter_config, "req_qdepth", "reqs");
        lprocfs_counter_init(svc_stats, PTLRPC_REQACTIVE_CNTR,
                             svc_counter_config, "req_active", "reqs");
        lprocfs_counter_init(svc_stats, PTLRPC_TIMEOUT,
                             svc_counter_config, "req_timeout", "sec");
        lprocfs_counter_init(svc_stats, PTLRPC_REQBUF_AVAIL_CNTR,
                             svc_counter_config, "reqbuf_avail", "bufs");
        for (i = 0; i < EXTRA_LAST_OPC; i++) {
                char *units;

                switch (i) {
                case BRW_WRITE_BYTES:
                case BRW_READ_BYTES:
                        units = "bytes";
                        break;
                default:
                        units = "reqs";
                        break;
                }
                lprocfs_counter_init(svc_stats, PTLRPC_LAST_CNTR + i,
                                     svc_counter_config,
                                     ll_eopcode2str(i), units);
        }
        for (i = 0; i < LUSTRE_MAX_OPCODES; i++) {
                __u32 opcode = ll_rpc_opcode_table[i].opcode;
                lprocfs_counter_init(svc_stats,
                                     EXTRA_MAX_OPCODES + i, svc_counter_config,
                                     ll_opcode2str(opcode), "usec");
        }

        rc = lprocfs_register_stats(svc_procroot, name, svc_stats);
        if (rc < 0) {
                if (dir)
                        lprocfs_remove(&svc_procroot);
                lprocfs_free_stats(&svc_stats);
        } else {
                if (dir)
                        *procroot_ret = svc_procroot;
                *stats_ret = svc_stats;
        }
}

static int
ptlrpc_lprocfs_read_req_history_len(char *page, char **start, off_t off,
                                    int count, int *eof, void *data)
{
        struct ptlrpc_service *svc = data;
        struct ptlrpc_service_part *svcpt;
        int     total = 0;
        int     i;

        *eof = 1;

        ptlrpc_service_for_each_part(svcpt, i, svc)
                total += svcpt->scp_hist_nrqbds;

        return snprintf(page, count, "%d\n", total);
}

static int
ptlrpc_lprocfs_read_req_history_max(char *page, char **start, off_t off,
                                    int count, int *eof, void *data)
{
        struct ptlrpc_service *svc = data;
        struct ptlrpc_service_part *svcpt;
        int     total = 0;
        int     i;

        *eof = 1;
        ptlrpc_service_for_each_part(svcpt, i, svc)
                total += svc->srv_hist_nrqbds_cpt_max;

        return snprintf(page, count, "%d\n", total);
}

static int
ptlrpc_lprocfs_write_req_history_max(struct file *file, const char *buffer,
                                     unsigned long count, void *data)
{
        struct ptlrpc_service      *svc = data;
        int                         bufpages;
        int                         val;
        int                         rc;

        rc = lprocfs_write_helper(buffer, count, &val);
        if (rc < 0)
                return rc;

        if (val < 0)
                return -ERANGE;

        /* This sanity check is more of an insanity check; we can still
         * hose a kernel by allowing the request history to grow too
         * far. */
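        /* Illustrative arithmetic (assuming 4 KiB pages): with a 32 KiB
         * srv_buf_size, bufpages is 8, so val is capped at
         * cfs_num_physpages / 16 buffers, i.e. the retained history may
         * occupy at most half of physical memory. */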
        bufpages = (svc->srv_buf_size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
        if (val > cfs_num_physpages / (2 * bufpages))
                return -ERANGE;

        spin_lock(&svc->srv_lock);

        if (val == 0)
                svc->srv_hist_nrqbds_cpt_max = 0;
        else
                svc->srv_hist_nrqbds_cpt_max = max(1, (val / svc->srv_ncpts));

        spin_unlock(&svc->srv_lock);

        return count;
}

static int
ptlrpc_lprocfs_rd_threads_min(char *page, char **start, off_t off,
                              int count, int *eof, void *data)
{
        struct ptlrpc_service *svc = data;

        return snprintf(page, count, "%d\n",
                        svc->srv_nthrs_cpt_init * svc->srv_ncpts);
}

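/*
 * The value written to threads_min is a total across all service partitions;
 * it is divided by srv_ncpts to give the per-partition minimum (e.g. writing
 * 64 on a service with 4 partitions sets srv_nthrs_cpt_init to 16).
 */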
static int
ptlrpc_lprocfs_wr_threads_min(struct file *file, const char *buffer,
                              unsigned long count, void *data)
{
        struct ptlrpc_service      *svc = data;
        int     val;
        int     rc = lprocfs_write_helper(buffer, count, &val);

        if (rc < 0)
                return rc;

        if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
                return -ERANGE;

        spin_lock(&svc->srv_lock);
        if (val > svc->srv_nthrs_cpt_limit * svc->srv_ncpts) {
                spin_unlock(&svc->srv_lock);
                return -ERANGE;
        }

        svc->srv_nthrs_cpt_init = val / svc->srv_ncpts;

        spin_unlock(&svc->srv_lock);

        return count;
}

static int
ptlrpc_lprocfs_rd_threads_started(char *page, char **start, off_t off,
                                  int count, int *eof, void *data)
{
        struct ptlrpc_service *svc = data;
        struct ptlrpc_service_part *svcpt;
        int     total = 0;
        int     i;

        ptlrpc_service_for_each_part(svcpt, i, svc)
                total += svcpt->scp_nthrs_running;

        return snprintf(page, count, "%d\n", total);
}

static int
ptlrpc_lprocfs_rd_threads_max(char *page, char **start, off_t off,
                              int count, int *eof, void *data)
{
        struct ptlrpc_service *svc = data;

        return snprintf(page, count, "%d\n",
                        svc->srv_nthrs_cpt_limit * svc->srv_ncpts);
}

static int
ptlrpc_lprocfs_wr_threads_max(struct file *file, const char *buffer,
                              unsigned long count, void *data)
{
        struct ptlrpc_service *svc = data;
        int     val;
        int     rc = lprocfs_write_helper(buffer, count, &val);

        if (rc < 0)
                return rc;

        if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
                return -ERANGE;

        spin_lock(&svc->srv_lock);
        if (val < svc->srv_nthrs_cpt_init * svc->srv_ncpts) {
                spin_unlock(&svc->srv_lock);
                return -ERANGE;
        }

        svc->srv_nthrs_cpt_limit = val / svc->srv_ncpts;

        spin_unlock(&svc->srv_lock);

        return count;
}

/**
 * \addtogroup nrs
 * @{
 */
extern struct nrs_core nrs_core;

/**
 * Translates \e ptlrpc_nrs_pol_state values to human-readable strings.
 *
 * \param[in] state The policy state
 */
static const char *nrs_state2str(enum ptlrpc_nrs_pol_state state)
{
        switch (state) {
        default:
                LBUG();
        case NRS_POL_STATE_INVALID:
                return "invalid";
        case NRS_POL_STATE_STOPPED:
                return "stopped";
        case NRS_POL_STATE_STOPPING:
                return "stopping";
        case NRS_POL_STATE_STARTING:
                return "starting";
        case NRS_POL_STATE_STARTED:
                return "started";
        }
}

/**
 * Obtains status information for \a policy.
 *
 * Information is copied in \a info.
 *
 * \param[in] policy The policy
 * \param[out] info  Holds returned status information
 */
void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
                                struct ptlrpc_nrs_pol_info *info)
{
        LASSERT(policy != NULL);
        LASSERT(info != NULL);
        LASSERT(spin_is_locked(&policy->pol_nrs->nrs_lock));

        memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX);

        info->pi_fallback    = !!(policy->pol_flags & PTLRPC_NRS_FL_FALLBACK);
        info->pi_state       = policy->pol_state;
        /**
         * XXX: These are accessed without holding
         * ptlrpc_service_part::scp_req_lock.
         */
        info->pi_req_queued  = policy->pol_req_queued;
        info->pi_req_started = policy->pol_req_started;
}

/**
 * Reads and prints policy status information for all policies of a PTLRPC
 * service.
 */
static int ptlrpc_lprocfs_rd_nrs(char *page, char **start, off_t off,
                                 int count, int *eof, void *data)
{
        struct ptlrpc_service          *svc = data;
        struct ptlrpc_service_part     *svcpt;
        struct ptlrpc_nrs              *nrs;
        struct ptlrpc_nrs_policy       *policy;
        struct ptlrpc_nrs_pol_info     *infos;
        struct ptlrpc_nrs_pol_info      tmp;
        unsigned                        num_pols;
        unsigned                        pol_idx = 0;
        bool                            hp = false;
        int                             i;
        int                             rc = 0;
        int                             rc2 = 0;
        ENTRY;

        /**
         * Serialize NRS core lprocfs operations with policy registration/
         * unregistration.
         */
        mutex_lock(&nrs_core.nrs_mutex);

        /**
         * Use the first service partition's regular NRS head in order to
         * obtain the number of policies registered with NRS heads of this
         * service. All service partitions will have the same number of
         * policies.
         */
        nrs = nrs_svcpt2nrs(svc->srv_parts[0], false);

        spin_lock(&nrs->nrs_lock);
        num_pols = svc->srv_parts[0]->scp_nrs_reg.nrs_num_pols;
        spin_unlock(&nrs->nrs_lock);

        OBD_ALLOC(infos, num_pols * sizeof(*infos));
        if (infos == NULL)
                GOTO(out, rc = -ENOMEM);
again:

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                nrs = nrs_svcpt2nrs(svcpt, hp);
                spin_lock(&nrs->nrs_lock);

                pol_idx = 0;

                cfs_list_for_each_entry(policy, &nrs->nrs_policy_list,
                                        pol_list) {
                        LASSERT(pol_idx < num_pols);

                        nrs_policy_get_info_locked(policy, &tmp);
                        /**
                         * Copy values when handling the first service
                         * partition.
                         */
                        if (i == 0) {
                                memcpy(infos[pol_idx].pi_name, tmp.pi_name,
                                       NRS_POL_NAME_MAX);
                                memcpy(&infos[pol_idx].pi_state, &tmp.pi_state,
                                       sizeof(tmp.pi_state));
                                infos[pol_idx].pi_fallback = tmp.pi_fallback;
                                /**
                                 * For the rest of the service partitions
                                 * sanity-check the values we get.
                                 */
                        } else {
                                LASSERT(strncmp(infos[pol_idx].pi_name,
                                                tmp.pi_name,
                                                NRS_POL_NAME_MAX) == 0);
                                /**
                                 * Not asserting ptlrpc_nrs_pol_info::pi_state,
                                 * because it may be different between
                                 * instances of the same policy in different
                                 * service partitions.
                                 */
                                LASSERT(infos[pol_idx].pi_fallback ==
                                        tmp.pi_fallback);
                        }

                        infos[pol_idx].pi_req_queued += tmp.pi_req_queued;
                        infos[pol_idx].pi_req_started += tmp.pi_req_started;

                        pol_idx++;
                }
                spin_unlock(&nrs->nrs_lock);
        }

        /**
         * Policy status information output is in YAML format.
         * For example:
         *
         *      regular_requests:
         *        - name: fifo
         *          state: started
         *          fallback: yes
         *          queued: 0
         *          active: 0
         *
         *        - name: crrn
         *          state: started
         *          fallback: no
         *          queued: 2015
         *          active: 384
         *
         *      high_priority_requests:
         *        - name: fifo
         *          state: started
         *          fallback: yes
         *          queued: 0
         *          active: 2
         *
         *        - name: crrn
         *          state: stopped
         *          fallback: no
         *          queued: 0
         *          active: 0
         */
        rc2 = snprintf(page + rc, count - rc,
                       "%s\n", !hp ?
                       "\nregular_requests:" :
                       "high_priority_requests:");

        if (rc2 >= count - rc) {
                /** Output was truncated */
                GOTO(out, rc = -EFBIG);
        }

        rc += rc2;

        for (pol_idx = 0; pol_idx < num_pols; pol_idx++) {
                rc2 = snprintf(page + rc, count - rc,
                               "  - name: %s\n"
                               "    state: %s\n"
                               "    fallback: %s\n"
                               "    queued: %-20d\n"
                               "    active: %-20d\n\n",
                               infos[pol_idx].pi_name,
                               nrs_state2str(infos[pol_idx].pi_state),
                               infos[pol_idx].pi_fallback ? "yes" : "no",
                               (int)infos[pol_idx].pi_req_queued,
                               (int)infos[pol_idx].pi_req_started);

                if (rc2 >= count - rc) {
                        /** Output was truncated */
                        GOTO(out, rc = -EFBIG);
                }

                rc += rc2;
        }

        if (!hp && nrs_svc_has_hp(svc)) {
                memset(infos, 0, num_pols * sizeof(*infos));

                /**
                 * Redo the processing for the service's HP NRS heads' policies.
                 */
                hp = true;
                goto again;
        }

        *eof = 1;

out:
        if (infos)
                OBD_FREE(infos, num_pols * sizeof(*infos));

        mutex_unlock(&nrs_core.nrs_mutex);

        RETURN(rc);
}

/**
 * The longest valid command string is the maximum policy name size, plus the
 * length of the " reg" substring
 */
#define LPROCFS_NRS_WR_MAX_CMD  (NRS_POL_NAME_MAX + sizeof(" reg") - 1)

/**
 * Starts and stops a given policy on a PTLRPC service.
 *
 * Commands consist of the policy name, followed by an optional [reg|hp] token;
 * if the optional token is omitted, the operation is performed on both the
 * regular and high-priority (if the service has one) NRS head.
 */
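/*
 * Example (illustrative; the exact procfs path depends on where the service
 * registers): writing "fifo hp" to a service's nrs_policies file starts the
 * fifo policy on the high-priority NRS head only, while writing just "fifo"
 * starts it on both heads.
 */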
static int ptlrpc_lprocfs_wr_nrs(struct file *file, const char *buffer,
                                 unsigned long count, void *data)
{
        struct ptlrpc_service          *svc = data;
        enum ptlrpc_nrs_queue_type      queue = PTLRPC_NRS_QUEUE_BOTH;
        char                           *cmd;
        char                           *cmd_copy = NULL;
        char                           *token;
        int                             rc = 0;
        ENTRY;

        if (count >= LPROCFS_NRS_WR_MAX_CMD)
                GOTO(out, rc = -EINVAL);

        OBD_ALLOC(cmd, LPROCFS_NRS_WR_MAX_CMD);
        if (cmd == NULL)
                GOTO(out, rc = -ENOMEM);
        /**
         * strsep() modifies its argument, so keep a copy
         */
        cmd_copy = cmd;

        if (cfs_copy_from_user(cmd, buffer, count))
                GOTO(out, rc = -EFAULT);

        cmd[count] = '\0';

        token = strsep(&cmd, " ");

        if (strlen(token) > NRS_POL_NAME_MAX - 1)
                GOTO(out, rc = -EINVAL);

        /**
         * No [reg|hp] token has been specified
         */
        if (cmd == NULL)
                goto default_queue;

        /**
         * The second token is either NULL, or an optional [reg|hp] string
         */
        if (strcmp(cmd, "reg") == 0)
                queue = PTLRPC_NRS_QUEUE_REG;
        else if (strcmp(cmd, "hp") == 0)
                queue = PTLRPC_NRS_QUEUE_HP;
        else
                GOTO(out, rc = -EINVAL);

default_queue:

        if (queue == PTLRPC_NRS_QUEUE_HP && !nrs_svc_has_hp(svc))
                GOTO(out, rc = -ENODEV);
        else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc))
                queue = PTLRPC_NRS_QUEUE_REG;

        /**
         * Serialize NRS core lprocfs operations with policy registration/
         * unregistration.
         */
        mutex_lock(&nrs_core.nrs_mutex);

        rc = ptlrpc_nrs_policy_control(svc, queue, token, PTLRPC_NRS_CTL_START,
                                       false, NULL);

        mutex_unlock(&nrs_core.nrs_mutex);
out:
        if (cmd_copy)
                OBD_FREE(cmd_copy, LPROCFS_NRS_WR_MAX_CMD);

        RETURN(rc < 0 ? rc : count);
}

/** @} nrs */

struct ptlrpc_srh_iterator {
        int                     srhi_idx;
        __u64                   srhi_seq;
        struct ptlrpc_request   *srhi_req;
};

int
ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service_part *svcpt,
                                    struct ptlrpc_srh_iterator *srhi,
                                    __u64 seq)
{
        cfs_list_t              *e;
        struct ptlrpc_request   *req;

        if (srhi->srhi_req != NULL &&
            srhi->srhi_seq > svcpt->scp_hist_seq_culled &&
            srhi->srhi_seq <= seq) {
                /* If srhi_req was set previously, hasn't been culled and
                 * we're searching for a seq on or after it (i.e. more
                 * recent), search from it onwards.
                 * Since the service history is LRU (i.e. culled reqs will
                 * be near the head), we shouldn't have to do long
                 * re-scans */
                LASSERTF(srhi->srhi_seq == srhi->srhi_req->rq_history_seq,
                         "%s:%d: seek seq "LPU64", request seq "LPU64"\n",
                         svcpt->scp_service->srv_name, svcpt->scp_cpt,
                         srhi->srhi_seq, srhi->srhi_req->rq_history_seq);
                LASSERTF(!cfs_list_empty(&svcpt->scp_hist_reqs),
                         "%s:%d: seek offset "LPU64", request seq "LPU64", "
                         "last culled "LPU64"\n",
                         svcpt->scp_service->srv_name, svcpt->scp_cpt,
                         seq, srhi->srhi_seq, svcpt->scp_hist_seq_culled);
                e = &srhi->srhi_req->rq_history_list;
        } else {
                /* search from start */
                e = svcpt->scp_hist_reqs.next;
        }

        while (e != &svcpt->scp_hist_reqs) {
                req = cfs_list_entry(e, struct ptlrpc_request, rq_history_list);

                if (req->rq_history_seq >= seq) {
                        srhi->srhi_seq = req->rq_history_seq;
                        srhi->srhi_req = req;
                        return 0;
                }
                e = e->next;
        }

        return -ENOENT;
}

/*
 * The ptlrpc history sequence is used as the "position" of the seq_file; in
 * some cases seq_read() will increase the "position" to indicate reading the
 * next element.  However, the low bits of the history sequence are reserved
 * for the CPT id (see the comments before ptlrpc_req_add_history() for
 * details), which means seq_read() might change the CPT id of the history
 * sequence and never finish reading the requests on a CPT.  To make it work,
 * we have to shift the CPT id to the high bits and the timestamp to the low
 * bits, so seq_read() will only increase the timestamp, which correctly
 * indicates the next position.
 */

/* convert seq_file pos to cpt */
#define PTLRPC_REQ_POS2CPT(svc, pos)                    \
        ((svc)->srv_cpt_bits == 0 ? 0 :                 \
         (__u64)(pos) >> (64 - (svc)->srv_cpt_bits))

/* make up seq_file pos from cpt */
#define PTLRPC_REQ_CPT2POS(svc, cpt)                    \
        ((svc)->srv_cpt_bits == 0 ? 0 :                 \
         (cpt) << (64 - (svc)->srv_cpt_bits))

/* convert sequence to position */
#define PTLRPC_REQ_SEQ2POS(svc, seq)                    \
        ((svc)->srv_cpt_bits == 0 ? (seq) :             \
         ((seq) >> (svc)->srv_cpt_bits) |               \
         ((seq) << (64 - (svc)->srv_cpt_bits)))

/* convert position to sequence */
#define PTLRPC_REQ_POS2SEQ(svc, pos)                    \
        ((svc)->srv_cpt_bits == 0 ? (pos) :             \
         ((__u64)(pos) << (svc)->srv_cpt_bits) |        \
         ((__u64)(pos) >> (64 - (svc)->srv_cpt_bits)))
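
/*
 * Worked example (assuming srv_cpt_bits == 2): a history sequence is laid
 * out as (timestamp << 2) | cpt.  PTLRPC_REQ_SEQ2POS rotates it right by two
 * bits, giving pos = timestamp | (cpt << 62), so the CPT id sits in the high
 * bits and seq_read()'s increment of pos only advances the timestamp.
 * PTLRPC_REQ_POS2CPT then recovers the CPT id as pos >> 62, and
 * PTLRPC_REQ_POS2SEQ rotates left by two bits to restore the original
 * sequence.
 */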

static void *
ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos)
{
        struct ptlrpc_service           *svc = s->private;
        struct ptlrpc_service_part      *svcpt;
        struct ptlrpc_srh_iterator      *srhi;
        unsigned int                    cpt;
        int                             rc;
        int                             i;

        if (sizeof(loff_t) != sizeof(__u64)) { /* can't support */
                CWARN("Failed to read request history because size of loff_t "
                      "%d can't match size of u64\n", (int)sizeof(loff_t));
                return NULL;
        }

        OBD_ALLOC(srhi, sizeof(*srhi));
        if (srhi == NULL)
                return NULL;

        srhi->srhi_seq = 0;
        srhi->srhi_req = NULL;

        cpt = PTLRPC_REQ_POS2CPT(svc, *pos);

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                if (i < cpt) /* skip */
                        continue;
                if (i > cpt) /* make up the lowest position for this CPT */
                        *pos = PTLRPC_REQ_CPT2POS(svc, i);

                spin_lock(&svcpt->scp_lock);
                rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi,
                                PTLRPC_REQ_POS2SEQ(svc, *pos));
                spin_unlock(&svcpt->scp_lock);
                if (rc == 0) {
                        *pos = PTLRPC_REQ_SEQ2POS(svc, srhi->srhi_seq);
                        srhi->srhi_idx = i;
                        return srhi;
                }
        }

        OBD_FREE(srhi, sizeof(*srhi));
        return NULL;
}

static void
ptlrpc_lprocfs_svc_req_history_stop(struct seq_file *s, void *iter)
{
        struct ptlrpc_srh_iterator *srhi = iter;

        if (srhi != NULL)
                OBD_FREE(srhi, sizeof(*srhi));
}

static void *
ptlrpc_lprocfs_svc_req_history_next(struct seq_file *s,
                                    void *iter, loff_t *pos)
{
        struct ptlrpc_service           *svc = s->private;
        struct ptlrpc_srh_iterator      *srhi = iter;
        struct ptlrpc_service_part      *svcpt;
        __u64                           seq;
        int                             rc;
        int                             i;

        for (i = srhi->srhi_idx; i < svc->srv_ncpts; i++) {
                svcpt = svc->srv_parts[i];

                if (i > srhi->srhi_idx) { /* reset iterator for a new CPT */
                        srhi->srhi_req = NULL;
                        seq = srhi->srhi_seq = 0;
                } else { /* the next sequence */
                        seq = srhi->srhi_seq + (1 << svc->srv_cpt_bits);
                }

                spin_lock(&svcpt->scp_lock);
                rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, seq);
                spin_unlock(&svcpt->scp_lock);
                if (rc == 0) {
                        *pos = PTLRPC_REQ_SEQ2POS(svc, srhi->srhi_seq);
                        srhi->srhi_idx = i;
                        return srhi;
                }
        }

        OBD_FREE(srhi, sizeof(*srhi));
        return NULL;
}

/* common ost/mdt so_req_printer */
void target_print_req(void *seq_file, struct ptlrpc_request *req)
{
        /* Called holding srv_lock with irqs disabled.
         * Print specific req contents and a newline.
         * CAVEAT EMPTOR: check request message length before printing!!!
         * You might have received any old crap so you must be just as
         * careful here as the service's request parser!!! */
        struct seq_file *sf = seq_file;

        switch (req->rq_phase) {
        case RQ_PHASE_NEW:
                /* still awaiting a service thread's attention, or rejected
                 * because the generic request message didn't unpack */
                seq_printf(sf, "<not swabbed>\n");
                break;
        case RQ_PHASE_INTERPRET:
                /* being handled, so basic msg swabbed, and opc is valid
                 * but racing with mds_handle() */
        case RQ_PHASE_COMPLETE:
                /* been handled by mds_handle() reply state possibly still
                 * volatile */
                seq_printf(sf, "opc %d\n", lustre_msg_get_opc(req->rq_reqmsg));
                break;
        default:
                DEBUG_REQ(D_ERROR, req, "bad phase %d", req->rq_phase);
        }
}
EXPORT_SYMBOL(target_print_req);

static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
{
        struct ptlrpc_service           *svc = s->private;
        struct ptlrpc_srh_iterator      *srhi = iter;
        struct ptlrpc_service_part      *svcpt;
        struct ptlrpc_request           *req;
        int                             rc;

        LASSERT(srhi->srhi_idx < svc->srv_ncpts);

        svcpt = svc->srv_parts[srhi->srhi_idx];

        spin_lock(&svcpt->scp_lock);

        rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, srhi->srhi_seq);

        if (rc == 0) {
                req = srhi->srhi_req;

                /* Print common req fields.
                 * CAVEAT EMPTOR: we're racing with the service handler
                 * here.  The request could contain any old crap, so you
                 * must be just as careful as the service's request
                 * parser. Currently I only print stuff here I know is OK
                 * to look at coz it was set up in request_in_callback()!!! */
                seq_printf(s, LPD64":%s:%s:x"LPU64":%d:%s:%ld:%lds(%+lds) ",
                           req->rq_history_seq, libcfs_nid2str(req->rq_self),
                           libcfs_id2str(req->rq_peer), req->rq_xid,
                           req->rq_reqlen, ptlrpc_rqphase2str(req),
                           req->rq_arrival_time.tv_sec,
                           req->rq_sent - req->rq_arrival_time.tv_sec,
                           req->rq_sent - req->rq_deadline);
                if (svc->srv_ops.so_req_printer == NULL)
                        seq_printf(s, "\n");
                else
                        svc->srv_ops.so_req_printer(s, srhi->srhi_req);
        }

        spin_unlock(&svcpt->scp_lock);
        return rc;
}

static int
ptlrpc_lprocfs_svc_req_history_open(struct inode *inode, struct file *file)
{
        static struct seq_operations sops = {
                .start = ptlrpc_lprocfs_svc_req_history_start,
                .stop  = ptlrpc_lprocfs_svc_req_history_stop,
                .next  = ptlrpc_lprocfs_svc_req_history_next,
                .show  = ptlrpc_lprocfs_svc_req_history_show,
        };
        struct proc_dir_entry *dp = PDE(inode);
        struct seq_file       *seqf;
        int                    rc;

        LPROCFS_ENTRY_AND_CHECK(dp);
        rc = seq_open(file, &sops);
        if (rc) {
                LPROCFS_EXIT();
                return rc;
        }

        seqf = file->private_data;
        seqf->private = dp->data;
        return 0;
}

/* See also lprocfs_rd_timeouts */
static int ptlrpc_lprocfs_rd_timeouts(char *page, char **start, off_t off,
                                      int count, int *eof, void *data)
{
        struct ptlrpc_service           *svc = data;
        struct ptlrpc_service_part      *svcpt;
        struct dhms                     ts;
        time_t                          worstt;
        unsigned int                    cur;
        unsigned int                    worst;
        int                             nob = 0;
        int                             rc = 0;
        int                             i;

        if (AT_OFF) {
                rc += snprintf(page + rc, count - rc,
                               "adaptive timeouts off, using obd_timeout %u\n",
                               obd_timeout);
                return rc;
        }

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                cur     = at_get(&svcpt->scp_at_estimate);
                worst   = svcpt->scp_at_estimate.at_worst_ever;
                worstt  = svcpt->scp_at_estimate.at_worst_time;
                s2dhms(&ts, cfs_time_current_sec() - worstt);

                nob = snprintf(page, count,
                               "%10s : cur %3u  worst %3u (at %ld, "
                               DHMS_FMT" ago) ", "service",
                               cur, worst, worstt, DHMS_VARS(&ts));

                nob = lprocfs_at_hist_helper(page, count, nob,
                                             &svcpt->scp_at_estimate);
                rc += nob;
                page += nob;
                count -= nob;

                /*
                 * NB: for a lustre proc read, the read count must be less
                 * than PAGE_SIZE; see lprocfs_fops_read for details.
                 * It's unlikely that we exceed PAGE_SIZE here, because that
                 * would mean the service has more than 50 partitions.
                 */
                if (count <= 0) {
                        CWARN("Can't fit AT information of %s in one page, "
                              "please contact the developers to fix this.\n",
                              svc->srv_name);
                        break;
                }
        }

        return rc;
}

static int ptlrpc_lprocfs_rd_hp_ratio(char *page, char **start, off_t off,
                                      int count, int *eof, void *data)
{
        struct ptlrpc_service *svc = data;
        int rc = snprintf(page, count, "%d", svc->srv_hpreq_ratio);
        return rc;
}

static int ptlrpc_lprocfs_wr_hp_ratio(struct file *file, const char *buffer,
                                      unsigned long count, void *data)
{
        struct ptlrpc_service           *svc = data;
        int     rc;
        int     val;

        rc = lprocfs_write_helper(buffer, count, &val);
        if (rc < 0)
                return rc;

        if (val < 0)
                return -ERANGE;

        spin_lock(&svc->srv_lock);
        svc->srv_hpreq_ratio = val;
        spin_unlock(&svc->srv_lock);

        return count;
}

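/*
 * Creates the per-service procfs tree: a "stats" file plus the lproc_vars
 * entries and "req_history" below, under <entry>/<svc name> (for example,
 * roughly /proc/fs/lustre/ost/OSS/ost_io/ for the OSS ost_io service --
 * illustrative path, the exact location depends on the caller).
 */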
void ptlrpc_lprocfs_register_service(struct proc_dir_entry *entry,
                                     struct ptlrpc_service *svc)
{
        struct lprocfs_vars lproc_vars[] = {
                {.name       = "high_priority_ratio",
                 .read_fptr  = ptlrpc_lprocfs_rd_hp_ratio,
                 .write_fptr = ptlrpc_lprocfs_wr_hp_ratio,
                 .data       = svc},
                {.name       = "req_buffer_history_len",
                 .read_fptr  = ptlrpc_lprocfs_read_req_history_len,
                 .data       = svc},
                {.name       = "req_buffer_history_max",
                 .write_fptr = ptlrpc_lprocfs_write_req_history_max,
                 .read_fptr  = ptlrpc_lprocfs_read_req_history_max,
                 .data       = svc},
                {.name       = "threads_min",
                 .read_fptr  = ptlrpc_lprocfs_rd_threads_min,
                 .write_fptr = ptlrpc_lprocfs_wr_threads_min,
                 .data       = svc},
                {.name       = "threads_max",
                 .read_fptr  = ptlrpc_lprocfs_rd_threads_max,
                 .write_fptr = ptlrpc_lprocfs_wr_threads_max,
                 .data       = svc},
                {.name       = "threads_started",
                 .read_fptr  = ptlrpc_lprocfs_rd_threads_started,
                 .data       = svc},
                {.name       = "timeouts",
                 .read_fptr  = ptlrpc_lprocfs_rd_timeouts,
                 .data       = svc},
                {.name       = "nrs_policies",
                 .read_fptr  = ptlrpc_lprocfs_rd_nrs,
                 .write_fptr = ptlrpc_lprocfs_wr_nrs,
                 .data       = svc},
                {NULL}
        };
        static struct file_operations req_history_fops = {
                .owner       = THIS_MODULE,
                .open        = ptlrpc_lprocfs_svc_req_history_open,
                .read        = seq_read,
                .llseek      = seq_lseek,
                .release     = lprocfs_seq_release,
        };

        int rc;

        ptlrpc_lprocfs_register(entry, svc->srv_name,
                                "stats", &svc->srv_procroot,
                                &svc->srv_stats);

        if (svc->srv_procroot == NULL)
                return;

        lprocfs_add_vars(svc->srv_procroot, lproc_vars, NULL);

        rc = lprocfs_seq_create(svc->srv_procroot, "req_history",
                                0400, &req_history_fops, svc);
        if (rc)
                CWARN("Error adding the req_history file\n");
}

void ptlrpc_lprocfs_register_obd(struct obd_device *obddev)
{
        ptlrpc_lprocfs_register(obddev->obd_proc_entry, NULL, "stats",
                                &obddev->obd_svc_procroot,
                                &obddev->obd_svc_stats);
}
EXPORT_SYMBOL(ptlrpc_lprocfs_register_obd);

void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount)
{
        struct lprocfs_stats *svc_stats;
        __u32 op = lustre_msg_get_opc(req->rq_reqmsg);
        int opc = opcode_offset(op);

        svc_stats = req->rq_import->imp_obd->obd_svc_stats;
        if (svc_stats == NULL || opc <= 0)
                return;
        LASSERT(opc < LUSTRE_MAX_OPCODES);
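        /* LDLM_ENQUEUE and MDS_REINT are skipped here, presumably because
         * they are accounted under the finer-grained extra opcodes
         * (ldlm_*_enqueue, mds_reint_*) instead. */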
        if (!(op == LDLM_ENQUEUE || op == MDS_REINT))
                lprocfs_counter_add(svc_stats, opc + EXTRA_MAX_OPCODES, amount);
}

void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes)
{
        struct lprocfs_stats *svc_stats;
        int idx;

        if (!req->rq_import)
                return;
        svc_stats = req->rq_import->imp_obd->obd_svc_stats;
        if (!svc_stats)
                return;
        idx = lustre_msg_get_opc(req->rq_reqmsg);
        switch (idx) {
        case OST_READ:
                idx = BRW_READ_BYTES + PTLRPC_LAST_CNTR;
                break;
        case OST_WRITE:
                idx = BRW_WRITE_BYTES + PTLRPC_LAST_CNTR;
                break;
        default:
                LASSERTF(0, "unsupported opcode %u\n", idx);
                break;
        }

        lprocfs_counter_add(svc_stats, idx, bytes);
}
EXPORT_SYMBOL(ptlrpc_lprocfs_brw);

void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc)
{
        if (svc->srv_procroot != NULL)
                lprocfs_remove(&svc->srv_procroot);

        if (svc->srv_stats)
                lprocfs_free_stats(&svc->srv_stats);
}

void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd)
{
        if (obd->obd_svc_procroot)
                lprocfs_remove(&obd->obd_svc_procroot);

        if (obd->obd_svc_stats)
                lprocfs_free_stats(&obd->obd_svc_stats);
}
EXPORT_SYMBOL(ptlrpc_lprocfs_unregister_obd);

#define BUFLEN (UUID_MAX + 5)

int lprocfs_wr_evict_client(struct file *file, const char *buffer,
                            unsigned long count, void *data)
{
        struct obd_device *obd = data;
        char              *kbuf;
        char              *tmpbuf;

        OBD_ALLOC(kbuf, BUFLEN);
        if (kbuf == NULL)
                return -ENOMEM;

        /*
         * OBD_ALLOC() will zero kbuf, but we only copy BUFLEN - 1
         * bytes into kbuf, to ensure that the string is NUL-terminated.
         * UUID_MAX should include a trailing NUL already.
         */
        if (cfs_copy_from_user(kbuf, buffer,
                               min_t(unsigned long, BUFLEN - 1, count))) {
                count = -EFAULT;
                goto out;
        }
        tmpbuf = cfs_firststr(kbuf, min_t(unsigned long, BUFLEN - 1, count));
        /* Kludge code (deadlock situation): the lprocfs lock has been held
         * since the client eviction was triggered by writing the client's
         * uuid/nid to the procfs "evict_client" entry.  However,
         * obd_export_evict_by_uuid() will call lprocfs_remove() to destroy
         * the proc entries under the export being destroyed, so the lock
         * has to be dropped here first.
         * - jay, jxiong@clusterfs.com */
        LPROCFS_EXIT();
        class_incref(obd, __FUNCTION__, cfs_current());

        if (strncmp(tmpbuf, "nid:", 4) == 0)
                obd_export_evict_by_nid(obd, tmpbuf + 4);
        else if (strncmp(tmpbuf, "uuid:", 5) == 0)
                obd_export_evict_by_uuid(obd, tmpbuf + 5);
        else
                obd_export_evict_by_uuid(obd, tmpbuf);

        class_decref(obd, __FUNCTION__, cfs_current());
        LPROCFS_ENTRY();

out:
        OBD_FREE(kbuf, BUFLEN);
        return count;
}
EXPORT_SYMBOL(lprocfs_wr_evict_client);
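
/*
 * Example (illustrative): writing "nid:192.168.0.1@tcp" to an obd's
 * evict_client entry evicts every export from that NID, while writing
 * "uuid:<client uuid>" (or a bare uuid) evicts a single client export.
 */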

#undef BUFLEN

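/*
 * Writing to a client obd's "ping" procfs entry (where this handler is
 * wired up) sends a synchronous OBD_PING to the peer; the write completes
 * once the reply arrives or the RPC fails.
 */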
int lprocfs_wr_ping(struct file *file, const char *buffer,
                    unsigned long count, void *data)
{
        struct obd_device     *obd = data;
        struct ptlrpc_request *req;
        int                    rc;
        ENTRY;

        LPROCFS_CLIMP_CHECK(obd);
        req = ptlrpc_prep_ping(obd->u.cli.cl_import);
        LPROCFS_CLIMP_EXIT(obd);
        if (req == NULL)
                RETURN(-ENOMEM);

        req->rq_send_state = LUSTRE_IMP_FULL;

        rc = ptlrpc_queue_wait(req);

        ptlrpc_req_finished(req);
        if (rc >= 0)
                RETURN(count);
        RETURN(rc);
}
EXPORT_SYMBOL(lprocfs_wr_ping);

/* Write the connection UUID to this file to attempt to connect to that node.
 * The connection UUID is a node's primary NID. For example,
 * "echo connection=192.168.0.1@tcp0::instance > .../import".
 */
int lprocfs_wr_import(struct file *file, const char *buffer,
                      unsigned long count, void *data)
{
        struct obd_device *obd = data;
        struct obd_import *imp = obd->u.cli.cl_import;
        char *kbuf = NULL;
        char *uuid;
        char *ptr;
        int do_reconn = 1;
        const char prefix[] = "connection=";
        const int prefix_len = sizeof(prefix) - 1;

        if (count > CFS_PAGE_SIZE - 1 || count <= prefix_len)
                return -EINVAL;

        OBD_ALLOC(kbuf, count + 1);
        if (kbuf == NULL)
                return -ENOMEM;

        if (cfs_copy_from_user(kbuf, buffer, count))
                GOTO(out, count = -EFAULT);

        kbuf[count] = 0;

        /* only support connection=uuid::instance now */
        if (strncmp(prefix, kbuf, prefix_len) != 0)
                GOTO(out, count = -EINVAL);

        uuid = kbuf + prefix_len;
        ptr = strstr(uuid, "::");
        if (ptr) {
                __u32 inst;
                char *endptr;

                *ptr = 0;
                do_reconn = 0;
                ptr += strlen("::");
                inst = simple_strtol(ptr, &endptr, 10);
                if (*endptr) {
                        CERROR("config: wrong instance # %s\n", ptr);
                } else if (inst != imp->imp_connect_data.ocd_instance) {
                        CDEBUG(D_INFO, "IR: %s is connecting to an obsoleted "
                               "target(%u/%u), reconnecting...\n",
                               imp->imp_obd->obd_name,
                               imp->imp_connect_data.ocd_instance, inst);
                        do_reconn = 1;
                } else {
                        CDEBUG(D_INFO, "IR: %s has already been connecting to "
                               "new target(%u)\n",
                               imp->imp_obd->obd_name, inst);
                }
        }

        if (do_reconn)
                ptlrpc_recover_import(imp, uuid, 1);

out:
        OBD_FREE(kbuf, count + 1);
        return count;
}
EXPORT_SYMBOL(lprocfs_wr_import);

int lprocfs_rd_pinger_recov(char *page, char **start, off_t off,
                            int count, int *eof, void *data)
{
        struct obd_device *obd = data;
        struct obd_import *imp = obd->u.cli.cl_import;
        int rc;

        LPROCFS_CLIMP_CHECK(obd);
        rc = snprintf(page, count, "%d\n", !imp->imp_no_pinger_recover);
        LPROCFS_CLIMP_EXIT(obd);

        return rc;
}
EXPORT_SYMBOL(lprocfs_rd_pinger_recov);

int lprocfs_wr_pinger_recov(struct file *file, const char *buffer,
                            unsigned long count, void *data)
{
        struct obd_device *obd = data;
        struct client_obd *cli = &obd->u.cli;
        struct obd_import *imp = cli->cl_import;
        int rc, val;

        rc = lprocfs_write_helper(buffer, count, &val);
        if (rc < 0)
                return rc;

        if (val != 0 && val != 1)
                return -ERANGE;

        LPROCFS_CLIMP_CHECK(obd);
        spin_lock(&imp->imp_lock);
        imp->imp_no_pinger_recover = !val;
        spin_unlock(&imp->imp_lock);
        LPROCFS_CLIMP_EXIT(obd);

        return count;
}
EXPORT_SYMBOL(lprocfs_wr_pinger_recov);

#endif /* LPROCFS */