/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2012, 2016, Intel Corporation.
 * Use is subject to license terms.
 *
 * Author: Niu Yawei <niu@whamcloud.com>
 */
/*
 * lustre/obdclass/lprocfs_jobstats.c
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <obd_class.h>
#include <lprocfs_status.h>

#ifdef CONFIG_PROC_FS
/*
 * JobID formats & JobID environment variable names for supported
 * job schedulers:
 *
 * SLURM:
 *   JobID format:  32 bit integer.
 *   JobID env var: SLURM_JOB_ID.
 * SGE:
 *   JobID format:  Decimal integer range to 99999.
 *   JobID env var: JOB_ID.
 * LSF:
 *   JobID format:  6 digit integer by default (up to 999999), can be
 *                  increased to 10 digit (up to 2147483646).
 *   JobID env var: LSB_JOBID.
 * LoadLeveler:
 *   JobID format:  String of machine_name.cluster_id.process_id, for
 *                  example: fr2n02.32.0
 *   JobID env var: LOADL_STEP_ID.
 * PBS:
 *   JobID format:  String of sequence_number[.server_name][@server].
 *   JobID env var: PBS_JOBID.
 * Maui/MOAB:
 *   JobID format:  Same as PBS.
 *   JobID env var: Same as PBS.
 */
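/*
 * Usage sketch: clients are normally configured to forward one of the
 * environment variables above as the jobid, typically through the
 * jobid_var tunable (exact syntax may vary by release), for example:
 *
 *	lctl set_param jobid_var=SLURM_JOB_ID
 *
 * The server-side job_stats entries maintained below are then keyed by
 * that value.
 */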
struct job_stat {
	struct hlist_node	js_hash;	/* hash struct for this jobid */
	struct list_head	js_list;	/* on ojs_list, with ojs_lock */
	atomic_t		js_refcount;	/* num users of this struct */
	char			js_jobid[LUSTRE_JOBID_SIZE]; /* job name + NUL */
	time64_t		js_timestamp;	/* seconds of most recent stat */
	struct lprocfs_stats	*js_stats;	/* per-job statistics */
	struct obd_job_stats	*js_jobstats;	/* for accessing ojs_lock */
};

static unsigned
job_stat_hash(struct cfs_hash *hs, const void *key, unsigned mask)
{
	return cfs_hash_djb2_hash(key, strlen(key), mask);
}

static void *job_stat_key(struct hlist_node *hnode)
{
	struct job_stat *job;

	job = hlist_entry(hnode, struct job_stat, js_hash);
	return job->js_jobid;
}

static int job_stat_keycmp(const void *key, struct hlist_node *hnode)
{
	struct job_stat *job;

	job = hlist_entry(hnode, struct job_stat, js_hash);
	return (strlen(job->js_jobid) == strlen(key)) &&
	       !strncmp(job->js_jobid, key, strlen(key));
}

static void *job_stat_object(struct hlist_node *hnode)
{
	return hlist_entry(hnode, struct job_stat, js_hash);
}

static void job_stat_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
	struct job_stat *job;

	job = hlist_entry(hnode, struct job_stat, js_hash);
	atomic_inc(&job->js_refcount);
}

static void job_free(struct job_stat *job)
{
	LASSERT(atomic_read(&job->js_refcount) == 0);
	LASSERT(job->js_jobstats != NULL);

	write_lock(&job->js_jobstats->ojs_lock);
	list_del_init(&job->js_list);
	write_unlock(&job->js_jobstats->ojs_lock);

	lprocfs_free_stats(&job->js_stats);
	OBD_FREE_PTR(job);
}

static void job_putref(struct job_stat *job)
{
	LASSERT(atomic_read(&job->js_refcount) > 0);
	if (atomic_dec_and_test(&job->js_refcount))
		job_free(job);
}

static void job_stat_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
	struct job_stat *job;

	job = hlist_entry(hnode, struct job_stat, js_hash);
	job_putref(job);
}

static void job_stat_exit(struct cfs_hash *hs, struct hlist_node *hnode)
{
	CERROR("should not have any items\n");
}

static struct cfs_hash_ops job_stats_hash_ops = {
	.hs_hash	= job_stat_hash,
	.hs_key		= job_stat_key,
	.hs_keycmp	= job_stat_keycmp,
	.hs_object	= job_stat_object,
	.hs_get		= job_stat_get,
	.hs_put_locked	= job_stat_put_locked,
	.hs_exit	= job_stat_exit,
};
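/*
 * Illustrative sketch of how these callbacks are exercised: the cfs_hash
 * created in lprocfs_job_stats_init() below uses the NUL-terminated jobid
 * string as the key, so a lookup is roughly:
 *
 *	struct job_stat *job;
 *
 *	job = cfs_hash_lookup(stats->ojs_hash, jobid);
 *	if (job != NULL) {
 *		lprocfs_counter_add(job->js_stats, event, amount);
 *		job_putref(job);	(drops the reference taken via .hs_get)
 *	}
 */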
/**
 * Jobstats expiry iterator to clean up old jobids
 *
 * Called for each job_stat structure on this device, it should delete stats
 * older than the specified \a oldest_time in seconds.  If \a oldest_time is
 * in the future then this will delete all statistics (e.g. during shutdown).
 *
 * \param[in] hs	hash of all jobids on this device
 * \param[in] bd	hash bucket containing this jobid
 * \param[in] hnode	hash structure for this jobid
 * \param[in] data	pointer to stats expiry time in seconds
 */
static int job_cleanup_iter_callback(struct cfs_hash *hs,
				     struct cfs_hash_bd *bd,
				     struct hlist_node *hnode, void *data)
{
	time64_t oldest_time = *((time64_t *)data);
	struct job_stat *job;

	job = hlist_entry(hnode, struct job_stat, js_hash);
	if (job->js_timestamp < oldest_time)
		cfs_hash_bd_del_locked(hs, bd, hnode);

	return 0;
}
/**
 * Clean up jobstats that were updated more than \a before seconds ago.
 *
 * Since this function may be called frequently, do not scan all of the
 * jobstats on each call, only twice per cleanup interval.  Because a scan
 * runs at most every cleanup_interval / 2 seconds, and an entry expires on
 * average halfway through that window, stats may be around on average
 * cleanup_interval / 4 longer than necessary, but that is not considered
 * harmful.
 *
 * If \a before is negative then this will force clean up all jobstats due
 * to the expiry time being in the future (e.g. at shutdown).
 *
 * If there is already another thread doing jobstats cleanup, don't try to
 * do this again in the current thread unless this is a force cleanup.
 *
 * \param[in] stats	structure tracking all job stats for this device
 * \param[in] before	expire jobstats updated more than this many seconds ago
 */
static void lprocfs_job_cleanup(struct obd_job_stats *stats, int before)
{
	time64_t now = ktime_get_real_seconds();
	time64_t oldest;

	if (likely(before >= 0)) {
		unsigned int cleanup_interval = stats->ojs_cleanup_interval;

		if (cleanup_interval == 0 || before == 0)
			return;

		if (now < stats->ojs_last_cleanup + cleanup_interval / 2)
			return;

		if (stats->ojs_cleaning)
			return;
	}

	write_lock(&stats->ojs_lock);
	if (before >= 0 && stats->ojs_cleaning) {
		write_unlock(&stats->ojs_lock);
		return;
	}

	stats->ojs_cleaning = true;
	write_unlock(&stats->ojs_lock);

	/* Can't hold ojs_lock over hash iteration, since it is grabbed by
	 * job_cleanup_iter_callback()
	 *   ->cfs_hash_bd_del_locked()
	 *     ->job_putref()
	 *       ->job_free()
	 *
	 * Holding ojs_lock isn't necessary for safety of the hash iteration,
	 * since locking of the hash is handled internally, but there isn't
	 * any benefit to having multiple threads doing cleanup at one time.
	 */
	oldest = now - before;
	cfs_hash_for_each_safe(stats->ojs_hash, job_cleanup_iter_callback,
			       &oldest);

	write_lock(&stats->ojs_lock);
	stats->ojs_cleaning = false;
	stats->ojs_last_cleanup = ktime_get_real_seconds();
	write_unlock(&stats->ojs_lock);
}
static struct job_stat *job_alloc(char *jobid, struct obd_job_stats *jobs)
{
	struct job_stat *job;

	OBD_ALLOC_PTR(job);
	if (job == NULL)
		return NULL;

	job->js_stats = lprocfs_alloc_stats(jobs->ojs_cntr_num, 0);
	if (job->js_stats == NULL) {
		OBD_FREE_PTR(job);
		return NULL;
	}
	jobs->ojs_cntr_init_fn(job->js_stats);

	memcpy(job->js_jobid, jobid, sizeof(job->js_jobid));
	job->js_timestamp = ktime_get_real_seconds();
	job->js_jobstats = jobs;
	INIT_HLIST_NODE(&job->js_hash);
	INIT_LIST_HEAD(&job->js_list);
	atomic_set(&job->js_refcount, 1);

	return job;
}
int lprocfs_job_stats_log(struct obd_device *obd, char *jobid,
			  int event, long amount)
{
	struct obd_job_stats *stats = &obd->u.obt.obt_jobstats;
	struct job_stat *job, *job2;

	LASSERT(stats != NULL);
	LASSERT(stats->ojs_hash != NULL);

	if (event >= stats->ojs_cntr_num)
		return -EINVAL;

	if (jobid == NULL || strlen(jobid) == 0)
		return -EINVAL;

	if (strlen(jobid) >= LUSTRE_JOBID_SIZE) {
		CERROR("Invalid jobid size (%lu), expect(%d)\n",
		       (unsigned long)strlen(jobid) + 1, LUSTRE_JOBID_SIZE);
		return -EINVAL;
	}

	job = cfs_hash_lookup(stats->ojs_hash, jobid);
	if (job)
		goto found;

	lprocfs_job_cleanup(stats, stats->ojs_cleanup_interval);

	job = job_alloc(jobid, stats);
	if (job == NULL)
		return -ENOMEM;

	job2 = cfs_hash_findadd_unique(stats->ojs_hash, job->js_jobid,
				       &job->js_hash);
	if (job2 != job) {
		job_putref(job);
		job = job2;
		/* We cannot LASSERT(!list_empty(&job->js_list)) here,
		 * since we just lost the race for inserting "job" into the
		 * ojs_list, and some other thread is doing it _right_now_.
		 * Instead, be content the other thread is doing this, since
		 * "job2" was initialized in job_alloc() already. LU-2163 */
	} else {
		LASSERT(list_empty(&job->js_list));
		write_lock(&stats->ojs_lock);
		list_add_tail(&job->js_list, &stats->ojs_list);
		write_unlock(&stats->ojs_lock);
	}
found:
	LASSERT(stats == job->js_jobstats);
	job->js_timestamp = ktime_get_real_seconds();
	lprocfs_counter_add(job->js_stats, event, amount);
	job_putref(job);

	return 0;
}
EXPORT_SYMBOL(lprocfs_job_stats_log);
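/*
 * Illustrative caller sketch: a server target typically invokes
 * lprocfs_job_stats_log() from its request handlers with the jobid taken
 * from the incoming RPC (e.g. via lustre_msg_get_jobid()).  The event
 * index EX_JOB_STATS_OPEN below is hypothetical; only
 * lprocfs_job_stats_log() itself is defined in this file:
 *
 *	rc = lprocfs_job_stats_log(obd, lustre_msg_get_jobid(req->rq_reqmsg),
 *				   EX_JOB_STATS_OPEN, 1);
 */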
void lprocfs_job_stats_fini(struct obd_device *obd)
{
	struct obd_job_stats *stats = &obd->u.obt.obt_jobstats;

	if (stats->ojs_hash == NULL)
		return;

	lprocfs_job_cleanup(stats, -99);
	cfs_hash_putref(stats->ojs_hash);
	stats->ojs_hash = NULL;
	LASSERT(list_empty(&stats->ojs_list));
}
EXPORT_SYMBOL(lprocfs_job_stats_fini);
static void *lprocfs_jobstats_seq_start(struct seq_file *p, loff_t *pos)
{
	struct obd_job_stats *stats = p->private;
	loff_t off = *pos;
	struct job_stat *job;

	read_lock(&stats->ojs_lock);
	if (off == 0)
		return SEQ_START_TOKEN;
	off--;
	list_for_each_entry(job, &stats->ojs_list, js_list) {
		if (!off--)
			return job;
	}
	return NULL;
}

static void lprocfs_jobstats_seq_stop(struct seq_file *p, void *v)
{
	struct obd_job_stats *stats = p->private;

	read_unlock(&stats->ojs_lock);
}

static void *lprocfs_jobstats_seq_next(struct seq_file *p, void *v, loff_t *pos)
{
	struct obd_job_stats *stats = p->private;
	struct job_stat *job;
	struct list_head *next;

	++*pos;
	if (v == SEQ_START_TOKEN) {
		next = stats->ojs_list.next;
	} else {
		job = (struct job_stat *)v;
		next = job->js_list.next;
	}

	return next == &stats->ojs_list ? NULL :
	       list_entry(next, struct job_stat, js_list);
}
/*
 * Example of output on MDT:
 *
 * job_stats:
 * - job_id:        dd.4854
 *   snapshot_time: 1322494486
 *   open:          { samples: 1, unit: reqs }
 *   close:         { samples: 1, unit: reqs }
 *   mknod:         { samples: 0, unit: reqs }
 *   link:          { samples: 0, unit: reqs }
 *   unlink:        { samples: 0, unit: reqs }
 *   mkdir:         { samples: 0, unit: reqs }
 *   rmdir:         { samples: 0, unit: reqs }
 *   rename:        { samples: 0, unit: reqs }
 *   getattr:       { samples: 1, unit: reqs }
 *   setattr:       { samples: 0, unit: reqs }
 *   getxattr:      { samples: 0, unit: reqs }
 *   setxattr:      { samples: 0, unit: reqs }
 *   statfs:        { samples: 0, unit: reqs }
 *   sync:          { samples: 0, unit: reqs }
 *
 * Example of output on OST:
 *
 * job_stats:
 * - job_id:        dd.4854
 *   snapshot_time: 1322494602
 *   read:          { samples: 0, unit: bytes, min: 0, max: 0, sum: 0 }
 *   write:         { samples: 1, unit: bytes, min: 4096, max: 4096, sum: 4096 }
 *   setattr:       { samples: 0, unit: reqs }
 *   punch:         { samples: 0, unit: reqs }
 *   sync:          { samples: 0, unit: reqs }
 */
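/*
 * The YAML above is what userspace sees when reading this proc file,
 * e.g. with something like:
 *
 *	lctl get_param mdt.*.job_stats
 *	lctl get_param obdfilter.*.job_stats
 */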
static const char spaces[] = "                    ";

static inline int width(const char *str, int len)
{
	return len - min((int)strlen(str), 15);
}

static int lprocfs_jobstats_seq_show(struct seq_file *p, void *v)
{
	struct job_stat *job = v;
	struct lprocfs_stats *s;
	struct lprocfs_counter ret;
	struct lprocfs_counter_header *cntr_header;
	int i;

	if (v == SEQ_START_TOKEN) {
		seq_printf(p, "job_stats:\n");
		return 0;
	}

	/* Replace any non-printable characters in the jobid with '?', so
	 * that the jobid output is confined to a single line. */
	seq_printf(p, "- %-16s ", "job_id:");
	for (i = 0; i < strlen(job->js_jobid); i++) {
		if (isprint(job->js_jobid[i]) != 0)
			seq_putc(p, job->js_jobid[i]);
		else
			seq_putc(p, '?');
	}
	seq_putc(p, '\n');

	seq_printf(p, "  %-16s %lld\n", "snapshot_time:", job->js_timestamp);

	s = job->js_stats;
	for (i = 0; i < s->ls_num; i++) {
		cntr_header = &s->ls_cnt_header[i];
		lprocfs_stats_collect(s, i, &ret);

		seq_printf(p, "  %s:%.*s { samples: %11llu",
			   cntr_header->lc_name,
			   width(cntr_header->lc_name, 15), spaces,
			   ret.lc_count);
		if (cntr_header->lc_units[0] != '\0')
			seq_printf(p, ", unit: %5s", cntr_header->lc_units);

		if (cntr_header->lc_config & LPROCFS_CNTR_AVGMINMAX) {
			seq_printf(p, ", min:%8llu, max:%8llu, sum:%16llu",
				   ret.lc_count ? ret.lc_min : 0,
				   ret.lc_count ? ret.lc_max : 0,
				   ret.lc_count ? ret.lc_sum : 0);
		}
		if (cntr_header->lc_config & LPROCFS_CNTR_STDDEV) {
			seq_printf(p, ", sumsq: %18llu",
				   ret.lc_count ? ret.lc_sumsquare : 0);
		}

		seq_printf(p, " }\n");
	}
	return 0;
}
static const struct seq_operations lprocfs_jobstats_seq_sops = {
	.start	= lprocfs_jobstats_seq_start,
	.stop	= lprocfs_jobstats_seq_stop,
	.next	= lprocfs_jobstats_seq_next,
	.show	= lprocfs_jobstats_seq_show,
};

static int lprocfs_jobstats_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int rc;

	rc = LPROCFS_ENTRY_CHECK(inode);
	if (rc < 0)
		return rc;

	rc = seq_open(file, &lprocfs_jobstats_seq_sops);
	if (rc)
		return rc;
	seq = file->private_data;
	seq->private = PDE_DATA(inode);
	return 0;
}
static ssize_t lprocfs_jobstats_seq_write(struct file *file,
					  const char __user *buf,
					  size_t len, loff_t *off)
{
	struct seq_file *seq = file->private_data;
	struct obd_job_stats *stats = seq->private;
	char jobid[LUSTRE_JOBID_SIZE];
	struct job_stat *job;

	if (len == 0 || len >= LUSTRE_JOBID_SIZE)
		return -EINVAL;

	if (stats->ojs_hash == NULL)
		return -ENODEV;

	if (copy_from_user(jobid, buf, len))
		return -EFAULT;
	jobid[len] = 0;

	/* Trim '\n' if any */
	if (jobid[len - 1] == '\n')
		jobid[len - 1] = 0;

	if (strcmp(jobid, "clear") == 0) {
		lprocfs_job_cleanup(stats, -99);
		return len;
	}

	if (strlen(jobid) == 0)
		return -EINVAL;

	job = cfs_hash_lookup(stats->ojs_hash, jobid);
	if (!job)
		return -EINVAL;

	cfs_hash_del_key(stats->ojs_hash, jobid);
	job_putref(job);
	return len;
}
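/*
 * Usage sketch for the write handler above: writing "clear" drops all
 * accumulated jobstats, while writing a specific jobid drops only that
 * entry, e.g. (exact parameter path depends on the target type):
 *
 *	lctl set_param obdfilter.*.job_stats=clear
 *	lctl set_param mdt.*.job_stats=<jobid>
 */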
/**
 * Clean up the seq file state when the /proc file is closed.
 *
 * This also expires old job stats from the cache after they have been
 * printed in case the system is idle and not generating new jobstats.
 *
 * \param[in] inode	struct inode for seq file being closed
 * \param[in] file	struct file for seq file being closed
 *
 * \retval		0 on success
 * \retval		negative errno on failure
 */
static int lprocfs_jobstats_seq_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct obd_job_stats *stats = seq->private;

	lprocfs_job_cleanup(stats, stats->ojs_cleanup_interval);

	return lprocfs_seq_release(inode, file);
}
static const struct file_operations lprocfs_jobstats_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = lprocfs_jobstats_seq_open,
	.read	 = seq_read,
	.write	 = lprocfs_jobstats_seq_write,
	.llseek	 = seq_lseek,
	.release = lprocfs_jobstats_seq_release,
};
int lprocfs_job_stats_init(struct obd_device *obd, int cntr_num,
			   cntr_init_callback init_fn)
{
	struct proc_dir_entry *entry;
	struct obd_job_stats *stats;

	LASSERT(obd->obd_proc_entry != NULL);
	LASSERT(obd->obd_type->typ_name);

	if (cntr_num <= 0)
		return -EINVAL;

	if (init_fn == NULL)
		return -EINVAL;

	/* Currently needs to be a target due to the use of obt_jobstats. */
	if (strcmp(obd->obd_type->typ_name, LUSTRE_MDT_NAME) != 0 &&
	    strcmp(obd->obd_type->typ_name, LUSTRE_OST_NAME) != 0) {
		CERROR("%s: invalid device type %s for job stats: rc = %d\n",
		       obd->obd_name, obd->obd_type->typ_name, -EINVAL);
		return -EINVAL;
	}
	stats = &obd->u.obt.obt_jobstats;

	LASSERT(stats->ojs_hash == NULL);
	stats->ojs_hash = cfs_hash_create("JOB_STATS",
					  HASH_JOB_STATS_CUR_BITS,
					  HASH_JOB_STATS_MAX_BITS,
					  HASH_JOB_STATS_BKT_BITS, 0,
					  CFS_HASH_MIN_THETA,
					  CFS_HASH_MAX_THETA,
					  &job_stats_hash_ops,
					  CFS_HASH_DEFAULT);
	if (stats->ojs_hash == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&stats->ojs_list);
	rwlock_init(&stats->ojs_lock);
	stats->ojs_cntr_num = cntr_num;
	stats->ojs_cntr_init_fn = init_fn;
	stats->ojs_cleanup_interval = 600; /* 10 mins by default */
	stats->ojs_last_cleanup = ktime_get_real_seconds();

	entry = lprocfs_add_simple(obd->obd_proc_entry, "job_stats", stats,
				   &lprocfs_jobstats_seq_fops);
	if (IS_ERR(entry)) {
		lprocfs_job_stats_fini(obd);
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(lprocfs_job_stats_init);
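/*
 * Illustrative registration sketch: a target supplies the number of
 * counters and a counter-init callback matching cntr_init_callback.  The
 * enum values and function names below are hypothetical; only
 * lprocfs_job_stats_init() and lprocfs_counter_init() are existing APIs:
 *
 *	static void ex_job_stats_counter_init(struct lprocfs_stats *stats)
 *	{
 *		lprocfs_counter_init(stats, EX_JOB_OPEN, 0, "open", "reqs");
 *		lprocfs_counter_init(stats, EX_JOB_CLOSE, 0, "close", "reqs");
 *	}
 *
 *	rc = lprocfs_job_stats_init(obd, EX_JOB_LAST,
 *				    ex_job_stats_counter_init);
 */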
int lprocfs_job_interval_seq_show(struct seq_file *m, void *data)
{
	struct obd_device *obd = m->private;
	struct obd_job_stats *stats;

	if (obd == NULL)
		return -ENODEV;

	stats = &obd->u.obt.obt_jobstats;
	seq_printf(m, "%d\n", stats->ojs_cleanup_interval);
	return 0;
}
EXPORT_SYMBOL(lprocfs_job_interval_seq_show);
ssize_t
lprocfs_job_interval_seq_write(struct file *file, const char __user *buffer,
			       size_t count, loff_t *off)
{
	struct obd_device *obd;
	struct obd_job_stats *stats;
	unsigned int val;
	int rc;

	obd = ((struct seq_file *)file->private_data)->private;
	if (obd == NULL)
		return -ENODEV;

	stats = &obd->u.obt.obt_jobstats;

	rc = kstrtouint_from_user(buffer, count, 0, &val);
	if (rc)
		return rc;

	stats->ojs_cleanup_interval = val;
	lprocfs_job_cleanup(stats, stats->ojs_cleanup_interval);
	return count;
}
EXPORT_SYMBOL(lprocfs_job_interval_seq_write);
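/*
 * Usage sketch for the interval handlers above: the cleanup interval is
 * exposed as a per-target tunable, typically read and set with something
 * like:
 *
 *	lctl get_param mdt.*.job_cleanup_interval
 *	lctl set_param obdfilter.*.job_cleanup_interval=30
 */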
#endif /* CONFIG_PROC_FS */