* GPL HEADER END
*/
/*
- * Copyright (c) 2011 Whamcloud, Inc.
+ * Copyright (c) 2012, 2013, Intel Corporation.
* Use is subject to license terms.
*
* Author: Niu Yawei <niu@whamcloud.com>
LASSERT(atomic_read(&job->js_refcount) == 0);
LASSERT(job->js_jobstats);
- cfs_write_lock(&job->js_jobstats->ojs_lock);
+ write_lock(&job->js_jobstats->ojs_lock);
cfs_list_del_init(&job->js_list);
- cfs_write_unlock(&job->js_jobstats->ojs_lock);
+ write_unlock(&job->js_jobstats->ojs_lock);
lprocfs_free_stats(&job->js_stats);
OBD_FREE_PTR(job);
.hs_exit = job_stat_exit,
};
+/* cfs_hash iterator callback: drop one job_stat entry if it is stale.
+ *
+ * \param data points to a time_t cutoff.  An entry is removed when its
+ * js_timestamp is older than the cutoff, or when the cutoff is 0, which
+ * is the "remove everything" sentinel (used from lprocfs_job_stats_fini).
+ *
+ * Runs with the hash bucket already locked by cfs_hash_for_each_safe(),
+ * hence the _locked delete variant.  NOTE(review): final teardown of the
+ * entry (ojs_list unlink + free) is presumed to happen via the .hs_exit
+ * hook (job_stat_exit) once the hash drops its reference — confirm
+ * against the cfs_hash implementation.
+ */
+static int job_iter_callback(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+			     cfs_hlist_node_t *hnode, void *data)
+{
+	time_t oldest = *((time_t *)data);
+	struct job_stat *job;
+
+	job = cfs_hlist_entry(hnode, struct job_stat, js_hash);
+	if (!oldest || job->js_timestamp < oldest)
+		cfs_hash_bd_del_locked(hs, bd, hnode);
+
+	return 0;
+}
+
+/* Purge job_stat entries that have been idle longer than
+ * ojs_cleanup_interval.  This replaces the old ojs_cleanup_timer: it is
+ * now called opportunistically from the stats-update path (job_alloc
+ * callers) and explicitly when the interval is rewritten via proc.
+ *
+ * \param force  when false, the scan is rate-limited to at most once per
+ *               cleanup interval (cheap early return otherwise); when
+ *               true (proc tunable just changed), scan immediately.
+ *
+ * An interval of 0 disables expiry entirely.  ojs_last_cleanup is
+ * re-read after the scan rather than reusing 'now', since the hash walk
+ * itself can take time.
+ */
+static void lprocfs_job_cleanup(struct obd_job_stats *stats, bool force)
+{
+	time_t oldest, now;
+
+	if (stats->ojs_cleanup_interval == 0)
+		return;
+
+	now = cfs_time_current_sec();
+	if (!force && now < stats->ojs_last_cleanup +
+			   stats->ojs_cleanup_interval)
+		return;
+
+	/* oldest is non-zero here (interval > 0), so job_iter_callback
+	 * performs an age check rather than a delete-all. */
+	oldest = now - stats->ojs_cleanup_interval;
+	cfs_hash_for_each_safe(stats->ojs_hash, job_iter_callback,
+			       &oldest);
+	stats->ojs_last_cleanup = cfs_time_current_sec();
+}
+
static struct job_stat *job_alloc(char *jobid, struct obd_job_stats *jobs)
{
struct job_stat *job;
LASSERT(stats && stats->ojs_hash);
+ lprocfs_job_cleanup(stats, false);
+
if (!jobid || !strlen(jobid))
RETURN(-EINVAL);
if (job2 != job) {
job_putref(job);
job = job2;
- LASSERT(!cfs_list_empty(&job->js_list));
+ /* We cannot LASSERT(!cfs_list_empty(&job->js_list)) here,
+ * since we just lost the race for inserting "job" into the
+ * ojs_list, and some other thread is doing it _right_now_.
+ * Instead, be content the other thread is doing this, since
+ * "job2" was initialized in job_alloc() already. LU-2163 */
} else {
LASSERT(cfs_list_empty(&job->js_list));
- cfs_write_lock(&stats->ojs_lock);
+ write_lock(&stats->ojs_lock);
cfs_list_add_tail(&job->js_list, &stats->ojs_list);
- cfs_write_unlock(&stats->ojs_lock);
+ write_unlock(&stats->ojs_lock);
}
found:
}
EXPORT_SYMBOL(lprocfs_job_stats_log);
-static int job_iter_callback(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *data)
-{
- time_t oldest = *((time_t *)data);
- struct job_stat *job;
-
- job = cfs_hlist_entry(hnode, struct job_stat, js_hash);
- if (!oldest || job->js_timestamp < oldest)
- cfs_hash_bd_del_locked(hs, bd, hnode);
-
- return 0;
-}
-
void lprocfs_job_stats_fini(struct obd_device *obd)
{
struct obd_job_stats *stats = &obd->u.obt.obt_jobstats;
if (stats->ojs_hash == NULL)
return;
- cfs_timer_disarm(&stats->ojs_cleanup_timer);
cfs_hash_for_each_safe(stats->ojs_hash, job_iter_callback, &oldest);
cfs_hash_putref(stats->ojs_hash);
stats->ojs_hash = NULL;
loff_t off = *pos;
struct job_stat *job;
- cfs_read_lock(&stats->ojs_lock);
+ read_lock(&stats->ojs_lock);
if (off == 0)
return SEQ_START_TOKEN;
off--;
{
struct obd_job_stats *stats = p->private;
- cfs_read_unlock(&stats->ojs_lock);
+ read_unlock(&stats->ojs_lock);
}
static void *lprocfs_jobstats_seq_next(struct seq_file *p, void *v, loff_t *pos)
static int lprocfs_jobstats_seq_show(struct seq_file *p, void *v)
{
- struct job_stat *job = v;
- struct lprocfs_stats *s;
- struct lprocfs_counter ret, *cntr;
- int i;
+ struct job_stat *job = v;
+ struct lprocfs_stats *s;
+ struct lprocfs_counter ret;
+ struct lprocfs_counter *cntr;
+ struct lprocfs_counter_header *cntr_header;
+ int i;
if (v == SEQ_START_TOKEN) {
seq_printf(p, "job_stats:\n");
s = job->js_stats;
for (i = 0; i < s->ls_num; i++) {
- cntr = &(s->ls_percpu[0]->lp_cntr[i]);
+ cntr = lprocfs_stats_counter_get(s, 0, i);
+ cntr_header = &s->ls_cnt_header[i];
lprocfs_stats_collect(s, i, &ret);
seq_printf(p, " %s:%.*s { samples: %11"LPF64"u",
- cntr->lc_name, width(cntr->lc_name, 15), spaces,
+ cntr_header->lc_name,
+ width(cntr_header->lc_name, 15), spaces,
ret.lc_count);
- if (cntr->lc_units[0] != '\0')
- seq_printf(p, ", unit: %5s", cntr->lc_units);
+ if (cntr_header->lc_units[0] != '\0')
+ seq_printf(p, ", unit: %5s", cntr_header->lc_units);
- if (cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
+ if (cntr_header->lc_config & LPROCFS_CNTR_AVGMINMAX) {
seq_printf(p, ", min:%8"LPF64"u, max:%8"LPF64"u,"
" sum:%16"LPF64"u",
ret.lc_count ? ret.lc_min : 0,
ret.lc_count ? ret.lc_max : 0,
ret.lc_count ? ret.lc_sum : 0);
}
- if (cntr->lc_config & LPROCFS_CNTR_STDDEV) {
+ if (cntr_header->lc_config & LPROCFS_CNTR_STDDEV) {
seq_printf(p, ", sumsq: %18"LPF64"u",
ret.lc_count ? ret.lc_sumsquare : 0);
}
.release = lprocfs_seq_release,
};
-static void job_cleanup_callback(unsigned long data)
-{
- struct obd_job_stats *stats = (struct obd_job_stats *)data;
- time_t oldest;
-
- if (stats->ojs_cleanup_interval) {
- oldest = cfs_time_current_sec() - stats->ojs_cleanup_interval;
- cfs_hash_for_each_safe(stats->ojs_hash, job_iter_callback,
- &oldest);
- cfs_timer_arm(&stats->ojs_cleanup_timer,
- cfs_time_shift(stats->ojs_cleanup_interval));
- }
-}
-
int lprocfs_job_stats_init(struct obd_device *obd, int cntr_num,
cntr_init_callback init_fn)
{
RETURN(-ENOMEM);
CFS_INIT_LIST_HEAD(&stats->ojs_list);
- cfs_rwlock_init(&stats->ojs_lock);
+ rwlock_init(&stats->ojs_lock);
stats->ojs_cntr_num = cntr_num;
stats->ojs_cntr_init_fn = init_fn;
- cfs_timer_init(&stats->ojs_cleanup_timer, job_cleanup_callback, stats);
stats->ojs_cleanup_interval = 600; /* 10 mins by default */
- cfs_timer_arm(&stats->ojs_cleanup_timer,
- cfs_time_shift(stats->ojs_cleanup_interval));
+ stats->ojs_last_cleanup = cfs_time_current_sec();
LPROCFS_WRITE_ENTRY();
entry = create_proc_entry("job_stats", 0644, obd->obd_proc_entry);
return rc;
stats->ojs_cleanup_interval = val;
- if (!stats->ojs_cleanup_interval)
- cfs_timer_disarm(&stats->ojs_cleanup_timer);
- else
- cfs_timer_arm(&stats->ojs_cleanup_timer,
- cfs_time_shift(stats->ojs_cleanup_interval));
+ lprocfs_job_cleanup(stats, true);
return count;