* GPL HEADER END
*/
/*
- * Copyright (c) 2011 Whamcloud, Inc.
+ * Copyright (c) 2012, 2014, Intel Corporation.
* Use is subject to license terms.
*
* Author: Niu Yawei <niu@whamcloud.com>
* lustre/obdclass/lprocfs_jobstats.c
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_CLASS
-#ifndef __KERNEL__
-# include <liblustre.h>
-#endif
#include <obd_class.h>
#include <lprocfs_status.h>
#include <lustre/lustre_idl.h>
-#if defined(LPROCFS)
+#ifdef CONFIG_PROC_FS
/*
 * JobID formats & JobID environment variable names for supported
 * jobstats modes.
 */
struct job_stat {
- cfs_hlist_node_t js_hash;
- cfs_list_t js_list;
- cfs_atomic_t js_refcount;
- char js_jobid[JOBSTATS_JOBID_SIZE];
- time_t js_timestamp; /* seconds */
- struct lprocfs_stats *js_stats;
- struct obd_job_stats *js_jobstats;
+ struct hlist_node js_hash; /* hash struct for this jobid */
+ struct list_head js_list; /* on ojs_list, with ojs_lock */
+ atomic_t js_refcount; /* num users of this struct */
+ char js_jobid[LUSTRE_JOBID_SIZE]; /* job name */
+ time_t js_timestamp; /* seconds of most recent stat*/
+ struct lprocfs_stats *js_stats; /* per-job statistics */
+ struct obd_job_stats *js_jobstats; /* for accessing ojs_lock */
};
+/* cfs_hash callback: hash the NUL-terminated jobid key with djb2,
+ * masked down to a bucket index for this hash table. */
-static unsigned job_stat_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+static unsigned
+job_stat_hash(struct cfs_hash *hs, const void *key, unsigned mask)
{
return cfs_hash_djb2_hash(key, strlen(key), mask);
}
+/* cfs_hash callback: return the lookup key (jobid string) for a node. */
-static void *job_stat_key(cfs_hlist_node_t *hnode)
+static void *job_stat_key(struct hlist_node *hnode)
{
struct job_stat *job;
- job = cfs_hlist_entry(hnode, struct job_stat, js_hash);
+ job = hlist_entry(hnode, struct job_stat, js_hash);
return job->js_jobid;
}
+/* cfs_hash callback: compare a lookup key against a node's jobid.
+ * Lengths must match exactly so one jobid never prefix-matches another. */
-static int job_stat_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int job_stat_keycmp(const void *key, struct hlist_node *hnode)
{
struct job_stat *job;
- job = cfs_hlist_entry(hnode, struct job_stat, js_hash);
+ job = hlist_entry(hnode, struct job_stat, js_hash);
return (strlen(job->js_jobid) == strlen(key)) &&
!strncmp(job->js_jobid, key, strlen(key));
}
+/* cfs_hash callback: map a hash node back to its job_stat container. */
-static void *job_stat_object(cfs_hlist_node_t *hnode)
+static void *job_stat_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct job_stat, js_hash);
+ return hlist_entry(hnode, struct job_stat, js_hash);
}
+/* cfs_hash callback: take a reference on the entry (hash lock held). */
-static void job_stat_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void job_stat_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct job_stat *job;
- job = cfs_hlist_entry(hnode, struct job_stat, js_hash);
- cfs_atomic_inc(&job->js_refcount);
+ job = hlist_entry(hnode, struct job_stat, js_hash);
+ atomic_inc(&job->js_refcount);
}
static void job_free(struct job_stat *job)
{
LASSERT(atomic_read(&job->js_refcount) == 0);
- LASSERT(job->js_jobstats);
+ LASSERT(job->js_jobstats != NULL);
- cfs_write_lock(&job->js_jobstats->ojs_lock);
- cfs_list_del_init(&job->js_list);
- cfs_write_unlock(&job->js_jobstats->ojs_lock);
+ write_lock(&job->js_jobstats->ojs_lock);
+ list_del_init(&job->js_list);
+ write_unlock(&job->js_jobstats->ojs_lock);
lprocfs_free_stats(&job->js_stats);
OBD_FREE_PTR(job);
job_free(job);
}
+/* cfs_hash callback: drop a reference with the bucket lock held; the
+ * final put releases the entry via job_putref(). */
-static void job_stat_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void job_stat_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct job_stat *job;
- job = cfs_hlist_entry(hnode, struct job_stat, js_hash);
+ job = hlist_entry(hnode, struct job_stat, js_hash);
job_putref(job);
}
+/* cfs_hash callback at hash teardown.  The hash is expected to be empty
+ * by then (entries are force-expired in lprocfs_job_stats_fini), so any
+ * remaining item indicates a reference-counting bug. */
-static void job_stat_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void job_stat_exit(struct cfs_hash *hs, struct hlist_node *hnode)
{
- CERROR("Should not have any items!");
+ CERROR("should not have any items\n");
}
-static cfs_hash_ops_t job_stats_hash_ops = {
+static struct cfs_hash_ops job_stats_hash_ops = {
.hs_hash = job_stat_hash,
.hs_key = job_stat_key,
.hs_keycmp = job_stat_keycmp,
.hs_exit = job_stat_exit,
};
-static int job_iter_callback(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *data)
+/**
+ * Jobstats expiry iterator to clean up old jobids
+ *
+ * Called for each job_stat structure on this device, it should delete stats
+ * older than the specified \a oldest_time in seconds. If \a oldest_time is
+ * in the future then this will delete all statistics (e.g. during shutdown).
+ *
+ * \param[in] hs hash of all jobids on this device
+ * \param[in] bd hash bucket containing this jobid
+ * \param[in] hnode hash structure for this jobid
+ * \param[in] data pointer to stats expiry time in seconds
+ */
+static int job_cleanup_iter_callback(struct cfs_hash *hs,
+ struct cfs_hash_bd *bd,
+ struct hlist_node *hnode, void *data)
{
- time_t oldest = *((time_t *)data);
+ time_t oldest_time = *((time_t *)data);
struct job_stat *job;
- job = cfs_hlist_entry(hnode, struct job_stat, js_hash);
- if (!oldest || job->js_timestamp < oldest)
+ job = hlist_entry(hnode, struct job_stat, js_hash);
+ if (job->js_timestamp < oldest_time)
cfs_hash_bd_del_locked(hs, bd, hnode);
return 0;
}
-static void lprocfs_job_cleanup(struct obd_job_stats *stats, bool force)
+/**
+ * Clean up jobstats that were updated more than \a before seconds ago.
+ *
+ * Since this function may be called frequently, do not scan all of the
+ * jobstats on each call, only twice per cleanup interval. That means stats
+ * may be around on average cleanup_interval / 4 longer than necessary,
+ * but that is not considered harmful.
+ *
+ * If \a before is negative then this will force clean up all jobstats due
+ * to the expiry time being in the future (e.g. at shutdown).
+ *
+ * If there is already another thread doing jobstats cleanup, don't try to
+ * do this again in the current thread unless this is a force cleanup.
+ *
+ * \param[in] stats	structure tracking all job stats for this device
+ * \param[in] before expire jobstats updated more than this many seconds ago
+ */
+static void lprocfs_job_cleanup(struct obd_job_stats *stats, int before)
{
- time_t oldest, now;
+ time_t now = cfs_time_current_sec();
+ time_t oldest;
- if (stats->ojs_cleanup_interval == 0)
- return;
+ if (likely(before >= 0)) {
+ unsigned int cleanup_interval = stats->ojs_cleanup_interval;
+
+ if (cleanup_interval == 0 || before == 0)
+ return;
+
+ if (now < stats->ojs_last_cleanup + cleanup_interval / 2)
+ return;
+
+ if (stats->ojs_cleaning)
+ return;
+ }
- now = cfs_time_current_sec();
- if (!force && now < stats->ojs_last_cleanup +
- stats->ojs_cleanup_interval)
+ write_lock(&stats->ojs_lock);
+ if (before >= 0 && stats->ojs_cleaning) {
+ write_unlock(&stats->ojs_lock);
return;
+ }
- oldest = now - stats->ojs_cleanup_interval;
- cfs_hash_for_each_safe(stats->ojs_hash, job_iter_callback,
+ stats->ojs_cleaning = true;
+ write_unlock(&stats->ojs_lock);
+
+ /* Can't hold ojs_lock over hash iteration, since it is grabbed by
+ * job_cleanup_iter_callback()
+ * ->cfs_hash_bd_del_locked()
+ * ->job_putref()
+ * ->job_free()
+ *
+ * Holding ojs_lock isn't necessary for safety of the hash iteration,
+ * since locking of the hash is handled internally, but there isn't
+ * any benefit to having multiple threads doing cleanup at one time.
+ */
+ oldest = now - before;
+ cfs_hash_for_each_safe(stats->ojs_hash, job_cleanup_iter_callback,
&oldest);
+
+ write_lock(&stats->ojs_lock);
+ stats->ojs_cleaning = false;
stats->ojs_last_cleanup = cfs_time_current_sec();
+ write_unlock(&stats->ojs_lock);
}
static struct job_stat *job_alloc(char *jobid, struct obd_job_stats *jobs)
{
struct job_stat *job;
- LASSERT(jobs->ojs_cntr_num && jobs->ojs_cntr_init_fn);
-
OBD_ALLOC_PTR(job);
if (job == NULL)
return NULL;
jobs->ojs_cntr_init_fn(job->js_stats);
- memcpy(job->js_jobid, jobid, JOBSTATS_JOBID_SIZE);
+ memcpy(job->js_jobid, jobid, LUSTRE_JOBID_SIZE);
job->js_timestamp = cfs_time_current_sec();
job->js_jobstats = jobs;
- CFS_INIT_HLIST_NODE(&job->js_hash);
- CFS_INIT_LIST_HEAD(&job->js_list);
- cfs_atomic_set(&job->js_refcount, 1);
+ INIT_HLIST_NODE(&job->js_hash);
+ INIT_LIST_HEAD(&job->js_list);
+ atomic_set(&job->js_refcount, 1);
return job;
}
struct job_stat *job, *job2;
ENTRY;
- LASSERT(stats && stats->ojs_hash);
+ LASSERT(stats != NULL);
+ LASSERT(stats->ojs_hash != NULL);
- lprocfs_job_cleanup(stats, false);
+ if (event >= stats->ojs_cntr_num)
+ RETURN(-EINVAL);
- if (!jobid || !strlen(jobid))
+ if (jobid == NULL || strlen(jobid) == 0)
RETURN(-EINVAL);
- if (strlen(jobid) >= JOBSTATS_JOBID_SIZE) {
+ if (strlen(jobid) >= LUSTRE_JOBID_SIZE) {
CERROR("Invalid jobid size (%lu), expect(%d)\n",
- (unsigned long)strlen(jobid) + 1, JOBSTATS_JOBID_SIZE);
+ (unsigned long)strlen(jobid) + 1, LUSTRE_JOBID_SIZE);
RETURN(-EINVAL);
}
if (job)
goto found;
+ lprocfs_job_cleanup(stats, stats->ojs_cleanup_interval);
+
job = job_alloc(jobid, stats);
if (job == NULL)
RETURN(-ENOMEM);
if (job2 != job) {
job_putref(job);
job = job2;
- LASSERT(!cfs_list_empty(&job->js_list));
+ /* We cannot LASSERT(!list_empty(&job->js_list)) here,
+ * since we just lost the race for inserting "job" into the
+ * ojs_list, and some other thread is doing it _right_now_.
+ * Instead, be content the other thread is doing this, since
+ * "job2" was initialized in job_alloc() already. LU-2163 */
} else {
- LASSERT(cfs_list_empty(&job->js_list));
- cfs_write_lock(&stats->ojs_lock);
- cfs_list_add_tail(&job->js_list, &stats->ojs_list);
- cfs_write_unlock(&stats->ojs_lock);
+ LASSERT(list_empty(&job->js_list));
+ write_lock(&stats->ojs_lock);
+ list_add_tail(&job->js_list, &stats->ojs_list);
+ write_unlock(&stats->ojs_lock);
}
found:
LASSERT(stats == job->js_jobstats);
- LASSERT(stats->ojs_cntr_num > event);
job->js_timestamp = cfs_time_current_sec();
lprocfs_counter_add(job->js_stats, event, amount);
job_putref(job);
+
RETURN(0);
}
EXPORT_SYMBOL(lprocfs_job_stats_log);
+/* Tear down jobstats for \a obd: force-expire every cached entry (the
+ * negative "before" puts the expiry time in the future, per
+ * lprocfs_job_cleanup), then drop the hash-table reference. */
void lprocfs_job_stats_fini(struct obd_device *obd)
{
struct obd_job_stats *stats = &obd->u.obt.obt_jobstats;
- time_t oldest = 0;
if (stats->ojs_hash == NULL)
return;
- cfs_hash_for_each_safe(stats->ojs_hash, job_iter_callback, &oldest);
+
+ lprocfs_job_cleanup(stats, -99);
cfs_hash_putref(stats->ojs_hash);
stats->ojs_hash = NULL;
- LASSERT(cfs_list_empty(&stats->ojs_list));
+ LASSERT(list_empty(&stats->ojs_list));
}
EXPORT_SYMBOL(lprocfs_job_stats_fini);
loff_t off = *pos;
struct job_stat *job;
- cfs_read_lock(&stats->ojs_lock);
+ read_lock(&stats->ojs_lock);
if (off == 0)
return SEQ_START_TOKEN;
off--;
- cfs_list_for_each_entry(job, &stats->ojs_list, js_list) {
+ list_for_each_entry(job, &stats->ojs_list, js_list) {
if (!off--)
return job;
}
{
struct obd_job_stats *stats = p->private;
- cfs_read_unlock(&stats->ojs_lock);
+ read_unlock(&stats->ojs_lock);
}
static void *lprocfs_jobstats_seq_next(struct seq_file *p, void *v, loff_t *pos)
{
struct obd_job_stats *stats = p->private;
struct job_stat *job;
- cfs_list_t *next;
+ struct list_head *next;
++*pos;
if (v == SEQ_START_TOKEN) {
}
return next == &stats->ojs_list ? NULL :
- cfs_list_entry(next, struct job_stat, js_list);
+ list_entry(next, struct job_stat, js_list);
}
/*
* Example of output on MDT:
*
* job_stats:
- * - job_id: test_id.222.25844
+ * - job_id: dd.4854
* snapshot_time: 1322494486
- * open: { samples: 3, unit: reqs }
- * close: { samples: 3, unit: reqs }
+ * open: { samples: 1, unit: reqs }
+ * close: { samples: 1, unit: reqs }
* mknod: { samples: 0, unit: reqs }
* link: { samples: 0, unit: reqs }
* unlink: { samples: 0, unit: reqs }
* mkdir: { samples: 0, unit: reqs }
* rmdir: { samples: 0, unit: reqs }
- * rename: { samples: 1, unit: reqs }
- * getattr: { samples: 7, unit: reqs }
+ * rename: { samples: 0, unit: reqs }
+ * getattr: { samples: 1, unit: reqs }
* setattr: { samples: 0, unit: reqs }
* getxattr: { samples: 0, unit: reqs }
* setxattr: { samples: 0, unit: reqs }
* Example of output on OST:
*
* job_stats:
- * - job_id 4854
+ * - job_id dd.4854
* snapshot_time: 1322494602
- * read: { samples: 0, unit: bytes, min: 0, max: 0, sum: 0 }
- * write: { samples: 1, unit: bytes, min: 10, max: 10, sum: 10 }
- * setattr: { samples: 0, unit: reqs }
- * punch: { samples: 0, unit: reqs }
- * sync: { samples: 0, unit: reqs }
+ * read: { samples: 0, unit: bytes, min: 0, max: 0, sum: 0 }
+ * write: { samples: 1, unit: bytes, min: 4096, max: 4096, sum: 4096 }
+ * setattr: { samples: 0, unit: reqs }
+ * punch: { samples: 0, unit: reqs }
+ * sync: { samples: 0, unit: reqs }
*/
static const char spaces[] = " ";
static int lprocfs_jobstats_seq_show(struct seq_file *p, void *v)
{
- struct job_stat *job = v;
- struct lprocfs_stats *s;
- struct lprocfs_counter ret, *cntr;
- int i;
+ struct job_stat *job = v;
+ struct lprocfs_stats *s;
+ struct lprocfs_counter ret;
+ struct lprocfs_counter_header *cntr_header;
+ int i;
if (v == SEQ_START_TOKEN) {
seq_printf(p, "job_stats:\n");
s = job->js_stats;
for (i = 0; i < s->ls_num; i++) {
- cntr = &(s->ls_percpu[0]->lp_cntr[i]);
+ cntr_header = &s->ls_cnt_header[i];
lprocfs_stats_collect(s, i, &ret);
seq_printf(p, " %s:%.*s { samples: %11"LPF64"u",
- cntr->lc_name, width(cntr->lc_name, 15), spaces,
+ cntr_header->lc_name,
+ width(cntr_header->lc_name, 15), spaces,
ret.lc_count);
- if (cntr->lc_units[0] != '\0')
- seq_printf(p, ", unit: %5s", cntr->lc_units);
+ if (cntr_header->lc_units[0] != '\0')
+ seq_printf(p, ", unit: %5s", cntr_header->lc_units);
- if (cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
+ if (cntr_header->lc_config & LPROCFS_CNTR_AVGMINMAX) {
seq_printf(p, ", min:%8"LPF64"u, max:%8"LPF64"u,"
" sum:%16"LPF64"u",
ret.lc_count ? ret.lc_min : 0,
ret.lc_count ? ret.lc_max : 0,
ret.lc_count ? ret.lc_sum : 0);
}
- if (cntr->lc_config & LPROCFS_CNTR_STDDEV) {
+ if (cntr_header->lc_config & LPROCFS_CNTR_STDDEV) {
seq_printf(p, ", sumsq: %18"LPF64"u",
ret.lc_count ? ret.lc_sumsquare : 0);
}
return 0;
}
-struct seq_operations lprocfs_jobstats_seq_sops = {
+static const struct seq_operations lprocfs_jobstats_seq_sops = {
start: lprocfs_jobstats_seq_start,
stop: lprocfs_jobstats_seq_stop,
next: lprocfs_jobstats_seq_next,
+/* Open handler for the "job_stats" proc file: validate the proc entry,
+ * set up seq_file iteration, and stash the obd_job_stats pointer as the
+ * seq_file private data for the start/next/show callbacks. */
static int lprocfs_jobstats_seq_open(struct inode *inode, struct file *file)
{
- struct proc_dir_entry *dp = PDE(inode);
struct seq_file *seq;
int rc;
- if (LPROCFS_ENTRY_AND_CHECK(dp))
- return -ENOENT;
+ rc = LPROCFS_ENTRY_CHECK(inode);
+ if (rc < 0)
+ return rc;
rc = seq_open(file, &lprocfs_jobstats_seq_sops);
- if (rc) {
- LPROCFS_EXIT();
+ if (rc)
return rc;
- }
seq = file->private_data;
- seq->private = dp->data;
+ seq->private = PDE_DATA(inode);
return 0;
}
-static ssize_t lprocfs_jobstats_seq_write(struct file *file, const char *buf,
+static ssize_t lprocfs_jobstats_seq_write(struct file *file,
+ const char __user *buf,
size_t len, loff_t *off)
{
struct seq_file *seq = file->private_data;
struct obd_job_stats *stats = seq->private;
- char jobid[JOBSTATS_JOBID_SIZE];
- int all = 0;
+ char jobid[LUSTRE_JOBID_SIZE];
struct job_stat *job;
- if (!memcmp(buf, "clear", strlen("clear"))) {
- all = 1;
- } else if (len < JOBSTATS_JOBID_SIZE) {
- memset(jobid, 0, JOBSTATS_JOBID_SIZE);
- /* Trim '\n' if any */
- if (buf[len - 1] == '\n')
- memcpy(jobid, buf, len - 1);
- else
- memcpy(jobid, buf, len);
- } else {
+ if (len == 0 || len >= LUSTRE_JOBID_SIZE)
return -EINVAL;
- }
- LASSERT(stats->ojs_hash);
- if (all) {
- time_t oldest = 0;
- cfs_hash_for_each_safe(stats->ojs_hash, job_iter_callback,
- &oldest);
+ if (stats->ojs_hash == NULL)
+ return -ENODEV;
+
+ if (copy_from_user(jobid, buf, len))
+ return -EFAULT;
+ jobid[len] = 0;
+
+ /* Trim '\n' if any */
+ if (jobid[len - 1] == '\n')
+ jobid[len - 1] = 0;
+
+ if (strcmp(jobid, "clear") == 0) {
+ lprocfs_job_cleanup(stats, -99);
+
return len;
}
- if (!strlen(jobid))
+ if (strlen(jobid) == 0)
return -EINVAL;
job = cfs_hash_lookup(stats->ojs_hash, jobid);
return len;
}
-struct file_operations lprocfs_jobstats_seq_fops = {
+/**
+ * Clean up the seq file state when the /proc file is closed.
+ *
+ * This also expires old job stats from the cache after they have been
+ * printed in case the system is idle and not generating new jobstats.
+ *
+ * \param[in] inode struct inode for seq file being closed
+ * \param[in] file struct file for seq file being closed
+ *
+ * \retval 0 on success
+ * \retval negative errno on failure
+ */
+static int lprocfs_jobstats_seq_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct obd_job_stats *stats = seq->private;
+
+ lprocfs_job_cleanup(stats, stats->ojs_cleanup_interval);
+
+ return lprocfs_seq_release(inode, file);
+}
+
+static const struct file_operations lprocfs_jobstats_seq_fops = {
.owner = THIS_MODULE,
.open = lprocfs_jobstats_seq_open,
.read = seq_read,
.write = lprocfs_jobstats_seq_write,
.llseek = seq_lseek,
- .release = lprocfs_seq_release,
+ .release = lprocfs_jobstats_seq_release,
};
int lprocfs_job_stats_init(struct obd_device *obd, int cntr_num,
LASSERT(obd->obd_proc_entry != NULL);
LASSERT(obd->obd_type->typ_name);
- if (strcmp(obd->obd_type->typ_name, LUSTRE_MDT_NAME) &&
- strcmp(obd->obd_type->typ_name, LUSTRE_OST_NAME)) {
- CERROR("Invalid obd device type.\n");
+ if (cntr_num <= 0)
+ RETURN(-EINVAL);
+
+ if (init_fn == NULL)
+ RETURN(-EINVAL);
+
+ /* Currently needs to be a target due to the use of obt_jobstats. */
+ if (strcmp(obd->obd_type->typ_name, LUSTRE_MDT_NAME) != 0 &&
+ strcmp(obd->obd_type->typ_name, LUSTRE_OST_NAME) != 0) {
+ CERROR("%s: invalid device type %s for job stats: rc = %d\n",
+ obd->obd_name, obd->obd_type->typ_name, -EINVAL);
RETURN(-EINVAL);
}
stats = &obd->u.obt.obt_jobstats;
if (stats->ojs_hash == NULL)
RETURN(-ENOMEM);
- CFS_INIT_LIST_HEAD(&stats->ojs_list);
- cfs_rwlock_init(&stats->ojs_lock);
+ INIT_LIST_HEAD(&stats->ojs_list);
+ rwlock_init(&stats->ojs_lock);
stats->ojs_cntr_num = cntr_num;
stats->ojs_cntr_init_fn = init_fn;
stats->ojs_cleanup_interval = 600; /* 10 mins by default */
stats->ojs_last_cleanup = cfs_time_current_sec();
- LPROCFS_WRITE_ENTRY();
- entry = create_proc_entry("job_stats", 0644, obd->obd_proc_entry);
- LPROCFS_WRITE_EXIT();
- if (entry) {
- entry->proc_fops = &lprocfs_jobstats_seq_fops;
- entry->data = stats;
- RETURN(0);
- } else {
+ entry = lprocfs_add_simple(obd->obd_proc_entry, "job_stats", stats,
+ &lprocfs_jobstats_seq_fops);
+ if (IS_ERR(entry)) {
lprocfs_job_stats_fini(obd);
RETURN(-ENOMEM);
}
+ RETURN(0);
}
EXPORT_SYMBOL(lprocfs_job_stats_init);
-int lprocfs_rd_job_interval(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+int lprocfs_job_interval_seq_show(struct seq_file *m, void *data)
{
- struct obd_device *obd = (struct obd_device *)data;
+ struct obd_device *obd = m->private;
struct obd_job_stats *stats;
- LASSERT(obd != NULL);
+ if (obd == NULL)
+ return -ENODEV;
+
stats = &obd->u.obt.obt_jobstats;
- *eof = 1;
- return snprintf(page, count, "%d\n", stats->ojs_cleanup_interval);
+ return seq_printf(m, "%d\n", stats->ojs_cleanup_interval);
}
-EXPORT_SYMBOL(lprocfs_rd_job_interval);
+EXPORT_SYMBOL(lprocfs_job_interval_seq_show);
-int lprocfs_wr_job_interval(struct file *file, const char *buffer,
- unsigned long count, void *data)
+ssize_t
+lprocfs_job_interval_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
{
- struct obd_device *obd = (struct obd_device *)data;
+ struct obd_device *obd;
struct obd_job_stats *stats;
int val, rc;
- LASSERT(obd != NULL);
+ obd = ((struct seq_file *)file->private_data)->private;
+ if (obd == NULL)
+ return -ENODEV;
+
stats = &obd->u.obt.obt_jobstats;
rc = lprocfs_write_helper(buffer, count, &val);
return rc;
stats->ojs_cleanup_interval = val;
- lprocfs_job_cleanup(stats, true);
-
+ lprocfs_job_cleanup(stats, stats->ojs_cleanup_interval);
return count;
-
}
-EXPORT_SYMBOL(lprocfs_wr_job_interval);
-
-#endif /* LPROCFS*/
+EXPORT_SYMBOL(lprocfs_job_interval_seq_write);
+#endif /* CONFIG_PROC_FS*/