if (*cpu_pattern) {
cfs_cpt_tab = cfs_cpt_table_create_pattern(cpu_pattern);
if (IS_ERR(cfs_cpt_tab)) {
- CERROR("Failed to create cptab from pattern '%s'\n",
- cpu_pattern);
ret = PTR_ERR(cfs_cpt_tab);
+ pr_err("libcfs: failed to create cptab from pattern '%s': rc = %d\n",
+ cpu_pattern, ret);
goto failed_alloc_table;
}
-
} else {
cfs_cpt_tab = cfs_cpt_table_create(cpu_npartitions);
if (IS_ERR(cfs_cpt_tab)) {
- CERROR("Failed to create cptab with npartitions %d\n",
- cpu_npartitions);
ret = PTR_ERR(cfs_cpt_tab);
+ pr_err("libcfs: failed to create cptab with npartitions=%d: rc = %d\n",
+ cpu_npartitions, ret);
goto failed_alloc_table;
}
}
cpus_read_unlock();
- LCONSOLE(0, "HW NUMA nodes: %d, HW CPU cores: %d, npartitions: %d\n",
- num_online_nodes(), num_online_cpus(),
- cfs_cpt_number(cfs_cpt_tab));
+ pr_notice("libcfs: HW NUMA nodes: %d, HW CPU cores: %d, npartitions: %d\n",
+ num_online_nodes(), num_online_cpus(),
+ cfs_cpt_number(cfs_cpt_tab));
return 0;
failed_alloc_table:
/*
 * Drop one reference on the job_stat entry embedded in @hnode
 * (container lookup via hlist_entry() on the js_hash link).
 * @hs is unused here; the "_locked" suffix suggests the hash
 * bucket lock is held by the caller — NOTE(review): confirm
 * against the cfs_hash iteration contract.
 */
static void job_stat_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct job_stat *job;
+
job = hlist_entry(hnode, struct job_stat, js_hash);
job_putref(job);
}
* since locking of the hash is handled internally, but there isn't
* any benefit to having multiple threads doing cleanup at one time.
*
- * Subtract twice the cleanup_interval, since it is 1/2 the maximum age.
+ * Subtract or add twice the cleanup_interval, since it is 1/2 the
+ * maximum age. When clearing all stats, push oldest into the future.
*/
- oldest = ktime_sub(now, ktime_add(cleanup_interval, cleanup_interval));
+ cleanup_interval = ktime_add(cleanup_interval, cleanup_interval);
+ if (likely(!clear))
+ oldest = ktime_sub(now, cleanup_interval);
+ else
+ oldest = ktime_add(now, cleanup_interval);
cfs_hash_for_each_safe(stats->ojs_hash, job_cleanup_iter_callback,
&oldest);