* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
/* if we find more consumers this could be generalized */
#define OBD_HIST_MAX 32
struct obd_histogram {
- cfs_spinlock_t oh_lock;
- unsigned long oh_buckets[OBD_HIST_MAX];
+ spinlock_t oh_lock;
+ unsigned long oh_buckets[OBD_HIST_MAX];
};
enum {
LPROCFS_TYPE_CYCLE = 0x0800,
};
-struct lprocfs_atomic {
- cfs_atomic_t la_entry;
- cfs_atomic_t la_exit;
-};
-
#define LC_MIN_INIT ((~(__u64)0) >> 1)
+struct lprocfs_counter_header {
+ unsigned int lc_config;
+ const char *lc_name; /* must be static */
+ const char *lc_units; /* must be static */
+};
+
struct lprocfs_counter {
- struct lprocfs_atomic lc_cntl; /* may need to move to per set */
- unsigned int lc_config;
- __s64 lc_count;
- __s64 lc_sum;
- __s64 lc_sum_irq;
- __s64 lc_min;
- __s64 lc_max;
- __s64 lc_sumsquare;
- const char *lc_name; /* must be static */
- const char *lc_units; /* must be static */
+ __s64 lc_count;
+ __s64 lc_min;
+ __s64 lc_max;
+ __s64 lc_sumsquare;
+ /*
+ * Every counter has lc_array_sum[0]; lc_array_sum[1] exists only for
+ * irq-context counters, i.e. counters in stats allocated with the
+ * LPROCFS_STATS_FLAG_IRQ_SAFE flag.
+ */
+ __s64 lc_array_sum[1];
};
+#define lc_sum lc_array_sum[0]
+#define lc_sum_irq lc_array_sum[1]
struct lprocfs_percpu {
-#if defined __STDC_VERSION__ && __STDC_VERSION__ >= 199901L
- __s64 pad;
+#ifndef __GNUC__
+ __s64 pad;
#endif
struct lprocfs_counter lp_cntr[0];
};
#define LPROCFS_GET_SMP_ID 0x0002
enum lprocfs_stats_flags {
- LPROCFS_STATS_FLAG_NONE = 0x0000, /* per cpu counter */
- LPROCFS_STATS_FLAG_NOPERCPU = 0x0001, /* stats have no percpu
- * area and need locking */
+ LPROCFS_STATS_FLAG_NONE = 0x0000, /* per cpu counter */
+ LPROCFS_STATS_FLAG_NOPERCPU = 0x0001, /* stats have no percpu
+ * area and need locking */
+ LPROCFS_STATS_FLAG_IRQ_SAFE = 0x0002, /* allocation must be irq safe */
};
enum lprocfs_fields_flags {
};
struct lprocfs_stats {
- unsigned int ls_num; /* # of counters */
- int ls_flags; /* See LPROCFS_STATS_FLAG_* */
- cfs_spinlock_t ls_lock; /* Lock used only when there are
- * no percpu stats areas */
- struct lprocfs_percpu *ls_percpu[0];
+ /* # of counters */
+ unsigned short ls_num;
+ /* 1 + the biggest cpu # whose ls_percpu slot has been allocated */
+ unsigned short ls_biggest_alloc_num;
+ enum lprocfs_stats_flags ls_flags;
+ /* Lock used when there are no percpu stats areas; for percpu stats,
+ * it protects changes to ls_biggest_alloc_num */
+ spinlock_t ls_lock;
+
+ /* has ls_num of counter headers */
+ struct lprocfs_counter_header *ls_cnt_header;
+ struct lprocfs_percpu *ls_percpu[0];
};
#define OPC_RANGE(seg) (seg ## _LAST_OPC - seg ## _FIRST_OPC)
OPC_RANGE(LDLM) +
OPC_RANGE(MDS) +
OPC_RANGE(OST));
- } else {
+ } else if (opc < UPDATE_LAST_OPC) {
+ /* update opcode */
+ return (opc - UPDATE_FIRST_OPC +
+ OPC_RANGE(FLD) +
+ OPC_RANGE(SEC) +
+ OPC_RANGE(SEQ) +
+ OPC_RANGE(QUOTA) +
+ OPC_RANGE(LLOG) +
+ OPC_RANGE(OBD) +
+ OPC_RANGE(MGS) +
+ OPC_RANGE(LDLM) +
+ OPC_RANGE(MDS) +
+ OPC_RANGE(OST));
+ } else {
/* Unknown Opcode */
return -1;
}
OPC_RANGE(SEC) + \
OPC_RANGE(SEQ) + \
OPC_RANGE(SEC) + \
- OPC_RANGE(FLD) )
+ OPC_RANGE(FLD) + \
+ OPC_RANGE(UPDATE))
#define EXTRA_MAX_OPCODES ((PTLRPC_LAST_CNTR - PTLRPC_FIRST_CNTR) + \
OPC_RANGE(EXTRA))
#define DHMS_FMT "%dd%dh%02dm%02ds"
#define DHMS_VARS(x) (x)->d, (x)->h, (x)->m, (x)->s
+#define JOBSTATS_JOBID_VAR_MAX_LEN 20
+#define JOBSTATS_DISABLE "disable"
+#define JOBSTATS_PROCNAME_UID "procname_uid"
+
+typedef void (*cntr_init_callback)(struct lprocfs_stats *stats);
+
+struct obd_job_stats {
+ cfs_hash_t *ojs_hash;
+ cfs_list_t ojs_list;
+ rwlock_t ojs_lock; /* protect the obj_list */
+ cntr_init_callback ojs_cntr_init_fn;
+ int ojs_cntr_num;
+ int ojs_cleanup_interval;
+ time_t ojs_last_cleanup;
+};
#ifdef LPROCFS
-static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int opc)
+extern int lprocfs_stats_alloc_one(struct lprocfs_stats *stats,
+ unsigned int cpuid);
+/*
+ * \return a negative value on error (only possible when opc is
+ * LPROCFS_GET_SMP_ID); otherwise the cpu id or cpu count to use
+ */
+static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int opc,
+ unsigned long *flags)
{
- switch (opc) {
- default:
- LBUG();
-
- case LPROCFS_GET_SMP_ID:
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
- cfs_spin_lock(&stats->ls_lock);
- return 0;
- } else {
- return cfs_get_cpu();
- }
-
- case LPROCFS_GET_NUM_CPU:
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
- cfs_spin_lock(&stats->ls_lock);
- return 1;
- } else {
- return cfs_num_possible_cpus();
- }
- }
+ int rc = 0;
+
+ switch (opc) {
+ default:
+ LBUG();
+
+ case LPROCFS_GET_SMP_ID:
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+ spin_lock_irqsave(&stats->ls_lock, *flags);
+ else
+ spin_lock(&stats->ls_lock);
+ return 0;
+ } else {
+ unsigned int cpuid = cfs_get_cpu();
+
+ if (unlikely(stats->ls_percpu[cpuid] == NULL)) {
+ rc = lprocfs_stats_alloc_one(stats, cpuid);
+ if (rc < 0) {
+ cfs_put_cpu();
+ return rc;
+ }
+ }
+ return cpuid;
+ }
+
+ case LPROCFS_GET_NUM_CPU:
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+ spin_lock_irqsave(&stats->ls_lock, *flags);
+ else
+ spin_lock(&stats->ls_lock);
+ return 1;
+ } else {
+ return stats->ls_biggest_alloc_num;
+ }
+ }
}
-static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc)
+static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc,
+ unsigned long *flags)
{
- switch (opc) {
- default:
- LBUG();
-
- case LPROCFS_GET_SMP_ID:
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU)
- cfs_spin_unlock(&stats->ls_lock);
- else
- cfs_put_cpu();
- return;
-
- case LPROCFS_GET_NUM_CPU:
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU)
- cfs_spin_unlock(&stats->ls_lock);
- return;
- }
+ switch (opc) {
+ default:
+ LBUG();
+
+ case LPROCFS_GET_SMP_ID:
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
+ spin_unlock_irqrestore(&stats->ls_lock,
+ *flags);
+ } else {
+ spin_unlock(&stats->ls_lock);
+ }
+ } else {
+ cfs_put_cpu();
+ }
+ return;
+
+ case LPROCFS_GET_NUM_CPU:
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
+ spin_unlock_irqrestore(&stats->ls_lock,
+ *flags);
+ } else {
+ spin_unlock(&stats->ls_lock);
+ }
+ }
+ return;
+ }
+}
+
+static inline unsigned int
+lprocfs_stats_counter_size(struct lprocfs_stats *stats)
+{
+ unsigned int percpusize;
+
+ percpusize = offsetof(struct lprocfs_percpu, lp_cntr[stats->ls_num]);
+
+ /* irq safe stats need lc_array_sum[1] */
+ if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
+ percpusize += stats->ls_num * sizeof(__s64);
+
+ if ((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0)
+ percpusize = CFS_L1_CACHE_ALIGN(percpusize);
+
+ return percpusize;
+}
+
+static inline struct lprocfs_counter *
+lprocfs_stats_counter_get(struct lprocfs_stats *stats, unsigned int cpuid,
+ int index)
+{
+ struct lprocfs_counter *cntr;
+
+ cntr = &stats->ls_percpu[cpuid]->lp_cntr[index];
+
+ if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
+ cntr = (void *)cntr + index * sizeof(__s64);
+
+ return cntr;
}
/* Two optimized LPROCFS counter increment functions are provided:
lprocfs_counter_sub(stats, idx, 1)
extern __s64 lprocfs_read_helper(struct lprocfs_counter *lc,
- enum lprocfs_fields_flags field);
+ struct lprocfs_counter_header *header,
+ enum lprocfs_stats_flags flags,
+ enum lprocfs_fields_flags field);
static inline __u64 lprocfs_stats_collector(struct lprocfs_stats *stats,
int idx,
enum lprocfs_fields_flags field)
{
- __u64 ret = 0;
- int i;
- unsigned int num_cpu;
-
- LASSERT(stats != NULL);
-
- num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU);
- for (i = 0; i < num_cpu; i++)
- ret += lprocfs_read_helper(&(stats->ls_percpu[i]->lp_cntr[idx]),
- field);
- lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU);
- return ret;
+ int i;
+ unsigned int num_cpu;
+ unsigned long flags = 0;
+ __u64 ret = 0;
+
+ LASSERT(stats != NULL);
+
+ num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
+ for (i = 0; i < num_cpu; i++) {
+ if (stats->ls_percpu[i] == NULL)
+ continue;
+ ret += lprocfs_read_helper(
+ lprocfs_stats_counter_get(stats, i, idx),
+ &stats->ls_cnt_header[idx], stats->ls_flags,
+ field);
+ }
+ lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
+ return ret;
}
extern struct lprocfs_stats *
extern void lprocfs_remove(cfs_proc_dir_entry_t **root);
extern void lprocfs_remove_proc_entry(const char *name,
struct proc_dir_entry *parent);
+extern void lprocfs_try_remove_proc_entry(const char *name,
+ struct proc_dir_entry *parent);
extern cfs_proc_dir_entry_t *lprocfs_srch(cfs_proc_dir_entry_t *root,
const char *name);
extern int lprocfs_obd_cleanup(struct obd_device *obd);
extern struct file_operations lprocfs_evict_client_fops;
-extern int lprocfs_seq_create(cfs_proc_dir_entry_t *parent, char *name,
- mode_t mode, struct file_operations *seq_fops,
- void *data);
-extern int lprocfs_obd_seq_create(struct obd_device *dev, char *name,
- mode_t mode, struct file_operations *seq_fops,
- void *data);
+extern int lprocfs_seq_create(cfs_proc_dir_entry_t *parent, const char *name,
+ mode_t mode,
+ const struct file_operations *seq_fops,
+ void *data);
+extern int lprocfs_obd_seq_create(struct obd_device *dev, const char *name,
+ mode_t mode,
+ const struct file_operations *seq_fops,
+ void *data);
/* Generic callbacks */
int count, int *eof, void *data);
extern int lprocfs_rd_name(char *page, char **start, off_t off,
int count, int *eof, void *data);
-extern int lprocfs_rd_fstype(char *page, char **start, off_t off,
- int count, int *eof, void *data);
extern int lprocfs_rd_server_uuid(char *page, char **start, off_t off,
int count, int *eof, void *data);
extern int lprocfs_rd_conn_uuid(char *page, char **start, off_t off,
extern int lprocfs_write_frac_u64_helper(const char *buffer,
unsigned long count,
__u64 *val, int mult);
+char *lprocfs_find_named_value(const char *buffer, const char *name,
+ unsigned long *count);
void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value);
void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value);
void lprocfs_oh_clear(struct obd_histogram *oh);
int lprocfs_obd_wr_ir_factor(struct file *file, const char *buffer,
unsigned long count, void *data);
+extern int lprocfs_single_release(cfs_inode_t *, struct file *);
extern int lprocfs_seq_release(cfs_inode_t *, struct file *);
/* You must use these macros when you want to refer to
* the import in a client obd_device for a lprocfs entry */
#define LPROCFS_CLIMP_CHECK(obd) do { \
typecheck(struct obd_device *, obd); \
- cfs_down_read(&(obd)->u.cli.cl_sem); \
+ down_read(&(obd)->u.cli.cl_sem); \
if ((obd)->u.cli.cl_import == NULL) { \
- cfs_up_read(&(obd)->u.cli.cl_sem); \
+ up_read(&(obd)->u.cli.cl_sem); \
return -ENODEV; \
} \
} while(0)
#define LPROCFS_CLIMP_EXIT(obd) \
- cfs_up_read(&(obd)->u.cli.cl_sem);
+ up_read(&(obd)->u.cli.cl_sem);
/* write the name##_seq_show function, call LPROC_SEQ_FOPS_RO for read-only
a read-write proc entry, and then call LPROC_SEQ_SEQ instead. Finally,
call lprocfs_obd_seq_create(obd, filename, 0444, &name#_fops, data); */
#define __LPROC_SEQ_FOPS(name, custom_seq_write) \
-static int name##_seq_open(cfs_inode_t *inode, struct file *file) { \
+static int name##_single_open(cfs_inode_t *inode, struct file *file) { \
struct proc_dir_entry *dp = PDE(inode); \
int rc; \
LPROCFS_ENTRY_AND_CHECK(dp); \
} \
struct file_operations name##_fops = { \
.owner = THIS_MODULE, \
- .open = name##_seq_open, \
+ .open = name##_single_open, \
.read = seq_read, \
.write = custom_seq_write, \
.llseek = seq_lseek, \
- .release = lprocfs_seq_release, \
+ .release = lprocfs_single_release, \
}
#define LPROC_SEQ_FOPS_RO(name) __LPROC_SEQ_FOPS(name, NULL)
#define LPROC_SEQ_FOPS(name) __LPROC_SEQ_FOPS(name, name##_seq_write)
+/* lprocfs_jobstats.c */
+int lprocfs_job_stats_log(struct obd_device *obd, char *jobid,
+ int event, long amount);
+void lprocfs_job_stats_fini(struct obd_device *obd);
+int lprocfs_job_stats_init(struct obd_device *obd, int cntr_num,
+ cntr_init_callback fn);
+int lprocfs_rd_job_interval(char *page, char **start, off_t off,
+ int count, int *eof, void *data);
+int lprocfs_wr_job_interval(struct file *file, const char *buffer,
+ unsigned long count, void *data);
+
/* lproc_ptlrpc.c */
struct ptlrpc_request;
extern void target_print_req(void *seq_file, struct ptlrpc_request *req);
int lprocfs_obd_wr_recovery_time_hard(struct file *file,
const char *buffer,
unsigned long count, void *data);
-int lprocfs_obd_rd_mntdev(char *page, char **start, off_t off,
- int count, int *eof, void *data);
int lprocfs_obd_rd_max_pages_per_rpc(char *page, char **start, off_t off,
int count, int *eof, void *data);
int lprocfs_obd_wr_max_pages_per_rpc(struct file *file, const char *buffer,
#else
/* LPROCFS is not defined */
-
+#define proc_lustre_root NULL
static inline void lprocfs_counter_add(struct lprocfs_stats *stats,
int index, long amount)
static inline void lprocfs_remove_proc_entry(const char *name,
struct proc_dir_entry *parent)
{ return; }
+static inline void lprocfs_try_remove_proc_entry(const char *name,
+ struct proc_dir_entry *parent)
+{ return; }
static inline cfs_proc_dir_entry_t *lprocfs_srch(cfs_proc_dir_entry_t *head,
const char *name)
{ return 0; }
#define LPROC_SEQ_FOPS_RO(name)
#define LPROC_SEQ_FOPS(name)
+/* lprocfs_jobstats.c */
+static inline
+int lprocfs_job_stats_log(struct obd_device *obd, char *jobid, int event,
+ long amount)
+{ return 0; }
+static inline
+void lprocfs_job_stats_fini(struct obd_device *obd)
+{ return; }
+static inline
+int lprocfs_job_stats_init(struct obd_device *obd, int cntr_num,
+ cntr_init_callback fn)
+{ return 0; }
+
+
/* lproc_ptlrpc.c */
#define target_print_req NULL