* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define LC_MIN_INIT ((~(__u64)0) >> 1)
+struct lprocfs_counter_header {
+ unsigned int lc_config;
+ const char *lc_name; /* must be static */
+ const char *lc_units; /* must be static */
+};
+
struct lprocfs_counter {
- unsigned int lc_config;
- __s64 lc_count;
- __s64 lc_sum;
- __s64 lc_sum_irq;
- __s64 lc_min;
- __s64 lc_max;
- __s64 lc_sumsquare;
- const char *lc_name; /* must be static */
- const char *lc_units; /* must be static */
+ __s64 lc_count;
+ __s64 lc_min;
+ __s64 lc_max;
+ __s64 lc_sumsquare;
+ /*
+ * Every counter has lc_array_sum[0].  lc_array_sum[1] exists only
+ * for counters belonging to irq-safe stats, i.e. stats created
+ * with the LPROCFS_STATS_FLAG_IRQ_SAFE flag; such counters need
+ * lc_array_sum[1] as well.
+ */
+ __s64 lc_array_sum[1];
};
+#define lc_sum lc_array_sum[0]
+#define lc_sum_irq lc_array_sum[1]
struct lprocfs_percpu {
#ifndef __GNUC__
};
struct lprocfs_stats {
- unsigned short ls_num; /* # of counters */
- unsigned short ls_biggest_alloc_num;
- /* 1 + the highest slot index which has
- * been allocated, the 0th entry is
- * a statically intialized template */
- int ls_flags; /* See LPROCFS_STATS_FLAG_* */
+ /* # of counters */
+ unsigned short ls_num;
+ /* 1 + the highest CPU index whose ls_percpu slot has been allocated */
+ unsigned short ls_biggest_alloc_num;
+ enum lprocfs_stats_flags ls_flags;
/* Lock used when there are no percpu stats areas; For percpu stats,
* it is used to protect ls_biggest_alloc_num change */
- spinlock_t ls_lock;
- struct lprocfs_percpu *ls_percpu[0];
+ spinlock_t ls_lock;
+
+ /* has ls_num of counter headers */
+ struct lprocfs_counter_header *ls_cnt_header;
+ struct lprocfs_percpu *ls_percpu[0];
};
#define OPC_RANGE(seg) (seg ## _LAST_OPC - seg ## _FIRST_OPC)
OPC_RANGE(LDLM) +
OPC_RANGE(MDS) +
OPC_RANGE(OST));
- } else {
+ } else if (opc < UPDATE_LAST_OPC) {
+ /* update opcode */
+ return (opc - UPDATE_FIRST_OPC +
+ OPC_RANGE(FLD) +
+ OPC_RANGE(SEC) +
+ OPC_RANGE(SEQ) +
+ OPC_RANGE(QUOTA) +
+ OPC_RANGE(LLOG) +
+ OPC_RANGE(OBD) +
+ OPC_RANGE(MGS) +
+ OPC_RANGE(LDLM) +
+ OPC_RANGE(MDS) +
+ OPC_RANGE(OST));
+ } else {
/* Unknown Opcode */
return -1;
}
OPC_RANGE(SEC) + \
OPC_RANGE(SEQ) + \
OPC_RANGE(SEC) + \
- OPC_RANGE(FLD) )
+ OPC_RANGE(FLD) + \
+ OPC_RANGE(UPDATE))
#define EXTRA_MAX_OPCODES ((PTLRPC_LAST_CNTR - PTLRPC_FIRST_CNTR) + \
OPC_RANGE(EXTRA))
unsigned long *flags)
{
int rc = 0;
- unsigned int cpuid;
switch (opc) {
default:
LBUG();
case LPROCFS_GET_SMP_ID:
- /* percpu counter stats */
- if ((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0) {
- cpuid = cfs_get_cpu();
-
- if (unlikely(stats->ls_percpu[cpuid + 1] == NULL))
- rc = lprocfs_stats_alloc_one(stats, cpuid + 1);
- return rc < 0 ? rc : cpuid + 1;
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+ spin_lock_irqsave(&stats->ls_lock, *flags);
+ else
+ spin_lock(&stats->ls_lock);
+ return 0;
+ } else {
+ unsigned int cpuid = cfs_get_cpu();
+
+ if (unlikely(stats->ls_percpu[cpuid] == NULL)) {
+ rc = lprocfs_stats_alloc_one(stats, cpuid);
+ if (rc < 0) {
+ cfs_put_cpu();
+ return rc;
+ }
+ }
+ return cpuid;
}
- /* non-percpu counter stats */
- if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
- spin_lock_irqsave(&stats->ls_lock, *flags);
- else
- spin_lock(&stats->ls_lock);
- return 0;
-
case LPROCFS_GET_NUM_CPU:
- /* percpu counter stats */
- if ((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0)
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+ spin_lock_irqsave(&stats->ls_lock, *flags);
+ else
+ spin_lock(&stats->ls_lock);
+ return 1;
+ } else {
return stats->ls_biggest_alloc_num;
-
- /* non-percpu counter stats */
- if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
- spin_lock_irqsave(&stats->ls_lock, *flags);
- else
- spin_lock(&stats->ls_lock);
- return 1;
+ }
}
}
}
}
+static inline unsigned int
+lprocfs_stats_counter_size(struct lprocfs_stats *stats)
+{
+ unsigned int percpusize;
+
+ percpusize = offsetof(struct lprocfs_percpu, lp_cntr[stats->ls_num]);
+
+ /* irq safe stats need lc_array_sum[1] */
+ if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
+ percpusize += stats->ls_num * sizeof(__s64);
+
+ if ((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0)
+ percpusize = CFS_L1_CACHE_ALIGN(percpusize);
+
+ return percpusize;
+}
+
+static inline struct lprocfs_counter *
+lprocfs_stats_counter_get(struct lprocfs_stats *stats, unsigned int cpuid,
+ int index)
+{
+ struct lprocfs_counter *cntr;
+
+ cntr = &stats->ls_percpu[cpuid]->lp_cntr[index];
+
+ if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
+ cntr = (void *)cntr + index * sizeof(__s64);
+
+ return cntr;
+}
+
/* Two optimized LPROCFS counter increment functions are provided:
* lprocfs_counter_incr(cntr, value) - optimized for by-one counters
* lprocfs_counter_add(cntr) - use for multi-valued counters
lprocfs_counter_sub(stats, idx, 1)
extern __s64 lprocfs_read_helper(struct lprocfs_counter *lc,
- enum lprocfs_fields_flags field);
+ struct lprocfs_counter_header *header,
+ enum lprocfs_stats_flags flags,
+ enum lprocfs_fields_flags field);
static inline __u64 lprocfs_stats_collector(struct lprocfs_stats *stats,
int idx,
enum lprocfs_fields_flags field)
for (i = 0; i < num_cpu; i++) {
if (stats->ls_percpu[i] == NULL)
continue;
- ret += lprocfs_read_helper(&(stats->ls_percpu[i]->lp_cntr[idx]),
- field);
+ ret += lprocfs_read_helper(
+ lprocfs_stats_counter_get(stats, i, idx),
+ &stats->ls_cnt_header[idx], stats->ls_flags,
+ field);
}
lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
return ret;
int count, int *eof, void *data);
extern int lprocfs_rd_filegroups(char *page, char **start, off_t off,
int count, int *eof, void *data);
-extern int lprocfs_osd_rd_blksize(char *page, char **start, off_t off,
- int count, int *eof, void *data);
-extern int lprocfs_osd_rd_kbytesfree(char *page, char **start, off_t off,
- int count, int *eof, void *data);
-extern int lprocfs_osd_rd_kbytesavail(char *page, char **start, off_t off,
- int count, int *eof, void *data);
-extern int lprocfs_osd_rd_filestotal(char *page, char **start, off_t off,
- int count, int *eof, void *data);
-extern int lprocfs_osd_rd_filesfree(char *page, char **start, off_t off,
- int count, int *eof, void *data);
-extern int lprocfs_osd_rd_kbytestotal(char *page, char **start, off_t off,
- int count, int *eof, void *data);
-
extern int lprocfs_write_helper(const char *buffer, unsigned long count,
int *val);
int lprocfs_obd_wr_recovery_time_hard(struct file *file,
const char *buffer,
unsigned long count, void *data);
-int lprocfs_obd_rd_mntdev(char *page, char **start, off_t off,
- int count, int *eof, void *data);
int lprocfs_obd_rd_max_pages_per_rpc(char *page, char **start, off_t off,
int count, int *eof, void *data);
int lprocfs_obd_wr_max_pages_per_rpc(struct file *file, const char *buffer,