#define LPROCFS_GET_SMP_ID 0x0002
/* Behavior flags for a struct lprocfs_stats instance.
 * The diff markers in this span have been resolved to the post-patch
 * (new) version of the enum. */
enum lprocfs_stats_flags {
	LPROCFS_STATS_FLAG_NONE     = 0x0000, /* per cpu counter */
	LPROCFS_STATS_FLAG_NOPERCPU = 0x0001, /* stats have no percpu
					       * area and need locking */
	LPROCFS_STATS_FLAG_IRQ_SAFE = 0x0002, /* alloc need irq safe */
};
enum lprocfs_fields_flags {
* been allocated, the 0th entry is
 * a statically initialized template */
int ls_flags; /* See LPROCFS_STATS_FLAG_* */
- cfs_spinlock_t ls_lock; /* Lock used only when there are
- * no percpu stats areas */
+ /* Lock used when there are no percpu stats areas; For percpu stats,
+ * it is used to protect ls_biggest_alloc_num change */
+ cfs_spinlock_t ls_lock;
struct lprocfs_percpu *ls_percpu[0];
};
static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int opc,
unsigned long *flags)
{
- int rc = 0;
+ int rc = 0;
+ unsigned int cpuid;
switch (opc) {
default:
LBUG();
case LPROCFS_GET_SMP_ID:
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
- cfs_spin_lock_irqsave(&stats->ls_lock, *flags);
- return 0;
- } else {
- unsigned int cpuid = cfs_get_cpu();
+ /* percpu counter stats */
+ if ((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0) {
+ cpuid = cfs_get_cpu();
if (unlikely(stats->ls_percpu[cpuid + 1] == NULL))
rc = lprocfs_stats_alloc_one(stats, cpuid + 1);
return rc < 0 ? rc : cpuid + 1;
}
- case LPROCFS_GET_NUM_CPU:
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+ /* non-percpu counter stats */
+ if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
cfs_spin_lock_irqsave(&stats->ls_lock, *flags);
- return 1;
- } else {
+ else
+ cfs_spin_lock(&stats->ls_lock);
+ return 0;
+
+ case LPROCFS_GET_NUM_CPU:
+ /* percpu counter stats */
+ if ((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0)
return stats->ls_biggest_alloc_num;
- }
+
+ /* non-percpu counter stats */
+ if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
+ cfs_spin_lock_irqsave(&stats->ls_lock, *flags);
+ else
+ cfs_spin_lock(&stats->ls_lock);
+ return 1;
}
}
LBUG();
case LPROCFS_GET_SMP_ID:
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU)
- cfs_spin_unlock_irqrestore(&stats->ls_lock, *flags);
- else
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
+ cfs_spin_unlock_irqrestore(&stats->ls_lock,
+ *flags);
+ } else {
+ cfs_spin_unlock(&stats->ls_lock);
+ }
+ } else {
cfs_put_cpu();
+ }
return;
case LPROCFS_GET_NUM_CPU:
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU)
- cfs_spin_unlock_irqrestore(&stats->ls_lock, *flags);
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
+ cfs_spin_unlock_irqrestore(&stats->ls_lock,
+ *flags);
+ } else {
+ cfs_spin_unlock(&stats->ls_lock);
+ }
+ }
return;
}
}
int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int idx)
{
- unsigned int percpusize;
- int rc = -ENOMEM;
+ unsigned int percpusize;
+ int rc = -ENOMEM;
+ unsigned long flags = 0;
/* the 1st percpu entry was statically allocated in
* lprocfs_alloc_stats() */
OBD_ALLOC_GFP(stats->ls_percpu[idx], percpusize, CFS_ALLOC_ATOMIC);
if (stats->ls_percpu[idx] != NULL) {
rc = 0;
- if (unlikely(stats->ls_biggest_alloc_num <= idx))
- stats->ls_biggest_alloc_num = idx + 1;
+ if (unlikely(stats->ls_biggest_alloc_num <= idx)) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+ cfs_spin_lock_irqsave(&stats->ls_lock, flags);
+ else
+ cfs_spin_lock(&stats->ls_lock);
+ if (stats->ls_biggest_alloc_num <= idx)
+ stats->ls_biggest_alloc_num = idx + 1;
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
+ cfs_spin_unlock_irqrestore(&stats->ls_lock,
+ flags);
+ } else {
+ cfs_spin_unlock(&stats->ls_lock);
+ }
+ }
/* initialize the ls_percpu[idx] by copying the 0th template
* entry */
stats->ls_num = num;
stats->ls_biggest_alloc_num = 1;
- if (flags & LPROCFS_STATS_FLAG_NOPERCPU) {
- stats->ls_flags = flags;
- cfs_spin_lock_init(&stats->ls_lock);
- /* Use this lock only if there are no percpu areas */
- } else {
- stats->ls_flags = 0;
- }
+ stats->ls_flags = flags;
+ cfs_spin_lock_init(&stats->ls_lock);
percpusize = offsetof(struct lprocfs_percpu, lp_cntr[num]);
if (num_entry > 1)
int i;
int j;
unsigned int num_entry;
- unsigned int percpusize;
unsigned long flags = 0;
num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
- percpusize = offsetof(struct lprocfs_percpu, lp_cntr[stats->ls_num]);
- if (num_entry > 1)
- percpusize = CFS_L1_CACHE_ALIGN(percpusize);
-
for (i = 0; i < num_entry; i++) {
if (stats->ls_percpu[i] == NULL)
continue;
- /* the 1st percpu entry was statically allocated in
- * lprocfs_alloc_stats() */
- if (i > 0) {
- OBD_FREE(stats->ls_percpu[i], percpusize);
- stats->ls_percpu[i] = NULL;
- continue;
- }
for (j = 0; j < stats->ls_num; j++) {
percpu_cntr = &(stats->ls_percpu[i])->lp_cntr[j];
cfs_atomic_inc(&percpu_cntr->lc_cntl.la_entry);