X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fobdclass%2Flprocfs_status.c;h=53e46a632bf71dfc14de4e198adebb40763d9751;hp=08db676a3c8765bf02edcb649cc42bbbdabbc5ec;hb=451f99d73128f1c6350c1e1fbb7be7c697f02835;hpb=f0d5693584bc70913541201c54c3666a473715b2

diff --git a/lustre/obdclass/lprocfs_status.c b/lustre/obdclass/lprocfs_status.c
index 08db676..53e46a6 100644
--- a/lustre/obdclass/lprocfs_status.c
+++ b/lustre/obdclass/lprocfs_status.c
@@ -532,6 +532,95 @@ int lprocfs_conn_uuid_seq_show(struct seq_file *m, void *data)
 EXPORT_SYMBOL(lprocfs_conn_uuid_seq_show);
 
 /** add up per-cpu counters */
+
+/**
+ * Lock statistics structure for access, possibly only on this CPU.
+ *
+ * The statistics struct may be allocated with per-CPU structures for
+ * efficient concurrent update (usually only on server-wide stats), or
+ * as a single global struct (e.g. for per-client or per-job statistics),
+ * so the required locking depends on the type of structure allocated.
+ *
+ * For per-CPU statistics, pin the thread to the current cpuid so that it
+ * will only access the statistics for that CPU.  If the stats structure
+ * for the current CPU has not been allocated (or was previously freed),
+ * allocate it now.  The per-CPU statistics do not need locking since
+ * the thread is pinned to the CPU during update.
+ *
+ * For global statistics, lock the stats structure to prevent concurrent update.
+ *
+ * \param[in] stats	statistics structure to lock
+ * \param[in] opc	type of operation:
+ *			LPROCFS_GET_SMP_ID: "lock" and return current CPU index
+ *				for incrementing statistics for that CPU
+ *			LPROCFS_GET_NUM_CPU: "lock" and return number of used
+ *				CPU indices to iterate over all indices
+ * \param[out] flags	CPU interrupt saved state for IRQ-safe locking
+ *
+ * \retval cpuid of current thread or number of allocated structs
+ * \retval negative on error (only for opc LPROCFS_GET_SMP_ID + per-CPU stats)
+ */
+int lprocfs_stats_lock(struct lprocfs_stats *stats,
+		       enum lprocfs_stats_lock_ops opc,
+		       unsigned long *flags)
+{
+	if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+		if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+			spin_lock_irqsave(&stats->ls_lock, *flags);
+		else
+			spin_lock(&stats->ls_lock);
+		return opc == LPROCFS_GET_NUM_CPU ? 1 : 0;
+	}
+
+	switch (opc) {
+	case LPROCFS_GET_SMP_ID: {
+		unsigned int cpuid = get_cpu();
+
+		if (unlikely(!stats->ls_percpu[cpuid])) {
+			int rc = lprocfs_stats_alloc_one(stats, cpuid);
+
+			if (rc < 0) {
+				put_cpu();
+				return rc;
+			}
+		}
+		return cpuid;
+	}
+	case LPROCFS_GET_NUM_CPU:
+		return stats->ls_biggest_alloc_num;
+	default:
+		LBUG();
+	}
+}
+
+/**
+ * Unlock statistics structure after access.
+ *
+ * Unlock the lock acquired via lprocfs_stats_lock() for global statistics,
+ * or unpin this thread from the current cpuid for per-CPU statistics.
+ *
+ * This function must be called using the same arguments as used when calling
+ * lprocfs_stats_lock() so that the correct operation can be performed.
+ *
+ * \param[in] stats	statistics structure to unlock
+ * \param[in] opc	type of operation (current cpuid or number of structs)
+ * \param[in] flags	CPU interrupt saved state for IRQ-safe locking
+ */
+void lprocfs_stats_unlock(struct lprocfs_stats *stats,
+			  enum lprocfs_stats_lock_ops opc,
+			  unsigned long *flags)
+{
+	if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+		if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+			spin_unlock_irqrestore(&stats->ls_lock, *flags);
+		else
+			spin_unlock(&stats->ls_lock);
+	} else if (opc == LPROCFS_GET_SMP_ID) {
+		put_cpu();
+	}
+}
+
+/** add up per-cpu counters */
 void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
 			   struct lprocfs_counter *cnt)
 {
@@ -1123,7 +1212,6 @@ int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid)
 	}
 	return rc;
 }
-EXPORT_SYMBOL(lprocfs_stats_alloc_one);
 
 struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num,
 					  enum lprocfs_stats_flags flags)
@@ -1208,6 +1296,32 @@ void lprocfs_free_stats(struct lprocfs_stats **statsh)
 }
 EXPORT_SYMBOL(lprocfs_free_stats);
 
+u64 lprocfs_stats_collector(struct lprocfs_stats *stats, int idx,
+			    enum lprocfs_fields_flags field)
+{
+	unsigned long flags = 0;
+	unsigned int num_cpu;
+	unsigned int i;
+	u64 ret = 0;
+
+	LASSERT(stats);
+
+	num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
+	for (i = 0; i < num_cpu; i++) {
+		struct lprocfs_counter *cntr;
+
+		if (!stats->ls_percpu[i])
+			continue;
+
+		cntr = lprocfs_stats_counter_get(stats, i, idx);
+		ret += lprocfs_read_helper(cntr, &stats->ls_cnt_header[idx],
+					   stats->ls_flags, field);
+	}
+	lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
+	return ret;
+}
+EXPORT_SYMBOL(lprocfs_stats_collector);
+
 void lprocfs_clear_stats(struct lprocfs_stats *stats)
 {
 	struct lprocfs_counter *percpu_cntr;
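
Usage note (editorial, not part of the patch): every caller is expected to bracket its access with lprocfs_stats_lock() and a lprocfs_stats_unlock() called with the same (opc, flags) arguments, exactly as lprocfs_stats_collector() above does for the LPROCFS_GET_NUM_CPU case. A minimal sketch of the LPROCFS_GET_SMP_ID update path follows; the helper name demo_counter_incr() is hypothetical, and it assumes struct lprocfs_counter has an lc_count field and that lprocfs_stats_counter_get() maps (stats, cpu index, event index) to a counter, as in the collector above. The real increment path in Lustre is lprocfs_counter_add(), which additionally maintains min/max/sum.

	/* Hypothetical caller, sketching the lock/unlock contract only. */
	static void demo_counter_incr(struct lprocfs_stats *stats, int idx)
	{
		struct lprocfs_counter *cntr;
		unsigned long flags = 0;
		int smp_id;

		/* Per-CPU stats: pins this thread to the current CPU via
		 * get_cpu() and returns its index; global (NOPERCPU) stats:
		 * takes ls_lock (IRQ-safe if LPROCFS_STATS_FLAG_IRQ_SAFE
		 * is set) and returns index 0. */
		smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
		if (smp_id < 0)
			return;	/* per-CPU struct could not be allocated */

		cntr = lprocfs_stats_counter_get(stats, smp_id, idx);
		cntr->lc_count++;	/* assumed counter field; illustrative */

		/* Must mirror the lock call (same opc and flags) so the right
		 * teardown runs: put_cpu() for per-CPU, unlock for global. */
		lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID, &flags);
	}

Note that the per-CPU path takes no spinlock at all: pinning the thread with get_cpu(), which disables preemption, is what makes the unlocked increment safe, and it is why lock and unlock must be called with identical arguments.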