Remove usage of trace_call_on_all_cpus()
b=15878
i=adilger
i=robert.read
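
Replace trace_call_on_all_cpus(), which ran a callback on every CPU by
temporarily rebinding the calling thread's CPU affinity, with direct
iteration over the per-CPU trace data. Each trace_cpu_data now carries
its own spinlock (tcd_lock), taken with spin_lock_irqsave() in
trace_lock_tcd() in place of the old local_irq_disable()/
local_bh_disable() calls, so any CPU can safely walk another CPU's
trace pages. The unimplemented (#error "tbd") and Windows
affinity-based variants of trace_call_on_all_cpus() are removed as
well.

Callers now follow this pattern (a sketch of the new code paths in
this patch, e.g. collect_pages_on_all_cpus()):

        for_each_possible_cpu(cpu) {
                tcd_for_each_type_lock(tcd, i, cpu) {
                        /* tcd->tcd_lock is held here, so the tcd page
                         * lists can be spliced or freed safely */
                }
        }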
{
        return max_permit_mb;
}
-
-void
-trace_call_on_all_cpus(void (*fn)(void *arg), void *arg)
-{
-#error "tbd"
-}
        /* arch related info initialized */
        tcd_for_each(tcd, i, j) {
+                spin_lock_init(&tcd->tcd_lock);
                tcd->tcd_pages_factor = pages_factor[i];
                tcd->tcd_type = i;
                tcd->tcd_cpu = j;
struct trace_cpu_data *
trace_get_tcd(void)
{
+        struct trace_cpu_data *tcd;
        int cpu;

        cpu = get_cpu();
        if (in_irq())
-                return &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
+                tcd = &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
        else if (in_softirq())
-                return &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
-        return &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
+                tcd = &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
+        else
+                tcd = &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
+
+        trace_lock_tcd(tcd);
+
+        return tcd;
}

void
trace_put_tcd (struct trace_cpu_data *tcd)
{
+        trace_unlock_tcd(tcd);
+
        put_cpu();
}

int trace_lock_tcd(struct trace_cpu_data *tcd)
{
        __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
-        if (tcd->tcd_type == TCD_TYPE_IRQ)
-                local_irq_disable();
-        else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
-                local_bh_disable();
+
+        spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
+
        return 1;
}

void trace_unlock_tcd(struct trace_cpu_data *tcd)
{
        __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
-        if (tcd->tcd_type == TCD_TYPE_IRQ)
-                local_irq_enable();
-        else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
-                local_bh_enable();
+
+        spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
}

int tcd_owns_tage(struct trace_cpu_data *tcd, struct trace_page *tage)

        return MAX(512, (total_mb * 80)/100);
}
-
-void
-trace_call_on_all_cpus(void (*fn)(void *arg), void *arg)
-{
-        cpumask_t cpus_allowed = current->cpus_allowed;
-        /* use cpus_allowed to quiet 2.4 UP kernel warning only */
-        cpumask_t m = cpus_allowed;
-        int cpu;
-
-        /* Run the given routine on every CPU in thread context */
-        for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
-                if (!cpu_online(cpu))
-                        continue;
-
-                cpus_clear(m);
-                cpu_set(cpu, m);
-                set_cpus_allowed(current, m);
-
-                fn(arg);
-
-                set_cpus_allowed(current, cpus_allowed);
-        }
-}
-static void collect_pages_on_cpu(void *info)
+static void collect_pages_on_all_cpus(struct page_collection *pc)
{
        struct trace_cpu_data *tcd;
-        struct page_collection *pc = info;
-        int i;
+        int i, cpu;

        spin_lock(&pc->pc_lock);
-        tcd_for_each_type_lock(tcd, i) {
-                list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
-                tcd->tcd_cur_pages = 0;
-                if (pc->pc_want_daemon_pages) {
-                        list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
-                        tcd->tcd_cur_daemon_pages = 0;
+        for_each_possible_cpu(cpu) {
+                tcd_for_each_type_lock(tcd, i, cpu) {
+                        list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
+                        tcd->tcd_cur_pages = 0;
+                        if (pc->pc_want_daemon_pages) {
+                                list_splice_init(&tcd->tcd_daemon_pages,
+                                                 &pc->pc_pages);
+                                tcd->tcd_cur_daemon_pages = 0;
+                        }
                }
        }
        spin_unlock(&pc->pc_lock);
}

        if (libcfs_panic_in_progress)
                panic_collect_pages(pc);
        else
-                trace_call_on_all_cpus(collect_pages_on_cpu, pc);
+                collect_pages_on_all_cpus(pc);

-static void put_pages_back_on_cpu(void *info)
+static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
-        struct page_collection *pc = info;
        struct trace_cpu_data *tcd;
        struct list_head *cur_head;
        struct trace_page *tage;
        struct trace_page *tmp;
-        int i;
+        int i, cpu;

        spin_lock(&pc->pc_lock);
-        tcd_for_each_type_lock(tcd, i) {
-                cur_head = tcd->tcd_pages.next;
+        for_each_possible_cpu(cpu) {
+                tcd_for_each_type_lock(tcd, i, cpu) {
+                        cur_head = tcd->tcd_pages.next;
-                cfs_list_for_each_entry_safe_typed(tage, tmp, &pc->pc_pages,
-                                                   struct trace_page, linkage) {
+                        cfs_list_for_each_entry_safe_typed(tage, tmp,
+                                                           &pc->pc_pages,
+                                                           struct trace_page,
+                                                           linkage) {
-                        __LASSERT_TAGE_INVARIANT(tage);
+                                __LASSERT_TAGE_INVARIANT(tage);
-                        if (tage->cpu != smp_processor_id() || tage->type != i)
-                                continue;
+                                if (tage->cpu != cpu || tage->type != i)
+                                        continue;
-                        tage_to_tail(tage, cur_head);
-                        tcd->tcd_cur_pages++;
+                                tage_to_tail(tage, cur_head);
+                                tcd->tcd_cur_pages++;
+                        }
                }
        }
        spin_unlock(&pc->pc_lock);
}

static void put_pages_back(struct page_collection *pc)
{
        if (!libcfs_panic_in_progress)
-                trace_call_on_all_cpus(put_pages_back_on_cpu, pc);
+                put_pages_back_on_all_cpus(pc);
}

/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
                __LASSERT_TAGE_INVARIANT(tage);
-                if (tage->cpu != smp_processor_id() ||
-                    tage->type != tcd->tcd_type)
+                if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
                        continue;
                tage_to_tail(tage, &tcd->tcd_daemon_pages);
        spin_unlock(&pc->pc_lock);
}

-static void put_pages_on_daemon_list_on_cpu(void *info)
+static void put_pages_on_daemon_list(struct page_collection *pc)
{
        struct trace_cpu_data *tcd;
-        int i;
-
-        tcd_for_each_type_lock(tcd, i)
-                put_pages_on_tcd_daemon_list(info, tcd);
-}
-static void put_pages_on_daemon_list(struct page_collection *pc)
-{
-        trace_call_on_all_cpus(put_pages_on_daemon_list_on_cpu, pc);
+        int i, cpu;
+
+        for_each_possible_cpu(cpu) {
+                tcd_for_each_type_lock(tcd, i, cpu)
+                        put_pages_on_tcd_daemon_list(pc, tcd);
+        }
}

void trace_debug_print(void)

-static void trace_cleanup_on_cpu(void *info)
+static void trace_cleanup_on_all_cpus(void)
{
        struct trace_cpu_data *tcd;
        struct trace_page *tage;
        struct trace_page *tmp;
-        int i;
+        int i, cpu;

-        tcd_for_each_type_lock(tcd, i) {
-                tcd->tcd_shutting_down = 1;
+        for_each_possible_cpu(cpu) {
+                tcd_for_each_type_lock(tcd, i, cpu) {
+                        tcd->tcd_shutting_down = 1;
-                cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages,
-                                                   struct trace_page, linkage) {
-                        __LASSERT_TAGE_INVARIANT(tage);
+                        cfs_list_for_each_entry_safe_typed(tage, tmp,
+                                                           &tcd->tcd_pages,
+                                                           struct trace_page,
+                                                           linkage) {
+                                __LASSERT_TAGE_INVARIANT(tage);
-                        list_del(&tage->linkage);
-                        tage_free(tage);
+                                list_del(&tage->linkage);
+                                tage_free(tage);
+                        }
+
+                        tcd->tcd_cur_pages = 0;
-                tcd->tcd_cur_pages = 0;

        CFS_INIT_LIST_HEAD(&pc.pc_pages);
        spin_lock_init(&pc.pc_lock);
-        trace_call_on_all_cpus(trace_cleanup_on_cpu, &pc);
+        trace_cleanup_on_all_cpus();

union trace_data_union {
        struct trace_cpu_data {
                /*
+                 * Even though this structure is meant to be per-CPU, locking
+                 * is needed because in some places the data may be accessed
+                 * from other CPUs. This lock is directly used in trace_get_tcd
+                 * and trace_put_tcd, which are called from libcfs_debug_vmsg2
+                 * and tcd_for_each_type_lock.
+                 */
+                spinlock_t              tcd_lock;
+                unsigned long           tcd_lock_flags;
+
+                /*
                 * pages with trace records not yet processed by tracefiled.
                 */
                struct list_head        tcd_pages;

        for (j = 0, ((tcd) = &(*trace_data[i])[j].tcd);                 \
             j < num_possible_cpus(); j++, (tcd) = &(*trace_data[i])[j].tcd)

-#define tcd_for_each_type_lock(tcd, i)                                 \
+#define tcd_for_each_type_lock(tcd, i, cpu)                            \
        for (i = 0; trace_data[i] &&                                    \
-             (tcd = &(*trace_data[i])[smp_processor_id()].tcd) &&      \
+             (tcd = &(*trace_data[i])[cpu].tcd) &&                     \
             trace_lock_tcd(tcd); trace_unlock_tcd(tcd), i++)

/* XXX nikita: this declaration is internal to tracefile.c and should probably
extern char *trace_get_console_buffer(void);
extern void trace_put_console_buffer(char *buffer);
-extern void trace_call_on_all_cpus(void (*fn)(void *arg), void *arg);
-
int trace_refill_stock(struct trace_cpu_data *tcd, int gfp,
                       struct list_head *stock);

        return MAX(512, (total_mb * 80)/100);
}
-
-void
-trace_call_on_all_cpus(void (*fn)(void *_arg), void *arg)
-{
-        int cpu;
-        KAFFINITY mask = cfs_query_thread_affinity();
-
-        for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
-                if (cfs_tie_thread_to_cpu(cpu)) {
-                        ASSERT((int)KeGetCurrentProcessorNumber() == cpu);
-                        fn(arg);
-                        cfs_set_thread_affinity(mask);
-                }
-        }
-}