#define put_cpu() do { } while (0)
#endif
-extern union trace_data_union trace_data[NR_CPUS];
+/* three types of trace_data in Linux */
+enum {
+ TCD_TYPE_PROC = 0,
+ TCD_TYPE_SOFTIRQ,
+ TCD_TYPE_IRQ,
+ TCD_TYPE_MAX
+};
+
+/* percentage of the total debug memory given to each type */
+static unsigned int pages_factor[TCD_TYPE_MAX] = {
+ 80, /* 80% pages for TCD_TYPE_PROC */
+ 10, /* 10% pages for TCD_TYPE_SOFTIRQ */
+ 10 /* 10% pages for TCD_TYPE_IRQ */
+};
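At runtime these percentages scale each CPU's share of the total debug page budget. A minimal sketch of the arithmetic (hypothetical helper, not part of the patch; assumes the equal per-CPU split that trace_set_debug_mb() performs below):

static unsigned long tcd_page_budget(unsigned long total_pages,
                                     unsigned int ncpus, unsigned int type)
{
        unsigned long per_cpu = total_pages / ncpus;  /* equal CPU shares */

        return (per_cpu * pages_factor[type]) / 100;  /* type's percentage */
}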
char *trace_console_buffers[NR_CPUS][3];
{
int i;
int j;
+ struct trace_cpu_data *tcd;
init_rwsem(&tracefile_sem);
+ /* initialize trace_data */
+ memset(trace_data, 0, sizeof(trace_data));
+ for (i = 0; i < TCD_TYPE_MAX; i++) {
+ trace_data[i] = kmalloc(sizeof(union trace_data_union) * NR_CPUS,
+ GFP_KERNEL);
+ if (trace_data[i] == NULL)
+ goto out;
+ }
+
+ /* initialize arch-related info */
+ tcd_for_each(tcd, i, j) {
+ tcd->tcd_pages_factor = pages_factor[i];
+ tcd->tcd_type = i;
+ tcd->tcd_cpu = j;
+ }
+
for (i = 0; i < num_possible_cpus(); i++)
for (j = 0; j < 3; j++) {
trace_console_buffers[i][j] =
kmalloc(TRACE_CONSOLE_BUFFER_SIZE,
GFP_KERNEL);
- if (trace_console_buffers[i][j] == NULL) {
- tracefile_fini_arch();
- printk(KERN_ERR
- "Can't allocate "
- "console message buffer\n");
- return -ENOMEM;
- }
+ if (trace_console_buffers[i][j] == NULL)
+ goto out;
}
return 0;
+
+out:
+ tracefile_fini_arch();
+ printk(KERN_ERR "lnet: No enough memory\n");
+ return -ENOMEM;
+
}
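The memset() up front is what makes the shared out: path safe: a failure partway through leaves a NULL-terminated prefix in trace_data, so tracefile_fini_arch() frees exactly the slots that were allocated and stops at the first hole. A standalone sketch of the same pattern (hypothetical names, assuming GFP_KERNEL context):

#include <linux/slab.h>
#include <linux/string.h>

static void *slot[4];

static int init_slots(void)
{
        int i;

        memset(slot, 0, sizeof(slot));          /* all-NULL marks "unused" */
        for (i = 0; i < 4; i++) {
                slot[i] = kmalloc(128, GFP_KERNEL);
                if (slot[i] == NULL)
                        goto out;               /* slot[i..] remain NULL */
        }
        return 0;

out:
        for (i = 0; slot[i] != NULL; i++) {     /* free only what exists */
                kfree(slot[i]);
                slot[i] = NULL;
        }
        return -ENOMEM;
}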
void tracefile_fini_arch()
kfree(trace_console_buffers[i][j]);
trace_console_buffers[i][j] = NULL;
}
+
+ for (i = 0; trace_data[i] != NULL; i++) {
+ kfree(trace_data[i]);
+ trace_data[i] = NULL;
+ }
}
void tracefile_read_lock()
{
int cpu;
- if (in_interrupt()) /* no logging in IRQ context */
- return NULL;
-
cpu = get_cpu();
- return &trace_data[cpu].tcd;
+ if (in_irq())
+ return &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
+ else if (in_softirq())
+ return &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
+ return &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
}
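The order of the tests above matters: a hardirq can arrive while a softirq is running, in which case both in_irq() and in_softirq() are true, so in_irq() has to win. A sketch of the classification on its own (hypothetical helper, assuming standard preempt_count() semantics):

#include <linux/hardirq.h>

static int current_tcd_type(void)
{
        if (in_irq())           /* hardirq, even one that interrupted a softirq */
                return TCD_TYPE_IRQ;
        if (in_softirq())       /* softirq, or a bh-disabled section */
                return TCD_TYPE_SOFTIRQ;
        return TCD_TYPE_PROC;   /* plain process context */
}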
void
trace_put_tcd (struct trace_cpu_data *tcd)
{
- __LASSERT (!in_interrupt());
put_cpu();
}
+int trace_lock_tcd(struct trace_cpu_data *tcd)
+{
+ __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
+ if (tcd->tcd_type == TCD_TYPE_IRQ)
+ local_irq_disable();
+ else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
+ local_bh_disable();
+ return 1;
+}
+
+void trace_unlock_tcd(struct trace_cpu_data *tcd)
+{
+ __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
+ if (tcd->tcd_type == TCD_TYPE_IRQ)
+ local_irq_enable();
+ else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
+ local_bh_enable();
+}
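Only the contexts that can reenter the TCD need masking: the IRQ type disables interrupts, the softirq type disables bottom halves, and the process type relies on the preemption disable already implied by get_cpu() in trace_get_tcd(). A hedged sketch of the intended caller pairing (hypothetical function, not part of the patch):

static void trace_append_example(void)
{
        struct trace_cpu_data *tcd = trace_get_tcd();   /* get_cpu() inside */

        if (trace_lock_tcd(tcd)) {
                /* ... safe to touch tcd->tcd_pages here ... */
                trace_unlock_tcd(tcd);
        }
        trace_put_tcd(tcd);                             /* put_cpu() inside */
}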
+
int tcd_owns_tage(struct trace_cpu_data *tcd, struct trace_page *tage)
{
/*
#include <libcfs/libcfs.h>
/* XXX move things up to the top, comment */
-union trace_data_union trace_data[NR_CPUS] __cacheline_aligned;
+union trace_data_union (*trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;
char tracefile[TRACEFILE_NAME_SIZE];
long long tracefile_size = TRACEFILE_SIZE;
tage->used = 0;
tage->cpu = smp_processor_id();
+ tage->type = tcd->tcd_type;
list_add_tail(&tage->linkage, &tcd->tcd_pages);
tcd->tcd_cur_pages++;
* CPUs have been stopped during a panic. If this isn't true for some
* arch, this will have to be implemented separately in each arch. */
int i;
+ int j;
struct trace_cpu_data *tcd;
CFS_INIT_LIST_HEAD(&pc->pc_pages);
- for (i = 0; i < num_possible_cpus(); i++) {
- tcd = &trace_data[i].tcd;
-
- list_splice(&tcd->tcd_pages, &pc->pc_pages);
- CFS_INIT_LIST_HEAD(&tcd->tcd_pages);
+ tcd_for_each(tcd, i, j) {
+ list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
tcd->tcd_cur_pages = 0;
if (pc->pc_want_daemon_pages) {
- list_splice(&tcd->tcd_daemon_pages, &pc->pc_pages);
- CFS_INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
+ list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
tcd->tcd_cur_daemon_pages = 0;
}
}
{
struct trace_cpu_data *tcd;
struct page_collection *pc = info;
-
- tcd = trace_get_tcd();
- __LASSERT (tcd != NULL);
+ int i;
spin_lock(&pc->pc_lock);
- list_splice(&tcd->tcd_pages, &pc->pc_pages);
- CFS_INIT_LIST_HEAD(&tcd->tcd_pages);
- tcd->tcd_cur_pages = 0;
- if (pc->pc_want_daemon_pages) {
- list_splice(&tcd->tcd_daemon_pages, &pc->pc_pages);
- CFS_INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
- tcd->tcd_cur_daemon_pages = 0;
+ tcd_for_each_type_lock(tcd, i) {
+ list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
+ tcd->tcd_cur_pages = 0;
+ if (pc->pc_want_daemon_pages) {
+ list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
+ tcd->tcd_cur_daemon_pages = 0;
+ }
}
spin_unlock(&pc->pc_lock);
-
- trace_put_tcd(tcd);
}
static void collect_pages(struct page_collection *pc)
struct list_head *cur_head;
struct trace_page *tage;
struct trace_page *tmp;
-
- tcd = trace_get_tcd();
- __LASSERT (tcd != NULL);
-
- cur_head = tcd->tcd_pages.next;
+ int i;
spin_lock(&pc->pc_lock);
- list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
+ tcd_for_each_type_lock(tcd, i) {
+ cur_head = tcd->tcd_pages.next;
- __LASSERT_TAGE_INVARIANT(tage);
+ list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
- if (tage->cpu != smp_processor_id())
- continue;
+ __LASSERT_TAGE_INVARIANT(tage);
- tage_to_tail(tage, cur_head);
- tcd->tcd_cur_pages++;
+ if (tage->cpu != smp_processor_id() || tage->type != i)
+ continue;
+
+ tage_to_tail(tage, cur_head);
+ tcd->tcd_cur_pages++;
+ }
}
spin_unlock(&pc->pc_lock);
-
- trace_put_tcd(tcd);
}
static void put_pages_back(struct page_collection *pc)
__LASSERT_TAGE_INVARIANT(tage);
- if (tage->cpu != smp_processor_id())
+ if (tage->cpu != smp_processor_id() ||
+ tage->type != tcd->tcd_type)
continue;
tage_to_tail(tage, &tcd->tcd_daemon_pages);
static void put_pages_on_daemon_list_on_cpu(void *info)
{
struct trace_cpu_data *tcd;
+ int i;
- tcd = trace_get_tcd();
- __LASSERT (tcd != NULL);
-
- put_pages_on_tcd_daemon_list(info, tcd);
-
- trace_put_tcd(tcd);
+ tcd_for_each_type_lock(tcd, i)
+ put_pages_on_tcd_daemon_list(info, tcd);
}
static void put_pages_on_daemon_list(struct page_collection *pc)
int trace_set_debug_mb(int mb)
{
int i;
+ int j;
+ int pages;
int limit = trace_max_debug_mb();
+ struct trace_cpu_data *tcd;
- if (mb <= 0)
+ if (mb < num_possible_cpus())
return -EINVAL;
if (mb > limit) {
}
mb /= num_possible_cpus();
+ pages = mb << (20 - CFS_PAGE_SHIFT);
tracefile_write_lock();
- for (i = 0; i < num_possible_cpus(); i++) {
- struct trace_cpu_data *tcd = &trace_data[i].tcd;
-
- tcd->tcd_max_pages = mb << (20 - CFS_PAGE_SHIFT);
- }
+ tcd_for_each(tcd, i, j)
+ tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
tracefile_write_unlock();
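Working the arithmetic once makes the split concrete; the numbers below are hypothetical and assume 4 KB pages (CFS_PAGE_SHIFT == 12) on a 4-CPU machine:

/* trace_set_debug_mb(128) with num_possible_cpus() == 4:
 *
 *   mb      = 128 / 4             = 32 MB per CPU
 *   pages   = 32 << (20 - 12)     = 8192 pages per CPU
 *   PROC    = 8192 * 80 / 100     = 6553 pages
 *   SOFTIRQ = 8192 * 10 / 100     =  819 pages
 *   IRQ     = 8192 * 10 / 100     =  819 pages
 */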
int trace_get_debug_mb(void)
{
int i;
+ int j;
+ struct trace_cpu_data *tcd;
int total_pages = 0;
tracefile_read_lock();
- for (i = 0; i < num_possible_cpus(); i++) {
- struct trace_cpu_data *tcd = &trace_data[i].tcd;
-
+ tcd_for_each(tcd, i, j)
total_pages += tcd->tcd_max_pages;
- }
tracefile_read_unlock();
- return total_pages >> (20 - CFS_PAGE_SHIFT);
+ return (total_pages >> (20 - CFS_PAGE_SHIFT)) + 1;
}
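Continuing the hypothetical numbers above, the trailing "+ 1" covers the rounding lost to integer division on the way in:

/* Per-CPU total: 6553 + 819 + 819 = 8191 pages; times 4 CPUs = 32764 pages.
 * 32764 >> (20 - 12) = 127 MB, so the "+ 1" rounds back up to the 128 MB
 * the caller originally asked for. */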
static int tracefiled(void *arg)
{
struct trace_cpu_data *tcd;
int i;
+ int j;
int rc;
+ int factor;
rc = tracefile_init_arch();
if (rc != 0)
return rc;
- for (i = 0; i < num_possible_cpus(); i++) {
- tcd = &trace_data[i].tcd;
+ tcd_for_each(tcd, i, j) {
+ /* tcd_pages_factor is initialized in tracefile_init_arch(). */
+ factor = tcd->tcd_pages_factor;
CFS_INIT_LIST_HEAD(&tcd->tcd_pages);
CFS_INIT_LIST_HEAD(&tcd->tcd_stock_pages);
CFS_INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
tcd->tcd_cur_pages = 0;
tcd->tcd_cur_stock_pages = 0;
tcd->tcd_cur_daemon_pages = 0;
- tcd->tcd_max_pages = TCD_MAX_PAGES;
+ tcd->tcd_max_pages = (TCD_MAX_PAGES * factor) / 100;
+ LASSERT(tcd->tcd_max_pages > 0);
tcd->tcd_shutting_down = 0;
- tcd->tcd_cpu = i;
}
return 0;
struct trace_cpu_data *tcd;
struct trace_page *tage;
struct trace_page *tmp;
+ int i;
- tcd = trace_get_tcd();
- __LASSERT (tcd != NULL);
-
- tcd->tcd_shutting_down = 1;
+ tcd_for_each_type_lock(tcd, i) {
+ tcd->tcd_shutting_down = 1;
- list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
- __LASSERT_TAGE_INVARIANT(tage);
+ list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
+ __LASSERT_TAGE_INVARIANT(tage);
- list_del(&tage->linkage);
- tage_free(tage);
+ list_del(&tage->linkage);
+ tage_free(tage);
+ }
+ tcd->tcd_cur_pages = 0;
}
- tcd->tcd_cur_pages = 0;
-
- trace_put_tcd(tcd);
}
static void trace_cleanup(void)
#define TRACEFILE_SIZE (500 << 20)
-/* Size of a buffer for sprinting console messages to in IRQ context (no
- * logging in IRQ context) */
+/* Size of a buffer for sprintf'ing console messages if we can't get a
+ * page from the system */
#define TRACE_CONSOLE_BUFFER_SIZE 1024
union trace_data_union {
/*
* Maximal number of pages allowed on ->tcd_pages and
- * ->tcd_daemon_pages each. Always TCD_MAX_PAGES in current
+ * ->tcd_daemon_pages each.
+ * Always TCD_MAX_PAGES * tcd_pages_factor / 100 in current
* implementation.
*/
unsigned long tcd_max_pages;
/* number of pages on ->tcd_stock_pages */
unsigned long tcd_cur_stock_pages;
- int tcd_shutting_down;
- int tcd_cpu;
+ unsigned short tcd_shutting_down;
+ unsigned short tcd_cpu;
+ unsigned short tcd_type;
+ /* percentage share of the debug memory for this type */
+ unsigned short tcd_pages_factor;
} tcd;
- char __pad[SMP_CACHE_BYTES];
+ char __pad[L1_CACHE_ALIGN(sizeof(struct trace_cpu_data))];
};
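Sizing __pad with L1_CACHE_ALIGN(sizeof(struct trace_cpu_data)) rounds the whole union up to a multiple of the cache line, so adjacent per-CPU entries in the trace_data arrays never share a line. A compile-time check one could add (an assumption, not in the patch; the pre-C11 negative-array-size static-assert idiom):

#include <linux/cache.h>

/* Fails to compile if the union is not a whole number of cache lines. */
typedef char tdu_pad_check[(sizeof(union trace_data_union) %
                            L1_CACHE_BYTES) == 0 ? 1 : -1];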
+#define TCD_MAX_TYPES 8
+extern union trace_data_union (*trace_data[TCD_MAX_TYPES])[NR_CPUS];
+
+#define tcd_for_each(tcd, i, j) \
+ for (i = 0; trace_data[i] != NULL; i++) \
+ for (j = 0, ((tcd) = &(*trace_data[i])[j].tcd); \
+ j < num_possible_cpus(); j++, (tcd) = &(*trace_data[i])[j].tcd)
+
+#define tcd_for_each_type_lock(tcd, i) \
+ for (i = 0; trace_data[i] && \
+ (tcd = &(*trace_data[i])[smp_processor_id()].tcd) && \
+ trace_lock_tcd(tcd); trace_unlock_tcd(tcd), i++)
+
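The two iterators divide the work: tcd_for_each visits every (type, cpu) pair with no locking, which suits init and teardown paths, while tcd_for_each_type_lock visits only the current CPU's TCD of each type and holds trace_lock_tcd() across each body iteration; note that a break out of its body would skip the trace_unlock_tcd() in the increment expression. A small usage sketch (hypothetical function):

static unsigned long count_trace_pages(void)
{
        struct trace_cpu_data *tcd;
        unsigned long sum = 0;
        int i;
        int j;

        tcd_for_each(tcd, i, j)         /* unlocked walk over all TCDs */
                sum += tcd->tcd_cur_pages;
        return sum;
}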
/* XXX nikita: this declaration is internal to tracefile.c and should probably
* be moved there */
struct page_collection {
/*
* cpu that owns this page
*/
- int cpu;
+ unsigned short cpu;
+ /*
+ * type(context) of this page
+ */
+ unsigned short type;
};
extern void set_ptldebug_header(struct ptldebug_header *header,
extern struct trace_cpu_data *trace_get_tcd(void);
extern void trace_put_tcd(struct trace_cpu_data *tcd);
+extern int trace_lock_tcd(struct trace_cpu_data *tcd);
+extern void trace_unlock_tcd(struct trace_cpu_data *tcd);
extern char *trace_get_console_buffer(void);
extern void trace_put_console_buffer(char *buffer);
#define put_cpu() do { } while (0)
#endif
-extern union trace_data_union trace_data[NR_CPUS];
+#define TCD_TYPE_MAX 1
event_t tracefile_event;
{
int i;
int j;
+ struct trace_cpu_data *tcd;
cfs_init_event(&tracefile_event, TRUE, TRUE);
+ /* initialize trace_data */
+ memset(trace_data, 0, sizeof(trace_data));
+ for (i = 0; i < TCD_TYPE_MAX; i++) {
+ trace_data[i] = cfs_alloc(sizeof(union trace_data_union) * NR_CPUS, 0);
+ if (trace_data[i] == NULL)
+ goto out;
+ }
+
+ /* initialize arch-related info */
+ tcd_for_each(tcd, i, j) {
+ tcd->tcd_pages_factor = 100; /* Only one type */
+ tcd->tcd_cpu = j;
+ tcd->tcd_type = i;
+ }
+
memset(trace_console_buffers, 0, sizeof(trace_console_buffers));
for (i = 0; i < NR_CPUS; i++) {
cfs_alloc(TRACE_CONSOLE_BUFFER_SIZE,
CFS_ALLOC_ZERO);
- if (trace_console_buffers[i][j] == NULL) {
- tracefile_fini_arch();
- KsPrint((0, "Can't allocate console message buffer\n"));
- return -ENOMEM;
- }
+ if (trace_console_buffers[i][j] == NULL)
+ goto out;
}
}
return 0;
+
+out:
+ tracefile_fini_arch();
+ KsPrint((0, "lnet: No enough memory\n"));
+ return -ENOMEM;
}
void tracefile_fini_arch()
}
}
}
+
+ for (i = 0; trace_data[i] != NULL; i++) {
+ cfs_free(trace_data[i]);
+ trace_data[i] = NULL;
+ }
}
void tracefile_read_lock()
#pragma message("todo: return NULL if in interrupt context")
int cpu = (int) KeGetCurrentProcessorNumber();
- return &trace_data[cpu].tcd;
+ return &(*trace_data[0])[cpu].tcd;
}
void
{
}
+int
+trace_lock_tcd(struct trace_cpu_data *tcd)
+{
+ __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
+ return 1;
+}
+
+void
+trace_unlock_tcd(struct trace_cpu_data *tcd)
+{
+ __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
+}
+
void
set_ptldebug_header(struct ptldebug_header *header, int subsys, int mask,
const int line, unsigned long stack)