1 #define DEBUG_SUBSYSTEM S_LNET
2 #define LUSTRE_TRACEFILE_PRIVATE
4 #include <libcfs/libcfs.h>
8 #define get_cpu() smp_processor_id()
9 #define put_cpu() do { } while (0)
12 /* three types of trace_data in linux */
20 /* percents to share the total debug memory for each type */
21 static unsigned int pages_factor[TCD_TYPE_MAX] = {
22 80, /* 80% pages for TCD_TYPE_PROC */
23 10, /* 10% pages for TCD_TYPE_SOFTIRQ */
24 10 /* 10% pages for TCD_TYPE_IRQ */
27 char *trace_console_buffers[NR_CPUS][3];
29 struct rw_semaphore tracefile_sem;
31 int tracefile_init_arch()
35 struct trace_cpu_data *tcd;
37 init_rwsem(&tracefile_sem);
39 /* initialize trace_data */
40 memset(trace_data, 0, sizeof(trace_data));
41 for (i = 0; i < TCD_TYPE_MAX; i++) {
42 trace_data[i]=kmalloc(sizeof(union trace_data_union)*NR_CPUS,
44 if (trace_data[i] == NULL)
49 /* arch related info initialized */
50 tcd_for_each(tcd, i, j) {
51 tcd->tcd_pages_factor = pages_factor[i];
56 for (i = 0; i < num_possible_cpus(); i++)
57 for (j = 0; j < 3; j++) {
58 trace_console_buffers[i][j] =
59 kmalloc(TRACE_CONSOLE_BUFFER_SIZE,
62 if (trace_console_buffers[i][j] == NULL)
69 tracefile_fini_arch();
70 printk(KERN_ERR "lnet: No enough memory\n");
75 void tracefile_fini_arch()
80 for (i = 0; i < num_possible_cpus(); i++)
81 for (j = 0; j < 3; j++)
82 if (trace_console_buffers[i][j] != NULL) {
83 kfree(trace_console_buffers[i][j]);
84 trace_console_buffers[i][j] = NULL;
87 for (i = 0; trace_data[i] != NULL; i++) {
93 void tracefile_read_lock()
95 down_read(&tracefile_sem);
98 void tracefile_read_unlock()
100 up_read(&tracefile_sem);
103 void tracefile_write_lock()
105 down_write(&tracefile_sem);
108 void tracefile_write_unlock()
110 up_write(&tracefile_sem);
114 trace_get_console_buffer(void)
121 } else if (in_softirq()) {
127 return trace_console_buffers[cpu][idx];
/* Release the buffer obtained from trace_get_console_buffer(): only the
 * CPU pin needs dropping, the buffer itself is permanent. */
void
trace_put_console_buffer(char *buffer)
{
	put_cpu();
}
136 struct trace_cpu_data *
143 return &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
144 else if (in_softirq())
145 return &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
146 return &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
/* Release the slot obtained from trace_get_tcd() by dropping the CPU pin. */
void
trace_put_tcd (struct trace_cpu_data *tcd)
{
	put_cpu();
}
155 int trace_lock_tcd(struct trace_cpu_data *tcd)
157 __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
158 if (tcd->tcd_type == TCD_TYPE_IRQ)
160 else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
165 void trace_unlock_tcd(struct trace_cpu_data *tcd)
167 __LASSERT(tcd->tcd_type < TCD_TYPE_MAX);
168 if (tcd->tcd_type == TCD_TYPE_IRQ)
170 else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
174 int tcd_owns_tage(struct trace_cpu_data *tcd, struct trace_page *tage)
177 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
178 * from here: this will lead to infinite recursion.
180 return tcd->tcd_cpu == tage->cpu;
184 set_ptldebug_header(struct ptldebug_header *header, int subsys, int mask,
185 const int line, unsigned long stack)
189 do_gettimeofday(&tv);
191 header->ph_subsys = subsys;
192 header->ph_mask = mask;
193 header->ph_cpu_id = smp_processor_id();
194 header->ph_sec = (__u32)tv.tv_sec;
195 header->ph_usec = tv.tv_usec;
196 header->ph_stack = stack;
197 header->ph_pid = current->pid;
198 header->ph_line_num = line;
199 #if defined(__arch_um__) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20))
200 header->ph_extern_pid = current->thread.extern_pid;
201 #elif defined(__arch_um__) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
202 header->ph_extern_pid = current->thread.mode.tt.extern_pid;
204 header->ph_extern_pid = 0;
209 void print_to_console(struct ptldebug_header *hdr, int mask, const char *buf,
210 int len, const char *file, const char *fn)
212 char *prefix = "Lustre", *ptype = NULL;
214 if ((mask & D_EMERG) != 0) {
215 prefix = "LustreError";
217 } else if ((mask & D_ERROR) != 0) {
218 prefix = "LustreError";
220 } else if ((mask & D_WARNING) != 0) {
222 ptype = KERN_WARNING;
223 } else if ((mask & (D_CONSOLE | libcfs_printk)) != 0) {
228 if ((mask & D_CONSOLE) != 0) {
229 printk("%s%s: %.*s", ptype, prefix, len, buf);
231 printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix, hdr->ph_pid,
232 hdr->ph_extern_pid, file, hdr->ph_line_num, fn, len, buf);
237 int trace_max_debug_mb(void)
239 int total_mb = (num_physpages >> (20 - CFS_PAGE_SHIFT));
241 return MAX(512, (total_mb * 80)/100);
245 trace_call_on_all_cpus(void (*fn)(void *arg), void *arg)
247 cpumask_t cpus_allowed = current->cpus_allowed;
248 /* use cpus_allowed to quiet 2.4 UP kernel warning only */
249 cpumask_t m = cpus_allowed;
252 /* Run the given routine on every CPU in thread context */
253 for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
254 if (!cpu_online(cpu))
259 set_cpus_allowed(current, m);
263 set_cpus_allowed(current, cpus_allowed);