#ifndef __LIBCFS_TRACEFILE_H__
#define __LIBCFS_TRACEFILE_H__

#include <libcfs/libcfs.h>

/* trace file lock routines */

#define TRACEFILE_NAME_SIZE 1024
extern char      tracefile[TRACEFILE_NAME_SIZE];
extern long long tracefile_size;
int  tracefile_init_arch(void);
void tracefile_fini_arch(void);

void tracefile_read_lock(void);
void tracefile_read_unlock(void);
void tracefile_write_lock(void);
void tracefile_write_unlock(void);

int  tracefile_dump_all_pages(char *filename);
void trace_debug_print(void);
void trace_flush_pages(void);
int  trace_start_thread(void);
void trace_stop_thread(void);
int  tracefile_init(void);
void tracefile_exit(void);
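/*
 * Usage sketch (editor's example): the bring-up/tear-down pairing suggested
 * by the declarations above.  The exact call order used by libcfs lives in
 * the module init code, not in this header.
 *
 *      if (tracefile_init() == 0 && trace_start_thread() == 0) {
 *              ... tracing active, tracefiled flushing pages to disk ...
 *      }
 *      trace_stop_thread();
 *      tracefile_exit();
 */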
int trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
                        const char *usr_buffer, int usr_buffer_nob);
int trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
                         const char *knl_str, char *append);
int trace_allocate_string_buffer(char **str, int nob);
void trace_free_string_buffer(char *str, int nob);
int trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob);
int trace_daemon_command(char *str);
int trace_daemon_command_usrstr(void *usr_str, int usr_str_nob);
int trace_set_debug_mb(int mb);
int trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob);
int trace_get_debug_mb(void);
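/*
 * Usage sketch (editor's example, not part of the original interface): the
 * *_usrstr() entry points plausibly wrap their kernel-string counterparts
 * with the string helpers above, roughly:
 *
 *      char *str;
 *      int   rc;
 *
 *      rc = trace_allocate_string_buffer(&str, usr_str_nob + 1);
 *      if (rc != 0)
 *              return rc;
 *      rc = trace_copyin_string(str, usr_str_nob + 1,
 *                               usr_str, usr_str_nob);
 *      if (rc == 0)
 *              rc = trace_daemon_command(str);
 *      trace_free_string_buffer(str, usr_str_nob + 1);
 *      return rc;
 *
 * The "+ 1" buffer sizing is an assumption; see tracefile.c for the real
 * implementations.
 */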
extern void libcfs_debug_dumplog_internal(void *arg);
extern void libcfs_register_panic_notifier(void);
extern void libcfs_unregister_panic_notifier(void);
extern int  libcfs_panic_in_progress;
#ifdef LUSTRE_TRACEFILE_PRIVATE

/*
 * Private declarations for tracefile
 */

#define TCD_MAX_PAGES   (5 << (20 - CFS_PAGE_SHIFT))
#define TCD_STOCK_PAGES (TCD_MAX_PAGES)

#define TRACEFILE_SIZE  (500 << 20)

/* Size of a buffer for sprintf'ing console messages if we can't get a page
 * from the system */
#define TRACE_CONSOLE_BUFFER_SIZE 1024
union trace_data_union {
        struct trace_cpu_data {
                /*
                 * pages with trace records not yet processed by tracefiled.
                 */
                struct list_head        tcd_pages;
                /* number of pages on ->tcd_pages */
                unsigned long           tcd_cur_pages;

                /*
                 * pages with trace records already processed by
                 * tracefiled. These pages are kept in memory, so that some
                 * portion of the log can be written in the event of LBUG.
                 * This list is maintained in LRU order.
                 *
                 * Pages are moved to ->tcd_daemon_pages by tracefiled()
                 * (put_pages_on_daemon_list()). LRU pages from this list are
                 * discarded when the list grows too large.
                 */
                struct list_head        tcd_daemon_pages;
                /* number of pages on ->tcd_daemon_pages */
                unsigned long           tcd_cur_daemon_pages;

                /*
                 * Maximal number of pages allowed on ->tcd_pages and
                 * ->tcd_daemon_pages each.
                 * Always TCD_MAX_PAGES * tcd_pages_factor / 100 in the
                 * current implementation.
                 */
                unsigned long           tcd_max_pages;

                /*
                 * Preallocated pages to write trace records into. Pages from
                 * ->tcd_stock_pages are moved to ->tcd_pages by
                 * portals_debug_msg().
                 *
                 * This list is necessary, because on some platforms it's
                 * impossible to perform efficient atomic page allocation in a
                 * non-blockable context.
                 *
                 * Such platforms fill ->tcd_stock_pages "on occasion", when
                 * tracing code is entered in a blockable context.
                 *
                 * trace_get_tage_try() tries to get a page from
                 * ->tcd_stock_pages first and resorts to atomic page
                 * allocation only if this queue is empty. ->tcd_stock_pages
                 * is replenished when tracing code is entered in a blocking
                 * context (darwin-tracefile.c:trace_get_tcd()). We try to
                 * maintain TCD_STOCK_PAGES pages in this queue. Atomic
                 * allocation is only required if more than TCD_STOCK_PAGES
                 * pagesful are consumed by trace records all emitted in
                 * non-blocking contexts, which is quite unlikely.
                 */
                struct list_head        tcd_stock_pages;
                /* number of pages on ->tcd_stock_pages */
                unsigned long           tcd_cur_stock_pages;

                unsigned short          tcd_shutting_down;
                unsigned short          tcd_cpu;
                unsigned short          tcd_type;
                /* percentage factor used to divide debug memory among the
                 * per-CPU/per-type structures */
                unsigned short          tcd_pages_factor;
        } tcd;
        char __pad[L1_CACHE_ALIGN(sizeof(struct trace_cpu_data))];
};
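/*
 * Worked example (editor's note): with CFS_PAGE_SHIFT == 12 (4K pages),
 * TCD_MAX_PAGES = 5 << (20 - 12) = 1280 pages, i.e. 5 MB of trace buffer
 * per TCD.  A TCD with tcd_pages_factor == 80 is then limited to
 * tcd_max_pages = 1280 * 80 / 100 = 1024 pages (4 MB).  The factor value
 * 80 is purely illustrative.
 */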
#define TCD_MAX_TYPES 8
extern union trace_data_union (*trace_data[TCD_MAX_TYPES])[NR_CPUS];
#define tcd_for_each(tcd, i, j)                                         \
        for (i = 0; trace_data[i] != NULL; i++)                         \
                for (j = 0, ((tcd) = &(*trace_data[i])[j].tcd);         \
                     j < num_possible_cpus();                           \
                     j++, (tcd) = &(*trace_data[i])[j].tcd)

#define tcd_for_each_type_lock(tcd, i)                                  \
        for (i = 0; trace_data[i] &&                                    \
             (tcd = &(*trace_data[i])[smp_processor_id()].tcd) &&       \
             trace_lock_tcd(tcd); trace_unlock_tcd(tcd), i++)
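/*
 * Usage sketch (editor's example): summing the queued trace pages over
 * every trace_cpu_data instance; the variable names are illustrative.
 *
 *      struct trace_cpu_data *tcd;
 *      unsigned long total = 0;
 *      int i, j;
 *
 *      tcd_for_each(tcd, i, j)
 *              total += tcd->tcd_cur_pages;
 */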
/* XXX nikita: this declaration is internal to tracefile.c and should
 * probably be moved there */
struct page_collection {
        struct list_head        pc_pages;
        /*
         * spin-lock protecting ->pc_pages. It is taken by smp_call_function()
         * call-back functions. XXX nikita: which is horrible: all processors
         * receive an NMI at the same time only to be serialized by this
         * lock. Probably ->pc_pages should be replaced with an array of
         * NR_CPUS elements accessed locklessly.
         */
        spinlock_t              pc_lock;
        /*
         * if this flag is set, collect_pages() will spill both
         * ->tcd_daemon_pages and ->tcd_pages to ->pc_pages. Otherwise,
         * only ->tcd_pages are spilled.
         */
        int                     pc_want_daemon_pages;
};
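/*
 * Usage sketch (editor's example, assuming the usual list/lock
 * initializers): how a dump path might set up a page_collection before
 * handing it to the page-collection code in tracefile.c.
 *
 *      struct page_collection pc;
 *
 *      CFS_INIT_LIST_HEAD(&pc.pc_pages);
 *      spin_lock_init(&pc.pc_lock);
 *      pc.pc_want_daemon_pages = 1;
 *
 * Setting pc_want_daemon_pages makes collect_pages() spill
 * ->tcd_daemon_pages as well (see the field comment above).
 */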
/* XXX nikita: this declaration is internal to tracefile.c and should
 * probably be moved there */
struct tracefiled_ctl {
        struct completion       tctl_start;
        struct completion       tctl_stop;
        cfs_waitq_t             tctl_waitq;
        atomic_t                tctl_shutdown;
};
/*
 * small data-structure for each page owned by tracefiled.
 */
/* XXX nikita: this declaration is internal to tracefile.c and should
 * probably be moved there */
struct trace_page {
        /* page itself */
        cfs_page_t       *page;
        /* linkage into one of the lists in trace_data_union or
         * page_collection */
        struct list_head  linkage;
        /* number of bytes used within this page */
        unsigned int      used;
        /* cpu that owns this page */
        unsigned short    cpu;
        /* type (context) of this page */
        unsigned short    type;
};
extern void set_ptldebug_header(struct ptldebug_header *header,
                                int subsys, int mask, const int line,
                                unsigned long stack);
extern void print_to_console(struct ptldebug_header *hdr, int mask,
                             const char *buf, int len, const char *file,
                             const char *fn);

extern struct trace_cpu_data *trace_get_tcd(void);
extern void trace_put_tcd(struct trace_cpu_data *tcd);
extern int  trace_lock_tcd(struct trace_cpu_data *tcd);
extern void trace_unlock_tcd(struct trace_cpu_data *tcd);
extern char *trace_get_console_buffer(void);
extern void trace_put_console_buffer(char *buffer);
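/*
 * Usage sketch (editor's example): emitting a message through the emergency
 * console buffer when no trace page is available.  The snprintf() call and
 * header setup shown here are illustrative only.
 *
 *      char *buf = trace_get_console_buffer();
 *      int   len;
 *
 *      len = snprintf(buf, TRACE_CONSOLE_BUFFER_SIZE, "...", ...);
 *      print_to_console(&header, mask, buf, len, file, fn);
 *      trace_put_console_buffer(buf);
 */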
extern void trace_call_on_all_cpus(void (*fn)(void *arg), void *arg);

extern int trace_max_debug_mb(void);

int trace_refill_stock(struct trace_cpu_data *tcd, int gfp,
                       struct list_head *stock);

int tcd_owns_tage(struct trace_cpu_data *tcd, struct trace_page *tage);

extern void trace_assertion_failed(const char *str, const char *fn,
                                   const char *file, int line);
/* ASSERTION that is safe to use within the debug system */
#define __LASSERT(cond)                                                   \
do {                                                                      \
        if (unlikely(!(cond))) {                                          \
                trace_assertion_failed("ASSERTION("#cond") failed",       \
                                       __FUNCTION__, __FILE__, __LINE__); \
        }                                                                 \
} while (0)
#define __LASSERT_TAGE_INVARIANT(tage)                                    \
do {                                                                      \
        __LASSERT(tage != NULL);                                          \
        __LASSERT(tage->page != NULL);                                    \
        __LASSERT(tage->used <= CFS_PAGE_SIZE);                           \
        __LASSERT(cfs_page_count(tage->page) > 0);                        \
} while (0)
#endif /* LUSTRE_TRACEFILE_PRIVATE */

#endif /* __LIBCFS_TRACEFILE_H__ */