LU-9859 libcfs: use wait_event_timeout() in tracefiled().
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/tracefile.c
 *
 * Author: Zach Brown <zab@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LNET
#include "tracefile.h"

#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/tty.h>
#include <linux/uaccess.h>
#include <libcfs/linux/linux-fs.h>
#include <libcfs/libcfs.h>

#define CFS_TRACE_CONSOLE_BUFFER_SIZE   1024

enum cfs_trace_buf_type {
        CFS_TCD_TYPE_PROC = 0,
        CFS_TCD_TYPE_SOFTIRQ,
        CFS_TCD_TYPE_IRQ,
        CFS_TCD_TYPE_CNT
};

union cfs_trace_data_union (*cfs_trace_data[CFS_TCD_TYPE_CNT])[NR_CPUS] __cacheline_aligned;

char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_CNT];
char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
static DEFINE_MUTEX(cfs_trace_thread_mutex);
static int thread_running = 0;

static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
static DECLARE_RWSEM(cfs_tracefile_sem);

static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                        struct cfs_trace_cpu_data *tcd);

/* trace file lock routines */
/* The walking argument indicates that the caller is iterating over all
 * tcd types, so we must take the lock and disable local irqs to avoid
 * deadlocks with locks taken from interrupt context. See LU-1311
 * for details.
 */
int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
        __acquires(&tcd->tcd_lock)
{
        __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_CNT);
        if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
                spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
        else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
                spin_lock_bh(&tcd->tcd_lock);
        else if (unlikely(walking))
                spin_lock_irq(&tcd->tcd_lock);
        else
                spin_lock(&tcd->tcd_lock);
        return 1;
}

void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
        __releases(&tcd->tcd_lock)
{
        __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_CNT);
        if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
                spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
        else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
                spin_unlock_bh(&tcd->tcd_lock);
        else if (unlikely(walking))
                spin_unlock_irq(&tcd->tcd_lock);
        else
                spin_unlock(&tcd->tcd_lock);
}

#define cfs_tcd_for_each(tcd, i, j)                                     \
        for (i = 0; i < CFS_TCD_TYPE_CNT && cfs_trace_data[i]; i++)     \
                for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd);     \
                     j < num_possible_cpus();                           \
                     j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)

#define cfs_tcd_for_each_type_lock(tcd, i, cpu)                         \
        for (i = 0; i < CFS_TCD_TYPE_CNT && cfs_trace_data[i] &&        \
             (tcd = &(*cfs_trace_data[i])[cpu].tcd) &&                  \
             cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++)

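/*
 * cfs_trace_data is indexed first by execution context (process,
 * softirq, irq) and then by CPU, so each CPU owns one trace buffer per
 * context; the iterators above walk that two-dimensional layout, and
 * cfs_trace_buf_idx_get() below selects the context index for the
 * current caller.
 */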
enum cfs_trace_buf_type cfs_trace_buf_idx_get(void)
{
        if (in_irq())
                return CFS_TCD_TYPE_IRQ;
        if (in_softirq())
                return CFS_TCD_TYPE_SOFTIRQ;
        return CFS_TCD_TYPE_PROC;
}

static inline char *cfs_trace_get_console_buffer(void)
{
        unsigned int i = get_cpu();
        unsigned int j = cfs_trace_buf_idx_get();

        return cfs_trace_console_buffers[i][j];
}

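/*
 * cfs_trace_get_tcd() pins the caller to the current CPU via get_cpu()
 * and takes the matching tcd lock; every call must be paired with
 * cfs_trace_put_tcd(), which drops the lock and re-enables preemption.
 */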
static inline struct cfs_trace_cpu_data *
cfs_trace_get_tcd(void)
{
        struct cfs_trace_cpu_data *tcd =
                &(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd;

        cfs_trace_lock_tcd(tcd, 0);

        return tcd;
}

static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd)
{
        cfs_trace_unlock_tcd(tcd, 0);

        put_cpu();
}

static inline struct cfs_trace_page *
cfs_tage_from_list(struct list_head *list)
{
        return list_entry(list, struct cfs_trace_page, linkage);
}

static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
{
        struct page            *page;
        struct cfs_trace_page *tage;

        /* My caller is trying to free memory */
        if (!in_interrupt() && (current->flags & PF_MEMALLOC))
                return NULL;

        /*
         * Don't spam console with allocation failures: they will be reported
         * by upper layer anyway.
         */
        gfp |= __GFP_NOWARN;
        page = alloc_page(gfp);
        if (page == NULL)
                return NULL;

        tage = kmalloc(sizeof(*tage), gfp);
        if (tage == NULL) {
                __free_page(page);
                return NULL;
        }

        tage->page = page;
        atomic_inc(&cfs_tage_allocated);
        return tage;
}

static void cfs_tage_free(struct cfs_trace_page *tage)
{
        __LASSERT(tage != NULL);
        __LASSERT(tage->page != NULL);

        __free_page(tage->page);
        kfree(tage);
        atomic_dec(&cfs_tage_allocated);
}

static void cfs_tage_to_tail(struct cfs_trace_page *tage,
                             struct list_head *queue)
{
        __LASSERT(tage != NULL);
        __LASSERT(queue != NULL);

        list_move_tail(&tage->linkage, queue);
}

/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *
cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
{
        struct cfs_trace_page *tage;

        if (tcd->tcd_cur_pages > 0) {
                __LASSERT(!list_empty(&tcd->tcd_pages));
                tage = cfs_tage_from_list(tcd->tcd_pages.prev);
                if (tage->used + len <= PAGE_SIZE)
                        return tage;
        }

        if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
                if (tcd->tcd_cur_stock_pages > 0) {
                        tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
                        --tcd->tcd_cur_stock_pages;
                        list_del_init(&tage->linkage);
                } else {
                        tage = cfs_tage_alloc(GFP_ATOMIC);
                        if (unlikely(tage == NULL)) {
                                if ((!(current->flags & PF_MEMALLOC) ||
                                     in_interrupt()) && printk_ratelimit())
                                        pr_warn("Lustre: cannot allocate a tage (%ld)\n",
                                                tcd->tcd_cur_pages);
                                return NULL;
                        }
                }

                tage->used = 0;
                tage->cpu = smp_processor_id();
                tage->type = tcd->tcd_type;
                list_add_tail(&tage->linkage, &tcd->tcd_pages);
                tcd->tcd_cur_pages++;

                if (tcd->tcd_cur_pages > 8 && thread_running) {
                        struct tracefiled_ctl *tctl = &trace_tctl;
                        /*
                         * wake up tracefiled to process some pages.
                         */
                        wake_up(&tctl->tctl_waitq);
                }
                return tage;
        }
        return NULL;
}

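/*
 * When a tcd is full and the daemon is running, discard roughly the
 * oldest 10% of its pages: they are moved to the per-cpu daemon
 * ringbuffer (where the oldest entries are eventually freed) so that
 * new messages can still be logged.
 */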
static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
        int pgcount = tcd->tcd_cur_pages / 10;
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (printk_ratelimit())
                pr_warn("Lustre: debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
                        pgcount + 1, tcd->tcd_cur_pages);

        INIT_LIST_HEAD(&pc.pc_pages);

        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
                if (pgcount-- == 0)
                        break;

                list_move_tail(&tage->linkage, &pc.pc_pages);
                tcd->tcd_cur_pages--;
        }
        put_pages_on_tcd_daemon_list(&pc, tcd);
}

/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
                                                 unsigned long len)
{
        struct cfs_trace_page *tage;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (len > PAGE_SIZE) {
                pr_err("LustreError: cowardly refusing to write %lu bytes in a page\n",
                       len);
                return NULL;
        }

        tage = cfs_trace_get_tage_try(tcd, len);
        if (tage != NULL)
                return tage;
        if (thread_running)
                cfs_tcd_shrink(tcd);
        if (tcd->tcd_cur_pages > 0) {
                tage = cfs_tage_from_list(tcd->tcd_pages.next);
                tage->used = 0;
                cfs_tage_to_tail(tage, &tcd->tcd_pages);
        }
        return tage;
}

static void cfs_set_ptldebug_header(struct ptldebug_header *header,
                                    struct libcfs_debug_msg_data *msgdata,
                                    unsigned long stack)
{
        struct timespec64 ts;

        ktime_get_real_ts64(&ts);

        header->ph_subsys = msgdata->msg_subsys;
        header->ph_mask = msgdata->msg_mask;
        header->ph_cpu_id = smp_processor_id();
        header->ph_type = cfs_trace_buf_idx_get();
        /* y2038 safe since all user space treats this as unsigned, but
         * will overflow in 2106
         */
        header->ph_sec = (u32)ts.tv_sec;
        header->ph_usec = ts.tv_nsec / NSEC_PER_USEC;
        header->ph_stack = stack;
        header->ph_pid = current->pid;
        header->ph_line_num = msgdata->msg_line;
        header->ph_extern_pid = 0;
}

/**
 * tty_write_msg - write a message to a certain tty, not just the console.
 * @tty: the destination tty_struct
 * @msg: the message to write
 *
 * tty_write_message() is not exported, so provide an equivalent here.
 */
static void tty_write_msg(struct tty_struct *tty, const char *msg)
{
        mutex_lock(&tty->atomic_write_lock);
        tty_lock(tty);
        if (tty->ops->write && tty->count > 0)
                tty->ops->write(tty, msg, strlen(msg));
        tty_unlock(tty);
        mutex_unlock(&tty->atomic_write_lock);
        wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
}

static void cfs_tty_write_message(const char *prefix, int mask, const char *msg)
{
        struct tty_struct *tty;

        tty = get_current_tty();
        if (!tty)
                return;

        tty_write_msg(tty, prefix);
        if ((mask & D_EMERG) || (mask & D_ERROR))
                tty_write_msg(tty, "Error");
        tty_write_msg(tty, ": ");
        tty_write_msg(tty, msg);
        tty_kref_put(tty);
}

static void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
                                 const char *buf, int len, const char *file,
                                 const char *fn)
{
        char *prefix = "Lustre";

        if (hdr->ph_subsys == S_LND || hdr->ph_subsys == S_LNET)
                prefix = "LNet";

        if (mask & D_CONSOLE) {
                if (mask & D_EMERG)
                        pr_emerg("%sError: %.*s", prefix, len, buf);
                else if (mask & D_ERROR)
                        pr_err("%sError: %.*s", prefix, len, buf);
                else if (mask & D_WARNING)
                        pr_warn("%s: %.*s", prefix, len, buf);
                else if (mask & libcfs_printk)
                        pr_info("%s: %.*s", prefix, len, buf);
        } else {
                if (mask & D_EMERG)
                        pr_emerg("%sError: %d:%d:(%s:%d:%s()) %.*s", prefix,
                                 hdr->ph_pid, hdr->ph_extern_pid, file,
                                 hdr->ph_line_num, fn, len, buf);
                else if (mask & D_ERROR)
                        pr_err("%sError: %d:%d:(%s:%d:%s()) %.*s", prefix,
                               hdr->ph_pid, hdr->ph_extern_pid, file,
                               hdr->ph_line_num, fn, len, buf);
                else if (mask & D_WARNING)
                        pr_warn("%s: %d:%d:(%s:%d:%s()) %.*s", prefix,
                                hdr->ph_pid, hdr->ph_extern_pid, file,
                                hdr->ph_line_num, fn, len, buf);
                else if (mask & (D_CONSOLE | libcfs_printk))
                        pr_info("%s: %.*s", prefix, len, buf);
        }

        if (mask & D_TTY)
                cfs_tty_write_message(prefix, mask, buf);
}

int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
                     const char *format, ...)
{
        struct cfs_trace_cpu_data *tcd = NULL;
        struct ptldebug_header header = {0};
        struct cfs_trace_page *tage;
        /* string_buf is used only if tcd != NULL, and is always set then */
        char *string_buf = NULL;
        char *debug_buf;
        int known_size;
        int needed = 85; /* seeded with average message length */
        int max_nob;
        va_list ap;
        int retry;
        int mask = msgdata->msg_mask;
        char *file = (char *)msgdata->msg_file;
        struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;

        if (strchr(file, '/'))
                file = strrchr(file, '/') + 1;

        tcd = cfs_trace_get_tcd();

        /* cfs_trace_get_tcd() grabs a lock, which disables preemption and
         * pins us to a particular CPU.  This avoids an smp_processor_id()
         * warning on Linux when debugging is enabled.
         */
        cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());

        if (!tcd)                /* arch may not log in IRQ context */
                goto console;

        if (tcd->tcd_cur_pages == 0)
                header.ph_flags |= PH_FLAG_FIRST_RECORD;

        if (tcd->tcd_shutting_down) {
                cfs_trace_put_tcd(tcd);
                tcd = NULL;
                goto console;
        }

        known_size = strlen(file) + 1;
        if (msgdata->msg_fn)
                known_size += strlen(msgdata->msg_fn) + 1;

        if (libcfs_debug_binary)
                known_size += sizeof(header);

        /*
         * Up to two passes: the first pass uses vsnprintf(), whose return
         * value (the size that would have been written) lets us grow the
         * tage buffer if the message was truncated.  The second pass
         * (retry == 1) uses vscnprintf(), which returns the number of bytes
         * actually written (excluding the terminating NUL), so from here on
         * 'needed' is the number of bytes written.
         */
        for (retry = 0; retry < 2; retry++) {
                tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
                if (!tage) {
                        if (needed + known_size > PAGE_SIZE)
                                mask |= D_ERROR;

                        cfs_trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                string_buf = (char *)page_address(tage->page) +
                             tage->used + known_size;

                max_nob = PAGE_SIZE - tage->used - known_size;
                if (max_nob <= 0) {
                        pr_emerg("LustreError: negative max_nob: %d\n",
                                 max_nob);
                        mask |= D_ERROR;
                        cfs_trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                va_start(ap, format);
                if (retry)
                        needed = vscnprintf(string_buf, max_nob, format, ap);
                else
                        needed = vsnprintf(string_buf, max_nob, format, ap);
                va_end(ap);

                if (needed < max_nob) /* well. printing ok.. */
                        break;
        }

        /* `needed` is actual bytes written to string_buf */
        if (*(string_buf + needed - 1) != '\n') {
                pr_info("Lustre: format at %s:%d:%s doesn't end in newline\n",
                        file, msgdata->msg_line, msgdata->msg_fn);
        } else if (mask & D_TTY) {
                /* TTY needs '\r\n' to move carriage to leftmost position */
                if (needed < 2 || *(string_buf + needed - 2) != '\r')
                        pr_info("Lustre: format at %s:%d:%s doesn't end in '\\r\\n'\n",
                                file, msgdata->msg_line, msgdata->msg_fn);
        }

        header.ph_len = known_size + needed;
        debug_buf = (char *)page_address(tage->page) + tage->used;

        if (libcfs_debug_binary) {
                memcpy(debug_buf, &header, sizeof(header));
                tage->used += sizeof(header);
                debug_buf += sizeof(header);
        }

        strlcpy(debug_buf, file, PAGE_SIZE - tage->used);
        tage->used += strlen(file) + 1;
        debug_buf += strlen(file) + 1;

        if (msgdata->msg_fn) {
                strlcpy(debug_buf, msgdata->msg_fn, PAGE_SIZE - tage->used);
                tage->used += strlen(msgdata->msg_fn) + 1;
                debug_buf += strlen(msgdata->msg_fn) + 1;
        }

        __LASSERT(debug_buf == string_buf);

        tage->used += needed;
        __LASSERT(tage->used <= PAGE_SIZE);

console:
        if ((mask & libcfs_printk) == 0) {
                /* no console output requested */
                if (tcd != NULL)
                        cfs_trace_put_tcd(tcd);
                return 1;
        }

        if (cdls != NULL) {
                if (libcfs_console_ratelimit &&
                    cdls->cdls_next != 0 &&     /* not first time ever */
                    time_before(jiffies, cdls->cdls_next)) {
                        /* skipping a console message */
                        cdls->cdls_count++;
                        if (tcd != NULL)
                                cfs_trace_put_tcd(tcd);
                        return 1;
                }

                if (time_after(jiffies, cdls->cdls_next +
                                        libcfs_console_max_delay +
                                        cfs_time_seconds(10))) {
                        /* last timeout was a long time ago */
                        cdls->cdls_delay /= libcfs_console_backoff * 4;
                } else {
                        cdls->cdls_delay *= libcfs_console_backoff;
                }

                if (cdls->cdls_delay < libcfs_console_min_delay)
                        cdls->cdls_delay = libcfs_console_min_delay;
                else if (cdls->cdls_delay > libcfs_console_max_delay)
                        cdls->cdls_delay = libcfs_console_max_delay;

                /* ensure cdls_next is never zero after it's been seen */
                cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
        }

        if (tcd) {
                cfs_print_to_console(&header, mask, string_buf, needed, file,
                                     msgdata->msg_fn);
                cfs_trace_put_tcd(tcd);
        } else {
                string_buf = cfs_trace_get_console_buffer();

                va_start(ap, format);
                needed = vscnprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                    format, ap);
                va_end(ap);

                cfs_print_to_console(&header, mask,
                                     string_buf, needed, file, msgdata->msg_fn);

                put_cpu();
        }

        if (cdls != NULL && cdls->cdls_count != 0) {
                string_buf = cfs_trace_get_console_buffer();

                needed = scnprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                   "Skipped %d previous similar message%s\n",
                                   cdls->cdls_count,
                                   (cdls->cdls_count > 1) ? "s" : "");

                /* Do not print this to the TTY */
                cfs_print_to_console(&header, mask & ~D_TTY, string_buf,
                                     needed, file, msgdata->msg_fn);

                put_cpu();
                cdls->cdls_count = 0;
        }

        return 0;
}
EXPORT_SYMBOL(libcfs_debug_msg);
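/*
 * Typical usage (a sketch; the actual CDEBUG()/CERROR() macros are
 * defined in the libcfs headers and may differ in detail): callers
 * build a struct libcfs_debug_msg_data carrying the subsystem, mask,
 * file, line, function and optional rate-limit state, then call
 *
 *      libcfs_debug_msg(&msgdata, "format %d\n", value);
 *
 * The message is appended to the per-cpu trace pages and, when the
 * mask intersects libcfs_printk, echoed to the console as well.
 */
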
void
cfs_trace_assertion_failed(const char *str,
                           struct libcfs_debug_msg_data *msgdata)
{
        struct ptldebug_header hdr;

        libcfs_panic_in_progress = 1;
        libcfs_catastrophe = 1;
        smp_mb();

        cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());

        cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
                             msgdata->msg_file, msgdata->msg_fn);

        panic("Lustre debug assertion failure\n");

        /* not reached */
}

static void
panic_collect_pages(struct page_collection *pc)
{
        /* Do the collect_pages job on a single CPU: assumes that all other
         * CPUs have been stopped during a panic.  If this isn't true for some
         * arch, this will have to be implemented separately in each arch.  */
        int                        i;
        int                        j;
        struct cfs_trace_cpu_data *tcd;

        INIT_LIST_HEAD(&pc->pc_pages);

        cfs_tcd_for_each(tcd, i, j) {
                list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                tcd->tcd_cur_pages = 0;

                if (pc->pc_want_daemon_pages) {
                        list_splice_init(&tcd->tcd_daemon_pages,
                                                &pc->pc_pages);
                        tcd->tcd_cur_daemon_pages = 0;
                }
        }
}

static void collect_pages_on_all_cpus(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                        tcd->tcd_cur_pages = 0;
                        if (pc->pc_want_daemon_pages) {
                                list_splice_init(&tcd->tcd_daemon_pages,
                                                        &pc->pc_pages);
                                tcd->tcd_cur_daemon_pages = 0;
                        }
                }
        }
}

static void collect_pages(struct page_collection *pc)
{
        INIT_LIST_HEAD(&pc->pc_pages);

        if (libcfs_panic_in_progress)
                panic_collect_pages(pc);
        else
                collect_pages_on_all_cpus(pc);
}

static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        struct list_head *cur_head;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        cur_head = tcd->tcd_pages.next;

                        list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
                                                 linkage) {

                                __LASSERT_TAGE_INVARIANT(tage);

                                if (tage->cpu != cpu || tage->type != i)
                                        continue;

                                cfs_tage_to_tail(tage, cur_head);
                                tcd->tcd_cur_pages++;
                        }
                }
        }
}

static void put_pages_back(struct page_collection *pc)
{
        if (!libcfs_panic_in_progress)
                put_pages_back_on_all_cpus(pc);
}

/* Add pages to a per-cpu debug daemon ringbuffer.  This buffer makes sure that
 * we have a good amount of data at all times for dumping during an LBUG, even
 * if we have been steadily writing (and otherwise discarding) pages via the
 * debug daemon. */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct cfs_trace_cpu_data *tcd)
{
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
                __LASSERT_TAGE_INVARIANT(tage);

                if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
                        continue;

                cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
                tcd->tcd_cur_daemon_pages++;

                if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
                        struct cfs_trace_page *victim;

                        __LASSERT(!list_empty(&tcd->tcd_daemon_pages));
                        victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);

                        __LASSERT_TAGE_INVARIANT(victim);

                        list_del(&victim->linkage);
                        cfs_tage_free(victim);
                        tcd->tcd_cur_daemon_pages--;
                }
        }
}

static void put_pages_on_daemon_list(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu)
                        put_pages_on_tcd_daemon_list(pc, tcd);
        }
}

void cfs_trace_debug_print(void)
{
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                char *p, *file, *fn;
                struct page *page;

                __LASSERT_TAGE_INVARIANT(tage);

                page = tage->page;
                p = page_address(page);
                while (p < ((char *)page_address(page) + tage->used)) {
                        struct ptldebug_header *hdr;
                        int len;
                        hdr = (void *)p;
                        p += sizeof(*hdr);
                        file = p;
                        p += strlen(file) + 1;
                        fn = p;
                        p += strlen(fn) + 1;
                        len = hdr->ph_len - (int)(p - (char *)hdr);

                        cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);

                        p += len;
                }

                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
}

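/*
 * Dump every collected trace page (including the daemon ringbuffer) to
 * a freshly created file and fsync it.  The file is opened
 * O_CREAT|O_EXCL, so the dump fails if 'filename' already exists.
 */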
int cfs_tracefile_dump_all_pages(char *filename)
{
        struct page_collection  pc;
        struct file             *filp;
        struct cfs_trace_page   *tage;
        struct cfs_trace_page   *tmp;
        char                    *buf;
        int rc;

        down_write(&cfs_tracefile_sem);

        filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
        if (IS_ERR(filp)) {
                rc = PTR_ERR(filp);
                filp = NULL;
                pr_err("LustreError: can't open %s for dump: rc = %d\n",
                      filename, rc);
                goto out;
        }

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        if (list_empty(&pc.pc_pages)) {
                rc = 0;
                goto close;
        }

        /* ok, for now, just write the pages.  in the future we'll be building
         * iobufs with the pages and calling generic_direct_IO */
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                buf = kmap(tage->page);
                rc = cfs_kernel_write(filp, buf, tage->used, &filp->f_pos);
                kunmap(tage->page);
                if (rc != (int)tage->used) {
                        pr_warn("Lustre: wanted to write %u but wrote %d\n",
                                tage->used, rc);
                        put_pages_back(&pc);
                        __LASSERT(list_empty(&pc.pc_pages));
                        break;
                }
                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }

        rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
        if (rc)
                pr_err("LustreError: sync returns: rc = %d\n", rc);
close:
        filp_close(filp, NULL);
out:
        up_write(&cfs_tracefile_sem);
        return rc;
}

void cfs_trace_flush_pages(void)
{
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
}

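/*
 * Copy a string in from user space, strip trailing whitespace and
 * NUL-terminate it.  Returns 0 on success, -EOVERFLOW if the user
 * buffer does not fit (or no room is left for the terminator), -EFAULT
 * if the copy faults, and -EINVAL for an empty string.
 */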
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
                            const char __user *usr_buffer, int usr_buffer_nob)
{
        int    nob;

        if (usr_buffer_nob > knl_buffer_nob)
                return -EOVERFLOW;

        if (copy_from_user(knl_buffer, usr_buffer, usr_buffer_nob))
                return -EFAULT;

        nob = strnlen(knl_buffer, usr_buffer_nob);
        while (--nob >= 0)                      /* strip trailing whitespace */
                if (!isspace(knl_buffer[nob]))
                        break;

        if (nob < 0)                            /* empty string */
                return -EINVAL;

        if (nob == knl_buffer_nob)              /* no space to terminate */
                return -EOVERFLOW;

        knl_buffer[nob + 1] = 0;                /* terminate */
        return 0;
}
EXPORT_SYMBOL(cfs_trace_copyin_string);

int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
                             const char *knl_buffer, char *append)
{
        /* NB if 'append' != NULL, it's a single character to append to the
         * copied out string - usually "\n", for /proc entries and "" (i.e. a
         * terminating zero byte) for sysctl entries */
        int   nob = strlen(knl_buffer);

        if (nob > usr_buffer_nob)
                nob = usr_buffer_nob;

        if (copy_to_user(usr_buffer, knl_buffer, nob))
                return -EFAULT;

        if (append != NULL && nob < usr_buffer_nob) {
                if (copy_to_user(usr_buffer + nob, append, 1))
                        return -EFAULT;

                nob++;
        }

        return nob;
}
EXPORT_SYMBOL(cfs_trace_copyout_string);

int cfs_trace_allocate_string_buffer(char **str, int nob)
{
        if (nob > 2 * PAGE_SIZE)        /* string must be "sensible" */
                return -EINVAL;

        *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
        if (*str == NULL)
                return -ENOMEM;

        return 0;
}

int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
{
        char         *str;
        int           rc;

        rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
        if (rc != 0)
                return rc;

        rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
                                     usr_str, usr_str_nob);
        if (rc != 0)
                goto out;

        if (str[0] != '/') {
                rc = -EINVAL;
                goto out;
        }
        rc = cfs_tracefile_dump_all_pages(str);
out:
        kfree(str);
        return rc;
}

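/*
 * Handle a debug daemon command string: "stop" shuts the daemon down,
 * "size=<MB>" sets the maximum trace file size (values outside
 * 10..20480 MB fall back to the default), and an absolute pathname
 * starts the daemon writing to that file.
 */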
int cfs_trace_daemon_command(char *str)
{
        int       rc = 0;

        down_write(&cfs_tracefile_sem);

        if (strcmp(str, "stop") == 0) {
                up_write(&cfs_tracefile_sem);
                cfs_trace_stop_thread();
                down_write(&cfs_tracefile_sem);
                memset(cfs_tracefile, 0, sizeof(cfs_tracefile));

        } else if (strncmp(str, "size=", 5) == 0) {
                unsigned long tmp;

                rc = kstrtoul(str + 5, 10, &tmp);
                if (!rc) {
                        if (tmp < 10 || tmp > 20480)
                                cfs_tracefile_size = CFS_TRACEFILE_SIZE;
                        else
                                cfs_tracefile_size = tmp << 20;
                }
        } else if (strlen(str) >= sizeof(cfs_tracefile)) {
                rc = -ENAMETOOLONG;
        } else if (str[0] != '/') {
                rc = -EINVAL;
        } else {
                strcpy(cfs_tracefile, str);

                pr_info("Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n",
                        cfs_tracefile, (long)(cfs_tracefile_size >> 10));

                cfs_trace_start_thread();
        }

        up_write(&cfs_tracefile_sem);
        return rc;
}

int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
{
        char *str;
        int   rc;

        rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
        if (rc != 0)
                return rc;

        rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
                                     usr_str, usr_str_nob);
        if (rc == 0)
                rc = cfs_trace_daemon_command(str);

        kfree(str);
        return rc;
}

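/*
 * Resize the debug buffers to 'mb' megabytes in total.  The value is
 * clamped between num_possible_cpus() and 80% of RAM (but at least
 * 512 MB), then divided evenly across CPUs and shared between buffer
 * types according to tcd_pages_factor.  Returns the per-CPU megabyte
 * count actually used.
 */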
int cfs_trace_set_debug_mb(int mb)
{
        int i;
        int j;
        unsigned long pages;
        unsigned long total_mb = (cfs_totalram_pages() >> (20 - PAGE_SHIFT));
        unsigned long limit = max_t(unsigned long, 512, (total_mb * 4) / 5);
        struct cfs_trace_cpu_data *tcd;

        if (mb < num_possible_cpus()) {
                pr_warn("Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n",
                        mb, num_possible_cpus());
                mb = num_possible_cpus();
        }

        if (mb > limit) {
                pr_warn("Lustre: %d MB is too large for debug buffer size, setting it to %lu MB.\n",
                        mb, limit);
                mb = limit;
        }

        mb /= num_possible_cpus();
        pages = mb << (20 - PAGE_SHIFT);

        down_write(&cfs_tracefile_sem);

        cfs_tcd_for_each(tcd, i, j)
                tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

        up_write(&cfs_tracefile_sem);

        return mb;
}

int cfs_trace_get_debug_mb(void)
{
        int i;
        int j;
        struct cfs_trace_cpu_data *tcd;
        int total_pages = 0;

        down_read(&cfs_tracefile_sem);

        cfs_tcd_for_each(tcd, i, j)
                total_pages += tcd->tcd_max_pages;

        up_read(&cfs_tracefile_sem);

        return (total_pages >> (20 - PAGE_SHIFT)) + 1;
}

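/*
 * tracefiled() is the debug daemon thread.  It sleeps on tctl_waitq
 * until writers queue pages or shutdown is requested, with a
 * one-second timeout as a safety net (the wait_event_timeout() change
 * from LU-9859), then drains the collected pages into cfs_tracefile.
 */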
static int tracefiled(void *arg)
{
        struct page_collection pc;
        struct tracefiled_ctl *tctl = arg;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        struct file *filp;
        char *buf;
        int last_loop = 0;
        int rc;

        /* we're started late enough that we pick up init's fs context */
        /* this is so broken in uml?  what on earth is going on? */

        complete(&tctl->tctl_start);

        pc.pc_want_daemon_pages = 0;

        while (!last_loop) {
                wait_event_timeout(tctl->tctl_waitq,
                                   ({ collect_pages(&pc);
                                      !list_empty(&pc.pc_pages); }) ||
                                   atomic_read(&tctl->tctl_shutdown),
                                   cfs_time_seconds(1));
                if (atomic_read(&tctl->tctl_shutdown))
                        last_loop = 1;
                if (list_empty(&pc.pc_pages))
                        continue;

                filp = NULL;
                down_read(&cfs_tracefile_sem);
                if (cfs_tracefile[0] != 0) {
                        filp = filp_open(cfs_tracefile,
                                         O_CREAT | O_RDWR | O_LARGEFILE,
                                         0600);
                        if (IS_ERR(filp)) {
                                rc = PTR_ERR(filp);
                                filp = NULL;
                                pr_warn("Lustre: couldn't open %s: rc = %d\n",
                                        cfs_tracefile, rc);
                        }
                }
                up_read(&cfs_tracefile_sem);
                if (filp == NULL) {
                        put_pages_on_daemon_list(&pc);
                        __LASSERT(list_empty(&pc.pc_pages));
                        continue;
                }

                list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                        struct dentry *de = file_dentry(filp);
                        static loff_t f_pos;

                        __LASSERT_TAGE_INVARIANT(tage);

                        if (f_pos >= (off_t)cfs_tracefile_size)
                                f_pos = 0;
                        else if (f_pos > i_size_read(de->d_inode))
                                f_pos = i_size_read(de->d_inode);

                        buf = kmap(tage->page);
                        rc = cfs_kernel_write(filp, buf, tage->used, &f_pos);
                        kunmap(tage->page);
                        if (rc != (int)tage->used) {
                                pr_warn("Lustre: wanted to write %u but wrote %d\n",
                                        tage->used, rc);
                                put_pages_back(&pc);
                                __LASSERT(list_empty(&pc.pc_pages));
                                break;
                        }
                }

                filp_close(filp, NULL);
                put_pages_on_daemon_list(&pc);
                if (!list_empty(&pc.pc_pages)) {
                        int i;

                        pr_alert("Lustre: trace pages aren't empty\n");
                        pr_err("Lustre: total cpus(%d): ", num_possible_cpus());
                        for (i = 0; i < num_possible_cpus(); i++)
                                if (cpu_online(i))
                                        pr_cont("%d(on) ", i);
                                else
                                        pr_cont("%d(off) ", i);
                        pr_cont("\n");

                        i = 0;
                        list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
                                                 linkage)
                                pr_err("Lustre: page %d belongs to cpu %d\n",
                                       ++i, tage->cpu);
                        pr_err("Lustre: There are %d pages unwritten\n", i);
                }
                __LASSERT(list_empty(&pc.pc_pages));
        }
        complete(&tctl->tctl_stop);
        return 0;
}

int cfs_trace_start_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;
        int rc = 0;

        mutex_lock(&cfs_trace_thread_mutex);
        if (thread_running)
                goto out;

        init_completion(&tctl->tctl_start);
        init_completion(&tctl->tctl_stop);
        init_waitqueue_head(&tctl->tctl_waitq);
        atomic_set(&tctl->tctl_shutdown, 0);

        if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
                rc = -ECHILD;
                goto out;
        }

        wait_for_completion(&tctl->tctl_start);
        thread_running = 1;
out:
        mutex_unlock(&cfs_trace_thread_mutex);
        return rc;
}

void cfs_trace_stop_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;

        mutex_lock(&cfs_trace_thread_mutex);
        if (thread_running) {
                pr_info("Lustre: shutting down debug daemon thread...\n");
                atomic_set(&tctl->tctl_shutdown, 1);
                wait_for_completion(&tctl->tctl_stop);
                thread_running = 0;
        }
        mutex_unlock(&cfs_trace_thread_mutex);
}

/* percentages of the total debug memory given to each buffer type */
static unsigned int pages_factor[CFS_TCD_TYPE_CNT] = {
        80, /* 80% pages for CFS_TCD_TYPE_PROC */
        10, /* 10% pages for CFS_TCD_TYPE_SOFTIRQ */
        10  /* 10% pages for CFS_TCD_TYPE_IRQ */
};

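/*
 * Allocate the per-cpu trace buffer bookkeeping and the per-cpu
 * console buffers.  max_pages is the total page budget, shared between
 * the buffer types according to pages_factor above.
 */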
int cfs_tracefile_init(int max_pages)
{
        struct cfs_trace_cpu_data *tcd;
        int i;
        int j;

        /* initialize trace_data */
        memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
        for (i = 0; i < CFS_TCD_TYPE_CNT; i++) {
                cfs_trace_data[i] =
                        kmalloc_array(num_possible_cpus(),
                                      sizeof(union cfs_trace_data_union),
                                      GFP_KERNEL);
                if (!cfs_trace_data[i])
                        goto out_trace_data;
        }

        /* arch related info initialized */
        cfs_tcd_for_each(tcd, i, j) {
                int factor = pages_factor[i];

                spin_lock_init(&tcd->tcd_lock);
                tcd->tcd_pages_factor = factor;
                tcd->tcd_type = i;
                tcd->tcd_cpu = j;

                INIT_LIST_HEAD(&tcd->tcd_pages);
                INIT_LIST_HEAD(&tcd->tcd_stock_pages);
                INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
                tcd->tcd_cur_pages = 0;
                tcd->tcd_cur_stock_pages = 0;
                tcd->tcd_cur_daemon_pages = 0;
                tcd->tcd_max_pages = (max_pages * factor) / 100;
                LASSERT(tcd->tcd_max_pages > 0);
                tcd->tcd_shutting_down = 0;
        }

        for (i = 0; i < num_possible_cpus(); i++)
                for (j = 0; j < 3; j++) {
                        cfs_trace_console_buffers[i][j] =
                                kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                        GFP_KERNEL);
                        if (!cfs_trace_console_buffers[i][j])
                                goto out_buffers;
                }

        return 0;

out_buffers:
        for (i = 0; i < num_possible_cpus(); i++)
                for (j = 0; j < 3; j++) {
                        kfree(cfs_trace_console_buffers[i][j]);
                        cfs_trace_console_buffers[i][j] = NULL;
                }
out_trace_data:
        for (i = 0; cfs_trace_data[i]; i++) {
                kfree(cfs_trace_data[i]);
                cfs_trace_data[i] = NULL;
        }
        pr_err("lnet: Not enough memory\n");
        return -ENOMEM;
}

static void trace_cleanup_on_all_cpus(void)
{
        struct cfs_trace_cpu_data *tcd;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        if (!tcd->tcd_pages_factor)
                                /* Not initialised */
                                continue;
                        tcd->tcd_shutting_down = 1;

                        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
                                __LASSERT_TAGE_INVARIANT(tage);

                                list_del(&tage->linkage);
                                cfs_tage_free(tage);
                        }
                        tcd->tcd_cur_pages = 0;
                }
        }
}

static void cfs_trace_cleanup(void)
{
        struct page_collection pc;
        int i;
        int j;

        INIT_LIST_HEAD(&pc.pc_pages);

        trace_cleanup_on_all_cpus();

        for (i = 0; i < num_possible_cpus(); i++)
                for (j = 0; j < 3; j++) {
                        kfree(cfs_trace_console_buffers[i][j]);
                        cfs_trace_console_buffers[i][j] = NULL;
                }

        for (i = 0; i < CFS_TCD_TYPE_CNT && cfs_trace_data[i]; i++) {
                kfree(cfs_trace_data[i]);
                cfs_trace_data[i] = NULL;
        }
}

void cfs_tracefile_exit(void)
{
        cfs_trace_stop_thread();
        cfs_trace_cleanup();
}