/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/tracefile.c
 *
 * Author: Zach Brown <zab@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */


#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE
#include "tracefile.h"

#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/tty.h>
#include <linux/uaccess.h>
#include <libcfs/linux/linux-fs.h>
#include <libcfs/libcfs.h>

#define TCD_MAX_TYPES                   8

union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;

char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
static DEFINE_MUTEX(cfs_trace_thread_mutex);
static int thread_running = 0;

static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
static DECLARE_RWSEM(cfs_tracefile_sem);

static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct cfs_trace_cpu_data *tcd);

/* trace file lock routines */
/* The 'walking' argument indicates that the caller is iterating over all
 * tcd types, so we must take the lock and disable local irqs to avoid
 * deadlocks with other interrupt locks that might be taken concurrently.
 * See LU-1311 for details.
 */
int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
        __acquires(&tcd->tcd_lock)
{
        __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
        if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
                spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
        else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
                spin_lock_bh(&tcd->tcd_lock);
        else if (unlikely(walking))
                spin_lock_irq(&tcd->tcd_lock);
        else
                spin_lock(&tcd->tcd_lock);
        return 1;
}

void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
        __releases(&tcd->tcd_lock)
{
        __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
        if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
                spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
        else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
                spin_unlock_bh(&tcd->tcd_lock);
        else if (unlikely(walking))
                spin_unlock_irq(&tcd->tcd_lock);
        else
                spin_unlock(&tcd->tcd_lock);
}

#define cfs_tcd_for_each(tcd, i, j)                                     \
        for (i = 0; cfs_trace_data[i]; i++)                             \
                for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd);     \
                     j < num_possible_cpus();                           \
                     j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)

#define cfs_tcd_for_each_type_lock(tcd, i, cpu)                         \
        for (i = 0; cfs_trace_data[i] &&                                \
             (tcd = &(*cfs_trace_data[i])[cpu].tcd) &&                  \
             cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++)
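
/*
 * A minimal, illustrative sketch (not compiled) of how the iterators above
 * are typically used: cfs_tcd_for_each() walks every TCD without locking,
 * while cfs_tcd_for_each_type_lock() walks the TCDs of one CPU with each
 * TCD locked for the duration of the loop body.  The function and counter
 * names here are hypothetical.
 */
#if 0
static unsigned long count_all_trace_pages(void)
{
        struct cfs_trace_cpu_data *tcd;
        unsigned long count = 0;
        int i, j, cpu;

        /* unlocked walk over every (type, cpu) TCD */
        cfs_tcd_for_each(tcd, i, j)
                count += tcd->tcd_cur_pages;

        /* locked walk: each iteration holds tcd->tcd_lock */
        for_each_possible_cpu(cpu)
                cfs_tcd_for_each_type_lock(tcd, i, cpu)
                        count += tcd->tcd_cur_daemon_pages;

        return count;
}
#endif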

enum cfs_trace_buf_type cfs_trace_buf_idx_get(void)
{
        if (in_irq())
                return CFS_TCD_TYPE_IRQ;
        if (in_softirq())
                return CFS_TCD_TYPE_SOFTIRQ;
        return CFS_TCD_TYPE_PROC;
}

static inline struct cfs_trace_cpu_data *
cfs_trace_get_tcd(void)
{
        struct cfs_trace_cpu_data *tcd =
                &(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd;

        cfs_trace_lock_tcd(tcd, 0);

        return tcd;
}

static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd)
{
        cfs_trace_unlock_tcd(tcd, 0);

        put_cpu();
}

static inline struct cfs_trace_page *
cfs_tage_from_list(struct list_head *list)
{
        return list_entry(list, struct cfs_trace_page, linkage);
}

static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
{
        struct page            *page;
        struct cfs_trace_page *tage;

        /* My caller is trying to free memory */
        if (!in_interrupt() && (current->flags & PF_MEMALLOC))
                return NULL;

        /*
         * Don't spam console with allocation failures: they will be reported
         * by upper layer anyway.
         */
        gfp |= __GFP_NOWARN;
        page = alloc_page(gfp);
        if (page == NULL)
                return NULL;

        tage = kmalloc(sizeof(*tage), gfp);
        if (tage == NULL) {
                __free_page(page);
                return NULL;
        }

        tage->page = page;
        atomic_inc(&cfs_tage_allocated);
        return tage;
}

static void cfs_tage_free(struct cfs_trace_page *tage)
{
        __LASSERT(tage != NULL);
        __LASSERT(tage->page != NULL);

        __free_page(tage->page);
        kfree(tage);
        atomic_dec(&cfs_tage_allocated);
}

static void cfs_tage_to_tail(struct cfs_trace_page *tage,
                             struct list_head *queue)
{
        __LASSERT(tage != NULL);
        __LASSERT(queue != NULL);

        list_move_tail(&tage->linkage, queue);
}

/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *
cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
{
        struct cfs_trace_page *tage;

        if (tcd->tcd_cur_pages > 0) {
                __LASSERT(!list_empty(&tcd->tcd_pages));
                tage = cfs_tage_from_list(tcd->tcd_pages.prev);
                if (tage->used + len <= PAGE_SIZE)
                        return tage;
        }

        if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
                if (tcd->tcd_cur_stock_pages > 0) {
                        tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
                        --tcd->tcd_cur_stock_pages;
                        list_del_init(&tage->linkage);
                } else {
                        tage = cfs_tage_alloc(GFP_ATOMIC);
                        if (unlikely(tage == NULL)) {
                                if ((!(current->flags & PF_MEMALLOC) ||
                                     in_interrupt()) && printk_ratelimit())
                                        pr_warn("Lustre: cannot allocate a tage (%ld)\n",
                                                tcd->tcd_cur_pages);
                                return NULL;
                        }
                }

                tage->used = 0;
                tage->cpu = smp_processor_id();
                tage->type = tcd->tcd_type;
                list_add_tail(&tage->linkage, &tcd->tcd_pages);
                tcd->tcd_cur_pages++;

                if (tcd->tcd_cur_pages > 8 && thread_running) {
                        struct tracefiled_ctl *tctl = &trace_tctl;
                        /*
                         * wake up tracefiled to process some pages.
                         */
                        wake_up(&tctl->tctl_waitq);
                }
                return tage;
        }
        return NULL;
}

static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
        int pgcount = tcd->tcd_cur_pages / 10;
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (printk_ratelimit())
                pr_warn("Lustre: debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
                        pgcount + 1, tcd->tcd_cur_pages);

        INIT_LIST_HEAD(&pc.pc_pages);

        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
                if (pgcount-- == 0)
                        break;

                list_move_tail(&tage->linkage, &pc.pc_pages);
                tcd->tcd_cur_pages--;
        }
        put_pages_on_tcd_daemon_list(&pc, tcd);
}

/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
                                                 unsigned long len)
{
        struct cfs_trace_page *tage;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (len > PAGE_SIZE) {
                pr_err("LustreError: cowardly refusing to write %lu bytes in a page\n",
                       len);
                return NULL;
        }

        tage = cfs_trace_get_tage_try(tcd, len);
        if (tage != NULL)
                return tage;
        if (thread_running)
                cfs_tcd_shrink(tcd);
        if (tcd->tcd_cur_pages > 0) {
                tage = cfs_tage_from_list(tcd->tcd_pages.next);
                tage->used = 0;
                cfs_tage_to_tail(tage, &tcd->tcd_pages);
        }
        return tage;
}

static void cfs_set_ptldebug_header(struct ptldebug_header *header,
                                    struct libcfs_debug_msg_data *msgdata,
                                    unsigned long stack)
{
        struct timespec64 ts;

        ktime_get_real_ts64(&ts);

        header->ph_subsys = msgdata->msg_subsys;
        header->ph_mask = msgdata->msg_mask;
        header->ph_cpu_id = smp_processor_id();
        header->ph_type = cfs_trace_buf_idx_get();
        /* y2038 safe since all user space treats this as unsigned, but
         * will overflow in 2106
         */
        header->ph_sec = (u32)ts.tv_sec;
        header->ph_usec = ts.tv_nsec / NSEC_PER_USEC;
        header->ph_stack = stack;
        header->ph_pid = current->pid;
        header->ph_line_num = msgdata->msg_line;
        header->ph_extern_pid = 0;
}

/**
 * tty_write_msg - write a message to a certain tty, not just the console.
 * @tty: the destination tty_struct
 * @msg: the message to write
 *
 * tty_write_message() is not exported, so reimplement an equivalent of it
 * here.
 */
static void tty_write_msg(struct tty_struct *tty, const char *msg)
{
        mutex_lock(&tty->atomic_write_lock);
        tty_lock(tty);
        if (tty->ops->write && tty->count > 0)
                tty->ops->write(tty, msg, strlen(msg));
        tty_unlock(tty);
        mutex_unlock(&tty->atomic_write_lock);
        wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
}

static void cfs_tty_write_message(const char *prefix, int mask, const char *msg)
{
        struct tty_struct *tty;

        tty = get_current_tty();
        if (!tty)
                return;

        tty_write_msg(tty, prefix);
        if ((mask & D_EMERG) || (mask & D_ERROR))
                tty_write_msg(tty, "Error");
        tty_write_msg(tty, ": ");
        tty_write_msg(tty, msg);
        tty_kref_put(tty);
}

static void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
                                 const char *buf, int len, const char *file,
                                 const char *fn)
{
        char *prefix = "Lustre";

        if (hdr->ph_subsys == S_LND || hdr->ph_subsys == S_LNET)
                prefix = "LNet";

        if (mask & D_CONSOLE) {
                if (mask & D_EMERG)
                        pr_emerg("%sError: %.*s", prefix, len, buf);
                else if (mask & D_ERROR)
                        pr_err("%sError: %.*s", prefix, len, buf);
                else if (mask & D_WARNING)
                        pr_warn("%s: %.*s", prefix, len, buf);
                else if (mask & libcfs_printk)
                        pr_info("%s: %.*s", prefix, len, buf);
        } else {
                if (mask & D_EMERG)
                        pr_emerg("%sError: %d:%d:(%s:%d:%s()) %.*s", prefix,
                                 hdr->ph_pid, hdr->ph_extern_pid, file,
                                 hdr->ph_line_num, fn, len, buf);
                else if (mask & D_ERROR)
                        pr_err("%sError: %d:%d:(%s:%d:%s()) %.*s", prefix,
                               hdr->ph_pid, hdr->ph_extern_pid, file,
                               hdr->ph_line_num, fn, len, buf);
                else if (mask & D_WARNING)
                        pr_warn("%s: %d:%d:(%s:%d:%s()) %.*s", prefix,
                                hdr->ph_pid, hdr->ph_extern_pid, file,
                                hdr->ph_line_num, fn, len, buf);
                else if (mask & (D_CONSOLE | libcfs_printk))
                        pr_info("%s: %.*s", prefix, len, buf);
        }

        if (mask & D_TTY)
                cfs_tty_write_message(prefix, mask, buf);
}

int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
                     const char *format, ...)
{
        struct cfs_trace_cpu_data *tcd = NULL;
        struct ptldebug_header header = {0};
        struct cfs_trace_page *tage;
        /* string_buf is used only if tcd != NULL, and is always set then */
        char *string_buf = NULL;
        char *debug_buf;
        int known_size;
        int needed = 85; /* seeded with average message length */
        int max_nob;
        va_list ap;
        int retry;
        int mask = msgdata->msg_mask;
        char *file = (char *)msgdata->msg_file;
        struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;

        if (strchr(file, '/'))
                file = strrchr(file, '/') + 1;

        tcd = cfs_trace_get_tcd();

        /* cfs_trace_get_tcd() grabs a lock, which disables preemption and
         * pins us to a particular CPU.  This avoids an smp_processor_id()
         * warning on Linux when debugging is enabled.
         */
        cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());

        if (!tcd)                /* arch may not log in IRQ context */
                goto console;

        if (tcd->tcd_cur_pages == 0)
                header.ph_flags |= PH_FLAG_FIRST_RECORD;

        if (tcd->tcd_shutting_down) {
                cfs_trace_put_tcd(tcd);
                tcd = NULL;
                goto console;
        }

        known_size = strlen(file) + 1;
        if (msgdata->msg_fn)
                known_size += strlen(msgdata->msg_fn) + 1;

        if (libcfs_debug_binary)
                known_size += sizeof(header);

        /*
         * We may perform an additional pass to update 'needed' and grow the
         * tage buffer to the size that vsnprintf() reported as required.
         * On the second pass (retry == 1) use vscnprintf() [which returns
         * the number of bytes written, not including the terminating NUL]
         * so that for the remainder of this function 'needed' is the number
         * of bytes actually written.
         */
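        /*
         * Illustrative walk-through with hypothetical numbers: with the
         * seed needed = 85, suppose the current page has max_nob = 100
         * bytes free but the formatted message is really 300 bytes.
         * Pass 0's vsnprintf() truncates the output yet returns 300, so
         * needed >= max_nob and we retry.  Pass 1 then asks
         * cfs_trace_get_tage() for 300 + known_size + 1 bytes (typically
         * landing on a fresh page) and vscnprintf() writes the whole
         * message, returning the number of bytes actually written.
         */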
        for (retry = 0; retry < 2; retry++) {
                tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
                if (!tage) {
                        if (needed + known_size > PAGE_SIZE)
                                mask |= D_ERROR;

                        cfs_trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                string_buf = (char *)page_address(tage->page) +
                             tage->used + known_size;

                max_nob = PAGE_SIZE - tage->used - known_size;
                if (max_nob <= 0) {
                        pr_emerg("LustreError: negative max_nob: %d\n",
                                 max_nob);
                        mask |= D_ERROR;
                        cfs_trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                va_start(ap, format);
                if (retry)
                        needed = vscnprintf(string_buf, max_nob, format, ap);
                else
                        needed = vsnprintf(string_buf, max_nob, format, ap);
                va_end(ap);

                if (needed < max_nob) /* well. printing ok.. */
                        break;
        }

        /* `needed` is actual bytes written to string_buf */
        if (*(string_buf + needed - 1) != '\n') {
                pr_info("Lustre: format at %s:%d:%s doesn't end in newline\n",
                        file, msgdata->msg_line, msgdata->msg_fn);
        } else if (mask & D_TTY) {
                /* TTY needs '\r\n' to move carriage to leftmost position */
                if (needed < 2 || *(string_buf + needed - 2) != '\r')
                        pr_info("Lustre: format at %s:%d:%s doesn't end in '\\r\\n'\n",
                                file, msgdata->msg_line, msgdata->msg_fn);
        }

        header.ph_len = known_size + needed;
        debug_buf = (char *)page_address(tage->page) + tage->used;

        if (libcfs_debug_binary) {
                memcpy(debug_buf, &header, sizeof(header));
                tage->used += sizeof(header);
                debug_buf += sizeof(header);
        }

        strlcpy(debug_buf, file, PAGE_SIZE - tage->used);
        tage->used += strlen(file) + 1;
        debug_buf += strlen(file) + 1;

        if (msgdata->msg_fn) {
                strlcpy(debug_buf, msgdata->msg_fn, PAGE_SIZE - tage->used);
                tage->used += strlen(msgdata->msg_fn) + 1;
                debug_buf += strlen(msgdata->msg_fn) + 1;
        }

        __LASSERT(debug_buf == string_buf);

        tage->used += needed;
        __LASSERT(tage->used <= PAGE_SIZE);

console:
        if ((mask & libcfs_printk) == 0) {
                /* no console output requested */
                if (tcd != NULL)
                        cfs_trace_put_tcd(tcd);
                return 1;
        }

        if (cdls != NULL) {
                if (libcfs_console_ratelimit &&
                    cdls->cdls_next != 0 &&     /* not first time ever */
                    time_before(jiffies, cdls->cdls_next)) {
                        /* skipping a console message */
                        cdls->cdls_count++;
                        if (tcd != NULL)
                                cfs_trace_put_tcd(tcd);
                        return 1;
                }

                if (time_after(jiffies, cdls->cdls_next +
                                        libcfs_console_max_delay +
                                        cfs_time_seconds(10))) {
                        /* last timeout was a long time ago */
                        cdls->cdls_delay /= libcfs_console_backoff * 4;
                } else {
                        cdls->cdls_delay *= libcfs_console_backoff;
                }

                if (cdls->cdls_delay < libcfs_console_min_delay)
                        cdls->cdls_delay = libcfs_console_min_delay;
                else if (cdls->cdls_delay > libcfs_console_max_delay)
                        cdls->cdls_delay = libcfs_console_max_delay;

                /* ensure cdls_next is never zero after it's been seen */
                cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
        }

        if (tcd) {
                cfs_print_to_console(&header, mask, string_buf, needed, file,
                                     msgdata->msg_fn);
                cfs_trace_put_tcd(tcd);
        } else {
                string_buf = cfs_trace_get_console_buffer();

                va_start(ap, format);
                needed = vscnprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                    format, ap);
                va_end(ap);

                cfs_print_to_console(&header, mask,
                                     string_buf, needed, file, msgdata->msg_fn);

                put_cpu();
        }

        if (cdls != NULL && cdls->cdls_count != 0) {
                string_buf = cfs_trace_get_console_buffer();

                needed = scnprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                   "Skipped %d previous similar message%s\n",
                                   cdls->cdls_count,
                                   (cdls->cdls_count > 1) ? "s" : "");

                /* Don't allow this to be printed to a TTY */
                cfs_print_to_console(&header, mask & ~D_TTY, string_buf,
                                     needed, file, msgdata->msg_fn);

                put_cpu();
                cdls->cdls_count = 0;
        }

        return 0;
}
EXPORT_SYMBOL(libcfs_debug_msg);
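
/*
 * A minimal usage sketch (not compiled): callers normally reach
 * libcfs_debug_msg() through the CDEBUG()/CERROR() macros, which declare
 * the msgdata for the current file/function/line.  This assumes the
 * LIBCFS_DEBUG_MSG_DATA_DECL() helper from the libcfs debug headers;
 * the function name below is hypothetical.
 */
#if 0
static void example_logging(void)
{
        /* no rate limiting (NULL cdls), D_WARNING mask */
        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_WARNING, NULL);

        /* format must end in a newline, as checked above */
        libcfs_debug_msg(&msgdata, "example message: rc = %d\n", -22);
}
#endif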

void
cfs_trace_assertion_failed(const char *str,
                           struct libcfs_debug_msg_data *msgdata)
{
        struct ptldebug_header hdr;

        libcfs_panic_in_progress = 1;
        libcfs_catastrophe = 1;
        smp_mb();

        cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());

        cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
                             msgdata->msg_file, msgdata->msg_fn);

        panic("Lustre debug assertion failure\n");

        /* not reached */
}

static void
panic_collect_pages(struct page_collection *pc)
{
        /* Do the collect_pages job on a single CPU: assumes that all other
         * CPUs have been stopped during a panic.  If this isn't true for
         * some arch, this will have to be implemented separately in each
         * arch.  */
        int                        i;
        int                        j;
        struct cfs_trace_cpu_data *tcd;

        INIT_LIST_HEAD(&pc->pc_pages);

        cfs_tcd_for_each(tcd, i, j) {
                list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                tcd->tcd_cur_pages = 0;

                if (pc->pc_want_daemon_pages) {
                        list_splice_init(&tcd->tcd_daemon_pages,
                                                &pc->pc_pages);
                        tcd->tcd_cur_daemon_pages = 0;
                }
        }
}

static void collect_pages_on_all_cpus(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                        tcd->tcd_cur_pages = 0;
                        if (pc->pc_want_daemon_pages) {
                                list_splice_init(&tcd->tcd_daemon_pages,
                                                        &pc->pc_pages);
                                tcd->tcd_cur_daemon_pages = 0;
                        }
                }
        }
}

static void collect_pages(struct page_collection *pc)
{
        INIT_LIST_HEAD(&pc->pc_pages);

        if (libcfs_panic_in_progress)
                panic_collect_pages(pc);
        else
                collect_pages_on_all_cpus(pc);
}

static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        struct list_head *cur_head;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        cur_head = tcd->tcd_pages.next;

                        list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
                                                 linkage) {

                                __LASSERT_TAGE_INVARIANT(tage);

                                if (tage->cpu != cpu || tage->type != i)
                                        continue;

                                cfs_tage_to_tail(tage, cur_head);
                                tcd->tcd_cur_pages++;
                        }
                }
        }
}

static void put_pages_back(struct page_collection *pc)
{
        if (!libcfs_panic_in_progress)
                put_pages_back_on_all_cpus(pc);
}

/* Add pages to a per-cpu debug daemon ringbuffer.  This buffer makes sure
 * that we have a good amount of data at all times for dumping during an
 * LBUG, even if we have been steadily writing (and otherwise discarding)
 * pages via the debug daemon. */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct cfs_trace_cpu_data *tcd)
{
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
                __LASSERT_TAGE_INVARIANT(tage);

                if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
                        continue;

                cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
                tcd->tcd_cur_daemon_pages++;

                if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
                        struct cfs_trace_page *victim;

                        __LASSERT(!list_empty(&tcd->tcd_daemon_pages));
                        victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);

                        __LASSERT_TAGE_INVARIANT(victim);

                        list_del(&victim->linkage);
                        cfs_tage_free(victim);
                        tcd->tcd_cur_daemon_pages--;
                }
        }
}

static void put_pages_on_daemon_list(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu)
                        put_pages_on_tcd_daemon_list(pc, tcd);
        }
}

void cfs_trace_debug_print(void)
{
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                char *p, *file, *fn;
                struct page *page;

                __LASSERT_TAGE_INVARIANT(tage);

                page = tage->page;
                p = page_address(page);
                while (p < ((char *)page_address(page) + tage->used)) {
                        struct ptldebug_header *hdr;
                        int len;

                        hdr = (void *)p;
                        p += sizeof(*hdr);
                        file = p;
                        p += strlen(file) + 1;
                        fn = p;
                        p += strlen(fn) + 1;
                        len = hdr->ph_len - (int)(p - (char *)hdr);

                        cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);

                        p += len;
                }

                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
}

int cfs_tracefile_dump_all_pages(char *filename)
{
        struct page_collection  pc;
        struct file             *filp;
        struct cfs_trace_page   *tage;
        struct cfs_trace_page   *tmp;
        char                    *buf;
        int rc;

        down_write(&cfs_tracefile_sem);

        filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
        if (IS_ERR(filp)) {
                rc = PTR_ERR(filp);
                filp = NULL;
                pr_err("LustreError: can't open %s for dump: rc = %d\n",
                       filename, rc);
                goto out;
        }

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        if (list_empty(&pc.pc_pages)) {
                rc = 0;
                goto close;
        }

        /* OK, for now just write the pages.  In the future we'll build
         * iobufs with the pages and call generic_direct_IO() */
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                buf = kmap(tage->page);
                rc = cfs_kernel_write(filp, buf, tage->used, &filp->f_pos);
                kunmap(tage->page);
                if (rc != (int)tage->used) {
                        pr_warn("Lustre: wanted to write %u but wrote %d\n",
                                tage->used, rc);
                        put_pages_back(&pc);
                        __LASSERT(list_empty(&pc.pc_pages));
                        break;
                }
                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }

        rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
        if (rc)
                pr_err("LustreError: sync returns: rc = %d\n", rc);
close:
        filp_close(filp, NULL);
out:
        up_write(&cfs_tracefile_sem);
        return rc;
}

void cfs_trace_flush_pages(void)
{
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
}

int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
                            const char __user *usr_buffer, int usr_buffer_nob)
{
        int nob;

        if (usr_buffer_nob > knl_buffer_nob)
                return -EOVERFLOW;

        if (copy_from_user(knl_buffer, usr_buffer, usr_buffer_nob))
                return -EFAULT;

        nob = strnlen(knl_buffer, usr_buffer_nob);
        while (--nob >= 0)                      /* strip trailing whitespace */
                if (!isspace(knl_buffer[nob]))
                        break;

        if (nob < 0)                            /* empty string */
                return -EINVAL;

        if (nob == knl_buffer_nob)              /* no space to terminate */
                return -EOVERFLOW;

        knl_buffer[nob + 1] = 0;                /* terminate */
        return 0;
}
EXPORT_SYMBOL(cfs_trace_copyin_string);

int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
                             const char *knl_buffer, char *append)
{
        /* NB if 'append' != NULL, it's a single character to append to the
         * copied-out string - usually "\n" for /proc entries and "" (i.e. a
         * terminating zero byte) for sysctl entries */
        int nob = strlen(knl_buffer);

        if (nob > usr_buffer_nob)
                nob = usr_buffer_nob;

        if (copy_to_user(usr_buffer, knl_buffer, nob))
                return -EFAULT;

        if (append != NULL && nob < usr_buffer_nob) {
                if (copy_to_user(usr_buffer + nob, append, 1))
                        return -EFAULT;

                nob++;
        }

        return nob;
}
EXPORT_SYMBOL(cfs_trace_copyout_string);
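
/*
 * A minimal usage sketch (not compiled) for the two copy helpers above,
 * e.g. from a hypothetical ioctl or /proc handler.  The function name,
 * buffer name and size here are illustrative only.
 */
#if 0
static int example_copy_helpers(char __user *ubuf, int unob)
{
        char kbuf[64];
        int rc;

        /* copy in, strip trailing whitespace, NUL-terminate */
        rc = cfs_trace_copyin_string(kbuf, sizeof(kbuf), ubuf, unob);
        if (rc)
                return rc;      /* -EOVERFLOW, -EFAULT or -EINVAL */

        /* copy back out with a trailing newline appended */
        return cfs_trace_copyout_string(ubuf, unob, kbuf, "\n");
}
#endif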

int cfs_trace_allocate_string_buffer(char **str, int nob)
{
        if (nob > 2 * PAGE_SIZE)        /* string must be "sensible" */
                return -EINVAL;

        *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
        if (*str == NULL)
                return -ENOMEM;

        return 0;
}

int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
{
        char *str;
        int rc;

        rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
        if (rc != 0)
                return rc;

        rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
                                     usr_str, usr_str_nob);
        if (rc != 0)
                goto out;

        if (str[0] != '/') {
                rc = -EINVAL;
                goto out;
        }
        rc = cfs_tracefile_dump_all_pages(str);
out:
        kfree(str);
        return rc;
}

int cfs_trace_daemon_command(char *str)
{
        int rc = 0;

        down_write(&cfs_tracefile_sem);

        if (strcmp(str, "stop") == 0) {
                up_write(&cfs_tracefile_sem);
                cfs_trace_stop_thread();
                down_write(&cfs_tracefile_sem);
                memset(cfs_tracefile, 0, sizeof(cfs_tracefile));

        } else if (strncmp(str, "size=", 5) == 0) {
                unsigned long tmp;

                rc = kstrtoul(str + 5, 10, &tmp);
                if (!rc) {
                        if (tmp < 10 || tmp > 20480)
                                cfs_tracefile_size = CFS_TRACEFILE_SIZE;
                        else
                                cfs_tracefile_size = tmp << 20;
                }
        } else if (strlen(str) >= sizeof(cfs_tracefile)) {
                rc = -ENAMETOOLONG;
        } else if (str[0] != '/') {
                rc = -EINVAL;
        } else {
                strcpy(cfs_tracefile, str);

                pr_info("Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n",
                        cfs_tracefile, (long)(cfs_tracefile_size >> 10));

                cfs_trace_start_thread();
        }

        up_write(&cfs_tracefile_sem);
        return rc;
}
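
/*
 * For illustration, the commands accepted above: "stop" shuts the daemon
 * down and clears the trace file name; "size=N" sets the maximum trace
 * file size to N MB (values outside 10..20480 fall back to
 * CFS_TRACEFILE_SIZE); any other string starting with '/' is taken as the
 * absolute path of the trace file and starts the daemon thread.
 */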

int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
{
        char *str;
        int rc;

        rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
        if (rc != 0)
                return rc;

        rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
                                     usr_str, usr_str_nob);
        if (rc == 0)
                rc = cfs_trace_daemon_command(str);

        kfree(str);
        return rc;
}

int cfs_trace_set_debug_mb(int mb)
{
        int i;
        int j;
        unsigned long pages;
        unsigned long total_mb = (cfs_totalram_pages() >> (20 - PAGE_SHIFT));
        unsigned long limit = max_t(unsigned long, 512, (total_mb * 4) / 5);
        struct cfs_trace_cpu_data *tcd;

        if (mb < num_possible_cpus()) {
                pr_warn("Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n",
                        mb, num_possible_cpus());
                mb = num_possible_cpus();
        }

        if (mb > limit) {
                pr_warn("Lustre: %d MB is too large for debug buffer size, setting it to %lu MB.\n",
                        mb, limit);
                mb = limit;
        }

        mb /= num_possible_cpus();
        pages = mb << (20 - PAGE_SHIFT);

        down_write(&cfs_tracefile_sem);

        cfs_tcd_for_each(tcd, i, j)
                tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

        up_write(&cfs_tracefile_sem);

        return mb;
}
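
/*
 * Worked example (hypothetical machine): with 8 possible CPUs, 16384 MB
 * of RAM and 4 KB pages, limit = max(512, 16384 * 4 / 5) = 13107 MB.  A
 * request of mb = 1024 passes both clamps, so each CPU gets 1024 / 8 =
 * 128 MB, i.e. pages = 128 << (20 - 12) = 32768 pages, of which each TCD
 * type receives its tcd_pages_factor percentage (80/10/10 by default,
 * see pages_factor[] below).
 */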

int cfs_trace_get_debug_mb(void)
{
        int i;
        int j;
        struct cfs_trace_cpu_data *tcd;
        int total_pages = 0;

        down_read(&cfs_tracefile_sem);

        cfs_tcd_for_each(tcd, i, j)
                total_pages += tcd->tcd_max_pages;

        up_read(&cfs_tracefile_sem);

        return (total_pages >> (20 - PAGE_SHIFT)) + 1;
}

static int tracefiled(void *arg)
{
        struct page_collection pc;
        struct tracefiled_ctl *tctl = arg;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        struct file *filp;
        char *buf;
        int last_loop = 0;
        int rc;

        /* we're started late enough that we pick up init's fs context */
        /* this is so broken in uml?  what on earth is going on? */

        complete(&tctl->tctl_start);

        while (1) {
                wait_queue_entry_t __wait;

                pc.pc_want_daemon_pages = 0;
                collect_pages(&pc);
                if (list_empty(&pc.pc_pages))
                        goto end_loop;

                filp = NULL;
                down_read(&cfs_tracefile_sem);
                if (cfs_tracefile[0] != 0) {
                        filp = filp_open(cfs_tracefile,
                                         O_CREAT | O_RDWR | O_LARGEFILE,
                                         0600);
                        if (IS_ERR(filp)) {
                                rc = PTR_ERR(filp);
                                filp = NULL;
                                pr_warn("Lustre: couldn't open %s: rc = %d\n",
                                        cfs_tracefile, rc);
                        }
                }
                up_read(&cfs_tracefile_sem);
                if (filp == NULL) {
                        put_pages_on_daemon_list(&pc);
                        __LASSERT(list_empty(&pc.pc_pages));
                        goto end_loop;
                }

                list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                        struct dentry *de = file_dentry(filp);
                        static loff_t f_pos;

                        __LASSERT_TAGE_INVARIANT(tage);

                        if (f_pos >= (off_t)cfs_tracefile_size)
                                f_pos = 0;
                        else if (f_pos > i_size_read(de->d_inode))
                                f_pos = i_size_read(de->d_inode);

                        buf = kmap(tage->page);
                        rc = cfs_kernel_write(filp, buf, tage->used, &f_pos);
                        kunmap(tage->page);
                        if (rc != (int)tage->used) {
                                pr_warn("Lustre: wanted to write %u but wrote %d\n",
                                        tage->used, rc);
                                put_pages_back(&pc);
                                __LASSERT(list_empty(&pc.pc_pages));
                                break;
                        }
                }

                filp_close(filp, NULL);
                put_pages_on_daemon_list(&pc);
                if (!list_empty(&pc.pc_pages)) {
                        int i;

                        pr_alert("Lustre: trace pages aren't empty\n");
                        pr_err("Lustre: total cpus(%d): ", num_possible_cpus());
                        for (i = 0; i < num_possible_cpus(); i++)
                                if (cpu_online(i))
                                        pr_cont("%d(on) ", i);
                                else
                                        pr_cont("%d(off) ", i);
                        pr_cont("\n");

                        i = 0;
                        list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
                                                 linkage)
                                pr_err("Lustre: page %d belongs to cpu %d\n",
                                       ++i, tage->cpu);
                        pr_err("Lustre: There are %d pages unwritten\n", i);
                }
                __LASSERT(list_empty(&pc.pc_pages));
end_loop:
                if (atomic_read(&tctl->tctl_shutdown)) {
                        if (last_loop == 0) {
                                last_loop = 1;
                                continue;
                        } else {
                                break;
                        }
                }
                init_waitqueue_entry(&__wait, current);
                add_wait_queue(&tctl->tctl_waitq, &__wait);
                schedule_timeout_interruptible(cfs_time_seconds(1));
                remove_wait_queue(&tctl->tctl_waitq, &__wait);
        }
        complete(&tctl->tctl_stop);
        return 0;
}

int cfs_trace_start_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;
        int rc = 0;

        mutex_lock(&cfs_trace_thread_mutex);
        if (thread_running)
                goto out;

        init_completion(&tctl->tctl_start);
        init_completion(&tctl->tctl_stop);
        init_waitqueue_head(&tctl->tctl_waitq);
        atomic_set(&tctl->tctl_shutdown, 0);

        if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
                rc = -ECHILD;
                goto out;
        }

        wait_for_completion(&tctl->tctl_start);
        thread_running = 1;
out:
        mutex_unlock(&cfs_trace_thread_mutex);
        return rc;
}

void cfs_trace_stop_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;

        mutex_lock(&cfs_trace_thread_mutex);
        if (thread_running) {
                pr_info("Lustre: shutting down debug daemon thread...\n");
                atomic_set(&tctl->tctl_shutdown, 1);
                wait_for_completion(&tctl->tctl_stop);
                thread_running = 0;
        }
        mutex_unlock(&cfs_trace_thread_mutex);
}

/* percentage shares of the total debug memory for each TCD type */
static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = {
        80, /* 80% pages for CFS_TCD_TYPE_PROC */
        10, /* 10% pages for CFS_TCD_TYPE_SOFTIRQ */
        10  /* 10% pages for CFS_TCD_TYPE_IRQ */
};

int cfs_tracefile_init(int max_pages)
{
        struct cfs_trace_cpu_data *tcd;
        int i;
        int j;

        /* initialize trace_data */
        memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
        for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
                cfs_trace_data[i] =
                        kmalloc_array(num_possible_cpus(),
                                      sizeof(union cfs_trace_data_union),
                                      GFP_KERNEL);
                if (!cfs_trace_data[i])
                        goto out_trace_data;
        }

        /* initialize arch-related info */
        cfs_tcd_for_each(tcd, i, j) {
                int factor = pages_factor[i];

                spin_lock_init(&tcd->tcd_lock);
                tcd->tcd_pages_factor = factor;
                tcd->tcd_type = i;
                tcd->tcd_cpu = j;

                INIT_LIST_HEAD(&tcd->tcd_pages);
                INIT_LIST_HEAD(&tcd->tcd_stock_pages);
                INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
                tcd->tcd_cur_pages = 0;
                tcd->tcd_cur_stock_pages = 0;
                tcd->tcd_cur_daemon_pages = 0;
                tcd->tcd_max_pages = (max_pages * factor) / 100;
                LASSERT(tcd->tcd_max_pages > 0);
                tcd->tcd_shutting_down = 0;
        }

        for (i = 0; i < num_possible_cpus(); i++)
                for (j = 0; j < 3; j++) {
                        cfs_trace_console_buffers[i][j] =
                                kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                        GFP_KERNEL);
                        if (!cfs_trace_console_buffers[i][j])
                                goto out_buffers;
                }

        return 0;

out_buffers:
        for (i = 0; i < num_possible_cpus(); i++)
                for (j = 0; j < 3; j++) {
                        kfree(cfs_trace_console_buffers[i][j]);
                        cfs_trace_console_buffers[i][j] = NULL;
                }
out_trace_data:
        for (i = 0; cfs_trace_data[i]; i++) {
                kfree(cfs_trace_data[i]);
                cfs_trace_data[i] = NULL;
        }
        pr_err("lnet: Not enough memory\n");
        return -ENOMEM;
}

static void trace_cleanup_on_all_cpus(void)
{
        struct cfs_trace_cpu_data *tcd;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        if (!tcd->tcd_pages_factor)
                                /* Not initialised */
                                continue;
                        tcd->tcd_shutting_down = 1;

                        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
                                                 linkage) {
                                __LASSERT_TAGE_INVARIANT(tage);

                                list_del(&tage->linkage);
                                cfs_tage_free(tage);
                        }
                        tcd->tcd_cur_pages = 0;
                }
        }
}

static void cfs_trace_cleanup(void)
{
        struct page_collection pc;
        int i;
        int j;

        INIT_LIST_HEAD(&pc.pc_pages);

        trace_cleanup_on_all_cpus();

        for (i = 0; i < num_possible_cpus(); i++)
                for (j = 0; j < 3; j++) {
                        kfree(cfs_trace_console_buffers[i][j]);
                        cfs_trace_console_buffers[i][j] = NULL;
                }

        for (i = 0; cfs_trace_data[i]; i++) {
                kfree(cfs_trace_data[i]);
                cfs_trace_data[i] = NULL;
        }
}

void cfs_tracefile_exit(void)
{
        cfs_trace_stop_thread();
        cfs_trace_cleanup();
}
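
/*
 * A minimal lifecycle sketch (not compiled): module init/exit code is
 * expected to pair cfs_tracefile_init() with cfs_tracefile_exit().  The
 * function names and the page budget here are illustrative only.
 */
#if 0
static int __init example_module_init(void)
{
        /* e.g. give the tracing code a 5 MB page budget */
        return cfs_tracefile_init(5 << (20 - PAGE_SHIFT));
}

static void __exit example_module_exit(void)
{
        cfs_tracefile_exit();   /* stops the daemon, frees all pages */
}
#endif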